diff --git "a/1798.jsonl" "b/1798.jsonl" new file mode 100644--- /dev/null +++ "b/1798.jsonl" @@ -0,0 +1,498 @@ +{"seq_id":"399778129","text":"#!/usr/bin/python3.7\r\n\r\nimport sys, random\r\nfrom typing import List\r\n\r\n\r\nstdin = sys.stdin\r\nstdout = sys.stdout\r\n\r\ndef set_stdin(buffer):\r\n global stdin\r\n stdin = buffer\r\n\r\ndef set_stdout(buffer):\r\n global stdout\r\n stdout = buffer\r\n\r\n\r\nclass RunException(Exception):\r\n def __init__(self, *args, **kwargs):\r\n return super().__init__(*args, **kwargs)\r\n\r\nclass Memory:\r\n def __init__(self, size):\r\n self.size = size\r\n self.mem = [0 for _ in range(size)]\r\n \r\n\r\n def __getitem__(self, idx):\r\n if idx >= self.size:\r\n raise RunException(\"Memory Error\")\r\n return self.mem[idx]\r\n\r\n def __setitem__(self, idx, value):\r\n self.mem[idx] = value\r\n\r\n def setip(self, ip):\r\n self.mem[-1] = ip\r\n \r\n def ip(self):\r\n return self[-1]\r\n\r\n def nextip(self, size=1):\r\n self.setip(self.ip() + size)\r\n\r\n\r\nclass CommandFetcher:\r\n def __init__(self, code):\r\n self.code = code\r\n\r\n def next(self, ip):\r\n try:\r\n cmd = self.code[ip:ip+2]\r\n if cmd == [3,1]:\r\n args, size = self.parseargs(ip+2, 2)\r\n return SetCommand(args, size+2)\r\n if cmd == [5,4]:\r\n args, size = self.parseargs(ip+2, 1)\r\n return EchoCharCommand(args, size+2)\r\n if cmd == [4,4]:\r\n args, size = self.parseargs(ip+2, 1)\r\n return EchoNumCommand(args, size+2)\r\n if cmd == [2,4]:\r\n args, size = self.parseargs(ip+2, 1)\r\n return InputCommand(args, size+2)\r\n if cmd == [5,7]:\r\n args, size = self.parseargs(ip+2, 2)\r\n return JgzCommand(args, size+2)\r\n if cmd == [4,6]:\r\n args, size = self.parseargs(ip+2, 1)\r\n return PointerCommand(args, size+2)\r\n if cmd == [8,3]:\r\n args, size = self.parseargs(ip+2, 1)\r\n return RandomCommand(args, size+2)\r\n if cmd == [8,8]:\r\n return HltCommand([],2)\r\n if cmd == [7,8]:\r\n flag = self.code[ip+2]\r\n args, size = self.parseargs(ip+3, 2)\r\n\r\n if flag == 1:\r\n func = lambda a,b:a+b\r\n elif flag == 2:\r\n func = lambda a,b:a-b\r\n elif flag == 8:\r\n func = lambda a,b:a*b\r\n elif flag == 7:\r\n func = lambda a,b:a//b\r\n else:\r\n func = lambda a,b:a+b\r\n return CalcCommand(args, size+3, func)\r\n\r\n except IndexError:\r\n raise RunException(\"Invalid Operation\")\r\n raise RunException(\"Invalid Command\")\r\n\r\n\r\n\r\n def isend(self, ip):\r\n return len(self.code) <= ip;\r\n\r\n def readint(self, ip):\r\n try:\r\n o = \"\"\r\n while not (o.endswith(\"88\") or o.endswith(\"87\")):\r\n o += str(self.code[ip])\r\n ip+=1\r\n r = int(o[:-2], 8)\r\n if o.endswith(\"87\"):\r\n r = -r;\r\n val = r\r\n except:\r\n raise RunException(\"Invalid Operation\")\r\n return (val, len(o))\r\n \r\n def parseargs(self, ip, count):\r\n args = []\r\n nip = ip\r\n for i in range(count):\r\n arg, size = self.readint(nip)\r\n args.append(arg)\r\n nip += size\r\n return (args, nip-ip)\r\n\r\n\r\n\r\n\r\nclass Command:\r\n def __init__(self, args, size):\r\n self.args = args\r\n self._size = size\r\n\r\n def __repr__(self):\r\n return \"%s(%s)\" % (type(self).__name__, str(self.args))\r\n\r\n def size(self):\r\n return self._size\r\n\r\n def execute(self, mem: Memory):\r\n return 2\r\n\r\n\r\nclass SetCommand(Command):\r\n def execute(self, mem: Memory):\r\n mem[self.args[0]] = self.args[1]\r\n return 1\r\n\r\nclass EchoCharCommand(Command):\r\n def execute(self, mem: Memory):\r\n stdout.write(chr(mem[self.args[0]]))\r\n return 1\r\n\r\nclass EchoNumCommand(Command):\r\n def 
execute(self, mem: Memory):\r\n stdout.write(str(mem[self.args[0]]))\r\n return 1\r\n\r\nclass InputCommand(Command):\r\n def execute(self, mem: Memory):\r\n buf = stdin.readline()\r\n for i in range(len(buf)):\r\n mem[self.args[0] + i] = ord(buf[i])\r\n return 1\r\n\r\nclass CalcCommand(Command):\r\n def __init__(self, args, size, func=lambda a,b:0):\r\n super().__init__(args,size)\r\n self.func = func\r\n def execute(self, mem: Memory):\r\n mem[self.args[0]] = self.func(mem[self.args[0]], mem[self.args[1]])\r\n return 1\r\n\r\nclass JgzCommand(Command):\r\n def execute(self, mem: Memory):\r\n if mem[self.args[0]] > 0:\r\n mem.setip(self.args[1])\r\n return 1\r\n\r\nclass PointerCommand(Command):\r\n def execute(self, mem: Memory):\r\n mem[self.args[0]] = mem[mem[self.args[0]]]\r\n return 1\r\n\r\nclass RandomCommand(Command):\r\n def execute(self, mem: Memory):\r\n mem[self.args[0]] = random.randint(0,1)\r\n return 1\r\n\r\nclass HltCommand(Command):\r\n def execute(self, mem: Memory):\r\n return 0\r\n\r\n\r\n\r\n\r\nMEM_SIZE = 0xffff\r\n\r\ndef preprocess(raw: str):\r\n output = []\r\n \r\n for c in raw:\r\n if c == '\\n' or c == ' ' or c == '\\t' or c == '\\r':\r\n continue\r\n t = ord(c) - 0x1f638\r\n if t < 0 or t > 8:\r\n return None\r\n output.append(t)\r\n\r\n return output\r\n\r\n\r\ndef run(code: List[int], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stdout):\r\n\r\n set_stdin(stdin)\r\n set_stdout(stdout)\r\n\r\n\r\n mem = Memory(MEM_SIZE)\r\n \r\n mem.setip(0)\r\n pos=0\r\n\r\n cf = CommandFetcher(code)\r\n cmds : List[Command] = []\r\n \r\n try:\r\n while not cf.isend(pos):\r\n cmd = cf.next(pos)\r\n pos += cmd.size()\r\n cmds.append(cmd)\r\n\r\n \r\n while mem.ip() < len(cmds):\r\n cmd = cmds[mem.ip()]\r\n msg = cmd.execute(mem)\r\n mem.nextip()\r\n if msg == 0:\r\n break\r\n except RunException as e:\r\n stderr.write(\"\\n\"+str(e))\r\n return False\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n if len(sys.argv) == 1:\r\n raw = input()\r\n elif len(sys.argv) == 2:\r\n try:\r\n filename = sys.argv[1]\r\n with open(filename, 'rb') as fp:\r\n data = fp.read()\r\n raw = data.decode('utf-8')\r\n except:\r\n print(\"No such file '%s' or unreadable.\" % sys.argv[1])\r\n exit(1)\r\n else:\r\n print(\"Usage: {} [FILENAME]\".format(sys.argv[0]))\r\n\r\n\r\n code = preprocess(raw)\r\n if code == None:\r\n print(\"Invalid Character\")\r\n exit(1)\r\n run(code)\r\n","repo_name":"g0pher98/CTF","sub_path":"2019/holyshield/misc/unicat/unicat.py","file_name":"unicat.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"23944326075","text":"from day15 import Computer\n\n\ndef run_springboard_program(asc):\n inputs = [ord(char) for char in asc]\n with open('day21.txt') as infile:\n program = list(map(int, infile.read().split(',')))\n computer = Computer(program)\n computer.set_input(inputs)\n output = list(computer.run_program())\n try:\n display = ''.join(chr(code) for code in output)\n except ValueError:\n display = ''.join(chr(code) for code in output[:-1]) + str(output[-1])\n return display\n\n\ndef run_with_files(input_file='day21_results/input.txt',\n output_file='day21_results/output.txt'):\n with open(input_file) as infile:\n program = infile.read()\n display = run_springboard_program(program)\n with open(output_file, 'w') as outfile:\n outfile.write(display)\n\n\nif __name__ == '__main__':\n 
run_with_files()\n","repo_name":"moink/AoC2019","sub_path":"day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39134957827","text":"# A freshman needs friends\nfrom collections import deque\n\nN, M = map(int, input().split())\ngraph = [list(input()) for _ in range(N)]\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\ncnt = 0\nfor i in range(N):\n    for j in range(M):\n        if graph[i][j] == \"I\":\n            start_x = i\n            start_y = j\n\n# BFS\nq = deque()\nq.append((start_x, start_y))\nvisited = [[False] * M for _ in range(N)]\nvisited[start_x][start_y] = True\n\nwhile q:\n    x, y = q.popleft()\n    if graph[x][y] == \"P\":\n        cnt += 1\n    for i in range(4):\n        nx = x + dx[i]\n        ny = y + dy[i]\n        # Note: easy to get the logic wrong here (must not write graph[nx][ny] == \"O\")\n        if 0 <= nx < N and 0 <= ny < M and graph[nx][ny] != \"X\" and not visited[nx][ny]:\n            q.append((nx, ny))\n            visited[nx][ny] = True\n\nprint(cnt if cnt != 0 else \"TT\")\n\n\"\"\"\n- Difficulty: Silver 2\n- Category: BFS\n\"\"\"\n","repo_name":"yg-moon/problem-solving","sub_path":"baekjoon/class/3/3++/21736.py","file_name":"21736.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"3054038850","text":"import os\nimport pathlib\nfrom datetime import datetime\n\nimport pandas as pd\n\nfrom gen_signal import export_signal\nfrom load_data import load_data, adjust_time\n\n\n# Mix two signals together\n\n# Script params\nraw_path = os.path.dirname(os.path.realpath(__file__))\npath = pathlib.PurePosixPath(raw_path)\ndata_a_path = path / 'signals' / 'signal_a.txt'\ndata_b_path = path / 'signals' / 'signal_b.txt'\nexport_path = path / 'signals' / 'signal_c.txt'\n\n\ndef mix_signals(df_a, df_b):\n    if len(df_b) > len(df_a):\n        df_a, df_b = df_b, df_a\n\n    df_comb = pd.concat([df_a, df_b], ignore_index=True, axis=1)\n    df_comb = df_comb.fillna(0)\n    series_comb = df_comb[0] + df_comb[1]\n    df = pd.DataFrame(series_comb)\n    df.index.name = 'Time'\n    df.rename(columns={0: 'Values'}, inplace=True)\n\n    return df\n\ndef run():\n    df_a = load_data(data_a_path)\n    df_b = load_data(data_b_path)\n    current_time = datetime.utcnow()\n    adjust_time(current_time, df_a)\n    adjust_time(current_time, df_b)\n    df = mix_signals(df_a, df_b)\n    export_signal(export_path, df)\n\nif __name__ == '__main__':\n    run()","repo_name":"fstakem/pywell","sub_path":"scripts/gen_data/mix_signals.py","file_name":"mix_signals.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26469324019","text":"from django.urls import path\n\nfrom .views import IndexPageView, SupportBetterplacePageView, BlogNewsPageView, OurTeamPageView, JobsPageView, PressPageView, CategoryPageView, LocationPageView, DonationOverviewPageView, DonationReceiptPageView, RecuringDonationsPageView, GallaryPageView, CeremonialActivitiesPageView, EmailAddressPageView \nurlpatterns= [path('', IndexPageView.as_view(), name='home'),\n              path('about/support-betterplace', SupportBetterplacePageView.as_view(), name='support-betterplace'),\n              path('about/blog-news', BlogNewsPageView.as_view(), name='blog-news'),\n              path('about/our-team', OurTeamPageView.as_view(), name='our-team'),\n              path('about/jobs', JobsPageView.as_view(), name='jobs'),\n              path('about/press', PressPageView.as_view(), name='press'),\n              path('discover/category', CategoryPageView.as_view(), name='category'),\n              
path('discover/location', LocationPageView.as_view(), name='location'), \n path('donations/donation-overview', DonationOverviewPageView.as_view(), name='donation-overview'),\n path('donations/donation-receipt', DonationReceiptPageView.as_view(), name='donation-receipt'),\n path('donations/recuring-donations', RecuringDonationsPageView.as_view(), name='recuring-donations'),\n path('events/gallary', GallaryPageView.as_view(), name='gallary'),\n path('events/ceremonial-activities', CeremonialActivitiesPageView.as_view(), name='ceremonial-activities'),\n path('login/email-address', EmailAddressPageView.as_view(), name='email-address'), \n \n ]\n","repo_name":"InnocentGreen/Betterplace_project","sub_path":"Betterplace/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1646455937","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nimport requests\r\n\r\ndef format_resposta(clima): \t\r\n nome = clima['name']\r\n tempo = clima['weather'][0]['main']\r\n descricao = clima['weather'][0]['description']\r\n temperatura = clima['main']['temp']\r\n\r\n resposta = 'Cidade: %s \\nCondições: %s \\nDescrição: %s \\n Temperatura: %sºC' % (nome, tempo, descricao, temperatura)\r\n\r\n return resposta\r\n\r\ndef get_clima(cidade):\r\n clima_chave = '086e0a60948eae45a16937731465ffe5'\r\n url = 'http://api.openweathermap.org/data/2.5/weather'\r\n parametros = {'APPID' : clima_chave, 'q': cidade , 'units': 'metric'}\r\n resposta = requests.get(url, params = parametros)\r\n clima = resposta.json()\r\n\r\n label_info['text'] = format_resposta(clima)\r\n\r\ndef sair():\r\n janela_main.destroy()\r\n\r\njanela_main = Tk()\r\njanela_main.geometry(\"450x550\")\r\njanela_main.title(\"Clima e Tempo\")\r\n\r\nimg_back = PhotoImage(file = 'landscape.png')\r\n\r\nbackground = Label(janela_main, image = img_back)\r\nbackground.place(relwidth = '1', relheight = '1')\r\n\r\nmarca = Label(background, text='Software desenvolvido por:\\nLucas Eduardo Oliveira Rosa'\r\n\t\t\t\t,bg ='#2E8B57', fg = '#000000', font = 'Garamond 8')\r\nmarca.place(relx = '0.68', rely = '0.93')\r\n\r\nframe_geral = Frame(janela_main, bg = '#87CEFA')\r\nframe_geral.place(relwidth = '0.8', relheight = '0.8',relx = '0.1', rely = '0.1')\r\n\r\nlabel_titulo = Label(background, text = 'Clima e Tempo', font = 'Garamond 30',\r\n\t\t\t\t\tfg = '#FFFAFA', bg = '#87CEEB')\r\nlabel_titulo.place(rely = '0.01', relx = '0.23')\r\n\r\nframe_cidade = Label(frame_geral, bg = '#F5FFFA')\r\nframe_cidade.place(relwidth = '0.95', relheight = '0.1',relx = '0.025', rely = '0.1')\r\n\r\nlabel_cidade = Label(frame_cidade, text = 'Digite a cidade', fg = '#000000', bg = '#F5FFFA', font = 'Garamond')\r\nlabel_cidade.place(relx = '0.08', rely= '0.15')\r\n\r\nlabel_info = Label(frame_geral, fg = '#000000', bg = '#F5FFFA', font = 'Garamond 20')\r\nlabel_info.place(relheight ='0.7', relwidth = '0.95', relx = '0.025', rely = '0.25')\r\n\r\nentrada_cidade = Entry(frame_cidade, font = 'Garamond')\r\nentrada_cidade.place(relx = '0.5', rely = '0.10', relheight = '0.8', relwidth = '0.3')\r\n\r\nbt_ok = Button(frame_cidade, text = 'OK', command = lambda: get_clima(entrada_cidade.get()), font = 'Garamond')\r\nbt_ok.place(relx = '0.83', rely = '0.10', relheight = '0.8', relwidth = '0.15')\r\n\r\nbt_exit = Button(background, text = 'SAIR', command = sair, font = 'Garamond')\r\nbt_exit.place(relx = '0.35', rely = '0.915', relheight = '0.08', relwidth = 
'0.3')\r\n\r\njanela_main.mainloop()\r\n\r\n","repo_name":"eusoulucas/Python_diverso","sub_path":"TKINTER/api_clima.py","file_name":"api_clima.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33739398714","text":"import numpy as np\n\nfrom scipy.stats import gmean\nfrom sklearn.model_selection import StratifiedKFold\nfrom pcalibration.ivap import InductiveVennAbers\n\n\nclass CrossVennAbers:\n \"\"\"\n Cross Venn Abers Predictor (CVAP) algorithm\n \n Ref: https://arxiv.org/abs/1511.00213\n \"\"\"\n \n def __init__(self, k: int = 5):\n self.k = k\n self.calibrators = []\n \n def fit(self, X: np.ndarray, y: np.ndarray):\n \"\"\"\n Fit the model\n \n Args:\n X: input data\n y: target data\n \"\"\"\n\n kf = StratifiedKFold(n_splits=self.k)\n for train_index, _ in kf.split(X, y):\n X_train, y_train = X[train_index], y[train_index]\n \n ivap = InductiveVennAbers()\n ivap.fit(X_train, y_train)\n \n self.calibrators.append(ivap)\n \n def predict_proba(self, X: np.ndarray, info: bool = False) -> np.ndarray:\n \"\"\"\n Predict the calibrated probability\n \n Args:\n X: predicted scores\n info: whether to return additional information\n \n Returns:\n calibrated probability\n \"\"\"\n \n P0 = []\n P1 = []\n for k, calibrator in enumerate(self.calibrators):\n prob, i = calibrator.predict_proba(X, info=True)\n P0K = i[\"P0\"]\n P1K = i[\"P1\"]\n \n P0.append(P0K)\n P1.append(P1K)\n \n P0 = np.concatenate(P0, axis=1)\n P1 = np.concatenate(P1, axis=1)\n \n prob1 = gmean(P1, axis=1) / (gmean(1 - P0, axis=1) + gmean(P1, axis=1))\n prob0 = 1 - prob1\n prob = np.stack((prob0, prob1), axis=1)\n \n if not info:\n return prob\n \n return prob, dict(P0=P0, P1=P1)\n ","repo_name":"nutorbit/probability-calibration","sub_path":"pcalibration/cvap.py","file_name":"cvap.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11115417482","text":"# https://www.acmicpc.net/problem/1368\nfrom sys import stdin\nfrom heapq import heappush, heappop\nfrom collections import defaultdict\ninput = stdin.readline\n\n\ngraph = defaultdict(list)\nN = int(input())\nvis = [False] * (N+1)\n\nfor i in range(N):\n w = int(input())\n graph[N].append((i, w))\n graph[i].append((N, w))\n\nt = [list(map(int, input().split())) for _ in range(N)]\n\nfor i in range(N-1):\n for j in range(i+1, N):\n graph[i].append((j, t[i][j]))\n graph[j].append((i, t[i][j]))\n\nheap = [(0, N)]\nans = 0\nwhile heap:\n weight, u = heappop(heap)\n\n if vis[u]:\n continue\n\n vis[u] = True\n ans += weight\n\n for v, w in graph[u]:\n if not vis[v]:\n heappush(heap, (w, v))\n\nprint(ans)\n","repo_name":"dragonappear/problem-solving","sub_path":"mst/BOJ_물대기(프림).py","file_name":"BOJ_물대기(프림).py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"44033501625","text":"import sys\r\nimport math\r\n\r\nmin, max = map(int ,sys.stdin.readline().split())\r\n\r\ndata = [1] * (max - min + 1)\r\n\r\nfor i in range(2, (int(math.sqrt(max)) + 1)):\r\n \r\n num = i ** 2\r\n if min % num == 0: \r\n start = 0\r\n else :\r\n start = (num - (min % num)) # 4 -\r\n \r\n \r\n for j in range(start, len(data), num): \r\n data[j] = 0\r\n\r\n# print(data)\r\ncount = 0\r\nfor item in data :\r\n if item == 1 :\r\n count += 1\r\nprint(count)\r\n\r\n 
\r\n\r\n\r\n","repo_name":"KoEunseong/StudyAlgorithm","sub_path":"백준/Gold/1016. 제곱 ㄴㄴ 수/제곱 ㄴㄴ 수.py","file_name":"제곱 ㄴㄴ 수.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19414484286","text":"from pathlib import PurePath\n\ndef part1(l: list[int]) -> int:\n counts = {}\n lowest, highest = float('inf'), float('-inf')\n for num in l:\n counts[num] = counts.get(num, 0) + 1\n lowest = min(lowest, num)\n highest = max(highest, num)\n best = -1\n best_cost = float('inf')\n for pos in range(lowest, highest + 1):\n # calculate cost given position\n cost = sum(abs(k - pos) * v for k,v in counts.items())\n if cost < best_cost:\n best_cost = cost\n best = pos\n return sum(abs(k - best) * v for k,v in counts.items())\n\n\ndef part2(l: list[int]) -> int:\n counts = {}\n lowest, highest = float('inf'), float('-inf')\n for num in l:\n counts[num] = counts.get(num, 0) + 1\n lowest = min(lowest, num)\n highest = max(highest, num)\n best = -1\n best_cost = float('inf')\n cache = {}\n for pos in range(lowest, highest + 1):\n # calculate cost given position\n cost = 0\n for k,v in counts.items():\n diff = abs(k - pos)\n if diff in cache:\n single_cost = cache[diff]\n else:\n single_cost = sum(range(diff + 1))\n cache[diff] = single_cost\n cost += single_cost * v\n\n if cost < best_cost:\n best_cost = cost\n best = pos\n\n return best_cost\n\n\ndef test() -> None:\n l = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]\n assert part1(l) == 37\n assert part2(l) == 168\n\nif __name__ == '__main__':\n with open(f'./data/input-{PurePath(__file__).stem}.txt', 'r') as f:\n l = list(map(int, f.read().split(',')))\n print(\"Part 1:\", part1(l))\n print(\"Part 2:\", part2(l))\n","repo_name":"nhtsai/advent-of-code","sub_path":"2021/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39747075247","text":"import cv2\nimport os\nimport subprocess\n\nvidcap = cv2.VideoCapture('BlackManInAWhiteWorld.mp4')\nsuccess,image = vidcap.read()\ncount = 0\nos.makedirs(\"video_frames\")\n\ncommand = \"ffmpeg -i BlackManInAWhiteWorld.mp4 -vn -acodec copy audio.aac\"\nsubprocess.call(command, shell=True)\n\nwhile success:\n\tcv2.imshow('ImageWindow',image)\n\tcv2.imwrite(\"video_frames/frame\" + str(count) + \".jpg\", image) # save frame as JPEG file\n\tsuccess,image = vidcap.read()\n\tcount += 1\n\nfps = vidcap.get(cv2.CAP_PROP_FPS)\ncommand = \"ffmpeg -r \" + str(fps) + \" -i video_frames/frame%d.jpg -i audio.aac -y combined_ouput.mp4\"\nsubprocess.call(command, shell=True)\n","repo_name":"ebeets96/CS534-Project","sub_path":"video_processing.py","file_name":"video_processing.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30903560436","text":"#!/usr/bin/python3\n\nimport sys, re\nimport base40, c64text\n\nclass ArgExc(Exception):\n pass\n\nLINE_LENGTH = 32\nOFFSET_WIDTH = 6\n\nclass Diff:\n\n def __init__(self, lft_file, rgt_file, **kwargs):\n self.lft_file = lft_file\n self.rgt_file = rgt_file\n self.line_length = kwargs.get(\"line_length\", LINE_LENGTH)\n self.offset_width = kwargs.get(\"offset_width\", OFFSET_WIDTH)\n self.differences = bytearray(self.line_length)\n self.lft_line = bytearray(self.line_length)\n self.rgt_line = bytearray(self.line_length)\n self.print_c64 = kwargs.get(\"print_c64\", None)\n 
self.print_base40 = kwargs.get(\"print_base40\", None)\n self.print_base40_odd = kwargs.get(\"print_base40_odd\", None)\n self.print_ascii = kwargs.get(\"print_ascii\", None)\n\n def diff(self, diff_file):\n line_number = 1\n line_offset = 0\n prev_different = False\n diff_connector = \".\" * self.offset_width\n while True:\n lft_len = self.lft_file.readinto(self.lft_line)\n rgt_len = self.rgt_file.readinto(self.rgt_line)\n if lft_len == 0 and rgt_len == 0:\n break\n different = self._find_differences(lft_len, rgt_len)\n if different:\n if prev_different:\n print(diff_connector, file=diff_file)\n else:\n print(file=diff_file)\n self._print_differences(diff_file, line_offset, lft_len, rgt_len)\n prev_different = different\n line_number += 1\n line_offset += self.line_length\n\n def _find_differences(self, lft_len, rgt_len):\n min_len = min((lft_len, rgt_len))\n max_len = max((lft_len, rgt_len))\n assert min_len > 0\n assert max_len <= self.line_length\n assert min_len <= max_len\n different = False\n for i in range(min_len):\n lb = self.lft_line[i]\n rb = self.rgt_line[i]\n if lb != rb:\n different = True\n self.differences[i] = 1\n else:\n self.differences[i] = 0\n if min_len < max_len:\n different = True\n for i in range(min_len, max_len):\n self.differences[i] = 1\n if max_len < self.line_length:\n for i in range(max_len, self.line_length):\n self.differences[i] = 0\n return different\n\n def _print_differences(\n self,\n f,\n line_offset: int,\n lft_len: int,\n rgt_len: int\n ):\n self._print_line(f, line_offset, lft_len, self.lft_line)\n self._print_line(f, line_offset, rgt_len, self.rgt_line)\n\n def _print_line(\n self,\n f,\n line_offset: int,\n line_len: int,\n line: bytearray\n ):\n f.write((\"%%0%dX:\" % self.offset_width) % line_offset)\n\n for i in range(self.line_length):\n\n sep = \" \"\n if self.differences[i] == 1:\n if i == 0 or self.differences[i-1] == 0:\n sep = \"(\"\n else:\n if i > 0 and self.differences[i-1] == 1:\n sep = \")\"\n\n if sep == \")\":\n f.write(sep)\n if i % 8 == 0:\n f.write(\" \")\n if i % 16 == 0:\n f.write(\" \")\n if sep != \")\":\n f.write(sep)\n\n if i < line_len:\n f.write(\"%02X\" % line[i])\n else:\n f.write(\"--\")\n\n if self.differences[self.line_length-1] == 1:\n f.write(\")\")\n else:\n f.write(\" \")\n\n if self.print_c64:\n f.write(\" \")\n f.write(c64text.decode(line[:line_len]))\n\n if self.print_base40:\n f.write(\" \")\n f.write(base40.base40_decode(line[:line_len]))\n\n if self.print_base40_odd:\n # TODO: Carry over last byte of previous line\n # so we can render the first character here.\n f.write(\" \")\n f.write(base40.base40_decode(line[1:line_len-1]))\n\n if self.print_ascii:\n f.write(\" \")\n f.write(\"\".join([ chr(c&0x7F) if 0x20 <= (c&0x7F) < 0x7F else \"~\" for c in line[:line_len] ]))\n\n print(file=f) # end of the line\n\ndef parse_args(argv):\n filenames = {}\n opts = {}\n args = []\n for a in argv:\n if a.startswith(\"-\"):\n if a == \"-a\":\n opts[\"print_ascii\"] = True\n elif a == \"-c\":\n opts[\"print_c64\"] = True\n elif a == \"-b\":\n opts[\"print_base40\"] = True\n elif a == \"-o\":\n opts[\"print_base40_odd\"] = True\n else:\n raise ArgExc(\"Invalid switch: \" + a)\n else:\n args.append(a)\n if len(args) < 2:\n raise ArgExc(\"Too few arguments: \" + str(argv))\n if len(args) > 3:\n raise ArgExc(\"Too many arguments: \" + str(argv))\n filenames[\"lft_filename\"] = args[0]\n filenames[\"rgt_filename\"] = args[1]\n if len(args) == 3:\n filenames[\"diff_filename\"] = args[2]\n else:\n 
filenames[\"diff_filename\"] = \"-\"\n return filenames, opts\n\ndef main():\n filenames, opts = parse_args(sys.argv[1:])\n lft_filename = filenames[\"lft_filename\"]\n rgt_filename = filenames[\"rgt_filename\"]\n diff_filename = filenames[\"diff_filename\"]\n with open(lft_filename, \"rb\") as fl:\n with open(rgt_filename, \"rb\") as fr:\n with (sys.stdout if diff_filename == \"-\" else open(diff_filename, \"w\", encoding=\"ascii\")) as fd:\n d = Diff(fl, fr, **opts)\n d.diff(fd)\n\nif __name__ == \"__main__\":\n try:\n main()\n except ArgExc as e:\n print(e, file=sys.stderr)\n\n","repo_name":"nculwell/retrotools","sub_path":"c64/py/ddiff.py","file_name":"ddiff.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9639581181","text":"print('Training zone calculator')\r\n\r\nwhile True:\r\n age = int(input('Enter your age.\\r\\n> '))\r\n if age > 0:\r\n break\r\n else:\r\n print('Don\\'t be silly, your age isn\\'t negative.')\r\n\r\nwhile True:\r\n rate = int(input('Enter your training heart rate (in BPM).\\r\\n> '))\r\n if rate > 0:\r\n break\r\n else:\r\n print('Don\\'t be silly, your training heart rate isn\\'t negative.')\r\n\r\nmax_hr = 208 - (age / 0.7)\r\n\r\nif rate >= 0.9 * max_hr:\r\n zone = 'interval training'\r\nelif rate >= 0.7 * max_hr:\r\n zone = 'threshold training'\r\nelif rate >= 0.5 * max_hr:\r\n zone = 'aerobic training'\r\nelse:\r\n zone = 'couch potato'\r\n\r\nprint('Your training zone:', zone)\r\n","repo_name":"markspolakovs/SOF1","sub_path":"SOF1P1/practical2ex5.py","file_name":"practical2ex5.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21916846032","text":"# Uses python3\nimport sys\n\ndef optimal_weight(W, w):\n dp_result = [[0 for x in range(W + 1)] for y in range(n + 1)]\n\n for i in range(1, n+1):\n for weight in range(1, W+1):\n dp_result[i][weight] = dp_result[i-1][weight]\n if w[i-1] <= weight:\n val = dp_result[i-1][weight - w[i-1]] + w[i-1]\n if val > dp_result[i][weight]:\n dp_result[i][weight] = val\n\n return dp_result[n][W]\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n W, n, *w = list(map(int, input.split()))\n print(optimal_weight(W, w))\n","repo_name":"WeiChienHsu/Coursera_DS_Algorithm_HW","sub_path":"Algorithmic_ToolBox/Week 5/knapsack/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35694036175","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 6 12:26:38 2021\r\n\r\n@author: priceal\r\n\"\"\"\r\n\r\n# define number of reporting macrocycles and epochs per reporting cycle\r\nMacroCycles = 2\r\nInnerCycles = 2\r\n\r\n# optimization parameters\r\nlearning_rate = 0.01\r\noptimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)\r\n\r\n# learning epochs\r\nfor tt in range(MacroCycles):\r\n for t in range(InnerCycles):\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred,y)\r\n \r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n print(tt*InnerCycles,loss.item())\r\n\r\n","repo_name":"priceal/pytorch","sub_path":"trainNN1.py","file_name":"trainNN1.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30404274917","text":"import os\nimport json\nimport 
state\nfrom initView import InitView\n\nstate = state.State()\n\nAPP_PATH = os.getcwd()\nstate.app_path = APP_PATH\n\nROLES_JSON_PATH = os.path.join(APP_PATH, 'roles.json')\nstate.roles_json_path = ROLES_JSON_PATH\n\nif not os.path.isfile(ROLES_JSON_PATH):\n    roles = {'1': 'Presidente', '2': 'Médico', '3': 'Técnico', '4': 'Preparadores Físicos', '5': 'Motoristas',\n             '6': 'Cozinheiros', '7': 'Advogados', '8': 'Jogador'}\n\n    with open(state.roles_json_path, 'w') as file:\n        json.dump(roles, file)\n\n\n# Define the path and create the file that stores the employees\nEMPLOYEES_JSON_PATH = os.path.join(APP_PATH, 'employees.json')\nstate.employees_json_path = EMPLOYEES_JSON_PATH\n\nif not os.path.isfile(EMPLOYEES_JSON_PATH):\n    employees_data = {\n        'presidente': [],\n        'medicos': [],\n        'tecnicos': [],\n        'preparadores': [],\n        'motoristas': [],\n        'cozinheiros': [],\n        'advogados': [],\n        'jogadores': [],\n    }\n\n    with open(EMPLOYEES_JSON_PATH, 'w') as file:\n        json.dump(employees_data, file)\n\n# Define the path and create the file that stores the fan partners\nFAN_PARTNER_JSON_PATH = os.path.join(APP_PATH, 'fan_partners.json')\nstate.fan_partners_json_path = FAN_PARTNER_JSON_PATH\n\nif not os.path.isfile(FAN_PARTNER_JSON_PATH):\n    fan_partners_data = {\n        'junior': [],\n        'senior': [],\n        'elite': []\n    }\n\n    with open(FAN_PARTNER_JSON_PATH, 'w') as file:\n        json.dump(fan_partners_data, file)\n\n# Define the path and create the file that stores the resources\nRESOURCES_JSON_PATH = os.path.join(APP_PATH, 'resources.json')\nstate.resources_json_path = RESOURCES_JSON_PATH\n\nif not os.path.isfile(RESOURCES_JSON_PATH):\n    resources_data = {\n        'stadium': [],\n        'bus': [],\n        'trainingCenter': []\n    }\n\n    with open(RESOURCES_JSON_PATH, 'w') as file:\n        json.dump(resources_data, file)\n\nview = InitView(state)\nstate.running = True\nstate.view = view\n\nwhile state.running:\n    option = state.prompt()\n    state.run(option)","repo_name":"thalyssa/iSoccer-2019.2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70617808463","text":"import tkinter\n\ndef add_student():\n    raise Exception(\"add_student\")\n\ndef create_event():\n    raise Exception(\"create_event\")\n\ndef view_events():\n    raise Exception(\"view_events\")\n\ndef view_students():\n    raise Exception(\"view_students\")\n\nhome_window = tkinter.Tk()\n\n\n# -------------------\n# Main menu label\n# -------------------\nmain_menu_label = tkinter.Label(\n    home_window,\n    text=\"Main menu\",\n    font=(\"Comic Sans MS\", 20, \"bold\")\n)\nmain_menu_label.grid(row=0, column=0)\n\n\n# ----------------\n# Button frame\n# ----------------\nbutton_frame = tkinter.Frame(home_window)\nbutton_height = 2\nbutton_width = 20\n\n\n# ----------------------\n# Add student button\n# ----------------------\nadd_student_button = tkinter.Button(\n    button_frame,\n    text=\"Add a student\",\n    command=add_student,\n    height=button_height,\n    width=button_width,\n    font=(\"comic sans\", 14)\n)\nadd_student_button.grid(row=0, column=0, sticky=\"nsew\")\n\n\n# -----------------------\n# Create event button\n# -----------------------\ncreate_event_button = tkinter.Button(\n    button_frame,\n    text=\"Create an event\",\n    command=create_event,\n    height=button_height,\n    width=button_width,\n    font=(\"comic sans\", 14)\n)\ncreate_event_button.grid(row=1, column=0, sticky=\"nsew\")\n\n\n# ----------------------\n# View events button\n# 
----------------------\nview_events_button = tkinter.Button(\n button_frame,\n text=\"View events\",\n command=view_events,\n height=button_height,\n width=button_width,\n font=(\"comic sans\", 14)\n)\nview_events_button.grid(row=1, column=1, sticky=\"nsew\")\n\n\n# ------------------------\n# View students button\n# ------------------------\nview_students_button = tkinter.Button(\n button_frame,\n text=\"View students\",\n command=view_students,\n height=button_height,\n width=button_width,\n font=(\"comic sans\", 14)\n)\nview_students_button.grid(row=0, column=1, sticky=\"nsew\")\n\n\n# -------------------\n# Start main loop\n# -------------------\nbutton_frame.grid(row=1, column=0)\nhome_window.mainloop()","repo_name":"CommanderKV/stage-crew-management-system","sub_path":"home_window.py","file_name":"home_window.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28664727317","text":"import sys\nimport math\nimport time\nimport struct\nimport serial\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport utils\nfrom pid_controller import PIDController\nfrom arduino_controller import ArduinoController\n\nPORT = \"/dev/cu.usbmodem14101\"\nDEVICE = 0\n\n\ndef main():\n controller = ArduinoController(PORT)\n PID = PIDController(P=1.3, I=0.004, D=7)\n fsr_target = int(sys.argv[1])\n\n i = 0\n while i < 100:\n run_pid(controller, PID, fsr_target)\n print(i)\n i = i + 1\n if i >= 50:\n controller.run_motor(DEVICE, 1, 0)\n\n\ndef run_pid(controller, PID, fsr_target):\n PID.set_target(fsr_target)\n # while True:\n fsr = controller.read_fsr(DEVICE)\n print(\"FSR:\" + str(fsr))\n\n pid_output = PID.step(fsr)\n speed = utils.calculate_speed(pid_output)\n print(\"PID:\" + str(pid_output))\n\n error_margin = 5\n if pid_output < error_margin and pid_output > -(error_margin):\n speed = 0\n controller.run_motor(DEVICE, 1, speed)\n elif pid_output < -error_margin:\n controller.run_motor(DEVICE, 1, speed)\n else:\n controller.run_motor(DEVICE, 0, speed)\n\n time.sleep(0.01)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"humancomputerintegration/alter-softness","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"2532983162","text":"import mdtraj as md\nimport numpy as np\nimport sys\n\ndef remove_backbone(filename):\n mol = md.load(filename)\n correct_indices = np.array(list(mol.topology.select('not name NL and not name CA and not name CLP and not name OL and not name HA1 and not name HA2 and not name HA')))\n table, bonds = mol.topology.to_dataframe()\n onswitch = np.zeros(len(table), dtype=np.bool)\n onswitch[correct_indices] = 1\n new_table = table[onswitch].reset_index()\n new_table['index'] = np.arange(len(new_table))\n new_table['serial'] = 1 + np.arange(len(new_table))\n new_top = md.Topology.from_dataframe(new_table)\n new_traj = md.Trajectory(mol.xyz[0, onswitch], new_top)\n new_traj.save_pdb(filename)\n \nremove_backbone(str(sys.argv[1]))\n","repo_name":"UWPRG/mftoid-rev-residues","sub_path":"structure_maker/helper_code/nobackbone.py","file_name":"nobackbone.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72604002702","text":"import sys, cv2\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore 
import *\n\nclass Video(QMainWindow):\n\n    def __init__(self, cam):\n\n        super().__init__()\n\n        self.cam = cam\n        self.width = int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH))\n        self.height = int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n        self.setGeometry(20, 20, self.width+10, self.height+10)\n        self.setWindowTitle('Webcam')\n\n        self.cam_frame = QLabel()\n        self.setCentralWidget(self.cam_frame)\n        self.cam_frame.setAlignment(Qt.AlignCenter)\n\n        self._timer = QTimer(self)\n        self._timer.timeout.connect(self.play)\n        self._timer.start(25)\n\n    def play(self):\n        ret, frame = self.cam.read()\n        # print(QImage.Format_RGB888)\n        if ret:\n            self.cam_frame.setPixmap(QPixmap.fromImage( \\\n                QImage(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), self.width, self.height, QImage.Format_RGB888)))\n\n\nif __name__ == '__main__':\n\n    app = QApplication(sys.argv)\n    win = Video(cv2.VideoCapture(1))\n    win.show()\n    sys.exit(app.exec_())\n","repo_name":"hangcui1201/PERCEPTION_CAMERA","sub_path":"test/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"939150032","text":"\nimport zipfile\nimport sys\nimport util\nfrom tqdm import tqdm, trange\nimport os\nimport numpy as np\nimport math\nimport timeit\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nfrom skimage.measure import block_reduce\nimport pickle\nimport wandb\n\nNUM_CLASSES = 8\n\n\ndef extract_zips():\n    \n    directory_to_extract_to = '/home/Surya/images/Images_png'\n    \n    for i in trange(1, 57):\n        \n        path_to_zip_file = '/home/Surya/images/Images_png_%02d.zip' %i\n        \n        print('extracting', path_to_zip_file)\n\n        with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n            zip_ref.extractall(directory_to_extract_to)\n\n        print('done extracting', path_to_zip_file)\n\n\ndef pre_process(fs = [0.01 for _ in range(1, 9)], downsample = True, save = True, load = True):\n    if load: \n\n        X_train = pickle.load( open( \"X_train.p\", \"rb\" ) ) \n        print('loaded X_Train')\n\n        Y_train = pickle.load( open( \"Y_train.p\", \"rb\" ) ) \n        print('loaded Y_train')\n\n        X_val = pickle.load( open( \"X_val.p\", \"rb\" ) ) \n        print('loaded X_val')\n\n        Y_val = pickle.load( open( \"Y_val.p\", \"rb\" ) )\n        print('loaded Y_val')\n\n        X_test = pickle.load( open( \"X_test.p\", \"rb\" ) )\n        print('loaded X_test')\n\n        Y_test = pickle.load( open( \"Y_test.p\", \"rb\" ) )\n        print('loaded Y_test')\n\n        return X_train, Y_train, X_val, Y_val, X_test, Y_test\n    \n    dl_info = util.read_dl_info() \n\n    json_labels = util.read_json_labels('/home/Surya/cs230/text_mined_labels_171_and_split.json') \n\n    dl_info_vector = util.read_dl_info_vector(\n        image_dir = '../images/Images_png/',\n        DL_INFO_PATH = '/home/Surya/cs230/') \n\n    data = {}\n\n    X_train = np.empty((0, 512, 512, 3), float)\n    Y_train = np.empty((0, 1), float)\n    X_val = np.empty((0, 512, 512, 3), float)\n    Y_val = np.empty((0, 1), float)\n    X_test = np.empty((0, 512, 512, 3), float)\n    Y_test = np.empty((0, 1), float)\n    \n    Y_train_bb = np.empty((0, 4), float)\n\n    downsample_factor = 2\n    \n    if downsample:\n        X_train = block_reduce(X_train, block_size=(1, downsample_factor, downsample_factor, 1), func=np.mean)\n        X_val = block_reduce(X_val, block_size=(1, downsample_factor, downsample_factor, 1), func=np.mean)\n        X_test = block_reduce(X_test, block_size=(1, downsample_factor, downsample_factor, 1), func=np.mean)\n\n    # Use these fs to preserve same number of images across\n    \n    for i in range(1, 9):\n        
x_train,y_train_bb,x_val,y_val_bb,x_test,y_test_bb = util.data_load(dl_info, dl_info_vector, json_labels, i, f=fs[i-1])\n \n #Convert to float 32\n x_train, x_val, x_test = [np.float32(a) for a in [x_train, x_val, x_test]] \n\n #Balance the classes\n # x_train = x_train[:200, :, :, :]\n # x_val = x_val[:20, :, :, :]\n # x_test = x_test[:20, :, :, :]\n\n # y_train = y_train[:200, :]\n # y_val = y_val[:20, :]\n # y_test = y_test[:20, :]\n\n #Downsample\n if downsample :\n x_train = block_reduce(x_train, block_size=(1, downsample_factor, downsample_factor, 1), func=np.mean)\n x_val = block_reduce(x_val, block_size=(1, downsample_factor, downsample_factor, 1), func=np.mean)\n x_test = block_reduce(x_test, block_size=(1, downsample_factor, downsample_factor, 1), func=np.mean)\n\n y_train = (i-1) * np.ones((len(x_train), 1)) #classes are 1 indexed\n y_val = (i-1) * np.ones((len(x_val), 1)) #classes are 1 indexed\n y_test = (i-1) * np.ones((len(x_test), 1)) #classes are 1 indexed\n\n X_train = np.append(X_train, x_train, axis=0)\n Y_train = np.append(Y_train, y_train, axis=0)\n X_val = np.append(X_val, x_val, axis=0)\n Y_val = np.append(Y_val, y_val, axis=0)\n X_test = np.append(X_test, x_test, axis=0)\n Y_test = np.append(Y_test, y_test, axis=0)\n\n Y_train_bb = np.append(Y_train_bb, y_train_bb, axis=0)\n # print(Y_test.shape)\n\n shuffled_train_indices = list(range(len(X_train)))\n np.random.shuffle(shuffled_train_indices)\n \n X_train = np.squeeze(X_train[shuffled_train_indices] ) \n Y_train = np.squeeze(Y_train[shuffled_train_indices] ) \n Y_train_bb = np.squeeze(Y_train_bb[shuffled_train_indices] ) \n\n shuffled_val_indices = list(range(len(X_val)))\n np.random.shuffle(shuffled_val_indices)\n \n X_val = np.squeeze(X_val[shuffled_val_indices] ) \n Y_val = np.squeeze(Y_val[shuffled_val_indices] ) \n\n # print(Y_val)\n\n shuffled_test_indices = list(range(len(X_test)))\n\n # print('before shuffling', shuffled_test_indices)\n\n np.random.shuffle(shuffled_test_indices)\n\n # print('after shuffling', shuffled_test_indices)\n \n # print('Y_test shape', Y_test[shuffled_test_indices].shape)\n\n # print(Y_test[shuffled_test_indices])\n\n X_test = np.squeeze(X_test[shuffled_test_indices] ) \n Y_test = np.squeeze(Y_test[shuffled_test_indices] ) \n\n \n # print('size of sample image', sys.getsizeof(x))\n \n X_train, Y_train, X_val, Y_val, X_test, Y_test = [np.float32(a) for a in [X_train, Y_train, X_val, Y_val, X_test, Y_test]] \n \n if save:\n pickle.dump( X_train, open( \"X_train.p\", \"wb\" ) , protocol=4)\n pickle.dump( Y_train, open( \"Y_train.p\", \"wb\" ) , protocol=4)\n pickle.dump( X_val, open( \"X_val.p\", \"wb\" ) , protocol=4)\n pickle.dump( Y_val, open( \"Y_val.p\", \"wb\" ) , protocol=4)\n pickle.dump( X_test, open( \"X_test.p\", \"wb\" ) , protocol=4)\n pickle.dump( Y_test, open( \"Y_test.p\", \"wb\" ) , protocol=4)\n pickle.dump( Y_train_bb, open( \"Y_train_bb.p\", \"wb\" ) , protocol=4)\n \n return X_train, Y_train, X_val, Y_val, X_test, Y_test\n\ndef sample_from(x, y, k, classes, frac_data = 1):\n\n #returns n samples of x, y of class = class_num\n # X : N, D, D, 3, \n # Y : N\n \n n_classes = len(classes)\n kn = k * n_classes\n \n D = x.shape[1]\n\n X = np.empty((0, D, D, 3), np.float32)\n Y = np.empty((0, NUM_CLASSES), np.float32)\n\n labels = np.eye(NUM_CLASSES)\n counter = 0\n \n print('in sample from')\n print('classes sampling: ', classes)\n \n for class_num in tqdm(classes):\n\n # print(X.shape)\n\n x_class = np.float32(x[y == class_num])\n \n indices = 
list(range(len(x_class)))\n np.random.shuffle(indices)\n \n # Error handling when you try to sample more images than you have.\n k_ = int(min(k, len(indices)) * frac_data)\n \n print('adding ', k_, 'images of class', class_num)\n\n selected_samples = indices[:k_]\n\n #testing overfitting\n # selected_samples = [0 for _ in range(k)]\n #\n\n X = np.append(X, x_class[selected_samples], axis = 0)\n Y = np.append(Y, np.tile(labels[class_num], (k_ , 1)), axis = 0)\n\n # print('labels[counter]', labels[counter])\n\n \"\"\"\n pickle.dump(X, open( \"X_\" + str(class_num).p\", \"wb\" ) , protocol=4)\n \"\"\"\n\n print('sampled data , sample sizes are ', X.shape, Y.shape )\n\n\n return X, Y\n\ndef shuffle(x, y):\n shuffled_train_indices = list(range(len(x)))\n np.random.shuffle(shuffled_train_indices)\n \n x = np.squeeze(x[shuffled_train_indices] ) \n y = np.squeeze(y[shuffled_train_indices] ) \n\n return x, y\n\nif __name__ == '__main__':\n \n five_hundred_images = [0.10539629, 0.175315568, 0.12025012, 0.192233756, 0.201288245, 0.151975684, 0.257201646, 0.330033003]\n \n pre_process(fs = [0.75 * i for i in five_hundred_images], downsample = False, save = True, load = False)\n \n pass","repo_name":"surya-narayanan/Meta-Learning-Deep-Leision","sub_path":"pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"34772899405","text":"from django.urls import path, include\nfrom django.views.generic import TemplateView\nfrom handelbyadmin import views as hv\n\n\nurlpatterns = [\n path('faq/', hv.faq_view, name=\"faq-view\"),\n path('faq/add-faq/', hv.add_faq_view, name=\"add-faq\"),\n path('notification/push-notification/', hv.push_notification_view, name='push-notification'),\n path('our-story/', hv.our_story_view, name='our-story'),\n path('contact-info/', hv.contact_view, name='contact-info'),\n path('report_details/', hv.report_details_view, name='report-details'),\n path('notifications/', hv.all_notification_view, name='all-notification'),\n path('terms-of-use/', hv.termsofuse, name='terms-of-use'),\n # path('', TemplateView.as_view(template_name='help.html'), name='help-section'),\n]","repo_name":"na5imuzzaman/booi_poka_production","sub_path":"handelbyadmin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"328985548","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCBV Utility functions\n\n.. codeauthor:: Mikkel N. Lund \n.. 
codeauthor:: Rasmus Handberg \n\"\"\"\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import cross_val_score\nfrom bottleneck import nansum, move_median, nanmedian, allnan\nfrom scipy import stats\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.special import xlogy\nfrom scipy.spatial import distance\nfrom statsmodels.nonparametric.kde import KDEUnivariate as KDE\nfrom ..utilities import mad_to_sigma\n\n#--------------------------------------------------------------------------------------------------\ndef MAD_model(x, **kwargs):\n\t# x: difference between input\n\treturn mad_to_sigma*np.nanmedian(np.abs(x), **kwargs)\n\n#--------------------------------------------------------------------------------------------------\ndef MAD_model2(x, **kwargs):\n\t# x: difference between input\n\treturn mad_to_sigma*np.nanmedian(np.abs(x-np.nanmedian(x)), **kwargs)\n\n#--------------------------------------------------------------------------------------------------\ndef MAD_scatter(X, Y, bins=15):\n\tbin_means, bin_edges, binnumber = stats.binned_statistic(X, Y, statistic=nanmedian, bins=bins)\n\tbin_width = (bin_edges[1] - bin_edges[0])\n\tbin_centers = bin_edges[1:] - bin_width/2\n\tidx = np.isfinite(bin_centers) & np.isfinite(bin_means)\n\tspl = InterpolatedUnivariateSpline(bin_centers[idx], bin_means[idx])\n\n\tM = MAD_model(Y-spl(X))\n\treturn M\n\n#--------------------------------------------------------------------------------------------------\ndef _move_median_central_1d(x, width_points):\n\ty = move_median(x, width_points, min_count=1)\n\ty = np.roll(y, -width_points//2+1)\n\tfor k in range(width_points//2+1):\n\t\ty[k] = nanmedian(x[:(k+2)])\n\t\ty[-(k+1)] = nanmedian(x[-(k+2):])\n\treturn y\n\n#--------------------------------------------------------------------------------------------------\ndef move_median_central(x, width_points, axis=0):\n\treturn np.apply_along_axis(_move_median_central_1d, axis, x, width_points)\n\n#--------------------------------------------------------------------------------------------------\ndef pearson(x, y):\n\tindx = np.isfinite(x) & np.isfinite(y)\n\tr, _ = stats.pearsonr(x[indx], y[indx]) # Second output (p-value) is not used\n\treturn r\n\n#--------------------------------------------------------------------------------------------------\ndef compute_scores(X, n_components):\n\tpca = PCA(svd_solver='full')\n\n\tpca_scores = []\n\tfor n in n_components:\n\t\tpca.n_components = n\n\t\tpca_scores.append(np.mean(cross_val_score(pca, X, cv=5)))\n\n\treturn pca_scores\n\n#--------------------------------------------------------------------------------------------------\ndef rms(x, **kwargs):\n\treturn np.sqrt(nansum(x**2, **kwargs)/len(x))\n\n#--------------------------------------------------------------------------------------------------\ndef compute_entropy(U):\n\n\tHGauss0 = 0.5 + 0.5*np.log(2*np.pi)\n\n\tnSingVals = U.shape[1]\n\tH = np.empty(nSingVals, dtype='float64')\n\n\tfor iBasisVector in range(nSingVals):\n\n\t\tkde = KDE(np.abs(U[:, iBasisVector]))\n\t\tkde.fit(gridsize=1000)\n\n\t\tpdf = kde.density\n\t\tx = kde.support\n\n\t\tdx = x[1]-x[0]\n\n\t\t# Calculate the Gaussian entropy\n\t\tpdfMean = nansum(x * pdf)*dx\n\t\twith np.errstate(invalid='ignore'):\n\t\t\tsigma = np.sqrt( nansum(((x-pdfMean)**2) * pdf) * dx )\n\t\tHGauss = HGauss0 + np.log(sigma)\n\n\t\t# Calculate vMatrix entropy\n\t\tpdf_pos = (pdf > 0)\n\t\tHVMatrix = -np.sum(xlogy(pdf[pdf_pos], pdf[pdf_pos])) * 
dx\n\n\t\t# Returned entropy is difference between V-Matrix entropy and Gaussian entropy of similar width (sigma)\n\t\tH[iBasisVector] = HVMatrix - HGauss\n\n\treturn H\n\n#--------------------------------------------------------------------------------------------------\ndef reduce_std(x):\n\treturn np.median(np.abs(x-np.median(x)))\n\n#--------------------------------------------------------------------------------------------------\ndef reduce_mode(x):\n\tkde = KDE(x)\n\tkde.fit(gridsize=2000)\n\n\tpdf = kde.density\n\tx = kde.support\n\treturn x[np.argmax(pdf)]\n\n#--------------------------------------------------------------------------------------------------\ndef ndim_med_filt(v, x, n, dist='euclidean', mad_frac=2):\n\n\td = distance.cdist(x, x, dist)\n\n\tidx = np.zeros_like(v, dtype=bool)\n\tfor i in range(v.shape[0]):\n\t\tidx_sort = np.argsort(d[i,:])\n\t\tvv = v[idx_sort][1:n+1] # sort values according to distance from point\n\n\t\tvm = np.median(vv) # median value of n nearest points\n\t\tmad = MAD_model(vv-vm)\n\n\t\tif (v[i] < vm+mad_frac*mad) & (v[i] > vm-mad_frac*mad):\n\t\t\tidx[i] = True\n\treturn idx\n\n#--------------------------------------------------------------------------------------------------\ndef AlmightyCorrcoefEinsumOptimized(X, P):\n\t\"\"\"\n\tCorrelation coefficients using Einstein sums.\n\n\t.. codeauthor:: Mikkel N. Lund \n\t\"\"\"\n\n\t(n, t) = X.shape # n traces of t samples\n\t(n_bis, m) = P.shape # n predictions for each of m candidates\n\n\tDX = X - (np.einsum(\"nt->t\", X, optimize='optimal') / np.double(n)) # compute X - mean(X)\n\tDP = P - (np.einsum(\"nm->m\", P, optimize='optimal') / np.double(n)) # compute P - mean(P)\n\n\tcov = np.einsum(\"nm,nt->mt\", DP, DX, optimize='optimal')\n\n\tvarP = np.einsum(\"nm,nm->m\", DP, DP, optimize='optimal')\n\tvarX = np.einsum(\"nt,nt->t\", DX, DX, optimize='optimal')\n\ttmp = np.einsum(\"m,t->mt\", varP, varX, optimize='optimal')\n\n\treturn cov / np.sqrt(tmp)\n\n#--------------------------------------------------------------------------------------------------\ndef lightcurve_correlation_matrix(mat):\n\t\"\"\"\n\tCalculate the correlation matrix between all lightcurves in matrix.\n\n\tParameters:\n\t\tmat (numpy.array): (NxM)\n\n\tReturns:\n\t\tnumpy.array: Correlation matrix (NxN).\n\t\"\"\"\n\n\tindx_nancol = allnan(mat, axis=0)\n\tmat1 = mat[:, ~indx_nancol]\n\n\tmat1[np.isnan(mat1)] = 0\n\tcorrelations = np.abs(AlmightyCorrcoefEinsumOptimized(mat1.T, mat1.T))\n\tnp.fill_diagonal(correlations, np.nan)\n\n\treturn correlations\n","repo_name":"tasoc/corrections","sub_path":"corrections/cbv_corrector/cbv_utilities.py","file_name":"cbv_utilities.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"4355657221","text":"#!../bin/python\nfrom os.path import (join, basename)\nimport tempfile\n\nimport nuka\nfrom nuka.hosts import DockerContainer\nfrom nuka.tasks import (apt, user, virtualenv, shell, http, file)\n\nnuka.config['templates'].append('/tmp')\n\nkinto = DockerContainer('kinto')\n\nWSGI_FILE = 'https://raw.githubusercontent.com/Kinto/kinto/master/app.wsgi'\n\n\nasync def install_kinto(host):\n kinto_user = user.create_www_user('kinto')\n await nuka.wait(\n kinto_user,\n apt.install([\n 'apache2', 'libapache2-mod-wsgi',\n 'build-essential', 'libffi-dev', 'libssl-dev',\n 'python3-dev', 'redis-server'\n ], update_cache=3600))\n\n with open('/tmp/requirements.txt', 'wb') as fd:\n 
fd.write(b'kinto\\n')\n fd.flush()\n await virtualenv.virtualenv(\n dst=kinto_user.home,\n requirements=fd.name,\n switch_user='kinto')\n\n binary = join(kinto_user.home, 'bin', 'kinto')\n config = join(kinto_user.home, 'config', 'kinto.ini')\n\n if not await file.exists(config):\n await shell.command([binary, 'init', '--backend=redis'],\n switch_user='kinto')\n await shell.command([binary, 'migrate'],\n switch_user='kinto')\n\n if not await file.exists('app.wsgi', switch_user='kinto'):\n await http.fetch(\n src=WSGI_FILE,\n dst='app.wsgi', switch_user='kinto')\n\n with tempfile.NamedTemporaryFile(suffix='.j2') as fd:\n fd.write(b'''\nServerName kinto.example.com\n\nWSGIScriptAlias / {{kinto_user.home}}/app.wsgi\nWSGIPythonPath {{kinto_user.home}}\nSetEnv KINTO_INI {{kinto_config}}\n\n\n \n Require all granted\n \n\n''')\n fd.flush()\n conf = await file.put([dict(\n src=basename(fd.name),\n dst='/etc/apache2/sites-available/kinto.conf')],\n ctx=dict(kinto_user=kinto_user, kinto_config=config))\n\n if conf.changed:\n await shell.command(['a2ensite', 'kinto.conf'])\n\n\nnuka.run(install_kinto(kinto))\n","repo_name":"bearstech/nuka","sub_path":"examples/kinto.py","file_name":"kinto.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"47"} +{"seq_id":"22144215592","text":"import streamlit as st\r\nimport yahoo_fin.stock_info as si\r\nfrom yahoo_fin.stock_info import get_analysts_info\r\nimport pandas_datareader as pdr\r\n\r\nst.header(\"Graham's Stock Valuation Calculator\")\r\n\r\nticker = st.text_input('Ticker', 'AAPL')\r\nng_pe = st.text_input('No Growth PE', 8.5)\r\nmultiplier = st.text_input('Multiplier of Growth Rate', 2)\r\nmargin = st.text_input('Margin of Safety(%)', 35)\r\n\r\ndata = {}\r\ndef get_data(ticker, ng_pe, multiplier, margin):\r\n quote = si.get_quote_table(ticker)\r\n current_price = quote[\"Quote Price\"]\r\n eps = quote[\"EPS (TTM)\"]\r\n growth_df = get_analysts_info(ticker)['Growth Estimates']\r\n growth_rate = growth_df.iloc[4][1]\r\n growth_rate = growth_rate.rstrip(\"%\")\r\n aaa_df = pdr.get_data_fred('AAA')\r\n current_yield = aaa_df.iloc[-1][0]\r\n\r\n output = {\r\n \"current_price\": float(current_price),\r\n \"eps\": float(eps),\r\n \"growth_rate\": float(growth_rate),\r\n \"current_yield\": float(current_yield),\r\n \"ng_pe\": float(ng_pe),\r\n \"multiplier\": float(multiplier),\r\n \"margin\": float(margin)\r\n }\r\n return output \r\n\r\nif st.button('Calculate'):\r\n data = get_data(ticker, ng_pe, multiplier, margin)\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n col1, col2, col3 = st.columns(3)\r\n\r\n with col1:\r\n st.metric(label=\"EPS($)\", value=data[\"eps\"])\r\n with col2:\r\n st.metric(label=\"Projected Growth Rate (5 years)\", value=data[\"growth_rate\"])\r\n with col3:\r\n st.metric(label=\"Current Yield AAA Corp Bond\", value=data[\"current_yield\"]) \r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n int_value = (data[\"eps\"] * (data[\"ng_pe\"] + data[\"multiplier\"] * data[\"growth_rate\"] ) * 4.4)/ data[\"current_yield\"]\r\n int_value = round(int_value,2)\r\n stock_price = round(data[\"current_price\"],2)\r\n margin_rate = data[\"margin\"] / 100\r\n accept_price = (1-margin_rate) * int_value\r\n accept_price = round(accept_price,2) \r\n\r\n col4, col5, col6 = st.columns(3)\r\n with col4:\r\n st.subheader('Current Stock Price($)')\r\n st.subheader(\"**:blue[\" + str(stock_price) + \"]**\") \r\n with col5:\r\n st.subheader('Intrinsic Stock Value($)')\r\n 
st.subheader(\"**:blue[\" + str(int_value) + \"]**\")\r\n with col6:\r\n st.subheader('Acceptable Buy Price($)')\r\n st.subheader(\"**:blue[\" + str(accept_price) + \"]**\")\r\nelse:\r\n st.text(\"Click on Calculate button\")\r\n\r\n\r\n\r\n\r\n","repo_name":"teobeeguan/Python-For-Finance","sub_path":"GrahamValuation/GrahamValuationCalculator.py","file_name":"GrahamValuationCalculator.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"47"} +{"seq_id":"37253049882","text":"class Pokemon(object):\n ataque:int\n defensa:int\n velocidad:int\n sonido:str\n\n def __init__(self, ataque, defensa, velocidad, sonido):\n self.ataque = ataque\n self.defensa = defensa\n self.velocidad = velocidad\n self.sonido = sonido\n \n def roar(self):\n print(self.sonido.lower())\n\nclass PKMGEN2(Pokemon):\n def __init__(self, ataque, defensa, velocidad, sonido):\n Pokemon.__init__(self, ataque, defensa, velocidad, sonido)\n \n def roar(self):\n print(self.sonido.upper())\n\npika = Pokemon(ataque=40, defensa=20, velocidad=80, sonido=\"Pika CHUUUUUUUUUU\")\ncharizard = Pokemon(ataque=90, defensa=70, velocidad=40, sonido=\"chariiiiiIIzard\")\nmeow = Pokemon(ataque=100, defensa=100, velocidad=80, sonido=\"al ataque\")\n\nwobbufet = PKMGEN2(ataque=100, defensa=100, velocidad=80, sonido=\"wobbuuuuuuuuuuuuffet\")\n\npokemones = [pika,charizard,meow,wobbufet]\n\nfor pokemon in pokemones:\n pokemon.roar()","repo_name":"marco-gallegos/emulationengineer","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8222287372","text":"#\n# @lc app=leetcode id=152 lang=python3\n#\n# [152] Maximum Product Subarray\n#\n# https://leetcode.com/problems/maximum-product-subarray/description/\n#\n# algorithms\n# Medium (32.75%)\n# Likes: 6630\n# Dislikes: 219\n# Total Accepted: 469.6K\n# Total Submissions: 1.4M\n# Testcase Example: '[2,3,-2,4]'\n#\n# Given an integer array nums, find a contiguous non-empty subarray within the\n# array that has the largest product, and return the product.\n# \n# It is guaranteed that the answer will fit in a 32-bit integer.\n# \n# A subarray is a contiguous subsequence of the array.\n# \n# \n# Example 1:\n# \n# \n# Input: nums = [2,3,-2,4]\n# Output: 6\n# Explanation: [2,3] has the largest product 6.\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [-2,0,-1]\n# Output: 0\n# Explanation: The result cannot be 2, because [-2,-1] is not a subarray.\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= nums.length <= 2 * 10^4\n# -10 <= nums[i] <= 10\n# The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit\n# integer.\n# \n# \n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n curr_max = nums[0]\n curr_min = nums[0] \n max_product = curr_max \n for n in nums[1:]:\n temp_max = max(n, curr_max * n, curr_min * n)\n curr_min = min(n, curr_max * n, curr_min * n)\n curr_max = temp_max\n max_product = max(curr_max, max_product)\n\n return max_product\n\n\n# @lc code=end\nnums = [2, 3, -2, 4]\nnums = [-2, 0, -1]\nnums = [2, -5, -2, -4, 3]\ns = 
Solution()\nprint(s.maxProduct(nums))\n","repo_name":"wwjholmes/leetcode","sub_path":"152.maximum-product-subarray.py","file_name":"152.maximum-product-subarray.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"86559672239","text":"import torch\nimport torch.nn as nn\nimport timm\n\n\nclass CDFI_Branch(nn.Module):\n def __init__(self,channel=3,hidden = 10, dropout=0.2, model_name = 'resnet50', with_att=True):\n super(CDFI_Branch, self).__init__()\n self.gap = nn.AdaptiveAvgPool2d(1) # global average pooling\n self.fc = nn.Sequential(\n nn.Linear(channel,hidden),\n nn.Dropout(dropout),\n nn.ReLU(),\n nn.Linear(hidden,channel),\n nn.Dropout(dropout),\n nn.Sigmoid())\n\n self.backbone = timm.create_model(model_name, pretrained=False)\n self.backbone.reset_classifier(0, '')\n self.with_att = with_att\n # self.backbone = ResNet50()\n def forward(self, x):\n if self.with_att:\n b, c, _, _ = x.size()# get the H and W dimensions; global pooling is applied over these two dimensions\n y = self.gap(x).view(b, c)# implements the Squeeze operation\n y = self.fc(y).view(b, c, 1, 1)# implements the Excitation operation\n x = x * y.expand_as(x)\n out = self.backbone(x)\n # y is expanded to the same shape as x and used to reweight it\n return out\n\nclass MFA(nn.Module):\n def __init__(self,channel,reduction = 42):\n super(MFA, self).__init__()\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel// reduction),\n nn.ReLU(),\n nn.Linear(channel//reduction, channel),\n nn.Sigmoid())\n\n def forward(self, x, a):\n b, c, _, _ = x.size()\n y = self.gap(x).view(b,c)\n y = self.fc(y).view(b, c, 1, 1)\n y = y.expand_as(x)\n return y*a\n\nclass D_BUS_Net(nn.Module):\n def __init__(self, model_name='resnet50', fuse_att=True, cdfi_att=True, dropout=0.2):\n super(D_BUS_Net, self).__init__()\n # define the sub-modules\n \n self.bus_branch = timm.create_model(model_name, pretrained=False)\n self.bus_branch.reset_classifier(0, '')\n\n # self.bus_branch = ResNet50()\n last_channel = {\n 'resnet50': 2048,\n 'resnet34' : 512, \n 'resnet18' : 512, \n 'efficientnet_b0' : 1280, \n 'convnext_small': 768\n }\n self.cdfi_branch = CDFI_Branch(model_name=model_name, with_att=cdfi_att, dropout=dropout)\n last_conv_channel = last_channel[model_name]\n # last_conv_channel = 1000\n\n self.MFA = MFA(last_conv_channel) # resnet50 with 224 input determines the 2048 here\n\n self.flatten = nn.Flatten()\n\n \n self.bus_cls = nn.Sequential(\n nn.Linear(last_conv_channel*7*7, 64),\n nn.Dropout(dropout),\n nn.ReLU(),\n nn.Linear(64,1),\n nn.Sigmoid()\n )\n self.cdfi_cls = nn.Sequential(\n nn.Linear(last_conv_channel*7*7, 64),\n nn.Dropout(dropout),\n nn.ReLU(),\n nn.Linear(64,1),\n nn.Sigmoid()\n )\n self.cls = nn.Sequential(\n nn.Linear(last_conv_channel*2*7*7, 128),\n nn.Dropout(dropout),\n nn.ReLU(),\n nn.Linear(128,1),\n nn.Sigmoid()\n )\n self.fuse_att = fuse_att\n\n\n def forward(self,bus_input,cdfi_input):\n bus_output = self.bus_branch(bus_input)\n cdfi = self.cdfi_branch(cdfi_input)\n if self.fuse_att:\n cdfi_output = self.MFA(bus_output,cdfi)\n else:\n cdfi_output = cdfi\n bus_output = self.flatten(bus_output)\n cdfi_output = self.flatten(cdfi_output)\n out = torch.cat([bus_output,cdfi_output],dim = 1)\n bus_aux_out = self.bus_cls(bus_output)\n cdfi_aux_out = self.cdfi_cls(cdfi_output)\n out = self.cls(out)\n return out ,bus_aux_out,cdfi_aux_out\n\n\n\nif __name__ == '__main__':\n input1 = torch.randn((1, 3, 224, 224))\n input2 = torch.randn((1, 3, 224, 224))\n\n out1 ,out2 , out3 = D_BUS_Net('convnext_small')(input1,input2)\n    
print(out1,out2,out3)\n","repo_name":"wupeiyan/MDLRN","sub_path":"code/deeplearning/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12446824915","text":"#!/usr/bin/env python3\n\nimport multiprocessing as mp\nimport os\nimport timeit\n\nimport pandas as pd\n\nimport _init_path\nfrom spacenet7_model.configs import load_config\nfrom spacenet7_model.utils import (convert_geojsons_to_csv, ensemble_subdir,\n get_subdirs, interpolate_polys, map_wrapper,\n remove_polygon_empty_row_if_polygon_exists,\n solution_filename,\n track_footprint_identifiers)\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n t0 = timeit.default_timer()\n\n config = load_config()\n\n assert len(config.ENSEMBLE_EXP_IDS) >= 1\n\n subdir = ensemble_subdir(config.ENSEMBLE_EXP_IDS)\n input_root = os.path.join(config.POLY_ROOT, subdir)\n aois = get_subdirs(input_root)\n\n # prepare json and output directories\n tracked_poly_root = os.path.join(config.TRACKED_POLY_ROOT, subdir)\n os.makedirs(tracked_poly_root, exist_ok=False)\n\n if config.SOLUTION_OUTPUT_PATH and config.SOLUTION_OUTPUT_PATH != 'none':\n # only for deployment phase\n out_path = config.SOLUTION_OUTPUT_PATH\n else:\n out_path = os.path.join(tracked_poly_root, solution_filename())\n\n # some parameters\n verbose = True\n super_verbose = False\n\n n_thread = config.TRACKING_NUM_THREADS\n n_thread = n_thread if n_thread > 0 else mp.cpu_count()\n print(f'N_thread for multiprocessing: {n_thread}')\n\n # track footprint and save the results as geojson files\n # prepare args and output directories\n input_args = []\n for i, aoi in enumerate(aois):\n json_dir = os.path.join(tracked_poly_root, aoi)\n os.makedirs(json_dir, exist_ok=False)\n\n input_dir = os.path.join(input_root, aoi)\n\n input_args.append([\n track_footprint_identifiers, config, input_dir, json_dir, verbose,\n super_verbose\n ])\n\n # run multiprocessing\n with mp.Pool(processes=n_thread) as pool:\n with tqdm(total=len(input_args)) as t:\n for _ in pool.imap_unordered(map_wrapper, input_args):\n t.update(1)\n\n # convert the geojson files into a dataframe\n json_dirs = [\n os.path.join(tracked_poly_root, aoi)\n for aoi in get_subdirs(tracked_poly_root)\n ]\n solution_df = convert_geojsons_to_csv(json_dirs,\n output_csv_path=None,\n population='proposal')\n solution_df = pd.DataFrame(solution_df) # GeoDataFrame to DataFrame\n\n # interpolate master polys\n if config.TRACKING_ENABLE_POST_INTERPOLATION:\n print('running post interpolation. 
this may take ~10 min...')\n\n # XXX: SN7 train dir is hard coded...\n test_root = '/data/spacenet7/spacenet7/train' if config.TEST_TO_VAL else config.INPUT.TEST_DIR\n\n # prepare args and output directories\n input_args = []\n for aoi in aois:\n aoi_mask = solution_df.filename.str.endswith(aoi)\n solution_df_aoi = solution_df[aoi_mask]\n input_args.append([\n interpolate_polys, aoi, solution_df_aoi, tracked_poly_root,\n test_root\n ])\n\n # run multiprocessing\n pool = mp.Pool(processes=n_thread)\n polys_to_interpolate_tmp = pool.map(map_wrapper, input_args)\n pool.close()\n\n # do interpolation\n polys_to_interpolate = []\n for polys in polys_to_interpolate_tmp:\n polys_to_interpolate.extend(polys)\n polys_to_interpolate = pd.DataFrame(polys_to_interpolate)\n solution_df = solution_df.append(polys_to_interpolate)\n\n # remove \"POLYGON EMPTY\" row if needed\n solution_df = remove_polygon_empty_row_if_polygon_exists(solution_df)\n\n print('saving solution csv file...')\n solution_df.to_csv(out_path, index=False)\n print(f'saved solution csv to {out_path}')\n\n elapsed = timeit.default_timer() - t0\n print('Time: {:.3f} min'.format(elapsed / 60.0))\n","repo_name":"motokimura/spacenet7_solution","sub_path":"tools/track_polys.py","file_name":"track_polys.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"29454002755","text":"import unittest\nfrom binary_search import binary_search, linear_search\n\n\nclass TestBinarySearch(unittest.TestCase):\n def test_small(self):\n for (keys, query) in [\n ([1, 2, 3], 1),\n ([4, 5, 6], 7),\n ([4, 5, 6], 7)\n ]:\n self.assertEqual(\n linear_search(keys, query),\n binary_search(keys, query)\n )\n\n def test_large(self):\n for (keys, query, answer) in [\n (list(range(10 ** 4)), 10 ** 4, -1),\n (list(range(10 ** 4)), 10 ** 4, -1),\n (list(range(10 ** 4)), 239, 239),\n ]:\n self.assertEqual(binary_search(keys, query), answer)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"saint1729/coursera","sub_path":"data-structures-algorithms/Algorithmic Toolbox/Divide-and-Conquer/Binary Search/binary_search_unit_tests.py","file_name":"binary_search_unit_tests.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"} +{"seq_id":"41959686331","text":"print(\"\"\"---------------------------\r\nFind the summation of even and\r\nodd numbers at the interval (a,b)\r\n---------------------------\"\"\")\r\nlist=[]\r\nlist2 =[]\r\na =int(input(\"Enter a:\"))\r\nb =int(input(\"Enter b:\"))\r\n\r\nfor i in range(a,b):\r\n if (i % 2 == 0):\r\n list.append(i)\r\n else:\r\n list2.append(i)\r\nprint(\"Summation of Even numbers:\")\r\nprint(sum(list))\r\nprint(\"Summation of Odd numbers:\")\r\nprint(sum(list2))","repo_name":"Tunahan-Emir-Karabay/Algorithms-with-Python","sub_path":"Summation of even and odd numbers at the interval.py","file_name":"Summation of even and odd numbers at the interval.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8165221710","text":"import io\nimport os\nimport logging\nimport json\nimport threading\n\nimport requests\nimport sounddevice as sd\nimport soundfile\nimport soundfile as sf\n\n\napi_key = os.getenv(\"ELEVENLABS_API_KEY\")\nBASE_URL = \"https://api.elevenlabs.io/v1\"\nDEFAULT_HEADERS = {\"accept\": \"*/*\"}\n\n\nclass Voice(object):\n def 
__init__(self, data):\n self._data = data\n\n def voice_id(self):\n return self._data[\"voice_id\"]\n\n def name(self):\n return self._data[\"name\"]\n\n def say(self, text, background=False):\n data = generate_audio(self.voice_id(), text)\n play_audio(data, background=background)\n\n def __str__(self) -> str:\n return json.dumps(self._data)\n\n\ndef generate_audio(voice_id, text):\n payload = {\"text\": text, \"voice_settings\": {\"stability\": 0, \"similarity_boost\": 0}}\n response = _post(f\"/text-to-speech/{voice_id}/stream\", json_data=payload)\n return response.content\n\n\ndef play_audio(\n audio_data,\n background=False,\n on_start=lambda: None,\n on_end=lambda: None,\n device_id=None,\n):\n if device_id is None:\n device_id = sd.default.device[1]\n wrapper = _SDPlaybackWrapper(\n data=audio_data, device_id=device_id, on_start=on_start, on_end=on_end\n )\n if not background:\n with wrapper.stream:\n wrapper.end_playback_event.wait()\n else:\n wrapper.stream.start()\n\n\ndef get_available_voices():\n response = _get(\"/voices\")\n data = response.json()\n return [Voice(d) for d in data[\"voices\"]]\n\n\nclass _SDPlaybackWrapper:\n def __init__(\n self,\n data,\n device_id,\n on_start=lambda: None,\n on_end=lambda: None,\n ):\n sound_file = sf.SoundFile(io.BytesIO(data))\n sound_file.seek(0)\n self.on_start = on_start\n self.on_end = on_end\n self.start_playback_event = threading.Event()\n self.end_playback_event = threading.Event()\n self.data = sound_file.read(always_2d=True)\n self.currentFrame = 0\n self.stream = sd.OutputStream(\n channels=sound_file.channels,\n callback=self.callback,\n samplerate=sound_file.samplerate,\n device=device_id,\n finished_callback=self.end_playback,\n )\n\n def callback(self, outdata, frames, time, status):\n if status:\n print(status)\n\n if not self.start_playback_event.is_set():\n self.start_playback_event.set()\n self.on_start()\n\n chunksize = min(len(self.data) - self.currentFrame, frames)\n outdata[:chunksize] = self.data[\n self.currentFrame : self.currentFrame + chunksize\n ]\n if chunksize < frames:\n outdata[chunksize:] = 0\n raise sd.CallbackStop()\n self.currentFrame += chunksize\n\n def end_playback(self):\n self.on_end()\n self.end_playback_event.set()\n\n\ndef _get(path, headers=DEFAULT_HEADERS):\n return _call_api(\"GET\", path, headers)\n\n\ndef _post(path, json_data, headers=DEFAULT_HEADERS):\n return _call_api(\"POST\", path, headers, json_data)\n\n\ndef _call_api(method, path, headers=DEFAULT_HEADERS, json_data=None, file_data=None):\n if not api_key:\n raise Exception(\"No API key provided\")\n headers[\"xi-api-key\"] = api_key\n if path[0] != \"/\":\n path = \"/\" + path\n endpoint = BASE_URL + path\n if method == \"GET\":\n response = requests.get(endpoint, headers=headers)\n elif method == \"POST\":\n response = requests.post(endpoint, headers=headers, json=json_data)\n elif method == \"DEL\":\n response = requests.delete(endpoint, headers=headers)\n elif method == \"MULTIPART\":\n response = requests.post(\n endpoint, headers=headers, data=json_data, files=file_data\n )\n else:\n raise Exception(\"Unknown method: \" + method)\n\n response.raise_for_status()\n return response\n","repo_name":"davidbyttow/sandbox-ai","sub_path":"projects/her_gpt/elevenlabs.py","file_name":"elevenlabs.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2499215591","text":"from fastapi import APIRouter, Depends\n\nfrom datebase import 
get_db\nfrom v1.services.operation import *\n\nrouter = APIRouter()\n\n\n@router.post('/', tags=['Операция'])\nasync def create(data: OperationCreate = Depends(OperationCreate.as_form), db: Session = Depends(get_db)):\n return create_operation(data, db)\n\n\n@router.get('/{id}', tags=['Операция'])\nasync def get(id: int = None, db: Session = Depends(get_db)):\n return get_operation(id, db)\n\n\n@router.get('/', tags=['Операция'])\nasync def get_all(db: Session = Depends(get_db)):\n return get_all_operation(db)\n\n\n@router.put('/{id}', tags=['Операция'])\nasync def update(id: int = None, data: OperationUpdate = Depends(OperationUpdate.as_form), db: Session = Depends(get_db)):\n return update_operation(data, db, id)\n\n\n@router.delete('/{id}', tags=['Операция'])\nasync def delete(id: int = None, db: Session = Depends(get_db)):\n return remove_operation(id, db)\n\n\n","repo_name":"fleeper2133/research_sector","sub_path":"v1/routers/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20876892095","text":"from modules.pharmex.utils.exception_handler import *\nfrom pricelists.PriceListClient import PriceListClient\nimport tornado.web\n\nfrom modules.kinetic_core.DateTimeEncoder import DateTimeEncoderCompact\nimport asyncio\nfrom web.handlers.BaseHandler import *\nfrom modules.pharmex.basket.BasketExecutorClient_ph import BasketExecutorClient_ph\nimport json\nimport pandas as pd\nimport datetime\nfrom modules.kinetic_core.Connector import db\nimport math\n\n\nclass ProcurementBasketMIVSResponseHandler(BaseHandler):\n\n @throws_handler()\n @allowedRole(Role.PHARMA_CLIENT)\n async def post(self):\n\n # MOCK\n #product_ids = [8119, 9034, 5837]\n # MOCK\n #provisions_products_quantities = {8119: 1, 9034: 1, 5837: 3}\n\n\n params = json.loads(self.get_argument('values'))\n\n matrix_supplier_ids = params.get('matrix_supplier_ids', None)\n\n provision_ids = params.get('ids', None)\n selected_supplier_ids = params.get('supplier_ids', None)\n urgent_procurement = int(params.get(\"urgent_procurement\", 0))\n pharmacy = int(params.get(\"pharmacy\", 0))\n\n procurement_type = params.get(\"procurement_type\", 'standard')\n price_type = params.get(\"price_type\", 'wire100')\n\n\n provisions = await db.list(\n \"select provision_id, product_id, quantity, cost, total_cost, current_timestamp from ph_provision where provision_id = any($1::int[])\",\n (provision_ids,))\n print(provisions)\n\n mivs_provisions = {int(p_id): 1 for p_id in provision_ids}\n\n user_id = self.get_current_user()[\"user_id\"]\n\n\n basket_executor = BasketExecutorClient_ph()\n\n print('calling mivs')\n\n mivs_result = await basket_executor.calc_by_mivs(data={\"provisions\": mivs_provisions,\n \"procurement_type\": procurement_type,\n \"price_type\": price_type,\n \"matrix_supplier_ids\": matrix_supplier_ids,\n \"user_id\": user_id})\n\n #if mivs_result[\"status\"] == \"OPTIMAL\":\n # mivs_result = mivs_result[\"\"]\n\n print('mivs request complete')\n l = list()\n\n # MOCK\n \"\"\"\n mivs_result = {'351': {\n 'prices': {'wire100': {'8119': 1, '9034': 1},\n 'wire50': {},\n 'wire25': {},\n 'cash_cost': {}\n },\n 'total_price': 154571, 'cash_price': 0, 'prepayment_price': 12345, 'discount': 0},\n\n '117': {\n 'prices': {'wire100': {},\n 'wire50': {'5837': 3},\n 'wire25': {},\n 'cash_cost': {}\n },\n 'total_price': 100500, 'cash_price': 0, 'prepayment_price': 14253, 'discount': 0},\n\n 'status': 
'OPTIMAL', 'status_code': 4}\n \"\"\"\n data = {}\n\n if mivs_result != None:\n\n data[\"status\"] = mivs_result[\"status\"]\n\n if mivs_result[\"status\"] == 'OPTIMAL':\n\n data[\"message\"] = 'OK'\n\n mivs_filtered = {k: v for k, v in mivs_result.items() if not k.startswith('status')}\n\n mivs_sup_ids = [int(m) for m in mivs_filtered]\n basket = await basket_executor.get_by_user_id(data={\"user_id\": user_id})\n\n from modules.pharmex.procurement.ProcurementExecutorClient_ph import ProcurementExecutorClient_ph\n procurement_client = ProcurementExecutorClient_ph()\n procurements = []\n for procurement_id in list(basket[\"procurements\"].values()):\n procurement = await procurement_client.get_one(data={\"procurement_id\": procurement_id})\n procurements.append(procurement)\n\n suppliers = await db.list(\n \"select company_name, pharmex_supplier_id, min_sum, pharmacy, urgent_procurement, current_timestamp from supplier where pharmex_supplier_id = any($1::int[])\",\n (mivs_sup_ids,))\n row_suppliers = {supplier[\"pharmex_supplier_id\"]: supplier[\"company_name\"] for supplier in suppliers}\n\n product_ids = [provision[\"product_id\"] for provision in provisions]\n #print(\"p ids: \", product_ids)\n\n products = await db.list(\n \"select product_id, name, current_timestamp from products where product_id = any($1::int[])\",\n (product_ids,))\n\n row_products = {p[\"product_id\"]: p[\"name\"] for p in products}\n\n keys_localized = {\"prices\": \"Товары\", \"wire100\": \"Перечислением 100%\", \"wire50\": \"Перечислением 50%\", \"wire25\": \"Перечислением 25%\",\n \"cash_cost\": \"Наличными\", \"total_price\": \"Общая сумма\", \"cash_price\": \"Сумма наличными\",\n \"prepayment_price\": \"Сумма предоплаты\", \"discount\": \"Скидка\"}\n\n #print(\"products: \", products)\n\n data[\"mivs_result\"] = mivs_filtered\n data[\"suppliers\"] = row_suppliers\n data[\"products\"] = row_products\n data[\"keys_localized\"] = keys_localized\n\n else:\n data[\"status\"] = \"ERROR\"\n data[\"message\"] = mivs_result[\"message\"]\n\n else:\n data[\"status\"] = \"ERROR\"\n data[\"message\"] = \"Во время вычислений произошла ошибка. 
Пожалуйста, попробуйте позже.\"\n\n self.write(data)\n self.finish()\n","repo_name":"shohruh-abduakhatov-portfolio/go-apteka-py","sub_path":"web/handlers/procurement/ProcurementBasketMIVSResponseHandler.py","file_name":"ProcurementBasketMIVSResponseHandler.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"415170312","text":"from pages.task_page import TaskPage\n\n\ndef test_task(browser):\n link = \"https://taskksat.pythonanywhere.com/\"\n page = TaskPage(browser, link) # инициализируем Page Object с браузером\n page.open() # открываем страницу\n page.start_task()\n page.entrance()\n #page.creating_a_task()\n #page.test_navbar_task() #тестирование кнопок навбар","repo_name":"ConsttsnoC/personal-site-testing","sub_path":"tests/task_test.py","file_name":"task_test.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24192252615","text":"# -*- coding: utf-8 -*-\n#\n# OneEuroFilter.py -\n#\n# Author: Nicolas Roussel (nicolas.roussel@inria.fr)\n\nimport math\nimport csv\nimport matplotlib.pyplot as mpl\n\n# ----------------------------------------------------------------------------\n\nclass LowPassFilter(object):\n\n def __init__(self, alpha):\n self.__setAlpha(alpha)\n self.__y = self.__s = None\n\n def __setAlpha(self, alpha):\n alpha = float(alpha)\n if alpha<=0 or alpha>1.0:\n raise ValueError(\"alpha (%s) should be in (0.0, 1.0]\"%alpha)\n self.__alpha = alpha\n\n def __call__(self, value, timestamp=None, alpha=None): \n if alpha is not None:\n self.__setAlpha(alpha)\n if self.__y is None:\n s = value\n else:\n s = self.__alpha*value + (1.0-self.__alpha)*self.__s\n self.__y = value\n self.__s = s\n return s\n\n def lastValue(self):\n return self.__y\n\n# ----------------------------------------------------------------------------\n\nclass OneEuroFilter(object):\n\n def __init__(self, freq, mincutoff=1.0, beta=0.0, dcutoff=1.0):\n if freq<=0:\n raise ValueError(\"freq should be >0\")\n if mincutoff<=0:\n raise ValueError(\"mincutoff should be >0\")\n if dcutoff<=0:\n raise ValueError(\"dcutoff should be >0\")\n self.__freq = float(freq)\n self.__mincutoff = float(mincutoff)\n self.__beta = float(beta)\n self.__dcutoff = float(dcutoff)\n self.__x = LowPassFilter(self.__alpha(self.__mincutoff))\n self.__dx = LowPassFilter(self.__alpha(self.__dcutoff))\n self.__lasttime = None\n \n def __alpha(self, cutoff):\n te = 1.0 / self.__freq\n tau = 1.0 / (2*math.pi*cutoff)\n return 1.0 / (1.0 + tau/te)\n\n def __call__(self, x, timestamp=None):\n # ---- update the sampling frequency based on timestamps\n if self.__lasttime and timestamp:\n self.__freq = 1.0 / (timestamp-self.__lasttime)\n self.__lasttime = timestamp\n # ---- estimate the current variation per second\n prev_x = self.__x.lastValue()\n dx = 0.0 if prev_x is None else (x-prev_x)*self.__freq # FIXME: 0.0 or value?\n edx = self.__dx(dx, timestamp, alpha=self.__alpha(self.__dcutoff))\n # ---- use it to update the cutoff frequency\n cutoff = self.__mincutoff + self.__beta*math.fabs(edx)\n # ---- filter the given value\n return self.__x(x, timestamp, alpha=self.__alpha(cutoff))\n\n# ----------------------------------------------------------------------------\n\nif __name__==\"__main__\":\n\n duration = 5.0 # seconds\n \n config = {\n 'freq': 650, # Hz\n 'mincutoff': 0.007, # FIXME\n 'beta': 0.005, # FIXME\n 'dcutoff': 1.0 # this one 
should be ok\n }\n \n print ("#SRC OneEuroFilter.py")\n print ("#CFG %s"%config)\n print ("#LOG timestamp, signal, noisy, filtered")\n \n f = OneEuroFilter(**config)\n timestamp = 0.0 # seconds\n\n # Lists to keep things read from csv, these will be used for plots\n timestamps = []\n signals = []\n filtereds = []\n\n # newline='' was causing errors, 'rU' for mode works\n with open('Noise.csv', 'rU') as csvdata:\n spamreader = csv.reader(csvdata, delimiter=',')\n iterationvar = 0\n for row in spamreader:\n timestamp = float(row[0])\n signal = float(row[1])\n filtered = f(signal, timestamp)\n #print ("{0}, {1}, {2}".format(timestamp, signal, filtered))\n timestamps.append(timestamp)\n signals.append(signal)\n filtereds.append(filtered)\n # Only handle 300 first entries, keeps the plot clear\n iterationvar += 1\n if iterationvar > 300:\n break\n\n # Draw plot with both unfiltered and filtered signals\n sig_plot, = mpl.plot(timestamps, signals, 'r', label="Raw")\n fil_plot, = mpl.plot(timestamps, filtereds, 'g', label="Filtered")\n mpl.xlabel('Timestamps')\n mpl.ylabel('Signals')\n mpl.legend(handles=[sig_plot, fil_plot])\n mpl.show()\n\n# Original code in http://cristal.univ-lille.fr/~casiez/1euro/OneEuroFilter.py\n# while timestamp 125 else 0)\n # From grayscale to black and white\n X[X < 125] = 0 \n X[X >= 125] = 1\n\n\n #train_length = (int)(data_length*test_train)\n\n X_train, X_test = X[:data_length, :], X[data_length:data_length+10000, :]\n # get labels \n y = mnist['target'].values.copy()\n y_train, y_test = y[:data_length], y[data_length:data_length+10000]\n # One hot encoding for labels \n y_train = LabelBinarizer().fit_transform(y_train)\n y_test = LabelBinarizer().fit_transform(y_test)\n return X_train, X_test, y_train, y_test\n \ndef lire_alpha_digit(digits_list):\n \"\"\"Read alpha_digit data from specific digits_list\n\n Keyword arguments:\n digits_list -- list of digits to be fetched as str\n ['0', '2', 'F', 'Z']\n \n Return: \n alpha digits data regarding the input digits as a np.array matrix\n (row is a data point, columns are the features of each picture )\n \"\"\"\n # check if data is in data/ folder \n if os.path.exists('data/binaryalphadigs.mat'):\n print('File already downloaded, using version in data folder..')\n # download it if necessary\n else:\n print('Fetching data on internet...')\n fetch_alpha_digits_data()\n # load data \n alphadigs_dict = sio.loadmat('data/binaryalphadigs.mat')\n \n # filter digits\n digit2idx = {}\n for i, digit in enumerate(alphadigs_dict['classlabels'][0]):\n digit2idx[digit[0]] = i\n \n # collect indexes \n idxs = []\n for digit in digits_list:\n idxs.append(digit2idx[digit])\n #return alphadigs_dict['dat'][idxs]\n # Adapt to the (n, p) format: each column is a pixel and each row an image\n return np.stack(np.concatenate(alphadigs_dict['dat'][idxs])).reshape(-1, 20*16)\n\ndef sigmoid(x):\n return expit(x)\n \n","repo_name":"viniciusoliveirasd/rendu-projet-dl2","sub_path":"utils_projet.py","file_name":"utils_projet.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36198989603","text":"import time\nimport serial\nimport pymodbus\nimport logging\nimport math\nimport threading\n\nfrom pymodbus.pdu import ModbusRequest\nfrom pymodbus.client.sync import ModbusSerialClient as ModbusClient #initialize a serial RTU client instance\nfrom pymodbus.transaction import ModbusRtuFramer\nfrom pymodbus.constants import 
Endian\nfrom pymodbus.payload import BinaryPayloadDecoder\nfrom pymodbus.payload import BinaryPayloadBuilder\n\n\nclass kom:\n client = ModbusClient(method = \"rtu\", port=\"/dev/ttyUSB0\",timeout=(1/100), stopbits = 1, bitsize = 32, parity = 'N', baudrate = 115200)\n ser = serial.Serial(port=\"/dev/ttyACM0\", baudrate = 115200, timeout=1)\n path5value = (5, 0)\n speedpath6 = client.write_registers(0x050E, path5value, unit = 0x0077)\n \nclass inputservo:\n servo = \"ServoOn\"\n \nclass readmodbus:\n a = kom()\n while True:\n posisi = a.client.read_holding_registers(0x0520, 2, unit = 0x0077)\n posisireg0 = posisi.getRegister(0)\n posisireg1 = posisi.getRegister(1)\n puu = posisireg0 - posisireg1\n sudut = (puu*9)/2500\n kecepatan = a.client.read_holding_registers(0x0012, 2, unit = 0x0077)\n kecepatan0 = kecepatan.getRegister(0)\n kecepatan1 = kecepatan.getRegister(1)\n speed = kecepatan0 - kecepatan1\n\nclass inputsignal:\n f = kom()\n while True:\n x=f.ser.readline(None)\n \n if (x == b'0\\r\\n'):\n y= 0\n elif (x == b'1\\r\\n'):\n y= 5000\n elif (x == b'-1\\r\\n'):\n y= -5000\n elif (x == b'2\\r\\n'):\n y= 10000\n elif (x == b'-2\\r\\n'):\n y= -10000\n elif (x == b'3\\r\\n'):\n y= 15000\n else:\n y= -15000\n \nclass servo:\n b = inputservo()\n c = kom()\n k = b.servo\n if (b.servo == \"ServoOn\"):\n valueservo = (0x0001, 0x0000)\n setservo = c.client.write_registers(0x0214, valueservo, unit = 0x0077)\n else :\n valueservo = (0x0101, 0x0000)\n setservo = c.client.write_registers(0x0214, valueservo, unit = 0x0077)\n\nclass setposisi:\n e = kom()\n while True:\n errorposisi= y\n if (errorposisi > 0):\n input0 = errorposisi\n input1 = 0\n elif (errorposisi < 0):\n input0 = 65535 + errorposisi + 1\n input1 = 65535 \n else:\n input0 = 0\n input1 = 0 \n valuespeed = (input0, input1)\n setreg = e.client.write_registers(0x0616, valuespeed, unit = 0x0077)\n \n\n\nclass printdata:\n g = inputservo()\n h = posisi()\n i = kecepatan()\n j = inputsignal()\n while True:\n print (g.servo, 'sudut =', h.sudut, \"| puu = \", h.puu, \"| speed =\", i.speed, \"| setspeed =\", j.y)\n","repo_name":"achmadindra/TA","sub_path":"multithread.py","file_name":"multithread.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"543531874","text":"# -*- coding: utf-8 -*-\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Series, compat\nfrom pandas.core.indexes.period import IncompatibleFrequency\nimport pandas.util.testing as tm\n\n\ndef _permute(obj):\n return obj.take(np.random.permutation(len(obj)))\n\n\nclass TestSeriesFlexArithmetic(object):\n @pytest.mark.parametrize(\n 'ts',\n [\n (lambda x: x, lambda x: x * 2, False),\n (lambda x: x, lambda x: x[::2], False),\n (lambda x: x, lambda x: 5, True),\n (lambda x: tm.makeFloatSeries(),\n lambda x: tm.makeFloatSeries(),\n True)\n ])\n @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',\n 'truediv', 'div', 'pow'])\n def test_flex_method_equivalence(self, opname, ts):\n # check that Series.{opname} behaves like Series.__{opname}__,\n tser = tm.makeTimeSeries().rename('ts')\n\n series = ts[0](tser)\n other = ts[1](tser)\n check_reverse = ts[2]\n\n if opname == 'div' and compat.PY3:\n pytest.skip('div test only for Py3')\n\n op = getattr(Series, opname)\n\n if op == 'div':\n alt = operator.truediv\n else:\n alt = getattr(operator, opname)\n\n result = op(series, other)\n expected = alt(series, other)\n 
tm.assert_almost_equal(result, expected)\n if check_reverse:\n rop = getattr(Series, "r" + opname)\n result = rop(series, other)\n expected = alt(other, series)\n tm.assert_almost_equal(result, expected)\n\n\nclass TestSeriesArithmetic(object):\n # Some of these may end up in tests/arithmetic, but are not yet sorted\n\n def test_add_series_with_period_index(self):\n rng = pd.period_range('1/1/2000', '1/1/2010', freq='A')\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n result = ts + ts[::2]\n expected = ts + ts\n expected[1::2] = np.nan\n tm.assert_series_equal(result, expected)\n\n result = ts + _permute(ts[::2])\n tm.assert_series_equal(result, expected)\n\n msg = "Input has different freq=D from PeriodIndex\\\\(freq=A-DEC\\\\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n ts + ts.asfreq('D', how="end")\n\n\n# ------------------------------------------------------------------\n# Comparisons\n\nclass TestSeriesFlexComparison(object):\n def test_comparison_flex_basic(self):\n left = pd.Series(np.random.randn(10))\n right = pd.Series(np.random.randn(10))\n\n tm.assert_series_equal(left.eq(right), left == right)\n tm.assert_series_equal(left.ne(right), left != right)\n tm.assert_series_equal(left.le(right), left <= right)\n tm.assert_series_equal(left.lt(right), left < right)\n tm.assert_series_equal(left.gt(right), left > right)\n tm.assert_series_equal(left.ge(right), left >= right)\n\n # axis\n for axis in [0, None, 'index']:\n tm.assert_series_equal(left.eq(right, axis=axis), left == right)\n tm.assert_series_equal(left.ne(right, axis=axis), left != right)\n tm.assert_series_equal(left.le(right, axis=axis), left <= right)\n tm.assert_series_equal(left.lt(right, axis=axis), left < right)\n tm.assert_series_equal(left.gt(right, axis=axis), left > right)\n tm.assert_series_equal(left.ge(right, axis=axis), left >= right)\n\n #\n msg = 'No axis named 1 for object type'\n for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:\n with pytest.raises(ValueError, match=msg):\n getattr(left, op)(right, axis=1)\n\n\nclass TestSeriesComparison(object):\n def test_comparison_different_length(self):\n a = Series(['a', 'b', 'c'])\n b = Series(['b', 'a'])\n with pytest.raises(ValueError):\n a < b\n\n a = Series([1, 2])\n b = Series([2, 3, 4])\n with pytest.raises(ValueError):\n a == b\n\n @pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])\n def test_ser_flex_cmp_return_dtypes(self, opname):\n # GH#15115\n ser = Series([1, 3, 2], index=range(3))\n const = 2\n\n result = getattr(ser, opname)(const).get_dtype_counts()\n tm.assert_series_equal(result, Series([1], ['bool']))\n\n @pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])\n def test_ser_flex_cmp_return_dtypes_empty(self, opname):\n # GH#15115 empty Series case\n ser = Series([1, 3, 2], index=range(3))\n empty = ser.iloc[:0]\n const = 2\n\n result = getattr(empty, opname)(const).get_dtype_counts()\n tm.assert_series_equal(result, Series([1], ['bool']))\n\n @pytest.mark.parametrize('op', [operator.eq, operator.ne,\n operator.le, operator.lt,\n operator.ge, operator.gt])\n @pytest.mark.parametrize('names', [(None, None, None),\n ('foo', 'bar', None),\n ('baz', 'baz', 'baz')])\n def test_ser_cmp_result_names(self, names, op):\n # datetime64 dtype\n dti = pd.date_range('1949-06-07 03:00:00',\n freq='H', periods=5, name=names[0])\n ser = Series(dti).rename(names[1])\n result = op(ser, dti)\n assert result.name == names[2]\n\n # datetime64tz dtype\n dti = dti.tz_localize('US/Central')\n ser = 
Series(dti).rename(names[1])\n result = op(ser, dti)\n assert result.name == names[2]\n\n # timedelta64 dtype\n tdi = dti - dti.shift(1)\n ser = Series(tdi).rename(names[1])\n result = op(ser, tdi)\n assert result.name == names[2]\n\n # categorical\n if op in [operator.eq, operator.ne]:\n # categorical dtype comparisons raise for inequalities\n cidx = tdi.astype('category')\n ser = Series(cidx).rename(names[1])\n result = op(ser, cidx)\n assert result.name == names[2]\n","repo_name":"jgagneastro/coffeegrindsize","sub_path":"App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/tests/series/test_arithmetic.py","file_name":"test_arithmetic.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","stars":454,"dataset":"github-code","pt":"47"} +{"seq_id":"25070938348","text":"# -*- coding: utf-8 -*-\n# @Author: youerning\n# @Email: 673125641@qq.com\nimport pandas as pd\nfrom glob import glob\nfrom nobody.utils.utils import load_hist\nfrom nobody.settings import config\nfrom os import path\n\n\nclass TestUtils(object):\n def setup(self):\n self.start_date = \"2018-01-02\"\n self.end_date = \"2019-01-02\"\n\n def test_load_hist(self):\n assert len(list(load_hist(\"000001.SZ\"))) == 1\n assert len(list(load_hist([\"000001.SZ\", \"000002.SZ\"]))) == 2\n assert len(list(load_hist())) == len(glob(path.join(config[\"STOCK_DATA_PATH\"], \"*csv\")))\n\n data = list(load_hist(\"000001.SZ\", start_date=self.start_date, end_date=self.end_date))\n hist = data[0][1]\n\n assert pd.to_datetime(self.start_date) == hist.index[0]\n assert pd.to_datetime(self.end_date) == hist.index[-1]\n","repo_name":"youerning/stock_playground","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"47"} +{"seq_id":"2616050367","text":"# Given an integer array nums and an integer k, return the k most frequent elements. You may return the answer in any order.\n\n# Input: nums = [1,1,1,2,2,3], k = 2\n# Output: [1,2]\n\nclass Solution:\n def topKFrequent(self, nums: list[int], maxFreq: int) -> list[int]:\n map = {}\n invMap = [ [] ] * len(nums) \n print(invMap)\n listToReturn = []\n\n for n in nums:\n if n not in map:\n map[n] = 0\n map[n] += 1\n for k,v in map.items():\n invMap[v-1] = invMap[v-1] + [k]\n while len(listToReturn) < maxFreq:\n for i in range(len(invMap)-1, -1, -1):\n listToReturn += invMap[i]\n if len(listToReturn) == maxFreq:\n return listToReturn\nnums = [5, 1, -1, -8, -7, 8, -5, 0, 1, 10, 8, 0, -4, 3, -1, -1, 4, -5, 4, -3, 0, 2, 2, 2, 4, -2, -4, 8, -7, -7, 2, -8, 0, -8, 10, 8, -8, -2, -9, 4, -7, 6, 6, -1, 4, 2, 8, -3, 5, -9, -3, 6, -8, -5, 5, 10, 2, -5, -1, -5, 1, -3, 7, 0, 8, -2, -3, -1, -5, 4, 7, -9, 0, 2, 10, 4, 4, -4, -1, -1, 6, -8, -9, -1, 9, -9, 3, 5, 1, 6, -1, -2, 4, 2, 4, -6, 4, 4, 5, -5]\nk = 7\nprint(Solution().topKFrequent(nums, k))\n","repo_name":"semere01/Comptetitive-Programming","sub_path":"a2sv-comminity-progress/Heaps/347-top-k-frequent-elements.py","file_name":"347-top-k-frequent-elements.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3947258673","text":"# 선분 세 개로 삼각형을 만들기 위해서는 다음과 같은 조건을 만족해야 합니다.\n\n# 가장 긴 변의 길이는 다른 두 변의 길이의 합보다 작아야 합니다.\n# 삼각형의 세 변의 길이가 담긴 배열 sides이 매개변수로 주어집니다. 
세 변으로 삼각형을 만들 수 있다면 1, 만들 수 없다면 2를 return하도록 solution 함수를 완성해주세요.\n\n# 제한사항\n# sides의 원소는 자연수입니다.\n# sides의 길이는 3입니다.\n# 1 ≤ sides의 원소 ≤ 1,000\n# 입출력 예\n# sides\tresult\n# [1, 2, 3]\t2\n# [3, 6, 2]\t2\n# [199, 72, 222]\t1\n# 입출력 예 설명\n# 입출력 예 #1\n\n# 가장 큰 변인 3이 나머지 두 변의 합 3과 같으므로 삼각형을 완성할 수 없습니다. 따라서 2를 return합니다.\n# 입출력 예 #2\n\n# 가장 큰 변인 6이 나머지 두 변의 합 5보다 크므로 삼각형을 완성할 수 없습니다. 따라서 2를 return합니다.\n# 입출력 예 #3\n\n# 가장 큰 변인 222가 나머지 두 변의 합 271보다 작으므로 삼각형을 완성할 수 있습니다. 따라서 1을 return합니다.\n\n# 내 정답\ndef solution(sides):\n a = sides[0]\n b = sides[1]\n c = sides[2]\n \n answer = 0\n if a > b and a > c :\n if b+c > a:\n answer = 1\n else :\n answer = 2\n elif b > a and b > c :\n if a + c > b :\n answer = 1\n else :\n answer = 2\n else :\n if a + b > c :\n answer = 1\n else :\n answer = 2\n return answer\n\nsolution([3, 6, 2])\n'''\nsort를 해서 작은수부터 지정하는 방법도 있음\n이것은 찾아보고 한것이므로 내가한것은 아니지만 새로운 방법을 터득햇습니다.\nex)\n\ndef solution(sides):\n sides.sort()\n \n if sides[0] + sides[1] > sides[2] :\n answer = 1\n else :\n answer = 2\n return answer\n\n\n\n\n'''","repo_name":"rlalastjd782/codingTest","sub_path":"level0/삼각형의완성조건1.py","file_name":"삼각형의완성조건1.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5993517521","text":"from pbxproj import XcodeProject\nfrom pbxproj.pbxextensions import *\n\n\n'''\nUseful methods:\nproject.get_file_by_id\nproject.get_files_by_name\nproject.get_files_by_path\n\nproject.get_groups_by_name\nproject.get_groups_by_path\n\nproject.get_object(id)\n\nobject.get_id()\n'''\n\nconfig = {\n'xcodeproj': './cloud-search.xcodeproj'\n}\n# open project\nproject = XcodeProject.load(config['xcodeproj'] + '/project.pbxproj')\n\nclass GroupStruct:\n Dict = {} # if an object has a parent, its value here is that parent's id\n def __init__(self, project):\n self.groups = project.objects.get_objects_in_section('PBXGroup')\n\n # initialize the dictionary of groups\n for item in self.groups:\n itemID = item.get_id().upper()\n for child in item.children:\n self.Dict[child.upper()] = itemID\n\n '''\n input: file id\n output: None if over maxIter, list of groups\n '''\n def iterateFiles(self, fileID, maxIter = 100):\n curr = fileID\n list = []\n iter = 0\n while curr in self.Dict:\n parent = self.Dict[curr]\n list.append(parent)\n curr = parent\n iter += 1\n if iter > maxIter:\n return None\n\n return list\n\n def constructPath(self):\n pass\n\n''' \nphaseType: PBXResourcesBuildPhase. Resources/Headers/Sources/Frameworks\ntargetName: 'string'\n'''\ndef getPhaseInTarget(project, phaseType, targetObj):\n for phase in targetObj.buildPhases:\n if isinstance(project.get_object(phase), phaseType):\n return phase\n\n return None\n\n'''\ntargetName: specific target object\nfileObjs: can be more than one \n\n\n'''\ndef checkHeader(project, fileObjs, targetObj = None):\n PBXHeadersBuildPhaseList = []\n\n for fileObj in fileObjs:\n fileID = fileObj.get_id()\n if targetObj:\n phaseID = getPhaseInTarget(project, PBXHeadersBuildPhase, targetObj)\n phase = project.get_object(phaseID)\n for phaseFile in phase.files:\n if phaseFile == fileObj.get_id():\n return fileObj\n\n\n return None\n\ndef checkAllHeaders():\n TargetList = project.objects.get_targets()\n\n\n'''\nAdd existing file (valid fileRef) to header.\n1. Find the file ID in PBXFileReference. \n2. Add entry in PBXBuildFile (check if it exists) with correct settings (public/private/project)\n3. Find correct PBXHeadersBuildPhase section. Add PBXBuildFile to files\n\n1. 
Find the existing file with project.get_files_by_name\n2. project.get_build_files_for_file(fileId)\n3. get Buildfile ID\n4. Find target, project.get_target_by_name('target')\n5. Find target Header using project.get_target_by_name('target').buildPhases (loop through, use type(get_object()) check for PBXHeadersBuildPhase)\n6. Get header phase, check for the Buildfile ID\n\nproject.get_build_files_for_file(file.get_id()) get the targets with the file\ninput:\n project: xcode file\n fileObj: file object, get through get_files or get_file\n headerScope: scope of headers (PROJECT/PRIVATE/PUBLIC)\n targetName: which target to add to\n force: True to override options (reduce to only one), False to allow adding multiple instances. !!! should be True !!!\n'''\ndef addExistingFileToHeader(project, fileObj, targetName, headerScope = HeaderScope.PRIVATE, force=True):\n buildfiles = project.get_build_files_for_file(fileObj.get_id()) # can have multiple BuildFiles to one fileRef\n target = project.get_target_by_name(targetName)\n buildFile = checkHeader(project, buildfiles, target)\n if buildFile: # If the build file for the specific target exists, we modify it\n buildFile.settings.ATTRIBUTES = [headerScope]\n return\n\n fileOptions = FileOptions(header_scope=headerScope)\n file_type, expected_build_phase = ProjectFiles._determine_file_type(fileObj, fileOptions.ignore_unknown_type)\n buildFile = project._create_build_files(fileObj, targetName, expected_build_phase, fileOptions)\n\n return buildFile\n\nif __name__ == '__main__':\n test = GroupStruct(project)\n a = addExistingFileToHeader(project, project.get_files_by_name('AAPLAppDelegate.h')[0], targetName = 'CloudSearch')\n print(a)\n project.save()","repo_name":"qq404041481/mod-xcode","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35438410786","text":"from egz1btesty import runtests\n\n\ndef planets(D, C, T, E):\n n = len(D)\n f = [[float('inf') for _ in range(E + 1)] for _ in range(n)]\n for i in range(E + 1):\n f[0][i] = C[0] * i\n\n planet = T[0][0]\n cost = T[0][1]\n f[planet][0] = cost + f[0][0]\n for i in range(1, n):\n for b in range(E + 1):\n dist = D[i] - D[i - 1]\n if b + dist < E:\n first = f[i - 1][b + dist]\n else:\n first = f[i - 1][E] + C[i] * (b + dist - E)\n if b > 0:\n second = f[i][b - 1] + C[i]\n else:\n second = float('inf')\n f[i][b] = min(first, second, f[i][b])\n if b == 0:\n planet = T[i][0]\n cost = T[i][1]\n f[planet][0] = min(f[planet][0], f[i][b] + cost)\n return min(f[n - 1])\n\n\n# change all_tests to True to run all the tests\nruntests(planets, all_tests=True)\n","repo_name":"WiktorDybalski/Python_projects-term_2-ASD","sub_path":"Exams/22-23/1/B-planety/egz1b.py","file_name":"egz1b.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37253049882","text":"#!/usr/bin/env python3\n\n\"\"\"new_email_inotify module.\n\nThis module periodically checks whether an event occurred in the queue/new directory, i.e. whether one or more e-mails arrived.\nAuthor: Silvie Chlupová\nDate Created: 04/27/2020\n\"\"\"\n\nimport inotify.adapters\nimport time\nfrom threading import Thread\nfrom apscheduler.scheduler import Scheduler\nfrom smtplib import SMTP\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport lockfile\nimport daemon\nimport argparse\nimport 
os\n\n\nemail_event = 0\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(allow_abbrev=False)\n parser.add_argument(\n \"--recipient\",\n \"-r\",\n type=str,\n required=True,\n help=\"error message recipient\",\n )\n args = parser.parse_args()\n return args\n\n\ndef send_mail(args):\n text = \"Salmon is down, at least 6 hours have passed since the last email.\"\n sender = \"salmon@salmon.info\"\n recipient = args.recipient\n server = \"127.0.0.1\"\n port = 2500\n\n msg = MIMEMultipart(\"alternative\")\n msg[\"Subject\"] = \"Salmon error report\"\n msg[\"From\"] = sender\n recipients = [recipient]\n msg['To'] = recipient\n part = MIMEText(text, \"plain\")\n msg.attach(part)\n\n with SMTP(server, port) as s:\n s.send_message(msg)\n\n\ndef check_dir():\n i = inotify.adapters.Inotify()\n i.add_watch('queue/new')\n global email_event\n while True:\n events = i.event_gen(yield_nones=False, timeout_s=1)\n events = list(events)\n if len(events) > 0:\n email_event += 1\n\n\ndef check_email_event(args):\n global email_event\n if email_event == 0:\n send_mail(args)\n else:\n email_event = 0\n\n\ndef schedule(args):\n sched = Scheduler()\n sched.add_interval_job(lambda: check_email_event(args), hours=6)\n sched.start()\n\n\ndef main(args):\n worker = Thread(target=check_dir)\n worker2 = Thread(target=schedule, args=(args,))\n worker.start()\n worker2.start()\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n with daemon.DaemonContext(chroot_directory=None, working_directory=os.getcwd()):\n main(args)\n","repo_name":"avast/hermes","sub_path":"receiver/new/new_email_inotify.py","file_name":"new_email_inotify.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"71111883984","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\n# Load your dataset with the correct encoding\ndata = pd.read_csv(\"spam.csv\", encoding=\"ISO-8859-1\")\n\n# Data preprocessing\ndata[\"v1\"] = data[\"v1\"].map({\"ham\": 0, \"spam\": 1})\n\n# Text preprocessing\ntfidf_vectorizer = TfidfVectorizer()\nX = tfidf_vectorizer.fit_transform(data[\"v2\"])\ny = data[\"v1\"]\n\n# Split the dataset\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n# Model building\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\n\n# Model evaluation\ny_pred = model.predict(X_test) \n\ndef classify_text(input_text):\n # Preprocess the input text using the same TF-IDF vectorizer\n input_text_transformed = tfidf_vectorizer.transform([input_text])\n # Predict using the trained model\n prediction = model.predict(input_text_transformed)\n # Convert prediction label to 'spam' or 'ham'\n result = 'spam' if prediction[0] == 1 else 'ham'\n return result\n","repo_name":"HAQ7/datathon-ml-model","sub_path":"web/dist/SMSMLAlgorithm.py","file_name":"SMSMLAlgorithm.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36947428162","text":"# Good Sequence\nn=int(input())\na=list(map(int,input().split()))\nd={}\nfor num in a:\n if num not in d:\n d[num]=1\n else:\n d[num]+=1\nans=0\nfor f,l in d.items():\n if f>l:\n ans+=l\n else:\n ans+=l-f\nprint(ans)","repo_name":"SarbajitPbappy/python","sub_path":"Exam 
1/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"30724452349","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import PhotoImage\nimport os\ndir = os.path.expanduser('~/')\ntinfo='''\nO desinstalador removerá os seguintes arquivos:\nvdown.py ( localizada na pasta /usr/bin )\nvdown/ ( todo o conteudo dento do repositório baixado, provavelmente na pasta {} )\nduck.ico ( localizado na pasta /usr/share/icons )\nlang.conf ( localizado na pasta /usr/share/vdown )\n\n'''.format(dir)\n\ndef remover():\n\tos.system('locate vdown.py vdown/ duck.ico lang.conf > {}temp.txt'.format(dir))\n\ta = open('{}temp.txt'.format(dir))\n\tb = a.readlines()\n\tfor i in b:\n\t\tos.system('sudo rm '+i)\n\ta.close()\n\tmessagebox.showinfo('Aviso', 'Desinstalado com Sucesso!')\n\tos.system('rm {}temp.txt'.format(dir))\n\tj.destroy()\n\t\ndef info():\n\ti = Tk()\n\ti.title(\"Info\")\n\tli = Label(i,text=tinfo)\n\tli.pack()\n\ti.mainloop()\nj = Tk()\nj.title('Desinstalar Vdown by:@Curing0')\nj.geometry(\"380x130\")\nimg= PhotoImage(file=\"/usr/share/icons/vdowninfo.png\")\nl = Label(j, text=\"Você deseja desinstalar Vdown(Beta)?\", font=(\"Arial\", \"14\"))\nl.grid(column=1)\nb = Button(j,image=img,command=info)\nb.grid(column=2)\nd = Button(j,text=\"Desinstalar\",command=remover)\nd.grid(column=1)\nj.mainloop()\n","repo_name":"KaioGomesx/vdown","sub_path":"uninstall.py","file_name":"uninstall.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"36569991883","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .forms import UserRegistrationForm, UserUpdateForm, ProfileUpdateForm\n\n\ndef register_user(request):\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n form.save()\n messages.success(request, f'You are successfully registered as {username}!')\n return redirect('users:login_user')\n else:\n form = UserRegistrationForm()\n context = {'form': form}\n template = 'users/register_user.html'\n return render(request, template, context)\n\n\n@login_required\ndef user_profile(request):\n if request.method == 'POST':\n user_form = UserUpdateForm(request.POST, instance=request.user) # instance=request.user | to pre-populate the form\n profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, f'Your profiles details were successfully updated!')\n return redirect('users:user_profile')\n else:\n user_form = UserUpdateForm(instance=request.user) # instance=request.user | to pre-populate the form\n profile_form = ProfileUpdateForm(instance=request.user.profile)\n context = {'user_form': user_form, 'profile_form': profile_form}\n template = 'users/profile.html'\n return render(request, template, context)\n","repo_name":"ncux/Django---CoreyMS_Full_Blog","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"4816720780","text":"from django.test import TestCase\nfrom 
django.urls import reverse\n\nclass InicioViewTestCase(TestCase):\n\n def test_status_get_inicio(self):\n response = self.client.get(reverse('inicio'))\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'paginas/inicio.html')\n\nclass MenuViewTestCase(TestCase):\n\n def test_status_get_menu(self):\n response = self.client.get(reverse('menu'))\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'paginas/menu.html')","repo_name":"gustavopsm/Desafio-RESTful-API-Capyba","sub_path":"paginas/tests/tests_views.py","file_name":"tests_views.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"31444183367","text":"#!/usr/bin/python3\n\nfrom collections import namedtuple\nimport heapq\nfrom itertools import product\nimport os\n\n\ndef get_input():\n return os.path.join(\n os.path.dirname(__file__),\n \"input\",\n os.path.basename(__file__).replace(\".py\", \".txt\"))\n\n\nwith open(get_input()) as f:\n in1 = f.read()\n\n\nex1 = \"\"\"\\\n1163751742\n1381373672\n2136511328\n3694931569\n7463417111\n1319128137\n1359912421\n3125421639\n1293138521\n2311944581\n\"\"\"\n\nPoint = namedtuple(\"Point\", [\"x\", \"y\"])\n\n\ndef parse1(xin):\n risks = {}\n for y, l in enumerate(xin.strip().split()):\n for x, r in enumerate(l):\n risks[Point(x, y)] = int(r)\n return risks\n\n\ndef parse2(xin):\n risks = parse1(xin)\n w, h = max(risks).x+1, max(risks).y+1\n erisks = {}\n for p, r in risks.items():\n for i in range(5):\n for j in range(5):\n inc = i + j\n nr = 1 + (r + inc - 1) % 9\n np = Point(p.x+i*w, p.y+j*h)\n erisks[np] = nr\n return erisks\n\n\nclass MinHeap:\n\n def __init__(self):\n self.S = set()\n self.Q = []\n\n def __contains__(self, i):\n return i in self.S\n\n def __len__(self):\n return len(self.Q)\n\n def push(self, i, p):\n self.S.add(i)\n heapq.heappush(self.Q, (p, i))\n\n def pop(self):\n _, i = heapq.heappop(self.Q)\n self.S.remove(i)\n return i\n\n\ndef min_risk_astar(risks):\n src = Point(0, 0)\n dst = max(risks.keys())\n h = lambda p: (dst.x - p.x) + (dst.y - p.y)\n d = lambda p: risks[p] # distance is risk level\n get_neighbors = lambda p: [\n n for n in\n [Point(p.x+1, p.y), Point(p.x-1, p.y),\n Point(p.x, p.y+1), Point(p.x, p.y-1)]\n if n in risks\n ]\n\n inf = float(\"inf\")\n open_set = MinHeap()\n open_set.push(src, inf)\n\n prev = {}\n g_score = {p: inf for p in risks}\n g_score[src] = 0\n\n f_score = {p: inf for p in risks}\n f_score[src] = h(src)\n\n while open_set:\n cur = open_set.pop()\n if cur == dst:\n path = [dst]\n while cur in prev:\n cur = prev[cur]\n if cur != src:\n path.append(cur)\n return sum(d(p) for p in path)\n\n for n in get_neighbors(cur):\n t_g_score = g_score[cur] + d(n)\n if t_g_score < g_score[n]:\n prev[n] = cur\n g_score[n] = t_g_score\n f_score[n] = t_g_score + h(n)\n if n not in open_set:\n open_set.push(n, g_score[n])\n raise ValueError\n\n\nprint(min_risk_astar(parse1(ex1)))\nprint(min_risk_astar(parse1(in1)))\nprint(\"===two\")\nprint(min_risk_astar(parse2(ex1)))\nprint(min_risk_astar(parse2(in1)))\n","repo_name":"dknowles2/aoc","sub_path":"2021/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"13569617573","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 16 22:24:04 2013\n\n@author: puaykai\n\"\"\"\n\nfrom __future__ import division\nimport numpy 
as np\nfrom numpy import linalg as LA\nfrom numpy import dot\nfrom array import array\nfrom display import displayNetwork as dn\n\ndef sparseAutoencoderCost(theta, visibleSize, hiddenSize, lambd, sparsityParam, beta, data):\n \n \"\"\"\n % visibleSize: the number of input units (probably 64) \n % hiddenSize: the number of hidden units (probably 25) \n % lambda: weight decay parameter\n % sparsityParam: The desired average activation for the hidden units (denoted in the lecture\n % notes by the greek alphabet rho, which looks like a lower-case \"p\").\n % beta: weight of sparsity penalty term\n % data: Our 64x10000 matrix containing the training data. So, data(:,i) is the i-th training example. \n \n % The input theta is a vector (because minFunc expects the parameters to be a vector). \n % We first convert theta to the (W1, W2, b1, b2) matrix/vector format, so that this \n % follows the notation convention of the lecture notes. \n \"\"\"\n \"\"\" \n W1 = np.matrix(np.reshape(theta[0,0:hiddenSize*visibleSize], (hiddenSize, visibleSize)))\n W2 = np.matrix(np.reshape(theta[0,hiddenSize*visibleSize:2*hiddenSize*visibleSize], (visibleSize,hiddenSize)))\n b1 = np.matrix(theta[0,2*hiddenSize*visibleSize:2*hiddenSize*visibleSize+hiddenSize])\n b2 = np.matrix(theta[0,2*hiddenSize*visibleSize+hiddenSize:])\n \"\"\"\n W1 = np.reshape(theta[0:hiddenSize*visibleSize], ( visibleSize,hiddenSize))\n W2 = np.reshape(theta[hiddenSize*visibleSize:2*hiddenSize*visibleSize], (hiddenSize,visibleSize))\n b1 = theta[2*hiddenSize*visibleSize:2*hiddenSize*visibleSize+hiddenSize]\n b2 = theta[2*hiddenSize*visibleSize+hiddenSize:]\n \n \n #Cost and gradient variables (your code needs to compute these values). \n #Here, we initialize them to zeros.\n cost = 0\n W1grad = np.zeros(W1.shape)\n W2grad = np.zeros(W2.shape)\n b1grad = np.zeros(b1.shape)\n b2grad = np.zeros(b2.shape)\n \n \"\"\"\n %% ---------- YOUR CODE HERE --------------------------------------\n % Instructions: Compute the cost/optimization objective J_sparse(W,b) for the Sparse Autoencoder,\n % and the corresponding gradients W1grad, W2grad, b1grad, b2grad.\n %\n % W1grad, W2grad, b1grad and b2grad should be computed using backpropagation.\n % Note that W1grad has the same dimensions as W1, b1grad has the same dimensions\n % as b1, etc. Your code should set W1grad to be the partial derivative of J_sparse(W,b) with\n % respect to W1. I.e., W1grad(i,j) should be the partial derivative of J_sparse(W,b) \n % with respect to the input parameter W1(i,j). Thus, W1grad should be equal to the term \n % [(1/m) \\Delta W^{(1)} + \\lambda W^{(1)}] in the last block of pseudo-code in Section 2.2 \n % of the lecture notes (and similarly for W2grad, b1grad, b2grad).\n % \n % Stated differently, if we were using batch gradient descent to optimize the parameters,\n % the gradient descent update to W1 would be W1 := W1 - alpha * W1grad, and similarly for W2, b1, b2. \n % \n \"\"\"\n \n datasize = data.shape\n number_of_patches = datasize[0] #number of columns\n \n expander = np.ones((1, number_of_patches)) #1 X number_of_patches, adds constant to each training example\n \n #Forward Pass\n a0 = sigmoid(dot(data,W1) + b1) \n a1 = sigmoid(dot(a0,W2) + b2)\n \n #L2-norm of error\n L2 = np.sum((a1 - data) ** 2) / (2. 
* number_of_patches)#not checked\n \n #Back-Propagation calculation of gradients\n delta3 = (a1 - data)* a1* (1-a1)\n W2grad = dot(a0.T,delta3) / number_of_patches# average gradient\n b2grad = dot(expander, delta3) / number_of_patches\n \n #Sparsity\n average_activations = dot(expander, a0) / number_of_patches\n sparsity_error = (-sparsityParam/ average_activations) + ((1 - sparsityParam)/ (1 - average_activations)) \n KL_divergence = np.sum( sparsityParam * np.log( (sparsityParam/average_activations)) + (1 - sparsityParam) * np.log( (1-sparsityParam)/(1-average_activations) ))\n \n delta2 = (dot(delta3, W2.T) + dot(expander.T, sparsity_error) * beta) * a0 * (1-a0) \n W1grad = dot(data.T, delta2) / number_of_patches #average gradient\n b1grad = dot(expander, delta2) / number_of_patches#average b1\n \n #Update gradient\n cost = L2 + (lambd / 2.) * ( np.sum(W1**2) + np.sum(W2**2) ) + beta * KL_divergence#change norm\n W1grad = W1grad + lambd * W1\n \n W2grad = W2grad + lambd * W2\n \"\"\"\n %-------------------------------------------------------------------\n % After computing the cost and gradient, we will convert the gradients back\n % to a vector format (suitable for minFunc). Specifically, we will unroll\n % your gradient matrices into a vector.\n \"\"\"\n grad = np.concatenate((W1grad.ravel(), W2grad.ravel(),b1grad.ravel(),b2grad.ravel()))\n \n return (cost, grad)\n \n \n\ndef sigmoid(x):\n \n return 1 / (1 + np.exp(-x))\n \n","repo_name":"puaykai/U154030","sub_path":"Local Data Compression/sparseAutoencoderCost.py","file_name":"sparseAutoencoderCost.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"36718845786","text":"from art import logo\n# import random\nfrom random import randint\n\nEASY_LEVEL = 10\nHARD_LEVEL = 5\n\ndef guess_the_number(number):\n got_it = bool()\n if input(\"Choose a difficulty. Type 'easy' or 'hard': \") == 'easy':\n number_of_attempts = EASY_LEVEL\n else:\n number_of_attempts = HARD_LEVEL\n while number_of_attempts != 0:\n print(f\"You have {number_of_attempts} attempts remaining to guess the number.\")\n try:\n guess = int(input(\"Make a guess: \"))\n except:\n print(\"You type not a number. Please try again.\")\n continue\n if guess < number:\n print(\"Too low.\")\n elif guess > number:\n print(\"Too high.\")\n elif guess == number:\n print(f\"You got it! 
The answer was {number}.\")\n got_it = True\n break\n number_of_attempts -= 1\n print(\"Guess again.\")\n if got_it != True:\n print(\"You've run out of guesses, you lose.\")\n\n\nprint(logo)\nprint(\"Welcome to the Number Guessing Game!\\nI'm thinking of a number between 1 and 100.\")\nsecret_number = randint (1,100)\n# print(f\"Pssst, the correct answer is {secret_number}\")\n\nguess_the_number(secret_number)\n","repo_name":"usleonid/100dayscoding","sub_path":"day12/guess_the_number.py","file_name":"guess_the_number.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"5411697615","text":"import torch\nfrom tqdm import tqdm\n\nimport segmentation_models_pytorch as smp\nfrom lcc.models.UNet import UNet \nfrom lcc.dataset import LCCDataset, get_transforms, SmallDataset, get_transforms_2\nfrom lcc import OUTPUT_DIR\nfrom lcc.train_utils import train\n\nfrom lcc.dataset import TRAIN_CLASS_COUNTS\n\nCLASS_WEIGHTS = torch.tensor(1/TRAIN_CLASS_COUNTS)\nCLASS_WEIGHTS[:2] = 0\nprint(CLASS_WEIGHTS)\n\nBATCH_SIZE = 8\nN_EPOCHS = 1000\nLR = 5e-5\nPATIENCE = 3\n\ndef main(n_sample_images=10000):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = UNet(\n in_channels=4,\n out_channels=10,\n ).to(device)\n model.name = \"UNet_cross_entropy_loss_weighted\"\n optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=0)\n criterion = torch.nn.CrossEntropyLoss(weight = CLASS_WEIGHTS.to(device).float())\n # criterion = smp.losses.JaccardLoss(mode='multiclass', classes=list(range(2,10)), from_logits=True)\n dataset = SmallDataset(size=n_sample_images, transform=get_transforms())\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8)\n test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8)\n # dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)\n train(model, train_dataloader, test_dataloader, optimizer, criterion, device, N_EPOCHS, patience=PATIENCE)\n\n\nif __name__==\"__main__\":\n # dataset = LCCDataset(transform=get_transforms())\n main()","repo_name":"nbereux/land_cover_challenge","sub_path":"script/train_UNet.py","file_name":"train_UNet.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"73708064810","text":"import pigpio\n\nclass PiCarController:\n\n BCM_PIN_SPEED_HIGH = 19\n BCM_PIN_SPEED_LOW = 16\n BCM_PIN_STEERING_HIGH = 26\n BCM_PIN_STEERING_LOW = 20\n GPIO_PWM_FREQUENCY = 20000 #20kHz\n GPIO_TORQUE_CORRECT = 50\n GPIO_GROUND = 0\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.pi.set_PWM_range(self.BCM_PIN_STEERING_HIGH, 100)\n self.pi.set_PWM_range(self.BCM_PIN_STEERING_LOW, 100)\n self.pi.set_PWM_range(self.BCM_PIN_SPEED_HIGH, 100)\n self.pi.set_PWM_range(self.BCM_PIN_SPEED_LOW, 100)\n\n self.pi.set_PWM_frequency(self.BCM_PIN_STEERING_HIGH, self.GPIO_PWM_FREQUENCY)\n self.pi.set_PWM_frequency(self.BCM_PIN_STEERING_LOW, self.GPIO_PWM_FREQUENCY)\n self.pi.set_PWM_frequency(self.BCM_PIN_SPEED_HIGH, self.GPIO_PWM_FREQUENCY)\n self.pi.set_PWM_frequency(self.BCM_PIN_SPEED_LOW, self.GPIO_PWM_FREQUENCY)\n\n print('Initialized GPIO 
with PWM frequency of ' + str(self.pi.get_PWM_frequency(self.BCM_PIN_STEERING_HIGH)) + ' Hz')\n        return\n\n    def normalize(self,value):\n        if value:\n            value = self.GPIO_TORQUE_CORRECT + value/2\n        return value\n\n\n    def set_speed(self,direction, speed):\n        speed = self.normalize(speed)\n        if 0 <= speed <= 100:\n            if direction:\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_SPEED_HIGH, speed)\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_SPEED_LOW, self.GPIO_GROUND)\n                print('speed: ' + str(speed) + ' forward')\n            else:\n\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_SPEED_HIGH, self.GPIO_GROUND)\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_SPEED_LOW, speed)\n                print('speed: ' + str(speed) + ' backward')\n\n\n    def set_steering(self,direction, steering):\n        steering = self.normalize(steering)\n        if 0 <= steering <= 100:\n            if direction:\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_STEERING_HIGH, steering)\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_STEERING_LOW, self.GPIO_GROUND)\n                print('steering: ' + str(steering) + ' left')\n            else:\n\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_STEERING_HIGH, self.GPIO_GROUND)\n                self.pi.set_PWM_dutycycle(self.BCM_PIN_STEERING_LOW, steering)\n                print('steering: ' + str(steering) + ' right')\n\n\n    def stop(self):\n\n        self.pi.set_PWM_dutycycle(self.BCM_PIN_SPEED_HIGH, self.GPIO_GROUND)\n        self.pi.set_PWM_dutycycle(self.BCM_PIN_SPEED_LOW, self.GPIO_GROUND)\n        self.pi.set_PWM_dutycycle(self.BCM_PIN_STEERING_HIGH, self.GPIO_GROUND)\n        self.pi.set_PWM_dutycycle(self.BCM_PIN_STEERING_LOW, self.GPIO_GROUND)\n        self.pi.stop()\n\n        print('Closed GPIO handler')\n\n\n#\n# GPIO Test: Expect 1.27 V between BCM 26 and BCM 20 for 20 seconds\n#\n# import time\n# if __name__ == \"__main__\":\n#     try:\n#         stuff = PiCarController()\n#         stuff.set_speed(1,50)\n#         time.sleep(20)\n#     finally:\n#         stuff.stop()\n","repo_name":"dnutiu/BLECar","sub_path":"BLECarPiZeroW/dbusObjects/drivers/PiCarController.py","file_name":"PiCarController.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"}
{"seq_id":"42332493744","text":"import re\nimport numpy as np\nimport pandas as pd\n\ndef loadBehavDat(fname):\n    ''' Load the behavioral data\n    '''\n    \n    et =[]; ro = []; fo = []; lo = []\n    dx0 = []; dx1 = []; dy0 = []; dy1 = []\n    cl = []; old = []; tr = []; olg = []; clg = []\n\n    with open(fname, 'r') as fh:\n        for i,line in enumerate(fh):\n            if i>3:\n                prts = re.split('\\t|\\n',line)\n                for j,p in enumerate(prts):\n                    if j == 1: et.append(float(prts[j]))\n                    if j == 3: ro.append(float(prts[j]))\n                    if j == 5: fo.append(float(prts[j]))\n                    if j == 7: lo.append(float(prts[j]))\n                    if j == 9: dx0.append(float(prts[j]))\n                    if j == 11: dx1.append(float(prts[j]))\n                    if j == 13: dy0.append(float(prts[j]))\n                    if j == 15: dy1.append(float(prts[j]))\n                    if j == 17: cl.append(int(prts[j]))\n                    if j == 19: old.append(int(prts[j]))\n                    if j == 21: tr.append(int(prts[j]))\n                    if j == 23: olg.append(float(prts[j]))\n                    if j == 25: clg.append(float(prts[j]))\n\n    behavDat = pd.DataFrame({'Elapsed time': et,\n    'Rotational offset': ro, 'Forward offset': fo, 'Lateral offset': lo,\n    'dx0': dx0, 'dx1': dx1, 'dy0': dy0, 'dy1': dy1,\n    'closed': cl, 'olsdir': old, 'trans': tr,\n    'olgain': olg, 'clgain': clg\n    })\n    \n    return behavDat\n\ndef getSYNCTimes(SYNCDatNm, fpv):\n    ''' Find the framegrab and VR display points\n    '''\n    # Load the voltages from the synchronization data\n    SYNCDat = pd.read_csv(SYNCDatNm,header=None, names = ['VFramegrab','VVR','VStim','VPuff'])\n\n    # Get the points where each framegrab starts - use a 
constant fraction discriminator\n cfd_fg = SYNCDat['VFramegrab'][:-1].reset_index(drop=True)-SYNCDat['VFramegrab'][1:].reset_index(drop=True)\n tFramegrab = cfd_fg[cfd_fg>0.5].index\n tFramegrab = np.delete(tFramegrab,np.argwhere(np.diff(tFramegrab) < 20)+1)\n tFramegrab = tFramegrab[0::fpv]\n\n # Get the points where each R, G, or B frame is projected - use a constant fraction discriminator\n cfd_vr = SYNCDat['VVR'][:-1].reset_index(drop=True)-SYNCDat['VVR'][1:].reset_index(drop=True)\n tVR = cfd_vr[cfd_vr>0.05].index\n tVR = np.delete(tVR,np.argwhere(np.diff(tVR) < 10)+1)\n tVR = tVR[0::3]\n \n # Get the points where the iontrophoresis function generator is outputting a high signal\n tPf = SYNCDat['VPuff'][SYNCDat['VPuff'] > 1].index\n \n return [tFramegrab, tVR, tPf]\n\ndef getMatchedBehavDat(tFramegrab, tVR, behavDatNm):\n ''' Get the behavioral data for each imaging volume\n '''\n # Load the behavioral data\n behavDat = loadBehavDat(behavDatNm)\n\n # Select only the times where the VR was active\n framesToUse = np.where((tFramegrab > np.min(tVR)) & (tFramegrab < np.max(tVR)))\n\n # Create a dataframe with the relevant timepoints and behavioral values at each framegrab point\n datPts = [] ; \n for t in tFramegrab[framesToUse]:\n datPts.append(np.argmin(np.abs(tVR-t)))\n behavDat_matched = behavDat.loc[datPts].reset_index(drop=True)\n \n return [framesToUse, behavDat_matched]\n\ndef matchDAQtoVR(tVR, behavDat, tMatch):\n ''' Match DAQ data to the behavioral data\n '''\n\n # Create a dataframe with the relevant timepoints and behavioral values at each framegrab point\n datPts = [] ; \n for t in tMatch:\n datPts.append(np.argmin(np.abs(tVR-t)))\n matchedTimes = behavDat['Elapsed time'].loc[datPts].reset_index(drop=True)\n \n return matchedTimes","repo_name":"DanTurner-Evans/GULP","sub_path":".ipynb_checkpoints/BehavPreProc-checkpoint.py","file_name":"BehavPreProc-checkpoint.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"22218712830","text":"from estatica import *\nimport math\nimport numpy as np\n\n\ndef ex1():\n print(\"Ex1\")\n r1 = Vector(0.05, 0.4, 0)\n f1 = Vector(0, 4000, 0)\n f2 = Vector(-800, 0, 0)\n print(r1.produto_vetorial(f2))\n\n\ndef ex2():\n print(\"EX2\")\n r1 = Vector(200*math.sin(math.radians(15)),\n 200*math.cos(math.radians(15)),\n 75)\n\n F = Vector(-20*math.sin(math.radians(75)),\n 20*math.cos(math.radians(75)),\n 0)\n\n resposta = r1.produto_vetorial(F)\n print(f\"Mo: {resposta}\")\n\n\nif __name__ == \"__main__\":\n ex1()\n ex2()\n","repo_name":"DinossauroBebado/Estatica","sub_path":"lista5.py","file_name":"lista5.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"36072511291","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (\n QWidget, QVBoxLayout, QHBoxLayout, QSizePolicy,\n QGraphicsOpacityEffect, QScrollArea\n)\n\nfrom base_widget import BaseWidget\nfrom card_widget import CardWidget\n\nclass UserCardListWidget(BaseWidget):\n \"\"\"\n Виджет текущих карт игрока-человека.\n Визуальная обертка для списка карт.\n \"\"\"\n def __init__(self, user=None):\n super().__init__()\n\n self.user = user\n\n self.setObjectName(\"user_card_list_widget\")\n self.set_accessable(False)\n\n def init_ui(self):\n\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n user = self.user\n #self.setStyleSheet(\"background-color: #aaf;\")\n\n 
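# Build a horizontally scrollable strip for the card widgets: the vertical\n        # scroll bar is disabled, the horizontal one appears only when needed, and\n        # setWidgetResizable(True) lets the inner container track its size hint.\n        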
user_cards_scroll = QScrollArea()\n user_cards_scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n user_cards_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n user_cards_scroll.setWidgetResizable(True)\n #self.user_cards_scroll = user_cards_scroll\n\n user_cards_layout = QHBoxLayout()\n self.user_cards_layout = user_cards_layout\n user_cards_layout.setContentsMargins(10, 15, 10, 15);\n\n for card in user.hand:\n card_widget = CardWidget(card, clickable=True)\n user_cards_layout.addWidget(card_widget)\n card_widget.setEnabled(False)\n\n\n widget = QWidget()\n user_cards_layout.setSpacing(16);\n user_cards_layout.setAlignment(Qt.AlignCenter)\n #widget.setMinimumWidth(self.frameGeometry().width() - 2)\n widget.setLayout(user_cards_layout)\n widget.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)\n widget.resize(widget.sizeHint());\n #self.user_cards_scroll_widget = widget\n user_cards_scroll.setWidget(widget)\n\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0);\n layout.addWidget(user_cards_scroll)\n self.setLayout(layout)\n\n def get_cards(self):\n \"\"\"\n Получение списка виджетов-карт\n \"\"\"\n if hasattr(self, 'user_cards_layout'):\n return (self.user_cards_layout.itemAt(i) for i in range(self.user_cards_layout.count()))\n else:\n return []\n\n def remove_card(self, number):\n \"\"\"\n Удаление виджета карты из контейнера по ее номеру\n \"\"\"\n items = self.get_cards()\n for w in items:\n if w.widget().card.n == number:\n w.widget().deleteLater()\n\n def set_user(self, user):\n \"\"\"\n Установка новых виджетов-карт для нового пользователя\n \"\"\"\n self.user = user\n self.reset()\n\n def set_accessable(self, value):\n \"\"\"\n Переключатель для изменения состояния возможности взаимодействовать с виджетом\n \"\"\"\n for card in self.get_cards():\n card.widget().setEnabled(value)\n\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.transparent)\n op = QGraphicsOpacityEffect(self)\n if value:\n op.setOpacity(1.0)\n else:\n op.setOpacity(0.4)\n self.setGraphicsEffect(op)\n #self.setAutoFillBackground(False)\n self.setPalette(p)\n","repo_name":"Svetafrolova1997/project_cow006","sub_path":"user_card_list_widget.py","file_name":"user_card_list_widget.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"24851179041","text":"from requests import *\n\nurl = 'https://websec.fr/level18/index.php'\ns = Session()\ncookies = {\n 'obj' : 'O%3a8%3a%22stdClass%22%3a2%3a%7bs%3a4%3a%22flag%22%3bi%3a1234%3bs%3a5%3a%22input%22%3bR%3a2%3b%7d'\n}\n\nr = s.post(url , cookies=cookies)\npos = r.text.find('WEBSEC{')\nwhile(True):\n print(r.text[pos] , end='')\n if (r.text[pos] == '}'):\n print()\n break\n pos += 1","repo_name":"entroychang/websec_writeup","sub_path":"medium/websec_level-18/websec_level-18.py","file_name":"websec_level-18.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"33875932612","text":"import math\nfrom dronekit import LocationGlobal, LocationGlobalRelative\nimport random\n\ndef get_distance_meters(aLocation1, aLocation2):\n \"\"\"\n Returns the ground distance in meters between two LocationGlobal objects.\n\n This method is an approximation, and will not be accurate over large distances and close to the \n earth's poles. 
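\n    A quick worked example (added illustration, not from the original\n    docstring): two points 0.001 degrees apart in both latitude and longitude\n    at equal altitude come out to roughly 157 m:\n\n        >>> import math\n        >>> round(math.sqrt(0.001**2 + 0.001**2) * 1.113195e5, 1)\n        157.4\n\n    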
It comes from the ArduPilot test code: \n https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py\n \"\"\"\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n dalt_meters = aLocation2.alt - aLocation1.alt\n dlat_long_meters = (math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5)\n return math.sqrt(dlat_long_meters**2 + dalt_meters**2)\n\n\ndef get_location_meters(original_location, dNorth, dEast, altitude):\n \"\"\"\n Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` metres from the \n specified `original_location`. The returned LocationGlobal has the same `alt` value\n as `original_location`.\n\n The function is useful when you want to move the vehicle around specifying locations relative to \n the current vehicle position.\n\n The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.\n\n For more information see:\n http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters\n \"\"\"\n earth_radius = 6378137.0 #Radius of \"spherical\" earth\n #Coordinate offsets in radians\n dLat = dNorth/earth_radius\n dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))\n\n #New position in decimal degrees\n newlat = original_location.lat + (dLat * 180/math.pi)\n newlon = original_location.lon + (dLon * 180/math.pi)\n if altitude is None:\n altitude = original_location.alt\n if type(original_location) is LocationGlobal:\n targetlocation=LocationGlobal(newlat, newlon, altitude)\n elif type(original_location) is LocationGlobalRelative:\n targetlocation=LocationGlobalRelative(newlat, newlon, altitude)\n else:\n raise Exception(\"Invalid Location object passed\")\n \n return targetlocation;\n\n\ndef select_out_of_my_circle(radius = 35, me_x = 10, me_y = 10, me_radius = 5):\n x = me_x\n y = me_y\n while math.sqrt((x-me_x)**2 + (y-me_y)**2) < me_radius:\n a = random.random() * 2 * math.pi\n r = radius * math.sqrt(random.random())\n\n x = r * math.cos(a)\n y = r * math.sin(a)\n print(\"In select_out_of_my_circle x, y = {}, {}\".format(x,y))\n return x, y\n\n","repo_name":"uci-iasl/HYDRA","sub_path":"devices/flight_utils.py","file_name":"flight_utils.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"} +{"seq_id":"24740216480","text":"# example of \"2-pairable\" graph state:\n# one can use LOCC to create any pattern of two EPR states on any subset of 4 qubits\n\nimport numpy as np\nimport itertools\n\n####################### helper functions #############################\n\ndef null2(A):\n# Argument: binary matrix A\n# Returns a matrix whose rows span the nullspace of A over the binary field GF(2) \n rows,n = A.shape\n X = np.identity(n,dtype=int)\n for i in range(rows):\n y = np.dot(A[i,:], X) % 2\n not_y = (y + 1) % 2\n good = X[:,np.nonzero(not_y)]\n good = good[:,0,:]\n bad = X[:, np.nonzero(y)]\n bad = bad[:,0,:]\n if bad.shape[1]>0 :\n bad = np.add(bad, np.roll(bad, 1, axis=1) ) % 2\n bad = np.delete(bad, 0, axis=1)\n X = np.concatenate((good, bad), axis=1)\n return np.transpose(X)\n\ndef rank2(A):\n\tKerA = null2(A)\n\treturn A.shape[1]-KerA.shape[0]\n\ndef test_epr(G,a,b):\n# Argument: stabilizer tableaux of size s x 2n\n# each row is a stabilizer, columns range(n) = X part, columns range(n,2n) = Z part\n# checks whether qubits a and b are maximally entangled with each other\n\tSa = rank2(G[:,[a,a+n]])\n\tSb = 
rank2(G[:,[b,b+n]])\n\tSab = rank2(G[:,[a,b,a+n,b+n]])\n\tif Sa==2 and Sb==2 and Sab==2:\n\t\treturn True\n\telse:\n\t\treturn False\n\n####################################################################\n\n# number of qubits\nn = 10\n\n# define adjacency matrix of the graph state\nA = np.zeros((n,n),dtype=int)\nfor i in range(n):\n\tj = (i+1) % n\n\tA[i,j] = 1\n\tA[j,i] = 1\n\tj = (i+int(n/2)) % n\n\tA[i,j] = 1\n\tA[j,i] = 1\n\n# compute all pairings of the set {0,1,...,n-1}\nS4 = itertools.permutations(range(4))\nfour_elem_pairings = [sigma for sigma in S4 if sigma[0] 100:\n hundreds = bills/100\n print(hundreds)\n bills -= (hundreds * 100)\n bills_tuple += (hundreds, )\n print(bills)\n if bills > 49:\n print(bills)\n print(bills_tuple)\n\n # print(bills_tuple)\n\ndef coins_breakdown(coins):\n coins_tuple = ()\n\ndef make_change(total_charge, payment):\n change = payment - total_charge\n coin_bill_seperate = math.modf(change)\n # print(coin_bill_seperate)\n coins_breakdown(coin_bill_seperate[0])\n bills_breakdown(coin_bill_seperate[1])\n \n\n# if 100 % 80:\n# print(\"It divides\")\n# print(100 % 80)\n\n(make_change(200, 452))\n","repo_name":"davidschneiderdev/Python-function-exercises","sub_path":"large_exercises/change_maker.py","file_name":"change_maker.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"24507542305","text":"\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtCore import Qt\n\nfrom UI.Base.ComboBox.StyledComboBox import ComboBox\nfrom UI.Base.CheckBox.StyledCheckBox import CheckBox\nfrom UI.Base.SpinBox.StyleSpinBoxControl import SpinBoxControl\nfrom UI.Base.Input.StyledInput import StyleInput\n\nfrom Utility.Core import QUEUE\n\n\n# Setting widget in Scheduler for a queue -- UI class\nclass SettingUI(QtWidgets.QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self._input()\n self._combo()\n self._spins()\n self._checks()\n\n self.mainLayout.addStretch(1)\n \n name = 'queue-setting'\n self.setObjectName(name)\n\n\n def _input(self):\n layout = QtWidgets.QVBoxLayout()\n layout.setContentsMargins(0, 5, 0, 5)\n self.mainLayout.addLayout(layout)\n\n txt = 'Name'\n label = QtWidgets.QLabel(txt)\n # label.setObjectName(txt)\n\n self.name = StyleInput()\n self.name.setReadOnly(True)\n\n layout.addWidget(label)\n layout.addWidget(self.name)\n\n\n def _combo(self):\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 5, 0, 5)\n self.mainLayout.addLayout(layout)\n\n self.typeOption = ComboBox()\n self.typeOption.addItems(QUEUE.SETTING.TIMER_TYPE.values())\n\n txt = 'Timer Type'\n label = QtWidgets.QLabel(txt)\n\n layout.addWidget(label)\n layout.addWidget(self.typeOption)\n\n\n def _checks(self):\n layout = QtWidgets.QVBoxLayout()\n layout.setContentsMargins(0, 5, 0, 5)\n self.mainLayout.addLayout(layout)\n\n checks = [\n ('startup', 'Start Queue at startup'),\n ]\n\n for item in checks:\n if item:\n wid = CheckBox(item[1], False)\n\n layout.addWidget(wid)\n\n setattr(self, item[0], wid)\n\n\n def _spins(self):\n layout = QtWidgets.QVBoxLayout()\n layout.setContentsMargins(0, 5, 0, 5)\n self.mainLayout.addLayout(layout)\n\n spins = [\n ('concurrent', 'Number of Concurrent Download', (1, 1, 10)),\n ('retry', 'Number of Retry if download failed', (0, 0, 99)),\n ]\n\n for item in spins:\n\n temp = QtWidgets.QHBoxLayout()\n layout.addLayout(temp)\n\n label = 
QtWidgets.QLabel(item[1])\n            wid = SpinBoxControl(*item[2])\n\n            temp.addWidget(label)\n            temp.addStretch(1)\n            temp.addWidget(wid)\n\n            setattr(self, item[0], wid)\n\n\n\n","repo_name":"SAH256/SDM","sub_path":"UI/Main/Scheduler/Components/Setting/SettingUI.py","file_name":"SettingUI.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"72313523370","text":"import logging\nimport os\nfrom optparse import OptionParser\n\nMyDebug = True\n\nclass Configuration(object):\n    \"\"\"Configuration for TTS API\"\"\"\n    \n    def __init__(self, logger):\n        global log\n        log = logger\n        self.cmdline_parser = OptionParser()\n        \n        for option, definition in self._conf_options.iteritems():\n            # Set object attributes to default values\n            def_val = definition.get('default', None)\n            setattr(self, option, def_val)\n            log.debug(\"Option %s set to value %s\", option, def_val)\n            \n            # Fill in the cmdline_parser object\n            if definition.has_key('command_line'):\n                descr = definition.get('descr', None) \n                type = definition.get('type', None)\n                \n                if definition.has_key('arg_map'):\n                    type, map = definition['arg_map']\n                if type == str:\n                    type_str = 'string'\n                elif type == int:\n                    type_str = 'int'\n                elif type == float:\n                    type_str = 'float'\n                elif type == bool:\n                    type_str = None\n                else:\n                    raise TypeError(\"Unknown type\")\n                \n                if type != bool:\n                    self.cmdline_parser.add_option(type=type_str, dest=option, help=descr,\n                                                   *definition['command_line'])\n                else: # type == bool\n                    self.cmdline_parser.add_option(action=\"store_true\", help=descr,\n                                                   *definition['command_line'])\n        \n        # Set options according to command line flags\n        (cmdline_options, args) = self.cmdline_parser.parse_args()\n\n        for option, definition in self._conf_options.iteritems():\n            val = getattr(cmdline_options, option, None)\n            if val != None:\n                if definition.has_key('arg_map'):\n                    former_type, map = definition['arg_map']\n                    try:\n                        val = map[val]\n                    except KeyError:\n                        raise ValueError(\"Invalid option value: \" + str(val))\n                \n                setattr(self, option, val)\n                log.debug(\"Option %s overridden from command line to value %s\", option, val)\n        \n        #if len(args) != 0:\n        #    raise \"This command takes no positional arguments (without - or -- prefix)\"\n\n        self.log_path = os.path.join(self.log_dir, self.log_name)\n","repo_name":"brailcom/tts-api-provider","sub_path":"src/python/provider/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"}
{"seq_id":"1358050532","text":"\nimport dateutil.parser\nfrom zenpy.lib.objects.base_object import BaseObject\n\nclass ExternalEvent(BaseObject):\n\tdef __init__(self, api=None):\n\t\tself.api = api\n\t\tself.body = None\n\t\tself.resource = None\n\t\tself.type = None\n\t\tself.id = None\n\n\n\n","repo_name":"dpetkevich/benServer","sub_path":"venv/lib/python2.7/site-packages/zenpy/lib/objects/events/externalevent.py","file_name":"externalevent.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"42322363207","text":"from sensor.logger import logging\nimport pandas as pd\nimport numpy as np\nimport sys,os\nfrom sensor.config import mongo_client\nfrom sensor.exception import SensorException\nimport yaml\nimport dill\n\ndef get_collection_as_dataframe(database_name:str,collection_name:str) -> pd.DataFrame:\n    \"\"\"\n    Description: This function returns 
collection as a DataFrame\n Params:\n database_name: Database Name\n collection_name: Collection Name\n =============================================\n return Pandas DataFrame of a collection\n \"\"\"\n try:\n logging.info(\"Connecting to mongo and retrieving data\")\n df = pd.DataFrame(list(mongo_client[database_name][collection_name].find()))\n\n if(\"_id\" in df.columns):\n df.drop(labels = [\"_id\"],axis=1,inplace=True)\n \n return df \n \n except Exception as e:\n raise SensorException(e, sys)\n\n\ndef write_yaml_files(file_path, data:dict):\n try:\n file_dirname = os.path.dirname(file_path)\n os.makedirs(file_dirname, exist_ok=True)\n\n with open(file_path, \"w\") as my_file:\n yaml.dump(data, my_file)\n\n except Exception as ex:\n raise SensorException(ex, sys)\n\ndef save_object(file_path:str, obj:object) -> None:\n try:\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n logging.info(\"Dumping pickle file into folder\")\n with open(file_path, \"wb\") as file_obj:\n dill.dump(obj,file_obj)\n logging.info(\"dumping completed\")\n except Exception as ex:\n raise SensorException(ex, sys)\n\ndef load_object(file_path:str) -> object:\n try:\n if not os.path.exists(file_path):\n raise Exception(f\"The specified file {file_path} does not exist!\")\n with open(file_path, \"rb\") as file_obj:\n return dill.load(file_obj)\n except Exception as ex:\n raise SensorException(ex, sys)\n\ndef save_numpy_array_data(file_path:str, np_array:np.array):\n try:\n dir_path = os.path.dirname(file_path)\n os.makedirs(dir_path, exist_ok=True)\n with open(file_path, \"wb\") as file_obj:\n np.save(file_obj, np_array)\n except Exception as ex:\n raise SensorException(ex, sys)\n\ndef load_numpy_array_data(file_path:str) -> np.array:\n try:\n if not os.path.exists(file_path):\n raise Exception(f\"The specified file path {file_path} does not exist!!\")\n with open(file_path, \"rb\") as file_obj:\n return np.load(file_obj)\n except Exception as ex:\n raise SensorException(ex, sys)","repo_name":"Ritwik-Gupta/Sensor-Fault-Detection-","sub_path":"sensor/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"33428760525","text":"'''\nFile: dirs.py\nProject: src\nFile Created: Wednesday, 22nd May 2019 11:09:30 am\nAuthor: Josiah Putman (joshikatsu@gmail.com)\n-----\nLast Modified: Sunday, 2nd June 2019 12:15:37 am\nModified By: Josiah Putman (joshikatsu@gmail.com)\n'''\nfrom typing import Iterator\nfrom pathlib import Path\n\nCWD = Path.cwd()\nROOTD = CWD\nSRCD = ROOTD / 'src'\nDATAD = ROOTD / 'data'\nLARKD = DATAD / 'lark'\nLARKIMGD = LARKD / 'images'\nKEYAKID = DATAD / 'KeyakiTreebank-1.1'\nTREEBANKD = KEYAKID / 'treebank'\n\ndef keyaki_trees(glob: str = '*') -> Iterator[Path]:\n for psdfile in TREEBANKD.glob(glob + '.psd'):\n yield psdfile\n","repo_name":"Katsutoshii/JPCFG","sub_path":"src/tools/dirs.py","file_name":"dirs.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"19366503135","text":"from onegov.ballot import ElectionCompound\nfrom onegov.core.security import Public\nfrom onegov.election_day import ElectionDayApp\nfrom onegov.election_day.layouts import ElectionCompoundLayout\nfrom onegov.election_day.utils import add_last_modified_header\nfrom onegov.election_day.utils.election_compound import get_list_groups\nfrom onegov.election_day.utils.election_compound 
import get_list_groups_data\n\n\n@ElectionDayApp.json(\n    model=ElectionCompound,\n    name='list-groups-data',\n    permission=Public\n)\ndef view_election_compound_list_groups_data(self, request):\n\n    \"\"\" View the list groups as JSON. Used for the lists bar chart. \"\"\"\n\n    return get_list_groups_data(self)\n\n\n@ElectionDayApp.html(\n    model=ElectionCompound,\n    name='list-groups-chart',\n    template='embed.pt',\n    permission=Public\n)\ndef view_election_compound_list_groups_chart(self, request):\n\n    \"\"\" View the list groups as bar chart. \"\"\"\n\n    @request.after\n    def add_last_modified(response):\n        add_last_modified_header(response, self.last_modified)\n\n    return {\n        'model': self,\n        'layout': ElectionCompoundLayout(self, request),\n        'type': 'list-groups-chart',\n    }\n\n\n@ElectionDayApp.html(\n    model=ElectionCompound,\n    name='list-groups-table',\n    template='embed.pt',\n    permission=Public\n)\ndef view_election_compound_list_groups_table(self, request):\n\n    \"\"\" View the list groups as table. \"\"\"\n\n    @request.after\n    def add_last_modified(response):\n        add_last_modified_header(response, self.last_modified)\n\n    return {\n        'election': self,\n        'groups': get_list_groups(self),\n        'layout': ElectionCompoundLayout(self, request),\n        'type': 'election-compound-table',\n        'scope': 'list-groups',\n    }\n\n\n@ElectionDayApp.html(\n    model=ElectionCompound,\n    name='list-groups',\n    template='election_compound/list_groups.pt',\n    permission=Public\n)\ndef view_election_compound_list_groups(self, request):\n\n    \"\"\" The main view. \"\"\"\n\n    layout = ElectionCompoundLayout(self, request, 'list-groups')\n\n    return {\n        'election_compound': self,\n        'layout': layout,\n        'groups': get_list_groups(self),\n    }\n\n\n@ElectionDayApp.svg_file(model=ElectionCompound, name='list-groups-svg')\ndef view_election_compound_list_groups_svg(self, request):\n\n    \"\"\" View the list groups as SVG. 
\"\"\"\n\n layout = ElectionCompoundLayout(self, request, 'list-groups')\n return {\n 'path': layout.svg_path,\n 'name': layout.svg_name\n }\n","repo_name":"OneGov/onegov-cloud","sub_path":"src/onegov/election_day/views/election_compound/list_groups.py","file_name":"list_groups.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"55"} +{"seq_id":"72472618412","text":"\n\n# a = range(10)\n# b = range(11,20)\n# c = []\n# for i in range(10):\n# c.append((i*2,i*3))\n# k = zip(a,b)\n# for i in k:\n# print(i)\n# for i in zip(*c):\n# print(list(i))\n#\nimport tensorflow as tf\n\ndef t1():\n with tf.Session() as sess:\n seql = range(5)\n seqv = [i*5 for i in range(5)]\n\n state_ta = tf.TensorArray(dtype=tf.float32,size=100)\n\n mat_data = [range(i,i+5) for i in range(4)]\n length = range(1,5)\n mt = tf.placeholder(dtype=tf.int32,shape=[4,5])\n oh = tf.one_hot(mt,10,axis=-1)\n i = tf.constant(0)\n tmp = tf.get_variable('k',shape=[5,2],dtype=tf.float32)\n\n ic = tf.constant(0,shape=[10,1])\n\n pc = tf.constant(1,shape=[10,10])\n pc = pc[:,1:]\n cc = tf.concat([pc,ic],axis=1)\n\n cr = sess.run(oh,feed_dict={mt:mat_data})\n print(cr)\n\n # def _encoder_evid(i,state_ta):\n # vec = tf.constant(1.0,shape=[10])\n # # vec = tf.pad(vec,[[0,6-evid_len[i]],[0,0]])\n # # vec = tf.reshape(vec,[6,2])\n # state_ta = state_ta.write(i,vec)\n # print('123')\n # i = tf.add(i,1)\n # return i,state_ta\n # loop, state_ta = tf.while_loop(lambda i,state_ta: i < 5, _encoder_evid,[i,state_ta])\n #\n # init = tf.global_variables_initializer()\n # sess.run(init)\n # with tf.control_dependencies([loop]):\n # ta_t = state_ta.stack()\n # i,r = sess.run([loop,ta_t],feed_dict={})\n # print(i)\n # print(r)\n\nt1()\n","repo_name":"chugare/GMM_topic","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"15604648424","text":"from flask import Flask, jsonify, request\nimport json\n\napp = Flask(__name__)\n\n@app.route('/')\ndef pessoas(id):\n return jsonify({'id':id, 'nome':'Leandro', 'profissao':'Desenvolvedor'})\n\n\n# @app.route('/soma///')\n# def soma(valor1, valor2):\n# return jsonify({'soma': valor1 + valor2})\n \n\n@app.route('/soma', methods=['POST', 'PUT', 'GET'])\ndef soma():\n if request.method == 'POST':\n dados = json.loads(request.data)\n total = sum(dados['valores'])\n elif request.method == 'GET':\n total = 10 + 10\n return jsonify({'soma':total})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"leandropinheiroalves/dev_studies","sub_path":"flask/primeira_api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"41078824622","text":"import logging\nfrom nba_stats.models import Coach, CoachSeason\nfrom nba_stats.utils import extract_values, get_data_from_table_cell, make_season_int\nfrom nba_stats.constants import BBREF_COACH_SEASON_DATA_FIELDS\nlog = logging.getLogger('stats')\n\n\ndef create_coach_from_web(c_id, attr):\n vals = extract_values(attrib=attr)\n names = vals[1].split(' ')\n log.debug(vals)\n if vals[10] == '':\n log.debug(\"No playoff games\")\n coach = Coach(coach_id=c_id, first_name=names[0], last_name=names[1], display_first_last=vals[1],\n from_year=vals[2], to_year=vals[3], years=vals[4], reg_season_games=vals[5],\n 
reg_season_wins=vals[6], reg_season_losses=vals[7], reg_season_w_pct=vals[8], above_500=vals[9])\n else:\n if vals[14] == '' or vals[15] == '':\n if vals[13] == '':\n coach = Coach(coach_id=c_id, first_name=names[0], last_name=names[1], display_first_last=vals[1],\n from_year=vals[2], to_year=vals[3], years = vals[4], reg_season_games=vals[5],\n reg_season_wins=vals[6], reg_season_losses=vals[7], reg_season_w_pct=vals[8],\n above_500=vals[9])\n else:\n coach = Coach(coach_id=c_id, first_name=names[0], last_name=names[1], display_first_last=vals[1],\n from_year=vals[2], to_year=vals[3], years=vals[4], reg_season_games=vals[5],\n reg_season_wins=vals[6], reg_season_losses=vals[7], reg_season_w_pct=vals[8],\n above_500=vals[9], post_season_games=vals[10], post_season_wins=vals[11],\n post_season_losses=vals[12], post_season_w_pct=vals[13])\n else:\n if vals[13] == '':\n coach = Coach(coach_id=c_id, first_name=names[0], last_name=names[1], display_first_last=vals[1],\n from_year=vals[2], to_year=vals[3], years=vals[4], reg_season_games=vals[5],\n reg_season_wins=vals[6], reg_season_losses=vals[7], reg_season_w_pct=vals[8],\n above_500=vals[9], post_season_games=vals[10], post_season_wins=vals[11],\n post_season_losses=vals[12], conference_champs=vals[14], league_champs=vals[15])\n else:\n coach = Coach(coach_id=c_id, first_name=names[0], last_name=names[1], display_first_last=vals[1],\n from_year=vals[2], to_year=vals[3], years=vals[4], reg_season_games=vals[5],\n reg_season_wins=vals[6], reg_season_losses=vals[7], reg_season_w_pct=vals[8],\n above_500=vals[9], post_season_games=vals[10], post_season_wins=vals[11],\n post_season_losses=vals[12], post_season_w_pct=vals[13], conference_champs=vals[14],\n league_champs=vals[15])\n coach.save()\n\n\ndef extract_coach_data_from_bbref(table_row):\n coach_basic_info = table_row.find('td', {'data-stat': \"coach\"})\n # bbref splits up there tables w/ blank rows and headers in the middle for readability.\n # In that case, coach_basic_info will be None, so skip it.\n if coach_basic_info is None:\n return None\n\n coach_id = coach_basic_info.a.get('href')[9:-5]\n names = coach_basic_info.get_text().split()\n fname = names[0]\n lname = \" \".join(names[1:])\n year_min = get_data_from_table_cell(table_row, \"year_min\")\n year_max = get_data_from_table_cell(table_row, \"year_max\")\n years = get_data_from_table_cell(table_row, \"years\")\n\n coach_data = {'coach_id': coach_id, 'first_name': fname, 'last_name': lname,\n 'year_min': year_min, 'year_max': year_max, 'years': years}\n\n return coach_data\n\n\ndef extract_coach_stats_from_bbref(coach, table):\n log.debug(\"Creating CoachSeasons for \" + coach.display_first_last)\n coach_seasons = []\n rows = table.find('tbody').find_all('tr')\n\n for row in rows:\n # log.debug(row.prettify())\n try:\n season_id = make_season_int(row.find('th', {'data-stat': 'season'}).get_text())\n season_dict = {k: get_data_from_table_cell(row, k)\n for k in BBREF_COACH_SEASON_DATA_FIELDS}\n except AttributeError as e:\n # An attribute indicates a season in which the coach was an assistant,\n # So while there is a table row present, there are not stats for that season\n continue\n season_dict['season_id'] = season_id\n season_dict['coach'] = coach\n coach_season = CoachSeason(**season_dict)\n coach_seasons.append(coach_season)\n return 
coach_seasons\n\n\n\n","repo_name":"johngriebel/nbapex","sub_path":"nba_stats/helpers/coach.py","file_name":"coach.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"21397289047","text":"# Handling string\nInputStr = input(\"Enter a String : \")\nsplitstr = InputStr.split(\",\")\nsumlist = []\n\nlength = len(splitstr)\nprint(\" original string is :\", InputStr)\nprint(\" Split list is :\", splitstr)\nprint(\" Split list length is :\", length)\n\n\ndef findsum(listarg):\n flen = len(listarg)\n j = 0\n fsum = 0\n print(\"listarg :\", listarg)\n print(\"flen : \", flen)\n\n while j < flen:\n # print (listarg[j])\n # print (\" Value of J is \", j)\n\n fsum = fsum + int(listarg[j])\n j += 1\n\n # print (\"lstarg is : \",listarg, \" fsum is : \", fsum)\n return fsum\n\n\nj = 0\nwhile j < length:\n print(splitstr[j])\n # print (\" Value of J is \", j)\n # print(findsum(splitstr[j]))\n sumlist.insert(j, findsum(splitstr[j]))\n j = j + 1\n\nprint(\"sumlist is \", sumlist)\nsumlist.sort()\n# print (\"sorted list- sumlist is \",sumlist)\n\n\nsortedlist = sumlist.copy()\nprint(\"sorted list is :\", sortedlist, \" length of sorted sorted list : \", len(sortedlist))\n\nj = 0\nz = 0\nresult = 0\nfor j in range(len(sortedlist) - 1):\n\n # print (\"print value of j before the statement :\", j, \" z is \",z)\n if sortedlist[z] == sortedlist[z + 1]:\n result = result + 1\n print(\"compared the values :\", sortedlist[z], \" and \", sortedlist[z + 1], \" result is :\", result)\n z = z + 2\n # print (\" z when increamented by 2 :\" , z)\n\n else:\n z = z + 1\n if z >= len(sortedlist):\n break\n\nif result > 0:\n print(\" No of equal pairs is :\", result)\nelse:\n print(\"-1\")\n\n\n\n","repo_name":"shnkumar/Assignment","sub_path":"numberpairprogram.py","file_name":"numberpairprogram.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"11145478404","text":"import pandas as pd\r\nimport pickle\r\nimport math\r\nimport numpy as np\r\n#import filter_stTokens as fsT\r\n\r\nfrom collections import *\r\nfrom scipy import spatial\r\nfrom datetime import datetime\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nwith open(\"../data/pickles/pre_st_Tokens.p\", 'rb') as file:\r\n preTokens = pickle.load(file)\r\nwith open(\"../data/pickles/post_st_Tokens.p\", 'rb') as file:\r\n postTokens = pickle.load(file)\r\n\r\n# Opening the data, among other thing to have the number N of documents\r\ndf_all = pd.read_csv('../data/Data_Kaggle/df_all.csv', encoding=\"ISO-8859-1\")\r\nproduct_description = pd.read_csv('../data/Data_Kaggle/product_description_spelled.csv', encoding=\"ISO-8859-1\")\r\nattributes = pd.read_csv('../data/Data_Kaggle/attributes.csv', encoding=\"ISO-8859-1\")\r\n\r\n# Initializing N_description and N_attribute\r\nN_description = len(product_description.product_description)\r\nN_attribute = len(attributes.value)\r\n\r\n# Initializing full search term and pre-processed and post-processed search terms\r\nprint(\"========================================\")\r\nprint(\"Computing the three types of queries\")\r\ncFsT = df_all['search_term']\r\nprodDesc = df_all['product_description']\r\nprodTitle = df_all['product_title']\r\nprodAttr = df_all['name'] #attributes\r\nrelScore = 
df_all['relevance']\r\n\r\n# Removing NaN item in Attributes\r\nprodAttr = prodAttr.fillna('aaaaaaaaa')\r\n\r\n# Compute pre and post tokens\r\npreTokens = preTokens[0].astype('str')\r\npostTokens = postTokens[0].astype('str')\r\n\r\nspPreTokens = preTokens.str.split()\r\npreTokList = spPreTokens.values.tolist()\r\n\r\nspPostTokens = postTokens.str.split()\r\npostTokList = spPostTokens.values.tolist()\r\n\r\n# Split full search term into tokens\r\nspcFsT = cFsT.str.split()\r\nsTList = spcFsT.values.tolist()\r\n\r\nprint(\"Queries computation done\")\r\n\r\n#####################################################################################\r\ndef calcSTProb(dataCol, search_term, prodFeat):\r\n all_features_list=[]\r\n\r\n for idx, row_data in enumerate(dataCol):\r\n count = 0\r\n dict = {}\r\n for word in search_term[idx]:\r\n if (row_data.lower().find(word)!= -1):\r\n count += 1\r\n feature= count / len(search_term[idx])\r\n dict[\"search_term_words_probability\"]= round(feature,2)\r\n dict[\"Product_length_feature\"]= len(prodFeat[idx])\r\n all_features_list.append(dict) \r\n\r\n return pd.DataFrame(all_features_list)\r\n\r\n#####################################################################################\r\ndef calcAllSTProb(prodFeat, featName, FsT, preST, postST):\r\n all_features_list=[]\r\n\r\n for idx, row_data in enumerate(prodFeat):\r\n count1 = 0\r\n count2 = 0\r\n count3 = 0\r\n dict = {}\r\n\r\n for word1 in postST[idx]:\r\n if (row_data.lower().find(word1)!= -1):\r\n count1 += 1\r\n feature1 = count1 / len(postST[idx])\r\n\r\n for word2 in preST[idx]:\r\n if (row_data.lower().find(word2)!= -1):\r\n count2 += 1\r\n feature2 = count2 / len(preST[idx])\r\n\r\n for word3 in FsT[idx]:\r\n if (row_data.lower().find(word3)!= -1):\r\n count3 += 1\r\n feature3 = count3 / len(FsT[idx])\r\n\r\n dict[featName + '_postST_prob']= round(feature1, 2)\r\n dict[featName + '_preST_prob']= round(feature2, 2)\r\n dict[featName + '_FsT_prob']= round(feature3, 2)\r\n dict[featName + '_length_feat']= len(prodFeat[idx])\r\n all_features_list.append(dict)\r\n\r\n return pd.DataFrame(all_features_list)\r\n\r\n#####################################################################################\r\n# Real Running Block of Codes\r\nprint(\"After this starts the computation of features\")\r\nbeginTime = datetime.now().time()\r\n\r\nPTList = calcAllSTProb(prodTitle, 'prodTitle', sTList, preTokList, postTokList)\r\nPDList = calcAllSTProb(prodDesc, 'prodDesc', sTList, preTokList, postTokList)\r\nPAList = calcAllSTProb(prodAttr, 'prodAttr', sTList, preTokList, postTokList)\r\n\r\nendTime = datetime.now().time()\r\n\r\nprint(PTList[1:10])\r\n\r\nallFeat = [PTList, PDList, PAList]\r\nallFeat = pd.concat(allFeat, axis=1)\r\n\r\nprint(\"=====================================================\")\r\npickle.dump(allFeat, open(\"../data/pickles/kamiAllFeat\" + \".p\", \"wb\"))\r\nallFeat.to_csv(\"../data/features/kamiAllFeat.csv\", index = False)\r\nprint(allFeat)\r\n\r\nprint(\"Done!\")\r\nprint(\"It began at :\", beginTime)\r\nprint(\"It ends at :\", endTime)\r\n\r\n","repo_name":"ulince/IRDM17","sub_path":"features/kamiCombFeatures.py","file_name":"kamiCombFeatures.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"30636731514","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import svm\nfrom sklearn.metrics import 
classification_report, accuracy_score\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom sklearn.covariance import EllipticEnvelope\n\n### Setup ###\ndata = pd.read_csv('creditcard.csv')\ndata = data.sample(frac = 1, random_state = 1)\n\"\"\"\ndata.hist(figsize = (15, 15))\nplt.show()\n\"\"\"\nprint(\"Data Shape\")\nprint(data.shape)\nprint()\n\n# Determine fraud cases\nfraud = data[data['Class'] == 1]\nvalid = data[data['Class'] == 0]\noutlier_frac = len(fraud)/float(len(valid))\nprint(\"Outlier Fraction Percentage\")\nprint(outlier_frac*100) # Approx % fraud cases\nprint()\n\n# Create correlation matrix\ncorrmat = data.corr()\nfig = plt.figure(figsize = (12, 9))\nsns.heatmap(corrmat, vmax = 0.8, square = True)\nplt.show()\n\n# Get all columns from dataframe\ncolumns = data.columns.tolist()\n\n# Filter columns to remove data unwanted\ncolumns = [c for c in columns if c not in ['Class']]\n\n# Store variable predicted\ntarget = 'Class'\nX = data[columns]\nY = data[target]\n\n\n### Anomaly Detection ###\nstate = 1 # define random state\nclassifiers = { # define outlier detection method\n \"Isolation Forest\": IsolationForest(max_samples=len(X), contamination=outlier_frac, random_state=state),\n \"Local Outlier Factor\": LocalOutlierFactor(n_neighbors=20, contamination=outlier_frac)\n}\n\n\n# Begin Fitting Model #\nn_outliers = len(fraud)\n\nfor i, (clf_name, clf) in enumerate(classifiers.items()):\n if (clf_name == \"Local Outlier Factor\"):\n y_predict = clf.fit_predict(X)\n scores_predict = clf.negative_outlier_factor_\n else:\n y_predict = clf.fit(X).predict(X)\n\n # Reshape predictions 0 -> valid, 1 -> fraud\n y_predict[y_predict == 1] = 0\n y_predict[y_predict == -1] = 1\n n_errors = (y_predict != Y).sum()\n\n # Run Classification metrics\n print('{}: {}'.format(clf_name, n_errors))\n print(accuracy_score(Y, y_predict))\n print(classification_report(Y, y_predict))\n","repo_name":"michael1ding/Credit-Fraud","sub_path":"Card Fraud Detection.py","file_name":"Card Fraud Detection.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"73187789291","text":"from django.urls import path,include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import BotUserViewset,Change,ImageViewset\nrouter = DefaultRouter()\nrouter.register('botuser',BotUserViewset)\nrouter.register('image',ImageViewset)\nurlpatterns = [\n path('',include(router.urls)),\n path('change///',Change.as_view())\n]","repo_name":"asliddinovbehzodjon/Django-Admin-Panel","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"1278555650","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 16 10:10:21 2023\n\n@author: runep\n\"\"\"\nimport pytest\nimport numpy as np\nimport DGEMM\n\n@pytest.fixture(scope = 'session')\ndef get_fixed_dgemm_data():\n c = 0\n n = 3\n test_matrix = np.zeros([3,n,n])\n\n for i in range(3):\n for j in range(n):\n for k in range(n):\n test_matrix[i,j,k] = c\n c += 1\n expected_matrix = np.array( \\\n [[60, 64, 68], [171, 184, 197], [282, 304, 326]], dtype=float)\n return test_matrix, expected_matrix\n\n@pytest.fixture(scope = 'session')\ndef get_random_dgemm_data():\n n = 10\n test_matrix = DGEMM.generate_numpy(n)\n expected_matrix = \\\n test_matrix[2] + np.matmul(test_matrix[0], test_matrix[1])\n 
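# Added cross-check sketch (assumes calc_DGEMM computes C + A @ B, as\n    # `expected_matrix` above implies); a naive triple loop is a handy\n    # reference when a tolerance assertion fails:\n    def _naive_dgemm(a, b, c):\n        out = c.copy()\n        n = len(a)\n        for i in range(n):\n            for j in range(n):\n                for k in range(n):\n                    out[i, j] += a[i, k] * b[k, j]\n        return out\n    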
return test_matrix, expected_matrix\n\n@pytest.fixture(autouse = True)\ndef setup_and_teardown():\n    print('\\nFetching data from db')\n    yield\n    print('\\nSaving test run data in db')\n\ndef test_fixed_dgemm(get_fixed_dgemm_data):\n    input_values, expected_values = get_fixed_dgemm_data\n    output_values, time = DGEMM.calc_DGEMM(input_values)\n    assert np.allclose(expected_values, output_values)\n    assert np.all(np.equal(expected_values, output_values))\n\ndef test_random_dgemm_close(get_random_dgemm_data):\n    input_values, expected_values = get_random_dgemm_data\n    output_values, time = DGEMM.calc_DGEMM(input_values)\n    assert np.allclose(expected_values, output_values, atol=1e-14)\n\ndef test_random_dgemm_equal(get_random_dgemm_data):\n    input_values, expected_values = get_random_dgemm_data\n    output_values, time = DGEMM.calc_DGEMM(input_values)\n    assert np.all(np.equal(expected_values, output_values))\n","repo_name":"s185177/HPC_23","sub_path":"Module_2/test_DGEMM.py","file_name":"test_DGEMM.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
{"seq_id":"7057463822","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport cv2\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndef img2sketch(photo, k_size):\n    #Read Image (use the path passed in via the `photo` argument)\n    image=cv2.imread(photo)\n    cv2.imshow(\"original image\",image)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    #Convert BGR image to RGB\n    RGB_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    \n    \n    \n    \n    # Convert to Grey Image\n    grey_image=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n    # Invert Image\n    invert_image=cv2.bitwise_not(grey_image)\n    #invert_img=255-grey_img\n\n    # Blur image\n    blur_image=cv2.GaussianBlur(invert_image, (k_size,k_size),0)\n\n    # Invert Blurred Image\n    invblur_image=cv2.bitwise_not(blur_image)\n    #invblur_img=255-blur_img\n\n    # Sketch Image\n    sketch_image=cv2.divide(grey_image,invblur_image, scale=256.0)\n\n    # Save Sketch \n    cv2.imwrite('sketch.png', sketch_image)\n\n    # Display sketch\n    cv2.imshow('sketch image',sketch_image)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    \n    plt.figure(figsize=(14,8))\n    plt.subplot(1,2,1)\n    plt.title('Original image', size=18)\n    plt.imshow(RGB_image)\n    plt.axis('off')\n    plt.subplot(1,2,2)\n    plt.title('Sketch', size=18)\n    rgb_sketch=cv2.cvtColor(sketch_image, cv2.COLOR_BGR2RGB)\n    plt.imshow(rgb_sketch)\n    plt.axis('off')\n    plt.show()\n\n    \n#Function call\nimg2sketch(photo='image.png', k_size=7)\n\n    \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"bhadurishayoun/Pencil-Sketch-Using-Python","sub_path":"Image to Pencil Sketch Model.py","file_name":"Image to Pencil Sketch Model.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"1006430000","text":"import os\n\nenemy = {\n    \"A\": 1, # Rock\n    \"B\": 2, # Paper\n    \"C\": 3 # Scissors\n}\n\nplayer = {\n    \"X\": 1, # Rock\n    \"Y\": 2, # Paper\n    \"Z\": 3 # Scissors\n}\n\ntotal = 0\n\nfile = open(os.path.dirname(os.path.abspath(__file__)) + \"/data.txt\", \"r\")\n\nfor line in file:\n    letters = line.strip().split(' ')\n\n    total += player[letters[1]]\n\n    if (enemy[letters[0]] == player[letters[1]]):\n        total += 3\n    elif ((letters[1] == 'X' and letters[0] == 'C') or (letters[1] == 'Y' and letters[0] == 'A') or (letters[1] == 'Z' and letters[0] == 'B')):\n        total += 6 
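\n\n# An equivalent lookup-table form of the win test above (added sketch, not\n# part of the original solution; assumes X/Y/Z mirror A/B/C as\n# rock/paper/scissors):\nBEATS = {'X': 'C', 'Y': 'A', 'Z': 'B'}  # player move -> the enemy move it beats\n# e.g. BEATS[letters[1]] == letters[0] reproduces the three or-ed comparisons.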
\n\nfile.close()\n\nprint(total)\n","repo_name":"Zaland/advent-of-code-2022","sub_path":"day_2/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"14913025087","text":"\nimport os\nimport shutil\nimport json\nimport random\nimport logging\n\nimport openai\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nopenai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n\n\n# ---------------------------------------------------------------------------- #\n# Logger #\n# ---------------------------------------------------------------------------- #\nlogger = logging.getLogger(\"tweet\")\n\n# ---------------------------------------------------------------------------- #\n# Variables #\n# ---------------------------------------------------------------------------- #\nIDEA_BANK = \"idea_bank\"\nUSED_IDEAS = \"used_ideas\"\nMODEL_NAME = \"gpt-4\"\n\n# ---------------------------------------------------------------------------- #\n# Functions #\n# ---------------------------------------------------------------------------- #\n\n\ndef fetch_idea():\n '''\n Retrieve an idea from the idea bank if available.\n '''\n ideas = os.listdir(IDEA_BANK)\n if not ideas:\n return None\n\n idea = random.choice(ideas)\n with open(f\"./{IDEA_BANK}/{idea}\", \"r\", encoding=\"utf-8\") as idea_file:\n idea_content = idea_file.read()\n\n # Move the used idea to a separate folder\n os.makedirs(USED_IDEAS, exist_ok=True)\n shutil.move(f\"{IDEA_BANK}/{idea}\", f\"{USED_IDEAS}/{idea}\")\n\n return idea_content\n\n\ndef generate_concept():\n '''\n Generate the concept for the tweet.\n '''\n with open(\"./src/prompts/concept_system.txt\", \"r\", encoding=\"utf-8\") as prompt_file:\n system_prompt = prompt_file.read()\n\n initial_idea = fetch_idea()\n user_content = \"What should I tweet about?\" if initial_idea is None else f\"I have an idea for a tweet. 
{initial_idea}\"\n\n conversation_history = [\n {\"role\": \"system\", \"content\": system_prompt},\n {\"role\": \"user\", \"content\": user_content}\n ]\n\n concept_draft = openai.ChatCompletion.create(\n model=MODEL_NAME,\n messages=conversation_history,\n temperature=1.2\n ).choices[0].message\n\n conversation_history.append(concept_draft)\n\n # Check if the concept has been used recently\n if os.path.exists(\"tweet_history.json\"):\n with open(\"tweet_history.json\", \"r\", encoding=\"utf-8\") as tweet_history_file:\n tweet_history = json.load(tweet_history_file)\n\n last_20_tweets = tweet_history[-20:]\n\n conversation_history.append({\n \"role\": \"user\",\n \"content\": f\"\"\"\n Is the proposed topic similar to any of the following tweets?\n {last_20_tweets}\n Please reply with \"yes\" or \"no\" only.\n \"\"\"\n })\n\n review_result = openai.ChatCompletion.create(\n model=MODEL_NAME,\n messages=conversation_history\n ).choices[0].message\n\n conversation_history.append(review_result)\n\n if \"yes\" in review_result.content.lower():\n conversation_history.append({\n \"role\": \"user\",\n \"content\": \"Provide a revised topic for the tweet that is not too similar to the most recent tweets.\",\n })\n final_concept = openai.ChatCompletion.create(\n model=MODEL_NAME,\n messages=conversation_history,\n presence_penalty=0.5\n ).choices[0].message\n else:\n final_concept = concept_draft\n else:\n final_concept = concept_draft\n\n logger.debug(\"Conversation history: %s\", conversation_history)\n\n return final_concept.content.strip(), conversation_history\n\n\nif __name__ == \"__main__\":\n concept, message_history = generate_concept()\n print(concept)\n print(json.dumps(message_history, indent=4, sort_keys=True))\n","repo_name":"justinmerrell/Twitter-AutoPost","sub_path":"src/tweet_builder/tweet_concept.py","file_name":"tweet_concept.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"55"} +{"seq_id":"327603120","text":"import re\n\n\nclass Option:\n @staticmethod\n def create(name, value=None, description=None, required=False, pattern=None):\n ALL_OPTIONS = {\n \"rhost\": RHOST,\n \"lhost\": LHOST,\n \"timeout\": Timeout,\n \"rport\": RPORT,\n \"rports\": RPORTS,\n \"verbose\": Verbose,\n \"iface\": Iface,\n \"file\": File,\n \"uri\": URI,\n \"channel\": Channel,\n \"mac\": MAC,\n \"bssid\": MAC,\n \"bmac\": MAC,\n \"apishodan\": SHODAN\n } \n cl = ALL_OPTIONS.get(name, None)\n if cl:\n if description:\n return cl(value, required, description)\n else:\n return cl(value, required)\n else:\n return GenericOption(name, value, required, description, pattern)\n\n# Generic Option Class\nclass GenericOption:\n def __init__(self, key=None, value=None, required=False, description=None, match_pattern=None):\n self.key = key\n self.value = value\n self.required = required\n self.description = description\n self.match_pattern = match_pattern\n \n def _check_pattern(self, v):\n if self.match_pattern is None or v is None:\n return True\n success = False\n if v:\n m = re.match(self.match_pattern, str(v))\n if m:\n success = True\n return success\n \n def set_value(self, v):\n success = self._check_pattern(v)\n if self._check_pattern(v):\n self.value = v\n return success\n \n def get_option(self):\n return {self.key:[self.value, self.description, self.required]}\n\n# Specific Options Classes\n\nclass RHOST(GenericOption):\n def __init__(self, value=None, required=False, description=\"Remote host IP\", \n 
match_pattern=r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$|^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\\\\\d+$\"):\n key=\"rhost\"\n super(RHOST, self).__init__(key, value, required, description, match_pattern)\n\nclass LHOST(GenericOption):\n def __init__(self, value=None, required=False, description=\"Local host IP\", \n match_pattern=r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$|^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\\\\\d+$\"):\n key=\"lhost\"\n super(LHOST, self).__init__(key, value, required, description, match_pattern)\n\nclass RPORT(GenericOption):\n def __init__(self, value=None, required=False, \n description=\"Remote port (Example: 80)\", match_pattern=r\"^\\d+$\"):\n key=\"rport\"\n super(RPORT, self).__init__(key, value, required, description, match_pattern)\n \nclass LPORT(GenericOption):\n def __init__(self, value=None, required=False, \n description=\"Local port (Example: 8080)\", match_pattern=r\"^\\d+$\"):\n key=\"lport\"\n super(LPORT, self).__init__(key, value, required, description, match_pattern)\n\nclass RPORTS(GenericOption):\n def __init__(self, value=None, required=False, \n description=\"Remote ports (Example: 100-500)\", match_pattern=r\"^\\d+-\\d+$\"):\n key=\"rports\"\n super(RPORTS, self).__init__(key, value, required, description, match_pattern)\n\nclass Timeout(GenericOption):\n def __init__(self, value=None, required=False, \n description=\"Timeout to wait for search responses. (In seconds)\", match_pattern=r\"^\\d+$\"):\n key=\"timeout\"\n super(Timeout, self).__init__(key, value, required, description, match_pattern)\n\nclass Verbose(GenericOption):\n def __init__(self, value=False, required=False, description=\"Show extra info while running module\",\n match_pattern = r\"^True|False|true|false|TRUE|FALSE$\"):\n key=\"verbose\"\n super(Verbose, self).__init__(key, value, required, description, match_pattern)\n\nclass Iface(GenericOption):\n def __init__(self, value=None, required=False, description=\"Network/Bluetooth interface\", match_pattern=None): \n key=\"iface\"\n super(Iface, self).__init__(key, value, required, description, match_pattern)\n\nclass File(GenericOption):\n def __init__(self, value=None, required=False, description=\"File to dump or read the data\", match_pattern=None):\n key=\"file\"\n super(File, self).__init__(key, value, required, description, match_pattern)\n\nclass URI(GenericOption):\n def __init__(self, value=None, required=False, description=\"URI\", \n match_pattern = r\"^http://|^https://|rtsp://|ftp://\"):\n key=\"uri\"\n super(URI, self).__init__(key, value, required, description, match_pattern)\n\n
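# Usage sketch (illustrative, not part of the original module):\n# Option.create(\"rport\", value=8080, required=True) returns an RPORT whose\n# set_value() only assigns values matching its match_pattern.\n\n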
class Channel(GenericOption):\n def __init__(self,value=None, required=False, description=\"Network channel. Configure this option if you want to fix it and not 'make jumps'\", match_pattern=r\"^\\d{1,2}$\"):\n key=\"channel\"\n super(Channel, self).__init__(key, value, required, description, match_pattern)\n\nclass MAC(GenericOption):\n def __init__(self, value=None, required=False, description=\"Mac address\", match_pattern=r\"^(?:[0-9a-fA-F]:?){12}$\"):\n key=\"mac\"\n super(MAC, self).__init__(key, value, required, description, match_pattern)\n\nclass SHODAN(GenericOption):\n def __init__(self, value=None, required=False, description=\"Shodan API Key\"):\n key=\"shodan\"\n super(SHODAN, self).__init__(key, value, required, description)","repo_name":"Telefonica/HomePWN","sub_path":"utildata/dataset_options.py","file_name":"dataset_options.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":796,"dataset":"github-code","pt":"55"} +{"seq_id":"33644877816","text":"#Write a program that reads the speed of a car.\n#If it goes over 80 km/h, show a message saying the driver received a fine\n#The fine costs 7 reais for each km above the limit.\n\ncarro = int(input('Qual foi a velocidade do carro?'))\nmulta = (carro - 80) * 7\nif carro > 80:\n print('Informamos que o senhor recebeu uma multa no valor de {}'.format(multa))\nelse:\n print('Tudo segue dentro dos conformes.')\n","repo_name":"JuanLadeira/Meu_Aprendizado_de_Phyton","sub_path":"Exercicios/29.py","file_name":"29.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"4052432539","text":"import pickle\nimport pandas as pd\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.preprocessing.sequence import pad_sequences\nmodel = load_model('my_model.h5')\nwith open('tokenizer.pickle', 'rb') as handle:\n t = pickle.load(handle)\ndf = pd.read_csv(\"final.csv\")\ndf1=df[(df['Abusive']==1) & (df['Hate']==1)]\nX_test=df1['Text']\ny_test=df1['label']\nencoded_docs_test = t.texts_to_sequences(X_test)\npadded_docs_test = pad_sequences(encoded_docs_test, maxlen=25, padding='post')\nans = model.predict([padded_docs_test])\nco=0\ntot=0\nfor i in range(len(ans)):\n tot+=1\n if np.argmax(ans[i]) == 0 or np.argmax(ans[i]) == 1:\n co+=1\nprint(co)\nprint(tot)\nacc = co / tot\nprint(acc)\n","repo_name":"Pk13055/transcript-based-classification","sub_path":"src/scripts/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"1329633496","text":"import os\nfrom flask import Flask, render_template, redirect, request, url_for\nfrom flask_pymongo import PyMongo\nfrom flask_wtf import FlaskForm\nfrom wtforms.validators import DataRequired\nfrom bson.objectid import ObjectId\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = 'hungryHerbivore'\napp.config['MONGO_URI'] = os.environ.get('MONGO_URL')\n\nmongo = PyMongo(app)\n\n\n@app.route('/')\n@app.route('/get_recipes', methods=['POST', 'GET'])\ndef get_recipes():\n filters = {}\n difficulty = request.args.get('difficulty')\n if (difficulty):\n filters[\"difficulty\"] = difficulty\n recipes = mongo.db.recipes.find(filter=filters)\n return render_template(\"recipes.html\", recipes=recipes)\n \n \n@app.route('/add_recipe')\ndef add_recipe():\n return render_template('addrecipe.html', categories=mongo.db.categories.find())\n \n@app.route('/submit_recipe', methods=['POST'])\ndef submit_recipe():\n 
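# Read the posted form fields, save them as a new recipe document, and\n # return to the recipe list.\n 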
recipes=mongo.db.recipes\n recipes.insert_one(request.form.to_dict())\n return redirect(url_for('get_recipes'))\n \n@app.route('/modify_recipe/<recipe_id>')\ndef modify_recipe(recipe_id):\n the_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n all_categories = mongo.db.categories.find()\n return render_template('modifyrecipe.html', recipe=the_recipe)\n \n@app.route('/delete_recipe/<recipe_id>')\ndef delete_recipe(recipe_id):\n mongo.db.recipes.remove({'_id': ObjectId(recipe_id)})\n return redirect(url_for('get_recipes'))\n \n@app.route('/update_recipe/<recipe_id>', methods=[\"POST\"])\ndef update_recipe(recipe_id):\n recipes = mongo.db.recipes\n recipes.update( {'_id': ObjectId(recipe_id)},\n {\n 'meal_name':request.form.get('meal_name'),\n 'difficulty':request.form.get('difficulty'),\n 'meal_time':request.form.get('meal_time'),\n 'meal_ingredients':request.form.get('meal_ingredients'),\n 'meal_requirements':request.form.get('meal_requirements'),\n 'meal_method':request.form.get('meal_method'),\n 'meal_comment':request.form.get('meal_comment'),\n })\n return redirect(url_for('get_recipes'))\n \n@app.route('/view_recipe/<recipe_id>')\ndef view_recipe(recipe_id):\n the_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n return render_template('viewrecipe.html', recipe=the_recipe)\n \nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'),\n port=int(os.environ.get('PORT')),\n debug=True)\n ","repo_name":"tgarratt/HungryHerbivore-Milestone3","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"11601995554","text":"import os.path as op\n\nimport mne\nfrom autoreject import get_rejection_threshold\n\n# from ..config_drago import path_maxfilter_info\npath_maxfilter_info = '/storage/local/camcan/maxfilter'\n\n# epoching params\nduration = 30. # length of epochs (in s)\noverlap = 8. # shift of overlapping epochs (in s)\nn_fft = 8192 # length of hamming windows (in #samples)\nn_overlap = 4096 # overlap of overlapping hamm win (in #samples)\n\n# spectra params\nfmin = 0. # min freq of spectra\nfmax = 150. 
# max freq of spectra\nfbands = [(0.1, 1.5), # low\n (1.5, 4.0), # delta\n (4.0, 8.0), # theta\n (8.0, 15.0), # alpha\n (15.0, 26.0), # beta_low\n (26.0, 35.0), # beta_high\n (35.0, 50.0), # gamma_low\n (50.0, 74.0), # gamma_mid\n (76.0, 120.0)] # gamma_high\n\n\ndef get_subject(file_raw):\n return file_raw.split('/')[-3]\n\n\n# cleaning functions\ndef _get_global_reject_epochs(raw):\n duration = 3.\n events = mne.make_fixed_length_events(\n raw, id=3000, start=0, duration=duration)\n\n epochs = mne.Epochs(\n raw, events, event_id=3000, tmin=0, tmax=duration, proj=False,\n baseline=None, reject=None)\n epochs.load_data()\n epochs.pick_types(meg=True)\n reject = get_rejection_threshold(epochs, decim=1)\n return reject\n\n\ndef _get_global_reject_ssp(raw):\n if 'eog' in raw:\n eog_epochs = mne.preprocessing.create_eog_epochs(raw)\n else:\n eog_epochs = []\n if len(eog_epochs) >= 5:\n reject_eog = get_rejection_threshold(eog_epochs, decim=8)\n del reject_eog['eog'] # we don't want to reject eog based on eog\n else:\n reject_eog = None\n\n ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)\n # we will always have an ECG as long as there are magnetometers\n if len(ecg_epochs) >= 5:\n reject_ecg = get_rejection_threshold(ecg_epochs, decim=8)\n # here we want the eog\n else:\n reject_ecg = None\n\n if reject_eog is None and reject_ecg is not None:\n reject_eog = {k: v for k, v in reject_ecg.items() if k != 'eog'}\n return reject_eog, reject_ecg\n\n\ndef _compute_add_ssp_exg(raw):\n reject_eog, reject_ecg = _get_global_reject_ssp(raw)\n if 'eog' in raw:\n proj_eog, _ = mne.preprocessing.compute_proj_eog(\n raw, average=True, reject=reject_eog, n_mag=1, n_grad=1, n_eeg=1)\n else:\n proj_eog = None\n if proj_eog is not None:\n raw.add_proj(proj_eog)\n\n proj_ecg, _ = mne.preprocessing.compute_proj_ecg(\n raw, average=True, reject=reject_ecg, n_mag=1, n_grad=1, n_eeg=1)\n if proj_ecg is not None:\n raw.add_proj(proj_ecg)\n\n\ndef parse_bad_channels(sss_log):\n \"\"\"Parse bad channels from sss_log.\"\"\"\n with open(sss_log) as fid:\n bad_lines = {l for l in fid.readlines() if 'Static bad' in l}\n bad_channels = list()\n for line in bad_lines:\n chans = line.split(':')[1].strip(' \\n').split(' ')\n for cc in chans:\n ch_name = 'MEG%01d' % int(cc)\n if ch_name not in bad_channels:\n bad_channels.append(ch_name)\n return bad_channels\n\n\ndef _parse_bads(subject, kind):\n sss_log = op.join(\n path_maxfilter_info, subject,\n kind, \"mf2pt2_{kind}_raw.log\".format(kind=kind))\n\n try:\n bads = parse_bad_channels(sss_log)\n except Exception as err:\n print(err)\n bads = []\n # first 100 channels ommit the 0.\n bads = [''.join(['MEG', '0', bb.split('MEG')[-1]])\n if len(bb) < 7 else bb for bb in bads]\n return bads\n\n\ndef _run_maxfilter(raw, subject, kind):\n bads = _parse_bads(subject, kind)\n raw.info['bads'] = bads\n cal = op.join(path_maxfilter_info, 'sss_params', 'sss_cal.dat')\n ctc = op.join(path_maxfilter_info, 'sss_params', 'ct_sparse.fif')\n raw = mne.preprocessing.maxwell_filter(\n raw, calibration=cal,\n cross_talk=ctc,\n st_duration=10.,\n st_correlation=.98,\n destination=None,\n coord_frame='head')\n return raw\n\n\ndef clean_raw(raw, subject):\n mne.channels.fix_mag_coil_types(raw.info)\n raw = _run_maxfilter(raw, subject, 'rest')\n _compute_add_ssp_exg(raw)\n reject = _get_global_reject_epochs(raw)\n return raw, reject\n\n\ndef minclean_raw(raw, subject):\n mne.channels.fix_mag_coil_types(raw.info)\n # bads = _parse_bads(subject, 'rest')\n # raw.info['bads'] = bads\n raw.add_proj([], 
remove_existing=True)\n # raw.interpolate_bads(reset_bads=False)\n reject = None\n return raw, reject\n","repo_name":"DavidSabbagh/NeurIPS19_manifold-regression-meeg","sub_path":"library/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"55"} +{"seq_id":"10481274361","text":"import sys\n\ndef main():\n n = int(sys.stdin.readline())\n scrambledParts = sys.stdin.readline().rstrip().split(\" \")\n answer = []\n for i in range (n):\n scrambledDecimal = int(scrambledParts[i])\n scrambledBinary = decToBinary(scrambledDecimal)\n unscrambledBinary = getUnscrambled(scrambledBinary, False)\n if(unscrambledBinary[8] == '1'):\n unscrambledBinary = getUnscrambled(scrambledBinary, True)\n answer.append(binToDec(unscrambledBinary[:-1]))\n print(*(intList(answer)))\n\ndef intList(list):\n for i in range(len(list)):\n list[i] = int(list[i])\n return list\n\ndef binToDec(binary):\n decimal = 0\n value = 128\n for i in range(8):\n if (binary[i] == '1'):\n decimal += value\n value /= 2\n return decimal\n\ndef getUnscrambled(binary, type):\n x = ''\n xl = ''\n if type == False:\n if (binary[0] == '0'):\n x += '00'\n xl += '0'\n else:\n x += '10'\n xl += '0'\n else:\n if (binary[0] == '0'):\n x += '11'\n xl += '1'\n else:\n x += '01'\n xl += '1'\n for i in range (7):\n if(binary[i+1] == '0'):\n x += x[i+1]\n xl += x[i+1]\n else:\n inverse = opposite(x[i+1])\n x += inverse\n xl += inverse\n return x\n\ndef opposite(c):\n if c == '0':\n return '1'\n return '0'\n\ndef decToBinary(decimal):\n binary = ''\n value = 128\n for i in range(8):\n if (decimal - value) >= 0:\n binary += '1'\n decimal -= value\n else:\n binary += '0'\n value /= 2\n return binary\n\nif __name__ == '__main__':\n main()","repo_name":"ShaunGreer/myKattisSolutions","sub_path":"Jumbled Communication/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"13102977891","text":"#!/usr/bin/env python\n\"\"\"A Simple TCP Client.\"\"\"\nimport socket\n\ntarget_host = \"www.google.com\"\ntarget_port = 80\n\n\ndef main(target_host: str, target_port: int = 80):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n client.connect((target_host, target_port))\n\n client.send(b\"GET / HTTP/1.1\\r\\nHost: google.com\\r\\n\\r\\n\")\n\n response = client.recv(4096)\n\n print(response.decode(\"utf8\"))\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\n \"host\", action=\"store\", default=\"www.google.com\", help=\"host to connect to.\"\n )\n parser.add_argument(\n \"port\",\n action=\"store\",\n default=80,\n type=int,\n help=\"port of the host to connect to.\",\n )\n args = parser.parse_args()\n host, port = args.host, args.port\n main(host, port)\n","repo_name":"TralahM/blackhat-python","sub_path":"network/tcpclient.py","file_name":"tcpclient.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"55"} +{"seq_id":"28433059237","text":"# BASIC IMAGE MANIPULATION:\nimport numpy as np\nimport cv2\n\n# PIXEL MANIPULATION/FILTERING\n\nfrank = cv2.imread(\"frank.jpg\", 1)\n\ngray = cv2.cvtColor(frank, cv2.COLOR_BGR2GRAY)\ncv2.imwrite(\"gray.jpg\", gray)\t\t\t\t\t\t\t\t# saves image (new) as grayscale\n\nfb = frank[:, :, 0]\nfg = frank[:, :, 
1]\nfr = frank[:, :, 2]\n\nrgba = cv2.merge((fb, fg, fr, fg))\t\t\t\t\t\t\t# add an alpha channel (transparency) -- g means low/non green values are transparent\ncv2.imwrite(\"rgba.png\", rgba)\n","repo_name":"kaylaneal/learning-tf","sub_path":"opencv/basicOps-imgManip/pixelManip.py","file_name":"pixelManip.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"23456951982","text":"import webview\nimport threading\nfrom sys import argv\n\ndef principal(nombre):\n with open(nombre) as archivo:\n html = archivo.read()\n webview.load_html(html)\n\nif __name__ == '__main__':\n t = threading.Thread(target=principal, args=[argv[1]])\n t.start()\n\n webview.create_window(\"Resultados\", width=500, height=500, resizable=True)","repo_name":"ggzor/Integrales","sub_path":"tabla.py","file_name":"tabla.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"39970025452","text":"\n\n\nfrom sympy import pprint,symbols,Symbol\nimport sys\n\ndef solve_quad(a,b,c):\n D = (b*b -4*a*c)**0.5\n x_1 = (-b-D)/(2*a)\n x_2 = (-b+D)/(2*a)\n \n return {'x1':x_1,'x2':x_2}\n\n\ndef print_quad(a,b,c):\n x = Symbol('x')\n #xa,xb,xc = symbols(a,b,c)\n za,zb,zc = symbols('a,b,c')\n xa = Symbol(str(a))\n xb = Symbol(str(b))\n xc = Symbol(str(c))\n pm = Symbol('±')\n m = Symbol('-')\n #print original\n #original = \"{a}x**2+{b}x+{c}\".format(a=a,b=b,c=c)\n original = a*x**2+b*x+c\n pprint(original)\n \n #print half working\n #working = \"(-{b}+-({b}**2-4{a}{c})**1/2)/2{a}\".format(a=a,b=b,c=c)\n #working = \"(-{b}+-({b}**2-4{a}{c})**1/2)/2{a}\".format(a=a,b=b,c=c)\n \n working = (-zb*pm*(zb*zb -4*za*zc)**0.5)/(2*za)\n working1 = (-xb + (xb*xb -4*xa*xc)**0.5)/(2*xa)\n working2 = (-xb - (xb*xb -4*xa*xc)**0.5)/(2*xa)\n #working2 = (-b-(b**2-4*a*c)**1/2)/2*a\n pprint(working)\n pprint(working1)\n pprint(working2)\n #print(pprint(working2))\n \n #print final\n final = solve_quad(a,b,c)\n x_1 = final['x1']\n x_2 = final['x2']\n print(\"x1 = {}\".format(x_1))\n print(\"x2 = {}\".format(x_2))\n\nif __name__ == '__main__':\n #print(\"count {}\".format(sys.argv))\n if len(sys.argv) != 4:\n print(\"usage: {} \".format(sys.argv[0]))\n else:\n #print(\"a{},b{},c{}\".format(sys.argv[1],sys.argv[2],sys.argv[3]))\n print_quad(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))\n #print(solve_quad(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])))\n","repo_name":"JustinAClarke/TI-Nspire-Quadratic-Equ-ARCHIVED","sub_path":"quad.py","file_name":"quad.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"5167959459","text":"# --------------\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\n# Code starts here\ndf = pd.read_csv(path)\nprint(df.iloc[:,0:5])\nprint(df.info())\ncols = ['INCOME', 'HOME_VAL', 'BLUEBOOK', 'OLDCLAIM', 'CLM_AMT']\nfor col in cols:\n df[col] = df[col].str.replace(r'[^\\d.]+', '')\n\nX = df.iloc[:,0:-1]\ny = df.iloc[:,-1]\n\ncount = y.value_counts()\nprint(count)\n\nX_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 6)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\ncols = ['INCOME', 'HOME_VAL', 'BLUEBOOK', 'OLDCLAIM', 'CLM_AMT']\nfor col in cols:\n X_train[col] = 
X_train[col].astype(float)\n\nfor col in cols:\n X_test[col] = X_test[col].astype(float)\n\nprint(X_train.isnull().sum())\nprint(X_test.isnull().sum())\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\nX_train.dropna(subset=['YOJ','OCCUPATION'], inplace=True)\nX_test.dropna(subset=['YOJ','OCCUPATION'], inplace=True)\n\ny_train.reset_index(drop=True, inplace=True)\ny_test.reset_index(drop=True, inplace=True)\n\ncols = ['AGE', 'CAR_AGE', 'INCOME', 'INCOME']\nfor col in cols:\n X_train[col] = X_train[col].fillna(X_train[col].mean())\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.preprocessing import LabelEncoder\ncolumns = [\"PARENT1\",\"MSTATUS\",\"GENDER\",\"EDUCATION\",\"OCCUPATION\",\"CAR_USE\",\"CAR_TYPE\",\"RED_CAR\",\"REVOKED\"]\n\n# Code starts here\nle = LabelEncoder()\nfor col in columns:\n X_train[col] = le.fit_transform(X_train[col].astype(str))\n\nfor col in columns:\n X_test[col] = le.fit_transform(X_test[col].astype(str))\n\n# Code ends here\n\n\n\n# --------------\nfrom sklearn.metrics import precision_score \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\n\n\n# code starts here\nmodel = LogisticRegression(random_state = 6)\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint('y_pred is ', y_pred)\n\nscore = accuracy_score(y_test, y_pred)\nprint('score is ', score)\n\nprecision = precision_score(y_test, y_pred)\nprint('precision is ', precision)\n# Code ends here\n\n\n# --------------\nfrom sklearn.preprocessing import StandardScaler\nfrom imblearn.over_sampling import SMOTE\n\n# code starts here\nsmote = SMOTE(random_state = 6)\nX_train, y_train = smote.fit_sample(X_train, y_train)\n\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n# Code ends here\n\n\n# --------------\n# Code Starts here\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint('y_pred is ', y_pred)\n\nscore = accuracy_score(y_test, y_pred)\nprint('score is ', score)\n\n# Code ends here\n\n\n","repo_name":"rdukale007/ga-learner-dsmp-repo","sub_path":"Car-insurance-claim/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"40516902453","text":"import os\nimport numpy as np\nimport gym\n\nfrom ddpg import DDPG\nfrom planner import Planner\nfrom sampler import make_sample_her_transitions\nfrom sampler import make_sample_plans\nfrom common.monitor import Monitor\nimport logger\n\n\nDEFAULT_ENV_PARAMS = {\n 'FetchReach-v1': {\n 'n_cycles': 10,\n },\n 'AntMazeU-v1':{\n 'buffer_size': int(2.5E5),\n 'relative_goals': True,\n },\n 'AntMazeG-v1':{\n 'buffer_size': int(2E5),\n 'relative_goals': True,\n }\n}\n\nDEFAULT_AGENT_PARAMS = {\n # env\n 'max_u': 1., # max absolute value of actions on different coordinates\n # ddpg\n 'layers': 3, # number of layers in the critic/actor networks\n 'hidden': 256, # number of neurons in each hidden layers\n 'network_class': 'actor_critic:ActorCritic',\n 'Q_lr': 0.001, # critic learning rate\n 'pi_lr': 0.001, # actor learning rate\n 'buffer_size': int(1E6), # for experience replay\n 'polyak': 0.95, # polyak averaging coefficient\n 'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)\n 'clip_obs': 200.,\n 'scope': 'ddpg_pi', # can be tweaked for testing\n 'relative_goals': False,\n # training\n 'n_cycles': 50, # per epoch\n 'rollout_batch_size': 
2, # per mpi thread\n 'n_batches': 40, # training batches per cycle\n 'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.\n 'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts\n 'test_with_polyak': False, # run test episodes with the target network\n # exploration\n 'random_eps': 0.3, # percentage of time a random action is taken\n 'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u\n 'act_rdm_dec': 'None', # supported modes: None, linear, sine\n # HER\n 'replay_strategy': 'future', # supported modes: future, none\n 'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future\n # normalization\n 'norm_eps': 0.01, # epsilon used for observation normalization\n 'norm_clip': 5, # normalized observations are cropped to this values\n\n 'bc_loss': 0, # whether or not to use the behavior cloning loss as an auxilliary loss\n 'q_filter': 0, # whether or not a Q value filter should be used on the Actor outputs\n 'num_demo': 100, # number of expert demo episodes\n 'demo_batch_size': 128, #number of samples to be used from the demonstrations buffer, per mpi thread 128/1024 or 32/256\n 'prm_loss_weight': 0.001, #Weight corresponding to the primary loss\n 'aux_loss_weight': 0.0078, #Weight corresponding to the auxilliary loss also called the cloning loss\n}\n\nDEFAULT_PLANNER_PARAMS = {\n 'scope': 'planner',\n 'hid_size': 64,\n 'optim_stepsize' : 0.001, # learning rate\n 'buffer_size': int(1E4), # for plan replay\n 'layerNorm' : False, # whether or not to use layerNorm in RNN\n 'seq_len' : 4, # 4 subgoals\n 'pln_batch_size': 64, # batch_size for training\n # normalization\n 'norm_eps': 0.01, # epsilon used for observation normalization\n 'norm_clip': 5, # normalized observations are cropped to this values\n # 'subgoal_strategy': 'time_sample',\n}\n\nCACHED_ENVS = {}\n\ndef cached_make_env(make_env):\n \"\"\"\n Only creates a new environment from the provided function if one has not yet already been\n created. This is useful here because we need to infer certain properties of the env, e.g.\n its observation and action spaces, without any intend of actually using it.\n \"\"\"\n if make_env not in CACHED_ENVS:\n env = make_env()\n CACHED_ENVS[make_env] = env\n return CACHED_ENVS[make_env]\n\n\ndef simple_goal_subtract(a, b):\n assert a.shape == b.shape\n return a - b\n\ndef config_params_get_policy(params, reuse=False, clip_return=True):\n env_name = params['env_name']\n\n def make_env(subrank=None):\n env = gym.make(env_name)\n if subrank is not None and logger.get_dir() is not None:\n try:\n from mpi4py import MPI\n mpi_rank = MPI.COMM_WORLD.Get_rank()\n except ImportError:\n MPI = None\n mpi_rank = 0\n logger.warn('Running with a single MPI process. This should work, but the results may differ from the ones publshed in Plappert et al.')\n\n max_episode_steps = env._max_episode_steps\n env = Monitor(env,\n os.path.join(logger.get_dir(), str(mpi_rank) + '.' 
+ str(subrank)),\n allow_early_resets=True)\n # hack to re-expose _max_episode_steps (ideally should replace reliance on it downstream)\n env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)\n return env\n\n params['make_env'] = make_env\n env = cached_make_env(params['make_env'])\n env.reset()\n params['T'] = env.spec.max_episode_steps\n params['max_u'] = np.array(params['max_u']) if isinstance(params['max_u'], list) else params['max_u']\n params['gamma'] = 1. - 1. / params['T']\n if 'lr' in params:\n params['pi_lr'] = params['lr']\n params['Q_lr'] = params['lr']\n del params['lr']\n\n ddpg_params = dict()\n for name in ['buffer_size', 'hidden', 'layers',\n 'network_class',\n 'polyak',\n 'batch_size', 'Q_lr', 'pi_lr',\n 'norm_eps', 'norm_clip', 'max_u',\n 'action_l2', 'clip_obs', 'scope', 'relative_goals']:\n ddpg_params[name] = params[name]\n params['_' + name] = params[name]\n del params[name]\n\n params['ddpg_params'] = ddpg_params\n\n # configure_her\n def reward_fun(ag_2, g, info): # vectorized\n return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)\n \n def goal_delta(g1, g2, weight=None):\n return env.goal_delta(pre_goal=g1, now_goal=g2, weight=weight)\n \n her_param = {\n 'reward_fun': reward_fun,\n 'replay_strategy' : params['replay_strategy'],\n 'replay_k' : params['replay_k']\n }\n sample_her_transitions = make_sample_her_transitions(**her_param)\n params['reward_fun'] = reward_fun\n params['goal_delta'] = goal_delta\n\n # configure_dims\n env.reset()\n obs, _, _, info = env.step(env.action_space.sample())\n dims = {\n 'o': obs['observation'].shape[0],\n 'u': env.action_space.shape[0],\n 'g': obs['desired_goal'].shape[0],\n }\n for key, value in info.items():\n value = np.array(value)\n if value.ndim == 0:\n value = value.reshape(1)\n dims['info_{}'.format(key)] = value.shape[0]\n params['dims'] = dims\n \n # configure_ddpg\n gamma = params['gamma']\n ddpg_params.update({\n 'input_dims' : dims,\n 'T': params['T'],\n 'clip_pos_returns': True, # clip positive returns\n 'clip_return': (1. / (1. 
- gamma)) if clip_return else np.inf, # max abs of return\n 'rollout_batch_size': params['rollout_batch_size'],\n 'subtract_goals': simple_goal_subtract,\n 'sample_transitions': sample_her_transitions,\n 'gamma': gamma\n })\n ddpg_params['info'] = {\n 'env_name': params['env_name'] }\n policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=True)\n return policy\n\n\ndef config_params_get_planner(params, sess=None):\n env_name = params['env_name']\n if 'make_env' not in params:\n def make_env():\n env = gym.make(env_name)\n return env\n params['make_env'] = make_env\n\n env = cached_make_env(params['make_env'])\n env.reset()\n if 'dims' not in params:\n obs, _, _, info = env.step(env.action_space.sample())\n dims = {\n 'o': obs['observation'].shape[0],\n 'u': env.action_space.shape[0],\n 'g': obs['desired_goal'].shape[0],\n }\n for key, value in info.items():\n value = np.array(value)\n if value.ndim == 0:\n value = value.reshape(1)\n dims['info_{}'.format(key)] = value.shape[0]\n params['dims'] = dims\n else:\n dims = params['dims']\n planner_params = dict()\n planner_params.update(DEFAULT_PLANNER_PARAMS)\n\n for attr in ['hid_size', 'pln_batch_size', 'seq_len']:\n if attr in params:\n planner_params[attr] = params[attr]\n params['seq_len'] = planner_params['seq_len'] # write into params for the rollout worker\n\n logger.save_params(params=planner_params, filename='planner_params.json')\n sample_func = make_sample_plans()\n planner_params['batch_size'] = planner_params['pln_batch_size']\n del planner_params['pln_batch_size']\n planner_params.update({\n 'inp_dim': dims['g'],\n 'out_dim': dims['g'],\n 'sample_func': sample_func\n })\n \n planner = Planner(**planner_params, use_mpi=True)\n\n return planner \n","repo_name":"aamas20-984/HindsightPlanner","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"} +{"seq_id":"20676767058","text":"import re\n\nL = {}\nDP = {}\n\nfor line in [line.strip() for line in open('d16.in').readlines()]:\n valve, rate, paths = re.search(r'Valve (\\w{2}) has flow rate=(\\d+); tunnel[s]? lead[s]? to valve[s]? 
(.*)', line).groups()\n rate = int(rate)\n paths = paths.split(', ')\n L[valve] = (rate, paths)\n\ndef f(pos, V, time):\n if time == 0:\n return 0\n\n key = (pos, tuple(sorted(V)), time)\n if key in DP:\n return DP[key]\n\n ans = 0\n\n if time > 0 and pos not in V and L[pos][0] > 0:\n new_V = set(V)\n new_V.add(pos)\n ans = max(ans, sum(L[o][0] for o in V) + f(pos, new_V, time - 1))\n\n if time > 0:\n for n in L[pos][1]:\n ans = max(ans, sum(L[o][0] for o in V) + f(n, V, time - 1))\n\n DP[key] = ans\n return ans\n\nprint(f('AA', set(), 30))\n","repo_name":"rschw/advent-of-code-2022","sub_path":"d16p1.py","file_name":"d16p1.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8939110773","text":"from PIL import Image\nimport pytesseract\nimport numpy as np\nimport os\n \npath_dir = 'example_images'\n\n\ndef get_files() -> list:\n files = []\n for path in os.listdir(path_dir):\n file_name = os.path.join(path_dir, path)\n if os.path.isfile(file_name):\n files.append(file_name)\n\n return files\n\n\ndef get_file_name(relative_file_path: str) -> str:\n return relative_file_path.replace(path_dir, '')\n\n\ndef convert_img_to_grey(file: str) -> str:\n im_gray = np.array(Image.open(file).convert('L'))\n filename = get_file_name(file)\n path_img_grey = path_dir + filename + '_grey.png'\n Image.fromarray(np.uint8(im_gray)).save(path_img_grey)\n\n return path_img_grey\n\n\ndef filter_value(value: str) -> str:\n if value.find(' 1 ') >= 0:\n value = value.replace(' 1 ', '')\n if value.find(' 10 ') >= 0:\n value = value.replace(' 10 ', '')\n if value.find('#€') >= 0:\n value = value.replace('#€', '')\n if value.find('01 ') >= 0:\n value = value.replace('01 ', '')\n if value.find('10( ') >= 0:\n value = value.replace('10( ', '')\n if value.find('K') >= 0:\n value = value.replace('K', '')\n if value.find('#') >= 0:\n value = value.replace('#', '')\n if value.find(' ') >= 0:\n value = value.replace(' ', '')\n if value.find('...') >= 0:\n value = value.replace('...', ' ')\n return value\n\n\ndef remove_empty_lines(data: list) -> list:\n return [value for value in data if value != '']\n\n\ndef format_data(data: list) -> list:\n for index, value in enumerate(data):\n value = filter_value(value).split('lotde')\n data[index] = value\n\n return data\n\n\nfor file in get_files():\n grey_img = convert_img_to_grey(file)\n\n lines = pytesseract.image_to_string(Image.open(grey_img))\n data = lines.splitlines()\n data = remove_empty_lines(data)\n data = format_data(data)\n\n print(data)\n os.remove(grey_img)\n","repo_name":"Antoin3Sc/-poc-extract-data-from-image","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20737875788","text":"def decompose(N):\n if N < 2:\n return []\n p = -1\n for i in range(65):\n if 2 ** i >= N:\n p = i\n break\n l = decompose(N - 2 ** (p - 1))\n l.append(3 ** (p - 1))\n return l\n\nwhile True:\n N = int(input())\n if N == 0:\n exit(0)\n if N == 1:\n print(\"{ }\")\n continue\n l = decompose(N)\n s = \"\"\n for i in l:\n if s != \"\":\n s += \", \"\n s += str(i)\n print(\"{ \" + s + \" }\")\n","repo_name":"tr0j4n034/SPOJ","sub_path":"THRPWRS.py","file_name":"THRPWRS.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"57"}
+{"seq_id":"23331302231","text":"\n\nimport aql.local_host\nimport aql.utils\nimport aql.setup\n\n#//---------------------------------------------------------------------------//\n\ndef _setup_vc6( options, env, os_env ):\n \n MSVC6DIR = 'd:/bin/development/compilers/vc6'\n VSCOMMONDIR = MSVC6DIR + '/common'\n MSDEVDIR = MSVC6DIR + '/common/MSDEV98'\n MSVCDIR = MSVC6DIR + '/vc98'\n \n vc6_env = options.If().cc_name['msvc'].cc_ver.ge(6).cc_ver.lt(7).os_env\n \n vc6_env['VSCOMMONDIR'] = PathOption()\n vc6_env['MSDEVDIR'] = PathOption()\n \n vc6_env['VSCOMMONDIR'] = VSCOMMONDIR\n vc6_env['MSDEVDIR'] = MSDEVDIR\n vc6_env['PATH'] += MSDEVDIR + '/bin'\n vc6_env['PATH'] += MSVCDIR + '/bin'\n vc6_env['PATH'] += VSCOMMONDIR + '/tools'\n vc6_env['INCLUDE'] += MSVCDIR + '/ATL/INCLUDE'\n vc6_env['INCLUDE'] += MSVCDIR + '/INCLUDE'\n vc6_env['INCLUDE'] += MSVCDIR + '/MFC/INCLUDE'\n vc6_env['LIB'] += MSVCDIR + '/LIB'\n vc6_env['LIB'] += MSVCDIR + '/MFC/LIB'\n \n\n#//---------------------------------------------------------------------------//\n\ndef _setup_msvc_psdk( options, env, os_env, getShellScriptEnv = aql.utils.getShellScriptEnv ):\n \n getShellScriptEnv( os_env, \"d:/bin/development/psdk/SetEnv.Bat /XP32 /RETAIL\" )\n\n#//---------------------------------------------------------------------------//\n\ndef _setup_vc71( options, env, os_env ):\n \n MSVC71DIR = 'd:/bin/development/compilers/VCToolkit'\n \n vc71_env = options.If().cc_name['msvc'].cc_ver.ge(7).cc_ver.lt(8).os_env\n \n vc71_env['PATH'] += MSVC71DIR + '/bin'\n vc71_env['INCLUDE'] += MSVC71DIR + '/include'\n vc71_env['LIB'] += MSVC71DIR + '/lib'\n \n _setup_msvc_psdk( options, env, os_env )\n\n#//---------------------------------------------------------------------------//\n\ndef _setup_vc8( options, env, os_env ):\n aql.utils.getShellScriptEnv( os_env, \"%VS80COMNTOOLS%vsvars32.bat\" )\n \n _setup_msvc_psdk( options, env, os_env )\n\n#//---------------------------------------------------------------------------//\n\ndef setup_msvc( options, os_env, env ):\n \n if options.cc_name.isSetNotTo( 'msvc' ):\n return\n \n if options.target_os.isSetNotTo( 'windows' ):\n return\n \n if options.cc_ver.isNotSetOr( 8 ):\n _setup_vc8( options, env, os_env )\n \n elif options.cc_ver == '7':\n _setup_vc71( options, env, os_env )\n \n elif options.cc_ver == '6':\n _setup_vc6( options, env, os_env )\n\n#//===========================================================================//\n\naql.setup.AddToolSetup( 'aql_tool_msvc', setup_msvc )\n","repo_name":"menify/sandbox","sub_path":"tags/aql_beta_1_16032008/setup/setup_msvc.py","file_name":"setup_msvc.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10412200564","text":"#!/usr/bin/env python3\n\n\"\"\"FIPS 140-2: RNG Power-Up Tests\"\"\"\n\n# Assess the quality of your TRNG by running the statistical random\n# number generator tests from Chapter 4.9.1 (Power-Up Tests) of \"FIPS\n# PUB 140-2 - SECURITY REQUIREMENTS FOR CRYPTOGRAPHIC MODULES\". The\n# document is available on the handout server.\n\n
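# NOTE: every test below expects a 20,000-bit sample, i.e. 2,500 bytes read\n# from FILENAME (20000 / 8 = 2500).\n\n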
FILENAME='random.dat'\n#FILENAME='random_radio_noise.dat'\n\nimport functools\nimport pdb\n\ndef readRandomBits(filename):\n \"\"\"Read file and return it as list of bits.\"\"\"\n rn = []\n rnFile = open(filename, 'rb')\n rn = rnFile.read()\n rnFile.close()\n return(functools.reduce(lambda x,y: x+int2bin(y,8), rn, []))\n\ndef int2bin(x, n):\n \"\"\"Convert integer to array of bits.\n x : integer\n n : length of bit array\"\"\"\n b = list(map(lambda x: ord(x)-ord('0'), list(bin(x)[2:])))\n return([0]*(n-len(b)) + b)\n\ndef bin2int(b):\n \"\"\"Convert array of bits to integer.\"\"\"\n return(int(\"\".join(map(lambda x: chr(x+ord('0')), b)), 2))\n\ndef testRandomNumbers(randomBits):\n print('Monobit Test: %s' % repr(monobitTest(randomBits)))\n print('Poker Test: %s' % repr(pokerTest(randomBits)))\n print('Runs Test: %s' % repr(runsTest(randomBits)))\n print('Long Runs Test: %s' % repr(longRunsTest(randomBits)))\n\ndef monobitTest(randomBits):\n \"\"\"FIPS 140-2 monobit test\"\"\"\n # Count the number of ones in the 20,000 bit stream. Denote this\n # quantity by x.\n #\n # The test is passed if 9725 < x < 10275\n # pass\n##################\n count = 0\n for i in range(len(randomBits)):\n if randomBits[i] == 1:\n count += 1\n result = False\n if 9725 < count < 10275:\n result = True\n assert (result == True)\n\ndef pokerTest(randomBits):\n \"\"\"FIPS 140-2 poker test\"\"\"\n # Divide the 20000 bit stream into 5000 contiguous 4 bit\n # segments. Count and store the number of occurrences of the 16\n # possible 4 bit values. Denote f[i] as the number of each 4 bit\n # value i where 0 < i < 15.\n #\n # Evaluate the following:\n # 15\n # --\n # x = (16/5000) * ( \\ f[i]^2 ) - 5000\n # /\n # --\n # i=0\n #\n # The test is passed if 2.16 < x < 46.17\n #\n # See fips_140_2.pdf, page 39-40\n f = [0] * 16\n for i in range(0, len(randomBits), 4):\n f[bin2int(randomBits[i:i+4])] += 1\n x = (16.0 / 5000.0) * sum(c * c for c in f) - 5000\n result = False\n if 2.16 < x < 46.17:\n result = True\n assert (result == True)\n\n
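# Worked example for the poker test: a random stream averages 5000/16 = 312.5\n# occurrences per 4-bit pattern; exactly equal counts would give x = 0, which\n# fails the 2.16 lower bound, so overly regular streams are rejected too.\n\n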
def runsTest(randomBits):\n \"\"\"FIPS 140-2 runs test\"\"\"\n # A run is defined as a maximal sequence of consecutive bits of\n # either all ones or all zeros that is part of the 20000 bit\n # sample stream. The incidences of runs (for both consecutive\n # zeros and consecutive ones) of all lengths (>= 1) in the\n # sample stream should be counted and stored.\n #\n # The test is passed if the runs that occur (of lengths 1 through\n # 6) are each within the corresponding interval specified in the\n # table below. This must hold for both the zeros and ones (i.e.,\n # all 12 counts must lie in the specified interval). For the\n # purposes of this test, runs of greater than 6 are considered to\n # be of length 6.\n #\n # Length Required Interval\n # of Run \n # 1 2343 - 2657\n # 2 1135 - 1365\n # 3 542 - 708\n # 4 251 - 373\n # 5 111 - 201\n # 6+ 111 - 201\n #\n # See fips_140_2.pdf, page 40\n\n # count runs of 1,\n count1 = countRunOf1(randomBits)\n # count runs of 2,\n count2 = countRunOf2(randomBits)\n # count runs of 3,\n count3 = countRunOf3(randomBits)\n # count runs of 4,\n count4 = countRunOf4(randomBits)\n # count runs of 5,\n count5 = countRunOf5(randomBits)\n # count runs of 6,\n count6 = countRunOf6(randomBits, 6)\n result = False\n # 1 2343 - 2657\n if 2343 <= count1 <= 2657:\n if 1135 <= count2 <= 1365:\n if 542 <= count3 <= 708:\n if 251 <= count4 <= 373:\n if 111 <= count5 <= 201:\n if 111 <= count6 <= 201:\n result = True\n assert (result == True)\n##################\n \ndef longRunsTest(randomBits):\n \"\"\"FIPS 140-2 long runs test\"\"\"\n # A long run is defined to be a run of length 26 or more (of\n # either zeros or ones). On the sample of 20000 bits, the test is\n # passed if there are no long runs.\n #\n # See fips_140_2.pdf, page 40\n\n##################\n# YOUR CODE HERE #\n result = False\n count = countRunOf6(randomBits, 26)\n print(count)\n if count == 0:\n result = True\n\n assert (result == True)\n##################\ndef countRunOf6(randomBits, number):\n count1 = 0\n count = 0\n prevValue = randomBits[0]\n i = 0\n while i < len(randomBits) - 1:\n while i < len(randomBits) - 1:\n\n i += 1\n if prevValue == randomBits[i]:\n count1 += 1\n else:\n prevValue = randomBits[i]\n break\n prevValue = randomBits[i]\n\n if count1 >= number-1:\n count += 1\n count1 = 0\n\n return count\n\ndef countRunOf5(randomBits):\n count = 0\n prevValue = randomBits[0]\n prevValue2 = randomBits[1]\n prevValue3 = randomBits[2]\n prevValue4 = randomBits[3]\n flag = False\n value = 0\n prevIdx = 0\n for i in range(len(randomBits)):\n if i > 3:\n if flag:\n if randomBits[prevIdx + 1] != value:\n if (prevIdx - 5) < 0:\n count += 1\n elif randomBits[prevIdx - 5] != value:\n count += 1\n flag = False\n if prevValue == prevValue2:\n if prevValue == prevValue3:\n if prevValue == prevValue4:\n if prevValue == randomBits[i]:\n flag = True\n value = prevValue\n prevIdx = i\n\n prevValue = randomBits[i - 3]\n prevValue2 = randomBits[i - 2]\n prevValue3 = randomBits[i - 1]\n prevValue4 = randomBits[i]\n\n if randomBits[len(randomBits) - 1] == randomBits[len(randomBits) - 2]:\n if randomBits[len(randomBits) - 3] == randomBits[len(randomBits) - 1]:\n if randomBits[len(randomBits) - 4] == randomBits[len(randomBits) - 1]:\n if randomBits[len(randomBits) - 5] == randomBits[len(randomBits) - 1]:\n if randomBits[len(randomBits) - 1] != randomBits[len(randomBits) - 6]:\n count += 1\n return count\n\n\ndef countRunOf4(randomBits):\n count = 0\n prevValue = randomBits[0]\n prevValue2 = randomBits[1]\n prevValue3 = randomBits[2]\n flag = False\n value = 0\n prevIdx = 0\n for i in range(len(randomBits)):\n if i > 2:\n if flag:\n if randomBits[prevIdx + 1] != value:\n if (prevIdx - 4) < 0:\n count += 1\n elif randomBits[prevIdx - 4] != value:\n count += 1\n flag = False\n if prevValue == prevValue2:\n if prevValue == prevValue3:\n if prevValue == randomBits[i]:\n flag = True\n value = prevValue\n prevIdx = i\n\n prevValue = randomBits[i - 2]\n prevValue2 = randomBits[i - 1]\n prevValue3 = randomBits[i]\n\n if randomBits[len(randomBits) - 1] == randomBits[len(randomBits) - 2]:\n if 
randomBits[len(randomBits) - 3] == randomBits[len(randomBits) - 1]:\n if randomBits[len(randomBits) - 4] == randomBits[len(randomBits) - 1]:\n if randomBits[len(randomBits) - 1] != randomBits[len(randomBits) - 5]:\n count += 1\n return count\n\n\ndef countRunOf3(randomBits):\n count = 0\n prevValue = randomBits[0]\n prevValue2 = randomBits[1]\n flag = False\n value = 0\n prevIdx = 0\n for i in range(len(randomBits)):\n if i > 1:\n if flag:\n if randomBits[prevIdx + 1] != value:\n if (prevIdx - 3) < 0:\n count += 1\n elif randomBits[prevIdx - 3] != value:\n count += 1\n flag = False\n if prevValue == prevValue2:\n if prevValue == randomBits[i]:\n flag = True\n value = prevValue\n prevIdx = i\n\n prevValue = randomBits[i-1]\n prevValue2 = randomBits[i]\n\n if randomBits[len(randomBits) - 1] == randomBits[len(randomBits) - 2]:\n if randomBits[len(randomBits) - 3] == randomBits[len(randomBits) - 1]:\n if randomBits[len(randomBits) - 1] != randomBits[len(randomBits) - 4]:\n count += 1\n\n return count\n\n\ndef countRunOf2(randomBits):\n # count runs of 2,\n count = 0\n prevValue = randomBits[0]\n flag = False\n value = 0\n prevIdx = 0\n for i in range(len(randomBits)):\n if i != 0:\n if flag:\n if randomBits[prevIdx + 1] != value:\n if (prevIdx - 2) < 0:\n count += 1\n elif randomBits[prevIdx - 2] != value:\n count += 1\n flag = False\n if prevValue == randomBits[i]:\n flag = True\n value = prevValue\n prevIdx = i\n prevValue = randomBits[i]\n\n if randomBits[len(randomBits)-1] == randomBits[len(randomBits)-2]:\n if randomBits[len(randomBits)-1] != randomBits[len(randomBits)-3]:\n count += 1\n\n return count\n\n\ndef countRunOf1(randomBits):\n # count runs of 1,\n count = 0\n prevValue = randomBits[0]\n middleValue = randomBits[1]\n futureValue = randomBits[2]\n j = 0\n # 0100100101100001110001\n # 0101\n for i in range(len(randomBits)):\n if j == 0:\n if prevValue != middleValue:\n count += 1\n elif j == 1:\n if prevValue != middleValue & middleValue != futureValue:\n count += 1\n elif j+1 != len(randomBits):\n prevValue = randomBits[j-1]\n middleValue = randomBits[j]\n futureValue = randomBits[j+1]\n\n if middleValue != prevValue:\n if middleValue != futureValue:\n count += 1\n j += 1\n\n if randomBits[len(randomBits)-1] != randomBits[len(randomBits)-2]:\n count += 1\n return count\n\nif __name__ == \"__main__\":\n randomBits = readRandomBits(filename=FILENAME)\n testRandomNumbers(randomBits=randomBits)\n","repo_name":"HusnainKhan01/True_RNG_UsingStocks","sub_path":"test_rng.py","file_name":"test_rng.py","file_ext":"py","file_size_in_byte":10677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71459274737","text":"def kemeny_aggregation(rankings):\n \"\"\"\n Computes the Kemeny-Young aggregation of a set of ranking lists.\n \n Parameters:\n rankings (list of lists): A list of ranking lists, where each list contains the items ranked in order.\n \n Returns:\n list: The Kemeny-Young aggregation, i.e., the ranking that minimizes the total number of pairwise swaps.\n \"\"\"\n n = len(rankings[0]) # Number of items\n m = len(rankings) # Number of rankings\n \n # Construct pairwise preference matrix\n P = [[0]*n for i in range(n)]\n for r in rankings:\n for i in range(n):\n for j in range(i+1, n):\n P[r[i]-1][r[j]-1] += 1\n \n # Initialize arbitrary ranking\n R = list(range(1, n+1))\n cost = kemeny_distance(R, P)\n \n # Iterate until Kemeny optimal aggregation is found\n while True:\n swap_found = False\n for i in range(n):\n for j in 
range(i+1, n):\n # Check if swapping (i,j) reduces the cost\n R_new = R.copy()\n R_new[i], R_new[j] = R_new[j], R_new[i]\n cost_new = kemeny_distance(R_new, P)\n if cost_new < cost:\n R = R_new\n cost = cost_new\n swap_found = True\n if not swap_found:\n break\n \n return R\n \ndef kemeny_distance(ranking, P):\n \"\"\"\n Computes the Kemeny distance between a ranking and a pairwise preference matrix.\n \n Parameters:\n ranking (list): A ranking, i.e., a list of items in order.\n P (list of lists): A pairwise preference matrix, where P[i][j] is the number of times item i is preferred over item j.\n \n Returns:\n int: The Kemeny distance between the ranking and P.\n \"\"\"\n n = len(ranking)\n cost = 0\n for i in range(n):\n for j in range(i+1, n):\n d = (ranking.index(i+1) - ranking.index(j+1)) * ((P[i][j] > P[j][i]) - (P[j][i] > P[i][j]))\n cost += abs(d)\n return cost\n\nfrom itertools import permutations\n\ndef kemeny_young(rankings):\n n = len(rankings[0])\n pairwise = [[0]*n for _ in range(n)]\n for ranking in rankings:\n for i, j in permutations(range(n), 2):\n pairwise[ranking[i]][ranking[j]] += 1\n \n min_kendall_tau = float('inf')\n min_permutation = None\n for permutation in permutations(range(n)):\n kendall_tau = 0\n for i, j in permutations(range(n), 2):\n kendall_tau += pairwise[i][j] * (1 if permutation.index(i) < permutation.index(j) else -1)\n if kendall_tau < min_kendall_tau:\n min_kendall_tau = kendall_tau\n min_permutation = permutation\n \n return [x for x in min_permutation]\n\nimport numpy as np\nfrom itertools import permutations\nfrom scipy.optimize import minimize\n\ndef kemeny_young_method(rankis):\n rankings = np.array(rankis)\n n_experts, n_items = rankings.shape\n \n # Define the objective function to minimize\n def objective(x):\n rank_diffs = np.abs(rankings.dot(x)[:, np.newaxis] - rankings.dot(x)[np.newaxis, :])\n return np.sum(rank_diffs)\n \n # Define the constraint that the weights must sum to 1\n cons = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]\n \n # Define the initial guess for the weights\n x0 = np.ones(n_experts) / n_experts\n \n # Solve the optimization problem to find the weights\n res = minimize(objective, x0, constraints=cons)\n \n # Combine the expert rankings using the weights\n weighted_rankings = rankings.dot(res.x)\n \n # Compute all possible rankings of the items\n all_rankings = permutations(range(n_items))\n \n # Find the ranking that minimizes the sum of pairwise differences with the combined ranking\n min_diff = np.inf\n for ranking in all_rankings:\n rank_diff = np.sum(np.abs(ranking - np.argsort(weighted_rankings)))\n if rank_diff < min_diff:\n min_diff = rank_diff\n ranked_items = ranking\n \n return ranked_items\n\n\n\ninput_rankings = [[1, 6, 10, 3, 2, 4, 8, 5, 7, 9], [1, 6, 10, 3, 2, 4, 8, 5, 7, 9], [1, 6, 10, 5, 7, 3, 2, 4, 8, 9]]\nkemeny_aggregation = kemeny_young_method(input_rankings)\nprint(kemeny_aggregation)\n\n\n","repo_name":"Malek-Ghorbel/ranking-semantics-Abstract-Argumentation","sub_path":"scoring_aggregation/kemeny_aggregation.py","file_name":"kemeny_aggregation.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"21970646295","text":"# pip install python-telegram-bot\nfrom lesson26 import get_info\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler, 
ConversationHandler, MessageHandler, Filters\n\nBTN_TODAY = 'Boshqa viloyatni tanlash'\nBTN_ERTAGA = \"Og'iz ochish duosi\"\nBTN_WEEK = \"Og'iz yopish duosi\"\nBTN_MONTH = \"Ramazon oyi haqida\"\nENG = 'ingliz tili'\n\nmain_buttons = ReplyKeyboardMarkup([\n [ENG], [BTN_ERTAGA, BTN_WEEK], [BTN_MONTH]\n], resize_keyboard=True)\n\nSTATE_ONE = 1\nSTATE_TWO = 2\nSTATE_THREE = 3\n\nviloyatlar = [ 'Andijon', 'Buxoro', 'Fargona', 'Jizzax', 'Namangan', 'Navoiy', 'Qashqadaryo',\n 'Xorazm', 'Samarqand', 'Sirdaryo', 'Surxondaryo', 'Toshkent', 'Qoraqalpogiston']\n\n\nbuttons = []\nv_detail = []\n\nfor j in range(0, 12,2):\n v_detail = []\n for i in range(j, j+2):\n v_detail.append(InlineKeyboardButton(f'{viloyatlar[i]}', callback_data=f'{viloyatlar[i].lower()}'))\n buttons.append(v_detail)\n\nv_detail = []\n\nv_detail.append(InlineKeyboardButton(f'{viloyatlar[12]}', callback_data=f'region12'))\nbuttons.append(v_detail)\n\n\n\ndef start(update, context):\n user = update.message.from_user\n update.message.reply_html('Salom {} botimizga xush kelibsiz'\n .format(user.first_name), reply_markup=InlineKeyboardMarkup(buttons))\n return 1\n\n\ndef xayr(update, context):\n update.message.reply_text('Xayr!')\n\n\ndef inline_callback(update, context):\n malumot = update.callback_query\n viloyat = malumot.data\n infos = get_info(viloyat)\n print(infos)\n print(viloyat)\n malumotlar = ''\n for i in infos:\n malumotlar += i + '\\n'\n malumot.message.delete()\n malumot.message.reply_html(f'{malumotlar}', reply_markup=main_buttons)\n return 2\n\n\ndef today(update, context):\n update.message.reply_text('Boshqa viloyatni tanlang', reply_markup=InlineKeyboardMarkup(buttons))\n return 1\n\n\ndef ertaga(update, context):\n malumot = 'Ro‘za tutish (saharlik, og‘iz yopish) duosi\\n\\\nNavaytu an asuvma sovma shahro ramazona minal fajri ilal mag‘ribi, xolisan lillahi ta’aalaa, Allohu akbar.\\n\\\nMa’nosi: Ramazon oyining ro‘zasini subhdan to kun botguncha tutmoqni niyat qildim.Xolis Alloh uchun Alloh buyukdir.'\n update.message.reply_html(malumot)\n return 1\n\ndef haftalik(update, context):\n malumot = ''\n update.message.reply_text(malumot)\n return 3\n\n\ndef oylik(update, context):\n malumot = ''\n update.message.reply_text(malumot)\n return 3\n\ndef main():\n updater = Updater('5537275802:AAGaVLbzpIGljkYlTEGB7NQTliIwATJLPYM', use_context=True)\n\n dispetcher = updater.dispatcher\n\n # dispetcher.add_handler(CommandHandler('start', start))\n # dispetcher.add_handler(CommandHandler('xayr', xayr))\n\n # dispetcher.add_handler(CallbackQueryHandler(inline_callback))\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n states={\n STATE_ONE: [CallbackQueryHandler(inline_callback)],\n STATE_TWO: [\n MessageHandler(Filters.regex('^(' + BTN_TODAY + ')$'), today),\n MessageHandler(Filters.regex('^(' + BTN_ERTAGA + ')$'), ertaga),\n MessageHandler(Filters.regex('^(' + BTN_WEEK + ')$'), haftalik),\n MessageHandler(Filters.regex('^(' + BTN_MONTH + ')$'), oylik)\n ],\n STATE_THREE: [CommandHandler('xayr', xayr)]\n },\n fallbacks=[CommandHandler('start', start)]\n )\n\n dispetcher.add_handler(conv_handler)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sadullo-o/telegrambot_darslari","sub_path":"lesson25.py","file_name":"lesson25.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"72390073138","text":"from alimata.core.board import Board\nfrom 
alimata.core.core import PIN_MODE, STEPPER_TYPE\nfrom typing import Union\n\nfrom abc import ABC\n\n\nclass Actuator(ABC):\n\n # Constructor of the class Actuator\n def __init__(self,\n pin: Union[str, int, list],\n board: Board,\n type_: PIN_MODE,\n min_pulse: int = 544,\n max_pulse: int = 2400,\n stepper_type: Union[STEPPER_TYPE, None] = None):\n ''' Returns None exept for the stepper'''\n\n\n # Create Private Attributes\n self.__board = board\n self.__pin = pin\n self.__type = type_\n self.__min_pulse = min_pulse\n self.__max_pulse = max_pulse\n self.__stepper_type = stepper_type\n\n # Set the pin and other properties of the actuator and save the returned id if present\n self.__id = self.board.set_pin_mode(\n pin=self.pin,\n type_=self.__type,\n min_pulse=self.__min_pulse,\n max_pulse=self.__max_pulse,\n stepper_type=self.__stepper_type)\n\n @property\n def id(self) -> int:\n '''Returns the id of the actuator'''\n return self.__id\n \n @property\n def board(self) -> Board:\n '''Returns the board of the actuator'''\n return self.__board\n \n @property\n def pin(self) -> Union[str, int, list]:\n '''Returns the pin of the actuator'''\n return self.__pin\n","repo_name":"ALIVEcode/alimata","sub_path":"alimata/actuators/actuator.py","file_name":"actuator.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"8936713983","text":"\"\"\"\n# uci_bootcamp_2021/examples/basic_variables.py\nModule containing examples on how to use variables\n\"\"\"\n\n# Assigning literals to variables.\n\nlength = 3\nwidth = 2\n\n# Using variables in expressions, and assigning the result to another variable.\narea = length * width\n\n# Using variables in function calls.\nprint(area)\n# 6\n\n# Demonstrating that variables can be re-assigned to different types.\nfoo = 42\nprint(type(foo))\n# \nfoo = \"I like pizza.\"\nprint(type(foo))\n# \n","repo_name":"theunkn0wn1/uci-bootcamp-2021","sub_path":"uci_bootcamp_2021/examples/basic_variables.py","file_name":"basic_variables.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74200612018","text":"import os\nimport sys\nimport json\nimport time\nimport unittest\n\nimport run_devpi\n\nBASE_PATH = os.path.dirname(os.path.abspath(__name__))\n# We use testpkg as a sample Python module to publish.\nTEST_PACKAGE_PATH = os.path.join(BASE_PATH, 'testpkg')\n\n\nclass DevpiTestCase(unittest.TestCase):\n basic_input = {\n \"workspace\": {\n \"path\": TEST_PACKAGE_PATH,\n },\n \"vargs\": {\n \"server\": \"http://localhost:3141/\",\n \"index\": \"root/devpitest\",\n \"username\": \"root\",\n \"password\": \"\",\n }\n }\n # We'll override the default clientdir while creating our index below.\n default_clientdir = '/tmp/devpi-testclientdir'\n\n @classmethod\n def setUpClass(cls):\n # We'll only do this once so we're not hammering the server if we\n # grow this test suite.\n cls._wait_for_devpi_to_start(cls.basic_input, cls.default_clientdir)\n\n def setUp(self):\n self.old_argv_val = sys.argv\n\n def tearDown(self):\n sys.argv = self.old_argv_val\n\n @classmethod\n def _wait_for_devpi_to_start(cls, input_dict, clientdir):\n \"\"\"\n devpi is a bit... pokey while starting. 
We'll just harass it until\n it responds before doing the rest of the tests.\n \"\"\"\n retries_left = 30\n while retries_left > 0:\n try:\n run_devpi.select_server(\n input_dict['vargs']['server'], clientdir=clientdir)\n except SystemExit:\n retries_left -= 1\n time.sleep(1)\n continue\n return\n\n def _ensure_test_index_exists(self, input_dict, clientdir):\n \"\"\"\n Since Drone fires up a new devpi server for each test run, we'll\n need to create an index before we can upload.\n \"\"\"\n t_vargs = input_dict['vargs']\n run_devpi.select_server(\n t_vargs['server'], clientdir=clientdir)\n run_devpi.login(\n t_vargs['username'], t_vargs['password'],\n clientdir=self.default_clientdir)\n try:\n run_devpi.create_index(\n t_vargs['index'], clientdir=clientdir)\n except SystemExit:\n pass\n\n def test_upload(self):\n \"\"\"\n Tests a simple package upload to an existing DevPi server.\n \"\"\"\n self._ensure_test_index_exists(\n self.basic_input, self.default_clientdir)\n sys.argv = ['--', json.dumps(self.basic_input)]\n run_devpi.main()\n\n\nclass ValidationTestCase(unittest.TestCase):\n\n def setUp(self):\n self.basic_input = {\n \"workspace\": {\n \"path\": TEST_PACKAGE_PATH,\n },\n \"vargs\": {\n \"server\": \"http://localhost:3141/\",\n \"index\": \"root/devpitest\",\n \"username\": \"root\",\n \"password\": \"\",\n }\n }\n\n def test_vargs_server_validation(self):\n \"\"\"\n Tests validation for vargs server keyword.\n \"\"\"\n vargs = self.basic_input.copy()['vargs']\n # Start the party with something weird.\n vargs['server'] = 'blah'\n self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)\n # Why not?\n vargs['server'] = None\n self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)\n vargs['server'] = ''\n self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)\n # Protocol isn't included.\n vargs['server'] = 'somehost.com/'\n self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)\n # Relative paths aren't useful.\n vargs['server'] = '/somewhere'\n self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)\n # As if the user didn't pass it at all.\n del vargs['server']\n self.assertRaises(SystemExit, run_devpi.check_vargs, vargs)\n # These should all be valid.\n vargs['server'] = 'http://test.com/'\n self.assertIsNone(run_devpi.check_vargs(vargs))\n vargs['server'] = 'http://test.com/devpi/'\n self.assertIsNone(run_devpi.check_vargs(vargs))\n vargs['server'] = 'http://test.com:3141/'\n self.assertIsNone(run_devpi.check_vargs(vargs))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"drone-plugins/drone-devpi","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"16488985952","text":"from ways.SubstitutionCipher import SubstitutionCipher\nfrom ways.SubstitutionNgramCipher import SubstitutionNgramCipher\nfrom ways.Vigenere import Vigenere\n\n# # SUBSTITUTION CIPHER\n# key = 'j1r5t78yEaB2CDFGHIJKLMN3O4P6QS9UVWXZa0bcdefghjklmnopqrstuvwxyz'\n\n# plaintext = 'Enter a message To Encrypt: '\n# ciphertext = SubstitutionCipher().encrypt_message(plaintext, key)\n# print('Encrypted message:', ciphertext)\n\n# decrypted_message = SubstitutionCipher().decrypt_message(ciphertext, key)\n# print('Decrypted message:', decrypted_message)\n\n# # SUBSTITUTION NGRAM CIPHER\n# cipher = SubstitutionNgramCipher()\n# # key = 'GYlj3yV5bMeNLRc0hTziSH9vW2JdqakZtgXPE f7oDAUrw61IFpCsuBKQn4xm8O'\n# key = cipher.createKey()\n# 
if(cipher.containingDuplicate(key=key)):\n#     print('The key cannot contain duplicate character')\n# elif(len(key) != cipher.fixed_length_key):\n#     print(\"the key length must be {}\".format(cipher.fixed_length_key))\n# else:\n#     plaintext = 'Enter a message To Encrypt: '\n#     print(\"encrypt with key: {}\".format(key))\n#     if (len(plaintext) % 2 ==1):\n#         plaintext += \" \"\n    \n#     ciphertext = cipher.encrypt(plaintext, key)\n#     print('Encrypted message:', ciphertext)\n\n#     decrypted_message = cipher.decrypt(ciphertext, key)\n#     print('Decrypted message:', decrypted_message)\n\n# # Vigenere\ncipherV = Vigenere()\nkey = cipherV.generate_key(10)\nplaintext = \"This is a secret message123!\"\nciphertext = cipherV.encrypt_vigenere(plaintext, key)\ndecrypted_text = cipherV.decrypt_vigenere(ciphertext, key)\n\nprint(\"Plaintext:\", plaintext)\nprint(\"Key:\", key)\nprint(\"Ciphertext:\", ciphertext)\nprint(\"Decrypted text:\", decrypted_text)","repo_name":"Minh-Khoi/EncryptDecription","sub_path":"Python/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35767632150","text":"# -*- coding:utf-8 -*-\r\n#!/usr/bin/env python 3.7\r\n# Python version 2.7.16 or 3.7.6\r\n'''\r\n# FileName: YamlToCase.py\r\n# Author : YuYanQing\r\n# Desc: Parse yaml\r\n# Date: 2021/2/1 0:37\r\n'''\r\n\r\nimport os,yaml\r\nimport BaseSetting\r\nfrom Utils.OkHttps import OpenServlet\r\n\r\nclass OperYaml:\r\n\tdef __init__(self, yamlPath=None,params=None):\r\n\t\t\"\"\"\r\n\t\tSet the yaml file path\r\n\t\t:param yamlPath:\r\n\t\t\"\"\"\r\n\t\tself.yamlPath = yamlPath\r\n\t\tself.headers = OpenServlet().set_headers(params)\r\n\r\n\tdef statist_case(self):\r\n\t\t\"\"\"\r\n\t\tRead the Yaml file and convert it to a dict\r\n\t\t:return: \r\n\t\t\"\"\"\r\n\t\twith open(self.yamlPath, 'r', encoding='utf-8') as file:\r\n\t\t\tcontents = file.read()\r\n\t\t\ttestCase_dict = yaml.safe_load(contents)\r\n\t\t\tcase_list = []\r\n\t\t\tfor caseName, caseInfo in testCase_dict.items():\r\n\t\t\t\tnew_dict = {}\r\n\t\t\t\tnew_dict[caseName] = caseInfo\r\n\t\t\t\tcase_list.append(new_dict)\r\n\t\t\treturn case_list\r\n\r\nif __name__ == \"__main__\":\r\n\tcasePath = os.path.join(BaseSetting.AbsPath, \"YamlCase\", \"Login.yaml\")\r\n\tprint(OperYaml(casePath).statist_case())\r\n\r\n\r\n\r\n","repo_name":"kamalyes/uiatest","sub_path":"Utils/YamlToCase.py","file_name":"YamlToCase.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22117356344","text":"import logging\nimport unittest\nimport json\nfrom flask_api import status  # HTTP Status Codes\nfrom app.models import Product, Review\nfrom app import server\n\n######################################################################\n#  T E S T   C A S E S\n######################################################################\n\n\nclass TestProductServer(unittest.TestCase):\n    \"\"\" product Server Tests \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\" Run once before all tests \"\"\"\n        server.app.debug = False\n        server.initialize_logging(logging.ERROR)\n\n    def setUp(self):\n        \"\"\" Runs before each test \"\"\"\n        self.app = server.app.test_client()\n        server.initialize_logging()\n        server.init_db()\n        server.data_reset()\n        server.data_load({\"name\": \"iPhone 8\", \"price\": 649})\n        server.data_load({\"name\": \"MacBook Pro\", \"price\": 1799})\n\n    def tearDown(self):\n        \"\"\" Runs after each test \"\"\"\n        server.Product.catalog.remove_all()\n\n    def test_index(self):\n        \"\"\" Test the Home Page\"\"\"\n        
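# The index route should return 200 and advertise the service name in its body.\n        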
resp = self.app.get('/')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertIn('Products RESTful Service', resp.data)\n\n def test_get_product_list(self):\n \"\"\" Get a list of products \"\"\"\n resp = self.app.get('/products')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)\n\n def test_get_product(self):\n \"\"\" Get one product \"\"\"\n resp = self.app.get('/products/1')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data['name'], 'iPhone 8')\n\n def test_get_product_not_found(self):\n \"\"\" Get a product thats not found \"\"\"\n resp = self.app.get('/products/-1')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_get_product_none_in_list(self):\n \"\"\" Search for a product in a catalog with no products \"\"\"\n server.Product.catalog.remove_all()\n resp = self.app.get('/products/1')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n # Ensure there are no products in the catalog:\n resp = self.app.get('/products')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 0)\n\n def test_create_product(self):\n \"\"\" Create a product \"\"\"\n # save the current number of products for later comparison\n product_count = self.get_product_count()\n # add a new product\n new_product = {'name': 'samsung hdtv', 'price': 499}\n data = json.dumps(new_product)\n resp = self.app.post('/products', data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n # Make sure location header is set\n location = resp.headers.get('Location', None)\n self.assertIsNotNone(location)\n # Check the data is correct\n new_json = json.loads(resp.data)\n self.assertEqual(new_json['name'], 'samsung hdtv')\n self.assertEqual(new_json['price'], 499)\n # check that count has gone up and includes the new product\n resp = self.app.get('/products')\n data = json.loads(resp.data)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(len(data), product_count + 1)\n self.assertIn(new_json, data)\n\n def test_create_product_with_id(self):\n \"\"\" Create a product passing in an id \"\"\"\n # add a new product\n new_product = {'name': 'sony vaio', 'price': 549, 'id': 2}\n data = json.dumps(new_product)\n resp = self.app.post('/products', data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n # Make sure location header is set\n location = resp.headers.get('Location', None)\n self.assertIsNotNone(location)\n # Check the data is correct\n new_json = json.loads(resp.data)\n self.assertEqual(new_json['name'], 'sony vaio')\n self.assertEqual(new_json['price'], 549)\n self.assertEqual(new_json['id'], 2)\n\n def test_create_product_with_missing_required_attribute(self):\n \"\"\" Create a product with the name missing (required attribute) \"\"\"\n new_product = {'price': 550}\n data = json.dumps(new_product)\n resp = self.app.post('/products', data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_update_product(self):\n \"\"\" Update a product using its id \"\"\"\n new_product = {'name': 'sony vaio', 'price': 549}\n data = json.dumps(new_product)\n # Update product with id 0:\n resp = self.app.put('/products/1', data=data,\n content_type='application/json')\n 
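# Expect 200 on the update, then fetch the product again to confirm the change persisted.\n        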
self.assertEqual(resp.status_code, status.HTTP_200_OK)\n resp = self.app.get('/products/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_json = json.loads(resp.data)\n self.assertEqual(new_json['name'], 'sony vaio')\n self.assertEqual(new_json['price'], 549)\n\n def test_update_product_with_no_name(self):\n \"\"\" Update a product with missing name (required field) \"\"\"\n new_product = {'price': 500}\n data = json.dumps(new_product)\n resp = self.app.put('/products/1', data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_update_product_not_found(self):\n \"\"\" Update a product that can't be found \"\"\"\n new_product = {\"name\": \"Polaroid camera\", \"price\": 55}\n data = json.dumps(new_product)\n resp = self.app.put('/products/-1', data=data,\n content_type='application/json')\n self.assertEquals(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_add_product_review(self):\n \"\"\" Review product \"\"\"\n new_review = {\"username\": \"Grumpy Grumperson\",\n \"date\": \"2018/04/05\",\n \"score\": 1,\n \"detail\": \"Can't stand it\"}\n data = json.dumps(new_review)\n resp = self.app.put(\"products/1/review\", data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n resp = self.app.get('/products/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_json = json.loads(resp.data)\n self.assertEqual(new_json['review_list'][-1]\n ['username'], 'Grumpy Grumperson')\n\n def test_add_nonexistent_product_review(self):\n \"\"\" Add review to an nonexistent product \"\"\"\n new_review = {\"username\": \"Grumpy Grumperson\",\n \"date\": \"2018/04/05\",\n \"score\": 2,\n \"detail\": \"Can't stand it\"}\n data = json.dumps(new_review)\n resp = self.app.put(\"products/-1/review\", data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_add_product_review_with_bad_attributes(self):\n \"\"\" Review product with bad attributes \"\"\"\n new_review = {\"badattribute1\": \"Grumpy Grumperson\", \"badattribute2\": 1}\n data = json.dumps(new_review)\n resp = self.app.put(\"products/1/review\", data=data,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_add_inexistent_product_review(self):\n \"\"\" Review inexistent product \"\"\"\n new_review = {\"username\": \"Grumpy Grumperson\", \"score\": 1}\n data = json.dumps(new_review)\n resp = self.app.put(\"products/-1/review\", data=data,\n content_type='application/json')\n self.assertEquals(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_delete_product(self):\n \"\"\" Delete a product that exists \"\"\"\n # save the current number of products for later comparrison\n product_count = self.get_product_count()\n # delete a product\n resp = self.app.delete('/products/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(resp.data), 0)\n new_count = self.get_product_count()\n self.assertEqual(new_count, product_count - 1)\n\n def test_get_nonexisting_product(self):\n \"\"\" Get a product that doesn't exist \"\"\"\n resp = self.app.get('/products/5')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_query_one_product(self):\n \"\"\" Get one product with keyword \"\"\"\n resp = self.app.get('/products', 
query_string='name=iPhone')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n # logging.info('data length: ' + str(len(data)))\n self.assertTrue(len(data) > 0)\n # logging.info('data: ' + str(data))\n\n # note assertIn is to determine if one string is the substring of another\n # so use resp.data instead\n self.assertIn('iPhone 8', resp.data)\n self.assertNotIn('MacBook Pro', resp.data)\n query_item = data[0]\n self.assertEqual(query_item['name'], 'iPhone 8')\n\n def test_method_not_allowed(self):\n \"\"\" Call a Method thats not Allowed \"\"\"\n resp = self.app.post('/products/0')\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_sort_by_lowest_price(self):\n \"\"\"Show the product with the lowest price first\"\"\"\n resp = self.app.get('/products?sort=price')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data[0]['name'], 'iPhone 8')\n self.assertEqual(data[1]['name'], 'MacBook Pro')\n\n def test_sort_by_highest_price(self):\n \"\"\"Show the product with the highest price first\"\"\"\n resp = self.app.get('/products?sort=price-')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data[0]['name'], 'MacBook Pro')\n self.assertEqual(data[1]['name'], 'iPhone 8')\n\n def test_sort_by_highest_review(self):\n \"\"\"Show the product with the highest review first\"\"\"\n watch = Product(name=\"I Watch\", price=329)\n watch.set_id(2)\n watch.set_image_id(\"001\")\n watch.set_description(\"Smart Watch\")\n watch_review_list = [Review(username=\"applefan\", score=4, detail=\"OK\"),\n Review(username=\"helloworld\",\n score=4, detail=\"As expected\"),\n Review(username=\"pythonfan\",\n score=3, detail=\"So So\")]\n watch.set_review_list(watch_review_list)\n server.Product.catalog.save(watch)\n self.assertEqual(watch.get_name(), \"I Watch\")\n self.assertEqual(watch.get_price(), 329)\n self.assertEqual(watch.get_id(), 2)\n self.assertEqual(watch.get_image_id(), \"001\")\n self.assertEqual(watch.get_description(), \"Smart Watch\")\n self.assertEqual(watch.get_review_list(), watch_review_list)\n tv = Product(name=\"Apple TV\", price=9999)\n tv.set_id(3)\n tv.set_image_id(\"001\")\n tv.set_description(\"Hi-end TV\")\n tv_review_list = [Review(username=\"applelover\", score=5, detail=\"Excellent\"),\n Review(username=\"tvfan\", score=5,\n detail=\"Loving this!!\"),\n Review(username=\"devops team member\",\n score=5, detail=\"Highly recommend!\"),\n Review(username=\"nyu\", score=5, detail=\"Nice!\")]\n tv.set_review_list(tv_review_list)\n server.Product.catalog.save(tv)\n self.assertEqual(tv.get_name(), \"Apple TV\")\n self.assertEqual(tv.get_price(), 9999)\n self.assertEqual(tv.get_id(), 3)\n self.assertEqual(tv.get_image_id(), \"001\")\n self.assertEqual(tv.get_description(), \"Hi-end TV\")\n self.assertEqual(tv.get_review_list(), tv_review_list)\n resp = self.app.get('/products?sort=review')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data[0]['name'], 'Apple TV')\n self.assertEqual(data[1]['name'], 'I Watch')\n\n def test_sort_by_alphabetical_order(self):\n \"\"\"Show the product in alphabetical order\"\"\"\n resp = self.app.get('/products?sort=name')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data[0]['name'], 'iPhone 8')\n self.assertEqual(data[1]['name'], 'MacBook Pro')\n\n def 
test_sort_by_reverse_alphabetical_order(self):\n \"\"\"Show the product in reverse alphabetical order\"\"\"\n resp = self.app.get('/products?sort=name-')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data[1]['name'], 'iPhone 8')\n self.assertEqual(data[0]['name'], 'MacBook Pro')\n\n\n######################################################################\n# Utility functions\n######################################################################\n\n def get_product_count(self):\n \"\"\" save the current number of products \"\"\"\n resp = self.app.get('/products')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)\n\n\n######################################################################\n# M A I N\n######################################################################\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"DevOps-Squads-Spring-2018/products","sub_path":"tests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":14232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5097196282","text":"# coding:utf-8\nfrom lxbasic import bsc_core\n\nimport lxsession.commands as ssn_commands\n\nuser = bsc_core.SystemMtd.get_user_name()\n\nj_option_opt = bsc_core.ArgDictStringOpt(\n option=dict(\n option_hook_key='rsv-task-batchers/asset/gen-surface-export',\n #\n file='/production/shows/nsa_dev/assets/chr/td_test/user/work.dongchangbao/katana/scenes/surfacing/td_test.srf.surfacing.v000_002.katana',\n user=bsc_core.SystemMtd.get_user_name(),\n #\n choice_scheme='asset-katana-publish',\n #\n # td_enable=True,\n # rez_beta=True,\n )\n)\n#\nssn_commands.set_option_hook_execute_by_deadline(\n option=j_option_opt.to_string()\n)\n","repo_name":"no7hings/lxdcc_rsc","sub_path":"script/python/.resources/option-hooks/rsv-task-batchers/.test/_tst__asset_katana_gen_surface_export.py","file_name":"_tst__asset_katana_gen_surface_export.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"24935776810","text":"\n\"\"\"\n# Word Break\n\nGiven a **non-empty** string *s* and a dictionary *wordDict* containing a list of **non-empty** words, determine if *s* can be segmented into a space-separated sequence of one or more dictionary words.\n\n**Note:** \n - The same word in the dictionary may be reused multiple times in the segmentation.\n - You may assume the dictionary does not contain duplicate words.\n\n**Example 1:** \n```\nInput: s = \"leetcode\", wordDict = [\"leet\", \"code\"]\nOutput: true\nExplanation: Return true because \"leetcode\" can be segmented as \"leet code\".\n```\n\n**Example 2:** \n```\nInput: s = \"applepenapple\", wordDict = [\"apple\", \"pen\"]\nOutput: true\nExplanation: Return true because \"applepenapple\" can be segmented as \"apple pen apple\".\n Note that you are allowed to reuse a dictionary word.\n```\n\n**Example 3:** \n```\nInput: s = \"catsandog\", wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\nOutput: false\n```\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n # # Time Limit Exceeded\n # if s == \"\":\n # return True\n # for i in range(len(s)):\n # word = s[ : i + 1]\n # # print(word)\n # if word != \"\" and word in wordDict and self.wordBreak(s[i + 1 : ], wordDict):\n # return True\n # 
return False\n\n \n # Reference: https://leetcode.com/problems/word-break/discuss/870794/Faster-than-99.6-Python\n if any([c not in set(''.join(wordDict)) for c in s]):\n return False\n \n wordDict = sorted(wordDict, key = len, reverse = True)\n seen = set()\n\n def backtracking(s, wordDict):\n if s in wordDict:\n return True\n if s in seen:\n return False\n seen.add(s)\n for w in wordDict:\n if s.startswith(w) and backtracking(s[len(w) : ], wordDict):\n return True\n return False\n\n return backtracking(s, wordDict)\n\n# True\nprint(Solution().wordBreak(\"leetcode\", [\"leet\", \"code\"]))\n\n# True\nprint(Solution().wordBreak(\"applepenapple\", [\"apple\", \"pen\"]))\n\n# False\nprint(Solution().wordBreak(\"catsandog\", [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]))\n\n# False\nprint(Solution().wordBreak(\n \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab\", \n [\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\", \"aaaaaaaaaa\"]\n))\n\n# False\nprint(Solution().wordBreak(\n \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\", \n [\"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\", \"aaaaaaaaaa\", \"ba\"]\n))\n\n\n","repo_name":"W-46ec/LeetCode","sub_path":"30-Day LeetCoding Challenge/2020_09/29_Word Break.py","file_name":"29_Word Break.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"21937258155","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport os\nimport requests\nimport errno\n\n\nclass Myntra_products:\n\n def __init__(self):\n self.driver = webdriver.Chrome(\n \"F:\\\\Ignis Tech Solutions\\\\venv\\\\chromedriver.exe\")\n self.product_brand_name = []\n self.product_name = []\n self.product_mrp_price = []\n self.product_sls_price = []\n\n def getLinks(self):\n\n self.product_links_list = list()\n\n for i in range(2, 3): # you can change range of for loop based on number of pages + 1\n\n # Iterating over all pages to get all products links\n main_url = \"https://www.myntra.com/women-shrugs?p=\"\n url = f\"{main_url}{i}\"\n self.driver.get(url)\n src = self.driver.page_source\n soup = BeautifulSoup(src, 'lxml')\n for li in soup.find_all(class_='product-base'):\n links = li.a.get('href')\n self.product_links_list.append(links)\n\n print(\"Total Products Are:\", len(self.product_links_list))\n\n return self.product_links_list\n\n def getProductInfo(self):\n\n img_attrs_list = list()\n img_links = list()\n self.image_name_list = list()\n self.sizes_all_list = list()\n sizes_list = list()\n sizes_dict = {}\n self.product_desc = list()\n self.product_sizechart = list()\n\n x = 0\n \n for items in self.product_links_list:\n\n x += 1\n print(f\"getting all info {x}/{len(self.product_links_list)}\")\n self.driver.get(\"https://www.myntra.com/\"+items)\n src = self.driver.page_source\n soup = BeautifulSoup(src, 'lxml')\n try:\n # This is for getting product brand name, product name, MRP and Sales Price, Size\n for a in soup.find_all('div', attrs={'class': 'pdp-description-container'}):\n bname = a.find('h1', attrs={'class': 'pdp-title'})\n self.pname = a.find('h1', attrs={'class': 'pdp-name'})\n mrp = a.find('span', attrs={'class': 'pdp-mrp'})\n sls 
= a.find('span', attrs={'class': 'pdp-price'})\n img = a.find('div', attrs={'class': 'image-grid-image'})\n print(img)\n\n # print(mrp.text)\n\n self.product_brand_name.append(bname.text)\n self.product_name.append(self.pname.text)\n if (sls and mrp != None):\n sls_temp = sls.text\n sls_int = sls_temp.replace(\"Rs. \", \"\")\n\n mrp_temp = mrp.text\n mrp_int = mrp_temp.replace(\"Rs. \", \"\")\n\n sls_int = int(sls_int)\n if sls_int < 500:\n new_sls = sls_int * 0.28\n elif sls_int >= 500 and sls_int <= 1000:\n new_sls = sls_int * 0.42\n elif sls_int >= 2000:\n new_sls = sls_int * 0.55\n elif sls_int >= 3000:\n new_sls = sls_int * 0.60\n elif sls_int >= 5000:\n new_sls = sls_int * 0.72\n elif sls_int >= 10000:\n new_sls = sls_int * 0.78\n\n self.product_mrp_price.append(int(mrp_int))\n self.product_sls_price.append(round(new_sls))\n\n else:\n self.product_mrp_price.append(0)\n self.product_sls_price.append(0)\n except:\n self.product_brand_name.append(0)\n self.product_name.append(0)\n self.product_mrp_price.append(0)\n self.product_sls_price.append(0)\n continue\n\n # This is for size atributes\n sizes_list.clear()\n sizes_dict.clear()\n for a in soup.find_all('div', attrs={'class': 'size-buttons-tipAndBtnContainer'}):\n size_name = a.find(\n 'p', attrs={'class': 'size-buttons-unified-size'})\n # print(size_name.text)\n if size_name != None:\n sizes_list.append(size_name.text)\n else:\n sizes_list.append(0)\n # print(sizes_list)\n sizes_dict['size'] = sizes_list.copy()\n # print(sizes_dict)\n self.sizes_all_list.append(sizes_dict.copy())\n # print(self.sizes_all_list)\n\n # # This is for getting product images link\n # for a in soup.find_all('div', attrs={'class', 'image-grid-imageContainer'}):\n # img = a.find('div', attrs={'class': 'image-grid-image'})\n # # print(img)\n # img_attrs_list.append(str(img))\n # break\n # #print(len(img_attrs_list))\n\n # # clearing string and getting product images links\n # for items_img in img_attrs_list:\n # repl = items_img.replace(\n # '''
''', \"\")\n # # print(rep)\n # img_links.append(rep)\n # # print(len(img_links))\n\n # # This is for creating folder for storing product images\n # foldername = f\"F:/Ignis Tech Solutions/venv/ProductsImages1/\"\n # if not os.path.exists(os.path.dirname(foldername)):\n # try:\n # os.makedirs(os.path.dirname(foldername))\n # except OSError as e: # Guard against race condition\n # if e.errno != errno.EEXIST:\n # raise\n # # This is for writing images to folder\n # for link in img_links:\n # print(link)\n # try:\n # with open(f\"F:\\\\Ignis Tech Solutions\\\\venv\\\\ProductsImages1\\\\{self.pname.text}-image1.jpg\", 'wb') as f:\n # # print(link)\n # image = requests.get(link)\n # f.write(image.content)\n # break\n # except:\n # continue\n # print(f\"saved successfully...{self.pname.text}.jpg\")\n\n # This is for product description\n try:\n for dsc in soup.find_all('div', attrs={'class': 'pdp-productDescriptorsContainer'}):\n if dsc != None:\n self.product_desc.append(dsc)\n else:\n self.product_desc.append(0)\n except:\n self.product_desc.append(0)\n continue\n\n # This is for size chart\n try:\n btn = self.driver.find_element_by_class_name(\n 'size-buttons-show-size-chart')\n btn.click()\n page = self.driver.page_source\n page_soup = BeautifulSoup(page, 'lxml')\n chrt = page_soup.find(\n 'div', attrs={'class': 'sizeChartWeb-info'})\n if chrt != None:\n self.product_sizechart.append(chrt)\n else:\n self.product_sizechart.append(0)\n except:\n self.product_sizechart.append(0)\n continue\n\n # This is for getting all images names\n # for names in self.product_name:\n # self.image_name_list.append(f\"ProductsImages\\\\{names}-image1.jpg\")\n # print(len(self.image_name_list))\n\n # print(\"brand name:\",len(self.product_brand_name))\n # print(\"product name:\",len(self.product_name))\n # print(\"sales price:\",len(self.product_sls_price))\n # print(\"MRP price:\",len(self.product_mrp_price))\n # print(\"desc:\",len(self.product_desc))\n # print(\"sizechart:\",len(self.product_sizechart))\n # print(\"sizes:\",len(self.sizes_all_list))\n # print(\"image names:\",len(self.image_name_list))\n\n def write_to_csv(self):\n print(\"writing to csv....\")\n df = pd.DataFrame({'Brand Name': self.product_brand_name, 'Product Name': self.product_name,\n 'MRP Price': self.product_mrp_price, 'Sales Price': self.product_sls_price,\n # 'Images': self.image_name_list,\n 'Product Attributes': self.sizes_all_list,\n 'Product Description': self.product_desc,\n 'Product Size Chart': self.product_sizechart})\n # df.transpose()\n df.to_csv('product_info_list.csv', index=False, encoding='utf-8')\n print(\"completed.\")\n self.driver.close()\n\n\nm = Myntra_products()\nm.getLinks()\nm.getProductInfo()\nm.write_to_csv()\n","repo_name":"omkarmhaske17/Scrape_Myntra_Products","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35489316749","text":"from Models.Foto import Foto\n\nclass FotoHelper(Foto):\n def montaFotos(self, itens, id = None):\n\n Fotos = []\n for foto in itens:\n fotos = Foto(\n str(foto.get('_id')),\n foto.get('Url'),\n foto.get('Tipo'),\n foto.get('Local'),\n foto.get('Album'),\n FotoHelper.montaTags(self, foto.get('Tags'))\n )\n Fotos.append(fotos.__dict__)\n if id:\n return fotos.__dict__\n else:\n return Fotos\n\n def montaTags(self, foto):\n Tags = []\n for tag in foto:\n Item_Tag = tag\n Tags.append(Item_Tag)\n return 
Tags","repo_name":"GochiFelipe/SiteMateus","sub_path":"ApiMateus/Helper/FotoHelper.py","file_name":"FotoHelper.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33725655845","text":"import tensorflow as tf\nfrom tensorpack import *\nfrom tf_utils.ar_layers import *\nfrom tf_utils.common import *\n\n\ndef predict(self, z):\n \"\"\"\n :param self: model\n :param z ~ N(0, I) shape: [M, n_z] == [batch_size, n_z]\n :return: batch of matrix X shape: [M, tau, C] == [batch_size, next_seq_trend, num_classes]\n \"\"\"\n l = gaussian_dense(name='fc', inputs=z, out_C=self.hps.T * self.hps.f[0])\n l = tf.reshape(l, shape=[-1, self.hps.T, self.hps.f[0]])\n\n l = conv1d(name='conv_l1', inputs=l, kernel_size=5, stride=1,\n in_C=self.hps.f[0], out_C=128)\n\n l = tf.layers.batch_normalization(inputs=l)\n l = tf.nn.elu(l)\n\n l = conv1d(name='conv_l2', inputs=l, kernel_size=5, stride=1,\n in_C=128, out_C=64)\n\n l = tf.layers.batch_normalization(inputs=l)\n l = tf.nn.elu(l)\n\n cell = tf.nn.rnn_cell.LSTMCell(num_units=self.hps.lstm_units, state_is_tuple=True) # , activation=tf.nn.tanh)\n outputs, state = tf.nn.dynamic_rnn(cell, l, sequence_length=[self.hps.T] * self.hps.M, dtype=tf.float32)\n\n x_con = tf.contrib.layers.fully_connected(inputs=outputs, num_outputs=self.hps.D)\n\n if self.hps.normalize_data in ['min_max', 'min_max_centralize']:\n x_con = tf.nn.sigmoid(x_con)\n\n fc_l1 = tf.contrib.layers.fully_connected(inputs=state.c, num_outputs=self.hps.Tau * self.hps.C)\n\n y_predict = tf.reshape(fc_l1, shape=[-1, self.hps.Tau, self.hps.C])\n y_predict = tf.sigmoid(y_predict) / tf.reduce_sum(tf.sigmoid(y_predict), axis=[-1], keepdims=True)\n return x_con, y_predict\n","repo_name":"QuangNamVu/thesis","sub_path":"models/vae/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43361292267","text":"#! 
/usr/bin/env python3\n\n\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n self.nums1 = nums1\n self.nums2 = nums2\n self.m = len(nums1)\n self.n = len(nums2)\n self.k = (self.m + self.n) // 2\n self.i_min = max(0, self.k - self.n)\n self.i_max = min(self.m, self.k)\n return self.findMedianIterator(self.i_min, self.i_max)\n\n def findMedianIterator(self, before, toward):\n if toward == before:\n #print(\"Value error, \", before, toward)\n raise ValueError\n elif toward > before:\n i = (toward + before + 1) // 2\n j = self.k - i\n else:\n i = (toward + before) // 2\n j = self.k - i\n\n if i == self.i_max or i == self.i_min:\n return self.findMedian(i, j)\n if self.nums1[i - 1] > self.nums2[j]:\n return self.findMedianIterator(i, min(before, toward))\n if self.nums1[i] < self.nums2[j - 1]:\n return self.findMedianIterator(i, max(before, toward))\n return self.findMedian(i, j)\n\n def findMedian(self, i, j):\n right_min = min(self.nums1[i:i + 1] + self.nums2[j:j + 1])\n if (self.m + self.n) % 2:\n return right_min\n left_max = max(self.nums1[i - 1:i] + self.nums2[j - 1:j])\n return (right_min + left_max) / 2.0\n\nif __name__ == '__main__':\n nums1 = [1, 2, 4, 5, 8, 9]\n nums2 = [0, 3, 6, 7, 10]\n print(Solution().findMedianSortedArrays(nums1, nums2))\n","repo_name":"hynjnk/leetcode","sub_path":"0004_median_of_two_sorted_arrays.py","file_name":"0004_median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"40685589752","text":"import asyncio\nimport signal\nimport sys\nfrom asyncio import CancelledError\nfrom pathlib import Path\nfrom typing import Any\n\nimport pkg_resources\nfrom aiohttp import ClientOSError, ServerDisconnectedError\nfrom pyfixtures import FixtureScope, runs_in_new_fixture_context\nfrom structlog import get_logger\nfrom virtool_core.logging import configure_logs\nfrom virtool_core.redis import configure_redis\n\nfrom virtool_workflow import execute\nfrom virtool_workflow.api.jobs import ping\nfrom virtool_workflow.hooks import (\n on_failure,\n on_cancelled,\n on_success,\n on_step_start,\n on_terminated,\n on_error,\n)\nfrom virtool_workflow.runtime.discovery import load_workflow_and_fixtures\nfrom virtool_workflow.runtime.events import Events\nfrom virtool_workflow.runtime.redis import (\n get_next_job_with_timeout,\n wait_for_cancellation,\n)\nfrom virtool_workflow.runtime.sentry import configure_sentry\nfrom virtool_workflow.workflow import Workflow\n\nlogger = get_logger(\"runtime\")\n\n\ndef configure_builtin_status_hooks():\n \"\"\"\n Configure built-in job status hooks.\n\n Push status updates to API when various lifecycle hooks are triggered.\n\n \"\"\"\n\n @on_step_start\n async def handle_step_start(push_status):\n await push_status(state=\"running\")\n\n @on_error(once=True)\n async def handle_error(error, push_status):\n await push_status(\n stage=\"\",\n state=\"error\",\n error=error,\n max_tb=50,\n )\n\n @on_cancelled(once=True)\n async def handle_cancelled(push_status):\n await push_status(stage=\"\", state=\"cancelled\")\n\n @on_terminated\n async def handle_terminated(push_status):\n await push_status(stage=\"\", state=\"terminated\")\n\n @on_success(once=True)\n async def handle_success(push_status):\n await push_status(stage=\"\", state=\"complete\")\n\n\ndef cleanup_builtin_status_hooks():\n \"\"\"\n Clear callbacks for built-in 
status hooks.\n\n This prevents carryover of hooks between tests. Carryover won't be encountered in\n production because workflow processes exit after one run.\n\n TODO: Find a better way to isolate hooks to workflow runs.\n\n \"\"\"\n on_step_start.clear()\n on_failure.clear()\n on_cancelled.clear()\n on_success.clear()\n on_error.clear()\n on_terminated.clear()\n\n\nasync def ping_periodically(http, job, jobs_api_connection_string, job_id):\n \"\"\"\n Ping the API to keep the job alive.\n\n \"\"\"\n retries = 0\n\n try:\n while True:\n if retries > 5:\n logger.warning(\"Failed to ping server\")\n break\n\n await asyncio.sleep(0.1)\n\n try:\n await ping(http, jobs_api_connection_string, job_id)\n except (ClientOSError, ServerDisconnectedError):\n await asyncio.sleep(0.3)\n retries += 1\n continue\n\n await asyncio.sleep(5)\n except CancelledError:\n logger.info(\"Stopped pinging server\")\n\n\nasync def run_workflow(\n config: dict[str, Any],\n job_id: str,\n workflow: Workflow,\n events: Events,\n) -> dict[str, Any]:\n # Configure hooks here so that they can be tested when using `run_workflow`.\n configure_builtin_status_hooks()\n\n async with FixtureScope() as scope:\n scope[\"config\"] = config\n scope[\"job_id\"] = job_id\n\n bound_ping = await scope.bind(ping_periodically)\n\n execute_task = asyncio.create_task(execute(workflow, scope, events))\n ping_task = asyncio.create_task(bound_ping())\n\n try:\n await execute_task\n except asyncio.CancelledError:\n execute_task.cancel()\n\n ping_task.cancel()\n\n await ping_task\n await execute_task\n\n cleanup_builtin_status_hooks()\n\n return scope.get(\"results\", {})\n\n\n@runs_in_new_fixture_context()\nasync def start_runtime(\n dev: bool,\n jobs_api_connection_string: str,\n mem: int,\n proc: int,\n redis_connection_string: str,\n redis_list_name: str,\n sentry_dsn: str,\n timeout: int,\n work_path: Path,\n):\n version = pkg_resources.get_distribution(\"virtool-workflow\").version\n\n logger.info(\"Found virtool-workflow\", version=version)\n\n configure_logs(dev)\n configure_sentry(sentry_dsn)\n\n workflow = load_workflow_and_fixtures()\n\n config = dict(\n dev=dev,\n jobs_api_connection_string=jobs_api_connection_string,\n mem=mem,\n proc=proc,\n work_path=work_path,\n )\n\n async with configure_redis(redis_connection_string, timeout=15) as redis:\n try:\n job_id = await get_next_job_with_timeout(redis_list_name, redis, timeout)\n except asyncio.TimeoutError:\n # This happens due to Kubernetes scheduling issues or job cancellations. 
It\n # is not an error.\n logger.warning(\"Timed out while waiting for job\")\n sys.exit(0)\n\n events = Events()\n\n workflow_run = asyncio.create_task(run_workflow(config, job_id, workflow, events))\n\n def terminate_workflow(*_):\n events.terminated.set()\n workflow_run.cancel()\n\n signal.signal(signal.SIGTERM, terminate_workflow)\n\n def cancel_workflow(*_):\n events.cancelled.set()\n workflow_run.cancel()\n\n async with configure_redis(redis_connection_string, timeout=15) as redis:\n cancellation_task = asyncio.create_task(\n wait_for_cancellation(redis, job_id, cancel_workflow)\n )\n\n await workflow_run\n\n cancellation_task.cancel()\n await cancellation_task\n\n if events.terminated.is_set():\n sys.exit(124)\n","repo_name":"virtool/virtool-workflow","sub_path":"virtool_workflow/runtime/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"33308939870","text":"from __future__ import print_function, division\n\nimport numpy as np\nimport os\nfrom scipy.signal import convolve, argrelmax\nfrom scipy.fftpack import rfft, rfftfreq, irfft\nfrom peakdet import detect_peaks\n\n\ndef check_valleys(x, i, threshold=1):\n left = True\n right = True\n li = i-1\n ri = i+1\n while li >= 0:\n if li-1 < 0 or x[li-1] > x[li]: # then this is a valley\n left = abs(x[i]-x[li]) >= threshold\n break\n li = li-1\n\n # while ri x[ri]: #then this is a valley\n # right = abs(x[ri]-x[i])>=0.1*x[i]\n # break\n # ri=ri+1\n return left # and right\n\n\ndef cliffs(x):\n potential_boundaries = argrelmax(x)[0]\n ret = []\n for i, pb in enumerate(potential_boundaries):\n li = i-1\n left = abs(x[i]-x[0])\n while li >= 0:\n if li-1 < 0 or x[li-1] > x[li]: # then this is a valley\n left = abs(x[i]-x[li])\n break\n li = li-1\n ret.append([pb, left])\n return ret\n\n\ndef greedy_detect(x, times, num=5):\n diffs = np.array(cliffs(x))\n diffs = diffs[diffs[:, 1].argsort()]\n lim = int(len(x)/num)\n diffs = np.sort(diffs[-lim:, 0]).astype(int)\n return times[diffs]\n\n\ndef baseline_like_detect(x, times, threshold=1, min_threshold=1):\n #x = 1-np.exp(-x)\n potential_boundaries = argrelmax(x)[0]\n boundaries = []\n mean = np.mean(x[potential_boundaries])\n for i, pb in enumerate(potential_boundaries):\n if pb == 0 or pb == len(x):\n boundaries.append(pb)\n continue\n\n if x[pb] < min_threshold*mean:\n continue\n if not check_valleys(x, pb, threshold):\n continue\n # j=upper_valley(pb,valleys)\n # if j>0 and valleys[j]>pb and valleys[j-1] 0:\n y = [boundaries[0]]\n i = 0\n for j in range(1, len(boundaries)):\n if boundaries[j]-boundaries[i] >= clip:\n boundaries[i:j] = np.mean(boundaries[i:j])\n i = j\n j += 1\n\n for bound in boundaries:\n if bound != y[-1]:\n y.append(bound)\n boundaries = np.array(y)\n\n return boundaries\n\n\ndef fourier_detect(x, times, rate):\n fr = rfftfreq(len(times), 1/rate)\n y = rfft(x)\n y[fr > int(1/0.05)] = 0\n x_smoothed = irfft(y)\n return times[argrelmax(x_smoothed)[0]]\n\n\ndef auto_detect(x, times, ker_len):\n\n kernel = np.ones((int(ker_len))) / ker_len\n x_smoothed = convolve(x, kernel)\n boundaries = detect_peaks(x_smoothed, mph=np.max(x_smoothed)*0.4, mpd=2,)\n boundaries = times[boundaries]\n\n return boundaries\n\n\ndef post_process_file(\n input_file,\n output_file,\n method='baseline',\n time_file=None,\n rate=100.0,\n ker_len=3,\n clip=0.03,\n threshold=0.5,\n min_threshold=1\n):\n # Load error signal\n x = np.load(input_file)\n x = x.reshape(x.size)\n\n # 
Flatten beginning\n\n    x[:7]=0\n\n    times = np.arange(len(x))/rate\n    if time_file is not None:\n        times = np.loadtxt(time_file)\n\n    if method == 'fourier':\n        boundaries = fourier_detect(x, times, rate)\n    elif method == 'auto':\n        boundaries = auto_detect(x, times, ker_len)\n    elif method == 'manual':\n        boundaries = manual_detect(x, times, ker_len, clip, rate)\n    elif method == 'baseline':\n        boundaries = baseline_like_detect(\n            x,\n            times,\n            threshold=threshold,\n            min_threshold=min_threshold\n        )\n    elif method == 'greedy':\n        boundaries = greedy_detect(x, times, threshold)\n    elif method == 'none':\n        boundaries = times[argrelmax(x)[0]]\n    else:\n        boundaries = fourier_detect(x, times, rate)\n    boundaries=list(boundaries)\n    if not (len(x)-1)/rate in boundaries:\n        boundaries.append((len(x)-1)/rate)\n    if not 0 in boundaries:\n        boundaries=[0]+boundaries\n    np.savetxt(output_file, boundaries, fmt=\"%.2f\")\n\n\ndef run(\n    input_dir,\n    output_dir,\n    method='baseline',\n    time_dir=None,\n    rate=100.0,\n    ker_len=3,\n    clip=0.03,\n    threshold=0.5,\n    min_threshold=1\n):\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n    for f in os.listdir(input_dir):\n        if f.endswith('_loss.npy'):\n\n            ifile = input_dir+'/'+f\n            ofile = output_dir+'/'+f[:-9]+'.syldet'\n\n            if time_dir is not None:\n                tfile = time_dir + f[:-9]+'times'\n            else:\n                tfile = None\n\n            post_process_file(\n                ifile,\n                ofile,\n                method=method,\n                time_file=tfile,\n                rate=rate,\n                ker_len=ker_len,\n                clip=clip,\n                threshold=threshold,\n                min_threshold=min_threshold\n            )\n","repo_name":"pmichel31415/neural-seg","sub_path":"post_process_rnn_error.py","file_name":"post_process_rnn_error.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"20157831222","text":"from datetime import datetime\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nfrom mongoengine import connect, Document, EmbeddedDocument, \\\n    StringField, IntField, FloatField, DateTimeField, ListField, \\\n    EmbeddedDocumentField\n\n\n# connect('students')\n# connect('students', host='192.168.1.35', port=27017)\nconnect('students', host='mongodb://localhost/students')\n\nclass Grade(EmbeddedDocument):\n    ''' A student's grade '''\n    name = StringField(required=True)\n    score = FloatField(required=True)\n\n\nSEX_CHOICES = (\n    ('female', '女'),\n    ('male', '男')\n    )\n\nclass Student(Document):\n    ''' Student model '''\n    name = StringField(required=True, max_length=32)\n    age = IntField(required=True)\n    sex = StringField(required=True, choices=SEX_CHOICES)\n    grade = FloatField()\n    created_at = DateTimeField(default=datetime.now())\n    grades = ListField(EmbeddedDocumentField(Grade))\n    address = StringField()\n    school = StringField()\n\n    meta = {\n        'collection': 'students'\n    }\n\n\nclass TestMongoEngine(object):\n\n    def add_one(self):\n        ''' Insert data '''\n        yuwen = Grade(\n            name='语文',\n            score=95\n        )\n        english = Grade(\n            name='英语',\n            score=89)\n        stu_obj = Student(\n            name='张三',\n            age=21,\n            sex='male',\n            grades=[yuwen, english]\n        )\n        # stu_obj.test = 'OK'\n        stu_obj.save()\n        return stu_obj\n\n    def get_one(self):\n        ''' Query a single record '''\n        return Student.objects.first()\n\n    def get_more(self):\n        ''' Query multiple records '''\n        # return Student.objects\n        return Student.objects.all()\n\n    def get_one_from_oid(self, oid):\n        ''' Query the record with the given ID '''\n        return Student.objects.filter(id=oid).first()\n\n    def update(self):\n        ''' Update data '''\n        # Update a single record\n        # rest = Student.objects.filter(sex='male').update_one(inc__age=1)\n        # return rest\n        # Update multiple records\n        rest = Student.objects.filter(sex='male').update(inc__age=1)\n        return rest\n\n    def delete(self):\n        ''' Delete data '''\n        # Delete a single record\n        rest = Student.objects.filter(sex='male').first().delete()\n        # Delete multiple records\n        rest = Student.objects.filter(sex='male').delete()\n        return rest\n\ndef main():\n    obj = TestMongoEngine()\n    # rest = obj.add_one()\n    # print(rest.id)\n\n    # rest = obj.get_one()\n    # print(rest.id)\n\n    rest = obj.get_more()\n    print(type(rest))\n    for item in rest:\n        print(item.id)\n\n    # rest = obj.get_one_from_oid('593bb8e7fa3ebd091078d40e')\n    # print(rest.name)\n\n    # rest = obj.update()\n    # print(rest)\n\n    # rest = obj.delete()\n    # print(rest)\n\nif __name__ == '__main__':\n    main()","repo_name":"mtianyan/Python3Database","sub_path":"第8章 MongoDB ODM/test_mongoengine.py","file_name":"test_mongoengine.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"70463387377","text":"from collections import namedtuple\nimport fileinput\n\nLiteral = namedtuple('Literal', 'version type value bits')\nOpLength = namedtuple('OpLength', 'version type id length bits')\nOpNumber = namedtuple('OpNumber', 'version type id number bits')\n\n\nclass Packet:\n\n    def __init__(self, version, type, bits, *, value=None, id=None, children=None):\n        self.version = version\n        self.type = type\n        self.bits = bits\n\n        self.value = value\n        self.id = id\n        self.children = children or []\n\n    def pprint(self, depth=0):\n        pad = ' ' * depth\n        print(pad + str(self))\n        for c in self.children:\n            c.pprint(depth + 1)\n\n    def __str__(self):\n        s = '{} {} '.format(self.version, self.type)\n        if self.value:\n            s += str(self.value) + ' '\n        else:\n            s += str(self.id) + ' '\n        s += str(self.bits)\n        return s\n\n\ndef decode(bits):\n    # parse and yield out each packet\n    while '1' in bits:\n        pbits = ''\n\n        # version / type\n        pbits += bits[:6]\n        v, t, bits = bits[:3], bits[3:6], bits[6:] \n        v, t = int(v, base=2), int(t, base=2)\n\n        # literal\n        if t == 4:\n            value = ''\n            while True:\n                pbits += bits[:5]\n                g, bits = bits[:5], bits[5:]\n                value += g[1:]\n                if g[0] == '0':\n                    break\n\n            value = int(value, base=2)\n            packet = Packet(v, t, pbits, value=value)\n            return packet\n\n        # operator (recursion)\n        else:\n            pbits += bits[0]\n            i, bits = bits[0], bits[1:]\n            i = int(i, base=2)\n\n            if i == 0:\n                pbits += bits[:15]\n                l, bits = bits[:15], bits[15:]\n                l = int(l, base=2)\n\n                size = 0\n                children = []\n                while size < l:\n                    c = decode(bits[size:l])\n                    children.append(c)\n                    size += len(c.bits)\n                    pbits += c.bits\n\n                bits = bits[l:]\n                return Packet(v, t, pbits, id=i, children=children)\n            else:\n                pbits += bits[:11]\n                n, bits = bits[:11], bits[11:]\n                n = int(n, base=2)\n\n                size = 0\n                count = 0\n                children = []\n                while count < n:\n                    c = decode(bits[size:])\n                    children.append(c)\n                    size += len(c.bits)\n                    pbits += c.bits\n                    count += 1\n\n                bits = bits[size:]\n                packet = Packet(v, t, pbits, id=i, children=children)\n                return packet\n\n\ndef score1(p):\n    res = p.version\n    for c in p.children:\n        res += score1(c)\n    return res\n\n\ndef part1(p):\n    return score1(p)\n\n\ndef score2(p):\n    if p.type == 0:\n        res = 0\n        for c in p.children:\n            res += score2(c)\n        return res\n\n    if p.type == 1:\n        res = 1\n        for c in p.children:\n            res *= score2(c)\n        return res\n\n    if p.type == 2:\n        res = []\n        for c in p.children:\n            res.append(score2(c))\n        return min(res)\n\n    if p.type == 3:\n        res = []\n        for c in p.children:\n            res.append(score2(c))\n        return max(res)\n\n    if p.type == 4:\n        return p.value\n\n    if p.type == 5:\n        res = []\n        for c in 
p.children:\n            res.append(score2(c))\n        return res[0] > res[1]\n\n    if p.type == 6:\n        res = []\n        for c in p.children:\n            res.append(score2(c))\n        return res[0] < res[1]\n\n    if p.type == 7:\n        res = []\n        for c in p.children:\n            res.append(score2(c))\n        return res[0] == res[1]\n\n\ndef part2(p):\n    return score2(p)\n\n\nif __name__ == '__main__':\n    t = None\n    for line in fileinput.input():\n        t = line.strip()\n\n    # convert hex pairs to binary\n    bits = ''\n    for h in t:\n        n = int(h, base=16)\n        bits += '{:04b}'.format(n)\n\n    p = decode(bits)\n    print(part1(p))\n    print(part2(p))\n","repo_name":"theandrew168/advent-of-code","sub_path":"2021/day16/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"16275693162","text":"import matplotlib\nmatplotlib.use('TkAgg')\n\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom animate import camera, alg_title, graph_title, Plot\n\n\ndef BubbleSort(data):\n    # repeat until no swaps\n    swapped = True\n    while swapped:\n        swapped = False\n        # check from 0 to n-2\n        for i in range( len( data ) - 1 ):\n            # swap if i-th and (i+1)-th element out of order\n            if data[i] > data[i+1]:\n                data[i], data[i+1] = data[i+1], data[i]\n                swapped = True\n        Plot(i + 1, data)\n\ndef insertionSort( data):\n    count=0\n    for i in range( 1, len( data ) ):\n        elem = data.pop( i )\n        insertPos = i\n        for j in range(i):\n            if elem < data[j]:\n                insertPos = j\n                break\n        data.insert(insertPos, elem )\n        Plot(data)\n\ndef selectionSort(data):\n    for i in range( len( data ) ):\n        min = i\n        for j in range( i + 1, len( data ) ):\n            if data[j] < data[min]:\n                min = j\n        data[i], data[min] = data[min], data[i]\n        Plot(data)\n\ntry:\n    data_size = int(input('Data size(default 30):'))\nexcept ValueError:\n    data_size = 30\n\ndata = random.sample(range(data_size), data_size)\n\nalgorithms = {'1': BubbleSort, '2': insertionSort, '3': selectionSort}\n\nalg = input('Select the algorithm: (1)BubbleSort, (2)InsertionSort, (3) SelectionSort')\n\ngph = input('Select the graph(1 for bar, 2 for scatter):')\n\ngraph_title(gph)\nalg_title(alg)\n\nfunc = algorithms[alg]\n\nfunc(data)\n\ninterval_time = 20\nanimation = camera.animate(interval=interval_time)\n\nsave = False  # set to True to write the animation to disk\n\n# To save as gif install imagemagick, to save as mp4 install ffmpeg(if not already installed)\nif save:\n    #animation.save('animation.gif', dpi=60, writer='imagemagick')\n    animation.save('animation.mp4')\n\nplt.show()\n","repo_name":"nataliesyw/Sorting-Algorithm-Visualizer-","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"3033154035","text":"# Problem statement\n# Create a function that takes in:\n# an integer that represents a number of years,\n# a 2 dimensional matrix filled with 0s and 1s that describes a garden. 1s represent hedges, and 0s represent empty spaces. \n\n# The function should simulate the growth of hedges over the given number of years, according to the following rules:\n# 1. An empty square which is adjacent to a hedge (including diagonally) will be filled in the next year. \n# 2. A square which is filled with a hedge will be empty the following year if it is surrounded on all eight sides by other hedges, which prevents it from getting enough sun. Note that hedges on the edge squares will always get enough sun.\n\n# 3. 
Any other squares will be left intact.\n\n# The function should return the number of pairs of adjacent hedges (including diagonally) at the end of this process. We’d like you to focus on writing simple, well-structured code; you will not be graded on optimizing the performance of your solution.\n# Examples:\n# Example 1. If the number of years is 1, and the initial matrix is\n# [[0, 0, 1],\n# [0, 0, 0]]\n# the final matrix will be:\n# [[0, 1, 1],\n# [0, 1, 1]]\n# and the function should output 6. Here are the six pairs of adjacent hedges in this garden:\n# [[0, 1, 1], [[0, 1, 1], [[0, 1, 1], [[0, 1, 1], [[0, 1, 1], [[0, 1, 1],\n# [0, 1, 1]] [0, 1, 1]] [0, 1, 1]] [0, 1, 1]] [0, 1, 1]] [0, 1, 1]]\n\n# Example 2. If the number of years is 2, and the initial matrix is:\n# [[1, 0, 0, 0],\n# [1, 1, 0, 0],\n# [1, 0, 0, 1]]\n\n #year 1\n# [[1, 1, 1, 1],\n# [1, 1, 1, 1],\n# [1, 1, 1, 1]]\n\n #year 2\n# [[1, 1, 1, 1],\n# [1, 0, 1, 1],\n# [1, 1, 1, 1]]\n\n #year 3\n# [[1, 1, 1, 1],\n# [1, 1, 1, 1],\n# [1, 1, 1, 1]]\n\n\n# the final matrix will be:\n# [[1, 1, 1, 1],\n# [1, 0, 1, 1],\n# [1, 1, 1, 1]]\n# And the function should output 21.\n\n\n\n\n#clarifying questions:\n #general dimensions of the matrix? \n #can you receive an empty matrix? \n #4x4 matrix, does that constitute corners or not?\n\n\n# inputs: num of years, hedge_matrix\n# return a count of the number of pairs of adjacent hedges\n\n\n# simulate growth hedges\n #year constitutes one bfs\n #bfs\n \n #for n years\n #set() to make sure we don't visit a node twice\n #init queue with 0,0\n #queue\n #pop from front of queue\n #create var missing nodes\n #create var count adjacent hedges\n #create var of adj elem\n #iterate through each adj elem and check if they don't exist. If they don't exist, iterate our missing nodes var\n #remove from adj list to prevent from iterating?\n #if missing nodes > 5 => found corner\n #if it's 1 => do nothing \n \n #iterate through adjacent hedges\n #if neighbor not in visited\n #if hedge == 0 and current == 1: => change to 1\n #if hedge == 1 and current == 0: => change to 1\n #if hedge == 1 and current == 1: => incremenet adjacent hedges count\n\n #if we change a node who's adjacent to a 1 from 0 to 1, does that mean we increment the adjacent nodes?\n\n #add hedge to queue and to visited set\n \n #reset adjacent hedge count to 0\n #reset missing nodes to 0\n \n #need to reset set() after each iteration\n\n #after floodfilling, we can do another bfs to count the number of adjacent pairs? \n #only need to do one pass\n #don't need a set()\n\n #init var to count the number of pairs\n #init queue that starts at 0,0\n #while queue:\n #pop from front. 
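(a collections.deque with popleft() would make this O(1) instead of O(n); a minimal sketch, assuming the same (i, j) tuples):\n        #   from collections import deque\n        #   queue = deque([(0, 0)])\n        #   i, j = queue.popleft()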
\n            #init var for adjacent elements, top, left, right, bottom, diag up, antidiag up, diag down, antidiag down\n\n            # iterate through adjacent elems:\n                #if adj element == 1: increment count\n                #append adj element to back of queue\n            \n            # return count/2\n\n    \ndef grow_hedges(years, hedges):\n    for n in range(0, years):\n        simulate_growth(hedges)\n\n    return calculate_adjacent_hedges(hedges)\n\ndef simulate_growth(hedges):\n    visited = set()\n    visited.add((0,0))\n    adjusted = set()\n    queue = [(0,0)]\n\n    while queue:\n        i, j = queue.pop(0)\n        current = hedges[i][j]\n        count_missing = 0\n        count_adjacent_hedges = 0\n        # top, bottom, left, right, diagUp, diagDown, antiDiagUp, antiDiagDown\n        adj_list = [(i-1, j), (i+1, j), (i, j-1), (i, j+1), (i-1, j+1), (i+1, j-1), (i-1, j-1), (i+1, j+1)]\n        copy_adj = adj_list[:]\n\n        for elem in adj_list:\n            a, b = elem\n            if a < 0 or a >= len(hedges) or b < 0 or b >= len(hedges[0]): \n                count_missing += 1\n                copy_adj.remove(elem)\n\n        for elem in copy_adj:\n            c, d = elem\n            adj_elem = hedges[c][d]\n\n            if current == 1 and adj_elem == 0 and elem not in adjusted:\n                hedges[c][d] = 1\n                adjusted.add(elem)\n            \n            elif current == 1 and adj_elem == 1:\n                count_adjacent_hedges += 1\n\n            if elem not in visited:\n                visited.add(elem)\n                queue.append(elem)\n\n        if count_adjacent_hedges == 8 and (i,j) not in adjusted: \n            hedges[i][j] = 0\n            adjusted.add((i,j))\n\ndef calculate_adjacent_hedges(hedges):\n    queue = [(0,0)]\n    visited = set([(0,0)])\n    paired = set()\n    count = 0\n\n    while queue:\n        i,j = queue.pop(0)\n        curr_elem = hedges[i][j]\n\n        adj_list = [(i-1, j), (i+1, j), (i, j-1), (i, j+1), (i-1, j+1), (i+1, j-1), (i-1, j-1), (i+1, j+1)]\n        copy_adj = adj_list[:]\n\n        for elem in adj_list:\n            a, b = elem\n            if a < 0 or a >= len(hedges) or b < 0 or b >= len(hedges[0]): \n                copy_adj.remove(elem)\n\n        for elem in copy_adj:\n            c, d = elem\n            adj_elem = hedges[c][d]\n\n            if adj_elem == 1 and curr_elem == 1:\n                if (((i,j), elem)) not in paired and ((elem, (i,j))) not in paired:\n                    paired.add(((i,j), elem))\n                    count += 1\n            \n            if elem not in visited:\n                visited.add(elem)\n                queue.append(elem)\n\n    return count\n    \n\n\n    #create an empty matrix with the same dimensions as our input\n    #then we can iterate over the number of years and populate our empty matrix\n\nmatrix = [[1, 0, 0, 0],\n          [1, 1, 0, 0],\n          [1, 0, 0, 1]]\n\n# matrix = [[0, 0, 1],\n#           [0, 0, 0]]\n\ndef grow_hedges_2(years, hedges):\n    garden = hedges\n    for n in range(years):\n        # carry the evolved garden forward into the next year\n        garden = simulate(garden)\n    \n    print(garden)\n    return calc_adj(garden)\n\ndef simulate(hedges):\n    height = len(hedges)\n    width = len(hedges[0])\n    copy_hedges = [[0 for j in range(len(hedges[0]))] for i in range(len(hedges))]\n    for i in range(height):\n        for j in range(width):\n            adj_neighbors = count_neighbors(hedges, i, j)\n            elem = hedges[i][j]\n            # a fully surrounded hedge dies; the branches must be exclusive\n            if elem == 1 and adj_neighbors == 8:\n                copy_hedges[i][j] = 0\n            elif elem == 0 and adj_neighbors > 0:\n                copy_hedges[i][j] = 1\n            else:\n                copy_hedges[i][j] = hedges[i][j]\n\n    return copy_hedges\n\ndef count_neighbors(hedges, i, j):\n    pop_neighbors = 0\n    distance = (-1, 0, 1)\n    for delta_i in distance:\n        for delta_j in distance:\n            if delta_i == 0 and delta_j == 0: continue\n\n            row = i+delta_i\n            column = j+delta_j\n\n            if 0 <= row < len(hedges) and 0 <= column < len(hedges[0]):\n                if hedges[row][column] == 1: pop_neighbors += 1\n\n    return pop_neighbors \n\ndef calc_adj(hedges):\n    count = 0\n    height = len(hedges)\n    width = len(hedges[0])\n    for i in range(height):\n        for j in range(width):\n            if hedges[i][j] == 1: count += count_neighbors(hedges, i, j) \n\n\n    return 
count//2\n\n\nprint(grow_hedges_2(3, matrix))","repo_name":"sbalayan1/practice_code","sub_path":"DSandAlgos/grow_hedges.py","file_name":"grow_hedges.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6267580375","text":"from python_wikibase.data_model.entity import check_item_param\nfrom python_wikibase.data_types.data_type import DataType\n\n\nclass Quantity(DataType):\n @staticmethod\n def parse_number(value):\n \"\"\"Parse and return number (string, float or int) as int or float\"\"\"\n value_float = float(value)\n if value_float.is_integer():\n return int(value_float)\n else:\n return value_float\n\n def __init__(self, py_wb, api, language):\n super().__init__(py_wb, api, language)\n self.amount = None\n self.unit = None\n\n def __int__(self):\n return int(self.amount)\n\n def __float__(self):\n return float(self.amount)\n\n def unmarshal(self, data_value):\n quantity_value = data_value[\"value\"]\n\n # Amount (parse as int or float)\n self.amount = self.parse_number(quantity_value[\"amount\"])\n\n # Unit\n if quantity_value[\"unit\"] == \"1\":\n # \"1\": No unit\n self.unit = None\n else:\n # Unit URL has the form \"http://localhost:8181/entity/Q1\", extract last part\n unit_item_id = quantity_value[\"unit\"].split(\"/\")[-1]\n self.unit = self.py_wb.Item()\n self.unit.entity_id = unit_item_id\n\n return self\n\n def marshal(self):\n marshalled = {}\n\n # Amount\n if self.amount >= 0:\n marshalled[\"amount\"] = f\"+{self.amount}\"\n else:\n marshalled[\"amount\"] = str(self.amount)\n\n # Unit\n if not self.unit:\n marshalled[\"unit\"] = \"1\"\n else:\n api_url = self.py_wb.api.api.base_url\n api_url_split = api_url.split(\"/\")\n base_url = \"/\".join(api_url_split[:3])\n marshalled[\"unit\"] = f\"{base_url}/entity/{self.unit.entity_id}\"\n\n return marshalled\n\n def create(self, amount, unit=None):\n # Amount (parse as int or float)\n self.amount = self.parse_number(amount)\n\n # Unit (must be Wikibase item)\n if unit:\n check_item_param(unit, \"unit\")\n self.unit = unit\n\n return self\n","repo_name":"samuelmeuli/python-wikibase","sub_path":"python_wikibase/data_types/quantity.py","file_name":"quantity.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"57"} +{"seq_id":"5696818860","text":"import json\nimport os\n\nfrom pathlib import Path\nfrom nextcord.ext import commands\n\ncwd = Path(__file__).parents[0]\ncwd = str(cwd)\n\n\ndef get_prefix(bot, message):\n with open('prefixes.json', 'r') as f:\n prefixes = json.load(f)\n return prefixes[str(message.guild.id)]\n\n\nSettingsFile = json.load(open('settings.json'))\nClient = commands.Bot(command_prefix=(get_prefix))\nClient.remove_command(\"help\") # To create a personal help command \n\n# Add default if bot join any guild\n@Client.event\nasync def on_guild_join(guild):\n with open('prefixes.json', 'r') as f:\n prefixes = json.load(f)\n\n prefixes[str(guild.id)] = SettingsFile['prefix']\n\n with open('prefixes.json', 'w') as f:\n json.dump(prefixes, f, indent=4)\n\n# When the bot is left from the guild prefix will deleted automatically\n@Client.event\nasync def on_guild_remove(guild):\n with open('prefixes.json', 'r') as f:\n prefixes = json.load(f)\n\n prefixes.pop(str(guild.id))\n\n with open('prefixes.json', 'w') as f:\n json.dump(prefixes, f, indent=4)\n\n\n@Client.command(pass_context=True)\n@commands.has_permissions(administrator=True) 
# Change prefix command\nasync def prefix(ctx, prefix):\n \"\"\"\n Change Prefix In This Guild Only\n You Can Put Your Custom Prefix\n \"\"\"\n with open('prefixes.json', 'r') as f:\n prefixes = json.load(f)\n\n prefixes[str(ctx.guild.id)] = prefix\n\n with open('prefixes.json', 'w') as f:\n json.dump(prefixes, f, indent=4)\n\n await ctx.reply(f'**Prefix changed to: `{prefix}`**')\n\nif __name__ == \"__main__\":\n for file in os.listdir(cwd + \"/cogs\"):\n if file.endswith(\".py\") and not file.startswith(\"_\"):\n Client.load_extension(f\"cogs.{file[:-3]}\")\n Client.run(SettingsFile['token'])","repo_name":"ZI1E/Nextcord-Template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"16628483842","text":"#!/usr/bin/env python\n\nimport rospy\nimport time\n\nfrom ros_basics_tutorials.srv import RectangleAreaService\nfrom ros_basics_tutorials.srv import RectangleAreaServiceRequest\nfrom ros_basics_tutorials.srv import RectangleAreaServiceResponse\n\ndef handle_rectangle_area(req):\n print(\"Returning [%s * %s = %s]\"%(req.h, req.w, (req.h * req.w)))\n time.sleep(5)\n area_res = RectangleAreaServiceResponse(req.h * req.w)\n return area_res\n\ndef rectangle_area_service_server():\n rospy.init_node('rectangle_area_service_server')\n s = rospy.Service('rectangle_area_service', RectangleAreaService, handle_rectangle_area)\n print(\"Ready to calculate Area of Rectangle. \")\n rospy.spin()\n\nif __name__==\"__main__\":\n rectangle_area_service_server()\n","repo_name":"Shanky-Robot/ROS-Basics","sub_path":"src/ros_service_assignment/script/ras_server.py","file_name":"ras_server.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"16467022031","text":"\n\nclass DataBase(object):\n \"\"\"robot database: cmdMsg feedbackMsg\"\"\"\n __instance = None\n __isFirstInit = False\n\n def __new__(cls):\n if not cls.__instance:\n DataBase.__instance = super().__new__(cls)\n return cls.__instance\n\n def __init__(self):\n if not self.__isFirstInit:\n self.cmdMsg = []\n self.feedbackMsg = []\n self.__isFirstInit = True\n else:\n pass\n\n# test this class\ndef test():\n db1 = DataBase()\n db1.cmdMsg = [1.0, 30]\n db1.feedbackMsg = [1.1, 34]\n\n print(\"db1->cmdMSg:\",db1.cmdMsg)\n print(\"db1->feedbackMSg:\",db1.feedbackMsg)\n\n db2 = DataBase()\n print(\"db2->cmdMSg:\",db2.cmdMsg)\n print(\"db2->feedbackMSg:\",db2.feedbackMsg)\n \n# test()","repo_name":"GeHaha/Practice","sub_path":"Car1/SlamCar/DataBase.py","file_name":"DataBase.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"42382613915","text":"def meeting(s) :\n count = 0\n how_many_names = s.count(':')\n\n fn = []\n ln = []\n\n while count < how_many_names :\n\n x = s.find(':')\n y = s.find(';')\n\n fn.append(s[:x].upper())\n\n ln_range = s[x+1:y]\n if y == -1 :\n ln_range = s[x+1:]\n\n ln.append(ln_range.upper())\n\n s = s[y + 1:]\n count = count + 1\n\n zippy = sorted(list(zip(ln, fn)))\n\n new_str = ''\n for first, last in zippy :\n new_str += '(' + first + ', ' + last + ')'\n\n 
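# zippy holds (last name, first name) tuples, so sorting orders entries by\n    # surname first and each tuple below is printed as (LASTNAME, FIRSTNAME)\n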
    return new_str\n\nprint(meeting(\"Fred:Corwill;Wilfred:Corwill;Barney:Tornbull;Betty:Tornbull;Bjon:Tornbull;Raphael:Corwill;Alfred:Corwill\"))\nprint(meeting(\"Alexis:Wahl;John:Bell;Victoria:Schwarz;Abba:Dorny;Grace:Meta;Ann:Arno;Madison:STAN;Alex:Cornwell;Lewis:Kern;Megan:Stan;Alex:Korn\"))\n","repo_name":"patthrasher/codewars-practice","sub_path":"kyu6-6.py","file_name":"kyu6-6.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27679854012","text":"#!/usr/bin/python\n# coding: UTF-8\n\n
\"\"\"\nplot.py plots measured and simulated data from the ODIN satellite and the simulation program IASCO. The plots are projected either on a world map (Miller projection) or on the north and south poles. For more information on how to use these different applications, please see the documentation for globalPlot and polarPlot respectively.\n\"\"\"\n\n
import convert_date as c\nimport color_axis as col\nfrom scipy import io as sio\nimport matplotlib as matplotlib\nmatplotlib.use('Agg')\nimport os\nimport numpy as n\nfrom mpl_toolkits.basemap import Basemap,shiftgrid\nfrom pylab import *\nimport gc\nfrom datetime import date as dt\nfrom odin.config.environment import *\n\n
def globalPlot(date_mjd,level,species):\n    \"\"\"\n    Function for a global plot. The figure shows a plot over the world map for the chosen species (O3 501.8 GHz, O3 544.6 GHz, H2O, N2O, HNO3). Inputs are the date defined in Modified Julian Date, the potential temperature level of interest (see table below) and the species of interest (date and level (0 to 5) should be defined as integers while the species is defined as a string).\n\n    level:   |  0  |  1  |  2  |  3  |  4  |  5\n    O3_501   | 475 | 525 | 575 | 625 | --- | ---\n    O3_544   | 475 | 525 | 575 | 625 | --- | ---\n    N2O      | 475 | 525 | 575 | 625 | --- | ---\n    H2O      | 400 | 425 | 450 | 475 | 500 | 525\n    HNO3     | 475 | 525 | 575 | 625 | 675 | 725\n\n    Example: globalPlot(54745,2,'O3_501') - Will plot a global projection for 2008-10-06 for O3 (501.8 GHz) at the potential temperature level 575 K.\n    \"\"\"\n
    year,month,day,hour,minute,secs,tics = c.mjd2utc(date_mjd)\n    load_path = config().get('GEM','LEVEL3_DIR') + 'DATA/'\n    data = sio.loadmat(load_path + species + '/' + str(year) + '/' + str(month) + '/' + species + '_' + str(date_mjd) + '_00.mat')\n    data=double(data['TracerField_u16'])*data['K_TracerField']\n    m = Basemap(projection='mill') # Makes a Miller projection of the world map\n\n
    # Defines latitudes and longitudes and makes a transformation of the data coordinates to fit the coordinates of the map (basemap)\n    lats = n.arange(-88.875,88.875+2.25,2.25)\n    lons = n.arange(-178.875,178.875+2.25,2.25)\n    nx = 320\n    ny = 160\n    specdat = m.transform_scalar(data[:,:,level], lons, lats, nx, ny)\n\n
    clf() # Clears any previous figure\n    fig = figure(figsize=(8,6))\n    caxis, step = col.c_axis(level,species)\n\n    im = m.imshow(specdat*1e6, interpolation='bilinear', vmin=caxis[0], vmax=caxis[1]+0.0000001*step,origin='upper')\n    colorbar(im,orientation='vertical',shrink=0.76,ticks=n.arange(caxis[0],caxis[1]+step,step))\n    m.drawcoastlines()\n    m.drawmeridians(range(-135,136,45),labels=[1,0,0,1])\n    m.drawparallels([-75,-45,0,45,75],labels=[1,0,0,1])\n\n    titles = ['O3 (501.8 GHz)','O3 (544.6 GHz)']\n\n
    # Makes title and figure texts: look up the potential temperature label for\n    # the species/level combination instead of repeating one branch per case\n    level_temps = {\n        'O3_501': ['475K','525K','575K','625K'],\n        'O3_544': ['475K','525K','575K','625K'],\n        'N2O':    ['475K','525K','575K','625K'],\n        'H2O':    ['400K','425K','450K','475K','500K','525K'],\n        'HNO3':   ['475K','525K','575K','625K','675K','725K'],\n    }\n    display_names = {'O3_501': titles[0], 'O3_544': titles[1]}\n    temps = level_temps.get(species, [])\n    if level < len(temps):\n        name = display_names.get(species, species)\n        title(name + '\\nDate: %d-%02d-%02d Level: %s' %(year,month,day,temps[level]) )\n\n
    c_str='Copyright (c) ' + str(dt.today().year) + ' Chalmers tekniska högskola AB'\n    figtext(0.26,0.14, c_str.decode('UTF-8'),fontsize=8)\n    figtext(0.325,0.115,'Marcus Jansson & Erik Zakrisson',fontsize=8)\n    if species == 'HNO3':\n        figtext(0.885,0.465,'[ppm]',fontsize=12,rotation='vertical')\n    else:\n        figtext(0.865,0.465,'[ppm]',fontsize=12,rotation='vertical')\n\n
    # Save the images divided into folders according to species, year and month\n    save_path_main=(config().get('GEM','LEVEL3_DIR') + 'PICTURES')\n\n    if not os.path.isdir(save_path_main + '/'):\n        os.mkdir(save_path_main + '/')\n    if not os.path.isdir(save_path_main + '/' + species + '/'):\n        os.mkdir(save_path_main + '/' + species + '/')\n    if not os.path.isdir(save_path_main + '/' + species + '/' + str(year) + '/'):\n        os.mkdir(save_path_main + '/' + species + '/' + str(year) + '/')\n    if not os.path.isdir(save_path_main + '/' + species + '/' + str(year) + '/' + str(month) + '/'):\n        os.mkdir(save_path_main + '/' + species + '/' + str(year) + '/' + str(month) + '/')\n\n
    savefig(os.path.join(save_path_main + '/' + species + '/' + str(year) + '/' + str(month) + '/', species + '_' + str(level) + '_' + str(date_mjd) + '.png'))\n    gc.collect()\n\ndef polarPlot(date_mjd,level):\n    \"\"\"\n    Function for a plot over the polar regions. 
The figure shows subplots for all species (O3 (501.8 GHz), H2O, N2O, HNO3) and for the south pole and the north pole respectively. Inputs are the date defined in Modified Julian Date and the potential temperature level of interest (0 => 475K, 1 => 525K) these are the levels that are mutual between the four species.\n\n Example: polarPlot(54745,0) - Will plot a polar projection for 2008-10-06 at the potential temperature level 475 K.\n \"\"\"\n import matplotlib.colors as colors\n\n species=['O3_501','H2O','N2O','HNO3']\n titles=['O3 (501.8 GHz)','H2O','N2O','HNO3']\n # The temperatur levels 475K and 525K corresponds to level 3 and 5 in the data matrix for H2O (0 and 1 for all the other)\n if level == 0: \n level_H2O = 3\n elif level == 1:\n level_H2O = 5\n else:\n level_H2O = level\n levels = [level,level_H2O,level,level]\n\t\t\n year,month,day,hour,minute,secs,tics = c.mjd2utc(date_mjd)\n # Makes the figure with head title and figure texts\n clf()\n fig = figure(figsize=(12,6)) \n axis('off')\n\t\n if level == 0:\n figtext(0.375,0.88,'Date: %d-%02d-%02d Level: 475K\\n' %(year,month,day), fontsize=14 )\n elif level == 1:\n figtext(0.365,0.91,'Date: %d-%02d-%02d Level: 525K\\n' %(year,month,day), fontsize=14 )\n else:\n figtext(0.365,0.91,'Date: %d-%02d-%02d Level: ...\\n' %(year,month,day), fontsize=14 )\n\n\t\t\n c_str='Copyright (c) ' + str(dt.today().year) + ' Chalmers tekniska högskola AB'\n figtext(0.375,0.1, c_str.decode('UTF-8'),fontsize=8)\n figtext(0.42,0.076,'Marcus Jansson & Erik Zakrisson',fontsize=8)\n figtext(0.038,0.713,'North Pole', fontsize=12)\n figtext(0.038,0.275,'South Pole', fontsize=12)\n figtext(0.92,0.484,'[ppm]',fontsize=12,rotation='horizontal')\n load_path = config().get('GEM','LEVEL3_DIR') + 'DATA/'\n for i in range(0,len(species)): # Makes subplots for each species\n\t\n data = sio.loadmat(load_path + species[i] + '/' + str(year) + '/' + str(month) + '/' + species[i] + '_' + str(date_mjd) + '_00.mat')\n data=double(data['TracerField_u16'])*data['K_TracerField']\n mN = Basemap(lon_0=0,resolution='c',area_thresh=10000.,boundinglat=20., projection='npstere') # North pole projection\n mS = Basemap(lon_0=0,resolution='c',area_thresh=10000.,boundinglat=-20., projection='spstere') # South pole projection\n # Define Latitudes and longitudes\n lats = n.arange(-88.875,88.875+2.25,2.25)\n lats = lats*90/lats[-1]\n lons = n.arange(-180,180+2.25,2.25)\n N_lat = len(lats)\n N_lon = len(lons)\n\t\n if species[i] == 'H2O' or species[i] == 'HNO3':\n no_of_levels = 6\n else:\n no_of_levels = 4\n specdata=zeros((80,161,no_of_levels))\n for k in range(0,no_of_levels): # Add an extra column at the poles\n for l in range(0,80):\n for o in range(0,161):\n if o==160:\n specdata[l,o,k]=data[l,o-1,k]\n else:\n specdata[l,o,k]=data[l,o,k]\n # Flip the matrix so the values is correct (upper left corner (0,0) in the data matrix represent the lower left corner of the map.)\n data=zeros((80,161,no_of_levels))\n for j in range(0,no_of_levels): \n data[:,:,j]=flipud(specdata[:,:,j])\n\t\t\n # Makes a transformation of the data coordinates to fit the coordinates of the map (basemap)\n dxN = 2.*pi*mN.rmajor/len(lons)\n nxN = int((mN.xmax-mN.xmin)/dxN)+1\n nyN = int((mN.ymax-mN.ymin)/dxN)+1\n dxS = 2.*pi*mS.rmajor/len(lons)\n nxS = int((mS.xmax-mS.xmin)/dxS)+1\n nyS = int((mS.ymax-mS.ymin)/dxS)+1\n specdataN = mN.transform_scalar(data[:,:,levels[i]], lons, lats, nxN, nyN, masked=True)\n specdataS = mS.transform_scalar(data[:,:,levels[i]], lons, lats, nxS, nyS, masked=True)\n\t\t\n # Makes the 
images, north- and south pole\n caxis,step = col.c_axis(levels[i],species[i])\n axN=fig.add_subplot(2,4,i+1)\n imN = mN.imshow(specdataN*1e6, norm=colors.normalize(clip=False), interpolation='bicubic', vmin=caxis[0], vmax=caxis[1])\n colorbar(shrink=0.75)\n mN.drawcoastlines()\n mN.drawmeridians(n.arange(0.,360.,60.))\n mN.drawparallels(n.arange(-80.,90,20.))\n title(titles[i], fontsize=12)\n\t\t\n axS = fig.add_subplot(2,4,i+5)\n imS = mS.imshow(specdataS*1e6, norm=colors.normalize(clip=False), interpolation='bicubic', vmin=caxis[0], vmax=caxis[1])\n colorbar(shrink=0.75)\n mS.drawcoastlines()\n mS.drawmeridians(n.arange(0.,360.,60.))\n mS.drawparallels(n.arange(-80.,90,20.))\n\n # Save the images divided into folders according to year and month\n save_path_main=(config().get('GEM','LEVEL3_DIR') + 'PICTURES/')\n if not os.path.isdir(save_path_main):\n os.mkdir(save_path_main)\n if not os.path.isdir(save_path_main + 'Polar/'):\n os.mkdir(save_path_main + 'Polar/')\n if not os.path.isdir(save_path_main + 'Polar/' + str(year) + '/'):\n os.mkdir(save_path_main + 'Polar/' + str(year) + '/')\n if not os.path.isdir(save_path_main + 'Polar/' + str(year) + '/' + str(month) + '/'):\n os.mkdir(save_path_main + 'Polar/' + str(year) + '/' + str(month) + '/')\n\n savefig(os.path.join(save_path_main + 'Polar/' + str(year) + '/' + str(month) + '/', 'Polar_' + str(level) + '_' + str(date_mjd) + '.png'))\n gc.collect()\n","repo_name":"Odin-SMR/hermod","sub_path":"src/odin.iasco/odin/iasco/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":12428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20978009465","text":"import subprocess, re, conf, os\nfrom shared import Command\n\n\nclass VMType(object):\n MASTER = 0\n WORKER = 1\n PRODUCER = 2\n\n @staticmethod\n def to_str(type):\n if type == VMType.MASTER:\n return 'MASTER'\n elif type == VMType.WORKER:\n return 'WORKER'\n return 'PRODUCER'\n\n\n @staticmethod\n def from_str(str):\n str = str.upper()\n if str == 'MASTER':\n return VMType.MASTER\n elif str == 'WORKER':\n return VMType.WORKER\n return VMType.PRODUCER\n\nclass VMState(object):\n INIT = 0\n PENDING = 1\n HOLD = 2\n ACTIVE = 3\n STOPPED = 4\n SUSPENDED = 5\n DONE = 6\n FAILED = 7\n UNKNOWN = 8\n\n\n @staticmethod\n def to_str(state):\n return {VMState.INIT : 'INIT',\n VMState.PENDING : 'PENDING',\n VMState.HOLD : 'HOLD',\n VMState.ACTIVE : 'ACTIVE',\n VMState.STOPPED : 'STOPPED',\n VMState.SUSPENDED : 'SUSPENDED',\n VMState.DONE : 'DONE',\n VMState.FAILED : 'FAILED',\n VMState.UNKNOWN : 'UNKNOWN'\n }[state]\n\n @staticmethod\n def from_str(str):\n str = str.upper()\n try:\n return {'INIT': VMState.INIT,\n 'PENDING': VMState.PENDING,\n 'HOLD': VMState.HOLD,\n 'ACTIVE': VMState.ACTIVE,\n 'STOPPED': VMState.STOPPED,\n 'SUSPENDED': VMState.SUSPENDED,\n 'DONE': VMState.DONE,\n 'FAILED': VMState.FAILED,\n }[str]\n except KeyError:\n return VMState.UNKNOWN\n\n \n\n\nclass OneVMWrapper(object):\n \"\"\"Mapping between address based identification and VM_ID based\n identification. Also provides a way to suspend or start new VMs in\n OpenNebula. Assumes migration never occurs by an external system.\"\"\"\n\n # TODO: Make use of the official open nebula API instead of onevm command. \n # This is the only DAS-4 specific code in the entire project. 
Adapt this class to Amazon EC2 for example in order to run the system on Amazon EC 2.\n\n\n def __init__(self):\n self._load_vm_mappings() \n \n \n def _load_vm_mappings(self):\n self._mapping = {}\n self._id_ordering = []\n self._remapping = {}\n output = subprocess.Popen([\"onevm\", \"list\"], stdout=subprocess.PIPE).communicate()\n lines = output[0].split('\\n')[1:-1]\n for line in lines:\n split = line.split()\n if len(split) == 0:\n continue\n id = split[0]\n type = VMType.from_str(split[3])\n ip,state = self._get_vm_ip_and_state(id)\n self._id_ordering.append(id)\n self._mapping[id] = {'ip':ip, 'type': type, 'state': state}\n self._remapping[ip] = id\n\n def _get_vm_ip_and_state(self, id):\n ip_reg = re.compile(r\"IP=\\\"(.+?)\\\"\", re.MULTILINE)\n state_reg = re.compile(r\"STATE\\s*:\\s*(\\w+)\", re.MULTILINE)\n out = subprocess.Popen([\"onevm\",\"show\",id], stdout=subprocess.PIPE).communicate()\n return (re.search(ip_reg, out[0]).group(1),\n VMState.from_str(re.search(state_reg, out[0]).group(1)))\n\n\n\n def list_vms(self, load=False):\n if load:\n self._load_vm_mappings()\n return (list(self._id_ordering),self._mapping.copy(), self._remapping.copy())\n\n\n\n def _create_vm(self, type_enum):\n \"\"\"Create a VM, and register it in the wrapper.\"\"\"\n one = VMType.to_str(type_enum).lower() + \".one\"\n output = subprocess.Popen([\"onevm\", \"create\", os.path.expanduser(conf.VM_IMAGE_DIR)+one], stdout=subprocess.PIPE).communicate()\n id_reg = re.compile(r\"ID:\\s*(\\d+)\", re.MULTILINE)\n id = re.search(id_reg, output[0]).group(1)\n ip,state = self._get_vm_ip_and_state(id)\n self._mapping[id] = {'ip': ip, 'type': type_enum, 'state':state}\n self._id_ordering.append(id)\n self._remapping[ip] = id\n return id\n\n\n\n def _suspend_vm(self, id):\n if id not in self._mapping:\n raise VMError(\"VM does not exist.\")\n subprocess.Popen([\"onevm\", \"suspend\", id])\n\n def _delete_vm(self, id):\n if id not in self._mapping:\n raise VMError(\"VM does not exist.\")\n subprocess.Popen([\"onevm\", \"delete\", id])\n del self._remapping[self._mapping[id]['ip']]\n del self._mapping[id]\n self._id_ordering.remove(id)\n\n def _resume_vm(self, id):\n if id not in self._mapping:\n raise VMError(\"VM does not exist.\")\n subprocess.Popen([\"onevm\", \"resume\", id])\n\n def exec_cmd(self, cmd):\n if not isinstance(cmd, VMCommand):\n return\n if cmd.type == VMCommand.CREATE:\n return self._create_vm(cmd.data)\n if cmd.type == VMCommand.SUSPEND:\n self._suspend_vm(cmd.data)\n return\n if cmd.type == VMCommand.DELETE:\n self._delete_vm(cmd.data)\n return\n if cmd.type == VMCommand.RESUME:\n self._resume_vm(cmd.data)\n return\n\n\nclass VMCommand(Command):\n CREATE = 0\n SUSPEND = 1\n DELETE = 2\n RESUME = 3\n \n\n def __str__(self):\n return \"(%s,%s)\" % ({self.CREATE: 'CREATE',\n self.DELETE: 'DELETE',\n self.RESUME: 'RESUME',\n self.SUSPEND: 'SUSPEND'}[self._type], str(self._data))\n\nclass VMError(Exception):\n pass\n","repo_name":"stefanvanwouw/najnaf","sub_path":"src/head/vm_wrapper.py","file_name":"vm_wrapper.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22138981123","text":"#!/usr/bin/env python\n\nimport serial\nimport threading\n\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Int16\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import UInt32\n\nimport numpy as np\nimport 
time\n\n# Publishers\npub_receive = rospy.Publisher(\"debug_serial\", String, queue_size=10, tcp_nodelay=True) # Debug serial\n\npwm_set = 700\nposicionar = False\n\npub_G1EA1800CV1_volante_acionar = rospy.Publisher(\"G1EA1800CV1_volante\", Int16, queue_size=10, tcp_nodelay=True) \n\npub_direcao_ok = rospy.Publisher(\"direcao_ok\", Bool, queue_size=10, tcp_nodelay=True)\n\n\npwm_atual = 0\n\nkp = 40\nki = 5\nkd = 0\n\nerro = 0\nerro_old = 0\nproporcional = 0\nintegral = 0\nderivativo = 0\nlim_integral = 20\ncontrole = 0\nreset = 0\nok = 0\n\n
def PID(valorLido, valorSetado):\n    global erro, erro_old, kp, ki, kd\n    global proporcional, integral, derivativo, controle, reset\n\n    erro = ((valorSetado - valorLido) * -1) / 100\n    proporcional = erro * kp\n    integral = integral + (erro * ki)\n    derivativo = (erro - erro_old) * kd\n\n    erro_old = erro\n\n    if(integral >= lim_integral):\n        integral = lim_integral\n\n    if(integral <= -lim_integral):\n        integral = -lim_integral\n\n    # Reset the integral term once the reading is close to the setpoint\n    if(valorLido > (valorSetado-3) and valorLido < (valorSetado+3)):\n        integral = 0\n\n    controle = (proporcional + integral + derivativo) *-1\n\n    if(controle > 127):\n        controle = 127\n\n    if(controle < -127):\n        controle = -127\n\n\n
def direcao_set_callback(data):\n    global pwm_set, posicionar\n    pwm_set = data.data\n    posicionar = True\n\ndef garfo_posicionar_callback(data):\n    global posicionar\n    posicionar = data.data\n\n
def Thread_logica():\n    global pwm_atual, posicionar, ok, pwm_set\n\n    while not rospy.is_shutdown():\n        try:\n\n            if posicionar:\n\n                PID(pwm_atual, pwm_set)\n                pwm = controle\n\n                pub_G1EA1800CV1_volante_acionar.publish(int(pwm))\n\n                print(\"pwm_atual: \" + str(pwm_atual) + \" pwm_set: \" + str(pwm_set) + \" pwm: \" + str(pwm))\n                if(pwm_atual > (pwm_set-5) and pwm_atual < (pwm_set+5)):\n                    ok = ok + 1\n                    if(ok > 20):\n                        print(\"Finished\")\n                        posicionar = False\n                else:\n                    ok = 0\n\n                pub_direcao_ok.publish(False)\n\n            else:\n                pub_G1EA1800CV1_volante_acionar.publish(0)\n                pub_direcao_ok.publish(True)\n                ok = 0\n\n            time.sleep(0.05)\n\n        except Exception as ex:\n            print(\"Error in __main__ Garfo: \" + str(ex))\n            #pub_ros_debug.publish(\"Error in __main__ Serial: \" + str(ex))\n\n\n
def agv_p19_callback(data):\n    global pwm_atual\n    pwm_atual = data.data\n\n\nif __name__ == '__main__':\n    rospy.init_node('Direcao', anonymous=False)\n\n    rospy.Subscriber(\"direcao_set\", Int16, direcao_set_callback, queue_size=1, buff_size=2**24)\n    rospy.Subscriber(\"garfo_posicionar\", Bool, garfo_posicionar_callback, queue_size=1, buff_size=2**24)\n    rospy.Subscriber(\"agv_p19\", Int16, agv_p19_callback, queue_size=1, buff_size=2**24) \n\n    rate = rospy.Rate(10) # 10 Hz, i.e. one cycle every 100 ms\n\n    t2 = threading.Thread(target=Thread_logica)\n    t2.start()\n\n    #time.sleep(10)\n\n    while not rospy.is_shutdown():\n\n        rate.sleep()","repo_name":"EduardoBarioni/BLUE","sub_path":"G1EA1800CV1/NUC/firmware-ros/firmware/script/Direcao.py","file_name":"Direcao.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"953533795","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n
\"\"\"Constants definition file\n\nManages all constants in one place\n\n\"\"\"\n# Directory where data files are saved\nDATA_DIRECTORY = \"./datas\"\n\n# Member list\nMEMBERS = [\"佐藤\", \"伊藤\"]\n\n# Mode list (attendance entry, overtime report, status post)\nMODE = [\"勤怠入力\", \"残業報告\", \"なう送信\"]\n\n# Separator\nSYMBOL = \"-\"\nSYMBOL_LENGTH = 50\n\n# Regular finishing time\nON_TIME = \"18:00\"\n\n# Break after regular hours\nOVER_TIME_REST = 
15","repo_name":"yusei88/work-time-logger","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15356261855","text":"\nfrom faker import Faker\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\n\nimport os\nimport sys\nimport time\nimport math\nimport re\nimport random\nimport xmltodict\nimport psycopg2\nimport requests \nimport boto3\nimport traceback\n\n
from helpers.data_generator import DataGenerator\nfrom pplibs.logUtils import get_account_config\nfrom utils import meld_columns\nfrom utils import group_by_column\nfrom db_utils import insert_to_db\n\nfrom event_types import event_types \n\nfake = Faker()\ngenerator = DataGenerator()\n\n\n
#### Getting DB credentials\nclient = boto3.client('ssm')\nresponse = client.get_parameter(\n\tName=\"/{}/redshift/master-database-password\".format(os.environ[\"ENV\"]),\n\tWithDecryption=True\n) \n\ndb_host = os.environ[\"REDSHIFT_ENDPOINT\"] # PRODUCTION\n\ndb_name = os.environ[\"REDSHIFT_DB_NAME\"]\ndb_user = os.environ[\"REDSHIFT_DB_USER\"]\ndb_port = os.environ[\"REDSHIFT_DB_PORT\"]\ndb_pass = response['Parameter']['Value']\n\n\n\n
def get_agents(cur, account_id):\n\tsql = \"\"\"\n\t\tSELECT id as propair_id, account_id, agent_id, name_velocify, email from agent_profiles\n\t\tWHERE account_id={}\n\t\"\"\".format(account_id)\n\tcur.execute(sql)\n\tdata = meld_columns(cur.fetchall(), [x.name for x in cur.description])\n\n\treturn group_by_column(data, 'propair_id')\n\n
def get_leads(cur, account_id, date_dt):\n\tsql = \"\"\"\n\t\tselect eld.account_id, eld.account_lead_id, el.first_name_stated + ' ' + el.last_name_stated as name,\n\t\teld.lead_datetime, el.campaign_group,\n\t\teld.profile_id_first_assignment_user, eld.profile_id_user, el.day_phone, el.evening_phone\n\t\tfrom external_lead_details eld\n\t\tleft join external_leads el on eld.account_id = el.account_id and eld.account_lead_id=el.account_lead_id\n\t\twhere eld.account_id={}\n\t\tand eld.account_lead_id < 0\n\t\tand eld.lead_datetime > '{}';\n\t\"\"\".format(account_id, date_dt)\n\n\tcur.execute(sql)\n\tdata = meld_columns(cur.fetchall(), [x.name for x in cur.description])\n\n\treturn group_by_column(data, 'account_lead_id')\n\n
def get_events(cur, account_id, date_dt):\n\tsql = \"\"\"\n\t\tselect account_id, account_lead_id, log_subtype_name, log_subtype_id, log_type, log_user_email, \n\t\tlog_user_id, log_user_name, milestone_id, milestone_name, log_date\n\t\tfrom external_events\n\t\twhere account_id={}\n\t\tand account_lead_id < 0\n\t\tand log_date > '{}'\n\t\tand log_type = 'Action';\n\t\"\"\".format(account_id, date_dt)\n\n\tcur.execute(sql)\n\tdata = meld_columns(cur.fetchall(), [x.name for x in cur.description])\n\n\treturn data\n\n
def strTimeProp(start, end, format, prop):\n\t\"\"\"Get a time at a proportion of a range between two times.\n\n\tstart and end are datetime objects giving the interval [start, end]\n\t(they are converted with timetuple(), not parsed from strings).\n\tprop specifies the proportion of the interval to be taken after\n\tstart. 
The returned time will be in the specified format.\n\t\"\"\"\n\n\tstime = time.mktime(start.timetuple())\n\tetime = time.mktime(end.timetuple())\n\n\tptime = stime + prop * (etime - stime)\n\n\treturn time.strftime(format, time.localtime(ptime))\n\n\n
def randomDate(start, end):\n\treturn strTimeProp(start, end, '%Y-%m-%d %H:%M:%S', random.random())\n\ndef rand_phone():\n\trange_start = 10**(7)\n\trange_end = (10**8)-1\n\tn = str(random.randint(range_start, range_end))\n\treturn '555-' + n[1:4] + '-' + n[4:]\n\n
def generate_call(lead, event, call_config, system):\n\tcurrent_date = datetime.now().strftime('%m/%d/%Y %I:%M:%S %p')\n\tlead_id = lead['account_lead_id']\n\tevent_dt = event['log_date']\n\n\tif (system==\"velocify\"):\n\t\tagent = random.choice(call_config['agents'])\n\t\ttemplate = Call({\n\t\t\t'attrib': {\n\t\t\t\t'AgentId': event['log_user_id'], \n\t\t\t\t'CallDuration': '{}:{}'.format(random.randrange(1,10), random.randrange(0,60)), \n\t\t\t\t'CallOrigin': random.choice(call_config['call_origin']), \n\t\t\t\t'CallSegment': random.choice(call_config['call_segment']), \n\t\t\t\t'CallTime': event['log_date'], \n\t\t\t\t'Campaign': lead['campaign_group'], \n\t\t\t\t'Group': fake.company_suffix() + ' ' + fake.company(), \n\t\t\t\t'LeadFullName': lead['name'], \n\t\t\t\t'LeadId': '{}'.format(lead_id), \n\t\t\t\t'ProspectPhone': rand_phone(), \n\t\t\t\t'RecordingUrl': '', \n\t\t\t\t'Result': event['log_subtype_name'],\n\t\t\t\t'InboundPhone': random.choice([lead['day_phone'], lead['evening_phone']]),\n\t\t\t\t'User': event['log_user_name'],\n\t\t\t\t'WaitTime': '{}'.format(random.randrange(0,2))\n\t\t\t},\n\t\t\t'tag': 'Call'\n\t\t})\n
\telif(system==\"incontact\"):\n\t\ttotal_duration = random.randint(120, 360)\n\t\tcall_dt = event_dt - timedelta(minutes=total_duration)\n\t\tsystem = random.choice(call_config['systems'])\n\n\t\tskill = random.choice(call_config['skill_names'])\n\n\t\tskill_split = skill.split('_')\n\t\tstate_called = skill_split[0]\n\t\tgroup_log = skill_split[1] if len(skill_split) > 1 else None\n\t\tcall_series = skill_split[2] if len(skill_split) > 2 else None\n\n\t\tcampaign_split = lead['campaign_group'].split('|') if lead['campaign_group'] != None else []\n\t\tsource_detail = campaign_split[0] if len(campaign_split) > 0 else None\n\t\tcall_origin = campaign_split[1] if len(campaign_split) > 1 else None\n\n\n
\t\ttemplate = {\n\t\t\t'account_lead_id': '{}'.format(lead_id),\n\t\t\t'call_datetime': '{}'.format(event['log_date']),\n\t\t\t'call_agent_id': '{}'.format(event['log_user_id']),\n\t\t\t'agent_time': '{}'.format(random.randint(2, total_duration)),\n\t\t\t'call_duration': '{}'.format(random.randint(2, total_duration)),\n\t\t\t'campaign_name': lead['campaign_group'],\n\t\t\t'inbound_phone': random.choice([lead['day_phone'], lead['evening_phone']]),\n\t\t\t'result': event['log_subtype_name'],\n\t\t\t'agent_profile_name': event['log_user_name'],\n\t\t\t'wait_time': '{}'.format(random.randint(0, int(total_duration / 3))),\n\t\t\t'in_queue': '{}'.format(random.randint(0, int(total_duration / 4))),\n\t\t\t'pre_queue': '{}'.format(random.randint(0, int(total_duration / 4))),\n\t\t\t'post_queue': '{}'.format(random.randint(0, int(total_duration / 5))),\n\t\t\t'routing_time': '{}'.format(random.randint(0, int(total_duration / 6))),\n\t\t\t'prospect_source_id': '{}'.format(random.randint(1, 100000000)),\n\t\t\t'disp_code': '{}'.format(event['log_subtype_id']),\n\t\t\t'agent_disposition': '{}'.format(random.randint(-100, 2000)),\n\t\t\t'sys_class': 
'{}'.format(system['code']),\n\t\t\t'sys_class_name': system['name'],\n\t\t\t'source_name': skill,\n\t\t\t'role_group': 'Team {}'.format(random.randint(104, 1000)),\n\t\t\t'call_disp_duration': '{}'.format(total_duration),\n\t\t\t'abandon': random.choice(['Y', 'N']),\n\t\t\t'contact_name': lead['name'],\n\t\t\t'call_series': call_series,\n\t\t\t'group_log': group_log,\n\t\t\t'state_called': state_called,\n\t\t\t'source_detail': source_detail,\n\t\t\t'call_origin': call_origin\n\t\t}\n\n\treturn template\n\nclass Call(object):\n\tdef __init__(self, *initial_data, **kwargs):\n\t\tfor dictionary in initial_data:\n\t\t\tfor key in dictionary:\n\t\t\t\tsetattr(self, key, dictionary[key])\n\t\tfor key in kwargs:\n\t\t\tsetattr(self, key, kwargs[key])\n\n\ndef main():\n\ttry: \n\n\t\taccount = os.environ['ACCOUNT']\n\t\tdays_ago = os.environ['DAYS_AGO']\n\n\t\tprint(\"::::: Starting Call generation\")\n\t\tprint(\"::::: Account: {}\".format(account))\n\t\tprint(\"::::: Days ago: {}\".format(days_ago))\n\n\t\tprint('::::: Connecting to database')\n\t\tconn = psycopg2.connect(\"dbname={} host={} port={} user={} password={}\".format(db_name, db_host, db_port, db_user, db_pass))\n\t\tcur = conn.cursor()\n\t\tprint('::::: Successfully connected to database')\n\n\t\tsql = \"\"\"\n\t\t\tSELECT id, name from accounts where name='{}'\n\t\t\"\"\".format(account)\n\t\tcur.execute(sql)\n\n\t\taccount_id = cur.fetchone()\n\t\tif(account_id):\n\t\t\taccount_id = account_id[0]\n\n\t\t\tsandbox_config = get_account_config(1, cache=False)\n\t\t\taccount_config = get_account_config(account_id, cache=False)\n\t\t\tsystem = account_config['calls_software'] if 'calls_software' in account_config else 'incontact'\n\n\n\t\t\tdate_dt = datetime.now() - timedelta(days=int(days_ago))\n\n\t\t\tagents = get_agents(cur, account_id)\n\t\t\tleads = get_leads(cur, account_id, date_dt)\n\t\t\tevents = get_events(cur, account_id, date_dt)\n\n\t\t\tcalls = []\n\t\t\tcall_counter = -1\n\t\t\tcontact_counter = -1\n\t\t\tfor event in events:\n\t\t\t\tlead = leads[event['account_lead_id']][0] if event['account_lead_id'] in leads else None\n\t\t\t\tif (lead):\n\t\t\t\t\tcall = generate_call(lead, event, sandbox_config['fake_calls_config'][system], system)\n\t\t\t\t\tcall['account_id'] = account_id\n\t\t\t\t\tcall['call_id'] = call_counter\n\t\t\t\t\tcall['contact_id'] = contact_counter\n\n\t\t\t\t\tcalls.append(call)\n\t\t\t\t\tcall_counter -= 1\n\t\t\t\t\tcontact_counter -= 1\n\n\t\t\tprint(\"::::: Generated {} calls\".format(len(calls)))\n\n\t\t\tunique = ['account_id', 'call_id']\n\t\t\tinsert_to_db(calls, 'call_logs', cur, conn, unique_columns=unique)\n\n\n\t\t\tcur.close()\n\t\t\tconn.close()\n\n\t\telse:\n\t\t\traise Exception(\"Account: {} not found!\".format(account))\n\n\texcept Exception as e:\n\t\tstack = traceback.format_exc()\n\n\t\tprint(\":::::: ERROR :::::::\")\n\t\tprint(e)\n\t\tprint(stack)\n\t\tprint(sys.exc_info())\n\n\t\traise Exception(e)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"Magikon/sam-lambda","sub_path":"functions/data_lead_generator/generate_calls.py","file_name":"generate_calls.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"40843621804","text":"import os\nimport sys\nimport FreeCAD as App, Mesh, Part\nfrom yaml import safe_load\n\nif App.GuiUp:\n import FreeCADGui as Gui\n\n\nif not sys.version_info.major == 3:\n print(\"This script requires Python 3.x\")\n print(\"You are using 
Python {}.{}.\".format(sys.version_info.major, sys.version_info.minor))\n sys.exit(1)\n\npythonopen = open\npredefined_colors = {\n 'red': (1.0, 0.0, 0.0),\n 'darkRed': (0.67, 0.0, 0.0),\n 'green': (0.0, 1.0, 0.0),\n 'darkGreen': (0.0, 0.67, 0.0),\n 'blue': (0.0, 0.0, 1.0),\n 'darkBlue': (0.0, 0.0, 0.67),\n 'yellow': (1.0, 1.0, 0.0),\n 'cyan': (0.0, 1.0, 1.0),\n 'purple': (1.0, 0.0, 1.0),\n 'white': (1.0, 1.0, 1.0),\n 'lightGray': (0.75, 0.75, 0.75),\n 'gray': (0.5, 0.5, 0.5),\n 'darkGray': (0.25, 0.25, 0.25),\n 'black': (0.0, 0.0, 0.0),\n}\n\n\ndef insertObject(directory, filename, document, group, attributes = None):\n if not os.path.isfile(os.path.join(directory, filename)):\n directory = os.path.expanduser('~/.FreeCAD/Mod/yaml-workspace')\n if not os.path.isfile(os.path.join(directory, filename)):\n print('ERROR: `{}` not found!'.format(filename))\n return\n if filename[-4:] in ['.stp', '.igs', 'iges', 'step']:\n return insertPart(directory, filename, document, group, attributes)\n insertMesh(directory, filename, document, group, attributes)\n\ndef insertMesh(directory, filename, document, group, attributes = None):\n mesh = Mesh.Mesh(u'{}/{}'.format(directory, filename))\n object_name = filename[:-4]\n if 'objectName' in attributes:\n object_name = attributes['objectName']\n new_mesh = document.addObject(\"Mesh::Feature\", object_name)\n new_mesh.Mesh = mesh\n if attributes:\n color = getColor(attributes)\n if color:\n new_mesh.ViewObject.ShapeColor = color\n transparency = getTransparency(attributes)\n if transparency:\n new_mesh.ViewObject.Transparency = transparency\n placement = getPlacement(attributes)\n rotation = getRotation(attributes)\n new_mesh.Placement = App.Placement(placement, rotation)\n group.addObject(new_mesh)\n\ndef insertPart(directory, filename, document, group, attributes = None):\n if not os.path.isfile(os.path.join(directory, filename)):\n directory = os.path.expanduser('~/.FreeCAD/Mod/yaml-workspace')\n if not os.path.isfile(os.path.join(directory, filename)):\n print('ERROR: `{}` not found!'.format(filename))\n return\n\n part = Part.Shape()\n part = Part.read(u'{}/{}'.format(directory, filename))\n object_name = filename[:-4]\n if 'objectName' in attributes:\n object_name = attributes['objectName']\n new_part = document.addObject(\"Part::Feature\", object_name)\n new_part.Shape = part\n if attributes:\n color = getColor(attributes)\n if color:\n new_part.ViewObject.ShapeColor = color\n transparency = getTransparency(attributes)\n if transparency:\n new_part.ViewObject.Transparency = transparency\n placement = getPlacement(attributes)\n rotation = getRotation(attributes)\n new_part.Placement = App.Placement(placement, rotation)\n group.addObject(new_part)\n\ndef insertSolid(name, document, group, attributes):\n if attributes['solid'] == 'cylinder':\n return insertCylinder(name, document, group, attributes)\n if attributes['solid'] == 'sphere':\n return insertSphere(name, document, group, attributes)\n if attributes['solid'] == 'ellipsoid':\n return insertEllipsoid(name, document, group, attributes)\n if attributes['solid'] == 'box':\n return insertBox(name, document, group, attributes)\n if attributes['solid'] == 'cone':\n return insertCone(name, document, group, attributes)\n if attributes['solid'] == 'torus':\n return insertTorus(name, document, group, attributes)\n if attributes['solid'] == 'prism':\n return insertPrism(name, document, group, attributes)\n if attributes['solid'] == 'wedge':\n return insertWedge(name, document, group, attributes)\n 
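# A hypothetical YAML fragment (an assumption for illustration, not taken from\n    # this repo) showing the attribute dict shape that insertSolid dispatches on:\n    #\n    #   Bodies:\n    #     my_cylinder:\n    #       solid: cylinder\n    #       radius: 5\n    #       height: 20\n    #       color: red\n    #       placement: [0, 0, 10]\n    #       rotationAngle: 90\n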
    print('ERROR: Unsupported solid type {}'.format(attributes['solid']))\n\n
def insertCylinder(name, document, group, attributes):\n    solid = document.addObject(\"Part::Cylinder\",\"Cylinder\")\n    solid.Label = name\n    solid.Radius = '{} mm'.format(attributes['radius'])\n    solid.Height = '{} mm'.format(attributes['height'])\n    if 'angle' in attributes:\n        solid.Angle = '{} deg'.format(attributes['angle'])\n    color = getColor(attributes)\n    if color:\n        solid.ViewObject.ShapeColor = color\n    transparency = getTransparency(attributes)\n    if transparency:\n        solid.ViewObject.Transparency = transparency\n    placement = getPlacement(attributes)\n    rotation = getRotation(attributes)\n    solid.Placement = App.Placement(placement, rotation)\n    group.addObject(solid)\n\n
def insertSphere(name, document, group, attributes):\n    solid = document.addObject(\"Part::Sphere\",\"Sphere\")\n    solid.Label = name\n    solid.Radius = '{} mm'.format(attributes['radius'])\n    if 'angle1' in attributes:\n        solid.Angle1 = '{} deg'.format(attributes['angle1'])\n    if 'angle2' in attributes:\n        solid.Angle2 = '{} deg'.format(attributes['angle2'])\n    if 'angle3' in attributes:\n        solid.Angle3 = '{} deg'.format(attributes['angle3'])\n    color = getColor(attributes)\n    if color:\n        solid.ViewObject.ShapeColor = color\n    transparency = getTransparency(attributes)\n    if transparency:\n        solid.ViewObject.Transparency = transparency\n    placement = getPlacement(attributes)\n    rotation = getRotation(attributes)\n    solid.Placement = App.Placement(placement, rotation)\n    group.addObject(solid)\n\n
def insertEllipsoid(name, document, group, attributes):\n    solid = document.addObject(\"Part::Ellipsoid\",\"Ellipsoid\")\n    solid.Label = name\n    solid.Radius1 = '{} mm'.format(attributes['radius1'])\n    solid.Radius2 = '{} mm'.format(attributes['radius2'])\n    solid.Radius3 = '{} mm'.format(attributes['radius3'])\n    if 'angle1' in attributes:\n        solid.Angle1 = '{} deg'.format(attributes['angle1'])\n    if 'angle2' in attributes:\n        solid.Angle2 = '{} deg'.format(attributes['angle2'])\n    if 'angle3' in attributes:\n        solid.Angle3 = '{} deg'.format(attributes['angle3'])\n    color = getColor(attributes)\n    if color:\n        solid.ViewObject.ShapeColor = color\n    transparency = getTransparency(attributes)\n    if transparency:\n        solid.ViewObject.Transparency = transparency\n    placement = getPlacement(attributes)\n    rotation = getRotation(attributes)\n    solid.Placement = App.Placement(placement, rotation)\n    group.addObject(solid)\n\n
def insertBox(name, document, group, attributes):\n    solid = document.addObject(\"Part::Box\",\"Box\")\n    solid.Label = name\n    solid.Length = '{} mm'.format(attributes['length'])\n    solid.Width = '{} mm'.format(attributes['width'])\n    solid.Height = '{} mm'.format(attributes['height'])\n    color = getColor(attributes)\n    if color:\n        solid.ViewObject.ShapeColor = color\n    transparency = getTransparency(attributes)\n    if transparency:\n        solid.ViewObject.Transparency = transparency\n    placement = getPlacement(attributes)\n    rotation = getRotation(attributes)\n    solid.Placement = App.Placement(placement, rotation)\n    group.addObject(solid)\n\n
def insertCone(name, document, group, attributes):\n    solid = document.addObject(\"Part::Cone\",\"Cone\")\n    solid.Label = name\n    solid.Radius1 = '{} mm'.format(attributes['radius1'])\n    solid.Radius2 = '{} mm'.format(attributes['radius2'])\n    solid.Height = '{} mm'.format(attributes['height'])\n    if 'angle' in attributes:\n        solid.Angle = '{} deg'.format(attributes['angle'])\n    color = getColor(attributes)\n    if color:\n        solid.ViewObject.ShapeColor = color\n    transparency 
= getTransparency(attributes)\n if transparency:\n solid.ViewObject.Transparency = transparency\n placement = getPlacement(attributes)\n rotation = getRotation(attributes)\n solid.Placement = App.Placement(placement, rotation)\n group.addObject(solid)\n\ndef insertTorus(name, document, group, attributes):\n solid = document.addObject(\"Part::Torus\",\"Torus\")\n solid.Label = name\n solid.Radius1 = '{} mm'.format(attributes['radius1'])\n solid.Radius2 = '{} mm'.format(attributes['radius2'])\n if 'angle1' in attributes:\n solid.Angle1 = '{} deg'.format(attributes['angle1'])\n if 'angle2' in attributes:\n solid.Angle2 = '{} deg'.format(attributes['angle2'])\n if 'angle3' in attributes:\n solid.Angle3 = '{} deg'.format(attributes['angle3'])\n color = getColor(attributes)\n if color:\n solid.ViewObject.ShapeColor = color\n transparency = getTransparency(attributes)\n if transparency:\n solid.ViewObject.Transparency = transparency\n placement = getPlacement(attributes)\n rotation = getRotation(attributes)\n solid.Placement = App.Placement(placement, rotation)\n group.addObject(solid)\n\ndef insertPrism(name, document, group, attributes):\n solid = document.addObject(\"Part::Prism\",\"Prism\")\n solid.Label = name\n solid.Polygon = int(attributes['polygon'])\n solid.Circumradius = '{} mm'.format(attributes['radius'])\n solid.Height = '{} mm'.format(attributes['height'])\n color = getColor(attributes)\n if color:\n solid.ViewObject.ShapeColor = color\n transparency = getTransparency(attributes)\n if transparency:\n solid.ViewObject.Transparency = transparency\n placement = getPlacement(attributes)\n rotation = getRotation(attributes)\n solid.Placement = App.Placement(placement, rotation)\n group.addObject(solid)\n\ndef insertWedge(name, document, group, attributes):\n solid = document.addObject(\"Part::Wedge\",\"Wedge\")\n solid.Label = name\n solid.Xmin = '{} mm'.format(attributes['xmin'])\n solid.Ymin = '{} mm'.format(attributes['ymin'])\n solid.Zmin = '{} mm'.format(attributes['zmin'])\n solid.X2min = '{} mm'.format(attributes['x2min'])\n solid.Z2min = '{} mm'.format(attributes['z2min'])\n solid.Xmax = '{} mm'.format(attributes['xmax'])\n solid.Ymax = '{} mm'.format(attributes['ymax'])\n solid.Zmax = '{} mm'.format(attributes['zmax'])\n solid.X2max = '{} mm'.format(attributes['x2max'])\n solid.Z2max = '{} mm'.format(attributes['z2max'])\n color = getColor(attributes)\n if color:\n solid.ViewObject.ShapeColor = color\n transparency = getTransparency(attributes)\n if transparency:\n solid.ViewObject.Transparency = transparency\n placement = getPlacement(attributes)\n rotation = getRotation(attributes)\n solid.Placement = App.Placement(placement, rotation)\n group.addObject(solid)\n\ndef getColor(json_data):\n color_data = json_data.get('color', None)\n if not color_data:\n return None\n if not isinstance(color_data, list):\n if color_data not in predefined_colors:\n raise Exception('Color data needs to be an array of RGB floats or one of predefined colors!!!')\n return predefined_colors[color_data]\n return (color_data[0], color_data[1], color_data[2])\n\ndef getTransparency(json_data):\n return json_data.get('transparency', None)\n\ndef getPlacement(json_data):\n placement = App.Vector(.0, .0, .0)\n placement_config = json_data.get('placement', None)\n if placement_config:\n placement = App.Vector(*placement_config)\n return placement\n\ndef getRotation(json_data):\n rotation_vector = json_data.get('rotationVector', (.0, .0, 1.0))\n rotation_angle = json_data.get('rotationAngle', 0.0)\n return 
App.Rotation(App.Vector(*rotation_vector), rotation_angle)\n\ndef open(filename):\n base_directory = os.path.dirname(filename)\n sub_directory = None\n print('Reading: {}'.format(filename))\n print('Base: {}'.format(base_directory))\n\n yaml_data = None\n with pythonopen(filename) as f:\n yaml_data = safe_load(f)\n\n if yaml_data is None:\n raise Exception(\"Error reading YAML file: {}\".format(filename))\n\n print('YML data: {}'.format(yaml_data))\n if 'settings' in yaml_data:\n if 'subDirectory' in yaml_data['settings']:\n base_directory += '/{}'.format(yaml_data['settings']['subDirectory'])\n print('Base: {}'.format(base_directory))\n\n if 'import' not in yaml_data:\n raise Exception('No \\'import\\' section in YAML file!!!')\n\n yaml_data = yaml_data['import']\n\n for document_name, document_data in yaml_data.items():\n document = App.newDocument(document_name)\n\n for group_name, group_data in document_data.items():\n document_group = document.addObject(\"App::DocumentObjectGroup\", group_name)\n\n if isinstance(group_data, str):\n insertObject(base_directory, group_data, document, document_group)\n continue\n\n if isinstance(group_data, list):\n for file in group_data:\n insertObject(base_directory, file, document, document_group)\n continue\n\n for file, file_data in group_data.items():\n if file == 'files':\n for f in file_data:\n insertObject(base_directory, f, document, document_group)\n continue\n if not isinstance(file_data, list):\n if 'solid' not in file_data:\n insertObject(base_directory, file, document, document_group, file_data)\n else:\n insertSolid(file, document, document_group, file_data)\n else:\n for file_data2 in file_data:\n insertObject(base_directory, file, document, document_group, file_data2)\n document.recompute()\n Gui.activeDocument().activeView().viewAxonometric()\n Gui.SendMsgToActiveView(\"ViewFit\")\n","repo_name":"Mambix/FreeCAD-yaml-workbench","sub_path":"Import_Yml.py","file_name":"Import_Yml.py","file_ext":"py","file_size_in_byte":13882,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"15892284771","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.forms import ModelForm, modelform_factory\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom .models import User, Listing, Bid, Comment, Watchlist, Category\n\n\ndef index(request):\n return render(request, \"auctions/index.html\" , {\n \"listings\" : Listing.objects.all()\n })\n\ndef all_listings(request):\n return render(request, \"auctions/all_listings.html\" , {\n \"listings\" : Listing.objects.all()\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = 
request.POST[\"username\"]\n email = request.POST[\"email\"]\n location = request.POST[\"Location\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.location = location\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n\n\ndef listing(request, listing_title):\n listing = Listing.objects.get(title = listing_title)\n \n if request.method == \"POST\": \n if 'new_bid' in request.POST:\n new_bid = request.POST[\"new_bid\"] \n bid_entry = Bid.objects.create(bid_value = new_bid, bidder=request.user, listing=listing)\n bid_entry.save()\n elif 'comment' in request.POST:\n content = request.POST[\"comment\"]\n comment = Comment.objects.create(content=content, listing=listing, author=request.user)\n comment.save() \n return HttpResponseRedirect(reverse(\"listing\",args=(listing_title,)))\n\n # get request \n else:\n categories = listing.category.all()\n comments = Comment.objects.filter(listing__title = listing_title)\n\n bid = Bid.objects.filter(listing__title = listing_title).last() \n if not bid:\n min_bid = listing.starting_bid\n else:\n min_bid = max(bid.bid_value+1,listing.starting_bid)\n\n watchlist_status = False\n if request.user.is_authenticated:\n watchlist, created = Watchlist.objects.get_or_create(user = request.user)\n if listing in watchlist.listings.all():\n watchlist_status = True\n\n return render(request, \"auctions/listing.html\",{\n \"listing\" : listing,\n \"categories\" : categories,\n \"bid\" : bid,\n \"min_bid\" : min_bid,\n \"comments\" : comments,\n \"watchlist_status\" : watchlist_status\n })\n\n@login_required\ndef newlisting(request):\n if request.method == \"POST\":\n form = Listingform(request.POST)\n\n if form.is_valid():\n newlisting = form.save(commit=False)\n newlisting.seller = request.user\n newlisting.save()\n form.save_m2m()\n return HttpResponseRedirect(reverse(\"listing\",args=(newlisting.title,)))\n else:\n return render(request, \"auctions/newlisting.html\",{\n \"form\": form\n })\n else:\n form = Listingform()\n return render(request, \"auctions/newlisting.html\",{\n \"form\": form\n })\n\n@login_required(login_url='auctions/login')\ndef edit_listing(request, listing_id):\n if request.method == \"POST\":\n listing = Listing.objects.get(pk = listing_id)\n if 'save' in request.POST:\n form = Listingform(request.POST, instance=listing)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"listing\",args=(listing.title,)))\n else:\n return render(request, \"auctions/edit_listing.html\",{\n \"form\":form,\n \"listing_id\":listing_id,\n \"listing\":listing\n })\n elif 'edit_listing' in request.POST:\n form = Listingform(instance = listing)\n return render(request, \"auctions/edit_listing.html\",{\n \"form\":form,\n \"listing_id\":listing_id,\n \"listing\":listing\n })\n else:\n # redirect to index for get requests\n return HttpResponseRedirect(reverse(\"index\"))\n\n@login_required\ndef useraccount(request):\n if request.method == \"POST\":\n form = Userform(request.POST, instance=request.user)\n if 
form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"useraccount\"))\n else:\n return render(request, \"auctions/account.html\",{\n \"form\":form\n })\n\n else:\n listings = request.user.listings.all()\n winnings = request.user.winnings.all()\n form = Userform(instance=request.user)\n return render(request, \"auctions/account.html\",{\n \"form\":form,\n \"listings\":listings,\n \"winnings\":winnings\n })\n\n@login_required\ndef edit_watchlist(request):\n #add or remove listing from watchlist\n if request.method == \"POST\":\n listing_title = request.POST.get(\"listing_title\",\"\")\n listing = Listing.objects.get(title = listing_title)\n watchlist, created = Watchlist.objects.get_or_create(user = request.user)\n if listing in watchlist.listings.all():\n watchlist.listings.remove(listing)\n else:\n watchlist.listings.add(listing)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required\ndef watchlist(request):\n watchlist, created = Watchlist.objects.get_or_create(user = request.user)\n return render(request, \"auctions/watchlist.html\",{\n \"watchlist\": watchlist.listings.all()\n })\n\n@login_required\ndef close_auction(request):\n if request.method == 'POST':\n listing_title = request.POST.get(\"listing_title\",\"\")\n listing = Listing.objects.get(title = listing_title)\n bid = Bid.objects.filter(listing__title = listing_title).last() \n if bid is not None:\n listing.winner = bid.bidder\n else:\n listing.winner = request.user\n listing.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\ndef categories(request):\n if request.method == \"POST\":\n form = Categoryform(request.POST)\n if form.is_valid():\n newcategory = form.save()\n newcategory.save()\n return HttpResponseRedirect(reverse(\"categories\"))\n else:\n return render(request, \"auctions/categories.html\",{\n \"form\": form\n })\n else: \n form = Categoryform()\n categories = Category.objects.all()\n return render(request, \"auctions/categories.html\",{\n \"categories\": categories,\n \"form\":form\n })\n\ndef category(request, category_name):\n listings = Listing.objects.filter(category__category = category_name)\n return render(request, \"auctions/category.html\",{\n \"listings\":listings,\n \"category_name\": category_name\n })\n\ndef newcategory(request):\n newcategory = request.POST[\"newcategory\"]\n category = Category.objects.create(category=newcategory)\n category.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\nclass Listingform(ModelForm):\n class Meta:\n model = Listing\n exclude = ['seller','winner']\n #fields = '__all__'\n\nclass Userform(ModelForm):\n class Meta:\n model = User\n fields = ['username','first_name','last_name','email','location','birth_date']\n\nclass Categoryform(ModelForm):\n class Meta:\n model = Category\n # fields = ['category']\n fields = '__all__'","repo_name":"nitinimage/cs50web","sub_path":"commerce/auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"2542427963","text":"# (3)******************line plot using matplotlib part 2****************\n\n# ////import matplotlib\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\n\n# ////////////previos data \n# ////////////creating variables x and y \ndays=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]\ntemp=[33,34,35,36,37,37.6,41,39,41,36,39,45,35,45,39]\n# -----2nd 
temperature\nl_temp=[34,35,36,37,38,36,39,36,40,41,43,42,45,44,43]\n\n# ////start plotting\n# plt.plot(days,temp)\n\n
# /////start using the style function\nstyle.use('ggplot')\n# /////when we want to change the background grid style we use the grid function\nplt.grid(color='b', linestyle='-',linewidth=2) \n\n
# /////when we want to change the color as we wish we use the color argument\n# plt.plot(days,temp,color='r')\n\n
# ////when we want to create dots or other symbols on the graph we use the marker argument\n# plt.plot(days,temp,color='g',marker='o')\n\n
# /////when we want to change the line shape we use the linestyle argument\n# plt.plot(days,temp,color='r',marker='o',linestyle='--')\n\n
# /////when we want to change the line width we use the linewidth argument\n# plt.plot(days,temp,color='r',marker='o',linestyle='--',linewidth=2)\n\n
# /////when we want to change the marker size we use the markersize argument\n# plt.plot(days,temp,color='r',marker='o',linestyle='--',linewidth=2,markersize=10)\n\n
# /////when we want to write this in a shorter way we use a format string as follows\n# plt.plot(days,temp,'go--',linewidth=2,markersize=10)\nplt.plot(days,temp,'go--',linewidth=2,markersize=10,label='Karachi Temperature')\nplt.plot(days,l_temp,'ro:',linewidth=2,markersize=10,label='Lahore Temperature')\n\n
# //////when we want to change the fontsize of the title and axis labels we use the fontsize argument\n# /////limit of the axes\nplt.axis([0,20,30,50])\n# /////creating the title\nplt.title(\"Karachi And Lahore Temperature\",fontsize=15)\n# ////creating x and y axis names\nplt.xlabel(\"Days\",fontsize=15)\nplt.ylabel(\"Temperature\",fontsize=20)\n
# //////when we want to create a name for each plotted line we use the legend function\n# /////this can also be set in the plot function using the label attribute \n# plt.legend(\"Tem Line\")\n# //////when we want to change the location of the legend we use loc in the legend function\n# plt.legend([\"Tem Line\"], loc=4)\nplt.legend(loc=4)\n# /////to show the graph \nplt.show()","repo_name":"tayyabmalik4/MatplotlibWithTayyab","sub_path":"3_line_plot_#2_matplotlib.py","file_name":"3_line_plot_#2_matplotlib.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19599839071","text":"import time\nimport ir\n\nimport pooler\nfrom osv import osv\nfrom report import report_sxw\n\n
class report_rappel(report_sxw.rml_parse):\n    def __init__(self, cr, uid, name, context):\n        super(report_rappel, self).__init__(cr, uid, name, context=context)\n        self.localcontext.update( {\n            'time' : time,\n            'ids_to_objects': self._ids_to_objects,\n            'adr_get' : self._adr_get,\n            'getLines' : self._lines_get,\n            'get_text' : self._get_text\n        })\n\n
    def _ids_to_objects(self, partners_ids):\n        pool = pooler.get_pool(self.cr.dbname)\n        all_partners = []\n        for partner in partners_ids:\n            partners = pool.get('account_followup.stat').browse(self.cr, self.uid, partner[2])\n            for par in partners:\n                all_partners.append(par.name)\n        return all_partners\n\n
    def _adr_get(self, partner, type):\n        res_partner = pooler.get_pool(self.cr.dbname).get('res.partner')\n        res_partner_address = pooler.get_pool(self.cr.dbname).get('res.partner.address')\n        adr = res_partner.address_get(self.cr, self.uid, [partner.id], [type])[type]\n        return adr and res_partner_address.read(self.cr, self.uid, [adr]) or [{}]\n\n
    def _lines_get(self, partner):\n        moveline_obj = pooler.get_pool(self.cr.dbname).get('account.move.line')\n        movelines = 
moveline_obj.search(self.cr, self.uid,\n            [('partner_id', '=', partner.id),\n             ('account_id.type', '=', 'receivable'),\n             ('reconcile_id', '=', False), ('state', '<>', 'draft')])\n        movelines = moveline_obj.read(self.cr, self.uid, movelines)\n        return movelines\n\n    def _get_text(self, partner, followup_id, context={}):\n        fp_obj = pooler.get_pool(self.cr.dbname).get('account_followup.followup')\n        fp_line = fp_obj.browse(self.cr, self.uid, followup_id).followup_line\n        li_delay = []\n        for line in fp_line:\n            li_delay.append(line.delay)\n        li_delay.sort(reverse=True)\n        text = \"\"\n        a = {}\n        partner_line = pooler.get_pool(self.cr.dbname).get('account.move.line').search(self.cr, self.uid, [('partner_id','=',partner.id),('reconcile_id','=',False)])\n        partner_delay = []\n        context={}\n        context.update({'lang': partner.lang})\n        for i in pooler.get_pool(self.cr.dbname).get('account.move.line').browse(self.cr, self.uid, partner_line, context):\n            for delay in li_delay:\n                if i.followup_line_id and str(i.followup_line_id.delay)==str(delay):\n                    text = i.followup_line_id.description\n                    a[delay] = text\n                    partner_delay.append(delay)\n        text = partner_delay and a[max(partner_delay)] or ''\n        if text:\n            text = text % {\n                'partner_name': partner.name,\n                'date': time.strftime('%Y-%m-%d'),\n                'company_name': fp_obj.browse(self.cr, self.uid, followup_id).company_id.name,\n                'user_signature': pooler.get_pool(self.cr.dbname).get('res.users').browse(self.cr, self.uid, self.uid, context).signature,\n            }\n        return text\n\n\nreport_sxw.report_sxw('report.account_followup.followup.print',\n        'res.partner', 'addons/account_followup/report/rappel.rml',\n        parser=report_rappel)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n","repo_name":"xrg/OpenERP-addons","sub_path":"account_followup/report/report_followup_print.py","file_name":"report_followup_print.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"8929991804","text":"# encoding: UTF-8\n\n# Download data from okex\nfrom datetime import datetime, timezone\n\nimport requests\nimport execjs\nimport traceback\nfrom vnpy.trader.app.ctaStrategy.ctaBase import CtaBarData, CtaTickData\n\nperiod_list = ['1min','3min','5min','15min','30min','1day','1week','1hour','2hour','4hour','6hour','12hour']\nsymbol_list = ['btc_usd','eth_usd','etc_usd','bch_usd','xrp_usd','eos_usd','btg_usd']\n\n\nclass OkexFutureData(object):\n\n    # ----------------------------------------------------------------------\n    def __init__(self, strategy):\n        \"\"\"\n        Constructor\n        :param strategy: upper-level strategy, mainly used to call strategy.writeCtaLog()\n        \"\"\"\n        self.strategy = strategy\n\n        # Set the number of HTTP request retries and create the connection session\n        self.session = requests.session()\n        self.session.keep_alive = False\n\n    def get_bars(self, symbol, period, callback, bar_is_completed=False,bar_freq=1, start_dt=None):\n        \"\"\"\n        Return K-line (candlestick) data\n        symbol: contract, e.g. btc:next_week:10\n        period: interval: 1min,3min,5min,15min,30min,1day,3day,1hour,2hour,4hour,6hour,12hour\n        \"\"\"\n        ret_bars = []\n        if ':' not in symbol:\n            self.strategy.writeCtaError(u'{} {} format must include the contract type, e.g. btc:next_week:10'.format(datetime.now(), symbol))\n            return False, ret_bars\n        s = symbol.split(':')\n        symbol_pair, contract_type = s[0],s[1]\n        if not symbol_pair.endswith('_usd'):\n            symbol_pair += '_usd'\n\n        if symbol_pair not in symbol_list:\n            self.strategy.writeCtaError(u'{} {} is not in the download list'.format(datetime.now(), symbol_pair))\n            return False, ret_bars\n\n        url = 
u'https://www.okex.com/api/v1/future_kline.do?symbol={}&type={}&contract_type={}'.format(symbol_pair, period,contract_type)\n        self.strategy.writeCtaLog('{} start downloading: {} {} data. URL:{}'.format(datetime.now(), symbol, period, url))\n        bars = []\n        content = None\n        try:\n            content = self.session.get(url).content.decode('gbk')\n            bars = execjs.eval(content)\n        except Exception as ex:\n            self.strategy.writeCtaError('exception in get:{},{},{}'.format(url,str(ex), traceback.format_exc()))\n            return False, ret_bars\n\n        for i, bar in enumerate(bars):\n            if len(bar) < 5:\n                self.strategy.writeCtaError('error when import bar:{}'.format(bar))\n                return False, ret_bars\n\n            add_bar = CtaBarData()\n            try:\n                add_bar.vtSymbol = symbol\n                add_bar.symbol = symbol\n                add_bar.datetime = datetime.fromtimestamp(bar[0] / 1000)\n                add_bar.date = add_bar.datetime.strftime('%Y-%m-%d')\n                add_bar.time = add_bar.datetime.strftime('%H:%M:%S')\n                add_bar.tradingDay = add_bar.date\n                add_bar.open = float(bar[1])\n                add_bar.high = float(bar[2])\n                add_bar.low = float(bar[3])\n                add_bar.close = float(bar[4])\n                add_bar.volume = float(bar[6]) # here index 5 is the volume, 6 is the volume converted to the BTC or LTC amount\n            except Exception as ex:\n                self.strategy.writeCtaError('error when convert bar:{},ex:{},t:{}'.format(bar, str(ex), traceback.format_exc()))\n                return False, ret_bars\n\n            if start_dt is not None and add_bar.datetime < start_dt:\n                continue\n            ret_bars.append(add_bar)\n            if callback is not None:\n                callback(add_bar, bar_is_completed, bar_freq)\n\n        return True, ret_bars\n\n\n\nclass TestStrategy(object):\n\n    def __init__(self):\n\n        self.minDiff = 1\n        self.shortSymbol = 'I'\n        self.vtSymbol = 'I99'\n\n        self.TMinuteInterval = 1\n    def addBar(self,bar,bar_is_completed, bar_freq):\n        print(u'tradingDay:{},dt:{},{} o:{},h:{},l:{},c:{},v:{}'.format(bar.tradingDay, bar.datetime,bar.vtSymbol, bar.open, bar.high,\n                                                             bar.low, bar.close, bar.volume))\n    def onBar(self, bar):\n        print(u'tradingDay:{},dt:{},{} o:{},h:{},l:{},c:{},v:{}'.format(bar.tradingDay,bar.datetime,bar.vtSymbol, bar.open, bar.high, bar.low, bar.close, bar.volume))\n\n    def writeCtaLog(self, content):\n        print(content)\n\n    def writeCtaError(self, content):\n        print(content)\n\nif __name__ == '__main__':\n\n    t = TestStrategy()\n    of = OkexFutureData(t)\n    of.get_bars(symbol='btc:next_week:10', period='1min', callback=t.addBar)\n\n","repo_name":"birforce/vnpy_crypto","sub_path":"vnpy/data/okex/okex_future_data.py","file_name":"okex_future_data.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"57"}
{"seq_id":"21323174191","text":"\"\"\"Zendesk Sell deals stream class.\"\"\"\nfrom typing import Iterable, Optional\n\nfrom singer_sdk.tap_base import Tap\n\nfrom tap_zendesk_sell.client import ZendeskSellStream\nfrom tap_zendesk_sell.streams import SCHEMAS_DIR\n\n\nclass DealsStream(ZendeskSellStream):\n    \"\"\"Zendesk Sell deals stream class.\"\"\"\n\n    name = \"deals\"\n    primary_keys = [\"id\"]\n\n    def __init__(self, tap: Tap):\n        \"\"\"Initialize the stream.\"\"\"\n        super().__init__(tap)\n        custom_fields_properties = self._update_schema(\n            {\n                \"deal\",\n            }\n        )\n        if custom_fields_properties:\n            self._schema[\"properties\"][\"custom_fields\"] = {\n                \"properties\": custom_fields_properties,\n                \"description\": \"Custom fields attached to a deal.\",\n            }\n\n    def get_child_context(self, record: dict, context: Optional[dict]) -> dict:\n        \"\"\"Return a child context for the stream.\"\"\"\n        return {\"deal_id\": record[\"id\"]}\n\n    def get_records(self, context: Optional[dict]) -> 
Iterable[dict]:\n \"\"\"Return a generator of row-type dictionary objects.\"\"\"\n finished = False\n page = 1\n while not finished:\n data = self.conn.deals.list(per_page=100, page=page, sort_by=\"id\")\n if not data:\n finished = True\n for row in data:\n yield row\n page += 1\n\n schema_filepath = SCHEMAS_DIR / \"deals.json\"\n\n\nclass AssociatedContacts(ZendeskSellStream):\n \"\"\"Zendesk Sell asociated contacts stream class.\"\"\"\n\n name = \"associated_contacts\"\n parent_stream_type = DealsStream\n\n def get_records(self, context: Optional[dict]) -> Iterable[dict]:\n \"\"\"Return a generator of row-type dictionary objects.\"\"\"\n finished = False\n page = 1\n while not finished:\n data = self.conn.associated_contacts.list(\n deal_id=context.get(\"deal_id\"), page=page, per_page=100 # type: ignore\n )\n if not data:\n finished = True\n for row in data:\n row[\"deal_id\"] = context.get(\"deal_id\") # type: ignore\n yield row\n page += 1\n\n schema_filepath = SCHEMAS_DIR / \"associated_contacts.json\"\n","repo_name":"leag/tap-zendesk-sell","sub_path":"tap_zendesk_sell/streams/deals.py","file_name":"deals.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"4460519008","text":"#!/usr/bin/python3 \r\n\r\nimport socket\r\nimport sys\r\nimport _thread\r\nimport time\r\n\r\nglobal __DEBUG__\r\n__DEBUG__=True\r\n\r\ndef connection_handler(host,port):\r\n while True:\r\n try:\r\n s=socket.socket()\r\n except:\r\n if __DEBUG__:\r\n print (f'Failed to create socket')\r\n sys.exit(1)\r\n\r\n s.connect((host, port))\r\n msg1=s.recv(1024).decode('utf-8')\r\n if __DEBUG__:\r\n print(msg1)\r\n msg2=s.recv(1024).decode('utf-8')\r\n if __DEBUG__:\r\n print(msg2)\r\n s.close()\r\n\r\n\r\nhost = \"10.1.0.4\"\r\n#host = socket.gethostname()\r\n\r\nthreads=5\r\nstarting_port = 1025\r\n\r\nfor i in range(threads):\r\n _thread.start_new_thread(connection_handler,(host,starting_port,))\r\n if __DEBUG__:\r\n print (f\"Started thread to handle connection {i}\")\r\n\r\nwhile True:\r\n pass","repo_name":"myers-dev/NVA_connections","sub_path":"supplementals/python/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"27604961721","text":"# Functions\ndef title_space():\n print(\"====================\")\n#############\ndef spacing():\n print(\"====================\")\n print(\"====================\")\n#############\ndef empty_space():\n print(\"\")\n#############\ndef result_space():\n print(\"============\")\n#############\ndef results_comb():\n result_space()\n empty_space()\n#############\n\n# String Format V0\ntitle_space()\nprint(\"String Format V0\")\ntitle_space()\n# User Input age\nuser_age_input = int(input(\"Please enter your age: \"))\n# User Input Name\nuser_name_input = str(input(\"Please enter your name: \"))\n# Completed Input\nuser_completed_input = '\"Your name is \" + {} + \" and your age is \" + {}'\n# Results\nresult_space()\nprint(user_completed_input.format(user_name_input, user_age_input))\nresults_comb()\n\n# String Format V1\ntitle_space()\nprint(\"String Format V1\")\ntitle_space()\n# User Input age\nuser_age_input = int(input(\"Please enter your age: \"))\n# User Input Name\nuser_name_input = str(input(\"Please enter your name: \"))\n# Completed Input\nuser_completed_input = \"Your name is {} , and your age is: {}\"\n# 
Results\nresult_space()\nprint(user_completed_input.format(user_name_input, user_age_input))\nresults_comb()\n\n# String Format V2\ntitle_space()\nprint(\"String Format V2\")\ntitle_space()\n# User Input age\n# user_age_input = int(input(\"Please enter your age: \"))\n# User Input Name\n# user_name_input = str(input(\"Please enter your name: \"))\n\n# User Input Discord Username/#\ndiscord_username_input = str(input(\"Please enter your discord username. Dont input #'s \\n \"\n \"Example -> username: \"))\ndiscord_username_number_input = int(input(\"Please enter your discord username #: \"))\n# Completed Input\nuser_completed_input = \"Your discord username is {}#{}\"\n# Results\nresult_space()\nprint(user_completed_input.format(discord_username_input, discord_username_number_input))\nresults_comb()\n","repo_name":"CyborgVillager/Intro_Python_Oct_and_Forward_on","sub_path":"Begin_anew/String/string_format.py","file_name":"string_format.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72141276659","text":"import logging\nfrom typing import Optional\n\nfrom .plugin_manager import PluginManager\nfrom .project import ForseeProject\n\nlog = logging.getLogger(__name__)\n\n\nclass Explorer:\n def __init__(self, project: ForseeProject):\n self.proj = project.angr_project\n initial_state = project.initial_state\n self.simgr = self.proj.factory.simgr(initial_state)\n self.plugin_manager = PluginManager(self.proj, self.simgr)\n for technique in project.techniques:\n log.debug(f\"Adding technique {technique}\")\n self.simgr.use_technique(technique)\n\n def run(self, max_steps: Optional[int] = None):\n \"\"\"\n Step the initial state forward by max_steps or until completion\n \"\"\"\n step_count = 0\n max_exceeded = False\n log.info(f\"Starting exploration at {hex(self.simgr.active[0].addr)}\")\n while not max_exceeded and not self.simgr.complete():\n if len(self.simgr.active) == 0:\n log.warning(\"No states in the active stash\")\n break\n self.simgr.step()\n self.plugin_manager.stepped(self.simgr)\n log.debug(f\"{self.simgr}\")\n log.debug(f\"Active: {self.simgr.active}\")\n step_count += 1\n if max_steps:\n max_exceeded = step_count >= max_steps\n if max_exceeded:\n self.simgr.move(from_stash=\"active\", to_stash=\"max_steps\")\n self.plugin_manager.complete(self.simgr)\n log.info(\"Exploration finished\")\n log.info(f\"Max steps exceeded: {max_exceeded}\")\n log.info(f\"Reached completed state: {self.simgr.complete()}\")\n if len(self.simgr.errored) > 0:\n log.error(f\"The following errors were reported:\")\n for error in self.simgr.errored:\n log.error(f\" {error}\")\n","repo_name":"CyFI-Lab-Public/Forecast","sub_path":"forsee/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"57"} +{"seq_id":"22954433096","text":"# Part I - Game.Py\n# game.py – this file/module should contain a class called Game. It should have 4 methods:\n# get_user_item(self) – Ask the user to select an item (rock/paper/scissors). Keep asking until the user has selected one of the items – use data validation and looping. Return the item at the end of the function.\n\n# get_computer_item(self) – Select rock/paper/scissors at random for the computer. Return the item at the end of the function. 
Use python’s random.choice() function (read about it online).\n\n# get_game_result(self, user_item, computer_item) – Determine the result of the game.\n# Parameters:\n# user_item – the user’s chosen item (rock/paper/scissors)\n# computer_item – the computer’s chosen (random) item (rock/paper/scissors)\n# Return either win, draw, or loss. Where win means that the user has won, draw means the user and the computer got the same item, and loss means that the user has lost.\n\n# play(self) – the function that will be called from outside the class (ie. from rock-paper-scissors.py). It will do 3 things:\n# Get the user’s item (rock/paper/scissors) and remember it\n\n# Get a random item for the computer (rock/paper/scissors) and remember it\n\n# Determine the results of the game by comparing the user’s item and the computer’s item\n# Print the output of the game; something like this: “You selected rock. The computer selected paper. You lose”, “You selected scissors. The computer selected scissors. You drew!”\n\n# Return the results of the game as a string: win;draw;loss;, where win means that the user has won, draw means the user and the computer got the same item, and loss means that the user has lost.\nimport random\nclass Game():\n \n def __init__(self) -> None:\n pass\n\n def get_user_item(self):\n user = input(\"Choose between Rock, paper and scissors to play\")\n # print (user)\n \n rock = 'rock'\n paper = 'paper'\n scissors = 'scissors'\n while True:\n if user == rock or user == paper or user == scissors:\n break\n else:\n user = input(\"Choose Rock, paper or scissors !!!\")\n return user\n def get_computer_item(self):\n rock = 'rock'\n paper = 'paper'\n scissors = 'scissors'\n list = [rock,paper,scissors]\n \n random1 = random.choice(list)\n # print(random1)\n return random1\n \n def get_game_result(self, user_item, computer_item):\n \n rock = 'rock'\n paper = 'paper'\n scissors = 'scissors'\n if user_item==rock and computer_item == paper:\n print (\"you choosed rock , computer won\")\n if user_item==paper and computer_item == paper:\n print (\"you choosed paper , tie\")\n if user_item==scissors and computer_item == paper:\n print (\"you choose scissors , you won\")\n if user_item==rock and computer_item == scissors:\n print ('you choosed rock , you won')\n if user_item==rock and computer_item == rock:\n print ('you choosed rock , tie')\n if user_item==paper and computer_item == rock:\n print ('you choosed paper , you won')\n if user_item==paper and computer_item == scissors:\n print (\"you choose paper , computer won\")\n if user_item==scissors and computer_item == scissors:\n print (\"you cjoosed scissors , tie\")\n if user_item==scissors and computer_item == rock:\n print (\"You choosed scissors, computer won\")\n \n def play(self,user_item,computer_item):\n \n rock = 'rock'\n paper = 'paper'\n scissors = 'scissors'\n remember= res\n print(remember)\n rememberRandom = res2\n print(rememberRandom)\n condition =res3\n \n win = 0\n draw = 0\n loss = 0\n \n if user_item==rock and computer_item == paper:\n loss+=1\n print(f\"lose:{loss}\")\n if user_item==paper and computer_item == paper:\n draw+=1\n print(f\"draw:{draw}\")\n if user_item==scissors and computer_item == paper:\n win+=1\n print(f\"win:{win}\")\n if user_item==rock and computer_item == scissors:\n win+=1\n print(f\"win:{win}\")\n if user_item==rock and computer_item == rock:\n print(f\"draw:{draw}\")\n if user_item==paper and computer_item == rock:\n win+=1\n print(f\"win:{win}\")\n if user_item==paper and computer_item == 
scissors:\n            loss+=1\n            print(f\"lose:{loss}\")\n        if user_item==scissors and computer_item == scissors:\n            print(f\"draw:{draw}\")\n        if user_item==scissors and computer_item == rock:\n            loss+=1\n            print(f\"lose:{loss}\")\n        \n        \n\n    \n\n\n\ngame1 = Game()\n# game1.get_user_item()\n# game1.get_computer_item()\nres = game1.get_user_item()\nres2 = game1.get_computer_item()\nres3 = game1.get_game_result(res,res2)\n# game1.get_game_result(res,res2)\ngame1.play(res,res2)\n\n","repo_name":"Roycoder1/ALL","sub_path":"W5D1/DAY5/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"9822961376","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport sys\nimport time\nimport Adafruit_DHT\nsys.path.append('./lib_oled96')\n\n# Import libraries\nfrom lib_oled96 import ssd1306\nfrom smbus import SMBus\n\n# Set up the display\ni2cbus = SMBus(1) # 0 = Raspberry Pi 1, 1 = Raspberry Pi > 1\noled = ssd1306(i2cbus)\n\n# A few shortcuts to declutter the code\ndraw = oled.canvas\n\n# Clear the display at startup\noled.cls()\noled.display()\n\n# Draw shapes\ndraw.line((4, 2, 20, oled.height-1), fill=1) # diagonal line\ndraw.rectangle((22, 2, 30, oled.height-1), outline=1, fill=0) # rectangle\ndraw.rectangle((32, 2, 40, oled.height-1), outline=0, fill=1) # rectangle, filled\ndraw.ellipse((42, 2, 60, oled.height-1), outline=1, fill=0) # ellipse\ndraw.line((76, 2, 76, 63), fill=1) # vertical line\ndraw.arc((62, 2, 90, 63), -90, 90, fill=1) # arc\ndraw.polygon([(92, 63), (110, 63), (101, 2)], outline=1, fill=0) # polygon (triangle)\n\n# Write the output to the display\noled.display()\n","repo_name":"luisanadal/pi-robot","sub_path":"oled/test_display2.py","file_name":"test_display2.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"2583396724","text":"import os\nimport tcfl.tc\nimport tcfl.tl\n\n@tcfl.tc.tags(**tcfl.tl.zephyr_tags())\n# Ask for a target that defines a zephyr_board field, which indicates\n# it can run the Zephyr OS\n@tcfl.tc.target(\"zephyr_board\",\n                app_zephyr = os.path.join(tcfl.tl.ZEPHYR_BASE,\n                                          \"samples\", \"hello_world\"))\nclass _test(tcfl.tc.tc_c):\n    @staticmethod\n    def eval(target):\n        target.expect(\"Hello World! 
%s\" % target.kws['zephyr_board'])\n","repo_name":"intel/tcf","sub_path":"examples/test_zephyr_hello_world.py","file_name":"test_zephyr_hello_world.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"57"} +{"seq_id":"71570900337","text":"import time\r\n\r\nnum = int(input(\"Enter a positive number: \"))\r\ntotal1 = 0\r\nt0 = time.time()\r\nfor i in range(1, num +1):\r\n total1 = total1 + i\r\nt1 = time.time()\r\n\r\ntotal2 = num*(num + 1)/2\r\n\r\nt2 = time.time()\r\n\r\nt3 = time.time()\r\n\r\nprint(\"t1 - t0 = \", str(t1 - t0))\r\nprint(\"t3 - t2 = \", str(t3 - t2))\r\n","repo_name":"AnthonyJamez12/Web-Portfolio-","sub_path":"Programming Principles I/4 Week/Wenesday (Time).py","file_name":"Wenesday (Time).py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"31976760410","text":"def getFileName():\n fName=input(\"Enter file name: \")\n if (fName==\"\"):\n fName=\"friends.txt\"\n if (fName.endswith(\".txt\")==False):\n fName+=\".txt\"\n return fName\nfileName=getFileName()\nfin=open(fileName,\"r\")\nwhile True:\n name=fin.readline()\n if name==\"\":\n break\n name=name.strip()\n phone=fin.readline().strip()\n print(\"{:30} {:>20}\".format(name,phone))\nfin.close()\n","repo_name":"khurrams-sh/ICS3U","sub_path":"Unit 5/ReadNamesAndNumbers.py","file_name":"ReadNamesAndNumbers.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1615349045","text":"# ?procedural programming = the method of programming what we have done till today\n\n\n\n#object oreinted programming = the ystemetic way of programming\n\ndef hello():\n print(\"hello\")\n\nhello()\nsales = 6000\nprofit1 = 2000\nad1 = 1000\n# rejeev.sales\n\nsales2 = 6000\nprofit2 = 20000\nad2 = 1000\n# vikran.sales\n\n\nsales3 = 6000 \nprofit3 = 2000\nad3 = 2000\n# samir.sales\n\n\n# RailwayForm --> Class(template)\n# class is a template for crating the objects\n# harry --> harry ko info vako form --> objects(entity)\n# tom --> tom ko info vako form --> objects(entity)\n# shubhum --> shubhum ko info vako form --> obbjects(entity)\n\n\n# shubhum.changeName('shubhi') --> methods from class\n# shubhum.phonenumber() --> methods\n\n\n\n# features\n# encapsulation - capsule ko vitra banda gardini(shubhu.changeName)\n#inheritance = to make a similar type of class while adding some off the featires(extend), applies dry principle -- RailwaySpecialForm\n#polymorphism + one things multiple forms\n\n# oop is used to map real world entity, to understand the program more better\n","repo_name":"ShadChad/100-Days-Of-Python","sub_path":"47th_day.py","file_name":"47th_day.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19556513999","text":"# Mastermind.py\n\nimport random\n\ndef give_feedback(lguess, answer):\n \"\"\"\n return X's and O's to give feedback on each guess\n \"\"\"\n correct = ''\n idx_list = []\n for i in range(4):\n if lguess[i] == answer[i]:\n correct = correct + 'X'\n else:\n idx_list.append(i)\n for j in idx_list:\n for k in idx_list:\n if j != k and lguess[j] == answer[k]:\n correct = correct + 'O'\n answer[k] = ''\n break\n return correct\n\nif __name__ == \"__main__\":\n tries = 10\n colors = ['G', 'R', 'B', 'W', 'Y', 'P']\n answer = 
[random.choice(colors) for _ in range(4)]\n print(\"\\nInstructions: Guess the right combination of 4 colors in 10 tries\")\n print(\"X means right color, right space | O means right color, wrong space\")\n print(\"Your color choices are Green, Red, Blue, White, Yellow, and Purple\\n\\n\")\n while tries > 0:\n guess = raw_input(\"Enter 4 colors from G/R/B/W/Y/P, seperate with a space: \")\n lguess = guess.split(' ')\n if len(lguess) == 4 and set(lguess).issubset(set(colors)):\n tries = tries - 1\n if lguess == answer:\n print(\"you win!!!\")\n break\n else:\n feedback = give_feedback(lguess, answer[:])\n print(\"Guess again: here is your feedback: \" + feedback)\n print(str(tries) + \" tries left \\n\")\n else:\n print(\"Wrong Format, guess again \\n\")\n\n print(\"Answer: \" + ' '.join(answer))\n","repo_name":"MikeTam1021/WEP_class","sub_path":"student_C6/mastermind.py","file_name":"mastermind.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15564110148","text":"#!/opt/relocatable-python/bin/python3\n\nimport argparse\nimport json\nimport time\nimport requests\nimport credentials\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-t\", \"--tag\", type=int, required=True, action=\"store\", help='input tag ID (from the WS1 URL) of devices to cutover from Teem/EventBoard to Zoom Rooms.')\nargs = parser.parse_args()\n\ntagID = args.tag\n\nprint(tagID)\n\napi_url_base = \"https://domain.awmdm.com/api/mdm\"\n\nget_device_id = \"{}/devices?searchby=DeviceId&id=\".format(api_url_base)\n\nremove_asam_eb_profile = \"{}/profiles/40671/remove\".format(api_url_base)\n\ninstall_sam_zr_profile = \"{}/profiles/53133/install\".format(api_url_base)\n\nremove_sam_zr_profile = \"{}/profiles/53133/remove\".format(api_url_base)\n\ninstall_asam_zr_profile = \"{}/profiles/45614/install\".format(api_url_base)\n\nquery_ws1_url = \"{}/devices/commands?command=DeviceQuery&searchBy=DeviceId&id=\".format(api_url_base)\n\nget_tag_devices = \"{}/tags/%s/devices\".format(api_url_base) %(tagID)\n\nadd_zrct_device_tag = \"{}/tags/14386/adddevices\".format(api_url_base)\n\nremove_zrct_device_tag = \"{}/tags/14386/removedevices\".format(api_url_base)\n\nadd_zrc_device_tag = \"{}/tags/14387/adddevices\".format(api_url_base)\n\nws1_headers = credentials.ws1API\n\nresults = []\n\nresponse = requests.get(get_tag_devices, headers=ws1_headers).json()\nfor device in response['Device']:\n results.append(str(device['DeviceId']))\n\ndef deviceQuery(results):\n for device in results:\n response = requests.post(query_ws1_url+device, headers=ws1_headers)\n print(device,response)\n\ndef removeEbASAM(results):\n for device in results:\n payload = json.dumps({\n \"DeviceId\": device\n })\n response = requests.post(remove_asam_eb_profile, headers=ws1_headers, data=payload)\n print(device,response)\n\ndef installZrcSAM(results):\n for device in results:\n payload = json.dumps({\n \"DeviceId\": device\n })\n response = requests.post(install_sam_zr_profile, headers=ws1_headers, data=payload)\n print(device,response)\n\ndef removeZrcSAM(results):\n for device in results:\n payload = json.dumps({\n \"DeviceId\": device\n })\n response = requests.post(remove_sam_zr_profile, headers=ws1_headers, data=payload)\n print(device,response)\n\ndef installZrcASAM(results):\n for device in results:\n payload = json.dumps({\n \"DeviceId\": device\n })\n response = requests.post(install_asam_zr_profile, headers=ws1_headers, data=payload)\n 
print(device,response)\n\ndef addZRCTDeviceTag(results):\n for device in results:\n payload = json.dumps({\n \"BulkValues\": {\n \"Value\": [\n device\n ]\n }\n })\n response = requests.post(add_zrct_device_tag, headers=ws1_headers, data=payload)\n print(device,response)\n\ndef removeZRCTDeviceTag(results):\n for device in results: \n payload = json.dumps({\n \"BulkValues\": {\n \"Value\": [\n device\n ]\n }\n })\n response = requests.post(remove_zrct_device_tag, headers=ws1_headers, data=payload)\n print(device,response) \n\ndef addZRCDeviceTag(results):\n for device in results:\n payload = json.dumps({\n \"BulkValues\": {\n \"Value\": [\n device\n ]\n }\n })\n response = requests.post(add_zrc_device_tag, headers=ws1_headers, data=payload)\n print(device,response)\n\n# Remove ASAM profile for EventBoard\nprint(\"Removing EventBoard ASAM profile...\")\nremoveEbASAM(results)\n\n# Query all devices\nprint(\"Querying devices...\")\ndeviceQuery(results)\n\n# Add Zoom Rooms Controller - Temporary tag\nprint(\"Installing Zoom Rooms Controller - Temporary device tag...\")\naddZRCTDeviceTag(results)\n\n# Query all devices\nprint(\"Querying devices...\")\ndeviceQuery(results)\n\n# Sleep 30 seconds\nprint('Sleeping 30 seconds...')\ntime.sleep(30)\n\n# Install Zoom Rooms Controller SAM profile\nprint(\"Installing Zoom Rooms Controller SAM Profile...\")\ninstallZrcSAM(results)\n\n# Query all devices\nprint(\"Querying devices...\")\ndeviceQuery(results)\n\n# Sleep 60 seconds\nprint('Sleeping 30 seconds...')\ntime.sleep(30)\n\n# Remove Zoom Rooms Controller SAM profile\nprint(\"Removing Zoom Rooms Controller SAM profile...\")\nremoveZrcSAM(results)\n\n# Remove Zoom Rooms Controller - Temporary tag\nprint(\"Removing Zoom Rooms Controller - Temporary device tag...\")\nremoveZRCTDeviceTag(results)\n\n# Query all devices\nprint(\"Querying devices...\")\ndeviceQuery(results)\n\n# Sleep 30 seconds\nprint('Sleeping 30 seconds...')\ntime.sleep(30)\n\n# Add Zoom Rooms Controller tag\nprint(\"Installing Zoom Rooms Controller device tag...\")\naddZRCDeviceTag(results)\n\n# Query all devices\nprint(\"Querying devices...\")\ndeviceQuery(results)\n\n# Install Zoom Rooms Controller ASAM profile\nprint(\"Installing Zoom Rooms Controller ASAM Profile...\")\ninstallZrcASAM(results)\n\n# Query all devices\nprint(\"Querying devices...\")\ndeviceQuery(results)\n\n# Sleep 30 seconds\nprint('Sleeping 30 seconds...')\ntime.sleep(30)\n\nprint(\"Cutover complete.\")","repo_name":"jamesfkane/workspace-one-api-scripts","sub_path":"eventboard-zoom-cutover/eventboard-zoom-cutover.py","file_name":"eventboard-zoom-cutover.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8800949679","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def __init__(self):\n self.ans = float(\"Inf\")\n self.prev = None\n\n def dfs(self, root):\n if root is None:\n return\n\n #prev = root\n self.dfs(root.left)\n if self.prev is not None:\n self.ans = min(abs(root.val - self.prev.val), self.ans)\n self.prev = root\n self.dfs(root.right)\n\n return\n\n def minDiffInBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n\n if root is None:\n return 0\n\n prev = None\n self.dfs(root)\n return self.ans\n\n\nif __name__ == \"__main__\":\n s = Solution()\n t = TreeNode(27)\n t.right = TreeNode(34)\n t.right.right = 
TreeNode(58)\n    t.right.right.left = TreeNode(50)\n    t.right.right.left.left = TreeNode(44)\n    print(s.minDiffInBST(t))","repo_name":"shants/LeetCodePy","sub_path":"783.py","file_name":"783.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"9542297673","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\nx = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\ny = [0.0, 0.8, 0.9, 0.1, -0.8, -1.0]\nplt.scatter(x, y)\n# plt.show()\n\n# x = np.array(x)\n# y = np.array(y)\nparams = np.polyfit(x, y, 1)\n\n\nprint(\"params using numpy\", params)\nx = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\n\ndef func(x, m, c):\n\treturn m*x + c\n\nplt.plot(x, [func(x_sub, *params) for x_sub in x])\n\n\n\nx = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\ny = [0.0, 0.8, 0.9, 0.1, -0.8, -1.0]\nparams, extras = curve_fit(func, x, y)\n\nprint(\"params using curvefit\", params)\n\nplt.show()\n\n\n","repo_name":"lionellloh/2DStuff","sub_path":"Numpy Polyfit.py","file_name":"Numpy Polyfit.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"38119079585","text":"\nfrom threading import Thread\n\nfrom flask import current_app, render_template\nfrom flask_mail import Mail, Message\n\nmail = Mail()\n\n\ndef send_email(to, subject, template, **kwargs):\n    msg = Message('[共享书籍]' + ' ' + subject,\n                  sender=current_app.config['FLASKY_MAIL_SENDER'],\n                  recipients=[to])\n    # msg.body = render_template(template , **kwargs)\n    msg.html = render_template(template , **kwargs)\n    app = current_app._get_current_object()\n    thr = Thread(target=send, args=[app, msg])\n    thr.start()\n    # mail.send(msg)\n#\ndef send(app, msg):\n    with app.app_context():\n        try:\n            mail.send(msg)\n        except Exception as e:\n            print(e)\n","repo_name":"atraraxia/sharebook","sub_path":"app/helper/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"31932633723","text":"import bob.ip.base\nimport numpy\nimport math\n\n# create test image\nimage = numpy.zeros((21,21))\nimage[5:16, 5:16] = 1\n\n# perform Sobel filtering\nsobel = bob.ip.base.sobel(image)\n\n# compute direction-independent and direction-dependent results\nabs_sobel = numpy.sqrt(numpy.square(sobel[0]) + numpy.square(sobel[1]))\nangle = 45.\nrot_sobel = math.sin(angle*math.pi/180) * sobel[0] + math.cos(angle*math.pi/180) * sobel[1]\n\n# plot results\nfrom matplotlib import pyplot\npyplot.figure(figsize=(20,4))\npyplot.subplot(151) ; pyplot.imshow(image, cmap='gray') ; pyplot.title('Image')\npyplot.subplot(152) ; pyplot.imshow(sobel[0], cmap='gray') ; pyplot.title('Sobel - Y')\npyplot.subplot(153) ; pyplot.imshow(sobel[1], cmap='gray') ; pyplot.title('Sobel - X')\npyplot.subplot(154) ; pyplot.imshow(abs_sobel, cmap='gray') ; pyplot.title('Sobel - Abs')\npyplot.subplot(155) ; pyplot.imshow(rot_sobel, cmap='gray') ; pyplot.title('Sobel - %3.0f$^\\\circ$'%angle)\npyplot.show()\n\n","repo_name":"bioidiap/bob.ip.base","sub_path":"doc/plot/sobel.py","file_name":"sobel.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"}
{"seq_id":"26056533025","text":"#Programming section 02, group 8: 2016746 조수예, 2017274 조대식, 2016691 박선하, 1418083 최희태\r\n#Presenter: 2016746 조수예\r\n\r\nmenu={} #declare as a dictionary variable\r\nmenu['Americano'] = ': 가격 3000원' 
#assign the key and value pairs in the dictionary\r\nmenu['Ice Americano'] = ': 가격 3500원'\r\nmenu['Cappuccino'] = ': 가격 4000원'\r\nmenu['Caffe Latte'] = ': 가격 4500원'\r\nmenu['Espresso'] = ': 가격 3600원'\r\n\r\nfor i in menu.keys() : #use a for loop to print the keys and values one by one\r\n    print(\"%s %s\" % (i, menu[i])) #prints i repeatedly\r\n\r\nwhile(True) : #set while to True for an infinite loop\r\n    mymenu = input(\"위의 메뉴를 선택하시오.\")\r\n    if mymenu in menu : #runs when the input matches a key in the dictionary\r\n        print(\"<%s>는 <%s>입니다 결재 부탁드립니다.\" %(mymenu,menu.get(mymenu)[5:9])) #menu.get(mymenu) is the same as the key lookup menu[mymenu]\r\n        #the string is indexed at 5 and 9, so prices of 10000 won or more will not print correctly.\r\n    elif mymenu.upper() == 'END' : #type end to stop when the loop keeps repeating.\r\n        break #entering END triggers the break.\r\n    else : #if a word that is not a key in the dictionary is entered, print the message below\r\n        print(\"미안합니다. %s은 메뉴에 없습니다.\" %(mymenu)) #the value of mymenu is used when the word is not in the dictionary\r\n\r\nprint(menu.keys())\r\n\r\n","repo_name":"XOXOT/Python_Study","sub_path":"python_2020/week12/평가문제(조별과제)_1.py","file_name":"평가문제(조별과제)_1.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"11513432763","text":"\"\"\"\nThis program finds out the probability of drawing a combination\nof `x` number of balls out of 'y' numbers\n\nProject No.Name: 4.7_Lottery Probabilities\nName: Mijanur Rahman (mijanur.m.rahman@tuni.fi)\nStudent ID: 151762776\n\"\"\"\n\n\ndef factorial(number):\n    \"\"\"\n    This function returns the factorial of a given number\n    :param number: given number, `int`\n    :return: factorial number as `int`\n    \"\"\"\n    factorial_of_number = 1\n    for i in range(1, number + 1):\n        factorial_of_number = factorial_of_number * i\n    return factorial_of_number\n\n\ndef combinations(total_number, drawn_number):\n    \"\"\"\n    This function determines the total combinations of\n    `drawn_number` out of `total_number`\n    :param total_number: `int`, is the superset of drawn_number\n    :param drawn_number: 'int', is the given smaller set\n    from `total_number`\n    :return: `int`, the total possibilities\n    \"\"\"\n    return int(factorial(total_number) /\n               ((factorial(total_number - drawn_number)) *\n                factorial(drawn_number)))\n\n\ndef main():\n    total_balls = int(input(\"Enter the total number of lottery balls: \"))\n    drawn_balls = int(input(\"Enter the number of the drawn balls: \"))\n\n    if total_balls > 0 and drawn_balls > 0:\n        if drawn_balls < total_balls:\n            print(f\"The probability of guessing all {drawn_balls} balls \"\n                  f\"correctly is \"\n                  f\"1/{combinations(total_balls, drawn_balls)}\")\n        else:\n            print(\"At most the total number of balls can be drawn.\")\n    else:\n        print(\"The number of balls must be a positive number.\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"mrahman-projects/comp.cs.100","sub_path":"week04/4.7_lottery_probabilities.py","file_name":"4.7_lottery_probabilities.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"37807338175","text":"from django.shortcuts import render\nfrom .models import Post\nfrom django.http import HttpResponseRedirect\n# Create your views here.\ndef index(request):\n    post = Post.objects.filter(publicate=True)\n    return render(request, 'index.html', {'posts': post})\n\n\ndef formCreate(request):\n    return render(request, 'formCreate.html')\n\n\ndef formEdit(request, id):\n    post = Post.objects.get(id=id)\n    return render(request, 'formEdit.html', {'id': id, 'post': post})\n\n\ndef create(request):\n    post = Post()\n    post.name = request.POST.get('name')\n    post.text = 
request.POST.get('text')\n post.category = request.POST.get('category')\n post.image_url = request.POST.get('image_url')\n post.save()\n return HttpResponseRedirect(\"/\")\n\n\ndef edit(request, id):\n post = Post.objects.get(id=id)\n post.name = request.POST.get('name')\n post.text = request.POST.get('text')\n post.category = request.POST.get('category')\n post.image_url = request.POST.get('image_url')\n post.save()\n return HttpResponseRedirect(\"/\")\n\n\ndef delete(request, id):\n post = Post.objects.get(id=id)\n post.delete()\n return HttpResponseRedirect(\"/\")","repo_name":"PedroRusi/Posts","sub_path":"project/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72236747057","text":"import pygame\n\nclass Button():\n def __init__(self, x, y, image, scale):\n width = image.get_width()\n height = image.get_height()\n self.image = pygame.transform.scale(image, (int(width * scale), int(height * scale)))\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n\n def draw(self, surface):\n action = False\n # Get mouse position\n pos = pygame.mouse.get_pos()\n\n if self.rect.collidepoint(pos):\n if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:\n self.clicked = True\n action = True\n\n if pygame.mouse.get_pressed()[0] == 0:\n self.clicked = False\n\n surface.blit(self.image, (self.rect.x, self.rect.y))\n\n return action\n\ndef init():\n global screen\n pygame.init()\n # Initialing RGB Color \n screen = pygame.display.set_mode((600, 500))\n\n # Changing surface color\n color = (255, 255, 255)\n screen.fill(color)\n pygame.display.flip()\n\n # Set caption\n pygame.display.set_caption('Donatello Tracking Drone')\n\n # Buttons\n launch_img = pygame.image.load('launch.png').convert_alpha()\n land_img = pygame.image.load('land.png').convert_alpha()\n right_img = pygame.image.load('right.png').convert_alpha()\n left_img = pygame.image.load('left.png').convert_alpha()\n take_photo_img = pygame.image.load('camera.png').convert_alpha()\n up_img = pygame.image.load('up.png').convert_alpha()\n down_img = pygame.image.load('down.png').convert_alpha()\n yaw_right_img = pygame.image.load('yaw-right.png').convert_alpha()\n yaw_left_img = pygame.image.load('yaw-left.png').convert_alpha()\n\n # Instances\n global launch_button\n global land_button\n global right_button\n global left_button\n global take_photo_button\n global up_button\n global down_button\n global yaw_right_button\n global yaw_left_button\n\n launch_button = Button(0, 330, launch_img, 0.7)\n land_button = Button(370, 330, land_img, 0.7)\n right_button = Button(450, 180, right_img, 0.8)\n left_button = Button(50,180, left_img, 0.8)\n take_photo_button = Button(230, 170, take_photo_img, 0.7)\n up_button = Button(250, 0, up_img, 0.8)\n down_button = Button(250, 350, down_img, 0.8)\n yaw_right_button = Button(440, 10, yaw_right_img, 0.8)\n yaw_left_button = Button(50, 10, yaw_left_img, 0.8)\n\ndef getKey(keyName):\n ans = False\n for eve in pygame.event.get(): pass\n keyInput = pygame.key.get_pressed()\n myKey = getattr(pygame, 'K_{}'.format(keyName))\n if keyInput[myKey]:\n ans = True\n pygame.display.update()\n\n return ans\n\ndef getButtonPress(keyName):\n action = ''\n for eve in pygame.event.get(): pass\n if launch_button.draw(screen):\n print('LAUNCH')\n action = 'LAUNCH'\n elif land_button.draw(screen):\n print('LAND')\n action = 'LAND'\n elif right_button.draw(screen):\n 
print('RIGHT')\n        action = 'RIGHT'\n    elif left_button.draw(screen):\n        print('LEFT')\n        action = 'LEFT'\n    elif take_photo_button.draw(screen):\n        print('PHOTO')\n        action = 'PHOTO'\n    elif up_button.draw(screen):\n        print('UP')\n        action = 'UP'\n    elif down_button.draw(screen):\n        print('DOWN')\n        action = 'DOWN'\n    elif yaw_left_button.draw(screen):\n        print('YAW-LEFT')\n        action = 'YAW-LEFT'\n    elif yaw_right_button.draw(screen):\n        print('YAW-RIGHT')\n        action = 'YAW-RIGHT'\n    \n    pygame.display.update()\n    return keyName == action\n","repo_name":"Fernando74lr/Donatello-Tracking-Drone","sub_path":"keyPressModule.py","file_name":"keyPressModule.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"42805538266","text":"# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport unittest\nfrom unittest.mock import MagicMock, call, patch\n\nimport pytest\n\nfrom run_manifests import main\n\n\nclass TestRunManifests(unittest.TestCase):\n    @pytest.fixture(autouse=True)\n    def capfd(self, capfd):\n        self.capfd = capfd\n\n    @patch(\"argparse._sys.argv\", [\"run_manifests.py\", \"--help\"])\n    def test_usage(self):\n        with self.assertRaises(SystemExit):\n            main()\n\n        out, _ = self.capfd.readouterr()\n        self.assertTrue(out.startswith(\"usage:\"))\n\n    @patch(\"argparse._sys.argv\", [\"run_manifests.py\", \"list\"])\n    @patch(\"run_manifests.logging\", return_value=MagicMock())\n    def test_main_list(self, mock_logging, *mocks):\n        main()\n\n        mock_logging.info.assert_has_calls(\n            [\n                call(\"OpenSearch 1.0.0\"),\n                call(\"OpenSearch 1.0.1\"),\n                call(\"OpenSearch 1.1.0\"),\n                call(\"OpenSearch 1.2.0\"),\n                call(\"OpenSearch 2.0.0\"),\n            ]\n        )\n\n        mock_logging.info.assert_has_calls([call(\"Done.\")])\n\n    @patch(\"argparse._sys.argv\", [\"run_manifests.py\", \"update\"])\n    @patch(\"run_manifests.InputManifests\", return_value=MagicMock())\n    def test_main_update(self, mock_manifests, *mocks):\n        main()\n        mock_manifests.return_value.update.assert_called()\n","repo_name":"peterzhu1992/opensearch-build","sub_path":"bundle-workflow/tests/test_run_manifests.py","file_name":"test_run_manifests.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"}
{"seq_id":"70840893617","text":"from django.conf.urls import url\nfrom django.urls import path, include\nfrom rest_framework_extensions.routers import ExtendedSimpleRouter\n\nfrom quiz.api.views import QuizViewSet, QuestionViewSet, ChoiceViewSet\n\nrouter = ExtendedSimpleRouter()\n\n# User profile\nquiz_router = router.register(\n    prefix=r'quiz',\n    viewset=QuizViewSet,\n    basename='quiz'\n)\n(\n    # User notifications\n    quiz_router.register(\n        prefix=r'questions',\n        viewset=QuestionViewSet,\n        parents_query_lookups=['quiz'],\n        basename='quiz-questions'\n    ).register(\n        prefix=r'choices',\n        viewset=ChoiceViewSet,\n        parents_query_lookups=['question__quiz', 'question'],\n        basename='quiz-questions-choices'\n    ),\n)\n\n\nurlpatterns = [\n    # DRF router\n    path('', include(router.urls)),\n]\n","repo_name":"Extralait/FRtest","sub_path":"back/quiz/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"26212940390","text":"import argparse\nimport 
json\nfrom typing import AsyncGenerator, List\n\n# SYSTEM_PROMPT = \"\"\"\\\n# You are a helpful, respectful and honest assistant with a deep knowledge of code and software design. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\\\n# \"\"\"\n\nimport time\nfrom fastapi import BackgroundTasks, FastAPI, Request\nfrom fastapi.responses import JSONResponse, Response, StreamingResponse\nimport uvicorn\nfrom pydantic import BaseModel\nfrom vllm.engine.arg_utils import AsyncEngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.sampling_params import SamplingParams\nfrom vllm.utils import random_uuid\nfrom fastapi.middleware.cors import CORSMiddleware\n\nTIMEOUT_KEEP_ALIVE = 5 # seconds.\nTIMEOUT_TO_PREVENT_DEADLOCK = 1 # seconds.\napp = FastAPI()\n\nclass my_request(BaseModel):\n lang: str = None\n prompt: str = None\n n: int = 0\n temperature: float = 0.6\n top_p: float = 0\n top_k: float = 0\n\n\nclass key_request(BaseModel):\n lang: str = None\n prompt: str = None\n n: int = 0\n apikey: str = None\n apiSecret: str = None\n temperature: float = 0\n top_p: float = 0\n top_k: float = 0\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@app.post(\"/multilingual_code_generate_block\")\nasync def generate(request: my_request):\n \"\"\"Generate completion for the request.\n\n The request should be a JSON object with the following fields:\n - prompt: the prompt to use for the generation.\n - stream: whether to stream the results or not.\n - other fields: the sampling parameters (See `SamplingParams` for details).\n \"\"\"\n # request_dict = await request.json()\n # prompt = request_dict.pop(\"prompt\")\n # stream = request_dict.pop(\"stream\", False)\n # sampling_params = SamplingParams(**request_dict)\n # request_id = random_uuid()\n before = time.time()\n prompt = request.prompt\n\n # prompt=[f'[INST] <>\\n{SYSTEM_PROMPT}\\n<>\\n\\n']\n # prompt.append(f'{message} [/INST]')\n # prompt=''.join(prompt)\n\n sampling_params = SamplingParams(n=request.n ,\n use_beam_search=False,\n temperature=request.temperature,\n top_p=request.top_p,\n top_k=request.top_k)\n stream = False\n request_id = random_uuid()\n # if not engine.is_running:\n # engine.start_background_loop()\n\n \n\n results_generator = engine.generate(prompt, sampling_params, request_id)\n\n # Streaming case\n async def stream_results() -> AsyncGenerator[bytes, None]:\n async for request_output in results_generator:\n prompt = request_output.prompt\n text_outputs = [\n prompt + output.text for output in request_output.outputs\n ]\n ret = {\"text\": text_outputs}\n yield (json.dumps(ret) + \"\\0\").encode(\"utf-8\")\n\n async def abort_request() -> None:\n await engine.abort(request_id)\n\n if stream:\n background_tasks = BackgroundTasks()\n # Abort the request if the client disconnects.\n background_tasks.add_task(abort_request)\n return StreamingResponse(stream_results(), 
background=background_tasks)\n\n # Non-streaming case\n final_output = None\n async for request_output in results_generator:\n # if await request.is_disconnected():\n # # Abort the request if the client disconnects.\n # await engine.abort(request_id)\n # return Response(status_code=499)\n final_output = request_output\n\n assert final_output is not None\n prompt = final_output.prompt\n text_outputs = \"\"\n for output in final_output.outputs:\n text_outputs = output.text + text_outputs\n # text_outputs = [prompt + output.text for output in final_output.outputs]\n now = time.time()\n all_time = str(now - before)\n answer = {\"data\": {\n \"result\": {\n \"process_time\": all_time,\n \"output\": {\n \"code\": [\n text_outputs\n ]\n }\n },\n\n \"status\": 0,\n \"message\": \"success\",}\n }\n return answer\n\n# @app.post(\"/multilingual_code_generate\")\n# async def generate(request: my_request):\n# before = time.time()\n# prompt = request.prompt\n# sampling_params = SamplingParams(n=request.n ,use_beam_search=False,temperature=request.temperature)\n# stream = False\n# request_id = random_uuid() \n\n# results_generator = engine.generate(prompt, sampling_params, request_id)\n\n# # Streaming case\n# async def stream_results() -> AsyncGenerator[bytes, None]:\n# async for request_output in results_generator:\n# prompt = request_output.prompt\n# text_outputs = [\n# prompt + output.text for output in request_output.outputs\n# ]\n# ret = {\"text\": text_outputs}\n# yield (json.dumps(ret) + \"\\0\").encode(\"utf-8\")\n\n# async def abort_request() -> None:\n# await engine.abort(request_id)\n\n# if stream:\n# background_tasks = BackgroundTasks()\n# # Abort the request if the client disconnects.\n# background_tasks.add_task(abort_request)\n# return StreamingResponse(stream_results(), background=background_tasks)\n\n# # Non-streaming case\n# final_output = None\n# async for request_output in results_generator:\n# # if await request.is_disconnected():\n# # # Abort the request if the client disconnects.\n# # await engine.abort(request_id)\n# # return Response(status_code=499)\n# final_output = request_output\n\n# assert final_output is not None\n# prompt = final_output.prompt\n# for output in final_output.outputs:\n# print (output.text)\n# myoutput = output.text\n# now = time.time()\n# all_time = str(now - before)\n# answer = {\"data\": {\n# \"result\": {\n# \"process_time\": all_time,\n# \"output\": {\n# \"code\": [\n# myoutput\n# ]\n# }\n# },\n# \"status\": 0,\n# \"message\": \"success\",}\n# }\n# return answer\n\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--host\", type=str, default=\"localhost\")\n parser.add_argument(\"--port\", type=int, default=8000)\n parser = AsyncEngineArgs.add_cli_args(parser)\n args = parser.parse_args()\n\n engine_args = AsyncEngineArgs.from_cli_args(args)\n # engine = AsyncLLMEngine.from_engine_args(engine_args,\n # start_engine_loop=False)\n engine = AsyncLLMEngine.from_engine_args(engine_args)\n \n uvicorn.run(app,\n host='0.0.0.0',\n port=args.port, \n log_level=\"debug\",\n timeout_keep_alive=TIMEOUT_KEEP_ALIVE)\n","repo_name":"huogaibeiguyong/vllm","sub_path":"vllm/vllm/entrypoints/my_server.py","file_name":"my_server.py","file_ext":"py","file_size_in_byte":7572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28179397642","text":"from category import *\nfrom mongoconnect import *\n\nif __name__ == '__main__': \n min_period = 'D1'\n day_change = 0.008\n 
df = pd.read_csv(os.path.join(os.path.dirname(os.path.realpath(__file__)), filebasename + '_'+min_period+'.csv')) \n count = 0\n # here should be a larger for loop \n # count the category pickle files \n # and fine tuning value S to make pickle files little\n for i in range(9690, len(df), 2):\n current_time = df.iloc[i].date\n cu_set = MakeCurrentSet(filebasename, current_time, min_period, day_change)\n count += 1\n BuildCat2DB(cu_set, eur_d1)\n print(count) ","repo_name":"AdaJass/Similarity","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"11062855010","text":"import os\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport properties as p\r\nfrom six.moves import xrange\r\nfrom model import creater\r\n\r\nnp.set_printoptions(threshold=np.nan)\r\n\r\nclass runner():\r\n sess = None\r\n model = None\r\n is_ready = False\r\n root = p.get_root()\r\n working_directory = p.get_working_directory()\r\n _buckets = p.get_buckets()\r\n EOS_ID = 2\r\n \r\n def init_session(self, user, project):\r\n repo = os.path.join(self.root, user, project)\r\n layer_size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor = p.get_training_config(user, project)\r\n self.sess = tf.Session()\r\n with tf.variable_scope(\"chatbot\", reuse=tf.AUTO_REUSE):\r\n self.model = creater.create_model(self.sess, True, repo, self._buckets, self.working_directory\r\n , layer_size=layer_size, num_layers=num_layers\r\n , max_gradient_norm=max_gradient_norm, batch_size=batch_size\r\n , learning_rate=learning_rate, learning_rate_decay_factor=learning_rate_decay_factor)\r\n self.model.batch_size = 1\r\n \r\n self.is_ready = True\r\n \r\n def run_session(self, token_ids):\r\n bucket_id = max(min([b for b in xrange(len(self._buckets)) if self._buckets[b][0] > len(token_ids)]) - 1, 0)\r\n print(\"bucket_id : \" + str(bucket_id))\r\n encoder_inputs, decoder_inputs, target_weights = self.model.get_batch({bucket_id: [(token_ids, [])]}, bucket_id)\r\n _, _, output_logits = self.model.step(self.sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True)\r\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\r\n percent = []\r\n for i in range(len(output_logits)):\r\n percent.append(output_logits[i][0][outputs[i]])\r\n if self.EOS_ID in outputs:\r\n outputs = outputs[:outputs.index(self.EOS_ID)]\r\n\r\n return outputs\r\n \r\n def get_is_ready(self):\r\n return self.is_ready\r\n ","repo_name":"kant1724/chatbot_tf_eng","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17491729180","text":"#!/usr/bin/env python2.7\nfrom moke import *\n\nimport numpy as np\nfrom numpy.testing import assert_approx_equal as equal\nfrom numpy.testing import assert_array_almost_equal as aequal\n\n\n@task\ndef test_scale_pairs():\n mkdir(path(\"test_scale_pairs\"))\n assert sh(\"cp ../data/test_ab_cnt.arr test_scale_pairs\") == 0\n assert sh(\"epicode.py scale_pairs test_scale_pairs/test_ab_cnt.arr\") == 0\n with open(\"test_scale_pairs/test_ab_cnt_deseq.arr\") as fh:\n head = fh.readline()\n firs = fh.readline()\n equal(float(firs.split(\"\\t\")[1]), 1.632993161855451847e+00)\n assert sh(\"rm -rf test_scale_pairs\") == 0\n\n@task\ndef test_scale_diff():\n mkdir(path(\"test_scale_diff\"))\n assert sh(\"cp 
../data/test_ab_cnt_deseq.arr test_scale_diff\") == 0\n    assert sh(\"epicode.py scale_diff test_scale_diff/test_ab_cnt_deseq.arr\") == 0\n    with open(\"test_scale_diff/test_ab_cnt_deseq_lvl.arr\") as fh:\n        head = fh.readline()\n        firs = fh.readline()\n        equal(float(firs.split(\"\\t\")[1]), 3.635961336943780431e-01)\n    assert sh(\"rm -rf test_scale_diff\") == 0\n\n@task\ndef test_scale_features():\n    mkdir(path(\"test_scale_features\"))\n    assert sh(\"cp ../data/test_ab_cnt_deseq_lvl.arr test_scale_features\") == 0\n    assert sh(\"epicode.py scale_features -scalgo sig95 test_scale_features/test_ab_cnt_deseq_lvl.arr\") == 0\n    with open(\"test_scale_features/test_ab_cnt_deseq_lvl_sig95.arr\") as fh:\n        head = fh.readline()\n        firs = fh.readline()\n        equal(float(firs.split(\"\\t\")[1]), 1.211047172546386719e-02)\n    assert sh(\"rm -rf test_scale_features\") == 0\n\n@task\ndef test_code_sklearn():\n    mkdir(path(\"test_code_sklearn\"))\n    assert sh(\"cp ../data/a549_start_lvl_sig95.arr test_code_sklearn\") == 0\n    assert sh(\"epicode.py code_sklearn -c 6 test_code_sklearn/a549_start_lvl_sig95.arr\") == 0\n    with open(\"test_code_sklearn/a549_start_lvl_sig95_pgnmf-c#6-i#None-p#.epi\") as fh:\n        head = fh.readline()\n        firs = fh.readline()\n        equal(float(firs.split(\"\\t\")[0]), 4.0693399501)\n    assert sh(\"rm -rf test_code_sklearn\") == 0\n\n@task\ndef test_multi_code_sklearn():\n    mkdir(path(\"test_multicode_sklearn\"))\n    assert sh(\"cp ../data/[01]*sig95.arr test_multicode_sklearn\") == 0\n    assert sh(\"epicode.py multi_code_sklearn -base test_multicode_sklearn/tve -c 6 test_multicode_sklearn/[01]*sig95.arr\") == 0\n    with open(\"test_multicode_sklearn/tve_pgnmf-c#6-i#None-p#.epi\") as fh:\n        head = fh.readline()\n        firs = fh.readline()\n        equal(float(firs.split(\"\\t\")[1]), 2.75326630104)\n    with open(\"test_multicode_sklearn/tve_pgnmf-c#6-i#None-p#.arr\") as fh:\n        head = fh.readline()\n        firs = fh.readline()\n        equal(float(firs.split(\"\\t\")[0]), 1.250747926106863528e-01)\n    assert sh(\"rm -rf test_multicode_sklearn\") == 0\n\n@task\ndef test_recode_sklearn():\n    mkdir(path(\"test_recode_sklearn\"))\n    assert sh(\"cp ../data/0*sig95.arr test_recode_sklearn\") == 0\n    assert sh(\"cp ../data/tss_vs_enh_pgnmf-c#6-i#None-p#.epi test_recode_sklearn\") == 0\n    assert sh(\"epicode.py recode_sklearn -arr test_recode_sklearn/0_tss_vs_enh_lvl_sig95.arr -epi test_recode_sklearn/tss_vs_enh_pgnmf-c#6-i#None-p#.epi -base recode -odn test_recode_sklearn\") == 0\n    with open(\"test_recode_sklearn/recode.arr\") as fh:\n        head = fh.readline()\n        firs = fh.readline()\n        equal(float(firs.split(\"\\t\")[0]), 1.250747926106863528e-01)\n    assert sh(\"rm -rf test_recode_sklearn\") == 0\n\n\nif __name__ == \"__main__\":\n    task()\n","repo_name":"mcieslik-mctp/epicode","sub_path":"test/test_cmd.py","file_name":"test_cmd.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"}
{"seq_id":"21969290668","text":"#!/usr/bin/env python\n\n# Description: Publishes state in cartesian coordinates and broadcasts tf tamp_map -> base_link\n# selects input topic based on system_setup param \n# system_setup = \"rhino_real\": /OpenDLV/SensorMsgGPS & /OpenDLV/SensorMsgCAN\n# system_setup = \"rhino_fssim\": /fssim/base_pose_ground_truth\n\n#from __future__ import division\n\nimport numpy as np\nimport time\nimport utm\nimport yaml\nimport rospy\nimport tf\nimport rospkg\nfrom common.msg import State\nfrom common.msg import OriginPoseUTM\nfrom visualization_msgs.msg import Marker\nfrom 
tf.transformations import quaternion_from_euler\nfrom util import angleToInterval\nfrom util import angleToContinous\n\nclass pos2DKalmanFilter:\n # constructor\n def __init__(self,dt,Qscale):\n self.x = np.array([[0.], # X\n [0.], # Xdot\n [0.], # Y\n [0.]]) # Ydot \n self.F = np.array([[1., dt, 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., dt],\n [0., 0., 0., 1.]])\n \n self.H = np.array([[1.,0.,0.,0.], # measurement function\n [0.,0.,1.,0.]]) \n self.Q = Qscale*np.array([[1.0, 0., 0., 0.], \n [0., 1.0, 0., 0.],\n [0., 0., 1.0, 0.],\n [0., 0., 0., 1.0]])\n self.P = np.copy(self.Q) # init covariance matrix same as Q\n self.R = np.array([[0.1, 0.], # measurement noise\n [0., 0.1]])\n\n def predict(self):\n self.x = np.dot(self.F, self.x)\n self.P = np.dot(self.F, self.P).dot(self.F.T) + self.Q\n\n def update(self,z):\n S = np.dot(self.H, self.P).dot(self.H.T) + self.R\n K = np.dot(self.P, self.H.T).dot(np.linalg.pinv(S))\n y = z - np.dot(self.H, self.x)\n self.x += np.dot(K, y)\n self.P = self.P - np.dot(K, self.H).dot(self.P) \n \nclass FullEKF: \n # constructor\n def __init__(self,dt,lf,lr,Iz,m,g,Q_diag_ele,R_diag_ele):\n # params \n self.dt = dt\n self.lf = lf\n self.lr = lr\n self.Iz = Iz\n self.m = m\n self.g = g\n \n # init state vector\n self.x = np.array([[0.], #0 X\n [0.], #1 Y\n [0.], #2 psi\n [0.], #3 psidot\n [0.], #4 vx\n [0.], #5 vy\n [0.], #6 Fyf\n [0.], #7 Fyr\n [0.]])#8 Fx \n self.psi_last = self.x[2,0] # help var to handle discontinuity in psi\n \n # init matrices\n self.F = np.array([[0., 0., -self.x[4,0]*np.sin(self.x[2,0])-self.x[5,0]*np.cos(self.x[2,0]), 0., np.cos(self.x[2,0]), -np.sin(self.x[2,0]), 0., 0., 0.],\n [0., 0., self.x[4,0]*np.cos(self.x[2,0])-self.x[5,0]*np.sin(self.x[2,0]), 0., np.sin(self.x[2,0]), np.cos(self.x[2,0]), 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., self.lf/self.Iz, -self.lr/self.Iz, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 1./self.m],\n [0., 0., 0., -self.x[4,0], -self.x[3,0], 0., 1./self.m, 1./self.m, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0],\n [0., 0., 0., 0., 0., 0., 0., 0., 0],\n [0., 0., 0., 0., 0., 0., 0., 0., 0],])*self.dt + np.eye(9)\n \n# self.H = np.array([[1.,0.,0.,0.,0.,0.,0.,0.,0.], # measurement function\n# [0.,1.,0.,0.,0.,0.,0.,0.,0.],\n# [0.,0.,1.,0.,0.,0.,0.,0.,0.],\n# [0.,0.,0.,1.,0.,0.,0.,0.,0.],\n# [0.,0.,0.,0.,1.,0.,0.,0.,0.],\n# [0.,0.,0.,0.,0.,1.,0.,0.,0.]]) \n \n self.H = np.eye(9)\n \n self.Q = np.diag(Q_diag_ele)\n self.P = np.copy(self.Q) # init covariance matrix same as Q\n self.R = np.diag(R_diag_ele)\n# self.R = np.array([[0.1, 0., 0., 0., 0., 0.], # measurement noise\n# [0., 0.1, 0., 0., 0., 0.],\n# [0., 0., 0.1, 0., 0., 0.],\n# [0., 0., 0., 0.1, 0., 0.],\n# [0., 0., 0., 0., 0.1, 0.],\n# [0., 0., 0., 0., 0., 0.1]])\n \n\n def predict(self, theta, phi): # input grade (theta) and bank (phi)\n \n # nonlinear state update\n self.x[0,0] = self.x[0,0] + self.dt*(self.x[4,0]*np.cos(self.x[2,0]) - self.x[5,0]*np.sin(self.x[2,0]))\n self.x[1,0] = self.x[1,0] + self.dt*(self.x[4,0]*np.sin(self.x[2,0]) + self.x[5,0]*np.cos(self.x[2,0]))\n self.x[2,0] = self.x[2,0] + self.dt*(self.x[3,0])\n self.x[3,0] = self.x[3,0] + self.dt*((1./self.Iz)*(self.lf*self.x[6,0] - self.lr*self.x[7,0]))\n self.x[4,0] = self.x[4,0] + self.dt*((1./self.m)*self.x[8,0] - self.g*np.sin(theta))\n self.x[5,0] = self.x[5,0] + self.dt*((1./self.m)*(self.x[6,0]+self.x[7,0])-self.x[4,0]*self.x[3,0]+self.g*np.sin(phi))\n self.x[6,0] = self.x[6,0] + np.random.normal(0.0, self.Q[6,6])\n self.x[7,0] = self.x[7,0] + 
np.random.normal(0.0, self.Q[7,7])\n self.x[8,0] = self.x[8,0] + np.random.normal(0.0, self.Q[8,8])\n \n # recompute F at x\n self.F = np.array([[0., 0., -self.x[4,0]*np.sin(self.x[2,0])-self.x[5,0]*np.cos(self.x[2,0]), 0., np.cos(self.x[2,0]), -np.sin(self.x[2,0]), 0., 0., 0.],\n [0., 0., self.x[4,0]*np.cos(self.x[2,0])-self.x[5,0]*np.sin(self.x[2,0]), 0., np.sin(self.x[2,0]), np.cos(self.x[2,0]), 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., self.lf/self.Iz, -self.lr/self.Iz, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 1./self.m],\n [0., 0., 0., -self.x[4,0], -self.x[3,0], 0., 1./self.m, 1./self.m, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0],\n [0., 0., 0., 0., 0., 0., 0., 0., 0],\n [0., 0., 0., 0., 0., 0., 0., 0., 0],])*self.dt + np.eye(9) \n \n # update covariance matrix P \n self.P = np.dot(self.F, self.P).dot(self.F.T) + self.Q\n\n def update(self,z):\n S = np.dot(self.H, self.P).dot(self.H.T) + self.R\n K = np.dot(self.P, self.H.T).dot(np.linalg.pinv(S))\n \n # handle discontinuity in psi (by making z[2,0] continuos) NOTE: have to do angleToInterval before using estimated heading \n # TODO problem on second lap. More general solution needed\n psis = np.array([self.x[2,0],z[2,0]])\n psis_cont = angleToContinous(psis) \n z[2,0] = psis_cont[1]\n \n # do measurement update\n y = z - np.dot(self.H, self.x)\n #rospy.logwarn(\"state_est_cart EKF: x = \" + str(self.x))\n #rospy.logwarn(\"state_est_cart EKF: z = \" + str(z))\n #rospy.logwarn(\"state_est_cart EKF: y = \" + str(y))\n #rospy.logwarn(\"state_est_cart EKF: K = \" + str(K)) \n self.x += np.dot(K, y)\n self.P = self.P - np.dot(K, self.H).dot(self.P) \n\nclass StateEstCart:\n # constructor\n def __init__(self):\n # init node\n rospy.init_node('state_est_cart', anonymous=True)\n self.dt = rospy.get_param('/dt_state_est_cart')\n self.rate = rospy.Rate(1./self.dt) \n \n # load rosparams\n self.robot_name = rospy.get_param('/robot_name')\n self.system_setup = rospy.get_param('/system_setup')\n self.lf = rospy.get_param('/car/kinematics/b_F')\n self.lr = rospy.get_param('/car/kinematics/b_R')\n self.h_cg = rospy.get_param('/car/kinematics/h_cg')\n self.m = rospy.get_param('/car/inertia/m')\n self.g = rospy.get_param('/car/inertia/g')\n self.Iz = rospy.get_param('/car/inertia/I_z')\n\n if(self.system_setup == \"rhino_real\"):\n from opendlv_ros.msg import SensorMsgGPS\n from opendlv_ros.msg import SensorMsgCAN \n\n elif(self.system_setup == \"rhino_fssim\" or self.system_setup == \"gotthard_fssim\"):\n from fssim_common.msg import State as fssimState\n\n\n \n # init local vars\n self.state_out = State()\n self.live = False # todo incorporate in \"system_setup\"\n self.ts_latest_pos_update = rospy.Time.now()\n self.psidot_last = 0\n\n # init position KF \n Qscale = 0.01 \n self.kf = pos2DKalmanFilter(self.dt,Qscale)\n \n # init full KF\n Q_diag_ele = np.array([1e0,1e0,1e0,1e0,1e0,1e0,1e1,1e1,1e1])\n R_diag_ele = np.array([0.1,0.1,0.01,0.01,0.5,0.5,1e3,1e3,1e3]) # approx meas noises (large values for forces since we fake a zero meas)\n self.ekf_state = FullEKF(self.dt,self.lf,self.lr,self.Iz,self.m,self.g,Q_diag_ele,R_diag_ele)\n \n # load vehicle dimensions \n dimsyaml = rospkg.RosPack().get_path('common') + '/config/vehicles/' + self.robot_name + '/config/distances.yaml'\n with open(dimsyaml, 'r') as f:\n self.dims = yaml.load(f,Loader=yaml.SafeLoader) \n \n # init subs pubs\n if (self.system_setup == \"rhino_real\"):\n self.odlv_gps_sub = rospy.Subscriber(\"/OpenDLV/SensorMsgGPS\", SensorMsgGPS, 
self.odlv_gps_callback)\n self.odlv_gps_msg = SensorMsgGPS()\n self.received_odlv_gps = False\n self.odlv_can_sub = rospy.Subscriber(\"/OpenDLV/SensorMsgCAN\", SensorMsgCAN, self.odlv_can_callback)\n self.odlv_can_msg = SensorMsgCAN()\n self.received_odlv_can = False\n self.origin_pose_utm_sub = rospy.Subscriber(\"/origin_pose_utm\", OriginPoseUTM, self.origin_pose_utm_callback)\n self.origin_pose_utm = OriginPoseUTM()\n self.received_origin_pose_utm = False\n elif(self.system_setup == \"rhino_fssim\"):\n self.fssim_state_sub = rospy.Subscriber(\"/fssim/base_pose_ground_truth\", fssimState, self.fssim_state_callback)\n self.received_fssim_state = False\n else: \n rospy.logerr(\"state_est_cart: invalid value of system_setup param, system_setup = \" + self.system_setup)\n self.statepub = rospy.Publisher('state_cart', State, queue_size=1)\n self.poserawpub = rospy.Publisher('/pose_raw_vis', Marker, queue_size=1)\n self.poseFullEKFpub = rospy.Publisher('/pose_full_ekf_vis', Marker, queue_size=1)\n self.tfbr = tf.TransformBroadcaster()\n \n # force arrow markers\n self.Fyf_vis_pub = rospy.Publisher('/Fyf_est_vis', Marker, queue_size=1)\n self.Fyr_vis_pub = rospy.Publisher('/Fyr_est_vis', Marker, queue_size=1)\n self.Fx_vis_pub = rospy.Publisher('/Fx_est_vis', Marker, queue_size=1)\n \n # wait for messages before entering main loop\n if (self.system_setup == \"rhino_real\"):\n while((not self.received_odlv_gps) or (not self.received_odlv_can)):\n rospy.loginfo_throttle(1, \"state_est_cart: waiting opendlv messages\")\n self.rate.sleep()\n while(not self.received_origin_pose_utm):\n rospy.loginfo_throttle(1, \"state_est_cart: waiting origin pose utm\")\n self.rate.sleep()\n elif(self.system_setup == \"rhino_fssim\"):\n while(not self.received_fssim_state):\n rospy.loginfo_throttle(1, \"state_est_cart: waiting fssim state message\")\n self.rate.sleep()\n\n rospy.logwarn(\"state_est_cart: started with sensor setup \" + self.system_setup)\n\n # Main loop\n while not rospy.is_shutdown():\n \n # timing\n start = time.time()\n \n # state estimation\n if (self.system_setup == \"rhino_real\"):\n self.update_rhino_state()\n self.statepub.publish(self.state_out)\n\n # Full EKF \n self.ekf_state.predict(0.,0,)\n z = np.array([[self.state_out.X], #0 X\n [self.state_out.Y], #1 Y\n [self.state_out.psi], #2 psi\n [self.state_out.psidot], #3 psidot\n [self.state_out.vx], #4 vx\n [self.state_out.vy], #5 vy\n [1.0], # Fyf\n [2.0], # Fyr\n [3.0],]) # Fx\n self.ekf_state.update(z)\n\n # publish ekf pose marker\n X_ekf = self.ekf_state.x[0,0]\n Y_ekf = self.ekf_state.x[1,0]\n psi_ekf = angleToInterval(np.array([self.ekf_state.x[2,0]]))[0]\n m_ekf = self.get_pose_marker(X_ekf,Y_ekf,psi_ekf)\n self.poseFullEKFpub.publish(m_ekf)\n \n# # debug print estimated state\n# rospy.logwarn(\"state_est_cart: X_ekf = \" + str(self.ekf_state.x[0,0]))\n# rospy.logwarn(\"state_est_cart: Y_ekf = \" + str(self.ekf_state.x[1,0]))\n# rospy.logwarn(\"state_est_cart: psi_ekf = \" + str(self.ekf_state.x[2,0]))\n# rospy.logwarn(\"state_est_cart: psidot_ekf = \" + str(self.ekf_state.x[3,0]))\n# rospy.logwarn(\"state_est_cart: vx_ekf = \" + str(self.ekf_state.x[4,0]))\n# rospy.logwarn(\"state_est_cart: vy_ekf = \" + str(self.ekf_state.x[5,0]))\n \n \n # get pseudo measurement of yaw acc\n psidotdot_est_tmp =0 #= (self.state_out.psidot - self.psidot_last)/self.dt\n self.psidot_last = self.state_out.psidot\n\n # Todo acc KF (linear)\n\n # compute pseudo measurements of tire forces from accelerations\n Fyf_est, Fxf_est, Fyr_est, Fxr_est = 
self.get_tire_forces_from_motion(self.state_out.ax,\n self.state_out.ay,\n psidotdot_est_tmp,\n self.state_out.vx,\n self.state_out.psidot,\n self.m,\n self.lf,\n self.lr,\n self.Iz,\n self.state_out.Fzf,\n self.state_out.Fzr)\n \n self.state_out.rhof = np.sqrt(Fxf_est**2 + Fyf_est**2)/self.state_out.Fzf\n self.state_out.rhor = np.sqrt(Fxr_est**2 + Fyr_est**2)/self.state_out.Fzr\n \n \n \n # publish estimated tire force arrow markers\n #Fyf_est = 0.5*self.m*(self.state_out.ay + self.state_out.psidot*self.state_out.vx)\n #Fyr_est = 0.5*self.m*(self.state_out.ay + self.state_out.psidot*self.state_out.vx)\n #Fx_est = self.m*self.state_out.ax\n \n \n self.Fyf_vis_pub.publish(self.getForceArrowMarker(np.pi/2., Fyf_est/1000.,0))\n self.Fyr_vis_pub.publish(self.getForceArrowMarker(np.pi/2., Fyr_est/1000.,3.4))\n self.Fx_vis_pub.publish(self.getForceArrowMarker(0, (Fxf_est+Fxr_est)/1000.,1.2))\n \n # broadcast tf\n start_tfbc = time.time()\n self.broadcast_dyn_tfs()\n self.broadcast_static_tfs()\n end_tfbc = time.time()\n comptime_tfbc = end_tfbc-start_tfbc\n\n # timing: check wrt dt\n end = time.time()\n comptime = end-start\n if (comptime > self.dt):\n rospy.logwarn(\"state_est_cart: compute time exceeding dt!\")\n rospy.logwarn(\"state_est_cart: total comptime = \" + str(comptime))\n rospy.logwarn(\"state_est_cart: comptime tf broadcast = \" + str(comptime_tfbc)) \n \n self.rate.sleep()\n\n def update_rhino_state(self):\n # HANDLE INCOMING DATA\n \n # get message delay times\n delta_t_gps = rospy.Time.now() - self.ts_latest_pos_update\n delta_t_can = rospy.Time.now() - self.odlv_can_msg.header.stamp\n\n # check message age\n msg_time_mgn = 0.1\n if(delta_t_gps.to_sec() > msg_time_mgn):\n rospy.logwarn(\"state_est_cart: Old GPS measurement, delta_t_gps.to_sec() = \" + str(delta_t_gps.to_sec()))\n# else:\n# rospy.logwarn_throttle(1,\"state_est_cart: GPS measurement age: = \" + str(delta_t_gps.to_sec()))\n\n if(delta_t_can.to_sec() > msg_time_mgn):\n rospy.logwarn(\"state_est_cart: Old CAN measurement, delta_t_can.to_sec() = \" + str(delta_t_can.to_sec())) \n\n # incoming pos\n X_utm, Y_utm, utm_nr, utm_letter = utm.from_latlon(self.odlv_gps_msg.lat, self.odlv_gps_msg.long)\n X_raw = X_utm - self.origin_pose_utm.X0_utm\n Y_raw = Y_utm - self.origin_pose_utm.Y0_utm\n \n # check utm zone\n if(utm_nr != self.origin_pose_utm.utm_nr or utm_letter != chr(self.origin_pose_utm.utm_letter)):\n rospy.logerr(\"UTM zone mismatch: GPS measurement utm_nr = \" + str(utm_nr) + \", origin_pose utm_nr = \" + str(self.origin_pose_utm.utm_nr))\n rospy.logerr(\"UTM zone mismatch: GPS measurement utm_letter = \" + utm_letter + \", origin_pose utm_letter = \" + str(chr(self.origin_pose_utm.utm_letter)))\n\n # incoming yawangle field is heading (degrees 0 to 360, 0 North, increasing clockwise)\n # converting to psi (radians -pi to pi, 0 East, increasing counterclockwise )\n heading_raw = self.odlv_gps_msg.yawangle\n psi_raw = (np.pi/180)*(90-heading_raw) \n psi_raw = angleToInterval(np.array([psi_raw]))[0]\n \n # convert heading-rate to yawrate\n psidot_raw = -self.odlv_gps_msg.yawrate*(np.pi/180)\n \n # velocities \n vx_raw = self.odlv_can_msg.vx\n vy_raw = -self.odlv_gps_msg.vy # flipped sign convention oxgs\n\n # accelerations\n ax_raw = self.odlv_gps_msg.ax\n ay_raw = -self.odlv_gps_msg.ay # flipped sign convention oxgs\n \n # normal forces\n Fzf_raw = self.odlv_can_msg.load_axle_1*9.82\n Fzr_raw = self.odlv_can_msg.load_axle_2*9.82\n \n # publish raw pose marker\n m = self.get_pose_marker(X_raw,Y_raw,psi_raw)\n 
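# Worked example of the heading convention above (a sketch, not live code):\n        #   heading =  90 deg (East)  -> psi = (pi/180)*(90-90)  =  0\n        #   heading =   0 deg (North) -> psi = (pi/180)*(90-0)   =  pi/2\n        #   heading = 180 deg (South) -> psi = (pi/180)*(90-180) = -pi/2\n        # angleToInterval then wraps the result back into [-pi, pi].\n        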
self.poserawpub.publish(m)\n \n # STATE EST\n \n # set velocities, acc and heading directly\n self.state_out.psidot = psidot_raw\n self.state_out.vx = vx_raw\n self.state_out.vy = vy_raw \n self.state_out.ax = ax_raw\n self.state_out.ay = ay_raw \n self.state_out.psi = psi_raw \n self.state_out.Fzf = Fzf_raw\n self.state_out.Fzr = Fzr_raw\n\n # set position from KF\n z = np.array([[X_raw],\n [Y_raw]])\n self.kf.predict()\n self.kf.update(z)\n self.state_out.X = self.kf.x[0][0]\n self.state_out.Y = self.kf.x[2][0]\n\n # print errors if faulty state estimates\n if(self.state_out.psi < -np.pi or self.state_out.psi > np.pi):\n rospy.logerr(\"state_est_cart: psi outside interval, psi = \" + str(self.state_out.psi))\n \n \n def get_pose_marker(self,X,Y,psi):\n m = Marker()\n m.header.stamp = rospy.Time.now()\n m.header.frame_id = \"map\"\n m.pose.position.x = X;\n m.pose.position.y = Y;\n m.pose.position.z = 1.0;\n q = quaternion_from_euler(0, 0, psi)\n m.pose.orientation.x = q[0]\n m.pose.orientation.y = q[1]\n m.pose.orientation.z = q[2]\n m.pose.orientation.w = q[3]\n m.type = m.ARROW;\n m.scale.x = 2.0;\n m.scale.y = 0.6;\n m.scale.z = 0.6;\n m.color.a = 1.0; \n m.color.r = 1.0;\n m.color.g = 0.0;\n m.color.b = 0.0;\n return m\n\n\n def getForceArrowMarker(self,orientation,magnitude,rearward_shift):\n m = Marker()\n m.header.stamp = rospy.Time.now()\n m.header.frame_id = \"chassis\"\n m.pose.position.x = -rearward_shift;\n m.pose.position.y = 0;\n m.pose.position.z = 0;\n q = quaternion_from_euler(0, 0, orientation)\n m.pose.orientation.x = q[0]\n m.pose.orientation.y = q[1]\n m.pose.orientation.z = q[2]\n m.pose.orientation.w = q[3]\n m.type = m.ARROW;\n m.scale.x = magnitude;\n m.scale.y = 0.3;\n m.scale.z = 0.3;\n m.color.a = 1.0; \n m.color.r = 0.0;\n m.color.g = 0.0;\n m.color.b = 0.7;\n return m\n\n def broadcast_dyn_tfs(self):\n self.tfbr.sendTransform((self.state_out.X, self.state_out.Y, 0),\n tf.transformations.quaternion_from_euler(0, 0, self.state_out.psi),\n rospy.Time.now(),\n \"base_link\",\n \"tamp_map\")\n \n # todo - steering angle here\n# self.tfbr.sendTransform((self.dims[\"left_steering_hinge\"][\"left_front_wheel\"][\"x\"], self.dims[\"left_steering_hinge\"][\"left_front_wheel\"][\"y\"], self.dims[\"left_steering_hinge\"][\"left_front_wheel\"][\"z\"]),\n# tf.transformations.quaternion_from_euler(0, 0, 1.0),\n# rospy.Time.now(),\n# \"left_front_wheel\",\n# \"left_steering_hinge\") \n#\n# self.tfbr.sendTransform((self.dims[\"right_steering_hinge\"][\"right_front_wheel\"][\"x\"], self.dims[\"right_steering_hinge\"][\"right_front_wheel\"][\"y\"], self.dims[\"right_steering_hinge\"][\"right_front_wheel\"][\"z\"]),\n# tf.transformations.quaternion_from_euler(0, 0, 1.0),\n# rospy.Time.now(),\n# \"right_front_wheel\",\n# \"right_steering_hinge\") \n\n def broadcast_static_tfs(self):\n self.tfbr.sendTransform((self.dims[\"base_link\"][\"cog\"][\"x\"], self.dims[\"base_link\"][\"cog\"][\"y\"], self.dims[\"base_link\"][\"cog\"][\"z\"]),\n (0, 0, 0, 1), \n rospy.Time.now(),\n \"cog\",\n \"base_link\") \n q = tf.transformations.quaternion_from_euler(-np.pi/2, 0., -np.pi/2.)\n self.tfbr.sendTransform((2.0, 0.0, 1.45),\n (q[0], q[1], q[2], q[3]), \n rospy.Time.now(),\n \"cam\",\n \"cog\") \n \n self.tfbr.sendTransform((self.dims[\"cog\"][\"chassis\"][\"x\"], self.dims[\"cog\"][\"chassis\"][\"y\"], self.dims[\"cog\"][\"chassis\"][\"z\"]),\n (0, 0, 0, 1), \n rospy.Time.now(),\n \"chassis\",\n \"cog\") \n\n self.tfbr.sendTransform((self.dims[\"chassis\"][\"left_rear_wheel_joint\"][\"x\"], 
self.dims[\"chassis\"][\"left_rear_wheel_joint\"][\"y\"], self.dims[\"chassis\"][\"left_rear_wheel_joint\"][\"z\"]),\n (0, 0, 0, 1), \n rospy.Time.now(),\n \"left_rear_wheel_joint\",\n \"chassis\") \n\n self.tfbr.sendTransform((self.dims[\"chassis\"][\"right_rear_wheel_joint\"][\"x\"], self.dims[\"chassis\"][\"right_rear_wheel_joint\"][\"y\"], self.dims[\"chassis\"][\"right_rear_wheel_joint\"][\"z\"]),\n (0, 0, 0, 1), \n rospy.Time.now(),\n \"right_rear_wheel_joint\",\n \"chassis\") \n\n self.tfbr.sendTransform((self.dims[\"chassis\"][\"left_steering_hinge_joint\"][\"x\"], self.dims[\"chassis\"][\"left_steering_hinge_joint\"][\"y\"], self.dims[\"chassis\"][\"left_steering_hinge_joint\"][\"z\"]),\n (0, 0, 0, 1), \n rospy.Time.now(),\n \"left_steering_hinge\",\n \"chassis\") \n\n self.tfbr.sendTransform((self.dims[\"chassis\"][\"right_steering_hinge\"][\"x\"], self.dims[\"chassis\"][\"right_steering_hinge\"][\"y\"], self.dims[\"chassis\"][\"right_steering_hinge\"][\"z\"]),\n (0, 0, 0, 1), \n rospy.Time.now(),\n \"right_steering_hinge\",\n \"chassis\") \n\n def get_tire_forces_from_motion(self,ax,ay,psidotdot,vx,psidot,m,lf,lr,Iz,Fzf,Fzr):\n # acc --> Fx, Fy, Mz\n Fx = m*ax # assuming zero grade angle atm\n Fy = m*(ay+vx*psidot)\n Mz = Iz*psidotdot\n # Fy, Mz --> Fyf Fyr\n Fyr = (lf*Fy-Mz)/(lr+lf)\n Fyf = (lr*Fy+Mz)/(lr+lf) # Fy-Fyr\n # Fzf, Fxr --> Fxf, Fxf (assume Fxi distributed as Fzi)\n ratio_f = Fzf/(Fzf+Fzr)\n Fxf = ratio_f*Fx\n Fxr = (1-ratio_f)*Fx\n return Fyf, Fxf, Fyr, Fxr\n\n def get_normal_forces_from_motion(self,ax,theta):\n Fzf = (1.0/(self.lf+self.lr))*(-self.m*ax*self.h_cg - self.m*self.g*self.h_cg*np.sin(theta) + self.m*self.g*self.lr*np.cos(theta));\n Fzr = (1.0/(self.lf+self.lr))*( self.m*ax*self.h_cg + self.m*self.g*self.h_cg*np.sin(theta) + self.m*self.g*self.lf*np.cos(theta));\n return Fzf,Fzr\n\n\n def fssim_state_callback(self, msg):\n self.received_fssim_state = True\n self.state_out.X = msg.x\n self.state_out.Y = msg.y\n self.state_out.psi = angleToInterval(np.array([msg.yaw]))[0]\n self.state_out.psidot = msg.r\n self.state_out.vx = msg.vx\n self.state_out.vy = msg.vy\n self.state_out.ax = msg.ax\n self.state_out.ay = msg.ay\n Fzf, Fzr = self.get_normal_forces_from_motion(self.state_out.ax,0.)\n self.state_out.Fzf = Fzf \n self.state_out.Fzr = Fzr \n self.state_out.header.stamp = rospy.Time.now()\n self.statepub.publish(self.state_out) # publish in callback to minimize delay\n\n def odlv_gps_callback(self, msg):\n # print errors if faulty measurements\n if(msg.yawangle < 0.0 or msg.yawangle > 360):\n rospy.logerr(\"state_est_cart: incoming heading outside interval, hdng = \" + str(msg.yawangle))\n \n # store timestamp of latest incoming position\n if (self.odlv_gps_msg.vx >= 1.0 and not (self.odlv_gps_msg.lat == msg.lat and self.odlv_gps_msg.long == msg.long)):\n self.ts_latest_pos_update = rospy.Time.now()\n elif (self.odlv_gps_msg.vx < 1.0):\n self.ts_latest_pos_update = rospy.Time.now()\n \n # receive new message\n self.odlv_gps_msg = msg\n self.received_odlv_gps = True\n \n # restamp incoming msg if not live\n if(not self.live):\n self.odlv_gps_msg.header.stamp = rospy.Time.now()\n \n def odlv_can_callback(self, msg):\n self.odlv_can_msg = msg\n self.received_odlv_can = True\n # restamp incoming msg if not live \n if(not self.live):\n self.odlv_can_msg.header.stamp = rospy.Time.now()\n\n def origin_pose_utm_callback(self, msg):\n self.origin_pose_utm = msg\n self.received_origin_pose_utm = True\n\nif __name__ == '__main__':\n sec = StateEstCart()\n try:\n 
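# block here so the subscriber callbacks keep running until shutdown\n        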
rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\") ","repo_name":"SHEKHARSR/TAMP21","sub_path":"src/saarti/perception/scripts/state_est_cart.py","file_name":"state_est_cart.py","file_ext":"py","file_size_in_byte":27922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"71890960819","text":"import json\nimport time\nimport argparse\nimport numpy as np\nfrom rtreedb import RTreeDB\n\ndef parse_args():\n\n help = {\n 'rt_bound': 'test limits of rtree',\n 'f': 'path to create an rtree file',\n 'start': 'comma separated start',\n 'stop': 'comma separated stop',\n }\n\n parser = argparse.ArgumentParser(help['rt_bound'])\n parser.add_argument('-f', default='database', help=help['f'])\n parser.add_argument('start', help=help['start'])\n parser.add_argument('stop', help=help['stop'])\n\n # Return parsed dictionary\n return vars(parser.parse_args())\n\nif __name__ == '__main__':\n\n # Get argument dictionary\n argd = parse_args()\n # Make the database\n db = RTreeDB(argd['f'])\n # Get the start and stop bounds\n start = np.uint32(argd['start'].split(','))\n stop = np.uint32(argd['stop'].split(','))\n # Start timing rtree lookup\n t0 = time.time()\n # Check against the rtree\n in_bounds = db.check_bounds([start, stop])\n t1 = time.time()\n # Print time taken to check bounds\n print(\"\"\"{}\n\n in {:.2f} seconds\n \"\"\".format(in_bounds, t1-t0))\n","repo_name":"Rhoana/butterfly","sub_path":"scripts/database_trials/rt_bound.py","file_name":"rt_bound.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"7931536366","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 30 15:01:14 2020\n\n@author: meike\n\"\"\"\n\n\n'''Run missing best hits'''\n\nimport os\nfrom pathlib import Path\nfrom datetime import date\n \npath = os.getcwd()\np = Path(path)\n\n#get the date to keep track of the scripts (added to scriptname)\ntoday = date.today().strftime(\"%d/%m/%Y\")\ntoday = today.split('/')\ntoday = ''.join(today)\n\nids_done = []\n\nfor file in list(os.listdir('/home/meiker/orthomcl/besthit')): \n id_ = file.strip().split('.')[0]\n ids_done.append(id_)\n\nmissing = []\nfor file in list(os.listdir('/home/meiker/orthomcl/splitSimSeq/')):\n id_ = file.strip().split('.')[0]\n if id_ not in ids_done:\n missing.append(id_)\n \ntaxon_list = []\nwith open(os.path.join(p.parents[0], 'files', 'taxon_list')) as f:\n for line in f:\n taxon_list.append(line.strip())\n\nindexes = [] \nfor i, taxon in enumerate(taxon_list):\n if taxon in missing:\n indexes.append(i+1)\n\nprint(len(missing))\n \nsize = round(len(indexes)/3)\n\nfor i in range(1,4):\n with open(os.path.join(p.parents[0], 'scripts', 'bash_scripts', 'porthomcl', 'besthit', today + '_missing_best_hits' + str(i) + '.sh'), 'w') as f:\n for j in indexes:\n f.write(\"porthomclPairsBestHit.py -t /home/meiker/orthomcl/taxon_list -s /home/meiker/orthomcl/splitSimSeq -b /home/meiker/orthomcl/besthit -q /home/meiker/orthomcl/paralogTemp -x \"+str(j)+\"\\n\")","repo_name":"danielriosgarza/strepto_phylogenomics","sub_path":"scripts/300320202_missing_besthits.py","file_name":"300320202_missing_besthits.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"815743918","text":"from datetime import timezone, datetime, timedelta\nimport logging\nimport os\nimport 
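requests\n\n# Rough sketch of the GPU-hours accounting used below (an assumption read off\n# the queries: one Prometheus sample per task per scrape interval): e.g. with a\n# 30 s interval, 840 samples over 7d on one GPU ~= 840 * 30 / 3600 = 7 GPU-hours.\nimport 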
requests\n\nCLUSTER_QUERY_STRING = \"avg(avg_over_time(nvidiasmi_utilization_gpu[7d]))\"\nJOB_GPU_PERCENT = 'avg by (job_name) (avg_over_time(task_gpu_percent[7d]))'\nJOB_GPU_HOURS = 'sum by (job_name) (count_over_time(task_gpu_percent[7d]))'\n# user used gpu hours / total gpu hours\nUSER_QUERY_STRING = \\\n    \"(sum by (username) (sum_over_time(task_gpu_percent[7d]))) / (sum by (username) (count_over_time(task_gpu_percent[7d])*100)) * 100\"\n\nQUERY_PREFIX = \"/prometheus/api/v1/query\"\nALERT_PREFIX = \"/alert-manager/api/v1/alerts\"\n# only the jobs that are running or completed within 7d should be included\n# currently, we just set the limit to max\nREST_JOB_API_PREFIX = \"/rest-server/api/v2/jobs?order=completionTime,DESC\"\n\nTOKEN = os.environ.get('PAI_BEARER_TOKEN')\nPROMETHEUS_SCRAPE_INTERVAL = int(os.environ.get('PROMETHEUS_SCRAPE_INTERVAL'))\n\ndef enable_request_debug_log(func):\n    def wrapper(*args, **kwargs):\n        requests_log = logging.getLogger(\"urllib3\")\n        level = requests_log.level\n        requests_log.setLevel(logging.DEBUG)\n        requests_log.propagate = True\n\n        try:\n            return func(*args, **kwargs)\n        finally:\n            requests_log.setLevel(level)\n            requests_log.propagate = False\n\n    return wrapper\n\n\ndef datetime_to_hours(dt):\n    \"\"\"Converts datetime.timedelta to hours\n\n    Parameters:\n    -----------\n    dt: datetime.timedelta\n\n    Returns:\n    --------\n    float\n    \"\"\"\n    return dt.days * 24 + dt.seconds / 3600\n\n\ndef check_timestamp_within_7d(timestamp):\n    \"\"\"\n    check if a timestamp is within 7 days\n    \"\"\"\n    return datetime.fromtimestamp(int(timestamp/1000), timezone.utc) > datetime.now(timezone.utc) - timedelta(days=7)\n\n\ndef get_related_jobs(rest_url):\n    \"\"\"\n    Returns all related jobs\n\n    Returns:\n    --------\n    list\n        All the jobs completed within 7 days will be included in the list.\n        Jobs completed before 7 days may also be included.\n        The list may contain duplicated jobs.\n    \"\"\"\n    jobs_related = []\n\n    offset = 0\n    limit = 5000\n    headers = {'Authorization': f\"Bearer {TOKEN}\"}\n    while True:\n        # rest_url already carries a query string, so append with '&'\n        resp = requests.get(rest_url + f\"&limit={limit}&offset={offset}\", headers=headers)\n        resp.raise_for_status()\n        jobs = resp.json()\n        jobs_related += jobs\n        # no more jobs, or the last job in the list completed before 7 days\n        if not jobs or (jobs[-1][\"completedTime\"] is not None and not check_timestamp_within_7d(jobs[-1][\"completedTime\"])):\n            break\n        offset += limit\n\n    return jobs_related\n\n\n@enable_request_debug_log\ndef get_usage_info(job_gpu_percent, job_gpu_hours, user_usage_result, rest_url):\n    job_infos = {}\n    user_infos = {}\n    jobs_related = get_related_jobs(rest_url)\n\n    for v in user_usage_result[\"data\"][\"result\"]:\n        user_infos[v[\"metric\"][\"username\"]] = {\n            \"username\": v[\"metric\"][\"username\"],\n            \"usage\": v[\"value\"][1][:6] + \"%\", \"resources_occupied\": 0\n        }\n    for v in job_gpu_percent[\"data\"][\"result\"]:\n        job_name = v[\"metric\"][\"job_name\"]\n        matched_job = list(\n            filter(lambda job: f\"{job['username']}~{job['name']}\" == job_name,\n                   jobs_related))\n        # ignore jobs that cannot be matched\n        if not matched_job:\n            logging.warning(\"Job %s not found.\", job_name)\n            continue\n        job_info = matched_job[0]\n        # ignore jobs that have not started\n        if not job_info[\"launchedTime\"]:\n            logging.warning(\"Job %s has not started, ignoring it.\", job_name)\n            continue\n\n        job_infos[job_name] = {\n            \"job_name\": job_name,\n            \"usage\": v[\"value\"][1],\n            \"gpu_number\": job_info[\"totalGpuNumber\"]\n        }\n\n        # get job duration\n        job_infos[job_name][\"start_time\"] = datetime.fromtimestamp(\n            
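# REST-server timestamps are epoch milliseconds (check_timestamp_within_7d\n            # above divides by 1000 the same way), hence the /1000 below\n            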
int(job_info[\"launchedTime\"]) / 1000,\n timezone.utc)\n # job has not finished\n if not job_info[\"completedTime\"]:\n job_infos[job_name][\"duration\"] = datetime.now(timezone.utc) - job_infos[job_name][\"start_time\"]\n # job has finished\n else:\n job_infos[job_name][\"duration\"] = datetime.fromtimestamp(\n int(job_info[\"completedTime\"]) / 1000,\n timezone.utc) - job_infos[job_name][\"start_time\"]\n job_infos[job_name][\"status\"] = job_info[\"state\"]\n\n # get matched job gpu hours info\n gpu_hours_info = list(\n filter(lambda job: job[\"metric\"][\"job_name\"] == job_name,\n job_gpu_hours[\"data\"][\"result\"]))\n job_infos[job_name][\"resources_occupied\"] = float(gpu_hours_info[0][\"value\"][1]) * PROMETHEUS_SCRAPE_INTERVAL / 3600 # GPU * hours\n\n # gpu hours by user\n username = job_info[\"username\"]\n user_infos[username][\"resources_occupied\"] += job_infos[job_name][\"resources_occupied\"]\n\n # format\n for job_info in job_infos.values():\n job_info[\"usage\"] = job_info[\"usage\"][:6] + \"%\"\n job_info[\"gpu_number\"] = str(job_info[\"gpu_number\"])\n job_info[\"duration\"] = str(job_info[\"duration\"])\n job_info[\"start_time\"] = job_info[\"start_time\"].strftime(\"%y-%m-%d %H:%M:%S\")\n job_info[\"resources_occupied\"] = f\"{job_info['resources_occupied']:.2f}\"\n for user_info in user_infos.values():\n user_info[\"resources_occupied\"] = f\"{user_info['resources_occupied']:.2f}\"\n\n # sort usage info by resources occupied\n job_usage = sorted(job_infos.values(), key=lambda x: float(x[\"resources_occupied\"]), reverse=True)\n user_usage = sorted(user_infos.values(), key=lambda x: float(x[\"resources_occupied\"]), reverse=True)\n\n return job_usage[:10], user_usage\n\n\n@enable_request_debug_log\ndef collect_metrics(url):\n query_url = url.rstrip(\"/\") + QUERY_PREFIX\n rest_url = url.rstrip(\"/\") + REST_JOB_API_PREFIX\n\n # cluster info\n logging.info(\"Collecting cluster usage info...\")\n resp = requests.get(query_url, params={\"query\": CLUSTER_QUERY_STRING})\n resp.raise_for_status()\n result = resp.json()\n cluster_usage = result[\"data\"][\"result\"][0][\"value\"][1][:6] + \"%\"\n\n # user info\n logging.info(\"Collecting user usage info...\")\n resp = requests.get(query_url, params={\"query\": USER_QUERY_STRING})\n resp.raise_for_status()\n user_usage_result = resp.json()\n\n # job info\n logging.info(\"Collecting job usage info...\")\n # job gpu percent\n resp = requests.get(query_url, params={\"query\": JOB_GPU_PERCENT})\n resp.raise_for_status()\n job_gpu_percent = resp.json()\n # job gpu hours\n resp = requests.get(query_url, params={\"query\": JOB_GPU_HOURS})\n resp.raise_for_status()\n job_gpu_hours = resp.json()\n\n job_usage, user_usage = get_usage_info(job_gpu_percent, job_gpu_hours, user_usage_result, rest_url)\n\n return cluster_usage, job_usage, user_usage\n\n\n@enable_request_debug_log\ndef send_alert(pai_url: str, cluster_usage, job_usage, user_usage):\n trigger_time = str(datetime.now(timezone.utc).date())\n post_url = pai_url.rstrip(\"/\") + ALERT_PREFIX\n alerts = []\n # for cluster\n alert = {\n \"labels\": {\n \"alertname\": \"usage\",\n \"report_type\": \"cluster-usage\",\n \"severity\": \"info\",\n \"cluster_usage\": cluster_usage,\n \"trigger_time\": trigger_time,\n },\n \"annotations\": {\n \"summary\": \"The cluster usage has been reported, please check your email-box for details.\"\n },\n \"generatorURL\": \"alert/script\"\n }\n alerts.append(alert)\n\n # for job\n for job in job_usage:\n alert = {\n \"labels\": {\n \"alertname\": 
\"usage\",\n \"report_type\": \"cluster-usage\",\n \"severity\": \"info\",\n \"job_name\": job[\"job_name\"],\n \"resources_occupied\": job[\"resources_occupied\"],\n \"gpu_number\": job[\"gpu_number\"],\n \"usage\": job[\"usage\"],\n \"duration\": job[\"duration\"],\n \"start_time\": job[\"start_time\"],\n \"status\": job[\"status\"],\n \"trigger_time\": trigger_time,\n },\n \"annotations\": {\n \"summary\": \"The cluster usage has been reported, please check your email-box for details.\"\n },\n \"generatorURL\": \"alert/script\"\n }\n alerts.append(alert)\n\n # for user\n for user in user_usage:\n alert = {\n \"labels\": {\n \"alertname\": \"usage\",\n \"report_type\": \"cluster-usage\",\n \"severity\": \"info\",\n \"username\": user[\"username\"],\n \"resources_occupied\": user[\"resources_occupied\"],\n \"usage\": user[\"usage\"],\n \"trigger_time\": trigger_time,\n },\n \"annotations\": {\n \"summary\": \"The cluster usage has been reported, please check your email-box for details.\"\n },\n \"generatorURL\": \"alert/script\"\n }\n alerts.append(alert)\n\n logging.info(\"Sending alerts to alert-manager...\")\n resp = requests.post(post_url, json=alerts)\n resp.raise_for_status()\n logging.info(\"Alerts sent to alert-manager.\")\n\n\ndef main():\n PAI_URI = os.environ.get(\"PAI_URI\")\n # collect cluster gpu usage information\n cluster_usage, job_usage, user_usage = collect_metrics(PAI_URI)\n # send alert to alert manager\n send_alert(PAI_URI, cluster_usage, job_usage, user_usage)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format=\n \"%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s\",\n level=logging.INFO,\n )\n main()\n","repo_name":"microsoft/pai","sub_path":"src/alert-manager/src/cluster-utilization/send_alert.py","file_name":"send_alert.py","file_ext":"py","file_size_in_byte":9691,"program_lang":"python","lang":"en","doc_type":"code","stars":2559,"dataset":"github-code","pt":"57"} +{"seq_id":"885234763","text":"import logging\nimport os\nimport os.path\nimport shutil\nimport numpy as np\nimport scipy\nimport scipy.linalg\nimport math\nimport re\nimport json\nimport jsonlines\nimport collections\nimport random\nfrom random import randint\n\nfrom . 
import pose_utils, vector\nfrom pose_utils import Pose, Gesture\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_CLIPS_PATH = 'clips.jsonl'\nDEFAULT_CONFIG_PATH = 'config.json'\nDEFAULT_IMAGES_PATH = 'images/'\nDEFAULT_TTS_PATH = os.path.join('tts-clips')\nDEFAULT_VIDEO_PATH = os.path.join('video-clips')\n\n# Joints in H3.6M -- data has 32 joints, but only 17 that move;\n# these are the indices.\nH36M_NAMES = [''] * 32\nH36M_NAMES[0] = 'Hip'\nH36M_NAMES[1] = 'RHip'\nH36M_NAMES[2] = 'RKnee'\nH36M_NAMES[3] = 'RFoot'\nH36M_NAMES[6] = 'LHip'\nH36M_NAMES[7] = 'LKnee'\nH36M_NAMES[8] = 'LFoot'\nH36M_NAMES[12] = 'Spine'\nH36M_NAMES[13] = 'Thorax'\nH36M_NAMES[14] = 'Neck/Nose'\nH36M_NAMES[15] = 'Head'\nH36M_NAMES[17] = 'LShoulder'\nH36M_NAMES[18] = 'LElbow'\nH36M_NAMES[19] = 'LWrist'\nH36M_NAMES[25] = 'RShoulder'\nH36M_NAMES[26] = 'RElbow'\nH36M_NAMES[27] = 'RWrist'\n\nDIRECTION_NAMES = [\n 'RThigh', 'RShin', 'LThigh', 'LShin', 'Breast', 'Backbone', 'Neck', 'Head',\n 'LCollar', 'LUpperArm', 'LLowerArm', 'RCollar', 'RUpperArm', 'RLowerArm'\n]\n\nUPPER_BODY_PARTS = [\n 'Spine', 'Thorax', 'Neck/Nose', 'Head', 'LShoulder', 'LElbow', 'LElbow',\n 'LWrist', 'RShoulder', 'RElbow', 'RWrist'\n]\n\n\nclass ClipWriter():\n\n def __init__(self, path=DEFAULT_CLIPS_PATH, mode='a'):\n if mode == 'w':\n logger.warn(\"Overwriting any clips in {}\".format(path))\n\n self.path = path\n self.writer = jsonlines.open(path, mode=mode)\n\n def send(self, clip):\n logger.debug('Writing clip {} to {}'.format(clip['id'], self.path))\n self.writer.write(clip)\n\n def close(self):\n self.writer.close()\n\n\ndef get_clips(path=DEFAULT_CLIPS_PATH):\n with jsonlines.open(path, mode='r') as clips:\n return list(clips)\n\n\ndef get_clip_ids(path=DEFAULT_CLIPS_PATH):\n with jsonlines.open(path, mode='r') as reader:\n return list(map(lambda c: c['id'], reader))\n\n\ndef remove_duplicate_clips(clips_path=DEFAULT_CLIPS_PATH,\n output_path='clips-deduped.jsonl'):\n writer = ClipWriter(output_path)\n\n with jsonlines.open(clips_path, 'r') as reader:\n ids = []\n n_total_clips = 0\n\n for clip in reader:\n n_total_clips += 1\n clip_id = clip['id']\n if clip_id in ids:\n continue\n\n writer.send(clip)\n ids.append(clip_id)\n\n writer.close()\n logger.info(\"Wrote {} unique out of {} clips.\".format(\n len(ids), n_total_clips))\n\n\ndef add_clips_to(clips_path_a, clips_path_b=DEFAULT_CLIPS_PATH):\n logger.info(\"Adding clips from {} to {}.\".format(clips_path_a, clips_path_b))\n\n writer = ClipWriter(clips_path_b)\n with jsonlines.open(clips_path_a, 'r') as reader:\n for clip in reader:\n writer.send(clip)\n\n writer.close()\n\n\ndef get_clip_image_filenames(clip, images_path=DEFAULT_IMAGES_PATH):\n filenames = os.listdir(images_path)\n return filter(lambda f: f.startswith(clip['id'] + '-'), filenames)\n\n\ndef move_2d_finished_images(clips_path=DEFAULT_CLIPS_PATH,\n images_path=DEFAULT_IMAGES_PATH,\n images_path_done=None):\n if images_path_done is None:\n images_path_done = os.path.join(images_path, 'done')\n\n clips_path = os.path.realpath(clips_path)\n images_path = os.path.realpath(images_path)\n images_path_done = os.path.realpath(images_path_done)\n\n logger.debug(\"Reading clips from {}.\".format(clips_path))\n logger.debug(\"Reading images from {}.\".format(images_path))\n logger.debug(\"Moving images of finished clips to {}.\"\n .format(images_path_done))\n\n if not os.path.exists(images_path_done):\n os.makedirs(images_path_done)\n\n n_clips_done = 0\n n_clips_all = 0\n all_filenames = list(pose_utils.get_outputs())\n with 
jsonlines.open(clips_path, 'r') as reader:\n    for clip in reader:\n      n_clips_all += 1\n\n      filenames = pose_utils.get_clip_files(clip, filenames=all_filenames)\n      has_detections = any(True for _ in filenames)\n\n      if not has_detections:\n        continue\n\n      n_clips_done += 1\n      image_files = list(\n          get_clip_image_filenames(clip, images_path=images_path))\n      logging.debug(\"Moving {} image files for clip {}.\".format(\n          len(image_files), clip['id']))\n      for filename in image_files:\n        in_path = os.path.join(images_path, filename)\n        out_path = os.path.join(images_path_done, os.path.basename(filename))\n        logger.debug(\"Moving {} to {}\".format(filename, out_path))\n        shutil.move(in_path, out_path)\n\n  logging.info(\"Moved {} out of {} clips' images.\".format(\n      n_clips_done, n_clips_all))\n\n\ndef get_clip_stats(clips_path=DEFAULT_CLIPS_PATH):\n  import itertools\n\n  clips = get_clips(clips_path)\n  angles = map(lambda clip: clip['angles'], clips)\n  all_angles = itertools.chain.from_iterable(angles)\n  all_angles = map(pose_utils.get_angle_list, all_angles)\n  all_angles = np.array(list(all_angles))\n\n  mean = np.mean(all_angles, axis=0)\n  std = np.std(all_angles, axis=0)\n\n  print(\"Mean: {}\".format(mean))\n  print(\"STD: {}\".format(std))\n\n  return mean, std\n\n\ndef add_clip_angles(read_path=DEFAULT_CLIPS_PATH,\n                    write_path='clips-angles.jsonl'):\n\n  assert read_path != write_path\n\n  writer = ClipWriter(write_path, mode='w')\n  clips = get_clips(read_path)\n\n  n_clips_in = 0\n  n_clips_out = 0\n\n  for clip in clips:\n    n_clips_in += 1\n    try:\n      points = np.asarray(clip['points_3d'])\n      # convert the raw joint positions into the angle representation\n      angles = Gesture(points, fmt='h36m').as_list(fmt='angles')\n      clip['angles'] = angles\n      writer.send(clip)\n      n_clips_out += 1\n    except ValueError as e:\n      logger.warn(e)\n\n  writer.close()\n\n  logger.info(\"Wrote {} out of {} clips.\".format(n_clips_out, n_clips_in))\n\n\ndef normalize_clips(read_path=DEFAULT_CLIPS_PATH,\n                    write_path='clips-normalized.jsonl'):\n\n  assert read_path != write_path\n\n  writer = ClipWriter(write_path, mode='w')\n  clips = get_clips(read_path)\n\n  n_clips_in = 0\n  n_clips_out = 0\n\n  all_angles = None\n\n  for clip in clips:\n    n_clips_in += 1\n    try:\n      if 'points_3d' not in clip:\n        raise ValueError('No 3D points in clip')\n\n      points = np.asarray(clip['points_3d'])\n      if len(points.shape) < 2:\n        raise ValueError('Badly shaped 3D points, has shape {}'.format(\n            points.shape))\n\n      points = list(map(straighten_pose, points))\n      points = patch_poses(points)\n      points = straighten_frames(points)\n      clip['points_3d'] = points.tolist()\n      clip['angles'] = list(map(pose_utils.get_pose_angles, points.tolist()))\n\n      angle_list = np.array(\n          list(map(pose_utils.get_angle_list, clip['angles'])))\n      if all_angles is None:\n        all_angles = angle_list\n      else:\n        all_angles = np.vstack((all_angles, angle_list))\n\n      writer.send(clip)\n      n_clips_out += 1\n    except ValueError as e:\n      logger.warn(e)\n\n  writer.close()\n\n  logger.info(\"Wrote {} out of {} clips.\".format(n_clips_out, n_clips_in))\n\n  config_path = DEFAULT_CONFIG_PATH\n  if os.path.isfile(config_path):\n    with open(config_path) as config_file:\n      config = json.load(config_file)\n  else:\n    config = {}\n\n  # First entry is for length indicator\n  config['angle_stats'] = {\n      'mean': [0.0] + np.mean(all_angles, axis=0).tolist(),\n      'std': [1.0] + np.std(all_angles, axis=0).tolist()\n  }\n\n  with open(config_path, 'w') as config_file:\n    json.dump(config, config_file)\n\n  logger.info(\"Saved statistics to {}\".format(config_path))\n\n\ndef 
get_crappy_3d_prediction(read_path='clips.dirty.jsonl'):\n clips = get_clips(read_path)\n\n for _ in range(len(clips)):\n try:\n clip = random.choice(clips)\n\n if 'points_3d' not in clip:\n continue\n\n points = np.asarray(clip['points_3d'])\n if len(points.shape) < 2:\n continue\n\n points = list(map(straighten_pose, points))\n points = patch_poses(points)\n points = straighten_frames(points)\n\n # If this didn't fail, try again\n except ValueError as e:\n logger.info(e)\n return np.asarray(clip['points_3d'])\n\n logger.warn('No crappy 3D predictions found.')\n\n\ndef straighten_pose(points_3d):\n points_3d = np.array(points_3d)\n\n # Figure out the person's orientation from hip position\n # Note that the hip center is [0, 0, 0]\n rhip = points_3d[1, :]\n rhip = rhip / np.linalg.norm(rhip)\n alpha = np.arcsin(rhip[2])\n if rhip[0] > 0.0:\n alpha = np.pi - alpha\n\n # Straighten the person's orientation so he looks towards +z\n # Rotate alpha degrees around the y axis, some transformation matrix thing\n M = rotation_matrix([0, 1, 0], alpha)\n assert points_3d.shape[1] == 3\n normal_points = np.matmul(points_3d, M)\n\n normal_points = normalize_pose_scale(normal_points)\n\n if np.any(np.abs(normal_points) > 1.0):\n raise ValueError(\"At least one point is larger than 1.0\")\n\n if not abs(normal_points[1, 2]) < 1.0e-2:\n logging.warn(\"Right hip z is large: {}\".format(normal_points[1, 2]))\n\n return normal_points\n\n\ndef straighten_frames(frames, epsilon=np.pi / 12):\n \"\"\"Straightens a pose by correcting for a forward lean.\n \"\"\"\n frames = np.asarray(frames)\n\n spine = frames[:, H36M_NAMES.index(\n 'Thorax'), :] - frames[:, H36M_NAMES.index('Hip'), :]\n alphas = vector.multi_angle_between(spine, [0, -1, 0], [1, 0, 0])\n alphas_clamped = np.clip(alphas, -epsilon, epsilon)\n\n patch_alphas = alphas_clamped - alphas\n patch_alpha = np.mean(patch_alphas)\n M = rotation_matrix([1, 0, 0], -patch_alpha)\n\n upper_body_indices = [H36M_NAMES.index(i) for i in UPPER_BODY_PARTS]\n frames[:, upper_body_indices] = np.matmul(frames[:, upper_body_indices], M)\n\n return frames\n\n\ndef rotation_matrix(axis, theta):\n \"\"\"\n Return the rotation matrix associated with counterclockwise rotation about\n the given axis by theta radians.\n \"\"\"\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n\ndef normalize_pose_scale(pose, to_height=1.0):\n \"\"\"Scales a pose so its height is 1\n \"\"\"\n head_index = pose_utils.H36M_NAMES.index('Head')\n foot_index = pose_utils.H36M_NAMES.index('LFoot')\n head_y = pose[head_index, 1]\n foot_y = pose[foot_index, 1]\n\n height = abs(head_y - foot_y)\n scale = to_height / height\n pose = pose * scale\n\n return pose\n\n\ndef patch_poses(poses, max_out_of_bounds_joints=4.0 / 32, max_distance=0.3):\n \"\"\"Fills in points that moved too much with the previous frame's point.\n\n Args:\n poses: ndarray (n_frames, n_joints, n_dims)\n max_out_of_bounds_joints: float Fraction of out of bounds allowed\n max_distance: maximum travel distance between frames\n (a person's height is 1)\n Returns:\n poses\n Raises:\n ValueError: if too many joints are out of bounds (specified by\n 
max_out_of_bounds_joints)\n  \"\"\"\n  max_distance_sq = max_distance**2\n\n  poses = np.asarray(poses)\n\n  for i, pose in enumerate(poses):\n    # Skip first frame\n    if i == 0:\n      continue\n\n    # Calculate distance to previous frame\n    previous_pose = poses[i - 1]\n    distance_sq = np.sum((pose - previous_pose)**2, axis=1)\n    assert distance_sq.shape == (pose.shape[0],), \\\n        \"Should sum for every joint, instead got shape {}\" \\\n        .format(distance_sq.shape)\n    out_of_bounds_joints = np.where(distance_sq > max_distance_sq)\n\n    if len(out_of_bounds_joints[0]) > \\\n        max_out_of_bounds_joints * pose.shape[0]:\n      raise ValueError(\"Too many ({} out of {}) joints out of bounds.\".format(\n          len(out_of_bounds_joints[0]), pose.shape[0]))\n\n    # Fill in with previous frame's points\n    pose[out_of_bounds_joints, :] = previous_pose[out_of_bounds_joints, :]\n\n  return poses\n\n\ndef clean_word(word):\n  return re.sub('[ 0123456789\\.\\,;:\\?!\\(\\)\\[\\]\\{\\}\"\\'\\<\\>%]', '',\n                word.strip().lower())\n\n\ndef create_vocabulary(vocab_size=512, clips_path=DEFAULT_CLIPS_PATH, vocab_path='vocab.txt'):\n  vocab = collections.Counter()\n  for clip in get_clips(clips_path):\n    words = clip['subtitle'].split(' ')\n    words = list(filter(lambda x: len(x.strip()) > 0, map(clean_word, words)))\n    vocab.update(words)\n\n  vocab_file = open(vocab_path, 'w')\n  # one word per line, most frequent first\n  for word, _ in vocab.most_common(vocab_size):\n    vocab_file.write('{}\\n'.format(word))\n  vocab_file.close()\n\n\ndef get_random_clip():\n  clips = get_clips()\n  clip = None\n  while clip is None or 'points_3d' not in clip or len(clip['points_3d']) == 0:\n    logger.info('Getting a random clip')\n    # randint is inclusive on both ends, so the upper bound is len - 1\n    clip = clips[randint(0, len(clips) - 1)]\n\n  return clip\n\n\ndef get_clusters(centers_path=\"cluster-centers.json\"):\n  with open(centers_path) as centers_file:\n    centers = json.load(centers_file)['clusters']\n\n  return centers\n\n\ndef try_for_random_clip(fn, clips_path=DEFAULT_CLIPS_PATH):\n  clips = get_clips(clips_path)\n  for _ in range(len(clips)):\n    try:\n      clip = random.choice(clips)\n      fn(clip)\n      break\n    except (ValueError, IOError) as e:\n      logger.error(e)\n      logger.info('Will try another clip...')\n","repo_name":"iamarcel/thesis","sub_path":"src/common/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":13437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"11396146005","text":"import cv2\nimport numpy as np\n\n# Transform colour space from RGB to Y, U and V, with a separate greyscale-style channel for each\ndef split_color_channels(image_path):\n    # Load the image using OpenCV\n    image = cv2.imread(image_path)\n    # Get image size\n    width, height = len(image[0]), len(image)\n\n    # Create numpy arrays for each channel; U and V can be negative, so use a\n    # signed dtype (uint8 would wrap negative values around)\n    Yy = np.zeros((height, width), dtype=np.uint8)\n    Cb = np.zeros((height, width), dtype=np.int16)\n    Cr = np.zeros((height, width), dtype=np.int16)\n\n    # Loop through the pixels of the image and compute the channel values\n    for x in range(height):\n        for y in range(width):\n            # Get the color channels for the current pixel\n            b, g, r = image[x, y]\n\n            # Compute the Y, U and V values for the current pixel\n            Yy[x, y] = int(0.299*r + 0.587*g + 0.114*b)\n            Cb[x, y] = int(-0.14713*r - 0.28886*g + 0.436*b)\n            Cr[x, y] = int(0.615*r - 0.51498*g - 0.10001*b)\n\n    # Return the channel arrays\n    return Yy, Cb, Cr\n\ndef combine_color_channels(y, u, v, height, width):\n    new_pixels = [[(0, 0, 0) for j in range(width)] for i in range(height)]\n    # Copy pixels from the channels to the new 
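RGB image.\n    # Worked example of the inverse transform below (an illustration only):\n    # a mid-grey pixel Y=128, U=0, V=0 gives\n    #   r = 128 + 1.13983*0 = 128,\n    #   g = 128 - 0.39465*0 - 0.5806*0 = 128,\n    #   b = 128 + 2.03211*0 = 128,\n    # i.e. zero chroma reproduces the grey level exactly.\n    # Copy pixels from the channels to the new 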
image\n    for i in range(height):\n        for j in range(width):\n            Yy = y[i][j]\n            Uu = u[i][j]\n            Vv = v[i][j]\n            r = max(min(int(Yy + 1.13983*Vv), 255), 0)\n            g = max(min(int(Yy - 0.39465*Uu - 0.5806*Vv), 255), 0)\n            b = max(min(int(Yy + 2.03211*Uu), 255), 0)\n            new_pixels[i][j] = (b, g, r)\n\n        print(\"Completed copying row \" + str(i))\n\n    new_image = np.array(new_pixels, dtype=np.uint8)\n    return new_image\n\ndef downsample_channel(channel, factor):\n    # Get channel size\n    width, height = len(channel[0]), len(channel)\n\n    # Compute the new size of the downsampled channel\n    new_height = int(height/factor)+1\n    new_width = int(width/factor)+1\n\n    # Create a new numpy array for the downsampled channel, preserving the\n    # input dtype (the U/V channels are signed)\n    downsampled = np.zeros((new_height, new_width), dtype=channel.dtype)\n\n    # Loop through the pixels of the downsampled channel and compute the average value\n    for y in range(0, height, factor):\n        for x in range(0, width, factor):\n            # Get the values for the current block\n            block = channel[y:y+factor, x:x+factor]\n\n            # Compute the average value for the current block\n            avg_value = int(np.mean(block))\n\n            # Set the corresponding value in the downsampled channel\n            downsampled[int(y/factor), int(x/factor)] = avg_value\n\n    return downsampled\n\ndef bicubic_interpolation(x, a):\n    if x < 0:\n        x = -x\n    if x <= 1:\n        return (a + 2) * x**3 - (a + 3) * x**2 + 1\n    elif x < 2:\n        return a * x**3 - 5 * a * x**2 + 8 * a * x - 4 * a\n    else:\n        return 0\n\ndef bicubic_upsample(channel, factor, a):\n    # Get channel size\n    width, height = len(channel[0]), len(channel)\n\n    # Compute the new size of the upsampled channel\n    new_height = int(height*factor)+1\n    new_width = int(width*factor)+1\n\n    # Create a new numpy array for the upsampled channel, preserving dtype\n    upsampled = np.zeros((new_height, new_width), dtype=channel.dtype)\n\n    # Loop through the pixels of the upsampled channel and compute the bicubic interpolation\n    for y in range(new_height):\n        for x in range(new_width):\n            # Compute the corresponding position in the original channel\n            orig_y = (y + 0.5) / factor - 0.5\n            orig_x = (x + 0.5) / factor - 0.5\n\n            # Compute the indices and weights for the surrounding pixels;\n            # the kernel is evaluated at the sub-pixel distance to each neighbour\n            y0 = int(np.floor(orig_y))\n            x0 = int(np.floor(orig_x))\n            indices = range(y0-1, y0+3)\n            indices = [(i, j) for i in indices for j in range(x0-1, x0+3)]\n            indices = [(i, j) for i, j in indices if i >= 0 and j >= 0 and i < height and j < width]\n            weights = [bicubic_interpolation(orig_y - i, a) * bicubic_interpolation(orig_x - j, a) for i, j in indices]\n\n            # Compute the interpolated value for the current pixel\n            interpolated = sum([channel[i, j] * weights[n] for n, (i, j) in enumerate(indices)])\n\n            # Set the corresponding value in the upsampled channel\n            upsampled[y, x] = int(interpolated)\n        print(\"Row \" + str(y) + \" interpolation completed.\")\n\n    return upsampled\n\ndef bilinear_upsample(channel, factor):\n    # Get channel size\n    height, width = len(channel), len(channel[0])\n\n    # Compute the new size of the upsampled channel\n    new_height = int(height * factor)\n    new_width = int(width * factor)\n\n    # Create a new numpy array for the upsampled channel, preserving dtype\n    upsampled = np.zeros((new_height, new_width), dtype=channel.dtype)\n\n    # Loop through the pixels of the upsampled channel and compute the bilinear interpolation\n    for y in range(new_height):\n        for x in range(new_width):\n            # Compute the corresponding position in the original channel\n            orig_y = (y + 0.5) / factor - 0.5\n            orig_x = (x + 0.5) / factor - 0.5\n\n            # Compute the indices for the surrounding pixels\n            y0 = int(orig_y)\n            x0 = int(orig_x)\n            y1 
= y0 + 1\n            x1 = x0 + 1\n\n            # Check if the surrounding pixels are within the bounds of the original channel\n            if y0 >= 0 and x0 >= 0 and y1 < height and x1 < width:\n                # Compute the weights for the surrounding pixels\n                wy0 = (y1 - orig_y) / (y1 - y0)\n                wx0 = (x1 - orig_x) / (x1 - x0)\n                wy1 = (orig_y - y0) / (y1 - y0)\n                wx1 = (orig_x - x0) / (x1 - x0)\n\n                # Compute the interpolated value for the current pixel\n                interpolated = wy0 * wx0 * channel[y0, x0] + wy0 * wx1 * channel[y0, x1] + wy1 * wx0 * channel[y1, x0] + wy1 * wx1 * channel[y1, x1]\n\n                # Set the corresponding value in the upsampled channel\n                upsampled[y, x] = int(interpolated)\n        print(\"Row \" + str(y) + \" interpolation completed.\")\n\n    return upsampled\n\nimage_path = \"hotdawg.jpg\"\nimage = cv2.imread(image_path)\nwidth, height = len(image[0]), len(image)\ny, cb, cr = split_color_channels(image_path)\n\ndown_y = downsample_channel(y, 2)\ndown_cb = downsample_channel(cb, 16)\ndown_cr = downsample_channel(cr, 16)\n\nup_y = bilinear_upsample(down_y, 2)\nup_cb = bilinear_upsample(down_cb, 16)\nup_cr = bilinear_upsample(down_cr, 16)\n\n# Recombine the up-sampled channels (not the originals, which would make the\n# down/up-sampling round trip a no-op)\nnew_image = combine_color_channels(up_y, up_cb, up_cr, height, width)\n\ncv2.imwrite(\"sampled_hotdawg.jpg\", new_image)\n","repo_name":"Dragorific/4TN4-Image-Scaling-Bicubic","sub_path":"newMain.py","file_name":"newMain.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"29419201","text":"# --- SPACE INVADERS --- #\n\n# Import Modules\nimport pygame\nimport random\nimport time\nimport shelve\nimport os\nimport sys\n\n# Initiate Game\npygame.init()\npygame.mixer.init()\n\n# ---CONSTANTS---\n\n# FrameRate\nFPS = 60\n\n# Set Screen Resolution (Game will still run if tweaked, too large an adjustment to width will cause issues however)\n# Screen width and height in a similar ratio as the original space invaders\nSCREEN_WIDTH = 700\nSCREEN_HEIGHT = 700\n\n# Colours\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\n# Fonts\nSMALL_FONT = pygame.font.SysFont(\"monospace\", 20)\nMED_FONT = pygame.font.SysFont(\"aettenschweiler\", 50)\nLARGE_FONT = pygame.font.SysFont(\"aettenschweiler\", 100)\n\n# Searches for HighScore folder\ndef file_search():\n    file_name = \"HighScores\" # file to be searched\n    cur_dir = os.getcwd() # Dir from where search starts, can be replaced with any path\n    File_Exists = False\n\n    file_list = os.listdir(cur_dir)\n    parent_dir = os.path.dirname(cur_dir)\n    if file_name in file_list:\n        print (\"File Exists in: \", cur_dir)\n        File_Exists = True\n    else:\n        while True:\n            if cur_dir == parent_dir: #if dir is root dir\n                print (\"File not found\")\n                break\n            else:\n                cur_dir = parent_dir\n\n    return File_Exists\n\n# Allows game to be distributed\ndef resource_path(relative_path, HS_ON=False):\n\n    if HS_ON: # If highscore file is found, uses that instead\n        Exists = file_search()\n        if Exists:\n            PATH = os.path.join(os.getcwd(), relative_path)\n        else:\n            PATH = os.path.join(sys._MEIPASS, relative_path)\n    else:\n        try:\n            base_path = sys._MEIPASS\n        except Exception:\n            base_path = os.path.abspath(\".\")\n\n        PATH = os.path.join(base_path, relative_path)\n\n    return PATH\n\n# Music\nPLACEHOLDER = resource_path(\"Sounds/SOUND PLACEHOLDER.WAV\")\nFIRSTNOTE = resource_path(\"Sounds/SOUND FirstNote.WAV\")\nSECONDNOTE = resource_path(\"Sounds/SOUND SecondNote.WAV\")\nTHIRDNOTE = resource_path(\"Sounds/SOUND ThirdNote.WAV\")\nFOURTHNOTE = resource_path(\"Sounds/SOUND 
FourthNote.WAV\")\nPLAYERSHOOT = resource_path(\"Sounds/SOUND PlayerShoot.WAV\")\nINVADERKILLED_SFX = resource_path(\"Sounds/SOUND InvaderKilled.WAV\")\nPLAYERKILLED_SFX = resource_path(\"Sounds/SOUND PlayerKilled.WAV\")\n\n# Images\nINVADER1 = resource_path(\"Images/IMG Invader1.png\")\nINVADER2 = resource_path(\"Images/IMG Invader2.png\")\nINVADER3 = resource_path(\"Images/IMG Invader3.png\")\nINVADERKILLED = resource_path(\"Images/IMG InvaderKilled.png\")\nPLAYER1 = resource_path(\"Images/IMG Player1.png\")\nPLAYER1_EXPLODE = resource_path(\"Images/IMG Player1Explosion.png\")\nPLAYER1_SHOOT = resource_path(\"Images/IMG Player1Shoot.png\")\nPLAYER2 = resource_path(\"Images/IMG Player2.png\")\nPLAYER2_EXPLODE = resource_path(\"Images/IMG Player2Explosion.png\")\nPLAYER2_SHOOT = resource_path(\"Images/IMG Player2Shoot.png\")\nPLAYER_LIFELOST = resource_path(\"Images/IMG PlayerLifeLost.png\")\n\n# Highscores\nSINGLE_HS = resource_path('HighScores/HighScore.txt', True)\nMULTI_HS = resource_path('HighScores/MultiplayerHighScore.txt', True)\n\n# Shield Dimensions\nSHIELD_WIDTH = SCREEN_WIDTH / 80\nSHIELD_HEIGHT = SCREEN_HEIGHT / 80\n\n\n# ---CLASSES---\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, x, y, Image, ImageShooting, ImageExplode):\n super().__init__()\n\n self.image = pygame.image.load(Image).convert()\n self.image_shoot = pygame.image.load(ImageShooting).convert()\n self.image_explode = pygame.image.load(ImageExplode).convert()\n\n # Passed in Location\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def Shoot(self):\n NewBullet = Bullet(self.rect.x + 23, self.rect.y, 12)\n pygame.mixer.music.load(PLAYERSHOOT)\n pygame.mixer.music.play(1)\n self.image = self.image_shoot\n return NewBullet\n\n def Explode(self):\n self.image = self.image_explode\n\n\nclass Invader(pygame.sprite.Sprite):\n def __init__(self, x, y, ScoreValue, ShotFreq, Image):\n super().__init__()\n\n self.image = pygame.image.load(Image).convert()\n\n # Passed in Location\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n # Speed vector\n self.change_x = 15\n self.change_y = 50\n\n # Score Value\n self.score = ScoreValue\n\n # Shot Frequency\n self.shot_freq = ShotFreq\n\n def InvaderShoot(self):\n NewBullet = Bullet(self.rect.x + 15, self.rect.y + 10, -5)\n return NewBullet\n\n # Allows invader to shoot at random points. 
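On average this fires once\n    # every shot_freq calls: randint(1, shot_freq) returns 1 with probability\n    # 1/shot_freq, so at 60 FPS a shot_freq of 3000 works out to roughly one\n    # shot per invader every 50 seconds (a rough estimate, assuming one call\n    # per frame). 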
Different shot frequency can be set for a harder invader\n def RandShooting(self):\n EnemyShoot = 1\n RandNum = random.randint(1, self.shot_freq)\n if RandNum == EnemyShoot:\n NewBullet = Invader.InvaderShoot(self)\n return NewBullet\n\n def UpdateX(self):\n # New x for invader\n self.rect.x += self.change_x\n\n def UpdateY(self):\n self.rect.y += self.change_y\n\n\nclass Shield(pygame.sprite.Sprite):\n\n def __init__(self, x, y):\n super().__init__()\n\n # Image created\n self.image = pygame.Surface([SHIELD_WIDTH, SHIELD_HEIGHT])\n self.image.fill(GREEN)\n\n # Passed in Location\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n\nclass Bullet(pygame.sprite.Sprite):\n\n def __init__(self, x, y, Speed):\n super().__init__()\n\n self.image = pygame.Surface([3, 10])\n self.image.fill(WHITE)\n\n # Passed in Location\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n # Speed vector\n self.change_y = Speed\n\n def Update(self):\n # New position for bullet\n self.rect.y -= self.change_y\n\n\n# ---FUNCTIONS---\ndef TextObjects(Text, Font, Colour, Centre):\n TextSurface = Font.render(Text, True, Colour)\n TextSurface_rect = TextSurface.get_rect()\n TextSurface_rect.center = Centre\n return TextSurface, TextSurface_rect\n\n\ndef DisplayMessage(Text, Font, Colour, Screen, Centre):\n Text, Text_rect = TextObjects(Text, Font, Colour, Centre)\n Screen.blit(Text, Text_rect)\n\n\ndef DisplayImage(Image, Screen, x, y):\n Image = pygame.image.load(Image).convert()\n Screen.blit(Image, (x, y))\n\n\ndef UpdateHighScore(Score, HighScoreFile, Reset=False):\n if \"HighScore\" in HighScoreFile:\n HighScore = HighScoreFile[\"HighScore\"]\n if Score > HighScore:\n HighScoreFile[\"HighScore\"] = Score\n elif Reset:\n Score = 0\n HighScoreFile[\"HighScore\"] = Score\n HighScoreFile.close()\n\n\ndef SpawnInvaders(RowNumber, InvadersPerRow, ScoreValue, ShotFreq, Image):\n InvaderList = []\n for Index in range(InvadersPerRow):\n x = (SCREEN_WIDTH - SCREEN_WIDTH / 3) / (InvadersPerRow + 1) * (Index + 1) - SCREEN_WIDTH / 40\n y = SCREEN_HEIGHT / 6 + SCREEN_HEIGHT / 18 * RowNumber\n Inv = Invader(x, y, ScoreValue, ShotFreq, Image)\n InvaderList.append(Inv)\n return InvaderList\n\n\ndef SpawnShields(ShieldsPerBlock, NumBlocks, NumRows):\n ShieldList = []\n for IndexShieldsPerBlock in range(int(-ShieldsPerBlock / 2), int(ShieldsPerBlock / 2)):\n for IndexRowNumber in range(NumRows):\n y = SCREEN_HEIGHT - (100 + ((SHIELD_HEIGHT - 1) * (IndexRowNumber - 1)))\n for IndexBlockNumber in range(NumBlocks):\n x = (SCREEN_WIDTH / NumBlocks) * IndexBlockNumber + (\n (SCREEN_WIDTH / NumBlocks) / 2) + IndexShieldsPerBlock * (SHIELD_WIDTH - 1)\n Shi = Shield(x, y)\n ShieldList.append(Shi)\n return ShieldList\n\n\ndef Levels(Level):\n InvaderList = []\n if Level == 1:\n InvaderList = [Level1()]\n if Level == 2:\n InvaderList = [Level2()]\n if Level == 3:\n InvaderList = [Level3()]\n if Level == 4:\n InvaderList = [Level4()]\n if Level == 5:\n InvaderList = [Level5()]\n return InvaderList\n\n\n# Levels here can be changed easily\ndef Level1():\n InvaderList = [SpawnInvaders(1, 11, 50, 3000, INVADER3),\n SpawnInvaders(2, 11, 20, 5000, INVADER2),\n SpawnInvaders(3, 11, 20, 5000, INVADER2),\n SpawnInvaders(4, 11, 10, 7500, INVADER1),\n SpawnInvaders(5, 11, 10, 7500, INVADER1)]\n # SpawnInvaders(RowNumber (From top to Bottom), InvadersPerRow, ScoreValue, ShotFreq (Lower = Higher Freq), Image)\n return InvaderList\n\n\ndef Level2():\n InvaderList = [SpawnInvaders(2, 11, 70, 2500, INVADER3),\n 
SpawnInvaders(3, 11, 50, 3000, INVADER2),\n SpawnInvaders(4, 11, 50, 3000, INVADER2),\n SpawnInvaders(5, 11, 20, 5000, INVADER1),\n SpawnInvaders(6, 11, 20, 5000, INVADER1)]\n # SpawnInvaders(RowNumber (From top to Bottom), InvadersPerRow, ScoreValue, ShotFreq (Lower = Higher Freq), Image)\n return InvaderList\n\n\ndef Level3():\n InvaderList = [SpawnInvaders(3, 10, 200, 1000, INVADER3),\n SpawnInvaders(4, 10, 100, 2000, INVADER2),\n SpawnInvaders(5, 10, 100, 2000, INVADER2),\n SpawnInvaders(6, 10, 50, 3000, INVADER1),\n SpawnInvaders(7, 10, 50, 3000, INVADER1)]\n # SpawnInvaders(RowNumber (From top to Bottom), InvadersPerRow, ScoreValue, ShotFreq (Lower = Higher Freq), Image)\n return InvaderList\n\n\ndef Level4():\n InvaderList = [SpawnInvaders(4, 8, 300, 500, INVADER3),\n SpawnInvaders(5, 8, 200, 1000, INVADER2),\n SpawnInvaders(6, 8, 200, 1000, INVADER2),\n SpawnInvaders(7, 8, 100, 2000, INVADER1),\n SpawnInvaders(8, 8, 100, 2000, INVADER1)]\n # SpawnInvaders(RowNumber (From top to Bottom), InvadersPerRow, ScoreValue, ShotFreq (Lower = Higher Freq), Image)\n return InvaderList\n\n\ndef Level5():\n InvaderList = [SpawnInvaders(5, 7, 500, 250, INVADER3),\n SpawnInvaders(6, 7, 300, 500, INVADER2),\n SpawnInvaders(7, 7, 300, 500, INVADER2),\n SpawnInvaders(8, 7, 200, 1000, INVADER1),\n SpawnInvaders(9, 7, 200, 1000, INVADER1)]\n # SpawnInvaders(RowNumber (From top to Bottom), InvadersPerRow, ScoreValue, ShotFreq (Lower = Higher Freq), Image)\n return InvaderList\n\n\n# Main Loop\ndef SettingsLoop():\n # --Setting Variables--\n\n # Create a screen and set a clock\n SettingsScreen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\n Clock = pygame.time.Clock()\n\n # Starting Variables (Adjustable)\n TwoPlayer = False\n Lives = 3 # Starting lives\n Score = 0 # Starting score\n ReloadTime = 0.75 # Player shooting delay (lower is faster) (Not less than 0.3!)\n MovementSensitivity = 3.5 # Player movement sensitivity\n TimeForInvaderUpdateX = 1.05 # The time per invader movement (lower is faster)\n InvaderSpeedUpIncrease = 0.7 # Adjusts the delay for invaders to move for every change in y (lower is faster)\n InvaderSpeedUpIncreaseReductionPerY = 1.05 # The reduction in invader speed up amount per change in y (higher is\n # more)\n InvaderShotFreqIncrease = 1.2 # How much more frequently the invaders shoot for every change in y (higher is more)\n InvaderUpdateXSpeedUpPerLevel = 0.05 # How much faster the invaders will start per level (higher is faster)\n\n Level = 1 # Starting Level\n ExtraLifePerLevel = True # Whether or not player gains a life for each level completed\n\n # Working Variables (Do Not Adjust)\n Done = False\n Play = False\n\n # --Settings Loop--\n while not Done:\n\n SettingsScreen.fill(BLACK)\n DisplayMessage(\"SPACE INVADERS\", LARGE_FONT, GREEN, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 8))\n DisplayMessage(\"5 Waves of Survival\", MED_FONT, GREEN, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 5))\n DisplayMessage(\"Press Enter to Start\", MED_FONT, GREEN, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT - SCREEN_HEIGHT / 4))\n\n DisplayImage(INVADER3, SettingsScreen, SCREEN_WIDTH / 2, SCREEN_HEIGHT - SCREEN_HEIGHT / 5)\n DisplayImage(INVADER2, SettingsScreen, SCREEN_WIDTH / 2 - SCREEN_WIDTH / 20,\n SCREEN_HEIGHT - SCREEN_HEIGHT / 5)\n DisplayImage(INVADER1, SettingsScreen, SCREEN_WIDTH / 2 + SCREEN_WIDTH / 20,\n SCREEN_HEIGHT - SCREEN_HEIGHT / 5)\n\n # Displays Instructions and HighScore\n if TwoPlayer:\n Multiplayer = \"Activated\"\n 
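# note: two-player mode shares a single Lives/Score pool, so the control instructions and the multiplayer high-score file are switched together in this branch\n 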
DisplayMessage(\"PLAYER 1: Use <- and -> to move and the UP key to shoot\", SMALL_FONT, WHITE, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 3))\n DisplayMessage(\"PLAYER 2: Use A and D to move and W to shoot\", SMALL_FONT, WHITE,\n SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 3 + SCREEN_HEIGHT / 20))\n HighScoreFile = shelve.open(MULTI_HS)\n if \"HighScore\" in HighScoreFile:\n DisplayMessage(\"HIGHSCORE:\" + str(HighScoreFile[\"HighScore\"]), MED_FONT, GREEN, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT - SCREEN_HEIGHT / 3))\n else:\n HighScoreFile[\"HighScore\"] = 0\n else:\n Multiplayer = \"Deactivated\"\n DisplayMessage(\"Use <- and -> to move. Click the space bar to shoot\", SMALL_FONT, WHITE, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 3))\n HighScoreFile = shelve.open(SINGLE_HS)\n if \"HighScore\" in HighScoreFile:\n DisplayMessage(\"HIGHSCORE:\" + str(HighScoreFile[\"HighScore\"]), MED_FONT, GREEN, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT - SCREEN_HEIGHT / 3))\n else:\n HighScoreFile[\"HighScore\"] = 0\n\n MultiplayerClickBox = pygame.Rect(0, 0, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 20)\n MultiplayerClickBox.center = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)\n HighScoreResetBox = pygame.Rect(0, 0, SCREEN_WIDTH / 4, SCREEN_HEIGHT / 20)\n HighScoreResetBox.center = (SCREEN_WIDTH-(SCREEN_WIDTH / 6), SCREEN_HEIGHT-(SCREEN_HEIGHT / 12))\n DisplayMessage(\"Multiplayer Mode:\" + Multiplayer, SMALL_FONT, WHITE, SettingsScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n DisplayMessage(\"Reset HighScore\", SMALL_FONT, RED, SettingsScreen,\n (SCREEN_WIDTH-(SCREEN_WIDTH / 6), SCREEN_HEIGHT-(SCREEN_HEIGHT / 12)))\n\n for event in pygame.event.get():\n # quits game if X clicked in top corner\n if event.type == pygame.QUIT:\n Done = True\n if event.type == pygame.KEYDOWN:\n if event.key == 13: # 13 is the number of the enter key\n Done = True\n Play = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n\n if MultiplayerClickBox.collidepoint(event.pos):\n TwoPlayer = not TwoPlayer\n if HighScoreResetBox.collidepoint(event.pos): # Resets Highscore\n if TwoPlayer:\n HighScoreFile = shelve.open(MULTI_HS)\n UpdateHighScore(Score, HighScoreFile, True)\n else:\n HighScoreFile = shelve.open(SINGLE_HS)\n UpdateHighScore(Score, HighScoreFile, True)\n\n\n pygame.display.flip()\n\n Clock.tick(FPS)\n\n return TwoPlayer, Lives, Score, ReloadTime, MovementSensitivity, TimeForInvaderUpdateX, InvaderSpeedUpIncrease, \\\n InvaderShotFreqIncrease, Level, ExtraLifePerLevel, InvaderUpdateXSpeedUpPerLevel, \\\n InvaderSpeedUpIncreaseReductionPerY, Play\n\n\ndef GameLoop(TwoPlayer, Lives, Score, ReloadTime, MovementSensitivity, TimeForInvaderUpdateX, InvaderSpeedUpIncrease,\n InvaderShotFreqIncrease, Level, InvaderUpdateXSpeedUpPerLevel, InvaderSpeedUpIncreaseReductionPerY):\n # --Setting Variables--\n\n # Create a screen and set a clock\n GameScreen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\n Clock = pygame.time.Clock()\n\n # Working Variables (Do Not Adjust)\n LastShotTime = 0\n Reloading = False\n PressedKeys = {\"left\": False, \"right\": False}\n LastInvaderUpdateX = 0\n Move = False\n ChangeDirectionYSpeedShotFreq = False\n InvaderLanded = False\n ThemeNote = FIRSTNOTE\n RUNNING, PAUSED, GAMEOVER, VICTORY = 0, 1, 2, 3\n State = RUNNING\n Done = False\n Restart = False\n LevelUp = False\n\n # Player1 Created\n Player1 = Player((SCREEN_WIDTH - 50) / 2, SCREEN_HEIGHT - 70, PLAYER1,\n PLAYER1_SHOOT, PLAYER1_EXPLODE)\n PlayerList = [Player1]\n\n # Player2 Created if 
Multiplayer Selected\n if TwoPlayer:\n Player2 = Player((SCREEN_WIDTH - 50) / 2, SCREEN_HEIGHT - 70, PLAYER2,\n PLAYER2_SHOOT, PLAYER2_EXPLODE)\n PlayerList.append(Player2)\n LastShotTimeP2 = 0\n ReloadingP2 = False\n PressedKeysP2 = {\"left\": False, \"right\": False}\n\n PlayerList = pygame.sprite.Group(PlayerList)\n\n # Invaders Created\n InvaderList = [Levels(Level)]\n InvaderList = pygame.sprite.Group(InvaderList)\n InvaderKillList = []\n\n # Shields Created\n ShieldList = [SpawnShields(10, 4, 5)]\n # SpawnShields(ShieldsPerBlock, NumBlocks, NumRows)\n ShieldList = pygame.sprite.Group(ShieldList)\n\n # Bullet Lists Created\n Bullets = []\n InvaderBullets = []\n\n # Sprites Added\n all_sprites_list = pygame.sprite.Group()\n all_sprites_list.add(PlayerList, InvaderList, ShieldList)\n\n # --GameLoop--\n while not Done:\n if State == RUNNING:\n\n # Player Controls\n for event in pygame.event.get():\n # quits game if X clicked in top corner\n if event.type == pygame.QUIT:\n Done = True\n\n elif event.type == pygame.KEYDOWN:\n # shoots when space clicked\n if event.key == pygame.K_SPACE and not TwoPlayer:\n # If reload time has elapsed allows a shot to be fired\n if time.perf_counter() - LastShotTime > ReloadTime:\n Reloading = False\n if not Reloading:\n NewBullet = Player.Shoot(Player1)\n all_sprites_list.add(NewBullet)\n Bullets.append(NewBullet)\n LastShotTime = time.perf_counter()\n Reloading = True\n # Pressing keys and letting go adjust a variable so the player can hold keys to move smoothly\n if event.key == pygame.K_LEFT:\n PressedKeys[\"left\"] = True\n if event.key == pygame.K_RIGHT:\n PressedKeys[\"right\"] = True\n if event.key == pygame.K_p:\n State = PAUSED\n\n if TwoPlayer:\n if event.key == pygame.K_w:\n # If reload time has elapsed allows a shot to be fired\n if time.perf_counter() - LastShotTimeP2 > ReloadTime:\n ReloadingP2 = False\n if not ReloadingP2:\n NewBullet = Player.Shoot(Player2)\n all_sprites_list.add(NewBullet)\n Bullets.append(NewBullet)\n LastShotTimeP2 = time.perf_counter()\n ReloadingP2 = True\n # Swaps player 1 shoot to up if two player so players hands don't get in each others way\n if event.key == pygame.K_UP:\n # If reload time has elapsed allows a shot to be fired\n if time.perf_counter() - LastShotTime > ReloadTime:\n Reloading = False\n if not Reloading:\n NewBullet = Player.Shoot(Player1)\n all_sprites_list.add(NewBullet)\n Bullets.append(NewBullet)\n LastShotTime = time.perf_counter()\n Reloading = True\n # Pressing keys and letting go adjust a variable so the player can hold keys to move smoothly\n if event.key == pygame.K_a:\n PressedKeysP2[\"left\"] = True\n if event.key == pygame.K_d:\n PressedKeysP2[\"right\"] = True\n\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n PressedKeys[\"left\"] = False\n if event.key == pygame.K_RIGHT:\n PressedKeys[\"right\"] = False\n\n if TwoPlayer:\n if event.key == pygame.K_a:\n PressedKeysP2[\"left\"] = False\n elif event.key == pygame.K_d:\n PressedKeysP2[\"right\"] = False\n\n # Moves player and sets boundaries\n if PressedKeys[\"right\"]:\n if Player1.rect.x < SCREEN_WIDTH - 70 * SCREEN_WIDTH / 896:\n Player1.rect.x += MovementSensitivity\n if PressedKeys[\"left\"]:\n if Player1.rect.x > 10 * SCREEN_WIDTH / 784:\n Player1.rect.x -= MovementSensitivity\n\n if TwoPlayer:\n if PressedKeysP2[\"right\"]:\n if Player2.rect.x < SCREEN_WIDTH - 70 * SCREEN_WIDTH / 896:\n Player2.rect.x += MovementSensitivity\n if PressedKeysP2[\"left\"]:\n if Player2.rect.x > 10 * SCREEN_WIDTH / 784:\n 
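# clamp to the left screen edge; 10 * SCREEN_WIDTH / 784 scales a roughly 10 px margin to the current screen width\n 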
Player2.rect.x -= MovementSensitivity\n\n # Invaders will move per unit time and a note will play every time they move\n if time.perf_counter() - LastInvaderUpdateX > TimeForInvaderUpdateX - \\\n (InvaderUpdateXSpeedUpPerLevel * Level):\n Move = True\n if Move:\n for Enemy in InvaderList:\n Invader.UpdateX(Enemy)\n LastInvaderUpdateX = time.perf_counter()\n pygame.mixer.music.load(ThemeNote)\n pygame.mixer.music.play(1)\n Move = False\n # Changes Note\n if ThemeNote == FOURTHNOTE:\n ThemeNote = PLACEHOLDER\n if ThemeNote == THIRDNOTE:\n ThemeNote = FOURTHNOTE\n if ThemeNote == SECONDNOTE:\n ThemeNote = THIRDNOTE\n if ThemeNote == FIRSTNOTE:\n ThemeNote = SECONDNOTE\n if ThemeNote == PLACEHOLDER:\n ThemeNote = FIRSTNOTE\n\n # Y movement, shooting for invaders and checks if colliding with shields\n for Enemy in InvaderList:\n\n # Checks if an invader collides with a shield or has passed it and removes the shield if it does\n pygame.sprite.spritecollide(Enemy, ShieldList, True)\n for Barrier in ShieldList:\n if Enemy.rect.y > Barrier.rect.y and Enemy.rect.x >= Barrier.rect.x:\n Barrier.kill()\n ShieldList.remove(Barrier)\n\n # All invaders remaining will shoot at randomly intervals\n NewBullet = Invader.RandShooting(Enemy)\n if NewBullet is not None:\n all_sprites_list.add(NewBullet)\n InvaderBullets.append(NewBullet)\n # If any invader hits the side of the screen all invaders will change their direction,\n # move down and speed up\n if Enemy.rect.x > SCREEN_WIDTH - 50 or Enemy.rect.x < 0:\n ChangeDirectionYSpeedShotFreq = True\n\n if ChangeDirectionYSpeedShotFreq:\n for Enemy in InvaderList:\n Invader.UpdateY(Enemy)\n Enemy.rect.x -= Enemy.change_x\n Enemy.change_x = - Enemy.change_x\n Enemy.shot_freq = int(Enemy.shot_freq / InvaderShotFreqIncrease)\n # If an invader lands the player loses\n if Enemy.rect.y >= Player1.rect.y:\n InvaderLanded = True\n # Invaders are sped up\n TimeForInvaderUpdateX *= InvaderSpeedUpIncrease\n if InvaderSpeedUpIncrease < 1:\n InvaderSpeedUpIncrease *= InvaderSpeedUpIncreaseReductionPerY\n # Resets variable\n ChangeDirectionYSpeedShotFreq = False\n\n # Checks player bullets\n for Ammo in Bullets:\n Ammo.Update()\n # Checks if a player bullet collides with an invader and removes the bullet if it does\n InvaderKillList = pygame.sprite.spritecollide(Ammo, InvaderList, False)\n if len(InvaderKillList) > 0:\n Ammo.kill()\n Bullets.remove(Ammo)\n # Plays invader killed sound, shows explosion and adds to overall score\n for Enemy in InvaderKillList:\n pygame.mixer.music.load(INVADERKILLED_SFX)\n pygame.mixer.music.play(1)\n Enemy.image = pygame.image.load(INVADERKILLED).convert()\n Score += Enemy.score\n # Checks if a player bullet collides with a shield, removes the shield and bullet if it does\n ShieldDamageFriendly = pygame.sprite.spritecollide(Ammo, ShieldList, True)\n if len(ShieldDamageFriendly) != 0:\n Ammo.kill()\n Bullets.remove(Ammo)\n # Any bullet off the screen is removed\n if Ammo.rect.y < -10:\n Ammo.kill()\n Bullets.remove(Ammo)\n\n # Checks invader bullets\n for Ammo in InvaderBullets:\n Ammo.Update()\n # Checks if an invader bullet collides with the player, removes a life and bullet if it does\n LivesLost = pygame.sprite.spritecollide(Ammo, PlayerList, False)\n if len(LivesLost) != 0:\n Ammo.kill()\n InvaderBullets.remove(Ammo)\n Lives -= 1\n # Players Flash Red when a life is lost\n for P in PlayerList:\n P.image = pygame.image.load(PLAYER_LIFELOST).convert()\n # Checks if an invader bullet collides with a shield, removes the shield and bullet if it 
does\n ShieldDamage = pygame.sprite.spritecollide(Ammo, ShieldList, True)\n if len(ShieldDamage) != 0:\n Ammo.kill()\n InvaderBullets.remove(Ammo)\n # Any invader bullet off the screen is removed\n if Ammo.rect.y > SCREEN_HEIGHT:\n Ammo.kill()\n InvaderBullets.remove(Ammo)\n\n # Game Over Protocol\n if Lives <= 0 or InvaderLanded:\n pygame.mixer.music.load(PLAYERKILLED_SFX)\n pygame.mixer.music.play(1)\n for P in PlayerList:\n P.Explode()\n State = GAMEOVER\n\n # Victory Protocol\n if len(InvaderList) == 0:\n State = VICTORY\n\n # Redraws screen\n GameScreen.fill(BLACK)\n all_sprites_list.draw(GameScreen)\n\n # Adds lives at top\n DisplayMessage(\"Lives: \" + str(Lives), SMALL_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH - SCREEN_WIDTH * 0.1, SCREEN_HEIGHT * 0.03))\n # Adds score at top\n DisplayMessage(\"Score: \" + str(Score), SMALL_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH * 0.12, SCREEN_HEIGHT * 0.03))\n\n # Adds level at top\n DisplayMessage(\"Level \" + str(Level), SMALL_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT * 0.03))\n\n # Resets Player Image after shooting\n Player1.image = pygame.image.load(PLAYER1).convert()\n if TwoPlayer:\n Player2.image = pygame.image.load(PLAYER2).convert()\n\n # Removes Invader after it explodes\n for Enemy in InvaderKillList:\n Enemy.kill()\n InvaderList.remove(Enemy)\n InvaderKillList.remove(Enemy)\n\n # GameOver Protocol\n elif State == GAMEOVER:\n DisplayMessage(\"GAME OVER!\", LARGE_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, (SCREEN_HEIGHT / 2 - (SCREEN_HEIGHT / 12))))\n DisplayMessage(\"Press R to Restart the level with 3 lives\", MED_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, (SCREEN_HEIGHT / 2 - (SCREEN_HEIGHT / 3))))\n DisplayMessage(\"or Q to Quit\", MED_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, (SCREEN_HEIGHT / 2 - (SCREEN_HEIGHT / 3) + SCREEN_HEIGHT / 20)))\n\n if not TwoPlayer:\n HighScoreFile = shelve.open(SINGLE_HS)\n HighScore = HighScoreFile[\"HighScore\"]\n if Score > HighScore:\n DisplayMessage(\"NEW HIGHSCORE SET!\", MED_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 + SCREEN_HEIGHT / 15))\n if Score <= HighScore:\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n\n if TwoPlayer:\n HighScoreFile = shelve.open(MULTI_HS)\n HighScore = HighScoreFile[\"HighScore\"]\n if Score > HighScore:\n DisplayMessage(\"NEW HIGHSCORE SET!\", MED_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 + SCREEN_HEIGHT / 15))\n if Score <= HighScore:\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, RED, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n Done = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n Lives = 3\n Restart = True\n Done = True\n if event.key == pygame.K_q:\n Done = True\n\n # Victory Protocol\n elif State == VICTORY:\n DisplayMessage(\"YOU WIN!\", LARGE_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, (SCREEN_HEIGHT / 2 - (SCREEN_HEIGHT / 12))))\n if Level != 5:\n DisplayMessage(\"Press N for the Next Level or Q to Quit\", MED_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, (SCREEN_HEIGHT / 2 - (SCREEN_HEIGHT / 3))))\n\n if not TwoPlayer:\n HighScoreFile = shelve.open(SINGLE_HS)\n HighScore = 
HighScoreFile[\"HighScore\"]\n if Score > HighScore:\n DisplayMessage(\"NEW HIGHSCORE SET!\", MED_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 + SCREEN_HEIGHT / 15))\n if Score <= HighScore:\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n\n if TwoPlayer:\n HighScoreFile = shelve.open(MULTI_HS)\n HighScore = HighScoreFile[\"HighScore\"]\n if Score > HighScore:\n DisplayMessage(\"NEW HIGHSCORE SET!\", MED_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 + SCREEN_HEIGHT / 15))\n if Score <= HighScore:\n DisplayMessage(\"SCORE:\" + str(Score), LARGE_FONT, WHITE, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n Done = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_n:\n LevelUp = True\n Restart = True\n Done = True\n if event.key == pygame.K_q:\n Done = True\n\n # Pause Protocol\n elif State == PAUSED:\n DisplayMessage(\"PAUSED\", LARGE_FONT, GREEN, GameScreen,\n (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n Done = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n State = RUNNING\n\n # Makes the game appear in the window\n pygame.display.flip()\n\n Clock.tick(FPS)\n\n return Restart, LevelUp, Lives, Score, GameScreen\n\n\ndef MainLoop():\n # Saves settings into a variable\n ChosenSettings = SettingsLoop()\n Play = ChosenSettings[12]\n Level = ChosenSettings[8]\n Lives = ChosenSettings[1]\n Score = ChosenSettings[2]\n # Main Loop\n while Play:\n ContinueOptions = GameLoop(ChosenSettings[0], Lives, Score, ChosenSettings[3], ChosenSettings[4],\n ChosenSettings[5], ChosenSettings[6], ChosenSettings[7], Level, ChosenSettings[10],\n ChosenSettings[11])\n Play = ContinueOptions[0]\n LevelUp = ContinueOptions[1]\n Lives = ContinueOptions[2]\n\n # Saves HighScore if a new one has been set\n Score = ContinueOptions[3]\n if not ChosenSettings[0]:\n HighScoreFile = shelve.open(SINGLE_HS)\n UpdateHighScore(Score, HighScoreFile)\n\n if ChosenSettings[0]:\n HighScoreFile = shelve.open(MULTI_HS)\n UpdateHighScore(Score, HighScoreFile)\n\n # Moves to the next level if the previous one has been completed has been completed\n if LevelUp:\n Score = ContinueOptions[3]\n Level += 1\n if ChosenSettings[9]:\n Lives += 1\n\n # Ends programme\n pygame.quit()\n\n\n# ---MAIN---\n\nif __name__ == \"__main__\":\n MainLoop()","repo_name":"HarryHD123/SpaceInvaders","sub_path":"MyGame.py","file_name":"MyGame.py","file_ext":"py","file_size_in_byte":35672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20015543671","text":"__author__ = 'Daniel.Lee'\r\n\r\nimport re\r\n\r\nclass Solution:\r\n # @return an integer\r\n def romanToInt(self, s):\r\n sum = 0\r\n for i in range(0,len(s)):\r\n if s[i] == 'I': sum += 1\r\n elif s[i] == 'V': sum += 5\r\n elif s[i] == 'X': sum += 10\r\n elif s[i] == 'L': sum += 50\r\n elif s[i] == 'C': sum += 100\r\n elif s[i] == 'D': sum += 500\r\n elif s[i] == 'M': sum += 1000\r\n\r\n if s.find('IV') != -1: sum -= 2\r\n if s.find('IX') != -1: sum -= 2\r\n if s.find('XL') != -1: sum -= 20\r\n if s.find('XC') != -1: sum -= 20\r\n if 
s.find('CD') != -1: sum -= 200\r\n if s.find('CM') != -1: sum -= 200\r\n return sum","repo_name":"datonli/ds_workspace","sub_path":"pypractise/leetcode/roman-to-integer.py","file_name":"roman-to-integer.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39738830733","text":"from django.shortcuts import redirect\nfrom .agregarf import AgregarF\nfrom AppFactura.models import TipoComprobante\n# Create your views here.\n\ndef addf(request, factura_id):\n agregarf = AgregarF(request)\n factura = TipoComprobante.objects.get(id_tipo_comprobante=factura_id)\n agregarf.addf(factura=factura)\n return redirect(request.META.get('HTTP_REFERER', 'TipoComprobante'))\n\ndef clearf(request):\n agregar = AgregarF(request)\n agregar.clearf()\n return redirect(request.META.get('HTTP_REFERER', 'TipoComprobante'))","repo_name":"gonzalezkj/GEFAST","sub_path":"AppAgregarF/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43662978463","text":"import numpy as np\n\ndef read_data(filename):\n # complete the read_data() function\n\n elastic = []\n measurements = []\n # elastic, measurements = None\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith(','):\n continue\n values = line.split(',')\n if values[-1].startswith('F'):\n elastic.append(False)\n else: \n elastic.append(True)\n measurements.append(values[1:-1])\n \n elastic = np.array(elastic)\n measurements = np.array(measurements, dtype=np.float64)\n \n \n return elastic, measurements\n\nif __name__ == \"__main__\":\n filename = \"experiments_labeled.csv\"\n\n elastic, measurements = read_data(filename)\n\n # assign to M1,M2,V1i,V2i,V1f,V2f\n # ...\n M1, M2, V1i, V2i, V1f, V2f = measurements.transpose()\n\n # example use: calculate P1i\n # ...\n P1i = M1 * V1i\n\n # print rounded to 4 decimals\n print(\"P1i =\", np.round(P1i, 4))\n\n\n","repo_name":"jstrombe19/phy202","sub_path":"mod6-reading/one-d-collisions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5757903317","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg=cv2.imread('img.png') #opencv reads in bgr format\nimg=cv2.cvtColor(img,cv2.COLOR_BGR2RGB) #matplotlib reads in rgb\n\n\nkernel=np.ones((5,5),np.float32)/25 #based on pixel format it works on homogenous filter\ndst=cv2.filter2D(img,-1,kernel)\nblur=cv2.blur(img,(5,5));\ngblur=cv2.GaussianBlur(img,(5,5),0) # gaussian blur the weightage of the pixel is different in different area\nmedian=cv2.medianBlur(img,5)#median filter is something to replace each pixel value with its neighbour pixels\n\ntitles=['image','2D convolution','blur','gblur','median' ]\nimages=[img,dst,blur,gblur,median]\n\nfor i in range(5):\n plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')\n plt.title(titles[i])\n\nplt.show()","repo_name":"pradeepraja2097/Python","sub_path":"opencv/venv/image_filterrs.py","file_name":"image_filterrs.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12974473922","text":"\"\"\"\nSpiral \n\"\"\"\nimport math \n\ndef BoatEarMoonPresets():\n \"\"\" Variables for the creation of a Boat Ear Moon Shell\"\"\"\n # 
variables to set the resolution of the shell mesh \n n = 10\n m = 3\n turns = 10\n s= 0\n # Boat ear moon presets\n i = 1\n D=1\n alpha_=math.radians(83)\n beta=math.radians(42) \n phi=math.radians(70) \n mu=math.radians(10) \n omega=math.radians(30) \n A=25 \n a=12 \n b=20 \n L=0 \n P=0 \n W1=0 \n W2=0 \n N=0 \n return n,m,turns,s,i,D,alpha_, beta,phi, mu, omega, A, a, b, L, P, W1, W2, N\n\n \ndef generateSpiral(presets):\n \"\"\" Generates spiral points \"\"\"\n n,m,turns,s,i,D, alpha_, beta,phi, mu, omega,A, a, b, L, P, W1, W2,N = presets\n \n spiral_matrix=[]\n i= 0\n while i < n: \n print (\"i\", i) \n theta = float(map(i, 0, n, 0, turns))\n rad = float(exp(theta * cos(alpha_) / sin(alpha_)))\n \n x = float(A * rad * sin(beta) * cos(theta) * D)\n y = float( A * rad * sin(beta) * sin(theta))\n z = float(-A * rad * cos(beta))\n \n spiral_i = PVector(x,y,z)\n spiral_matrix.append(spiral_i) \n i = i + 1 #for some reason i += 1 does not work here. It may be a processing thing. \n return (spiral_matrix) \n \n# Point cloud rendering below \ncurrentFrame = 0\n\ndef setup():\n size(600,600,P3D) \n frameRate(24)\n\ndef draw():\n background(255,255,255)\n global currentFrame\n currentFrame = currentFrame + 1\n \n fov = float(PI/3)\n cameraZ = float((height/2.0) / tan(fov/2.0))\n perspective(fov, float(width)/float(height), cameraZ/10.0, cameraZ*10.0) \n \n translate(300,300)\n rotateY(currentFrame/24.0)\n \n stroke(255,0,0)\n strokeWeight(10)\n\n spiral_points= generateSpiral(BoatEarMoonPresets())\n\n for vertex in spiral_points: \n stroke(0,0,255)\n strokeWeight(10)\n x,y,z = vertex[0],vertex[1],vertex[2]\n point(x,y,z)\n","repo_name":"Deyspring/Seashell_Generator","sub_path":"sketch_shells/spiral/spiral.pyde","file_name":"spiral.pyde","file_ext":"pyde","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26404191453","text":"# Swap min and max\r\ns = str(input())\r\nx = list(map(int, s.split()))\r\nm = max(x)\r\nn = min(x)\r\ni = x.index(m)\r\nj = x.index(n)\r\nx[i] = n\r\nx[j] = m\r\nfor i in x:\r\n print(i, end=' ')\r\n","repo_name":"min24/PythonCourseraHSE_duy171","sub_path":"week5_programming_exercise/5.13.py","file_name":"5.13.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"35003551097","text":"import datetime\nimport os\nimport netifaces\nimport socket\nimport subprocess\n\nfrom os import listdir\nfrom os.path import isfile, join\nfrom time import sleep\n\n\nclass Helper:\n\n def __init__(self, configuration):\n self.configuration = configuration\n self.default = self.configuration['default']\n\n def infos(self):\n infos = []\n infos.append('hostname ' + str(socket.gethostname()))\n infos.append('PID ' + str(os.getpid()))\n ifaces = self.interfaces_self()\n for iface in ifaces:\n infos.append('Interface' + str(iface))\n return infos\n\n def interfaces_first(self):\n ips = self.interfaces_self()\n # remove ipv6 from results\n for ip in ips:\n if ':' not in ip:\n return ip\n return '127.0.0.1'\n\n def interfaces_self(self):\n ifaces = []\n for interface in netifaces.interfaces():\n if interface != 'lo':\n if 2 in netifaces.ifaddresses(interface):\n _i = netifaces.ifaddresses(interface)\n _i = _i[2][0]['addr']\n if self.not_local(_i):\n ifaces.append(_i)\n if 17 in netifaces.ifaddresses(interface):\n _i = netifaces.ifaddresses(interface)\n _i = _i[17][0]['addr']\n if self.not_local(_i):\n 
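# family 2 is AF_INET (IPv4); 17 and 18 appear to be link-layer families, so hardware (MAC) addresses get collected here as well\n 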
ifaces.append(_i)\n if 18 in netifaces.ifaddresses(interface):\n _i = netifaces.ifaddresses(interface)\n _i = _i[18][0]['addr']\n if self.not_local(_i):\n ifaces.append(_i)\n return ifaces\n\n def file_delete(self, file_path):\n print(file_path)\n if os.path.exists(file_path):\n os.remove(file_path)\n return 'done'\n else:\n return 'not found'\n\n def files_in_path(self, file_path):\n files = [f for f in listdir(file_path) if isfile(join(file_path, f))]\n return files\n\n def folder_create_once(self, folder_path):\n try:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n return True\n except IOError as e:\n print('Error[Helper]' + str(e))\n return False\n\n def log_add_text(self, name, text):\n l_home, pre_text = self.log_home(name)\n text = self.now_str() + ': ' + pre_text + ' ' + str(text)\n with open(l_home, 'a') as outfile:\n outfile.write(text + '\\n')\n\n def log_home(self, name):\n _config = self.default\n _log_location = _config['log_location']\n _log_file = _config['log_file']\n _pre_text = name + ':'\n if name in self.configuration:\n _config = self.configuration[name]\n if 'log_location' in _config:\n _log_location = _config['log_location']\n if 'log_file' in _config:\n _log_file = _config['log_file']\n _pre_text = ''\n log_home_path = _log_location + '/' + _log_file\n self.folder_create_once(_log_location)\n return [log_home_path, _pre_text]\n\n def not_local(self, ip):\n if ip != '127.0.0.1':\n return True\n return False\n\n def now_str(self):\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n def shutdown(self, time):\n _down = int(time)\n print('fpvcar down in ' + str(_down))\n sleep(_down)\n print('os shudown in 10')\n try:\n subprocess.call(['sleep 10s;sudo shutdown -h now'], shell=True)\n return 'os going down'\n except Exception as e:\n return str(e)\n","repo_name":"hackffm/things_web","sub_path":"things_web/resources/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"4338306931","text":"import numpy as np\nimport pandas as pd\nimport nltk\nimport pickle\nimport random\nimport json\nfrom sklearn.model_selection import train_test_split\nrandom.seed(420)\n\ndef load_dataset():\n \"\"\"\n Function to load ACL files\n \"\"\"\n print(\"Loading files Files\")\n #########################\n abstracts_train=[]\n with open('aan/split/train','r') as fil:\n line=fil.readline()\n while line:\n curr_abstract=[]\n with open('aan/txt_tokenized/{}'.format(line.rstrip()),'r') as fil2:\n line_curr=fil2.readline()\n while line_curr:\n curr_abstract.append(line_curr.rstrip())\n line_curr=fil2.readline()\n abstracts_train.append(curr_abstract)\n line=fil.readline()\n \n abstracts_train=np.array(abstracts_train)\n ###########################\n abstracts_test=[]\n with open('aan/split/test','r') as fil:\n line=fil.readline()\n while line:\n curr_abstract=[]\n with open('aan/txt_tokenized/{}'.format(line.rstrip()),'r') as fil2:\n line_curr=fil2.readline()\n while line_curr:\n curr_abstract.append(line_curr.rstrip())\n line_curr=fil2.readline()\n abstracts_test.append(curr_abstract)\n line=fil.readline()\n \n abstracts_test=np.array(abstracts_test)\n ############################\n abstracts_val=[]\n with open('aan/split/valid','r') as fil:\n line=fil.readline()\n while line:\n curr_abstract=[]\n with open('aan/txt_tokenized/{}'.format(line.rstrip()),'r') as fil2:\n line_curr=fil2.readline()\n while line_curr:\n 
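# accumulate every tokenized sentence of this paper into one abstract, reading until EOF\n 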
curr_abstract.append(line_curr.rstrip())\n line_curr=fil2.readline()\n abstracts_val.append(curr_abstract)\n line=fil.readline()\n \n abstracts_val=np.array(abstracts_val)\n\n print(\"JSON's loaded of shape\")\n print(abstracts_train.shape)\n print(abstracts_test.shape)\n print(abstracts_val.shape)\n\n return abstracts_train,abstracts_test,abstracts_val\n\ndef process_dataset(stories):\n print(\"Processing dataset\")\n cnt=0\n new_stories=[]\n for story in stories:\n flag=True\n for sentence in story:\n words=nltk.TreebankWordTokenizer().tokenize(sentence)\n num_words=len(words)\n if(num_words>50):\n cnt+=1\n flag=False\n break\n if flag==True:\n new_stories.append(story)\n\n print(\"Removed {} stories\".format(cnt))\n return new_stories\n\n\"\"\"\nFor this code, need the AAN dataset, containing the sub-directories sentences, split and txt_tokenized\n\"\"\"\n\ndef main():\n abstracts_train,abstracts_test,abstracts_val=load_dataset()\n with open(\"acl_train.pkl\",\"wb\") as f:\n pickle.dump(abstracts_train,f)\n with open(\"acl_val.pkl\",\"wb\") as f:\n pickle.dump(abstracts_val,f)\n with open(\"acl_test.pkl\",\"wb\") as f:\n pickle.dump(abstracts_test,f)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"RuskinManku/PairwiseModels4SO","sub_path":"load_acl.py","file_name":"load_acl.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"12260117518","text":"#!/usr/bin/python\n\n\nimport mxnet as mx\nimport core.config as config\n\n\ndef generator_lenet5(noise, batch_size, img_channels, eps):\n '''\n generator of the GAN basing on deconvolution layers.\n The structure is:\n 1. fc layer of size 1024 + BN + ReLU\n 2. fc of size 8 x 8 x 128 + BN + ReLU\n 3. Resize into Image Tensor\n 4. transposed conv2 layer with 64 filters of 4x4, stride 2, pad 1 + BN + ReLU\n 5. 
transposed conv2 layer with 1 filters of 4x4, stride 2, pad 1 + BN + ReLU\n params:\n noise: a symbol standing for input random noise\n batch_size: the batch size of the generated examples\n eps: the eps assigned to mx.sym.BatchNorm()\n '''\n\n fix_gamma = True\n\n fc1 = mx.sym.FullyConnected(noise, num_hidden=1024, no_bias=False, flatten=True, name='gen_fc1')\n relu1 = mx.sym.Activation(fc1, act_type='relu', name='gen_relu1')\n bn1 = mx.sym.BatchNorm(relu1, fix_gamma=fix_gamma, eps=eps, name='gen_bn1')\n\n fc2 = mx.sym.FullyConnected(bn1, num_hidden=8*8*128, no_bias=False, name='gen_fc2')\n relu2 = mx.sym.Activation(fc2, act_type='relu', name='gen_relu2')\n bn2 = mx.sym.BatchNorm(relu2, fix_gamma=fix_gamma, eps=eps, name='gen_bn2')\n\n conv_input = bn2.reshape(shape=(batch_size,128,8,8), name='gen_reshape1')\n # 128x8x8\n trans_conv1 = mx.sym.Deconvolution(conv_input, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=64,name=\"gen_trans_conv1\")\n relu3 = mx.sym.Activation(trans_conv1, act_type='relu', name='gen_relu3')\n bn3 = mx.sym.BatchNorm(relu3, fix_gamma=fix_gamma, eps=eps, name='gen_bn3')\n # 64x16x16\n trans_conv2 = mx.sym.Deconvolution(bn3, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=32,name=\"gen_trans_conv2\")\n relu4 = mx.sym.Activation(trans_conv2, act_type='relu', name='gen_relu4')\n bn4 = mx.sym.BatchNorm(relu4, fix_gamma=fix_gamma, eps=eps, name='gen_bn4')\n # 32x32x32\n trans_conv3 = mx.sym.Deconvolution(bn4, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=img_channels,name=\"gen_trans_conv3\")\n tanh1 = mx.sym.Activation(trans_conv3, act_type='tanh', name='gen_tanh1')\n # ncx64x64\n\n out = tanh1\n\n return out\n\n\ndef generator_fcn(noise, batch_size, nc, eps):\n '''\n symbol noise has a shape of batch_sizexncx1x1\n '''\n fix_gamma = True\n # 1x1x100\n trans_conv1 = mx.sym.Deconvolution(noise, kernel=(4,4), stride=(1,1), pad=(0,0), num_filter=1024, name='gen_trans_conv1')\n bn1 = mx.sym.BatchNorm(trans_conv1, fix_gamma=fix_gamma, eps=eps, name='gen_bn1')\n relu1 = mx.sym.Activation(bn1, act_type='relu', name='gen_relu1')\n # 4x4x1024\n trans_conv2 = mx.sym.Deconvolution(relu1, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=512, name='gen_trans_conv2')\n bn2 = mx.sym.BatchNorm(trans_conv2, fix_gamma=fix_gamma, eps=eps, name='gen_bn2')\n relu2 = mx.sym.Activation(bn2, act_type='relu', name='gen_relu2')\n # 8x8x512\n trans_conv3 = mx.sym.Deconvolution(relu2, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=256, name='gen_trans_conv3')\n bn3 = mx.sym.BatchNorm(trans_conv3, fix_gamma=fix_gamma, eps=eps, name='gen_bn3')\n relu3 = mx.sym.Activation(bn3, act_type='relu', name='gen_relu3')\n # 16x16x256\n trans_conv4 = mx.sym.Deconvolution(relu3, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=128, name='gen_trans_conv4')\n bn4 = mx.sym.BatchNorm(trans_conv4, fix_gamma=fix_gamma, eps=eps, name='gen_bn4')\n relu4 = mx.sym.Activation(bn4, act_type='relu', name='gen_relu4')\n # 32x32x128\n trans_conv5 = mx.sym.Deconvolution(relu4, kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=nc, name='gen_trans_conv5')\n # 64x64xnc\n\n tanh = mx.sym.Activation(trans_conv5, act_type='tanh', name='gen_output')\n\n return tanh\n\n\n\n\n\n","repo_name":"CoinCheung/GAN-mxnet","sub_path":"symbol/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"33257300012","text":"\"\"\"\n\nThis input file again solves a 1D diffusion problem as 
in\n:mod:`examples.diffusion.mesh1D`.\nThe difference being that the mesh is two dimensional.\n\nThe result is again tested in the same way:\n\n >>> DiffusionTerm().solve(var)\n >>> Lx = nx * dx\n >>> x = mesh.cellCenters[0]\n >>> analyticalArray = valueLeft + (valueRight - valueLeft) * x / Lx\n >>> print(var.allclose(analyticalArray, rtol = 1e-9))\n 1\n\n\"\"\"\nfrom __future__ import unicode_literals\n\n__docformat__ = 'restructuredtext'\n\nfrom fipy import input\nfrom fipy import CellVariable, Grid2D, DiffusionTerm, Viewer\n\nnx = 50\nny = 50\n\ndx = 1.\n\nvalueLeft = 0.\nvalueRight = 1.\n\nmesh = Grid2D(dx = dx, nx = nx, ny = ny)\n\nvar = CellVariable(name = \"solution variable\",\n mesh = mesh,\n value = valueLeft)\n\nvar.constrain(valueLeft, mesh.facesLeft)\nvar.constrain(valueRight, mesh.facesRight)\n\nif __name__ == '__main__':\n DiffusionTerm().solve(var)\n\n viewer = Viewer(vars=var, datamin=0., datamax=1.)\n viewer.plot()\n input(\"finished\")\n\n","repo_name":"usnistgov/fipy","sub_path":"examples/diffusion/steadyState/mesh50x50/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":436,"dataset":"github-code","pt":"57"} +{"seq_id":"20362486353","text":"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom ast import Bytes\nfrom mmap import PROT_WRITE\nimport rospy\nimport serial\nimport time\nimport sensor_msgs\nfrom sensor_msgs.msg import Imu\nfrom sensor_msgs.msg import MagneticField\nfrom scipy.spatial.transform import Rotation\nfrom std_msgs.msg import Header\n\nif __name__ == '__main__':\n SENSOR_NAME = \"imu_sensor\"\n pub1= rospy.Publisher(\"imu\",Imu,queue_size=10)\n pub2= rospy.Publisher(\"mag\",MagneticField,queue_size=10)\n rospy.init_node('gps_sensor')\n serial_port = rospy.get_param('~port','/dev/ttyUSB1')\n serial_baud = rospy.get_param('~baudrate',115200)\n sampling_rate = rospy.get_param('~sampling_rate',40.0)\n \n port = serial.Serial(serial_port, serial_baud, timeout=3.)\n rospy.logdebug(\"Using imu sensor on port \"+serial_port+\" at \"+str(serial_baud))\n rospy.logdebug(\"Initializing sensor with *0100P4\\\\r\\\\n ...\")\n sampling_count = int(round(1/(sampling_rate*0.007913)))\n rospy.sleep(0.2) \n port.write(b'$VNWRG,07,40*XX') \n #line = port.readline()\n \n rospy.logdebug(\"Initialization complete\")\n rospy.loginfo(\"Publishing IMU and Magnetometer data\")\n \n h1= Header()\n #h2=Header()\n msg_imu=Imu()\n msg_mag=MagneticField()\n \n i=1\n\n try:\n while not rospy.is_shutdown(): \n line = port.readline() #reading the serial port data\n if line == 'no data ': #if there is no data, display \"no data\"\n rospy.logwarn(\"VNYMR: No data\")\n else:\n if line.startswith(b'$VNYMR') : #parse the data from serial port only when it starts with $VNYMR\n print(line)\n s =line.split(b\",\") #split the string with delimiter \",\"\n yaw = s[1].decode('utf-8') #getting required valueS\n pitch = s[2].decode('utf-8')\n roll = s[3].decode('utf-8')\n print(\"Yaw:\"+yaw+\" Pitch:\"+pitch+\" Roll:\"+roll)\n magx = s[4].decode('utf-8')\n magy = s[5].decode('utf-8')\n magz = s[6].decode('utf-8')\n print(\"Magx:\"+magx+\" Magy:\"+magy+\" Magz:\"+magz)\n aclx = s[7].decode('utf-8')\n acly = s[8].decode('utf-8')\n aclz = s[9].decode('utf-8')\n print(\"Aclx:\"+aclx+\" Acly:\"+acly+\" Aclz:\"+aclz)\n gyrx = s[10].decode('utf-8')\n gyry = s[11].decode('utf-8')\n gyrz = s[12].decode('utf-8')\n gyrz = gyrz[:-5]\n print(\"Gyrx:\"+gyrx+\" Gyry:\"+gyry+\" Gyrz:\"+gyrz)\n rot = Rotation.from_euler('xyz', 
[float(roll), float(pitch), float(yaw)], degrees=True)\n quat=rot.as_quat()\n x=quat[0]\n y=quat[1]\n z=quat[2]\n w=quat[3]\n print(\"x:\"+str(x)+\" y:\"+str(y)+\" z:\"+str(z)+\" w:\"+str(w))\n\n #publish data to the message\n h1.seq=i\n \n h1.stamp=rospy.get_rostime()\n h1.stamp=rospy.get_rostime()\n h1.frame_id=\"IMU DATA\"\n h1.frame_id=\"MAG DATA\"\n msg_imu.header=h1\n msg_mag.header=h1\n msg_imu.orientation.x=float(x)\n msg_imu.orientation.y=float(y)\n msg_imu.orientation.z=float(z)\n msg_imu.orientation.w=float(w)\n msg_imu.angular_velocity.x=float(gyrx)\n msg_imu.angular_velocity.y=float(gyry)\n msg_imu.angular_velocity.z=float(gyrz)\n msg_imu.linear_acceleration.x=float(aclx)\n msg_imu.linear_acceleration.y=float(acly)\n msg_imu.linear_acceleration.z=float(aclz)\n msg_mag.magnetic_field.x=float(magx)\n msg_mag.magnetic_field.y=float(magy)\n msg_mag.magnetic_field.z=float(magz)\n pub1.publish(msg_imu)\n pub2.publish(msg_mag)\n i=i+1 #incrementing counter value for message header sequence\n except rospy.ROSInterruptException:\n port.close()\n \n\n\n\n","repo_name":"josejosepht/Sensor-fusion-for-Vehicle-Localization-with-IMU-and-Magnetometer","sub_path":"imu_mag_pkg/src/imu_script.py","file_name":"imu_script.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"41579387910","text":"from odoo.tests.common import TransactionCase\n\n\nclass TestProductPrintCategory(TransactionCase):\n \"\"\"Tests for 'Product Print Category' Module\"\"\"\n\n def setUp(self):\n super(TestProductPrintCategory, self).setUp()\n self.wizard_obj = self.env[\"product.print.wizard\"]\n # self.report_obj = self.env[\"report\"]\n self.custom_report_obj = self.env[\n \"report.product_print_category.report_pricetag\"]\n self.print_category = self.env.ref(\n \"product_print_category.demo_category\")\n\n # Test Section\n def test_01_test_wizard_obsolete(self):\n wizard = self.wizard_obj.with_context(\n active_model=\"product.print.category\",\n active_ids=[self.print_category.id], ).create({})\n self.assertEqual(\n len(wizard.line_ids),\n 1,\n 'Print obsolete product should propose 1 product'\n )\n\n def test_02_test_wizard_all(self):\n wizard = self.wizard_obj.with_context(\n active_model=\"product.print.category\",\n active_ids=[self.print_category.id], all_products=True, ).create({\n })\n self.assertEqual(\n len(wizard.line_ids),\n 2,\n \"Print all products should propose 5 products\"\n )\n data = wizard.print_report()\n self.env['report.product_print_category.report_pricetag'].\\\n _get_report_values(docids=None, data=data.get('data'))\n","repo_name":"paulRbr/FoodCoops","sub_path":"product_print_category/tests/test_product_print_category.py","file_name":"test_product_print_category.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"32926357385","text":"\"\"\"Terraria chat message tags can be used to display items in chat.\nThis is done with the format: [i:]\n\nThere exist statues in the game that depict alphanumeric characters.\nThis program conveniently converts plaintext into statue chat tags.\"\"\"\n\nimport sys\n\n# Big 0 Statue Item ID offset by ASCII 0\nCONST_BIG_0 = 2702 - ord('0')\n\n# Big A Statue Item ID offset by ASCII A\nCONST_BIG_A = 2712 - ord('A')\n\ndef formatTag(itemID):\n \"\"\"Return a valid item chat tag format given an ID\"\"\"\n return \"[i:{}]\".format(str(itemID))\n\ndef charToBig(c):\n 
\"\"\"Convert a char to the string code for an equivalent big letter statue\"\"\"\n if c.isdigit():\n return formatTag(ord(c) + CONST_BIG_0)\n if c.isalpha():\n return formatTag(ord(c) + CONST_BIG_A)\n return c\n\ndef strToBig(raw):\n \"\"\"Convert a string to the codes for equivalent big letter statues.\"\"\"\n return ''.join(map(charToBig, raw.upper()))\n\n# Main\nif len(sys.argv) > 1:\n print(strToBig(sys.argv[1]))\nelse:\n print(\"Enter plaintext:\")\n print(\"Translated:\\n\" + strToBig(input()))\n","repo_name":"AvocadosConstant/terraria-big-text","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13181655165","text":"from pdf_report_builder.structure.structural_elements.computed_types import ComputedTypes\nfrom pdf_report_builder.structure.structural_elements.base import StructuralElement\nfrom pdf_report_builder.structure.structural_elements.tome_contents import TomeContentsElement\nfrom pdf_report_builder.utils.file_watcher import FileWatcher\nfrom .base_factory import BaseFactory\nfrom .dict_processors import DictProcessor\nfrom .file_factory import FileFactory\n\nfrom settings import FILE_WATCHER\n\nBASE_VALID_ELEMENT_KEYS = [\n 'name',\n 'computed',\n 'code_attr',\n 'files',\n 'subelements',\n 'enumeration_include',\n 'enumeration_print',\n 'create_bookmark',\n 'expanded',\n 'code_add',\n 'inner_enumeration'\n]\n\n\nclass StructuralElementFactory(BaseFactory):\n\n @staticmethod\n def from_dict(d: dict):\n if not 'computed' in d:\n d['computed'] = 0\n element_creator = StructuralElementFactory()._get_element_creator(d['computed'])\n return element_creator(d)\n \n def _get_element_creator(self, type: int):\n if type == ComputedTypes.REGULAR.value:\n return self.create_regular_element\n if type == ComputedTypes.TOME_CONTENTS.value:\n return self.create_tome_contents_element\n \n def _regular_element_processor(self, d: dict):\n dict_processor = DictProcessor(d)\n dict_processor.parse_boolean('enumeration_include', True)\n dict_processor.parse_boolean('enumeration_print', False)\n dict_processor.parse_boolean('create_bookmark', True)\n dict_processor.parse_boolean('expanded', True)\n dict_processor.parse_boolean('code_add', False)\n dict_processor.parse_boolean('inner_enumeration', False)\n dict_processor.parse_levels_list('files', FileFactory.from_dict)\n if FILE_WATCHER:\n for file in dict_processor.d['files']:\n FileWatcher().add_file(file)\n dict_processor.parse_levels_list('subelements', StructuralElementFactory.from_dict)\n return dict_processor\n \n def _computed_element_processor(self, d: dict):\n dict_processor = self._regular_element_processor(d)\n dict_processor.parse_path('pdf_temp_path')\n return dict_processor\n\n def create_regular_element(self, d: dict):\n dict_processor = self._regular_element_processor(d)\n dict_processor.remove_invalid_keys(BASE_VALID_ELEMENT_KEYS)\n return StructuralElement(**dict_processor.d)\n \n def create_tome_contents_element(self, d: dict):\n dict_processor = self._computed_element_processor(d)\n dict_processor.parse_path('doc_template')\n dict_processor.remove_invalid_keys(\n BASE_VALID_ELEMENT_KEYS + ['pdf_temp_path', 'doc_template']\n )\n return 
TomeContentsElement(**dict_processor.d)\n","repo_name":"ginhelly/pdf-report-builder","sub_path":"pdf_report_builder/structure/factory/element_factory.py","file_name":"element_factory.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"14706661894","text":"from flask_app.config.mysqlconnection import MySQLConnection, connectToMySQL\n\nclass Dojo:\n def __init__(self,data):\n self.id = data['id']\n self.name = data['name']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.ninjas = []\n\n @classmethod\n def get_all_dojos(cls):\n query = 'SELECT * FROM dojos;'\n results = connectToMySQL('dojos_and_ninjas_schema').query_db(query)\n\n dojos = []\n for dojo in results:\n dojos.append(cls(dojo))\n\n return dojos\n\n @classmethod\n def create_dojo(cls,data):\n query = 'INSERT INTO dojos (name) VALUES (%(name)s);'\n return connectToMySQL('dojos_and_ninjas_schema').query_db(query,data)\n \n @classmethod \n def get_dojo_by_id(cls,data):\n query = 'SELECT * FROM dojos LEFT JOIN ninjas ON dojos.id = ninjas.dojo_id WHERE dojos.id = %(id)s;'\n results = connectToMySQL('dojos_and_ninjas_schema').query_db(query,data)\n dojo = Dojo(results[0])\n for row in results:\n if row['ninjas.id'] != None:\n ninja_data = {\n 'id': row['ninjas.id'],\n 'first_name': row['first_name'],\n 'last_name': row['last_name'],\n 'age': row['age'],\n 'created_at': row['ninjas.created_at'],\n 'updated_at': row['ninjas.updated_at'],\n 'dojo_id' : row['dojo_id'],\n }\n new_ninja = Ninja(ninja_data)\n dojo.ninjas.append(new_ninja)\n return dojo\n\n\nclass Ninja:\n def __init__(self,data):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.age = data['age']\n\n\n @classmethod\n def create_ninja(cls,data):\n query = 'INSERT INTO ninjas (first_name, last_name, age, dojo_id) VALUES (%(first_name)s, %(last_name)s, %(age)s, %(dojo_id)s);'\n return connectToMySQL('dojos_and_ninjas_schema').query_db(query,data)\n","repo_name":"AJamesonan/Coding_Dojo","sub_path":"July_Python/Dojos_and_ninjas/flask_app/models/dojo_and_ninja.py","file_name":"dojo_and_ninja.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39662015164","text":"#%%\nimport logging\n\nlogging.basicConfig(filename='covi19_dashboarder.log',\n level=logging.ERROR, \n format='%(asctime)s %(message)s')\nlogger = logging.getLogger(\"covi19_dashboarder\")\n\nclass Normalized_by_population_numbers_evolution():\n \"\"\"Displays data as:\n - line charts of infections and deaths evolution normalized by total population and elderly population\n - bar charts of increments of infections and deaths normalized by total population and elderly population\n \"\"\"\n def __init__(self, data):\n self.data_ = data\n self.population_data_ = None\n\n def get_population_data(self, multiselection):\n try:\n import pandas as pd \n demographic_data = pd.read_csv('https://raw.githubusercontent.com/GermanCM/Covid19_data_analyzer/master/external_data/demographic_population.csv', sep=',')\n population_countries_df = pd.DataFrame()\n desired_cols = ['Country', 'SEX', 'AGE', 'TIME' , 'Value']\n\n for country in multiselection:\n demographic_data_country=demographic_data[demographic_data.Country==country]\n most_recent_year=demographic_data_country.TIME.max()\n 
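# keep only rows from the most recent census year reported for this country\n 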
most_recent_data_mask=demographic_data_country.TIME==most_recent_year\n total_population_age_mask=demographic_data_country.AGE=='TOTAL' \n total_population_sex_mask=demographic_data_country.SEX=='T'\n\n country_demographic_data=demographic_data_country[(total_population_age_mask)&(most_recent_data_mask)&\\\n (total_population_sex_mask)]\n country_demographic_data.Value=country_demographic_data.Value.apply(int)\n\n population_countries_df = population_countries_df.append(country_demographic_data, ignore_index=True)\n \n self.population_data_ = population_countries_df[desired_cols]\n return population_countries_df[desired_cols]\n\n except Exception as exc:\n logger.exception('raised exception at {}: {}'.format(logger.name+'.'+ 'get_population_data', exc))\n\n def get_elderly_population_data(self, multiselection):\n try:\n import pandas as pd \n elderly_demographic_data = pd.read_csv('https://raw.githubusercontent.com/GermanCM/Covid19_data_analyzer/master/external_data/demographic_age_over_65.csv', sep=',')\n elderly_population_countries_df = pd.DataFrame()\n desired_columns = ['Variable', 'Measure', 'Country', 'Year', 'Value']\n\n for country in multiselection:\n elderly_demographic_data_country=elderly_demographic_data[elderly_demographic_data['Country']==country]\n most_recent_year=elderly_demographic_data_country['Year'].max()\n most_recent_data_mask=elderly_demographic_data_country['Year']==most_recent_year\n total_population_age_mask=elderly_demographic_data_country['Variable']=='Population: 80 years old and over' \n total_population_measure_mask=elderly_demographic_data_country['Measure']=='% of total population'\n\n elderly_country_demographic_data=elderly_demographic_data_country[(total_population_age_mask)&(most_recent_data_mask)&\\\n (total_population_measure_mask)]\n elderly_country_demographic_data.Value=elderly_country_demographic_data.Value.apply(int)\n\n elderly_population_countries_df = elderly_population_countries_df.append(elderly_country_demographic_data, ignore_index=True)\n \n self.elderly_population_data_ = elderly_population_countries_df[desired_columns]\n return elderly_population_countries_df[desired_columns]\n\n except Exception as exc:\n logger.exception('raised exception at {}: {}'.format(logger.name+'.'+ 'get_elderly_population_data', exc))\n\n def get_absolute_elderly_population_numbers(self, multiselection):\n try:\n import pandas as pd\n\n pop_data = self.get_population_data(multiselection)\n elderly_pop_data = self.get_elderly_population_data(multiselection)\n\n elderly_pop_data['Total_elderly_population_value'] = pd.Series()\n elderly_pop_data\n\n for country in elderly_pop_data.Country:\n country_total_pop_mask = pop_data.Country==country\n elderly_pop_data_mask = elderly_pop_data.Country==country\n desired_index = elderly_pop_data[elderly_pop_data_mask].index\n\n country_total_pop_value = pop_data[country_total_pop_mask].Value\n elderly_pop_data_percentage = elderly_pop_data[elderly_pop_data_mask].Value \n\n elderly_pop_data.loc[desired_index,'Total_elderly_population_value']=\\\n int(((elderly_pop_data_percentage*country_total_pop_value)/100))\n\n return elderly_pop_data\n\n except Exception as exc:\n logger.exception('raised exception at {}: {}'.format(logger.name+'.'+ 'get_absolute_elderly_population_numbers', exc))\n\n \n def return_normalized_lines_evolution_figure(self, selected_countries_data, multiselection):\n try:\n from plotly.subplots import make_subplots\n import plotly.graph_objects as go\n import numpy as np\n import pandas as pd \n\n 
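# totals are fetched once per selection; the curves below are normalised to cases per 1000 inhabitants\n 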
population_countries_df = self.get_population_data(multiselection)\n \n fig = make_subplots(rows=2, cols=1, subplot_titles=(\"Confirmed cases normalized by population\", \"Deaths normalized by population\"))\n\n for country_i in population_countries_df.Country.values:\n country_population = population_countries_df[population_countries_df.Country==country_i]['Value']\n country_data = selected_countries_data[selected_countries_data.Country==country_i]\n \n country_data.Confirmed=country_data.Confirmed.apply(lambda x: 1000*(x/country_population)).round(2)\n country_data.Deaths=country_data.Deaths.apply(lambda x: 1000*(x/country_population)).round(2)\n\n fig.add_trace(\n go.Scatter(x=country_data.date, y=country_data.Confirmed, name=country_i, mode='lines+markers+text'),\n row=1, col=1\n )\n fig.add_trace(\n go.Scatter(x=country_data.date, y=country_data.Deaths, name=country_i, mode='lines+markers+text'),\n row=2, col=1\n )\n\n fig.update_layout(margin={\"r\":10,\"t\":60,\"l\":10,\"b\":10}, height=600, width=710, showlegend=False, paper_bgcolor=\"#EBF2EC\") \n return fig\n\n except Exception as exc:\n logger.exception('raised exception at {}: {}'.format(logger.name+'.'+ 'return_normalized_lines_evolution_figure', exc))\n\n\n def return_normalized_bars_increments_evolution_figure(self, selected_countries_data, multiselection):\n try:\n from plotly.subplots import make_subplots\n import plotly.graph_objects as go\n import numpy as np\n import pandas as pd \n\n population_countries_df = self.get_population_data(multiselection)\n\n fig = make_subplots(rows=2, cols=1, subplot_titles=(\"New confirmed infections normalized by population per day\", \n \"New deaths normalized by population per day\"))\n \n for country_i in multiselection:\n country_population = population_countries_df[population_countries_df.Country==country_i]['Value']\n country_data = selected_countries_data[selected_countries_data.Country==country_i]\n\n country_data.Confirmed=country_data.Confirmed.apply(lambda x: 10000*(x/country_population)).round(2)\n country_data.Deaths=country_data.Deaths.apply(lambda x: 10000*(x/country_population)).round(2)\n\n #confirmed new infections\n country_confirmed_shift = country_data['Confirmed'].shift(periods=1)\n country_data['Day_increment'] = country_data['Confirmed'] - country_confirmed_shift\n #confirmed new deaths\n country_deaths_shift = country_data['Deaths'].shift(periods=1)\n country_data['Day_deaths'] = country_data['Deaths'] - country_deaths_shift\n\n trace_country_cases = go.Bar(\n x = country_data['date'],\n y = country_data['Day_increment'],\n name=country_i\n )\n fig.add_trace(\n trace_country_cases,\n row=1, col=1\n )\n trace_country_deaths = go.Bar(\n x = country_data['date'],\n y = country_data['Day_deaths'],\n name=country_i\n )\n fig.add_trace(\n trace_country_deaths,\n row=2, col=1\n )\n\n fig.update_layout(margin={\"r\":10,\"t\":60,\"l\":10,\"b\":10}, height=600, width=710, showlegend=False, paper_bgcolor=\"#EBF2EC\") #legend=dict(x=-.18, y=1))\n return fig\n\n except Exception as exc:\n logger.exception('raised exception at {}: {}'.format(logger.name+'.'+ 'return_bars_increments_evolution_figure', exc))\n\n","repo_name":"GermanCM/Covid19_data_analyzer","sub_path":"page_numbers_normalized_by_population/normalized_numbers_by_population_evolution.py","file_name":"normalized_numbers_by_population_evolution.py","file_ext":"py","file_size_in_byte":9494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} 
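The dashboard record above repeatedly normalizes cumulative counts by a country's population (cases per 1,000 inhabitants). A minimal, self-contained sketch of that normalization with pandas; the column and index names and the figures here are illustrative assumptions for the sketch, not taken from the record:

```python
import pandas as pd

# Illustrative inputs; names and numbers are assumptions, not from the record above.
population = pd.Series({"Spain": 47_350_000, "Italy": 59_550_000})
cases = pd.DataFrame({
    "Country": ["Spain", "Spain", "Italy"],
    "Confirmed": [100_000, 200_000, 180_000],
})

# Cases per 1,000 inhabitants: look up each row's population, divide, scale.
cases["ConfirmedPer1000"] = (
    1000 * cases["Confirmed"] / cases["Country"].map(population)
).round(2)
print(cases)
```

Mapping the population Series onto each row normalizes every country in one vectorized step, avoiding the per-country loop and chained `apply` calls used in the record, and it sidesteps pandas' SettingWithCopy warnings.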
+{"seq_id":"22515565115","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom typing import Optional, Union\n\nfrom pydantic import BaseModel\n\n\nclass KeysBase(BaseModel):\n user_id: Union[int, None]\n account_address: str\n\n\nclass KeysCreate(KeysBase):\n account_name: str\n private_key: str\n\n class Config:\n schema_extra = {\n \"example\": {\n \"account_address\": \"0x86a8478F78219421B0e4EC80F14a278ffdc0dA27\",\n \"account_name\": \"jacob\",\n \"private_key\": \"9/fall90\",\n }\n }\n\n\nclass KeysUpdate(KeysBase):\n id: int\n account_name: str\n\n\nclass KeysDelete(BaseModel):\n id: int\n user_id: Optional[int]\n\n\nclass KeysDeleteWithUser(KeysDelete):\n user_id: int\n","repo_name":"MrxZone/into_v2","sub_path":"app/schemas/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"73006774578","text":"import time\nimport sys\n\nprint(\"\"\"\n\n________ __________ __________ \n___ __ \\_________ __ ___ ____/___________________ _________ __ /__ /_____________\n__ / / / __ \\_ |/_/ __ /_ _ __ \\_ ___/_ __ `__ \\ __ `/ __/ __/ _ \\_ ___/\n_ /_/ // /_/ /_> < _ __/ / /_/ / / _ / / / / / /_/ // /_ / /_ / __/ / \n/_____/ \\____//_/|_| /_/ \\____//_/ /_/ /_/ /_/\\__,_/ \\__/ \\__/ \\___//_/ \n\n\n+================================================================================================+\n\"\"\")\n\ntime.sleep(2.5)\n\nprint(\"\"\"\n______ \n___ /______ __ \n__ __ \\_ / / / \n_ /_/ / /_/ / \n/_.___/_\\__, / \n /____/ \n_____ __ _____________ \n___ | / /_____ ______(_)__(_)\n__ |/ /_ __ `/____ /__ / \n_ /| / / /_/ /____ / _ / \n/_/ |_/ \\__,_/ ___ / /_/ \n /___/ \n\\n\n\"\"\")\n\n#First welcoming message explaining how the tool works\nprint(\"\"\"Welcome to Naji's command line Dox Formatter! Please enter down below which bit of informtation you'd like to record.\nIf you want to at any moment quit formatting and save your dox format, please press \"f\" at any input. If at any moment you want\nto exit to program, press \"x\".\n\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. Other\n\\n\n\"\"\")\n\n#Functions of every single option to append to the dox. Asks an input for the value, appends names list to WriteName then\n#prints out how many names you have total and the number. Goes back to the main loop and asks again for what you want. This process repeats\n#for all functions.\ndef Name():\n\tWriteName = input(\"What names would you like to add?: \")\n\tif WriteName == 'x' or WriteName == 'X':\n\t\tprint(\"Exiting...\")\n\t\tsys.exit()\n\telif WriteName == 'f' or WriteName == 'F':\n\t\tPrintDox()\n\telse:\n\t\tnames.append(WriteName)\n\t\tprint('\\n')\n\t\tprint(names)\n\t\tprint(f\"You have {len(names)} name(s) so far.\")\n\t\tprint('\\n')\n\t\tprint(\"\"\"\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. Other\"\"\")\n\t\tprint(\"\\n\")\n\t\tLoop()\n\ndef Addr():\n\tWriteAddr = input(\"What addresses would you like to add?: \")\n\tif WriteAddr == 'x' or WriteAddr == 'X':\n\t\tprint(\"Exiting...\")\n\t\tsys.exit()\n\telif WriteAddr == 'f' or WriteAddr == 'F':\n\t\tPrintDox()\n\telse:\n\t\taddr.append(WriteAddr)\n\t\tprint('\\n')\n\t\tprint(addr)\n\t\tprint(f\"You have {len(addr)} name(s) so far.\")\n\t\tprint('\\n')\n\t\tprint(\"\"\"\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. 
Other\"\"\")\n\t\tprint(\"\\n\")\n\t\tLoop()\n\ndef Phone():\n\tWritePhone = input(\"What phone numbers would you like to add?: \")\n\tif WritePhone == 'x' or WritePhone == 'X':\n\t\tprint(\"Exiting...\")\n\t\tsys.exit()\n\telif WritePhone == 'f' or WritePhone == 'F':\n\t\tPrintDox()\n\telse:\n\t\tphone.append(WritePhone)\n\t\tprint('\\n')\n\t\tprint(phone)\n\t\tprint(f\"You have {len(phone)} phone number(s) so far.\")\n\t\tprint('\\n')\n\t\tprint(\"\"\"\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. Other\"\"\")\n\t\tprint(\"\\n\")\n\t\tLoop()\n\ndef Social():\n\tWriteSocial = input(\"What social media would you like to add?: \")\n\tif WriteSocial == 'x' or WriteSocial == 'X':\n\t\tprint(\"Exiting...\")\n\t\tsys.exit()\n\telif WriteSocial == 'f' or WriteSocial == 'F':\n\t\tPrintDox()\n\telse:\n\t\tsocial.append(WriteSocial)\n\t\tprint('\\n')\n\t\tprint(social)\n\t\tprint(f\"You have {len(social)} social media(s) so far.\")\n\t\tprint('\\n')\n\t\tprint(\"\"\"\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. Other\"\"\")\n\t\tprint(\"\\n\")\n\t\tLoop()\n\ndef Family():\n\tWriteFamily = input(\"What family/friends would you like to add?: \")\n\tif WriteFamily == 'x' or WriteFamily == 'X':\n\t\tprint(\"Exiting...\")\n\t\tsys.exit()\n\telif WriteFamily == 'f' or WriteFamily == 'F':\n\t\tPrintDox()\n\telse:\n\t\tfamily.append(WriteFamily)\n\t\tprint('\\n')\n\t\tprint(family)\n\t\tprint(f\"You have {len(family)} family/friend(s) so far.\")\n\t\tprint('\\n')\n\t\tprint(\"\"\"\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. Other\"\"\")\n\t\tprint(\"\\n\")\n\t\tLoop()\n\ndef Other():\n\tWriteOther = input(\"What other things would you like to add?: \")\n\tif WriteOther == 'x' or WriteOther == 'X':\n\t\tprint(\"Exiting...\")\n\t\tsys.exit()\n\telif WriteOther == 'f' or WriteOther == 'F':\n\t\tPrintDox()\n\telse:\n\t\tother.append(WriteOther)\n\t\tprint('\\n')\n\t\tprint(other)\n\t\tprint(f\"You have {len(other)} other things so far.\")\n\t\tprint('\\n')\n\t\tprint(\"\"\"\n1. Name(s)\n2. Address(s)\n3. Phone Number(s)\n4. Social Media(s)\n5. Family or Friend(s)\n6. Other\"\"\")\n\t\tprint(\"\\n\")\n\t\tLoop()\n\n#Lists of the dox\nnames = []\naddr = []\nphone = []\nsocial = []\nfamily = []\nother = []\n\n#Will print out the dox from Dox_Format to a file. Loops through Dox_Format and adds a comma and space between each piece of info.\ndef PrintDox():\n\twith open(\"DoxList.txt\", 'w') as file:\n\t\tfor row in Dox_Format:\n\t\t\ts = \", \".join(map(str, row))\n\t\t\tfile.write(s + \"\\n\")\n\n#Lists for Adding to Dox_Format (really bad coding i know)\ndivider = [\"+-----------------------------------------------------------------------+\"]\nname = [\"Names - \"]\naddresses = [\"Addresses - \"]\nphones = [\"Phone Numbers - \"]\nsocials = [\"Social Medias - \"]\nfamilies = [\"Family/Friends - \"]\nothers = [\"Others - \"]\nspace = [\"\\n\"]\n\n#Main Dox, gets all its variables from above and prints them. 
Decided to use tuples since they print out white space and looks better.\nDox_Format = (\ndivider,\nname,\nspace,\nnames,\ndivider,\naddresses,\nspace,\naddr,\ndivider,\nphones,\nspace,\nphone,\ndivider,\nsocials,\nspace,\nsocial,\ndivider,\nfamilies,\nspace,\nfamily,\ndivider,\nothers,\nspace,\nother,\ndivider,\nspace\n)\n\n#Main loop, asks for input, goes to the function or shows an error based off what you put for the variable option.\ndef Loop():\n\twhile True:\n\t\toption = input(\"What would you like to add?: \")\n\t\tif option == 'x' or option == 'X':\n\t\t\tprint(\"Exiting...\")\n\t\t\tsys.exit()\n\t\telif option == 'f' or option == 'F':\n\t\t\tPrintDox()\n\t\t\tbreak\n\t\telif option == '1':\n\t\t\tName()\n\t\t\tbreak\n\t\telif option == '2':\n\t\t\tAddr()\n\t\t\tbreak\n\t\telif option == '3':\n\t\t\tPhone()\n\t\t\tbreak\n\t\telif option == '4':\n\t\t\tSocial()\n\t\t\tbreak\n\t\telif option == '5':\n\t\t\tFamily()\n\t\t\tbreak\n\t\telif option == '6':\n\t\t\tOther()\n\t\t\tbreak\n\t\telif option > str(6):\n\t\t\tprint(\"Too high of a number entered, try again.\")\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(\"Something went wrong, please try again.\")\n\nLoop()\n","repo_name":"TheNaj/Dox-Formatter","sub_path":"Dox_Formatter.py","file_name":"Dox_Formatter.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"42186563891","text":"'''\n--------------------------------------------------------------------------------\n\n\nThe process of learning, recognizing, and extracting these topics across a\ncollection of documents is called topic modeling.\n\nMethod 1. The Term Frequency – Inverse Document Frequency (TF-IDF)\nInvolves multiplying a local component like term frequency (TF) with a global\ncomponent, that is, inverse document frequency (IDF) and optionally\nnormalizing the result to unit length. As a result of this, the words that\noccur frequently across documents will get downweighted.\n\n\nMethod 2. Latent Dirichlet Allocation (LDA)\nLDA represents documents as mixtures of topics (a probabilistic topic model).\n\nE.g., If we have 3 topics, then some specific probability distributions we’d\nlikely see are:\nMixture X: 90% topic A, 5% topic B, 5% topic C\nMixture Y: 5% topic A, 90% topic B, 5% topic C\nMixture Z: 5% topic A, 5% topic B, 90% topic C\n\nLet's say companies face 4 different kinds of risk:\n market, liquidity, credit, operational\n\n\"If you view the number of topics as a number of clusters and the probabilities\nas the proportion of cluster membership, then using LDA is a way of soft-\nclustering your composites and parts. 
With the documents now mapped to a lower\ndimensional latent/hidden topic/category space, you can now apply other machine\nlearning algorithms which will benefit from the smaller number of dimensions.\nFor example, you could run your documents through LDA, and then hard-cluster\nthem using DBSCAN.\"\n\nWritten by Evie Wan, Nicholas Mosca, Eric South\n--------------------------------------------------------------------------------\n'''\nfrom gensim import corpora\nfrom gensim import models\nimport numpy as np\n\nimport html_parser\n\n\ndef tf_idf(corpus_of_text):\n \"\"\"\n Compare documents by Term Frequency–Inverse Document Frequency (TF-IDF).\n\n :param corpus_of_text: list of documents, where each document is a sublist\n of tokenized strings.\n :return weights: nested list containing words and their frequency weights.\n \"\"\"\n # Create dictionary (each words gets a unique ID)\n risk_dict = corpora.Dictionary(corpus_of_text)\n # print(risk_dict.token2id) # Display words and their unique IDs\n\n # Create a bag-of-words corpus\n risk_corpus = \\\n [risk_dict.doc2bow(doc, allow_update=True) for doc in corpus_of_text]\n # print(risk_corpus) # Print corpus represented as a dense array\n\n # Reference dictionary to make corpus human readable\n # word_counts = \\\n # [[(risk_dict[id], count) for id,\n # count in line] for line in risk_corpus]\n # print(word_counts) # Print corpus where IDs are replaced with the word\n\n # Save the dict and corpus to disk\n # risk_dict.save('risk_dict.dict')\n # risk_corpora.MmCorpus.serialize('bow_corpus.mm', bow_corpus)\n\n # Load them back\n # loaded_dict = corpora.Dictionary.load('risk_dict.dict')\n # risk_corpus = corpora.MmCorpus('bow_corpus.mm')\n # for line in corpus:\n # print(line)\n\n # Create the TF-IDF model\n tfidf = models.TfidfModel(risk_corpus, smartirs='ntc')\n\n # Show the TF-IDF weights\n for doc in tfidf[risk_corpus]:\n weights = [[risk_dict[id],\n np.around(freq, decimals=2)] for id, freq in doc]\n\n return weights\n\n\ndef lda(corpus_of_text):\n \"\"\"\n Compare documents by Latent Dirichlet Allocation (LDA).\n\n :param corpus_of_text: list of documents, where each document is a sublist\n of tokenized strings.\n :return model: set of words that are most associated with each topic.\n \"\"\"\n # Create a dictionary and corpus for the LDA model\n lda_dict = corpora.Dictionary(corpus_of_text)\n lda_corpus = [lda_dict.doc2bow(line) for line in corpus_of_text]\n\n # Train the model\n lda_model = models.LdaMulticore(corpus=lda_corpus,\n id2word=lda_dict,\n random_state=100,\n num_topics=4,\n passes=10,\n chunksize=1000,\n batch=False,\n alpha='asymmetric',\n decay=0.5,\n offset=64,\n eta=None,\n eval_every=0,\n iterations=100,\n gamma_threshold=0.001,\n per_word_topics=True)\n\n # Save the model\n # lda_model.save('lda_model.model')\n\n return lda_model.print_topics(-1) # See the topics\n\n\ndef main():\n all_text = html_parser.main() # Import a nested list of tokenized words\n tf_idf_weights = tf_idf(all_text) # Run TF-IDF model and return weights\n topic_mixtures = lda(all_text)\n\n print(tf_idf_weights)\n print(topic_mixtures)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"BU-Spark/CS506Spring2021Repository","sub_path":"10_K_Risk_Evaluator/topic_models.py","file_name":"topic_models.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"57"} +{"seq_id":"70110240179","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport socket\n\n\nsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\nsock.connect(\"test.sock\")\n\nwhile True:\n    data = input(\"Enter the data you want to send: \")\n    data = data + \"\\n\" # the \\n is the delimiter the Go server reads up to\n    sock.send(data.encode(\"utf-8\"))\n    receive = sock.recv(1024) # read a generous amount; the extra capacity does no harm\n    print(\"py : \",receive.decode(\"utf-8\"))\n\nsock.close()\n","repo_name":"elissa2333/library","sub_path":"echoClientUnix2.py","file_name":"echoClientUnix2.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"37778374945","text":"from asyncio import create_task\nfrom asyncio import gather\nfrom uuid import UUID\n\nfrom fastapi import APIRouter\nfrom fastapi import Body\n\nfrom . import handlers\nfrom .. import common\nfrom .. import exceptions\nfrom .. import lora\nfrom .. import mapping\nfrom .. import util\n\nrouter = APIRouter()\n\n\nclass RelatedUnitRequestHandler(handlers.OrgFunkRequestHandler):\n    \"\"\"This is a dummy handler that exists to enable reading related units.\n\n    Eventually, we'll do that in the handlers, but for now we use\n    their _existence_ to allow reading.\n\n    \"\"\"\n\n    role_type = \"related_unit\"\n    function_key = mapping.RELATED_UNIT_KEY\n\n    def prepare_create(self, req: dict):\n        raise NotImplementedError\n\n    def prepare_edit(self, req: dict):\n        raise NotImplementedError\n\n\n@router.post(\"/ou/{origin}/map\")\nasync def map_org_units(origin: UUID, req: dict = Body(...)):\n    \"\"\"Mark the given organisational units as related.\n\n    .. :quickref: Unit; Map\n\n    Please note that this defines the related/mapped units for the\n    given unit from the given timestamp and onwards. Any other\n    preexisting mappings are terminated.\n\n    :statuscode 200: The operation succeeded.\n    :statuscode 404: No such unit found.\n    :statuscode 409: Validation failed, see below.\n\n    :param origin: The UUID of the organisational unit.\n\n    :<json Array destination: The UUIDs of the organisational units to mark as related.\n\n    :>json Array added: The UUIDs of added function relations.\n    :>json Array removed: The UUIDs of removed function relations.\n    :>json Array unchanged: The UUIDs of function relations not modified.\n\n    **Example Request**:\n\n    .. sourcecode:: json\n\n        {\n            \"destination\": [\n                \"04c78fc2-72d2-4d02-b55f-807af19eac48\",\n                \"469d655b-6d61-446a-90f0-989448f08654\"\n            ],\n            \"validity\": {\n                \"from\": \"2017-03-01\"\n            }\n        }\n\n    **Example Response**:\n\n    .. 
sourcecode:: json\n\n {\n \"added\": [\n \"9ec1eab4-abcc-432c-899d-86dce18d4fa2\"\n ],\n \"deleted\": [\n \"daa77a4d-6500-483d-b099-2c2eb7fa7a76\"\n ],\n \"unchanged\": [\n \"9d07123e-47ac-4a9a-88c8-da82e3a4bc9e\"\n ]\n }\n\n **Validation**:\n\n All of the given organisation units must exist at the given date,\n and they must not be terminated.\n\n \"\"\"\n origin = str(origin)\n\n date = util.get_valid_from(req)\n c = lora.Connector(effective_date=date)\n destinations = set(util.checked_get(req, \"destination\", [], required=True))\n if origin in destinations:\n exceptions.ErrorCodes.E_RELATED_TO_SELF(\n origin=origin,\n destinations=sorted(destinations),\n )\n\n wanted_units = {origin} | destinations\n units = dict(await c.organisationenhed.get_all_by_uuid(uuids=sorted(wanted_units)))\n\n if len(units) != len(wanted_units):\n exceptions.ErrorCodes.E_ORG_UNIT_NOT_FOUND(\n org_unit_uuid=sorted(wanted_units - units.keys()),\n )\n\n good = {\n unitid\n for unitid, unit in units.items()\n for state in util.get_states(unit)\n if util.get_effect_to(state) == util.POSITIVE_INFINITY\n and state[\"gyldighed\"] == \"Aktiv\"\n }\n\n if wanted_units - good:\n exceptions.ErrorCodes.V_DATE_OUTSIDE_ORG_UNIT_RANGE(\n org_unit_uuid=sorted(wanted_units - good),\n )\n\n (orgid,) = mapping.BELONGS_TO_FIELD.get_uuids(units[origin])\n\n preexisting = {\n unitid: funcid\n for funcid, func in await c.organisationfunktion.get_all(\n funktionsnavn=mapping.RELATED_UNIT_KEY,\n tilknyttedeenheder=origin,\n gyldighed=\"Aktiv\",\n )\n for unitid in mapping.ASSOCIATED_ORG_UNITS_FIELD.get_uuids(func)\n if unitid != origin\n }\n\n edits = {\n funcid: common.inactivate_org_funktion_payload(\n date,\n \"Fjern relateret organisation\",\n )\n for unitid, funcid in preexisting.items()\n if unitid not in destinations\n }\n\n creations = [\n common.create_organisationsfunktion_payload(\n mapping.RELATED_UNIT_KEY,\n date,\n util.POSITIVE_INFINITY,\n \"{} <-> {}\".format(\n mapping.ORG_UNIT_EGENSKABER_FIELD(units[origin])[0][\n \"brugervendtnoegle\"\n ],\n mapping.ORG_UNIT_EGENSKABER_FIELD(units[destid])[0][\n \"brugervendtnoegle\"\n ],\n ),\n tilknyttedebrugere=[],\n tilknyttedeorganisationer=[orgid],\n tilknyttedeenheder=[origin, destid],\n )\n for destid in destinations\n if destid not in preexisting\n ]\n\n return {\n \"deleted\": sorted(\n await gather(\n *[\n create_task(c.organisationfunktion.update(req, funcid))\n for funcid, req in edits.items()\n ]\n )\n ),\n \"added\": sorted(\n await gather(\n *[create_task(c.organisationfunktion.create(req)) for req in creations]\n )\n ),\n \"unchanged\": sorted(destinations & preexisting.keys()),\n }\n","repo_name":"OS2mo/os2mo","sub_path":"backend/mora/service/related.py","file_name":"related.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"42845482076","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : FG-NIC\n# @Author : Xiaoyu LIN\n# @File : util.py\n# @Description : This file contains helper funcitons for both training and testing phases.\n\nfrom typing import Union, Optional\nfrom torch.nn import Module\nimport argparse\nimport os\n\ndef str2bool(v: str) -> bool:\n \"\"\" Convert string to boolean.\n Args:\n v (string): string\n Return:\n (boolean): True or False\n \"\"\"\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise 
argparse.ArgumentTypeError('Boolean value expected.')\n\n \ndef prepare_parser() -> argparse.ArgumentParser:\n \"\"\" Parser for all scripts.\n Returns:\n args (argparse.ArgumentParser): Arguments read from commond line.\n \"\"\"\n \n parser = argparse.ArgumentParser()\n \n # classification network\n parser.add_argument('--classification', default=None, type=str, help='Name of classification network') \n parser.add_argument('--num_classes', default=256 + 1, type=int, help='Number of classes in the dataset')\n parser.add_argument('--dataset', default='caltech256', type=str, help='Name of dataset')\n \n # training\n parser.add_argument('--task', default='classification', type=str, help='Name of task') # ['classification']\n parser.add_argument('--batch_size', default=128, type=int, help='Batch size') # 128 for classification, 256 for restoration\n parser.add_argument('--input_size', default=224, type=int, help='Size of input images')\n parser.add_argument('--num_epochs', default=120, type=int, help='Number of training epoches') # 120 for classification, 60 for restoration and proposed model\n parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') \n parser.add_argument('--warmup', default=5, type=int, help='Number of epochs for batch-step warmup') \n parser.add_argument('--smoothing', default=0.1, type=float, help='Epsilon for label smoothing') \n \n # testing\n parser.add_argument('--num_round', default=10, type=int, help='Number of round to rerun the experiments') \n parser.add_argument('--is_ensemble', default=True, type=str2bool, help='Flag for ensemble or single model')\n \n # degradaion\n parser.add_argument('--degradation', default='clean', type=str, help='Degradation type')\n parser.add_argument('--level', default=0.0, nargs=\"+\", help='Max degradaion level or fixed degradation level')\n parser.add_argument('--level_min', default=None, type=float, help='Min degradation level for spatially_varying')\n parser.add_argument('--vary', default='uniform', type=str, help='Degradation level change style') \n \n # restoration\n parser.add_argument('--restoration', default=None, type=str, help='Name of restoration network')\n parser.add_argument('--patch_size', default=50, type=int, help='Size of input patch')\n parser.add_argument('--stride', default=25, type=int, help='Stride to take image patch')\n \n # fidelity map\n parser.add_argument('--fidelity_input', default=None, type=str, help='Fidelity map input')\n parser.add_argument('--fidelity_output', default='l1', type=str, help='Fidelity map output')\n \n # our model\n parser.add_argument('--mode', default=None, type=str, help='Modes of proposed method')\n parser.add_argument('--downsample', default='bilinear', type=str, help='Downsample method')\n parser.add_argument('--increase', default=0.5, type=float)\n parser.add_argument('--num_channel', default=16, type=int)\n parser.add_argument('--ablation', default=None, type=str)\n \n # system\n parser.add_argument('--dev', default='3,2,1,0', type=str) # Number of GPU to use\n parser.add_argument('--save_dir', default=None, type=str) # Name of model\n \n # global variable\n parser.add_argument('--DATA_DIR', default=\"./datasets\", type=str) \n parser.add_argument('--CLASSIFIER_DIR', default=\"./classification\", type=str) \n parser.add_argument('--RESTORATION_DIR', default=\"./restorations\", type=str) \n parser.add_argument('--FIDELITY_DIR', default=\"./fidelity\", type=str) \n parser.add_argument('--RESULT_DIR', default=\"./result\", type=str) \n 
parser.add_argument('--MODEL_DIR', default=\"./saved_model\", type=str) \n parser.add_argument('--MEAN', default=[0.485, 0.456, 0.406], type=list) \n parser.add_argument('--STD', default=[0.229, 0.224, 0.225], type=list)\n parser.add_argument('--SEED', default=0, type=int)\n \n return parser.parse_args()\n\n\ndef get_level(level_1: Union[float, list],\n level_2: Optional[Union[float, int, str]] = None,\n ) -> Union[float, tuple, list]:\n \"\"\" Get degradation level from command line.\n Args:\n level_1 (Union[float, list]): Command line argument '--level'\n level_2 (Optional[Union[float, int]]): Command line argument '--level_min'\n Returns:\n level (Union[float, tuple, list]): float for fixed uniform degradation level,\n tuple for saptially varying degradtion level, list for mixture of degradation level.\n \"\"\"\n if not isinstance(level_1, list):\n return level_1\n else:\n level_1 = [float(i) for i in level_1]\n \n level_1 = [i/255.0 if i>1 else i for i in level_1]\n level_2 = level_2/255.0 if level_2 and level_2 > 1 else level_2\n \n if len(level_1) == 1:\n return level_1[0] if level_2 is None else (level_1[0], level_2)\n else:\n return level_1\n\n\ndef set_cwd(args: argparse.ArgumentParser,\n phase: str = 'train') -> None:\n \"\"\" Set up current working directory.\n Args:\n args (argparse.ArgumentParser): Arguments read from command-line.\n phase (str): Test or train phase.\n \"\"\"\n if 'classification' in args.task.lower():\n PATH = args.classification + '-' + args.degradation if phase == 'train' else args.classification\n DIR = args.CLASSIFIER_DIR\n PATH = PATH + '-' + args.restoration if args.restoration is not None and phase == 'train' else PATH \n elif 'restoration' in args.task.lower():\n PATH = args.restoration.lower()\n DIR = args.RESTORATION_DIR\n elif 'fidelity' in args.task.lower():\n PATH = '-'.join([args.fidelity_input, args.fidelity_output, args.restoration])\n DIR = args.FIDELITY_DIR\n elif 'model' in args.task.lower():\n PATH = '-'.join([args.classification, args.restoration, args.mode, args.fidelity_output])\n if args.fidelity_input is not None:\n PATH += '-' + args.fidelity_input\n DIR = args.MODEL_DIR\n elif 'deepcorrect' in args.task.lower():\n PATH = args.classification\n DIR = './baseline/DeepCorrect'\n elif 'wavecnet' in args.task.lower():\n PATH = args.classification\n DIR = './baseline/WaveCNet'\n \n PATH = '-'.join([args.dataset.lower(), PATH])\n if args.save_dir is not None:\n PATH += '-' + args.save_dir\n CWD= os.path.expanduser(os.path.join(DIR, PATH)) \n if not os.path.isdir(CWD):\n os.makedirs(CWD)\n os.chdir(CWD)\n \n\ndef set_parameter_requires_grad(model: Module, \n requires_grad: bool = False\n ) -> None:\n \"\"\" Setup feature extract or fine tuning.\n Args:\n model (Module): model to be trained\n requires_grad (bool):\n \"\"\"\n for param in model.parameters():\n param.requires_grad = requires_grad\n \n \ndef prepare_ablation(ablation: str) -> None:\n \n if 'spatialmultiplication' in ablation.lower():\n from ablation import SpatialMultiplication as Model\n elif 'residualmechanism' in ablation.lower():\n from ablation import ResidualMechanism as Model\n elif 'spatialaddition' in ablation.lower():\n from ablation import SpatialAddition as Model\n elif 'channelmultiplication' in ablation.lower():\n from ablation import ChannelMultiplication as Model\n elif 'channelconcatenation' in ablation.lower():\n from ablation import ChannelConcatenation as Model\n else:\n raise ValueError('Invalid ablation method.')\n \n return 
Model","repo_name":"IVRL/FG-NIC","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8313,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"5979333423","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nfrom keras.models import Sequential\nfrom tensorflow.keras import optimizers\nimport itertools\nimport numpy as np\nimport cma\nfrom sklearn.model_selection import KFold\nfrom keras.models import Sequential\nimport zipfile\nimport pandas as pd\nfrom tensorflow.keras.utils import to_categorical\nimport csv\nimport os.path\nimport shutil\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\n\n\n\nfrom keras.layers import Dense\nfrom DEN import DENlayer\nfrom DMN import DMNlayer\nfrom DSN import DSNlayer\n\nfrom PreTrain.kmeans import bkmeans\nfrom PreTrain.kmeans import ekmeans\nfrom PreTrain.kmeans import skmeans\n\ndef write_data(File, DataSet,Acc, Std, Modelo, Neuronas, Activacion, Sigma, Xpopsize,Kfold, Scaler):\n # csv header\n fieldnames = ['DataSet', 'Acc', 'Std', 'Modelo', 'Neuronas','Activacion','Sigma','Xpopsize','Kfold','Scaler']\n\n\n # csv data\n rows = [\n {'DataSet': DataSet,\n 'Acc': Acc,\n 'Std': Std,\n 'Modelo': Modelo,\n 'Neuronas': Neuronas,\n 'Activacion': Activacion,\n 'Sigma': Sigma,\n 'Xpopsize': Xpopsize,\n 'Kfold': Kfold,\n 'Scaler':Scaler\n }\n ]\n\n flag=True\n if os.path.exists(File) == False:\n flag = False\n with open(File, 'a', encoding='UTF8', newline='') as f:\n if flag == False:\n print(fieldnames)\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(rows) \n else:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n #writer.writeheader()\n writer.writerows(rows) \n\ndef get_data(Folder_dir, File_name, len_test=0.1):\n zip_ref = zipfile.ZipFile(Folder_dir+\"/\"+File_name+\".zip\", 'r')\n zip_ref.extractall(\"./\")\n zip_ref.close()\n data = pd.read_csv(File_name+\".csv\")\n #data = data.drop(labels='Unnamed: 32',axis=1)\n from sklearn import preprocessing \n label = preprocessing.LabelEncoder() \n \n\n X = data.drop(['Class'], axis=1)\n if 'id' in X.columns:\n X = X.drop(['id'],axis=1)\n print(X.head())\n label = preprocessing.LabelEncoder() \n data['Class'] = label.fit_transform(data['Class'])\n Y = data['Class']\n X = X.values\n \n if len(Y.unique())>2:\n Y = to_categorical(Y, dtype =\"uint8\")\n if len_test == 0:\n x_train = X\n y_train = Y\n return x_train,y_train\n \n else:\n x_train = X[:int(1-(len(Y)*len_test))]\n y_train = Y[:int(1-(len(Y)*len_test))]\n x_test = X[int(1-(len(Y)*len_test)):]\n y_test = Y[int(1-(len(Y)*len_test)):]\n return x_train,y_train,x_test,y_test\n \n \n \n\n \n \n\n\ndef model_init(x,y,layers = ['DMN', 'DMN'], neurons=[2,2,1], activations=['tanh','sigmoid'], dendrites=[]):\n\n model = Sequential()\n nlayer = 1\n print(\"layers\", layers)\n for layer in layers:\n if layer == 'P':\n if nlayer == 1:\n model.add(Dense(neurons[1], activation = activations[0], input_shape = (neurons[0],) ) )\n else:\n model.add(Dense(neurons[2], activation = activations[1]))\n\n if layer == 'DMN':\n if nlayer == 1:\n model.add(DMNlayer(neurons[1], dendrites, activation = activations[0], input_shape = (neurons[0],)))\n else:\n model.add(DMNlayer(neurons[2], activation = activations[1]))\n\n if layer == 'DEN':\n if nlayer == 1:\n model.add(DENlayer(neurons[1], dendrites, activation = activations[0], input_shape = (neurons[0],)))\n else:\n model.add(DENlayer(neurons[2], activation = activations[1]))\n 
\n if layer == 'DSN':\n if nlayer == 1:\n model.add(DSNlayer(neurons[1], dendrites, activation = activations[0], input_shape = (neurons[0],)))\n else:\n model.add(DSNlayer(neurons[2], activation = activations[1])) \n nlayer = nlayer+1\n \n adam = optimizers.Adam(learning_rate=0.01)\n if activations[1] == 'softmax':\n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n \n if activations[1] == 'sigmoid':\n model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n \n \n model.summary() \n return model\ndef get_theta0(weights):\n theta0 = []\n \n if isinstance(weights, (np.ndarray, list) ):\n \n try:\n weights =list(itertools.chain(*weights))\n weights = get_theta0(weights,deep)\n \n except:\n \n try:\n weights =list(itertools.chain(*weights))\n \n for t in weights:\n if isinstance(t, (np.ndarray) ):\n for t_aux in t:\n theta0.append(t_aux)\n if isinstance( t, (np.floating, float) ):\n theta0.append(t)\n except:\n for t in weights:\n if isinstance(t, (np.ndarray) ):\n for t_aux in t:\n if isinstance(t_aux, (np.ndarray) ):\n for t_aux_2 in t_aux:\n theta0.append(t_aux_2)\n else:\n theta0.append(t_aux)\n if isinstance( t, (np.floating, float) ):\n theta0.append(t)\n weights = theta0\n return weights\n \n return weights\n\ndef set_params(theta, neurons, model):\n layer = 0\n model_type = []\n for layer_type in model.layers[::]:\n \n if isinstance(layer_type, Dense):\n if layer == 0:\n \n l1 = theta[:neurons[0]*neurons[1] + neurons[1]]\n l2 = theta[neurons[0]*neurons[1] + neurons[1]:]\n w1_1 = l1[:neurons[0]*neurons[1]].reshape(neurons[0],neurons[1])\n w1_2 = l1[neurons[0]*neurons[1]:].reshape(neurons[1],)\n \n if layer == 1:\n \n w2_1 = l2[:neurons[1]*neurons[2]].reshape(neurons[1],neurons[2])\n w2_2 = l2[neurons[1]*neurons[2]:].reshape(neurons[2],)\n \n if isinstance(layer_type, DMNlayer):\n if layer == 0:\n l1 = theta[:neurons[0] * neurons[1] * 2]\n l2 = theta[neurons[0] * neurons[1] * 2:]\n w1_1 = l1[:neurons[0]*neurons[1]].reshape(neurons[1],neurons[0])\n w1_2 = l1[neurons[0]*neurons[1]:].reshape(neurons[1], neurons[0])\n \n if layer == 1:\n w2_1 = l2[:neurons[1]*neurons[2]].reshape(neurons[2],neurons[1])\n w2_2 = l2[neurons[1]*neurons[2]:].reshape(neurons[2],neurons[1])\n \n \n if isinstance(layer_type, DENlayer):\n if layer == 0:\n l1 = theta[:neurons[0] * neurons[1] + (neurons[0]**2)*neurons[1] ]\n l2 = theta[neurons[0] * neurons[1] + (neurons[0]**2)*neurons[1]:]\n w1_1 = l1[:neurons[0] * neurons[1]].reshape(neurons[1],1,neurons[0])\n w1_2 = l1[neurons[0] * neurons[1]:].reshape(neurons[1],neurons[0],neurons[0])\n if layer == 1:\n w2_1 = l2[:neurons[1]*neurons[2]].reshape(neurons[2],1,neurons[1])\n w2_2 = l2[neurons[1]*neurons[2]:].reshape(neurons[2],neurons[1],neurons[1])\n \n if isinstance(layer_type, DSNlayer):\n if layer == 0:\n l1 = theta[:neurons[0] * neurons[1] +neurons[1] ]\n l2 = theta[neurons[1] * neurons[0] + neurons[1]:]\n w1_1 = l1[:neurons[1] * neurons[0]].reshape(neurons[1],1,neurons[0])\n w1_2 = l1[neurons[1] * neurons[0]:].reshape(neurons[1],1)\n \n if layer == 1:\n w2_1 = l2[:neurons[1]*neurons[2]].reshape(neurons[2],1,neurons[1])\n w2_2 = l2[neurons[1]*neurons[2]:].reshape(neurons[2],1)\n \n layer = 1\n \n \n theta = [w1_1,w1_2,w2_1,w2_2]\n model.set_weights(theta)\n \n \n \n\ndef train_acc(theta, neurons, xtrain, ytrain, model):\n set_params(theta, neurons, model)\n _, acc = model.evaluate(xtrain,ytrain, verbose=False)\n return -acc\n\ndef val_acc(theta, neurons, xval, yval, model):\n set_params(theta, neurons, 
model)\n    _, val_acc = model.evaluate(xval,yval, verbose=False)\n    return -val_acc\n\ndef train_loss(theta, neurons, xtrain, ytrain, model):\n    set_params(theta, neurons, model)\n    loss, acc = model.evaluate(xtrain,ytrain, verbose=False)\n    #print(\"loss:\",loss)\n    return loss\n\ndef val_loss(theta, neurons, xval, yval, model):\n    set_params(theta, neurons, model)\n    loss, val_acc = model.evaluate(xval,yval, verbose=False)\n    return loss\n\n\ndef cma_training(model, neurons, theta_0, sigma , x_train, y_label_train, x_test, y_label_test,file, cma_dict={} ):\n    pop_size = 4 + int(3*np.log(neurons[0]))\n    seed = None\n    tolfun = 1e-11\n    max_iter = 100 + 150*(neurons[0]+3)**2 // (pop_size*100)**0.5\n    cmean = 0.5\n    mu = int(pop_size/2)\n    mu_eff = (pop_size/4.0) #pop_size/4\n    rank_one = 2/neurons[0]**2\n    rank_mu = min(mu_eff/neurons[0]**2 ,1-rank_one)\n    popsize_factor = 1.5\n    \n    dict_cma = {'popsize': pop_size, # 'popsize': '4+int(3*np.log(n)) # population size, AKA lambda, number of\n                #new solution per iteration' n=space dimension\n               'seed': None, # 'seed': 'time # random number seed for `numpy.random`;\n               #`None` and `0` equate to `time`, `np.nan` means \"do nothing\", see also option \"randn\"',\n               'tolfun': 1e-11, # 'tolfun': '1e-11 #v termination criterion: tolerance in function value, quite useful'\n               'maxiter': max_iter, # 'maxiter': '100 + 150 * (N+3)**2 // popsize**0.5 #v maximum number of iterations',\n               'CMA_cmean': cmean, # 'CMA_cmean': '1 # learning rate for the mean value', see equation 9 of the tutorial\n               'CMA_rankmu': rank_mu, # 'CMA_rankmu': '1.0 # multiplier for rank-mu update learning rate of covariance matrix'\n               #see equation 30 of the tutorial\n               # For cμ = 1, no prior information is retained.\n               #For cμ = 0, no learning takes place, cμ ≈ min(1, μeff/n^2) is a reasonable choice.\n               'CMA_rankone': rank_one, # 'CMA_rankone': '1.0 # multiplier for rank-one update learning rate of covariance matrix',\n               'CMA_mu': mu, # 'CMA_mu': 'None # parents selection parameter, default is popsize // 2',\n               #see eq. 9 of the tutorial
\n        \n        \n    }\n    if len(cma_dict)>0:\n        for item,val in cma_dict.items() :\n            dict_cma[item] = val\n    \n    \n    \n    es = cma.CMAEvolutionStrategy(theta_0, sigma, dict_cma ) # set the initial mean and step-size (std)\n    fbest = 1000\n    acc_best_v = 0\n    tacc = 0\n    vacc = 0 \n    i = 0\n    print(dict_cma['maxiter'])\n    while not es.stop():\n        #while i < max_iter:\n        # NOTE: the ask/tell step below is reconstructed (the original span was garbled\n        # in extraction); it follows pycma's standard sampling loop.\n        solutions = es.ask()\n        es.tell(solutions, [train_loss(s, neurons, x_train, y_label_train, model) for s in solutions])\n        tacc = -train_acc(es.result.xbest, neurons, x_train, y_label_train, model)\n        vacc = -val_acc(es.result.xbest, neurons, x_test, y_label_test, model)\n        if vacc > acc_best_v:\n            acc_best_v = vacc\n            thetabest = es.result.xbest\n            print(\"*******\",tacc,vacc)\n            np.save(file, thetabest)\n        \n        \n        es.logger.add() # write data to disc to be plotted\n        es.disp() \n        if vacc == 1:\n            break\n        \n        i += 1\n\n    \n    return es\n\ndef main(model_name, neurons, activations ,kfold,cma_dict, sigma ,x,y, scaler):\n    \n    # Define per-fold score containers\n    acc_per_fold = []\n    loss_per_fold = []\n    best_score = 0\n    \n    # Define the K-fold Cross Validator\n    kfold = KFold(n_splits=kfold, shuffle=True)\n\n    fold_no = 1\n    \n\n    file = \"thetabest\"+model_name[0]+model_name[1]\n    global_theta_best = \"global_theta_best\"+model_name[0]+model_name[1]\n\n    \n\n\n    for train, test in kfold.split(x, y):\n        # Generate a print\n        print('------------------------------------------------------------------------')\n        print(f'Training for fold {fold_no} ...')\n        \n        \n        \n        \n        \n        # fit scaler on training data\n        norm = MinMaxScaler((-scaler,scaler)).fit(x[train])\n        x[train] = norm.transform(x[train])\n        x[test] = norm.transform(x[test])\n        norm = StandardScaler(with_std=False).fit(x[train])\n        # transform training data\n        x[train] = norm.transform(x[train])\n        std = np.std(x[train])\n        mean = np.mean(x[train])\n        print(\"std\",np.std(x[train]))\n        print(\"mean\",np.mean(x[train]))\n        \n\n        # transform testing data\n        x[test] = norm.transform(x[test])\n        \n        \n        #(x,y,layers = ['DMN', 'DMN'], neurons=[2,2,1], activations=['tanh','sigmoid'], dendrites=[]):\n        #dendrites = bkmeans.bkmeans(x[train],y[train],[int(neurons[1])],0.01)\n        \n        model = model_init(x[train], y[train], model_name, neurons, activations)\n        weights = model.get_weights()\n        #print(\"-->\",weights)\n        #weights = np.array(weights)\n        theta_0 = get_theta0(weights)\n\n        \n        #def cma_training(model, neurons, theta_0, sigma , x_train, y_label_train, x_test, y_label_test,file, cma_dict={} ):\n        es = cma_training(model, neurons, theta_0, std , x[train], y[train], x[test], y[test],file, cma_dict)\n\n        print('termination:', es.stop())\n        # Generate generalization metrics\n        thetabest = np.load(file+\".npy\")\n        scores = -val_acc(thetabest,neurons,x[test], y[test], model)\n        if best_score < scores:\n            best_score = scores\n            shutil.copy(file+\".npy\", global_theta_best+\".npy\")\n        \n        print(f'Score for fold {fold_no}: {model.metrics_names[1]} of {scores*100}%')\n        acc_per_fold.append(scores * 100)\n        \n        shutil.copy(file+\".npy\", file+\"_fold\"+str(fold_no)+\".npy\")\n        \n\n        # Increase fold number\n        fold_no = fold_no + 1\n\n    # == Provide average scores ==\n    print('------------------------------------------------------------------------')\n    print('Score per fold')\n    for i in range(0, len(acc_per_fold)):\n        print('------------------------------------------------------------------------')\n        print(f'> Fold {i+1} - Accuracy: {acc_per_fold[i]}%')\n    print('------------------------------------------------------------------------')\n    print('Average scores for all folds:')\n    print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')\n    #print(f'> Loss: {np.mean(loss_per_fold)}')\n    print('------------------------------------------------------------------------')\n    #File = 'CMA-ES.csv', DataSet,Acc, Std, Modelo, Neuronas, Activacion, Sigma, Xpopsize,kfold\n    
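For reference, the ask/tell pattern that `cma_training` drives (and that the reconstructed step above follows) is pycma's documented optimization loop. A toy sketch on a sphere function; the objective, the dimension, and the option values here are illustrative choices, not the ones used in this record:

```python
import numpy as np
import cma

# Stand-in objective; the record above evaluates a Keras model's loss instead.
def sphere(x):
    return float(np.sum(np.asarray(x) ** 2))

es = cma.CMAEvolutionStrategy(5 * [1.0], 0.5, {"popsize": 8, "maxiter": 100})
while not es.stop():
    solutions = es.ask()                                # sample a population of candidates
    es.tell(solutions, [sphere(s) for s in solutions])  # rank them; update mean and covariance
    es.disp()
print("best:", es.result.xbest, "f(best):", es.result.fbest)
```

The `tell` call is what actually advances the strategy; a loop that only reads `es.result` without telling fitness values back never converges, which is why the reconstruction above pairs `ask` with `tell` on every iteration.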
Data=[np.mean(acc_per_fold),np.std(acc_per_fold) ,model_name[0]+model_name[1],std]\n return model, Data \n","repo_name":"RRomanGodinez/DNN-trained-by-CMA-ES","sub_path":"src/cma_functions.py","file_name":"cma_functions.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10591408358","text":"import logging\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom typing import Optional, List\n\nfrom .API import WasteData\n\nfrom homeassistant.const import CONF_RESOURCES\nfrom homeassistant.components.calendar import CalendarEntity, CalendarEvent\nfrom homeassistant.core import HomeAssistant\n\nfrom .const import DOMAIN, CONF_ID\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef setup_platform(hass, config, async_add_entities, discovery_info=None):\n\n if discovery_info and \"config\" in discovery_info:\n conf = discovery_info[\"config\"]\n else:\n conf = config\n\n if not conf:\n return\n\n async_add_entities([AfvalbeheerCalendar(hass.data[DOMAIN][conf[CONF_ID]], conf)])\n\n\nclass AfvalbeheerCalendar(CalendarEntity):\n \"\"\"Defines a Afvalbeheer calendar.\"\"\"\n\n _attr_icon = \"mdi:delete-empty\"\n\n def __init__(\n self,\n WasteData: WasteData,\n config,\n ) -> None:\n \"\"\"Initialize the Afvalbeheer entity.\"\"\"\n self.WasteData = WasteData\n self.config = config\n\n self._attr_name = f\"{DOMAIN.capitalize()} {WasteData.waste_collector}\"\n self._attr_unique_id = f\"{DOMAIN}_{config[CONF_ID]}\"\n\n self._event = None\n\n @property\n def event(self) -> Optional[CalendarEvent]:\n \"\"\"Return the next upcoming event.\"\"\"\n if len(self.WasteData.collections) > 0:\n waste_item = self.WasteData.collections.get_sorted()[0]\n return CalendarEvent(\n summary=waste_item.waste_type,\n start=waste_item.date.date(),\n end=(waste_item.date + timedelta(days=1)).date(),\n )\n\n async def async_get_events(\n self, hass: HomeAssistant, start_date: datetime, end_date: datetime\n ) -> List[CalendarEvent]:\n \"\"\"Return calendar events within a datetime range.\"\"\"\n events: List[CalendarEvent] = []\n for waste_items in self.WasteData.collections:\n if start_date.date() <= waste_items.date.date() <= end_date.date():\n # Summary below will define the name of event in calendar\n if waste_items.waste_type in self.config[CONF_RESOURCES]:\n events.append(\n CalendarEvent(\n summary=waste_items.waste_type,\n start=waste_items.date.date(),\n end=waste_items.date.date() + timedelta(days=1),\n )\n )\n\n return events\n","repo_name":"pippyn/Home-Assistant-Sensor-Afvalbeheer","sub_path":"custom_components/afvalbeheer/calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":227,"dataset":"github-code","pt":"57"} +{"seq_id":"74395653937","text":"from aiohttp import web\nfrom aiohttp.web_response import ContentCoding\nfrom functools import wraps\n\nCOMPRESS_FASTEST = 1\nBASE_STRING_SIZE = 49\nMTU_TCP_PACKET_SIZE = 1500\nCOMPRESS_THRESHOLD = MTU_TCP_PACKET_SIZE + BASE_STRING_SIZE\n\n\n# @json_response decorator for class methods\ndef json_response(func):\n \"\"\" @json_response decorator adds header and dumps response object \"\"\"\n\n @wraps(func)\n async def wrapper(self, request, *args, **kwargs):\n res = await func(self, request, *args, **kwargs)\n response = web.json_response(data=res)\n if response.content_length > COMPRESS_THRESHOLD:\n response.enable_compression(force=ContentCoding.gzip)\n return 
response\n\n    return wrapper\n","repo_name":"CityOfZion/neo-python","sub_path":"neo/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":316,"dataset":"github-code","pt":"57"} +{"seq_id":"20932424790","text":"from django.urls import path\n\nfrom .views import (\n    ProductDetailView,\n    CategoryDetailView,\n    BaseView,\n    RegisterUser,\n    logout_user,\n    LoginUser,\n)\n\nurlpatterns = [\n    path('', BaseView.as_view(), name='index'),\n    path('products///', ProductDetailView.as_view(), name='product_detail'),\n    path('category//', CategoryDetailView.as_view(), name='category_detail'),\n    path('login/', LoginUser.as_view(), name='login'),\n    path('logout/', logout_user, name='logout'),\n    path('register/', RegisterUser.as_view(), name='register'),\n]\n\n","repo_name":"Biiskii/shop","sub_path":"shop/digits/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39202320994","text":"import argparse\r\nimport os\r\nimport csv\r\n\r\nparser = argparse.ArgumentParser(description=\"convert TSV file to CSV\")\r\nparser.add_argument('-r', '--reverse', help=\"reverse conversion\", action='count', required=False, default=False)\r\nparser.add_argument('file', help=\"file to convert\")\r\nargs = parser.parse_args()\r\n\r\nf = args.file\r\nif f.endswith('.tsv') or f.endswith('.csv'):\r\n    f = f[:-4]\r\n\r\nif not args.reverse:\r\n    with open(f+'.tsv','r') as fin:\r\n        with open(f+'.csv','w') as fout:\r\n            csvout = csv.writer(fout)\r\n            for l in fin.readlines():\r\n                csvout.writerow(l.strip().split('\\t'))\r\n    os.remove(f+'.tsv')\r\nelse:\r\n    with open(f+'.csv','r') as fin:\r\n        csvin = csv.reader(fin)\r\n        with open(f+'.tsv','w') as fout:\r\n            for l in list(csvin):\r\n                fout.write('\\t'.join(l)+'\\n')\r\n    os.remove(f+'.csv')\r\n","repo_name":"omerktz/TraFix","sub_path":"utils/tsv2csv.py","file_name":"tsv2csv.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"57"} +{"seq_id":"21010561091","text":"from flask import Flask, Response, request\nimport requests\nimport json\nimport mysql.connector\n\napp = Flask(__name__)\napp.config[\"JSON_SORT_KEYS\"] = False\n\nconnection = mysql.connector.connect(\n    host='127.0.0.1',\n    port=3306,\n    database='eu_flight_game',\n    user='root',\n    password='root'\n)\n\n\n@app.route('/get_weather')\ndef get_weather():\n    try:\n        args = request.args\n        icao = str(args.get(\"icao\"))\n        sql = f\"SELECT municipality FROM eu_airports WHERE ident = \\\"{icao}\\\";\"\n        cursor = connection.cursor()\n        cursor.execute(sql)\n        result = cursor.fetchall()\n        cursor.close()\n\n        ex12_api_key = \"873df6e3e029ba2d8538f1482461817f\"\n        location_name = result[0][0]\n        request2 = \"https://api.openweathermap.org/data/2.5/weather?q=\" + location_name + \"&appid=\" + ex12_api_key + \"&units=metric\"\n\n        response2 = requests.get(request2)\n        json_response2 = response2.json()\n        return f\"{location_name} {json_response2['weather'][0]['description']} {json_response2['main']['temp']}\"\n\n    except ValueError:\n        response = {\n            \"message\": \"Invalid request\",\n            \"status\": 400\n        }\n        json_response = json.dumps(response)\n        http_response = Response(response=json_response, status=400, mimetype=\"application/json\")\n        return http_response\n\n\n@app.errorhandler(404)\ndef page_not_found(error_code):\n    response = {\n        \"message\": \"Invalid endpoint\",\n
\"status\": 404\n }\n json_response = json.dumps(response)\n http_response = Response(response=json_response, status=404, mimetype=\"application/json\")\n return http_response\n\n\nif __name__ == '__main__':\n app.run(use_reloader=True, host='127.0.0.1', port=5000)\n\n\n\n\n","repo_name":"Aki78/AirportHeistGroup3MetropoliaProject","sub_path":"Python/FlaskAPIs/Weather.py","file_name":"Weather.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"5269907732","text":"from collections import deque\nfrom heapq import heappush, heappop\nN, M = map(int, input().split())\nopener = []\ncan1 = []\ncan2 = []\nfor _ in range(N):\n T, X = map(int, input().split())\n if T==0: can1.append(X)\n elif T==2: opener.append(X)\n else: can2.append(X)\n\ncan1.sort(reverse=True)\ncan2.sort(reverse=True)\nopener.sort(reverse=True)\n\ncan1 = deque(can1)\ncan2 = deque(can2)\nans = 0\nhold = []\nhap = 0\nfor i in range(M):\n if can1:\n heappush(hold, can1[0])\n hap += can1.popleft()\n else:\n heappush(hold, 0)\nans = hap \nfor c in opener:\n if len(hold)==0: break\n hap -= heappop(hold)\n for i in range(c):\n if len(can2)==0 or len(hold)==0: break\n n = heappop(hold)\n if can2[0]>n:\n heappush(hold, can2[0])\n hap += can2.popleft()\n hap -= n\n ans = max(ans, hap)\nprint(ans)","repo_name":"neko0774/competitive","sub_path":"atcoder/abc/abc312/f/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43806116053","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom .views import AssignViewSets, PsihoTestViewSets, AnswerTestViewSets\n\nrouter = routers.DefaultRouter()\nrouter.register('assigned', AssignViewSets)\nrouter.register('psiho_test', PsihoTestViewSets)\n# router.register('answer_test', AnswerTestViewSets)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth', include('rest_framework.urls', namespace='rest_framework'))\n] ","repo_name":"Sorinace/Final-project-Django--2021--Scoala-Informala","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"14355318172","text":"#Дан список размера N. Найти номера тех элементов списка, которые больше своего\n#левого соседа, и количество таких элементов. Найденные номера выводить в\n#порядке их убывания.\n\nimport random #имортируется библиотека рандом\n\nn = random.randrange(2,11) # генирируется рандомное число от 2 до 11\nprint('n = ', n) # вывод числа n\n\na = [random.randrange(1,11) for i in range(n)] # список в котором генерируются значения от 1 до 11, всего чисел n\nprint(a) # вывод списка\n\n\nc = 0 # счетчик\nfor i in range(len(a)-1,0,-1): # цикл который начинается от len(a)-1 до 0 с шагом -1\n if a[i] > a[i-1]: # условие что a по индексу i больше a[i-1]\n c += 1\n print(i,end='; ') # вывод числа + ;\nprint('Count:',c) # Вывод последнего знач","repo_name":"xercesm/Proj_1sem_Belaya","sub_path":"PZ_6/PZ_6_2.py","file_name":"PZ_6_2.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29410344328","text":"\"\"\"\nCombine documents and question in a prompt and send it to an LLM to get the answer. 
\n\"\"\"\nimport openai\nfrom promptflow import tool\nfrom promptflow.connections import AzureOpenAIConnection\nfrom typing import Generator\nimport os\n\n@tool\ndef rag(\n system_prompt: str,\n chat_history: list[str],\n query: str,\n azure_open_ai_connection: AzureOpenAIConnection,\n deployment_name: str\n) -> Generator[str, None, None]:\n \"\"\"\n Ask the LLM to answer the user's question given the chat history and context.\n \"\"\"\n openai.api_type = azure_open_ai_connection.api_type\n openai.api_base = azure_open_ai_connection.api_base\n openai.api_version = azure_open_ai_connection.api_version\n openai.api_key = azure_open_ai_connection.api_key\n\n messages = [{\"role\": \"system\", \"content\": system_prompt}]\n for item in chat_history:\n messages.append({\"role\": \"user\", \"content\": item[\"inputs\"][\"question\"]})\n messages.append({\"role\": \"assistant\", \"content\": item[\"outputs\"][\"answer\"]})\n messages.append({\"role\": \"user\", \"content\": query})\n\n chat_completion = openai.ChatCompletion.create(\n deployment_id=deployment_name,\n messages=messages,\n temperature=0,\n max_tokens=1024,\n n=1,\n stream=True\n )\n\n for chunk in chat_completion:\n if chunk[\"object\"] == \"chat.completion.chunk\":\n if \"content\" in chunk[\"choices\"][0][\"delta\"]:\n yield chunk[\"choices\"][0][\"delta\"][\"content\"]\n\n","repo_name":"bstollnitz/rag-promptflow","sub_path":"src/rag_flow_n_tools/rag.py","file_name":"rag.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"33314599568","text":"__author__ = 'Michele Johnson'\r\n\r\n\"\"\" Write a new version of the DriveCar project you wrote in Problem 1. \r\nCreate a new Python project DriveCarNew and copy the files from the old project. 
\r\nMake the following changes to class Car:\r\n\r\n(a)\tChange all instance variables to private\r\n(b)\tWrite getter method for speed.\r\n(c)\tDefine a __str__ method to display the car’s make, model and speed.\r\n\r\nCar\r\n -make: String\r\n -model: String\r\n -speed: Integer\r\n +create(car_make: String)\r\n +accelerate(): Integer\r\n +decelerate(): Integer\r\n +getSpeed(): Integer\r\n +entry_choice(): Integer \"\"\"\r\n\r\nfrom Lab14_OOP2_Problem1_MealOrderTemp.valid_entry import integer_entry\r\n\r\n\r\nclass Car:\r\n\r\n \"\"\" Creates three private instance variables to store the car’s make, model, and speed \"\"\"\r\n\r\n def __init__(self, car_make, car_model):\r\n\r\n \"\"\" constructor of class Car \"\"\"\r\n\r\n self.__make = car_make\r\n self.__model = car_model\r\n self.__speed = 0\r\n\r\n def accelerate(self):\r\n\r\n \"\"\" increases speed by 5 \"\"\"\r\n\r\n self.__speed += 5\r\n\r\n def decelerate(self):\r\n\r\n \"\"\" decreases speed by 5\"\"\"\r\n\r\n self.__speed -= 5\r\n if self.__speed < 0:\r\n self.__speed = 0\r\n\r\n def get_speed(self):\r\n\r\n \"\"\" gets and returns speed\"\"\"\r\n return self.__speed\r\n\r\n def __str__(self):\r\n\r\n \"\"\" converts class to string \"\"\"\r\n return '\\nMake:\\t\\t\\t' + self.__make + '\\nModel:\\t\\t\\t' + self.__model + '\\nFinal speed:\\t' + str(\r\n self.__speed)\r\n\r\n def entry_choice(self):\r\n\r\n \"\"\" gets valid menu choice \"\"\"\r\n\r\n a_choice = 0\r\n is_valid = False\r\n while is_valid is False:\r\n print(\"Menu:\\n\"\r\n \"\\t 1 to Accelerate\\n\"\r\n \"\\t 2 to Decelerate\\n\"\r\n \"\\t 3 to Exit\")\r\n request_text = \"Enter selection\"\r\n a_choice = integer_entry(request_text)\r\n\r\n if a_choice < 1 or a_choice > 3:\r\n print(\"\\nNot a valid selection.\\n\"\r\n \"Please try again.\\n\")\r\n else:\r\n is_valid = True\r\n\r\n return a_choice\r\n","repo_name":"mischelay2001/WTCSC121","sub_path":"CSC121Lab13_OOP/CSC121Lab13Problem3_OOP_DriveNewCar/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"9784088320","text":"# std lib\n\n# my lib\n\n# third part lib\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\n\n# constant\nfrom ProjectML.general_util.constant import SEED\n\n\ndef ensemble_random_forest(X_train, y_train):\n clf = RandomForestClassifier(n_estimators=500, criterion='gini', max_features=5, n_jobs=-1, class_weight='balanced',\n random_state=SEED)\n # Fit 'rf' to the training set\n clf.fit(X_train, y_train)\n return clf\n\n\ndef svm_classifier(X_train, y_train):\n clf = svm.SVC(C=100.0, kernel='rbf', class_weight='balanced', max_iter=-1, random_state=SEED).fit(X_train, y_train)\n return clf\n","repo_name":"rtaiello/rispeva-supervised-ml-predictions","sub_path":"ProjectML/proceduralSuccess/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"29138213172","text":"#!/usr/bin/env python\n\"\"\"API Authorization Manager.\"\"\"\n\nimport io\nimport logging\n\nfrom typing import Iterable, Text, Type\n\nimport yaml\n\nfrom grr_response_core import config\nfrom grr_response_core.lib.util import precondition\nfrom grr_response_server.authorization import auth_manager\nfrom grr_response_server.gui import api_call_router\nfrom grr_response_server.gui import api_call_router_registry\n\n\nclass 
Error(Exception):\n \"\"\"Base class for auth manager exception.\"\"\"\n\n\nclass InvalidAPIAuthorization(Error):\n \"\"\"Used when an invalid authorization is defined.\"\"\"\n\n\nclass ApiCallRouterNotFoundError(Error):\n \"\"\"Used when a router with a given name can't be found.\"\"\"\n\n\nclass ApiCallRouterDoesNotExpectParameters(Error):\n \"\"\"Raised when params are passed to a router that doesn't expect them.\"\"\"\n\n\nclass APIAuthorization(object):\n \"\"\"Authorization for users/groups to use an API handler.\"\"\"\n\n def __init__(self):\n self.router_cls = None\n self.users = []\n self.groups = []\n self.router_params = {}\n\n @staticmethod\n def ParseYAMLAuthorizationsList(yaml_data):\n \"\"\"Parses YAML data into a list of APIAuthorization objects.\"\"\"\n try:\n raw_list = list(yaml.safe_load_all(yaml_data))\n except (ValueError, yaml.YAMLError) as e:\n raise InvalidAPIAuthorization(\"Invalid YAML: %s\" % e)\n\n result = []\n for auth_src in raw_list:\n auth = APIAuthorization()\n auth.router_cls = _GetRouterClass(auth_src[\"router\"])\n auth.users = auth_src.get(\"users\", [])\n auth.groups = auth_src.get(\"groups\", [])\n auth.router_params = auth_src.get(\"router_params\", {})\n\n result.append(auth)\n\n return result\n\n\nclass APIAuthorizationManager(object):\n \"\"\"Manages loading API authorizations and enforcing them.\"\"\"\n\n def _CreateRouter(self, router_cls, params=None):\n \"\"\"Creates a router with a given name and params.\"\"\"\n if not router_cls.params_type and params:\n raise ApiCallRouterDoesNotExpectParameters(\"%s is not configurable\" %\n router_cls)\n\n rdf_params = None\n if router_cls.params_type:\n rdf_params = router_cls.params_type()\n if params:\n rdf_params.FromDict(params)\n\n return router_cls(params=rdf_params)\n\n def __init__(self, acl_list: Iterable[APIAuthorization],\n default_router_cls: Type[api_call_router.ApiCallRouter]):\n \"\"\"Initializes the manager by reading the config file.\"\"\"\n precondition.AssertIterableType(acl_list, APIAuthorization)\n\n self.routers = []\n self.auth_manager = auth_manager.AuthorizationManager()\n\n self.default_router = self._CreateRouter(default_router_cls)\n\n for index, acl in enumerate(acl_list):\n router = self._CreateRouter(acl.router_cls, params=acl.router_params)\n self.routers.append(router)\n\n router_id = str(index)\n self.auth_manager.DenyAll(router_id)\n\n for group in acl.groups:\n self.auth_manager.AuthorizeGroup(group, router_id)\n\n for user in acl.users:\n self.auth_manager.AuthorizeUser(user, router_id)\n\n @staticmethod\n def FromYaml(\n source: Text, default_router_cls: Type[api_call_router.ApiCallRouter]\n ) -> \"APIAuthorizationManager\":\n precondition.AssertType(source, Text)\n\n acl_list = APIAuthorization.ParseYAMLAuthorizationsList(source)\n return APIAuthorizationManager(acl_list, default_router_cls)\n\n def GetRouterForUser(self, username):\n \"\"\"Returns a router corresponding to a given username.\"\"\"\n\n for index, router in enumerate(self.routers):\n router_id = str(index)\n\n if self.auth_manager.CheckPermissions(username, router_id):\n logging.debug(\"Matched router %s to user %s\", router.__class__.__name__,\n username)\n return router\n\n logging.debug(\n \"No router ACL rule match for user %s. 
Using default \"\n \"router %s\", username, self.default_router.__class__.__name__)\n return self.default_router\n\n\n# Set in InitializeApiAuthManager\nAPI_AUTH_MGR = None\n\n\ndef InitializeApiAuthManager(default_router_cls=None):\n \"\"\"Init hook that initializes API auth manager.\"\"\"\n global API_AUTH_MGR\n\n if not default_router_cls:\n default_router_name = config.CONFIG[\"API.DefaultRouter\"]\n default_router_cls = _GetRouterClass(default_router_name)\n\n filepath = config.CONFIG[\"API.RouterACLConfigFile\"]\n if filepath:\n logging.info(\"Using API router ACL file: %s\", filepath)\n with io.open(filepath, \"r\") as filedesc:\n API_AUTH_MGR = APIAuthorizationManager.FromYaml(filedesc.read(),\n default_router_cls)\n else:\n API_AUTH_MGR = APIAuthorizationManager([], default_router_cls)\n\n\ndef _GetRouterClass(router_name: Text) -> Type[api_call_router.ApiCallRouter]:\n try:\n return api_call_router_registry.GetRouterClass(router_name)\n except KeyError:\n message = \"Router '{}' does not exist\".format(router_name)\n raise ApiCallRouterNotFoundError(message)\n","repo_name":"google/grr","sub_path":"grr/server/grr_response_server/gui/api_auth_manager.py","file_name":"api_auth_manager.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":4531,"dataset":"github-code","pt":"57"} +{"seq_id":"37847058644","text":"import cv2\nimport os\nimport sys\nimport xml.etree.ElementTree as ET\n\nOUTP_DIR = '/home/fangsh/tianchi/tianchi_dataset/crop_data/crop_bbox'\nROOT_DIR = '/home/fangsh/tianchi/tianchi_dataset/draw_outp/drow_new'\nXML_ROOT = '/home/fangsh/tianchi/tianchi_dataset/data_megred/train'\n\n\ndef estimate_cur_img_with_bbox(a1,b1,bboxes):\n '''\n\n :param a1: image height range\n :param b1: image width range\n :param bboxes: bboxes\n :return: save flag\n '''\n save_flag = False\n for box in bboxes:\n a2 = list(range(box[1],box[3]))\n b2 = list(range(box[0],box[2]))\n tmp1 = [var for var in a1 if var in a2]\n tmp2 = [var for var in b1 if var in b2]\n if tmp1 and tmp2:\n save_flag = True\n break\n return save_flag\n\ndef main():\n\n cls_info = []\n file_dir = []\n\n\n for root, sub, files in os.walk(ROOT_DIR):\n cls_info.extend(sub)\n for file in files:\n file_dir.append(os.path.join(root, file))\n #print(cls_info)\n\n image_dir = []\n for i in file_dir:\n #print(i.split('/')[-1][-4:])\n if i.split('/')[-1][-4:] == '.jpg':\n image_dir.append(i)\n\n for idx,cur_img_dir in enumerate(image_dir):\n sys.stdout.write('\\r >> Convert to %s %d/%d' % (i, idx + 1, len(image_dir)))\n\n cur_cls = cur_img_dir.split('/')[-2]\n\n if cur_cls == '正常':\n cur_name = cur_img_dir.split('/')[-1][0:-4]\n image = cv2.imread(cur_img_dir)\n for i in range(5):\n for j in range(5):\n crop_img = image[j*384:(j+1)*384,i*512:(i+1)*512]\n cur_outp_dir = os.path.join(OUTP_DIR,cur_cls,cur_name)\n crop_img_name = cur_name+'_%d_%d.jpg'%(i,j)\n if not os.path.exists(cur_outp_dir):\n os.makedirs(cur_outp_dir)\n\n cv2.imwrite(os.path.join(cur_outp_dir,crop_img_name),crop_img)\n else:\n cur_name = cur_img_dir.split('/')[-1][0:-4]\n cls = cur_img_dir.split('/')[-2]\n image = cv2.imread(cur_img_dir)\n\n xml_dir = os.path.join(XML_ROOT,cls,cur_name+'.xml')\n\n tree = ET.parse(xml_dir)\n root = tree.getroot()\n bboxes = []\n\n for obj in root.findall('object'):\n\n bbox = obj.find('bndbox')\n bboxes.append((int(bbox[0].text),\n int(bbox[1].text),\n int(bbox[2].text),\n int(bbox[3].text)))\n for i in range(5):\n for j in range(5):\n crop_img = 
image[j*384:(j+1)*384,i*512:(i+1)*512]\n cur_outp_dir = os.path.join(OUTP_DIR,cur_cls,cur_name)\n crop_img_name = cur_name+'_%d_%d.jpg'%(i,j)\n if not os.path.exists(cur_outp_dir):\n os.makedirs(cur_outp_dir)\n\n a1 = list(range(j*384,(j+1)*384))\n b1 = list(range(i*512,(i+1)*512))\n save_flag = estimate_cur_img_with_bbox(a1,b1,bboxes)\n if save_flag:\n cv2.imwrite(os.path.join(cur_outp_dir,crop_img_name),crop_img)\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"Fan9/tianchi_competition","sub_path":"scripts/crop_image.py","file_name":"crop_image.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"8950115561","text":"# encoding: utf-8\n\n\"\"\"Segmetación por búsqueda de contornos \"\"\"\n\nfrom __future__ import division\n\nimport cv2\nimport numpy as np\n# from itertools import izip\nfrom vi.util import generar_umbrales, hallar_contornos\n\n\nclass Segmentacion(object):\n \"\"\"Segmentación con filtro por área media \"\"\"\n\n def __init__(self):\n \"\"\"Agrega el procesador \"\"\"\n super(Segmentacion, self).__init__()\n self._procesadores = []\n\n def ejecutar(self, baldosas):\n \"\"\"Recibe una lista de baldosas para ser pre-procesadas y mejorar\n la lectura de los caracteres. Retorna la lista de baldosas procesadas.\n \"\"\"\n return [self._procesar(baldosa) for baldosa in baldosas]\n\n def _procesar(self, baldosa):\n \"\"\"Aplica los pre-procesadores a la baldosa. \"\"\"\n procesada = baldosa\n for procesador in self._procesadores:\n procesada = procesador(procesada)\n return procesada\n\n @staticmethod\n def _filtro_area(baldosa):\n \"\"\"En base al promedio, retira elementos que podrían no ser letras\n\n Returns:\n baldosa_gris, filtrados: Baldosa en escala de grises y contornos\n filtrados por su área.\n \"\"\"\n gris = cv2.cvtColor(baldosa, cv2.COLOR_BGR2GRAY)\n umbrales = generar_umbrales(baldosa, 13, 6, 31, 4)\n contornos = hallar_contornos(umbrales,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_NONE)\n filtrados = []\n if len(contornos) > 5:\n areas = [cv2.contourArea(c) for c in contornos]\n d_areas = np.array(areas)\n mu = d_areas.mean()\n sigma = d_areas.std()\n for contorno, area in zip(contornos, areas):\n if mu - sigma < area < mu - 0.09*sigma:\n filtrados.append(contorno)\n # tmp = baldosa.copy()\n # dibujar_contornos(tmp, filtrados)\n # cv2.imshow('seg', tmp)\n # cv2.waitKey()\n\n return gris, filtrados\n\n # mascara = np.zeros(gris.shape, np.uint8)\n # cv2.drawContours(mascara, filtrados, -1, 255, 1)\n\n # shape = (gris.shape[0] + 10, gris.shape[1] + 10)\n # marco = np.zeros(shape, np.uint8)\n # marco[5:gris.shape[0]+5, 5:gris.shape[1]+5] = mascara\n\n # # _, binarizado_base = cv2.threshold(gris, 90, 255, cv2.THRESH_BINARY)\n # # diff = cv2.absdiff(binarizado_base, mascara)\n # # cv2.imshow('video', diff)\n # # cv2.waitKey()\n\n # # shape = tuple(sum(x) for x in izip(baldosa.shape, (10, 10, 0)))\n # # bald = np.zeros(shape, np.uint8)\n # # bald[:baldosa.shape[0], :baldosa.shape[1]] = baldosa\n # # masc = cv2.cvtColor(marco, cv2.COLOR_GRAY2BGR)\n # # cv2.imshow('video', np.hstack((bald, masc)))\n # # cv2.waitKey()\n\n # return marco\n","repo_name":"gcca/urb-vi","sub_path":"vi/metodo/segmentacion/contorno_base.py","file_name":"contorno_base.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35055048529","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 28 
13:04:49 2023\n\n@author: bverr\n\"\"\"\nimport numpy as np\nfrom ast import literal_eval\nimport re\n\ndef savetxt(fname, X, fmt='%.18e',\n            delimiter=' ', newline='\\n', comments='#', \n            axisdata=None, axisnames=None,\n            header='',\n            footer=''):\n    if not fname.endswith('.txt'):\n        fnametxt = fname + '.txt'\n    else: \n        fnametxt = fname\n    \n    with open(fnametxt, 'w') as f:\n        shape = X.shape\n        f.write(f'{comments}{shape}\\n')\n        \n        if axisnames is None:\n            axisnames = np.full( (len(shape), ), None )\n        if axisdata is None:\n            axisdata = np.full( (len(shape), ), None ) \n        \n        for axis in range(len(shape)):\n            axisname = ''\n            if axis >= len(axisnames) or axisnames[axis] is None:\n                axisname = str(axis)\n            else:\n                axisname = axisnames[axis]\n            \n            f.write(f'{comments}axis {axis} ({axisname})\\n')\n            \n            \n            if axis >= len(axisdata) or axisdata[axis] is None:\n                axisdata_ = np.arange(shape[axis])\n            else:\n                if len(axisdata[axis]) != shape[axis]:\n                    raise IndexError(f'Error at axis {axis}. Provided axisdata does not match shape of array. {len(axisdata[axis])} != {shape[axis]}')\n                axisdata_ = axisdata[axis]\n            axisdata_ = np.array(axisdata_)\n            axisdata_ = np.array2string(axisdata_, separator=',')\n            f.write(f'{comments}{axisdata_}\\n')\n        \n        if header != '':\n            f.write(f'{comments}{header}\\n')\n        \n        prev_index = np.zeros(len(shape), dtype=np.int32)\n        newline_multiplicity = np.flip(np.arange(len(shape)))\n        for index, x in np.ndenumerate(X):\n            index_ = np.array(index)\n            \n            newline_copies = int( \n                newline_multiplicity.dot( np.clip(index_ - prev_index, 0, np.inf) ) \n            )\n            string = newline_copies*str(newline) + \\\n                     fmt%x + \\\n                     delimiter\n            f.write(string)\n            \n            prev_index = index_\n        \n        if footer != '':\n            f.write(f'\\n{comments}{footer}')\n        \ndef loadtxt(fname, comments='#', **kwargs):\n    axisnames = []\n    axisdata = []\n    with open(fname, 'r') as f:\n        shape = f.readline().replace(comments, '')\n        shape = literal_eval(shape)\n        for axis in range(len(shape)):\n            axisname = f.readline()\n            \n            # no space after {comments}: match the f'{comments}axis ...' header written by savetxt\n            axisname = re.search(f'{comments}axis \\d+ \\((.+)\\)', axisname)\n            \n            if axisname is None:\n                axisname = ['',str(axis)]\n            \n            axisnames.append( axisname[1] )\n            \n            axisdata_ = f.readline().replace(comments, '')\n            axisdata_ = literal_eval(axisdata_)\n            axisdata.append(axisdata_)\n            \n    return ( np.loadtxt(fname, **kwargs).reshape(shape), axisdata, axisnames)\n\ndef genfromtxt(fname, comments='#', **kwargs):\n    axisnames = []\n    axisdata = []\n    with open(fname, 'r') as f:\n        shape = f.readline().replace(comments, '')\n        shape = literal_eval(shape)\n        for axis in range(len(shape)):\n            axisname = f.readline()\n            axisname = re.search(f'{comments}axis \\d+ \\((.+)\\)', axisname)\n            if axisname is None:\n                axisname = ['',str(axis)]\n            \n            axisnames.append( axisname[1] )\n            \n            axisdata_ = f.readline().replace(comments, '')\n            axisdata_ = literal_eval(axisdata_)\n            axisdata.append(axisdata_)\n            \n    return ( np.genfromtxt(fname, **kwargs).reshape(shape), axisdata, axisnames)\n","repo_name":"bram98/ndsave","sub_path":"ndsave.py","file_name":"ndsave.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"34809645083","text":"# Python3 program to\n# demonstrate instantiating\n# a class\nclass Dog:\n    # A simple class\n    # attribute\n    attr1 = \"mammal\"\n    attr2 = \"dog\"\n\n    # A sample method\n    def fun(self):\n        print(\"I'm a\", self.attr1)\n        print(\"I'm a\", self.attr2)\n# Driver code\n# Object instantiation\nr = Dog()\n\n# Accessing class attributes\n# and method through 
objects\nprint(r.attr1)\nr.fun()\n\nr1 = Dog()\nprint(r1.attr2)\n","repo_name":"drpoudel3785/pythondjango","sub_path":"Dog.py","file_name":"Dog.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"180411589","text":"\"\"\"Acoustic analysis songs plugin.\"\"\"\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom gi.repository import GLib\nfrom quodlibet import _\nfrom quodlibet.plugins.songsmenu import SongsMenuPlugin\n\nDBusGMainLoop(set_as_default=True)\n\n\ndef no_op(*args, **kwargs):\n pass\n\n\ndef get_title(song):\n \"\"\"Return lowercase UNICODE title of song.\"\"\"\n version = song.comma(\"version\").lower()\n title = song.comma(\"title\").lower()\n if version:\n return \"%s (%s)\" % (title, version)\n return title\n\n\nclass AnalyzeSongsPlugin(SongsMenuPlugin):\n\n \"\"\"Acoustic song analysis.\"\"\"\n\n PLUGIN_ID = \"Acoustic Analysis\"\n PLUGIN_NAME = _(\"Autoqueue Analyze\") # noqa\n PLUGIN_DESC = _(\"Perform acoustic analysis of the selected songs.\") # noqa\n PLUGIN_ICON = \"gtk-find-and-replace\"\n PLUGIN_VERSION = \"0.1\"\n\n def __init__(self, *args):\n SongsMenuPlugin.__init__(self, *args)\n bus = dbus.SessionBus()\n sim = bus.get_object(\n 'org.autoqueue', '/org/autoqueue/Similarity')\n self.similarity = dbus.Interface(\n sim, dbus_interface='org.autoqueue.SimilarityInterface')\n\n def plugin_songs(self, songs):\n \"\"\"Add the work to the coroutine pool.\"\"\"\n print('added {} songs'.format(len(songs)))\n GLib.idle_add(self.doit, songs)\n\n def doit(self, songs):\n filenames = []\n for song in songs:\n filename = song('~filename')\n try:\n filename.encode('utf-8')\n except UnicodeEncodeError:\n try:\n filename = filename.decode('utf-8')\n except (UnicodeDecodeError, AttributeError):\n print(\n \"Could not figure out filename encoding: %r\" %\n song('~filename'))\n continue\n filenames.append(filename)\n self.similarity.analyze_tracks(\n filenames, reply_handler=no_op, error_handler=no_op)\n","repo_name":"thisfred/autoqueue","sub_path":"analyze_songs.py","file_name":"analyze_songs.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"} +{"seq_id":"74664448817","text":"\"\"\"Config flow for StecaGrid integration.\"\"\"\r\nimport logging\r\n\r\nimport voluptuous as vol\r\n\r\nfrom homeassistant import config_entries, core, exceptions\r\n\r\n#from .const import DOMAIN # pylint:disable=unused-import\r\nfrom .const import (CONF_HOST, CONF_PORT, DOMAIN)\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nDATA_SCHEMA = vol.Schema({\"inverter_host\": str, \"inverter_port\": int})\r\n\r\n\r\nasync def validate_input(hass: core.HomeAssistant, data):\r\n \"\"\"Validate the user input allows us to connect.\r\n\r\n Data has the keys from DATA_SCHEMA with values provided by the user.\r\n \"\"\"\r\n\r\n # Return info that you want to store in the config entry.\r\n inverter_host = data[\"inverter_host\"]\r\n inverter_port = data[\"inverter_port\"]\r\n return {\"title\": f\"StecaGrid {inverter_host}\"}\r\n\r\n\r\nclass ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\r\n \"\"\"Handle a config flow for StecaGrid.\"\"\"\r\n\r\n VERSION = 1\r\n \r\n CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL\r\n\r\n async def async_step_user(self, user_input=None):\r\n \"\"\"Handle the initial step.\"\"\"\r\n errors = {}\r\n if user_input is not None:\r\n try:\r\n inverter_host = 
user_input[CONF_HOST]\r\n                inverter_port = user_input[CONF_PORT]\r\n                #inverter_host = user_input[\"inverter_host\"]\r\n                #inverter_port = user_input[\"inverter_port\"]\r\n                info = f\"StecaGrid {inverter_host}\"\r\n                return self.async_create_entry(title=info, data=user_input)\r\n            except Exception: # pylint: disable=broad-except\r\n                _LOGGER.exception(\"Unexpected exception\")\r\n                errors[\"base\"] = \"unknown\"\r\n\r\n        return self.async_show_form(\r\n            step_id=\"user\", data_schema=DATA_SCHEMA, errors=errors\r\n        )\r\n\r\n\r\nclass CannotConnect(exceptions.HomeAssistantError):\r\n    \"\"\"Error to indicate we cannot connect.\"\"\"\r\n","repo_name":"MichaelOE/homeassistant-stecagrid","sub_path":"custom_components/stecagrid/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"14831327185","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 28 13:50:20 2021\n\n@author: wangkun\n\"\"\"\n\nimport numpy as np\nfrom scipy.io import loadmat\nimport random\nimport display_data\nimport nn\n\ninput_layer_size = 400 \nhidden_layer_size = 25\nnum_labels = 10\n\n# Part 1: Loading and visualizing data\nprint('\\nPart1: Loading and Visualizing Data ...\\n')\n\n# load matlab mat format data from the file ex4data1.mat\ndata = loadmat('ex4data1.mat')\n\nX = data['X']\nm = X.shape[0]\n\ny = data['y'].reshape((m, 1))\n\nyt = np.zeros((y.shape[0], num_labels))\nyt[np.arange(yt.shape[0]), y.reshape((1, y.shape[0])) - 1] = 1\n\n# Randomly select 100 data points to display\nprint('\\n Randomly select 100 examples to display')\nrand_indices = random.sample(range(m),100)\n\nsel = X[rand_indices, :]\n\ndisplay_data.display_data(sel)\n\nwait = input('\\nProgram paused. Press Enter to continue')\n\n# Part 2: Loading parameters\nprint('\\nPart2: Loading Saved Neural Network Parameters ...')\n\nweights = loadmat('ex4weights.mat')\n\ntheta1 = weights['Theta1']\ntheta2 = weights['Theta2']\n\ntheta1_shape = theta1.shape\ntheta2_shape = theta2.shape\n\n# unroll parameters\nnn_params = np.concatenate((theta1.reshape(theta1_shape[0] * theta1_shape[1], 1), \n                            theta2.reshape(theta2_shape[0] * theta2_shape[1], 1)), axis = 0)\n\n#print(nn_params.shape)\n\n#part 3: Compute cost function (feedforward)\nprint('\\nPart3: Feedforward Using Neural Network ...')\nlambda_parameter = 0.0\nJ = nn.cost_func(nn_params, input_layer_size, hidden_layer_size, num_labels,\n                 X, yt, lambda_parameter)\n\nprint('\\n Cost at parameters (loaded from ex4weights): ', J)\nprint(' (this value should be about 0.287629.)')\n\n#part 4: Implement Regularization\nprint('\\nPart 4: Checking Cost Function (Regularization) ...')\nlambda_parameter = 1.0\nJ = nn.cost_func(nn_params, input_layer_size, hidden_layer_size, num_labels,\n                 X, yt, lambda_parameter)\n\nprint('\\n Cost at parameters (loaded from ex4weights): ', J)\nprint(' (this value should be about 0.383770.)')\n\n#part 5: Sigmoid Gradient\nprint('\\nPart 5: Sigmoid Gradient')\nprint('\\n Evaluating sigmoid gradient ...')\n\ntest_points = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])\ng = nn.sigmoid_gradient(test_points)\n\nprint('\\n Sigmoid gradient evaluated at [-1, -0.5, 0, 0.5, 1]:', g)\n\n#part 6: Initializing Parameters\nprint('\\nPart 6: Initializing Neural Network Parameters ...')\n\ninitial_theta1 = nn.rand_initialize_weights(input_layer_size, hidden_layer_size)\ninitial_theta2 = nn.rand_initialize_weights(hidden_layer_size, num_labels)\n\ninitial_nn_params = np.concatenate((initial_theta1.flatten(), initial_theta2.flatten()), axis = 0)\n\n#part 7: Implement Backpropagation\nprint('\\nPart 7: Checking Backpropagation ...')\nnn.check_nn_gradient()\n\n#part 8: Implement Regularization\nprint('\\nPart 8: Checking Backpropagation with Regularization ...')\nlambda_parameter = 3.0\nnn.check_nn_gradient(lambda_parameter)\n\ndebug_J, debug_grad = nn.cost_func(nn_params, input_layer_size, hidden_layer_size, num_labels, X, yt, lambda_parameter)\nprint('\\n Cost function at (fixed) debugging parameters with lambda = 3.0 is: ', debug_J)\nprint('\\n This value should be 0.576051.')\n\n#part 9: Training NN\nprint('\\nPart 9: Training Neural Network ...')\n\nlambda_parameter = 1.0\nnn_params, cost = nn.train(input_layer_size, hidden_layer_size, num_labels, X, yt, initial_nn_params, lambda_parameter)\n\ntheta1 = nn_params[0:(input_layer_size + 1) * hidden_layer_size].reshape((hidden_layer_size, input_layer_size + 1)) \ntheta2 = nn_params[(input_layer_size + 1) * hidden_layer_size:].reshape((num_labels, hidden_layer_size + 1))\n\n\n#part 10: Implement Prediction\npred = nn.predict(theta1, theta2, X)\n\nprint('\\nPart 10: Training Set Accuracy: ', np.mean((pred == y) * 1.0)) \n\n\n\n","repo_name":"kk8576/Coursera_Machine_Learning","sub_path":"ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"17187939277","text":"import os\nimport argparse\n\nparser = argparse.ArgumentParser(description='driver script for run.py')\nparser.add_argument('--size', type=str, default='test',\n                    help='size of the benchmark suite')\nargs = parser.parse_args()\n\nprogs = [\n\t# 
'blackscholes',\n\t'bodytrack',\n\t'canneal',\n\t'facesim',\n\t'ferret',\n\t'fluidanimate',\n\t'freqmine',\n\t# 'raytrace',\n\t'streamcluster',\n\t'vips',\n\t# 'dedup',\n\t# 'swaptions',\n\t'x264',\n]\n\n# progs = [ 'bodytrack']\n\n\ntrace_folder = '../output_trace/{}/'.format(args.size)\n# compile\n# os.system(\"g++ -std=c++11 llc_simulation.cpp LLC.cpp CE_Belady.cpp -o llc_simulation\")\n# os.system(\"gcc llc-belady.c -o llc-belady\")\n# os.system(\"gcc llc-lru.c -o llc-lru\")\n\n# result_folder = '../result/{}/'.format(args.size)\n\n# try:\n# \tos.mkdir(result_folder)\n# except:\n# \tpass\n\n\n# command =''\n# for p in progs:\n# \tcommand += 'time ./llc_simulation {}LLCtrace_{}.out {}LLCtrace_{}_hitfile.out {}LLCtrace_{}_sharefile.out {}LLCtrace_{}_reusefile.out & '.format(trace_folder, p,\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult_folder, p, \n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult_folder, p, \n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult_folder, p)\n\n# command += 'wait'\n# os.system(command)\n\n# for p in progs:\n# \tcommand = './llc-lru {}LLCtrace_{}.out >> lru-{}-stats'.format(trace_folder, p, args.size)\n# \tprint(command)\n# \tos.system(command)\n\n\nfor l in range(12,30,3):\n\tfor n in range(5,35,5):\n\t\tfor d in [200,500,1000,1500,2000,5000,10000]:\n\t\t\tcommand = './llc-prob-share {}LLCtrace_{}.out {} {} {} >> prob-share-{}-stats'.format(trace_folder, 'canneal', l, n, d, args.size)\n\t\t\tos.system(command)\n","repo_name":"abhikr360/Hierarchical-Cache-Simulator","sub_path":"script-llc_run.py","file_name":"script-llc_run.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"2139256141","text":"import os\nimport argparse\nimport glob\nimport pickle\nfrom models import *\nfrom local_datasets import *\nfrom metrics.run_metric import *\nfrom utils.constants import *\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--model-name', type=str, default='mbart50-m2m',\n choices=[\"mbart50-m2m\", \"m2m-100-base\", \"m2m-100-large\", \"m2m-124\", \"fair-wmt20\",\"GPT-3\", \"Helsinki-NLP\", \"google_drive\"],\n help='language model name')\n parser.add_argument('--dataset', type=str, default='flores_101',\n choices=[\"wmt14\", \"wmt15\", \"wmt16\", \"wmt17\", \"wmt18\", \"wmt19\", \"wmt20-news\", \"wmt20-bio\",\"flores_101\"],\n help='dataset name')\n parser.add_argument('--start', type=float)\n parser.add_argument('--end', type=float)\n parser.add_argument('--skip-src', type=str)\n parser.add_argument('--skip-tgt', type=str)\n parser.add_argument('--corpus-level', type=bool, default=True,\n help='corpus-level or sample-level')\n parser.add_argument('--cuda', type=str, default='1',\n help='cuda index')\n return parser\n\nclass Features(object):\n\n def __init__(self, args: dict, src_lang, tgt_lang):\n self.cuda = args.cuda\n self.model_name = args.model_name\n self.dataset_name = args.dataset\n self.src_lang, self.tgt_lang = src_lang, tgt_lang\n self._corpus_level = args.corpus_level\n if self.model_name == \"fair-wmt20\":\n torch.hub.set_dir('tmp_model')\n print(torch.hub.list('pytorch/fairseq'))\n model = dict()\n model['en-ta'] = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-ta').to(f'cuda:{self.cuda}')\n model['ta-en'] = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.ta-en').to(f'cuda:{self.cuda}')\n self.model = model\n else:\n self.model = 
load_model(self.model_name, self.cuda)\n\n\n def sub_process(self, _test=False, _corpus_level=False):\n split = \"devtest\" if _test else \"dev\"\n split_name = \"test\" if _test else \"train\"\n src_ref_train, tgt_ref_train = read_data(self.dataset_name, self.src_lang, self.tgt_lang, split)\n dir_path = f\"local_{self.dataset_name}/{self.src_lang}_{self.tgt_lang}/{self.model_name}\"\n # source -> target\n try:\n med_tgt_train = pickle.load(open(f'{dir_path}/{split_name}_trans_from_tgt.pkl',\"rb\"))\n except:\n print(f\"Can't find {dir_path}/{split_name}_trans_from_tgt.pkl\")\n med_tgt_train = model_translate(self.model, src_ref_train, self.src_lang, self.tgt_lang)\n\n # target -> source\n try:\n med_src_train = pickle.load(open(f'{dir_path}/{split_name}_trans_from_src.pkl',\"rb\"))\n except:\n print(f\"Can't find {dir_path}/{split_name}_trans_from_src.pkl\")\n med_src_train = model_translate(self.model, tgt_ref_train, self.tgt_lang, self.src_lang)\n\n direct_score = Scores(src_ref_train, med_src_train, tgt_ref_train, med_tgt_train,\n self.src_lang, self.tgt_lang,\n self.dataset_name, self.model_name, [\"spbleu\",\"bleu\",\"chrf\",\"bertscore\"],\n _test=_test, _corpus_level=self._corpus_level)\n direct_score.store_translation_results()\n direct_score.store_score_results()\n\n # source -> target -> source\n try:\n src_pred_train = pickle.load(open(f'{dir_path}/{split_name}_self_src.pkl',\"rb\"))\n except:\n print(f\"Can't find {dir_path}/{split_name}_self_src.pkl\")\n src_pred_train = model_translate(self.model,\n med_tgt_train, self.tgt_lang,\n self.src_lang)\n\n #target -> source -> target\n try:\n tgt_pred_train = pickle.load(open(f'{dir_path}/{split_name}_self_tgt.pkl',\"rb\"))\n except:\n print(f\"Can't find {dir_path}/{split_name}_self_tgt.pkl\")\n tgt_pred_train = model_translate(self.model,\n med_src_train, self.src_lang,\n self.tgt_lang)\n\n reverse_score = Scores(src_ref_train, src_pred_train, tgt_ref_train, tgt_pred_train,\n self.src_lang, self.tgt_lang,\n self.dataset_name, self.model_name, [\"spbleu\",\"bleu\",\"chrf\",\"bertscore\"],\n _test=_test, _self=True, _corpus_level=self._corpus_level)\n reverse_score.store_translation_results()\n reverse_score.store_score_results()\n\n def run(self):\n # try:\n # print(\"Start Preparing Training Data!\")\n # self.sub_process()\n # except:\n # passd\n print(\"Start Preparing Test Data!\")\n self.sub_process(_test=True)\n\nif __name__ == \"__main__\":\n args = get_args().parse_args()\n flg = False\n model_lang_dict = {\n \"m2m-100-large\": M2M100_LANGS,\n \"m2m-100-base\": M2M100_LANGS,\n \"mbart50-m2m\": MBART50_LANGS,\n \"Helsinki-NLP\": HELSINKI_LANGS\n }\n m2m_langs = [lang.split(\"_\")[0] for lang in model_lang_dict[args.model_name]]\n if args.dataset == \"flores_101\":\n lang_pairs = [(x, y) for x, y in itertools.combinations(REGION_1, r=2) if x != y] + list(itertools.product(REGION_1, REGION_2)) + list(itertools.product(REGION_2, REGION_1)) + [(x, y) for x, y in itertools.combinations(REGION_2, r=2) if x != y]\n folders=glob.glob(f\"local_flores_101/*\")\n for src_lang, tgt_lang in lang_pairs[int(args.start * len(lang_pairs)):int(args.end * len(lang_pairs))]:\n try:\n files = glob.glob(f\"local_flores_101/{src_lang}_{tgt_lang}/{args.model_name}/*\")\n if args.skip_src == 'none' and args.skip_tgt == 'none':\n flg = True\n if flg and src_lang in m2m_langs and tgt_lang in m2m_langs and len(files) > 0:# and len(files) < 44:# and f\"local_flores_101/{src_lang}_{tgt_lang}\" not in folders and 
f\"local_flores_101/{tgt_lang}_{src_lang}\" not in folders:\n print(src_lang, tgt_lang)\n print(\"======================\")\n features = Features(args, src_lang, tgt_lang)\n features.run()\n print()\n if src_lang == args.skip_src and tgt_lang == args.skip_tgt:\n flg = True\n except:\n pass\n elif args.dataset == \"wmt20-news\":\n files = glob.glob(\"data/wmt20/sources/*\")\n lang_pairs = [((file.split(\"/\")[-1].split(\"-\")[1][:2]),(file.split(\"/\")[-1].split(\"-\")[1][2:])) for file in files]\n for src_lang, tgt_lang in lang_pairs[int(args.start * len(lang_pairs)):int(args.end * len(lang_pairs))]:\n\n files = glob.glob(f\"local_wmt20-news/{src_lang}_{tgt_lang}/{args.model_name}/*\")\n if args.skip_src == 'none' and args.skip_tgt == 'none':\n flg = True\n print(src_lang, tgt_lang, flg)\n print(len(files))\n if flg and len(files) < 24 and ((f\"{src_lang}-{tgt_lang}\" in m2m_langs and f\"{tgt_lang}-{src_lang}\" in m2m_langs) or (src_lang in m2m_langs and tgt_lang in m2m_langs)):\n print(src_lang, tgt_lang)\n print(\"======================\")\n features = Features(args, src_lang, tgt_lang)\n features.run()\n print()\n if src_lang == args.skip_src and tgt_lang == args.skip_tgt:\n flg = True\n\t\n elif args.dataset == \"wmt20-bio\":\n files = glob.glob(\"data/wmt20bio/sources/*\")\n lang_pairs = [((file.split(\"/\")[-1].split(\"_\")[0][:2]),(file.split(\"/\")[-1].split(\"-\")[0][3:5])) for file in files]\n print(lang_pairs)\n for src_lang, tgt_lang in lang_pairs[int(args.start * len(lang_pairs)):int(args.end * len(lang_pairs))]:\n\n files = glob.glob(f\"local_wmt20-bio/{src_lang}_{tgt_lang}/{args.model_name}/*\")\n if args.skip_src == 'none' and args.skip_tgt == 'none':\n flg = True\n print(src_lang, tgt_lang, flg)\n print(len(files))\n # and len(files) < 20\n if flg and ((f\"{src_lang}-{tgt_lang}\" in m2m_langs and f\"{tgt_lang}-{src_lang}\" in m2m_langs) or (src_lang in m2m_langs and tgt_lang in m2m_langs)):\n print(src_lang, tgt_lang)\n print(\"======================\")\n features = Features(args, src_lang, tgt_lang)\n features.run()\n print()\n if src_lang == args.skip_src and tgt_lang == args.skip_tgt:\n flg = True\n\t\n elif args.model_name == \"fair-wmt20\":\n lang_pairs = [('ta','en'),('en','ta')]\n for src_lang, tgt_lang in lang_pairs[int(args.start*len(lang_pairs)):int(args.end*len(lang_pairs))]:\n print(src_lang, tgt_lang)\n print(\"======================\")\n features = Features(args, src_lang, tgt_lang)\n features.run()\n print()\n","repo_name":"terryyz/rtt-rethinking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29926832395","text":"from __future__ import unicode_literals\nimport os\nimport subprocess\n\nfrom .common import PostProcessor\nfrom ..compat import compat_shlex_split\nfrom ..utils import (\n check_executable,\n encodeArgument,\n shell_quote,\n PostProcessingError,\n)\n\n\nclass SponSkrubPP(PostProcessor):\n _temp_ext = 'spons'\n _def_args = []\n _exe_name = 'sponskrub'\n\n def __init__(self, downloader, path='', args=None, ignoreerror=False, cut=False, force=False):\n PostProcessor.__init__(self, downloader)\n self.force = force\n self.cutout = cut\n self.args = ['-chapter'] if not cut else []\n self.args += self._configuration_args(self._def_args) if args is None else compat_shlex_split(args)\n self.path = self.get_exe(path)\n\n if not ignoreerror and self.path is None:\n if path:\n raise PostProcessingError('sponskrub 
not found in \"%s\"' % path)\n else:\n raise PostProcessingError('sponskrub not found. Please install or provide the path using --sponskrub-path.')\n\n def get_exe(self, path=''):\n if not path or not check_executable(path, ['-h']):\n path = os.path.join(path, self._exe_name)\n if not check_executable(path, ['-h']):\n return None\n return path\n\n def run(self, information):\n if self.path is None:\n return [], information\n\n if information['extractor_key'].lower() != 'youtube':\n self.to_screen('Skipping sponskrub since it is not a YouTube video')\n return [], information\n if self.cutout and not self.force and not information.get('__real_download', False):\n self._downloader.to_screen(\n '[sponskrub] Skipping sponskrub since the video was already downloaded. '\n 'Use --sponskrub-force to run sponskrub anyway')\n return [], information\n\n self.to_screen('Trying to %s sponsor sections' % ('remove' if self.cutout else 'mark'))\n if self.cutout:\n self._downloader.to_screen('WARNING: Cutting out sponsor segments will cause the subtitles to go out of sync.')\n if not information.get('__real_download', False):\n self._downloader.to_screen('WARNING: If sponskrub is run multiple times, unintended parts of the video could be cut out.')\n\n filename = information['filepath']\n temp_filename = filename + '.' + self._temp_ext + os.path.splitext(filename)[1]\n if os.path.exists(temp_filename):\n os.remove(temp_filename)\n\n cmd = [self.path]\n if self.args:\n cmd += self.args\n cmd += ['--', information['id'], filename, temp_filename]\n cmd = [encodeArgument(i) for i in cmd]\n\n if self._downloader.params.get('verbose', False):\n self._downloader.to_screen('[debug] sponskrub command line: %s' % shell_quote(cmd))\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n if p.returncode == 0:\n os.remove(filename)\n os.rename(temp_filename, filename)\n self.to_screen('Sponsor sections have been %s' % ('removed' if self.cutout else 'marked'))\n elif p.returncode == 3:\n self.to_screen('No segments in the SponsorBlock database')\n else:\n stderr = stderr.decode('utf-8', 'replace')\n msg = stderr.strip().split('\\n')[-1]\n raise PostProcessingError(msg if msg else 'sponskrub failed with error code %s!' 
% p.returncode)\n return [], information\n","repo_name":"rband709/yt-dlp1","sub_path":"youtube_dlc/postprocessor/sponskrub.py","file_name":"sponskrub.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"12300550910","text":"import pandas as pd\nimport statsmodels.api as sm\nfrom datetime import datetime\nimport matplotlib.pyplot as plt \nfrom statsmodels.tsa.seasonal import seasonal_decompose\nfrom statsmodels.tsa.stattools import adfuller\n#from pmdarima import auto_arima\nimport warnings\nimport numpy as np\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n#from nowcast_lstm.LSTM import LSTM\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\nfrom matplotlib import pyplot as plt\nimport logging\nlogging.getLogger('fbprophet').setLevel(logging.WARNING) \nimport argparse\n\nif __name__ == '__main__':\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--training',\n default='training_data.csv',\n help='input training data file name')\n parser.add_argument('--testing',\n default='testing_data.csv',\n help='input testing data file name')\n parser.add_argument('--output',\n default='output.csv',\n help='output file name')\n args = parser.parse_args(args=[])\n\n #讀資料 並整理成有時間的序列\n colnames=['open','high','low','close']\n df_temp = pd.read_csv( args.training, names=colnames)\n df_temp_1 = pd.read_csv( args.testing, names=colnames)\n df = pd.concat([df_temp,df_temp_1])\n df = df.reset_index()\n df = df.loc[:,[\"open\",\"high\",\"low\",\"close\"]]\n date = pd.date_range(start='2015/01/01',periods=len(df))\n date = pd.DataFrame(date)\n df[['date']] = date\n\n #透過增加特徵的方式使模型訓練能得到更多資訊\n def relative_strength_idx(df, n=14):\n close = df['close']\n delta = close.diff()\n delta = delta[1:]\n pricesUp = delta.copy()\n pricesDown = delta.copy()\n pricesUp[pricesUp < 0] = 0\n pricesDown[pricesDown > 0] = 0\n rollUp = pricesUp.rolling(n).mean()\n rollDown = pricesDown.abs().rolling(n).mean()\n rs = rollUp / rollDown\n rsi = 100.0 - (100.0 / (1.0 + rs))\n return rsi\n\n # SMA\n df['EMA_3'] = df['close'].ewm(3).mean().shift()\n df['EMA_7'] = df['close'].ewm(7).mean().shift()\n df['EMA_30'] = df['close'].ewm(30).mean().shift()\n\n # EMA\n df['SMA_3'] = df['close'].rolling(3).mean().shift()\n df['SMA_7'] = df['close'].rolling(7).mean().shift()\n df['SMA_30'] = df['close'].rolling(30).mean().shift()\n\n # RSI\n df['RSI'] = relative_strength_idx(df).fillna(0)\n\n # MACD\n EMA_12 = pd.Series(df['close'].ewm(span=12, min_periods=12).mean())\n EMA_26 = pd.Series(df['close'].ewm(span=26, min_periods=26).mean())\n df['MACD'] = pd.Series(EMA_12 - EMA_26)\n df['MACD_signal'] = pd.Series(df.MACD.ewm(span=9, min_periods=9).mean())\n\n\n df['y'] = df['open'].shift(-2)\n df = df.dropna(axis=0).reset_index(drop=True)\n\n #模型訓練\n features = ['SMA_3','SMA_7','SMA_30','EMA_3','EMA_7','EMA_30','RSI','MACD','MACD_signal']\n df_train = df[:-len(df_temp_1)+1]\n df_valid = df[-len(df_temp_1)+1:]\n model_fbp = Prophet()\n for feature in features:\n model_fbp.add_regressor(feature)\n\n model_fbp.fit(df_train[[\"date\", \"y\"] + features].rename(columns={\"date\": \"ds\", \"y\": \"y\"}))\n forecast = model_fbp.predict(df_valid[[\"date\", \"y\"] + features].rename(columns={\"date\": \"ds\"}))\n df_valid[\"Forecast_Prophet\"] = 
forecast.yhat.values\n\n #轉成list進行投資策略的計算\n ans = df_valid[\"Forecast_Prophet\"].values.tolist()\n ans_l = [] \n tmp = 0\n for i in range(1,len(ans)-1):\n if ans[i+1]>ans[i]:\n if tmp != 1:\n ans_l.append(1)\n tmp += 1 \n else:\n ans_l.append(0)\n else:\n if tmp != -1:\n ans_l.append(-1)\n tmp += -1 \n else:\n ans_l.append(0)\n ans_l.append(0)\n ans_l.append(0)\n #存檔\n test = pd.DataFrame(data=ans_l)\n test.to_csv(\"output.csv\")\n\n\n","repo_name":"Jameswu1/DSAI_2022_HW2","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72777607537","text":"#!/usr/bin/env python\n# coding=utf-8\nimport json\nimport os\nimport sys\nimport logging\nfrom workflow.wf_func import (\n set_job_log\n)\n\nfrom workflow.wf_cls import (\n get_args_class,\n get_prompt_class,\n get_tokenizer_class,\n get_input_dataset_class,\n)\n\nfrom transformers import (\n HfArgumentParser,\n set_seed,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef run():\n # get work job config from wf_config_file\n wf_cls_config_file = 'config/class.json'\n\n with open(wf_cls_config_file, 'r') as f:\n wf_cls_config = f.read()\n workflow_cls_config = json.loads(wf_cls_config)\n logger.info(f\"WorkFlow config : {workflow_cls_config}\")\n\n # 获取作业参数配置\n model_arguments_cls, data_arguments_cls, training_arguments_cls = get_args_class(workflow_cls_config)\n parser = HfArgumentParser((model_arguments_cls, data_arguments_cls, training_arguments_cls))\n\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # 初始化log\n set_job_log(logger, training_args)\n logger.info(f\"Model arguments : {model_args} \\n \"\n f\"Data arguments : {data_args} \\n \"\n f\"Train arguments : {training_args} \\n\"\n )\n\n import socket\n logger.warning(\n f\"Host: {socket.gethostname()}, Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # 为了可以复现训练过程\n set_seed(training_args.seed)\n\n # 加载数据集\n # 获取 prompter 和 tokenizer\n # prompt_cls = get_prompt_class()\n tokenizer_cls = get_tokenizer_class(workflow_cls_config)\n\n data_cls = get_input_dataset_class(workflow_cls_config)\n data = data_cls(\n data_args=data_args,\n model_args=model_args,\n # ptompt_cls=prompt_cls,\n tokenizer_cls=tokenizer_cls\n ).load()\n print(data)\n\n# # 加载 Config\n# model_config = get_config(model_args)\n#\n# # 检查checkpoint\n# is_exist_checkpoint, checkpoint = check_and_get_checkpoint()\n#\n# # 加载模型\n# model = get_model(model_args, model_config, job_type)\n# if need_load_checkpint_first and is_exist_checkpoint:\n# load_checkpoint(checkpoint)\n#\n# # 模型包装 由训练方式决定\n# model = get_wrap_model(model, job_type, model_args)\n#\n# # 输入数据处理,输出生成data collator\n# data_collator = get_data_collator(prompt, tokenizer, preprocess_train, preprocess_eval, preprocess_test, data_args)\n#\n# # Initialize Trainer\n# trainer = initialize_train(model, training_args, data, tokenizer, data_collator, **wargs)\n# # Training\n# if training_args.do_train:\n# do_train(trainer)\n# if training_args.do_eval:\n# do_eval(trainer)\n# # Predict\n# if training_args.do_predict:\n# do_predict(trainer)\n# # return results\n\n\nif 
__name__ == \"__main__\":\n run()\n","repo_name":"mimosa1987/workflow_hf","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"70067275060","text":"from django import forms\nfrom .models import PersonalDocente, PersonalNoDocente\n\nclass PersonalDocenteForm(forms.ModelForm):\n \"\"\"Form definition for PersonalDocente.\"\"\"\n\n class Meta:\n \"\"\"Meta definition for PersonalDocenteform.\"\"\"\n\n model = PersonalDocente\n fields = (\n 'first_name',\n 'last_name',\n 'materia',\n 'perfil',\n 'id',\n )\n\nclass PersonalNoDocenteForm(forms.ModelForm):\n \"\"\"Form definition for PersonalDocente.\"\"\"\n\n class Meta:\n \"\"\"Meta definition for PersonalDocenteform.\"\"\"\n\n model = PersonalNoDocente\n fields = (\n 'first_name',\n 'last_name',\n 'oficina',\n 'perfil',\n 'id',\n )\n\n \n\n","repo_name":"Federicaalem/Practica1_final","sub_path":"django/Establecimiento/applications/registro/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26548289301","text":"class Node:\n def __init__(self,info,link =None):\n self.info = info\n self.link = link\n\nclass LinkedStack:\n def __init__(self):\n self.head= None\n self.current_ptr= None\n self.size = 0\n self.tempLst =[] # created just to test if LinkedStack working properly(Not compulsory)\n \n def __len__(self):\n return self.size\n \n def isEmpty(self):\n return self.size == 0\n \n def push(self,item):\n if self.head == None:\n newNode = Node(item)\n newNode.link = None\n self.head = newNode\n self.size += 1\n self.tempLst.append(item)\n else:\n newNode = Node(item)\n newNode.link = self.head\n self.head = newNode\n self.size += 1\n self.tempLst.append(item)\n return self.tempLst\n\n def top(self):\n if self.head == None:\n print('Stack empty ! cannot return top element')\n return self.head.info\n \n def pop(self):\n if self.isEmpty():\n print('Stack empty ! 
cannot pop')\n item = self.head.info\n temp_pointer= self.head\n self.head = self.head.link\n temp_pointer.link =None\n self.size -= 1\n return item\n\ns= LinkedStack()\nprint(s.push(4))\nprint(s.push(9))\nprint(s.push(8))\nprint(s.push(3))\nprint(s.pop())\nprint(s.pop())\nprint(s.pop())\nprint(s.pop())\n\n \n\n \n \n","repo_name":"anantvir/DataStructures_LinkedLists","sub_path":"stack_using_singly_linked_list.py","file_name":"stack_using_singly_linked_list.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29568473354","text":"#!/usr/bin/env python3\n\"\"\"PCA\"\"\"\n\nimport numpy as np\n\n\ndef pca(X, var=0.95):\n \"\"\"perform PCA on a dataset\"\"\"\n # Singular Value Decomposition:\n _, s, vh = np.linalg.svd(X)\n W = vh.T\n # Return the cumulative sum of the elements\n total_var = np.cumsum(s) / np.sum(s)\n nd = np.argmax(total_var >= var) + 1\n # var is the fraction of the variance that the PCA transformation should maintain\n return W[:,:nd]\n\n\nif __name__ == \"__main__\":\n np.random.seed(0)\n a = np.random.normal(size=50)\n b = np.random.normal(size=50)\n c = np.random.normal(size=50)\n d = 2 * a\n e = -5 * b\n f = 10 * c\n\n X = np.array([a, b, c, d, e, f]).T\n m = X.shape[0]\n X_m = X - np.mean(X, axis=0)\n W = pca(X_m)\n T = np.matmul(X_m, W)\n print(T)\n X_t = np.matmul(T, W.T)\n print(np.sum(np.square(X_m - X_t)) / m)\n","repo_name":"Merhbene/holbertonschool-machine_learning","sub_path":"unsupervised_learning/0x00-dimensionality_reduction/0-pca.py","file_name":"0-pca.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"43603930403","text":"import pyodbc # this libraries used for DB connection\r\nimport pandas as pd # this libraries is used to frame the data\r\nconn = pyodbc.connect('Driver={SQL Server};' # here giving all DB Details \r\n 'Server=DESKTOP-J3J03MN;'\r\n 'Database=Pybot;'\r\n 'Trusted_Connection=yes;')\r\ncursor = conn.cursor() # fetch the data from the result set of the queries\r\nconn.commit() # to save the current transaction\r\ncursor.execute('SELECT * FROM Pybot.dbo.rebot') # execute the table\r\nfor row in cursor:\r\n print(row)\r\n","repo_name":"mounishqr2610/PythonDBconnection-SQL","sub_path":"integrationSQL.py","file_name":"integrationSQL.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35523926223","text":"import re\n\nSAMPLE = re.compile(r'^[a-z]+@[a-z]+\\.[a-z]+')\nemail_1 = r'someone@geekbrains.ru'\nemail_2 = r'someone@geekbrainsru'\nsample = {'username': 'someone', 'domain': 'geekbrains.ru'}\n\n\ndef email_parse(email: str):\n\tif SAMPLE.match(email):\n\t\tprint({'username': email.split('@')[0], 'domain': email.split('@')[1]})\n\telse:\n\t\tmsg = f': wrong email: {email}'\n\t\traise ValueError(msg)\n\n\nemail_parse(email_1)\nemail_parse(email_2)\n","repo_name":"Peka97/GeekBrains-Study","sub_path":"Karasyov_Ivan_dz_8/task_8_1.py","file_name":"task_8_1.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71659955058","text":"class Solution:\n def numUniqueEmails(self, emails: List[str]) -> int:\n seen_emails = set()\n for email in emails:\n local, domain = email.split('@')\n new_local = ''\n for c in local:\n if c == '+':\n break\n elif c != 
'.':\n                    new_local += c\n        seen_emails.add(new_local + '@' + domain)\n        return len(seen_emails)","repo_name":"mcappucci1/LeetCode","sub_path":"python/929_unique_email_addresses.py","file_name":"929_unique_email_addresses.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"3033110995","text":"#ROOT -> LEFT -> RIGHT\n\n#1.check if the node is empty\n#2.print the contents of the node\n#3.traverse left subtree by recursively calling the preorder method\n#4.traverse right subtree by recursively calling the preorder method\n\n# Expected preorder: F, B, A, D, C, E, G, I, H\ntree = {\n    'val': 'F',\n    'left': {\n        'val': 'B',\n        'left': {\n            'val': 'A',\n            'left': {},\n            'right': {}\n        },\n        'right': {\n            'val': 'D',\n            'left': {\n                'val': 'C',\n                'left': {},\n                'right': {}\n            },\n            'right':{\n                'val': 'E',\n                'left': {},\n                'right': {}\n            }\n        }\n    },\n    'right': {\n        'val': 'G',\n        'left': {},\n        'right': {\n            'val': 'I',\n            'left': {},\n            'right': {\n                'val': 'H', \n                'left': {}, \n                'right': {}\n            }\n        }\n    },\n}\n\n\ndef preorder_traversal_iterative(root):\n    # push the root first; pop a node, visit it, then push right before left\n    # so the left subtree is processed first (dict nodes, {} marks an empty child)\n    arr, stack = [], [root]\n\n    while stack:\n        node = stack.pop()\n        if node:\n            arr.append(node['val'])\n            if node['right']: stack.append(node['right'])\n            if node['left']: stack.append(node['left'])\n    \n    return arr\n\n\ndef preorder_traversal_recursive(root):\n    if root:\n        print(root['val'])\n        preorder_traversal_recursive(root['left'])\n        preorder_traversal_recursive(root['right'])\n\npreorder_traversal_recursive(tree) # F B A D C E G I H\n\n","repo_name":"sbalayan1/practice_code","sub_path":"DSandAlgos/Data Strucutres/Trees/preorder_traversal.py","file_name":"preorder_traversal.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"19494885971","text":"\"\"\"\nThis file is designed to take in data about respiration phase\nfrom a force sensitive resistor, process the data, and output \nit so that it can be useful for spectrogram_func.py\n\nCreated Jul 2022\nby Trevor Jehl\n\"\"\"\nimport sys\nfrom scipy.ndimage.filters import uniform_filter1d\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# The size of the running average window\nRUNNING_WINDOW_SIZE = 15\n\ndef calcHz (millis):\n    \"\"\"\n    Given the first two lines of the respiration data, (ex. [92010, 92096])\n    calculate the sampling frequency in Hz (samples/s).\n    \"\"\"\n    # Convert milliseconds to seconds\n    seconds = [num / 1000 for num in millis]\n    \n    # Calculate sampling frequency\n    hz = len(seconds) / (seconds[1] - seconds[0])\n    return hz\n\n\ndef readRespData(filename):\n    \"\"\"\n    Given a file path & sample start and stop\n    times (in sample #), open the file and\n    return the breathing force data.\n    \"\"\"\n    vals = []\n    millis = []\n    \n    # Read FSR data into vals list\n    with open(filename) as f:\n        for line in f:\n            line = line.split(' ')\n            data = int(line[-1])\n            vals.append(data)\n    \n    # If millis info is in the .txt file,\n    # read the info and calculate samples/s (Hz)\n    with open(filename) as f:\n        for line in f:\n            lst = line.split(';')\n            \n            # If there is time data, add it to list\n            if len(lst) > 1 and int(lst[0]):\n                time = float(lst[0])\n                millis.append(time)\n            \n            # Otherwise, use a predefined random freq\n            else:\n                Hz = 11.7\n                print(f\"No time data found. 
Using Hz = {Hz}\")\n break\n\n # If recorded two timestamps, stop reading file\n # and calculate the sampling rate\n if len(millis) == 2:\n Hz = calcHz(millis)\n break\n \n # Calculate real time elapsed according to Hz\n time_list = []\n for i in range(len(vals)): \n time = i / Hz\n time_list.append(time)\n \n return vals, time_list\n\n\ndef runningMean(vals):\n \"\"\"\n Given the readings from the force sensitive resistor,\n compute a running average to normalize the data.\n Return a list of vals of the same dimension as the\n passed-in list.\n \"\"\"\n return uniform_filter1d(vals, size = RUNNING_WINDOW_SIZE)\n\n\ndef calcDifferential(vals, time_list):\n \"\"\"\n Given breathing vals, calculate the\n nth order differential of the data.\n \"\"\"\n # Calculate the nth order discrete differential of\n # respiration force data\n diff = np.diff(vals, n = 1)\n\n # New time array for discrete differential array -- taking\n # differential reduces size of array, new x vals are needed to graph\n diff_time_vals = []\n for i in range(1, len(vals)):\n end = time_list[i]\n start = time_list[i - 1]\n diff_time_vals.append(np.mean(end - start) + start)\n\n return diff, diff_time_vals\n\n\ndef findRespiratoryPhase(diff, diff_time_vals):\n \"\"\"\n Given the differential dataset, find the\n x vals where the y vals are > 0 [inspiration]\n or where the y vals are < 0 [expiration], return\n a dictionary with a list of tuples that represent\n the start and end of each phase of respiration.\n \"\"\"\n # A dictionary where all 'insp' data has y>0, 'exp' has y<0\n\n resp_phase = {'insp' : [], 'exp' : []}\n\n # Each 'insp' & 'exp' list contains tuples with the start\n # and end time (in seconds) of each respiration phase\n resp_startstop = {'insp' : [], 'exp' : []}\n\n # This for loop finds all the points where the 'diff' data\n # is above/below the y-axis (inspiration/expiration respectively)\n for i, num in enumerate(diff):\n if num > 0:\n\n if resp_phase['exp']:\n end = resp_phase['exp'][-1]\n start = resp_phase['exp'][0]\n resp_startstop['exp'].append([start, end])\n resp_phase['exp'] = []\n \n x_val = diff_time_vals[i]\n resp_phase['insp'].append(x_val)\n \n if num < 0:\n\n if resp_phase['insp']:\n end = resp_phase['insp'][-1]\n start = resp_phase['insp'][0]\n resp_startstop['insp'].append([start, end])\n resp_phase['insp'] = []\n \n x_val = diff_time_vals[i]\n resp_phase['exp'].append(x_val)\n\n # Checks to make sure that inspiration/expiration\n # at the end of the file is still recorded.\n if resp_phase['exp']:\n end = resp_phase['exp'][-1]\n start = resp_phase['exp'][0]\n resp_startstop['exp'].append([start, end])\n resp_phase['exp'] = []\n\n if resp_phase['insp']:\n end = resp_phase['insp'][-1]\n start = resp_phase['insp'][0]\n resp_startstop['insp'].append([start, end])\n resp_phase['insp'] = []\n \n # Without the following code, the system identifies the first\n # value above/below zero. 
The following code pushes the index\n    # of the start and end to the left/right respectively\n    for key, lst in resp_startstop.items():\n        resp_startstop[key] = [(diff_time_vals[diff_time_vals.index(tuple[0]) - 1], diff_time_vals[diff_time_vals.index(tuple[1]) + 1]) for tuple in lst if (diff_time_vals.index(tuple[0]) - 1 >= 0) and (diff_time_vals.index(tuple[1]) + 1 < len(diff_time_vals))]\n    \n    # *** Optional code for graphing vertical lines at the\n    # start and end of each respiration phase (useful for visually\n    # checking that the code works) ***\n    # for lst in resp_startstop.values():\n    #     for tup in lst:\n    #         for val in tup:\n    #             plt.subplot(2,1,1)\n    #             plt.axvline(x = val, color = 'b')\n\n    return resp_startstop\n\n\ndef indexOfClosest(lst: list, num: float):\n    \"\"\"\n    Given lst and any number num, returns the index of the number in lst\n    that is closest to num.\n    \"\"\"\n    closest = lst[min(range(len(lst)), key = lambda i: abs(lst[i] - num))]\n    return lst.index(closest)\n\n\ndef graphResp(vals, running, time_list, diff, diff_time_vals, startstop):\n    \"\"\"\n    Given all the calculated data, graph and label the data.\n    \"\"\"\n    # Plot raw & calculated data\n    data_start = indexOfClosest(time_list, startstop[0])\n    data_end = indexOfClosest(time_list, startstop[1])\n\n    plt.subplot(2, 1, 1)\n    # Graph raw data\n    plt.plot(time_list[data_start : data_end],\n             vals[data_start : data_end],\n             label = 'Raw Sensor Data')\n    # Graph moving avg data\n    plt.plot(time_list[data_start : data_end], \n             running[data_start : data_end],\n             label = f'Running Average (window size = {RUNNING_WINDOW_SIZE})')\n    \n    # Graph calculated respiration phase data\n    plt.subplot(2, 1, 2)\n    plt.plot(diff_time_vals[data_start : data_end],\n             diff[data_start : data_end],\n             'r', label = 'Breath Phase')\n\n    # Fill above & below differential data to \n    # different breathing phases.\n    plt.fill_between(\n        x = diff_time_vals[data_start : data_end], \n        y1 = diff[data_start : data_end], \n        where = diff[data_start : data_end] >= 0,\n        color = \"g\",\n        alpha = 0.2)\n    \n    plt.fill_between(\n        x = diff_time_vals[data_start : data_end], \n        y1 = diff[data_start : data_end],\n        where = diff[data_start : data_end] <= 0,\n        color = \"b\",\n        alpha = 0.2)\n\n    # Bottom plot styling\n    plt.xlabel('Time [s]')\n    # Top plot styling\n    plt.subplot(2, 1, 1)\n    plt.legend()\n    plt.title('Respiration Phase Analysis')\n    \n\ndef doRespAnalysis(filename, startstop):\n    \"\"\" \n    Given a .txt with a list of FSR vals, and a tuple of the start\n    and stop times to show on the graph, calculate all data for graphing.\n    Actual graphing is not done within this function so that this function\n    can be easily called from other python scripts without automatically \n    generating a plot.\n    \"\"\"\n    vals, time_list= readRespData(filename)\n    \n    running = runningMean(vals)\n    diff, diff_time_vals = calcDifferential(running, time_list)\n\n    # Currently unused, extracts a dictionary of the start/stop \n    # time of each respiratory phase\n    resp_startstop = findRespiratoryPhase(diff, diff_time_vals)\n    \n    return vals, running, time_list, diff, diff_time_vals\n\n\ndef main():\n    # Interpret command line args\n    args = sys.argv[1:]\n    filename = args[0]\n    if len(args) > 0:\n        startstop = (float(args[1]), float(args[2]))\n    \n    vals, running, time_list, diff, diff_time_vals = doRespAnalysis(filename, startstop)\n    \n    graphResp(vals, running, time_list, diff, diff_time_vals, startstop)\n    plt.show()\n\n\nif __name__ == '__main__':\n    
main()","repo_name":"trevorjehl/VCD","sub_path":"respiration_phase.py","file_name":"respiration_phase.py","file_ext":"py","file_size_in_byte":8823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34005281415","text":"#\n# Window Edited by Pycharm.\n# Time : 2022/08/28\n# Author : YU.J.P\n#\n\n\"\"\"\n 版本: V1.1\n 基本功能:\n 1. 简洁的窗体;\n 2. 新增按钮 以及相应提示\n 更新内容 :\n -\n 需求:\n - 设计窗体\n\n\"\"\"\nimport copy # 拷贝函数\nimport os # 文件输入\nimport random # 随机包\nimport threading # 多线程操作\nimport pygame # 游戏引擎\nimport tkinter # 窗口视窗\nfrom tkinter import messagebox # 消息窗口\nfrom mutagen.mp3 import MP3 # 获取mp3总时长\n\n\n# 全局变量\nVERSION = 'V1.1' # 版本信息\n\n\n# 主要逻辑运行\nclass MainRun:\n # 固定变量\n window_0 = None # 窗口 编号:0\n button_0 = None # 按钮 编号:0 确认\n button_1 = None # 按钮 编号:11 返回\n\n def __init__(self):\n pass\n\n # 窗口初始化\n def initALLWindow(self):\n # 窗口 编号:0 初始化\n MainRun.window_0 = tkinter.Tk() # 创建窗口\n MainRun.window_0.title('Window ' + VERSION) # 窗口命名\n MainRun.window_0.geometry('500x300+500+200') # 设置窗口大小 -- 格式 : '长x宽+x+y'\n\n # 按钮初始化\n def initAllButton(self):\n # 按钮 编号:0 初始化\n MainRun.button_0 = tkinter.Button(MainRun.window_0) # 放到 window_0 上\n MainRun.button_0['text'] = '确定' # 按钮命名\n MainRun.button_0.pack() # 按钮定位\n MainRun.button_0.bind(\"\", self.jumpMassage_0)\n # 按钮 编号:1 初始化\n MainRun.button_1 = tkinter.Button(MainRun.window_0) # 放到 window_0 上\n MainRun.button_1['text'] = '返回' # 按钮命名\n MainRun.button_1.pack() # 按钮定位\n MainRun.button_1.bind(\"\", self.jumpMassage_1)\n\n # 按钮 编号:0 确认 消息弹窗\n def jumpMassage_0(self, e):\n tkinter.messagebox.showinfo('提示', '确认成功!(◕ᴗ◕✿)') # 窗口名称 点击成功\n\n # 按钮 编号:1 确认 消息弹窗\n def jumpMassage_1(self, e):\n tkinter.messagebox.showinfo('提示', '返回成功!(◕ᴗ◕✿)') # 窗口名称 点击成功\n\n # 主程序入口\n def Start(self):\n # 窗口初始化\n self.initALLWindow()\n # 按钮初始化\n self.initAllButton()\n\n # 循环显示窗口\n MainRun.window_0.mainloop()\n\n\n# 开始运行\nif __name__ == '__main__':\n MainRun().Start() # 运行\n","repo_name":"YJP520/Tkinter-Learning","sub_path":"src/history/window2.py","file_name":"window2.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"71854917299","text":"from random import randint\n\n\ntext = [\"Задача 1.\",\n \"Задача 2.\",\n \"Задача 3.\",\n \"Задача 4 не понимаю что от меня нужно.\",\n \"Задача 5 не понимаю что от меня нужно.\"]\nprint('\\n'.join(text))\nx = int(input(\"Ввыбирите задачу: \"))\nif 0 < x < 6:\n\n if x == 1:\n array = [randint(1, 20) for i in range(int(input(\"Введите число: \")))]\n print(array)\n array2 = []\n for i in range(1, len(array)):\n if array[i-1] < array[i]:\n array2.append(array[i])\n print(array2)\n\n if x == 2:\n array = [i for i in range(\n 1, int(input(\"Введите число: \"))) if i % 20 == 0 or i % 21 == 0]\n print(array)\n\n if x == 3:\n array = [\"Иван\", \"Мария\", \"Петр\", \"Илья\",\n \"Марина\", \"Петр\", \"Алина\", \"Бибочка\"]\n array2 = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ё', 'Ж', 'З', 'И', 'Й', 'К', 'Л', 'М', 'Н', 'О',\n 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ц', 'Ч', 'Ш', 'Щ', 'Ъ', 'Ы', 'Ь', 'Э', 'Ю', 'Я']\n print(array)\n def sort(array, array2):\n array3 = []\n n = ''\n \n for i in range(len(array2)):\n n = array2[i]\n array3.append(n + ':')\n for k in range(len(array)):\n if n == array[k][0]:\n array3.append(array[k])\n if len(array3) > 1:\n print(*array3, end=\" \")\n array3 = []\n\n sort(array, array2)\n\n if x == 4:\n x = 0\n\n if x == 5:\n x = 0\n\nelse:\n print(\"Вы выбрали задачу которой 
нету в списке, перезапустите \")\n","repo_name":"ZadRotaS/Python_DZ","sub_path":"Seminar6_DZ/Seminar6_DZ.py","file_name":"Seminar6_DZ.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13986559652","text":"# my_lambdata\\iqr.py\n\n# Find the IQR of a set of numbers\n\nimport numpy\n\n\ndef iqr(X):\n # Finds midpoint of Q1\n Q1 = numpy.percentile(X, 25, interpolation='midpoint')\n\n # Finds midpoint of Q3\n Q3 = numpy.percentile(X, 75, interpolation='midpoint')\n\n # Calculates IQR\n iqr = Q3 - Q1\n\n return iqr\n","repo_name":"karlmanalo/lambdata-karlmanalo","sub_path":"my_lambdata/iqr.py","file_name":"iqr.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28544403735","text":"import pandas\n\n#\n# read csv into pandas\n#\ndata = pandas.read_csv(\"data/in/2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv\")\nprint(data.columns)\n#\n# get column\n#\nfur_color = data['Primary Fur Color']\n#\n# process count per value\n#\nsquirrels_per_color = fur_color.value_counts()\n#\n# print dataframe, dict\n#\nprint(squirrels_per_color)\nprint(squirrels_per_color.to_dict())\n#\n# or\n#\n# column = data[\"Primary Fur Color\"]\n#\n# extract manually for every color\n#\ngray = data[data[\"Primary Fur Color\"] == \"Gray\"]\ngray_count = len(gray)\ncinnamon = data[data[\"Primary Fur Color\"] == \"Cinnamon\"]\ncinnamon_count = len(cinnamon)\nblack = data[data[\"Primary Fur Color\"] == \"Black\"]\nblack_count = len(black)\nprint(\"counts: \", gray_count, cinnamon_count, black_count)\n#\n# create dict manually\n#\ndata_dict = {\n \"Fur Color\": [\n \"Gray\",\n \"Cinnamon\",\n \"Black\",\n ],\n \"Count\": [\n gray_count,\n cinnamon_count,\n black_count,\n ]\n}\nprint(data_dict)\n#\n# create dataframe from dict\n#\ndf = pandas.DataFrame(data_dict)\nprint(df)\n#\n# write to csv\n#\ndf.to_csv(\"data/out/squirrel_count.csv\")\n","repo_name":"atrox3d/day-25-csv","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29810379607","text":"import logging\nimport json\n\nfrom rdflib import Graph, URIRef\nfrom rdflib.namespace import DC\n\nlogger = logging.getLogger(__name__)\n\n\nUNIVERSITY_OF_OXFORD = URIRef(\"http://oxpoints.oucs.ox.ac.uk/id/00000000\")\n\n\nclass OxpointsDescendantsImporter(object):\n\n def __init__(self, kv, oxpoints_file, relation, rdf_media_type='text/turtle'):\n \"\"\"From a given start point follow all edges through the specified ``relation``\n and collect a set of all descendants recursively.\n\n :param kv: key-value store\n :param oxpoints_file: path to the oxpoints representation\n :param relation: the predicate we are following through the graph\n :param rdf_media_type: file format of oxpoints_file\n \"\"\"\n self.kv = kv\n self.relation = relation\n graph = Graph()\n graph.parse(oxpoints_file, format=rdf_media_type)\n self.graph = graph\n\n def import_data(self):\n \"\"\"Import all of oxpoints by starting with the University itself\n \"\"\"\n self.import_subject(UNIVERSITY_OF_OXFORD)\n\n def format_descendant(self, subject):\n desc = {'id': self._get_formatted_oxpoints_id(subject)}\n title = self.graph.value(subject, DC.title)\n if title:\n desc['title'] = title.toPython()\n return desc\n\n def import_subject(self, subject):\n \"\"\"For each 
object following the ``self.relation`` predicate of the\n ``subject`` we recursively add all objects to a list of descendants.\n\n These are placed into JSON and written to the KV Store.\n \"\"\"\n descendants = []\n children = self.get_children(subject)\n descendants.extend(map(self.format_descendant, children))\n for child in children:\n child_descendants = self.import_subject(child)\n descendants.extend(child_descendants)\n self.kv.set(self._get_formatted_oxpoints_id(subject), json.dumps({'descendants': descendants}))\n return descendants\n\n def get_children(self, subject):\n return [triple[0] for triple in self.graph.triples((None, self.relation, subject))]\n\n def _get_formatted_oxpoints_id(self, uri_ref, separator=':'):\n \"\"\"Split an URI to get the OxPoints ID\n :param uri_ref: URIRef object\n :return string representing oxpoints ID\n \"\"\"\n return 'oxpoints{separator}{ident}'.format(separator=separator,\n ident=uri_ref.toPython().rsplit('/')[-1])\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('oxpointsfile', type=argparse.FileType('r'))\n ns = parser.parse_args()\n from moxie.core.kv import KVService\n from moxie.places.importers.rdf_namespaces import Org\n kv = KVService('redis://localhost:6379/12')\n importer = OxpointsDescendantsImporter(kv, ns.oxpointsfile, Org.subOrganizationOf)\n importer.import_data()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"martinfilliau/moxie","sub_path":"moxie/places/importers/oxpoints_descendants.py","file_name":"oxpoints_descendants.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70307221297","text":"import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\ndef crosscorr(data_x, data_y, lag=0):\r\n '''\r\n Lag_N cross correlation.\r\n Params:\r\n data_x, data_y: pandas series of the same length\r\n \r\n Returns:\r\n correlation: float\r\n '''\r\n return data_x.corr(data_y.shift(lag))\r\n\r\ndef window_lagged_crosscorr(data1, data2, lags_range, lag_steps, no_splits, return_corr=False):\r\n '''\r\n data1, data2: pandas series of the same length\r\n lags_range (int): range borders of lags (from -lags_range to lags_range)\r\n lags_steps (int): step of lags\r\n no_splits (int): number of splits\r\n return_corr (bool): whether to return correlations dataframe or not\r\n '''\r\n samples_per_split = data1.shape[0] / no_splits\r\n crosscorrs = []\r\n d1_name = data1.name\r\n d2_name = data2.name\r\n arr = np.array(range(-int(lags_range), int(lags_range+1), lag_steps))\r\n step = len(range(-int(lags_range), int(lags_range+1), lag_steps)) // 8\r\n ticks = list(np.where(np.in1d(arr, arr[::step]))[0])\r\n ticks_labels = list(arr[::step])\r\n range_val = range(-int(lags_range), int(lags_range+1), lag_steps) \r\n \r\n for t in range(0, no_splits):\r\n \r\n d1 = data1.loc[(t)*samples_per_split:(t+1)*samples_per_split]\r\n d2 = data2.loc[(t)*samples_per_split:(t+1)*samples_per_split]\r\n window_corrs = [crosscorr(d1, d2, lag) for lag in range_val]\r\n crosscorrs.append(window_corrs)\r\n \r\n crosscorrs = pd.DataFrame(crosscorrs)\r\n f, ax = plt.subplots(figsize=(20,10))\r\n sns.heatmap(crosscorrs, cmap='RdBu_r', ax=ax)\r\n ax.set(title=f'Windowed Time Lagged Cross Correlation between \\n {d1_name} and {d2_name}',\r\n xlabel='Offset, minutes',\r\n ylabel='Window number')\r\n ax.set_xticks(ticks)\r\n ax.set_xticklabels(ticks_labels, rotation=70, 
fontsize=14)\r\n    plt.show()\r\n    if return_corr:\r\n        return crosscorrs\r\n\r\ndef rolling_window_lagged_crosscorr(data1, data2, lags_range, lag_steps, \r\n                                    window_size, step_size, return_corr=False,\r\n                                    plot_graph=True):\r\n    '''\r\n    data1, data2: pandas series of the same length\r\n    lags_range (int): range borders of lags (from -lags_range to lags_range)\r\n    lag_steps (int): step of lags\r\n    window_size (int): window size\r\n    step_size (int): step for window moving\r\n    return_corr (bool): whether to return the correlations dataframe or not\r\n    plot_graph (bool): whether to plot the correlations heatmap or not\r\n    '''\r\n    t_start = 0\r\n    t_end = t_start + window_size\r\n    crosscorrs = []\r\n    idx = []\r\n    d1_name = data1.name\r\n    d2_name = data2.name\r\n    arr = np.array(range(-int(lags_range), int(lags_range+1), lag_steps))\r\n    range_val = range(-int(lags_range), int(lags_range+1), lag_steps)\r\n    step = len(range(-int(lags_range), int(lags_range+1), lag_steps)) // 8\r\n    ticks = list(np.where(np.in1d(arr, arr[::step]))[0])\r\n    ticks_labels = list(arr[::step])\r\n    \r\n    while t_end < data1.shape[0]:\r\n        \r\n        d1 = data1.iloc[t_start:t_end]\r\n        d2 = data2.iloc[t_start:t_end]\r\n        window_corrs = [crosscorr(d1, d2, lag) for lag in range_val]\r\n        idx.append(t_start)\r\n        crosscorrs.append(window_corrs)\r\n        t_start = t_start + step_size\r\n        t_end = t_end + step_size\r\n    \r\n    crosscorrs = pd.DataFrame(crosscorrs, index=idx)\r\n    if plot_graph:\r\n        f, ax = plt.subplots(figsize=(20,20))\r\n        sns.heatmap(crosscorrs, cmap='RdBu_r', ax=ax)\r\n        ax.set(title=f'Rolling Windowed Time Lagged Cross Correlation between \\n {d1_name} and {d2_name}',\r\n               xlabel='Offset, minutes',\r\n               ylabel='Window number')\r\n        ax.set_xticks(ticks)\r\n        ax.set_xticklabels(ticks_labels, rotation=70, fontsize=14)\r\n        plt.show()\r\n    if return_corr:\r\n        return crosscorrs\r\n\r\ndef best_lags(df, arr, threshold=0.95, drop_zero_lag=True):\r\n    \r\n    '''\r\n    Inputs:\r\n        df (pd.DataFrame): dataframe with correlations between two tickers\r\n        arr (np.array): array with lag values\r\n        threshold (float): threshold level for correlation\r\n    Returns:\r\n        check (pd.DataFrame): dataframe with lags above threshold with window start\r\n    '''\r\n    top_corr_list = []\r\n    for i in range(0, len(df)):\r\n        \r\n        test = pd.DataFrame()\r\n        test['corr'] = df.iloc[i,:][df[abs(df)>threshold].iloc[i,:].notnull()]\r\n        test.index = arr[df.iloc[i,:][df[abs(df)>threshold].iloc[i,:].notnull()].index]\r\n        if len(test) > 0:\r\n            name = [df.iloc[i,:].name]\r\n            val = [[{'lag':i, 'corr':j} for i, j in zip(test.index, test.iloc[:,0])]]\r\n            if drop_zero_lag:\r\n                val = [[el for el in val[0] if el['lag']!=0]]\r\n            if len(val[0]) != 0:\r\n                top_corr_list.append(name+val)\r\n            \r\n    check = pd.DataFrame(top_corr_list)\r\n    check.columns = ['window_start', 'lags']\r\n    check['lags'] = check.lags.apply(lambda y: sorted(y, key=lambda x: abs(x['corr']), reverse=True))\r\n    return check\r\n\r\n\r\ndef plot_corr_windows(col1, col2, lags_range, lag_step,\r\n                      window_size, step_size,\r\n                      arr, threshold=0.95):\r\n    '''\r\n    Inputs:\r\n        col1, col2: pandas series of the same length\r\n        lags_range (int): range borders of lags (from -lags_range to lags_range)\r\n        lag_step (int): step of lags\r\n        window_size (int): window size\r\n        step_size (int): step for window moving\r\n        arr (np.array): array with lag values\r\n        threshold (float): threshold level for correlation\r\n    '''\r\n    \r\n    df = rolling_window_lagged_crosscorr(\r\n        col1,\r\n        col2,\r\n        lags_range, lag_step,\r\n        window_size, step_size,\r\n        
return_corr=True,\r\n        plot_graph=False)\r\n    check = best_lags(df, arr, threshold)\r\n    fig, axs = plt.subplots(2,3, figsize=(16,10))\r\n    for i in range(0,6):\r\n        lag, wind, corr = check.iloc[i,1][0]['lag'], check.iloc[i,0], check.iloc[i,1][0]['corr']\r\n        name1 = col1.name\r\n        name2 = col2.name\r\n\r\n        data1 = col1 \\\r\n                [wind + lag: wind + window_size + lag].\\\r\n                reset_index(drop = True) \r\n        data2 = col2 \\\r\n                [wind:wind + window_size].reset_index(drop = True)\r\n        data1 -= data1.mean()\r\n        data2 -= data2.mean()\r\n        axs[i//3, i%3].plot(data1, label = name1)\r\n        axs[i//3, i%3].plot(data2, label = name2)\r\n        axs[i//3, i%3].set_title(f\"Window: {wind} lag:{lag} minutes corr: {round(corr,2)}\" )\r\n        axs[i//3, i%3].legend()\r\n    plt.show()\r\n\r\ndef most_common_lags(df):\r\n    '''\r\n    Input:\r\n        df (pd.DataFrame): dataframe with lags above threshold with window start\r\n    Returns:\r\n        lags_count (dict): dictionary with counts of lags\r\n    '''\r\n    lags_count = {}\r\n    for _, lag in df.iterrows():\r\n        for el in lag[1]:\r\n\r\n            if el['lag'] in lags_count.keys():\r\n                lags_count[el['lag']] += 1\r\n            else:\r\n                lags_count[el['lag']] = 1\r\n    lags_count = {k: lags_count[k] for k in sorted(\r\n        lags_count,\r\n        key=lags_count.get,\r\n        reverse=True)}\r\n    return lags_count","repo_name":"CostiaB/Binance_lagged_connections","sub_path":"lagged_correlation.py","file_name":"lagged_correlation.py","file_ext":"py","file_size_in_byte":7556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"13081869484","text":"__author__ = 'JEFFERYK'\n\ndef fibonaci(num):\n    last = 0\n    current = 1\n    next = 1\n    for n in range(num):\n        yield current\n        next = current + last\n        last = current\n        current = next\n    return\n\nn = int(input(\"How many?: \"))\nfor i in fibonaci(n):\n    print(i)\n","repo_name":"JeffKochuk/TestingCenter","sub_path":"testGenerators.py","file_name":"testGenerators.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"2898555","text":"from django.contrib import admin\nfrom commodity.models import Commondity\n\n\n\n\n\n\n# Register your models here.\nclass AdminCommondity(admin.ModelAdmin):\n    list_display = ('CommodityName','CommodityCategory','CommodityPrice','CommodityDateTime')  # columns shown in the admin list view\n    search_fields = ('CommodityName', 'CommodityCategory',)  # fields searched by the admin search box\n    list_filter = ('CommodityCategory','CommodityDateTime',)  # sidebar filters\n\nadmin.site.register(Commondity,AdminCommondity)\n\n","repo_name":"hehuijun/WebStore2","sub_path":"commodity/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"16922753830","text":"import os\nimport xlrd\nclass ExcelUtils():\n    def __init__(self,file_path,sheet_name):  # constructor\n        self.file_path=file_path\n        self.sheet_name=sheet_name\n        self.sheet=self.get_sheet()  # keep the whole sheet object as an attribute for convenient reuse\n\n    # get the sheet object\n    def get_sheet(self):\n        wb=xlrd.open_workbook(self.file_path)\n        sheet=wb.sheet_by_name(self.sheet_name)\n        return sheet\n\n    def get_row_count(self):\n        row_count=self.sheet.nrows\n        return row_count\n\n    def get_col_count(self):\n        col_count=self.sheet.ncols\n        return col_count\n\n    # get the value of a single cell\n    def get_cell_value(self,row_index,col_index):  # rarely used methods could be made private with a double-underscore name\n        cell_value=self.sheet.cell_value(row_index,col_index)\n        return cell_value\n\n    # get the coordinate ranges of merged cells\n    def get_merged_info(self):\n        merged = self.sheet.merged_cells\n        return merged\n\n    # get the value of any cell (including merged cells)\n    def get_merged_cell_value(self,row_index, col_index):\n        cell_value = None\n        for (rlow, rhigh, clow, chigh) in self.get_merged_info():\n            if (row_index >= rlow and row_index < rhigh):\n                if (col_index >= clow and col_index < chigh):\n                    cell_value = self.get_cell_value(rlow, clow)\n                    break\n            else:\n                cell_value = self.get_cell_value(row_index, col_index)\n        else:\n            cell_value = self.get_cell_value(row_index, col_index)\n        return cell_value\n    # convert the sheet rows to dicts keyed by the header row, collected in a list\n    def get_sheet_data_by_dict(self):\n        all_data_list = []\n        first_row = self.sheet.row(0)  # take the first (header) row\n        for row in range(1, self.get_row_count()):\n            row_dict = {}\n            for col in range(0, self.get_col_count()):\n                row_dict[first_row[col].value] = self.get_merged_cell_value(row, col)\n            all_data_list.append(row_dict)\n        return all_data_list\n\n# test code\nif __name__=='__main__':\n    current_path=os.path.dirname(__file__)\n    excel_path=os.path.join(current_path,'..','samples/data/test_case.xlsx')\n    excelUtils=ExcelUtils(excel_path,\"Sheet1\")  # instantiate the class\n    #print(excelUtils.get_merged_cell_value(4,0))\n    print(excelUtils.get_merged_cell_value(4,0))\n    for i in excelUtils.get_sheet_data_by_dict():\n        print(i)\n\n    print(excelUtils.get_merged_info())\n","repo_name":"TesterKitty/API_FRAME","sub_path":"common/excel_untils.py","file_name":"excel_untils.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"72931407217","text":"from odoo import models, api, fields, _\nfrom odoo.exceptions import ValidationError\n\n\nclass MrpWoHour(models.TransientModel):\n    _name = 'mrp.wo.hour'\n    _description = \"Production Line Hours Entry\"\n\n    produce_id = fields.Many2one('mrp.wo.produce', 'MRP Produce')\n    work_order_line = fields.Many2one('mrp.wo.produce.work_line', 'Work Order')\n    quantity = fields.Float('Quantity')\n    quantity_to_produce = fields.Float('Quantity to Produce', related='produce_id.production_id.product_qty')\n    no_of_hours = fields.Float('Number of Hours')\n\n    def set_hours_of_wo(self):\n        if not self.quantity:\n            raise ValidationError(_('Please set a value for quantity'))\n        if self.quantity != self.quantity_to_produce:\n            self.produce_id.update({'update_quantity': True, 'new_quantity': self.quantity})\n        self.work_order_line.update({'labor_time': self.no_of_hours})\n        view_id = self.env.ref('mrp_wo_produce.view_mrp_wo_produce_wizard').id\n        return {'type': 'ir.actions.act_window',\n                'name': _('Wo Produce'),\n                'res_model': 'mrp.wo.produce',\n                'target': 'current',\n                'view_mode': 'form',\n                'view_type': 'form',\n                'res_id': self.produce_id.id,\n                'context': {'form_view_initial_mode': 'edit', 'barcode_scan': True},\n                'views': [[view_id, 'form']],\n                }\n","repo_name":"asphaltzipper/azi-odoo-modules","sub_path":"mrp_automation/wizard/mrp_wo_hour.py","file_name":"mrp_wo_hour.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"57"}
{"seq_id":"38292826614","text":"'''\n125. 
Valid Palindrome\nEasy\n\nGiven a string s, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.\n\nExample 1:\nInput: s = \"A man, a plan, a canal: Panama\"\nOutput: true\nExplanation: \"amanaplanacanalpanama\" is a palindrome.\n\nExample 2:\nInput: s = \"race a car\"\nOutput: false\nExplanation: \"raceacar\" is not a palindrome.\n'''\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n# left, right = 0, len(s) - 1\n \n# s = s.lower()\n# while left < right:\n# while left < len(s) and not s[left].isalnum():\n# left += 1\n# while right >= 0 and not s[right].isalnum():\n# right -= 1\n \n# if left >= right:\n# if right >= 0 and left < len(s) and s[left] != s[right]: return False\n# return True\n \n# if s[left] != s[right]: return False\n \n# left += 1\n# right -= 1\n \n# return True\n\n s = s.lower()\n \n string = []\n for c in s:\n if c.isalnum():\n string.append(c)\n \n return string == string[::-1]\n\n ","repo_name":"hongfu12321/Leetcode","sub_path":"string/125-Valid_Palindrome.py","file_name":"125-Valid_Palindrome.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"4284256892","text":"import unittest\r\nimport xlwt\r\nfrom SQL.GBQConnector import *\r\nfrom SQL.GBQManager import *\r\nfrom SQL.SqlServerConnector import *\r\nfrom SQL.SqlServerManager import *\r\nfrom Utils.DataIO import *\r\n\r\nclass CSVFileIOTests(unittest.TestCase):\r\n def setUp(self):\r\n \"\"\r\n\r\n def tearDown(self):\r\n \"\"\r\n\r\n def test_readInAllInputLists(self):\r\n sourceFullData = DataIO.readData(sourceFullFile)\r\n sourceIncData = DataIO.readData(sourceIncFile)\r\n targetFullData = DataIO.readData(targetFullFile)\r\n targetIncData = DataIO.readData(targetIncFile)\r\n\r\n print(\"\\n****************************************Source Full Data****************************************\\n\")\r\n for table in sourceFullData:\r\n print(table)\r\n\r\n print(\"\\n****************************************Source Inc Data****************************************\\n\")\r\n for table in sourceIncData:\r\n print(table)\r\n\r\n print(\"\\n****************************************Target Full Data****************************************\\n\")\r\n for table in targetFullData:\r\n print(table)\r\n\r\n print(\"\\n****************************************Target Inc Data****************************************\\n\")\r\n for table in targetIncData:\r\n print(table)\r\n\r\n def test_OutputDataToDirectory(self):\r\n # Get source and target tables to test from input files\r\n self.sourceTables = DataIO.readData(sourceFullFile)\r\n self.targetTables = DataIO.readData(targetFullFile)\r\n targetUKeys = DataIO.readIDData(targetUKeyFile)\r\n\r\n # Connect to both databases\r\n self.sqlServerConnector = SqlServerConnector(Driver, Server, Database, UID, PWD)\r\n self.SqlServerCursor = self.sqlServerConnector.connectToDatabase()\r\n\r\n self.gbqConnector = GBQConnector(GBQCredentials, Project, LaunchBrowser)\r\n self.gbqClient = self.gbqConnector.connectToDatabase()\r\n\r\n sqlServerManager = SqlServerManager(self.SqlServerCursor)\r\n gbqManager = GBQManager(self.gbqClient)\r\n\r\n # Get schema and sample data information from databases\r\n for tableId in range(len(self.sourceTables)):\r\n # Grab source and target schema info\r\n sourceTableSchema = sqlServerManager.getSchema(self.sourceTables[tableId])\r\n targetTableSchema = gbqManager.getSchema(self.targetTables[tableId])\r\n\r\n # Grab source 
and target data validation info\r\n targetTableSampleData = gbqManager.getSampleTableData(self.targetTables[tableId])\r\n uIDsList = DataHandler.getUIDsFromTargetTable(targetTableSampleData)\r\n sourceTableSampleData = sqlServerManager.getSampleTableData(self.sourceTables[tableId], targetUKeys[self.sourceTables[tableId]], uIDsList)\r\n\r\n # Write schema to excel sheet\r\n self.__writeToExcel(self.sourceTables[tableId], sourceTableSchema.ColumnSchemaList, targetTableSchema.ColumnSchemaList, 'Schema')\r\n\r\n # Write Data Validation to excel sheet\r\n self.__writeToExcel(self.sourceTables[tableId], sourceTableSampleData.SampleData, targetTableSampleData.SampleData, 'Sample Data')\r\n\r\n def __writeToExcel(self,tabName,sourceList,targetList,type):\r\n book = xlwt.Workbook()\r\n target = book.add_sheet('target')\r\n source = book.add_sheet('source')\r\n\r\n if type == \"Schema\":\r\n columnCount = len(sourceList)\r\n\r\n for columnId in range(columnCount):\r\n target.write(0, columnId, targetList[columnId].Name)\r\n target.write(1, columnId, targetList[columnId].DataType)\r\n source.write(0, columnId, sourceList[columnId].Name)\r\n source.write(1, columnId, sourceList[columnId].DataType)\r\n elif type == \"Sample Data\":\r\n rowNum = len(sourceList)\r\n\r\n for i in range(rowNum):\r\n colNum = len(sourceList[i])\r\n for j in range(colNum):\r\n target.write(i, j, targetList[i][j])\r\n source.write(i, j, sourceList[i][j])\r\n\r\n path = TestOutputDirectory + \"\\\\\" + type + \"\\\\\" + tabName + \".xls\"\r\n book.save(path)","repo_name":"Duskamo/UCH_DataComparerAndResultReporter","sub_path":"UnitTests/CSVFileIOTests.py","file_name":"CSVFileIOTests.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35889042678","text":"from core import Action\nfrom core.dataset import BatchGenerator\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport os\nimport numpy as np\n\n\nclass SequenceVisualizer(Action):\n def __init__(self, config, experiment_env):\n super(SequenceVisualizer, self).__init__(config, experiment_env)\n\n def csv(self, dataset_generator: BatchGenerator):\n for i, mini_batch in enumerate(dataset_generator):\n csv_out = os.path.join(self.experiment_env.output_dir,\n \"%s_batch_%03d.csv\" % (dataset_generator.dataset.name, i))\n print(\"Writing csv to\", csv_out)\n x = mini_batch[0] # type:np.ndarray\n x = x.reshape((x.shape[0], x.shape[1] * x.shape[2]))\n label = mini_batch[1][:, np.newaxis] # type:np.ndarray\n x_label = np.concatenate((label, x), axis=1)\n np.savetxt(csv_out, x_label, delimiter=\",\")\n\n def visualize(self, dataset_generator: BatchGenerator, batch_limit=None, segment_limit=None):\n \"\"\"\n\n :param dataset_generator:\n :param batch_limit: Visualize first \"batch_limit\" batches on;y\n :param segment_limit: Visualize first \"segment_limit\" segments in each batch only\n :return:\n \"\"\"\n matplotlib.use('PDF')\n pdf_out_0 = os.path.join(self.experiment_env.output_dir, \"%s_view_0.pdf\" % dataset_generator.dataset.name)\n pdf_out_1 = os.path.join(self.experiment_env.output_dir, \"%s_view_1.pdf\" % dataset_generator.dataset.name)\n print(\"Writing pdf visualization to\", pdf_out_1)\n with PdfPages(pdf_out_1) as pdf_1:\n with PdfPages(pdf_out_0) as pdf_0:\n print(\"Total # of batches\", len(dataset_generator))\n for i, mini_batch in enumerate(dataset_generator):\n if batch_limit is not None and i > 
batch_limit:\n                        break\n                    x = mini_batch[0]\n                    label = mini_batch[1]\n                    record_name = mini_batch[2]\n                    start_index = mini_batch[3]\n                    for j in range(x.shape[0]):\n                        if segment_limit is not None and j > segment_limit:\n                            break\n                        plt.plot(x[j, :, 0], label=\"Lead 1\")\n                        plt.plot(x[j, :, 1], label=\"Lead 2\")\n                        plt.title(\n                            \"%s minibatch #%d, segment #%d - label: %d\" % (\n                                dataset_generator.dataset.name, i, j, label[j]))\n                        mitdb_tag = self.config[\"preprocessing\"].get(\"MIT_DB_TAG\")\n                        nsrdb_tag = self.config[\"preprocessing\"].get(\"NSR_DB_TAG\")\n                        plt.text(0, 0, f\"MIT_DB: {mitdb_tag}; NSR_DB: {nsrdb_tag}\")\n                        plt.text(0, 0.5, f\"Record_Name: {record_name}; Start_Index: {start_index[j]}\")\n                        plt.grid()\n                        plt.legend()\n                        if label[j] == 1:\n                            pdf_1.savefig()\n                        else:\n                            pdf_0.savefig()\n                        plt.close()\n","repo_name":"taoyilee/hacktech2019","sub_path":"core/dataset/sequence_visualizer.py","file_name":"sequence_visualizer.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"70924823218","text":"import os\nimport re\nimport datetime\n\n\nclass AuxiliarFunctions:\n    def __init__(self):\n        self._sizefiles = []\n        self._nameFiles = []\n\n    def convert_bytes(self, size, unit=None):\n        if unit == \"KB\":\n            return str(round(size / 1024, 3)) + 'KB'\n        elif unit == \"MB\":\n            return str(round(size / (1024 * 1024), 3)) + ' MB'\n        elif unit == \"GB\":\n            return str(round(size / (1024 * 1024 * 1024), 3)) + ' GB'\n        else:\n            return str(size) + ' bytes'\n\n    def found_files(self, path_dir,extension):\n        content = os.listdir(path_dir)\n        for file in content:\n            if os.path.isfile(os.path.join(path_dir, file)) and file.endswith(extension):\n                self._nameFiles.append(file)\n                self._sizefiles.append(self.convert_bytes(\n                    os.path.getsize(path_dir + '/' + file), 'MB'))\n        return dict(zip(self._nameFiles, self._sizefiles))\n\n    def site_url_location(self, host):\n        pattern = re.compile(r'^https://.+/')\n        site = \"\".join(pattern.findall(host))\n        site = host.replace(site, '')\n        return site[:-6]\n\n    def format_date(self):\n        date_format = datetime.datetime.now()\n        return date_format.strftime(\"_%d%m%Y%H%M\")\n","repo_name":"AlejoCJaimes/WebScrapper","sub_path":"project/web_scrapper_labels/functions_handling.py","file_name":"functions_handling.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"43722002439","text":"''' \nA stock OHLC chart that monitors the MACD indicator.\nThis example shows the streaming and updating feature of Bokeh charts.\n'''\nimport numpy as np\n\nfrom bokeh.driving import count\nfrom bokeh.layouts import column, gridplot, row\nfrom bokeh.models import ColumnDataSource, Select, Slider\nfrom bokeh.plotting import curdoc, figure\n\nnp.random.seed(1)\n\n\n\n# source creator using np.random\ndef __create_prices():\n    last_average = 100\n    returns = np.asarray(np.random.lognormal(3, 0.5, 1))\n    average = last_average * np.cumprod(returns)\n    high = average * np.exp(abs(np.random.gamma(1, 0.03, size=1)))\n    low = average / np.exp(abs(np.random.gamma(1, 0.03, size=1)))\n    delta = high - low\n    open = low + delta * np.random.uniform(0.05, 0.95, size=1)\n    close = low + delta * np.random.uniform(0.05, 0.95, size=1)\n    return open[0], high[0], low[0], close[0], average[0]\n\nMA12, MA26, EMA12, EMA26 = '12-tick Moving Avg', '26-tick Moving Avg', '12-tick EMA', '26-tick EMA'\n\n# define source data structure\n# source = ColumnDataSource(dict(\n# 
time=[], average=[], low=[], high=[], open=[], close=[],\n# ma=[], macd=[], macd9=[], macdh=[], color=[]\n# ))\nxopen, xhigh, xlow, xclose, xaverage = [],[],[],[],[]\nfor _ in range(10):\n open, high, low, close, average = __create_prices()\n xopen.append(open)\n xhigh.append(high)\n xlow.append(low)\n xclose.append(close)\n xaverage.append(average)\n\nsource = ColumnDataSource(dict(\n time=[0,1,2,3,4,5,6,7,8,9], average=xaverage, low=xlow, high=xhigh, open=xopen, close=xclose,\n ma=[0,1,2,3,2,1,0,1,2,3], macd=[3,2,1,0,1,2,3,2,1,0], macd9=[1,1,1,1,1,2,2,2,2,2], \n macdh=[1,2,1,2,1,2,1,2,1,2], color=[3,3,3,3,3,0,0,0,0,0]\n))\n\n# define static figure show\n# draw figure#1\np = figure(height=500, tools=\"xpan,xwheel_zoom,xbox_zoom,reset\", x_axis_type=None, y_axis_location=\"right\")\np.x_range.follow = \"end\"\np.x_range.follow_interval = 100\np.x_range.range_padding = 0\n# draw lines\np.line(x='time', y='average', alpha=0.2, line_width=3, color='navy', source=source)\np.line(x='time', y='ma', alpha=0.8, line_width=2, color='orange', source=source)\n# draw k bar\np.segment(x0='time', y0='low', x1='time', y1='high', line_width=2, color='black', source=source)\np.segment(x0='time', y0='open', x1='time', y1='close', line_width=8, color='color', source=source)\n# draw figure#2\np2 = figure(height=250, x_range=p.x_range, tools=\"xpan,xwheel_zoom,xbox_zoom,reset\", y_axis_location=\"right\")\np2.line(x='time', y='macd', color='red', source=source)\np2.line(x='time', y='macd9', color='blue', source=source)\np2.segment(x0='time', y0=0, x1='time', y1='macdh', line_width=6, color='black', alpha=0.5, source=source)\n\n# 2 slider & 1 selector for changing the source\nmean = Slider(title=\"mean\", value=0, start=-0.01, end=0.01, step=0.001)\nstddev = Slider(title=\"stddev\", value=0.04, start=0.01, end=0.1, step=0.01)\nmavg = Select(value=MA12, options=[MA12, MA26, EMA12, EMA26])\n\n# source creator using np.random\ndef _create_prices(t):\n last_average = 100 if t==0 else source.data['average'][-1]\n returns = np.asarray(np.random.lognormal(mean.value, stddev.value, 1))\n average = last_average * np.cumprod(returns)\n high = average * np.exp(abs(np.random.gamma(1, 0.03, size=1)))\n low = average / np.exp(abs(np.random.gamma(1, 0.03, size=1)))\n delta = high - low\n open = low + delta * np.random.uniform(0.05, 0.95, size=1)\n close = low + delta * np.random.uniform(0.05, 0.95, size=1)\n return open[0], high[0], low[0], close[0], average[0]\n\n# compute moving average for 10 usin conv trick -> rolling(n)\ndef _moving_avg(prices, days=10):\n if len(prices) < days: return [100]\n return np.convolve(prices[-days:], np.ones(days, dtype=float), mode=\"valid\") / days\n\n# compute expotential moving average using conv trick -> rolling(n)\ndef _ema(prices, days=10):\n if len(prices) < days or days < 2: return [prices[-1]]\n a = 2.0 / (days+1)\n kernel = np.ones(days, dtype=float)\n kernel[1:] = 1 - a\n kernel = a * np.cumprod(kernel)\n # The 0.8647 normalizes out that we stop the EMA after a finite number of terms\n return np.convolve(prices[-days:], kernel, mode=\"valid\") / (0.8647)\n\n\n@count() # call update -> t=t+1\ndef update(t):\n # new data creator\n open, high, low, close, average = _create_prices(t+10)\n color = \"green\" if open < close else \"red\"\n # source data structure\n # source = ColumnDataSource(\n # dict(time=[], average=[], low=[], high=[], open=[], close=[], ma=[], macd=[], macd9=[], macdh=[], color=[]\n # ))\n # define newly added data structure (only part of source data is updated)\n 
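# (Editor's note) ColumnDataSource.stream() at the end of update() appends\n    # each of these one-element columns to the plot's source; every key put\n    # into new_data must already exist among the source's columns.\n    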
new_data = dict( \n time=[t],\n open=[open],\n high=[high],\n low=[low],\n close=[close],\n average=[average],\n color=[color],\n )\n # compute new data for k line\n close = source.data['close'] + [close]\n ma12 = _moving_avg(close[-12:], 12)[0]\n ma26 = _moving_avg(close[-26:], 26)[0]\n ema12 = _ema(close[-12:], 12)[0]\n ema26 = _ema(close[-26:], 26)[0]\n # chose value for slider bar\n if mavg.value == MA12: new_data['ma'] = [ma12]\n elif mavg.value == MA26: new_data['ma'] = [ma26]\n elif mavg.value == EMA12: new_data['ma'] = [ema12]\n elif mavg.value == EMA26: new_data['ma'] = [ema26]\n # compute new data for macd\n macd = ema12 - ema26\n new_data['macd'] = [macd]\n macd_series = source.data['macd'] + [macd] # series for compute ema(macd)\n macd9 = _ema(macd_series[-26:], 9)[0]\n new_data['macd9'] = [macd9]\n new_data['macdh'] = [macd - macd9]\n # ----- new_data type(new_data)=class.dict -----\n # {'time': [0], 'open': [105.77556002562126], 'high': [106.7134742510443], 'low': [105.56676727306952], \n # 'close': [105.71939940183024], 'average': [106.71310807070746], 'color': ['red'], 'ma': [100], \n # 'macd': [0.0], 'macd9': [0.0], 'macdh': [0.0]}\n source.stream(new_data=new_data, rollover= 50) # rollover: max items to keep or will discard appendings\n\n\nmodel=column(row(mean, stddev, mavg), gridplot([[p], [p2]], toolbar_location=\"left\", width=1000))\n# model.plot()\n\n# add a model as the base of the cur documentation obj \ncurdoc().add_root(model)\ncurdoc().add_periodic_callback(callback=update, period_milliseconds=500)\n# set update time\n# curdoc().add_periodic_callback(callback=update, period_milliseconds=1000)\ncurdoc().title = \"OHLC\"\n\n","repo_name":"twistfatezz/bokeh_startup_examples","sub_path":"stock_ohlc.py","file_name":"stock_ohlc.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"28678858071","text":"#!/usr/bin/env python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'breakingRecords' function below.\n#\n# The function is expected to return an INTEGER_ARRAY.\n# The function accepts INTEGER_ARRAY scores as parameter.\n#\n\ndef breakingRecords(scores):\n # Write your code here \n maximun = scores[0]\n minimun = scores[0]\n count_min = 0\n count_max = 0\n lenght = len(scores)\n for i in range(1,lenght):\n if scores[i] > maximun:\n maximun = scores[i]\n count_max = count_max + 1\n if scores[i] < minimun:\n minimun = scores[i]\n count_min = count_min + 1\n return [count_max,count_min]\n\n\n\n\n\nif __name__ == '__main__':\n\n n = int(input().strip())\n\n scores = list(map(int, input().rstrip().split()))\n\n result = breakingRecords(scores)\n\n print(' '.join(map(str, result)))\n\n\n\n\n","repo_name":"elsebasan/hackerrank","sub_path":"Interview3MonthsKit/week1/04-breakinrecords/breakinrecords.py","file_name":"breakinrecords.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17039370641","text":"# %% [code]\n# A solution for the Youtube8m-2019 Challenge.\n# Explaination: https://www.kaggle.com/c/youtube8m-2019/discussion/112388\n\nimport multiprocessing\nimport os\nimport pickle\nimport time\n\nfrom glob import glob\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as 
lr_sched\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\n\nfrom sklearn.model_selection import KFold\nfrom scipy.stats import describe\n\n\nSEGMENT_LEN = 5\nNUM_FOLDS = 10\nNUM_MODELS = 10\nBATCH_SIZE = 96\nLOG_FREQ = 1000\nNUM_CLASSES = 1000\nN_VIDEO_CLASSES = 3862\nNUM_EPOCHS = 25\nFEATURES_DIM = 1152\n\nLEARNING_RATE = 0.00011729283760398037\nWEIGHT_DECAY = 0.0011412688966608406\n\nLR_FACTOR = 0.1\nLR_PATIENCE = 3\nLR_MINIMUM = 3e-7\nLR_THRESHOLD = 1e-3\n\n\n# Normally, training shows low GPU utilization (about 10%) since the training process is HDD-bound.\n# However, only 10% of validation data is labeled.\n# Let's compress it tight to optimize HDD access time (ofc training would be a lot faster with SSD).\ndef convert_val_data(prefix: str, wildcard: str) -> None:\n print('converting', wildcard)\n all_files = sorted(glob(wildcard))\n\n all_ids = []\n all_labels = []\n all_scores = []\n all_features_list = []\n\n for tfrec_file in all_files:\n for example in tf.python_io.tf_record_iterator(tfrec_file):\n tf_example = tf.train.Example.FromString(example)\n\n video_id = tf_example.features.feature['id'] \\\n .bytes_list.value[0].decode(encoding='utf-8')\n\n seg_start = list(tf_example.features.feature['segment_start_times'].int64_list.value)\n seg_labels = list(tf_example.features.feature['segment_labels'].int64_list.value)\n seg_scores = list(tf_example.features.feature['segment_scores'].float_list.value)\n\n tf_seq_example = tf.train.SequenceExample.FromString(example)\n num_frames = len(tf_seq_example.feature_lists.feature_list['audio'].feature)\n\n if any(np.array(seg_start) > num_frames): # why are there videos with invalid labels?\n # print('skipping video', video_id, 'file', tfrec_file)\n continue\n\n for segment, label, score in zip(seg_start, seg_labels, seg_scores):\n features = []\n\n for frame in range(segment, segment + SEGMENT_LEN):\n rgb = tf.decode_raw(tf_seq_example.feature_lists \\\n .feature_list['rgb'].feature[frame] \\\n .bytes_list.value[0],tf.uint8).numpy()\n audio = tf.decode_raw(tf_seq_example.feature_lists \\\n .feature_list['audio'].feature[frame] \\\n .bytes_list.value[0],tf.uint8).numpy()\n\n frame_features = np.concatenate([rgb, audio])\n features.append(frame_features)\n\n all_ids.append(video_id)\n all_labels.append(label)\n all_scores.append(score)\n all_features_list.append(np.expand_dims(features, axis=0))\n\n\n all_features = np.concatenate(all_features_list)\n\n print('writing features to the disk')\n np.save(f'{prefix}_features.npy', all_features)\n\n print('writing labels to the disk')\n with open(f'{prefix}_ids.pkl', 'wb') as f:\n pickle.dump((all_ids, all_labels, all_scores), f)\n\n\ndef dequantize(feat_vector: np.array, max_quantized_value=2, min_quantized_value=-2) -> np.array:\n ''' Dequantize the feature from the byte format to the float format. 
'''\n assert max_quantized_value > min_quantized_value\n quantized_range = max_quantized_value - min_quantized_value\n scalar = quantized_range / 255.0\n bias = (quantized_range / 512.0) + min_quantized_value\n return feat_vector * scalar + bias\n\n\n# PyTorch dataset class for numpy arrays.\nclass SegmentsDataset(torch.utils.data.Dataset):\n def __init__(self, ids: np.array, dataset_mask: Optional[np.array], labels: Optional[np.array],\n scores: Optional[np.array], features_path: str, mode: str) -> None:\n print(f'creating SegmentsDataset in mode {mode}')\n\n self.ids = ids\n self.scores = scores\n self.mode = mode\n\n if self.mode != 'test':\n labels_table = pd.read_csv('/kaggle/input/youtube8m-2019/vocabulary.csv')\n labels_table = labels_table.Index\n\n encode_table = np.zeros(np.amax(labels_table) + 1, dtype=int)\n for i, index in enumerate(labels_table):\n encode_table[index] = i\n\n assert dataset_mask is not None and self.scores is not None\n self.features_indices = np.arange(dataset_mask.size)[dataset_mask]\n self.labels = encode_table[labels]\n features_size = dataset_mask.size\n\n assert self.labels.shape[0] == self.scores.shape[0]\n assert self.features_indices.size == self.labels.shape[0]\n assert features_size >= self.scores.shape[0]\n\n if self.mode == 'train':\n self.labels *= (self.scores > 0.5).astype(int)\n else:\n features_size = self.ids.shape[0]\n\n self.features = np.load(features_path)\n\n def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]:\n features = self.features\n\n if self.mode != 'test':\n features_indices = self.features_indices\n labels = self.labels\n\n x = features[features_indices[index]]\n else:\n x = features[index]\n\n x = dequantize(x)\n x = torch.tensor(x, dtype=torch.float32)\n\n if self.mode == 'test':\n return x, 0\n else:\n y = labels[index].item()\n return x, y\n\n def __len__(self) -> int:\n return self.ids.shape[0]\n\n\n# PyTorch dataset class for original TFRecords.\n# I only use it for inference here, but it supports validation data with segment annotations.\ndef unwrap_video(video_dict):\n return (\n video_dict['id'],\n video_dict['labels'],\n video_dict['features'],\n video_dict['segment_start_times'],\n video_dict['segment_labels'],\n video_dict['segment_scores']\n )\n\ndef wrap_segment(vid, labels, start_time, features, segment_label, segment_score):\n return {\n 'id': vid,\n 'labels': labels,\n 'start_time': start_time,\n 'features': features,\n 'segment_label': segment_label,\n 'segment_score': segment_score\n }\n\nclass YouTube8MRecordParser:\n context_features = {\n \"id\": tf.io.FixedLenFeature((), tf.string),\n \"labels\": tf.io.VarLenFeature(tf.int64),\n \"segment_start_times\": tf.io.VarLenFeature(tf.int64),\n \"segment_end_times\": tf.io.VarLenFeature(tf.int64),\n \"segment_labels\": tf.io.VarLenFeature(tf.int64),\n \"segment_scores\": tf.io.VarLenFeature(tf.float32)\n }\n\n sequence_features = {\n \"rgb\": tf.io.FixedLenSequenceFeature([], tf.string),\n \"audio\": tf.io.FixedLenSequenceFeature([], tf.string),\n }\n\n @staticmethod\n def parse(proto):\n sample, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n YouTube8MRecordParser.context_features,\n YouTube8MRecordParser.sequence_features\n )\n\n sample['features'] = tf.concat([\n tf.decode_raw(sequence_parsed['rgb'], tf.uint8),\n tf.decode_raw(sequence_parsed['audio'], tf.uint8)\n ], axis=-1\n )\n\n for k, v in sample.items():\n if k == 'labels' or 'segment' in k:\n sample[k] = v.values\n\n return sample\n\n @staticmethod\n def 
to_numpy(eager_sample):\n return {\n k: v.numpy()\n for k, v in eager_sample.items()\n }\n\n @staticmethod\n def get_video_dataset(tfrecords, num_workers=None):\n return tf.data.TFRecordDataset(tfrecords, num_parallel_reads=num_workers)\\\n .map(YouTube8MRecordParser.parse, num_parallel_calls=num_workers)\\\n .filter(lambda video: tf.math.greater_equal(tf.shape(video['features'])[0], 5))\n\n @staticmethod\n def _video_to_segments_iterator(vid, labels, features, segment_start_times, segment_labels, segment_scores):\n n_samples = len(features) // SEGMENT_LEN\n\n assert n_samples >= 5\n\n for idx in range(n_samples):\n start_time = SEGMENT_LEN * idx\n\n segment_label = segment_score = -1\n if start_time in segment_start_times:\n i = np.where(segment_start_times == start_time)[0][0]\n segment_label = segment_labels[i]\n segment_score = segment_scores[i]\n\n yield (\n vid,\n labels,\n start_time,\n features[start_time: start_time + SEGMENT_LEN],\n segment_label,\n np.float32(segment_score)\n )\n\n def _video_to_segments(*args):\n result = [[] for _ in range(6)]\n\n for segment in YouTube8MRecordParser._video_to_segments_iterator(*args):\n for i, value in enumerate(segment):\n result[i].append(value)\n\n return result\n\n @staticmethod\n def get_segment_dataset(tfrecords: List[str]) -> Any:\n return YouTube8MRecordParser.get_video_dataset(tfrecords, None)\\\n .map(lambda video: tf.py_func(\n YouTube8MRecordParser._video_to_segments,\n unwrap_video(video),\n Tout=[tf.string, tf.int64, tf.int64, tf.uint8, tf.int64, tf.float32]),\n num_parallel_calls=None\n )\\\n .flat_map(lambda *args: tf.data.Dataset.zip(tuple(\n tf.data.Dataset.from_tensor_slices(k)\n for k in args))\n )\\\n .map(\n wrap_segment,\n num_parallel_calls=None\n )\n\nclass YouTube8MSegmentDataset(torch.utils.data.IterableDataset):\n def __init__(self, tfrecords: List[str]) -> None:\n self._dataset = YouTube8MRecordParser.get_segment_dataset(tfrecords)\n\n def __iter__(self) -> None:\n for i, segment in enumerate(map(YouTube8MRecordParser.to_numpy, self._dataset)):\n features = dequantize(segment['features']).astype(np.float32)\n yield features, segment['id'].decode()\n\n\ndef get_train_val_split(items: List[str], fold: int) -> Tuple[np.array, np.array]:\n skf = KFold(NUM_FOLDS, shuffle=True, random_state=0)\n items = np.array(items)\n train_idx, val_idx = list(skf.split(items))[fold]\n return items[train_idx], items[val_idx]\n\ndef load_train_data(fold: int) -> Any:\n with open('val_ids.pkl', 'rb') as f:\n all_ids, all_labels, all_scores = pickle.load(f)\n\n unique_ids = sorted(set(all_ids))\n unique_train_ids, unique_val_ids = get_train_val_split(unique_ids, fold)\n\n all_ids = np.array(all_ids)\n all_labels = np.array(all_labels)\n all_scores = np.array(all_scores)\n print(all_ids.shape)\n print(all_labels.shape)\n\n train_mask = np.isin(all_ids, unique_train_ids)\n train_ids = all_ids[train_mask]\n train_labels = all_labels[train_mask]\n train_scores = all_scores[train_mask]\n\n val_ids = all_ids[~train_mask]\n val_labels = all_labels[~train_mask]\n val_scores = all_scores[~train_mask]\n\n train_dataset = SegmentsDataset(train_ids, train_mask, train_labels, train_scores,\n 'val_features.npy', mode='train')\n\n val_dataset = SegmentsDataset(val_ids, ~train_mask, val_labels, val_scores,\n 'val_features.npy', mode='val')\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=BATCH_SIZE, shuffle=True,\n num_workers=0, drop_last=True)\n\n val_loader = torch.utils.data.DataLoader(\n val_dataset, 
batch_size=BATCH_SIZE, shuffle=False,\n num_workers=0, drop_last=False)\n\n return train_loader, val_loader\n\ndef load_test_data(wildcard: str) -> Any:\n test_dataset = YouTube8MSegmentDataset(glob(wildcard))\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)\n\n return test_loader\n\n\nclass AverageMeter:\n ''' Computes and stores the average and current value. '''\n def __init__(self) -> None:\n self.reset()\n\n def reset(self) -> None:\n self.val = 0.0\n self.avg = 0.0\n self.sum = 0.0\n self.count = 0\n\n def update(self, val: float, n: int = 1) -> None:\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef set_lr(optimizer: Any, lr: float) -> None:\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef get_lr(optimizer: Any) -> float:\n for param_group in optimizer.param_groups:\n lr = float(param_group['lr'])\n return lr\n\n assert False\n\ndef accuracy(predicts: Any, targets: Any) -> float:\n if isinstance(predicts, torch.Tensor):\n predicts = predicts.cpu().numpy()\n\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n if len(predicts.shape) == 2:\n predicts = np.argmax(predicts, axis=1)\n\n if len(targets.shape) == 2:\n targets = np.argmax(targets, axis=1)\n\n if predicts.shape != targets.shape:\n print(predicts.shape)\n print(targets.shape)\n assert False\n\n return np.mean(predicts == targets)\n\ndef average_precision(actuals, predictions, k=None):\n num_positives = actuals.sum() + 1e-10\n\n sorted_idx = np.argsort(predictions)[::-1]\n if k is not None:\n sorted_idx = sorted_idx[:k]\n\n actuals = actuals[sorted_idx]\n precisions = np.cumsum(actuals) / np.arange(1, len(actuals) + 1)\n\n return (precisions * actuals).sum() / float(num_positives)\n\nclass MeanAveragePrecisionCalculator:\n ''' Classwise MAP@K - metric for Youtube-8M 2019 competition. 
'''\n\n def __init__(self, num_classes=NUM_CLASSES, k=10 ** 5):\n self._num_classes = num_classes\n self._k = k\n self._predictions = [[] for _ in range(num_classes)]\n self._actuals = [[] for _ in range(num_classes)]\n\n def accumulate(self, predictions, actuals, masks=None):\n if masks is None:\n masks = np.ones_like(actuals)\n\n for i in range(self._num_classes):\n mask = masks[:, i] > 0\n\n self._predictions[i].append(predictions[:, i][mask])\n self._actuals[i].append(actuals[:, i][mask])\n\n def __call__(self):\n aps = []\n positive_count = []\n total_count = []\n\n for i in range(self._num_classes):\n actuals = np.concatenate(self._actuals[i])\n predictions = np.concatenate(self._predictions[i])\n\n aps.append(average_precision(actuals, predictions, self._k))\n\n total_count.append(len(actuals))\n positive_count.append(actuals.sum())\n\n return np.mean(aps)\n\n\ndef train_epoch(train_loader: Any, model: Any, criterion: Any, optimizer: Any,\n epoch: int, lr_scheduler: Any) -> float:\n print(f'epoch: {epoch}')\n print(f'learning rate: {get_lr(optimizer)}')\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n avg_score = AverageMeter()\n\n model.train()\n optimizer.zero_grad()\n\n num_steps = len(train_loader)\n\n print(f'total batches: {num_steps}')\n end = time.time()\n activation = nn.Softmax(dim=1)\n\n for i, (input_, target) in enumerate(train_loader):\n input_ = input_.cuda()\n output = model(input_)\n\n loss = criterion(output, target.cuda())\n\n predict = torch.argmax(output.detach(), dim=-1)\n avg_score.update(accuracy(predict, target))\n\n losses.update(loss.data.item(), input_.size(0))\n loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % LOG_FREQ == 0:\n print(f'{epoch} [{i}/{num_steps}]\\t'\n f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f'loss {losses.val:.4f} ({losses.avg:.4f})\\t'\n f'acc {avg_score.val:.4f} ({avg_score.avg:.4f})')\n\n print(f' * average acc on train {avg_score.avg:.4f}')\n return avg_score.avg\n\ndef inference(data_loader: Any, model: Any) -> np.array:\n ''' Returns predictions array. '''\n model.eval()\n\n predicts_list = []\n activation = nn.Softmax(dim=1)\n\n with torch.no_grad():\n for input_, target in data_loader:\n output = model(input_.cuda())\n output = activation(output)\n predicts_list.append(output.detach().cpu().numpy())\n\n predicts = np.concatenate(predicts_list)\n print('predicts', predicts.shape)\n return predicts\n\ndef validate(val_loader: Any, model: Any, epoch: int) -> float:\n ''' Infers predictions and calculates validation score. '''\n print('validate()')\n val_pred = inference(val_loader, model)\n\n metric = MeanAveragePrecisionCalculator()\n\n val_true = val_loader.dataset.labels\n val_scores = val_loader.dataset.scores\n\n assert val_true.size == val_pred.shape[0]\n\n masks = np.eye(NUM_CLASSES)[val_true] # convert to one-hot encoding\n actuals = masks * np.expand_dims(val_scores, axis=-1)\n\n metric.accumulate(val_pred, actuals, masks)\n score = metric()\n\n print(f' * epoch {epoch} validation score: {score:.4f}')\n return score\n\n# In my pipeline, there is a single inference function for both validation and test set prediction.\n# But I had to copy-paste this function here to add some hacks to bypass\n# Kaggle kernel memory restrictions.\ndef inference_for_testset(test_predicts: np.array, data_loader: Any, model: Any) -> np.array:\n ''' Returns predictions array. 
'''\n model.eval()\n\n ids_list: List[str] = []\n activation = nn.Softmax(dim=1)\n\n with torch.no_grad():\n for i, (input_, ids) in enumerate(data_loader):\n output = model(input_.cuda())\n output = activation(output)\n\n ids_list.extend(ids)\n pred = output.detach().cpu().numpy()\n bs = data_loader.batch_size\n test_predicts[i * bs : i * bs + pred.shape[0]] += pred\n\n ids = np.array(ids_list)\n print('ids', ids.shape)\n return ids\n\n\nclass SwishActivation(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.sigmoid(x) * x\n\nclass ClassifierModel(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n layers: List[nn.Module] = []\n width = FEATURES_DIM\n\n for num_neurons in [2765, 1662]:\n layers.append(nn.Linear(width, num_neurons))\n width = num_neurons\n\n layers.append(nn.BatchNorm1d(width))\n layers.append(SwishActivation())\n\n layers.append(nn.Linear(width, NUM_CLASSES))\n self.layers = nn.Sequential(*layers)\n self.avg_pool = nn.AdaptiveAvgPool1d(1)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = torch.transpose(x, 1, 2)\n x = self.avg_pool(x).view(x.size(0), -1)\n x = self.layers(x)\n return x\n\n\ndef get_model_path(fold_num: int) -> str:\n return f'best_model_fold_{fold_num}.pth'\n\ndef train_model(fold_num: int) -> float:\n print('=' * 80)\n print(f'training a model, fold {fold_num}')\n\n model = ClassifierModel()\n model.cuda()\n\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)\n\n train_loader, val_loader = load_train_data(fold_num)\n lr_scheduler = lr_sched.ReduceLROnPlateau(optimizer, mode='max', factor=LR_FACTOR,\n patience=LR_PATIENCE, threshold=LR_THRESHOLD,\n min_lr=LR_MINIMUM)\n\n last_epoch = -1\n print(f'training will start from epoch {last_epoch + 1}')\n\n best_score = 0.0\n best_epoch = 0\n\n last_lr = get_lr(optimizer)\n best_model_path = None\n\n for epoch in range(last_epoch + 1, NUM_EPOCHS):\n print('-' * 50)\n lr = get_lr(optimizer)\n\n # if we have just reduced LR, reload the best saved model\n if lr < last_lr - 1e-10 and best_model_path is not None:\n print(f'learning rate dropped: {lr}, reloading')\n last_checkpoint = torch.load(best_model_path)\n\n model.load_state_dict(last_checkpoint['state_dict'])\n optimizer.load_state_dict(last_checkpoint['optimizer'])\n print(f'checkpoint loaded: {best_model_path}')\n set_lr(optimizer, lr)\n last_lr = lr\n\n train_epoch(train_loader, model, criterion, optimizer, epoch, lr_scheduler)\n score = validate(val_loader, model, epoch)\n\n lr_scheduler.step(metrics=score)\n\n is_best = score > best_score\n best_score = max(score, best_score)\n if is_best:\n best_epoch = epoch\n\n if is_best:\n best_model_path = get_model_path(fold_num)\n\n data_to_save = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n\n torch.save(data_to_save, best_model_path)\n print(f'a snapshot was saved to {best_model_path}')\n\n print(f'best score: {best_score:.04f}')\n return -best_score\n\ndef predict_with_model(test_predicts: np.array, fold_num: int) -> np.array:\n print(f'predicting on the test set, fold {fold_num}')\n\n model = ClassifierModel()\n model.cuda()\n\n best_model_path = get_model_path(fold_num)\n\n if os.path.exists(best_model_path):\n print(f'loading checkpoint: {best_model_path}')\n last_checkpoint = torch.load(best_model_path)\n 
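# (Editor's note) last_checkpoint is the dict saved by train_model() above:\n        # {'epoch', 'state_dict', 'optimizer'}; inference needs only the weights\n        # ('state_dict') plus the epoch number for logging.\n        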
model.load_state_dict(last_checkpoint['state_dict'])\n\n last_epoch = last_checkpoint['epoch']\n print(f'loaded the model from epoch {last_epoch}')\n os.unlink(best_model_path)\n\n output = inference_for_testset(test_predicts, test_loader, model)\n return output\n\ndef generate_submission(ids: np.array, probas: np.array) -> None:\n last_id, current_seg = None, 0\n segment_start_times = []\n print('generating submission')\n\n for video_id in ids:\n if video_id == last_id:\n current_seg += 1\n else:\n current_seg = 0\n last_id = video_id\n\n segment_start_times.append(current_seg * 5)\n\n labels = pd.read_csv('/kaggle/input/youtube8m-2019/vocabulary.csv')\n classes_table = labels.Index\n\n assert probas.shape[1] == NUM_CLASSES\n max_predicts = 10 ** 5\n\n with open('submission.csv', 'w') as fout:\n fout.write('Class,Segments\\n')\n\n for i in range(NUM_CLASSES):\n current_probas = probas[:, i]\n sorted_idx = np.argsort(current_probas)[::-1][:max_predicts]\n current_line = [\n '{}:{}'.format(ids[idx], segment_start_times[idx])\n for idx in sorted_idx\n ]\n\n fout.write('{},{}\\n'.format(classes_table[i], ' '.join(current_line)))\n\n print('submission has been generated')\n\nif __name__ == '__main__':\n tf.compat.v1.enable_eager_execution()\n\n convert_val_data('val', '/kaggle/input/youtube-challenge-2019-val/val/validate*.tfrecord')\n test_loader = load_test_data('/kaggle/input/youtubechallenge2019test/test/test*.tfrecord')\n test_predicts = np.zeros((2038114, 1000), dtype=np.float16)\n\n for fold_idx in range(NUM_MODELS):\n train_model(fold_idx)\n test_ids = predict_with_model(test_predicts, fold_idx)\n\n test_predicts /= NUM_MODELS\n generate_submission(test_ids, test_predicts)","repo_name":"seculayer/AutoAPE-challenge4","sub_path":"kaggle/youtube8m-2019/youtube8m-2019.py","file_name":"youtube8m-2019.py","file_ext":"py","file_size_in_byte":23965,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"57"} +{"seq_id":"22557870745","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass GumbelVectorQuantizer(nn.Module):\n def __init__(\n self,\n in_dim,\n codebook_size,\n temp,\n embedding_dim,\n time_first=True,\n groups=1,\n combine_groups=False,\n activation=nn.GELU(),\n weight_proj_depth=1,\n weight_proj_factor=1,\n only_predict_alpha=True,\n ):\n \"\"\"Vector quantization using gumbel softmax\n\n Args:\n in_dim: input dimension (channels)\n codebook_size: number of quantized vectors per group\n temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)\n groups: number of groups for vector quantization\n combine_groups: whether to use the vectors for all groups\n embedding_dim: dimensionality of the resulting quantized vector\n time_first: if true, expect input in BxTxC format, otherwise in BxCxT\n activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1\n weight_proj_depth: number of layers (with activation in between) to project input before computing logits\n weight_proj_factor: this is used only if weight_proj_depth is > 1. 
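# Illustrative sketch: the quantizer documented above relies on the
# straight-through gumbel-softmax trick -- F.gumbel_softmax(..., hard=True)
# emits a one-hot vector in the forward pass but keeps soft gradients, so the
# codebook lookup below stays differentiable end to end.
import torch
import torch.nn.functional as F

logits = torch.randn(2, 8, requires_grad=True)  # 2 items, 8 codebook entries
onehot = F.gumbel_softmax(logits, tau=2.0, hard=True)
codebook = torch.randn(8, 16)                   # 8 code vectors of dim 16
quantized = onehot @ codebook                   # differentiable code selection
quantized.sum().backward()
assert logits.grad is not None                  # gradients reach the logits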
scales the inner dimensionality of\n projections by this factor\n \"\"\"\n super().__init__()\n\n self.groups = groups\n self.combine_groups = combine_groups\n self.input_dim = in_dim\n self.codebook_size = codebook_size\n self.time_first = time_first\n self.only_predict_alpha = only_predict_alpha\n\n if only_predict_alpha:\n self.symbol2idx = { # should correspond to symbols in fastpitch grapheme embedding table\n '_': 0, # padding\n ' ': 1, # whitespace\n }\n self.pad_idx = self.symbol2idx['_']\n self.whitespace_idx = self.symbol2idx[' ']\n\n assert (\n embedding_dim % groups == 0\n ), f\"dim {embedding_dim} must be divisible by groups {groups} for concatenation\"\n\n var_dim = embedding_dim // groups\n num_groups = groups if not combine_groups else 1\n\n self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * codebook_size, var_dim))\n nn.init.uniform_(self.vars)\n\n if weight_proj_depth > 1:\n\n def block(input_dim, output_dim):\n return nn.Sequential(nn.Linear(input_dim, output_dim), activation)\n\n inner_dim = self.input_dim * weight_proj_factor\n self.weight_proj = nn.Sequential(\n *[\n block(self.input_dim if i == 0 else inner_dim, inner_dim)\n for i in range(weight_proj_depth - 1)\n ],\n nn.Linear(inner_dim, groups * codebook_size),\n )\n else:\n self.weight_proj = nn.Linear(self.input_dim, groups * codebook_size)\n nn.init.normal_(self.weight_proj.weight, mean=0, std=1)\n nn.init.zeros_(self.weight_proj.bias)\n\n if isinstance(temp, str):\n import ast\n\n temp = ast.literal_eval(temp)\n assert len(temp) == 3, f\"{temp}, {len(temp)}\"\n\n self.max_temp, self.min_temp, self.temp_decay = temp\n self.curr_temp = self.max_temp\n self.codebook_indices = None\n\n def set_num_updates(self, num_updates):\n self.curr_temp = max(\n self.max_temp * self.temp_decay**num_updates, self.min_temp\n )\n\n def get_codebook_indices(self):\n if self.codebook_indices is None:\n from itertools import product\n\n p = [range(self.codebook_size)] * self.groups\n inds = list(product(*p))\n self.codebook_indices = torch.tensor(\n inds, dtype=torch.long, device=self.vars.device\n ).flatten()\n\n if not self.combine_groups:\n self.codebook_indices = self.codebook_indices.view(\n self.codebook_size**self.groups, -1\n )\n for b in range(1, self.groups):\n self.codebook_indices[:, b] += self.codebook_size * b\n self.codebook_indices = self.codebook_indices.flatten()\n return self.codebook_indices\n\n def codebook(self):\n indices = self.get_codebook_indices()\n return (\n self.vars.squeeze(0)\n .index_select(0, indices)\n .view(self.codebook_size**self.groups, -1)\n )\n\n def sample_from_codebook(self, b, n):\n indices = self.get_codebook_indices()\n indices = indices.view(-1, self.groups)\n cb_size = indices.size(0)\n assert (\n n < cb_size\n ), f\"sample size {n} is greater than size of codebook {cb_size}\"\n sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))\n indices = indices[sample_idx]\n\n z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)\n return z\n\n def to_codebook_index(self, indices):\n res = indices.new_full(indices.shape[:-1], 0)\n for i in range(self.groups):\n exponent = self.groups - i - 1\n res += indices[..., i] * (self.codebook_size**exponent)\n return res\n\n def trainable_parameters(self):\n \"\"\"return the model parameters that we wish to update for respeller training\n note that we ignore self.vars as we wish it to be initialised and frozen to the grapheme\n embedding table from the TTS model\"\"\"\n trainable_parameters = []\n for name, param in 
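# Illustrative sketch: set_num_updates above anneals the gumbel temperature
# with a clamped exponential decay. With a hypothetical temp=(2.0, 0.5, 0.999)
# schedule it behaves like this:
max_temp, min_temp, decay = 2.0, 0.5, 0.999
for num_updates in (0, 1000, 5000):
    curr_temp = max(max_temp * decay ** num_updates, min_temp)
    print(num_updates, round(curr_temp, 4))     # 2.0, then 0.7354, then 0.5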
self.named_parameters():\n if name != 'vars':\n trainable_parameters.append(param)\n return trainable_parameters\n\n def forward_idx(self, x):\n res = self.forward(x, produce_targets=True)\n return res[\"x\"], res[\"targets\"]\n\n def forward(self, x, produce_targets=False):\n\n result = {\"codebook_size\": self.codebook_size * self.groups}\n\n if not self.time_first:\n x = x.transpose(1, 2)\n\n bsz, tsz, fsz = x.shape\n x = x.reshape(-1, fsz)\n x = self.weight_proj(x)\n x = x.view(bsz * tsz * self.groups, -1)\n\n _, k = x.max(-1)\n hard_x = (\n x.new_zeros(*x.shape)\n .scatter_(-1, k.view(-1, 1), 1.0)\n .view(bsz * tsz, self.groups, -1)\n )\n hard_probs = torch.mean(hard_x.float(), dim=0)\n result[\"code_perplexity\"] = torch.exp(\n -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)\n ).sum()\n\n avg_probs = torch.softmax(\n x.view(bsz * tsz, self.groups, -1).float(), dim=-1\n ).mean(dim=0)\n result[\"prob_perplexity\"] = torch.exp(\n -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)\n ).sum()\n\n result[\"temp\"] = self.curr_temp\n\n if self.only_predict_alpha:\n # set logits for all non alpha symbols to 0, i.e. padding and whitespace\n # print(f\"debug only_predict_alpha {x.size()=}\")\n x[:,self.pad_idx] = 0\n x[:,self.whitespace_idx] = 0\n\n if self.training:\n x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)\n else:\n x = hard_x\n\n # print(f\"DEBUG, gumbel {self.training=}\")\n # print(f\" gumbel {x=}\")\n\n x = x.view(bsz * tsz, -1)\n\n vars = self.vars\n if self.combine_groups:\n vars = vars.repeat(1, self.groups, 1)\n\n if produce_targets:\n result[\"targets\"] = (\n x.view(bsz * tsz * self.groups, -1)\n .argmax(dim=-1)\n .view(bsz, tsz, self.groups)\n .detach()\n )\n\n x = x.unsqueeze(-1) * vars\n x = x.view(bsz * tsz, self.groups, self.codebook_size, -1)\n x = x.sum(-2)\n x = x.view(bsz, tsz, -1)\n\n if not self.time_first:\n x = x.transpose(1, 2) # BTC -> BCT\n\n result[\"x\"] = x\n\n return result\n","repo_name":"jonojace/respeller","sub_path":"modules/gumbel_vector_quantizer.py","file_name":"gumbel_vector_quantizer.py","file_ext":"py","file_size_in_byte":8069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26420110874","text":"import sys\nimport tqdm\nimport scipy\nimport numpy as np\nimport os\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.metrics import roc_auc_score\n\ntrain_access_pattern = []\nval_access_pattern = []\n\nSAMPLE_LIMIT = 100000000\n#SAMPLE_LIMIT = 10000\n\nuser_features_processed = None\nuser_mappings = None\nad_features_process = None\nad_mappings = None\nuser_click_history = {}\ntrain_dataset = []\nval_dataset = []\n\nembedding_table_lengths = []\n\nsplit = .8\n\nclass RecModel(torch.nn.Module):\n\n def __init__(self, embedding_table_lengths, em_size=16):\n super().__init__()\n\n # First embedding table index is the ads embedding \n self.embedding_table_lengths = embedding_table_lengths\n self.tables = torch.nn.ModuleList([torch.nn.EmbeddingBag(x+1, em_size, mode=\"sum\") for x in embedding_table_lengths])\n\n self.em_size = em_size\n\n self.fc1 = torch.nn.Linear(241, 200)\n self.fc2 = torch.nn.Linear(200, 80)\n self.fc3 = torch.nn.Linear(80, 2) \n\n def forward(self, sparse_features, dense_features, user_click_history):\n\n num_embs = sparse_features.shape[1]\n indices = [1+sparse_features[:,i].reshape((-1, 1)) for i in range(num_embs)]\n embs = [self.tables[i](x) for i,x in enumerate(indices)]\n\n # Make sure index 0 of the 
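# Illustrative sketch of the padding trick used by RecModel.forward above:
# click ids are shifted by +1 so that the -1 fillers land on row 0 of the
# EmbeddingBag, which is re-zeroed on every forward pass and therefore
# contributes nothing to the summed bag.
import torch

bag = torch.nn.EmbeddingBag(6, 4, mode="sum")
bag.weight.data[0, :] = 0                  # row 0 serves as the padding code
history = torch.tensor([[2, 3, -1, -1]])   # -1 marks filler slots
summed = bag(history + 1)                  # shift ids into the [0, 6) range
assert torch.allclose(summed, bag.weight[3] + bag.weight[4])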
embeddings table for ads is 0\n self.tables[0].weight.data[0,:] = 0\n user_click_history_embeddings = self.tables[0](user_click_history+1)\n\n features = torch.cat(embs + [user_click_history_embeddings] + [dense_features], dim=1)\n\n x = features\n\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n\n return x\n\ndef process_features_list(ds):\n columns = len(ds[0])\n rows = len(ds)\n mappings = [{} for i in range(columns)]\n for column in range(columns):\n idx = 0\n for row in range(rows):\n if ds[row][column] not in mappings[column]:\n mappings[column][ds[row][column]] = idx\n idx += 1\n print(idx)\n\n processed = {}\n for row in ds:\n key = mappings[0][row[0]]\n processed_row = [mappings[i][x] for i,x in enumerate(row)]\n assert(key not in processed)\n processed[key] = processed_row\n return processed, mappings\n\ndef initialize():\n\n # Read ad features\n # adgroup_id,cate_id,campaign_id,customer,brand,price\n ad_features_raw = []\n with open(\"data/taobao/ad_feature.csv\", \"r\") as f:\n all_lines = f.readlines()[1:]\n for i, line in enumerate(all_lines):\n line = line.split(\",\")\n vals = [int(line[0]),\n int(line[1]),\n int(line[2]),\n int(line[3]),\n 0 if line[4].strip() == \"NULL\" else int(line[4]),\n float(line[5])]\n ad_features_raw.append(vals)\n\n # Read user features\n # userid,cms_segid,cms_group_id,final_gender_code,age_level,pvalue_level,shopping_level,occupation,new_user_class_level\n user_features_raw = []\n with open(\"data/taobao/user_profile.csv\", \"r\") as f:\n all_lines = f.readlines()[1:]\n for i, line in enumerate(all_lines):\n line = line.split(\",\")\n vals = [0 if x.strip() == \"\" else int(x) for x in line]\n user_features_raw.append(vals)\n\n global user_features_processed\n global user_mappings\n global ad_features_processed\n global ad_mappings\n user_features_processed, user_mappings = process_features_list(user_features_raw)\n ad_features_processed, ad_mappings = process_features_list(ad_features_raw)\n\n # Read ad click user access pattern\n # user,time_stamp,adgroup_id,pid,nonclk,clk\n global user_click_history\n dataset = []\n n_skipped = 0\n with open(\"data/taobao/raw_sample.csv\") as f:\n all_lines = f.readlines()[1:]\n LIM = min(SAMPLE_LIMIT, len(all_lines))\n\n for i, line in enumerate(all_lines):\n if i % 1000 == 0:\n print(f\"Reading taobao line={i}/{len(all_lines)}\")\n if i >= LIM:\n break\n vals = line.split(\",\")\n\n if int(vals[0]) not in user_mappings[0]:\n print(\"User profile not found... continuing\")\n n_skipped += 1\n continue\n\n if int(vals[2]) not in ad_mappings[0]:\n print(\"Ad profile not found... 
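# Illustrative sketch: process_features_list above builds one value->index
# vocabulary per column and re-encodes every row through it, keyed by the
# remapped id from column 0. Compact equivalent on toy rows:
rows = [[10, "a"], [20, "b"], [30, "a"]]
mappings = [{}, {}]
for col in range(2):
    for row in rows:
        mappings[col].setdefault(row[col], len(mappings[col]))
processed = {mappings[0][r[0]]: [mappings[i][v] for i, v in enumerate(r)]
             for r in rows}
assert processed == {0: [0, 0], 1: [1, 1], 2: [2, 0]}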
continuing\")\n n_skipped += 1\n continue\n\n # Obtain all sparse features\n # - User sparse features\n remapped_user_id = user_mappings[0][int(vals[0])]\n user_sparse_features = user_features_processed[remapped_user_id]\n # - Ad sparse features (everything except price)\n remapped_ad_id = ad_mappings[0][int(vals[2])]\n ad_sparse_features = [x for i,x in enumerate(ad_features_processed[remapped_ad_id]) if i != 5]\n # - Context sparse features\n # TODO (need to do remapping)\n all_sparse_features = ad_sparse_features + user_sparse_features \n\n # Obtain all dense features\n ad_dense_features = [x for i,x in enumerate(ad_features_processed[remapped_ad_id]) if i == 5]\n all_dense_features = ad_dense_features\n\n # Timestamp / target\n timestamp = int(vals[-3])\n click = int(vals[-1])\n\n # Add to dataset\n dataset.append((all_sparse_features, all_dense_features, timestamp, click))\n\n # Update user history\n if remapped_user_id not in user_click_history:\n user_click_history[remapped_user_id] = []\n user_click_history[remapped_user_id].append((remapped_ad_id, timestamp))\n\n print(\"Skipped\", n_skipped)\n global train_dataset\n global val_dataset\n split_indx = int(len(dataset)*split)\n train_dataset = dataset[:split_indx]\n val_dataset = dataset[split_indx:]\n\n # Obtain table lengths for each sparse feature index\n global embedding_table_lengths\n rows = len(dataset)\n columns = len(dataset[0][0])\n for column in range(columns):\n vals = [dataset[row][0][column] for row in range(rows)]\n print(max(vals))\n embedding_table_lengths.append(max(vals)+1)\n\n # Obtain train and val access pattern\n print(\"Extracting access pattern\")\n for i, (user_id, click_history) in enumerate(user_click_history.items()):\n hist = [x[0] for x in click_history]\n if i >= int(split*len(user_click_history)):\n val_access_pattern.append(hist)\n else:\n train_access_pattern.append(hist)\n\ndef evaluate_model(model, dataset, batch=64, pir_optimize=None):\n groundtruths, preds = [], []\n\n indices = list(range(len(dataset)))\n for b in range(0, len(indices), batch):\n \n points = [dataset[x] for x in indices[b:b+batch]]\n sparse_features = [x[0] for x in points]\n dense_features = [x[1] for x in points]\n timestamps = [x[2] for x in points]\n targets = [x[3] for x in points]\n\n # Get historical clicks\n user_ids = [x[5] for x in sparse_features]\n user_history = [get_user_history(user_ids[i], timestamps[i]) for i in range(len(user_ids))]\n\n sparse_features = torch.from_numpy(np.array(sparse_features)).long()\n dense_features = torch.from_numpy(np.array(dense_features)).float()\n user_history = torch.from_numpy(np.array(user_history)).long()\n targets = torch.from_numpy(np.array(targets)).long()\n\n sparse_features = sparse_features.to(next(model.parameters()).device)\n dense_features = dense_features.to(next(model.parameters()).device)\n user_history = user_history.to(next(model.parameters()).device)\n targets = targets.to(next(model.parameters()).device)\n\n ############################\n # PIR\n data_pir = []\n for bbatch in user_history:\n bbatch = bbatch.detach().cpu().numpy().tolist()\n n_fillers = bbatch.count(-1)\n bb = [x for x in bbatch if x != -1]\n if pir_optimize is not None:\n recovered, _ = pir_optimize.fetch(bb)\n else:\n recovered = bb\n # 9 is \n new_b = [x if x in recovered else -1 for x in bb]\n data_pir.append(new_b + [-1]*n_fillers)\n data_pir = np.array(data_pir)\n data_pir = torch.from_numpy(data_pir)\n data_pir = data_pir.to(next(model.parameters()).device)\n\n assert(data_pir.shape == 
user_history.shape)\n\n user_history= data_pir\n\n ############################ \n\n model.zero_grad()\n pred = model(sparse_features, dense_features, user_history)\n \n prob_click = F.softmax(pred, dim=1)[:,1]\n prob_click = prob_click.detach().cpu().numpy().flatten().tolist()\n\n preds += prob_click\n groundtruths += targets.detach().cpu().numpy().flatten().tolist()\n\n score = roc_auc_score(groundtruths, preds)\n return score\n\ndef get_user_history(user_id, timestamp):\n clicks = [x[0] for x in user_click_history[user_id] if x[1] < timestamp]\n L = 10000\n clicks = clicks[:L]\n if len(clicks) <= L:\n clicks += [-1]*(L-len(clicks))\n return clicks\n\ndef train_taobao_rec(epochs=100, batch=64):\n print(\"Training...\")\n\n model = RecModel(embedding_table_lengths)\n model.to(\"cuda\")\n loss = torch.nn.CrossEntropyLoss()\n optim = torch.optim.Adam(model.parameters())\n\n # Train on train users\n for epoch in range(epochs):\n print(f\"Epoch {epoch}/{epochs}\")\n indices = list(range(len(train_dataset)))\n np.random.shuffle(indices)\n\n train_loss = 0 \n for b in tqdm.tqdm(range(0, len(indices), batch)):\n points = [train_dataset[x] for x in indices[b:b+batch]]\n sparse_features = [x[0] for x in points]\n dense_features = [x[1] for x in points]\n timestamps = [x[2] for x in points]\n targets = [x[3] for x in points]\n\n # Get historical clicks\n user_ids = [x[5] for x in sparse_features]\n user_history = [get_user_history(user_ids[i], timestamps[i]) for i in range(len(user_ids))]\n\n sparse_features = torch.from_numpy(np.array(sparse_features)).long()\n dense_features = torch.from_numpy(np.array(dense_features)).float()\n user_history = torch.from_numpy(np.array(user_history)).long()\n targets = torch.from_numpy(np.array(targets)).long()\n\n sparse_features = sparse_features.to(\"cuda\")\n dense_features = dense_features.to(\"cuda\")\n user_history = user_history.to(\"cuda\")\n targets = targets.to(\"cuda\") \n\n model.zero_grad()\n pred = model(sparse_features, dense_features, user_history)\n output = loss(pred, targets)\n\n output.backward()\n optim.step()\n\n train_loss += output.detach().cpu().item()\n \n score = evaluate_model(model, val_dataset)\n #score = evaluate_model(model, train_dataset)\n print(\"Eval score\", score)\n\n torch.save(model, f\"recmodel_epoch={epoch}.pt\")\n\ndef evaluate(pir_optimize):\n dir_to_use = os.path.dirname(__file__)\n model = RecModel(embedding_table_lengths)\n #with open(f\"{dir_to_use}/recmodel_epoch=0.pt\", 'rb') as f: \n # model = torch.load(f)\n # pass\n model.to(\"cpu\")\n\n auc = evaluate_model(model, val_dataset, pir_optimize=pir_optimize)\n print(f\"AUC: {auc}\")\n return {\"auc\" : auc}\n \nif __name__==\"__main__\":\n initialize()\n #train_taobao_rec()\n evaluate(None)\n","repo_name":"facebookresearch/GPU-DPF","sub_path":"paper/experimental/batch_pir/modules/taobao_rec/taobao_rec_dataset_v2.py","file_name":"taobao_rec_dataset_v2.py","file_ext":"py","file_size_in_byte":11738,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"57"} +{"seq_id":"5983648252","text":"import click.testing\nimport pytest\n\nfrom hypermodern_python import console\n\n\n@pytest.fixture\ndef runner():\n return click.testing.CliRunner()\n\n\ndef test_main_succeeds(runner, mock_requests_get):\n result = runner.invoke(console.main)\n #assert result.exit_code == 0\n #assert \"Lorem Ipsum\" in result.output\n assert mock_requests_get.called\n\n@pytest.fixture\ndef mock_requests_get(mocker):\n mock = mocker.patch(\"requests.get\")\n 
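# Illustrative sketch: the fixture above targets call sites of the form
#     with requests.get(url) as response:
#         data = response.json()
# so chaining .return_value.__enter__.return_value.json.return_value plants
# the payload exactly where that `with` block reads it. Standalone version:
from unittest import mock

fake_get = mock.MagicMock()
fake_get.return_value.__enter__.return_value.json.return_value = {"title": "Lorem Ipsum"}
with fake_get("https://example.com") as response:   # hypothetical URL
    assert response.json()["title"] == "Lorem Ipsum"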
mock.return_value.__enter__.return_value.json.return_value = {\n \"title\": \"Lorem Ipsum\",\n \"extract\": \"Lorem ipsum dolor sit amet\",\n }\n return mock\n","repo_name":"astonmarcin/hypermodern-python","sub_path":"tests/test_console.py","file_name":"test_console.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20613207874","text":"import numpy as np\nimport time\n\nimport Constants as c\nimport Game\nimport Load\nimport MonteCarloTreeSearchAI\nimport PrintTime\n\ndef main():\n \n # Parameters\n nGames = 1000\n depth = 10\n search = 1000\n bEpsilon = 0.2\n bTemperature = 0.2\n wEpsilon = 0.2\n wTemperature = 0.2\n \n # Initialize\n print('Start MCTS benchmark!')\n print('[Total games: %d]' % (2 * nGames))\n print(\n '[Depth: %d, Search: %d, Black epsilon: %1.1f, Black temperature: %1.1f, White epsilon: %1.1f, White temperature: %1.1f]'\n % (depth, search, bEpsilon, bTemperature, wEpsilon, wTemperature))\n HCAI = []\n for i in range(1):\n HCAI.append(c.actions[1])\n for i in range(35):\n HCAI.append(c.actions[0])\n bMCTSAI = MonteCarloTreeSearchAI.AI(c.team2, c.team3, depth, bEpsilon, search, bTemperature)\n bMCTSvHC = 0\n wMCTSAI = MonteCarloTreeSearchAI.AI(c.team3, c.team2, depth, wEpsilon, search, wTemperature)\n wMCTSvHC = 0\n \n # Run the benchmark\n for iGame in range(nGames):\n if iGame % int(nGames / 10) == 0:\n print('Completed ' + str(int(100 * iGame / nGames)) + '%')\n game = Game.Game(c.team2, c.team3)\n while game.running:\n game.trainers[0].setNextAction(bMCTSAI.getAction(game.getState(c.amBlack)))\n game.trainers[1].setNextAction(HCAI[game.round])\n game.progress()\n if game.win[0]:\n bMCTSvHC += 1\n game = Game.Game(c.team2, c.team3)\n while game.running:\n game.trainers[0].setNextAction(HCAI[game.round])\n game.trainers[1].setNextAction(wMCTSAI.getAction(game.getState(c.amWhite)))\n game.progress()\n if game.win[1]:\n wMCTSvHC += 1\n print('Completed 100%')\n \n # Write the results\n resultsFile = open('MCTS_benchmark_results.txt', 'w')\n resultsFile.write('## Monte Carlo Tree Search\\n')\n resultsFile.write(\n 'Games: ' + str(nGames) + '\\n'\n + 'Depth: ' + str(depth) + '\\n'\n + 'Search: ' + str(search) + '\\n'\n + 'Black epsilon: ' + str(bEpsilon) + '\\n'\n + 'Black temperature: ' + str(bTemperature) + '\\n'\n + 'White epsilon: ' + str(wEpsilon) + '\\n'\n + 'White temperature: ' + str(wTemperature) + '\\n\\n')\n resultsFile.write('## Black \\n\\n')\n resultsFile.write('# Win percentage: \\n')\n resultsFile.write(str(100 * bMCTSvHC / nGames) + '%\\n\\n')\n resultsFile.write('## White \\n\\n')\n resultsFile.write('# Win percentage: \\n')\n resultsFile.write(str(100 * wMCTSvHC / nGames) + '%\\n\\n')\n resultsFile.close()\n\nif __name__ == '__main__':\n startTime = time.time()\n main()\n PrintTime.printTime(time.time() - startTime)\n\n#","repo_name":"Anaerob/SEEX30","sub_path":"GRB/MCTS_benchmark.py","file_name":"MCTS_benchmark.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71925954737","text":"import os\nimport random\nimport math\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn import metrics\n\nfrom utils import build_vocab, load_vocab_file, init_embedding_uniform, shuffle_data\nfrom utils import load_pad_data, build_vocab_with_seg, load_pad_data_with_seg\nfrom mvlstm import MVLSTM\n\nos.environ[\"CUDA_VISIBLE_DEVICE\"] = \"1,2,3\"\n\nparams = {\n 
\"embed_dim\": 128,\n \"k_max_num\": 10,\n \"hidden_size\": 50,\n \"seq_len\": 32,\n \"n_class\": 2,\n\n \"data_path\": \"/home/zhouzilong/deep_weapon/data/\",\n \"vocab_file\": None,\n \"model_path\": \"./checkpoints/\",\n \"model_name\": \"MVLSTM_2\",\n \"segmentation\": False,\n\n \"batch_size\": 256,\n \"num_epochs\": 1,\n \"learning_rate\": 1e-3,\n \"display_freq\": 100,\n\n \"task\": \"test\",\n }\n\ndef load(params):\n if params[\"vocab_file\"]:\n vocab_dict = load_vocab_file(params[\"vocab_file\"])\n else:\n if params[\"segmentation\"]:\n vocab_dict = build_vocab_with_seg([params[\"data_path\"]+\"train.txt\", \\\n params[\"data_path\"]+\"dev.txt\"])\n else:\n vocab_dict = build_vocab([params[\"data_path\"]+\"train.txt\", params[\"data_path\"]+\"test.txt\"])\n \n if params[\"segmentation\"]:\n if params[\"task\"] == \"train\":\n train_data = load_pad_data_with_seg(params[\"data_path\"]+\"train.txt\", vocab_dict, params[\"seq_len\"])\n else: \n train_data = None\n dev_data = load_pad_data_with_seg(params[\"data_path\"]+\"dev.txt\", vocab_dict, params[\"seq_len\"])\n else:\n if params[\"task\"] == \"train\":\n train_data = load_pad_data(params[\"data_path\"]+\"train.txt\", vocab_dict, params[\"seq_len\"])\n else:\n train_data = None\n dev_data = load_pad_data(params[\"data_path\"]+\"dev.txt\", vocab_dict, params[\"seq_len\"])\n\n embedding = init_embedding_uniform(len(vocab_dict)+10, params[\"embed_dim\"])\n\n return vocab_dict, train_data, dev_data, embedding\n\nclass ModelClassifier:\n\n def __init__(self, params, embedding):\n self.n_class = params[\"n_class\"]\n self.batch_size = params[\"batch_size\"]\n self.num_epochs = params[\"num_epochs\"]\n self.lr = params[\"learning_rate\"]\n self.model_path = params[\"model_path\"]\n self.model_name = params[\"model_name\"]\n self.display_freq = params[\"display_freq\"]\n\n self.model = MVLSTM(params, embedding)\n\n self.global_step = tf.Variable(0, trainable=False)\n lr = tf.train.exponential_decay(self.lr, self.global_step*self.batch_size, \\\n 100000, 0.95, staircase=True)\n self.optimizer = tf.train.RMSPropOptimizer(lr)\n gvs = self.optimizer.compute_gradients(self.model.loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gvs]\n self.train_op = self.optimizer.apply_gradients(capped_gvs, global_step=self.global_step)\n\n self.init = tf.global_variables_initializer()\n self.saver = tf.train.Saver()\n self.sess = None\n\n def get_minibatch(self, data, start_index, end_index):\n end_index = min(len(data[\"label\"]), end_index)\n mini_vec_0 = data[\"vec_0\"][start_index: end_index]\n mini_vec_1 = data[\"vec_1\"][start_index: end_index]\n mini_label = data[\"label\"][start_index: end_index]\n\n return mini_vec_0, mini_vec_1, mini_label\n\n def train(self, train_data, dev_data):\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n for epoch in range(self.num_epochs):\n print(\"------- epoch: %i --------\" % epoch)\n train_data = shuffle_data(train_data)\n\n total_batch = int(math.ceil(len(train_data[\"vec_0\"]) / self.batch_size))\n\n for i in range(total_batch):\n vec_0, vec_1, label = self.get_minibatch(\n train_data, self.batch_size * i, self.batch_size * (i + 1)\n )\n feed_dict = {\n self.model.sent_a: vec_0, \n self.model.sent_b: vec_1,\n self.model.label: label,\n }\n _ = self.sess.run(self.train_op, feed_dict)\n\n if i % self.display_freq == 0:\n auc, acc, loss = self.evaluate(dev_data)\n print(\"step: %i\\t auc: %f\\t acc: %f\\t loss: %f\" % (i, auc, acc, loss))\n print(\"saving in %s\" 
%self.model_path + self.modelname + \".ckpt\")\n self.saver.save(self.sess, self.model_path + self.model_name + \".ckpt\")\n\n def evaluate(self, dev_data):\n total_batch = int(math.ceil(len(dev_data[\"vec_0\"]) / self.batch_size))\n\n logits = np.empty((len(dev_data[\"vec_0\"]), self.n_class))\n for i in range(total_batch):\n vec_0, vec_1, label = self.get_minibatch(dev_data, \\\n self.batch_size * i, self.batch_size * (i + 1))\n feed_dict = {\n self.model.sent_a: vec_0,\n self.model.sent_b: vec_1,\n self.model.label: label,\n }\n logit, loss = self.sess.run([self.model.logits, self.model.loss], feed_dict)\n logits[self.batch_size * i: self.batch_size * (i + 1)] = logit\n\n out = logits[:, 1]\n pred = np.argmax(logits, axis=1)\n \n auc = metrics.roc_auc_score(dev_data[\"label\"], out)\n acc = metrics.accuracy_score(dev_data[\"label\"], pred)\n\n return auc, acc, loss\n\n def test(self, test_data):\n self.sess = tf.Session()\n self.saver.restore(self.sess, self.model_path + self.model_name + \".ckpt\")\n\n total_batch = int(math.ceil(len(test_data[\"vec_0\"]) / self.batch_size))\n\n logits = np.empty((len(test_data[\"vec_0\"]), self.n_class))\n for i in range(total_batch):\n vec_0, vec_1, label = self.get_minibatch(test_data, \\\n self.batch_size * i, self.batch_size * (i + 1))\n feed_dict = {\n self.model.sent_a: vec_0,\n self.model.sent_b: vec_1,\n }\n logit = self.sess.run(self.model.logits, feed_dict)\n logits[self.batch_size * i: self.batch_size * (i + 1)] = logit\n\n conf = self.softmax(logits)\n pred = np.argmax(logits, axis=1)\n if \"label\" in test_data.keys():\n auc = metrics.roc_auc_score(test_data[\"label\"], logits[:, 1])\n acc = metrics.accuracy_score(test_data[\"label\"], pred)\n print(\"TEST results: auc: %f \\t acc: %f\" %(auc, acc))\n\n #for c, p in zip(conf, pred):\n #print(c, p)\n\n return conf, pred\n\n @staticmethod\n def softmax(z):\n s = np.max(z, axis=1)\n s = s[:, np.newaxis]\n e_x = np.exp(z - s)\n div = np.sum(e_x, axis=1)\n div = div[:, np.newaxis]\n\n return e_x / div\n\n\nif __name__ == \"__main__\":\n vocab, train_data, dev_data, embedding = load(params)\n model_classifier = ModelClassifier(params, embedding)\n if params[\"task\"] == \"train\":\n model_classifier.train(train_data, dev_data)\n elif params[\"task\"] == \"test\":\n model_classifier.test(dev_data)\n else:\n raise NameError\n \n\n\n\n","repo_name":"jastfkjg/semantic-matching","sub_path":"mvlstm/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"16046709469","text":"# -*- coding: utf-8 -*-\nimport sys, logging, redis,time\nfrom watchdog.observers import Observer\nfrom watchdog.events import LoggingEventHandler, FileSystemEventHandler\nfrom . 
import watch_dispatch\nfrom django.conf import settings\nlogging.config.dictConfig(settings.LOGGING)\nlog = logging.getLogger('ownsearch.watch_filesystem')\n\nDISPATCHER=watch_dispatch.Index_Dispatch()\n\n# handles sync event actions, only modified \nclass MySyncHandler(FileSystemEventHandler):\n\t\n def on_any_event(self,event):\n #log.debug(event.event_type)\n #log.debug(f'{event.__dict__} at {time.time()}')\n\n if event.event_type=='created':\n DISPATCHER.process('created',event._src_path,None)\n\n elif event.event_type=='moved':\n DISPATCHER.process('moved',event._src_path,event._dest_path)\n \n elif event.event_type=='deleted':\n DISPATCHER.process('delete',event._src_path,None)\n \n elif event.event_type=='modified': \n DISPATCHER.process('modified',event._src_path,None)\n \ndef launch(path):\n observer = Observer()\n observer.schedule(MySyncHandler(), path, recursive=True)\n observer.start()\n# global DISPATCHER\n# DISPATCHER=watch_dispatch.Index_Dispatch()\n return observer\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n path = sys.argv[1] if len(sys.argv) > 1 else '.'\n# event_handler = LoggingEventHandler()\n observer=launch(path)\n \n \n try:\n while True:\n time.sleep(1)\n watch_dispatch.modify_check(5)\n watch_dispatch.task_check()\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n\n\n","repo_name":"StephenGrey/sleuth","sub_path":"wwwsearch/watcher/watch_filesystem.py","file_name":"watch_filesystem.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"17533999772","text":"s = input()\nt = input()\ndef prefix_func(s):\n n = len(s)\n pi = [0] * n\n for i in range(1, n):\n j = pi[i-1]\n while j > 0 and s[i] != s[j]:\n j = pi[j-1]\n if s[i] == s[j]:\n j += 1\n pi[i] = j\n return pi\ndef fast():\n pr = prefix_func(t + \"$\" + s)[len(t) + 1:]\n for i in range(len(pr)):\n if pr[i] == len(t):\n if not (len(s) - i - 1) % 2 and not (i - len(t) + 1) % 2:\n return \"YES\"\n return \"NO\"\nprint(fast())\n","repo_name":"maksonchik20/Codeforces","sub_path":"Индивидуальная олимпиада школьников по информатике и программированию 2023/Задача A. Плагиат кода.py","file_name":"Задача A. 
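# Illustrative sketch: the solution above is the classic KMP prefix function;
# pi[i] is the length of the longest proper prefix of s[:i+1] that is also its
# suffix, and scanning t + "$" + s for pi == len(t) finds every occurrence of
# t inside s (the parity checks then filter the valid cut positions).
def prefix_func(s):
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert prefix_func("abcab") == [0, 0, 0, 1, 2]
assert max(prefix_func("ab" + "$" + "xxabyy")) == 2   # "ab" occurs in the text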
Плагиат кода.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8512573676","text":"import pytest\r\nfrom requests.models import Response \r\nfrom drive_downloader.download_contents import Downloader\r\nfrom unittest import mock\r\nfrom unittest.mock import Mock\r\nimport random\r\n\r\n\r\ndef mocked_request_dot_get_with_bytes_response(flag=0):\r\n\r\n if flag == 0:\r\n data = bytes(b\"73 61 6d 70 6c 65 20 70 64 66 20 66 69 6c 65 20 66 6f 72 20 74 68 69 73 20 74 65 73 74 20\")\r\n else:\r\n data = str('73 61 6d 70 6c 65 20 70 64 66 20 66 69 6c 65 20 66 6f 72 20 74 68 69 73 20 74 65 73 74 20')\r\n mock_raw = Mock()\r\n mock_raw.headers = {'Content-Type': 'text/plain', 'Content-Disposition': 'attachment;filename=\"mockfile.txt\";filename*=UTF-8\\'\\'mockfile.txt', 'Content-Length': '89'}\r\n mock_raw.text = data\r\n mock_raw.status_code = 200\r\n\r\n def iter_content(size=2):\r\n mocked_data = data\r\n while mocked_data:\r\n mocked_chunk = data[:size]\r\n mocked_data = data[size:]\r\n yield mocked_chunk\r\n\r\n mock_raw.iter_content = iter_content\r\n return mock_raw\r\n\r\n\r\nclass TestDownloader():\r\n\r\n downaloder_links = [(\"https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\", 200), \r\n (\"https://drive.google.com/drive/folders/1g-UCIJBVn6FakY6Pkk4JJYwMljOBqJGk?usp=sharing\", 404),\r\n (\"https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4E/view?usp=sharing\", 404), \r\n ]\r\n\r\n retriever_link = [(\"https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\", \"1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE\"),\r\n (\"https://drive.google.com/file/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\", None) \r\n ]\r\n\r\n # This is to test the download functionality - download()\r\n @pytest.mark.parametrize(\"link, expected\", downaloder_links)\r\n def test_download(self, link, expected):\r\n returns = []\r\n obj = Downloader(link)\r\n sc, file_prop, rc = obj.download()\r\n assert rc == expected \r\n\r\n\r\n # This is to test the case where the ID is not pressent in the URL or the URL is not in the correct format - download()\r\n def test_download_with_exception(self):\r\n link = 'https://www.url.which/is/not/correct/d/'\r\n obj = Downloader(link)\r\n with pytest.raises(RuntimeError) as e:\r\n obj.download()\r\n assert str(e.value) == 'Not able to retrieve File ID'\r\n \r\n\r\n # This is to test the ID retriver form URL - get_id_from_url()\r\n @pytest.mark.parametrize(\"link, expected\", retriever_link)\r\n def test_get_id_from_url(self, link, expected):\r\n obj = Downloader(link)\r\n assert obj.get_id_from_url() == expected\r\n\r\n # What if the filename already exist - download() -- This test will fail as we are just passing the cloud file's name as the output filename\r\n def test_for_same_filename_already_exist(self):\r\n obj = Downloader(\"https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\")\r\n sc1, file_prop1, rc1 = obj.download()\r\n sc2, file_prop2, rc2 = obj.download()\r\n assert file_prop2['name'] != file_prop1['name'] \r\n\r\n @mock.patch('requests.Session.get', return_value=mocked_request_dot_get_with_bytes_response())\r\n def test_downlaoder_without_internet_bytes(self, mock_get):\r\n\r\n obj = Downloader(\"https://fake.domain/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\")\r\n rc, file_prop, sc = obj.download()\r\n assert sc == 200 and rc == 0\r\n\r\n # It will 
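# Illustrative sketch: note that iter_content in the mock above re-slices
# `data` rather than `mocked_data` on every pass, so it yields the first chunk
# forever. A chunker that actually advances through the buffer looks like:
def iter_content_fixed(data, size=2):
    while data:
        chunk, data = data[:size], data[size:]
        yield chunk

assert list(iter_content_fixed(b"abcde", 2)) == [b"ab", b"cd", b"e"]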
fail as the content is in string not bytes\r\n @mock.patch('requests.Session.get', return_value=mocked_request_dot_get_with_bytes_response(flag=1))\r\n def test_downlaoder_without_internet_string(self, mock_get):\r\n\r\n obj = Downloader(\"https://fake.domain/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\")\r\n rc, file_prop, sc = obj.download()\r\n \r\n\r\n def test_for_check_system_storage(self):\r\n obj = Downloader(\"https://fake.domain/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing\")\r\n with pytest.raises(RuntimeError) as e:\r\n obj.check_system_storage(random.randint(888888, 9999999) * 100000)\r\n #assert e == \"RuntimeError('Not enough space available')\"\r\n assert str(e.value) == 'Not enough space available'\r\n","repo_name":"sandipandutta21/gdrive_downloader_with_unittest","sub_path":"tests/test_downalod_contents.py","file_name":"test_downalod_contents.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70942373938","text":"'''Ejercicio 3.2.6¶\nEscribir un programa que cree un diccionario vacío y lo vaya llenado con información sobre una persona (por ejemplo nombre, edad, sexo, teléfono, correo electrónico, etc.)\nque se le pida al usuario. Cada vez que se añada un nuevo dato debe imprimirse el contenido del diccionario.'''\ndef datosPersonales():\n persona = {}\n continuar = True\n while continuar:\n return persona\n\nif __name__ == \"__main__\":\n persona = {}\n continuar = True\n #proceso\n while continuar:\n clave = input('¿Qué dato quieres introducir? ')\n valor = input(clave + ': ')\n persona[clave] = valor\n #salida\n print(persona)","repo_name":"IES-Rafael-Alberti/2324-u3-diccionarios-Albertopinero","sub_path":"src/ejercicio6.py","file_name":"ejercicio6.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28867472875","text":"\"\"\"Test filenames with human parsed correct results.\"\"\"\n\nTEST_COMIC_FIELDS = {\n \"series\": \"Long Series Name\",\n \"issue\": \"1\",\n \"year\": 2000,\n \"ext\": \"cbz\",\n}\nTEST_COMIC_FIELDS_VOL = {\n \"series\": \"Long Series Name\",\n \"volume\": 1,\n \"issue\": \"1\",\n \"year\": 2000,\n \"ext\": \"cbz\",\n}\nTEST_COMIC_VOL_ONLY = {\n \"series\": \"Long Series Name\",\n \"volume\": 1,\n \"year\": 2000,\n \"ext\": \"cbr\",\n}\n\nFNS = {\n \"Night of 1000 Wolves 001 (2013).cbz\": {\n \"series\": \"Night of 1000 Wolves\",\n \"issue\": \"1\",\n \"year\": 2013,\n \"ext\": \"cbz\",\n },\n \"19687 Sandman 53.cbz\": {\"series\": \"19687 Sandman\", \"issue\": \"53\", \"ext\": \"cbz\"},\n \"33475 OMAC v3 2.cbr\": {\n \"series\": \"33475 OMAC\",\n \"volume\": 3,\n \"issue\": \"2\",\n \"ext\": \"cbr\",\n },\n \"Long Series Name 001 (2000) Title (Source) (Releaser).cbz\": TEST_COMIC_FIELDS,\n \"Long Series Name #001 (2000) Title (Source) (Releaser).cbz\": TEST_COMIC_FIELDS,\n \"Long Series Name (2000) 001 Title (Source) (Releaser).cbz\": TEST_COMIC_FIELDS,\n \"Long Series Name (2000) #001 Title (Source) (Releaser).cbz\": TEST_COMIC_FIELDS,\n \"Long Series Name v1 (2000) #001 \"\n \"Title (Source) (Releaser).cbz\": TEST_COMIC_FIELDS_VOL,\n \"Long Series Name 001 (2000) (Source-Releaser).cbz\": TEST_COMIC_FIELDS,\n \"Long Series Name Vol 1 TPB \"\n \"(2000) (Source) (Releaser & Releaser-Releaser).cbr\": TEST_COMIC_VOL_ONLY,\n \"Ultimate Craziness (2019) (Digital) (Friends-of-Bill).cbr\": {\n \"series\": \"Ultimate 
Craziness\",\n \"year\": 2019,\n \"ext\": \"cbr\",\n },\n \"Jimmy Stocks Love Chain (2005) (digital) (The Magicians-Empire).cbr\": {\n \"series\": \"Jimmy Stocks Love Chain\",\n \"year\": 2005,\n \"ext\": \"cbr\",\n },\n \"Arkenstone Vol. 01 - The Smell of Burnt Toast (2020) (digital) (My-brother).cbr\": {\n \"series\": \"Arkenstone\",\n \"volume\": 1,\n \"year\": 2020,\n \"ext\": \"cbr\",\n },\n \"Bardude - The Last Thing I Remember.cbz\": {\n \"series\": \"Bardude The Last Thing I Remember\",\n \"ext\": \"cbz\",\n },\n \"Drunkguy - The Man Without Fear - 01.cbz\": {\n \"series\": \"Drunkguy The Man Without Fear\",\n \"issue\": \"1\",\n \"ext\": \"cbz\",\n },\n \"The_Arkenstone_v03_(2002)_(Digital)_(DR_&_Quenya-Elves).cbr\": {\n \"series\": \"The Arkenstone\",\n \"volume\": 3,\n \"year\": 2002,\n \"ext\": \"cbr\",\n },\n \"Kartalk v01 001 - Fear the Brakes (2004) (digital) (Son of Ultron-EMpire).cbr\": {\n \"series\": \"Kartalk\",\n \"volume\": 1,\n \"issue\": \"1\",\n \"year\": 2004,\n \"ext\": \"cbr\",\n },\n \"Kartalk Library Edition v01 (1992) (digital) (Son of Ultron-Empire).cbr\": {\n \"series\": \"Kartalk Library Edition\",\n \"volume\": 1,\n \"year\": 1992,\n \"ext\": \"cbr\",\n },\n \"Kind of Deadly v02 - Last Bullet (2006) (Digital) (Zone-Empire).cbr\": {\n \"series\": \"Kind of Deadly\",\n \"volume\": 2,\n \"year\": 2006,\n \"ext\": \"cbr\",\n },\n \"Jeremy John - A Big Long Title (2017) (digital-Minutement).cbz\": {\n \"series\": \"Jeremy John A Big Long Title\",\n \"year\": 2017,\n \"ext\": \"cbz\",\n },\n \"Jeremy John 001 (2006) (digital (Minutemen-Faessla).cbz\": {\n \"series\": \"Jeremy John\",\n \"issue\": \"1\",\n \"year\": 2006,\n \"ext\": \"cbz\",\n },\n \"Jeremy John 003 (2007) (4 covers) (digital) (Minutemen-Faessla).cbz\": {\n \"series\": \"Jeremy John\",\n \"issue\": \"3\",\n \"year\": 2007,\n \"ext\": \"cbz\",\n },\n \"Jeremy John v01 - Uninterested! 
(2007) (Digital) (Asgard-Empire).cbr\": {\n \"series\": \"Jeremy John\",\n \"volume\": 1,\n \"year\": 2007,\n \"ext\": \"cbr\",\n },\n \"King of Skittles 01 (of 05) (2020) (digital) (Son of Ultron-Empire).cbr\": {\n \"series\": \"King of Skittles\",\n \"issue\": \"1\",\n \"issue_count\": 5,\n \"year\": 2020,\n \"ext\": \"cbr\",\n },\n \"Darkwad 011 (2019) (Digital) (Zone-Empire).cbr\": {\n \"series\": \"Darkwad\",\n \"issue\": \"11\",\n \"year\": 2019,\n \"ext\": \"cbr\",\n },\n \"Darkwad by Carlos Zemo v01 - Knuckle Fight (2009) (Digital) (Zone-Empire).cbr\": {\n \"series\": \"Darkwad by Carlos Zemo\",\n \"volume\": 1,\n \"year\": 2009,\n \"ext\": \"cbr\",\n },\n \"The Walking Dead #002 (2003).cbz\": {\n \"series\": \"The Walking Dead\",\n \"issue\": \"2\",\n \"year\": 2003,\n },\n \"The Walking Dead #3.cbz\": {\n \"series\": \"The Walking Dead\",\n \"issue\": \"3\",\n },\n \"The Walking Dead 4.cbz\": {\n \"series\": \"The Walking Dead\",\n \"issue\": \"4\",\n },\n \"A Fractional Comic 1.1.cbz\": {\"series\": \"A Fractional Comic\", \"issue\": \"1.1\"},\n \"A Fractional Comic 8.54.cbz\": {\"series\": \"A Fractional Comic\", \"issue\": \"8.54\"},\n \"Earth X #½.cbz\": {\"series\": \"Earth X\", \"issue\": \".5\"},\n \"Avengers #001½.cbz\": {\"series\": \"Avengers\", \"issue\": \"1.5\"},\n \"The Amazing Spider-Man #78.BEY.cbz\": {\n \"series\": \"The Amazing Spider-Man\",\n \"issue\": \"78.BEY\",\n },\n \"The Amazing Spider-Man #54.LR.cbz\": {\n \"series\": \"The Amazing Spider-Man\",\n \"issue\": \"54.LR\",\n },\n \"Wolverine & the X-Men #27AU.cbz\": {\n \"series\": \"Wolverine & the X-Men\",\n \"issue\": \"27AU\",\n },\n \"Fantastic Four #5AU.cbz\": {\"series\": \"Fantastic Four\", \"issue\": \"5AU\"},\n}\n","repo_name":"ajslater/comicbox","sub_path":"tests/test_filenames.py","file_name":"test_filenames.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"6422311932","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport re\r\nfrom urllib.request import urlopen\r\n\r\nimport pywikibot\r\n\r\nfrom utils import (\r\n create_links_string,\r\n read_log,\r\n)\r\n\r\nREGEXP = r\"(https?://(?:www\\.)?sputnikmusic\\.com/(?:review/|album\\.php).+?)/?[\\s|}\\]#<>]\"\r\n\r\n\r\ndef check_user(link):\r\n try:\r\n return \"USER\" in urlopen(link).read().decode(\"iso-8859-1\")\r\n except:\r\n return False\r\n \r\n\r\ndef main():\r\n site = pywikibot.Site()\r\n sputnik = pywikibot.Page(site, u\"Проект:Музыка/Неавторитетные источники/Sputnikmusic\")\r\n whitelist = pywikibot.Page(site, u\"Проект:Музыка/Неавторитетные источники/Sputnikmusic/Whitelist\")\r\n good_links = set(whitelist.text.split())\r\n good_pages = set(re.findall(r\"\\[\\[(.+?)\\]\\]\", sputnik.text))\r\n bad_pages_count = int(re.findall(r\"Текущее количество: (\\d+)\", sputnik.text)[0])\r\n read_pages_count = 0\r\n \r\n for page in site.search(\"insource:\\\"sputnikmusic.com\\\"\", [0], content=True):\r\n if page.title() in good_pages:\r\n continue\r\n links = [re.sub(r\"http://\", \"https://\", link) for link in re.findall(REGEXP, page.text, flags=re.I) \r\n if re.sub(r\"http://\", \"https://\", link) not in good_links and check_user(link)]\r\n\r\n if links:\r\n bad_pages_count += 1\r\n links_string = create_links_string(links, page)\r\n sputnik.text = sputnik.text + '\\n' + links_string[:-1:]\r\n read_pages_count += 1\r\n read_log(read_pages_count)\r\n \r\n sputnik.text = re.sub(r\"Текущее количество: (\\d+)\", r\"Текущее 
количество: {}\".format(bad_pages_count), sputnik.text)\r\n sputnik.save(u\"обновление списка\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"MCPN/NPBot","sub_path":"outdated/sputnikmusic_using_search.py","file_name":"sputnikmusic_using_search.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13185891913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nProblem: Get binary representations of a number.\n\nApproach: \n 1. Recursive\n 2. Iterative\nURL: https://www.hackerrank.com/challenges/coin-change\n\nCreated on Sat Nov 18 16:01:21 2017\n@author: toran\n\n\"\"\"\n\n\ndef get_binary_recursive(n):\n \"\"\" Recursive.\"\"\"\n rem = []\n while n != 0:\n rem.append(n % 2)\n n = n // 2\n\n return \"\".join(map(str, rem[::-1]))\n\n\ndef get_binary_iterative(n):\n \"\"\"\n Iterative.\n Check each ith bit if its 0(off) or 1(on).\n bitwise AND of 1&1 will give 1 and else will be 0.\n 4 byte int i.e. 32 bits.\n \"\"\"\n bi = [0] * 32\n\n for i in range(32):\n if ((2 ** i) & n) > 0:\n bi[i] = 1\n\n return \"\".join(map(str, bi[::-1]))\n\n\nn = 10\nprint(get_binary_iterative(n))\nprint(get_binary_recursive(n))\n","repo_name":"toransahu/g4g","sub_path":"bit-manupulation/getBinaryOfNumber.py","file_name":"getBinaryOfNumber.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"304759526","text":"from django.db import models\nimport random\nimport time\nfrom django.contrib.auth.models import AbstractUser\n# Create your models here.\n\ndef user_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/user_/\n return 'UserImage/{0}/{1}'.format(time.strftime('%Y-%m-%d'),str(random.randint(0,99999999))+filename)\n\nclass User(AbstractUser):\n # 继承Auth user\n NickName = models.CharField(\n # 昵称\n # 不可为null,不可为空、设定字段名为'nickname'、最大长度为256\n null=False,\n blank=False,\n db_column = 'nickname',\n max_length=256\n ) \n\n ImageUrl = models.ImageField(\n # 头像地址\n # 不可为null、可以为空\n null=False,\n blank=False,\n db_column = 'imageurl',\n upload_to=user_directory_path\n\n\n ) \n\n PhoneNum = models.CharField(\n # 手机号\n # 不可为null、不可为空、独一无二\n null=False,\n blank=False,\n db_column = 'phonenum',\n max_length=256\n ) \n\n\n\n","repo_name":"Mars-408/Mung_Account","sub_path":"Django Code/Bill/User/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"20659414003","text":"\"\"\"\nDjango settings for the SODAR Core Example Site project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport environ\nimport os\n\nfrom projectroles.constants import get_sodar_constants\n\n\nSITE_PACKAGE = 'example_site'\n\nROOT_DIR = environ.Path(__file__) - 3\nAPPS_DIR = ROOT_DIR.path(SITE_PACKAGE)\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# .env file, should load only in development environment\nREAD_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)\n\nif READ_DOT_ENV_FILE:\n # Operating System Environment variables have precedence over variables\n # defined in the .env file, that is to say variables from the .env 
files\n # will only be used if not defined as environment variables.\n env_file = str(ROOT_DIR.path('.env'))\n env.read_env(env_file)\n\n# SITE CONFIGURATION\n# ------------------------------------------------------------------------------\n# Hosts/domain names that are valid for this site\nALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['*'])\n\n# APP CONFIGURATION\n# ------------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Useful template tags\n # 'django.contrib.humanize',\n # Admin\n 'django.contrib.admin',\n]\nTHIRD_PARTY_APPS = [\n 'crispy_forms', # Form layouts\n 'rules.apps.AutodiscoverRulesConfig', # Django rules engine\n 'djangoplugins', # Django plugins\n 'pagedown', # For markdown\n 'markupfield', # For markdown\n 'rest_framework', # For API views\n 'knox', # For token auth\n 'docs', # For the online user documentation/manual\n 'db_file_storage', # For filesfolders\n 'dal', # For user search combo box\n 'dal_select2',\n 'dj_iconify.apps.DjIconifyConfig', # Iconify for SVG icons\n 'django_saml2_auth', # SAML2 support\n]\n\n# Project apps\nLOCAL_APPS = [\n # Custom users app\n 'example_site.users.apps.UsersConfig',\n # SODAR Projectroles app\n 'projectroles.apps.ProjectrolesConfig',\n # SODAR Timeline app\n 'timeline.apps.TimelineConfig',\n # SODAR Filesfolders app\n 'filesfolders.apps.FilesfoldersConfig',\n # User Profile site app\n 'userprofile.apps.UserprofileConfig',\n # Admin Alerts site app\n 'adminalerts.apps.AdminalertsConfig',\n # App Alerts site app\n 'appalerts.apps.AppalertsConfig',\n # Site Info site app\n 'siteinfo.apps.SiteinfoConfig',\n # API Tokens site app\n 'tokens.apps.TokensConfig',\n # SODAR Taskflow backend app\n 'taskflowbackend.apps.TaskflowbackendConfig',\n # Background Jobs app\n 'bgjobs.apps.BgjobsConfig',\n # External Data Cache app\n 'sodarcache.apps.SodarcacheConfig',\n # Example project app\n 'example_project_app.apps.ExampleProjectAppConfig',\n # Example site app\n 'example_site_app.apps.ExampleSiteAppConfig',\n # Example backend app\n 'example_backend_app.apps.ExampleBackendAppConfig',\n]\n\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ------------------------------------------------------------------------------\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\n# MIGRATIONS CONFIGURATION\n# ------------------------------------------------------------------------------\nMIGRATION_MODULES = {'sites': 'example_site.contrib.sites.migrations'}\n\n# DEBUG\n# ------------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', False)\n\n# FIXTURE CONFIGURATION\n# ------------------------------------------------------------------------------\nFIXTURE_DIRS = (str(APPS_DIR.path('fixtures')),)\n\n# EMAIL CONFIGURATION\n# ------------------------------------------------------------------------------\nEMAIL_BACKEND = env(\n 'DJANGO_EMAIL_BACKEND',\n 
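# Illustrative sketch: the settings module above reads everything through
# django-environ's typed accessors, which fall back to the given defaults when
# a variable is unset (requires the django-environ package):
import environ

env = environ.Env()
debug = env.bool("DJANGO_DEBUG", default=False)          # "1"/"true" -> bool
hosts = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])  # comma-separated list
db = env.db("DATABASE_URL", default="postgres:///sodar_core")  # dict config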
default='django.core.mail.backends.smtp.EmailBackend',\n)\nEMAIL_SENDER = env('EMAIL_SENDER', default='noreply@example.com')\nEMAIL_SUBJECT_PREFIX = env('EMAIL_SUBJECT_PREFIX', default='')\n\n# MANAGER CONFIGURATION\n# ------------------------------------------------------------------------------\nADMINS = [(\"\"\"Admin User\"\"\", 'admin.user@example.com')]\n\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#managers\nMANAGERS = ADMINS\n\n# DATABASE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n# Uses django-environ to accept uri format\n# See: https://django-environ.readthedocs.io/en/latest/#supported-types\nDATABASES = {\n 'default': env.db('DATABASE_URL', default='postgres:///sodar_core')\n}\nDATABASES['default']['ATOMIC_REQUESTS'] = False\n\n# Set default auto field (for Django 3.2+)\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\n\n# Set django-db-file-storage as the default storage (for filesfolders)\nDEFAULT_FILE_STORAGE = 'db_file_storage.storage.DatabaseFileStorage'\n\n\n# GENERAL CONFIGURATION\n# ------------------------------------------------------------------------------\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Berlin'\n\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#language-code\nLANGUAGE_CODE = 'en-us'\n\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#use-i18n\nUSE_I18N = False\n\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#use-tz\nUSE_TZ = True\n\n# TEMPLATE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/3.2/ref/settings/#templates\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [str(APPS_DIR.path('templates'))],\n 'OPTIONS': {\n 'debug': DEBUG,\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n # Site context processors\n 'projectroles.context_processors.urls_processor',\n 'projectroles.context_processors.site_app_processor',\n 'projectroles.context_processors.app_alerts_processor',\n ],\n },\n }\n]\n\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\nSTATIC_ROOT = str(ROOT_DIR('staticfiles'))\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = [str(APPS_DIR.path('static'))]\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n]\n\n# Iconify SVG icons\nICONIFY_JSON_ROOT = os.path.join(STATIC_ROOT, 
'iconify')\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\nMEDIA_ROOT = str(APPS_DIR('media'))\nMEDIA_URL = '/media/'\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = 'config.urls'\nWSGI_APPLICATION = 'config.wsgi.application'\n\n# PASSWORD STORAGE SETTINGS\n# ------------------------------------------------------------------------------\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.Argon2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',\n 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher',\n]\n\n# PASSWORD VALIDATION\n# ------------------------------------------------------------------------------\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.'\n 'UserAttributeSimilarityValidator'\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.'\n 'MinimumLengthValidator'\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.'\n 'CommonPasswordValidator'\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.'\n 'NumericPasswordValidator'\n },\n]\n\n# AUTHENTICATION CONFIGURATION\n# ------------------------------------------------------------------------------\nAUTHENTICATION_BACKENDS = [\n 'rules.permissions.ObjectPermissionBackend', # For rules\n 'django.contrib.auth.backends.ModelBackend',\n]\n\n# Custom user app defaults\nAUTH_USER_MODEL = 'users.User'\nLOGIN_REDIRECT_URL = 'home'\nLOGIN_URL = 'login'\n\n# SLUGLIFIER\nAUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'\n\n# Location of root django.contrib.admin URL, use {% url 'admin:index' %}\nADMIN_URL = r'^admin/'\n\n\n# Celery\n# ------------------------------------------------------------------------------\nif USE_TZ:\n # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone\n CELERY_TIMEZONE = TIME_ZONE\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url\nCELERY_BROKER_URL = env.str('CELERY_BROKER_URL', 'redis://localhost:6379/0')\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend\nCELERY_RESULT_BACKEND = CELERY_BROKER_URL\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content\nCELERY_ACCEPT_CONTENT = ['json']\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer\nCELERY_TASK_SERIALIZER = 'json'\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer\nCELERY_RESULT_SERIALIZER = 'json'\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit\nCELERYD_TASK_TIME_LIMIT = 5 * 60\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit\nCELERYD_TASK_SOFT_TIME_LIMIT = 60\n\n\n# Django REST framework default auth classes\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.BasicAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n 'knox.auth.TokenAuthentication',\n )\n}\n\n# Knox settings\nTOKEN_TTL = None\n\n# Settings for HTTP AuthBasic\nBASICAUTH_REALM = 'Log in with user@DOMAIN and your password.'\nBASICAUTH_DISABLE = False\n\n\n# LDAP configuration\n# ------------------------------------------------------------------------------\n\n# 
Enable LDAP if configured\nENABLE_LDAP = env.bool('ENABLE_LDAP', False)\nENABLE_LDAP_SECONDARY = env.bool('ENABLE_LDAP_SECONDARY', False)\n\n# Alternative domains for detecting LDAP access by email address\nLDAP_ALT_DOMAINS = env.list('LDAP_ALT_DOMAINS', None, [])\n\nif ENABLE_LDAP:\n import itertools\n import ldap\n from django_auth_ldap.config import LDAPSearch\n\n # Default values\n LDAP_DEFAULT_CONN_OPTIONS = {ldap.OPT_REFERRALS: 0}\n LDAP_DEFAULT_FILTERSTR = '(sAMAccountName=%(user)s)'\n LDAP_DEFAULT_ATTR_MAP = {\n 'first_name': 'givenName',\n 'last_name': 'sn',\n 'email': 'mail',\n }\n\n # Primary LDAP server\n AUTH_LDAP_SERVER_URI = env.str('AUTH_LDAP_SERVER_URI', None)\n AUTH_LDAP_BIND_DN = env.str('AUTH_LDAP_BIND_DN', None)\n AUTH_LDAP_BIND_PASSWORD = env.str('AUTH_LDAP_BIND_PASSWORD', None)\n AUTH_LDAP_CONNECTION_OPTIONS = LDAP_DEFAULT_CONN_OPTIONS\n\n AUTH_LDAP_USER_SEARCH = LDAPSearch(\n env.str('AUTH_LDAP_USER_SEARCH_BASE', None),\n ldap.SCOPE_SUBTREE,\n LDAP_DEFAULT_FILTERSTR,\n )\n AUTH_LDAP_USER_ATTR_MAP = LDAP_DEFAULT_ATTR_MAP\n AUTH_LDAP_USERNAME_DOMAIN = env.str('AUTH_LDAP_USERNAME_DOMAIN', None)\n AUTH_LDAP_DOMAIN_PRINTABLE = env.str(\n 'AUTH_LDAP_DOMAIN_PRINTABLE', AUTH_LDAP_USERNAME_DOMAIN\n )\n\n AUTHENTICATION_BACKENDS = tuple(\n itertools.chain(\n ('projectroles.auth_backends.PrimaryLDAPBackend',),\n AUTHENTICATION_BACKENDS,\n )\n )\n\n # Secondary LDAP server (optional)\n if ENABLE_LDAP_SECONDARY:\n AUTH_LDAP2_SERVER_URI = env.str('AUTH_LDAP2_SERVER_URI', None)\n AUTH_LDAP2_BIND_DN = env.str('AUTH_LDAP2_BIND_DN', None)\n AUTH_LDAP2_BIND_PASSWORD = env.str('AUTH_LDAP2_BIND_PASSWORD', None)\n AUTH_LDAP2_CONNECTION_OPTIONS = LDAP_DEFAULT_CONN_OPTIONS\n\n AUTH_LDAP2_USER_SEARCH = LDAPSearch(\n env.str('AUTH_LDAP2_USER_SEARCH_BASE', None),\n ldap.SCOPE_SUBTREE,\n LDAP_DEFAULT_FILTERSTR,\n )\n AUTH_LDAP2_USER_ATTR_MAP = LDAP_DEFAULT_ATTR_MAP\n AUTH_LDAP2_USERNAME_DOMAIN = env.str('AUTH_LDAP2_USERNAME_DOMAIN')\n AUTH_LDAP2_DOMAIN_PRINTABLE = env.str(\n 'AUTH_LDAP2_DOMAIN_PRINTABLE', AUTH_LDAP2_USERNAME_DOMAIN\n )\n\n AUTHENTICATION_BACKENDS = tuple(\n itertools.chain(\n ('projectroles.auth_backends.SecondaryLDAPBackend',),\n AUTHENTICATION_BACKENDS,\n )\n )\n\n\n# SAML configuration\n# ------------------------------------------------------------------------------\n\n\nENABLE_SAML = env.bool('ENABLE_SAML', False)\nSAML2_AUTH = {\n # Required setting\n # Pysaml2 Saml client settings\n # See: https://pysaml2.readthedocs.io/en/latest/howto/config.html\n 'SAML_CLIENT_SETTINGS': {\n # Optional entity ID string to be passed in the 'Issuer' element of\n # authn request, if required by the IDP.\n 'entityid': env.str('SAML_CLIENT_ENTITY_ID', 'SODARcore'),\n 'entitybaseurl': env.str(\n 'SAML_CLIENT_ENTITY_URL', 'https://localhost:8000'\n ),\n # The auto(dynamic) metadata configuration URL of SAML2\n 'metadata': {\n 'local': [\n env.str('SAML_CLIENT_METADATA_FILE', 'metadata.xml'),\n ],\n },\n 'service': {\n 'sp': {\n 'idp': env.str(\n 'SAML_CLIENT_IPD',\n 'https://sso.hpc.bihealth.org/auth/realms/cubi',\n ),\n # Keycloak expects client signature\n 'authn_requests_signed': 'true',\n # Enforce POST binding which is required by keycloak\n 'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST',\n },\n },\n 'key_file': env.str('SAML_CLIENT_KEY_FILE', 'key.pem'),\n 'cert_file': env.str('SAML_CLIENT_CERT_FILE', 'cert.pem'),\n 'xmlsec_binary': env.str('SAML_CLIENT_XMLSEC1', '/usr/bin/xmlsec1'),\n 'encryption_keypairs': [\n {\n 'key_file': env.str('SAML_CLIENT_KEY_FILE', 'key.pem'),\n 
'cert_file': env.str('SAML_CLIENT_CERT_FILE', 'cert.pem'),\n }\n ],\n },\n # Custom target redirect URL after the user get logged in.\n # Defaults to /admin if not set. This setting will be overwritten if you\n # have parameter ?next= specificed in the login URL.\n 'DEFAULT_NEXT_URL': '/',\n # # Optional settings below\n # 'NEW_USER_PROFILE': {\n # 'USER_GROUPS': [], # The default group name when a new user logs in\n # 'ACTIVE_STATUS': True, # The default active status for new users\n # 'STAFF_STATUS': True, # The staff status for new users\n # 'SUPERUSER_STATUS': False, # The superuser status for new users\n # },\n # 'ATTRIBUTES_MAP': env.dict(\n # 'SAML_ATTRIBUTES_MAP',\n # default={\n # Change values to corresponding SAML2 userprofile attributes.\n # 'email': 'Email',\n # 'username': 'UserName',\n # 'first_name': 'FirstName',\n # 'last_name': 'LastName',\n # }\n # ),\n # 'TRIGGER': {\n # 'FIND_USER': 'path.to.your.find.user.hook.method',\n # 'NEW_USER': 'path.to.your.new.user.hook.method',\n # 'CREATE_USER': 'path.to.your.create.user.hook.method',\n # 'BEFORE_LOGIN': 'path.to.your.login.hook.method',\n # },\n # Custom URL to validate incoming SAML requests against\n # 'ASSERTION_URL': 'https://your.url.here',\n}\n\n\n# Logging\n# ------------------------------------------------------------------------------\n\n# Custom logging level\nLOGGING_LEVEL = env.str('LOGGING_LEVEL', 'DEBUG' if DEBUG else 'ERROR')\n\n# List of apps to include in logging\nLOGGING_APPS = env.list(\n 'LOGGING_APPS',\n default=[\n 'projectroles',\n 'siteinfo',\n 'sodarcache',\n 'taskflowbackend',\n 'timeline',\n ],\n)\n\n# Path for file logging. If not set, will log only to console\nLOGGING_FILE_PATH = env.str('LOGGING_FILE_PATH', None)\n\n\ndef set_logging(level=None):\n if not level:\n level = 'DEBUG' if DEBUG else 'ERROR'\n app_logger_config = {\n 'level': level,\n 'handlers': ['console', 'file'] if LOGGING_FILE_PATH else ['console'],\n 'propagate': True,\n }\n log_handlers = {\n 'console': {\n 'level': level,\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n }\n }\n if LOGGING_FILE_PATH:\n log_handlers['file'] = {\n 'level': level,\n 'class': 'logging.FileHandler',\n 'filename': LOGGING_FILE_PATH,\n 'formatter': 'simple',\n }\n return {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n }\n },\n 'handlers': log_handlers,\n 'loggers': {a: app_logger_config for a in LOGGING_APPS},\n }\n\n\nLOGGING = set_logging(LOGGING_LEVEL)\n\n\n# General site settings\n# ------------------------------------------------------------------------------\n\nSITE_TITLE = 'SODAR Core Example Site'\nSITE_SUBTITLE = env.str('SITE_SUBTITLE', 'Beta')\nSITE_INSTANCE_TITLE = env.str('SITE_INSTANCE_TITLE', 'SODAR Core Example')\n\n\n# Local App Settings\n# ------------------------------------------------------------------------------\n\n\n# Plugin settings\nENABLED_BACKEND_PLUGINS = env.list(\n 'ENABLED_BACKEND_PLUGINS',\n None,\n [\n 'appalerts_backend',\n 'sodar_cache',\n 'timeline_backend',\n 'example_backend_app',\n ],\n)\n\n# SODAR API settings\nSODAR_API_DEFAULT_VERSION = '0.1'\nSODAR_API_ALLOWED_VERSIONS = [SODAR_API_DEFAULT_VERSION]\nSODAR_API_MEDIA_TYPE = 'application/your.application+json'\nSODAR_API_DEFAULT_HOST = env.url(\n 'SODAR_API_DEFAULT_HOST', 'http://0.0.0.0:8000'\n)\n\n\n# Projectroles app settings\n\n# Remote access mode: SOURCE or TARGET\nPROJECTROLES_SITE_MODE = env.str('PROJECTROLES_SITE_MODE', 'SOURCE')\n\n# 
Enable or disable project creation if site is in TARGET mode\nPROJECTROLES_TARGET_CREATE = env.bool('PROJECTROLES_TARGET_CREATE', True)\n\n# Username of default admin for when regular users cannot be assigned to a task\nPROJECTROLES_DEFAULT_ADMIN = env.str('PROJECTROLES_DEFAULT_ADMIN', 'admin')\n\n# Allow showing and synchronizing local non-admin users\nPROJECTROLES_ALLOW_LOCAL_USERS = env.bool(\n 'PROJECTROLES_ALLOW_LOCAL_USERS', False\n)\n\n# Allow unauthenticated users to access public projects if set true\nPROJECTROLES_ALLOW_ANONYMOUS = env.bool('PROJECTROLES_ALLOW_ANONYMOUS', False)\n\n# General projectroles settings\nPROJECTROLES_DISABLE_CATEGORIES = env.bool(\n 'PROJECTROLES_DISABLE_CATEGORIES', False\n)\nPROJECTROLES_INVITE_EXPIRY_DAYS = env.int('PROJECTROLES_INVITE_EXPIRY_DAYS', 14)\nPROJECTROLES_SEND_EMAIL = env.bool('PROJECTROLES_SEND_EMAIL', False)\nPROJECTROLES_EMAIL_SENDER_REPLY = env.bool(\n 'PROJECTROLES_EMAIL_SENDER_REPLY', False\n)\n# Custom header and footer\nPROJECTROLES_EMAIL_HEADER = env.str('PROJECTROLES_EMAIL_HEADER', None)\nPROJECTROLES_EMAIL_FOOTER = env.str('PROJECTROLES_EMAIL_FOOTER', None)\n\nPROJECTROLES_ENABLE_SEARCH = env.bool('PROJECTROLES_ENABLE_SEARCH', True)\n\n# Optional projectroles settings\n# Sidebar icon size. Minimum=18, maximum=42.\nPROJECTROLES_SIDEBAR_ICON_SIZE = env.int('PROJECTROLES_SIDEBAR_ICON_SIZE', 36)\n# PROJECTROLES_SECRET_LENGTH = 32\n# PROJECTROLES_HELP_HIGHLIGHT_DAYS = 7\n# PROJECTROLES_SEARCH_PAGINATION = 5\n# Support for viewing the site in \"kiosk mode\" (under work, experimental)\n# PROJECTROLES_KIOSK_MODE = env.bool('PROJECTROLES_KIOSK_MODE', False)\n\nPROJECTROLES_HIDE_APP_LINKS = env.list('PROJECTROLES_HIDE_APP_LINKS', None, [])\n\n# Set limit for delegate roles per project (if 0, no limit is applied)\nPROJECTROLES_DELEGATE_LIMIT = env.int('PROJECTROLES_DELEGATE_LIMIT', 1)\n\n# Warn about unsupported browsers (IE)\nPROJECTROLES_BROWSER_WARNING = env.bool('PROJECTROLES_BROWSER_WARNING', True)\n\n# Disable default CDN JS/CSS includes to replace with your local files\nPROJECTROLES_DISABLE_CDN_INCLUDES = env.bool(\n 'PROJECTROLES_DISABLE_CDN_INCLUDES', False\n)\n\n# Inline HTML include to the head element of the base site template\nPROJECTROLES_INLINE_HEAD_INCLUDE = env.str(\n 'PROJECTROLES_INLINE_HEAD_INCLUDE', None\n)\n\n# Paths/URLs to optional global includes to supplement/replace default ones\nPROJECTROLES_CUSTOM_JS_INCLUDES = env.list(\n 'PROJECTROLES_CUSTOM_JS_INCLUDES', None, []\n)\nPROJECTROLES_CUSTOM_CSS_INCLUDES = env.list(\n 'PROJECTROLES_CUSTOM_CSS_INCLUDES', None, []\n)\n\n# Enable profiling for debugging/analysis\nPROJECTROLES_ENABLE_PROFILING = env.bool('PROJECTROLES_ENABLE_PROFILING', False)\nif PROJECTROLES_ENABLE_PROFILING:\n MIDDLEWARE += ['projectroles.middleware.ProfilerMiddleware']\n\n\n# Bgjobs app settings\nBGJOBS_PAGINATION = env.int('BGJOBS_PAGINATION', 15)\n\n\n# Timeline app settings\nTIMELINE_PAGINATION = env.int('TIMELINE_PAGINATION', 15)\n\n\n# Filesfolders app settings\nFILESFOLDERS_MAX_UPLOAD_SIZE = env.int('FILESFOLDERS_MAX_UPLOAD_SIZE', 10485760)\nFILESFOLDERS_MAX_ARCHIVE_SIZE = env.int(\n 'FILESFOLDERS_MAX_ARCHIVE_SIZE', 52428800\n)\nFILESFOLDERS_SERVE_AS_ATTACHMENT = env.bool(\n 'FILESFOLDERS_SERVE_AS_ATTACHMENT', False\n)\nFILESFOLDERS_LINK_BAD_REQUEST_MSG = env.str(\n 'FILESFOLDERS_LINK_BAD_REQUEST_MSG', 'Invalid request'\n)\n# Custom project list column example\nFILESFOLDERS_SHOW_LIST_COLUMNS = env.bool(\n 'FILESFOLDERS_SHOW_LIST_COLUMNS', True\n)\n\n\n# Adminalerts app 
settings\nADMINALERTS_PAGINATION = env.int('ADMINALERTS_PAGINATION', 15)\n\n\n# Appalerts app settings\nAPPALERTS_STATUS_INTERVAL = env.int('APPALERTS_STATUS_INTERVAL', 5)\n\n\n# Taskflow backend settings\nTASKFLOW_SODAR_SECRET = env.str('TASKFLOW_SODAR_SECRET', 'CHANGE ME!')\nTASKFLOW_TEST_MODE = False # Important! Disallow cleanup() command by default\n\n\n# SODAR constants\n# SODAR_CONSTANTS = get_sodar_constants(default=True)\n","repo_name":"bihealth/sodar-core-legacy","sub_path":"config/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":23782,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"57"} +{"seq_id":"16192381099","text":"import os\nimport sys\nsys.path.append(os.getcwd())\nimport json\nimport random\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nfrom torch import optim\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom qmugs.loss import vae_loss, mae\nfrom qmugs import utils as qmugs_utils\nfrom configs.datasets_config import get_dataset_info\nfrom qmugs.utils import QMugsDataset, QMugsDataLoader,\\\n preprocess_batch, compute_mean_mad, prop_key\n\nfrom vagrant.model import Vagrant\nfrom vagrant.utils import KLAnnealer\n\ndef train(rank, datasets, args):\n print('in rank {}'.format(rank))\n dist.init_process_group(backend='nccl', init_method='env://',\n world_size=args.n_gpus, rank=rank)\n torch.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(args.seed)\n args.device = torch.device('cuda:{}'.format(rank))\n print('rank {} initialized...'.format(rank))\n\n dataset_info = get_dataset_info('qmugs', remove_h=args.remove_h)\n transform = qmugs_utils.QMugsTransform(dataset_info, args.device)\n args.charge_scale = transform.max_charge\n args.in_node_nf = transform.atomic_number_list.shape[-1] * (args.charge_power + 1)\n args.in_edge_nf = 0\n raw_train, raw_string, raw_props = datasets\n dataset = QMugsDataset(raw_train, raw_string, raw_props, transform=transform)\n del raw_train, raw_string\n print('datasets created...')\n\n if len(args.properties) > 0:\n args.predict_property = True\n else:\n args.predict_property = False\n args.means, args.mads = compute_mean_mad(raw_props)\n sampler = torch.utils.data.distributed.DistributedSampler(dataset, num_replicas=args.n_gpus,\n rank=rank, shuffle=True)\n loader = QMugsDataLoader(dataset, batch_size=args.batch_size, sampler=sampler)\n print('rank {} iter created...'.format(rank))\n\n #### Build model\n model = Vagrant(args, predict_property=args.predict_property)\n state = model.state\n model = model.to(args.device)\n args.vocab_weights = args.vocab_weights.to(args.device)\n model = DDP(model, device_ids=[rank])\n model.train()\n\n #### Set up training helpers\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_epochs)\n kl_annealer = KLAnnealer(args.beta_init, args.beta, args.kl_anneal_stop, args.kl_anneal_start)\n n_epochs = 0\n\n for epoch in range(args.n_epochs):\n args.beta = kl_annealer(epoch)\n total = len(loader.dataset) // args.batch_size // args.n_gpus\n for i, data in enumerate(tqdm(loader, total=total)):\n nodes, atom_positions, edges, edge_attr, atom_mask,\\\n edge_mask, n_nodes, y_true, y0, y_mask, props, scaled_props = preprocess_batch(data, args)\n\n mu, 
logvar, y_logits, pred_props = model(h0=nodes, x=atom_positions, edges=edges,\n edge_attr=edge_attr, node_mask=atom_mask,\n edge_mask=edge_mask, n_nodes=n_nodes,\n y0=y0, y_mask=y_mask)\n loss, kld, bce, mses = vae_loss(y_true, y_logits, mu, logvar,\n pred_props, scaled_props,\n args.beta, args.vocab_weights, args.device)\n\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n if args.predict_property:\n prop = args.properties[0]\n mae_score = mae(props[0].detach().cpu().numpy().reshape(-1,1),\n ((args.mads[prop] * pred_props[0].detach().cpu()) + args.means[prop]).numpy())\n else:\n mae_score = 0\n\n log_file = open(args.log_fn, 'a')\n log_file.write('{},{},{},{},{},{},{}\\n'.format(epoch,\n i,\n np.round(loss.item(), 5),\n np.round(kld.item(), 5),\n np.round(bce.item(), 5),\n np.round(mses[0].item(), 5),\n np.round(mae_score, 5)))\n log_file.close()\n\n lr_scheduler.step()\n if rank == 0:\n n_epochs += 1\n state['epoch'] = n_epochs\n state['model_state_dict'] = model.state_dict()\n state['optimizer_state_dict'] = optimizer.state_dict()\n\n if n_epochs % args.save_freq == 0:\n ckpt_fn = '{}_{}.ckpt'.format(n_epochs, args.name)\n torch.save(state, os.path.join(args.ckpt_dir, ckpt_fn))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n ### I/O Parameters\n parser.add_argument('--name', default='vagrant_qmugs', type=str)\n parser.add_argument('--ckpt_dir', default='checkpoints', type=str)\n parser.add_argument('--save_freq', default=10, type=int)\n parser.add_argument('--test_freq', default=1, type=int)\n parser.add_argument('--log_freq', default=50, type=int)\n\n ### Model Hyperparameters\n parser.add_argument('--d_model', default=256, type=int) \n parser.add_argument('--d_latent', default=128, type=int)\n parser.add_argument('--properties', nargs='+', default=[]) \n\n ### Encoder Hyperparameters\n parser.add_argument('--n_enc', default=4, type=int) \n parser.add_argument('--pred_depth', default=3, type=int) \n parser.add_argument('--pred_width', default=256, type=int) \n parser.add_argument('--readout', default='sum', choices=['sum', 'mean'], type=str)\n parser.add_argument('--edge_attention', default=True, type=bool)\n\n ### Decoder Hyperparameters\n parser.add_argument('--n_dec', default=4, type=int)\n parser.add_argument('--d_ff', default=256, type=int)\n parser.add_argument('--n_heads', default=4, type=int)\n parser.add_argument('--p_dropout', default=0.0, type=float)\n\n ### Training Hyperparameters\n parser.add_argument('--n_epochs', default=100, type=int) \n parser.add_argument('--batch_size', default=128, type=int)\n parser.add_argument('--lr', default=1e-4, type=float)\n parser.add_argument('--beta', default=0.5, type=float)\n parser.add_argument('--beta_init', default=1e-8, type=float)\n parser.add_argument('--kl_anneal_start', default=0, type=int)\n parser.add_argument('--kl_anneal_stop', default=100, type=int)\n parser.add_argument('--weight_decay', default=1e-16, type=float)\n\n ### Data Hyperparameters\n parser.add_argument('--data_dir', default='./data/qmugs', type=str)\n parser.add_argument('--num_workers', default=0, type=int)\n parser.add_argument('--charge_power', default=2, type=int)\n parser.add_argument('--max_length', default=125, type=int)\n parser.add_argument('--remove_h', default=False, action='store_true')\n parser.add_argument('--max_heavy_atoms', default=50, type=int)\n\n ### Distributed args\n parser.add_argument('--port', default='12355', type=str)\n parser.add_argument('--local_rank', default=-1)\n args = 
parser.parse_args()\n\n # Set device and dtype\n args.cuda = torch.cuda.is_available()\n args.device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n args.dtype = torch.float32\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = args.port\n args.seed = random.randint(0, 2**32-1)\n args.n_gpus = torch.cuda.device_count()\n print(args)\n\n os.makedirs(args.ckpt_dir, exist_ok=True)\n args.ckpt_dir = os.path.join(args.ckpt_dir, args.name)\n os.makedirs(args.ckpt_dir, exist_ok=True)\n args.log_fn = os.path.join(args.ckpt_dir, 'log.txt')\n log_file = open(args.log_fn, 'a')\n log_file.write('epoch,batch,loss,kld,bce,mse,mae\\n')\n log_file.close()\n\n print('loading data...')\n args.properties = [prop_key[prop] for prop in args.properties]\n raw_train, raw_val, raw_test, raw_string, raw_props, args = qmugs_utils.load_datasets(args)\n raw_string = [arr.numpy() if isinstance(arr, torch.Tensor) else arr for arr in raw_string]\n datasets = [raw_train, raw_string, raw_props]\n print('data loaded...')\n\n mp.spawn(train, nprocs=args.n_gpus, args=(datasets, args,))\n","repo_name":"oriondollar/vagrant_en","sub_path":"scripts/train_qmugs_parallel.py","file_name":"train_qmugs_parallel.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"18845568520","text":"from Bio import SeqIO, SeqRecord\nimport argparse\n\n\ndef duplicate_remover(file):\n d = {}\n duplicate = []\n for record in SeqIO.parse(file, \"fasta\"):\n if record.seq in d.keys():\n print(\"duplicate found : \\n\" + record.id)\n duplicate.append(record.id)\n d[record.seq] = record.description\n return d, duplicate\n\n\ndef new_file_writer(file, d):\n with open(file, 'w') as f:\n for key in d.keys():\n data = SeqRecord.SeqRecord(key, id=d[key])\n SeqIO.write(data, f, \"fasta\")\n\n\ndef duplicate_file_writer(file,duplicate_list):\n with open(file,'w') as f:\n for double in duplicate_list:\n f.write(double + \"\\n\")\n\nif __name__ == '__main__':\n # Get argument and parse them\n parser = argparse.ArgumentParser()\n # file path :\n parser.add_argument(\"-f\", \"--filename\", required=True)\n args = parser.parse_args()\n\n filename = args.filename\n\n d, duplicate = duplicate_remover(filename)\n o_filename = filename[:-3] + \"2\" + filename[-3:]\n d_filename = filename[:-3] + \"_duplicate.txt\"\n new_file_writer(o_filename, d)\n duplicate_file_writer(d_filename, duplicate)\n\n","repo_name":"KrysVal/projetM2","sub_path":"etc/duplicate_remover.py","file_name":"duplicate_remover.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17372088740","text":"# %%\n# 生成词嵌入文件\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom gensim.models import Word2Vec, KeyedVectors\nfrom tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Dropout\nfrom tensorflow.keras.models import Model, Sequential\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom mail import mail\nimport os\nfrom keras.utils import to_categorical\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n# %%\n# f = open('tmp/userid_creative_ids.txt')\nf = open('word2vec/userid_creative_ids.txt')\nnum_creative_id = 2481135+1\ntokenizer = 
Tokenizer(num_words=num_creative_id)\ntokenizer.fit_on_texts(f)\nf.close()\n\n\n# %%\npath = \"word2vec/wordvectors_creative_id.kv\"\nwv = KeyedVectors.load(path, mmap='r')\n\n\n# %%\nf = open('word2vec/userid_creative_ids.txt')\nmax_len_creative_id = -1\nfor line in f:\n current_line_len = len(line.strip().split(' '))\n max_len_creative_id = max(max_len_creative_id, current_line_len)\nf.close()\n\n\n# %%\ncreative_id_tokens = list(wv.vocab.keys())\nembedding_dim = 128\nembedding_matrix = np.random.randn(len(creative_id_tokens)+1, 128)\ncnt = 0\nfor creative_id in creative_id_tokens:\n embedding_vector = wv[creative_id]\n if embedding_vector is not None:\n index = tokenizer.texts_to_sequences([creative_id])[0][0]\n embedding_matrix[index] = embedding_vector\n\n\n# %%\ndebug = True\nif debug:\n max_len_creative_id = 100\n# shape:(sequence长度,)\ninput_x = Input(shape=(None,))\n# cpus = tf.config.experimental.list_logical_devices('CPU')\n# with tf.device('cpu'):\n# emb = Embedding(input_dim=num_creative_id,\n# output_dim=128,\n# weights=[embedding_matrix],\n# trainable=False,\n# input_length=max_len_creative_id,\n# mask_zero=True)\n# x = emb(input_x)\nx = Embedding(input_dim=num_creative_id,\n output_dim=128,\n weights=[embedding_matrix],\n trainable=True,\n input_length=max_len_creative_id,\n mask_zero=True)(input_x)\nx = LSTM(1024, return_sequences=True)(x)\nx = LSTM(512, return_sequences=False)(x)\nx = Dense(128)(x)\nx = Dropout(0.5)(x)\noutput_y = Dense(10, activation='softmax')(x)\n\nmodel = Model([input_x], output_y)\n\n# model = Sequential([\n# Embedding(num_creative_id, 128,\n# weights=[embedding_matrix],\n# trainable=False,\n# input_length=None),\n# LSTM(1024),\n# Dense(1, activation='sigmoid')\n# ])\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\n\n# %%\n# 测试数据格式(batch_size, sequence长度)\ntest_data = np.array([1, 2, 3, 4]).reshape(1, -1)\nmodel.predict(test_data)\n\n\n# %%\ncreative_id_seq = []\nwith open('word2vec/userid_creative_ids.txt')as f:\n for text in f:\n creative_id_seq.append(text.strip())\n\n\n# %%\nif debug:\n sequences = tokenizer.texts_to_sequences(creative_id_seq[:900000//1])\nelse:\n sequences = tokenizer.texts_to_sequences(creative_id_seq)\n\nX_train = pad_sequences(sequences, maxlen=max_len_creative_id)\n\n# %%\n# 使用迭代器实现\n# X_train = pad_sequences(sequences, maxlen=max_len_creative_id)\n# %%\nuser_train = pd.read_csv(\n 'data/train_preliminary/user.csv').sort_values(['user_id'], ascending=(True,))\nY_gender = user_train['gender'].values\nY_age = user_train['age'].values\n\nY_age = Y_age-1\nY_gender = Y_gender - 1\n# %%\nif debug:\n Y_gender = Y_gender[:900000//1]\n Y_age = Y_age[:900000//1]\n Y_age = to_categorical(Y_age)\n# %%\ncheckpoint = ModelCheckpoint(\"tmp/age_epoch_{epoch:02d}.hdf5\", monitor='val_loss', verbose=0,\n save_best_only=False, mode='auto', period=1)\n# %%\ntry:\n model.fit(X_train,\n Y_age,\n validation_split=0.1,\n epochs=100,\n batch_size=512,\n callbacks=[checkpoint],\n )\n mail('train lstm for age done!!!')\nexcept Exception as e:\n e = str(e)\n mail('train lstm for age failed!!! 
' + e)\n# %%\n\nmodel.load_weights('tmp/age_epoch_01.hdf5')\n\n# %%\nif debug:\n sequences = tokenizer.texts_to_sequences(\n creative_id_seq[900000:])\nelse:\n sequences = tokenizer.texts_to_sequences(\n creative_id_seq[900000:])\n\nX_test = pad_sequences(sequences, maxlen=max_len_creative_id, padding='post')\n\n\n# %%\ny_pred = model.predict(X_test, batch_size=4096)\n\n\n# %%\ny_pred = np.argmax(y_pred, axis=1)\ny_pred = y_pred.flatten()\ny_pred = y_pred+1\n# %%\nres = pd.DataFrame({'predicted_age': y_pred})\nres.to_csv(\n 'data/ans/lstm_age.csv', header=True, columns=['predicted_age'], index=False)\n\n# %%\nmail('lstm predict age done!!!')\n\n# %%\nuser_id_test = pd.read_csv(\n 'data/test/clicklog_ad_user_test.csv').sort_values(['user_id'], ascending=(True,)).user_id.unique()\nans = pd.DataFrame({'user_id': user_id_test})\n\n# %%\ngender = pd.read_csv('data/ans/lstm_gender.csv')\nage = pd.read_csv('data/ans/lstm_age.csv')\n# %%\nans['predicted_gender'] = gender.predicted_gender\nans['predicted_age'] = age.predicted_age\nans.to_csv('data/ans/LSTM.csv', header=True, index=False,\n columns=['user_id', 'predicted_age', 'predicted_gender'])\n# %%\nmail('save ans to csv done!')\n# %%\n","repo_name":"sunlanchang/Tencent-Ads-Algo-Comp-2020","sub_path":"LSTM_age.py","file_name":"LSTM_age.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"38874151691","text":"import numpy as np\nfrom numpy.testing import (assert_equal,\n assert_array_equal, assert_array_almost_equal, assert_array_less, assert_,)\nimport pytest\n\nimport scipy.signal._wavelets as wavelets\n\n\nclass TestWavelets:\n def test_qmf(self):\n with pytest.deprecated_call():\n assert_array_equal(wavelets.qmf([1, 1]), [1, -1])\n\n def test_daub(self):\n with pytest.deprecated_call():\n for i in range(1, 15):\n assert_equal(len(wavelets.daub(i)), i * 2)\n\n def test_cascade(self):\n with pytest.deprecated_call():\n for J in range(1, 7):\n for i in range(1, 5):\n lpcoef = wavelets.daub(i)\n k = len(lpcoef)\n x, phi, psi = wavelets.cascade(lpcoef, J)\n assert_(len(x) == len(phi) == len(psi))\n assert_equal(len(x), (k - 1) * 2 ** J)\n\n def test_morlet(self):\n with pytest.deprecated_call():\n x = wavelets.morlet(50, 4.1, complete=True)\n y = wavelets.morlet(50, 4.1, complete=False)\n # Test if complete and incomplete wavelet have same lengths:\n assert_equal(len(x), len(y))\n # Test if complete wavelet is less than incomplete wavelet:\n assert_array_less(x, y)\n\n x = wavelets.morlet(10, 50, complete=False)\n y = wavelets.morlet(10, 50, complete=True)\n # For large widths complete and incomplete wavelets should be\n # identical within numerical precision:\n assert_equal(x, y)\n\n # miscellaneous tests:\n x = np.array([1.73752399e-09 + 9.84327394e-25j,\n 6.49471756e-01 + 0.00000000e+00j,\n 1.73752399e-09 - 9.84327394e-25j])\n y = wavelets.morlet(3, w=2, complete=True)\n assert_array_almost_equal(x, y)\n\n x = np.array([2.00947715e-09 + 9.84327394e-25j,\n 7.51125544e-01 + 0.00000000e+00j,\n 2.00947715e-09 - 9.84327394e-25j])\n y = wavelets.morlet(3, w=2, complete=False)\n assert_array_almost_equal(x, y, decimal=2)\n\n x = wavelets.morlet(10000, s=4, complete=True)\n y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]\n assert_array_almost_equal(x, y, decimal=2)\n\n x = wavelets.morlet(10000, s=4, complete=False)\n assert_array_almost_equal(y, x, decimal=2)\n y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]\n 
assert_array_almost_equal(x, y, decimal=2)\n\n x = wavelets.morlet(10000, w=3, s=5, complete=True)\n y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]\n assert_array_almost_equal(x, y, decimal=2)\n\n x = wavelets.morlet(10000, w=3, s=5, complete=False)\n assert_array_almost_equal(y, x, decimal=2)\n y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]\n assert_array_almost_equal(x, y, decimal=2)\n\n x = wavelets.morlet(10000, w=7, s=10, complete=True)\n y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]\n assert_array_almost_equal(x, y, decimal=2)\n\n x = wavelets.morlet(10000, w=7, s=10, complete=False)\n assert_array_almost_equal(x, y, decimal=2)\n y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]\n assert_array_almost_equal(x, y, decimal=2)\n\n def test_morlet2(self):\n with pytest.deprecated_call():\n w = wavelets.morlet2(1.0, 0.5)\n expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex)\n assert_array_equal(w, expected)\n\n lengths = [5, 11, 15, 51, 101]\n for length in lengths:\n w = wavelets.morlet2(length, 1.0)\n assert_(len(w) == length)\n max_loc = np.argmax(w)\n assert_(max_loc == (length // 2))\n\n points = 100\n w = abs(wavelets.morlet2(points, 2.0))\n half_vec = np.arange(0, points // 2)\n assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])\n\n x = np.array([5.03701224e-09 + 2.46742437e-24j,\n 1.88279253e+00 + 0.00000000e+00j,\n 5.03701224e-09 - 2.46742437e-24j])\n y = wavelets.morlet2(3, s=1/(2*np.pi), w=2)\n assert_array_almost_equal(x, y)\n\n def test_ricker(self):\n with pytest.deprecated_call():\n w = wavelets.ricker(1.0, 1)\n expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))\n assert_array_equal(w, expected)\n\n lengths = [5, 11, 15, 51, 101]\n for length in lengths:\n w = wavelets.ricker(length, 1.0)\n assert_(len(w) == length)\n max_loc = np.argmax(w)\n assert_(max_loc == (length // 2))\n\n points = 100\n w = wavelets.ricker(points, 2.0)\n half_vec = np.arange(0, points // 2)\n #Wavelet should be symmetric\n assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])\n\n #Check zeros\n aas = [5, 10, 15, 20, 30]\n points = 99\n for a in aas:\n w = wavelets.ricker(points, a)\n vec = np.arange(0, points) - (points - 1.0) / 2\n exp_zero1 = np.argmin(np.abs(vec - a))\n exp_zero2 = np.argmin(np.abs(vec + a))\n assert_array_almost_equal(w[exp_zero1], 0)\n assert_array_almost_equal(w[exp_zero2], 0)\n\n def test_cwt(self):\n with pytest.deprecated_call():\n widths = [1.0]\n def delta_wavelet(s, t):\n return np.array([1])\n len_data = 100\n test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)\n\n #Test delta function input gives same data as output\n cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)\n assert_(cwt_dat.shape == (len(widths), len_data))\n assert_array_almost_equal(test_data, cwt_dat.flatten())\n\n #Check proper shape on output\n widths = [1, 3, 4, 5, 10]\n cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)\n assert_(cwt_dat.shape == (len(widths), len_data))\n\n widths = [len_data * 10]\n #Note: this wavelet isn't defined quite right, but is fine for this test\n def flat_wavelet(l, w):\n return np.full(w, 1 / w)\n cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)\n assert_array_almost_equal(cwt_dat, np.mean(test_data))\n","repo_name":"scipy/scipy","sub_path":"scipy/signal/tests/test_wavelets.py","file_name":"test_wavelets.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","stars":11925,"dataset":"github-code","pt":"57"} 
+{"seq_id":"20100418421","text":"#!/usr/bin/env python\n\nfrom operator import add\nimport numpy as np\n\nclass AdSource:\n def setPlatform(self, platform):\n self.platform = platform\n\n def start(self):\n ad_charge_list = [\"900170201\", \"900180201\", \"900190201\"]\n hit_num = lambda v:[1 if v==x else 0 for x in ad_charge_list]\n\n cache_bs_rdd = self.get_bs_rdd().map(lambda line:[ line[40], line[55] ])\n cache_bs_rdd.cache()\n self.click1 = cache_bs_rdd.map(lambda line:line[1]).countByValue()\n self.click2 = cache_bs_rdd.filter(lambda line:line[0] in ad_charge_list).map(lambda line:(line[1], line[0])).mapValues(hit_num).mapValues(np.array).reduceByKey(add).mapValues(list).collectAsMap()\n cache_bs_rdd.unpersist()\n\n cache_fs_rdd = self.get_fs_rdd().filter(lambda line:line[23]=='0' or line[23]=='1').map(lambda line:[ line[4], line[17] ])\n cache_fs_rdd.cache()\n self.pv1 = cache_fs_rdd.map(lambda line:line[1]).countByValue()\n self.pv2 = cache_fs_rdd.filter(lambda line:line[0] in ad_charge_list).map(lambda line:(line[1], line[0])).mapValues(hit_num).mapValues(np.array).reduceByKey(add).mapValues(list).collectAsMap()\n cache_fs_rdd.unpersist()\n\n def stop(self):\n param = self.platform.param\n lv_date = self.platform.lv_date\n pv1, click1, pv2, click2 = self.pv1, self.click1, self.pv2, self.click2\n ad_name_list = [\"commweal_click_rate\", \"transparent_click_rate\", \"nonclick_click_rate\"]\n\n sid_set = set([sid for sid in pv2] + [sid for sid in click2])\n\n for i in xrange(len(ad_name_list)):\n file_name = ad_name_list[i]\n fp = open(param[\"result\"] + file_name, \"w\")\n for sid in sid_set:\n if pv2.get(sid, [0,0,0])[i] == 0 and click2.get(sid, [0,0,0])[i] == 0:\n continue\n tmp_list = [lv_date, sid, pv1.get(sid, 0), click1.get(sid ,0), pv2.get(sid, [0,0,0])[i], click2.get(sid, [0,0,0])[i]]\n tmp_list = [str(x) for x in tmp_list]\n fp.write(\"\\t\".join(tmp_list) + \"\\n\")\n fp.close()\n\n def get_fs_rdd(self):\n return self.platform.fs_rdd\n\n def get_bs_rdd(self):\n return self.platform.bs_rdd\n\n def get_bp_rdd(self):\n return self.platform.bp_rdd\n\ndef getPluginClass():\n return AdSource\n","repo_name":"sunyongmofang/data_flow_script","sub_path":"plugins/ad_source_type_stat/specail_ad.py","file_name":"specail_ad.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28284799909","text":"'''\nTODO\n\n * Change lines with // to #\n\nTranslate C code to python\n This script will do some of the work, but you'll still be left with\n some hand translation for pointer stuff and to handle gotos (so it\n works best on C code with no gotos and few pointers).\n\n Here's how the script works:\n\n * Comments are converted to python comments.\n * All lines have the ending ';' removed.\n * It is an error if any line subsequently contains a ';' character,\n as this might indicate more than one statement per line.\n * '&&' is changed to ' and ' and '||' is changed to ' or '.\n * 'else if' --> 'elif'\n * 'else' --> 'else:'\n * An 'if ()' statement is translated into 'if():'. 
Note the\n translation will be wrong if there are multiple tests inside the\n parentheses; these will cause a python error and be easy to find.\n\n Lines are otherwise left alone, so things like #include and #define\n statements will be in the resulting text.\n'''\nif 1: # Copyright, license\n # These \"trigger strings\" can be managed with trigger.py\n #∞copyright∞# Copyright (C) 2014 Don Peterson #∞copyright∞#\n #∞contact∞# gmail.com@someonesdad1 #∞contact∞#\n #∞license∞#\n # Licensed under the Open Software License version 3.0.\n # See http://opensource.org/licenses/OSL-3.0.\n #∞license∞#\n #∞what∞#\n # Translate C code to python\n #∞what∞#\n #∞test∞# #∞test∞#\n pass\nif 1: # Imports\n import sys\n import re\n from pdb import set_trace as xx\nif 1: # Custom imports\n from wrap import dedent\n from color import C\nif 1: # Global variables\n nl = \"\\n\"\n comment = re.compile(r\"/\\*(.*?)\\*/\", re.S)\n if_stmnt = re.compile(r\"if\\s*(\\(.*?\\))\", re.S)\n elif_stmnt = re.compile(r\"(else\\s+if)\")\n else_stmnt = re.compile(r\"(else)\")\n not_token = re.compile(r\"![^=]\")\ndef Error(msg, status=1):\n print(msg, file=sys.stderr)\n exit(status)\ndef Commentify(s):\n '''Split on newlines and prepend '#' to each line. Return as a\n string.'''\n out = []\n for line in s.split(nl):\n out.append(\"# \" + line)\n return nl.join(out)\ndef ConvertComments(s):\n '''Convert C comments to python comments and return the string.\n '''\n mo = comment.search(s)\n while mo:\n assert len(mo.groups()) == 1\n t = Commentify(mo.groups()[0])\n i, j = mo.start(), mo.end()\n s = s[:i] + t + s[j:]\n mo = comment.search(s)\n return s\ndef RemoveSemicolons(s, file):\n '''Note this also removes curly braces.\n '''\n out = []\n for i, line in enumerate(s.split(nl)):\n line = line.rstrip()\n if not line or line.lstrip()[0] == \"#\":\n out.append(line)\n continue\n if line[-1] in \";{}\":\n line = line[:-1]\n if not line.strip():\n continue\n if line.count(\";\") > 1 and \"for\" not in line:\n msg = \"More than one ';' in line {0} of '{1}'\"\n Error(msg.format(i + 1, file))\n if \";\" in line and \"for\" not in line:\n line = line.replace(\";\", \"\")\n out.append(line)\n return nl.join(out)\ndef FixIfs(s, file):\n '''Find and translate if statements:\n ! 
--> not\n && --> and\n || --> or\n '''\n out = []\n mo = if_stmnt.search(s)\n while mo:\n assert len(mo.groups()) == 1\n i, j = mo.start(), mo.end()\n out.append(s[:i])\n t = s[i:j] # The contents of the if statement's parentheses\n while not_token.search(t):\n t = not_token.sub(\"not \", t)\n t = t.replace(\"&&\", \" and \")\n t = t.replace(\"||\", \" or \")\n out.append(t + \":\")\n s = s[j:]\n mo = if_stmnt.search(s)\n out.append(s)\n return ''.join(out)\ndef FixElif(s, file):\n '''Change 'else if' to 'elif' and put a colon after 'else'.\n '''\n out = []\n for line in s.split(nl):\n t = line.lstrip()\n if not t or t[0] == \"#\":\n out.append(line)\n continue\n mo = elif_stmnt.search(line)\n if mo:\n i, j = mo.start(), mo.end()\n line = line[:i] + \"elif\" + line[j:]\n out.append(line)\n continue\n mo = else_stmnt.search(line)\n if mo:\n i, j = mo.start(), mo.end()\n line = line[:i] + \"else:\" + line[j:]\n out.append(line)\n continue\n out.append(line)\n return nl.join(out)\ndef Translate(file, d):\n '''Translate the given file and write it to a new file with '.py'\n appended.\n '''\n with open(file, \"r\") as fp:\n s = fp.read()\n s = s.replace(\"&&\", \" and \").replace(\"||\", \" or \")\n s = ConvertComments(s)\n s = RemoveSemicolons(s, file)\n s = FixIfs(s, file)\n s = FixElif(s, file)\n with open(file + \".py\", \"w\") as fp:\n fp.write(s)\ndef BugNotice():\n print(f\"{C.lcyn}\", end=\"\")\n print(dedent(f'''\n Bugs in implementation:\n * Need to handle while and do{{}}while statements too\n * if () to if(): can be wrong if there are multiple tests inside\n the parentheses\n * Pointer stuff has to be handled manually\n * goto statements not handled\n * You will still have to hand-translate stuff\n '''))\n print(f\"{C.norm}\", end=\"\")\nif __name__ == \"__main__\":\n BugNotice()\n d = {}\n if len(sys.argv) < 2:\n print(dedent(f'''\n Usage: {sys.argv[0]} file1 [file2...]\n Translates the indicated C file(s) to python. 
'.py' will be appended to\n the file's name.\n '''))\n for file in sys.argv[1:]:\n Translate(file, d)\n","repo_name":"someonesdad1/plib","sub_path":"pgm/c2py.py","file_name":"c2py.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29442861050","text":"import datetime\nimport json\nimport logging\nimport os\nimport time\n\nfrom django.contrib import messages\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.http import HttpResponseBadRequest, HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic.edit import FormView\n\nimport redis\nfrom util_shared.datetime_utils import get_utc_now\n\nfrom .forms import (\n SelectTagsForm, SelectUserInfoActionForm, SelectScrapeTasksForm,\n SendOneForm, ImportProfilesForm\n)\n\nfrom .models import (\n ApiQuotaPeriod,\n Tag,\n TwitterProfile,\n Tweet,\n TwitterProfileTagRel,\n ProfileFollowsProfileRel,\n Tweet,\n ProfileMentionedInTweet,\n ProfileMentionedInProfileDescription,\n RetweetRel,\n LikeRel\n)\nfrom twitter.util.ingestion import ingest_spreadsheet\nfrom twitter.util.redis_util import send_scrape_work, send_scrape_work__conversation\n\n\nlogger = logging.getLogger(__name__)\n\n\nSCRAPER_QUEUE_NAME = 'twint_twitter_items'\nREDIS_HOSTNAME = os.environ.get('REDIS_HOSTNAME', 'localhost')\nREDIS_PORT = os.environ.get('REDIS_PORT', '6379')\n\nWORK_TYPES = [\n # note: order is important because of SelectScrapeTasksView.form_valid()\n 'user_timeline',\n 'user_likes',\n 'friend_ids',\n 'follower_ids',\n 'user_info',\n 'tweet_likes',\n 'tweet_retweets',\n 'conversation_tweets'\n]\n\nPROFILE_RELATED_MODELS = [\n TwitterProfileTagRel,\n ProfileFollowsProfileRel,\n Tweet,\n ProfileMentionedInTweet,\n ProfileMentionedInProfileDescription,\n RetweetRel,\n LikeRel,\n]\n\n\n@require_http_methods([\"POST\"])\ndef merge_profiles_view(request):\n try:\n data = json.loads(request.body)\n except:\n return HttpResponseBadRequest('invalid request body, json parse failed')\n\n to_merge = data['to_merge']\n remove_profiles = data['remove']\n\n bf = datetime.datetime.now()\n for cls in PROFILE_RELATED_MODELS:\n cls.merge_profiles(to_merge)\n af = datetime.datetime.now()\n\n time_taken = (af - bf).total_seconds()\n print(f\"merged {len(to_merge)} profiles, took: {time_taken}\")\n\n if remove_profiles:\n to_remove = [tup[1] for tup in to_merge]\n for cls in PROFILE_RELATED_MODELS:\n cls.remove_profiles(to_remove)\n TwitterProfile.objects.filter(id__in=to_remove).delete()\n\n return HttpResponse('ok')\n\n\nclass ImportProfilesView(FormView):\n template_name = 'twitter/import_profiles_spreadsheet.html'\n form_class = ImportProfilesForm\n\n def form_valid(self, form):\n form_data = form.cleaned_data\n num_profiles_before = TwitterProfile.objects.count()\n ingest_spreadsheet(file_contents=form_data['spreadsheet_file_content'])\n num_profiles_after = TwitterProfile.objects.count()\n num_new = num_profiles_after - num_profiles_before\n messages.success(self.request, f\"{num_new} profiles created\")\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse('import-spreadsheet')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['total_tags_count'] = Tag.objects.count()\n return context\n\n\nclass SendOneView(FormView):\n\n 
template_name = 'twitter/send_one.html'\n form_class = SendOneForm\n\n def form_valid(self, form):\n work_type = form.cleaned_data['work_type']\n\n if work_type == 'conversation_tweets':\n conversation_ids = form.cleaned_data['conversation_ids']\n items_sent = send_scrape_work__conversation(\n None, conversation_ids, priority=1, flush=True\n )\n else:\n profiles = form.cleaned_data['selected_profiles']\n items_sent = send_scrape_work(\n None, profiles, work_type, priority=1, flush=True\n )\n messages.success(self.request, f\"{work_type} requested, sent: {items_sent} items\")\n return super().form_valid(form)\n\n def form_invalid(self, form):\n # todo: improve message detail\n messages.error(self.request, f\"profile not found or missing user_info\") # assuming this is the case here\n return redirect('send-one')\n\n def get_success_url(self):\n return reverse('send-one')\n\n\nclass SelectTagsView(FormView):\n\n template_name = 'twitter/select_tags.html'\n form_class = SelectTagsForm\n\n def form_valid(self, form):\n tag_slugs = form.cleaned_data['tag_slugs']\n\n profiles = Tag.get_profiles_with_tags(tag_slugs)\n if len(profiles) == 0:\n messages.error(self.request, f\"no profiles found with tags: {tag_slugs}\")\n return redirect('select-tags')\n\n self.num_userinfo_missing = len([o for o in profiles if o.user_info is None])\n self.request.session['selected_tags'] = ','.join(tag_slugs)\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super(SelectTagsView, self).get_form_kwargs()\n # kwargs['user'] = user = self.request.user\n # if 'initial' not in kwargs:\n # kwargs['initial'] = {}\n return kwargs\n\n def get_success_url(self):\n if self.num_userinfo_missing > 0:\n return reverse('select-user-info-action')\n return reverse('select-scrape-tasks')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['total_tags_count'] = Tag.objects.count()\n return context\n\n\ndef flush_group_view(request, work_type):\n\n if work_type not in WORK_TYPES:\n raise SuspiciousOperation(f'unexpected work_type: {work_type}')\n\n work_types = [work_type]\n\n redis_cli = redis.Redis(host=REDIS_HOSTNAME, port=REDIS_PORT)\n for wt in work_types:\n msg = {'work_type': wt, 'items': ['flush-group']}\n redis_cli.lpush(SCRAPER_QUEUE_NAME, json.dump(msg))\n\n messages.success(request, f'{work_type} group flushed')\n\n return redirect('select-tags')\n\n\ndef feed_view(request, screen_name_or_userid):\n if screen_name_or_userid.isdigit():\n user_id = screen_name_or_userid\n profile = get_object_or_404(TwitterProfile, user_id=user_id)\n else:\n screen_name = screen_name_or_userid.lower()\n profile = get_object_or_404(TwitterProfile, screen_name=screen_name)\n\n tweets = Tweet.objects.filter(author=profile, json_data__isnull=False).order_by('-publish_datetime')[:20]\n context = {'tweets': tweets}\n return render(request, \"twitter/twitter_feed.html\", context) # todo: change to not use base html file\n\n\nclass SelectUserInfoActionView(FormView):\n\n template_name = 'twitter/select_userinfo_action.html'\n form_class = SelectUserInfoActionForm\n\n def get_form_kwargs(self):\n kwargs = super(SelectUserInfoActionView, self).get_form_kwargs()\n\n profiles_with_ui, _ = self._get_profiles()\n kwargs['num_with_user_info'] = len(profiles_with_ui)\n\n # kwargs['user'] = user = self.request.user\n # if 'initial' not in kwargs:\n # kwargs['initial'] = {}\n return kwargs\n\n def form_valid(self, form):\n form_data = form.cleaned_data\n\n tags = 
self.request.session['selected_tags']\n if not tags: # should never get here\n messages.error(self.request, 'no selected_tags found in session')\n return redirect('select-tags')\n\n cancel = bool(form_data.get('cancel'))\n fetch_userinfo = bool(form_data.get('fetch_userinfo'))\n\n if cancel:\n return redirect('select-tags')\n\n if fetch_userinfo:\n profiles = Tag.get_profiles_with_tags(tags.split(','), available_only=False)\n num_items = send_scrape_work(\n None, profiles, 'user_info', priority=1, flush=True\n )\n time.sleep(1) # delay the user a little so items get processed\n messages.info(self.request, f\"requested user-info for {len(profiles)} profiles\")\n\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse('select-scrape-tasks')\n\n def _get_profiles(self):\n tag_slugs_list = self.request.session['selected_tags'].split(',')\n profiles = Tag.get_profiles_with_tags(tag_slugs_list)\n profiles_no_ui = [p for p in profiles if p.user_info is None]\n profiles_with_ui = [p for p in profiles if p.user_info is not None]\n return profiles_with_ui, profiles_no_ui\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n profiles_with_ui, profiles_no_ui = self._get_profiles()\n context['num_without_user_info'] = len(profiles_no_ui)\n context['num_with_user_info'] = len(profiles_with_ui)\n context['tags_joined'] = self.request.session['selected_tags']\n\n return context\n\n\nclass SelectScrapeTasksView(FormView):\n\n template_name = 'twitter/select_scrape_tasks.html'\n form_class = SelectScrapeTasksForm\n\n def __init__(self, *args, **kwargs):\n super(SelectScrapeTasksView, self).__init__(*args, **kwargs)\n\n def form_invalid(self, form):\n return super(SelectScrapeTasksView, self).form_invalid(form)\n\n def form_valid(self, form):\n '''\n {\n 'scrape_user_timeline': True, 'scrape_user_likes': False,\n 'scrape_friend_ids': False,\n 'scrape_follower_ids': False,\n 'user_timeline_priority': '2', 'user_likes_priority': '2',\n 'friend_ids_priority': '2',\n 'follower_ids_priority': '2', 'user_likes_limit': 19, 'user_timeline_limit': 19,\n 'friend_ids_limit': 19, 'follower_ids_limit': 19,\n }\n '''\n form_data = form.cleaned_data\n\n tag_slugs_list = self.request.session['selected_tags'].split(',')\n profiles = Tag.get_profiles_with_tags(tag_slugs_list)\n\n profiles = [p for p in profiles if p.user_info is not None]\n profiles_no_ui = [p for p in profiles if p.user_info is None]\n if profiles_no_ui:\n print(f\"warning: skipping {len(profiles_no_ui)} profiles without user-info\")\n\n if not profiles:\n messages.error(self.request, 'no profiles with user-info found')\n return super(SelectScrapeTasksView, self).form_valid(form)\n\n workload_keys = ['user_timeline', 'user_likes', 'friend_ids', 'follower_ids']\n\n priority = int(form_data['priority'])\n limit = int(form_data['limit'])\n\n for wt in workload_keys:\n do_scrape = form_data[f'scrape_{wt}']\n if not do_scrape:\n continue\n\n _profiles = profiles\n _profiles = profiles[:limit]\n if not _profiles:\n continue # limit was set to < 1\n\n flush = form_data.get('flush_queues', False)\n send_scrape_work(\n None, _profiles, wt, priority=priority, flush=flush\n )\n\n self.request.session['selected_tags'] = None\n messages.success(self.request, f'profiles sent for scrape, {len(profiles_no_ui)} skipped')\n return super(SelectScrapeTasksView, self).form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super(SelectScrapeTasksView, self).get_form_kwargs()\n\n tag_slugs_list = 
self.request.session['selected_tags'].split(',')\n profiles = Tag.get_profiles_with_tags(tag_slugs_list)\n kwargs['num_profiles'] = len(profiles)\n\n return kwargs\n\n def get_success_url(self):\n return reverse('select-tags')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n tag_slugs_list = self.request.session['selected_tags'].split(',')\n profiles = Tag.get_profiles_with_tags(tag_slugs_list)\n context['num_profiles'] = len(profiles)\n\n return context\n\n def get(self, *args, **kwargs):\n\n tags_li = self.request.session['selected_tags'].split(',')\n profiles = Tag.get_profiles_with_tags(tags_li)\n\n if len(profiles) == 0:\n messages.error(self.request, f'no profiles found for tags: {tags_li}')\n return redirect('select-tags')\n\n return super(SelectScrapeTasksView, self).get(*args, *kwargs)\n\n\ndef get_current_quota_periods__view(request, service_slug, endpoint_slug, account_slug):\n\n now = get_utc_now()\n quota_periods = ApiQuotaPeriod.objects.filter(\n service_slug=service_slug, endpoint_slug=endpoint_slug,\n account_slug=account_slug,\n start_datetime__gte=now, end_datetime__lte=now\n )\n object_dicts = [obj.get_dict() for obj in quota_periods]\n return HttpResponse(\n msgpack.dumps(object_dicts), content_type='application/msgpack'\n )\n","repo_name":"rossrochford/twitter_ingestor","sub_path":"twitter_webserver/twitter_webserver/twitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"37692878605","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\norg_data = np.loadtxt(\"histogram_vB.txt\")*10\nprint(org_data[0], len(org_data), len(org_data[0]))\ndata_list = []\nfor i in range(len(org_data)):\n hist, edges = np.histogram(org_data[i], bins=50, density=True)\n #print(hist)\n w = edges[1] - edges[0]\n hist = hist * w\n data_list.append(hist)\n print( f\"hist :{hist}, SUM {sum(hist)} \")\nnp.savetxt(\"dataB.txt\", data_list)","repo_name":"is0383kk/M2-code","sub_path":"MGMM/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12163702600","text":"import os\n\n# For reading, visualizing, and preprocessing data\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nfrom pytorch_toolbelt.utils import fs\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.preprocessing import StandardScaler\nfrom xgboost import XGBClassifier\n\nfrom alaska2 import get_holdout, INPUT_IMAGE_KEY, get_test_dataset\nfrom alaska2.metric import alaska_weighted_auc\nfrom alaska2.submissions import get_x_y_for_stacking\nfrom submissions.eval_tta import get_predictions_csv\nfrom submissions.make_submissions_averaging import compute_checksum_v2\n\n\ndef main():\n output_dir = os.path.dirname(__file__)\n\n experiments = [\n \"G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16\",\n \"G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16\",\n \"G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16\",\n \"G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16\",\n # \"H_Jul12_18_42_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16\",\n #\n \"K_Jul17_17_09_nr_rgb_tf_efficientnet_b6_ns_mish_fold0_local_rank_0_fp16\",\n 
\"J_Jul19_20_10_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16\",\n \"H_Jul11_16_37_nr_rgb_tf_efficientnet_b7_ns_mish_fold2_local_rank_0_fp16\",\n \"K_Jul18_16_41_nr_rgb_tf_efficientnet_b6_ns_mish_fold3_local_rank_0_fp16\"\n #\n #\n ]\n\n holdout_predictions = get_predictions_csv(experiments, \"cauc\", \"holdout\", \"d4\")\n test_predictions = get_predictions_csv(experiments, \"cauc\", \"test\", \"d4\")\n checksum = compute_checksum_v2(experiments)\n\n holdout_ds = get_holdout(\"\", features=[INPUT_IMAGE_KEY])\n image_ids = [fs.id_from_fname(x) for x in holdout_ds.images]\n\n quality_h = F.one_hot(torch.tensor(holdout_ds.quality).long(), 3).numpy().astype(np.float32)\n\n test_ds = get_test_dataset(\"\", features=[INPUT_IMAGE_KEY])\n quality_t = F.one_hot(torch.tensor(test_ds.quality).long(), 3).numpy().astype(np.float32)\n\n with_logits = True\n\n x, y = get_x_y_for_stacking(holdout_predictions, with_logits=with_logits, tta_logits=with_logits)\n # Force target to be binary\n y = (y > 0).astype(int)\n print(x.shape, y.shape)\n\n x_test, _ = get_x_y_for_stacking(test_predictions, with_logits=with_logits, tta_logits=with_logits)\n print(x_test.shape)\n\n if True:\n sc = StandardScaler()\n x = sc.fit_transform(x)\n x_test = sc.transform(x_test)\n\n if False:\n sc = PCA(n_components=16)\n x = sc.fit_transform(x)\n x_test = sc.transform(x_test)\n\n if True:\n x = np.column_stack([x, quality_h])\n x_test = np.column_stack([x_test, quality_t])\n\n group_kfold = GroupKFold(n_splits=5)\n cv_scores = []\n test_pred = None\n one_over_n = 1.0 / group_kfold.n_splits\n\n for train_index, valid_index in group_kfold.split(x, y, groups=image_ids):\n x_train, x_valid, y_train, y_valid = (x[train_index], x[valid_index], y[train_index], y[valid_index])\n print(np.bincount(y_train), np.bincount(y_valid))\n\n cls = XGBClassifier(\n base_score=0.5,\n booster=\"gbtree\",\n colsample_bylevel=1,\n colsample_bynode=1,\n colsample_bytree=0.6,\n gamma=0.5,\n gpu_id=-1,\n importance_type=\"gain\",\n interaction_constraints=\"\",\n learning_rate=0.01,\n max_delta_step=0,\n max_depth=3,\n min_child_weight=10,\n # missing=nan,\n monotone_constraints=\"()\",\n n_estimators=1000,\n n_jobs=8,\n nthread=1,\n num_parallel_tree=1,\n objective=\"binary:logistic\",\n random_state=0,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n silent=True,\n subsample=0.8,\n tree_method=\"exact\",\n validate_parameters=1,\n verbosity=2,\n )\n\n cls.fit(x_train, y_train)\n\n y_valid_pred = cls.predict_proba(x_valid)[:, 1]\n score = alaska_weighted_auc(y_valid, y_valid_pred)\n cv_scores.append(score)\n\n if test_pred is not None:\n test_pred += cls.predict_proba(x_test)[:, 1] * one_over_n\n else:\n test_pred = cls.predict_proba(x_test)[:, 1] * one_over_n\n\n for s in cv_scores:\n print(s)\n print(np.mean(cv_scores), np.std(cv_scores))\n\n with_logits_sfx = \"_with_logits\" if with_logits else \"\"\n\n submit_fname = os.path.join(output_dir, f\"xgb_cls_{np.mean(cv_scores):.4f}_{checksum}{with_logits_sfx}.csv\")\n df = pd.read_csv(test_predictions[0]).rename(columns={\"image_id\": \"Id\"})\n df[\"Label\"] = test_pred\n df[[\"Id\", \"Label\"]].to_csv(submit_fname, index=False)\n print(\"Saved submission to \", submit_fname)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"BloodAxe/Kaggle-2020-Alaska2","sub_path":"submissions/make_submissions_xgb_cls.py","file_name":"make_submissions_xgb_cls.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"57"} +{"seq_id":"41789748668","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\n@author: maya\n@contact: 2372836278@qq.com\n@software: Pycharm\n@file: chart.py\n@time: 2018/12/18 19:33\n@desc:\n'''\nimport csv\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom pylab import *\n\n# 读取商品的评论信息\ndf = pd.read_csv('comments.csv', encoding='utf-8')\n# 设置字体,避免中文出现乱码\n# rcParams['font.family'] = \"Source Han Serif CN\"\nmpl.rcParams['font.sans-serif'] = ['SimHei']\n\n# df['顾客会员等级'].replace('PLUS会员[试用]', 'PLUS会员', inplace=True)\nlabels = list(set(df.顾客会员等级))\nsizes = [list(df.顾客会员等级).count(level) for level in list(set(df.顾客会员等级))]\nuserLevelDataFrame = pd.DataFrame(numpy.array([labels, sizes]).T, columns=['会员级别', '人数'])\n\n#调节图形大小,宽,高\nplt.figure(figsize=(12,9))\n#定义饼状图的外侧显示的文本标签,标签是列表\nlabels = sorted(list(set(df.顾客会员等级)))\n# 定义饼图的颜色\ncolors = ['red', 'blue', 'yellow', 'cyan', 'purple', 'orange']\n#sizes:设置每个标签在饼图中占多大,本例子是绘制会员分配的饼图\nsizes = [list(df.顾客会员等级).count(level) for level in labels]\n#将某部分爆炸出来, 使用括号,将第一块分割出来,数值的大小是分割出来的与其他两块的间隙\nexplode = (numpy.array([0.02 for i in range(len(labels))]))\n#labeldistance,饼图外侧文本的位置离中心点有多远,1.1指1.1倍半径的位置,1表示在饼图的边上,<1表示文字在饼图内\n#autopct,圆里面的文本格式,%.2f%%表示小数有两位的浮点数\n#shadow,饼是否有阴影\n#startangle,起始角度,0,表示从0开始逆时针转,为第一块。一般选择从90度开始比较好看\n#pctdistance,百分比的text离圆心的距离\nplt.pie(sizes,explode=explode,labels=labels,colors=colors,\n labeldistance = 1.1,autopct = '%.2f%%',shadow = False,\n startangle = 90,pctdistance = 0.6)\n\n# 设置x,y轴刻度一致,这样饼图才能是圆的\nplt.axis('equal')\n# 绘制图例,loc用于设置图例的位置,upper right表示图例位于右上方\nplt.legend(loc='upper left')\nplt.title('购买商品的会员分配图')\nplt.show()\n\n# 缺失值处理\ndf = df.fillna('不详')\n\n# 根据购物平台的名称,已经购买次数构造一个DataFrame\nuserClientCol = ['购物平台', '次数']\n# 注意:需数组转置\nuserClientDataFrame = pd.DataFrame(numpy.array([list(set(df.购物使用的平台)), [list(df.购物使用的平台).count(level) for level in list(set(df.购物使用的平台))]]).T, columns=userClientCol)\n\nplt.figure(figsize=(12,9),dpi=120)\nlabels = list(userClientDataFrame['购物平台'])\nplt.bar(range(len(labels)),userClientDataFrame['次数'],tick_label=labels)\nplt.title('购物使用的平台')\nplt.show()\n\n\nxs = []\nys = {}\ny = []\nz = []\n\n#读取数据构造列表\n\nwith open('comments.csv', encoding='utf-8') as f:\n f_csv = csv.reader(f)\n headers = next(f_csv)\n result = []\n for row in f_csv:\n if row:\n # print(row[4])\n xs.append(row[3][:9])\n\nfor x in xs:\n if x in ys:\n ys[x] += 1\n else:\n ys[x] = 1\n# z保存所有评论的时间——删除重复项直接用set构造\nz = list(set(xs))\n# y保存评论次数——直接count获取重复次数即可\nfor i in ys.values():\n y.append(i)\n\nplt.figure()\nplt.plot(z, y)\n# 标题——各时间段对应评论次数\nplt.title(\"index for time\")\nplt.legend()\n# x轴——评论时间\nplt.xlabel('x-comment_time')\n# y轴——次数\nplt.ylabel('y-index')\nplt.show()\n","repo_name":"MAYA-MUYI/spider","sub_path":"jd/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"zh","doc_type":"code","stars":23,"dataset":"github-code","pt":"57"} +{"seq_id":"33473836944","text":"\"\"\"\nImplementation of ``nn.Modules`` for N-Beats model.\n\"\"\"\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef linear(input_size, output_size, bias=True, dropout: int = None):\n lin = 
nn.Linear(input_size, output_size, bias=bias)\n if dropout is not None:\n return nn.Sequential(nn.Dropout(dropout), lin)\n else:\n return lin\n\n\ndef linspace(backcast_length: int, forecast_length: int, centered: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n if centered:\n norm = max(backcast_length, forecast_length)\n start = -backcast_length\n stop = forecast_length - 1\n else:\n norm = backcast_length + forecast_length\n start = 0\n stop = backcast_length + forecast_length - 1\n lin_space = np.linspace(start / norm, stop / norm, backcast_length + forecast_length, dtype=np.float32)\n b_ls = lin_space[:backcast_length]\n f_ls = lin_space[backcast_length:]\n return b_ls, f_ls\n\n\nclass NBEATSBlock(nn.Module):\n def __init__(\n self,\n units,\n thetas_dim,\n num_block_layers=4,\n backcast_length=10,\n forecast_length=5,\n share_thetas=False,\n dropout=0.1,\n ):\n super().__init__()\n self.units = units\n self.thetas_dim = thetas_dim\n self.backcast_length = backcast_length\n self.forecast_length = forecast_length\n self.share_thetas = share_thetas\n\n fc_stack = [\n nn.Linear(backcast_length, units),\n nn.ReLU(),\n ]\n for _ in range(num_block_layers - 1):\n fc_stack.extend([linear(units, units, dropout=dropout), nn.ReLU()])\n self.fc = nn.Sequential(*fc_stack)\n\n if share_thetas:\n self.theta_f_fc = self.theta_b_fc = nn.Linear(units, thetas_dim, bias=False)\n else:\n self.theta_b_fc = nn.Linear(units, thetas_dim, bias=False)\n self.theta_f_fc = nn.Linear(units, thetas_dim, bias=False)\n\n def forward(self, x):\n return self.fc(x)\n\n\nclass NBEATSSeasonalBlock(NBEATSBlock):\n def __init__(\n self,\n units,\n thetas_dim=None,\n num_block_layers=4,\n backcast_length=10,\n forecast_length=5,\n nb_harmonics=None,\n min_period=1,\n dropout=0.1,\n ):\n if nb_harmonics:\n thetas_dim = nb_harmonics\n else:\n thetas_dim = forecast_length\n self.min_period = min_period\n\n super().__init__(\n units=units,\n thetas_dim=thetas_dim,\n num_block_layers=num_block_layers,\n backcast_length=backcast_length,\n forecast_length=forecast_length,\n share_thetas=True,\n dropout=dropout,\n )\n\n backcast_linspace, forecast_linspace = linspace(backcast_length, forecast_length, centered=False)\n\n p1, p2 = (thetas_dim // 2, thetas_dim // 2) if thetas_dim % 2 == 0 else (thetas_dim // 2, thetas_dim // 2 + 1)\n s1_b = torch.tensor(\n [np.cos(2 * np.pi * i * backcast_linspace) for i in self.get_frequencies(p1)], dtype=torch.float32\n ) # H/2-1\n s2_b = torch.tensor(\n [np.sin(2 * np.pi * i * backcast_linspace) for i in self.get_frequencies(p2)], dtype=torch.float32\n )\n self.register_buffer(\"S_backcast\", torch.cat([s1_b, s2_b]))\n\n s1_f = torch.tensor(\n [np.cos(2 * np.pi * i * forecast_linspace) for i in self.get_frequencies(p1)], dtype=torch.float32\n ) # H/2-1\n s2_f = torch.tensor(\n [np.sin(2 * np.pi * i * forecast_linspace) for i in self.get_frequencies(p2)], dtype=torch.float32\n )\n self.register_buffer(\"S_forecast\", torch.cat([s1_f, s2_f]))\n\n def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]:\n x = super().forward(x)\n amplitudes_backward = self.theta_b_fc(x)\n backcast = amplitudes_backward.mm(self.S_backcast)\n amplitudes_forward = self.theta_f_fc(x)\n forecast = amplitudes_forward.mm(self.S_forecast)\n\n return backcast, forecast\n\n def get_frequencies(self, n):\n return np.linspace(0, (self.backcast_length + self.forecast_length) / self.min_period, n)\n\n\nclass NBEATSTrendBlock(NBEATSBlock):\n def __init__(\n self,\n units,\n thetas_dim,\n num_block_layers=4,\n 
backcast_length=10,\n forecast_length=5,\n dropout=0.1,\n ):\n super().__init__(\n units=units,\n thetas_dim=thetas_dim,\n num_block_layers=num_block_layers,\n backcast_length=backcast_length,\n forecast_length=forecast_length,\n share_thetas=True,\n dropout=dropout,\n )\n\n backcast_linspace, forecast_linspace = linspace(backcast_length, forecast_length, centered=True)\n norm = np.sqrt(forecast_length / thetas_dim) # ensure range of predictions is comparable to input\n\n coefficients = torch.tensor([backcast_linspace**i for i in range(thetas_dim)], dtype=torch.float32)\n self.register_buffer(\"T_backcast\", coefficients * norm)\n\n coefficients = torch.tensor([forecast_linspace**i for i in range(thetas_dim)], dtype=torch.float32)\n self.register_buffer(\"T_forecast\", coefficients * norm)\n\n def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]:\n x = super().forward(x)\n backcast = self.theta_b_fc(x).mm(self.T_backcast)\n forecast = self.theta_f_fc(x).mm(self.T_forecast)\n return backcast, forecast\n\n\nclass NBEATSGenericBlock(NBEATSBlock):\n def __init__(\n self,\n units,\n thetas_dim,\n num_block_layers=4,\n backcast_length=10,\n forecast_length=5,\n dropout=0.1,\n ):\n super().__init__(\n units=units,\n thetas_dim=thetas_dim,\n num_block_layers=num_block_layers,\n backcast_length=backcast_length,\n forecast_length=forecast_length,\n dropout=dropout,\n )\n\n self.backcast_fc = nn.Linear(thetas_dim, backcast_length)\n self.forecast_fc = nn.Linear(thetas_dim, forecast_length)\n\n def forward(self, x):\n x = super().forward(x)\n\n theta_b = F.relu(self.theta_b_fc(x))\n theta_f = F.relu(self.theta_f_fc(x))\n\n return self.backcast_fc(theta_b), self.forecast_fc(theta_f)\n","repo_name":"jdb78/pytorch-forecasting","sub_path":"pytorch_forecasting/models/nbeats/sub_modules.py","file_name":"sub_modules.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","stars":3323,"dataset":"github-code","pt":"57"} +{"seq_id":"74462650739","text":"\"\"\"\n\nThis file contains the BaseUser object, which represents a Roblox user ID.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Optional, List, TYPE_CHECKING\n\nfrom .baseitem import BaseItem\nfrom ..bases.basebadge import BaseBadge\nfrom ..instances import ItemInstance, InstanceType, AssetInstance, GamePassInstance, instance_classes\nfrom ..partials.partialbadge import PartialBadge\nfrom ..presence import Presence\nfrom ..promotionchannels import UserPromotionChannels\nfrom ..robloxbadges import RobloxBadge\nfrom ..utilities.iterators import PageIterator, SortOrder\nfrom ..utilities.shared import ClientSharedObject\n\nif TYPE_CHECKING:\n from ..friends import Friend\n from ..roles import Role\n from ..utilities.types import AssetOrAssetId, GamePassOrGamePassId\n\n\nclass BaseUser(BaseItem):\n \"\"\"\n Represents a Roblox user ID.\n\n Attributes:\n _shared: The ClientSharedObject.\n id: The user ID.\n \"\"\"\n\n def __init__(self, shared: ClientSharedObject, user_id: int):\n \"\"\"\n Arguments:\n shared: The ClientSharedObject.\n user_id: The user ID.\n \"\"\"\n\n self._shared: ClientSharedObject = shared\n self.id: int = user_id\n\n async def get_status(self) -> str:\n \"\"\"\n Grabs the user's status.\n\n Returns:\n The user's status.\n \"\"\"\n status_response = await self._shared.requests.get(\n url=self._shared.url_generator.get_url(\n \"users\", f\"/v1/users/{self.id}/status\"\n )\n )\n status_data = status_response.json()\n return status_data[\"status\"]\n\n def username_history(\n 
self, page_size: int = 10, sort_order: SortOrder = SortOrder.Ascending, max_items: int = None\n ) -> PageIterator:\n \"\"\"\n Grabs the user's username history.\n\n Arguments:\n page_size: How many members should be returned for each page.\n sort_order: Order in which data should be grabbed.\n max_items: The maximum items to return when looping through this object.\n\n Returns:\n A PageIterator containing the user's username history.\n \"\"\"\n return PageIterator(\n shared=self._shared,\n url=self._shared.url_generator.get_url(\n \"users\", f\"v1/users/{self.id}/username-history\"\n ),\n page_size=page_size,\n sort_order=sort_order,\n max_items=max_items,\n handler=lambda shared, data: data[\"name\"],\n )\n\n async def get_presence(self) -> Optional[Presence]:\n \"\"\"\n Grabs the user's presence.\n\n Returns:\n The user's presence, if they have an active presence.\n \"\"\"\n presences = await self._shared.presence_provider.get_user_presences([self.id])\n try:\n return presences[0]\n except IndexError:\n return None\n\n async def get_friends(self) -> List[Friend]:\n \"\"\"\n Grabs the user's friends.\n\n Returns:\n A list of the user's friends.\n \"\"\"\n\n from ..friends import Friend\n friends_response = await self._shared.requests.get(\n url=self._shared.url_generator.get_url(\"friends\", f\"v1/users/{self.id}/friends\")\n )\n friends_data = friends_response.json()[\"data\"]\n return [Friend(shared=self._shared, data=friend_data) for friend_data in friends_data]\n\n async def get_currency(self) -> int:\n \"\"\"\n Grabs the user's current Robux amount. Only works on the authenticated user.\n\n Returns:\n The user's Robux amount.\n \"\"\"\n currency_response = await self._shared.requests.get(\n url=self._shared.url_generator.get_url(\"economy\", f\"v1/users/{self.id}/currency\")\n )\n currency_data = currency_response.json()\n return currency_data[\"robux\"]\n\n async def has_premium(self) -> bool:\n \"\"\"\n Checks if the user has a Roblox Premium membership.\n\n Returns:\n Whether the user has Premium or not.\n \"\"\"\n premium_response = await self._shared.requests.get(\n url=self._shared.url_generator.get_url(\"premiumfeatures\", f\"v1/users/{self.id}/validate-membership\")\n )\n premium_data = premium_response.text\n return premium_data == \"true\"\n\n async def get_item_instance(self, item_type: InstanceType, item_id: int) -> Optional[ItemInstance]:\n \"\"\"\n Gets an item instance for a specific user.\n\n Arguments:\n item_type: The type of item to get an instance for.\n item_id: The item's ID.\n\n Returns: An ItemInstance, if it exists.\n \"\"\"\n\n item_type: str = item_type.value.lower()\n\n # this is so we can have special classes for other types\n item_class = instance_classes.get(item_type) or ItemInstance\n\n instance_response = await self._shared.requests.get(\n url=self._shared.url_generator.get_url(\"inventory\", f\"v1/users/{self.id}/items/{item_type}/{item_id}\")\n )\n instance_data = instance_response.json()[\"data\"]\n if len(instance_data) > 0:\n return item_class(\n shared=self._shared,\n data=instance_data[0]\n )\n else:\n return None\n\n async def get_asset_instance(self, asset: AssetOrAssetId) -> Optional[AssetInstance]:\n \"\"\"\n Checks if a user owns the asset, and returns details about the asset if they do.\n\n Returns:\n An asset instance, if the user owns this asset.\n \"\"\"\n return await self.get_item_instance(\n item_type=InstanceType.asset,\n item_id=int(asset)\n )\n\n async def get_gamepass_instance(self, gamepass: GamePassOrGamePassId) -> 
Optional[GamePassInstance]:\n        \"\"\"\n        Checks if a user owns the gamepass, and returns details about the gamepass if they do.\n\n        Returns:\n            A gamepass instance, if the user owns this gamepass.\n        \"\"\"\n        return await self.get_item_instance(\n            item_type=InstanceType.gamepass,\n            item_id=int(gamepass)\n        )\n\n    async def get_badge_awarded_dates(self, badges: list[BaseBadge]) -> List[PartialBadge]:\n        \"\"\"\n        Gets the dates that each badge in a list of badges were awarded to this user.\n\n        Returns:\n            A list of partial badges containing badge awarded dates.\n        \"\"\"\n        awarded_response = await self._shared.requests.get(\n            url=self._shared.url_generator.get_url(\"badges\", f\"v1/users/{self.id}/badges/awarded-dates\"),\n            params={\n                \"badgeIds\": [badge.id for badge in badges]\n            }\n        )\n        awarded_data: list = awarded_response.json()[\"data\"]\n        return [\n            PartialBadge(\n                shared=self._shared,\n                data=partial_data\n            ) for partial_data in awarded_data\n        ]\n\n    async def get_group_roles(self) -> List[Role]:\n        \"\"\"\n        Gets a list of roles for all groups this user is in.\n\n        Returns:\n            A list of roles.\n        \"\"\"\n        from ..roles import Role\n        from ..groups import Group\n        roles_response = await self._shared.requests.get(\n            url=self._shared.url_generator.get_url(\"groups\", f\"v1/users/{self.id}/groups/roles\")\n        )\n        roles_data = roles_response.json()[\"data\"]\n        return [\n            Role(\n                shared=self._shared,\n                data=role_data[\"role\"],\n                group=Group(\n                    shared=self._shared,\n                    data=role_data[\"group\"]\n                )\n            ) for role_data in roles_data\n        ]\n\n    async def get_primary_group_role(self) -> Optional[Role]:\n        \"\"\"\n        Gets a role for the primary group this user is in.\n\n        Returns:\n            Role\n        \"\"\"\n        from ..roles import Role\n        from ..groups import Group\n        roles_response = await self._shared.requests.get(\n            url=self._shared.url_generator.get_url(\"groups\", f\"v1/users/{self.id}/groups/primary/role\")\n        )\n        json = roles_response.json()\n        if json is None:\n            return None\n        return Role(\n            shared=self._shared,\n            data=json[\"role\"],\n            group=Group(\n                shared=self._shared,\n                data=json[\"group\"]\n            )\n        )\n\n    async def get_roblox_badges(self) -> List[RobloxBadge]:\n        \"\"\"\n        Gets the user's Roblox badges.\n\n        Returns:\n            A list of Roblox badges.\n        \"\"\"\n\n        badges_response = await self._shared.requests.get(\n            url=self._shared.url_generator.get_url(\"accountinformation\", f\"v1/users/{self.id}/roblox-badges\")\n        )\n        badges_data = badges_response.json()\n        return [RobloxBadge(shared=self._shared, data=badge_data) for badge_data in badges_data]\n\n    async def get_promotion_channels(self) -> UserPromotionChannels:\n        \"\"\"\n        Gets the user's promotion channels.\n\n        Returns:\n            The user's promotion channels.\n        \"\"\"\n        channels_response = await self._shared.requests.get(\n            url=self._shared.url_generator.get_url(\"accountinformation\", f\"v1/users/{self.id}/promotion-channels\")\n        )\n        channels_data = channels_response.json()\n        return UserPromotionChannels(\n            data=channels_data\n        )\n\n    async def _get_friend_channel_count(self, channel: str) -> int:\n        count_response = await self._shared.requests.get(\n            url=self._shared.url_generator.get_url(\"friends\", f\"v1/users/{self.id}/{channel}/count\")\n        )\n        return count_response.json()[\"count\"]\n\n    def _get_friend_channel_iterator(\n            self,\n            channel: str,\n            page_size: int = 10,\n            sort_order: SortOrder = SortOrder.Ascending, max_items: int = None\n    ) -> PageIterator:\n        from ..friends import Friend\n        return PageIterator(\n            shared=self._shared,\n            url=self._shared.url_generator.get_url(\"friends\", 
f\"v1/users/{self.id}/{channel}\"),\n page_size=page_size,\n sort_order=sort_order,\n max_items=max_items,\n handler=lambda shared, data: Friend(shared=shared, data=data)\n )\n\n async def get_friend_count(self) -> int:\n \"\"\"\n Gets the user's friend count.\n\n Returns:\n The user's friend count.\n \"\"\"\n return await self._get_friend_channel_count(\"friends\")\n\n async def get_follower_count(self) -> int:\n \"\"\"\n Gets the user's follower count.\n\n Returns:\n The user's follower count.\n \"\"\"\n return await self._get_friend_channel_count(\"followers\")\n\n async def get_following_count(self) -> int:\n \"\"\"\n Gets the user's following count.\n\n Returns:\n The user's following count.\n \"\"\"\n return await self._get_friend_channel_count(\"followings\")\n\n def get_followers(\n self,\n page_size: int = 10,\n sort_order: SortOrder = SortOrder.Ascending, max_items: int = None\n ) -> PageIterator:\n \"\"\"\n Gets the user's followers.\n\n Returns:\n A PageIterator containing everyone who follows this user.\n \"\"\"\n return self._get_friend_channel_iterator(\n channel=\"followers\",\n page_size=page_size,\n sort_order=sort_order,\n max_items=max_items,\n )\n\n def get_followings(\n self,\n page_size: int = 10,\n sort_order: SortOrder = SortOrder.Ascending, max_items: int = None\n ) -> PageIterator:\n \"\"\"\n Gets the user's followings.\n\n Returns:\n A PageIterator containing everyone that this user is following.\n \"\"\"\n return self._get_friend_channel_iterator(\n channel=\"followings\",\n page_size=page_size,\n sort_order=sort_order,\n max_items=max_items,\n )\n","repo_name":"CantCode023/FrostBot","sub_path":"venv/Lib/site-packages/roblox/bases/baseuser.py","file_name":"baseuser.py","file_ext":"py","file_size_in_byte":12112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"3346686848","text":"'''\nCreated on Mar 5, 2016\n\nThis code is one extension of theano sample code\n\n@author: Wuga\n'''\nimport numpy\nimport theano\nimport theano.tensor as T\nimport layers\nimport files as F\nfrom net import CDNN\nimport timeit\nimport cPickle\nfrom PIL import Image \n\ndef ModelTester(learning_rate=0.01, n_epochs=100, batch_size=40):\n \"\"\"\n The function test the proposed model on BSDS500 dataset in pickle format\n (Data transform code is available in files folder of this package)\n \n This function will also train and save best model!\n\n :type learning_rate: float type\n :param learning_rate: gradient descent learning rate\n \n :type n_epochs: integer type\n :param n_epochs: number of epochs to run\n \n :type batch_size: integer type\n :param batch_size: size of data in each loop of gradient decent\n \"\"\"\n \n datasets = F.loadPickleData('data.save')\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n \n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\n \n train_set_x=train_set_x.dimshuffle((0, 3, 1, 2))\n train_set_y=train_set_y.dimshuffle((0, 3, 1, 2))\n valid_set_x=valid_set_x.dimshuffle((0, 3, 1, 2))\n valid_set_y=valid_set_y.dimshuffle((0, 3, 1, 2))\n test_set_x=test_set_x.dimshuffle((0, 3, 1, 2))\n test_set_y=test_set_y.dimshuffle((0, 3, 1, 2))\n\n ######################\n # BUILD ACTUAL MODEL 
#\n ######################\n print('... building the model')\n\n index = T.lscalar() \n x = T.ftensor4('x') \n y = T.ftensor4('y')\n \n\n rng = numpy.random.RandomState(1234)\n\n classifier = CDNN(\n rng=rng,\n batch_size=batch_size,\n input=x\n )\n \n cost = classifier.BinaryCrossEntroy(y)\n \n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size:(index + 1) * batch_size],\n y: test_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n \n test_cost_model = theano.function(\n inputs=[index],\n outputs=classifier.BinaryCrossEntroy(y),\n givens={\n x: test_set_x[index * batch_size:(index + 1) * batch_size],\n y: test_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n \n validate_cost_model = theano.function(\n inputs=[index],\n outputs=classifier.BinaryCrossEntroy(y),\n givens={\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n \n train_cost_model = theano.function(\n inputs=[index],\n outputs=classifier.BinaryCrossEntroy(y),\n givens={\n x: train_set_x[index * batch_size:(index + 1) * batch_size],\n y: train_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n \n train_error_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: train_set_x[index * batch_size:(index + 1) * batch_size],\n y: train_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n \n \n gparams = [T.grad(cost, param) for param in classifier.params]\n\n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(classifier.params, gparams)\n ]\n\n # compiling a Theano function `train_model` that returns the cost, but\n # in the same time updates the parameter of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n \n ###############\n # TRAIN MODEL #\n ###############\n print('... 
training')\n\n    patience = 10000 \n    validation_frequency = min(n_train_batches, patience // 2)\n    start_time = timeit.default_timer()\n\n    epoch = 0\n    done_looping = False\n    \n    while (epoch < n_epochs) and (not done_looping):\n        epoch = epoch + 1\n        for minibatch_index in range(n_train_batches):\n\n            minibatch_avg_cost = train_model(minibatch_index)\n            # iteration number\n            iter = (epoch - 1) * n_train_batches + minibatch_index\n\n            if (iter + 1) % validation_frequency == 0:\n                train_losses = [train_error_model(i) for i\n                                in range(n_train_batches)]\n                this_train_losses = numpy.mean(train_losses)\n                \n                train_cost = [train_cost_model(i) for i\n                              in range(n_train_batches)]\n                this_train_cost = numpy.mean(train_cost)\n\n                print(\n                    'epoch %i, minibatch %i/%i, train error %f %%, train cost %f %%' %\n                    (\n                        epoch,\n                        minibatch_index + 1,\n                        n_train_batches,\n                        this_train_losses *100,\n                        this_train_cost * 100.\n                    )\n                )\n\n            if patience <= iter:\n                done_looping = True\n                break\n    end_time = timeit.default_timer()\n    \n    # compile a predictor function\n    predict_model = theano.function(\n        inputs=[classifier.input],\n        outputs=classifier.y_pred)\n    test_set_x = test_set_x.eval()\n    predicted_values = predict_model(test_set_x[:5])\n    print(\"Predicted values for the first 5 examples in test set:\")\n    print((predicted_values[0].reshape((480,320))*255).astype(numpy.uint8))\n    for idx,I in enumerate(predicted_values):\n        I8 = (I.reshape((480,320)) * 255).astype(numpy.uint8)\n        rgbArray = numpy.zeros((480,320,3), 'uint8')\n        rgbArray[..., 0] = I8\n        rgbArray[..., 1] = I8\n        rgbArray[..., 2] = I8\n        img = Image.fromarray(rgbArray)\n        img.save(str(idx)+'myimg.jpeg')\n    predicted_values = predict_model(test_set_x[5:10])\n    print(\"Predicted values for the next 5 examples in test set:\")\n    print((predicted_values[0].reshape((480,320))*255).astype(numpy.uint8))\n    for idx,I in enumerate(predicted_values):\n        I8 = (I.reshape((480,320)) * 255).astype(numpy.uint8)\n        rgbArray = numpy.zeros((480,320,3), 'uint8')\n        rgbArray[..., 0] = I8\n        rgbArray[..., 1] = I8\n        rgbArray[..., 2] = I8\n        img = Image.fromarray(rgbArray)\n        img.save(str(idx+5)+'myimg.jpeg')  # offset filenames so this batch does not overwrite the first\n\n    \nModelTester(learning_rate=0.1, n_epochs=200, batch_size=5)\n","repo_name":"wuga214/Boundary-Detection-via-Convolution-Deconvolution-Neural-Network-with-BMA","sub_path":"Conv-Deconv-Image-Process/TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":7241,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"57"} +{"seq_id":"20104060810","text":"import re\r\n\r\nfrom rapidfuzz.process import extractOne\r\nfrom sqlalchemy.ext.asyncio import AsyncSession\r\n\r\nfrom src.database.models import City\r\nfrom src.database.repositories.abstract import Repository\r\n\r\n\r\nclass CityRepository(Repository[City]):\r\n    def __init__(self, session: AsyncSession):\r\n        super().__init__(type_model=City, session=session)\r\n\r\n    async def new(self, name: str, abb: str, simplified_name: str) -> City:\r\n        new_user = await self.session.merge(\r\n            City(abb=abb, name=name, simplified_name=simplified_name)\r\n        )\r\n        return new_user\r\n\r\n    async def fuzzy_get_by_name(self, name: str) -> City | None:\r\n        simplified_cities = [city.simplified_name for city in await self.get_many()]\r\n        simplified_name = re.sub(\r\n            \"[^a-zA-Zа-яА-Я]\", \"\", name.lower().strip().replace(\"ё\", \"е\")\r\n        )\r\n        close = extractOne(simplified_name, simplified_cities)\r\n        min_close_factor = 80\r\n        if close[1] < min_close_factor:\r\n            return None\r\n        return await 
self.get_by_where(City.simplified_name == str(close[0]))\r\n","repo_name":"Ap73MKa/telegramConcertParser","sub_path":"src/database/repositories/city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"17307942929","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom typing import List\r\nimport os\r\n\r\nSRC_DIR = os.path.dirname(__file__)\r\nCLEAN_DIR = os.path.join(SRC_DIR, './data/04-10-22:14:50')\r\n\r\ndef get_files_in_dir(dir) -> List[str]:\r\n    try:\r\n        return os.listdir(dir)\r\n    except Exception as e:\r\n        print(f'Failed to get files in {dir}.\\n{e}')\r\n        return []\r\n\r\ndef read_data(file_dir, filename):\r\n    path = os.path.join(file_dir, filename)\r\n\r\n    return pd.read_csv(path)\r\n\r\ndef plot_respeck(dataframe: pd.DataFrame, filename):\r\n    fig, ax = plt.subplots(2, 1, figsize=(12, 12))\r\n\r\n    # plot respeck\r\n    ax[0].plot(dataframe['accel_x'], label=\"accel_x\")\r\n    ax[0].plot(dataframe['accel_y'], label=\"accel_y\")\r\n    ax[0].plot(dataframe['accel_z'], label=\"accel_z\")\r\n    ax[0].legend()\r\n\r\n    ax[0].set_title(f\"{dataframe['sensor_type'].values[0]} - {dataframe['activity_type'].values[0]} \\n Accelerometer data\")\r\n\r\n    ax[1].plot(dataframe['gyro_x'], label=\"gyro_x\")\r\n    ax[1].plot(dataframe['gyro_y'], label=\"gyro_y\")\r\n    ax[1].plot(dataframe['gyro_z'], label=\"gyro_z\")\r\n    ax[1].legend()\r\n\r\n    ax[1].set_title(f\"{dataframe['sensor_type'].values[0]} - {dataframe['activity_type'].values[0]} \\n Gyroscope data\")\r\n\r\n    \r\n    # plt.show(block=True)\r\n\r\n    plt.savefig(\"./plotRespeckFinal/\" + filename + \".png\")\r\n\r\ndef plot_thingy(dataframe: pd.DataFrame, filename):\r\n    fig, ax = plt.subplots(3, 1, figsize=(12, 18))\r\n\r\n    # plot thingy\r\n    ax[0].plot(dataframe['accel_x'], label=\"accel_x\")\r\n    ax[0].plot(dataframe['accel_y'], label=\"accel_y\")\r\n    ax[0].plot(dataframe['accel_z'], label=\"accel_z\")\r\n    ax[0].legend()\r\n\r\n    ax[0].set_title(f\"{dataframe['sensor_type'].values[0]} - {dataframe['activity_type'].values[0]} \\n Accelerometer data\")\r\n\r\n    ax[1].plot(dataframe['gyro_x'], label=\"gyro_x\")\r\n    ax[1].plot(dataframe['gyro_y'], label=\"gyro_y\")\r\n    ax[1].plot(dataframe['gyro_z'], label=\"gyro_z\")\r\n    ax[1].legend()\r\n\r\n    ax[1].set_title(f\"{dataframe['sensor_type'].values[0]} - {dataframe['activity_type'].values[0]} \\n Gyroscope data\")\r\n\r\n    ax[2].plot(dataframe['mag_x'], label=\"mag_x\")\r\n    ax[2].plot(dataframe['mag_y'], label=\"mag_y\")\r\n    ax[2].plot(dataframe['mag_z'], label=\"mag_z\")\r\n    ax[2].legend()\r\n\r\n    ax[2].set_title(f\"{dataframe['sensor_type'].values[0]} - {dataframe['activity_type'].values[0]} \\n Magnetometer data\")\r\n\r\n    # plt.show(block=True)\r\n\r\n    plt.savefig(\"./plotThingyFinal/\" + filename + \".png\")\r\n\r\ndef main():\r\n    files = get_files_in_dir(CLEAN_DIR)\r\n\r\n    for file in files:\r\n        dataframe = read_data(CLEAN_DIR, file)\r\n\r\n        if file.startswith('Respeck'):\r\n            plot_respeck(dataframe, file)\r\n\r\n        elif file.startswith('Thingy'):\r\n            plot_thingy(dataframe, file)\r\n\r\n        else:\r\n            print(f'Invalid file name {file}')\r\n\r\nif __name__ == '__main__':\r\n    try:\r\n        main()\r\n\r\n    except KeyboardInterrupt:\r\n        exit(0)","repo_name":"umuttkartal/pdiot_group_u","sub_path":"view_data.py","file_name":"view_data.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"24151036556","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: mosquito\n# Email: sensor.wen@gmail.commit\n# Description: CI build for repo\n\nfrom subprocess import getoutput, getstatusoutput, call\nfrom urllib.request import urlretrieve\nimport urllib.error\nimport re\nimport os\nimport sys\nimport shutil\nimport fnmatch\nimport argparse\n\nsrcDir = os.path.join(os.getcwd(), 'build')\noutDir = os.path.join(os.getcwd(), 'output')\n\ndef get_commit_list():\n '''Get all of commit.\n\n Returns:\n Return all of commit list.\n '''\n\n stdout = getoutput('/bin/git log --pretty=%h')\n if 'ghprbActualCommit' in os.environ:\n commitList = re.findall('\\w{7}', stdout)[1:]\n else:\n commitList = re.findall('\\w{7}', stdout)\n return commitList\n\ndef get_file_list(commit):\n '''Get modified files for commit.\n\n Args:\n commit: commit string.\n\n Returns:\n Return list that modified files for commit.\n '''\n\n stdout = getoutput('/bin/git show --pretty=\"%h: %s\" --name-status {}'.format(commit))\n return list(filter(black_item, re.findall('rpms.*', stdout)))\n\ndef black_item(item):\n '''Fliter blacklist entry.\n\n Args:\n item: A string of item.\n '''\n\n for black in blackList:\n if re.match('.*' + black + '.*', item):\n return False\n else:\n return True\n\ndef parse_spec(specFile):\n '''Parse the Spec file contents.\n\n Args:\n specFile: A string of spec file path.\n\n Returns:\n Return the list contains the Spec file name and content. If the file\n is not found, it returns false.\n '''\n\n if os.path.exists(specFile) and str(specFile).endswith('.spec'):\n return specFile, getoutput('/bin/rpmspec -P {}'.format(specFile))\n\n return False\n\ndef get_source_list(specContent):\n '''Get source and patch files list.\n\n Args:\n specContent: A string of Spec file content.\n\n Returns:\n Return the list contains source and patch files list.\n '''\n\n return re.findall('[Source|Patch]\\d*:\\s+(.*)', specContent)\n\ndef get_sources(itemList, output=srcDir, verb=None):\n '''Get source files from local and internet.\n\n Args:\n itemList: A list of source files.\n output: A string of temp directory.\n verb: A bool of verbose.\n '''\n\n for item in itemList:\n if not os.path.exists(os.path.join(output, item.split('/')[-1])):\n if item.split('://')[0] in ['http', 'https', 'ftp']:\n if verb:\n print('\\033[36mverb:\\033[0m downloading {} file.'.format(item))\n try:\n urlretrieve(item, '{}/{}'.format(output, item.split('/')[-1]))\n #call(['wget', '-q', '-P', output, item])\n except Exception as e:\n print('\\033[31merro:\\033[0m downloading error. 
{}'.format(e))\n            else:\n                for src in find_files(item, 'rpms'):\n                    if verb:\n                        print('\\033[36mverb:\\033[0m copy {} file to build directory.'.format(src))\n                    shutil.copy(src, output)\n\ndef find_files(pattern, path=os.getcwd()):\n    '''Search for the specified file.\n\n    Args:\n        pattern: Filename regular expression.\n        path: Search path.\n\n    Yields:\n        Returns the target path of the file generator.\n    '''\n\n    for root, dirs, files in os.walk(path):\n        for filename in fnmatch.filter(files, pattern):\n            yield os.path.join(root, filename)\n\ndef build_srpm(specFile, output='build'):\n    '''Build source rpm.\n\n    Args:\n        specFile: A string of the Spec filename.\n        output: A string of the SRPM file output directory.\n\n    Returns:\n        Return the SRPM filename.\n    '''\n\n    command = '/bin/rpmbuild ' \\\n              '-D \"_topdir .\" ' \\\n              '-D \"_builddir {out}\" ' \\\n              '-D \"_buildrootdir {out}\" ' \\\n              '-D \"_rpmdir {out}\" ' \\\n              '-D \"_sourcedir {out}\" ' \\\n              '-D \"_specdir {out}\" ' \\\n              '-D \"_srcrpmdir {out}\" ' \\\n              '-bs {}'.format(specFile, out=output)\n    return re.search('build.*', getoutput(command)).group()\n\ndef build_rpm(srpmFile, release='23', arch='x86_64', output=outDir, opts='', verb=None):\n    '''Build rpm.\n\n    Args:\n        srpmFile: A string of SRPM file path.\n        release: A string of system release version.\n        arch: A string of system architecture.\n        output: A string of RPM file output directory.\n        opts: A string of mock options.\n        verb: A bool of verbose.\n\n    Returns:\n        Return the command running log.\n    '''\n\n    if verb:\n        opts += ' --verbose'\n\n    command = '/bin/mock --resultdir={} --root=fedora-{}-{}-rpmfusion {} {}'.format(\n        output, release, arch, opts, srpmFile)\n    return getstatusoutput(command)\n\ndef rpm_lint(repoDir=outDir, time=10):\n    '''Check rpm files.\n\n    Args:\n        repoDir: A string of repository directory.\n        time: An integer of time (minutes).\n\n    Returns:\n        Return the check result.\n    '''\n\n    command = '/bin/find {} -name \"*.rpm\" -and -ctime -{} | xargs ' \\\n              '/bin/rpmlint -i'.format(repoDir, round(time/60/24, 4))\n    return getoutput(command)\n\ndef create_repo(output=outDir):\n    '''Creates metadata of rpm repository.\n\n    Args:\n        output: A string of RPM metadata output directory.\n\n    Returns:\n        Return the command running log.\n    '''\n\n    return getoutput('/bin/createrepo_c -d -x *.src.rpm {}'.format(output))\n\ndef result(filename, content):\n    '''Log build result to file.\n\n    Args:\n        filename: A string of filename.\n        content: A list of (status, pkgname, release, arch).\n    '''\n\n    result = 'success' if content[0] == 0 else 'fail'\n    _, pkgname, release, arch = content\n\n    with open(filename, mode='a+') as f:\n        f.write('{} {} for fc{}-{}\\n'.format(pkgname, result, release, arch))\n    print('\\033[32minfo:\\033[0m Write build result to {} file.'.format(filename))\n\ndef parse_args():\n    '''Parser for command-line options.\n\n    Returns:\n        Return the Namespace object.\n    '''\n\n    parser = argparse.ArgumentParser(description='repository ci builder.')\n    parser.add_argument('-o', '--output-dir', metavar='PATH', type=str,\n                        dest='outDir', action='store', default=outDir,\n                        help='set rpm package output directory (default: output)')\n    parser.add_argument('-c', '--commit', metavar='COMMIT', type=str,\n                        dest='commit', action='store', required=False,\n                        help='build the specified commit')\n    parser.add_argument('-f', '--file', metavar='FILE', type=str,\n                        dest='file', action='append', default=[], required=False,\n                        help='build the specified Spec file')\n    parser.add_argument('-a', '--arch', metavar='ARCH', type=str,\n                        dest='archs', action='append', required=False,\n                        
help='set architecture for building rpm packages (default: x86_64, i386)')\n    parser.add_argument('-r', '--release', metavar='RELEASE', type=str,\n                        dest='releases', action='append', required=False,\n                        help='set release version for building rpm packages (default: 22, 23, 24)')\n    parser.add_argument('-b', '--black-list', metavar='BLACKLIST', type=str,\n                        dest='blacklist', action='append', required=False,\n                        help='set blacklist, skip these items')\n    parser.add_argument('--mock-opts', metavar='OPTIONS', type=str,\n                        dest='mock', action='store', default='', required=False,\n                        help='set mock command-line options')\n    parser.add_argument('--createrepo', dest='createrepo', action='store_true',\n                        help='run createrepo to create repository')\n    parser.add_argument('--rpmlint', dest='rpmlint', action='store_true',\n                        help='check common problems in rpm package')\n    parser.add_argument('--clean', dest='clean', action='store_true',\n                        help='clean workspace before building')\n    parser.add_argument('--result', metavar='PATH', type=str,\n                        dest='result', action='store', required=False, default='result.log',\n                        help='log build result to file (default: result.log)')\n    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',\n                        help='be verbose')\n    parser.add_argument(dest='files', metavar='FILE', type=str, action='store', nargs='*')\n    return parser.parse_args()\n\nif __name__ == '__main__':\n    args = parse_args()\n    Archs = args.archs if args.archs else ['x86_64', 'i386']\n    Releases = args.releases if args.releases else ['22', '23', '24']\n    blackList = args.blacklist if args.blacklist else ['electron']\n    args.file += args.files\n\n    if not sys.stdin.isatty():\n        args.file += sys.stdin.read().split()\n\n    rootDir = args.outDir\n    if 'REPO_ROOT' in os.environ:\n        rootDir = os.environ['REPO_ROOT']\n\n    mode = 'manual'\n    if 'GIT_PREVIOUS_COMMIT' in os.environ or 'ghprbActualCommit' in os.environ:\n        mode = 'ci'\n\n    if args.clean:\n        if args.verbose:\n            print('\\033[36mverb:\\033[0m clean workspace.')\n        getoutput('/bin/git clean -f -d -x')\n\n    if not os.path.isdir(srcDir):\n        os.mkdir(srcDir)\n\n    results = []\n    if os.path.exists(args.result):\n        with open(args.result) as f:\n            results = re.findall('rpms/.*.spec', f.read())\n\n    for commit in get_commit_list():\n        if ('GIT_PREVIOUS_COMMIT' in os.environ and \\\n            commit in os.environ['GIT_PREVIOUS_COMMIT']) or \\\n           ('ghprbActualCommit' in os.environ and \\\n            commit not in os.environ['ghprbActualCommit']):\n            break\n\n        commit = args.commit if args.commit else commit\n        fileList = args.file if args.file else get_file_list(commit)\n\n        for filePath in fileList:\n            if mode == 'manual' and filePath in results:\n                print('\\033[36mverb:\\033[0m skip {} file.'.format(filePath))\n                continue\n\n            if parse_spec(filePath):\n                specFile, specContent = parse_spec(filePath)\n                if args.verbose:\n                    print('\\033[36mverb:\\033[0m parse {} file.'.format(specFile))\n            elif mode == 'ci':\n                print('Unmodified spec file.')\n                continue\n            else:\n                print('Unmodified spec file.')\n                sys.exit()\n\n            sourceList = get_source_list(specContent)\n            get_sources(sourceList, verb=args.verbose)\n            srpmFile = build_srpm(specFile)\n            print('\\033[32minfo:\\033[0m Build SRPM -', srpmFile)\n\n            for rel in Releases:\n                for arch in Archs:\n                    outDir = os.path.join(rootDir, rel, arch)\n                    print('\\033[32minfo:\\033[0m Build RPM for fc{} - {}:\\n'.format(rel, arch))\n                    value, log = build_rpm(srpmFile, release=rel, arch=arch, output=outDir,\n                                           opts=args.mock, verb=args.verbose)\n                    print(log)\n                    if args.createrepo:\n                        print('\\033[32minfo:\\033[0m Create 
metadata for fc{} - {}:\\n'.format(rel, arch),\n create_repo(outDir))\n if args.rpmlint:\n print('\\033[32minfo:\\033[0m Check RPM for fc{} - {}:\\n'.format(rel, arch),\n rpm_lint(outDir))\n if mode == 'manual':\n result(args.result, [value, specFile, rel, arch])\n\n if args.file or args.commit:\n break\n","repo_name":"xiuxiazhang/repo","sub_path":"repos/cibuild.py","file_name":"cibuild.py","file_ext":"py","file_size_in_byte":11677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"44905112988","text":"from sqlalchemy import Column, String, Integer\nimport models.Type\nfrom repository.base import Base\n\n\nclass Type(Base.Base):\n __tablename__ = 'types'\n\n id = Column(Integer, primary_key=True)\n value = Column(String(2000))\n description = Column(String(2000))\n\n def __init__(self,\n id=0,\n value=\"\",\n description=\"\",\n type=models.Type.Type()):\n if(type.value or type.id):\n self.id = type.id\n self.value = type.value\n self.description = type.description\n else:\n self.id = id\n self.value = value\n self.description = description\n","repo_name":"CapivaraProjects/database","sub_path":"Type.py","file_name":"Type.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74793592499","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 20 22:55:33 2018\r\n\r\n@author: Johan\r\n\"\"\"\r\nimport time\r\nt = 0\r\nfor i in range(100):\r\n t1 = time.time()\r\n l = [i for i in range(1000) if (i%3)==0 or (i%5) == 0]\r\n t += (time.time() - t1)\r\n\r\nprint(t/100)\r\n\r\nprint(sum(l))","repo_name":"tobbelito123/projEuler","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27832882662","text":"#calc_dist.py\nimport mdtraj as md\nimport numpy as np\nimport sys\n\nfname = 'npt_8.gro'\nNsol = 2136\nNdmso = 8544\nNchain = 20\nd_th = 0.50\n\nt = md.load(fname)\ntop = t.topology\ndf, b = top.to_dataframe()\npos = t.xyz\nbox = t.unitcell_lengths[0,0]\n\ndf['x'] = pos[0,:,0]\ndf['y'] = pos[0,:,1]\ndf['z'] = pos[0,:,2]\n\now = df[df['name'] == 'O']\npva_c = df[df['name'] == 'c3']\n\nprint(ow)\nprint(pva_c)\n\n# inter-chain\noutf = 'ocdist.dat'\n\nwith open(outf, 'wt') as f:\n f.write('#index\\tmol1\\tchain2\\tatm1\\tatm2\\tx1\\ty1\\tz1\\tx2\\ty2\\tz2\\tO-O dist\\n')\n\nwith open(outf, 'a+') as f:\n icount = 0\n for i in range(1, Nsol+1):\n ocount = 0\n aow = ow[ow['resSeq']==i]\n pos_aow = np.array(aow[['x','y','z']]) \n atm_aow = np.array(aow['serial'])\n for j in range(Nsol+Ndmso+1, Nsol+Ndmso+Nchain+1):\n dn = pva_c[pva_c['resSeq']==j]\n pos_dn = np.array(dn[['x','y','z']])\n atm_dn = np.array(dn['serial'])\n #print(j, pos_aow, pos_dn)\n for ipd, pdn in enumerate(pos_dn):\n dr = np.abs(pos_aow - pdn)\n #print(dr/box, np.round(dr/box))\n dr -= np.round(dr/box)*box\n #print(dr/box)\n d = np.sqrt(np.sum(dr**2))\n #print(d)\n if d <= d_th:\n icount += 1\n ocount += 1\n print('molpair: {0:4d}\\t{1:4d}'.format(i,j))\n print('atmpair: {0:4d}\\t{1:4d}\\t{2:6.5f}'.format(atm_aow[0], atm_dn[ipd], d))\n f.write('{0:4d}\\t{1:3d}\\t{2:4d}\\t{3:4d}\\t{4:4d}\\t{5:7.3f}\\t{6:7.3f}\\t{7:7.3f}\\t{8:7.3f}\\t{9:7.3f}\\t{10:7.3f}\\t{11:6.5f}\\n'.format(\n icount, i, j, atm_aow[0], atm_dn[ipd], pos_aow[0,0], pos_aow[0,1], pos_aow[0,2], pos_dn[ipd,0], pos_dn[ipd,1], pos_dn[ipd,2], d))\n 
print(i,ocount)\n","repo_name":"yamada1988/mypythonpkg","sub_path":"myscript/calc_dist_gel.py","file_name":"calc_dist_gel.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"35460237854","text":"import vk_api, json\r\nfrom random import randint as rand\r\nfrom vk_api.longpoll import VkEventType, VkLongPoll\r\nfrom config import tok\r\nfrom threading import Thread\r\n\r\n\r\ntry:\r\n\tvk_session = vk_api.VkApi(token = tok)\r\n\tlongpoll = VkLongPoll(vk_session)\r\n\tprint('Connected')\r\nexcept Exception as e:\r\n\tinput('Произошла ошибка подключения к вк, программа завершилась!')\r\n\texit()\r\n\r\n\r\nclass User():\r\n\tdef __init__(self, id, cash, mode, number, access_key):\r\n\t\tself.id = id\r\n\t\tself.number = number\r\n\t\tself.cash = cash\r\n\t\tself.mode = mode\r\n\t\tself.access_key = access_key\r\n\r\n\r\ndef get_access_key():\r\n\tst = 'qwertyuiopasdfghjklzxcvbnm1234567890'\r\n\tcode = ''\r\n\tfor i in range(8):\r\n\t\tcode = f'{code}{st[rand(0, len(st) - 1)]}'\r\n\treturn code\r\n\r\n\r\ndef get_carousel(el):\r\n\tcarousel = {\"type\" : \"carousel\", \"elements\" : []}\r\n\tfor element in el:\r\n\t\tcarousel[\"elements\"].append({\"photo_id\": element[0], \"action\": { \"type\": \"open_photo\" },\r\n\t\t\"buttons\": [{ \"action\": { \"type\": \"text\", \"label\": element[1], \"payload\": \"{}\" }}]})\r\n\tcarousel = json.dumps(carousel, ensure_ascii = False).encode('utf-8')\r\n\tcarousel = str(carousel.decode('utf-8'))\r\n\treturn carousel\r\n\r\n\r\ndef get_keyboard(buts): # keyboard builder function\r\n\tnb = []\r\n\tfor i in range(len(buts)):\r\n\t\tnb.append([])\r\n\t\tfor k in range(len(buts[i])):\r\n\t\t\tnb[i].append(None)\r\n\tfor i in range(len(buts)):\r\n\t\tfor k in range(len(buts[i])):\r\n\t\t\ttext = buts[i][k][0]\r\n\t\t\tcolor = {'зеленый' : 'positive', 'красный' : 'negative', 'синий' : 'primary'}[buts[i][k][1]]\r\n\t\t\tnb[i][k] = {\"action\": {\"type\": \"text\", \"payload\": \"{\\\"button\\\": \\\"\" + \"1\" + \"\\\"}\", \"label\": f\"{text}\"}, \"color\": f\"{color}\"}\r\n\tfirst_keyboard = {'one_time': False, 'buttons': nb, 'inline' : False}\r\n\tfirst_keyboard = json.dumps(first_keyboard, ensure_ascii=False).encode('utf-8')\r\n\tfirst_keyboard = str(first_keyboard.decode('utf-8'))\r\n\treturn first_keyboard\r\n\r\n\r\ncarous = get_carousel([\r\n\t[\"296431501_457273770\", \"Кейс 1 уровня (10руб)\"],\r\n\t[\"296431501_457273771\", \"Кейс 2 уровня (20руб)\"],\r\n\t[\"296431501_457273772\", \"Кейс 3 уровня (30руб)\"]\r\n])\r\n\r\nclear_key = get_keyboard([])\r\n\r\nbal_key = get_keyboard([\r\n\t[('Пополнить', 'синий'), ('Снять', 'синий')],\r\n\t[('Мой баланс', 'зеленый')],\r\n\t[('Назад', 'красный')]\r\n])\r\n\r\nback_key = get_keyboard([\r\n\t[('Назад', 'синий')]\r\n])\r\n\r\nmenu_key = get_keyboard([\r\n\t[('Играть', 'зеленый'), ('Баланс', 'синий')]\r\n])\r\n\r\n\r\ndef sender(id, text, key):\r\n\tvk_session.method('messages.send', {'user_id' : id, 'message' : text, 'random_id' : 0, 'keyboard' : key})\r\n\r\n\r\ndef send_car(id, text, car):\r\n\tvk_session.method('messages.send', {\r\n\t\t\t'user_id' : id,\r\n\t\t\t'message' : text,\r\n\t\t\t'random_id' : 0,\r\n\t\t\t'attachment' : [],\r\n\t\t\t'template' : car\r\n\t\t})\r\n\r\n\r\nusers = []\r\n\r\n\r\ndef pay(number, score):# payout\r\n\tprint(f'Выплачено {score}руб по номеру {number}')\r\n\t# pays out score rubles to the given number\r\n\r\n\r\ndef get_transactions():\r\n\tglobal 
users\r\n\tlast = 0\r\n\twhile True:\r\n\t\tpass # check the latest payment using the user's token\r\n\r\n\r\nwhile True:\r\n\ttry:\r\n\t\tfor event in longpoll.listen():\r\n\t\t\tif event.type == VkEventType.MESSAGE_NEW:\r\n\t\t\t\tif event.to_me:\r\n\r\n\t\t\t\t\tid = event.user_id\r\n\t\t\t\t\tmsg = event.text.lower()\r\n\r\n\t\t\t\t\tif msg == 'начать':\r\n\t\t\t\t\t\tflag = 0\r\n\t\t\t\t\t\tfor user in users:\r\n\t\t\t\t\t\t\tif id == user.id:\r\n\t\t\t\t\t\t\t\tsender(id, 'Выберите действие:', menu_key)\r\n\t\t\t\t\t\t\t\tuser.mode = 'menu'\r\n\t\t\t\t\t\t\t\tflag = 1\r\n\t\t\t\t\t\tif flag == 0:\r\n\t\t\t\t\t\t\tsender(id, 'Выберите действие:', menu_key)\r\n\t\t\t\t\t\t\tusers.append(User(id = id, cash = 30, mode = 'menu', number = 0, access_key = 'None'))\r\n\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor user in users:\r\n\t\t\t\t\t\t\tif id == user.id:\r\n\r\n\r\n\t\t\t\t\t\t\t\tif user.mode == 'menu':\r\n\r\n\t\t\t\t\t\t\t\t\tif msg == 'играть':\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Выберите действие:', back_key)\r\n\t\t\t\t\t\t\t\t\t\tsend_car(id, 'Выберите кейс:', carous)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'game'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'баланс':\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Выберите действие:', bal_key)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\r\n\t\t\t\t\t\t\t\telif user.mode == 'game':\r\n\r\n\t\t\t\t\t\t\t\t\tif msg == 'кейс 1 уровня (10руб)':\r\n\t\t\t\t\t\t\t\t\t\tif user.cash >= 10:\r\n\t\t\t\t\t\t\t\t\t\t\tuser.cash -= 10\r\n\t\t\t\t\t\t\t\t\t\t\tbonus = rand(5, 16)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.cash += bonus\r\n\t\t\t\t\t\t\t\t\t\t\tsend_car(id, f'Вы выиграли {bonus} рублей!\\nВыберите кейс:', carous)\r\n\t\t\t\t\t\t\t\t\t\t\tprint('1')\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, f'У вас недостаточно средств!\\nВаш баланс: {user.cash}\\nВы можете пополнить баланс через QIWI.', menu_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'menu'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'кейс 2 уровня (20руб)':\r\n\t\t\t\t\t\t\t\t\t\tif user.cash >= 20:\r\n\t\t\t\t\t\t\t\t\t\t\tuser.cash -= 20\r\n\t\t\t\t\t\t\t\t\t\t\tbonus = rand(15, 26)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.cash += bonus\r\n\t\t\t\t\t\t\t\t\t\t\tsend_car(id, f'Вы выиграли {bonus} рублей!\\nВыберите кейс:', carous)\r\n\t\t\t\t\t\t\t\t\t\t\tprint('2')\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, f'У вас недостаточно средств!\\nВаш баланс: {user.cash}\\nВы можете пополнить баланс через QIWI.', menu_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'menu'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'кейс 3 уровня (30руб)':\r\n\t\t\t\t\t\t\t\t\t\tif user.cash >= 30:\r\n\t\t\t\t\t\t\t\t\t\t\tuser.cash -= 30\r\n\t\t\t\t\t\t\t\t\t\t\tbonus = rand(25, 36)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.cash += bonus\r\n\t\t\t\t\t\t\t\t\t\t\tsend_car(id, f'Вы выиграли {bonus} рублей!\\nВыберите кейс:', carous)\r\n\t\t\t\t\t\t\t\t\t\t\tprint('3')\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, f'У вас недостаточно средств!\\nВаш баланс: {user.cash}\\nВы можете пополнить баланс через QIWI.', menu_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'menu'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'назад':\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Выберите действие:', menu_key)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'menu'\r\n\r\n\r\n\t\t\t\t\t\t\t\telif user.mode == 'balance':\r\n\r\n\t\t\t\t\t\t\t\t\tif msg == 'назад':\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Выберите действие:', menu_key)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'menu'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'пополнить':\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Для 
пополнения баланса переведите сумму на QIWI на номер: 89294046340\\nПосле перевода средств, ваш баланс в боте пополнится в течении 5 минут.\\nВведите номер телефона, с которого придет оплата(по QIWI):', clear_key)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'upload1'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'снять':\r\n\t\t\t\t\t\t\t\t\t\tif user.cash >= 100:\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, 'Введите телефонный номер своего qiwi, на который выведется баланс:', clear_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'upload2'\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, f'Минимальная сумма вывода: 100руб\\nВаш баланс: {user.cash}руб', bal_key)\r\n\r\n\t\t\t\t\t\t\t\t\telif msg == 'мой баланс':\r\n\t\t\t\t\t\t\t\t\t\tsender(id, f'Ваш баланс: {user.cash}', bal_key)\r\n\r\n\r\n\t\t\t\t\t\t\t\telif user.mode == 'upload1': # top-up\r\n\t\t\t\t\t\t\t\t\tif (msg.startswith('8')) or (msg.startswith('7')):\r\n\t\t\t\t\t\t\t\t\t\tif (len(msg) == 11)&(msg.isdigit()):\r\n\t\t\t\t\t\t\t\t\t\t\tuser.number = msg\r\n\t\t\t\t\t\t\t\t\t\t\tuser.access_key = get_access_key()\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, f'Номер введён верно!\\n!!!Внимание!!!\\nДля корректной проверки платежа вы должны отправить код \"{user.access_key}\" в комментарии при переводе!', bal_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg.startswith('+'):\r\n\t\t\t\t\t\t\t\t\t\tif (len(msg) == 12)&(msg[1::].isdigit()):\r\n\t\t\t\t\t\t\t\t\t\t\tuser.number = msg\r\n\t\t\t\t\t\t\t\t\t\t\tuser.access_key = get_access_key()\r\n\t\t\t\t\t\t\t\t\t\t\tsender(id, f'Номер введён верно!\\n!!!Внимание!!!\\nДля корректной проверки платежа вы должны отправить код \"{user.access_key}\" в комментарии при переводе!', bal_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Неверный номер!', bal_key)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\r\n\t\t\t\t\t\t\t\telif user.mode == 'upload2': # withdrawal\r\n\t\t\t\t\t\t\t\t\tif (msg.startswith('8')) or (msg.startswith('7')):\r\n\t\t\t\t\t\t\t\t\t\tif (len(msg) == 11)&(msg.isdigit()):\r\n\t\t\t\t\t\t\t\t\t\t\tuser.number = msg\r\n\t\t\t\t\t\t\t\t\t\t\tscore = (user.cash-50)\r\n\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpay(user.number, score)\r\n\t\t\t\t\t\t\t\t\t\t\t\tuser.cash -= score\r\n\t\t\t\t\t\t\t\t\t\t\t\tsender(id, f'Номер введён верно!\\nОжидайте выплаты на QIWI на номер {msg}\\nСумма выплаты: {score}', bal_key)\r\n\t\t\t\t\t\t\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsender(id, 'Не удалось вывести средства!\\nПроизошла ошибка при переводе!\\nВозможные причины:\\n1) Сбой в работе бота\\n2) Вы не подтвердили паспортные данные своего кошелька QIWI\\nПроверьте свой QIWI кошелёк или попробуйте позже!', bal_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\t\t\t\t\t\t\t\t\telif msg.startswith('+'):\r\n\t\t\t\t\t\t\t\t\t\tif (len(msg) == 12)&(msg[1::].isdigit()):\r\n\t\t\t\t\t\t\t\t\t\t\tuser.number = msg\r\n\t\t\t\t\t\t\t\t\t\t\tscore = (user.cash-50)\r\n\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpay(user.number, score)\r\n\t\t\t\t\t\t\t\t\t\t\t\tuser.cash -= score\r\n\t\t\t\t\t\t\t\t\t\t\t\tsender(id, f'Номер введён верно!\\nОжидайте выплаты на QIWI на номер {msg}\\nСумма выплаты: {score}', bal_key)\r\n\t\t\t\t\t\t\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsender(id, 'Не удалось вывести средства!\\nПроизошла ошибка при переводе!\\nВозможные причины:\\n1) Сбой в работе бота\\n2) Вы не подтвердили паспортные данные своего 
кошелька QIWI\\nПроверьте свой QIWI кошелёк или попробуйте позже!', bal_key)\r\n\t\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tsender(id, 'Неверный номер!', bal_key)\r\n\t\t\t\t\t\t\t\t\t\tuser.mode = 'balance'\r\n\r\n\r\n\texcept Exception as e:\r\n\t\tprint('Произошла ошибка, идёт переподключение к серверу вк...')\r\n\t\tvk_session = vk_api.VkApi(token = tok)\r\n\t\tlongpoll = VkLongPoll(vk_session)","repo_name":"dshutrin/demo_work","sub_path":"order4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"1353466925","text":"#!/usr/bin/env python3\n\"\"\" data collection from APIs \"\"\"\nfrom requests import get\n\n\ndef sentientPlanets():\n \"\"\"\n method that returns the list of names of the home planets of all\n sentient species.\n \"\"\"\n result_planets = []\n n_page = 1\n state = True\n\n while state:\n req_species = get(\n f\"https://swapi-api.hbtn.io/api/species/?page=\" + str(n_page)\n )\n data_species = req_species.json()\n\n for specie in data_species[\"results\"]:\n if 'sentient' in {specie[\"classification\"], specie['designation']}:\n if specie['homeworld'] is not None:\n req_planet = get(specie['homeworld'])\n result_planets.append(req_planet.json()['name'])\n\n if data_species['next'] is None:\n state = False\n\n n_page += 1\n\n return result_planets\n","repo_name":"andresvanegas19/holbertonschool-machine_learning","sub_path":"pipeline/0x01-apis/1-sentience.py","file_name":"1-sentience.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9292017315","text":"coins = [1, 2, 5, 10, 20, 50, 100, 200]\n\ndef find_ways(n):\n\tways = [1 if x == 0 else 0 for x in range(0, n + 1)]\n\t# i means the max coin we can use\n\tfor i in coins:\n\t\tfor j in range(i, n + 1):\n\t\t\tways[j] = ways[j] + ways[j - i]\n\treturn ways[n]\n\nprint(find_ways(2 * 100))","repo_name":"cloudydev/Euler","sub_path":"1-50/p31.py","file_name":"p31.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26776363577","text":"from typing import Any, List, Union, Optional\nimport time\nimport gym\nimport numpy as np\nfrom ding.envs import BaseEnv, BaseEnvTimestep, FrameStackWrapper\nfrom ding.torch_utils import to_ndarray, to_list\nfrom ding.envs.common.common_function import affine_transform\nfrom ding.utils import ENV_REGISTRY\n\n\n@ENV_REGISTRY.register('bipedalwalker')\nclass BipedalWalkerEnv(BaseEnv):\n\n def __init__(self, cfg: dict) -> None:\n self._cfg = cfg\n self._init_flag = False\n self._act_scale = cfg.act_scale\n self._rew_clip = cfg.rew_clip\n if \"replay_path\" in cfg:\n self._replay_path = cfg.replay_path\n else:\n self._replay_path = None\n\n def reset(self) -> np.ndarray:\n if not self._init_flag:\n self._env = gym.make('BipedalWalker-v3')\n self._observation_space = self._env.observation_space\n self._action_space = self._env.action_space\n self._reward_space = gym.spaces.Box(\n low=self._env.reward_range[0], high=self._env.reward_range[1], shape=(1, ), dtype=np.float32\n )\n self._init_flag = True\n if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:\n np_seed = 100 * np.random.randint(1, 1000)\n self._env.seed(self._seed + np_seed)\n elif hasattr(self, '_seed'):\n 
self._env.seed(self._seed)\n        if self._replay_path is not None:\n            self._env = gym.wrappers.RecordVideo(\n                self._env,\n                video_folder=self._replay_path,\n                episode_trigger=lambda episode_id: True,\n                name_prefix='rl-video-{}'.format(id(self))\n            )\n        self._eval_episode_return = 0\n        obs = self._env.reset()\n        obs = to_ndarray(obs).astype(np.float32)\n        return obs\n\n    def close(self) -> None:\n        if self._init_flag:\n            self._env.close()\n            self._init_flag = False\n\n    def render(self) -> None:\n        self._env.render()\n\n    def seed(self, seed: int, dynamic_seed: bool = True) -> None:\n        self._seed = seed\n        self._dynamic_seed = dynamic_seed\n        np.random.seed(self._seed)\n\n    def step(self, action: np.ndarray) -> BaseEnvTimestep:\n        assert isinstance(action, np.ndarray), type(action)\n        if action.shape == (1, ):\n            action = action.squeeze()  # 0-dim array\n        if self._act_scale:\n            action = affine_transform(action, min_val=self.action_space.low, max_val=self.action_space.high)\n\n        obs, rew, done, info = self._env.step(action)\n        self._eval_episode_return += rew\n        if self._rew_clip:\n            rew = max(-10, rew)\n        rew = np.float32(rew)\n\n        if done:\n            info['eval_episode_return'] = self._eval_episode_return\n        obs = to_ndarray(obs).astype(np.float32)\n        rew = to_ndarray([rew])  # wrapped to be transferred to an array with shape (1,)\n        return BaseEnvTimestep(obs, rew, done, info)\n\n    def enable_save_replay(self, replay_path: Optional[str] = None) -> None:\n        if replay_path is None:\n            replay_path = './video'\n        self._replay_path = replay_path\n\n    def random_action(self) -> np.ndarray:\n        random_action = self.action_space.sample()\n        if isinstance(random_action, np.ndarray):\n            pass\n        elif isinstance(random_action, int):\n            random_action = to_ndarray([random_action], dtype=np.int64)\n        return random_action\n\n    @property\n    def observation_space(self) -> gym.spaces.Space:\n        return self._observation_space\n\n    @property\n    def action_space(self) -> gym.spaces.Space:\n        return self._action_space\n\n    @property\n    def reward_space(self) -> gym.spaces.Space:\n        return self._reward_space\n\n    def __repr__(self) -> str:\n        return \"DI-engine BipedalWalker Env\"\n","repo_name":"opendilab/DI-engine","sub_path":"dizoo/box2d/bipedalwalker/envs/bipedalwalker_env.py","file_name":"bipedalwalker_env.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":2963,"dataset":"github-code","pt":"57"} +{"seq_id":"14355213262","text":"# Find the minimum and maximum elements in a matrix.\n\nfrom random import randint\n\ndef random_number():\n    n, m = 3, 3\n    a = [[randint(1, 10) for j in range(m)] for i in range(n)]\n    yield a\n\nlst = []\ndef min_max(i):\n    iter_object = iter(i[0])\n    while True:\n        try:\n            next_i = next(iter_object)\n            for k in next_i:\n                lst.append(k)\n        except StopIteration:\n            print(\"Итерация закончилась\")\n            break\n    print(f\"Минимальное значение в матрице: {min(lst)}\")\n    print(f\"Максимальное значение в матрице: {max(lst)}\")\n\n    iter_object_2 = iter(i[0])\n    print(\"\\nСама матрица:\")\n    while True:\n        try:\n            c = next(iter_object_2)\n            print(*c)\n        except StopIteration:\n            print(\"\\nВсе!\")\n            break\n\n\nb = list(random_number())\nmin_max(b)\n","repo_name":"xercesm/Proj_1sem_Belaya","sub_path":"PZ_13/PZ_13_1.py","file_name":"PZ_13_1.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12967961490","text":"\n\nfrom Investment.InvestFuncs import StockInfo\n\n\n#####Company Search\ndef search_company(update, context):\n    if 
context.user_data['search'] == True:\n text = update.message.text.split(' ')\n try:\n stock = StockInfo()\n data = stock.get_news(text[0],text[1])\n if len(data)>0:\n for i in data:\n if i[6]:\n subs = 'Да'\n else:\n subs = 'Нет'\n update.message.reply_text(f\"\"\"Время: {i[0]}\nЗаголовок: {i[1]}\nИсточник:{i[2]}\nАнонс:{i[3]}\nСсылка:{i[4]}\nЯзык:{i[5]}\nНужна подписка: {subs}\"\"\")\n else:\n update.message.reply_text(\"К сожалению, новостей нет\")\n except:\n update.message.reply_text(\"Введите верный тикер\")\n else:\n print('No')\n pass\n\ndef search_menu(update, context):\n query = update.callback_query\n query.answer()\n query.edit_message_text(text=\"Введите тикер компании и количество последних новостей Например: AAPL 3'\")\n context.user_data['search'] = True\n","repo_name":"Rune28/FinApp","sub_path":"Menu/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27397083902","text":"from flask import (\n\tBlueprint, flash, g, redirect, render_template,\n request, session, url_for\n)\n\nfrom vhs.common.database import get_database\nfrom vhs.model.movie import Movie\n\nbp = Blueprint('movies', __name__, url_prefix='/movies')\n\n@bp.route('/')\ndef index():\n\tmovies = Movie.query.order_by(Movie.id.desc()).limit(5).all()\n\t\n\treturn render_template('movies/index.html',movies=movies)\n\n@bp.route('/delete/')\ndef delete(id):\n\tdatabase = get_database()\n\n\ttry:\n\t\tdatabase.session.delete(Movie.query.filter_by(id=id).first())\n\t\tdatabase.session.commit()\n\t\n\t\tflash(\"Deleted movie.\")\n\texcept:\n\t\tflash(\"Failed to delete movie.\")\n\n\treturn redirect(url_for(\"movies.index\"))\n\n@bp.route('/edit/', methods=('GET', 'POST'))\ndef edit(id):\n\tdatabase = get_database()\n\n\tmovie = Movie.query.filter_by(id=id).first()\n\n\tif not movie:\n\t\tflash(\"Movie doesn't exist.\")\n\t\treturn redirect(url_for(\"movies.index\"))\n\n\tif request.method == 'POST':\n\t\tmovie.title = request.form.get('title', \"\")\n\n\t\ttry:\n\t\t\tmovie.price_code = int(request.form.get('price_code', \"0\"))\n\t\texcept:\n\t\t\tflash(\"Please select a valid price code.\")\n\n\t\tdatabase.session.add(movie)\n\t\tdatabase.session.commit()\n\t\n\t\tflash(\"Updated movie.\")\n\t\t\n\treturn render_template(\"movies/edit.html\", movie=movie, Movie=Movie)\n\n@bp.route('/add')\ndef add():\n\tdatabase = get_database()\n\n\ttry:\n\t\tmovie = Movie(title='New Movie')\n\t\tdatabase.session.add(movie)\n\t\tdatabase.session.commit()\n\n\t\treturn redirect(url_for(\"movies.edit\", id=movie.id))\n\texcept:\n\t\tflash(\"Failed to add movie.\")\n\n\treturn redirect(url_for(\"movies.index\"))\n","repo_name":"aaronbolyard-school/CSC-221","sub_path":"Module 3/Assignments/M3LAB/vhs/views/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34992013764","text":"import cv2\nimport numpy as np\nfrom scipy import misc as s\nimport os\ndef forsvg(filename):\n i = cv2.imread(filename)\n im = cv2.cvtColor(i,cv2.COLOR_BGR2GRAY)\n ret,thresh = cv2.threshold(i,175,255,cv2.THRESH_BINARY)\n kernel = np.ones((5,5),np.uint8)\n dilation = cv2.erode(thresh,kernel,iterations = 2)\n bit = cv2.bitwise_not(dilation)\n bits = cv2.resize(bit,(222, 222), interpolation = cv2.INTER_CUBIC)\n threshs = cv2.resize(dilation,(222, 222), interpolation = cv2.INTER_CUBIC)\n\n\n 
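# write both masks to JPEG, convert them to BMP with ImageMagick, then trace each to SVG with potrace -s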
s.imsave(\"bit.jpg\", bits)\n s.imsave(\"thresh.jpg\", dilation)\n\n os.system('convert bit.jpg bit.bmp')\n os.system('convert thresh.jpg thresh.bmp')\n os.system('potrace -s bit.bmp')\n os.system('potrace -s thresh.bmp')\n","repo_name":"vimarshc/SVGedit","sub_path":"pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39342776722","text":"# --------------------------------------------------------- #\n# argparse #\n# --------------------------------------------------------- #\nimport argparse\n\ndef parse_arguments():\n ''' Parse script's arguments.\n Options:\n args['makefile']\n args['procs']\n args['node'])\n '''\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-r\",\"--range\",help=\"range for running\")\n args = vars(parser.parse_args())\n return args\n# args = parse_arguments()\n","repo_name":"dmerz75/myconfigs","sub_path":"mylib/myargs.py","file_name":"myargs.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71570867377","text":"from django.shortcuts import redirect, render\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.urls import reverse\n\njob_title = [\n \"First Job\",\n \"Second Job\",\n \"Thrid Job\"\n]\n\n\njob_description = [\n \"First job description\",\n \"Second job description\",\n \"Third job description\",\n]\n\n\ndef job_list(request):\n \"\"\"\"will iterate over job_title[] and display each job on the home page\"\"\"\n list_of_job = \"\"\n return HttpResponse(list_of_job)\n\n\n\ndef job_detail(request, id):\n\n try:\n if id == 0:\n return redirect(reverse(\"jobs_home\")) #this will redirect the user to the home screen. \n return_html = f\"
{job_title[id]}
{job_description[id]}\"\n return HttpResponse(return_html)\n except:\n return HttpResponseNotFound(\"Not found\")","repo_name":"anthonynarine/Job-application-","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15988156305","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 15 12:12:01 2017\r\n\r\n@author: Rodrigo\r\n\"\"\"\r\n#com risadinha\r\nfrom datetime import datetime\r\nimport random\r\n\r\nlista = random.sample(range(1, 1000000), 100000)\r\nl = [2, 3, 5, 7, 9, 35, 678, 86, 123, 54, 8765]\r\n\r\ndef getTempoExecBubble():\r\n inicio = datetime.now()\r\n bubbleSort(lista)\r\n fim = datetime.now()\r\n print('Duração: {}'.format(fim - inicio))\r\n \r\ndef getTempoExecQuick():\r\n inicio = datetime.now()\r\n quickSort(lista, 0, len(lista) - 1)\r\n fim = datetime.now()\r\n print('Duration: {}'.format(fim - inicio))\r\n\r\ndef bubbleSort(l):\r\n if len(l) <= 1:\r\n lst = l\r\n else:\r\n for j in range(0, len(l)):\r\n for i in range(0, len(l)-1):\r\n if l[i] > l[i+1]:\r\n temp = l[i+1]\r\n l[i+1] = l[i]\r\n l[i] = temp\r\n lst = l\r\n return lst\r\n \r\n#função pro quicksort\r\ndef pivo(l,inicio,fim):\r\n i = (inicio - 1)\r\n x = l[fim]\r\n \r\n for j in range(inicio, fim):\r\n if l[j] <= x:\r\n \r\n i = i+1\r\n #inverte as posições\r\n l[i],l[j] = l[j],l[i]\r\n \r\n # mesma coisa aqui, depois do loop\r\n l[i+1],l[fim] = l[fim],l[i+1]\r\n return (i+1)\r\n \r\ndef quickSort(l,inicio,fim):\r\n \r\n tamanho = fim - inicio + 1\r\n pilha = [0] * (tamanho) #cria um array do tamanho da lista\r\n #graças a deus pq ninguém merece \r\n topo = -1\r\n \r\n topo += 1\r\n pilha[topo] = inicio\r\n topo += 1\r\n pilha[topo] = fim\r\n \r\n while topo >= 0:\r\n fim = pilha[topo]\r\n topo -= 1\r\n inicio = pilha[topo]\r\n topo = topo - 1 \r\n # Pivô\r\n pv = pivo(l, inicio, fim) # faz a famosa inversão (na outra função)\r\n # esquerda #FORATEMER\r\n if pv-1 > inicio:\r\n topo += 1\r\n pilha[topo] = inicio\r\n topo += 1\r\n pilha[topo] = pv - 1\r\n \r\n # direita #BORATEMER\r\n if pv+1 < fim:\r\n topo += 1\r\n pilha[topo] = pv + 1\r\n topo += 1\r\n pilha[topo] = fim\r\n return l #FUNCIONOU PORRA\r\n \r\n#sem risadinha","repo_name":"rodrigoorf/datastructures","sub_path":"ord2.py","file_name":"ord2.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30102295850","text":"n = int(input())\na = sorted(list(map(int, input().split())), reverse=True)\nswi = 0\ns = [0, 0]\ni = 0\nwhile i < len(a) - 1:\n if a[i] == a[i + 1]:\n s[swi] = a[i]\n i += 1\n swi += 1\n if swi == 2:\n break\n i += 1\n\nprint(s[0] * s[1])\n","repo_name":"taichi6930/atcoder","sub_path":"_archive/ABC_C_71.py","file_name":"ABC_C_71.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5808343505","text":"import json\nimport os\nfrom ..dumper import Dumper\n\n\nclass JsonDumper(Dumper):\n\n def dump(self, file_path, sheet_name, output_dir, keys, contents, indexes, options):\n data = self.compose(keys, contents, indexes)\n if data is None:\n return False\n\n filename = os.path.splitext(os.path.basename(file_path))[0]\n os.makedirs(output_dir, exist_ok=True)\n with open(\"%s/%s_%s.json\" % (output_dir, filename, sheet_name), 'w') as ofp:\n json.dump(data, ofp, ensure_ascii=False, 
indent=4)\n\n return True\n\n @staticmethod\n def compose(keys, contents, indexes):\n \"\"\" compose keys and contents context\n\n :keys: keys context\n :contents: contents context\n :returns: object if succeed\n\n \"\"\"\n kv_objs = [{keys[i][0]: content[i] for i in range(0, len(content))} for content in contents]\n\n return {\n \"data\": kv_objs,\n \"index\": indexes,\n }\n","repo_name":"VyronLee/XlsxConverter","sub_path":"xlsx_converter/dumper/json/json_dumper.py","file_name":"json_dumper.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38453336322","text":"n = int(input())\nlimit = [0] + list(map(int,input().split()))\nm = int(input())\nboxes = list(map(int,input().split()))\n\nlimit.sort()\nboxes.sort()\nif limit[-1] < boxes[-1]:\n print(-1)\n exit()\n\npickups = [0]*m\ncranes = 1\nidx = m-1\nmaxT = 1\n\nwhile idx >= 0:\n t = 0\n\n for _ in range(maxT):\n if idx < 0:\n break\n t += 1\n pickups[idx] = t\n idx -= 1\n\n while idx >= 0 and limit[-cranes-1] < boxes[idx]:\n t += 1\n for _ in range(cranes):\n if idx < 0:\n break\n pickups[idx] = t\n idx -= 1\n\n cranes += 1\n maxT = max(maxT,t)\n\nprint(max(pickups))","repo_name":"LightPotato99/baekjoon","sub_path":"greedy/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35392780682","text":"import serial\nimport time\n\nser = serial.Serial('/dev/rfcomm0', 115200)\nif ser.isOpen == False:\n ser.open()\n\nwhile(1):\n for count in range(0,51):\n #time.sleep(0.02)\n dist = int(count)\n data = bytes(str(chr(dist)), 'ascii')\n #print(\"data is\" + str(data))\n ser.write(data)\n\nwhile(1):\n size = ser.inWaiting()\n if size != 0:\n print(\"here\")\n response = ser.read(1)\n print(response)","repo_name":"wangshengwalter/CPEN_391","sub_path":"RPI/DE1Communication/bluetooth.py","file_name":"bluetooth.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39719449312","text":"import datetime\n\nfrom rest_framework import serializers\n\nfrom .models import Account, Transaction\n\n\nclass AccountSerializer(serializers.ModelSerializer):\n class Meta:\n model = Account\n fields = '__all__'\n\n\nclass TransactionSerializer(serializers.ModelSerializer):\n crypto = serializers.ReadOnlyField(source='crypto.name')\n price = serializers.ReadOnlyField(source='price.value')\n\n class Meta:\n model = Transaction\n fields = ('crypto', 'id', 'date', 'price', 'amount', 'value')\n\n\nclass TransactionCreateSerializer(serializers.ModelSerializer):\n price = serializers.ReadOnlyField(source='price.value')\n crypto = serializers.CharField(max_length=255)\n\n class Meta:\n model = Transaction\n fields = '__all__'\n read_only_fields = (\"price\", \"account\", \"owner\", \"value\")\n\n def create(self, validated_data):\n crypto = validated_data.pop('crypto')\n amount = validated_data.pop('amount')\n user = self.context['request'].user\n transaction = Transaction.objects.create_transaction(crypto, user, amount)\n return transaction\n\n\nclass AccountInfoSerializer(serializers.ModelSerializer):\n pastData = serializers.SerializerMethodField('get_pastData')\n transactions = serializers.SerializerMethodField('get_transactions')\n\n def get_pastData(self, account):\n today = datetime.date.today()\n balance = account.balance\n days = 
self.context.get('days')\n if not days:\n days = 14\n else:\n days = int(days)\n two_weeks_ago_balances = []\n for i in range(days):\n day = today - datetime.timedelta(days=i)\n daily_transactions = account.transactions.filter(date__date=day).order_by('-date')\n for transaction in daily_transactions:\n serializer = TransactionSerializer(transaction)\n two_weeks_ago_balances.append({\n 'balance': balance,\n 'date': serializer.data['date'],\n 'transactionId': serializer.data['id']\n })\n balance -= transaction.value\n return two_weeks_ago_balances\n\n def get_transactions(self, account):\n today = datetime.date.today()\n days = self.context.get('days')\n if not days:\n days = 14\n else:\n days = int(days)\n day = today - datetime.timedelta(days=days)\n transactions = account.transactions.filter(date__gte=day).order_by('-date')\n serializer = TransactionSerializer(transactions, many=True)\n return serializer.data\n\n class Meta:\n model = Account\n fields = ['balance', 'pastData', 'transactions']\n\n\nclass HomeAccountInfoSerializer(serializers.ModelSerializer):\n pastData = serializers.SerializerMethodField('get_pastData')\n\n def get_pastData(self, account):\n today = datetime.date.today()\n balance = account.balance\n\n two_weeks_ago_balances = []\n for i in range(14):\n day = today - datetime.timedelta(days=i)\n daily_transactions = account.transactions.filter(date__date=day).order_by('-date')\n two_weeks_ago_balances.append({\n 'balance': balance,\n 'date': day\n })\n for transaction in daily_transactions:\n balance -= transaction.value\n return two_weeks_ago_balances\n\n class Meta:\n model = Account\n fields = ['balance', 'pastData']\n","repo_name":"Dominik2122/crypto-trader","sub_path":"api/account/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74518550268","text":"#!/usr/bin/python\nimport datetime\n\nFILE = '../DATA/presidents.txt'\n\ndef main():\n info = []\n with open(FILE) as presidents_in:\n for line in presidents_in:\n (\n term, lname, fname, bdate, ddate, bplace, bstate, tsdate, tedate,\n party\n ) = line[:-1].split(':')\n \n name = '{} {}'.format(fname, lname)\n \n birth_date = make_date(bdate)\n \n info.append((name, birth_date, party))\n \n for name, date, party in sorted(info, key=lambda e: e[1]):\n print(\"{:35s} {} {}\".format(name, date, party))\n\ndef make_date(date_str):\n year, month, day = date_str.split('-')\n date = datetime.date(int(year), int(month), int(day))\n return date\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jstrickler/20230710USGS","sub_path":"ANSWERS/pres_by_dob.py","file_name":"pres_by_dob.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"32149595697","text":"import sys\ninput = sys.stdin.readline\n\nN, H = map(int, input().split())\n\nup = [0]*(H+1) # 종유석\ndown = [0]*(H+1) # 석순\n\nfor i in range(N):\n if i % 2 == 0:\n down[int(input())] += 1 # 입력한 높이에서 파괴할 수 있는 석순 갯수\n else:\n up[int(input())] += 1 # 입력한 높이에서 파괴할 수 있는 종유석 갯수\n\nfor i in range(H-1,0,-1):\n down[i] += down[i+1] # 석순 파괴 갯수 누적합 (높이5에서 파괴한 석순은 높이4에서도 파괴할 수 있음)\n up[i] += up[i+1] # 종류석 파괴 갯수 누적합 (높이5에서 파괴한 종유석은 높이4에서도 파괴할 수 있음)\n\n\nminCnt = N\nrangeCnt = 0\nfor i in range(1,H+1):\n if down[i] + up[H-i+1] < minCnt:\n minCnt = down[i] + up[H-i+1]\n rangeCnt = 1\n elif down[i] + up[H-i+1] == minCnt:\n rangeCnt += 
1\n\nprint(minCnt, rangeCnt)\n\n\n# 참고 링크 : https://hongcoding.tistory.com/6\n","repo_name":"HS980924/Algorithm","sub_path":"src/10.이분탐색/B#3020_개똥벌레.py","file_name":"B#3020_개똥벌레.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"35800501406","text":"import unittest\nimport numpy as np\n\nfrom task import result\n\n\ndata = np.genfromtxt('data.csv', delimiter=',', dtype=np.int64)\nmaxima = np.argmax(data, axis=1)\nmaxima = np.expand_dims(maxima, axis=1)\ntest_result = np.take_along_axis(data, maxima, axis=1)\n\n\nclass TestCase(unittest.TestCase):\n def test_array(self):\n np.testing.assert_array_equal(result, test_result, err_msg='Your result does not match the expected.')\n\n def test_array_shape(self):\n self.assertEqual(test_result.shape, result.shape, msg='Shape of the array result should be (100, 1).')\n","repo_name":"jetbrains-academy/Python-Libraries-NumPy","sub_path":"NumPy/Compare Search/Find maximum/tests/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"21651485962","text":"import RPi.GPIO as GPIO\r\nimport time\r\nimport mysql.connector\r\n#from connection import connection\r\n#import dht11\r\n\r\n#import pymysql\r\n\r\nGPIO.setwarnings(False)\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.cleanup()\r\n\r\n\r\nGPIO.setup(23, GPIO.OUT)\r\nGPIO.setup(24, GPIO.IN)\r\n\r\nfor i in range(5):\r\n GPIO.output(23, GPIO.HIGH)\r\n time.sleep(0.5)\r\n GPIO.output(23, GPIO.LOW)\r\n time.sleep(0.5)\r\n\r\n #Endlosschleife\r\nwhile True:\r\n if GPIO.input(24) == 0:\r\n # Ausschalten\r\n GPIO.output(23, GPIO.LOW)\r\n else:\r\n #Einschalten\r\n GPIO.output(23, GPIO.HIGH)\r\n connection = mysql.connector.connect(host='localhost',\r\n database='hallo',\r\n user='username',\r\n password= 'root')\r\n counter=1\r\n counter+=1\r\n counter= counter+1\r\n \r\n mycursor = connection.cursor()\r\n sql = \"INSERT INTO neu (Name,Wert) VALUES (%s, %s)\"\r\n val = (\"Taster\",counter)\r\n mycursor.execute(sql,val)\r\n\r\n connection.commit()\r\n\r\n print(mycursor.rowcount, \"datenbank ausgeführt.\")\r\n\r\n","repo_name":"marko0161/BT-RPi4-Button-MySQL-Database","sub_path":"MySQLDatabase.py","file_name":"MySQLDatabase.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20930937250","text":"'''for par in range(2,51,2):\r\n print(par, end=' - ')\r\n par += 2'''\r\n\r\n'''num = int(input('Digite um numero: '))\r\nfor i in range(1,11):\r\n result = i*num\r\n print('{} x {:2} = {:2} '.format(num,i,result))'''\r\n\r\n'''soma =0\r\ncont = 0\r\nfor i in range(0,6):\r\n num = int(input('Digite um numero: '))\r\n if num % 2 == 0:\r\n soma += num\r\n cont += 1\r\nprint('Voce digitou {} pares e a soma e de {}'.format(cont,soma))'''\r\n\r\n\r\n'''from time import sleep\r\nn1 =int(input('Digite termo:'))\r\nn2 =int(input('Razão:'))\r\nx = n1\r\nfor i in range(0,10):\r\n print(x,end='-')\r\n x += n2\r\n sleep(0.1)\r\nprint('ABCABOU')'''\r\n\r\nfrase = input('Digite uma frase: ').strip().lower()\r\npalavra = frase.split()\r\njunto = ''.join(palavra)\r\ninverso = junto[::-1]\r\nprint(junto)\r\n'''for letra in range(len(junto) -1, -1,-1):\r\n inverso += junto[letra]'''\r\nprint(inverso)\r\nif inverso ==junto:\r\n print('Temos um palindromo')\r\nelse:\r\n print('Não é um palindromo 
')\r\n\r\n","repo_name":"ArthurBomfimNeto/exercicios_python-","sub_path":"exe47.py","file_name":"exe47.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"35651790135","text":"import numpy as np\nfrom rdkit import Chem\nimport copy\nimport sys\nimport os\nparent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\nsys.path.append(parent_dir)\nfrom gram_utils import *\nfrom smiles_utils import canonicalize\n\ndef mutation(gene):\n idx = np.random.choice(len(gene))\n gene_mutant = copy.deepcopy(gene)\n gene_mutant[idx] = np.random.randint(0, 256)\n return gene_mutant\n\ndef simulated_annealing(ini_smi, reward, n_iters, temp, beta):\n init = np.ones(X.shape[1])\n best = init\n best_logp = logp_calc(best)\n curr, curr_eval = best, best_logp \n route = []\n for i in range(n_iters):\n molecule_new = mutation(curr,0.8)\n molecule_new_c = molecule_new.copy()\n logp_new = calculator(molecule_new)\n route.append([molecule_new_c,logp_new])\n if logp_new > best_logp:\n best, best_logp = molecule_new, logp_new\n print('n_iter:',i, 'best_logp: ', best_logp)\n diff = logp_new - curr_eval\n t = temp / float(i + 1)\n metropolis = np.exp(diff / t)\n if diff > 0 or rand() < metropolis:\n curr, curr_eval = molecule_new, logp_new \n curr_c = curr.copy()\n return [best, best_logp],route","repo_name":"onecoinbuybus/ChemGE-Extended","sub_path":"opt/SA.py","file_name":"SA.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36317573092","text":"from django.shortcuts import render\r\n\r\nfrom django.http import HttpResponse\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom rest_framework.renderers import JSONRenderer\r\nfrom rest_framework.parsers import JSONParser\r\nfrom hola.models import Persona\r\nfrom hola.serializadorPersona import SerializadorPersona\r\nfrom datetime import datetime\r\n\r\nclass RespuestaJSON(HttpResponse):\r\n\r\n def __init__(self, data, **kwargs):\r\n content = JSONRenderer().render(data)\r\n kwargs['content_type'] = 'application/json'\r\n super(RespuestaJSON, self).__init__(content, **kwargs)\r\n\t\t\r\n@csrf_exempt\r\ndef lista_personas(peticion):\r\n\tif peticion.method == 'GET':\r\n\t\tfecnac1 = datetime.strptime('17-04-2011', '%d-%m-%Y').date()\r\n\t\tpersona1 = Persona(nombre='Pedro', sexo='Hombre', fecha_nacimiento=fecnac1)\r\n\t\tpersona1.save()\r\n\t\tfecnac2 = datetime.strptime('24-06-2015', '%d-%m-%Y').date()\r\n\t\tpersona2 = Persona(nombre='Marta', sexo='Mujer', fecha_nacimiento=fecnac2)\r\n\t\tpersona2.save()\r\n\t\tlistaPersonas = Persona.objects.all()\r\n\t\tserializador = SerializadorPersona(listaPersonas, many=True)\r\n\t\treturn RespuestaJSON(serializador.data)\r\n\r\n\telif peticion.method == 'POST':\r\n\t\tdatos = JSONParser().parse(peticion)\r\n\t\tserializador = SerializadorPersona(data=datos)\r\n\t\tif serializador.is_valid():\r\n\t\t\tserializador.save()\r\n\t\t\treturn RespuestaJSON(serializador.data, status=201)\r\n\t\treturn RespuestaJSON(serializador.errors, status=400)","repo_name":"Mario1234/python-djangorestdemo","sub_path":"prueba_django/hola/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14248474151","text":"import numpy as np\nimport scipy.stats as st\n\nwith 
open(\"twelveEightThousandRunning.txt\") as f:\n lines = f.read().split(\"\\n\")\n data_arrays = {}\n for line in lines:\n s = line.split(\"\\t\")\n if len(s) == 3 and \"ok\" not in line and \"FAIL\" not in line: # ignore meta info\n exp_name = s[0].strip()\n time = s[2]\n time = time.replace(\"ns/op\", \"\")\n time = float(time.strip()) # want milliseconds, not nanoseconds\n if exp_name in data_arrays.keys():\n data_arrays[exp_name].append(time)\n else:\n data_arrays[exp_name] = [time]\n\nfor exp in data_arrays.keys():\n data = np.array(data_arrays[exp])\n avg = \"{:.2f}\".format(np.average(data)/1000000)\n median = \"{:.2f}\".format(np.median(data)/1000000)\n ci = \"{:.2f}\".format((2*np.std(data)/pow(len(data), 0.5))/1000000)\n ci_div_avg = ((2*np.std(data)/pow(len(data), 0.5))/1000000)/(np.average(data)/1000000)\n\n print(exp, \"\\n\")\n print(\"\\taverage: \", avg, \"\\tmedian: \", median, \"\\tconfidence interval\", ci, \"\\t ci/avg\", ci_div_avg, \"\\n\")\n #print(exp, \"\\t\\t\", avg, \"\\t\", median, \"\\t\", stdev, \"%\\n\")\n","repo_name":"juyaojia/microservices_env","sub_path":"benchmark_trace_retrieval/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"16319873676","text":"\"\"\"SQLite: usando o módulo sqlite3\"\"\"\n\n# Conexão persistente(salva os dados)\nimport sqlite3\n\nconnection = sqlite3.connect('database.db') # cria a base de dados\ncursor = connection.cursor()\n\n\ncursor.execute('CREATE TABLE IF NOT EXISTS clients('\n 'id INTEGER PRIMARY KEY AUTOINCREMENT,'\n 'name TEXT,'\n 'weight REAL'\n ')')\n\n\"\"\"\n#Insere os valores na base de dados\ncursor.execute(\n 'INSERT INTO clients(name, weight) VALUES(\"Igor Freitas\", 70.3)')\n\nPrevine o SQL Injection usando uma tuple\ncursor.execute(\n 'INSERT INTO clients(name, weight) VALUES(?,?)', ('Michele Freitas', 50))\n\n# Usando dicionario\ncursor.execute(\n 'INSERT INTO clients(name, weight) VALUES(:name,:weight)',\n {'name': 'Noah Freitas', 'weight': 25}\n)\n\ncursor.execute(\n 'INSERT INTO clients VALUES(:id,:name,:weight)',\n {'id': None, 'name': 'Alice Freitas', 'weight': 60}\n)\nconnection.commit() # Executa a linha na base de dados\"\"\"\n\n# # Atualiza um dado a partir do id\n# cursor.execute('UPDATE clients SET name = name WHERE id =id',\n# {'name': 'Igor Freitas', 'id': 2}\n# )\n# connection.commit()\n\n# Escolhe um id para ser apagado\n# cursor.execute('DELETE FROM clients WHERE id = id',\n# {'id': 2}\n# )\n# connection.commit()\n\ncursor.execute(\n 'SELECT name,weight FROM clients WHERE weight > :weight',\n {'weight': 50}\n)\n\n# Exibi as informação da tabela client\ncursor.execute('SELECT * FROM clients')\n\n# cursor.fetchall() # Busca os valores que estão na tabela\nfor line in cursor.fetchall():\n identifier, name, weight = line\n\n print(identifier, name, weight)\n\ncursor.close()\nconnection.close()\n","repo_name":"igorfreits/Studies-Python","sub_path":"Udemy/6-Base-de-dados/aula 103 - SQLite.py","file_name":"aula 103 - SQLite.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"35728328585","text":"hrsInput = input(\"Enter Hours: \")\nhours = float(hrsInput)\n\nrateInput = input(\"Enter Rate: \")\nrate = float(rateInput)\n\nbasePay = (hours * rate)\n\nif hours <= 40:\n print(basePay)\n\nif hours > 40:\n otHours = hours - 40\n otRate = float(rate * 1.5)\n otPay = otHours * otRate\n 
totalPay = (40 * rate) + otPay\n print(totalPay)\n","repo_name":"kasandradarwin/PythonPractice","sub_path":"PayCalc2.py","file_name":"PayCalc2.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72530992509","text":"# Element Search\n\nfrom math import ceil, floor\n\n\ndef main():\n a_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n a_num = 2\n print(a_list)\n print(f'key: {a_num}')\n sort_list = sorted(a_list)\n\n if binarySearch(sort_list, a_num):\n print('the number is within the list')\n else:\n print('the number is NOT within the list') \n\ndef elementSearch(get_list, get_num):\n # input: takes an ordered list of numbers (ascending order), and another number\n # return: Bool -> whether the number is within the list or not \n\n for i in range(len(get_list)):\n if get_list[i] == get_num:\n return True\n return False\n\ndef binarySearch(get_list, get_num):\n # input: takes an ordered list of numbers (ascending order), and another number\n # return: Bool -> whether the number is within the list or not \n temp_list = get_list.copy()\n while True:\n mid = floor(len(temp_list) / 2)\n if get_num > temp_list[len(temp_list) - 1] or get_num < temp_list[0]:\n return False \n elif get_num == temp_list[mid]:\n return True \n elif get_num > temp_list[mid]:\n temp_list = temp_list[mid: len(temp_list)]\n print(temp_list, mid)\n elif get_num < temp_list[mid]:\n temp_list = temp_list[0: mid]\n print(temp_list, mid)\n else:\n return False\n \n\nif __name__ == '__main__':\n main()","repo_name":"itiro-y/python-repo","sub_path":"PracticePythonDotOrg/Ex20_Element_Search/ex20.py","file_name":"ex20.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"2805243805","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MultipleLocator\n\nfile_pyserial=open('data_from_uart/data_pyserial.txt',mode='r',encoding='utf8')\nfile_custom=open('data_from_uart/data_custom_uart.txt',mode='r',encoding='utf8')\npyserial_data=[]\ncustom_data=[]\nlen_str=30\n\n\nwhile True:\n line_pyserial=file_pyserial.readline()\n line_custom=file_custom.readline()\n if not line_pyserial or not line_custom:\n break\n\n pyserial_data.append(round(float(line_pyserial),3))\n custom_data.append(round(float(line_custom),3))\n\ntimes_custom=np.array(custom_data)\ntimes_pyserial=np.array(pyserial_data)\nspeed_custom=np.array(1/(times_custom/len_str/8),dtype='int')\nspeed_pyserial=np.array(1/(times_pyserial/len_str/8),dtype='int')\n\n\nfile_pyserial.close()\nfile_custom.close()\n\nprint(speed_custom)\nprint(speed_pyserial)\n\nfig=plt.figure(figsize=(11,8))\nax1=fig.add_subplot(2,2,1)\nax2=fig.add_subplot(2,2,2)\nax3=fig.add_subplot(2,2,3)\nax4=fig.add_subplot(2,2,4)\n\n\n\nax1.plot(times_custom,label='custom')\nax2.plot(times_pyserial,label='pyserial')\nax3.plot(speed_custom,label='custom')\nax4.plot(speed_pyserial,label='pyserial')\n\nax1.legend()\nax2.legend()\nax3.legend()\nax4.legend()\n\nfig.suptitle('data transmission statistics for a 15 byte string')\n\nax1.set(xlim=(0,15),ylim=(0,0.05))\nax1.xaxis.set_major_locator(MultipleLocator(base=1))\nax1.set_xlabel('number of iteration')\nax1.set_ylabel('time in sec')\nax1.grid()\n\nax2.set(xlim=(0,15),ylim=(1.5,3))\nax2.xaxis.set_major_locator(MultipleLocator(base=1))\nax2.set_xlabel('number of iteration')\nax2.set_ylabel('time in 
sec')\nax2.grid()\n\nax3.set(xlim=(0,15),ylim=(0,17000))\nax3.xaxis.set_major_locator(MultipleLocator(base=1))\nax3.set_xlabel('number of iteration')\nax3.set_ylabel('speed in bits/second')\nax3.grid()\n\nax4.set(xlim=(0,15),ylim=(0,150))\nax4.xaxis.set_major_locator(MultipleLocator(base=1))\nax4.set_xlabel('number of iteration')\nax4.set_ylabel('speed in bits/second')\nax4.grid()\n\n\nplt.show()","repo_name":"slavic67/research_yolo","sub_path":"plot_uart.py","file_name":"plot_uart.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21088216166","text":"from __future__ import print_function\nfrom os.path import join\nimport yaml\n\nfrom action_execution.config_keys import PathConfig, ModelConfigKeys\n\nclass ExecutionModel(object):\n '''A description of an action execution model.\n\n Author -- Alex Mitrevski\n Email -- aleksandar.mitrevski@h-brs.de\n\n '''\n def __init__(self, model_name=None):\n self.config_path = join(PathConfig.DIR_ABS_PATH,\n PathConfig.MODEL_CONFIG_PATH)\n\n self.id = model_name\n self.inputs = dict()\n self.outputs = dict()\n self.params = dict()\n\n if self.id is not None:\n self.load_config(self.id)\n\n def load_config(self, model_name):\n '''Loads the config file of the model specified by 'model_name'\n\n Keyword arguments:\n model_name -- id of a model\n\n '''\n config_path = join(self.config_path, model_name + '.yaml')\n\n try:\n file_handle = open(config_path, 'r')\n config = yaml.load(file_handle)\n file_handle.close()\n\n self.id = model_name\n if ModelConfigKeys.INPUTS in config:\n for opt_in in config[ModelConfigKeys.INPUTS]:\n input_config = opt_in[ModelConfigKeys.INPUT]\n input_name = input_config[ModelConfigKeys.INPUT_NAME]\n input_type = input_config[ModelConfigKeys.INPUT_TYPE]\n self.inputs[input_name] = input_type\n if ModelConfigKeys.OUTPUTS in config:\n for opt_out in config[ModelConfigKeys.OUTPUTS]:\n output_config = opt_out[ModelConfigKeys.OUTPUT]\n output_name = output_config[ModelConfigKeys.OUTPUT_NAME]\n output_type = output_config[ModelConfigKeys.OUTPUT_TYPE]\n self.outputs[output_name] = output_type\n if ModelConfigKeys.PARAMS in config:\n for opt_params in config[ModelConfigKeys.PARAMS]:\n param_config = opt_params[ModelConfigKeys.PARAM]\n param_name = param_config[ModelConfigKeys.PARAM_NAME]\n param_type = param_config[ModelConfigKeys.PARAM_TYPE]\n self.params[param_name] = param_type\n\n print('Description of \"{0}\" loaded successfully'.format(self.id))\n except IOError:\n print('Cannot load config of unknown optimiser \"{0}\"'.format(self.id))\n\n def verify_input(self, argument_dict):\n '''Checks whether the input in kwargs corresponds to the\n input description specified in the config. 
The input is considered\n invalid if it contains:\n * an unexpected key or\n * an object of an incorrect type for a given key (including\n a list of incorrect objects)\n\n Returns:\n valid_input -- a Boolean specifing whether the input is valid\n message -- a string with error messages about the input;\n empty string if the input is valid\n\n '''\n valid_input = True\n message = ''\n for key, value in argument_dict.items():\n # the input is invalid if a list of a given type is expected\n # for a given key, but not passed, or if an object of an incorrect\n # type is passed\n if key in self.inputs:\n list_expected = self.inputs[key].find('[]') != -1\n if list_expected:\n valid_list, new_message = self.__verify_list_input(key, value)\n if valid_input:\n valid_input = valid_list\n message += new_message\n else:\n valid_obj, new_message = self.__verify_object_input(key, value)\n if valid_input:\n valid_input = valid_obj\n message += new_message\n return valid_input, message\n\n def __verify_object_input(self, key, value):\n '''Checks whether 'value' has the expected object type for 'key' as\n specified in the model description.\n\n valid_input -- a Boolean specifing whether the input is valid\n message -- an error message about the input;\n empty string if the input is valid\n\n '''\n valid_input = True\n message = ''\n exp_input_type = self.inputs[key]\n content_type = type(value).__name__\n if content_type != exp_input_type:\n message = 'Expected input \"' + exp_input_type \\\n + '\" for key \"' + key + '\"'\n valid_input = False\n return valid_input, message\n\n def __verify_list_input(self, key, value):\n '''Checks whether 'value' is of type list and whether the list's\n objects are of the expected type for 'key' as specified\n in the model description.\n\n valid_input -- a Boolean specifing whether the input is valid\n message -- an error message about the input;\n empty string if the input is valid\n\n '''\n valid_input = True\n message = ''\n if type(value).__name__ != 'list':\n message = 'Expected input \"list\" for key \"' + key + '\"'\n valid_input = False\n else:\n bracket_idx = self.inputs[key].find('[]')\n exp_input_type = self.inputs[key][0:bracket_idx]\n if value:\n list_content_type = type(value[0]).__name__\n if list_content_type != exp_input_type:\n message = 'Expected list of \"' + exp_input_type \\\n + '\" for key \"' + key + '\"'\n valid_input = False\n return valid_input, message\n\n def print_config(self):\n '''Prints the values of the model's config fields\n '''\n print('model_id = {0}'.format(self.id))\n print('Inputs:')\n for key, value in self.inputs.items():\n print(' {0}: {1}'.format(key, value))\n print('Outputs:')\n for key, value in self.outputs.items():\n print(' {0}: {1}'.format(key, value))\n","repo_name":"alex-mitrevski/action-execution","sub_path":"action_execution/execution_model.py","file_name":"execution_model.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12819586498","text":"from django.db import models\n\nclass Year(models.Model):\n year = models.IntegerField(primary_key = True)\n description = models.CharField(max_length=1500, null=True)\n '''\n {\"year\":2020,\"description\":\"ok\"} \n '''\n\nclass User(models.Model):\n index = models.IntegerField(primary_key = True)\n name = models.CharField(max_length=255, null=True)\n surname = models.CharField(max_length=255, null=True)\n email = models.EmailField(null=True)\n created = 
models.DateTimeField(auto_now_add=True, null=True)\n years = models.ManyToManyField(Year, related_name=\"members\",null=True,blank=True)\n \n# {\"index\":150000,\"name\":\"Tymoteusz\",\"surname\":\"Puchacz\",\"email\":\"mailmail@gmail.com\"}\n\nclass Member(models.Model):\n year = models.ForeignKey(Year, on_delete=models.CASCADE)\n userid = models.ForeignKey(User, on_delete=models.CASCADE)\n created = models.DateTimeField(auto_now_add=True)\n\n '''\n {\"name\":\"Tymoteusz\",\"surname\":\"Puchacz\"}\n ''' \n\nclass Contributions(models.Model):\n member = models.ForeignKey(User, on_delete=models.CASCADE)\n year = models.ForeignKey(Year, on_delete=models.CASCADE)\n cost = models.DecimalField(max_digits=5, decimal_places=2, null=True)\n month = models.CharField(max_length=255, null=True)\n date = models.DateField(auto_now_add=True, null=True)\n\n'''\n{\n\"cost\":40,\n\"month\":\"Styczeń\",\n\"date\":\"2022-10-25\"\n}\n'''\n\nclass Expenditure(models.Model):\n year = models.ForeignKey(Year,on_delete=models.CASCADE)\n cost = models.DecimalField(max_digits=5, decimal_places=2, null=True)\n description = models.CharField(max_length=1500, null=True)\n date = models.DateField(null=True)\n created = models.DateTimeField(auto_now_add=True, null=True)\n\n'''\n{\n\"cost\":40,\n\"description\":\"zakup koszulek\",\n\"date\":\"2006-10-25\"\n}\n'''\n\nclass Income(models.Model):\n year = models.ForeignKey(Year,on_delete=models.CASCADE)\n cost = models.DecimalField(max_digits=5, decimal_places=2, null=True)\n description = models.CharField(max_length=1500, null=True)\n date = models.DateField(null=True)\n created = models.DateTimeField(auto_now_add=True, null=True)\n\n'''\n{\n\"cost\":40,\n\"description\":\"sprzedaż koszulek\",\n\"date\":\"2006-10-25\"\n}\n'''\n\n","repo_name":"Zielony20/RestApi","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17007553465","text":"string = input()\n\nstring = string.upper()\narr = [0 for i in range(26)]\nfor i in range(len(string)):\n arr[ord(string[i]) - ord('A')] += 1\n\nmaxNumber = max(arr)\nmaxIndex = arr.index(maxNumber)\narr.pop(maxIndex)\ntry:\n arr.index(maxNumber)\n print('?')\nexcept:\n print(chr(maxIndex + ord('A'))) \n","repo_name":"masonHong/INU-Study","sub_path":"Backjoon/HTJ/문자열 사용하기/1157 단어 공부.py","file_name":"1157 단어 공부.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12702350987","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='dnsproxy',\n version='0.0.1',\n description='Python DNS Proxy Framework',\n long_description=readme,\n author='Bitsec AB',\n author_email='info@bitsec.se',\n url='https://github.com/bitsec/dnsproxy',\n license=license,\n packages=find_packages(exclude=('tests', 'docs'))\n)\n","repo_name":"Bitsec/dnsproxy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"18762840224","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8\n\nfrom console import Console\nfrom update import Update\nfrom botnet import Botnet\nfrom player import Player\nfrom mails import Mails\nimport time\nimport json\nimport 
config\nimport ddos\nimport logging\nlogger = logging.getLogger(__name__)\nFORMAT = '%(asctime)s [%(threadName)10s][%(module)10s][%(levelname)8s] %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\n\n\nclass run:\n def __init__(self):\n \"\"\"\n Pull all variables from config.py file.\n \"\"\"\n\n self.player = Player()\n self.database = config.database\n self.Max_point_tournament = config.Max_point_tournament\n self.BotNet_update = config.BotNet_update\n self.joinTournament = config.joinTournament\n self.tournament_potator = config.tournament_potator\n self.booster = config.booster\n self.Use_netcoins = config.Use_netcoins\n self.attacks_normal = config.attacks_normal\n self.updates = config.updates\n self.updatecount = config.updatecount\n self.maxanti_normal = config.maxanti_normal\n self.active_cluster_protection = config.active_cluster_protection\n self.mode = config.mode\n self.number_task = config.number_task\n self.min_energy_botnet = config.minimal_energy_botnet_upgrade\n self.stat = \"0\"\n self.wait_load = config.wait_load\n self.c = Console(self.player)\n self.u = Update(self.player)\n # disable botnet for > api v13\n self.b = Botnet(self.player)\n self.ddos = ddos.Ddos(self.player)\n self.m = Mails(self.player)\n self.init()\n\n def init(self):\n while True:\n # update the player\n time.sleep(self.wait_load)\n stat = \"0\"\n # prepare account\n if self.number_task:\n self.get_max_update = int(self.number_task)\n else:\n self.get_max_update = int(self.u.infoUpdate(\"ram\", \"new\")) - 1\n self.running_all = self.u.runningtasks()\n logger.info(\"you are running {}/{} tasks\".format(self.running_all, self.get_max_update))\n\n if int(self.running_all) < int(self.get_max_update):\n while \"0\" in stat or \"3\" in stat:\n if int(self.u.runningtasks()) < int(self.u.infoUpdate(\"ram\", \"new\")) - 1 or int(self.u.runningtasks()) < int(self.get_max_update):\n try:\n moneyforupdate = int(self.u.infoUpdate(self.updates[self.updatecount]))\n except IndexError:\n logger.info(\"reset\")\n self.updatecount = 0\n moneyforupdate = int(self.u.infoUpdate(self.updates[self.updatecount]))\n stat = \"1\"\n mymoney = int(json.loads(self.c.myinfo())[\"money\"])\n\n if mymoney < moneyforupdate:\n self.updatecount += 1\n\n try:\n logger.info(\"require {}$ for update {} your money {}$\".format(moneyforupdate, self.updates[self.updatecount], mymoney))\n except IndexError:\n stat = \"1\"\n\n totaltask = int(self.u.runningtasks()) + int(self.updatecount)\n if int(totaltask) == int(self.get_max_update):\n stat = \"1\"\n else:\n (stat, levelupdates) = self.u.startTask(self.updates[self.updatecount])\n if \"3\" in stat or \"0\" in stat:\n logger.info(\"updating {} level {}\".format(self.updates[self.updatecount], int(levelupdates)+1))\n # print \"Started Update\n logger.info(\"Waiting... 
in update\")\n # u.useBooster()\n self.updatecount += 1\n totaltask = int(self.u.runningtasks()) + int(self.updatecount)\n if int(totaltask) == int(self.get_max_update):\n stat = \"1\"\n else:\n break\n\n # recheck running ask for boost and netcoins\n self.running_all = self.u.runningtasks()\n\n self.ddos.run_ddos()\n if self.BotNet_update:\n botnet = json.loads(self.b._botnetInfo())\n if int(botnet['count']) > 0 and int(botnet['energy']) >= self.min_energy_botnet:\n for count, i in enumerate(botnet['data']):\n self.b.upgradebotnet(i['hostname'], int(i['running']), count)\n else:\n if int(botnet['count']) == 0:\n logger.info(\"You are not botnet\")\n\n if int(botnet['energy']) == 0:\n logger.info(\"You are not energy for update botnet\")\n else:\n logger.info(\"Your botnet energy (\"+ str(botnet['energy']) +\") < \" + str(self.min_energy_botnet) + \" Please wait for regeneration...\")\n\n # attack botnet\n #number_botnet = json.loads(self.b._botnetInfo())\n #if int(number_botnet['count']) > 0:\n # self.b.attack()\n\n if self.joinTournament and self.c.getTournament():\n self.mode = \"Potator\"\n logger.info(\"** Force Mode to 'Potator' for Tournament **\")\n # task = self.u.doTasks(self.wait_load)\n if self.booster and self.running_all > 1:\n try:\n vtasks = self.u.getrunningtasks()\n json_data = json.loads(vtasks)\n while len(json_data[\"data\"]) > 1:\n if int(json_data[\"boost\"]) > 5:\n json_data = json.loads(self.u.useBooster())\n logger.info(\"Using booster on rest {}\".format(json_data[\"boost\"]))\n if int(json_data['fAllCosts']) < 50:\n break\n # UPDATE Value\n else:\n logger.info(\"you have < 5 boost.\")\n break\n except Exception as e:\n logger.error(\"Connection Error try again...{0}\".format(e))\n pass\n if self.Use_netcoins:\n time.sleep(2)\n if self.player.netcoins > 1 and self.running_all > 1:\n self.u.finishAll()\n self.player.refreshinfo() # update player info\n logger.info(\"I used Netcoins for finish all task.\")\n \n if self.player.email > 0:\n time.sleep(self.wait_load)\n logger.info('Reading mails...')\n self.m.read_mails()\n\n # attack players\n self.c.attack(self)\n\n # reinitialise your profil money, email ...\n run.__init__(self)\n\nif __name__ == \"__main__\":\n r = run()\n","repo_name":"OlympicCode/vHackXTBot-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"6"} +{"seq_id":"24515561305","text":"# edgarapp/views.py\r\n\r\nimport itertools\r\nfrom datetime import datetime\r\nfrom django.contrib import messages\r\n\r\nimport requests\r\nimport textdistance\r\nfrom bs4 import BeautifulSoup\r\nfrom django.conf import settings\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth import (authenticate, login, logout,\r\n update_session_auth_hash)\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.forms import PasswordChangeForm\r\nfrom django.core.mail import BadHeaderError, send_mail\r\nfrom django.db.models import Q\r\n# For contact View\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\n# 404 error page\r\nfrom django.shortcuts import redirect, render\r\nfrom django.template import RequestContext\r\nfrom django.templatetags.static import static\r\nfrom django.utils.translation import ugettext as _\r\nfrom django.views.decorators.gzip import gzip_page\r\nfrom django.views.generic import ListView, TemplateView\r\n\r\nfrom .forms import ContactForm, UsersLoginForm, 
UsersRegisterForm\r\nfrom .models import Company, Directors, Executives, Filing, Funds, Proxies\r\nfrom .utils import TOCAlternativeExtractor, Printer\r\n\r\n\r\ndef handler404(request, *args, **argv):\r\n extended_template = 'base.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n response = render_to_response('404.html', {'extended_template': extended_template},\r\n context_instance=RequestContext(request))\r\n response.status_code = 404\r\n return response\r\n\r\n\r\ndef HomePageView(request):\r\n template_name = 'home.html'\r\n\r\n extended_template = 'base.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n return render(\r\n request, template_name,\r\n {'extended_template': extended_template}\r\n )\r\n\r\n\r\ndef SearchResultsView(request):\r\n # model = Company, Filing, Funds, Directors, Proxies, Executives\r\n # template_name = 'companyOverview.html'\r\n\r\n extended_template = 'base_company.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_company_member.html'\r\n\r\n query = request.GET.get('q')\r\n print(query)\r\n\r\n if not request.user.is_authenticated:\r\n # print(\"done\")\r\n if query != 'TSLA':\r\n messages.error(request, 'To search for other Tickers,')\r\n return render(\r\n request, 'home.html',\r\n {'extended_template': extended_template}\r\n )\r\n mycompany = Company.objects.get(ticker=query)\r\n\r\n filing = Filing.objects.filter(cik=mycompany.cik).order_by('-filingdate').latest('filingdate');\r\n\r\n return HttpResponseRedirect('/filing/?q=' + query + '&fid=' + str(filing.cik))\r\n\r\n # -------------no need to carry out the other searches as they are expensive-----------------------\"\r\n # filings = Filing.objects.filter(cik=mycompany.cik).order_by('-filingdate')\r\n # proxies = Proxies.objects.filter(cik=mycompany.cik).order_by('-filingdate')\r\n # name = mycompany.name\r\n # name = name.upper()\r\n # name = name.replace('INTERNATIONAL', 'INTL')\r\n # name = name.replace(' /DE', '')\r\n # name = name.replace('/DE', '')\r\n # name = name.replace('INC.', 'INC')\r\n # name = name.replace(',', '')\r\n\r\n # matches = []\r\n # exectable = []\r\n\r\n # funds = Funds.objects.raw(\r\n # 'SELECT * FROM edgarapp_funds WHERE company = %s ORDER BY share_prn_amount+0 DESC LIMIT 100', [name])\r\n #\r\n # directors = Directors.objects.filter(\r\n # company=mycompany.name).order_by('-director')\r\n #\r\n # allDirectors = Directors.objects.all()\r\n\r\n # executives = Executives.objects.filter(company=mycompany.name)\r\n # today = datetime.today()\r\n # currYear = today.year\r\n #\r\n # for year in executives:\r\n # if year.filingdate.split('-')[0] == str(currYear):\r\n # exectable.append(year)\r\n #\r\n # for person in directors:\r\n # if person:\r\n # personA = person.director.replace(\"Mr.\", '')\r\n # personA = person.director.replace(\"Dr.\", '')\r\n # personA = person.director.replace(\"Ms.\", '')\r\n # a = set([s for s in personA if s != \",\" and s != \".\" and s != \" \"])\r\n # aLast = personA.split(' ')[-1]\r\n # if (len(personA.split(' ')) == 1):\r\n # aLast = personA.split('.')[-1]\r\n # comps = []\r\n # for check in allDirectors:\r\n # if person:\r\n # personB = check.director.replace(\"Mr.\", '')\r\n # personB = check.director.replace(\"Dr.\", '')\r\n # personB = check.director.replace(\"Ms.\", '')\r\n # bLast = personB.split(' ')[-1]\r\n # if (len(personB.split(' ')) == 1):\r\n # bLast = personB.split('.')[-1]\r\n # # print(personA, aLast, person.company, 
personB, bLast, check.company)\r\n # if aLast == bLast:\r\n # # first check jaccard index to speed up algo, threshold of .65\r\n # b = set([s for s in personB if s !=\r\n # \",\" and s != \".\" and s != \" \"])\r\n # if (len(a.union(b)) != 0):\r\n # jaccard = float(\r\n # len(a.intersection(b)) / len(a.union(b)))\r\n # else:\r\n # jaccard = 1\r\n # # print(personA, personB, jaccard)\r\n # if (jaccard > 0.65):\r\n # # run Ratcliff-Obershel for further matching, threshold of .75 and prevent self-match\r\n # sequence = textdistance.ratcliff_obershelp(\r\n # personA, personB)\r\n # # print(sequence)\r\n # if sequence > 0.75 and mycompany.name != check.company:\r\n # comps.append(check.company)\r\n # if not comps:\r\n # comps.append('Director is not on the board of any other companies')\r\n # matches.append(comps)\r\n #\r\n # object_list = []\r\n # object_list.append(query)\r\n # object_list.append((mycompany.name, mycompany.ticker))\r\n # object_list.append(filings)\r\n # object_list.append(funds)\r\n # object_list.append(zip(directors, matches))\r\n # object_list.append(zip(exectable, matches))\r\n # object_list.append(itertools.zip_longest(proxies, filings, fillvalue='foo'))\r\n\r\n # object_list is (q, (companyname, ticker), (filings object))\r\n # if request.user.is_authenticated:\r\n # print(object_list)\r\n\r\n latest_filing = []\r\n # for file in filings:\r\n\r\n # filing = Filing.objects.filter(cik=mycompany.cik).order_by('-filingdate').first()\r\n # print(filing)\r\n # url ='E:/Workspace/mblazr/edgarapp/static'+'/'+ 'filings/' + filing.filingpath\r\n # toc_extractor = TOCExtractor()\r\n # with open(url) as file:\r\n #\r\n # filing_html = file.read()\r\n #\r\n # try:\r\n # extract_data = toc_extractor.extract(filing_html)\r\n # table_of_contents = extract_data.table\r\n # except:\r\n # table_of_contents = \"\"\r\n # 'filing_html': filing_html,'table_of_contents': table_of_contents\r\n\r\n # return render(\r\n # request, template_name,\r\n # {'object_list': object_list, 'extended_template': extended_template,\r\n # 'table_of_contents': table_of_contents,\r\n # 'filing_html': filing_html\r\n # }\r\n # )\r\n # else:\r\n # if query == 'HD':\r\n # return render(\r\n # request, template_name,\r\n # {'object_list': object_list, 'extended_template': extended_template}\r\n # )\r\n # else:\r\n # return render(request, 'about.html', {'extended_template': 'base.html'})\r\n\r\n\r\n\r\n\r\n@gzip_page\r\ndef SearchFilingView(request):\r\n template_name = 'companyFiling.html'\r\n extended_template = 'base_company.html'\r\n global filing_to_display,filings_list\r\n q_company = request.GET.get('q')\r\n q_filing = request.GET.get('fid')\r\n if q_company == '' or q_company == None:\r\n q_company='TSLA'\r\n\r\n if q_filing=='' or q_filing == None:\r\n q_filing='all'\r\n else:\r\n try:\r\n int(q_filing)\r\n except:\r\n q_filing='all'\r\n\r\n\r\n\r\n #Authentication here\r\n if not request.user.is_authenticated and q_company != 'TSLA':\r\n # redirect them to login\r\n return redirect('/accounts/login/?next=' + q_company)\r\n\r\n elif request.user.is_authenticated or (not request.user.is_authenticated and q_company == 'TSLA'):\r\n #Check query being searched\r\n company_search = Company.objects.filter(ticker=q_company)\r\n\r\n if len(company_search)>0:\r\n #Company is valid\r\n filings_for_company = Filing.objects.filter(cik=company_search[0].cik)\r\n\r\n if len(filings_for_company)>0:\r\n filings_list=[]\r\n\r\n #Prepare Filings List (to didplay on left side)\r\n for myfiling in filings_for_company:\r\n 
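# each filing is flattened to a plain dict for the filings list rendered on the left side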
filings_list.append(myfiling.dict_values())\r\n #We have filings for that Company\r\n if q_filing == 'all':\r\n filing_to_display = filings_for_company[0]\r\n else:\r\n result_for_fid = Filing.objects.filter(cik=company_search[0].cik,id=q_filing)\r\n if len(result_for_fid)==1:\r\n filing_to_display =result_for_fid[0]\r\n else:\r\n #Output First Filing automaticaly\r\n filing_to_display = filings_for_company[0]\r\n\r\n\r\n #Now we have filings as well as complete company info\r\n company_cik = company_search[0].cik\r\n company_name = company_search[0].name\r\n company_ticker =company_search[0].ticker\r\n\r\n #Get directors,executives,funds\r\n funds = Funds.objects.filter(company=company_name)[:100]\r\n directors = Directors.objects.filter(company=company_name)\r\n executives = Executives.objects.filter(company=company_name)\r\n\r\n object_list=[]\r\n\r\n #Fetch file and prepare TOC\r\n #Check the Filing Data\r\n # all_parts = str(filing_to_display.filingpath).split('/')\r\n #\r\n # path_to_extract_toc =''\r\n # path_of_filing =''\r\n #\r\n # if(len(all_parts)==4): #filings/files/val/file.ht\r\n # path_to_extract_toc = str(filing_to_display.filingpath).split('/')[2]+'/'+ str(filing_to_display.filingpath).split('/')[-1]\r\n # path_of_filing = path_to_extract_toc\r\n # elif (len(all_parts)==2): #cikvalue/file.htm\r\n # path_to_extract_toc = str(filing_to_display.filingpath)\r\n # path_of_filing =path_to_extract_toc\r\n #\r\n # fetched_filing = readFiling(path_to_extract_toc)\r\n\r\n #print(fetched_filing)\r\n # #t_o_c = filing_to_display.table_of_contents.first()\r\n # #if not t_o_c :\r\n url = '/mnt/filings-static/capitalrap/edgarapp/static/filings/' + filing_to_display.filingpath\r\n\r\n #t_o_c = filing.table_of_contents.first()\r\n\r\n #if not t_o_c:\r\n toc_extractor = TOCAlternativeExtractor()\r\n\r\n extract_data = toc_extractor.extract(url)\r\n\r\n t_o_c = filing_to_display.table_of_contents.create(body=extract_data.table)\r\n\r\n return render(\r\n request, template_name, {\r\n 'object_list': object_list,\r\n 'company_filings': filings_list,\r\n 'company_ticker': company_ticker,\r\n 'directors': directors,\r\n 'executives': executives,\r\n 'company_name': company_name,\r\n 'current_filing': filing_to_display,\r\n 'funds': funds,\r\n 'extended_template': extended_template,\r\n 'table_of_contents': t_o_c.body, # prep, # t_o_c.body,#updatedtoc,\r\n 'fid': company_cik,\r\n #'filepath': path_of_filing\r\n\r\n })\r\n else:\r\n\r\n #No filing in Od Db as well\r\n return HttpResponse(status=404,content='
No filings for '+str(company_search[0].name)+ ' was found.Check back later',content_type='text/html')\r\n else:\r\n #Company could Not be found so redirect to home page\r\n\r\n return HttpResponseRedirect('/')\r\n\r\n\r\n@gzip_page\r\ndef SearchFilingView_old(request):\r\n template_name = 'companyFiling.html'\r\n\r\n extended_template = 'base_company.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_company_member.html'\r\n\r\n matches = []\r\n exectable = []\r\n\r\n # Check to ensure query value is not empty if empty we search for tesla\r\n if request.GET.get('q') != None or request.GET.get('q') != '':\r\n query = request.GET.get('q')\r\n else:\r\n query = 'TSLA'\r\n\r\n # user is not logged in and\r\n # they are not searching for Tesla\r\n if not request.user.is_authenticated and query != 'TSLA':\r\n # redirect them to login\r\n return redirect('/accounts/login/?next=' + query)\r\n\r\n elif request.user.is_authenticated or (not request.user.is_authenticated and query == 'TSLA'):\r\n # user is authenticated or they are not authenticated but are searching for Tesla\r\n # check if query sqtring has valid arguments\r\n company_filings = Filing.objects.filter(company__ticker=query)\r\n\r\n fid = request.GET.get('fid')\r\n if fid == 'all':\r\n # query string fetches the latest filing\r\n filing = company_filings.first()\r\n\r\n # the latest filing is being recieved\r\n\r\n else:\r\n # normal fid is in place\r\n\r\n filing = company_filings.filter(id=fid).first() # the filing was requested by fid\r\n\r\n company_filings = [filing.dict_values() for filing in company_filings]\r\n\r\n links = []\r\n verify = []\r\n\r\n company = filing.company\r\n # name = mycompany.name\r\n # name = name.upper()\r\n # name = name.replace('INTERNATIONAL', 'INTL')\r\n # name = name.replace(' /DE', '')\r\n # name = name.replace('/DE', '')\r\n # name = name.replace('INC.', 'INC')\r\n # name = name.replace(',', '')\r\n\r\n # funds = Funds.objects.raw(\r\n # 'SELECT * FROM edgarapp_funds WHERE company = %s ORDER BY share_prn_amount+0 DESC LIMIT 100', [name])\r\n funds = company.funds.all()[:100]\r\n # 'SELECT * FROM edgarapp_funds WHERE company = %s ORDER BY share_prn_amount+0 DESC LIMIT 100', [name])\r\n\r\n # directors = Directors.objects.filter(company=mycompany.name).order_by('-director')\r\n directors = company.company_directors.all()\r\n\r\n # allDirectors = Directors.objects.all()\r\n\r\n # executives = Executives.objects.filter(company=mycompany.name)\r\n executives = company.executives.all()\r\n\r\n # today = datetime.today()\r\n # currYear = today.year\r\n\r\n # for year in executives:\r\n # if year.filingdate.split('-')[0] == str(currYear):\r\n # exectable.append(year)\r\n\r\n # for person in directors:\r\n # if person:\r\n # personA = person.director.replace(\"Mr.\", '')\r\n # personA = person.director.replace(\"Dr.\", '')\r\n # personA = person.director.replace(\"Ms.\", '')\r\n # a = set([s for s in personA if s != \",\" and s != \".\" and s != \" \"])\r\n # aLast = personA.split(' ')[-1]\r\n # if (len(personA.split(' ')) == 1):\r\n # aLast = personA.split('.')[-1]\r\n # comps = []\r\n # for check in allDirectors:\r\n # if person:\r\n # personB = check.director.replace(\"Mr.\", '')\r\n # personB = check.director.replace(\"Dr.\", '')\r\n # personB = check.director.replace(\"Ms.\", '')\r\n # bLast = personB.split(' ')[-1]\r\n # if (len(personB.split(' ')) == 1):\r\n # bLast = personB.split('.')[-1]\r\n # print(personA, aLast, person.company, personB, bLast, check.company)\r\n # if 
aLast == bLast:\r\n # # first check jaccard index to speed up algo, threshold of .65\r\n # b = set([s for s in personB if s !=\r\n # \",\" and s != \".\" and s != \" \"])\r\n # if (len(a.union(b)) != 0):\r\n # jaccard = float(\r\n # len(a.intersection(b)) / len(a.union(b)))\r\n # else:\r\n # jaccard = 1\r\n # # print(personA, personB, jaccard)\r\n # if (jaccard > 0.65):\r\n # # run Ratcliff-Obershel for further matching, threshold of .75 and prevent self-match\r\n # sequence = textdistance.ratcliff_obershelp(\r\n # personA, personB)\r\n # if sequence > 0.75 and mycompany.name != check.company:\r\n # comps.append(check.company)\r\n # if not comps:\r\n # comps.append('Director is not on the board of any other companies')\r\n # matches.append(comps)\r\n\r\n object_list = []\r\n # object_list.append((query, fid))\r\n # object_list.append((mycompany.name, mycompany.ticker))\r\n # object_list.append(company_filings)\r\n # object_list.append(filing)\r\n # object_list.append(funds)\r\n # object_list.append(zip(directors, matches))\r\n # object_list.append(zip(exectable, matches))\r\n # object_list.append(links)\r\n\r\n company_name = company.name\r\n company_ticker = company.ticker\r\n\r\n url = '/mnt/filings-static/capitalrap/edgarapp/static/filings/' + filing.filingpath\r\n\r\n t_o_c = filing.table_of_contents.first()\r\n\r\n if not t_o_c:\r\n toc_extractor = TOCAlternativeExtractor()\r\n\r\n extract_data = toc_extractor.extract(url)\r\n\r\n t_o_c = filing.table_of_contents.create(body=extract_data.table)\r\n\r\n return render(\r\n request, template_name, {\r\n 'object_list': object_list,\r\n 'company_filings': company_filings,\r\n 'company_ticker': company_ticker,\r\n 'directors': directors,\r\n 'executives': executives,\r\n 'company_name': company_name,\r\n 'current_filing': filing,\r\n 'funds': funds,\r\n 'extended_template': extended_template,\r\n 'table_of_contents': t_o_c.body,\r\n 'fid': filing.id,\r\n }\r\n )\r\n\r\n\r\ndef AboutView(request):\r\n template_name = 'about.html'\r\n extended_template = 'base.html'\r\n\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n return render(\r\n request, template_name,\r\n {'extended_template': extended_template}\r\n )\r\n\r\n\r\ndef HedgeFundView(request):\r\n template_name = 'hedgeFunds.html'\r\n extended_template = 'base.html'\r\n\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n return render(\r\n request, template_name,\r\n {'extended_template': extended_template}\r\n )\r\n\r\n\r\ndef FaqView(request):\r\n template_name = 'faq.html'\r\n extended_template = 'base.html'\r\n\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n return render(\r\n request, template_name,\r\n {'extended_template': extended_template}\r\n )\r\n\r\n\r\n# for contact\r\n\r\n\r\ndef contactView(request):\r\n form = ContactForm(request.POST or None)\r\n\r\n extended_template = 'base.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n if form.is_valid():\r\n name = form.cleaned_data.get(\"name\")\r\n email = form.cleaned_data.get(\"email\")\r\n message = form.cleaned_data.get(\"message\")\r\n subject = \"CapitalRap Contact Form: \" + name\r\n\r\n comment = name + \" with the email, \" + email + \\\r\n \", sent the following message:\\n\\n\" + message\r\n send_mail(subject, comment, settings.EMAIL_HOST_USER,\r\n [settings.EMAIL_HOST_USER])\r\n\r\n context = {'form': form, 'extended_template': extended_template}\r\n 
messages.info(request, 'Thank you for contacting us!')\r\n return HttpResponseRedirect(request.path_info)\r\n\r\n else:\r\n context = {'form': form, 'extended_template': extended_template}\r\n return render(\r\n request, 'contact.html', context,\r\n )\r\n\r\n # if request.method == 'GET':\r\n # form = ContactForm()\r\n # else:\r\n # form = ContactForm(request.POST)\r\n # if form.is_valid():\r\n # name = form.cleaned_data['name']\r\n # email = form.cleaned_data['email']\r\n # message = form.cleaned_data['message']\r\n # try:\r\n # send_mail('CapitalRap Contact Form '+name+' '+email, message, settings.EMAIL_HOST_USER, [settings.EMAIL_HOST_USER], fail_silently=False)\r\n # except BadHeaderError:\r\n # return HttpResponse('Invalid header found.') #TODO: ADD MESSAGE INSTEAD\r\n # messages.info(request, 'Thank you for contacting us!')\r\n # return HttpResponseRedirect(request.path_info)\r\n # return render(request, \"contact.html\", {'form': form})\r\n\r\n\r\n##################\r\n## Members side ##\r\n\r\n\r\ndef login_view(request):\r\n form = UsersLoginForm(request.POST or None)\r\n\r\n extended_template = 'base.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'home.html'\r\n\r\n if form.is_valid():\r\n username = form.cleaned_data.get(\"username\")\r\n password = form.cleaned_data.get(\"password\")\r\n user = authenticate(username=username, password=password)\r\n login(request, user)\r\n\r\n if request.GET.get('next') == None:\r\n return redirect('home')\r\n else:\r\n return redirect('/filing/?q=' + request.GET.get('next') + '&fid=all')\r\n return render(request, \"form.html\", {\r\n \"form\": form,\r\n \"title\": \"Login\",\r\n 'extended_template': extended_template,\r\n })\r\n\r\n\r\ndef register_view(request):\r\n form = UsersRegisterForm(request.POST or None)\r\n\r\n extended_template = 'base.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n\r\n if form.is_valid():\r\n user = form.save()\r\n password = form.cleaned_data.get(\"password\")\r\n user.set_password(password)\r\n user.save()\r\n new_user = authenticate(username=user.username, password=password)\r\n login(request, new_user)\r\n if request.GET.get('next') == None:\r\n return redirect('home')\r\n else:\r\n return redirect('/filing/?q=' + request.GET.get('next') + '&fid=all')\r\n\r\n return render(request, \"form.html\", {\r\n \"title\": \"Register\",\r\n \"form\": form,\r\n 'extended_template': extended_template,\r\n })\r\n\r\n\r\n@login_required\r\ndef account_view(request):\r\n if request.method == 'POST':\r\n form = PasswordChangeForm(request.user, request.POST)\r\n if form.is_valid():\r\n user = form.save()\r\n update_session_auth_hash(request, user)\r\n messages.success(request, _(\r\n 'Your password was successfully updated!'))\r\n return redirect('account')\r\n else:\r\n messages.error(request, _('There was an error. 
Try again!'))\r\n else:\r\n form = PasswordChangeForm(request.user)\r\n return render(request, 'account.html', {\r\n 'form': form\r\n })\r\n\r\n\r\ndef logout_view(request):\r\n logout(request)\r\n return HttpResponseRedirect(\"/\")\r\n\r\n\r\ndef PlanView(request):\r\n extended_template = 'base.html'\r\n if request.user.is_authenticated:\r\n extended_template = 'base_member.html'\r\n return render(request, 'plan.html', {'extended_template': extended_template,\r\n })\r\n\r\n\r\ndef PrinterView(request, fid, start):\r\n try:\r\n filing = Filing.objects.get(id=fid)\r\n except:\r\n return HttpResponse(status=404, content=\"Requested Filing Could not be Found for printing\")\r\n\r\n url = '/mnt/filings-static/capitalrap/edgarapp/static/filings/' + filing.filingpath\r\n if start == 'full':\r\n return HttpResponseRedirect('/static/filings/' + filing.filingpath)\r\n else:\r\n printer = Printer().generate(url, start)\r\n\r\n return render(request, 'printer.html', {'html': printer})","repo_name":"ppalancica/mblazr","sub_path":"edgarapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"2055780151","text":"from django.shortcuts import render\n\nfrom feeder.forms import UrlForm\nfrom feeder.models import Feed, Article\n\n\ndef add_feed(request):\n success = False\n if request.method == 'POST':\n urlform = UrlForm(request.POST)\n if urlform.is_valid():\n feed = Feed.objects.create()\n feed.rss_url = urlform.cleaned_data['url']\n feed.save()\n success = True\n else:\n urlform = UrlForm()\n return render(request, \"add_feed.html\",\n {'form': urlform,\n 'success': success})\n\n\ndef home(request):\n feeds = Feed.objects.all()\n articles = Article.objects.all()\n context = {\n 'feeds': feeds,\n 'articles': articles,\n }\n return render(request, 'home.html', context)\n","repo_name":"ravipudi/pathrika","sub_path":"pathrika/feeder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43111235409","text":"# if (year is not divisible by 4) then (it is a common year)\r\n# else if (year is not divisible by 100) then (it is a leap year)\r\n# else if (year is not divisible by 400) then (it is a common year)\r\n# else (it is a leap year)\r\n\r\ndef is_leap(year): \r\n res = False \r\n if (year % 4 != 0):\r\n res = False \r\n else:\r\n if (year % 100 != 0):\r\n res = True\r\n else:\r\n if (year % 400 != 0):\r\n res = False\r\n else:\r\n res = True\r\n return res\r\n\r\nyear = int(input())\r\nprint(is_leap(year))","repo_name":"tanyardr/pythonPure","sub_path":"leapYear.py","file_name":"leapYear.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24294104295","text":"temp = input(\"Enter the number of rows and columns : \").split()\nn, m = int(temp[0]), int(temp[1])\n\"\"\"\nprint(\"Enter the values :\")\n# Each row should be given as space seperated input and press enter before giving the second row\nli = [[int(i) for i in input().split()] for j in range(n)]\n\"\"\"\n# Each row should be given as space seperated input\ntemp = input(\"Enter the values : \").split()\nli = [[int(temp[m*i+j]) for j in range(m)] for i in range(n)]\nprint(\"The 2-D List is : \")\nfor ele in li:\n out = \" \".join([str(x) for x in ele])\n 
print(out)","repo_name":"Tanmoy0077/Python-Experiments","sub_path":"TwoD_List.py","file_name":"TwoD_List.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19670505401","text":"while True:\n try:\n num1 = input(\"Enter first number: \")\n if num1 == 'q' or num1 == 'Q':\n break\n\n num2 = input(\"Enter second number: \")\n if num2 == 'q' or num2 == 'Q':\n break\n\n num1 = int(num1)\n num2 = int(num2)\n except ValueError:\n print(\"Enter the numeric input, please.\")\n else: \n result = num1 + num2\n print(\"Addition is: \" + str(result))","repo_name":"greedyaj/learn.python","sub_path":"Python/CrashCourse/Chaprter10 - Files and Exceptions/addition_calculator.py","file_name":"addition_calculator.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41277623740","text":"d = {}\nwith open(r\"C:\\Users\\Пользователь\\Documents\\input.txt\",'r') as f:\n for line in f:\n s = line.strip().split(';')\n d[s[0]] = s[1:len(s)]\nmed = medAbs1 = medAbs2 = medAbs3 = 0\nfor key,value in d.items():\n med = (float(value[0])+float(value[1])+float(value[2]))/3\n medAbs1 += float(value[0])\n medAbs2 += float(value[1])\n medAbs3 += float(value[2])\n d[key] = str(med)\nn = len(d)\nwith open(r\"C:\\Users\\Пользователь\\Documents\\output.txt\",'w') as ouf:\n for value in d.values():\n ouf.write(value + '\\n')\n ouf.write(str(medAbs1/n) + \" \" + str(medAbs2/n) + \" \" + str(medAbs3/n))\n","repo_name":"mathamateur/Stepik","sub_path":"Python_BasicsAndApplication/Medium_mark.py","file_name":"Medium_mark.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35905782579","text":"import pandas as pd\nimport gensim\nfrom gensim.parsing.preprocessing import preprocess_documents\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom gensim.test.utils import get_tmpfile\nimport pickle\n\n# data frame creation for movies released in and after 2000\ndf = pd.read_csv(\"wiki_movie_plots_deduped.csv\", sep = \",\")\ndf = df[df[\"Release Year\"] >= 2000]\n\nprint(df.head())\n\n# text corpus creation and preprocessing\ntextCorpus = df[\"Plot\"].values\nprocessedCorpus = preprocess_documents(textCorpus)\ntaggedCorpus = [TaggedDocument(d, [i]) for i, d in enumerate(processedCorpus)]\n\n# Doc2Vec model creation\nmodel = Doc2Vec(taggedCorpus, dm = 0, vector_size = 200,\n window = 2, min_count = 1, epochs = 10, hs = 1)\n\n# saving the model to a file\nfname = \"modelDoc2Vec\"\npickle.dump(model, open(fname, \"wb\"))\n\n# testing the model with a sample input\ntestString = \"Sith lord fights Jedi with light saber on star destroyer\"\ntestString = gensim.parsing.preprocessing.preprocess_string(testString)\ntestDocVector = model.infer_vector(testString)\nsims = model.dv.most_similar(positive = [testDocVector])\nfor s in sims:\n print(\"{}: {}\".format(s[1], df[\"Title\"].iloc[s[0]]))\n","repo_name":"photonPrograms/botAlpha","sub_path":"plot_relation.py","file_name":"plot_relation.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"10689278659","text":"\nimport math\nimport time\nimport numpy as np\nimport scipy.io.wavfile\nimport scipy.signal\nimport multiprocessing\n\ntry:\n #import visualization submodule\n import 
reverbsimulator.viz\nexcept Exception as e:\n # there was an error in the extra depends\n print (\"error while importing viz: '%s'\" % str(e))\n\ndef db(x, inv=False):\n if inv:\n # if inverted, return how many db a coefficient is\n if x == 0 or x == 0.0:\n return -float(\"inf\")\n else:\n return 20.0 * math.log(x, 10.0)\n else:\n # from db to coefficient\n return math.pow(10.0, x/20.0)\n\ndef semitones(x, inv=False):\n if inv:\n # from semitones to hz\n return 440.0 * math.pow(2.0, x / 12.0)\n else:\n # from hz to semitones\n return 12.0 * math.log(x / 440.0, 2.0)\n\ndef normalize(data):\n return data/np.max(np.abs(data))\n\ndef write_wav(filename, data):\n scipy.io.wavfile.write(filename, 44100, np.int16(normalize(data) * 32767))\n\ndef read_wav(filename):\n _, src = scipy.io.wavfile.read(filename)\n return src\n\n\nclass ImpulseResponse():\n\n def __init__(self, data, delay=0.0, samplerate=44100):\n # very small epsilon to consider zero\n # TODO: find a faster way to do this\n #epsi = 0.0\n #epsi = 1.0 / (500 * (len(data) + 5))\n #if epsi > 10.0 **-16.0:\n # epsi = 10 ** -16.0\n\n imp = np.array(data, dtype=np.float32)\n self.data = imp\n \n #print(np.nonzero(tmp))\n #print(np.nonzero(tmp)[-1])\n #if len(data) in (0, 1):\n # self.data = imp\n #else:\n # tmp = np.abs(imp) < epsi\n # i = len(imp) - 1\n # while i > 0 and tmp[i]:\n # i -= 1\n # self.data = imp[:i+1]\n self.delay = delay\n self.samplerate = samplerate\n\n def copy(self):\n return ImpulseResponse(self.data, self.delay, self.samplerate)\n\n\n # generative functions\n\n def impulse(delay=0.0, db=0.0):\n return ImpulseResponse(np.array([1.0], dtype=np.float32), delay)\n\n def nothing():\n return ImpulseResponse(np.array([], dtype=np.float32))\n\n def RC_lowpass(cutoff, num_pts=100):\n #this is an emulation of an infinite response (https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter)\n # num_pts is how many points it is let out to\n dt = 1.0 / 44100.0\n RC = 1.0 / (cutoff * 2.0 * math.pi)\n alpha = dt / (RC + dt)\n\n data = [alpha * math.pow(1.0 - alpha, i) for i in range(num_pts)]\n\n return ImpulseResponse(data, 0.0, 44100)\n\n\n def RC_highpass(cutoff, num_pts=100):\n #this is an emulation of an infinite response (https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter)\n # num_pts is how many points it is let out to\n dt = 1.0 / 44100.0\n RC = 1.0 / (cutoff * 2.0 * math.pi)\n alpha = dt / (RC + dt)\n\n\n data = [math.pow(-1, i) * (math.pow(alpha, i + 1) - math.pow(alpha, i)) for i in range(num_pts)]\n return ImpulseResponse(data, 0.0, 44100)\n\n def response_filter(func, num_pts=2048):\n # func should be a function/lambda that takes:\n # hz\n # and returns a complex number that describes the response to that frequency\n # (you can use just a float to keep phase undisturbed)\n\n response = np.array([func(44100.0 * i / (num_pts * 2)) for i in range(num_pts)], np.complex64)\n imp = np.fft.irfft(response)\n #print (imp)\n\n return ImpulseResponse(imp, 0.0, 44100)\n\n\n # class methods\n\n def add_delay(self, amount):\n self.delay += amount\n\n def combine(self, otherIR):\n if len(self.data) == 0 or len(otherIR.data) == 0:\n return ImpulseResponse([], 0.0)\n else:\n # TODO: possibly use fftpack to convolve quicker (for larger sets)\n conv_signal = None\n if len(self.data) == 1 and len(otherIR.data) == 1:\n conv_signal = [self.data[0] * otherIR.data[0]]\n else:\n conv_signal = np.convolve(self.data, otherIR.data)\n \n return ImpulseResponse(conv_signal, self.delay + 
otherIR.delay)\n\n def __add__(self, v):\n # adds them together\n if isinstance(v, ImpulseResponse):\n if self.delay > v.delay:\n _data = v.data.copy()\n _adj_my = np.concatenate([np.zeros(int((self.delay - v.delay) * self.samplerate)), self.data])\n if len(_data) > len(_adj_my):\n _adj_my = np.concatenate([_adj_my, np.zeros(len(_data) - len(_adj_my))])\n elif len(_adj_my) > len(_data):\n _data = np.concatenate([_data, np.zeros(len(_adj_my) - len(_data))])\n return ImpulseResponse(_data + _adj_my, v.delay, self.samplerate)\n else:\n _data = self.data.copy()\n _adj_other = np.concatenate([np.zeros(int((v.delay - self.delay) * self.samplerate)), v.data])\n if len(_data) > len(_adj_other):\n _adj_other = np.concatenate([_adj_other, np.zeros(len(_data) - len(_adj_other))])\n elif len(_adj_other) > len(_data):\n _data = np.concatenate([_data, np.zeros(len(_adj_other) - len(_data))])\n return ImpulseResponse(_data + _adj_other, self.delay, self.samplerate)\n\n\n def __mul__(self, v):\n if isinstance(v, ImpulseResponse):\n return self.combine(v)\n elif isinstance(v, float) or isinstance(v, int):\n r = self.copy()\n r.data *= v\n return r\n def __rmul__(self, v):\n return self.__mul__(v)\n\n def flattened(self):\n\n extra = np.zeros((int(self.delay * self.samplerate), ), dtype=np.float32)\n ir = np.concatenate([extra, self.data])\n\n # at the back doesn't matter\n return np.trim_zeros(ir, 'b')\n\n\n def __str__(self):\n return \"IR(delay=%d, len(data)=%d)\" % (int(self.delay * self.samplerate), len(self.data))\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Point():\n\n def __init__(self, x, y=None):\n if y is None and (isinstance(x, list) or isinstance(x, tuple) or isinstance(x, Point)):\n self._x = x[0]\n self._y = x[1]\n else:\n self._x = x\n self._y = y\n #elif radius is not None and direction is not None:\n # self._x = radius * math.cos(direction)\n # self._y = radius * math.sin(direction)\n #else:\n # raise Exception(\"Error creating point!\")\n\n def polar(direction, radius=1.0):\n return Point(radius * math.cos(direction), radius * math.sin(direction))\n\n def __hash__(self):\n return hash(self._x) + hash(self._y)\n\n def __getitem__(self, k):\n if k == 0:\n return self._x\n elif k == 1:\n return self._y\n return (self._x, self._y)[k]\n\n def get_x(self):\n return self._x\n def set_x(self, v):\n self._x = v\n\n def get_y(self):\n return self._y\n def set_y(self, v):\n self._y = v\n\n x = property(get_x, set_x)\n y = property(get_y, set_y)\n\n def get_radius(self):\n return math.hypot(self._x, self._y)\n\n def set_radius(self, v):\n new_ratio = v / self.get_radius()\n self._x *= new_ratio\n self._y *= new_ratio\n\n def get_direction(self):\n return math.atan2(self._y, self._x)\n def set_direction(self, v):\n myr = self.get_radius()\n self._x = myr * math.cos(v)\n self._y = myr * math.sin(v)\n\n radius = property(get_radius, set_radius)\n direction = property(get_direction, set_direction)\n \n def rotated(self, radians, about=(0.0, 0.0)):\n diff = self - about\n # matrix transformation\n cosp = math.cos(radians)\n sinp = math.sin(radians)\n new_x = cosp * diff._x - sinp * diff._y\n new_y = sinp * diff._x + cosp * diff._y\n\n return Point(about[0] + new_x, about[1] + new_y)\n\n def __neg__(self):\n return Point(-self._x, -self._y)\n\n def __rshift__(self, v):\n # rshift is rotation clockwise (>>)\n return self.rotated(-v)\n \n def __lshift__(self, v):\n # lshift rotation counterclockwise (<<)\n return self.rotated(v)\n\n def __add__(self, v):\n return Point(self._x + v[0], self._y 
+ v[1])\n    def __sub__(self, v):\n        return Point(self._x - v[0], self._y - v[1])\n\n    def __dot__(self, v):\n        return self._x * v[0] + self._y * v[1]\n\n    def __mul__(self, v):\n        if isinstance(v, Point) or isinstance(v, tuple) or isinstance(v, list):\n            # dot product\n            return self.__dot__(v)\n        else:\n            return Point(self._x * v, self._y * v)\n\n    def __rmul__(self, v):\n        return self.__mul__(v)\n\n    def __truediv__(self, v):\n        if isinstance(v, Point):\n            raise Exception(\"division of points not defined!\")\n        return Point(self._x / v, self._y / v)\n\n    def __abs__(self):\n        return self.radius\n\n    def __str__(self):\n        return \"(%f, %f)\" %(self._x, self._y)\n\n    def __repr__(self):\n        return self.__str__()\n\n    def __eq__(self, v):\n        return self._x == v[0] and self._y == v[1] \n\n\nclass PointTransformation():\n\n    def __init__(self, translate=None, rotate=None):\n        self.translate = translate\n        self.rotate = rotate\n        if rotate is not None:\n            self.rotate_cos = math.cos(rotate)\n            self.rotate_sin = math.sin(rotate)\n\n    def identity(self, pt):\n        # basecase\n        return pt\n\n    def do_translate(self, pt):\n        if self.translate is None:\n            return pt\n\n        else:\n            return pt + self.translate\n\n    def do_rotate(self, pt):\n        if self.rotate is None:\n            return pt\n        else:\n            x = pt[0]\n            y = pt[1]\n            return Point(x * self.rotate_cos - y * self.rotate_sin, x * self.rotate_sin + y * self.rotate_cos)\n\n\n    def transform(self, pt):\n        if self.rotate is not None:\n            x = pt[0] + self.translate[0]\n            y = pt[1] + self.translate[1]\n            return Point(x * self.rotate_cos - y * self.rotate_sin, x * self.rotate_sin + y * self.rotate_cos)\n        else:\n            return Point(pt[0] - self.translate[0], pt[1] - self.translate[1])\n\n\n    def transform_y0(self, _x):\n        # transforms assuming pt[1] == 0.0\n        if self.rotate is not None:\n            x = _x + self.translate[0]\n            y = self.translate[1]\n            return Point(x * self.rotate_cos - y * self.rotate_sin, x * self.rotate_sin + y * self.rotate_cos)\n        else:\n            return Point(_x - self.translate[0], - self.translate[1])\n\n\n    def inverse(self, pt):\n        if self.rotate is not None:\n            # de-rotate then transform\n            # [cos(p) -sin(p)] ^-1\n            # [sin(p) cos(p)]\n            # = \n            # [cos(p) sin(p)]\n            # [-sin(p) cos(p)]\n            x = pt[0] * self.rotate_cos + pt[1] * self.rotate_sin\n            y = -pt[0] * self.rotate_sin + pt[1] * self.rotate_cos\n            return Point(x - self.translate[0], y - self.translate[1])\n        else:\n            return Point(pt[0] - self.translate[0], pt[1] - self.translate[1])\n\n    def inverse_y0(self, _x):\n        # invert assuming pt[1] == 0\n        if self.rotate is not None:\n            \n            # de-rotate then transform\n            # [cos(p) -sin(p)] ^-1\n            # [sin(p) cos(p)]\n            # = \n            # [cos(p) sin(p)]\n            # [-sin(p) cos(p)]\n            x = _x * self.rotate_cos\n            y = -_x * self.rotate_sin\n            return Point(x - self.translate[0], y - self.translate[1])\n        else:\n            return Point(_x - self.translate[0], - self.translate[1])\n\nclass Ray():\n\n    def __init__(self, start, direction):\n        #start: Point, direction is a radian measure\n        self.start = Point(start)\n        self.direction = direction\n\n        self._s = None\n        self._d = None\n        self._T = None\n\n    def perspective_T(self):\n        # returns perspective transform\n        if self._s is None or self._d is None or self.start != self._s or self.direction != self._d:\n            self._s = self.start\n            self._d = self.direction\n            self._T = PointTransformation(-self.start, -self.direction)\n        return self._T\n\n\nclass RaycastResult():\n\n    def __init__(self, hit, point=None, otherside=None, direction=None, distance=None):\n        #hit: bool, whether or not the object was hit\n        #point: which point did it collide at\n        #otherside: where 
would the ray come through the other side?\n        #direction: what is the direction of the tangent line of the geometry at the point of collision?\n        #distance: how far away was the hit?\n\n        self.hit = hit\n        self.point = point\n        self.otherside = otherside\n        self.direction = direction\n        self.distance = distance\n\n    def __bool__(self):\n        return self.hit\n\n    def __str__(self):\n        if self.hit:\n            return \"RaycastResult(True, %s, otherside=%s direction=%f, distance=%f)\" % (self.point, self.otherside, self.direction, self.distance)\n        else:\n            return \"RaycastResult(False)\"\n\n    def __repr__(self):\n        return self.__str__()\n\n\n\nclass Geometry():\n    \"\"\"\n\n    Geometry is an abstract class that all interactable scene object types (line, circle, etc.) inherit from\n\n    All must use the \"points\" and \"values\" variables. The points variable includes 2d points that are the pivot points (and can be rotated). The values variables can be anything\n\n    A circle might use self.points = {\"center\": CenterPoint} and self.values = {\"radius\": radius}\n\n    Classes can set the default kwargs (for a radius of one, example)\n\n    \"\"\"\n\n    default_kwargs = {}\n\n    def __init__(self, **kwargs):\n        # merge arguments\n        self.values = { **self.__class__.default_kwargs, **kwargs }\n\n    def __hash__(self):\n        return hash(self.values.values())\n\n    def __getitem__(self, k):\n        return self.values.__getitem__(k)\n\n    def __setitem__(self, k, v):\n        return self.values.__setitem__(k, v)\n\n    def copy(self):\n        return self.__class__(**self.values)\n\n    def __str__(self):\n        return \"%s(%s)\" % (self.__class__.__name__, self.values)\n\n    def __repr__(self):\n        return self.__str__()\n\n    def __raycast__(self, ray):\n        \"\"\"\n        this method should be overloaded and return a RaycastResult detailing how a ray would interact with the geometry\n        \"\"\"\n        raise NotImplementedError(\"__raycast__ method is not implemented\")\n\nclass Line(Geometry):\n\n    def __init__(self, start, end):\n        super().__init__(start=Point(start), end=Point(end))\n\n    def __raycast__(self, ray):\n        # transform both points\n        #start_p = (self[\"start\"] - ray.start) >> ray.direction\n        #end_p = (self[\"end\"] - ray.start) >> ray.direction\n        start_p = ray.perspective_T().transform(self[\"start\"])\n        end_p = ray.perspective_T().transform(self[\"end\"])\n\n        if (start_p.y >= 0.0 and end_p.y <= 0.0) or (start_p.y <= 0.0 and end_p.y >= 0.0):\n            # x point where they intersect\n            x_pos = - start_p.y * (end_p.x - start_p.x) / (end_p.y - start_p.y) + start_p.x\n            # then we hit the line, but we need to make sure it's on the right side (because a ray is only positive)\n            if x_pos > 0.001:\n                hit_pt = ray.perspective_T().inverse_y0(x_pos)\n                return RaycastResult(True, hit_pt, hit_pt, (self[\"end\"] - self[\"start\"]).direction, x_pos)\n            else:\n                return RaycastResult(False)\n        else:\n            return RaycastResult(False)\n\nclass Polygon(Geometry):\n\n    def __init__(self, *points):\n        super().__init__(points=points)\n\n        self._h = None\n        self._lines = None\n\n    def get_lines(self):\n        my_h = hash(self)\n        if self._h is None or my_h != self._h:\n            self._h = my_h\n            if len(self[\"points\"]) == 0 or len(self[\"points\"]) == 1:\n                self._lines = []\n            elif len(self[\"points\"]) == 2:\n                self._lines = [Line(*self[\"points\"])]\n            else:\n                self._lines = [Line(self[\"points\"][i], self[\"points\"][i + 1]) for i in range(len(self[\"points\"]) - 1)] + [Line(self[\"points\"][-1], self[\"points\"][0])]\n\n        return self._lines\n\n    lines = property(get_lines)\n\n    def __raycast__(self, ray):\n        def line_cast(ln):\n            # line transform by ray\n            
#start_p = (ln[\"start\"] - ray.start) >> ray.direction\n #end_p = (ln[\"end\"] - ray.start) >> ray.direction\n return ln.__raycast__(ray)\n \n hits = list(filter(None, map(line_cast, self.lines)))\n if len(hits) == 0:\n return RaycastResult(False)\n elif len(hits) == 1:\n return hits[0]\n else:\n return min(hits, key=lambda x: x.distance)\n \n\nclass Circle(Geometry):\n\n def __init__(self, center, radius=1.0):\n super().__init__(center=Point(center), radius=radius)\n\n def __raycast__(self, ray):\n # transform both points\n center_p = ray.perspective_T().transform(self[\"center\"])\n\n r = self[\"radius\"]\n\n if abs(center_p.y) <= r and center_p.x > 0.001:\n # the third side within the circle\n o_side = math.sqrt(r ** 2 - center_p.y ** 2)\n #dist = center_p.x - o_side\n hit_point = ray.perspective_T().inverse_y0(center_p.x - o_side)\n #other side of the circleo_side\n through_point = ray.perspective_T().inverse_y0(center_p.x + o_side)\n\n return RaycastResult(True, hit_point, through_point, ray.direction - math.pi/2 + math.atan2(center_p.y, o_side), center_p.x - o_side)\n else:\n return RaycastResult(False)\n\nclass SceneObject():\n def __init__(self, tag=None):\n #tag: can be a string\n self.tag = tag\n\n def __str__(self):\n if self.tag is not None:\n return \"%s('%s')\" % (self.__class__.__name__, self.tag)\n else:\n return \"%s()\" % (self.__class__.__name__)\n\n# obj types\nclass Speaker(SceneObject):\n\n def __init__(self, geom, tag=None):\n super().__init__(tag)\n\n #geom: the geometry in the scene\n self.geom = geom\n\nclass Material(SceneObject):\n\n def __init__(self, geom, IR_reflect, IR_through=None, tag=None):\n super().__init__(tag)\n\n #geom: what geometry is the object in question?\n #IR_reflect: the response from reflections\n #IR_through: the response from the other side\n self.geom = geom\n self.IR_reflect = IR_reflect\n self.IR_through = IR_through\n\n\nclass Mic(SceneObject):\n def __init__(self, pos, tag=None):\n super().__init__(tag)\n #pos: position\n self.pos = Point(pos)\n\n\n\ndef scene_proc(scene, mic, i, max_bounces, Nsamples):\n cur_dir = 2.0 * math.pi * i / Nsamples\n #st = time.time()\n #_ss_time += time.time() - st\n\n return scene.single_sample(Ray(mic.pos, cur_dir), max_bounces, anim=None)\n\n\n\nclass Scene():\n def __init__(self, objs=None, sound_speed=343.0, db_per_doubling=-6.0):\n self.sound_speed = sound_speed\n self.db_per_doubling = db_per_doubling\n\n if objs is None:\n self.objs = []\n else:\n self.objs = objs\n\n self.tag_idx = {}\n\n for i in range(len(self.objs)):\n o = self.objs[i]\n if o.tag != None:\n self.tag_idx[o.tag] = i\n\n def add_obj(self, obj):\n if obj.tag != None:\n if obj.tag in self.tag_idx.keys():\n print (\"warning, duplicate item for tag '%s'\" % obj.tag)\n self.tag_idx[obj.tag] = len(self.objs)\n self.objs += [obj]\n\n def __getitem__(self, k):\n if isinstance(k, int):\n return self.objs[k]\n elif k in self.tag_idx.keys():\n return self.objs[self.tag_idx[k]]\n else:\n raise KeyError()\n\n def __setitem__(self, k, v):\n if k in self.tag_idx.keys():\n self.objs[self.tag_idx[k]] = v\n else:\n self.add_obj(v)\n\n def create_IR(self, mic_tag=None, Nsamples=50, max_bounces=25, anim=None, threads=None):\n my_mic = None\n if mic_tag is None:\n for o in self.objs:\n if isinstance(o, Mic):\n my_mic = o\n break\n else:\n my_mic = self[mic_tag]\n if my_mic and not isinstance(my_mic, Mic):\n raise Exception(\"The object under tag '%s' is not a Mic\" % (mic_tag))\n\n total_IR = None\n\n _ss_time = 0.0\n\n if threads == 0 or threads == 
None or threads == 1:\n for i in range(Nsamples):\n if anim:\n anim.colors[\"ray\"] = (1.0 * i /Nsamples, 1.0 - 1.0 * i / Nsamples, 0.0)\n #print (\"%d\" % i)\n cur_dir = 2.0 * math.pi * i / Nsamples\n st = time.time()\n cur_IR = self.single_sample(Ray(my_mic.pos, cur_dir), max_bounces, anim)\n _ss_time += time.time() - st\n if total_IR is None:\n total_IR = cur_IR\n else:\n total_IR = total_IR + cur_IR\n else:\n # multithreaded approach\n\n\n pool = multiprocessing.Pool(threads)\n args = []\n for i in range(Nsamples):\n args += [(self, my_mic, i, max_bounces, Nsamples)]\n coll_IR = pool.starmap(scene_proc, tuple(args))\n\n # combine\n for c in coll_IR:\n if total_IR is None:\n total_IR = c\n else:\n total_IR = total_IR + c\n\n\n\n #print (\"individual samples took: %f\" % (_ss_time / Nsamples))\n\n return total_IR\n\n\n def single_sample(self, ray, max_bounces=25, anim=None):\n my_ir = None\n total_dist = 0.0\n\n for i in range(max_bounces):\n hit_obj = None\n bounce_r = RaycastResult(False)\n if anim:\n anim.goto(ray.start)\n \n #print ('bounce: %d' % i)\n\n for o in self.objs:\n if isinstance(o, Material) or isinstance(o, Speaker):\n cur_rr = o.geom.__raycast__(ray)\n\n if cur_rr:\n if not bounce_r or cur_rr.distance < bounce_r.distance:\n bounce_r = cur_rr\n hit_obj = o\n\n if hit_obj is not None:\n # a bounce should satisfy \"s + s' = 2 * a\"\n # so, s' = 2 * a - s\n total_dist += bounce_r.distance\n\n if anim:\n anim.draw_ray(ray, distance=bounce_r.distance)\n\n if isinstance(hit_obj, Speaker):\n # we found the last\n break\n else:\n if my_ir is None:\n my_ir = hit_obj.IR_reflect.copy()\n else:\n # TODO: add IR_through\n my_ir = my_ir.combine(hit_obj.IR_reflect)\n\n #if hit_obj.IR_through is not None:\n # my_ir = my_ir.combine(self.single_sample(Ray(bounce_r.otherside, ray.direction), max_bounces=max_bounces // 2, anim=anim))\n\n ray = Ray(bounce_r.point, 2 * bounce_r.direction - ray.direction)\n\n else:\n my_ir = ImpulseResponse.nothing()\n break\n \n if total_dist > 0.0 and my_ir is not None:\n my_ir *= db(math.log(total_dist, 2.0) * self.db_per_doubling)\n my_ir.delay += total_dist / self.sound_speed\n\n if my_ir is None:\n my_ir = ImpulseResponse.nothing()\n return my_ir\n \n \n\n","repo_name":"ChemicalDevelopment/ReverbSimulator","sub_path":"reverbsimulator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":24015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26917109961","text":"import pygame\nfrom pygame.sprite import Group\n\nclass Items_popup(pygame.sprite.Sprite):\n def __init__(self, x:int, y:int, width:int, height:int, afbeelding:str):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load(afbeelding)\n self.image = pygame.transform.scale(self.image, (width, height))\n self.afbeelding = afbeelding \n self.rect = self.image.get_rect()\n self.rect.topleft = (x,y)\n self.width = width\n self.height = height","repo_name":"MeganTai/ingenieursproject","sub_path":"items_popup.py","file_name":"items_popup.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39732654590","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 18 12:45:15 2022\n\n@author: Christopher Corbell\n\"\"\"\n\nfrom hex.hexboard import HexBoard\n\nclass HexTree:\n \n def __init__(self):\n self.plies = []\n \n def generateChildBoards(parent: HexBoard, assignLabels=True):\n 
nextPlayer = parent.getNextPlayer()\n        if nextPlayer == HexBoard.EMPTY_TOKEN:\n            raise Exception(\"Can't generate child boards, this board is game-over or in unexpected state\")\n        \n        emptyTiles = parent.getEmptyTiles()\n        childBoards = []\n        n = 0\n        for tile in emptyTiles:\n            childBoard = parent.copy()\n            if nextPlayer == HexBoard.X_TOKEN:\n                childBoard.playX(tile[0], tile[1])\n            else:\n                childBoard.playO(tile[0], tile[1])\n            \n            if assignLabels:\n                childBoard.label = parent.label + f\".{n}\"\n                n += 1\n            childBoards.append(childBoard)\n        return childBoards\n    \n    def generateTree(self, root:HexBoard, maxDepth=-1):\n        self.plies = []\n        \n        depth = 0\n        lastPly = [root]\n        currentPly = []\n        \n        if maxDepth == -1:\n            maxDepth = root.countEmptyTiles()\n        \n        while depth < maxDepth:\n            for parent in lastPly:\n                if parent.isGameOver():\n                    continue\n                children = HexTree.generateChildBoards(parent)\n                currentPly.extend(children)\n            \n            self.plies.append(currentPly)\n            lastPly = currentPly\n            currentPly = []\n            depth += 1\n        \n    def generateBoardFromLabel(label, root):\n        boards = HexTree.getAllBoardsForLabel(label)\n        if len(boards) == 0:\n            return None\n        return boards.pop()\n    \n    \n    def getAllBoardsForLabel(label, root):\n        \n        labelParts = label.split('.')\n        currentLabel = labelParts[0]\n        root.label = currentLabel\n        \n        results = [root]\n        nextRoot = root\n        \n        for index in range(1, len(labelParts)):\n            \n            nextIndex = int(labelParts[index])\n            currentLabel += f\".{labelParts[index]}\"\n            \n            children = HexTree.generateChildBoards(nextRoot)\n            if nextIndex < 0 or nextIndex >= len(children):\n                raise Exception(f\"Could not get object for label {currentLabel} - bad index\")\n            \n            nextRoot = children[nextIndex]\n            nextRoot.label = currentLabel\n            results.append(nextRoot)\n        \n        return results\n    \n    def findWins(root, forPlayer=HexBoard.O_TOKEN, labelsOnly=False):\n        \"\"\"\n        Given a root hex board, find all winning boards for\n        a player (token), or for either player if forPlayer is\n        None or empty.\n        \n        The method returns a list of winning boards, unless\n        labelsOnly is set to True.\n        \n        The boards are found in a breadth-first generative search\n        of the game tree below root.\n\n        Parameters\n        ----------\n        root : HexBoard\n            A valid HexBoard, canonically labeled.\n\n        Returns\n        -------\n        A list of winning HexBoard objects, unless labelsOnly=True,\n        then this method returns a list of label strings.\n        \"\"\"\n        resultBoards = []\n        resultLabels = []\n        children = HexTree.generateChildBoards(root)\n        for child in children:\n            childIsLeaf = child.isGameOver()\n            childIsWin = False\n            \n            if forPlayer == HexBoard.O_TOKEN:\n                childIsWin = child.isOWin()\n            elif forPlayer == HexBoard.X_TOKEN:\n                childIsWin = child.isXWin()\n            else:\n                childIsWin = childIsLeaf\n            \n            if childIsWin:\n                if labelsOnly:\n                    resultLabels.append(child.label)\n                else:\n                    resultBoards.append(child)\n            \n            if not childIsLeaf:\n                \n                childWins = HexTree.findWins(child, forPlayer, labelsOnly)\n                if labelsOnly:\n                    resultLabels.extend(childWins)\n                else:\n                    resultBoards.extend(childWins)\n        \n        if labelsOnly:\n            return resultLabels\n        else:\n            return resultBoards\n    \n    ","repo_name":"ccorbell/gametheory","sub_path":"hex/hextree.py","file_name":"hextree.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"575349478","text":"#A helpful little script for graphics.py module written by A.Colwell(2014)\r\n\r\nfrom graphics import *\r\n\r\nkeepGoing=True\r\n\r\nwin = GraphWin('Location 
Identifier',600,400)\r\nwin.setBackground('white')\r\nlabel = Text(Point(70,15),'Click Here to Stop')\r\nlabel.draw(win)\r\nstopper=Rectangle(Point(1,1),Point(140,25))\r\nstopper.draw(win)\r\n\r\n\r\ndef main():\r\n coord=win.getMouse()\r\n x=coord.getX()\r\n y=coord.getY()\r\n loc = str(x)+','+str(y)\r\n label=Text(Point(x,y-15),loc)\r\n label.draw(win)\r\n circ=Circle(coord,3)\r\n circ.draw(win)\r\n return x,y\r\n\r\n\r\nwhile keepGoing is True:\r\n x,y=main()\r\n if x<140 and y<25:\r\n win.close()\r\n break\r\n \r\n\r\n \r\n \r\n \r\n\r\n\r\n","repo_name":"MrColwell/PythonProfessionalLearning","sub_path":"PythonForTeachers/studentExercises/graphics_mouseclick.py","file_name":"graphics_mouseclick.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40606046405","text":"#!/usr/bin/python3\n\"\"\"extend your Python script to export data in the CSV format\"\"\"\nimport csv\nimport requests\nfrom sys import argv\n\n\nif __name__ == '__main__':\n\n url = 'https://jsonplaceholder.typicode.com/users/{}'.format(\n argv[1])\n data = requests.get(url)\n data = data.json()\n name = data.get('username')\n url1 = 'https://jsonplaceholder.typicode.com/todos?userId={}'.format(\n argv[1])\n posts = requests.get(url1)\n posts = posts.json()\n\n with open(\"{}\".format(argv[1]) + \".csv\", \"w\") as f:\n writer = csv.writer(f, dialect='unix')\n for _dic in posts:\n writer.writerow(\n [argv[1],\n name,\n _dic.get('completed'),\n _dic.get('title')])\n","repo_name":"jesus4388/holbertonschool-system_engineering-devops","sub_path":"0x15-api/1-export_to_CSV.py","file_name":"1-export_to_CSV.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18623425589","text":"import time\nimport numpy as np\nimport tkinter as tk\nfrom PIL import ImageTk, Image, ImageGrab\n\nnp.random.seed(1)\nPhotoImage = ImageTk.PhotoImage\nUNIT = 100\nHEIGHT = 5\nWIDTH = 5\n\n\nclass Env(tk.Tk):\n def __init__(self):\n super(Env, self).__init__()\n self.start = [0,0]\n self.end = [5,5]\n self.action_space = ['u', 'd', 'l', 'r']\n self.n_actions = len(self.action_space)\n self.title('Q Learning')\n self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT))\n self.shapes = self.load_images()\n self.canvas = self._build_canvas()\n self.texts = []\n\n def _build_canvas(self):\n canvas = tk.Canvas(self, bg='white',\n height=HEIGHT * UNIT,\n width=WIDTH * UNIT)\n # create grids\n for c in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT\n canvas.create_line(x0, y0, x1, y1)\n for r in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r\n canvas.create_line(x0, y0, x1, y1)\n\n # add img to canvas\n self.soldier = canvas.create_image(50, 50, image=self.shapes[0])\n self.mountain1 = canvas.create_image(250, 50, image=self.shapes[1])\n self.mountain2 = canvas.create_image(350, 50, image=self.shapes[1])\n self.mountain3 = canvas.create_image(250, 150, image=self.shapes[1])\n self.mountain4 = canvas.create_image(350, 150, image=self.shapes[1])\n self.mountain5 = canvas.create_image(250, 250, image=self.shapes[1])\n self.mountain6 = canvas.create_image(50, 350, image=self.shapes[1])\n self.mountain7 = canvas.create_image(150, 350, image=self.shapes[1])\n self.mountain8 = canvas.create_image(250, 350, image=self.shapes[1])\n self.mountain9 = canvas.create_image(350, 350, 
image=self.shapes[1])\n        self.mountain10 = canvas.create_image(50, 450, image=self.shapes[1])\n        self.mountain11 = canvas.create_image(150, 450, image=self.shapes[1])\n        self.mountain12 = canvas.create_image(250, 450, image=self.shapes[1])\n        self.mountain13 = canvas.create_image(350, 450, image=self.shapes[1])\n        \n        self.reward = canvas.create_image(450, 50, image=self.shapes[3])\n        \n        \n        \n        \n        self.city = canvas.create_image(self.end[0]*UNIT-UNIT/2, self.end[1]*UNIT-UNIT/2, image=self.shapes[2])\n\n        # pack all\n        canvas.pack()\n\n        return canvas\n\n    def load_images(self):\n        soldier = PhotoImage(\n            Image.open(r\"F:\\(1)Postgraduate\\vsCode\\freshMan\\Project1\\python\\Q_learning\\q\\reinforcement_learning\\img\\soldier.jpg\").resize((65, 65)))\n        mountain = PhotoImage(\n            Image.open(r\"F:\\(1)Postgraduate\\vsCode\\freshMan\\Project1\\python\\Q_learning\\q\\reinforcement_learning\\img\\mountain.jpg\").resize((65, 65)))\n        reward = PhotoImage(\n            Image.open(r\"F:\\(1)Postgraduate\\vsCode\\freshMan\\Project1\\python\\Q_learning\\q\\reinforcement_learning\\img\\commander.jpg\").resize((65, 65)))\n        circle = PhotoImage(\n            Image.open(r\"F:\\(1)Postgraduate\\vsCode\\freshMan\\Project1\\python\\Q_learning\\q\\reinforcement_learning\\img\\city.jpg\").resize((65, 65)))\n        \n        # return the images in the order the canvas indexes them: shapes[2] is the city image, shapes[3] the commander image\n        return soldier, mountain, circle, reward\n\n    def text_value(self, row, col, contents, action, font='Helvetica', size=10,\n                   style='normal', anchor=\"nw\"):\n        if action == 0:\n            origin_x, origin_y = 7, 42\n        elif action == 1:\n            origin_x, origin_y = 85, 42\n        elif action == 2:\n            origin_x, origin_y = 42, 5\n        else:\n            origin_x, origin_y = 42, 77\n\n        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n        font = (font, str(size), style)\n        text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n                                       font=font, anchor=anchor)\n        return self.texts.append(text)\n\n    def print_value_all(self, q_table):\n        for i in self.texts:\n            self.canvas.delete(i)\n        self.texts.clear()\n        for i in range(HEIGHT):\n            for j in range(WIDTH):\n                for action in range(0, 4):\n                    state = [i, j]\n                    if str(state) in q_table.keys():\n                        temp = q_table[str(state)][action]\n                        self.text_value(j, i, round(temp, 2), action)\n\n    def coords_to_state(self, coords):\n        x = int((coords[0] - 50) / 100)\n        y = int((coords[1] - 50) / 100)\n        return [x, y]\n\n    def state_to_coords(self, state):\n        x = int(state[0] * 100 + 50)\n        y = int(state[1] * 100 + 50)\n        return [x, y]\n\n    def reset(self):\n        self.update()\n        time.sleep(0.005)\n        x, y = self.canvas.coords(self.soldier)\n        self.canvas.move(self.soldier, UNIT / 2 - x, UNIT / 2 - y)\n        self.render()\n        # return observation\n        return self.coords_to_state(self.canvas.coords(self.soldier))\n\n    def step(self, action):\n        \n        return next_state, reward, done\n\n    # render the environment\n    def render(self):\n        time.sleep(0.0003)\n        self.update()\n","repo_name":"RobbieDragon233/Q_learning","sub_path":"environment_last.py","file_name":"environment_last.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"36011112218","text":"# encoding: utf8\n\n# A class for reading control_point data from the Mosaiq database.\n#\n# Authors:\n# Christoffer Lervåg\n# Helse Møre og Romsdal HF\n#\n# Python 3.6\n\n# Used for GUI debugging:\n#from tkinter import *\n#from tkinter import messagebox\n\nfrom .database import Database\n\nclass ControlPoint:\n  \n  # Returns a single control_point matching the given database id (TFP_ID) (or None if no match).\n  @classmethod\n  def find(cls, id):\n    instance = None\n    row = 
Database.fetch_one(\"SELECT * FROM TxFieldPoint WHERE TFP_ID = '{}'\".format(str(id)))\n if row != None:\n instance = cls(row)\n return instance\n \n\n # Gives all control_point instances belonging to the given field.\n @classmethod\n def for_field(cls, field):\n control_points = list()\n rows = Database.fetch_all(\"SELECT * FROM TxFieldPoint WHERE FLD_ID = '{}'\".format(field.id))\n for row in rows:\n control_points.append(cls(row))\n return control_points\n \n # Creates a ControlPoint instance from a control_point database row.\n def __init__(self, row):\n # Database attributes:\n self.tfp_id = row['TFP_ID']\n self.field_id = row['FLD_ID']\n self.created_date = row['Create_DtTm']\n self.created_by_id = row['Create_ID']\n self.edited_date = row['Edit_DtTm']\n self.edited_by_id = row['Edit_ID'] \n self.number = row['Point']\n self.index = row['Index']\n self.nr_leaves = row['MLC_Leaves']\n self.leaf_bank1 = row['A_Leaf_Set']\n self.leaf_bank2 = row['B_Leaf_Set']\n self.gantry_angle = row['Gantry_Ang']\n self.collimator_angle = row['Coll_Ang']\n self.field_size_x = row['Field_X']\n self.field_size_y = row['Field_Y']\n self.collimator_x1 = row['Coll_X1']\n self.collimator_x2 = row['Coll_X2']\n self.collimator_y1 = row['Coll_Y1']\n self.collimator_y2 = row['Coll_Y2']\n self.beam_intensity = row['Beam_Intensity']\n self.energy = row['Energy']\n self.energy_unit_id = row['Energy_Unit_Enum']\n self.meterset_rate = row['Meterset_Rate']\n self.gantry_rotation_id = row['Gantry_Dir_Enum']\n self.couch_pitch_angle = row['Couch_Pitch_Ang']\n #self.couch_roll_angle = row['Couch_Roll_Angle'] # for some reason this caused a crash\n self.couch_angle = row['Couch_Ang']\n self.couch_vertical = row['Couch_Vrt']\n self.couch_lateral = row['Couch_Lat']\n self.couch_longitudinal = row['Couch_Lng']\n self.isocenter_x = row['Isocenter_X']\n self.isocenter_y = row['Isocenter_Y']\n self.isocenter_z = row['Isocenter_Z']\n self.is_modified = row['IsModifiedAfterDataImport']\n # Convenience attributes:\n self.id = self.tfp_id\n # Cache attributes:\n self.instance_created_by = None\n self.instance_edited_by = None\n self.instance_field = None\n\n # The staff who created the appointment.\n def created_by(self):\n if not self.instance_created_by:\n self.instance_created_by = Location.find(self.created_by_id)\n return self.instance_created_by\n \n # The staff who last edited the appointment.\n def edited_by(self):\n if not self.instance_edited_by:\n self.instance_edited_by = Location.find(self.edited_by_id)\n return self.instance_edited_by\n \n # The energy_unit description derived from the energy_unit_id.\n def energy_unit(self):\n values = {\n 1 : 'KV',\n 2 : 'MV',\n 3 : 'MEV'\n }\n return values.get(self.energy_unit_id, 'Unknown energy_unit_id: {}'.format(self.energy_unit_id))\n \n # Gives the field which this checklist belongs to.\n def field(self):\n if not self.instance_field:\n self.instance_field = Field.find(self.field_id)\n return self.instance_field\n \n # The gantry_rotation description derived from the gantry_rotation_id.\n def gantry_rotation(self):\n values = {\n 0 : 'Unspecified',\n 1 : 'CW',\n 2 : 'CC',\n 3 : 'NONE'\n }\n return values.get(self.gantry_rotation_id, 'Unknown gantry_rotation_id: {}'.format(self.gantry_rotation_id))\n ","repo_name":"dicom/raystation-scripts","sub_path":"mosaiq/control_point.py","file_name":"control_point.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"6"} 
+{"seq_id":"7955738324","text":"from selenium import webdriver\nimport time\n\nbrowser = webdriver.Chrome(executable_path='/home/amjed/chromedriver/chromedriver')\n\nbrowser.get('https://jasim.tech/automation/one')\n\ntime.sleep(1)\n\nname = browser.find_element_by_id('id_name')\nname.send_keys('Amjed Saleel')\n\ntime.sleep(2)\n\nsubmit_btn = browser.find_element_by_css_selector('input[type=\"submit\"]')\nsubmit_btn.click()\n","repo_name":"amjedsaleel/python-automation","sub_path":"src/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36830752573","text":"import os\nimport copy\nfrom nbconvert.exporters.export import exporter_map\nfrom nbconvert.writers.files import FilesWriter\nfrom nbconvert.preprocessors import ExecutePreprocessor\nfrom notebook.base.handlers import IPythonHandler\nfrom notebook.utils import url_path_join\nfrom tornado import web, escape\nfrom tornado.concurrent import run_on_executor\nfrom concurrent.futures import ThreadPoolExecutor # `pip install futures` for python2\n\nfrom seahorse_notebook_path import SeahorseNotebookPath\n\n\nclass HeadlessNotebookHandler(IPythonHandler):\n executor = ThreadPoolExecutor(max_workers=10)\n\n @run_on_executor\n def process_notebook(self, path):\n Exporter = exporter_map[\"html\"]\n exporter = Exporter(config=self.config, log=self.log)\n serialized_path = path.serialize()\n\n # We get ExecutePreprocessor from exporter list\n # and add notebook path so kernel can use the path for various operations.\n # For example it can load the notebook from url\n ep = next(filter(lambda c: isinstance(c, ExecutePreprocessor), exporter._preprocessors))\n ep.extra_arguments.append(\"--seahorse_notebook_path=\" + serialized_path)\n\n model = self.contents_manager.get(path=serialized_path)\n try:\n output, resources = exporter.from_notebook_node(model['content'])\n\n model['content'] = resources[\"seahorse_notebook_content\"]\n self.contents_manager.save(model, path=serialized_path)\n\n resources['output_extension'] = ''\n FilesWriter(config=self.config, log=self.log).write(\n output, resources,\n notebook_name=HeadlessNotebookHandler.notebook_name(path.workflow_id, path.node_id))\n except Exception as e:\n raise web.HTTPError(500, \"nbconvert failed: %s\" % e)\n\n # get HTML-ized un-editable notebook\n def get(self, seahorse_notebook_path):\n Exporter = exporter_map[\"html\"]\n updated_config = self.no_execution_config(self.config)\n exporter = Exporter(config=updated_config, log=self.log)\n model = self.contents_manager.get(path=seahorse_notebook_path)\n\n try:\n output, _ = exporter.from_notebook_node(model['content'])\n self.write(output)\n except Exception as e:\n raise web.HTTPError(500, \"nbconvert failed: %s\" % e)\n\n def post(self):\n data = escape.json_decode(self.request.body)\n workflow_id, node_id, language = data[\"workflow_id\"], data[\"node_id\"], data[\"language\"]\n try:\n os.remove(self.notebook_name(workflow_id, node_id))\n except FileNotFoundError:\n pass\n\n # use input dataframe for headless\n seahorse_notebook_path = SeahorseNotebookPath(workflow_id, node_id, language, node_id, 0)\n self.process_notebook(seahorse_notebook_path)\n raise web.HTTPError(202)\n\n @staticmethod\n def notebook_storage_folder():\n return \"/home/jovyan/work/\"\n\n @staticmethod\n def notebook_name(workflow_id, node_id):\n return \"{0}{1}_{2}.html\".format(\n HeadlessNotebookHandler.notebook_storage_folder(), workflow_id, 
node_id)\n\n @staticmethod\n def no_execution_config(config):\n new_config = copy.deepcopy(config)\n new_config.ClearOutputPreprocessor.enabled = False\n new_config.ExecutePreprocessor.enabled = False\n return new_config\n\ndef load_jupyter_server_extension(nb_server_app):\n \"\"\"\n Called when the extension is loaded.\n\n Args:\n nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.\n \"\"\"\n web_app = nb_server_app.web_app\n host_pattern = '.*$'\n base_url = web_app.settings['base_url']\n route_pattern = url_path_join(base_url, '/HeadlessNotebook')\n\n web_app.add_handlers(host_pattern, [(route_pattern, HeadlessNotebookHandler)])\n\n # regex excludes dot character to prevent '/OfflineNotebook/workflowid/nodeid/custom.css' from being processed\n web_app.add_handlers(host_pattern, [(url_path_join(base_url,\n '/OfflineNotebook/(?P[^.]+)'), HeadlessNotebookHandler)])\n\n route_pattern_with_workflow_id = url_path_join(base_url, '/HeadlessNotebook/([^/]+)')\n web_app.add_handlers(host_pattern,\n [(route_pattern_with_workflow_id, web.StaticFileHandler, {\"path\": \"/home/jovyan/work/\"})])\n","repo_name":"deepsense-ai/seahorse","sub_path":"remote_notebook/headless_notebook_handler.py","file_name":"headless_notebook_handler.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"6"} +{"seq_id":"73652414589","text":"# 给你一个整数数组 nums 和两个整数 minK 以及 maxK 。\n\n# nums 的定界子数组是满足下述条件的一个子数组:\n\n# 子数组中的 最小值 等于 minK 。\n# 子数组中的 最大值 等于 maxK 。\n# 返回定界子数组的数目。\n\n# 子数组是数组中的一个连续部分。\n\nclass Solution(object):\n\n\n def countSubarrays(self, nums, minK, maxK):\n ans = 0\n min_i = max_i = i0 = -1\n\n for i, x in enumerate(nums):\n if x == minK: min_i = i\n if x == maxK: max_i = i\n \n if not minK <= x <= maxK: i0 = i\n\n ans += max(min(min_i, max_i) - i0, 0)\n return ans\n\n \"\"\"\n 超时做法\n \"\"\"\n def countSubarrays1(self, nums, minK, maxK):\n \"\"\"\n :type nums: List[int]\n :type minK: int\n :type maxK: int\n :rtype: int\n \"\"\"\n n = len(nums)\n\n dp = [[[float('inf'), float('-inf')] for _ in range(n)] for _ in range(n)]\n\n ans = 0\n for i in range(n):\n for j in range(i, n):\n if i == j:\n dp[i][j][0] = nums[j]\n dp[i][j][1] = nums[j]\n else:\n dp[i][j][0] = min(dp[i][j - 1][0], nums[j])\n dp[i][j][1] = max(dp[i][j - 1][1], nums[j])\n\n if dp[i][j][0] == minK and dp[i][j][1] == maxK:\n ans += 1\n \n return ans\n \n\n\n\nnums = [1,3,5,2,7,5]\nminK = 1\nmaxK = 5\n\na = Solution()\nprint(a.countSubarrays(nums, minK, maxK))","repo_name":"xxxxlc/leetcode","sub_path":"competition/单周赛/315/countSubarrays.py","file_name":"countSubarrays.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14976466160","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django. 
contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\n\nfrom .models import Post, Group, User, Follow\nfrom .forms import PostForm, CommentForm\nfrom yatube.settings import TEN_POSTS\n\n\ndef index(request):\n posts = Post.objects.all()\n paginator = Paginator(posts, TEN_POSTS)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n context = {'page': page}\n return render(request, 'index.html', context)\n\n\ndef group_posts(request, slug):\n group = get_object_or_404(Group, slug=slug)\n posts = group.posts.all()\n paginator = Paginator(posts, TEN_POSTS)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n context = {'group': group, 'page': page}\n return render(request, 'group.html', context)\n\n\ndef profile(request, username):\n profile = get_object_or_404(User, username=username)\n profile_posts = profile.posts.all()\n paginator = Paginator(profile_posts, TEN_POSTS)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n following = profile.following.filter(user__username=request.user)\n context = {\n 'posts': profile_posts,\n 'page': page,\n 'profile': profile,\n 'following': following,\n }\n return render(request, 'profile.html', context)\n\n\ndef post_view(request, username, post_id):\n profile = get_object_or_404(User, username=username)\n post = get_object_or_404(Post, author__username=username, id=post_id)\n following = profile.following.filter(user__username=request.user)\n form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.post = post\n comment.save()\n context = {\n 'post': post,\n 'form': form,\n 'profile': profile,\n 'following': following,\n }\n return render(request, 'post.html', context)\n\n\n@login_required\ndef add_comment(request, username, post_id):\n post = get_object_or_404(Post, author__username=username, id=post_id)\n form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.post = post\n comment.save()\n return redirect('post', username, post_id)\n\n\n@login_required\ndef post_edit(request, username, post_id):\n post = get_object_or_404(Post, author__username=username, id=post_id)\n if request.user.username != username:\n return redirect('post', username=username, post_id=post.id)\n form = PostForm(\n request.POST or None, files=request.FILES or None, instance=post)\n if form.is_valid():\n form.save()\n return redirect('post', username=username, post_id=post.id)\n is_edit = True\n context = {\n 'post': post,\n 'form': form,\n 'is_edit': is_edit,\n }\n return render(request, 'new_post.html', context)\n\n\n@login_required\ndef new_post(request):\n form = PostForm(request.POST or None, files=request.FILES or None)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('index')\n\n context = {'form': form}\n return render(request, 'new_post.html', context)\n\n\n# view-функции для подписки\n@login_required\ndef follow_index(request):\n user = request.user\n followings = Follow.objects.filter(\n user=user).select_related('author').all()\n posts = []\n if followings:\n for following in followings:\n posts += following.author.posts.all()\n page_number = request.GET.get('page')\n paginator = Paginator(posts, TEN_POSTS)\n page = paginator.get_page(page_number)\n context = {\n 'page': page,\n 'paginator': paginator\n }\n 
return render(request, \"follow.html\", context)\n\n\n@login_required\ndef profile_follow(request, username):\n user = request.user\n following = get_object_or_404(User, username=username)\n if (not Follow.objects.filter(user=user, author=following).exists()\n and request.user.username != username):\n Follow.objects.create(user=user, author=following)\n return redirect('profile', username)\n\n\n@login_required\ndef profile_unfollow(request, username):\n user = request.user\n following = get_object_or_404(User, username=username)\n follow = Follow.objects.filter(user=user, author=following)\n if follow.exists():\n follow.delete()\n return redirect('profile', username)\n\n\n# views для страниц с ошибками\ndef page_not_found(request, exception):\n return render(\n request,\n 'misc/404.html',\n {'path': request.path},\n status=404\n )\n\n\ndef server_error(request):\n return render(\n request,\n 'misc/500.html',\n status=500\n )\n","repo_name":"Iki-oops/hw05_final","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25375771770","text":"import numpy as np\nimport pandas as pd\nfrom com_cheese_api.util.file import FileReader\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\n\nimport os\n\nclass UserDf:\n def __init__(self):\n self.fileReader = FileReader()\n self.data = os.path.join(os.path.abspath(os.path.dirname(__file__))+'/data')\n self.odf = None\n\n #------------------------------------------ 데이터 셋 정제 ------------------------------------------#\n\n @staticmethod\n def userOrigin():\n user_data = pd.read_csv(\"com_cheese_api/resources/data/users.csv\")\n print(user_data)\n return user_data\n\n @staticmethod\n def make_barplot(x_name, y_name, data_name):\n font_path = 'C:\\\\Windows\\\\Fonts\\\\NanumGothic.ttf'\n font_name1 = fm.FontProperties(fname = font_path).get_name()\n plt.rc('font', family = font_name1)\n plt.xticks(rotation = 45)\n sns.barplot(x = x_name, y = y_name, data = data_name)\n # plt.show()\n\n @staticmethod\n def category_Count():\n user_data = UserDf.userOrigin()\n sub_size = user_data['buy_count'].groupby(user_data['sub1_category']).sum().reset_index(name='sub1_counts')\n sub_size['sub1_rank'] = sub_size['sub1_counts'].rank(ascending=False)\n barplot = UserDf.make_barplot('sub1_category', 'sub1_counts', sub_size)\n return sub_size\n\n @staticmethod\n def cheeseData():\n cheese_data = pd.read_csv(\"com_cheese_api/resources/data/cheese_data.csv\")\n return cheese_data\n\n @staticmethod\n def item_Count():\n user_data = UserDf.userOrigin()\n category_count = UserDf.category_Count()\n\n item_size = user_data['buy_count'].groupby([user_data['sub1_category'],user_data['sub2_category']]).sum().reset_index(name='sub2_counts')\n item_size['sub2_rank'] = item_size['sub2_counts'].rank(ascending=False, method=\"dense\")\n\n category_item_rank = pd.merge(category_count, item_size, on = 'sub1_category', how = 'right')\n user_item_rank = pd.merge(user_data, category_item_rank, on = 'sub2_category', how = 'left')\n # print(user_item_rank)\n\n user_items_ranks = user_item_rank.drop(['sub1_category_y'], axis=1)\n users_item_data = user_items_ranks.rename(columns={'sub1_category_x': 'sub1_category'})\n # print(users_item_data)\n\n # users_item_data.to_csv(os.path.join('com_cheese_api/resources/data', 
'user_item_counts3.csv'), index=False)\n return users_item_data\n\n @staticmethod\n def item_Change():\n cheese_data = UserDf.cheeseData()\n users_item_data = UserDf.item_Count()\n\n cheese_df = cheese_data.rename(columns={'ranking': 'sub2_rank'})\n user_cheese_merge = pd.merge(users_item_data, cheese_df, on = 'sub2_rank', how = 'left')\n user_data1 = user_cheese_merge.drop(['item_code', 'item_name', 'item_add_name', 'category_x', 'sub1_category', 'sub2_category', 'item_brand', 'sub1_counts', 'sub1_rank', 'sub2_counts', 'buy_price'], axis=1)\n user_data2 = user_data1.drop(['country', 'matching', 'matching.1', 'content', 'img'], axis=1)\n user_data_fin = user_data2.rename(columns={'Unnamed: 0_x': 'user_index', 'Unnamed: 0_y': 'cheese_code', 'brand': 'cheese_brand', 'name': 'cheese_name', 'price' : 'cheese_one_price', 'sub2_rank': 'cheese_rank', \\\n 'category_y': 'cheese_category', 'texture': 'cheese_texture', 'types': 'cheese_types'})\n # print(list(users_cheese_merge))\n # print(user_data_fin)\n user_data_fin.to_csv(os.path.join('com_cheese_api/resources/data', 'user_data.csv'), index=False)\n return user_data_fin\n # item_Change()\n\n # 데이터 정제 끝난 User 데이터 셋!\n @staticmethod\n def userData():\n user_data = UserDf.item_Change()\n return user_data\n\n @staticmethod\n def show_User_Df():\n user_data = UserDf.userData()\n show_user = user_data.head(10)\n show_column = list(user_data)\n print(show_user)\n print(show_column)\n return user_data\n\n @staticmethod\n def data_split ():\n user_data = UserDf.userData()\n user_train, user_test = train_test_split(user_data, test_size=0.3, random_state = 32)\n user_train.to_csv(os.path.join('com_cheese_api/resources/data', 'user_train.csv'), index=False)\n user_test.to_csv(os.path.join('com_cheese_api/resources/data', 'user_test.csv'), index=False)\n return user_train, user_test\n\n\n\n#######################\n def new_model(self, payload) -> object:\n this = self.fileReader\n this.data = self.data\n this.fname = payload\n print(f'{self.data}')\n print(f'{this.fname}')\n return pd.read_csv(Path(self.data, this.fname))\n\n @staticmethod\n def create_train(this):\n return this.train.drop('cheese_category', axis=1)\n\n @staticmethod\n def create_label(this):\n return this.train['cheese_category'] # Label = answer\n\n @staticmethod\n def drop_feature(this, feature):\n this.train = this.train.drop([feature], axis = 1)\n this.test = this.test.drop([feature], axis = 1)\n return this\n \n @staticmethod\n def gender_norminal(this):\n combine = [this.train, this.test]\n gender_mapping = {'male': 0, 'female': 1}\n for dataset in combine:\n dataset['user_gender'] = dataset['user_gender'].map(gender_mapping)\n this.train = this.train\n this.test = this.test\n return this\n\n\n # def user_Corr():\n # user_data = UserDf.userData()\n # userCorr = user \n # def make_heatmap\n\nif __name__ == '__main__':\n UserDf.show_User_Df()\n\n#------------------------------------------ 데이터 탐색 & 시각화 ------------------------------------------#\n\n # def make_wordcloud(self):\n # user_data = show_df()\n # user_df = _data.loc[:,['cheese_name']]\n # user_lists = np.array(user_df['cheese_name'].tolist())\n \n # with open('com_cheese_api/user/data/stopword.txt', 'r') as file:\n # lines = file.readlines()\n # stop_str = ''.join(lines)\n # stopword = stop_str.replace('\\n', ' ')\n # stopwords = stopword.split(' ')\n\n # sentences_tag = []\n \n # #형태소 분석하여 리스트에 넣기\n # for sentence in _lists:\n # morph = self.okt.pos(sentence)\n # sentences_tag.append(morph)\n # #print(morph)\n # #print('-' * 
30)\n \n # #print(sentences_tag)\n # #print('\\n' * 3)\n \n # noun_adj_list = []\n # #명사와 형용사만 구분하여 이스트에 넣기\n # for sentence1 in sentences_tag:\n # for word, tag in sentence1:\n # if word not in stopwords:\n # if tag in ['Noun']:\n # if len(word) >= 2:\n # noun_adj_list.append(word)\n \n \n # word_count_list = []\n # #형태소별 count\n # counts = Counter(noun_adj_list)\n # tags = counts.most_common(100)\n # word_count_list.append(tags)\n # word_list = sum(word_count_list, [])\n # print(word_list)\n # print(type(word_list))\n \n # # wordCloud 생성\n # # 한글 깨지는 문제 해결하기 위해 font_path 지정\n # wc = WordCloud(font_path='/usr/share/fonts/truetype/nanum/NanumBarunGothicBold.ttf', background_color='white', width=800, height=600)\n # #print(dict(tags))\n # cloud = wc.generate_from_frequencies(dict(tags))\n # plt.figure(figsize=(10, 8))\n # plt.axis('off')\n # plt.imshow(cloud)\n # return plt.show()\n\n \n\n","repo_name":"soominok/cheese-ai","sub_path":"com_cheese_api/resources/practice/self_practice.py","file_name":"self_practice.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2872835071","text":"from models.Url import Url\nfrom engine.ScrapeEngine import ScrapeEngine\nfrom models.ModulesEnum import ModulesEnum\nfrom typing import List, Dict, Any\n\n\nclass Request:\n\n def __init__(self) -> None:\n self.nCount = 0\n self.countRequired = 3\n self.KEY = \"\"\n self.URL = \"\"\n self.MODULES = 0\n\n def setURL(self, url: str) -> bool:\n self.URL = url\n if self.URL:\n self.nCount + 1\n return True\n else:\n return False\n\n def setMODULES(self, modules: str) -> None:\n self.MODULES = modules\n module = ModulesEnum()\n if self.MODULES in module.Enum:\n self.nCount + 1\n else:\n self.MODULES = None\n pass\n\n def setKey(self, key: str) -> bool:\n self\n self.KEY = key\n if self.KEY:\n self.nCount + 1\n return True\n else:\n return False\n\n def sendRequest(self) -> bool:\n print(\"sen_requewst {}\".format(self.countRequired))\n if self.nCount != self.countRequired:\n scrapObj = ScrapeEngine(self.URL, self.MODULES, self.KEY)\n print(scrapObj.get_soup())\n module = ModulesEnum()\n if self.MODULES == module.LINK:\n print(12)\n return scrapObj.get_links()\n elif self.MODULES == module.EMAIL:\n return scrapObj.get_emails()\n elif self.MODULES == module.TOKEN:\n print(1212)\n return scrapObj.get_words()\n return True\n else:\n return False\n","repo_name":"chroakPRO/oak-scrape","sub_path":"models/Request.py","file_name":"Request.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"74052850703","text":"from random import randint, sample\n\nimport config\nfrom keras.preprocessing.text import text_to_word_sequence\n\n\ndef get_intent(username, origin, destination, targets, middleboxes, qos, start, end, allow, block):\n intent = 'define intent ' + username + 'Intent:'\n if origin:\n intent = intent + ' from endpoint(\"' + origin + '\")'\n if destination:\n intent = intent + ' to endpoint(\"' + destination + '\")'\n\n for index, target in enumerate(targets):\n if target:\n if 'for' not in intent:\n intent = intent + ' for '\n intent = intent + 'client(\"' + target + '\")'\n\n if index != (len(targets) - 1):\n intent = intent + ', '\n\n for index, mb in enumerate(middleboxes):\n if mb:\n if 'add' not in intent:\n intent = intent + ' add '\n intent = intent + 'middlebox(\"' + mb + '\")'\n\n if index != 
(len(middleboxes) - 1):\n                intent = intent + ', '\n\n    for index, metric in enumerate(qos):\n        if metric and metric['name'] not in intent:\n            if 'with' not in intent:\n                intent = intent + ' with '\n\n            intent = intent + metric['name'] + '(\"' + metric['constraint']\n            intent = intent + '\",\"' + metric['value'] + '\")' if metric['constraint'] != 'none' else intent + '\")'\n\n            if index != (len(qos) - 1):\n                intent = intent + ', '\n\n    if start:\n        intent = intent + ' start hour(\"' + start + '\")'\n    if end:\n        intent = intent + ' end hour(\"' + end + '\")'\n\n    if allow:\n        if allow not in intent:\n            intent = intent + ' allow trafic(\"' + allow + '\")'\n    if block:\n        if block not in intent:\n            intent = intent + ' block trafic(\"' + block + '\")'\n\n    return intent\n\n\ndef get_entities(username, origin, destination, targets, middleboxes, qos, start, end, allow, block):\n    entities = username\n    if origin:\n        entities = entities + ' ' + origin\n    if destination:\n        entities = entities + ' ' + destination\n\n    for target in targets:\n        if target:\n            entities = entities + ' ' + target\n\n    for mb in middleboxes:\n        if mb:\n            entities = entities + ' ' + mb\n\n    for metric in qos:\n        if metric:\n            if metric['name'] not in entities:\n                entities = entities + ' ' + metric['name'] + ' ' + metric['constraint']\n                if metric['constraint'] != 'none':\n                    entities = entities + ' ' + metric['value']\n\n    if start:\n        entities = entities + ' ' + start\n    if end:\n        entities = entities + ' ' + end\n\n    if allow:\n        if allow not in entities:\n            entities = entities + ' allow ' + allow\n    if block:\n        if block not in entities:\n            entities = entities + ' block ' + block\n\n    return entities\n\n\ndef write():\n    with open(config.DATASET_PATH, 'wb') as file:\n        for i in range(config.DATASET_SIZE):\n            qos = []\n            sampled_metrics = sample(config.DATASET_QOS_METRICS, randint(0, 4))\n            for metric in sampled_metrics:\n                sampled_constraint = sample(config.DATASET_QOS_CONSTRAINTS, 1)[0]\n                while metric[0] == 'bandwidth' and sampled_constraint == 'none':\n                    sampled_constraint = sample(config.DATASET_QOS_CONSTRAINTS, 1)[0]\n                qos.append({'name': metric[0], 'constraint': sampled_constraint, 'value': str(randint(0, 100)) + metric[1]})\n\n            username = sample(config.DATASET_USERNAMES, 1)[0]\n            origin = sample(config.DATASET_LOCATIONS, 1)[0]\n            destination = sample(config.DATASET_LOCATIONS, 1)[0]\n            while destination == origin:\n                destination = sample(config.DATASET_LOCATIONS, 1)[0]\n            target = sample(config.DATASET_TARGETS, 1)[0]\n            mbs = [mb for mb in sample(config.DATASET_MIDDLEBOXES, randint(0, len(config.DATASET_MIDDLEBOXES)))]\n            start = sample(config.DATASET_HOURS, 1)[0]\n            end = sample(config.DATASET_HOURS, 1)[0]\n            allow = sample(config.DATASET_TRAFFIC, 1)[0]\n            block = sample(config.DATASET_TRAFFIC, 1)[0]\n            entities = get_entities(username, origin, destination, target, mbs, qos, start, end, allow, block)\n            intent = get_intent(username, origin, destination, target, mbs, qos, start, end, allow, block)\n            file.write(entities + ' > ' + intent + '\\n')\n\n\ndef write_alt():\n    with open(config.DATASET_PATH, 'wb') as file:\n        for i in range(config.DATASET_SIZE):\n            qos = []\n            for metric in range(randint(0, 2)):\n                qos.append({'name': '@qos_metric', 'constraint': '@qos_constraint', 'value': '@qos_value'})\n\n            username = '@username'\n            origin = '@location' if randint(0, 10) % 2 == 0 else ''\n            destination = '@location' if randint(0, 10) % 2 == 0 else ''\n            target = ['@target' for i in range(randint(0, 2))]\n            mbs = ['@middlebox' for i in range(randint(0, 2))]\n            start = '@hour' if 
randint(0, 10) % 2 == 0 else ''\n end = '@hour' if randint(0, 10) % 2 == 0 else ''\n allow = '@traffic' if randint(0, 10) % 2 == 0 else ''\n block = '@traffic' if randint(0, 10) % 2 == 0 else ''\n entities = get_entities(username, origin, destination, target, mbs, qos, start, end, allow, block)\n intent = get_intent(username, origin, destination, target, mbs, qos, start, end, allow, block)\n file.write(entities + ' > ' + intent + '\\n')\n\n\ndef read():\n lines = []\n\n input_words = []\n output_words = []\n\n with open(config.DATASET_PATH, 'r') as f:\n lines = f.read().split('\\n')\n\n for line in lines:\n if line and not line.startswith('#'):\n input_text, output_text = line.split('>')\n input_words.append(text_to_word_sequence(input_text, filters=config.DATASET_FILTERS))\n output_words.append(text_to_word_sequence(output_text, filters=config.DATASET_FILTERS))\n\n return input_words, output_words\n\n\ndef read_split():\n lines = []\n\n fit_input_words = []\n fit_output_words = []\n\n test_input_words = []\n test_output_words = []\n\n with open(config.DATASET_PATH, 'r') as f:\n lines = f.read().split('\\n')\n\n fit_lines = sample(lines, int(len(lines) * 0.7))\n for line in fit_lines:\n if line and not line.startswith('#'):\n input_text, output_text = line.split('>')\n fit_input_words.append(text_to_word_sequence(input_text, filters=config.DATASET_FILTERS))\n fit_output_words.append(text_to_word_sequence(output_text, filters=config.DATASET_FILTERS))\n\n test_lines = list(set(lines) - set(fit_lines))\n for line in test_lines:\n if line and not line.startswith('#'):\n input_text, output_text = line.split('>')\n fit_input_words.append(text_to_word_sequence(input_text, filters=config.DATASET_FILTERS))\n fit_output_words.append(text_to_word_sequence(output_text, filters=config.DATASET_FILTERS))\n\n return fit_input_words, fit_output_words, test_input_words, test_output_words\n\n\nif __name__ == \"__main__\":\n write_alt()\n","repo_name":"asjacobs92/nia","sub_path":"seq2seq/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"20202032896","text":"import datetime\nclass github:\n _repositories = []\n _expiresAt = datetime.datetime.utcnow()\n\n def __init__(self):\n self.refreshRepositories()\n self.refreshThread = None\n \n \n \n @classmethod\n def getRepositories(cls):\n return cls._repositories\n \n def refreshRepositories(self):\n repos = []\n import requests\n with requests.get('https://api.github.com/users/Troy-Pierce/repos') as req:\n if req.status_code == 200:\n for x in req.json():\n if not x['private']:\n repo = {\n 'name': x['full_name'],\n 'url': x['html_url'],\n 'description': x['description'],\n 'languages': []\n }\n with requests.get(f'https://api.github.com/repos/{repo.get(\"name\")}/languages') as rdetail:\n if rdetail.status_code == 200:\n languages = []\n for key in rdetail.json():\n languages.append(key)\n if len(languages) > 0:\n repo.update({'languages': languages})\n repos.append(repo)\n self.__class__._expiresAt = datetime.datetime.now()+datetime.timedelta(days=1)\n else:\n self.__class__._expiresAt = datetime.datetime.now()+datetime.timedelta(hours=1)\n self.__class__._repositories = repos\n from threading import Timer\n self.refreshThread = Timer((self.__class__._expiresAt-datetime.datetime.now()).total_seconds(), self.refreshRepositories).start()\n return self.__class__._repositories\n\n 
\n","repo_name":"Troy-Pierce/portfolio","sub_path":"flask/website/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72049677583","text":"\"\"\"Author: Brandon Trabucco.\nLoads training batches from the serialized tensorfloe dataset.\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\n\n\nfrom image_annotations.abstract import Abstract\n\n\nclass ModelInputs(Abstract):\n \"\"\"Utility class to load training batches.\n \"\"\"\n\n def __init__(self, input_file_pattern, is_training):\n \"\"\"Initialize useful global variables.\n \"\"\"\n\n self.reader = tf.TFRecordReader()\n self.input_file_pattern = input_file_pattern\n self.batch_size = 32\n self.values_per_input_shard = 16\n self.input_queue_capacity_factor = 16\n self.num_input_reader_threads = 4\n self.num_preprocess_threads = 4\n\n self.video_id_feature = \"image/video_id\"\n self.image_id_feature = \"image/image_id\"\n self.xs_feature = \"image/xs\"\n self.ys_feature = \"image/ys\"\n self.image_feature = \"image/image\"\n self.shape_feature = \"image/shape\"\n\n\n def parse_sequence_example(self, serialized):\n \"\"\"Parses a tensorflow.SequenceExample into an image and caption.\n Args:\n serialized: A scalar string Tensor; a single serialized SequenceExample.\n Returns:\n video_id: tf.int64 scalar identifying the source video.\n image_id: tf.int64 scalar identifying the source frame.\n xs: tf.float32[] list of object points.\n ys: tf.float32[] list of object points.\n image: tf.float32[] list of flattened image pixels.\n shape: tf.int64[] list of the original image shape.\n \"\"\"\n context, sequence = tf.parse_single_sequence_example(\n serialized,\n context_features={\n self.video_id_feature: tf.FixedLenFeature([], dtype=tf.int64),\n self.image_id_feature: tf.FixedLenFeature([], dtype=tf.int64)\n },\n sequence_features={\n self.xs_feature: tf.FixedLenSequenceFeature([], dtype=tf.float32),\n self.ys_feature: tf.FixedLenSequenceFeature([], dtype=tf.float32),\n self.image_feature: tf.FixedLenSequenceFeature([], dtype=tf.float32),\n self.shape_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n })\n\n return (context[self.video_id_feature], \n context[self.image_id_feature], \n sequence[self.xs_feature], \n sequence[self.ys_feature], \n sequence[self.image_feature], \n sequence[self.shape_feature])\n\n\n def prefetch_input_data(self, reader, file_pattern, batch_size,\n values_per_shard, input_queue_capacity_factor=16, num_reader_threads=1,\n shard_queue_name=\"filename_queue\", value_queue_name=\"input_queue\"):\n \"\"\"Prefetches string values from disk into an input queue.\n In training the capacity of the queue is important because a larger queue\n means better mixing of training examples between shards. The minimum number of\n values kept in the queue is values_per_shard * input_queue_capacity_factor,\n where input_queue_memory factor should be chosen to trade-off better mixing\n with memory usage.\n Args:\n reader: Instance of tf.ReaderBase.\n file_pattern: Comma-separated list of file patterns (e.g.\n /tmp/train_data-?????-of-00100).\n batch_size: Model batch size used to determine queue capacity.\n values_per_shard: Approximate number of values per shard.\n input_queue_capacity_factor: Minimum number of values to keep in the queue\n in multiples of values_per_shard. 
See comments above.\n num_reader_threads: Number of reader threads to fill the queue.\n shard_queue_name: Name for the shards filename queue.\n value_queue_name: Name for the values input queue.\n Returns:\n A Queue containing prefetched string values.\n \"\"\"\n data_files = []\n for pattern in file_pattern.split(\",\"):\n data_files.extend(tf.gfile.Glob(pattern))\n if not data_files:\n tf.logging.fatal(\"Found no input files matching %s\", file_pattern)\n else:\n tf.logging.info(\"Prefetching values from %d files matching %s\",\n len(data_files), file_pattern)\n\n filename_queue = tf.train.string_input_producer(\n data_files, shuffle=True, capacity=16, name=shard_queue_name)\n min_queue_examples = values_per_shard * input_queue_capacity_factor\n capacity = min_queue_examples + 100 * batch_size\n values_queue = tf.RandomShuffleQueue(\n capacity=capacity,\n min_after_dequeue=min_queue_examples,\n dtypes=[tf.string],\n name=\"random_\" + value_queue_name)\n\n enqueue_ops = []\n for _ in range(num_reader_threads):\n _, value = reader.read(filename_queue)\n enqueue_ops.append(values_queue.enqueue([value]))\n tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(\n values_queue, enqueue_ops))\n tf.summary.scalar(\n \"queue/%s/fraction_of_%d_full\" % (values_queue.name, capacity),\n tf.cast(values_queue.size(), tf.float32) * (1. / capacity))\n\n return values_queue\n\n\n def batch_with_dynamic_pad(self, enqueue_list, batch_size, queue_capacity):\n \"\"\"Batches input images.\n Args:\n images: A list of [video_id, image_id, xs, ys, image, shape]\n batch_size: Batch size.\n queue_capacity: Queue capacity.\n Returns:\n video_ids: tf.int64 Tensor identifying the source videos.\n image_ids: tf.int64 Tensor identifying the source frames.\n xss: tf.float32 Tensor of object points.\n yss: tf.float32 Tensor of object points.\n images: tf.float32 Tensor of flattened image pixels.\n shapes: tf.int64 Tensor of the original image shapes.\n \"\"\"\n\n batch = tf.train.batch_join(\n enqueue_list,\n batch_size=batch_size,\n capacity=queue_capacity,\n dynamic_pad=True,\n name=\"batch_and_pad\")\n\n return batch\n\n\n def distort_image(self, image, thread_id):\n \"\"\"Perform random distortions on an image.\n Args:\n image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions. There should be a multiple of 2 preprocessing threads.\n Returns:\n distorted_image: A float32 Tensor of shape [height, width, 3] with values in\n [0, 1].\n \"\"\"\n # Randomly flip horizontally.\n with tf.name_scope(\"flip_horizontal\", values=[image]):\n image = tf.image.random_flip_left_right(image)\n\n # Randomly distort the colors based on thread id.\n color_ordering = thread_id % 2\n with tf.name_scope(\"distort_color\", values=[image]):\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif color_ordering == 1:\n image = tf.image.random_brightness(image, max_delta=32. 
/ 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n\n # The random_* ops do not necessarily clamp.\n image = tf.clip_by_value(image, 0.0, 1.0)\n\n return image\n\n\n def start(self):\n \"\"\"Build the batch inputs and distore images for training.\n Returns:\n video_ids: tf.int64 Tensor identifying the source videos.\n image_ids: tf.int64 Tensor identifying the source frames.\n xss: tf.float32 Tensor of object points.\n yss: tf.float32 Tensor of object points.\n images: tf.float32 Tensor of image pixels.\n \"\"\"\n\n # Prefetch serialized SequenceExample protos.\n input_queue = self.prefetch_input_data(\n self.reader,\n self.input_file_pattern,\n batch_size=self.batch_size,\n values_per_shard=self.values_per_input_shard,\n input_queue_capacity_factor=self.input_queue_capacity_factor,\n num_reader_threads=self.num_input_reader_threads)\n\n # Image processing and random distortion. Split across multiple threads\n # with each thread applying a slightly different distortion.\n assert self.num_preprocess_threads % 2 == 0\n enqueue_list = []\n for thread_id in range(self.num_preprocess_threads):\n serialized_sequence_example = input_queue.dequeue()\n video_id, image_id, xs, ys, image, shape = self.parse_sequence_example(\n serialized_sequence_example)\n image = tf.reshape(image, shape)\n image = self.distort_image(image, thread_id=thread_id)\n enqueue_list.append([video_id, image_id, xs, ys, image])\n\n # Batch inputs.\n queue_capacity = (2 * self.num_preprocess_threads *\n self.batch_size)\n video_ids, image_ids, xss, yss, images = (\n self.batch_with_dynamic_pad(enqueue_list,\n batch_size=self.batch_size,\n queue_capacity=queue_capacity))\n\n return video_ids, image_ids, xss, yss, images\n","repo_name":"brandontrabucco/image_annotations","sub_path":"image_annotations/model/model_inputs.py","file_name":"model_inputs.py","file_ext":"py","file_size_in_byte":9792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12650603032","text":"\"\"\"\nauthor: sunshawn\nthis program use a regular expression to match the datas and make a data frame.\ndate: 2020.7.8\n\"\"\"\n\n\nimport re\nimport pandas as pd\nimport numpy as np\n\n\n# constant value definition\n# these constant values are used to map the data frame.\n# if don't, the AI will not be able to identify the datas.\nWEATHERS = {\n '晴': 0,\n '多云': 1,\n '阴': 2,\n '小雨': 3,\n '中雨': 4,\n '大雨': 5,\n '暴雨': 6,\n '小到中雨': 7,\n '中到大雨': 8,\n '大到暴雨': 9,\n '阵雨': 10,\n '雷阵雨': 11,\n '小雪': 12,\n '中雪': 13,\n '大雪': 14,\n '暴雪': 15,\n '小到中雪': 16,\n '中到大雪': 17,\n '大到暴雪': 18,\n '雨夹雪': 19\n}\n\nWIND_DIRECTIONS = ['东风', '东北风', '北风', '西北风', '西风', '西南风', '南风', '东南风']\n\nWIND_STRENGTH = ['1-2级', '2-3级', '3-4级', '4-5级', '5-6级', '6-7级', '7-8级']\n\n\ncount = 0\nWIND = dict()\nfor io in WIND_DIRECTIONS:\n for jo in WIND_STRENGTH:\n WIND[io + ' ' + jo] = count\n count += 1\nprint(WIND)\n\n\ndef scratch_info(html):\n \"\"\"\n used to scratch the useful information of the document\n :param html: the html file\n :return: the weather, the temperature, and the wind.\n \"\"\"\n # \\u4e00-\\u9fa5 中文\n weather = re.findall('\\r\\n +([\\u4e00-\\u9fa5]+)\\r\\n +/([\\u4e00-\\u9fa5]+)', html)\n temperature = re.findall('\\r\\n +(-?\\d)+℃\\r\\n +/\\r\\n +(-?\\d)+℃\\r\\n +', html)\n wind = re.findall('\\r\\n +([\\u4e00-\\u9fa5]+风) (.+?)级\\r\\n +/([\\u4e00-\\u9fa5]+风) (.+?)级', html)\n return 
weather, temperature, wind\n\n\ndef turn_info(info):\n \"\"\"\n turn the information into a list\n because the form of the source datas is ('...', '...') due to the morning and evening weather differences\n :param info: the information scratched\n :return: the completed list\n \"\"\"\n # print(info)\n target = []\n for i in info: # the list form\n if len(i) == 2: # weather\n for j in i: # the tuple form\n target.append(j)\n else:\n former = i[0] + i[1]\n latter = i[2] + i[3]\n target.append(former)\n target.append(latter)\n return target\n\n\ndef convert_data(dataframe):\n \"\"\"\n convert the string type of datas into integer type of datas\n :param dataframe: the data frame wanted to map\n :return: the mapped data frame\n \"\"\"\n dataframe['weather'] = dataframe['weather'].map(WEATHERS)\n dataframe['wind'] = dataframe['wind'].map(WIND)\n return dataframe\n\n\ndef make_dataframe(weather, temperature, wind):\n \"\"\"\n the function to make a data frame\n :param weather: weather list\n :param temperature: temperature list\n :param wind: wind list\n :return: data frame\n \"\"\"\n ready_list = []\n for i in range(len(weather)):\n try:\n ready_list.append([weather[i], temperature[i], wind[i]])\n except IndexError:\n print('Index Error Occurs.')\n print(len(weather))\n print(len(temperature))\n print(len(wind))\n ready_frame = np.array(ready_list)\n df = pd.DataFrame(ready_frame, columns=['weather', 'temperature', 'wind'])\n df = convert_data(df)\n return df\n","repo_name":"sunshawn/weather-forecast-python","sub_path":"src/exmatch.py","file_name":"exmatch.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72294843023","text":"from scores import GridScorer\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom source import *\nfrom utils import draw_heatmap_2D\nimport os\nimport argparse\nimport pickle\nimport torch\nimport utils\nimport model\n\nparser = argparse.ArgumentParser()\n# 1-step RNN\n# parser.add_argument('--f_in', type=str, default='../logs/01_isometry/20220727-223216-num_neurons=1800-005-1-positive_v=True-num_steps_train=200000-batch_size=10000-006-gpu=0/ckpt/weights.npy', help='Checkpoint path to load')\n# parser.add_argument('--f_in', type=str, default='../logs/04_rnn_isometry/20220827-234250-rnn_step=1-batch_size=8000-gpu=0/ckpt/checkpoint-step100000.pth', help='Checkpoint path to load')\n\n# 5-step RNN\n# parser.add_argument('--f_in', type=str, default='../logs/04_rnn_isometry/20220828-165259-rnn_step=1-adaptive_dr=True-reg_decay_until=20000-batch_size=8000-gpu=0/ckpt/weights.npy', help='Checkpoint path to load')\n\n\n# 10-step RNN\n# parser.add_argument('--f_in', type=str, default='../logs/01_isometry_rnn/20220802-215231-num_steps_train=200000-gpu=1/ckpt/weights.npy', help='Checkpoint path to load')\nparser.add_argument('--f_in', type=str, default='../logs/04_rnn_isometry/20220915-223938-rnn_step=10-block_size=12-005-1-adaptive_dr=True-reg_decay_until=15000-batch_size=8000-num_steps_train=100000-gpu=0/ckpt/checkpoint-step100000.pth', help='Checkpoint path to load')\n\n# parser.add_argument('--f_in', type=str, default='/home/gmm/Documents/workingspace/grid_cell_00/output/main_100_00_new_loss_small_area/2021-05-24-17-49-02--num_group=1--block_size=96--num_data=20000--weight_reg_u=6/syn/weights_7999.npy', help='Checkpoint path to load')\nparser.add_argument('--dir_out', type=str, default='test',\n help='Checkpoint path to load')\nFLAGS = parser.parse_args()\n\n# read 
ckpt\nckpt_path = FLAGS.f_in\nckpt = torch.load(ckpt_path)\n\nconfig = ckpt['config']\n\ndevice = utils.get_device(1)\n# config.b_scalar = True\n\nmodel_config = model.GridCellConfig(**config.model)\nmodel = model.GridCell(model_config)\nmodel.load_state_dict(ckpt['state_dict'])\nmodel.to(device)\n\n# np.save('../logs/04_rnn_isometry/20220828-165259-rnn_step=1-adaptive_dr=True-reg_decay_until=20000-batch_size=8000-gpu=0/ckpt/weights.npy', \\\n# model.encoder.v.data.cpu().numpy())\n\ndir_out = './output/test_gridness'\nlog_file = os.path.join(dir_out, 'log.txt')\n\ndir_out = os.path.join(dir_out, FLAGS.dir_out)\nif not os.path.exists(dir_out):\n os.mkdir(dir_out)\nnum_interval = 40\nblock_size = 12\nnum_block = 150\n\nstarts = [0.1] * 20\nends = np.linspace(0.2, 1.2, num=20)\n\n# starts = [0.2] * 10\n# ends = np.linspace(0.4, 1.6, num=20)\n\n# starts = [0.1] * 30 + [0.2] * 30\n# ends = np.concatenate([np.linspace(0.2, 1.5, num=30), np.linspace(0.3, 1.5, num=30)])\n\n\nmasks_parameters = zip(starts, ends.tolist())\n\n# weights_file = FLAGS.f_in\n# weights = np.load(weights_file)\nweights = model.encoder.v.data.cpu().numpy()\n# weights = np.transpose(weights, axes=[2, 0, 1])\nncol, nrow = block_size, num_block\n\nscorer = GridScorer(40, ((0, 1), (0, 1)), masks_parameters)\n\nscore_list = np.zeros(shape=[len(weights)], dtype=np.float32)\nscale_list = np.zeros(shape=[len(weights)], dtype=np.float32)\norientation_list = np.zeros(shape=[len(weights)], dtype=np.float32)\nsac_list = []\nplt.figure(figsize=(int(ncol * 1.6), int(nrow * 1.6)))\n\n\nfor i in range(len(weights)):\n rate_map = weights[i]\n rate_map = (rate_map - rate_map.min()) / (rate_map.max() - rate_map.min())\n \n score, autocorr_ori, autocorr, scale, orientation, peaks = \\\n gridnessScore(rateMap=rate_map, arenaDiam=1, h=1.0 /\n (num_interval-1), corr_cutRmin=0.3)\n \n if (i > 64 and i < 74) or (i > 74 and i < 77) or (i > 77 and i < 89) or (i > 89 and i < 92) or (i > 92 and i < 96):\n peaks = peaks0\n else:\n peaks0 = peaks\n \n\n score_60, score_90, max_60_mask, max_90_mask, sac = scorer.get_scores(\n weights[i])\n sac_list.append(sac)\n '''\n scorer.plot_sac(autocorr,\n ax=plt.subplot(nrow, ncol, i + 1),\n title=\"%.2f\" % (score_60),\n # title=\"%.2f, %.2f, %.2f\" % (score_60, scale, orientation),\n cmap='jet')\n '''\n \n scorer.plot_sac(sac,\n ax=plt.subplot(nrow, ncol, i + 1),\n title=\"\",\n # title=\"%.2f\" % (score_60),\n # title=\"%.2f, %.2f, %.2f\" % (score_60, scale, orientation),\n cmap='jet')\n '''\n scorer.plot_sac(sac,\n ax=plt.subplot(nrow, ncol, i + 1),\n title=\"%.2f\" % (max_60_mask[1]),\n # title=\"%.2f, %.2f, %.2f\" % (score_60, scale, orientation),\n cmap='jet')\n '''\n plt.subplots_adjust(wspace=0.2, hspace=0.2)\n score_list[i] = score_60\n # scale_list[i] = scale\n # print(max_60_mask)\n scale_list[i] = max_60_mask[1]\n orientation_list[i] = orientation\n# plt.savefig(os.path.join(dir_out, 'autocorr.png'), bbox_inches='tight')\nplt.savefig(os.path.join(dir_out, 'autocorr_score_noscore.png'), bbox_inches='tight')\n# plt.savefig(os.path.join(dir_out, 'polar.png'))\nplt.close()\nsac_list = np.asarray(sac_list)\n\n# with open(os.path.join(dir_out, 'stats.pkl'), \"wb\") as f:\n# pickle.dump([sac_list, score_list, scale_list, orientation_list], f)\n# np.set_printoptions(threshold=np.nan)\nnp.save(os.path.join(dir_out, 'score_list.npy'), score_list)\nnp.save(os.path.join(dir_out, 'scale_list.npy'), scale_list)\nnp.save(os.path.join(dir_out, 'orientation_list.npy'), orientation_list)\n\nscale_list = 
np.load(os.path.join(dir_out, 'scale_list.npy'))\nscore_list = np.load(os.path.join(dir_out, 'score_list.npy'))\norientation_list = np.load(os.path.join(dir_out, 'orientation_list.npy'))\n\nprint(score_list)\nprint(len(score_list[np.isnan(score_list)]))\nprint(np.mean(score_list[~np.isnan(score_list)]))\n\nprint(np.mean(scale_list))\nprint(len(scale_list))\nprint((scale_list * 40))\nprint(np.sum(score_list > 0.37) / len(score_list))\n\nplt.hist(orientation_list, density=True, bins=20)\nplt.show()\nplt.hist(orientation_list[score_list > 0.37], density=True, bins=20)\nplt.show()\n# with open(os.path.join(dir_out, 'stats.pkl'), \"rb\") as f:\n#     sac_list, score_list, scale_list, orientation_list = pickle.load(f)\n","repo_name":"DehongXu/grid-cell-rnn","sub_path":"gridness_score_collect.py","file_name":"gridness_score_collect.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"8195984697","text":"def caracter_perdido(n,str):\r\n    global perdida\r\n\r\n    if n >= 0:\r\n        mitad1 = str[:n]\r\n        mitad2 = str[n + 1:]\r\n        cond1=mitad1 + mitad2\r\n        perdida=cond1\r\n    elif n == -1:\r\n        cond2= str[:-1]\r\n        perdida=cond2\r\n    return perdida\r\ndef consignas():\r\n    consigna1 = str(input(\"Ingrese una palabra:\"))\r\n    consignas2= int(input(\"Ingrese un indice:\"))\r\n    caracter_perdido(consignas2,consigna1)\r\nconsignas()\r\n\r\n","repo_name":"juannma03/TP1","sub_path":"EJ 10-TP1-MICHAUX.py","file_name":"EJ 10-TP1-MICHAUX.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28233544442","text":"n = int(input())\ns = input()\nl = len(s)\n#예제와 같은 행렬을 만든다. \nlst = [[0]*(n) for _ in range(l//n)]\n\nx = -1\n#행의 길이만큼 돌면서 \nfor i in range(l//n) :\n    #짝수이면 그대로 넣고 \n    if i %2 ==0 :\n        for j in range(n) :\n            x+=1 \n            lst[i][j] = s[x]\n    #홀수이면 반대로 넣는다. \n    else :\n        for j in range(n-1,-1,-1) :\n            x += 1\n            lst[i][j] = s[x]\nresult = []\n#열 우선으로 출력한다. 
\nfor j in range(n) :\n for i in range(l//n) :\n result.append(lst[i][j])\n\nprint(''.join(map(str,result)))","repo_name":"holawan/Problem-Solving","sub_path":"BAEKJOON/구현/1855_암호.py","file_name":"1855_암호.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29838054204","text":"import pandas as pd \nimport re\nimport numpy as np\n\n\n# this is full dataset\ndf1= pd.read_csv('articleswithlabels.csv').reset_index(drop=True)\n\n# replace empty brackets with np.nan\ndf1.loc[(df1[' authors']==\"[]\"),\" authors\"]=np.nan\n\n# get the required columns, add the one for news_source url \nnewdf= df1[['title',' authors',' date',' url','Name','Bias']]\n\n# remove rows with null values\nindex_with_nan = df1.index[df1.isnull().any(axis=1)]\ndf=df1.drop(index_with_nan,0).reset_index(drop=True)\nprint(df.columns)\nauthorsmixed=pd.unique(df[' authors'])\n # print(authorsmixed[-100:])\n\n \n\nfor ele in authorsmixed:\n \n if(not(isinstance(ele,str))):\n print(\"haha\")","repo_name":"alt-nikitha/NewsMediaBiasDetection","sub_path":"random_code.py","file_name":"random_code.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"23547247105","text":"import functools\nimport logging\nimport os\nimport pickle\nimport random\nimport time\n\nimport cupy as cp\n\nfrom .backends import createBackend\nfrom .frontends import createFrontend\nfrom ._utils import (call_by_root, gen_run_env, HashableDict, is_running_mpiexec,\n load_benchmark_data, report, save_benchmark_data, reseed,\n is_running_mpi)\n\n\n# set up a logger\nlogger_name = \"cuquantum-benchmarks\"\nlogger = logging.getLogger(logger_name)\n\n\ndef run_interface(\n benchmarks, nqubits_interface, ngpus_interface, ncpu_threads_interface, frontend, backend, nwarmups, nrepeats, nshots_interface,\n nfused_interface, precision_interface, new_circ, save, cache_dir,\n cusvaer_global_index_bits, cusvaer_p2p_device_bits, cusvaer_data_transfer_buffer_bits, cusvaer_comm_plugin_type, cusvaer_comm_plugin_soname):\n\n reseed(1234) # TODO: use a global seed?\n backend, backend_config = backend # unpack\n ngpus = ngpus_interface if ngpus_interface is not None else backend_config['config']['ngpus']\n ncpu_threads = ncpu_threads_interface if ncpu_threads_interface is not None else backend_config['config']['ncputhreads']\n nshots = nshots_interface if nshots_interface is not None else backend_config['config']['nshots']\n nfused = nfused_interface if nfused_interface is not None else backend_config['config']['nfused']\n precision = precision_interface if precision_interface is not None else backend_config['config']['precision']\n\n general_interface = GeneralInterface(frontend=frontend,\n backend=backend,\n nshots=nshots,\n nfused=nfused,\n precision=precision,\n #append=append,\n new_circ=new_circ,\n save=save)\n\n for benchmark_name in benchmarks.keys(): # Iterate over diferent benchmarks\n benchmark = benchmarks[benchmark_name]\n\n gpu_device_properties = cp.cuda.runtime.getDeviceProperties(cp.cuda.Device().id)\n gpu_name = gpu_device_properties['name'].decode('utf-8').split(' ')[-1]\n if gpu_name not in benchmark['nqubits']:\n # Use the default config for this benchmark if there is no GPU-specific config\n gpu_name = 'default'\n nqubits_list = [nqubits_interface] if nqubits_interface else benchmark['nqubits'][gpu_name]\n\n benchmark_object = benchmark['benchmark']\n config = 
benchmark['config']\n config['precision'] = precision # WAR\n\n for nqubits in nqubits_list: # Iterate over diferent number of qubits\n run_specific = RunSpecific(benchmark_name=benchmark_name,\n benchmark_object=benchmark_object,\n nqubits=nqubits,\n ngpus=ngpus,\n ncpu_threads=ncpu_threads,\n nwarmups=nwarmups,\n nrepeats=nrepeats,\n config=config,\n general_interface=general_interface,\n cache_dir=cache_dir,\n cusvaer_global_index_bits=cusvaer_global_index_bits,\n cusvaer_p2p_device_bits=cusvaer_p2p_device_bits,\n cusvaer_data_transfer_buffer_bits=cusvaer_data_transfer_buffer_bits,\n cusvaer_comm_plugin_type=cusvaer_comm_plugin_type,\n cusvaer_comm_plugin_soname=cusvaer_comm_plugin_soname)\n run_specific.run()\n\n\nclass GeneralInterface:\n\n def __init__(self, frontend, backend, nshots, nfused, precision, new_circ, save):\n self.frontend = frontend\n self.backend = backend\n self.nshots = nshots\n self.nfused = nfused\n self.precision = precision\n #self.append = append\n self.new_circ = new_circ\n self.save = save\n self.full_data = {}\n\n\nclass RunSpecific:\n\n def __init__(\n self, benchmark_name, benchmark_object, nqubits, ngpus, ncpu_threads, nwarmups, nrepeats, config,\n general_interface, cache_dir,\n cusvaer_global_index_bits, cusvaer_p2p_device_bits, cusvaer_data_transfer_buffer_bits,\n cusvaer_comm_plugin_type, cusvaer_comm_plugin_soname):\n self.benchmark_name = benchmark_name\n self.benchmark_object = benchmark_object\n self.nqubits = nqubits\n self.ngpus = ngpus\n self.ncpu_threads = ncpu_threads\n self.nwarmups=nwarmups\n self.nrepeats=nrepeats\n self.config = config\n self.general_interface = general_interface\n self.benchmark_data = {}\n self.cache_dir = cache_dir\n # cusvaer options\n self.cusvaer_global_index_bits = cusvaer_global_index_bits\n self.cusvaer_p2p_device_bits = cusvaer_p2p_device_bits\n self.cusvaer_data_transfer_buffer_bits = cusvaer_data_transfer_buffer_bits\n self.cusvaer_comm_plugin_type = cusvaer_comm_plugin_type\n self.cusvaer_comm_plugin_soname = cusvaer_comm_plugin_soname\n\n # currently we assume the following subdirectories exist\n self.required_subdirs = ('circuits', 'data')\n\n def _load_or_generate_circuit(self, circuit_filename):\n # We need a mechanism to ensure any incompatible gate_sequence generated\n # and cached from the previous releases is invalidated. We do so by\n # assigning a version number gate_seq_ver for the gate sequence and\n # encoding it in the pickle filename.\n #\n # v0.1.0: the gate_sequence is a list of size-2 lists.\n # v0.2.0: the gate_sequence is a list of Gate objects. 
gate_seq_ver = 1.\n gate_seq_ver = 1\n\n circuit_filename += f\"_v{gate_seq_ver}.pickle\"\n frontend = createFrontend(self.general_interface.frontend, self.nqubits, self.config)\n try:\n if self.general_interface.new_circ:\n raise ValueError\n\n # If this circuit has been generated previously, load it\n with open(os.path.join(self.cache_dir, circuit_filename), 'rb') as f:\n gate_sequence = pickle.load(f)\n circuit = frontend.generateCircuit(gate_sequence)\n logger.debug(f'Circuit loaded from {circuit_filename}')\n\n except: # Otherwise, generate the circuit and save it\n gate_sequence = self.benchmark_object.generateGatesSequence(self.nqubits, self.config)\n circuit = frontend.generateCircuit(gate_sequence)\n def dump():\n with open(os.path.join(self.cache_dir, circuit_filename), 'wb') as f:\n pickle.dump(gate_sequence, f, protocol=pickle.HIGHEST_PROTOCOL)\n logger.debug(f'Circuit generated and saved to {circuit_filename}')\n call_by_root(dump)\n\n return circuit\n\n def get_circuit(self, circuit_filename):\n # This method ensures only the root process is responsible to generate/broadcast the circuit\n # so that all processes see the same circuit.\n MPI = is_running_mpi()\n circuit = call_by_root(functools.partial(self._load_or_generate_circuit, circuit_filename))\n if MPI:\n comm = MPI.COMM_WORLD\n circuit = comm.bcast(circuit)\n return circuit\n\n def timer(self, backend, circuit, nshots):\n perf_time = 0\n cuda_time = 0\n post_time = 0\n if self.ngpus > 0:\n start_gpu = cp.cuda.Event()\n end_gpu = cp.cuda.Event()\n\n # warm up\n for i in range(self.nwarmups):\n backend.pre_run(circuit, nshots=nshots)\n backend.run(circuit, nshots)\n\n # actual timing\n for i in range(self.nrepeats):\n backend.pre_run(circuit, nshots=nshots)\n\n if self.ngpus > 0:\n start_gpu.record()\n pe1 = time.perf_counter()\n\n run_dict = backend.run(circuit, nshots)\n\n pe2 = time.perf_counter()\n if self.ngpus > 0:\n end_gpu.record()\n\n perf_time += pe2 - pe1\n if self.ngpus > 0:\n end_gpu.synchronize()\n cuda_time += cp.cuda.get_elapsed_time(start_gpu, end_gpu) / 1000 # ms->s\n\n # TODO: remove results?\n results = run_dict['results']\n post_res = run_dict['post_results']\n run_data = run_dict['run_data']\n\n for k, v in run_data.items():\n self.benchmark_data[k] = v\n\n pe2 = time.perf_counter()\n post_process = self.benchmark_object.postProcess(self.nqubits, post_res)\n pe3 = time.perf_counter()\n post_time += pe3 - pe2\n\n return perf_time / self.nrepeats, cuda_time / self.nrepeats, post_time / self.nrepeats, post_process\n\n def _fix_filename_for_cutn(self, circuit_filename, nqubits):\n target = pauli = None\n if self.general_interface.backend == 'cutn':\n target = os.environ.get('CUTENSORNET_BENCHMARK_TARGET', 'amplitude')\n circuit_filename += f'_{target}'\n if target == 'expectation':\n pauli = random.choices(('I', 'X', 'Y', 'Z'), k=nqubits)\n circuit_filename += f\"_{''.join(pauli)}\"\n return circuit_filename, target, pauli\n\n def extract_backend_version(self):\n if 'aer' in self.general_interface.backend:\n import qiskit\n version = qiskit.__qiskit_version__['qiskit-aer']\n elif 'qsim' in self.general_interface.backend:\n import qsimcirq\n version = qsimcirq.__version__\n elif self.general_interface.backend == 'cutn':\n import cuquantum\n version = cuquantum.cutensornet.get_version()\n elif self.general_interface.backend == 'cirq':\n import cirq\n version = cirq.__version__\n elif self.general_interface.backend == 'naive':\n from .backends import backends\n version = backends['naive'].version\n elif 
self.general_interface.backend == 'pennylane':\n import pennylane\n version = pennylane.__version__\n elif self.general_interface.backend == 'pennylane-lightning-gpu':\n import pennylane_lightning_gpu\n version = pennylane_lightning_gpu.__version__\n elif self.general_interface.backend == 'pennylane-lightning-qubit':\n import pennylane_lightning\n version = pennylane_lightning.__version__\n elif self.general_interface.backend == 'pennylane-lightning-kokkos':\n import pennylane_lightning_kokkos\n version = pennylane_lightning_kokkos.__version__\n elif self.general_interface.backend in ('qulacs-gpu', 'qulacs-cpu'):\n import qulacs\n version = qulacs.__version__\n else:\n assert False\n return version\n\n def extract_frontend_version(self):\n if self.general_interface.frontend == 'qiskit':\n import qiskit\n version = qiskit.__qiskit_version__['qiskit-terra']\n elif self.general_interface.frontend == 'cirq':\n import cirq\n version = cirq.__version__\n elif self.general_interface.frontend == 'naive':\n from .frontends import frontends\n version = frontends['naive'].version\n elif self.general_interface.frontend == 'pennylane':\n import pennylane\n version = pennylane.__version__\n elif self.general_interface.frontend == 'qulacs':\n import qulacs\n version = qulacs.__version__\n else:\n assert False\n return version\n\n def extract_glue_layer_version(self):\n if self.general_interface.backend == 'cutn':\n import cuquantum\n glue_ver = f'cuquantum {cuquantum.__version__}'\n else:\n return None\n return glue_ver\n\n def run(self):\n measure = self.config['measure']\n\n # try to load existing perf data, if any\n data_filename = f'{self.benchmark_name}.json'\n filepath = f'{self.cache_dir}/data/{data_filename}'\n self.general_interface.full_data = load_benchmark_data(\n filepath, self.cache_dir, self.required_subdirs)\n\n gpu_device_properties = cp.cuda.runtime.getDeviceProperties(cp.cuda.Device().id)\n gpu_name = gpu_device_properties['name'].decode('utf-8').split(' ')[-1]\n num_qubits = str(self.nqubits)\n num_gpus = str(self.ngpus)\n\n # FIXME: this is buggy (no early return)\n # try:\n # if (self.general_interface.append\n # and num_gpus in self.general_interface.full_data[num_qubits][self.general_interface.frontend+'-v'+frontend_version][self.general_interface.backend+'-v'+backend_version][gpu_name]):\n # self.general_interface.logger.info(\n # f'Skipping {self.benchmark_name} with {self.nqubits} qubits and {self.ngpus} GPUs [{self.general_interface.backend}-v{backend_version}]')\n # except KeyError:\n # # KeyError means this configuration is not currently benchmarked, so we can continue running\n # self.general_interface.logger.debug('Benchmark configuration not found in existing data')\n # pass\n\n circuit_filename = f'circuits/{self.benchmark_name}_{self.nqubits}'\n\n if 'unfold' in self.config.keys() and self.config['unfold']:\n circuit_filename += '_unfold'\n if 'p' in self.config.keys():\n p = self.config['p']\n circuit_filename += f'_p{p}'\n if measure:\n circuit_filename += '_measure'\n circuit_filename, target, pauli = self._fix_filename_for_cutn(circuit_filename, self.nqubits)\n self.general_interface.cutn_target = target\n\n # get circuit\n circuit = self.get_circuit(circuit_filename)\n\n # get backend\n backend = createBackend(\n self.general_interface.backend, self.ngpus, self.ncpu_threads, self.general_interface.precision,\n nqubits=self.nqubits, # TODO: backend config\n cusvaer_global_index_bits=self.cusvaer_global_index_bits, # cusvaer options\n 
cusvaer_p2p_device_bits=self.cusvaer_p2p_device_bits,\n cusvaer_data_transfer_buffer_bits=self.cusvaer_data_transfer_buffer_bits,\n cusvaer_comm_plugin_type=self.cusvaer_comm_plugin_type,\n cusvaer_comm_plugin_soname=self.cusvaer_comm_plugin_soname,\n nfused=self.general_interface.nfused, # only qiskit and qsim\n )\n\n # get versions; it's assumed up to this point, the existence of Python modules for\n # both frontend and backend is confirmed\n backend_version = self.extract_backend_version()\n frontend_version = self.extract_frontend_version()\n glue_layer_version = self.extract_glue_layer_version()\n\n if self.ngpus == 0:\n logger.info(\n f'* Running {self.benchmark_name} with {self.ncpu_threads} CPU threads, and {self.nqubits} qubits [{self.general_interface.backend}-v{backend_version}]:')\n else:\n logger.info(\n f'* Running {self.benchmark_name} with {self.ngpus} GPUs, and {self.nqubits} qubits [{self.general_interface.backend}-v{backend_version}]:')\n\n preprocess_data = backend.preprocess_circuit(\n circuit,\n # only cutn needs these, TODO: backend config\n circuit_filename=os.path.join(self.cache_dir, circuit_filename),\n target=target,\n pauli=pauli\n )\n\n for k in preprocess_data.keys():\n self.benchmark_data[k] = preprocess_data[k]\n\n # run benchmark\n perf_time, cuda_time, post_time, post_process = self.timer(backend, circuit, self.general_interface.nshots) # nsamples -> nshots\n\n # report the result\n run_env = gen_run_env(gpu_device_properties)\n report(perf_time, cuda_time, post_time if post_process else None, self.ngpus,\n run_env, gpu_device_properties, self.benchmark_data)\n\n # Save the new benchmark data\n out = self.canonicalize_benchmark_data(frontend_version, backend_version, run_env, glue_layer_version)\n save_benchmark_data(\n *out,\n self.general_interface.full_data, filepath, self.general_interface.save)\n\n def canonicalize_benchmark_data(self, frontend_version, backend_version, run_env, glue_layer_version):\n \"\"\"\n json scheme: this is designed such that if any item in sim_config changes, the\n benchmark data would be appended, not overwriting.\n\n benchmark\n |_ num_qubits\n |_ sim_config_hash ( = hash string of sim_config )\n |_ benchmark_data\n |_ frontend (part of sim_config)\n |_ name\n |_ version\n |_ backend (part of sim_config)\n |_ name\n |_ version\n |_ ngpus\n |_ ncputhreads\n |_ nshots\n |_ nfused\n |_ precision\n |_ ... (all backend-specific options go here)\n |_ glue_layer (part of sim_config)\n |_ name\n |_ version\n |_ run_env (part of sim_config)\n |_ hostname\n |_ cpu_name\n |_ gpu_name\n |_ gpu_driver_ver\n |_ gpu_runtime_ver\n |_ nvml_driver_ver\n |_ cpu_time\n |_ gpu_time\n |_ ... 
(other timings, env info, ...)\n \"\"\"\n # TODO: consider recording cuquantum-benchmarks version?\n # TODO: alternatively, version each individual benchmark and record it?\n\n num_qubits = str(self.nqubits)\n\n sim_config = HashableDict({\n 'frontend': HashableDict({\n \"name\": self.general_interface.frontend,\n \"version\": frontend_version,\n }),\n 'backend': HashableDict({\n \"name\": self.general_interface.backend,\n \"version\": backend_version,\n \"ngpus\": self.ngpus,\n \"ncputhreads\": self.ncpu_threads,\n \"nshots\": self.general_interface.nshots,\n \"nfused\": self.general_interface.nfused,\n \"precision\": self.general_interface.precision,\n \"with_mpi\": is_running_mpiexec(),\n }),\n 'glue_layer': HashableDict({\n \"name\": None,\n \"version\": glue_layer_version,\n }),\n 'run_env': run_env,\n })\n\n # frontend-specific options\n # TODO: record \"measure\"?\n\n # backend-specific options\n if self.general_interface.backend == \"cusvaer\":\n sim_config[\"backend\"][\"cusvaer_global_index_bits\"] = self.cusvaer_global_index_bits\n sim_config[\"backend\"][\"cusvaer_p2p_device_bits\"] = self.cusvaer_p2p_device_bits\n elif self.general_interface.backend == \"cutn\":\n sim_config[\"backend\"][\"target\"] = self.general_interface.cutn_target\n\n sim_config_hash = sim_config.get_hash()\n self.benchmark_data = {**self.benchmark_data, **sim_config}\n\n return num_qubits, sim_config_hash, self.benchmark_data\n\n\nclass BenchApiRunner:\n\n supported_cusv_apis = ('apply_matrix',)\n supported_cutn_apis = ()\n supported_apis = supported_cusv_apis + supported_cutn_apis\n\n def __init__(self, **kwargs):\n self.num_qubits = kwargs.pop(\"nqubits\")\n self.benchmark = kwargs.pop(\"benchmark\")\n self.cache_dir = kwargs.pop(\"cachedir\")\n kwargs.pop(\"verbose\") # don't care\n self.args = kwargs # just hold the entire group of parsed cmdline args, don't unpack all\n\n # currently we assume the following subdirectories exist\n self.required_subdirs = ('data',)\n\n # load existing json, if any\n self.data_filename = f\"{self.benchmark}.json\"\n self.file_path = f'{self.cache_dir}/data/{self.data_filename}'\n self.full_data = load_benchmark_data(\n self.file_path, self.cache_dir, self.required_subdirs)\n\n def run(self):\n # prep\n if self.benchmark not in self.supported_apis:\n raise NotImplementedError(f\"only {self.supported_apis} is supported for now\")\n gpu_device_properties = cp.cuda.runtime.getDeviceProperties(cp.cuda.Device().id)\n benchmark_data = {} # dummy\n\n # time the api\n perf_time, cuda_time = self._run_apply_matrix()\n\n # report the result\n run_env = gen_run_env(gpu_device_properties)\n report(perf_time, cuda_time, None, 1,\n run_env, gpu_device_properties, benchmark_data)\n\n # Save the new benchmark data\n out = self.canonicalize_benchmark_data(run_env, benchmark_data)\n save_benchmark_data(*out, self.full_data, self.file_path)\n\n def _run_apply_matrix(self):\n # TODO: It's better to move this method elsewhere, once we support more apis\n from .benchmarks.apply_matrix import test_apply_matrix\n args = self.args\n\n # create targets while keeping args clean for later use\n ntargets = args.pop(\"ntargets\")\n targets = args.pop(\"targets\")\n targets = tuple(range(ntargets)) if targets is None else tuple(targets)\n args[\"targets\"] = targets\n\n # create controls while keeping args clean for later use\n ncontrols = args.pop(\"ncontrols\")\n controls = args.pop(\"controls\")\n if controls is None and ncontrols is None:\n controls = ()\n elif controls is None:\n controls = 
tuple(range(ncontrols))\n else:\n controls = tuple(controls)\n args[\"controls\"] = controls\n\n # run\n return test_apply_matrix(\n self.num_qubits,\n targets,\n controls,\n args[\"precision\"],\n args[\"precision\"], # TODO: allow different mat precision?\n args[\"layout\"],\n int(args[\"adjoint\"]),\n args[\"nwarmups\"],\n args[\"nrepeats\"],\n args[\"location\"],\n flush_l2=args[\"flush_cache\"],\n )\n\n def canonicalize_benchmark_data(self, run_env, benchmark_data):\n \"\"\"\n json scheme: this is designed such that if any item in sim_config changes, the\n benchmark data would be appended, not overwriting.\n\n benchmark\n |_ num_qubits\n |_ sim_config_hash ( = hash string of sim_config )\n |_ benchmark_data\n |_ api (part of sim_config)\n |_ name\n |_ cuqnt_py_ver\n |_ lib_ver\n |_ precision\n |_ ... (all api-specific options go here)\n |_ run_env (part of sim_config)\n |_ hostname\n |_ cpu_name\n |_ gpu_name\n |_ gpu_driver_ver\n |_ gpu_runtime_ver\n |_ nvml_driver_ver\n |_ cpu_time\n |_ gpu_time\n |_ ... (other timings, env info, ...)\n \"\"\"\n # TODO: consider recording cuquantum-benchmarks version?\n from cuquantum import __version__ as cuqnt_py_ver\n num_qubits = str(self.num_qubits)\n benchmark = self.benchmark\n\n if benchmark in self.supported_cusv_apis:\n from cuquantum import custatevec as lib\n elif benchmark in self.supported_cutn_apis:\n from cuquantum import cutensornet as lib\n else:\n assert False\n\n # Note: be mindful that we unpack self.args here, as it's designed to be\n # sensitive to any change in the cmdline options.\n sim_config = HashableDict({\n \"api\": HashableDict({**{\n \"name\": benchmark,\n \"cuqnt_py_ver\": cuqnt_py_ver,\n \"lib_ver\": lib.get_version(),\n }, **self.args}),\n 'run_env': run_env,\n })\n\n # TODO: remember to record cutn_target once we support it\n #elif self.args.backend == \"cutn\":\n # sim_config[\"backend\"][\"target\"] = self.args.cutn_target\n\n sim_config_hash = sim_config.get_hash()\n benchmark_data = {**benchmark_data, **sim_config}\n\n return num_qubits, sim_config_hash, benchmark_data\n","repo_name":"gomsigithub/cuquantum_benchmarks","sub_path":"cuquantum_benchmarks/run_interface.py","file_name":"run_interface.py","file_ext":"py","file_size_in_byte":25052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3515650857","text":"from __future__ import annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom itertools import islice\nfrom typing import (\n Any,\n Generic,\n Iterable,\n Callable,\n TypeVar,\n TYPE_CHECKING,\n Optional,\n Iterator,\n cast,\n Type,\n)\n\nfrom seamful.resource import (\n ModuleResource,\n PrivateResource,\n OverridingResource,\n UnboundResource,\n BoundResource,\n ResourceKind,\n ProviderResource,\n)\nfrom seamful.provider.errors import (\n MissingProviderMethod,\n ProviderMethodNotCallable,\n ProvidersModuleIsNotAModule,\n CannotProvideBaseModule,\n ProviderMethodMissingReturnTypeAnnotation,\n ProviderMethodReturnTypeMismatch,\n ProviderMethodParameterMissingTypeAnnotation,\n ProviderMethodParameterUnrelatedName,\n ProviderMethodParameterInvalidTypeAnnotation,\n ProviderMethodParameterMatchesResourceNameButNotType,\n ProvidersCannotBeInstantiated,\n ResourceDefinitionCannotReferToExistingResource,\n CannotDefineModuleResourceInProvider,\n PrivateResourceCannotOccludeModuleResource,\n CannotDependOnResourceFromAnotherProvider,\n OverridingResourceIncompatibleType,\n OverridingResourceNameDoesntMatchModuleResource,\n 
ProvidersDontSupportMultipleInheritance,\n ProviderDeclarationMissingModule,\n BaseProviderProvidesFromADifferentModule,\n ProvidersMustInheritFromProviderClass,\n IncompatibleResourceTypeForInheritedResource,\n ProviderModuleCantBeChanged,\n InvalidProviderAttributeName,\n InvalidProviderAttribute,\n ResourceModuleMismatch,\n ResourceProviderMismatch,\n UnknownModuleResource,\n UnknownProviderResource,\n CannotDependOnParentProviderResource,\n)\n\nT = TypeVar(\"T\")\n\nif TYPE_CHECKING:\n from seamful.module.module_type import ModuleType\n\nRESERVED_PROVIDER_ATTRIBUTES = (\"module\", \"resources\")\n\n\nclass ProviderType(type):\n _resources_by_name: dict[str, ProviderResource[Any]]\n _resources: set[ProviderResource[Any]]\n _provider_methods_by_resource: dict[BoundResource[Any], ProviderMethod[Any]]\n _bases: tuple[ProviderType, ...]\n\n def __init__(\n self,\n name: str,\n bases: tuple[ProviderType, ...],\n dct: dict[str, Any],\n *,\n module: Optional[ModuleType] = None,\n ) -> None:\n type.__init__(self, name, bases, dct)\n self._provider_methods_by_resource = {}\n self._resources_by_name = {}\n self._resources = set()\n if len(bases) == 0:\n self._bases = tuple()\n return\n if len(bases) > 1:\n raise ProvidersDontSupportMultipleInheritance(self, bases)\n base_provider = bases[0]\n self._module = self._get_module_from_class_declaration(base_provider, module)\n self._collect_resources(dct, base_provider)\n self._bases = (base_provider, *base_provider._bases)\n self._collect_provider_methods()\n\n def __call__(self, *args: Any, **kwargs: Any) -> None:\n raise ProvidersCannotBeInstantiated(self)\n\n def __iter__(self) -> Iterator[ProviderMethod[Any]]:\n return iter(self._provider_methods_by_resource.values())\n\n def __getitem__(self, resource: BoundResource[T]) -> ProviderMethod[T]:\n self._ensure_related_resource(resource)\n target_resource = (\n resource.overrides if isinstance(resource, OverridingResource) else resource\n )\n provider_method = self._provider_methods_by_resource[target_resource]\n return provider_method\n\n @property\n def module(self) -> ModuleType:\n return self._module\n\n @module.setter\n def module(self, value: Any) -> None:\n raise ProviderModuleCantBeChanged(self, value)\n\n @property\n def resources(\n self,\n ) -> Iterable[ProviderResource[Any]]:\n return self._resources\n\n def _get_module_from_class_declaration(\n self, base: type, module: Optional[ModuleType]\n ) -> ModuleType:\n from seamful.module.module_type import ModuleType, Module\n\n if base is Provider:\n if module is None:\n raise ProviderDeclarationMissingModule(self)\n elif module is Module:\n raise CannotProvideBaseModule(self)\n elif isinstance(module, ModuleType):\n return module\n else:\n raise ProvidersModuleIsNotAModule(self, module)\n elif issubclass(base, Provider):\n if module is not None and module is not base.module:\n raise BaseProviderProvidesFromADifferentModule(self, base, module)\n return base.module\n else:\n raise ProvidersMustInheritFromProviderClass(self, base)\n\n def _collect_provider_methods(self) -> None:\n for provider_resource in self._resources:\n provider_method = self._build_provider_method(provider_resource)\n self._add_provider_method(provider_method)\n for module_resource in self._module:\n if module_resource.name in self._resources_by_name:\n continue\n provider_method = self._build_provider_method(module_resource)\n self._add_provider_method(provider_method)\n\n def _build_provider_method(\n self,\n resource: BoundResource[T],\n ) -> ProviderMethod[T]:\n method = 
getattr(self, f\"provide_{resource.name}\", None)\n if method is None:\n raise MissingProviderMethod(resource, self)\n if not callable(method):\n raise ProviderMethodNotCallable(resource, self)\n signature = inspect.signature(method)\n if signature.return_annotation is signature.empty:\n raise ProviderMethodMissingReturnTypeAnnotation(self, resource, method)\n if not resource.is_supertype_of(signature.return_annotation):\n raise ProviderMethodReturnTypeMismatch(\n self, resource, method, mismatched_type=signature.return_annotation\n )\n method_dependencies = tuple(self._get_parameter_resources(signature, resource, method))\n\n bound_resource = (\n resource.overrides if isinstance(resource, OverridingResource) else resource\n )\n return ProviderMethod(\n provider=self,\n method=method,\n resource=bound_resource,\n dependencies=method_dependencies,\n )\n\n def _get_parameter_resources(\n self,\n signature: inspect.Signature,\n target: BoundResource[Any],\n method: Any,\n ) -> Iterable[tuple[str, BoundResource[Any]]]:\n # exclude first parameter (self)\n for name, parameter in islice(signature.parameters.items(), 1, None):\n yield name, self._get_parameter_resource(name, parameter, target, method)\n\n def _get_parameter_resource(\n self,\n name: str,\n parameter: inspect.Parameter,\n target: BoundResource[Any],\n method: Any,\n ) -> BoundResource[Any]:\n parameter_type: Any = parameter.annotation\n if parameter_type is inspect.Signature.empty:\n raise ProviderMethodParameterMissingTypeAnnotation(\n self, target, method, parameter_name=name\n )\n\n if isinstance(parameter_type, ModuleResource):\n return parameter_type\n\n if isinstance(parameter_type, ProviderResource):\n if parameter_type.provider in self._bases:\n raise CannotDependOnParentProviderResource(self, target, parameter_type, name)\n # when providers can be subclassed, part of this is a valid use case.\n raise CannotDependOnResourceFromAnotherProvider(self, target, parameter_type, name)\n\n if not isinstance(parameter_type, type):\n raise ProviderMethodParameterInvalidTypeAnnotation(\n self, target, method, name, parameter_type\n )\n\n # the parameter type is not a resource. 
We match the parameter's name with\n # the module's resource names.\n\n if name in self._resources_by_name:\n provider_resource = self._resources_by_name[name]\n self._ensure_parameter_type_satisfies_resource_type(\n parameter_type, provider_resource, target, name\n )\n return provider_resource\n elif name in self._module:\n module_resource = self._module[name]\n self._ensure_parameter_type_satisfies_resource_type(\n parameter_type, module_resource, target, name\n )\n return module_resource\n else:\n raise ProviderMethodParameterUnrelatedName(self, target, method, name, parameter_type)\n\n def _ensure_parameter_type_satisfies_resource_type(\n self,\n parameter_type: type,\n resource: BoundResource[Any],\n target: BoundResource[Any],\n parameter_name: str,\n ) -> None:\n if not resource.is_subtype_of(cast(Type[Any], parameter_type)):\n raise ProviderMethodParameterMatchesResourceNameButNotType(\n self,\n target,\n parameter_name=parameter_name,\n refers_to=resource,\n mismatched_type=parameter_type,\n )\n\n def _collect_resources(\n self,\n dct: dict[str, Any],\n base_provider: ProviderType,\n ) -> None:\n for name, candidate in dct.items():\n if name.startswith(\"_\") or name.startswith(\"provide_\"):\n continue\n resource = self._collect_resource(name, candidate)\n self._add_resource(resource)\n\n for base_resource in base_provider.resources:\n existing = self._resources_by_name.get(base_resource.name)\n if existing is not None:\n if not existing.is_subtype_of(base_resource.type):\n raise IncompatibleResourceTypeForInheritedResource(\n self,\n existing,\n base_provider=base_provider,\n base_resource=base_resource,\n )\n else:\n self._add_resource(base_resource.bound_to_sub_provider(self))\n\n def _collect_resource(self, name: str, candidate: Any) -> ProviderResource[Any]:\n if name in RESERVED_PROVIDER_ATTRIBUTES:\n raise InvalidProviderAttributeName(self, name, candidate, RESERVED_PROVIDER_ATTRIBUTES)\n if isinstance(candidate, UnboundResource):\n if name in self._module:\n if candidate.kind == ResourceKind.MODULE:\n raise CannotDefineModuleResourceInProvider(self, name, candidate.type)\n elif candidate.kind == ResourceKind.PRIVATE:\n raise PrivateResourceCannotOccludeModuleResource(self, name, candidate.type)\n return OverridingResource(candidate.type, name, self, self._module[name])\n else:\n if candidate.kind == ResourceKind.OVERRIDE:\n raise OverridingResourceNameDoesntMatchModuleResource(\n self, name, candidate.type\n )\n if candidate.kind == ResourceKind.MODULE:\n raise CannotDefineModuleResourceInProvider(self, name, candidate.type)\n return PrivateResource(candidate.type, name, self)\n elif isinstance(candidate, BoundResource):\n raise ResourceDefinitionCannotReferToExistingResource(self, name, candidate)\n elif isinstance(candidate, type):\n if name in self._module:\n overrides = self._module[name]\n overriding_resource = OverridingResource[Any](candidate, name, self, overrides)\n if not overriding_resource.is_subtype_of(overrides.type):\n raise OverridingResourceIncompatibleType(overriding_resource, overrides)\n return overriding_resource\n else:\n return PrivateResource[Any](candidate, name, self)\n else:\n raise InvalidProviderAttribute(self, name, candidate)\n\n def _add_resource(self, resource: ProviderResource[Any]) -> None:\n self._resources_by_name[resource.name] = resource\n self._resources.add(resource)\n setattr(self, resource.name, resource)\n\n def _add_provider_method(self, provider_method: ProviderMethod[Any]) -> None:\n 
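        # (Sketch of the naming/signature convention implemented above, with
        # hypothetical resource and class names: a resource "connection" is
        # served by a method named provide_connection, and its dependencies are
        # read from the method signature, skipping the first "self" parameter.)
        #
        #     import inspect
        #
        #     class ExampleProvider:
        #         def provide_connection(self, dsn: str) -> object: ...
        #
        #     sig = inspect.signature(ExampleProvider.provide_connection)
        #     deps = list(sig.parameters)[1:]   # -> ["dsn"]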
self._provider_methods_by_resource[provider_method.resource] = provider_method\n\n def _ensure_related_resource(self, resource: BoundResource[Any]) -> None:\n if isinstance(resource, ModuleResource):\n if resource.module is not self._module:\n raise ResourceModuleMismatch(self, resource)\n elif resource not in self._module:\n raise UnknownModuleResource(self, resource)\n elif isinstance(resource, (PrivateResource, OverridingResource)):\n if resource.provider is not self:\n raise ResourceProviderMismatch(self, resource)\n if resource not in self._resources:\n raise UnknownProviderResource(self, resource)\n else:\n raise TypeError()\n\n\n@dataclass(frozen=True)\nclass ProviderMethod(Generic[T]):\n method: Callable[..., T]\n provider: ProviderType\n resource: BoundResource[Any]\n dependencies: Iterable[tuple[str, BoundResource[Any]]]\n\n\nM = TypeVar(\"M\")\n\n\nclass Provider(metaclass=ProviderType):\n def __init_subclass__(cls, *, module: Optional[ModuleType] = None) -> None:\n pass\n","repo_name":"rdarder/seamful","sub_path":"src/seamful/provider/provider_type.py","file_name":"provider_type.py","file_ext":"py","file_size_in_byte":13430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"41498884017","text":"import json\nimport re\nfrom string import ascii_uppercase\nfrom time import time\nfrom urllib.parse import urljoin\n\nimport scrapy\nfrom more_itertools import first\nfrom scrapy import Request\n\nfrom product_spider.items import JkProduct, JKPackage\nfrom product_spider.utils.functions import strip\n\n\nclass JkPrdSpider(scrapy.Spider):\n name = \"jk\"\n allowed_domains = [\"jkchemical.com\"]\n base_url = \"http://www.jkchemical.com\"\n start_urls = map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x),\n ascii_uppercase)\n prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\"\n\n def parse(self, response):\n for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"):\n tmp_url = self.base_url + xp_url.extract()\n yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list)\n\n def parse_list(self, response):\n xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\")\n for xp_box in xp_boxes:\n div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\")\n brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '')\n rel_url = div.xpath('.//a[@class=\"name\"]/@href').get()\n img_url = div.xpath('.//img/@src').get()\n d = {\n 'brand': brand.replace('-', '') or None,\n \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(),\n \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()),\n \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(),\n \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()),\n \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()),\n 'prd_url': rel_url and urljoin(response.url, rel_url),\n 'img_url': img_url and urljoin(response.url, img_url),\n }\n data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get()\n data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get()\n\n yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())),\n body=u\"\",\n meta={\"prd_data\": d},\n callback=self.parse_package)\n\n next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get()\n if next_page:\n yield 
Request(urljoin(response.url, next_page), callback=self.parse_list)\n\n def parse_package(self, response):\n s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0]\n packages = json.loads(s)\n d = response.meta.get('prd_data', {})\n package = first(packages, {})\n if package:\n d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName')\n yield JkProduct(**d)\n for package_obj in packages:\n catalog_price = package_obj.get(\"CatalogPrice\", {})\n dd = {\n 'brand': d.get('brand'),\n 'cat_no': d.get('cat_no'),\n 'package': package_obj.get(\"stringFormat\"),\n 'price': catalog_price and catalog_price.get('Value'),\n 'currency': catalog_price and strip(catalog_price.get('Currency')),\n 'attrs': json.dumps(package_obj),\n }\n yield JKPackage(**dd)\n","repo_name":"Pandaaaa906/product_spider","sub_path":"product_spider/spiders/jk_spider.py","file_name":"jk_spider.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16992112457","text":"# -*- coding:utf8 -*-\nfrom __future__ import division\nimport os,sys,os.path\nimport time\nimport requests\nimport threading\n\ndef get_file_info_from_path(dir,topdown=True):\n dirinfo=[]\n for root, dirs, files in os.walk(dir, topdown):\n for name in files:\n dirinfo.append(os.path.join(root,name))\n return dirinfo\n\ndef submit_single_sample_debug(filepath):\n REST_URL = \"http://xxxxxxxx/tasks/create/file\"\n SAMPLE_FILE = filepath\n\n with open(SAMPLE_FILE, \"rb\") as sample:\n files = {\"file\": (os.path.basename(filepath), sample)}\n r = requests.post(REST_URL, files=files)\n\n task_id = r.json()[\"task_id\"][0]\n return task_id\n\ndef submit_single_sample(file):\n r = requests.post(\"http://xxxxxxxx/tasks/create/submit\", files=[\n\t (\"files\", open(file,'rb')),\n\t])\n submit_id = r.json()[\"submit_id\"]\n task_ids = r.json()[\"task_ids\"][0]\n errors = r.json()[\"errors\"]\n return task_ids\n\ndef query_task_status():\n r = requests.get(\"http://xxxxxxx/tasks/list\")\n tasks=r.json()['tasks']\n reports=[]\n for i in tasks:\n reports.append(i['status'])\n return reports\n\ndef submit_samples():\n filepath_list = get_file_info_from_path('data')\n i=1\n ids=[]\n for filepath in filepath_list[:]:\n ids.append(submit_single_sample_debug(filepath))\n print(ids)\n\ndef get_report_score(id):\n r=requests.get(\"http://xxxxxxxx/tasks/report/\"+str(id))\n if r.status_code!=200:\n print(\"fail to get report! 
code:\"+str(r.status_code))\n return 0\n score=r.json()['info']['score']\n return score\n\ndef delete_task(ids):\n print(\"delete:\")\n for id in ids:\n print(\"task:\"+str(id))\n r=requests.get(\"http://xxxxxxxx/tasks/delete/\"+str(id))\n errors = r.json()\n print(r.json())\n\ndef submit_query_report(file):\n id=submit_single_sample(file)\n time.sleep(10)\n reports=query_task_status()\n i=0\n while i5.0","repo_name":"bitsecurerlab/MAB-malware","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"21285368677","text":"from collections import defaultdict\nimport math\n\n\nin_file = open('in/day7.txt')\nst = in_file.read()\nin_file.close()\n\n# Possible edge: repeated directory names, weird traversal\n\nclass Directory:\n def __init__(self, name, size, parent):\n self.name = name\n self.size = size\n self.subdirs = []\n self.parent = parent\n\n def add_subdir(self, subdir):\n self.subdirs.append(subdir)\n\n def get_subdir(self, name):\n for subdir in self.subdirs:\n if subdir.name == name:\n return subdir\n return None\n\n def get_size(self):\n if self.size == 0:\n return sum([x.get_size() for x in self.subdirs])\n else:\n return self.size\n\n def __str__(self):\n return self.name + \" \" + str(self.size)\n\nclass File:\n def __init__(self, name, size):\n self.name = name\n self.size = size\n\n def get_size(self):\n return self.size\n\n def __str__(self):\n return self.name + \" \" + str(self.size)\n\ncurr_path = []\nroot_dir = Directory(\"/\", 0, None)\ncurr_dir = None\nfor line in st.splitlines():\n if line.startswith('$ cd'):\n s, cmd, dir = line.split()\n if dir == \"..\":\n curr_path.pop()\n curr_dir = curr_dir.parent\n elif dir == \"/\":\n curr_path = ['/']\n curr_dir = root_dir\n else:\n curr_path.append(dir)\n curr_dir = curr_dir.get_subdir(dir)\n elif line[0] != '$':\n size, name = line.split()\n if size == \"dir\":\n curr_dir.add_subdir(Directory(name, 0, curr_dir))\n else:\n size = int(size)\n new_file = File(name, size)\n curr_dir.add_subdir(new_file)\n\n# run a dfs from root_dir\ntotal = 0\nfilesize_map = {}\ndef total_size_under_100000(node, depth):\n global total\n size = node.get_size()\n filesize_map[node.name] = size\n if size < 100000: \n total += size\n for child in node.subdirs:\n if isinstance(child, Directory):\n total_size_under_100000(child, depth + 1)\n\ntotal_size_under_100000(root_dir, 0)\nprint(total)\n\n# Part 2\nTOTAL_SPACE = 70000000\nNEEDED_SPACE = 30000000\nUNUSED = TOTAL_SPACE - root_dir.get_size()\n\nmin_deletable = math.inf\nfor name, size in filesize_map.items():\n if size < min_deletable and size + UNUSED > NEEDED_SPACE:\n min_deletable = size\n\nprint(min_deletable)\n","repo_name":"jaredblack/advent-of-code-2022","sub_path":"day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20290945471","text":"import requests\nimport urllib.parse\nimport yt_dlp\nimport os\nimport sys\nimport re\nfrom utils import upperescape, checkconfig, offsethandler, YoutubeDLLogger, ytdl_hooks, ytdl_hooks_debug, setup_logging # NOQA\nfrom datetime import datetime\nimport schedule\nimport time\nimport logging\nimport argparse\n\n# allow debug arg for verbose logging\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('--debug', action='store_true', help='Enable debug 
logging')\nargs = parser.parse_args()\n\n# setup logger\nlogger = setup_logging(True, True, args.debug)\n\ndate_format = \"%Y-%m-%dT%H:%M:%SZ\"\nnow = datetime.now()\n\nCONFIGFILE = os.environ['CONFIGPATH']\nCONFIGPATH = CONFIGFILE.replace('config.yml', '')\nSCANINTERVAL = 60\n\n\nclass SonarrYTDL(object):\n\n def __init__(self):\n \"\"\"Set up app with config file settings\"\"\"\n cfg = checkconfig()\n\n # Sonarr_YTDL Setup\n\n try:\n self.set_scan_interval(cfg['sonarrytdl']['scan_interval'])\n try:\n self.debug = cfg['sonarrytdl']['debug'] in ['true', 'True']\n if self.debug:\n logger.setLevel(logging.DEBUG)\n for logs in logger.handlers:\n if logs.name == 'FileHandler':\n logs.setLevel(logging.DEBUG)\n if logs.name == 'StreamHandler':\n logs.setLevel(logging.DEBUG)\n logger.debug('DEBUGGING ENABLED')\n except AttributeError:\n self.debug = False\n except Exception:\n sys.exit(\"Error with sonarrytdl config.yml values.\")\n\n # Sonarr Setup\n try:\n api = \"api\"\n scheme = \"http\"\n basedir = \"\"\n if cfg['sonarr'].get('version', '').lower() == 'v4':\n api = \"api/v3\"\n logger.debug('Sonarr api set to v4')\n if cfg['sonarr']['ssl'].lower() == 'true':\n scheme = \"https\"\n if cfg['sonarr'].get('basedir', ''):\n basedir = '/' + cfg['sonarr'].get('basedir', '')\n\n self.base_url = \"{0}://{1}:{2}{3}\".format(\n scheme,\n cfg['sonarr']['host'],\n str(cfg['sonarr']['port']),\n basedir\n )\n self.sonarr_api_version = api\n self.api_key = cfg['sonarr']['apikey']\n except Exception:\n sys.exit(\"Error with sonarr config.yml values.\")\n\n # YTDL Setup\n try:\n self.ytdl_format = cfg['ytdl']['default_format']\n except Exception:\n sys.exit(\"Error with ytdl config.yml values.\")\n\n # YTDL Setup\n try:\n self.series = cfg[\"series\"]\n except Exception:\n sys.exit(\"Error with series config.yml values.\")\n\n def get_episodes_by_series_id(self, series_id):\n \"\"\"Returns all episodes for the given series\"\"\"\n logger.debug('Begin call Sonarr for all episodes for series_id: {}'.format(series_id))\n args = {'seriesId': series_id}\n res = self.request_get(\"{}/{}/episode\".format(\n self.base_url, \n self.sonarr_api_version\n ), args\n )\n return res.json()\n\n def get_episode_files_by_series_id(self, series_id):\n \"\"\"Returns all episode files for the given series\"\"\"\n res = self.request_get(\"{}/{}/episodefile?seriesId={}\".format(\n self.base_url, \n self.sonarr_api_version,\n series_id\n ))\n return res.json()\n\n def get_series(self):\n \"\"\"Return all series in your collection\"\"\"\n logger.debug('Begin call Sonarr for all available series')\n res = self.request_get(\"{}/{}/series\".format(\n self.base_url, \n self.sonarr_api_version\n ))\n return res.json()\n\n def get_series_by_series_id(self, series_id):\n \"\"\"Return the series with the matching ID or 404 if no matching series is found\"\"\"\n logger.debug('Begin call Sonarr for specific series series_id: {}'.format(series_id))\n res = self.request_get(\"{}/{}/series/{}\".format(\n self.base_url,\n self.sonarr_api_version,\n series_id\n ))\n return res.json()\n\n def request_get(self, url, params=None):\n \"\"\"Wrapper on the requests.get\"\"\"\n logger.debug('Begin GET with url: {}'.format(url))\n args = {\n \"apikey\": self.api_key\n }\n if params is not None:\n logger.debug('Begin GET with params: {}'.format(params))\n args.update(params)\n url = \"{}?{}\".format(\n url,\n urllib.parse.urlencode(args)\n )\n res = requests.get(url)\n return res\n\n def request_put(self, url, params=None, jsondata=None):\n 
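        # (Hypothetical usage sketch for the request wrappers: request_get
        # appends the API key plus any extra params to the query string.)
        #
        #     client = SonarrYTDL()
        #     res = client.request_get(
        #         "http://localhost:8989/api/v3/series",  # hypothetical URL
        #         params={"seriesId": 42},
        #     )
        #     series = res.json()
        #
        # Caveats in the code below: the docstring in request_put should sit
        # before the logger.debug call to be picked up as __doc__, and building
        # args as a tuple means args.update(params) raises AttributeError when
        # params is passed. rescanseries also formats "{}/{}/command" with a
        # single argument and passes request_put's arguments out of order; the
        # intended call is likely:
        #
        #     res = self.request_put(
        #         "{}/{}/command".format(self.base_url, self.sonarr_api_version),
        #         None,
        #         data,
        #     )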
logger.debug('Begin PUT with url: {}'.format(url))\n \"\"\"Wrapper on the requests.put\"\"\"\n headers = {\n 'Content-Type': 'application/json',\n }\n args = (\n ('apikey', self.api_key),\n )\n if params is not None:\n args.update(params)\n logger.debug('Begin PUT with params: {}'.format(params))\n res = requests.post(\n url,\n headers=headers,\n params=args,\n json=jsondata\n )\n return res\n\n def rescanseries(self, series_id):\n \"\"\"Refresh series information from trakt and rescan disk\"\"\"\n logger.debug('Begin call Sonarr to rescan for series_id: {}'.format(series_id))\n data = {\n \"name\": \"RescanSeries\",\n \"seriesId\": str(series_id)\n }\n res = self.request_put(\n \"{}/{}/command\".format(self.base_url),\n None, \n self.sonarr_api_version,\n data\n )\n return res.json()\n\n def filterseries(self):\n \"\"\"Return all series in Sonarr that are to be downloaded by youtube-dl\"\"\"\n series = self.get_series()\n matched = []\n for ser in series[:]:\n for wnt in self.series:\n if wnt['title'] == ser['title']:\n # Set default values\n ser['subtitles'] = False\n ser['playlistreverse'] = True\n ser['subtitles_languages'] = ['en']\n ser['subtitles_autogenerated'] = False\n # Update values\n if 'regex' in wnt:\n regex = wnt['regex']\n if 'sonarr' in regex:\n ser['sonarr_regex_match'] = regex['sonarr']['match']\n ser['sonarr_regex_replace'] = regex['sonarr']['replace']\n if 'site' in regex:\n ser['site_regex_match'] = regex['site']['match']\n ser['site_regex_replace'] = regex['site']['replace']\n if 'offset' in wnt:\n ser['offset'] = wnt['offset']\n if 'cookies_file' in wnt:\n ser['cookies_file'] = wnt['cookies_file']\n if 'format' in wnt:\n ser['format'] = wnt['format']\n if 'playlistreverse' in wnt:\n if wnt['playlistreverse'] == 'False':\n ser['playlistreverse'] = False\n if 'subtitles' in wnt:\n ser['subtitles'] = True\n if 'languages' in wnt['subtitles']:\n ser['subtitles_languages'] = wnt['subtitles']['languages']\n if 'autogenerated' in wnt['subtitles']:\n ser['subtitles_autogenerated'] = wnt['subtitles']['autogenerated']\n ser['url'] = wnt['url']\n matched.append(ser)\n for check in matched:\n if not check['monitored']:\n logger.warn('{0} is not currently monitored'.format(ser['title']))\n del series[:]\n return matched\n\n def getseriesepisodes(self, series):\n needed = []\n for ser in series[:]:\n episodes = self.get_episodes_by_series_id(ser['id'])\n for eps in episodes[:]:\n eps_date = now\n if \"airDateUtc\" in eps:\n eps_date = datetime.strptime(eps['airDateUtc'], date_format)\n if 'offset' in ser:\n eps_date = offsethandler(eps_date, ser['offset'])\n if not eps['monitored']:\n episodes.remove(eps)\n elif eps['hasFile']:\n episodes.remove(eps)\n elif eps_date > now:\n episodes.remove(eps)\n else:\n if 'sonarr_regex_match' in ser:\n match = ser['sonarr_regex_match']\n replace = ser['sonarr_regex_replace']\n eps['title'] = re.sub(match, replace, eps['title'])\n needed.append(eps)\n continue\n if len(episodes) == 0:\n logger.info('{0} no episodes needed'.format(ser['title']))\n series.remove(ser)\n else:\n logger.info('{0} missing {1} episodes'.format(\n ser['title'],\n len(episodes)\n ))\n for i, e in enumerate(episodes):\n logger.info(' {0}: {1} - {2}'.format(\n i + 1,\n ser['title'],\n e['title']\n ))\n return needed\n\n def appendcookie(self, ytdlopts, cookies=None):\n \"\"\"Checks if specified cookie file exists in config\n - ``ytdlopts``: Youtube-dl options to append cookie to\n - ``cookies``: filename of cookie file to append to Youtube-dl opts\n returns:\n ytdlopts\n 
original if problem with cookies file\n updated with cookies value if cookies file exists\n \"\"\"\n if cookies is not None:\n cookie_path = os.path.abspath(CONFIGPATH + cookies)\n cookie_exists = os.path.exists(cookie_path)\n if cookie_exists is True:\n ytdlopts.update({\n 'cookiefile': cookie_path\n })\n # if self.debug is True:\n logger.debug(' Cookies file used: {}'.format(cookie_path))\n if cookie_exists is False:\n logger.warning(' cookie files specified but doesn''t exist.')\n return ytdlopts\n else:\n return ytdlopts\n\n def customformat(self, ytdlopts, customformat=None):\n \"\"\"Checks if specified cookie file exists in config\n - ``ytdlopts``: Youtube-dl options to change the ytdl format for\n - ``customformat``: format to download\n returns:\n ytdlopts\n original: if no custom format\n updated: with new format value if customformat exists\n \"\"\"\n if customformat is not None:\n ytdlopts.update({\n 'format': customformat\n })\n return ytdlopts\n else:\n return ytdlopts\n\n def ytdl_eps_search_opts(self, regextitle, playlistreverse, cookies=None):\n ytdlopts = {\n 'ignoreerrors': True,\n 'playlistreverse': playlistreverse,\n 'matchtitle': regextitle,\n 'quiet': True,\n\n }\n if self.debug is True:\n ytdlopts.update({\n 'quiet': False,\n 'logger': YoutubeDLLogger(),\n 'progress_hooks': [ytdl_hooks],\n })\n ytdlopts = self.appendcookie(ytdlopts, cookies)\n if self.debug is True:\n logger.debug('Youtube-DL opts used for episode matching')\n logger.debug(ytdlopts)\n return ytdlopts\n\n def ytsearch(self, ydl_opts, playlist):\n try:\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n result = ydl.extract_info(\n playlist,\n download=False\n )\n except Exception as e:\n logger.error(e)\n else:\n video_url = None\n if 'entries' in result and len(result['entries']) > 0:\n try:\n video_url = result['entries'][0].get('webpage_url')\n except Exception as e:\n logger.error(e)\n else:\n video_url = result.get('webpage_url')\n if playlist == video_url:\n return False, ''\n if video_url is None:\n logger.error('No video_url')\n return False, ''\n else:\n return True, video_url\n\n def download(self, series, episodes):\n if len(series) != 0:\n logger.info(\"Processing Wanted Downloads\")\n for s, ser in enumerate(series):\n logger.info(\" {}:\".format(ser['title']))\n for e, eps in enumerate(episodes):\n if ser['id'] == eps['seriesId']:\n cookies = None\n url = ser['url']\n if 'cookies_file' in ser:\n cookies = ser['cookies_file']\n ydleps = self.ytdl_eps_search_opts(upperescape(eps['title']), ser['playlistreverse'], cookies)\n found, dlurl = self.ytsearch(ydleps, url)\n if found:\n logger.info(\" {}: Found - {}:\".format(e + 1, eps['title']))\n ytdl_format_options = {\n 'format': self.ytdl_format,\n 'quiet': True,\n 'merge-output-format': 'mp4',\n 'outtmpl': '/sonarr_root{0}/Season {1}/{2} - S{1}E{3} - {4} WEBDL.%(ext)s'.format(\n ser['path'],\n eps['seasonNumber'],\n ser['title'],\n eps['episodeNumber'],\n eps['title']\n ),\n 'progress_hooks': [ytdl_hooks],\n 'noplaylist': True,\n }\n ytdl_format_options = self.appendcookie(ytdl_format_options, cookies)\n if 'format' in ser:\n ytdl_format_options = self.customformat(ytdl_format_options, ser['format'])\n if 'subtitles' in ser:\n if ser['subtitles']:\n postprocessors = []\n postprocessors.append({\n 'key': 'FFmpegSubtitlesConvertor',\n 'format': 'srt',\n })\n postprocessors.append({\n 'key': 'FFmpegEmbedSubtitle',\n })\n ytdl_format_options.update({\n 'writesubtitles': True,\n 'allsubtitles': True,\n 'writeautomaticsub': True,\n 'subtitleslangs': 
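                        # (Standalone yt-dlp sketch of these same subtitle
                        # options, with a hypothetical URL:)
                        #
                        #     import yt_dlp
                        #     opts = {
                        #         "writesubtitles": True,
                        #         "writeautomaticsub": True,
                        #         "subtitleslangs": ["en"],
                        #         "postprocessors": [
                        #             {"key": "FFmpegSubtitlesConvertor",
                        #              "format": "srt"},
                        #             {"key": "FFmpegEmbedSubtitle"},
                        #         ],
                        #     }
                        #     yt_dlp.YoutubeDL(opts).download(
                        #         ["https://example.com/video"])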
ser['subtitles_languages'],\n 'postprocessors': postprocessors,\n })\n\n\n if self.debug is True:\n ytdl_format_options.update({\n 'quiet': False,\n 'logger': YoutubeDLLogger(),\n 'progress_hooks': [ytdl_hooks_debug],\n })\n logger.debug('Youtube-DL opts used for downloading')\n logger.debug(ytdl_format_options)\n try:\n yt_dlp.YoutubeDL(ytdl_format_options).download([dlurl])\n self.rescanseries(ser['id'])\n logger.info(\" Downloaded - {}\".format(eps['title']))\n except Exception as e:\n logger.error(\" Failed - {} - {}\".format(eps['title'], e))\n else:\n logger.info(\" {}: Missing - {}:\".format(e + 1, eps['title']))\n else:\n logger.info(\"Nothing to process\")\n\n def set_scan_interval(self, interval):\n global SCANINTERVAL\n if interval != SCANINTERVAL:\n SCANINTERVAL = interval\n logger.info('Scan interval set to every {} minutes by config.yml'.format(interval))\n else:\n logger.info('Default scan interval of every {} minutes in use'.format(interval))\n return\n\n\ndef main():\n client = SonarrYTDL()\n series = client.filterseries()\n episodes = client.getseriesepisodes(series)\n client.download(series, episodes)\n logger.info('Waiting...')\n\n\nif __name__ == \"__main__\":\n logger.info('Initial run')\n main()\n schedule.every(int(SCANINTERVAL)).minutes.do(main)\n while True:\n schedule.run_pending()\n time.sleep(1)\n","repo_name":"whatdaybob/sonarr_youtubedl","sub_path":"app/sonarr_youtubedl.py","file_name":"sonarr_youtubedl.py","file_ext":"py","file_size_in_byte":17579,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"47"} +{"seq_id":"21191926068","text":"from flask import Flask, render_template \nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup \nfrom io import BytesIO\nimport base64\nimport matplotlib.pyplot as plt\nimport dateparser\nfrom matplotlib import style\n\napp = Flask(__name__)\n\ndef scrap(url):\n #This is fuction for scrapping\n url_get = requests.get('https://monexnews.com/kurs-valuta-asing.htm?kurs=JPY&searchdatefrom=01-01-2019&searchdateto=01-12-2019')\n soup = BeautifulSoup(url_get.content,\"html.parser\")\n \n #Find the key to get the information\n table = soup.find('table', attrs={'class':'centerText newsTable2'})\n tr = table.find_all('tr')\n\n temp = [] #initiating a tuple\n\n for i in range(1, len(tr)):\n row = table.find_all('tr')[i]\n\n #get dates\n date = row.find_all('td')[0].text\n date = date.strip() #for removing the excess whitespace\n\n #get ask\n ask = row.find_all('td')[1].text\n ask = ask.strip() #for removing the excess whitespace\n\n #get bid\n bid = row.find_all('td')[2].text\n bid = bid.strip() #for removing the excess whitespace \n\n\n\n temp.append((date, ask, bid)) #append the needed information \n \n temp = temp[::-1] #remove the header\n\n df = pd.DataFrame(temp, columns = ('date','bid', 'ask')) #creating the dataframe\n #data wranggling - try to change the data type to right data type\n\n ## change 'date' data types\n df['date'] = df['date'].apply(lambda x: dateparser.parse(x))\n \n ## converting 'ask' and 'bid' data types\n df['ask'] = df['ask'].replace(',', '.', regex = True)\n df['bid'] = df['bid'].replace(',', '.', regex = True)\n df[\"ask\"] = pd.to_numeric(df[\"ask\"], errors='coerce')\n df[\"bid\"] = pd.to_numeric(df[\"bid\"], errors='coerce')\n \n #end of data wranggling\n\n return df\n\n@app.route(\"/\")\ndef index():\n df = scrap('https://monexnews.com/kurs-valuta-asing.htm?kurs=JPY&searchdatefrom=01-01-2019&searchdateto=01-12-2019') #insert url here\n\n #This 
part for rendering matplotlib\n fig = plt.figure(figsize=(5,2),dpi=300)\n df.plot(x = 'date',y = ['bid', 'ask'])\n \n #Do not change this part\n plt.savefig('plot1',bbox_inches=\"tight\") \n figfile = BytesIO()\n plt.savefig(figfile, format='png')\n figfile.seek(0)\n figdata_png = base64.b64encode(figfile.getvalue())\n result = str(figdata_png)[2:-1]\n #This part for rendering matplotlib\n\n #this is for rendering the table\n df = df.to_html(classes=[\"table table-bordered table-striped table-dark table-condensed\"])\n\n return render_template(\"index.html\", table=df, result=result)\n\n\nif __name__ == \"__main__\": \n app.run()\n","repo_name":"ezrasote/alg_capt_webscraping","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19798242376","text":"from iss.surveys.abstractParametrisedObject import AbstractParametrisedObject\nfrom iss.surveys.survey import Survey\nfrom iss.surveys.parser import parse_tree\nfrom iss.surveys.varId import VarId\nfrom iss.surveys.value import Value\n\n\nclass Assignment(AbstractParametrisedObject):\n\n def generate_js(self):\n value_tree = self.result_tree[\n parse_tree['CHILDREN_TREES']][self.value_index]\n if value_tree[parse_tree['PROD_NAME']] == 'widget':\n return self.get_widget_value_js()\n\n var_js = self.get_assignment_var_js()\n value_js = self.get_assignment_value_js()\n\n return (Survey.survey_var + \".addAssignment( function() {\"\n + var_js + ' = ' + value_js + '() });\\n')\n\n def get_assignment_var_js(self):\n var_tree = self.result_tree[parse_tree['CHILDREN_TREES']][\n self.var_index]\n var_id = VarId(var_tree)\n\n return var_id.generate_simple_js()\n\n def get_assignment_value_js(self):\n value_tree = self.result_tree[\n parse_tree['CHILDREN_TREES']][self.value_index]\n prod_name = value_tree[parse_tree['PROD_NAME']]\n production = Survey.string_to_class(prod_name)(value_tree)\n\n return production.generate_js()\n\n def get_widget_value_js(self):\n children_trees = self.result_tree[parse_tree['CHILDREN_TREES']]\n\n value_tree = children_trees[self.value_index]\n var_tree = children_trees[self.var_index]\n\n var_id = Value(var_tree).generate_simple_js()\n widget = Survey.string_to_class('widget')(\n value_tree, additional_js_args=['resultVarName: \"' + var_id + '\"'])\n\n return widget.generate_js()\n","repo_name":"agnieszkapaszkowska/mega-ankieta","sub_path":"django_app/iss/iss/surveys/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73737240782","text":"# coding: utf-8\n\"\"\"\n@Author: Randy\n@Date: 2020-12-30\n@LastEditTime: 2020-12-30\n@Description: api test for module Record\n@FilePath: record_start_stop_callback_delete_api_test.py\n\"\"\"\n\nimport audio\nimport utime\n\ndef record_callback(para):\n '''\n 录音回调函数\n :param para:\n :return:\n '''\n print(\"file_name:\",para[0])\n print(\"audio_len:\",para[1])\n print(\"record state:\",para[2])\n\ndef api_test(api):\n result1 = 'error'\n try:\n result1 = eval(api)\n except Exception as err:\n print('[error]'+str(err)+';')\n if result1 != 'error':\n result2 = True\n else:\n result2 = False\n print('%s:: %s||result_api:: %s;' % (api, result1, result2))\n utime.sleep(0.05)\n\n\nif __name__ == '__main__':\n list = [\n \"record.start(10, 1, 8000)\",\n \"record.stop()\",\n \"record.start(30, 2, 8000)\",\n 
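        , # note: this comma is required after "record.getFilePath()"; without
          # it Python concatenates the two adjacent string literals into a
          # single malformed entry in the api_test list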
\"record.getFilePath()\"\n \"record.getData(0, 44)\",\n \"record.getSize()\",\n \"record.exists()\",\n \"record.Delete()\",\n \"record.isBusy()\"\n ]\n record = audio.Record('1',record_callback)\n audio = audio.Audio(0)\n for i in list:\n api_test(i)\n utime.sleep(0.5)\n","repo_name":"learning-lte/quecpython-test-project","sub_path":"api_test_case/audio/record_start_stop_callback_delete_api_test.py","file_name":"record_start_stop_callback_delete_api_test.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7400218052","text":"#!/usr/bin/python3\n\"\"\"This uses reddit's API.\"\"\"\nimport requests\npost = None\n\n\ndef recurse(subreddit, hot_list=[]):\n \"\"\"Willreturn top ten post titles recursively\"\"\"\n global post\n user_agent = {'User-Agent': 'api_advanced-project'}\n url = \"https://www.reddit.com/r/{}/hot.json\".format(subreddit)\n parameters = {'after': post}\n results = requests.get(url, params=parameters, headers=user_agent,\n allow_redirects=False)\n\n if results.status_code == 200:\n post_data = results.json().get(\"data\").get(\"after\")\n if post_data is not None:\n post = post_data\n recurse(subreddit, hot_list)\n all_titles = results.json().get(\"data\").get(\"children\")\n for title_ in all_titles:\n hot_list.append(title_.get(\"data\").get(\"title\"))\n return hot_list\n else:\n return (None)\n","repo_name":"Mwarstep/alx-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72259871822","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torchvision.datasets import ImageNet\nfrom datetime import datetime\nimport torch.optim as optim\nimport torch.nn as nn\nimport gc\n\nimport numpy as np\nfrom utils import AddGaussianNoise, AddSaltPepperNoise\nfrom timm.models import efficientnet_b0\nfrom peff_b0 import PEffN_b0SeparateHP_V1\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n########################\n## GLOBAL CONFIGURATIONS\n########################\nTRAIN_MEAN = [0.485, 0.456, 0.406]\nTRAIN_STD = [0.229, 0.224, 0.225]\ndataset_root = '../datasets/imagenet'\n\n#total training epoches\nEPOCH = 8\n\nSAME_PARAM = False # to use the same parameters for all pcoders or not\nFF_START = True # to start from feedforward initialization\nMAX_TIMESTEP = 5\n\n#tensorboard log dir\nLOG_DIR = '../tensorboards/' + f'runs_train_hps_{MAX_TIMESTEP}ts'\nif FF_START:\n LOG_DIR += '_ff_start'\nif not SAME_PARAM:\n LOG_DIR += '_sep'\nLOG_DIR += '_imagenet'\n\nTASK_NAME = 'pefb0_v1n'\nWEIGHT_PATTERN_N = '../weights/PEffNetB0/pnetn_pretrained_pc*.pth'\n\n#time of we run the script\nTIME_NOW = datetime.now().isoformat()\n\n########################\n########################\n\ndef evaluate(net, epoch, dataloader, timesteps, writer=None, tag='Clean'):\n test_loss = np.zeros((timesteps+1,))\n correct = np.zeros((timesteps+1,))\n for (images, labels) in dataloader:\n images = images.cuda()\n labels = labels.cuda()\n \n with torch.no_grad():\n for tt in range(timesteps+1):\n if tt == 0:\n outputs = net(images)\n else:\n outputs = net()\n \n loss = loss_function(outputs, labels)\n test_loss[tt] += loss.item()\n _, preds = outputs.max(1)\n correct[tt] += 
preds.eq(labels).sum()\n\n print()\n for tt in range(timesteps+1):\n test_loss[tt] /= len(dataloader.dataset)\n correct[tt] /= len(dataloader.dataset)\n print('Test set t = {:02d}: Average loss: {:.4f}, Accuracy: {:.4f}'.format(\n tt,\n test_loss[tt],\n correct[tt]\n ))\n if writer is not None:\n writer.add_scalar(f\"{tag}Perf/Epoch#{epoch}\", correct[tt], tt)\n print()\n\ndef train(net, epoch, dataloader, timesteps, writer=None):\n for batch_index, (images, labels) in enumerate(dataloader):\n net.reset()\n\n labels = labels.cuda()\n images = images.cuda()\n\n ttloss = np.zeros((timesteps+1))\n optimizer.zero_grad()\n\n for tt in range(timesteps+1):\n if tt == 0:\n outputs = net(images)\n loss = loss_function(outputs, labels)\n ttloss[tt] = loss.item()\n else:\n outputs = net()\n current_loss = loss_function(outputs, labels)\n ttloss[tt] = current_loss.item()\n loss += current_loss\n \n loss.backward()\n optimizer.step()\n net.update_hyperparameters()\n \n print(f\"Training Epoch: {epoch} [{batch_index * 16 + len(images)}/{len(dataloader.dataset)}]\\tLoss: {loss.item():0.4f}\\tLR: {optimizer.param_groups[0]['lr']:0.6f}\")\n for tt in range(timesteps+1):\n print(f'{ttloss[tt]:0.4f}\\t', end='')\n print()\n if writer is not None:\n writer.add_scalar(f\"TrainingLoss/CE\", loss.item(), (epoch-1)*len(dataloader) + batch_index)\n\ndef load_pnet(net, weight_pattern, build_graph, random_init, ff_multiplier, fb_multiplier, er_multiplier, same_param, device='cuda:0'):\n if same_param:\n raise Exception('Not implemented!')\n else:\n pnet = PEffN_b0SeparateHP_V1(net, build_graph=build_graph, random_init=random_init, ff_multiplier=ff_multiplier, fb_multiplier=fb_multiplier, er_multiplier=er_multiplier)\n\n\n for pc in range(pnet.number_of_pcoders):\n pc_dict = torch.load(weight_pattern.replace('*',f'{pc+1}'), map_location='cpu')\n if 'C_sqrt' not in pc_dict:\n pc_dict['C_sqrt'] = torch.tensor(-1, dtype=torch.float)\n getattr(pnet, f'pcoder{pc+1}').load_state_dict(pc_dict)\n\n pnet.eval()\n pnet.to(device)\n return pnet\n\ndef log_hyper_parameters(net, epoch, sumwriter, same_param=True):\n if same_param:\n sumwriter.add_scalar(f\"HyperparamRaw/feedforward\", getattr(net,f'ff_part').item(), epoch)\n sumwriter.add_scalar(f\"HyperparamRaw/feedback\", getattr(net,f'fb_part').item(), epoch)\n sumwriter.add_scalar(f\"HyperparamRaw/error\", getattr(net,f'errorm').item(), epoch)\n sumwriter.add_scalar(f\"HyperparamRaw/memory\", getattr(net,f'mem_part').item(), epoch)\n\n sumwriter.add_scalar(f\"Hyperparam/feedforward\", getattr(net,f'ffm').item(), epoch)\n sumwriter.add_scalar(f\"Hyperparam/feedback\", getattr(net,f'fbm').item(), epoch)\n sumwriter.add_scalar(f\"Hyperparam/error\", getattr(net,f'erm').item(), epoch)\n sumwriter.add_scalar(f\"Hyperparam/memory\", 1-getattr(net,f'ffm').item()-getattr(net,f'fbm').item(), epoch)\n else:\n for i in range(1, net.number_of_pcoders+1):\n sumwriter.add_scalar(f\"Hyperparam/pcoder{i}_feedforward\", getattr(net,f'ffm{i}').item(), epoch)\n if i < net.number_of_pcoders:\n sumwriter.add_scalar(f\"Hyperparam/pcoder{i}_feedback\", getattr(net,f'fbm{i}').item(), epoch)\n else:\n sumwriter.add_scalar(f\"Hyperparam/pcoder{i}_feedback\", 0, epoch)\n sumwriter.add_scalar(f\"Hyperparam/pcoder{i}_error\", getattr(net,f'erm{i}').item(), epoch)\n if i < net.number_of_pcoders:\n sumwriter.add_scalar(f\"Hyperparam/pcoder{i}_memory\", 1-getattr(net,f'ffm{i}').item()-getattr(net,f'fbm{i}').item(), epoch)\n else:\n sumwriter.add_scalar(f\"Hyperparam/pcoder{i}_memory\", 
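                # (AddGaussianNoise / AddSaltPepperNoise, used in the noise_gens
                # lists below, are imported from a local utils module that is
                # not shown. A plausible minimal AddGaussianNoise transform:)
                #
                #     import torch
                #
                #     class AddGaussianNoise:
                #         def __init__(self, mean=0.0, std=1.0):
                #             self.mean, self.std = mean, std
                #         def __call__(self, tensor):
                #             # additive i.i.d. Gaussian noise per element
                #             return (tensor
                #                     + torch.randn_like(tensor) * self.std
                #                     + self.mean)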
1-getattr(net,f'ffm{i}').item(), epoch)\n\nall_noises = [\n \"gaussian_noise\",\n \"impulse_noise\",\n \"none\"]\nnoise_gens = [\n [\n AddGaussianNoise(std=0.50),\n AddGaussianNoise(std=0.75),\n AddGaussianNoise(std=1.00),\n AddGaussianNoise(std=1.25),\n AddGaussianNoise(std=1.50),\n ],\n [\n AddSaltPepperNoise(probability=0.05),\n AddSaltPepperNoise(probability=0.1),\n AddSaltPepperNoise(probability=0.15),\n AddSaltPepperNoise(probability=0.2),\n AddSaltPepperNoise(probability=0.3),\n ],\n [None],\n]\n\nfor nt_idx, noise_type in enumerate(all_noises):\n for ng_idx, noise_gen in enumerate(noise_gens[nt_idx]):\n print(noise_gen)\n start = datetime.now()\n \n noise_level = 0\n transform_clean = [\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ]\n transform_noise = transform_clean[:]\n\n transform_clean.append(transforms.Normalize(mean=TRAIN_MEAN, std=TRAIN_STD))\n transform_noise.append(transforms.Normalize(mean=TRAIN_MEAN, std=TRAIN_STD))\n\n if noise_gen is not None:\n noise_level = ng_idx + 1\n transform_noise.append(noise_gen)\n\n clean_ds = ImageNet(dataset_root, split='val', download=False, transform=transforms.Compose(transform_clean))\n clean_loader = torch.utils.data.DataLoader(clean_ds, batch_size=16, shuffle=False, drop_last=False, num_workers=8)\n\n noise_ds = ImageNet(dataset_root, split='val', download=False, transform=transforms.Compose(transform_noise))\n noise_loader = torch.utils.data.DataLoader(noise_ds, batch_size=16, shuffle=True, drop_last=False, num_workers=8)\n\n \n sumwriter = SummaryWriter(f'{LOG_DIR}/net_{TASK_NAME}_type_{noise_type}_lvl_{noise_level}', filename_suffix=f'_{noise_type}_{noise_level}')\n \n backward_weight_patter = WEIGHT_PATTERN_N\n\n # feedforward for baseline\n net = efficientnet_b0(pretrained=True)\n pnet_fw = load_pnet(net, backward_weight_patter,\n build_graph=False, random_init=(not FF_START), ff_multiplier=1.0, fb_multiplier=0.0, er_multiplier=0.0, same_param=SAME_PARAM, device='cuda:0')\n \n loss_function = nn.CrossEntropyLoss()\n evaluate(pnet_fw, 0, noise_loader, timesteps=1, writer=sumwriter, tag='FeedForward')\n print(datetime.now() - start)\n del pnet_fw\n gc.collect()\n\n # train hps\n net = efficientnet_b0(pretrained=True)\n pnet = load_pnet(net, backward_weight_patter,\n build_graph=True, random_init=(not FF_START), ff_multiplier=0.33, fb_multiplier=0.33, er_multiplier=0.0, same_param=SAME_PARAM, device='cuda:0')\n\n loss_function = nn.CrossEntropyLoss()\n hyperparams = [*pnet.get_hyperparameters()]\n if SAME_PARAM:\n optimizer = optim.Adam([\n {'params': hyperparams[:-1], 'lr':0.01},\n {'params': hyperparams[-1:], 'lr':0.0001}], weight_decay=0.00001)\n else:\n fffbmem_hp = []\n erm_hp = []\n for pc in range(pnet.number_of_pcoders):\n fffbmem_hp.extend(hyperparams[pc*4:pc*4+3])\n erm_hp.append(hyperparams[pc*4+3])\n optimizer = optim.Adam([\n {'params': fffbmem_hp, 'lr':0.01},\n {'params': erm_hp, 'lr':0.0001}], weight_decay=0.00001)\n\n log_hyper_parameters(pnet, 0, sumwriter, same_param=SAME_PARAM)\n hps = pnet.get_hyperparameters_values()\n print(hps)\n\n evaluate(pnet, 0, noise_loader, timesteps=MAX_TIMESTEP, writer=sumwriter, tag='Noisy')\n print(datetime.now() - start)\n for epoch in range(1, EPOCH+1):\n train(pnet, epoch, noise_loader, timesteps=MAX_TIMESTEP, writer=sumwriter)\n print(datetime.now() - start)\n log_hyper_parameters(pnet, epoch, sumwriter, same_param=SAME_PARAM)\n\n hps = pnet.get_hyperparameters_values()\n print(hps)\n\n evaluate(pnet, epoch, noise_loader, 
timesteps=MAX_TIMESTEP, writer=sumwriter, tag='Noisy')\n print(datetime.now() - start)\n\n evaluate(pnet, epoch, clean_loader, timesteps=MAX_TIMESTEP, writer=sumwriter, tag='Clean')\n \n sumwriter.close()\n\n del pnet\n gc.collect()\n print(datetime.now() - start)\n","repo_name":"artipago/Role_of_Feedback_in_Predictive_Coding","sub_path":"deep_networks/scripts/train_pefbo_hps_imagenet.py","file_name":"train_pefbo_hps_imagenet.py","file_ext":"py","file_size_in_byte":10442,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"35375185093","text":"from copy import deepcopy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torch.utils.data import (\n DataLoader,\n RandomSampler,\n SequentialSampler,\n TensorDataset,\n random_split,\n)\n\n\"\"\"\nDATALOADER UTILITIES\n\"\"\"\n\n\ndef get_bert_dataloader(df, tokenizer, max_seq_length=64, batch_size=16, mode=\"train\"):\n \"\"\"\n Helper function for generating dataloaders for BERT\n \"\"\"\n\n dataset = df_to_bert_dataset(df, max_seq_length, tokenizer)\n\n if mode == \"validate\":\n val_sampler = SequentialSampler(dataset)\n val_loader = DataLoader(dataset, sampler=val_sampler, batch_size=batch_size)\n return val_loader\n\n if mode == \"distill\":\n distill_sampler = SequentialSampler(dataset)\n distill_loader = DataLoader(\n dataset, sampler=distill_sampler, batch_size=batch_size\n )\n return distill_loader\n\n elif mode == \"train\":\n train_sampler = RandomSampler(dataset)\n train_loader = DataLoader(dataset, sampler=train_sampler, batch_size=batch_size)\n return train_loader\n\n\ndef df_to_bert_format(df, max_length, tokenizer):\n sentences = df.iloc[:, 0].values\n labels = df.iloc[:, 1].values\n\n input_ids = []\n attention_masks = []\n\n for sent in sentences:\n encoded_dict = tokenizer.encode_plus(\n sent,\n add_special_tokens=True,\n max_length=max_length,\n pad_to_max_length=True,\n truncation=True,\n return_attention_mask=True,\n return_tensors=\"pt\",\n )\n\n input_ids.append(encoded_dict[\"input_ids\"])\n attention_masks.append(encoded_dict[\"attention_mask\"])\n\n input_ids = torch.cat(input_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n labels = torch.tensor(labels)\n\n return input_ids, attention_masks, labels\n\n\ndef df_to_bert_dataset(df, max_length, tokenizer):\n input_ids, attention_masks, labels = df_to_bert_format(df, max_length, tokenizer)\n dataset = TensorDataset(input_ids, attention_masks, labels)\n return dataset\n","repo_name":"SforAiDl/KD_Lib","sub_path":"KD_Lib/KD/text/utils/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":554,"dataset":"github-code","pt":"47"} +{"seq_id":"35437065790","text":"from __future__ import absolute_import\n\nimport datetime\nimport hashlib\nimport json\nimport os\nimport re\nimport subprocess\nfrom configparser import ConfigParser\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport hachoir.core.config as hachoir_config\nimport LnkParse3\nimport yaml\nfrom assemblyline.common.entropy import calculate_partition_entropy\nfrom assemblyline.common.identify import CUSTOM_BATCH_ID, CUSTOM_PS1_ID\nfrom assemblyline.odm.base import DOMAIN_ONLY_REGEX, IP_ONLY_REGEX, UNC_PATH_REGEX\nfrom assemblyline_v4_service.common.base import ServiceBase\nfrom assemblyline_v4_service.common.request import ServiceRequest\nfrom assemblyline_v4_service.common.result import (\n BODY_FORMAT,\n Heuristic,\n Result,\n 
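    # (Usage sketch for the df_to_bert_dataset/get_bert_dataloader helpers in
    # the KD_Lib snippet above; the tokenizer and data are illustrative.)
    #
    #     import pandas as pd
    #     from transformers import BertTokenizer
    #
    #     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    #     df = pd.DataFrame({"text": ["hello world"], "label": [0]})
    #     loader = get_bert_dataloader(df, tokenizer, max_seq_length=64,
    #                                  batch_size=16, mode="train")
    #     input_ids, attention_mask, labels = next(iter(loader))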
ResultGraphSection,\n ResultKeyValueSection,\n ResultOrderedKeyValueSection,\n ResultSection,\n)\nfrom hachoir.core.log import Logger\nfrom hachoir.core.log import log as hachoir_logger\nfrom hachoir.metadata import extractMetadata\nfrom hachoir.parser.guess import createParser\nfrom multidecoder.decoders.shell import get_cmd_command, get_powershell_command\n\nTAG_MAP = {\n \"ole2\": {\n \"author\": \"file.ole.summary.author\",\n \"last_modification\": \"file.date.last_modified\",\n \"subject\": \"file.ole.summary.subject\",\n \"title\": \"file.ole.summary.title\",\n },\n # \"LNK\": {\"target_file_dosname\": \"file.name.extracted\"},\n \"ZIP\": {\"zip_modify_date\": \"file.date.last_modified\"},\n \"EXE\": {\"file_description\": \"file.pe.versions.description\", \"time_stamp\": \"file.pe.linker.timestamp\"},\n \"DLL\": {\"file_description\": \"file.pe.versions.description\", \"time_stamp\": \"file.pe.linker.timestamp\"},\n \"DOC\": {\n \"author\": \"file.ole.summary.author\",\n \"code_page\": \"file.ole.summary.codepage\",\n \"comment\": \"file.ole.summary.comment\",\n \"company\": \"file.ole.summary.company\",\n \"create_date\": \"file.date.creation\",\n \"last_modified_by\": \"file.ole.summary.last_saved_by\",\n \"manager\": \"file.ole.summary.manager\",\n \"modify_date\": \"file.date.last_modified\",\n \"subject\": \"file.ole.summary.subject\",\n \"title\": \"file.ole.summary.title\",\n },\n None: {\n \"image_size\": \"file.img.size\",\n \"megapixels\": \"file.img.mega_pixels\",\n \"create_date\": \"file.date.creation\",\n \"creation_date\": \"file.date.creation\",\n \"modify_date\": \"file.date.last_modified\",\n \"original_file_name\": \"file.name.extracted\",\n },\n}\n\nEXIFTOOL_DATE_FMT = \"%Y:%m:%d %H:%M:%S%z\"\n\n\ndef build_key(input_string: str) -> str:\n list_string = list(input_string)\n new_list: List[str] = []\n previous_upper = False\n for idx, i in enumerate(list_string):\n if i.isupper():\n if idx != 0 and not previous_upper:\n new_list.append(\"_\")\n\n previous_upper = True\n new_list.append(i.lower())\n elif i in [\".\", \"_\"]:\n previous_upper = True\n new_list.append(i)\n else:\n previous_upper = False\n new_list.append(i)\n\n return \"\".join(new_list)\n\n\ndef get_type_val(data: str, src_name: str) -> Tuple[str, str]:\n key = src_name\n val = data\n\n if \":\" in data:\n key, val = data.split(\":\", 1)\n elif \"=\" in data:\n key, val = data.split(\"=\", 1)\n\n key = build_key(key)\n val = val.strip()\n return key, val\n\n\ndef contains_inf_nan(v):\n if isinstance(v, dict):\n for d in v.keys():\n if contains_inf_nan(d):\n return True\n for d in v.values():\n if contains_inf_nan(d):\n return True\n return False\n elif isinstance(v, list):\n for d in v:\n if contains_inf_nan(d):\n return True\n return False\n else:\n return v in [float(\"inf\"), -float(\"inf\"), float(\"nan\")]\n\n\n#########################################################\n# Scan Execution Class #\n#########################################################\nclass Characterize(ServiceBase):\n def hachoir_logger_callback(self, level: int, prefix: str, _text: str, ctxt: Optional[Logger]) -> None:\n # Show where in hachoir the log comes from using ctxt if it exists\n log = f\"hachoir {ctxt.__class__} [{ctxt._logger()}]: {_text}\" if ctxt else f\"hachoir: {_text}\\n\"\n self.log.info(log)\n\n def start(self) -> None:\n hachoir_config.quiet = True\n # Don't print to stdout, use our logger via callback\n hachoir_logger.use_print = False\n hachoir_logger.on_new_message = self.hachoir_logger_callback\n\n def 
execute(self, request: ServiceRequest) -> None:\n request.result = Result()\n\n if request.file_type.startswith(\"uri/\"):\n with open(request.file_path, \"r\") as f:\n data = yaml.safe_load(f)\n\n data.pop(\"uri\")\n headers = data.pop(\"headers\", {})\n if data or headers:\n params_section = ResultOrderedKeyValueSection(\n f\"{request.task.fileinfo.uri_info.scheme.upper()} Params\", parent=request.result\n )\n for k, v in data.items():\n params_section.add_item(k, v)\n for k, v in headers.items():\n params_section.add_item(k, v)\n params_section.promote_as_uri_params()\n return\n\n # 1. Calculate entropy map\n with open(request.file_path, \"rb\") as fin:\n (entropy, part_entropies) = calculate_partition_entropy(fin)\n\n graph_section = ResultGraphSection(f\"File entropy: {round(entropy, 3)}\")\n graph_section.set_colormap(0, 8, part_entropies)\n graph_section.promote_as_entropy()\n request.result.add_section(graph_section)\n\n if request.file_type != \"shortcut/windows\":\n # 2. Get hachoir metadata\n parser = createParser(request.file_path)\n if parser is not None:\n with parser:\n parser_tags = parser.getParserTags()\n parser_id = parser_tags.get(\"id\", \"unknown\")\n\n # Do basic metadata extraction\n metadata = extractMetadata(parser, 1)\n\n if metadata:\n kv_body: Dict[str, Union[str, List[str]]] = {}\n tags: List[Tuple[str, str]] = []\n for m in metadata:\n if m.key == \"comment\":\n for v in m.values:\n key, val = get_type_val(v.text, \"comment\")\n if not val:\n continue\n\n kv_body[key] = val\n\n tag_type = TAG_MAP.get(parser_id, {}).get(key, None) or TAG_MAP.get(None, {}).get(\n key, None\n )\n if tag_type is not None:\n tags.append((tag_type, val))\n elif m.key in [\"mime_type\"]:\n pass\n else:\n values = [v.text for v in m.values]\n if len(values) == 1 and values[0]:\n kv_body[m.key] = values[0]\n elif values:\n kv_body[m.key] = values\n\n for v in values:\n tag_type = TAG_MAP.get(parser_id, {}).get(m.key, None) or TAG_MAP.get(None, {}).get(\n m.key, None\n )\n if tag_type is not None:\n tags.append((tag_type, v))\n\n if kv_body:\n res = ResultSection(\n f\"Metadata extracted by hachoir-metadata [Parser: {parser_id}]\",\n body=json.dumps(kv_body, allow_nan=False),\n body_format=BODY_FORMAT.KEY_VALUE,\n parent=request.result,\n )\n\n for t_type, t_val in tags:\n res.add_tag(t_type, t_val)\n\n # 3. 
Get Exiftool Metadata\n exif = subprocess.run([\"exiftool\", \"-j\", request.file_path], capture_output=True, check=False)\n if exif.stdout:\n exif_data = json.loads(exif.stdout.decode(\"utf-8\", errors=\"ignore\"))\n res_data = exif_data[0]\n if \"Error\" not in res_data:\n exif_body = {}\n for k, v in res_data.items():\n if v and k not in [\n \"SourceFile\",\n \"ExifToolVersion\",\n \"FileName\",\n \"Directory\",\n \"FileSize\",\n \"FileModifyDate\",\n \"FileAccessDate\",\n \"FileInodeChangeDate\",\n \"FilePermissions\",\n \"FileType\",\n \"FileTypeExtension\",\n \"MIMEType\",\n \"Warning\",\n ]:\n if contains_inf_nan(v):\n exif = subprocess.run(\n [\"exiftool\", f\"-{k}\", \"-T\", request.file_path], capture_output=True, check=False\n )\n v = exif.stdout.decode(\"utf-8\", errors=\"ignore\").strip()\n exif_body[build_key(k)] = v\n if exif_body:\n e_res = ResultSection(\n \"Metadata extracted by ExifTool\",\n body=json.dumps(exif_body, allow_nan=False),\n body_format=BODY_FORMAT.KEY_VALUE,\n parent=request.result,\n )\n for k, v in exif_body.items():\n tag_type = TAG_MAP.get(res_data.get(\"FileTypeExtension\", \"UNK\").upper(), {}).get(\n k, None\n ) or TAG_MAP.get(None, {}).get(k, None)\n if tag_type:\n e_res.add_tag(tag_type, v)\n\n # 4. Lnk management.\n if request.file_type == \"shortcut/windows\":\n with open(request.file_path, \"rb\") as indata:\n lnk = LnkParse3.lnk_file(indata)\n\n features = lnk.get_json(get_all=True)\n\n lnk_result_section = ResultSection(\n \"Extra metadata extracted by LnkParse3\",\n parent=request.result,\n )\n\n heur_1_items = {}\n risky_executable = [\"rundll32.exe\", \"powershell.exe\", \"cmd.exe\", \"mshta.exe\"]\n\n if \"command_line_arguments\" in features[\"data\"]:\n if any(x in features[\"data\"][\"command_line_arguments\"].lower() for x in risky_executable):\n heur_1_items[\"command_line_arguments\"] = features[\"data\"][\"command_line_arguments\"]\n elif \" && \" in features[\"data\"][\"command_line_arguments\"]:\n heur_1_items[\"command_line_arguments\"] = features[\"data\"][\"command_line_arguments\"]\n\n lbp = \"\"\n if \"local_base_path\" in features[\"link_info\"]:\n lbp = features[\"link_info\"][\"local_base_path\"]\n if \"common_path_suffix\" in features[\"link_info\"]:\n lbp = f\"{lbp}{features['link_info']['common_path_suffix']}\"\n if any(x in lbp.lower() for x in risky_executable):\n if \"mshta.exe\" in lbp.lower() and \"command_line_arguments\" in features[\"data\"]:\n cla = features[\"data\"][\"command_line_arguments\"]\n if \" \" not in cla and (cla.startswith(\"https://\") or cla.startswith(\"http://\")):\n heur = Heuristic(9)\n heur_section = ResultSection(heur.name, heuristic=heur, parent=lnk_result_section)\n heur_section.add_line(f\"Download of {cla}\")\n heur_section.add_tag(\"network.static.uri\", cla)\n heur_1_items[\"local_base_path\"] = features[\"link_info\"][\"local_base_path\"]\n\n if \"relative_path\" in features[\"data\"]:\n if any(x in features[\"data\"][\"relative_path\"].lower() for x in risky_executable):\n heur_1_items[\"relative_path\"] = features[\"data\"][\"relative_path\"]\n\n target = \"\"\n if \"target\" in features:\n import ntpath\n\n if \"items\" in features[\"target\"]:\n last_item = None\n for item in features[\"target\"][\"items\"]:\n if \"primary_name\" in item:\n last_item = item\n target = ntpath.join(target, item[\"primary_name\"])\n\n if last_item and last_item[\"flags\"] == \"Is directory\":\n target = \"\"\n\n if any(x in target.lower() for x in risky_executable):\n 
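                            # (Minimal LnkParse3 sketch mirroring the parsing
                            # in this method; the file path is hypothetical:)
                            #
                            #     import LnkParse3
                            #     with open("sample.lnk", "rb") as fh:
                            #         lnk = LnkParse3.lnk_file(fh)
                            #     features = lnk.get_json(get_all=True)
                            #     cmd = features["data"].get(
                            #         "command_line_arguments", "")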
heur_1_items[\"target_file_dosname\"] = target\n\n timestamps = []\n if features[\"header\"][\"creation_time\"]:\n timestamps.append((\"creation_time\", features[\"header\"][\"creation_time\"]))\n if features[\"header\"][\"modified_time\"]:\n timestamps.append((\"modified_time\", features[\"header\"][\"modified_time\"]))\n\n if request.task.depth != 0:\n heur2_earliest_ts = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(\n days=self.config.get(\"heur2_flag_more_recent_than_days\", 3)\n )\n heur2_latest_ts = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=2)\n recent_timestamps = []\n future_timestamps = []\n for k, timestamp in timestamps:\n if timestamp < heur2_earliest_ts:\n continue\n if timestamp > heur2_latest_ts:\n future_timestamps.append((k, timestamp))\n continue\n recent_timestamps.append((k, timestamp))\n\n if recent_timestamps:\n heur = Heuristic(2)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n for k, timestamp in recent_timestamps:\n heur_section.set_item(k, timestamp.isoformat())\n if future_timestamps:\n heur = Heuristic(3)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n for k, timestamp in future_timestamps:\n heur_section.set_item(k, timestamp.isoformat())\n\n if \"DISTRIBUTED_LINK_TRACKER_BLOCK\" in features[\"extra\"]:\n if \"machine_identifier\" in features[\"extra\"][\"DISTRIBUTED_LINK_TRACKER_BLOCK\"]:\n machine_id = features[\"extra\"][\"DISTRIBUTED_LINK_TRACKER_BLOCK\"][\"machine_identifier\"]\n lnk_result_section.add_tag(\"file.shortcut.machine_id\", machine_id)\n if machine_id.lower().startswith(\"desktop-\"):\n heur = Heuristic(5)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n heur_section.set_item(\"machine_identifier\", machine_id)\n if \"droid_file_identifier\" in features[\"extra\"][\"DISTRIBUTED_LINK_TRACKER_BLOCK\"]:\n mac = features[\"extra\"][\"DISTRIBUTED_LINK_TRACKER_BLOCK\"][\"droid_file_identifier\"][-12:]\n mac = \":\".join(a + b for a, b in zip(mac[::2], mac[1::2]))\n lnk_result_section.add_tag(\"file.shortcut.tracker_mac\", mac)\n elif \"birth_droid_file_identifier\" in features[\"extra\"][\"DISTRIBUTED_LINK_TRACKER_BLOCK\"]:\n mac = features[\"extra\"][\"DISTRIBUTED_LINK_TRACKER_BLOCK\"][\"birth_droid_file_identifier\"][-12:]\n mac = \":\".join(a + b for a, b in zip(mac[::2], mac[1::2]))\n lnk_result_section.add_tag(\"file.shortcut.tracker_mac\", mac)\n\n # Adapted code from previous logic. 
May be best replaced by new heuristics and logic.\n bp = str(lbp).strip()\n rp = str(features[\"data\"].get(\"relative_path\", \"\")).strip()\n nn = str(features[\"data\"].get(\"net_name\", \"\")).strip()\n t = str(target).strip().rsplit(\"\\\\\")[-1].strip()\n cla = str(features[\"data\"].get(\"command_line_arguments\", \"\")).strip()\n # Optional extras to use in case none of the other are filled\n extra_targets = {\n k: v\n for k, v in features.get(\"extra\", {}).get(\"ENVIRONMENTAL_VARIABLES_LOCATION_BLOCK\", {}).items()\n if k.startswith(\"target_\")\n }\n\n filename_extracted = bp or rp or t or nn\n if filename_extracted.rsplit(\"\\\\\")[-1].strip():\n lnk_result_section.add_tag(\"file.name.extracted\", filename_extracted.rsplit(\"\\\\\")[-1])\n elif extra_targets:\n heur = Heuristic(7)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n for k, v in extra_targets.items():\n filename_extracted = v\n heur_section.set_item(k, v)\n heur_section.add_tag(\"file.name.extracted\", v.rsplit(\"\\\\\")[-1])\n\n unc_result = None\n if \"icon_location\" in features[\"data\"]:\n deceptive_icons = [\"wordpad.exe\", \"shell32.dll\", \"explorer.exe\", \"msedge.exe\"]\n\n lnk_result_section.add_tag(\"file.shortcut.icon_location\", features[\"data\"][\"icon_location\"])\n if re.match(UNC_PATH_REGEX, features[\"data\"][\"icon_location\"]):\n heur = Heuristic(10)\n unc_result = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n unc_result.add_tag(\"network.static.unc_path\", features[\"data\"][\"icon_location\"])\n unc_result.set_item(\"icon_location\", features[\"data\"][\"icon_location\"])\n if any(\n features[\"data\"][\"icon_location\"].lower().strip('\"').strip(\"'\").endswith(x)\n and not filename_extracted.endswith(x)\n for x in deceptive_icons\n ):\n heur = Heuristic(4)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n heur_section.set_item(\"icon_location\", features[\"data\"][\"icon_location\"])\n\n process_cmdline = f\"{filename_extracted} {cla}\".strip()\n if re.match(UNC_PATH_REGEX, process_cmdline):\n if unc_result is None:\n heur = Heuristic(10)\n unc_result = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n unc_result.add_tag(\"network.static.unc_path\", process_cmdline)\n unc_result.set_item(\"cmdline\", process_cmdline)\n elif process_cmdline:\n lnk_result_section.add_tag(\"file.shortcut.command_line\", process_cmdline)\n\n filename_extracted = filename_extracted.rsplit(\"\\\\\")[-1].strip().lstrip(\"./\").lower()\n\n cmd_code = None\n if filename_extracted in [\"cmd\", \"cmd.exe\"]:\n file_content = CUSTOM_BATCH_ID\n file_content += get_cmd_command(f\"{filename_extracted} {cla}\".encode())\n cmd_code = (file_content, \"bat\")\n if \"rundll32 \" in cla: # We are already checking for rundll32.exe as part of risky_executable\n heur_1_items[\"command_line_arguments\"] = features[\"data\"][\"command_line_arguments\"]\n elif filename_extracted in [\"powershell\", \"powershell.exe\"]:\n file_content = CUSTOM_PS1_ID\n file_content += get_powershell_command(f\"{filename_extracted} {cla}\".encode())\n cmd_code = (file_content, \"ps1\")\n\n if heur_1_items:\n heur = Heuristic(1)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n heur_section.update_items(heur_1_items)\n\n if cmd_code:\n sha256hash = hashlib.sha256(cmd_code[0]).hexdigest()\n cmd_filename = f\"{sha256hash[0:10]}.{cmd_code[1]}\"\n cmd_file_path = 
os.path.join(self.working_directory, cmd_filename)\n with open(cmd_file_path, \"wb\") as cmd_f:\n cmd_f.write(cmd_code[0])\n request.add_extracted(\n cmd_file_path,\n cmd_filename,\n \"Extracted LNK execution code\",\n )\n\n def _datetime_to_str(obj):\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n return obj\n\n temp_path = os.path.join(self.working_directory, \"features.json\")\n with open(temp_path, \"w\") as f:\n json.dump(features, f, default=_datetime_to_str)\n request.add_supplementary(temp_path, \"features.json\", \"Features extracted from the LNK file\")\n\n if lnk.appended_data:\n sha256hash = hashlib.sha256(lnk.appended_data).hexdigest()\n appended_data_path = os.path.join(self.working_directory, sha256hash)\n with open(appended_data_path, \"wb\") as appended_data_f:\n appended_data_f.write(lnk.appended_data)\n request.add_extracted(\n appended_data_path,\n sha256hash,\n \"Additional data at the end of the LNK file\",\n )\n heur = Heuristic(6)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=lnk_result_section)\n heur_section.set_item(\"Length\", len(lnk.appended_data))\n\n for extra_data in lnk.extras:\n if isinstance(extra_data, LnkParse3.extra.unknown.UnknownExtra):\n sha256hash = hashlib.sha256(extra_data.extra_data).hexdigest()\n appended_data_path = os.path.join(self.working_directory, sha256hash)\n with open(appended_data_path, \"wb\") as appended_data_f:\n appended_data_f.write(extra_data.extra_data)\n request.add_extracted(\n appended_data_path,\n sha256hash,\n \"Unknown Extra data\",\n )\n section = ResultKeyValueSection(\"Unknown Extra data\", parent=lnk_result_section)\n section.set_item(\"Length\", len(extra_data.extra_data))\n\n # 5. URL file management\n if request.file_type == \"shortcut/web\":\n config = ConfigParser()\n config.read(request.file_path, encoding=\"UTF-8\")\n\n res = ResultKeyValueSection(\"Metadata extracted by Ini Reader\", parent=request.result)\n for k, v in config.items(\"InternetShortcut\", raw=True):\n res.set_item(k, v)\n\n if k.lower() == \"url\":\n if v.startswith(\"http://\") or v.startswith(\"https://\"):\n res.set_heuristic(8)\n res.add_tag(\"network.static.uri\", v)\n elif v.startswith(\"file:\"):\n heur = Heuristic(1)\n heur_section = ResultKeyValueSection(heur.name, heuristic=heur, parent=res)\n heur_section.set_item(\"url\", v)\n heur_section.add_tag(\"network.static.uri\", v)\n heur_section.add_tag(\"file.path\", get_filepath_from_fileuri(v))\n\n config.pop(\"InternetShortcut\", None)\n if config.sections():\n extra_res = ResultKeyValueSection(\"Extra sections\", parent=res)\n extra_res.set_item(\"Names\", \", \".join(config.sections()))\n\n\ndef get_filepath_from_fileuri(fileuri: str):\n if not fileuri.startswith(\"file:\"):\n return None\n\n filepath = fileuri[5:].lstrip(\"/\\\\\")\n\n if len(filepath) > 9 and filepath[:9] == \"localhost\" and filepath[9] in [\"/\", \"\\\\\"]:\n filepath = filepath[10:].lstrip(\"/\\\\\")\n\n host_part = filepath.split(\"/\")[0].split(\"\\\\\")[0]\n original_host_part = host_part\n if \"@\" in host_part and re.match(r\"\\d+\", host_part.split(\"@\", 1)[1]):\n host_part = host_part.split(\"@\", 1)[0]\n if re.match(DOMAIN_ONLY_REGEX, host_part) or re.match(IP_ONLY_REGEX, host_part):\n filepath = filepath[len(original_host_part):].lstrip(\"/\\\\\")\n\n if filepath.split(\"/\")[0].split(\"\\\\\")[0].count(\":\") == 1:\n return filepath\n\n prepend = \"/\"\n if \"/\" not in filepath and \"\\\\\" in filepath:\n prepend = \"\\\\\"\n return 
f\"{prepend}{filepath}\"\n","repo_name":"CybercentreCanada/assemblyline-service-characterize","sub_path":"characterize.py","file_name":"characterize.py","file_ext":"py","file_size_in_byte":25345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"31286516835","text":"from datetime import datetime\r\nimport json\r\nfrom pytz import UTC # timezone\r\nfrom typing import List, Tuple\r\n\r\nimport caldav\r\nfrom icalendar import Calendar, Event, vText\r\n\r\nimport ids\r\n\r\n\r\ndef __processEvent(event: Event, clear: bool) -> Event:\r\n eventClean = Event()\r\n\r\n # Event\r\n if clear and \"SUMMARY\" in event:\r\n eventClean.add('SUMMARY', event[\"SUMMARY\"], encode=0)\r\n if not clear and \"CATEGORIES\" in event:\r\n c = event[\"CATEGORIES\"] if type(event[\"CATEGORIES\"]) is list else [event[\"CATEGORIES\"]]\r\n cats = []\r\n for cat in c:\r\n cats.extend(cat.cats)\r\n eventClean.add('SUMMARY', ', '.join(cats), encode=1)\r\n if \"CATEGORIES\" in event:\r\n eventClean.add('CATEGORIES', event[\"CATEGORIES\"], encode=0)\r\n if False and clear and \"DESCRIPTION\" in event:\r\n eventClean.add('DESCRIPTION', event[\"DESCRIPTION\"], encode=0)\r\n if clear and \"LOCATION\" in event:\r\n eventClean.add('LOCATION', event[\"LOCATION\"], encode=0)\r\n\r\n # Date\r\n if \"DTSTART\" in event:\r\n eventClean.add('DTSTART', event[\"DTSTART\"], encode=0)\r\n if \"DURATION\" in event:\r\n eventClean.add('DURATION', event[\"DURATION\"], encode=0)\r\n if \"DTEND\" in event:\r\n eventClean.add('DTEND', event[\"DTEND\"], encode=0)\r\n if \"DTSTAMP\" in event:\r\n eventClean.add('DTSTAMP', event[\"DTSTAMP\"], encode=0)\r\n\r\n # Recurrence\r\n if \"RECURRENCE-ID\" in event:\r\n eventClean.add('RECURRENCE-ID', event[\"RECURRENCE-ID\"], encode=0)\r\n if \"SEQUENCE\" in event:\r\n eventClean.add('SEQUENCE', event[\"SEQUENCE\"], encode=0)\r\n if \"RRULE\" in event:\r\n eventClean.add('RRULE', event[\"RRULE\"], encode=0)\r\n if \"RDATE\" in event:\r\n eventClean.add('RDATE', event[\"RDATE\"], encode=0)\r\n if \"EXDATE\" in event:\r\n eventClean.add('EXDATE', event[\"EXDATE\"], encode=0)\r\n\r\n # Meta\r\n if \"UID\" in event:\r\n eventClean.add('UID', event[\"UID\"], encode=0)\r\n if \"CREATED\" in event:\r\n eventClean.add('CREATED', event[\"CREATED\"], encode=0)\r\n if \"LAST-MODIFIED\" in event:\r\n eventClean.add('LAST-MODIFIED', event[\"LAST-MODIFIED\"], encode=0)\r\n\r\n return eventClean\r\n\r\n\r\ndef getDavCalendar(calN: str) -> caldav.Calendar:\r\n URLFULL = ids.URL+\"calendars/\"+ids.USERN+\"/\"+calN\r\n\r\n client = caldav.DAVClient(url=ids.URL, username=ids.USERN, password=ids.PASSW)\r\n calendar = caldav.Calendar(client=client, url=URLFULL)\r\n\r\n return calendar\r\n\r\n\r\ndef convert(calD: caldav.Calendar, clear: bool) -> List[Event]:\r\n results = calD.events(baikal=True)\r\n events = []\r\n\r\n for subcalendar in results:\r\n cal = Calendar.from_ical(subcalendar._data)\r\n for component in cal.walk():\r\n if component.name == \"VEVENT\":\r\n event = __processEvent(component, clear)\r\n events.append(event)\r\n\r\n return events\r\n\r\n\r\ndef getEvents(calN: str, clear: bool) -> List[Event]:\r\n return convert(getDavCalendar(calN), clear)\r\n\r\n\r\ndef getStrCalendar(calN: str, clear: bool) -> str:\r\n return getStrCalendars([(calN, clear)])\r\n\r\n\r\ndef getStrCalendars(calendars: List[Tuple[str, bool]]) -> str:\r\n cal = Calendar()\r\n for calendar in calendars:\r\n events = getEvents(*calendar)\r\n for event in events:\r\n 
cal.add_component(event)\r\n    return cal.to_ical().decode(\"utf-8\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    print(getStrCalendars([(\"default\", True), (\"personnal\", False)]))\r\n    \r\n    c = getDavCalendar(\"default\")\r\n    r = c.events(baikal=True)\r\n    for subcalendar in r:\r\n        cal = Calendar.from_ical(subcalendar._data)\r\n        for component in cal.walk():\r\n            if component.name == \"VEVENT\":\r\n                if 'RRULE' in component and 'EXDATE' in component:\r\n                    e = component\r\n","repo_name":"telec16/caldav2ics","sub_path":"web/ics/dav2ics.py","file_name":"dav2ics.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28778454987","text":"from tkinter import BOTH, TOP, ttk, Tk, Text, Frame\n\nfrom core.stats.report import report\nfrom gui.plot_frame import plot_frame\nfrom gui.slider_frame import slider_frame\nfrom gui.block_frame import block_frame\n\nclass Gui(Tk):\n    \n    '''Builds tab group view'''\n\n    def __init__(self, report : report):\n        super(Gui, self).__init__()\n        width = 960\n        height = 576\n        screen_width = self.winfo_screenwidth() # Width of the screen\n        screen_height = self.winfo_screenheight() # Height of the screen\n        # Calculate Starting X and Y coordinates for Window\n        x = (screen_width/2) - (width/2)\n        y = (screen_height/2) - (height/2)\n        self.geometry('%dx%d+%d+%d' % (width, height, x, y))\n        self.resizable(0,0)\n        self.title(\"Blockchain Simulator ver. 1.0\")\n        self.report = report\n        self.build()\n        self.mainloop()\n\n    def build(self) ->None:\n        self.tabControl = ttk.Notebook(self)\n        self.info_tab()\n        self.user_tab()\n        self.block_tab()\n        self.histo_tab()\n        self.tabControl.pack(expand = 1, fill =\"both\")\n\n    def info_tab(self) ->None:\n        info = self.report.get_info()\n        tab = Frame(self.tabControl)\n        self.tabControl.add(tab, text ='General')\n        t = Text(master = tab)\n        t.insert(1.0, info)\n        t.configure(state = \"disabled\")\n        t.pack(fill=BOTH, side = TOP, expand=1)\n\n    def user_tab(self) ->None:\n        users = self.report.user_pool.users\n        tab = slider_frame(self.tabControl, users)\n        tab.build()\n        self.tabControl.add(tab, text ='Users')\n\n    def block_tab(self) ->None:\n        blocks = self.report.stats_f.block_stats\n        tab = block_frame(self.tabControl, blocks)\n        tab.build(2)\n        self.tabControl.add(tab, text ='Blocks')\n\n    def histo_tab(self) ->None:\n        stats = self.report.stats\n        tab = plot_frame(self.tabControl, stats)\n        self.tabControl.add(tab, text ='Stats')","repo_name":"Vito-Scaraggi/biometric_blockchain","sub_path":"simulator/gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27925165529","text":"import queue\n\n\ndef isValidPos(x, y):\n    if x < 0 or y < 0 or x >= MAZE_SIZE or y >= MAZE_SIZE:\n        return False\n    else:\n        return map[y][x] == '0' or map[y][x] == 'x'\n\n\ndef BFS():\n    Q = queue.Queue(maxsize=0)\n    Q.put((0, 1))\n    print(\"BFS: \")\n\n    while not Q.qsize() == 0:\n        here = Q.get()\n        print(here, end=\"->\")\n        x, y = here\n        if(map[y][x] == 'x'):\n            return True\n        else:\n            map[y][x] = '.'\n            if isValidPos(x, y - 1):\n                Q.put((x, y - 1))\n            if isValidPos(x, y + 1):\n                Q.put((x, y + 1))\n            if isValidPos(x - 1, y):\n                Q.put((x - 1, y))\n            if isValidPos(x + 1, y):\n                Q.put((x + 1, y))\n    return False\n\n\nmap = [['1', '1', '1', '1', '1', '1'],\n       ['e', '0', '1', '0', '0', '1'],\n       ['1', '0', '0', '0', '1', '1'],\n       ['1', '0', '1', '0', '1', '1'],\n       ['1', '0', 
'1', '0', '0', 'x'],\n       ['1', '1', '1', '1', '1', '1']]\nMAZE_SIZE = 6\nresult = BFS()\nif result:\n    print(' --> Maze search succeeded')\nelse:\n    print(' --> Maze search failed')\n","repo_name":"bakkso/Data-Structure","sub_path":"Data_Structure/week6/P_5.1.py","file_name":"P_5.1.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"916529823","text":"import os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom data_processor import *\nimport pandas as pd\nfrom datetime import date\n\n# want to load into our environment our api key\nsimbolico = os.environ.get('inegiKey')\n# purpose is to create current date and current year function\ntoday = date.today()\ncurrentDate = today.strftime('%m_%d_%y')\n# setting up params for our data_visuals\nsns.set(font_scale=0.5)\nsns.set_style('dark')\n\n# geo identifier\ngeos = {'0700':'nacional', # choose 0700 for mortality\n        '07000001': 'not sure what this is'}\n\n# data bridges\nbridges = {'BIE':'Bank for Economic Information',\n           'BISE':'Bank of Indicators'} # choose BISE for mortality\n\n# mortality index topics\nmorts = {\n    '1002000030':'General deaths',\n    '1002000031':'General deaths, men',\n    '1002000032':'General deaths, women',\n    '1002000034':'Deaths of infants younger than one year',\n    '6200002200':'Homicide death rate per 100K inhabitants',\n    '6200240338':'Suicides registered',\n    '6300000252':'Deaths by homicide, men',\n    '6300000268':'Homicide death rate per 100K inhabitants, women',\n    '6300000265':'Homicide death rate per 100K inhabitants, men',\n    '6300000258':'Deaths by homicide, women'\n}\n\n# creating a list of keys\nmorts_keys = list(morts.keys())\ngeo_keys = list(geos.keys())\nbridge_keys = list(bridges.keys())\n\n# processing our data\ndf = process_data_by_series(keys=morts_keys, geo=geo_keys[0],\n                            bridge=bridge_keys[1])\n\n# cleaning our data\ndf = clean_data(df=df)\n\n# lets output our df as a csv file\ndf.to_csv('./data_files/mx_mortality_data.csv', index=False)\n\n# now let's visualize\nvis_data_lineplot(dict=morts, df=df)\n\n\n# now we will convert our figures into a pdf file\nfilename = './data_visuals/mexico_mortality_data_visuals_'\nsave_multi_image(filename + currentDate + '.pdf')","repo_name":"aangelsalazarr/INEGI-Data-Project","sub_path":"inegi_mortality.py","file_name":"inegi_mortality.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3370517357","text":"#In order to make this work, you need Windows Speech Recognition and dragonfly.\n#Dragonfly has a bunch of pre-requisites as well that you can find on their Github page. 
\n\nfrom dragonfly import Grammar, MappingRule, Text, Dictation\nimport pythoncom\nimport time\n\ntest_com = MappingRule(\\\nname=\"test\",\\\nmapping = {\"write <text>\": Text(\"%(text)s\")},\\\nextras=[Dictation(\"text\"),],)\n\ngrammar = Grammar(\"test grammar\")\ngrammar.add_rule(test_com)\ngrammar.load()\n\n#Keeps the program running to execute the commands\nwhile True:\n    pythoncom.PumpWaitingMessages()\n    time.sleep(0.1)\n    \n","repo_name":"aseastman/SpeechRecognition","sub_path":"SimpleMicCheck.py","file_name":"SimpleMicCheck.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36726259196","text":"import numpy as np\nfrom lipkin.model import LipkinModel\n\nclass HartreeFock(LipkinModel):\n\n    name = 'Hartree-Fock'\n\n    def __init__(self, epsilon, V, Omega):\n        \n        if Omega%2 == 1:\n            raise ValueError('This HF implementation assumes N = Omega = even.')\n        \n        LipkinModel.__init__(self, epsilon, V, Omega, Omega)\n        self.r_gs = (-1)**(0.5*self.Omega)\n        self.err = 1E-8\n        \n        \n    def solve_equations(self, num_iter=100, theta0=0.0, phi0=0.0):\n        \n        # set initial tau\n        tau = np.array([theta0, phi0])\n        \n        # construct HF hamiltonian\n        h = self.get_self_consistent_hamiltonian(tau)\n        \n        # construct kinetic energy\n        T = np.zeros((2,2), dtype=np.complex128)\n        T[0,0] = -0.5*self.epsilon*self.Omega\n        T[1,1] = 0.5*self.epsilon*self.Omega\n        \n        # container for single particle potential\n        Gamma = np.zeros((2,2), dtype=np.complex128)\n        \n        for i in range(num_iter):\n            \n            # solve eigenvalue problem\n            eigvals, eigvecs = np.linalg.eig(h)\n            \n            # construct new density matrix\n            rho = np.outer(eigvecs[:,0], np.conjugate(eigvecs[:,0]))\n            \n            # construct new potential\n            Gamma[0,1] = -self.V*self.Omega*(self.Omega-1)*rho[1,0]\n            Gamma[1,0] = -self.V*self.Omega*(self.Omega-1)*rho[0,1]\n            \n            # construct new hamiltonian\n            h = T + Gamma\n            \n            # calculate energy\n            E = 0.5*np.trace(np.dot(T+h, rho)).real\n        \n        return E\n    \n    \n    def get_self_consistent_hamiltonian(self, tau):\n        \n        theta, phi = tau[0], tau[1]\n        h = np.empty((2,2), dtype=np.complex128)\n        \n        h[0,0] = 1\n        h[1,1] = -1\n        h[0,1] = self.chi*np.sin(theta)*np.exp(1j*phi)\n        h[1,0] = self.chi*np.sin(theta)*np.exp(-1j*phi)\n        \n        return -0.5*self.epsilon*self.Omega*h\n    \n    \n    def minimize_energy(self, num_iter=10000):\n        \n        # pick small initial tau = (theta, phi)\n        tau = np.random.normal(0.0, 0.1, 2)\n        \n        # initialize adam optimizer\n        self.m = np.zeros(2)\n        self.v = np.zeros(2)\n        \n        # start minimizing\n        for self.t in range(1, num_iter+1):\n            \n            E = self.get_energy(tau)\n            grad = self.get_gradient_energy(tau)\n            tau = self.update_tau(tau, grad)\n        \n        return tau\n    \n    \n    def minimize_signature_projected_energy(self, r, num_iter=10000):\n        \n        # pick small initial tau = (theta, phi)\n        tau = np.random.normal(0.0, 0.1, 2)\n        \n        # initialize adam optimizer\n        self.m = np.zeros(2)\n        self.v = np.zeros(2)\n        \n        # start minimizing\n        for self.t in range(1, num_iter+1):\n            \n            Er = self.get_signature_projected_energy(r, tau)\n            grad = self.get_gradient_projected_energy(r, tau)\n            tau = self.update_tau(tau, grad)\n\n        return tau\n    \n    def get_energy(self, tau):\n        \n        theta, phi = tau[0], tau[1]\n        E = np.cos(theta)+0.5*self.chi*(np.sin(theta)**2)*np.cos(2*phi);\n        \n        return -0.5*self.epsilon*self.Omega*E\n    \n    \n    def get_gradient_energy(self, tau):\n        \n        theta, phi = tau[0], tau[1]\n        factor = 0.5*self.epsilon*self.Omega*np.sin(theta)\n        dE_dtheta = factor*(1-self.chi*np.cos(theta)*np.cos(2*phi))\n        dE_dphi = 
factor*self.chi*np.sin(theta)*np.sin(2*phi)\n\n return np.array([dE_dtheta, dE_dphi])\n \n def get_weight(self, r, tau):\n \n theta = tau[0]\n a = 1.0+r*self.r_gs*(np.cos(theta))**(self.Omega-2)\n b = 1.0+r*self.r_gs*(np.cos(theta))**self.Omega\n \n if a < self.err and b < self.err:\n return float((self.Omega-2))/float(self.Omega)\n \n else:\n return (a+self.err)/(b+self.err)\n \n def get_gradient_weight(self, r, tau):\n \n theta = tau[0]\n a = 2*(1+r*self.r_gs*(np.cos(theta))**self.Omega)-self.Omega*(np.sin(theta))**2\n a *= r*self.r_gs*np.sin(theta)*(np.cos(theta))**(self.Omega-3)\n b = (1+r*self.r_gs*(np.cos(theta))**self.Omega)**2\n \n if a < self.err and b < self.err:\n return np.array([theta*float((self.Omega-2))/float(self.Omega), 0])\n \n return np.array([(a+self.err)/(b+self.err), 0])\n \n\n def get_signature_projected_energy(self, r, tau):\n\n return self.get_energy(tau)*self.get_weight(r, tau)\n \n \n def get_gradient_projected_energy(self, r, tau):\n \n E = self.get_energy(tau)\n W = self.get_weight(r, tau)\n gradE = self.get_gradient_energy(tau)\n gradW = self.get_gradient_weight(r, tau)\n \n return E*gradW + W*gradE\n \n\n def update_tau(self, tau, gradient, eta0=0.001, beta1=0.9, beta2=0.999, epsilon=1.0E-8):\n \n eta = eta0*np.sqrt(1.0-beta2**self.t)/(1.0-beta1**self.t)\n self.m = beta1*self.m+(1.0-beta1)*gradient;\n self.v = beta2*self.v+(1.0-beta2)*np.square(gradient);\n tau -= eta*np.divide(self.m, np.sqrt(self.v)+epsilon)\n self.t += 1\n \n return tau\n \n\n\n","repo_name":"kim-jane/NuclearManyBody","sub_path":"src/lipkin/hf.py","file_name":"hf.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33568460123","text":"\"\"\"\nRInChI Object Orientated Molecule Class Module\n----------------------------------------------\n\nThis module contains the Molecule class and associated functions\n\nModifications:\n\n - B. Hammond 2014\n\n - D. Hampshire 2017\n\n Significant restructuring of the class to gain more consistent and less verbose code.\n\"\"\"\n\nimport copy\nimport re\nfrom collections import Counter, deque\nfrom itertools import zip_longest\n\nfrom numpy.linalg import matrix_rank\n\nfrom .atom import Atom\n\n\nclass Molecule:\n \"\"\"\n A class containing a molecule as defined by an inchi. 
Contains functions for generating edge lists and node edge\n tables describing molecular graphs, and functions that use molecular graphs to calculate information about the\n molecules - ring sizes, atom hybridisation, contained functional groups etc.\n \"\"\"\n\n def __init__(self, inchi):\n self.inchi = inchi.rstrip()\n\n if \";\" in self.inchi or \"*\" in self.inchi:\n raise ValueError('Composite InChI detected - please use the Molecule.new() method')\n\n self.atoms = {} # dictionary of atom objects\n self.formula = None\n self.formula_dict = {}\n self.edge_list = None\n self.fingerprint = None\n self.is_simple = None\n self.conlayer = None\n self.atom_bonds_set = False\n\n # Ring counting\n self.rings = []\n self.ring_count = None\n self.ring_count_by_atoms = None\n self.ring_permutations = None\n self.has_searched_rings = False\n self.number_of_rings = None\n\n # Matching flag\n self.matched_in_layer = False\n\n # Perform initialisation\n self.init_level = None\n self.initialize()\n\n def __iter__(self):\n for atom in self.atoms.values():\n yield atom\n\n def __getitem__(self, key):\n return self.atoms[key]\n\n def __str__(self):\n return \"\".format(self.inchi)\n\n def __repr__(self):\n return \"\".format(self.inchi)\n\n def initialize(self):\n \"\"\"\n Initialises the molecule\n \"\"\"\n self.conlayer = self.inchi_to_layer(\"c\")\n\n if self.conlayer:\n self.number_of_rings = self.count_rings()['rings']\n else:\n self.number_of_rings = 0\n\n if not self.formula_dict:\n self.chemical_formula_to_dict()\n self.set_atoms()\n if self.conlayer:\n self.calculate_edges()\n self.set_atomic_hydrogen()\n\n @staticmethod\n def composite_to_simple(inchi):\n \"\"\"\n Splits an inchi with multiple disconnected components into a list of connected inchis\n\n # Modified 2017 D Hampshire to split formula of multiple identical components\n # cf. 
http://www.inchi-trust.org/technical-faq/#5.6\n\n Args:\n inchi: A inchi (usually composite\n\n Returns:\n A list of simple inchis within the composite inchi argument\n \"\"\"\n\n # Separate the input InChI into the header, formula, and other layers\n layers = inchi.split(\"/\")\n header = layers[0]\n formula = layers[1]\n remainder = layers[2:]\n\n def formula_multiples(formula):\n \"\"\"\n Generates the multiple formula strings if of the form 2C6H12\n \"\"\"\n formulas = formula.split(\".\")\n multiples = []\n for fm in formulas:\n assert isinstance(fm, str)\n if fm[0].isdigit():\n cnt, form = re.match('([0-9]+)(.*)', fm).groups()\n multiples.append([[form] * int(cnt), fm])\n for new_list, fm in multiples:\n insert_list(formulas, fm, new_list)\n return formulas\n\n def layer_splitter(comps, prefix):\n \"\"\"\n Generates multiple formula strings\n \"\"\"\n multiples = []\n for comp in comps:\n assert isinstance(comp, str)\n if '*' in comp:\n cnt, comp_new = comp.split('*')\n multiples.append([[comp_new] * int(cnt), comp])\n\n for new_list, fm in multiples:\n insert_list(comps, fm, new_list)\n\n comps = [(prefix + item) if (item != \"\") else \"\" for item in comps]\n return comps\n\n def insert_list(lst, original, new):\n \"\"\"\n Inserts list items at the position of a single item\n \"\"\"\n lstindex = lst.index(original)\n lst.remove(original)\n lst[lstindex:lstindex] = new\n return lst\n\n formula = formula_multiples(formula)\n individuals = [formula]\n\n # Formula is split on '.', other layers are split on ';'\n\n for l in remainder:\n prefix = l[0]\n ls = l[1:].split(\";\")\n ls = layer_splitter(ls, prefix)\n individuals.append(ls)\n\n # Transposes a list of split lists into a list of split inchis\n individuals = list(zip_longest(*individuals, fillvalue=\"\"))\n\n # Inchis are reassembled and returned\n def gen_lists(ind):\n for i in ind:\n yield [j for j in i if j]\n\n ret = (header + \"/\" + \"/\".join(x) for x in gen_lists(individuals) if x)\n return ret\n\n @staticmethod\n def new(inchi):\n \"\"\"\n Creates a list of new Molecule objects. Safer than Molecule() due to composite InChI implications.\n\n Args:\n inchi: An InChI string\n\n Returns:\n list of Molecule objects.\n\n \"\"\"\n if \";\" in inchi or \"*\" in inchi:\n return [Molecule(inch) for inch in Molecule.composite_to_simple(inchi)]\n else:\n return [Molecule(inchi)]\n\n #####################################################################\n # Generating molecular properties, ie. 
molecular graph, chemical formula\n #####################################################################\n\n def inchi_to_layer(self, l):\n \"\"\"\n Get a particular layer of the InChI\n\n Args:\n l: The layer of the InChI to retrieve\n\n Returns:\n The InChI layer desired\n \"\"\"\n layers = self.inchi.split(\"/\")\n for layer in layers:\n if layer.startswith(l):\n return layer[1:]\n else:\n return None\n\n def inchi_to_chemical_formula(self):\n \"\"\"\n Converts an Inchi to a Chemical formula\n\n Returns:\n The Chemical Formula of the Molecule as a string\n \"\"\"\n layers = self.inchi.split(\"/\")\n return layers[1]\n\n def chemical_formula_to_dict(self):\n \"\"\"\n Get the chemical formula as a dict\n\n Returns:\n A dict with elements as keys and number of atoms as value\n \"\"\"\n result = {}\n if not self.formula:\n self.formula = self.inchi_to_chemical_formula()\n\n if self.formula_dict:\n return\n\n # Find all elemental formulae followed by numbers and match the element to the count\n multi_elements = re.findall(r\"([A-Z][a-z]?\\d+)\", self.formula)\n for e in multi_elements:\n result[re.search(r\"([A-Z][a-z]?)\", e).group()] = int(re.search(r\"(\\d+)\", e).group())\n\n # Any elements with no following number are implicitly present only once\n single_elements = re.findall(r\"([A-Z][a-z]?)(?!\\d+)(?![a-z])\", self.formula)\n for e in single_elements:\n if e not in result.keys():\n result[e] = 1\n\n self.formula_dict = result\n\n def set_atoms(self):\n \"\"\"\n Sets the atoms objects with their appropriate indexes and elements for each of the instances of the the Atom\n class.\n \"\"\"\n fd = self.formula_dict.copy()\n fd.pop('H', None)\n elements = tuple(fd.keys())\n num_elements = sum(fd.values())\n\n if num_elements == 0:\n # Must be hydrogen only\n self.atoms = {1: Atom(1)}\n self.atoms[1].element = 'H'\n self.is_simple = True\n elif num_elements == 1:\n self.atoms = {1: Atom(1)}\n self.atoms[1].element = elements[0]\n self.is_simple = True\n elif num_elements >= 2:\n ordering = []\n ordered_atoms = []\n\n # In the canonical InChI labelling scheme, carbon is first, all other elements are arranged alphabetically,\n # excluding hydrogen\n if \"C\" in elements:\n ordering.append(\"C\")\n heteroatoms = sorted([a for a in elements if not (a == \"C\" or a == \"H\")])\n ordering += heteroatoms\n\n for e in ordering:\n ordered_atoms.extend([e] * fd[e])\n\n # Set index and elements\n for index in range(num_elements):\n self.atoms[index + 1] = Atom(index + 1)\n self.atoms[index + 1].element = ordered_atoms[index]\n self.is_simple = False\n\n if num_elements == 2 and not self.conlayer:\n # Solves problems when dealing with atoms which contain only two non hydrogen atoms and have no connectivity\n # layer, but their structure can be deduced.\n self.edge_list = [(1, 2)]\n self.conlayer = '1-2'\n self.inchi += '/c1-2'\n\n def set_atomic_hydrogen(self):\n \"\"\"\n Takes the molecular graph and the inchi, and sets the number of protons attached to each atom.\n\n Requires initialised atoms.\n \"\"\"\n h_layer = self.inchi_to_layer(\"h\")\n\n if not h_layer:\n return None\n\n # Currently ignoring mobile hydrogen - Eliminate mobile hydrogen, stored as bracketed sections of the string\n mobile_groups = re.findall(r\"\\(H(\\d?),([\\d,]+)\\)\", h_layer)\n mobile_protons = {}\n\n for num, indexes in mobile_groups:\n if not num:\n num = 1\n centres = indexes.split(\",\")\n for centre in centres:\n mobile_protons[int(centre)] = (int(num), len(centres))\n\n h_layer = re.sub(r\"\\([\\d\\-,]+\\)\", \"\", 
h_layer)\n\n # Split the proton layer by the number of protons being attached to each atom\n list_by_proton = re.findall(r\"(? 100:\n print(self.inchi)\n print(\"Error - TIMEOUT2\")\n return None\n # Add correct molecular edges for comma separated pairs of values MAY work for centres with a valence\n # greater than 4\n pairs = re.findall(r\"(?=\\b(\\d+)\\([\\d\\-!]+,(\\d+))\", conlayer_comma)\n\n for p in pairs:\n edges.append(tuple(sorted(map(int, p))))\n conlayer_comma = re.sub(r\"\\b(\\d+\\([\\d\\-!]+),\\d+\", r\"\\1\", conlayer_comma)\n conlayer_comma = re.sub(r\"\\([\\d\\-!]+\\)\", \"!\", conlayer_comma)\n\n # All pairs of numbers separated by - or ( are edges of the molecular graph\n pairs = re.findall(r\"(?=(\\b\\d+[\\-(]\\d+\\b))\", conlayer_mut)\n for p in pairs:\n edges.append(tuple(sorted(map(int, re.findall(r\"\\d+\", p)))))\n\n # While there is still a layer of parenthesis remaining, eliminate the lowest layer, and join together the\n # atoms on either side of the parenthesis group\n while \"(\" in conlayer_mut:\n timeout += 1\n if timeout > 100:\n print(\"Error - TIMEOUT3\")\n return None\n conlayer_mut = re.sub(r\"\\([\\d\\-!,]+\\)\", \"!\", conlayer_mut)\n pairs = re.findall(r\"(?=(\\b\\d+!\\d+\\b))\", conlayer_mut)\n for p in pairs:\n new_edge = tuple(sorted(map(int, re.findall(r\"\\d+\", p))))\n if new_edge not in edges:\n edges.append(new_edge)\n return edges\n\n def calculate_edges(self, edge_list=None):\n \"\"\"\n Sets the node-edge graph as a dict.\n\n Args:\n edge_list: A molecular graph as a list of edges. If no list is passed, the function sets the atoms for its\n own instance.\n \"\"\"\n if edge_list is None:\n if self.edge_list is None:\n self.edge_list = self.generate_edge_list()\n edge_list = self.edge_list\n\n # Add bonds to the atom objects\n for edge in edge_list:\n self.atoms[edge[0]].bonds.append(edge[1])\n self.atoms[edge[1]].bonds.append(edge[0])\n self.atom_bonds_set = True\n\n return edge_list\n\n @staticmethod\n def edges_to_atoms(ls):\n \"\"\"\n Sets the node-edge graph as a dict.\n\n Args:\n ls: A molecular graph as a list of edges. If no list is passed, the function sets the atoms for its\n own instance.\n \"\"\"\n\n llist = {atom: Atom(atom) for edge in ls for atom in edge}\n\n # Store the molecular graph in node-edge format\n for edge in ls:\n llist[edge[0]].bonds.append(edge[1])\n llist[edge[1]].bonds.append(edge[0])\n return llist\n\n #######################################################################\n # RING FINDING METHODS\n #######################################################################\n\n def depth_first_search(self, start=1):\n \"\"\"\n Performs a DFS over the molecular graph of a given Molecule object, returning a list of edges that form a\n spanning tree (tree edges), and a list of the edges that would cyclise this spanning tree (back edges)\n\n The number of back edges returned is equal to the number of rings that can be described in the molecule\n\n Args:\n start: Set which atom should be the starting node\n\n Returns:\n tree_edges: A list of tree edges.\n back_edges: A list of back edges. 
The list length is equal to the smallest number of cycles that can\n describe the cycle space of the molecular graph\n\n \"\"\"\n\n # Copy of the atom list that will be destroyed\n llist_mut = copy.deepcopy(self.atoms)\n\n # Initialise the starting node\n starting_node = start\n current_node = starting_node\n\n # List and a stack to store the nodes visited\n node_stack = [starting_node]\n nodes_visited = [starting_node]\n\n # Sorts the edges of the molecular graph into tree edges and back edges Back edges cyclise the molecular\n # graph, and so each back edge corresponds to a cycle\n tree_edges = []\n back_edges = []\n\n # Main Algorithm\n while node_stack:\n\n # If the current node has any untraversed edges\n if llist_mut[current_node].bonds:\n for child in llist_mut[current_node].bonds:\n current_edge = [current_node, child]\n\n # If the current node has a previously visited node as a child, this must be a back edge,\n # forming a cycle. Otherwise, this is a tree edge to an unexplored node, and the current node is\n # changed to this node.\n if child in nodes_visited:\n back_edges.append(current_edge)\n llist_mut[current_node].bonds.remove(child)\n llist_mut[child].bonds.remove(current_node)\n else:\n nodes_visited.append(child)\n tree_edges.append(current_edge)\n\n llist_mut[current_node].bonds.remove(child)\n llist_mut[child].bonds.remove(current_node)\n\n node_stack.append(child)\n current_node = child\n break\n\n # If the current node has no unvisited children, check the parent node.\n else:\n node_stack.pop()\n if node_stack:\n current_node = node_stack[-1]\n\n return tree_edges, back_edges\n\n @staticmethod\n def breadth_first_search(graph, start, finish):\n \"\"\"\n Get the shortest path between the start and finish nodes\n\n Adapted from http://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/,\n accessed 06/11/2014\n\n Args:\n graph: an unweighted, undirected vertex-edge graph as a list\n start: the starting node\n finish: the finishing node as\n\n Returns:\n The shortest path as a list\n\n \"\"\"\n\n # Collections.deque is a doubly linked list - supports fast addition and removal to either end of the list\n queue = deque([(start, [start])])\n while queue:\n (vertex, path) = queue.popleft()\n for nxt in set(graph[vertex].bonds) - set(path):\n if nxt == finish:\n\n # Target node has been found, return the path\n return path + [nxt]\n else:\n queue.append((nxt, path + [nxt]))\n\n def find_shortest_path(self, graph, start, end, path=None):\n \"\"\"\n Recursively iterates over the entire molecular graph, yielding the shortest path between two points\n\n Adapted from https://www.python.org/doc/essays/graphs/, accessed 15/10/2014\n\n Args:\n graph: an unweighted, undirected vertex-edge graph as a list\n start: the starting node as a number\n end: the finishing node as a number\n path: latest iteration of the path\n\n Returns:\n The shortest path as a list of indices\n \"\"\"\n\n if path is None:\n path = []\n path = path + [start]\n\n # Once at the target node, go no further\n if start == end:\n return path\n\n shortest_path = []\n\n # Iterates recursively over all non-cyclic paths linking the target and final nodes If a path is smaller than\n # the previous smallest path, it replaces the smallest path.\n for child in graph[start].bonds:\n if child not in path:\n new = self.find_shortest_path(graph, child, end, path)\n if new:\n if not shortest_path or len(new) < len(shortest_path):\n shortest_path = new\n return shortest_path\n\n def 
find_rings_from_back_edges(self):\n \"\"\"\n Accepts output from the depth_first_search algorithm, returns a list of all rings within the molecule.\n\n Will NOT find a minimum cycle basis, but can be used to find an initial cycle set when performing the Horton\n Algorithm (see elsewhere)\n \"\"\"\n\n # Initialise list of all rings in the molecule\n rings_list = []\n tree_edges, back_edges = self.depth_first_search()\n\n for edge in back_edges:\n start, end = edge\n\n partial_edges = [e for e in (tree_edges + back_edges)]\n partial_edges.remove(edge)\n\n partial_graph = self.edges_to_atoms(partial_edges)\n\n path = self.find_shortest_path(partial_graph, start, end)\n\n rings_list.append(path)\n self.rings = rings_list\n\n def find_initial_ring_set(self):\n \"\"\"\n For every edge in the molecule, find the smallest ring is it a part of, add it to a list\n NEEDS REIMPLEMENTATION\n\n Returns:\n list of all minimal rings, sorted by the number of edges they contain\n \"\"\"\n # Ensure that the molecular graph was calculated\n cycles = []\n for edge in self.edge_list:\n remainder = [e for e in self.edge_list if not e == edge]\n if not remainder:\n break\n try:\n path = self.breadth_first_search(self.edges_to_atoms(remainder), edge[0], edge[1])\n if path:\n cycles.append(self.edge_list_to_vector(self.path_to_cycle_edge_list(path)))\n except KeyError:\n pass\n\n # Return all minimal rings, sorted by the number of edges they contain\n return sorted(cycles, key=sum)\n\n def find_initial_ring_set_trial(self):\n \"\"\"\n For every edge in the molecule, find the smallest ring is it a part of, add it to a list\n TRIAL REIMPLEMENTATION, NOT YET WORKING\n\n Returns:\n list of all minimal rings, sorted by the number of edges they contain\n \"\"\"\n cycles = []\n for edge in self.edge_list:\n remainder = [e for e in self.edge_list if not e == edge]\n if not remainder:\n break\n for node in self.atoms.keys():\n try:\n path_a = self.find_shortest_path(self.edges_to_atoms(remainder), edge[0], node)\n if path_a:\n path_b = self.find_shortest_path(self.edges_to_atoms(remainder), node, edge[1])\n if path_b and (len(set(path_a).intersection(path_b)) == 1):\n cycles.append(self.edge_list_to_vector(self.path_to_cycle_edge_list(path_a + path_b)))\n except KeyError:\n pass\n\n # Return all minimal rings, sorted by the number of edges they contain\n return sorted(cycles, key=sum)\n\n def find_linearly_independent(self, cycles):\n \"\"\"\n Given a list of candidate cycles, sorted by size, this function attempts to find the smallest,\n linearly independent basis of cycles that spans the entire cycle space of the molecular graph - the Minimum\n Cycle Basis.\n\n Args:\n cycles: list of candidate cycles sorted by size\n\n Returns:\n None\n \"\"\"\n\n # Calculates the minimal cycle basis for an inputted sorted cycle space\n minimum_cycle_basis = []\n for cycle in cycles:\n\n # If all the rings have been found, stop\n if len(minimum_cycle_basis) == self.number_of_rings:\n break\n\n # Try adding each cycle to the basis\n matrix = (minimum_cycle_basis + [cycle])\n\n # If the rank of the basis has not increased, the new cycle is linearly dependent on the\n # current basis, and so is not a member of the MCB\n if matrix_rank(matrix) == len(minimum_cycle_basis) + 1:\n minimum_cycle_basis.append(cycle)\n # elif cycle == [sum(i) % 2 for i in zip(*minimum_cycle_basis)]:\n # print(cycle, minimum_cycle_basis)\n\n s = [self.edge_list_to_atoms_spanned(self.vector_to_edge_list(x)) for x in minimum_cycle_basis]\n self.rings = s\n return 
s\n\n def edge_list_to_vector(self, subset):\n \"\"\"\n Converts an edge list to a vector in the (0, 1)^N vector space spanned by the edges of the molecule\n\n Args:\n subset: The vector subset to use\n\n Returns:\n The vector stored as a list.\n \"\"\"\n vector = []\n for edge in self.edge_list:\n if edge in subset:\n vector.append(1)\n elif [edge[1], edge[0]] in subset:\n vector.append(1)\n else:\n vector.append(0)\n return vector\n\n @staticmethod\n def path_to_cycle_edge_list(path):\n \"\"\"\n Converts a cycle described by an ordered list of nodes to an edge list\n\n Args:\n path: The path of the cycle stored as an ordered list\n\n Returns:\n The edge list\n \"\"\"\n edges = []\n for i in range(len(path)):\n try:\n edges.append(tuple(sorted((path[i], path[i + 1]))))\n except IndexError:\n edges.append(tuple(sorted((path[i], path[0]))))\n return edges\n\n def vector_to_edge_list(self, vector):\n \"\"\"\n Takes an edge vector and returns an edge list\n\n Args:\n vector: an edge vector stored in an iterable\n\n Returns:\n The edge list\n \"\"\"\n ls = []\n for i in range(len(vector)):\n if vector[i]:\n ls.append(self.edge_list[i])\n return ls\n\n @staticmethod\n def edge_list_to_atoms_spanned(edge_list):\n \"\"\"\n Takes an edge list and returns a list of atoms spanned\n\n Args:\n edge_list: An edge list\n\n Returns:\n A list of all the keys for the atoms which are spanned by the edge list.\n \"\"\"\n atoms = {}\n for edge in edge_list:\n atoms[edge[0]] = 1\n atoms[edge[1]] = 1\n return list(atoms.keys())\n\n def calculate_rings(self):\n \"\"\"\n Sets the ring count property which contains the ring sizes in the format { ring size : number of rings\n present, ...}\n \"\"\"\n c = self.inchi_to_layer(\"c\")\n if not c or \";\" in self.inchi_to_layer(\"c\"):\n self.ring_count = Counter()\n\n if not self.has_searched_rings:\n if self.number_of_rings:\n self.has_searched_rings = True\n self.find_linearly_independent(self.find_initial_ring_set())\n if self.rings:\n count = Counter([len(ring) for ring in self.rings])\n self.ring_count = count\n else:\n self.ring_count = Counter()\n\n @staticmethod\n def _generate_permutation_sets(ring):\n ring_perms = []\n ring_d = deque(ring)\n for __ in range(2):\n for _ in range(len(ring)):\n ring_perms.append(\"\".join(ring_d))\n ring_d.rotate()\n ring_d.reverse()\n return ring_perms[0], set(ring_perms)\n\n def calculate_rings_by_atoms(self):\n \"\"\"\n Count the rings by atom list eg. 
\"CCCCCN\" will return the number of pyridine fragments in the molecule.\n\n Returns:\n number of rings\n \"\"\"\n\n if not self.has_searched_rings:\n self.calculate_rings()\n\n count = Counter()\n\n all_perms_sets = {}\n # For each ring in the molecule\n\n rings = []\n for ring in self.rings:\n rings.append(\"\".join([self.atoms[a].element for a in ring]))\n\n for ring in rings:\n if all_perms_sets:\n for ring_set, values in all_perms_sets.items():\n if ring in values:\n count[ring_set] += 1\n break\n else:\n name, data = self._generate_permutation_sets(ring)\n count[name] += 1\n all_perms_sets.update({name: data})\n else:\n name, data = self._generate_permutation_sets(ring)\n count[name] += 1\n all_perms_sets.update({name: data})\n\n self.ring_permutations = all_perms_sets\n self.ring_count_by_atoms = count\n\n ################\n # Analysis\n ################\n\n def get_ring_count(self):\n \"\"\"\n Get the ring count\n\n Returns:\n a Counter object containing the number of rings of each size\n \"\"\"\n if self.atoms:\n self.calculate_rings()\n return self.ring_count\n else:\n return Counter()\n\n def has_isotopic_layer(self):\n \"\"\"\n Does the molecule inchi have an isotopic layer?\n\n Returns:\n A boolean value\n \"\"\"\n if self.inchi_to_layer(\"i\"):\n return True\n else:\n return False\n\n def get_hybrid_count(self):\n \"\"\"\n Calculate the hybridisation of each atom\n\n Returns:\n A Counter object containing the hybridisation of the atoms\n \"\"\"\n if self.atoms:\n self.set_atomic_hydrogen()\n return Counter([a.get_hybridisation() for a in self.atoms.values()])\n else:\n return Counter()\n\n def get_valence_count(self):\n \"\"\"\n Calculates the valences of each atom in the Molecule\n\n Returns:\n A Counter object containing the valences of the atoms\n \"\"\"\n if self.atoms:\n self.set_atomic_hydrogen()\n return Counter([a.get_valence() for a in self.atoms.values()])\n else:\n return Counter()\n\n def get_ring_count_inc_elements(self):\n \"\"\"\n Count the rings of a molecule. 
Result includes the elements of the ring.\n\n Returns:\n a Counter containing the number of rings of each size and the elements contained by a ring\n \"\"\"\n self.calculate_rings_by_atoms()\n return self.ring_count_by_atoms\n\n def get_formula(self):\n \"\"\"\n Get chemical empirical formula\n\n Returns:\n Chemical formula stored as a counter\n \"\"\"\n self.chemical_formula_to_dict()\n return Counter(self.formula_dict)\n\n def count_sp3(self, wd=False, enantio=False):\n \"\"\"\n Count the number of sp3 stereocentres in a molecule.\n\n Args:\n wd: Whether or not the stereocentre must be well-defined to be counted.\n enantio: Whether or not the structure must be enantiopure to be counted.\n\n Returns:\n The number of sp3 stereocentres in the structure.\n \"\"\"\n sp3_centre_count = 0\n\n # Split the inchi into layers\n inchi_layers = self.inchi.split('/')\n\n # Collate all the sp3 stereochemistry layers\n sp3_layers = []\n for index, layer in enumerate(inchi_layers):\n if layer.startswith('t'):\n stereolayer = [layer]\n try:\n if inchi_layers[index + 1].startswith('m'):\n stereolayer.append(inchi_layers[index + 1])\n try:\n if inchi_layers[index + 2].startswith('s'):\n stereolayer.append(inchi_layers[index + 2])\n except IndexError:\n pass\n except IndexError:\n pass\n try:\n if inchi_layers[index + 1].startswith('s'):\n stereolayer.append(inchi_layers[index + 1])\n except IndexError:\n pass\n sp3_layers.append(stereolayer)\n\n # If there are no sp3_layers at all, then there is no sp3 stereochemistry.\n if not sp3_layers:\n return sp3_centre_count\n\n # If enantio is specified, check for '/s2' (relative stereochemistry) or '/s3' (racemic) flags are present. If\n # either are, the sp3 stereochemistry is not enantiomeric.\n if enantio:\n for layer in sp3_layers:\n for sublayer in layer:\n if sublayer.startswith('s2') or sublayer.startswith('s3'):\n return sp3_centre_count\n\n # If enantio is not specified (or if enantio is specified and there is no \"/s2\" or \"/s3\" flag), count and return\n # the stereocentres.\n for sp3_layer in sp3_layers:\n\n # Consider only the \"/t\" layer, and discard the \"t\" flag.\n t_layer = sp3_layer[0][1:]\n\n # Consider multi-component salts.\n t_layer_by_components = t_layer.split(';')\n for component in t_layer_by_components:\n\n # Components without stereochemistry will have empty stereolayers.\n if component:\n sp3_centres = component.split(',')\n\n # Consider stoichiometry.\n multiplier = 1\n if component[1] == '*':\n multiplier = int(component[0])\n for sp3_centre in sp3_centres:\n\n # If wd is specified, only count those stereocentres which\n # aren't \"u\" (undefined) or \"?\" (omitted).\n if wd:\n if sp3_centre[-1] not in 'u?':\n sp3_centre_count += multiplier\n else:\n sp3_centre_count += multiplier\n return sp3_centre_count\n\n def count_sp2(self, wd=False):\n \"\"\"\n Count the number of sp2 stereocentres.\n\n Args:\n wd: Whether or not the stereocentre must be well-defined to be counted.\n\n Returns:\n sp2_centre_count: The number of sp2 stereocentres in the structure.\n \"\"\"\n sp2_centre_count = 0\n\n # Split the inchi into layers.\n\n inchi_layers = self.inchi.split('/')\n # Collate the sp2 layers.\n sp2_layers = []\n for layer in inchi_layers:\n if layer.startswith('b'):\n sp2_layers.append(layer[1:])\n\n # If there are no sp2 layers, the molecule has no sp2 stereochemistry.\n if not sp2_layers:\n return sp2_centre_count\n for sp2_layer in sp2_layers:\n\n # Discard the \"d\" flag.\n sp2_layer = sp2_layer[1:]\n\n # Consider multi-component 
salts.\n            sp2_layer_by_components = sp2_layer.split(';')\n            for component in sp2_layer_by_components:\n\n                # Components without stereochemistry will have empty stereolayers\n                if component:\n                    sp2_centres = component.split(',')\n\n                    # Consider stoichiometry.\n                    multiplier = 1\n                    if component[1] == '*':\n                        multiplier = int(component[0])\n                    for sp2_centre in sp2_centres:\n\n                        # If wd is specified, only count those stereocentres which\n                        # aren't \"u\" (undefined) or \"?\" (omitted).\n                        if wd:\n                            if sp2_centre[-1] not in 'u?':\n                                sp2_centre_count += multiplier\n                        else:\n                            sp2_centre_count += multiplier\n        return sp2_centre_count\n\n    def count_rings(self):\n        \"\"\"\n        Count the number of rings in an InChI.\n\n        Returns:\n            ring_count: The number of rings in the InChI.\n        \"\"\"\n        count = Counter()\n        conlayer = self.inchi_to_layer('c')\n        if conlayer is None:\n            return count\n\n        # For each component, count the number of rings and add it to the total.\n        ring_count = 0\n        # Simple species do not have a connectivity layer.\n        if conlayer:\n\n            # Consider stoichiometry\n            multiplier = 1\n            if conlayer[1] == '*':\n                multiplier = int(conlayer[0])\n                conlayer = conlayer[2:]\n            atoms = (conlayer.replace('(', '-').replace(')', '-').replace(',', '-').split('-'))\n            for index, atom in enumerate(atoms):\n                if atom in atoms[:index]:\n                    ring_count += multiplier\n        count['rings'] = ring_count\n        if count['rings'] != 0:\n            count['molecules'] = 1\n        return count\n\n    def count_centres(self, wd=False, sp2=True, sp3=True):\n        \"\"\"\n        Counts the centres contained within an inchi\n\n        Args:\n            wd: Whether or not the stereocentre must be well-defined to be counted.\n            sp2: Count sp2 centres\n            sp3: Count sp3 centres\n\n        Returns:\n            stereocentres: The number of stereocentres\n            stereo_mols: The number of molecules with stereocentres\n\n        \"\"\"\n        count = Counter(molecules=0)\n        if sp2:\n            count['sp2'] = self.count_sp2(wd)\n        if sp3:\n            count['sp3'] = self.count_sp3(wd)\n        count['stereocentres'] = count['sp2'] + count['sp3']\n        if count['stereocentres'] != 0:\n            count['molecules'] = 1\n        return count\n","repo_name":"dfhampshire/RInChI","sub_path":"rinchi_tools/molecule.py","file_name":"molecule.py","file_ext":"py","file_size_in_byte":38259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"8827618135","text":"\n#duty arc\nfrom csp_wizard.node import Node\nimport arc\ndef create_odd_node(tasks, position, pi):\n    # Create node\n    id = tasks[position].id\n    current_task = tasks[position]\n    node = Node(id = id*2-1,\n                arcs =[arc.Arc(cost = -pi[position],\n                               travel_time = current_task.time_end -current_task.time_start,\n\n                               start_point = id*2-1,\n                               end_point = id*2)])\n    return node\n#connecting arc\ndef create_even_node(tasks, position, pi, T_idle, T_drive):\n    # Create node\n    id = tasks[position].id\n    current_task = tasks[position]\n    #create arcs \n    arcs = []\n    for next_position in range(position+1,len(tasks)):\n        next_id = tasks[next_position].id\n        next_task = tasks[next_position]\n        if (next_task.time_start >= current_task.time_end\n            and next_task.time_start <= current_task.time_end + T_idle):\n            arcs.append(arc.Arc(cost = -pi[next_position],\n                                travel_time = 0,\n                                start_point = id*2,\n                                end_point = next_id*2-1))\n    arcs.append(arc.Arc(cost = 0,\n                        travel_time = 0,\n                        start_point = id*2,\n                        end_point = len(tasks)*2+1,\n                        ))\n    node = Node(id = id*2,\n                arcs = arcs)\n    \n    return node\ndef create_starting_node(tasks, pi, T_idle, T_drive):\n    arcs = []\n    for position in range(len(tasks)):\n        next_task = tasks[position]\n        arcs.append(arc.Arc(cost = 
-pi[position],\n                             travel_time = 0,\n                             start_point = 0,\n                             end_point = next_task.id*2-1))\n    return Node(id = 0, arcs = arcs)\n    \ndef create_graph_min_n_drivers(tasks, pi, T_idle, T_drive):\n    nodes = []\n    #create the first node\n    nodes.append(create_starting_node(tasks, pi, T_idle, T_drive))\n    #create the odd nodes\n    for position in range(len(tasks)):\n        nodes.append(create_odd_node(tasks, position, pi))\n        nodes.append(create_even_node(tasks, position, pi, T_idle, T_drive))\n    #create the last node\n    nodes.append(Node(id = len(tasks)*2+1, arcs = []))\n    return nodes\n\ndef create_idle_nodes_from_min_n_driver_nodes(nodes, tasks):\n    for node in nodes:\n        #must be a connecting arc\n        if node.id % 2 == 0 and node.id != 0 and node.id != len(tasks)*2+1:\n            for arc in node.arcs:\n                position_source = arc.start_point//2-1\n                if(arc.end_point == len(tasks)*2+1):\n                    continue\n                position_destination = arc.end_point//2\n                arc.cost += (tasks[position_destination].time_start - \n                             tasks[position_source].time_end)\n\n\n\n\n\n\n\n","repo_name":"phanlehoang/column_generation","sub_path":"app/column_hobbit/create_graph_min_n_drivers.py","file_name":"create_graph_min_n_drivers.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20019997912","text":"from bs4 import BeautifulSoup as bs\nfrom typing import List\nimport scraper_scripts.utils as utils\nimport os\nfrom dotenv import load_dotenv\nimport re\n\n\ndef get_data(html_data: bytes) -> List[dict]:\n    if not html_data:\n        print(f\"Error while getting data - {FILE_NAME}\")\n        raise Exception\n    \n    soup = bs(html_data, \"html.parser\")\n    elements = soup.find_all(name='div', attrs='product-thumb')\n    # phone number\n    info_elements = soup.find_all(name='div', attrs='center-info-title')\n    for element in info_elements:\n        if element.a:\n            phone_number_raw = element.a.text.strip()\n            phone_number_clean = re.findall('3412', phone_number_raw)\n            if phone_number_clean:\n                phone_number = phone_number_raw\n\n    shawarma_data = []\n\n    for element in elements:\n        data = {}\n        try:\n\n            # title\n            title = element.find(name='a', attrs='product-title').text.strip()\n            data.update({\"title\": title})\n            \n            # weight\n            weight = element.find(name='div', attrs='food__feature').text.strip()\n            data.update({\"weight\": weight[:-2]})\n            \n            # price\n            price = element.find(name='div', attrs='food__price').text.strip()\n            data.update({\"new_price\": price[:-2]})\n            \n            # img\n            img = element.find(name='img').get('data-src')\n            data.update({\"img\": img})\n            \n            # link\n            data.update({\"link\": URL})\n            \n            # phone number\n            data.update({\"phone_number\": phone_number})\n\n            # website link\n            data.update({\"website_link\": URL_CLEAN})\n\n            # website title\n            data.update({\"website_title\": \"Panda Grill\"})\n\n            # ingredients\n            ingredients = element.find('p', {\"itemprop\": \"description\"}).text.strip()\n            data.update({\"ingredients\": ingredients})\n\n            # category\n            data.update({\"category\": \"shawarma\"})\n\n            shawarma_data.append(data)\n\n        except Exception as error:\n            print(f\"Error in {FILE_NAME} - {error}\")\n\n    return shawarma_data\n\n\ndef main():\n    try:\n        html_data = utils.get_html_page(URL)\n        pandagrill_data = get_data(html_data)\n\n        # utils.print_data(pandagrill_data)\n\n        with open(\"./html/\" + FILE_NAME + \".html\", \"w\") as file:\n            file.write(str(pandagrill_data))\n\n        print(f\"[!!][{FILE_NAME}] was updated\\tlength - {len(pandagrill_data)}\")\n        utils.save_json(pandagrill_data, FILE_NAME)\n        print(f\"[{FILE_NAME}] json file 
created\")\n \n except Exception as error:\n print(f\"[!!!] An error occurred: {error}\")\n\n\nif __name__ == \"__main__\":\n load_dotenv()\n URL = os.getenv('URL_PANDAGRILL')\n URL_CLEAN = os.getenv('URL_PANDAGRILL_CLEAN')\n FILE_NAME = os.getenv('FILE_NAME_PANDAGRILL')\n main()\n\n\n","repo_name":"MikeBeloborodov/discount-scraper-app-scripts","sub_path":"scraper_scripts/python_scripts/pandagrill.py","file_name":"pandagrill.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16917112648","text":"from torch import nn\nfrom torch.nn import functional as F\n\nimport fd_shifts.models.networks as networks\n\n\nclass ConfidNetAndEncoder(networks.network.Network):\n def __init__(self, cf):\n super().__init__()\n\n network = networks.get_network(cf.model.network.backbone)(cf)\n self._encoder = network.encoder\n self._classifier = network.classifier\n self.confid_net = ConfidNet(cf)\n\n @property\n def encoder(self) -> networks.network.DropoutEnablerMixin:\n return self._encoder\n\n @property\n def classifier(self) -> nn.Module:\n return self._classifier\n\n def forward(self, x):\n x = self.encoder(x)\n pred_class = self.classifier(x)\n pred_confid = self.confid_net(x)\n\n return pred_class, pred_confid\n\n def forward_features(self, x):\n return self.encoder(x)\n\n\nclass ConfidNet(nn.Module):\n def __init__(self, cf):\n super().__init__()\n\n confid_net_fc_dim = cf.model.confidnet_fc_dim\n self.uncertainty1 = nn.Linear(cf.model.fc_dim, confid_net_fc_dim)\n self.uncertainty2 = nn.Linear(confid_net_fc_dim, confid_net_fc_dim)\n self.uncertainty3 = nn.Linear(confid_net_fc_dim, confid_net_fc_dim)\n self.uncertainty4 = nn.Linear(confid_net_fc_dim, confid_net_fc_dim)\n self.uncertainty5 = nn.Linear(confid_net_fc_dim, 1)\n\n def forward(self, x):\n x = F.relu(self.uncertainty1(x))\n x = F.relu(self.uncertainty2(x))\n x = F.relu(self.uncertainty3(x))\n x = F.relu(self.uncertainty4(x))\n confid = self.uncertainty5(x)\n\n return confid\n","repo_name":"IML-DKFZ/fd-shifts","sub_path":"fd_shifts/models/networks/confidnet.py","file_name":"confidnet.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"25404307287","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGiven an initial set of parameters, it will create a parameter file and a copy of it for records.\n\"\"\"\nimport argparse\nimport os\nimport sys\nimport shutil\nimport numpy as np\nfrom scipy.optimize import minimize, basinhopping\nfrom collections import OrderedDict\nfrom subprocess import check_output\nfrom md_utils.md_common import (InvalidDataError, GOOD_RET, INPUT_ERROR, warning, IO_ERROR,\n TemplateNotReadableError, MISSING_SEC_HEADER_ERR_MSG, create_out_fname, read_tpl,\n conv_num, write_csv, conv_raw_val, move_existing_file)\nfrom md_utils.fill_tpl import (OUT_DIR, MAIN_SEC, TPL_VALS_SEC, TPL_EQS_SEC,\n TPL_VALS, TPL_EQ_PARAMS, NEW_FNAME, fill_save_tpl)\n\ntry:\n # noinspection PyCompatibility\n from ConfigParser import ConfigParser, MissingSectionHeaderError\nexcept ImportError:\n # noinspection PyCompatibility\n from configparser import ConfigParser, MissingSectionHeaderError\n\n# Constants #\n# config keys #\nRIGHT_SIDE_PENALTY = 'right_side_penalty'\nLEFT_SIDE_POTENTIAL = 'left_side_penalty'\nBASIN_HOP_MIN_MAX = 'basin_hop_min_max'\nVALID_SEC_NAMES = [MAIN_SEC, TPL_VALS_SEC, TPL_EQS_SEC, RIGHT_SIDE_PENALTY, 
LEFT_SIDE_POTENTIAL, BASIN_HOP_MIN_MAX]\nTRIAL_NAME = 'trial_name'\nPAR_TPL = 'par_tpl'\nPAR_COPY_NAME = 'par_copy'\nPAR_FILE_NAME = 'par_name'\nRESULT_FILE = 'driver_output_file_name'\nRESULT_COPY = 'driver_output_copy_name'\nFITTING_SUM_FNAME = 'fitting_summary_file_name'\nBEST_PARAMS_FNAME = 'best_params_file_name'\nLOWEST_RESID = 'max_resid_to_save'\nCOPY_DIR = 'copy_dir'\nBASH_DRIVER = 'bash_driver'\nCONV_CUTOFF = 'converge_tolerance'\nMAX_ITER = 'max_iterations'\nNUM_PARAM_DECIMALS = 'num_decimals'\nPRINT_INFO = 'print_status'\nSCIPY_OPT_METHOD = 'scipy_opt_method'\nPRINT_CONV_ALL = 'print_conv_all'\nPOWELL = 'powell'\nNELDER_MEAD = 'nelder-mead'\nBASIN_HOP = 'basinhopping'\nTESTED_SCIPY_MIN = [NELDER_MEAD, POWELL]\nTEMP = 'basin_hop_temp'\nBASIN_NITER = 'basin_hop_niter'\nNITER_SUCCESS = 'niter_success'\nBASIN_DEF_STEP = 'basin_default_step'\nBASIN_SEED = 'basin_random_seed'\nMINI_CYCLES = 'mini_cycles'\nTRIANGLE_MINI = 'triangle_mini'\n\n# for storing config data\nOPT_PARAMS = 'opt_params'\nRESID = 'resid'\nINITIAL_DIR = 'initial_dir'\nBASIN_HOPS = 'basin_hops'\nBASIN_MAXS = 'basin_maxs'\nBASIN_MINS = 'basin_mins'\n\n# Defaults\nDEF_CFG_FILE = 'conv_evb_par.ini'\nDEF_TPL = 'evb_par.tpl'\nDEF_CONV_CUTOFF = 1.0\nDEF_MAX_ITER = None\nDEF_PARAM_DEC = 6\nDEF_OPT_METHOD = POWELL\n# for setting up the \"direc\" option with Powell, i.e. direc=([1,0,0],[0,0.1,0],[0,0,1])\nDEF_DIR = 1.0\nDEF_PENALTY = 1000000.0\nDEF_CFG_VALS = {TRIAL_NAME: None, PAR_TPL: DEF_TPL, OUT_DIR: None, PAR_FILE_NAME: None,\n                PAR_COPY_NAME: None, COPY_DIR: None, CONV_CUTOFF: DEF_CONV_CUTOFF, MAX_ITER: DEF_MAX_ITER,\n                PRINT_INFO: False, NUM_PARAM_DECIMALS: DEF_PARAM_DEC, RESULT_FILE: None,\n                RESULT_COPY: None, OPT_PARAMS: [], SCIPY_OPT_METHOD: DEF_OPT_METHOD,\n                FITTING_SUM_FNAME: None, PRINT_CONV_ALL: False, MINI_CYCLES: 1,\n                BASIN_HOP: False, TEMP: None, NITER_SUCCESS: None, BASIN_NITER: 50,\n                BASIN_DEF_STEP: 1.0, BASIN_SEED: None, BEST_PARAMS_FNAME: None, LOWEST_RESID: np.inf,\n                TRIANGLE_MINI: False,\n                }\nREQ_KEYS = {BASH_DRIVER: str}\n\n\n# Logic #\n\n# CLI Processing #\n\n\nclass RandomDisplacementBounds(object):\n    \"\"\"random displacement with bounds\"\"\"\n    def __init__(self, x_min, x_max, step_size, print_info):\n        self.x_min = x_min\n        self.x_max = x_max\n        self.step_size = step_size\n        self.print_info = print_info\n\n    def __call__(self, x):\n        \"\"\"take a random step but ensure the new position is within the bounds\"\"\"\n        x_new = x + np.random.uniform(-self.step_size, self.step_size, np.shape(x))\n        comp_max = x_new < self.x_max\n        comp_min = x_new > self.x_min\n        if not np.all(comp_max):\n            for val_id, less_than_max in enumerate(comp_max):\n                if not less_than_max:\n                    x_new[val_id] = self.x_max[val_id]\n        if not np.all(comp_min):\n            for val_id, more_than_min in enumerate(comp_min):\n                if not more_than_min:\n                    x_new[val_id] = self.x_min[val_id]\n        if self.print_info:\n            print(\"Hopping to parameter values: {}\".format(\",\".join([\"{:11f}\".format(x) for x in x_new])))\n        return x_new.tolist()\n\n\ndef process_conv_tpl_keys(raw_key_val_tuple_list):\n    \"\"\"\n    In case there are multiple (comma-separated) values, split on comma and strip. If possible, convert to int or float;\n    otherwise, leave the value as a string. 
Return the tuple as a processed ordered dict\n\n @param raw_key_val_tuple_list: key-value dict read from configuration file;\n check for commas to indicate multiple parameters, and converted to int\n or floats if amenable\n @return val_dict: a dictionary of values\n @return dir_dict: a dictionary of initial directions for minimization\n \"\"\"\n val_dict = OrderedDict()\n dir_dict = {}\n for key, val in raw_key_val_tuple_list:\n val_list = [x.strip() for x in val.split(',')]\n val_num = len(val_list)\n if val_num == 1:\n # if it can be converted, do so; this helps with my printing formatting\n val_dict[key] = conv_num(val_list[0])\n dir_dict[key] = DEF_DIR\n elif val_num == 2:\n # if there are two values, assume that it is a float with the ability to be optimized\n try:\n val_dict[key] = float(val_list[0])\n dir_dict[key] = float(val_list[1])\n except ValueError:\n raise InvalidDataError(\"For key '{}', read '{}', which could not be converted to floats. When two \"\n \"values are provided, they are read as an initial float that may be optimized, \"\n \"and the initial search direction for optimization.\".format(key, val))\n else:\n raise InvalidDataError(\"For key '{}', {} values were found ({}). Each parameter should have either one or \"\n \"two specified values (x0, optionally followed by initial search direction, which \"\n \"defaults to {}.\".format(key, val_num, val, DEF_DIR))\n return val_dict, dir_dict\n\n\ndef process_bin_max_min_vals(raw_key_val_tuple_list):\n \"\"\"\n Convert tuple to a dictionary with float values\n @param raw_key_val_tuple_list: raw entries to be processed\n @return: dictionaries of keys and float values\n \"\"\"\n hop_dict = {}\n min_dict = {}\n max_dict = {}\n for key, val in raw_key_val_tuple_list:\n try:\n val_list = [float(x.strip()) for x in val.split(',')]\n if len(val_list) in [1, 3]:\n hop_dict[key] = val_list[0]\n if len(val_list) == 3:\n if val_list[1] < val_list[2]:\n min_dict[key] = val_list[1]\n max_dict[key] = val_list[2]\n else:\n raise InvalidDataError(\"Min value ({}) is not less than max value ({})\"\n \"\".format(round(val_list[1], 6), round(val_list[2], 6)))\n else:\n raise InvalidDataError(\"Unexpected number of values ({})\".format(len(val_list)))\n except (ValueError, InvalidDataError) as e:\n raise InvalidDataError(\"Encountered error '{}' For key '{}' in section {}, read: {}.\\n\"\n \"Expected 1 or 3 comma-separated floats for each variable (key): the max \"\n \"hop (step) size, \\noptionally followed by the min value, max value that \"\n \"should be obtained from hopping.\".format(e.args[0], key, BASIN_HOP_MIN_MAX, val))\n return hop_dict, min_dict, max_dict\n\n\ndef process_max_min_vals(raw_key_val_tuple_list, default_penalty):\n \"\"\"\n Convert tuple to a dictionary with float values\n @param raw_key_val_tuple_list:\n @param default_penalty: default penalty for the flat-bottomed potential\n @return: dictionary of keys and float values\n \"\"\"\n val_dict = {}\n for key, val in raw_key_val_tuple_list:\n try:\n val_list = [float(x.strip()) for x in val.split(',')]\n if len(val_list) == 2:\n val_dict[key] = val_list\n elif len(val_list) == 1:\n val_dict[key] = val_list + [default_penalty]\n else:\n raise InvalidDataError(\"For key '{}' in max or min section, read: {}. 
\\nExpected 1 or 2 values: \"\n \"either the edge of the potential and the penalty stiffness, or only the \"\n \"edge of the potential, which will be used with \"\n \"the default penalty for the flat-bottomed potential\"\n \"\".format(key, val))\n except ValueError as e:\n raise InvalidDataError(\"Error in reading max or min value provided for key '{}': {}\"\n \"\".format(key, e.args[0]))\n return val_dict\n\n\ndef process_cfg_conv(raw_cfg, def_cfg_vals=None, req_keys=None, int_list=True):\n \"\"\"\n Converts the given raw configuration, filling in defaults and converting the specified value (if any) to the\n default value's type.\n @param raw_cfg: The configuration map.\n @param def_cfg_vals: dictionary of default values\n @param req_keys: dictionary of required types\n @param int_list: flag to specify if lists should converted to a list of integers\n @return: The processed configuration.\n\n \"\"\"\n proc_cfg = {}\n for key in raw_cfg:\n if not (key in def_cfg_vals or key in req_keys):\n raise InvalidDataError(\"Unexpected key '{}' in configuration ('ini') file.\".format(key))\n key = None\n try:\n for key, def_val in def_cfg_vals.items():\n proc_cfg[key] = conv_raw_val(raw_cfg.get(key), def_val, int_list)\n for key, type_func in req_keys.items():\n proc_cfg[key] = type_func(raw_cfg[key])\n except KeyError as e:\n raise KeyError(\"Missing config val for key '{}'\".format(key, e))\n except Exception as e:\n raise InvalidDataError('Problem with config vals on key {}: {}'.format(key, e))\n if proc_cfg[SCIPY_OPT_METHOD] != DEF_OPT_METHOD:\n proc_cfg[SCIPY_OPT_METHOD] = proc_cfg[SCIPY_OPT_METHOD].lower()\n if proc_cfg[SCIPY_OPT_METHOD] not in TESTED_SCIPY_MIN:\n warning(\"Only the following optimization methods have been tested: scipy.optimize.minimize with {}.\"\n \"\".format(TESTED_SCIPY_MIN))\n for int_key in [TEMP, NITER_SUCCESS]:\n if proc_cfg[int_key] is not None:\n proc_cfg[int_key] = float(proc_cfg[int_key])\n\n # Remove any repeated parameters, or zero-character-length params (can happen if accidentally an additional comma)\n if len(proc_cfg[OPT_PARAMS]) > 0:\n filtered_opt_params = []\n for param in proc_cfg[OPT_PARAMS]:\n if len(param) > 0:\n if param in filtered_opt_params:\n warning(\"'{}' repeated in '{}'; skipping repeated entry\".format(param, OPT_PARAMS))\n else:\n filtered_opt_params.append(param)\n proc_cfg[OPT_PARAMS] = filtered_opt_params\n\n return proc_cfg\n\n\ndef read_cfg(f_loc, cfg_proc=process_cfg_conv):\n \"\"\"\n Reads the given configuration file, returning a dict with the converted values supplemented by default values.\n\n :param f_loc: The location of the file to read.\n :param cfg_proc: The processor to use for the raw configuration values. 
Uses default values when the raw\n value is missing.\n :return: A dict of the processed configuration file's data.\n \"\"\"\n config = ConfigParser()\n try:\n good_files = config.read(f_loc)\n except MissingSectionHeaderError:\n raise InvalidDataError(MISSING_SEC_HEADER_ERR_MSG.format(f_loc))\n if not good_files:\n raise IOError(\"Could not read file '{}'\".format(f_loc))\n\n # Start with empty data structures to be filled\n proc = {TPL_VALS: {}, TPL_EQ_PARAMS: [], RIGHT_SIDE_PENALTY: {}, LEFT_SIDE_POTENTIAL: {}, INITIAL_DIR: {}, }\n\n if MAIN_SEC in config.sections():\n try:\n proc.update(cfg_proc(dict(config.items(MAIN_SEC)), DEF_CFG_VALS, REQ_KEYS, int_list=False))\n if proc[MAX_ITER] is not None:\n proc[MAX_ITER] = int(proc[MAX_ITER])\n except InvalidDataError as e:\n if 'Unexpected key' in e.args[0]:\n raise InvalidDataError(e.args[0] + \" Does this belong \\nin a template value section such as '[{}]'?\"\n \"\".format(TPL_VALS_SEC))\n except ValueError as e:\n raise InvalidDataError(e)\n else:\n raise InvalidDataError(\"The configuration file is missing the required '{}' section\".format(MAIN_SEC))\n\n for section in config.sections():\n if section == MAIN_SEC:\n # this section already processed\n continue\n elif section in [TPL_VALS_SEC, TPL_EQS_SEC]:\n val_dict, dir_dict = process_conv_tpl_keys(config.items(section))\n if section == TPL_EQS_SEC:\n # just keep the names, so we know special processing is required\n proc[TPL_EQ_PARAMS] = val_dict.keys()\n proc[TPL_VALS].update(val_dict)\n proc[INITIAL_DIR].update(dir_dict)\n elif section in [RIGHT_SIDE_PENALTY, LEFT_SIDE_POTENTIAL]:\n val_dict = process_max_min_vals(config.items(section), DEF_PENALTY)\n proc[section].update(val_dict)\n elif section == BASIN_HOP_MIN_MAX:\n proc[BASIN_HOPS], proc[BASIN_MINS], proc[BASIN_MAXS] = process_bin_max_min_vals(config.items(section))\n if proc[BASIN_DEF_STEP] < 0.0:\n proc[BASIN_DEF_STEP] = abs(proc[BASIN_DEF_STEP])\n else:\n raise InvalidDataError(\"Section name '{}' in not one of the valid section names: {}\"\n \"\".format(section, VALID_SEC_NAMES))\n return proc\n\n\ndef parse_cmdline(argv=None):\n \"\"\"\n Returns the parsed argument list and return code.\n :param argv: A list of arguments, or `None` for ``sys.argv[1:]``.\n \"\"\"\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Creates evb parameter files to converge parameters.')\n parser.add_argument(\"-c\", \"--config\", help=\"The location of the configuration file in ini format. \"\n \"The default file name is {}, located in the \"\n \"base directory where the program as run.\".format(DEF_CFG_FILE),\n default=DEF_CFG_FILE, type=read_cfg)\n parser.add_argument(\"-f\", \"--par_name\", help=\"File name for the parameter file to be created by filling the \"\n \"evb parameter template file. It can also be specified in the \"\n \"configuration file. 
If specified in both places, the command line \"\n \"option will take precedence.\",\n default=None)\n\n args = None\n try:\n args = parser.parse_args(argv)\n if not os.path.isfile(args.config[PAR_TPL]):\n if args.config[PAR_TPL] == DEF_TPL:\n error_message = \"Check input for the configuration key '{}'; \" \\\n \"could not find the default template file: {}\"\n else:\n error_message = \"Could not find the template file specified with \" \\\n \"the configuration key '{}': {}\"\n raise IOError(error_message.format(PAR_TPL, args.config[PAR_TPL]))\n if args.par_name is not None:\n args.config[PAR_FILE_NAME] = args.par_name\n if args.config[PAR_FILE_NAME] is None:\n raise InvalidDataError(\"Missing required key '{}', which can be specified in the \"\n \"required either in the command line for configuration file.\"\n \"\".format(PAR_FILE_NAME))\n for config_param in [BASH_DRIVER]:\n if not os.path.isfile(args.config[config_param]):\n raise IOError(\"Missing file specified with key '{}': {}\"\n \"\".format(config_param, args.config[config_param]))\n if args.config[RESULT_COPY] is not None:\n if args.config[RESULT_FILE] is None:\n raise InvalidDataError(\"A bash driver output file name ('{}') is required when a name for a copy \"\n \"of this file is specified ('{}').\".format(RESULT_FILE, RESULT_COPY))\n except (KeyError, InvalidDataError, IOError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET\n\n\ndef copy_par_result_file(cfg, tpl_vals_dict, print_info=False):\n \"\"\"\n To keep a copy of a par file, make the new file name and copy the previously created par file\n @param cfg: configuration for run\n @param tpl_vals_dict: dictionary to fill strings\n @param print_info: boolean to determine if to print to standard out that a copy was made\n @return: KeyError if required variable is not defined\n \"\"\"\n if cfg[TRIAL_NAME] is not None:\n try:\n tpl_vals_dict[TRIAL_NAME] = cfg[TRIAL_NAME].format(**tpl_vals_dict)\n except KeyError as e:\n raise KeyError(\"Missing key name {} required for '{}': '{}'. Program will terminate.\"\n \"\".format(e, TRIAL_NAME, cfg[TRIAL_NAME]))\n\n for copy_name in [PAR_COPY_NAME, RESULT_COPY]:\n if cfg[copy_name] is not None:\n try:\n base_name = cfg[copy_name].format(**tpl_vals_dict)\n except KeyError as e:\n raise KeyError(\"Missing key name {} required for '{}': '{}'. 
File will not be copied.\"\n \"\".format(e, copy_name, cfg[copy_name]))\n new_fname = create_out_fname(base_name, base_dir=cfg[COPY_DIR])\n if copy_name == PAR_COPY_NAME:\n shutil.copyfile(tpl_vals_dict[NEW_FNAME], new_fname)\n else:\n # if os.path.isfile(tpl_vals_dict[RESULT_FILE]):\n shutil.copyfile(cfg[RESULT_FILE], new_fname)\n\n if print_info:\n print(\" Copied to: {}\".format(new_fname))\n\n\ndef eval_eqs(cfg, tpl_vals_dict):\n \"\"\"\n Evaluates equations based on\n @param cfg: configuration for the run\n @param tpl_vals_dict: dictionary of variable values to be used to evaluate equations and fill templates\n \"\"\"\n for eq_param in cfg[TPL_EQ_PARAMS]:\n try:\n string_to_eval = cfg[TPL_VALS][eq_param].format(**tpl_vals_dict)\n except KeyError as e:\n raise KeyError(\"Missing parameter value {} needed to evaluate '{}' for the parameter '{}'.\"\n \"\".format(e, tpl_vals_dict[eq_param], eq_param))\n try:\n tpl_vals_dict[eq_param] = eval(string_to_eval)\n except NameError:\n raise InvalidDataError(\"Could not evaluate the string '{}' specifying the value for the parameter \"\n \"'{}'. Check equation order, equations, and/or parameter values.\"\n \"\".format(string_to_eval, eq_param))\n\n\ndef obj_fun(x0_trial, cfg, tpl_dict, tpl_str, fitting_sum, result_dict, result_headers, x0_full=None):\n \"\"\"\n Objective function to be minimized. Also used to save trial input and output.\n @param x0_trial: initial parameter values to minimize\n @param x0_full: all parameter values to minimize (may be larger than x0 trail\n @param cfg: configuration for the run\n @param tpl_dict: dictionary of values for filling in template strings\n @param tpl_str: template string (read from file)\n @param fitting_sum: list of dicts for saving all trial values (to be appended, if needed)\n @param result_dict: a dictionary of results already found, to keep the program from unnecessarily running\n the expensive function when we already have solved for that parameter set\n @param result_headers: list of headers for printing results\n @return: the result for the set of values being tested, obtained from the bash script specified in cfg\n \"\"\"\n if x0_full is None:\n x0_full = x0_trial\n else:\n x0_full[:len(x0_trial)] = x0_trial\n\n resid_dict = {}\n penalty = 0\n for param_num, param_name in enumerate(cfg[OPT_PARAMS]):\n # Needed to add break for triangle/stepwise minimization\n if param_num >= len(x0_trial):\n break\n tpl_dict[param_name] = round(x0_trial[param_num], cfg[NUM_PARAM_DECIMALS])\n resid_dict[param_name] = tpl_dict[param_name]\n if param_name in cfg[LEFT_SIDE_POTENTIAL]:\n min_val = cfg[LEFT_SIDE_POTENTIAL][param_name][0]\n stiffness = cfg[LEFT_SIDE_POTENTIAL][param_name][1]\n if x0_trial[param_num] < min_val:\n penalty += stiffness * np.square(x0_trial[param_num] - min_val)\n if param_name in cfg[RIGHT_SIDE_PENALTY]:\n max_val = cfg[RIGHT_SIDE_PENALTY][param_name][0]\n stiffness = cfg[RIGHT_SIDE_PENALTY][param_name][1]\n if x0_trial[param_num] > max_val:\n penalty += stiffness * np.square(x0_trial[param_num] - max_val)\n\n eval_eqs(cfg, tpl_dict)\n fill_save_tpl(cfg, tpl_str, tpl_dict, cfg[PAR_TPL], cfg[PAR_FILE_NAME], print_info=cfg[PRINT_INFO])\n\n # Note: found that the minimizer calls the function with the same inputs multiple times!\n # only call this expensive function if we don't already have that answer, determined by checking for it in\n # the result dictionary\n # to make the input hashable for a dictionary\n x0_str = str(x0_full)\n if x0_str in result_dict:\n trial_result = 
result_dict[x0_str]\n else:\n trial_result = float(check_output([cfg[BASH_DRIVER], tpl_dict[NEW_FNAME]]).strip())\n trial_result += penalty\n result_dict[x0_str] = trial_result\n tpl_dict[RESID] = round(trial_result, cfg[NUM_PARAM_DECIMALS])\n if cfg[PAR_COPY_NAME] is not None or cfg[RESULT_COPY] is not None:\n copy_par_result_file(cfg, tpl_dict, print_info=cfg[PRINT_INFO])\n if cfg[FITTING_SUM_FNAME] is not None:\n write_csv(fitting_sum, cfg[FITTING_SUM_FNAME], result_headers, print_message=cfg[PRINT_INFO],\n round_digits=cfg[NUM_PARAM_DECIMALS])\n if cfg[BEST_PARAMS_FNAME] is not None:\n if trial_result < cfg[LOWEST_RESID]:\n cfg[LOWEST_RESID] = trial_result\n with open(cfg[BEST_PARAMS_FNAME], 'w') as w_file:\n for param_num, param_name in enumerate(cfg[OPT_PARAMS]):\n w_file.write(\"{:} = {:f},{:f}\\n\".format(param_name, x0_full[param_num],\n cfg[INITIAL_DIR][param_name]))\n if cfg[PRINT_INFO]:\n print(\"Resid: {:11f} for parameters: {}\".format(trial_result, \",\".join([\"{:11f}\".format(x) for x in x0_trial])))\n if cfg[FITTING_SUM_FNAME] is not None:\n resid_dict[RESID] = trial_result\n fitting_sum.append(resid_dict)\n return trial_result\n\n\ndef min_params(cfg, tpl_dict, tpl_str):\n num_opt_params = len(cfg[OPT_PARAMS])\n x0 = np.empty(num_opt_params)\n ini_direc = np.zeros((num_opt_params, num_opt_params))\n result_dict = {}\n fitting_sum = []\n result_sum_headers = [RESID]\n\n # setup minimization\n for param_num, param_name in enumerate(cfg[OPT_PARAMS]):\n x0[param_num] = cfg[TPL_VALS][param_name]\n ini_direc[param_num, param_num] = cfg[INITIAL_DIR][param_name]\n result_sum_headers.append(param_name)\n\n # arguments for objective function\n obj_fun_args = (cfg, tpl_dict, tpl_str, fitting_sum, result_dict, result_sum_headers)\n\n # options for minimizer\n opt_options = {'maxiter': cfg[MAX_ITER], 'disp': cfg[PRINT_INFO],\n 'return_all': cfg[PRINT_CONV_ALL],\n }\n if cfg[SCIPY_OPT_METHOD] == POWELL:\n opt_options['direc'] = ini_direc\n if cfg[SCIPY_OPT_METHOD] in [POWELL, NELDER_MEAD]:\n opt_options['xtol'] = cfg[CONV_CUTOFF]\n opt_options['ftol'] = cfg[CONV_CUTOFF]\n opt_options['maxfev'] = cfg[MAX_ITER]\n\n if cfg[BASIN_HOP]:\n # for tests\n if cfg[BASIN_SEED]:\n np.random.seed(1)\n\n step_spec = False\n x_min = np.empty(num_opt_params)\n x_max = np.empty(num_opt_params)\n step_size = np.empty(num_opt_params)\n\n if BASIN_HOPS in cfg:\n hop_dict = cfg[BASIN_HOPS]\n min_dict = cfg[BASIN_MINS]\n max_dict = cfg[BASIN_MAXS]\n if len(hop_dict) > 0:\n for param_num, param_name in enumerate(cfg[OPT_PARAMS]):\n if param_name in hop_dict:\n step_size[param_num] = hop_dict[param_name]\n step_spec = True\n else:\n step_size[param_num] = cfg[BASIN_DEF_STEP]\n if param_name in min_dict:\n x_min[param_num] = min_dict[param_name]\n x_max[param_num] = max_dict[param_name]\n else:\n x_min[param_num] = -np.inf\n x_max[param_num] = np.inf\n if step_spec:\n take_step = RandomDisplacementBounds(x_min, x_max, step_size, cfg[PRINT_INFO])\n else:\n take_step = None\n\n minimizer_kwargs = dict(method=POWELL, args=obj_fun_args, options=opt_options)\n\n ret = basinhopping(obj_fun, x0, minimizer_kwargs=minimizer_kwargs,\n disp=cfg[PRINT_INFO], niter=cfg[BASIN_NITER], niter_success=cfg[NITER_SUCCESS],\n take_step=take_step\n )\n return_message = ret.message[-1] + \".\"\n else:\n # Number of minimization cycles set by default or user input\n num_minis = 0\n return_message = \"No minimization cycles completed\"\n ret = None\n trial_param_num = len(x0)\n\n while num_minis < cfg[MINI_CYCLES]:\n # Set up 
\"triangle\" or step-wise minimization\n if trial_param_num < 3 or not cfg[TRIANGLE_MINI]:\n x0_trial = x0\n # needed for after the first round of minimization\n trial_param_num = len(x0)\n else:\n trial_param_num = 2\n x0_trial = x0[:trial_param_num]\n obj_fun_args = (cfg, tpl_dict, tpl_str, fitting_sum, result_dict, result_sum_headers, x0)\n if 'direc' in opt_options:\n opt_options['direc'] = ini_direc[:trial_param_num, :trial_param_num]\n\n while trial_param_num <= len(x0):\n ret = minimize(obj_fun, x0_trial, args=obj_fun_args,\n method=cfg[SCIPY_OPT_METHOD],\n options=opt_options)\n x0_trial = ret.x\n return_message = ret.message\n x0[:trial_param_num] = x0_trial\n trial_param_num += 1\n if trial_param_num <= len(x0):\n x0_trial = x0[:trial_param_num]\n if 'direc' in opt_options:\n opt_options['direc'] = ini_direc[:trial_param_num, :trial_param_num]\n num_minis += 1\n if cfg[MINI_CYCLES] - num_minis >= 0:\n print(return_message + \" Completed {} of {} minimization cycles\".format(num_minis, cfg[MINI_CYCLES]))\n\n if cfg[PRINT_CONV_ALL]:\n print(return_message + \" Number of function calls: {}\".format(ret.nfev))\n\n # Same final printing either way\n x_final = ret.x\n if x_final.size > 1:\n if cfg[FITTING_SUM_FNAME] is not None:\n write_csv(fitting_sum, cfg[FITTING_SUM_FNAME], result_sum_headers, print_message=cfg[PRINT_INFO],\n round_digits=cfg[NUM_PARAM_DECIMALS])\n print(\"Optimized parameters:\")\n for param_num, param_name in enumerate(cfg[OPT_PARAMS]):\n print(\"{:>11} = {:11f}\".format(param_name, x_final[param_num]))\n else:\n print(\"Optimized parameter:\\n\"\n \"{:>11}: {:11f}\".format(cfg[OPT_PARAMS][0], float(x_final)))\n\n\ndef main(argv=None):\n \"\"\"\n Runs the main program.\n\n :param argv: The command line arguments.\n :return: The return code for the program's termination.\n \"\"\"\n args, ret = parse_cmdline(argv)\n if ret != GOOD_RET or args is None:\n return ret\n\n cfg = args.config\n\n try:\n tpl_str = read_tpl(cfg[PAR_TPL])\n tpl_dict = dict(cfg[TPL_VALS])\n for f_name_key in [BEST_PARAMS_FNAME, FITTING_SUM_FNAME]:\n if cfg[f_name_key] is not None:\n move_existing_file(cfg[f_name_key])\n if len(cfg[OPT_PARAMS]) == 0:\n warning(\"No parameters will be optimized, as no parameters were listed for the keyword '{}' \"\n \"in the '{}' section of the configuration file.\".format(OPT_PARAMS, MAIN_SEC))\n eval_eqs(cfg, tpl_dict)\n fill_save_tpl(cfg, tpl_str, tpl_dict, cfg[PAR_TPL], cfg[PAR_FILE_NAME], print_info=cfg[PRINT_INFO])\n trial_result = float(check_output([cfg[BASH_DRIVER], tpl_dict[NEW_FNAME]]).strip())\n tpl_dict[RESID] = round(trial_result, cfg[NUM_PARAM_DECIMALS])\n if cfg[PAR_COPY_NAME] is not None or cfg[RESULT_COPY] is not None:\n copy_par_result_file(cfg, tpl_dict)\n print(\"Result without optimizing parameters: {}\".format(trial_result))\n else:\n min_params(cfg, tpl_dict, tpl_str)\n\n except (TemplateNotReadableError, IOError) as e:\n warning(\"Problems reading file: {}\".format(e))\n return IO_ERROR\n except (KeyError, InvalidDataError, ValueError) as e:\n warning(e)\n return IO_ERROR\n\n return GOOD_RET\n\n\nif __name__ == '__main__':\n status = main()\n sys.exit(status)\n","repo_name":"team-mayes/md_utils","sub_path":"md_utils/converge_evb_par.py","file_name":"converge_evb_par.py","file_ext":"py","file_size_in_byte":30563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"3931793927","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib as mpl\n\ntableau = 
[1,2,5,4,3]\nlabel = [\"Adrien\", \"Thomas\", \"Sylvain\", \"Quentin\", \"Marie\"]\n\n\ndef barre(t: list):\n    \"\"\"Params:\n    - t: list of floats\n\n    Returns:\n    None\n    Displays the data of the list t as a vertical bar chart.\"\"\"\n    plt.bar(range(len(t)), t, width=0.5, color='red')\n    plt.show()\n\n\ndef camembert(t: list, label: list) -> None:\n    plt.figure(figsize=(5, 5))\n    plt.pie(t, labels=label)\n    plt.show()\n\n\ndef graphe(x: list, y: list):\n    \"\"\"Params:\n    - x: list of floats, the x coordinates\n    - y: list of floats, the y coordinates\n\n    Returns: None\n    Draws a graph through the points (xi, yi).\"\"\"\n    plt.plot(x, y, marker=\"x\")\n    plt.show()\n\n\ngraphe([1,2,4,5,6,7,9,10],[10,9,8,6,5,3,2,1])","repo_name":"TGV2107/Exercices","sub_path":"Modularité et gestion des bugs/Sylvain/NSI 66 22.py","file_name":"NSI 66 22.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"11976085677","text":"from ..data import PNGData\nfrom .base_visualizer import Visualizer as BaseVisualizer\n\n\nclass Visualizer(BaseVisualizer):\n    \"\"\"\n    Class for providing a plot of a PNG file using matplotlib.\n    \"\"\"\n\n    def __init__(self, run_directories=None, ax=None):\n        \"\"\"\n        Parameters\n        ----------\n        run_directories: list of tuples of length 2\n            or single tuple of length 2.\n            Each tuple is of the following form (sim_name, sim_path)\n            and consists of strings.\n            sim_name is a short string used e.g. in plot legends.\n            sim_path leads to the run directory of PIConGPU\n            (the path before ``simOutput/``).\n            If None, the user is responsible for providing run_directories\n            later on via set_run_directories() before calling visualize().\n        ax: matplotlib.axes\n        \"\"\"\n        super().__init__(PNGData, run_directories, ax)\n\n    def _check_and_fix_run_dirs(self, run_directories):\n        \"\"\"\n        Overridden from base class. Makes sure to only accept\n        a single simulation's run_directory.\n        \"\"\"\n        base_checked = super()._check_and_fix_run_dirs(run_directories)\n\n        # Fail if more than one run_directory since plotting\n        # several PNGs at the same time does not make sense!\n        if len(base_checked) > 1:\n            raise ValueError(\"This visualizer only supports plotting a single\"\n                             \" simulation! Parameter 'run_directory' can\"\n                             \" contain only a single element!\")\n\n        return base_checked\n\n    def _create_plt_obj(self, idx):\n        \"\"\"\n        Implementation of base class function.\n        Turns 'self.plt_obj' into a matplotlib.image.AxesImage object.\n        \"\"\"\n        self.plt_obj[idx] = self.ax.imshow(self.data[idx])\n\n    def _update_plt_obj(self, idx):\n        \"\"\"\n        Implementation of base class function.\n        \"\"\"\n        self.plt_obj[idx].set_data(self.data[idx])\n
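\n    # Example usage (paths and values below are hypothetical; the run directory\n    # is the one containing simOutput/):\n    #\n    #     import matplotlib.pyplot as plt\n    #     _, ax = plt.subplots(1, 1)\n    #     Visualizer((\"my_sim\", \"/path/to/run\"), ax).visualize(\n    #         iteration=2000, species=\"e\", species_filter=\"all\",\n    #         axis=\"yx\", slice_point=0.5)\n    #     plt.show()\n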
\n    def visualize(self, **kwargs):\n        \"\"\"\n        Creates a plot on the provided axes object for\n        the PNG file of the given iteration using matplotlib.\n\n        Parameters\n        ----------\n        kwargs: dict\n            additional keyword args. Necessary are the following:\n            species : string\n                short name of the particle species, e.g. 'e' for electrons\n                (defined in ``speciesDefinition.param``)\n            species_filter: string\n                name of the particle species filter, default is 'all'\n                (defined in ``particleFilters.param``)\n            axis: string\n                the coordinate system axis labels (e.g. 'yx' or 'yz')\n            slice_point: float\n                relative offset in the third axis not given in the axis\\\n                argument.\\\n                Should be between 0 and 1\n            iteration: int or list of ints\n                The iteration at which to read the data.\n                if set to 'None', then return images for all available\\\n                iterations\n            time: float\n                simulation time.\n                Only one of 'iteration' or 'time' should be passed!\n        \"\"\"\n        super().visualize(**kwargs)\n\n\nif __name__ == '__main__':\n\n    def main():\n\n        import sys\n        import getopt\n        import matplotlib.pyplot as plt\n\n        def usage():\n            print(\"usage:\")\n            print(\n                \"python\", sys.argv[0],\n                \"-p <path to run directory> -i <iteration>\"\n                \" -s <particle species> -f <species filter>\"\n                \" -a <axis> -o <slice offset>\")\n\n        path = None\n        iteration = None\n        species = None\n        filtr = None\n        axis = None\n        slice_point = None\n\n        try:\n            opts, args = getopt.getopt(sys.argv[1:], \"hp:i:s:f:a:o:\", [\n                \"help\", \"path\", \"iteration\", \"species\", \"filter\", \"axis\",\n                \"offset\"])\n        except getopt.GetoptError as err:\n            print(err)\n            usage()\n            sys.exit(2)\n\n        for opt, arg in opts:\n            if opt in [\"-h\", \"--help\"]:\n                usage()\n                sys.exit()\n            elif opt in [\"-p\", \"--path\"]:\n                path = arg\n            elif opt in [\"-i\", \"--iteration\"]:\n                iteration = int(arg)\n            elif opt in [\"-s\", \"--species\"]:\n                species = arg\n            elif opt in [\"-f\", \"--filter\"]:\n                filtr = arg\n            elif opt in [\"-a\", \"--axis\"]:\n                axis = arg\n            elif opt in [\"-o\", \"--offset\"]:\n                slice_point = float(arg)\n\n        # check that we got all args that we need\n        if path is None or iteration is None:\n            print(\"Path to 'run' directory and iteration have to be provided!\")\n            usage()\n            sys.exit(2)\n        if species is None:\n            species = 'e'\n            print(\"Particle species was not given, will use\", species)\n        if filtr is None:\n            filtr = 'all'\n            print(\"Species filter was not given, will use\", filtr)\n        if axis is None:\n            axis = \"yx\"\n            print(\"Axis was not given, will use\", axis)\n        if slice_point is None:\n            print(\"Offset was not given, will determine from file\")\n\n        _, ax = plt.subplots(1, 1)\n        Visualizer(path, ax).visualize(iteration=iteration, species=species,\n                                       species_filter=filtr, axis=axis,\n                                       slice_point=slice_point)\n        plt.show()\n\n    main()\n","repo_name":"ComputationalRadiationPhysics/picongpu","sub_path":"lib/python/picongpu/extra/plugins/plot_mpl/png_visualizer.py","file_name":"png_visualizer.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"47"}
{"seq_id":"9365109907","text":"from c7n.manager import resources\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.query import QueryResourceManager, TypeInfo, DescribeSource, ConfigSource\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session\n\n\nclass DescribeBackup(DescribeSource):\n\n    def augment(self, resources):\n        resources = super(DescribeBackup, self).augment(resources)\n        client = local_session(self.manager.session_factory).client('backup')\n        results = []\n        for r in resources:\n            plan = r.pop('BackupPlan', {})\n            r.update(plan)\n            try:\n                tags = client.list_tags(ResourceArn=r['BackupPlanArn']).get('Tags', {})\n            except client.exceptions.ResourceNotFoundException:\n                continue\n            r['Tags'] = [{'Key': k, 'Value': v} for k, v in tags.items()]\n            results.append(r)\n        return results\n\n    def get_resources(self, resource_ids, cache=True):\n        client = local_session(self.manager.session_factory).client('backup')\n        resources = []\n\n        for rid in resource_ids:\n            try:\n                r = client.get_backup_plan(BackupPlanId=rid)\n                plan = r.pop('BackupPlan', {})\n                r.update(plan)\n                resources.append(r)\n            except client.exceptions.ResourceNotFoundException:\n                continue\n        return resources\n
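\n\n# Example policy (illustrative) that the resource registered below makes possible:\n#\n#   policies:\n#     - name: backup-plans-missing-owner-tag\n#       resource: backup-plan\n#       filters:\n#         - \"tag:owner\": absent\n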
@resources.register('backup-plan')\nclass BackupPlan(QueryResourceManager):\n\n    class resource_type(TypeInfo):\n        service = 'backup'\n        enum_spec = ('list_backup_plans', 'BackupPlansList', None)\n        detail_spec = ('get_backup_plan', 'BackupPlanId', 'BackupPlanId', None)\n        id = 'BackupPlanName'\n        name = 'BackupPlanId'\n        arn = 'BackupPlanArn'\n        config_type = cfn_type = 'AWS::Backup::BackupPlan'\n        universal_taggable = object()\n\n    source_mapping = {\n        'describe': DescribeBackup,\n        'config': ConfigSource\n    }\n\n\nclass DescribeVault(DescribeSource):\n\n    def augment(self, resources):\n        return universal_augment(self.manager, super(DescribeVault, self).augment(resources))\n\n    def get_resources(self, resource_ids, cache=True):\n        client = local_session(self.manager.session_factory).client('backup')\n        resources = []\n        for rid in resource_ids:\n            try:\n                resources.append(\n                    client.describe_backup_vault(BackupVaultName=rid))\n            except client.exceptions.ResourceNotFoundException:\n                continue\n        return resources\n\n\n@resources.register('backup-vault')\nclass BackupVault(QueryResourceManager):\n\n    class resource_type(TypeInfo):\n        service = 'backup'\n        enum_spec = ('list_backup_vaults', 'BackupVaultList', None)\n        name = id = 'BackupVaultName'\n        arn = 'BackupVaultArn'\n        arn_type = 'backup-vault'\n        universal_taggable = object()\n        config_type = cfn_type = 'AWS::Backup::BackupVault'\n\n    source_mapping = {\n        'describe': DescribeVault,\n        'config': ConfigSource\n    }\n\n\n@BackupVault.filter_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n\n    RelatedIdsExpression = 'EncryptionKeyArn'\n","repo_name":"cloud-custodian/cloud-custodian","sub_path":"c7n/resources/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":5022,"dataset":"github-code","pt":"55"}
{"seq_id":"18613477988","text":"from datetime import datetime\nfrom schemas import ClientCreateInput\nfrom database.models import Bikes, Clients\nfrom database import async_session\nfrom sqlalchemy.future import select\n\n\nasync def create_client(client: ClientCreateInput):\n    async with async_session() as session:\n        new_client = Clients(\n            name=client.name,\n            email=client.email,\n            password=client.password,\n            phone=client.phone,\n            document=client.document,\n            address=client.address,\n            city=client.city,\n            state=client.state,\n            zip_code=client.zip_code,\n            birthday=client.birthday,\n            created_at=datetime.now(),\n            updated_at=datetime.now()\n        )\n        session.add(new_client)\n        await session.commit()\n        await session.refresh(new_client)\n        return new_client\n\n\nasync def get_client_by_email(email: str):\n    async with async_session() as session:\n        query = await session.execute(select(Clients).where(Clients.email == email))\n        return query.scalars().first()\n\n\nasync def get_client_by_id(id: int):\n    async with async_session() as session:\n        query = await session.execute(select(Clients).where(Clients.id == id))\n        return query.scalars().first()\n\n\nasync def create_bike(bike, client_id: int):\n    async with async_session() as session:\n        new_bike = Bikes(\n            brand=bike['brand'],\n            model=bike['model'],\n            price=float(bike['price']),\n            year=int(bike['year']),\n            color=bike['color'],\n            serial_number=bike['serial_number'],\n            created_at=datetime.now(),\n            updated_at=datetime.now(),\n            client_id=client_id\n        )\n        session.add(new_bike)\n        await session.commit()\n        await 
session.refresh(new_bike)\n return new_bike\n\n\nasync def get_all_bikes(client_id: int):\n async with async_session() as session:\n query = await session.execute(select(Bikes).where(Bikes.client_id == client_id))\n return query.scalars().all()\n\n\nasync def get_all_clients():\n async with async_session() as session:\n query = await session.execute(select(Clients))\n return query.scalars().all()\n","repo_name":"lucas-pmelo/api-porto","sub_path":"database/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"35892055454","text":"\"\"\"\n\nCourse FM1220 Automatic Control at USN\n\nCompulsory assignment 1:\nProgramming a simulator of an heated tank in Python\n\nSolution to Problem 2:\n\"Programming and simulation: Program a simulator of the\ntank heater in Python. The simulator must be implemented \nwith \"native\" code in a For loop based on the Euler \nForward discretization of the model (a built-in simulation \nfunction of Python should not be used). You can set the \ntime-step to 1 s. The following variables should be plotted:\nT, Tin, Tenv, and P with temperatures in one subplot,\nand P in another subplot.\nRun a simulation with P = P0 as calculated in Problem 1.\nIs the simulated static T the same as specified in\nProblem 1?\n\nFinn Aakre Haugen\nfinn.haugen@usn.no\n\n2021 09 01\n\n\"\"\"\n\n#%% Imports:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#%% Model params:\n\nc = 4200 # [J/(kg*K)]\nrho = 1000 # [kg/m3]\nV = 0.2 # [m3]\nU = 1000 # [W/K]\nF = 0.25e-3 # [m3/s]\nT_in = 20 # [deg C]\nT_env = 20 # [deg C]\n\nT_min = 0\nT_max = 100\n\n#%% Calculation of power giving specified static temp: \n\nT_static = 25 # [deg C] Static temp\n\n# From model after t' is set to zero (static value): \nP0 = - (c*rho*F*(T_in-T_static) + U*(T_env-T_static)) # [W]\n\n#%% Sim time settings:\n\ndt = 1 # [s]\nt_start = 0 # [s]\nt_stop = 6000 # [s]\nN_sim = int((t_stop - t_start)/dt) + 1\n\n#%% Preallocation of arrays for storing:\n\nt_array = np.zeros(N_sim)\nT_array = np.zeros(N_sim)\nT_in_array = np.zeros(N_sim)\nT_env_array = np.zeros(N_sim)\nP_array = np.zeros(N_sim)\n\n#%% Sim loop:\n\nT_k = T_init = 20 # [deg C] Initial temp\n\nfor k in range(0, N_sim):\n\n t_k = k*dt\n \n P_k = P0\n T_in_k = T_in\n T_env_k = T_env\n \n dT_dt_k = ((1/(c*rho*V))\n *(P_k\n + (c*rho*F)*(T_in-T_k) \n + U*(T_env-T_k)))\n T_kp1 = T_k + dt*dT_dt_k\n T_kp1 = np.clip(T_kp1, T_min, T_max)\n \n t_array[k] = t_k\n T_array[k] = T_k\n T_in_array[k] = T_in_k\n T_env_array[k] = T_env_k\n P_array[k] = P_k\n\n \n # Time index shift:\n T_k = T_kp1\n\n# %% Plotting:\n\nplt.close('all')\nplt.figure(1)\n\nplt.subplot(2, 1, 1)\nplt.plot(t_array, T_array, 'r', label='T')\nplt.plot(t_array, T_in_array, 'b', label='T_in')\nplt.plot(t_array, T_env_array, 'g', label='T_env')\nplt.legend()\nplt.grid()\nplt.xlabel('t [s]')\nplt.ylabel('[deg C]')\n\nplt.subplot(2, 1, 2)\nplt.plot(t_array, P_array, 'm', label='P')\nplt.legend()\nplt.grid()\nplt.xlabel('t [s]')\nplt.ylabel('[W]')\n\n# plt.savefig('plot_sim_heated_water_tank.pdf')\nplt.show()\n\n\n# %% Comments on the results:\n\nprint('Results and comments:')\nprint('Static value of P [W] = P0:', P0)\nprint('Static value of T [deg C]:', f'{T_array[-1]:.2f}')\nprint('Static value of simulated T is as specified.')\n\n","repo_name":"larsRikard/Automatic-control","sub_path":"Assignment 
1/Documentation/Solution/solution_task_2_assignment_1_fm1220_2021.py","file_name":"solution_task_2_assignment_1_fm1220_2021.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
{"seq_id":"14951933253","text":"import unittest\nfrom .testutils import system\n\n\nclass TestBase(unittest.TestCase):\n\n    def test_selinux(self):\n        \"Tests that SELinux is enforcing\"\n        out, err, eid = system('sudo getenforce')\n        out = out.strip()\n        out = out.decode('utf-8')\n        self.assertEqual(out, 'Enforcing')\n\n    def test_logging(self):\n        \"Tests journald logging\"\n        out, err, eid = system('sudo journalctl -a --no-pager -r --since=$(date +%Y-%m-%d) -n1')\n        out = out.decode('utf-8')\n        self.assertGreater(len(out.split()), 3, \"journalctl output is missing.\")\n\n    def test_services(self):\n        \"No service should fail in the startup.\"\n        out, err, eid = system('systemctl --all --failed')\n        out = out.decode('utf-8')\n        self.assertIn('0 loaded units listed', out)\n\n\nclass TestDocker(unittest.TestCase):\n\n    def test_docker_enabled(self):\n        out, err, eid = system('sudo systemctl is-enabled docker')\n        out = out.strip()\n        out = out.decode('utf-8')\n        self.assertEqual('enabled', out)\n\n    def test_docker_running(self):\n        out, err, eid = system('sudo systemctl is-active docker')\n        out = out.strip()\n        out = out.decode('utf-8')\n        self.assertEqual('active', out)\n\n    def test_docker_pull(self):\n        out, err, eid = system('docker pull alpine')\n        self.assertEqual(0, eid)\n\n    def test_docker_run(self):\n        out, err, eid = system('docker run --rm docker.io/alpine ls')\n        self.assertEqual(0, eid)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"praveenkumar/tunirtests","sub_path":"dockertests.py","file_name":"dockertests.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
{"seq_id":"70448420973","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def hasCycle(self, head: ListNode) -> bool:\n        '''\n        Floyd's slow/fast two-pointer cycle detection.\n        If there is no cycle in the list, the fast pointer eventually reaches\n        the end and we return False; otherwise the pointers must meet.\n        '''\n        # if there is no cycle, the two pointers will never meet\n        if (head is None) or (head.next is None):\n            return False\n        # keep advancing: slow by one node, fast by two\n        slow = head\n        fast = head.next\n        while slow != fast:\n            if (fast is None) or (fast.next is None):\n                return False\n            slow = slow.next\n            fast = fast.next.next\n\n        return True
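\n\n# Complexity: O(n) time and O(1) extra space. Once both pointers are inside\n# the cycle, the gap between them shrinks by one node per step, so they must\n# meet within one pass around the cycle.\n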
","repo_name":"hwang018/Leetcode","sub_path":"141. Linked List Cycle/.ipynb_checkpoints/solution-checkpoint.py","file_name":"solution-checkpoint.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"16660341487","text":"import random\nfrom datetime import timedelta\nfrom functools32 import lru_cache\nfrom proteus import Model\n\ndef random_datetime(start, end):\n    \"\"\"\n    This function will return a random datetime between two datetime\n    objects.\n    \"\"\"\n    delta = end - start\n    int_delta = (delta.days * 24 * 60 * 60) + delta.seconds\n    random_second = random.randrange(int_delta)\n    return start + timedelta(seconds=random_second)\n\n@lru_cache()\ndef module_installed(module):\n    Module = Model.get('ir.module')\n    return bool(Module.find([\n        ('name', '=', module),\n        ('state', '=', 'installed'),\n    ]))\n\n@lru_cache()\ndef get_account_payable():\n    Payable = Model.get('account.account')\n    return Payable.find([('kind', '=', 'payable')])\n\n@lru_cache()\ndef get_account_receivable():\n    Receivable = Model.get('account.account')\n    return Receivable.find([('kind', '=', 'receivable')])\n\n@lru_cache()\ndef get_payment_terms():\n    Term = Model.get('account.invoice.payment_term')\n    return Term.find([])\n\n@lru_cache()\ndef get_payment_types(kind):\n    Type = Model.get('account.payment.type')\n    return Type.find([('kind', '=', kind)])\n\n@lru_cache()\ndef get_languages():\n    Lang = Model.get('ir.lang')\n    return Lang.find([\n        ('code', 'in', ['ca_ES', 'es_ES', 'en_US']),\n    ])\n\n@lru_cache()\ndef get_price_lists():\n    PriceList = Model.get('product.price_list')\n    return PriceList.find([])\n\n@lru_cache()\ndef get_banks():\n    if not module_installed('bank'):\n        return\n    Bank = Model.get('bank')\n    return Bank.find([])\n\n@lru_cache()\ndef get_company():\n    Company = Model.get('company.company')\n    companies = Company.find([])\n    if companies:\n        return companies[0]\n\n@lru_cache()\ndef get_model_id(module, fs_id):\n    ModelData = Model.get('ir.model.data')\n    data, = ModelData.find([\n        ('module', '=', module),\n        ('fs_id', '=', fs_id),\n    ])\n    return data.model, data.db_id\n\ndef get_object(module, fs_id):\n    model, id = get_model_id(module, fs_id)\n    Class = Model.get(model)\n    return Class(id)\n
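\n# Illustrative usage (the module/fs_id pair below is a hypothetical XML id):\n#   lang = get_object('ir', 'lang_en')  # -> the matching ir.lang record\n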
","repo_name":"NaN-tic/trytontasks-gal","sub_path":"trytontasks_gal/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"7582409247","text":"# Write a program that reads two numbers and prints the result of multiplying the\n# first one by the second. Use only the addition and subtraction operators to compute\n# the result. Remember that multiplying two numbers can be understood as repeatedly\n# adding one of them: 4 x 5 = 5 + 5 + 5 + 5 = 4 + 4 + 4 + 4 + 4.\n\nnum1 = int(input('Enter the first number: '))\nnum2 = int(input('Enter the second number: '))\nnegativo = False\nif num2 < 0:\n    negativo = True\n    num2 = -num2\nres = 0\nfor i in range(num2):\n    res += num1 if not negativo else -num1\nprint(f\"{num1} x {num2} = {res}\")","repo_name":"gapigo/Unip-Notebook","sub_path":"5th Semester/Programming Paradigms/NP2 Exercises - Part 2/ex08.py","file_name":"ex08.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"3852076085","text":"from optimus.infer import is_list_of_tuples\n\nimport pandas as pd\n\n\nclass Create:\n    def __init__(self, creator):\n        self.creator = creator\n\n    def data_frame(self, cols=None, rows=None, pdf=None, n_partitions=1, *args, **kwargs):\n        \"\"\"\n        Helper to create a dataframe.\n        :param cols: List of tuples with name, data type and a flag to accept null\n        :param rows: List of tuples with the same length and types as cols\n        :param pdf: a pandas dataframe\n        :param n_partitions: number of partitions for the dask-based engines\n        :return: Dataframe\n        \"\"\"\n        creator = self.creator\n        if pdf is None:\n\n            # Process the rows\n            if not is_list_of_tuples(rows):\n                rows = [(i,) for i in rows]\n\n            if is_list_of_tuples(cols):\n                _columns = [c[0] for c in cols]\n                _dtypes = [c[1] for c in cols]\n            else:\n                _columns = cols\n                _dtypes = None\n\n            # Process the columns\n            if creator.__name__ == \"dask_cudf\":\n                import cudf\n                pdf = cudf.DataFrame(columns=_columns, data=rows)\n            else:\n                pdf = pd.DataFrame(columns=_columns, data=rows)\n\n            if _dtypes is not None:\n                for col, dtype in zip(_columns, _dtypes):\n                    pdf[col] = pdf[col].astype(dtype)\n\n        creator = self.creator\n        if creator.__name__ == \"pandas\" or creator.__name__ == \"cudf\":\n            df = creator.DataFrame(pdf, *args, **kwargs)\n        elif creator.__name__ == \"dask.dataframe\":\n            df = self.creator.from_pandas(pdf, npartitions=n_partitions, *args, **kwargs)\n        elif creator.__name__ == \"dask_cudf\":\n            df = self.creator.from_cudf(pdf, npartitions=n_partitions, *args, **kwargs)\n        else:\n            raise ValueError(\"Unsupported engine: {}\".format(creator.__name__))\n\n        df = df.meta.columns(df.cols.names())\n        return df\n
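\n    # Illustrative usage (engine names follow the creator.__name__ checks above):\n    #   create = Create(pd)  # pandas engine\n    #   df = create.data_frame(cols=[('name', 'object'), ('age', 'int64')],\n    #                          rows=[('alice', 30), ('bob', 25)])\n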
\n    df = data_frame\n","repo_name":"gcastellan0s/Optimus","sub_path":"optimus/engines/base/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"55"}
{"seq_id":"17947345090","text":"import streamlit as st\nimport folium\nfrom folium.plugins import MousePosition\nfrom streamlit_folium import folium_static\n\nnypd_sectors = 'assets/geojson/NYPD_Sectors.geojson'\nnypd_psa = 'assets/geojson/NYCHA_PSA.geojson'\n\nm = folium.Map(\n    location=[40.730610, -73.935242],\n    tiles='OpenStreetMap',\n    zoom_start=10\n)\n\nformatter = \"function(num) {return L.Util.formatNum(num, 3) + ' º ';};\"\n\nMousePosition(\n    position=\"topright\",\n    separator=\" | \",\n    empty_string=\"NaN\",\n    lng_first=True,\n    num_digits=20,\n    prefix=\"Coordinates:\",\n    lat_formatter=formatter,\n    lng_formatter=formatter,\n).add_to(m)\n\nfolium.GeoJson(nypd_psa, name='NYCHA PSA',\n    tooltip=folium.GeoJsonTooltip(['address', 'borough', 'psa', 'zipcode'],\n    aliases=['Address', 'Borough', 'PSA', 'Zipcode'])).add_to(m)\n\nfolium.GeoJson(nypd_sectors, name='NYPD Sectors',\n    popup=folium.GeoJsonPopup(['sct_text', 'patrol_bor', 'sq_mile_new'],\n    aliases=['Sector', 'Patrol Borough', 'Surface in Square Miles'])).add_to(m)\n\nfolium.LayerControl().add_to(m)\n\ntitle = 'Crime Prediction in New York City'\n\nst.set_page_config(page_title=title)\nst.title(title)\n\ndate = st.date_input('Select a date:')\nsex = st.selectbox('Select your sex:', ['Male', 'Female'])\nage = st.select_slider('Select your age:', options=range(13, 80), value=24)\n\nst.text('Select your accident position:')\nmap_view = folium_static(m)\n\nif st.button('Predict Accident'):\n    st.text('Accident predicted!')\n","repo_name":"chibounisme/new-york-crime-prediction","sub_path":"frontend/show_map_streamlit.py","file_name":"show_map_streamlit.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"23901313145","text":"# # # Distribution Statement A. Approved for public release. Distribution unlimited.\n# # #\n# # # Author:\n# # # Naval Research Laboratory, Marine Meteorology Division\n# # #\n# # # This program is free software: you can redistribute it and/or modify it under\n# # # the terms of the NRLMMD License included with this program. This program is\n# # # distributed WITHOUT ANY WARRANTY; without even the implied warranty of\n# # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the included license\n# # # for more details. If you did not receive the license, for more information see:\n# # # https://github.com/U-S-NRL-Marine-Meteorology-Division/\n\n'''Wrapper for AMSR2 data products reader'''\nimport logging\nfrom os.path import basename\nLOG = logging.getLogger(__name__)\n\n\nreader_type = 'standard'\n\n\ndef wrapper_amsr2_netcdf(fnames, metadata_only=False, chans=None, area_def=None, self_register=False):\n    ''' This is a wrapper around the AMSR2 netcdf data products reader.\n    This is part of the tutorial that adds a new reader interface module\n\n    All GeoIPS 2.0 readers read data into xarray Datasets - a separate\n    dataset for each shape/resolution of data - and contain standard metadata information.\n\n    Args:\n        fnames (list): List of strings, full paths to files\n        metadata_only (Optional[bool]):\n            * DEFAULT False\n            * return before actually reading data if True\n        chans (Optional[list of str]):\n            * DEFAULT None (include all channels)\n            * List of desired channels (skip unneeded variables as needed)\n        area_def (Optional[pyresample.AreaDefinition]):\n            * NOT YET IMPLEMENTED\n            * DEFAULT None (read all data)\n            * Specify region to read\n        self_register (Optional[str]):\n            * NOT YET IMPLEMENTED\n            * DEFAULT False (read multiple resolutions of data)\n            * register all data to the specified resolution.\n\n    Returns:\n        dict of xarray.Datasets: dict of xarray.Dataset objects with required\n            Variables and Attributes: (See geoips/docs :doc:`xarray_standards`),\n            dict key can be any descriptive dataset id\n    '''\n    from geoips.stable.reader import get_reader\n    amsr2_netcdf_reader = get_reader('amsr2_netcdf')\n    xarrays = amsr2_netcdf_reader(fnames, metadata_only=metadata_only, chans=chans,\n                                  area_def=area_def, self_register=self_register)\n    return xarrays\n","repo_name":"NRLMMD-GEOIPS/geoips_tutorial","sub_path":"geoips_tutorial/interface_modules/readers/wrapper_amsr2_netcdf.py","file_name":"wrapper_amsr2_netcdf.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"71855374570","text":"from hydroDL.master import slurm\nfrom hydroDL import kPath\nimport os\n# slurm.submitJobGPU('modelA','python /home/users/kuaifang/GitHUB/geolearn/app/waterQual/model/modelA.py',nH=20)\n\n# slurm.submitJobGPU('modelC','python /home/users/kuaifang/GitHUB/geolearn/app/waterQual/model/modelC.py',nH=20)\n\n# wrap up data\n# 
codePath = os.path.join(kPath.dirCode, 'app',\n# 'waterQual', 'model', 'wrapData.py')\n# jobName = 'wrapUpData'\n# cmdLine = 'python {}'.format(codePath)\n# slurm.submitJob(jobName, cmdLine, nH=1, nM=64)\n\n# TRAIN MODEL\n\n# slurm.submitJobGPU('basinRef','python /home/users/kuaifang/GitHUB/geolearn/app/waterQual/model/trainModel1.py',nH=24)\n\n# slurm.submitJobGPU(\n# 'basinAll', 'python /home/users/kuaifang/GitHUB/geolearn/app/waterQual/model/trainModel2.py', nH=48, nM=64)\n\n\ncmdP = 'python /home/users/kuaifang/GitHUB/geolearn/app/waterQual/model/runCmd.py -D {} -O {}'\nnameP = '{}-opt{}'\ndLst = ['HBN', 'HBN-30d']\noptLst = [1, 2, 3, 4]\nfor d in dLst:\n for opt in optLst:\n print(cmdP.format(d, opt))\n slurm.submitJobGPU(nameP.format(d, opt), cmdP.format(d, opt), nH=8)\n","repo_name":"fkwai/geolearn","sub_path":"app/waterQual/model/remoteScript.py","file_name":"remoteScript.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"} +{"seq_id":"41811750158","text":"import os\nimport mock\nimport testtools\nfrom cloudify.mocks import MockContext\nfrom cloudify_rest_client.client import CloudifyClient\nfrom cloudify.workflows.workflow_context import (\n LocalCloudifyWorkflowContextHandler,\n CloudifyWorkflowContextInternal,\n CloudifyWorkflowNode,\n CloudifyWorkflowNodeInstance)\nfrom cloudify.utils import setup_logger\nfrom cloudify.test_utils import workflow_test\nfrom dp_plugin.burst import burst_up, burst_down\n\nWORKFLOW_NAME = 'scale_or_burst'\nINSTALL_NI = 'cloudify.plugins.lifecycle.install_node_instances'\nSTART_MOD = 'cloudify.workflows.workflow_context.' \\\n 'WorkflowDeploymentContext.start_modification'\n\nPLAN_RS = 'cloudify.dp.relationships.plans'\nPLANS = 'deployment_plans'\n\n\nclass MockCloudifyWorkflowContext(MockContext):\n\n def __init__(self, storage):\n self._context = {}\n self.local = True\n self._endpoint = storage\n self._local_task_thread_pool_size = 1\n self._task_retry_interval = 1\n self._task_retries = 1\n self._subgraph_retries = 1\n self._mock_context_logger = \\\n setup_logger('mock-context-logger')\n handler = \\\n LocalCloudifyWorkflowContextHandler(self, storage)\n self.internal = \\\n CloudifyWorkflowContextInternal(self, handler)\n raw_nodes = storage.get_nodes()\n raw_node_instances = storage.get_node_instances()\n self._nodes = dict(\n (node.id, CloudifyWorkflowNode(self, node, self))\n for node in raw_nodes)\n self._node_instances = dict(\n (instance.id, CloudifyWorkflowNodeInstance(\n self, self._nodes[instance.node_id], instance,\n self))\n for instance in raw_node_instances)\n\n @property\n def logger(self):\n return self._mock_context_logger\n\n def get_node(self, node_id):\n return self._nodes.get(node_id)\n\n\nclass TestBurst(testtools.TestCase):\n\n def get_mock_workflow_context(self, _storage):\n cloudify_workflow_context = \\\n MockCloudifyWorkflowContext(_storage)\n return cloudify_workflow_context\n\n def mock_cloudify_client(self, _storage):\n client = CloudifyClient()\n client.nodes = _storage.nodes\n client.node_instances = _storage.node_instances\n return client\n\n burst_blueprint_path = os.path.join('resources', 'blueprint.yaml')\n\n def get_dp_node_group_ids(self, dp_managing_node_rs):\n return [rs.get('target_id') for rs in dp_managing_node_rs]\n\n def get_deployment_plans(self, dp_managing_node):\n return dp_managing_node.properties.get(PLANS)\n\n def set_up_dp_nodes_group(self,\n cfy_local_env,\n dp_nodes_group_ids,\n dp_node_plans):\n 
dp_nodes_group = {}\n for dp_node_id in dp_nodes_group_ids:\n this_dp_node = cfy_local_env.storage.get_node(dp_node_id)\n dp_nodes_group.update({\n this_dp_node.id: {\n 'count': int(this_dp_node.number_of_instances),\n 'capacity': dp_node_plans.get(\n this_dp_node.id, {}).get('capacity', {}),\n 'constraints': dp_node_plans.get(\n this_dp_node.id, {}).get('constraints', {})\n }\n })\n return dp_nodes_group\n\n @workflow_test(blueprint_path=burst_blueprint_path)\n def test_burst_up(self, cfy_local):\n dp_managing_node = cfy_local.storage.get_node('dp_compute')\n ctx = self.get_mock_workflow_context(cfy_local.storage)\n number_of_new_instances = 2\n mixed_target_node_ids = \\\n self.get_dp_node_group_ids(dp_managing_node['relationships'])\n plans = self.get_deployment_plans(dp_managing_node)\n modification_data = \\\n {dp_managing_node.id: {'instances':\n dp_managing_node.number_of_instances}}\n with mock.patch('dp_plugin.burst.manager_client') as \\\n self.mock_cloudify_client:\n burst_up_modification_data = \\\n burst_up(ctx,\n dp_managing_node.id,\n number_of_new_instances,\n mixed_target_node_ids,\n plans,\n modification_data)\n self.assertEqual(\n dp_managing_node.number_of_instances + number_of_new_instances,\n burst_up_modification_data.get(\n dp_managing_node.id).get('instances')\n )\n dp_compute_1 = cfy_local.storage.get_node('cloud_1_compute')\n self.assertEqual(\n dp_compute_1.number_of_instances + number_of_new_instances,\n burst_up_modification_data.get('cloud_1_compute').get('instances')\n )\n\n @workflow_test(blueprint_path=burst_blueprint_path)\n def test_burst_down(self, cfy_local):\n dp_managing_node = cfy_local.storage.get_node('dp_compute')\n ctx = self.get_mock_workflow_context(cfy_local.storage)\n number_of_new_instances = -1\n mixed_target_node_ids = \\\n self.get_dp_node_group_ids(dp_managing_node['relationships'])\n modification_data = \\\n {dp_managing_node.id: {'instances':\n dp_managing_node.number_of_instances}}\n with mock.patch('dp_plugin.burst.manager_client') as \\\n self.mock_cloudify_client:\n burst_up_modification_data = \\\n burst_down(ctx,\n dp_managing_node.id,\n number_of_new_instances,\n mixed_target_node_ids,\n modification_data)\n self.assertEqual(\n dp_managing_node.number_of_instances + number_of_new_instances,\n burst_up_modification_data.get(\n dp_managing_node.id).get('instances')\n )\n dp_compute_1 = cfy_local.storage.get_node('cloud_1_compute')\n self.assertEqual(\n dp_compute_1.number_of_instances + number_of_new_instances,\n burst_up_modification_data.get('cloud_1_compute').get('instances')\n )\n","repo_name":"cloudify-examples/hybrid-cloud-plugin","sub_path":"dp_plugin/tests/test_dp_plugin.py","file_name":"test_dp_plugin.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"44792528266","text":"from flask import Flask, request, jsonify\nimport requests\nimport concurrent.futures\n\napp = Flask(__name__)\n\ndef fetch_numbers(url):\n try:\n response = requests.get(url)\n response.raise_for_status()\n data = response.json()\n if \"numbers\" in data and isinstance(data[\"numbers\"], list):\n return data[\"numbers\"]\n except (requests.exceptions.RequestException, ValueError):\n pass\n return []\n\n@app.route('/numbers')\ndef get_numbers():\n urls = request.args.getlist(\"url\")\n unique_numbers = set()\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future_to_url = {executor.submit(fetch_numbers, url): url for url in urls}\n for 
future in concurrent.futures.as_completed(future_to_url):\n            url = future_to_url[future]\n            numbers = future.result()\n            unique_numbers.update(numbers)\n\n    sorted_numbers = sorted(unique_numbers)\n    response = {\"numbers\": sorted_numbers}\n    return jsonify(response)\n\nif __name__ == '__main__':\n    app.run(port=3000)\n","repo_name":"swayamverma412/2006385","sub_path":"Numbers/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"411638657","text":"import os, uuid\nfrom azure.storage.filedatalake import DataLakeServiceClient, DataLakeDirectoryClient, DataLakeFileClient\n\ntry:\n    print(\"Start of the sample\")\n    connect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')\n    # Create the DataLakeServiceClient object which will be used for access to the data lake storage\n    print(\"\\nCreate data lake service...\")\n    service_client = DataLakeServiceClient.from_connection_string(conn_str=connect_str)\n    \n    # generate a random name for testing purpose\n    fs_name = \"testfs1\"\n    \n    # create the filesystem\n    filesystem_client = service_client.create_file_system(file_system=fs_name)\n    print(\"Create a test filesystem named '{}'.\".format(fs_name))\n    \n    # create a directory hierarchy\n    dir_name = \"testdir1/testdir2/testdir3/testdir4/testdir5\"\n    main_directory_client = filesystem_client.create_directory(dir_name)\n    print(\"Creating a directory named '{}'.\".format(dir_name))\n    \n    # locate 4'th directory for uploading\n    directory_client = filesystem_client.get_directory_client(\"testdir1/testdir2/testdir3/testdir4\")\n    \n    # prepare file for uploading\n    curr_dirr = os.path.dirname(os.path.abspath(__file__))\n    file_path = os.path.join(curr_dirr, 'data/IndianFoodDatasetCSV.csv')\n    local_file = open(file_path,'rb')\n    file_client = directory_client.create_file(\"IndianFoodDatasetCSV.csv\")\n    print(\"Open local file, prepare for uploading\")\n    \n    file_contents = local_file.read()\n    print(\"Uploading starts\")\n    file_client.append_data(data=file_contents, offset=0, length=len(file_contents))\n    file_client.flush_data(len(file_contents))\n    print(\"Uploading finish\")\n\n\nexcept Exception as ex:\n    print('Exception:')\n    print(ex)\n","repo_name":"passt0r/AzureDataLakeSample","sub_path":"dataLakeSample.py","file_name":"dataLakeSample.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"36252168447","text":"\"\"\"import pygame to display every element\nget every needed constant\n\"\"\"\n\nimport pygame\nfrom config import FONT, BIG_FONT, BOT_RIGHT, BOT_LEFT, TILE_SIZE, FLOOR, WALL\n\nclass Display:\n    \"\"\"This class contains every action that must output or modify the display\n    of the game window, like moving the character, remove items or show infos\n    \"\"\"\n\n    def __init__(self, width, height, title):\n        \"\"\"init the class by creating the pygame window\"\"\"\n        self.window = pygame.display.set_mode((width, height))\n        pygame.display.set_caption(title)\n\n    def game_over(self, message):\n        \"\"\"#When the game is over, display the end message and close the game\"\"\"\n        self.window.fill((0, 0, 0))\n        text = BIG_FONT.render(message, True, (255, 255, 255))\n        rect = text.get_rect()\n        rect.center = 300, 300\n        self.window.blit(text, rect)\n        pygame.display.flip()\n        pygame.time.wait(3000)\n\n    def _loot_item(self, gyver, items):\n        \"\"\"check if MacGyver meets an uncollected item,\n        if he does, add it to 
the bag and mark the item as looted\n \"\"\"\n for item in items:\n if(gyver.x_pos == item.x_pos and gyver.y_pos == item.y_pos and\n item.looted is False):\n gyver.bag += 1\n item.looted = True\n self.show_message('You collected the {}'.format(item.name), BOT_LEFT)\n self.window.blit(FLOOR, (TILE_SIZE*item.x_pos, TILE_SIZE*item.y_pos))\n bag_message = \"You collected {}/3 items\".format(gyver.bag)\n self.show_message(bag_message, BOT_RIGHT)\n return item\n return None\n\n def set_map(self, mapping):\n \"\"\"display the floor and wall tiles\"\"\"\n for y_pos, line in enumerate(mapping.map):\n for x_pos, tile in enumerate(line):\n if tile == '#':\n self.window.blit(WALL, (x_pos*TILE_SIZE, y_pos*TILE_SIZE))\n else:\n self.window.blit(FLOOR, (x_pos*TILE_SIZE, y_pos*TILE_SIZE))\n\n def show_looted_items(self, items):\n \"\"\"display the items looted in bottom screen\"\"\"\n for x_pos, item in enumerate(items):\n if item.looted:\n self.window.blit(item.pygame_img, (300+TILE_SIZE*x_pos, 605))\n pygame.display.flip()\n\n def set_characters(self, gyver, bad_guy):\n \"\"\"display both characters\"\"\"\n gyver_position = (gyver.x_pos * TILE_SIZE, gyver.y_pos * TILE_SIZE)\n\n bad_guy_position = (bad_guy.x_pos * TILE_SIZE, bad_guy.y_pos * TILE_SIZE)\n\n self.window.blit(gyver.pygame_img, gyver_position)\n self.window.blit(bad_guy.pygame_img, bad_guy_position)\n\n def set_items(self, items):\n \"\"\"display the items on the map\"\"\"\n for item in items:\n image = item.pygame_img\n self.window.blit(image, (item.x_pos * TILE_SIZE, item.y_pos * TILE_SIZE))\n\n def show_message(self, message, rect):\n \"\"\"Display the given message in the given rect\"\"\"\n self.window.fill((0, 0, 0), rect)\n text = message\n text_render = FONT.render(text, True, (255, 255, 255))\n self.window.blit(text_render, rect)\n pygame.display.update(rect)\n\n def move_character(self, mapping, gyver, items, old_pos):\n \"\"\"move the character, to his new position, check if the position\n is available and return his old position if not and display the\n message \"Invalid destination\" else display the character on his\n new position and check if he meet item or guardian\n \"\"\"\n self.window.fill((0, 0, 0), BOT_LEFT)\n pygame.display.update(BOT_LEFT)\n if (mapping.is_path_available(gyver.y_pos, gyver.x_pos) and\n (old_pos[\"y_pos\"] != gyver.y_pos or old_pos[\"x_pos\"] != gyver.x_pos)):\n mapping.move_character(old_pos[\"y_pos\"], old_pos[\"x_pos\"],\n gyver.y_pos, gyver.x_pos)\n self._loot_item(gyver, items)\n self.show_looted_items(items)\n self.window.blit(gyver.pygame_img,\n (gyver.x_pos * TILE_SIZE, gyver.y_pos * TILE_SIZE))\n self.window.blit(FLOOR, (old_pos[\"x_pos\"] * TILE_SIZE, old_pos[\"y_pos\"] * TILE_SIZE))\n pygame.display.update()\n else:\n gyver.y_pos = old_pos[\"y_pos\"]\n gyver.x_pos = old_pos[\"x_pos\"]\n message = 'Invalid Destination'\n self.show_message(message, BOT_LEFT)\n","repo_name":"JoeCaoV/Macgyver","sub_path":"classes/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"26539813032","text":"# Emre Kin\r\n# \r\n# with this project we do 3 process one make super file 1 make dummy and one for watermark\r\nimport PyPDF2\r\nimport sys\r\ninputs = sys.argv[1:] # with this we cant take file names in terminal\r\n\r\n\r\ndef pdf_combiner(pdf_list): # with this we make a super pdf combined\r\n merger = PyPDF2.PdfFileMerger()\r\n for pdf in pdf_list:\r\n merger.append(pdf)\r\n 
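# write the merged result out as a single combined pdf\r\n    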
merger.write('super.pdf')\r\n\r\n\r\nwith open('dummy.pdf', 'rb') as my_pdf: # with this we make a new pdf like dummy\r\n    reader = PyPDF2.PdfFileReader(my_pdf)\r\n    page = reader.getPage(0)\r\n    page.rotateCounterClockwise(90)\r\n    writer = PyPDF2.PdfFileWriter()\r\n    writer.addPage(page)\r\n    with open('tilt.pdf', 'wb') as pdf:\r\n        writer.write(pdf)\r\npdf_combiner(inputs)\r\n\r\npdf = PyPDF2.PdfFileReader(open('super.pdf', 'rb'))\r\nwatermark = PyPDF2.PdfFileReader(open('wtr.pdf', 'rb'))\r\noutput = PyPDF2.PdfFileWriter()\r\n\r\nfor i in range(pdf.getNumPages()): # with this we watermark the wanted pdf\r\n    page = pdf.getPage(i)\r\n    page.mergePage(watermark.getPage(0))\r\n    output.addPage(page)\r\n    pass\r\n\r\nwith open('watermarked_output.pdf', 'wb') as file:\r\n    output.write(file)\r\n","repo_name":"Emre-Kin/pdf-watermark","sub_path":"pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"19325905658","text":"from sqlalchemy import(\n    Column,\n    Integer,\n    String,\n    ForeignKey\n)\nfrom sqlalchemy.types import(\n    Date,\n    Boolean,\n    Time,\n    DateTime\n)\nfrom sqlalchemy.orm import(\n    relationship,\n    backref\n)\nfrom app.models.base import ModelBase\nfrom app.core.database import Base\nfrom datetime import datetime\n\nclass Server(ModelBase, Base):\n    __tablename__ = \"server\"\n    id = Column(Integer, primary_key=True, index=True, autoincrement=True)\n    name = Column(String(255))\n    base = Column(String(255))\n    user = Column(String(255))\n    password = Column(String(255))\n    serve = Column(String(255))\n    date_created = Column(DateTime, default=datetime.utcnow)\n\n\n    @classmethod\n    def add(cls, session, data):\n        server = Server()\n        server.name = data.name\n        server.base = data.base\n        server.user = data.user\n        server.password = data.password\n        server.serve = data.serve\n        session.add(server)\n        session.commit()\n        session.refresh(server)\n        return Server.find_by_id(session=session, id=server.id)\n","repo_name":"sdaviid/api-rutorrent-controller","sub_path":"app/models/domain/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"26853181889","text":"from django.urls import path\n\nfrom chess import views\n\napp_name = \"chess\"\n\nurlpatterns = [\n    path(\"index\", views.Index.as_view(), name=\"index\"),  # home page\n    path(\"feedback\", views.Feedback.as_view(), name=\"feedback\"),\n    path(\"about\", views.About.as_view(), name=\"about\"),\n]\n","repo_name":"Chapoii/XiangQi","sub_path":"chess/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
{"seq_id":"6842742700","text":"import argparse\nimport os\nimport csv\n\n#list of uris to get the mementos of (instagram.com/username)\n#urls= ['instagram.com/RobertFKennedyJr']\n    # urls = ['instagram.com/drmercola', 'instagram.com/RobertFKennedyJr', 'instagram.com/thetruthaboutvaccinesttav',\n    #         'instagram.com/drtenpenny', 'instagram.com/_rizzaislam', 'instagram.com/drbuttar',\n    #         'instagram.com/healthnutnews', 'instagram.com/greenmedinfo', 'instagram.com/kellybroganmd',\n    #         'instagram.com/drchristianenorthrup', 'instagram.com/dr.bentapper']\n\n    # urls = ['instagram.com/bbcnews', 'instagram.com/unicef', 'instagram.com/cdcgov',\n    #         'instagram.com/who', 'instagram.com/thisisbillgates', 
'instagram.com/ukgovofficial',\n # 'instagram.com/nhs', 'instagram.com/gatesfoundation', 'instagram.com/lshtm']\n\n #urls = ['instagram.com/instagram', instagram.com/cristiano', 'instagram.com/leomessi', 'instagram.com/selenagomez', \n # 'instagram.com/kyliejenner', 'instagram.com/therock', 'instagram.com/arianagrande', 'instagram.com/kimkardashian', \n # 'instagram.com/beyonce', 'instagram.com/khloekardashian', 'instagram.com/nike', 'instagram.com/justinbieber',\n # 'instagram.com/kendalljenner', 'instagram.com/natgeo', 'instagram.com/taylorswift', 'instagram.com/virat.kohli',\n # 'instagram.com/jlo', 'instagram.com/kourtneykardash', 'instagram.com/nickiminaj', 'instagram.com/mileycyrus'\n # 'instagram.com/neymarjr', 'instagram.com/katyperry', 'instagram.com/zendaya', 'instagram.com/kevinhart4real', 'instagram.com/iamcardib' ]\n\ndef get_mementos(url):\n cmd = 'curl -L -s \\\"http://web.archive.org/cdx/search/cdx?url=https://www.' + url + '\"'\n output = os.popen(cmd)\n return output.readlines()\n\ndef get_memento_analysis(cdx_object):\n\n uri_m = \"https://web.archive.org/web/\" + cdx_object[1] + \"/\" + cdx_object[2]\n timestamp=cdx_object[1]\n memento_year = int(timestamp[:4])\n date=\"{}/{}/{}\".format(timestamp[:4], timestamp[4:6], timestamp[6:8])\n time=\"{}:{}:{}\".format(timestamp[8:10], timestamp[10:12], timestamp[12:])\n mimetype=cdx_object[3]\n if mimetype==\"text/html\":\n mimetype=''\n status_code = cdx_object[4]\n redirects_to=''\n redirect_type=''\n\n cmd = 'curl -I -L -s ' + uri_m +'|grep \"HTTP\\|^location:\"'\n\n #if the memento is a warc/revisit or a redirect, get the location of the revisit\n if status_code=='-' or 300<=int(status_code)<400:\n output = os.popen(cmd)\n x=output.readlines()\n list_of_locations=[]\n list_of_status_codes=[]\n\n #separate redirect locations from status codes\n for item in x:\n if item.startswith('location:'):\n list_of_locations.append(item)\n else:\n list_of_status_codes.append(item[9:12])\n\n if len(list_of_status_codes)==1:\n assert len(list_of_locations)==0\n redirect_type=get_redirect_type(list_of_status_codes)\n elif len(list_of_locations)>0:\n redirects_to=list_of_locations[-1][10:]\n if 'login' in redirects_to:\n redirect_type='login'\n else:\n redirect_type=get_redirect_type(list_of_status_codes)\n \n return [memento_year, uri_m, status_code, date, time, mimetype, cdx_object[5], redirects_to, redirect_type]\n\ndef get_memento_data_from_one_account(url, start, end):\n mementos=get_mementos(url)\n print(len(mementos))\n if start is None:\n start=0\n if end is None:\n end=len(mementos)\n #filename=username.csv\n filename=url[14:]+'.csv'\n with open(filename, 'a', encoding='utf-8') as file:\n writer = csv.writer(file)\n #csv headers\n if start==0:\n writer.writerow([\"year\", \"urim\", \"status_code\", \"date\", \"time\", \"mimetype\", \"digest\", 'redirects_to', 'redirect_type'])\n count=0\n for line in mementos[start:end]:\n cdx_object = line.split(\" \") #[\"urlkey\",\"timestamp\",\"original\",\"mimetype\",\"statuscode\",\"digest\",\"length\"]\n try:\n data=get_memento_analysis(cdx_object)\n #print(data)\n #update csv\n writer.writerow(data)\n except Exception as e:\n print('failed to write to file: ')\n print(line)\n raise e\n count+=1\n if count%50==0:\n print(count)\n print(\"currently processing: \"+ str(cdx_object[1:]))\n\ndef get_redirect_type(list_of_status_codes):\n status_code=int(list_of_status_codes[-1])\n if status_code==200:\n redirect_type='2xx'\n elif 400<=status_code<500:\n redirect_type='4xx'\n elif 
500<=status_code<600:\n redirect_type='5xx'\n else:\n print(status_code)\n exit()\n return redirect_type\n\nif __name__ == \"__main__\":\n parser=argparse.ArgumentParser()\n subparser = parser.add_subparsers(dest='command')\n\n cdx_objects = subparser.add_parser('cdx')\n cdx_objects.add_argument('--input', type=str, required=True)\n cdx_objects.add_argument('--output', type=str, required=True)\n\n mult_accounts = subparser.add_parser('mult')\n mult_accounts.add_argument('--input', type=str, required=True)\n\n singular_account = subparser.add_parser('one', help='get memento data for ONE url')\n singular_account.add_argument('--url', type=str, required=True)\n singular_account.add_argument('--start', type=int, required=False)\n singular_account.add_argument('--end', type=int, required=False)\n\n #not finished\n args=parser.parse_args()\n if args.command=='cdx':\n pass\n # with open(args.input, 'r') as input_file:\n # with open(args.output, 'a', encoding='utf-8') as output_file:\n # writer = csv.writer(output_file)\n # for line in output_file:\n # cdx_object = line.split(\" \") #[\"urlkey\",\"timestamp\",\"original\",\"mimetype\",\"statuscode\",\"digest\",\"length\"]\n # try:\n # data=get_memento_analysis(cdx_object)\n # #update csv\n # writer.writerow(data)\n # update_summary()\n # except:\n # print(line)\n\n elif args.command=='one':\n get_memento_data_from_one_account(args.url, args.start, args.end)\n\n #not finished\n elif args.command=='mult':\n with open(args.input, 'r') as file:\n input = file.read().splitlines()\n print(input)\n #if args.combine is not None:\n #main_url(input, combine=True, output=args.combine)\n for url in input:\n get_memento_data_from_one_account(url)\n","repo_name":"rachelzheng03/ODU_REU_2023","sub_path":"memento_replayability_data_extraction.py","file_name":"memento_replayability_data_extraction.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"3147944249","text":"#Etgg 1801-02\n#Thomas Gilman\n#More with Pygame notes\n\n#Pygame: More with surfaces:\n #Surface Transforms:\n #new surface#pygame.transform.scale(sourceSurface,(width,height)<-size to scale to)\n #*usefulmfor sizing or representing distance.\n #pygame.transform.rotate(sourceSurface,angle):\n #*returns a new source that has been rotated(In degrees).\n #New surface=pygame.transform.flip(sourceSurface,True/False(x-axis),True/False(y-axis))\n #*useful for left/right sprite animations\n #New Surface2=pygame.transform.rotoZoom(sourceSurface,angle,zoomfactor(size multiplier,1.0=no change))\n\n#Pygame example 2\n\nimport pygame\nimport time\n\npygame.init()\n\ndisp_surf=pygame.display.set_mode((800,600),pygame.SWSURFACE,24)\n\npatty_surf=pygame.image.load(\"fsm2.jpg\")\n\n#blitting from upper-left corner\nfor i in range(0,720):\n disp_surf.fill((0,0,0))\n new_surf=pygame.transform.rotate(patty_surf,i)\n disp_surf.blit(new_surf,(100,100))\n pygame.display.flip()\n\n#blitting from center of image\nfor i in range(0,720):\n new_surf=pygame.transform.rotate(patty_surf,i)\n disp_surf.fill((0,0,0))\n disp_surf.blit(new_surf,(100-new_surf.get_width()/2,100-new_surf.get_height()/2))\n pygame.display.flip()\n\nfor i in range(0,720):\n new_surf=pygame.transform.rotozoom\n\n\n\npygame.display.flip()\ntime.sleep(5)\n\npygame.display.quit()\n\n#Pygame Sounds:\n #Two types of sounds:\n #*Sound effects\n #*Background Music\n #pygame.mixer.set_num_channels(8)<-number of simultaneous sounds.\n#Load a sound:\n 
#sound_snd=pygame.mixer.Sound(\"sound.file\")\n#Playing a sound:\n #sound_snd.play()\n#Adjust sound volume\n #sound_snd.set_volume(0.0-1.0)\n #sound_snd.get_volume()\n#Fade out sound\n #sound_snd.fade_out(500(number of milliseconds to fade over))\n#Stop the sound\n #sound_snd.stop()<-stop immediately\n#get length of sound\n #sound_snd.get_length()<-return length in seconds\n#Music:\n #pygame.mixer.music.load(\"music.mp3\"(file name))\n#play the music\n #pygame.mixer.music.play(loops=(-1=forever),startpos=0.0(#seconds in song))\n#Stop the music\n #pygame.mixer.music.stop()\n#fade out music\n #pygame.mixer.music.fade_out(500)\n#set volume\n #pygame.mixer.music.set_volume(1.0)\n#pause/unpause musice\n #pygame.mixer.music.pause()\n #pygame.mixer.music.unpause()\n#Que music\n #pygame.mixer.music.queue(\"BossMusic.mp3\")\n#rewind music\n #pygame.mixer.music.rewind()\n#pygame.mixer.fade_out(500)\n#pygame.mixer.pause()\n#pygame.mixer.unPause()\n#pygame.mixer.stop()\n\n#Input with Pygame:\n #Mouse:\n #*(mx,my)=pygame.mouse.get_pos()\n #returns the x, and y of the mouse.\n #*pygame.mouse.set_visible(True/False)\n #set mouse pointer visibility\n #(LB,MB,RB)=pygame.mouse.get_pressed()\n #*^^each is true or false.\n #pygame.mouse.set_pos((x,y))\n #Keyboard Input:\n #pygame.event.pump()<-allows pygame to receive events from the O.S.\n #Must be called regularly, usually called in the game loop\n #pygame.key.get_pressed()<-returns a giant tuple of all keys & their states\n #Position in tuple determines key.\n #difficult to use.\n #pygame.key.name(number)<-returns keyname of position specified\n#keyboard helper functions:\n #def keyGetPressedList():\n #pygame.event.pump()\n #pressed=pygame.key.get_pressed()\n #result=[]\n #for i in range(0,len(pressed)):\n #if pressed[i]=0:\n #result.append(pygame.key.name(i))\n #return result\n\n #def keyIsPressed(keySymbol):\n #if keySymbol in keyGetPressedList():\n #return True\n #else:\n #return False\n\n #def keyIsNotPressed(keySymbol):\n #if keySymbol not in keyGetPressedList():\n #return True\n #else:\n #return False\n \n","repo_name":"ThomasMGilman/ETGG1801_GameProgrammingFoundations","sub_path":"Notes/More with Pygame Notes.py","file_name":"More with Pygame Notes.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"74897947691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 2 20:56:50 2020\n\n@author: danish\n\"\"\"\nimport cv2\nimport ModelWrapper as mp\nfrom keras.models import load_model\nfrom preprocessing import Fit_Preprocessing, GlobalNormalization, ToJson\nfrom preprocessing import ReadFileNames\nimport numpy as np\n\n\n\ndef WriteInfo(err, text, norm_count, anom_count):\n mp.PrintInline('{4}, Frame Status: {0}, Normal Frame Count: {1}/{2}, Anomaly Frame Count {3}/{2}'.format(text, norm_count, norm_count+anom_count, anom_count, err))\n\ndef get_model(model_path):\n print('\\n\\n------- Loading Model: {0} ! -------'.format(model_path.split('/')[-1]))\n print('\\n--------------- This may take a while! ---------------\\n\\n')\n model=load_model(model_path)\n print('\\n\\n------- Model Loaded! {0} ! 
-------\\n\\n'.format(model_path.split('/')[-1]))\n    return model\n\ndef RealTimeDetection(model, threshold, serve_type='real-time', vid_path=None, verbose=True):\n    if serve_type=='real-time':\n        cap=cv2.VideoCapture(0)\n    elif serve_type=='video':\n        if vid_path==None:\n            raise TypeError('Value of `vid_path` argument cannot be `None`, when `serve_type` value is `video`. Provide valid path of `str` datatype.')\n        cap=cv2.VideoCapture(vid_path)\n    \n    _,frame=cap.read()\n    shape = np.shape(frame)\n    ret=True\n    norm_count = 0\n    anom_count = 0\n    test_history = {'Serving Type':serve_type, 'Loss':[], 'Normal Frames': [], \n                    'Anomaly Frames':[], 'Total Frames':[]}\n    print('\\n\\n------- Press q to exit the Real Time Detection! -------\\n')\n    while(cap.isOpened()):\n        img_lst=[]\n        v_frames = np.zeros(shape=(10, shape[0], shape[1], shape[2]), dtype=np.uint8)\n        for i in range(10):\n            ret,frame=cap.read()\n            if (ret != True):\n                cv2.destroyAllWindows()\n                raise EOFError('The Video is Completed Successfully!')\n            #copy the original frame for display.\n            v_frames[i]=frame\n            gray = mp.ImgProcess(frame, shape=(227,227))\n            img_lst.append(gray)\n        img_arr = mp.Img_LstArr(img_lst, re_shape=(227, 227, 10))\n        #making prediction\n        pred = model.predict(img_arr)\n        #computing error\n        loss = mp.MSE(img_arr, pred)\n        err = 'Loss: {0:.5f}'.format(loss)\n        if ret==True:\n            test_history['Loss'].append(loss); test_history['Normal Frames'].append(norm_count)\n            test_history['Anomaly Frames'].append(anom_count)\n            test_history['Total Frames'].append(norm_count+anom_count)\n            ToJson(test_history, name='Test History.json')\n        if loss>threshold:\n            anom_count += 10\n            text='Anomalies Detected'\n            for j in range(len(v_frames)):\n                mp.ShowVideo(cap, v_frames[j], text)\n            if verbose:\n                WriteInfo(err, text, norm_count, anom_count)\n        else:\n            text='Normal'\n            norm_count += 10\n            for j in range(len(v_frames)):\n                mp.ShowVideo(cap, v_frames[j], text)\n            if verbose:\n                WriteInfo(err, text, norm_count, anom_count)\n\n\n\ndef StaticServing(path, model, threshold, frames_ext, serve_type='frames', verbose=True):\n    if serve_type=='frames':\n        onlyfiles, _, _ = ReadFileNames(path, frames_ext)\n        all_files = mp.ListCopy(onlyfiles)\n        num = 10\n        ten_list = np.reshape(all_files, (len(all_files)//num, num))\n        img_lst = Fit_Preprocessing(path, frames_ext)\n        X_test = GlobalNormalization(img_lst, save_data=False) \n    elif serve_type=='npy':\n        X_test = np.load(path)\n    \n    X_test = mp.PrepareData(X_test)\n    norm_count = 0\n    anom_count = 0\n    test_history = {'Serving Type':serve_type, 'Loss':[], 'Normal Frames': [], \n                    'Anomaly Frames':[], 'Total Frames':[]}\n    print('\\n\\t------------- Now Serving will begin! 
-------------\\n\\n')\n for number, bunch in enumerate(X_test):\n #Reshaping batch to 5 dimensions\n batch = np.expand_dims(bunch,axis=0)\n pred_batch = model.predict(batch)\n #computing loss\n loss = mp.MSE(batch, pred_batch)\n err = 'Loss: {0:.5f}'.format(loss)\n test_history['Loss'].append(loss); test_history['Normal Frames'].append(norm_count)\n test_history['Anomaly Frames'].append(anom_count)\n test_history['Total Frames'].append(norm_count+anom_count)\n ToJson(test_history, name='Test History.json')\n if loss>threshold:\n anom_count += 10\n text='Anomalies Detected'\n if serve_type=='frames':\n for j in range(len(ten_list[number])):\n v_frame = cv2.imread(ten_list[number][j])\n cap=None\n mp.ShowVideo(cap, v_frame, text)\n if verbose:\n WriteInfo(err, text, norm_count, anom_count)\n else:\n text='Normal'\n norm_count += 10\n if serve_type=='frames':\n for j in range(len(ten_list[number])):\n v_frame = cv2.imread(ten_list[number][j])\n cap=None\n mp.ShowVideo(cap, v_frame, text)\n if verbose:\n WriteInfo(err, text, norm_count, anom_count)\n print('\\n\\t------------- Serving is Completed! -------------\\n\\n')\n return test_history\n\ndef DeploySystem(serve_type, model_path, preset_threshold=True, data_model=None, verbose=True, path=None, frames_ext=None, threshold=None, config_gpu=False):\n serving_types = ['real-time', 'video', 'frames', 'npy']\n if preset_threshold:\n if threshold is not None:\n raise TypeError('Invalid value given to argument `threshold`, its value must be None when `preset_threshold` argument is set to True.')\n if data_model=='UCSD':\n threshold=0.00026\n elif data_model=='Avenue':\n threshold=0.00040\n else:\n raise ValueError('Invalid value given to the Argument `data_model`, it can be either `UCSD` or `Avenue`!')\n else:\n if threshold is None:\n raise TypeError('None value given to argument `threshold`, it cannot be None when `preset_threshold` argument is set to False, provide a value of `float` datype or set the `preset_threshold` argument to True, to use Preset Values of Threshold.')\n if serve_type!='real-time' and serve_type != None:\n if path is None:\n raise TypeError('None value given to argument `path`, it cannot be None when value of `serve_type` is other than None.')\n if config_gpu:\n #Setting up the GPU to avoid VRAM and other conflicts.\n #For refernce visit: https://github.com/irdanish11/AnomalyEventDetection/issues/1\n mp.TF_GPUsetup(GB=5)\n #loading the model\n model = get_model(model_path)\n ####################### Different Serving Techinques ######################\n \n #Serve the Anomaly Detection from the WebCam or any video device that is attached.\n if serve_type=='real-time':\n RealTimeDetection(model, threshold, serve_type, verbose=verbose)\n test_hist = None\n #Serve the Anomaly Detection from the given video.\n elif serve_type=='video':\n RealTimeDetection(model, threshold, serve_type, vid_path=path, verbose=verbose)\n test_hist = None\n #Serve the Anomaly Detection from the directory which contain frames, the Hirerachy of \n #directories must be like this: /*Directories/Here all the images\n #The path you provide must contain a further directory or directories and in those directories\n #should have the frames.\n elif serve_type=='frames':\n test_hist = StaticServing(path, model, threshold, frames_ext, serve_type, verbose=verbose)\n ##Serve the Anomaly Detection from the .npy file.\n elif serve_type=='npy':\n test_hist = StaticServing(path, model, threshold, frames_ext, serve_type, verbose=verbose)\n else:\n raise 
ValueError('Invalid value given to the `serve_type` argument. Possible values: {0}'.format(serving_types))\n return test_hist\n\n\n\nif __name__=='__main__':\n \n #model_path = 'checkpoints/Train_UCSDped2_Model.h5'\n model_path = 'checkpoints/Train_AvenueDataset_Model.h5'\n \n #vid_path = './AvenueDataset/testing_videos/05.avi' #5,9\n vid_path = './AnomalyEvent.mp4'\n \n frames_ext='.tif'\n frames_dir='Datasets/UCSDped2/Test'\n \n npy_file='./Test_Data/Test_UCSDped2.npy'\n \n #possible serving types\n serving_types = ['real-time', 'video', 'frames', 'npy']\n #Serving of Model\n serve_type = serving_types[1]\n test_hist = DeploySystem(serve_type, model_path, preset_threshold=True, data_model='Avenue', verbose=True, \n path=vid_path, frames_ext=None, threshold=None, config_gpu=True)\n ","repo_name":"irdanish11/AnomalyEventDetection","sub_path":"DeployModel.py","file_name":"DeployModel.py","file_ext":"py","file_size_in_byte":8884,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"55"} +{"seq_id":"25464313433","text":"from flask import Blueprint, jsonify\nfrom markr import db\nfrom markr.models import Class, Lecture, Question, Answer\nfrom markr.mod_vote.models import Vote\n\nteacher = Blueprint('teacher', __name__, url_prefix='/teacher')\n\n@teacher.route('/classes//', methods=[\"GET\"])\ndef get_classes(faculty_id):\n \"\"\"\n This returns a json response of all the classes for the given\n faculty member (specified by the faculty_id)\n \"\"\"\n classes = db.session.query(Class).filter(Class.ucsd_id == faculty_id).all()\n classes = [x.serialize for x in classes]\n\n data = {\n \"data\" : classes\n }\n\n return jsonify(data)\n\n@teacher.route('/lectures//', methods=[\"GET\"])\ndef get_lectures(section_id):\n \"\"\"\n This returns a json response of all the lectures for a given section.\n \"\"\"\n lectures = db.session.query(Lecture).filter(Lecture.sec_id == section_id).all()\n lectures = [x.serialize for x in lectures]\n\n data = {\n \"data\" : lectures\n }\n return jsonify(data)\n\n@teacher.route('/questions//', methods=['GET'])\ndef get_questions(lecture_id):\n \"\"\"\n This returns a json response of all the questions for a given lecture.\n \"\"\"\n questions = db.session.query(Question).filter(Question.lecture_id == lecture_id).all()\n questions = [x.serialize for x in questions]\n\n for question in questions:\n answers = Answer.query.filter_by(question=question[\"id\"]).all()\n question[\"vote_count\"] = Vote.query.filter_by(question_id = question[\"id\"]).count()\n question[\"number_of_options\"] = len(answers)\n question[\"answers\"] = [a.serialize for a in answers]\n\n data = {\n \"data\" : questions\n }\n return jsonify(data)\n\n@teacher.route('/questions_new//', methods=['GET'])\ndef get_questions_new(section_id):\n \"\"\"\n This returns a json response of all the questions for a given lecture.\n \"\"\"\n lectures = db.session.query(Lecture).filter(Lecture.sec_id == section_id).all()\n\n class_questions = []\n for lecture in lectures:\n questions = db.session.query(Question).filter(Question.lecture_id == lecture.id).all()\n questions = [x.serialize for x in questions]\n class_questions.extend(questions)\n\n for question in class_questions:\n answers = Answer.query.filter_by(question=question[\"id\"]).all()\n question[\"vote_count\"] = Vote.query.filter_by(question_id = question[\"id\"]).count()\n question[\"number_of_options\"] = len(answers)\n question[\"answers\"] = [a.serialize for a in answers]\n\n data = {\n \"data\" : 
class_questions\n }\n return jsonify(data)\n","repo_name":"jsandvik/cse-190-server","sub_path":"markr/teacher/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"1971636843","text":"\n'''\nSimple Snake Game in python3 for Beginners\nPart 3: Food\n'''\nimport turtle\nimport time\nimport random\n\ndelay = 0.1\n\n# set up the screen\nwn = turtle.Screen()\nwn.title(\"Snake Game\")\nwn.bgcolor('green')\nwn.setup(width=600, height=600)\nwn.tracer(0) # turn off the screen updates\n\n\n# Snake head\nhead = turtle.Turtle()\nhead.speed(0)\nhead.shape('square')\nhead.color('black')\nhead.penup()\nhead.goto(0,0)\nhead.direction = 'stop'\n\n# Snake food\nfood = turtle.Turtle()\nfood.speed(0)\nfood.shape('circle')\nfood.color('red')\nfood.penup()\nfood.goto(0,100)\n\n\n\n# Function\ndef go_up():\n\thead.direction = 'up'\n\ndef go_down():\n\thead.direction = 'down'\n\ndef go_left():\n\thead.direction = 'left'\n\t\ndef go_right():\n\thead.direction = 'right'\n\t\n\ndef move():\n\tif head.direction == 'up':\n\t\ty = head.ycor()\n\t\thead.sety(y + 20)\n\n\tif head.direction == 'down':\n\t\ty = head.ycor()\n\t\thead.sety(y - 20)\n\n\tif head.direction == 'left':\n\t\tx = head.xcor()\n\t\thead.setx(x - 20)\n\n\tif head.direction == 'right':\n\t\tx = head.xcor()\n\t\thead.setx(x + 20)\n\n# Keyboard bindings\nwn.listen()\nwn.onkeypress(go_up, 'Up')\nwn.onkeypress(go_down, 'Down')\nwn.onkeypress(go_left, 'Left')\nwn.onkeypress(go_right, 'Right')\n\n\n# Main game loop\nwhile True:\n\twn.update()\n\n\tif head.distance(food) < 20: # why 20 ? becacuse each of the basic turtle shapes is 20 pixels wide \n\t\t# Move the food to a random spot\n\t\tx = random.randint(-290, 290)\n\t\ty = random.randint(-290, 290)\n\n\t\tfood.goto(x, y)\n\n\tmove()\n\ttime.sleep(delay)\n\nturtle.mainloop()\n\n\n\n\n\n","repo_name":"mgbo/My_Exercise","sub_path":"2018_2019/_Snake_Game/snake_game_3.py","file_name":"snake_game_3.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"40238925922","text":"# Create your views here.\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect, HttpResponseRedirect, HttpResponse\nfrom django import forms\nfrom django.db import connection, transaction\nfrom django.db.models import Q\nfrom django.template import RequestContext\nfrom django.core.urlresolvers import reverse\n\nfrom plato.models import User, Group, EnsFile\nfrom util import auth, object_util\nfrom util.object_util import *\nfrom util.views import *\n\nfrom add_user.n_users import *\n\nfrom groups.views import *\nfrom add_user.views import *\nfrom users.views import *\n\nimport datetime\n\nfrom plato.form import *\n\n\ndef loggin(request):\n\t\"\"\"\n\t\\brief This is a fonction that get the information given by the users on the authorisation page and check the name of the user in the db and the pwd in the ldap\n\t\\param[in] request: is the http request object \n\t\\param[in] login: user's login\n\t\\param[in] pwd: usr's password\n\t\\author{B.Petitpas}\n\t\\date{02/05/2012}\n\t\\version{1}\n\t\"\"\"\n\n\t#to do => accpete only 1 connection to ldap per seconde ! 
and only 10 from a single IP adress !\n\t#request = object_util.create_session(request)\n #try to get a user object with a login check in the db\n\tif request.method == 'POST': #the form has been submitted\n\t\tform = LoginForm(request.POST)\n\t\tif form.is_valid(): #validation rules pass\n\t\t\tlog = form.cleaned_data['log']\n\t\t\tpwd = form.cleaned_data['pwd']\n\t\t\tusr_ = User.objects.filter(login=log)\n\t\t\tif not usr_:\n\t\t\t\t#check if the person exist in ldap\n\t\t\t\tserver = \"ldap.enst.fr\"\n\t\t\t\tuserdn = \"uid=\" + log + \",ou=People,dc=enst,dc=fr\"\n\t\t\t\tstatus,errmsg,info = auth.check_user_pwd(request,server,userdn,pwd,True)\n\t\t\t\tif status > 0 :# the user exists in the ldap (the person is a tsi member) => creta a new user in PLATO database\n\t\t\t\t\trequest.session['login']=log\n\t\t\t\t\trequest.session['name']=info['name']\n\t\t\t\t\trequest.session['fstname']=info['first_name']\n\t\t\t\t\trequest.session['actif']=False\n\t\t\t\t\t#request.session.set_expiry(36000)\n\t\t\t\t\t\n\t\t\t\t\tif info.has_key('status'): # update from the 24 sept 2013 : verify that you've got a tsi status (meaning that you are : permaneent, phd, cdd, post-doc)!\n\t\t\t\t\t\trequest.session['status']=info['status']\n\t\t\t\t\telse: # for person of tsi without a selected status (student, intern)\n\t\t\t\t\t\trequest.session['status']=''\n\t\t\t\t\t\t\n\t\t\t\t\treturn redirect('usr_n',log = log)\n\t\t\t\telse:\n\t\t\t\t\t# reshow the login/pwd page\n\t\t\t\t\t#request.META['HTTP_REFERER']\n\t\t\t\t\treturn return_referer(request)\n\t\t\t\t\t# if request.META.has_key('HTTP_REFERER'): # return the last page visited \n\t\t\t\t\t# \treturn redirect(request.META['HTTP_REFERER'])\n\t\t\t\t\t# else:\n\t\t\t\t\t# \treturn redirect('idx')\n\t\t\telse:\n\t\t\t\t# Check user password\n\t\t\t\tserver = \"ldap.enst.fr\"\n\t\t\t\tuserdn = \"uid=\" + log + \",ou=People,dc=enst,dc=fr\"\n\t\t\t\tstatus,errmsg,info = auth.check_user_pwd(request,server,userdn,pwd,True)\n\t\t\t\t\n\t\t\t\tif status < 0: #ldap not accessible\n\t\t\t\t\treturn return_referer(request)\n\t\t\t\t\t# if request.META.has_key('HTTP_REFERER'):\n\t\t\t\t\t# \treturn redirect(request.META['HTTP_REFERER'])\n\t\t\t\t\t# else:\n\t\t\t\t\t# \treturn redirect('idx')\n\t\t\t\t\t\n\t\t\t\telif status ==0: #not a good password\n\t\t\t\t\treturn return_referer(request)\n\t\t\t\t\t# if request.META.has_key('HTTP_REFERER'):\n\t\t\t\t\t# \treturn redirect(request.META['HTTP_REFERER'])\n\t\t\t\t\t# else:\n\t\t\t\t\t# \treturn redirect('idx')\n\t\t\t\t\t\n\t\t\t\telse: #it's working \n\t\t\t\t\tusr_ = get_object_or_404(User,login=log)\n\t\t\t\t\trequest.session['login']=log\n\t\t\t\t\trequest.session['status']=info['status']\n\t\t\t\t\trequest.session['name']=info['name']\n\t\t\t\t\trequest.session['fstname']=info['first_name']\n\t\t\t\t\trequest.session['actif']=usr_.actif\n\t\t\t\t\treturn redirect('usr', log=log)\n\t\telse:\n\t\t\treturn return_referer(request)\n\t\t\t# if request.META.has_key('HTTP_REFERER'):\n\t\t\t# \treturn redirect(request.META['HTTP_REFERER'])\n\t\t\t# else:\n\t\t\t# \treturn redirect('idx')\n\t# else:\n\t# \tif request.session.get('login',None):\n\t# \t\treturn redirect('usr', log=request.session['login'])\n\t# \telse:\n\t# \t\tform = LoginForm() #show the formular if it's not submitted\n\t# \t\treturn redirect('idx')\n\n\treturn redirect('idx')\n\n\ndef disconnect(request):\n\t\"\"\"\n\t\\brief{delete all the connexion information after clicking on disconnect}\n\t\"\"\"\n\ttry:\n\t\tdel 
request.session['login']\n\t\tdel request.session['status']\n\t\tdel request.session['name']\n\t\tdel request.session['fstname']\n\t\tdel request.session['actif']\n\t\t# del request.session\n\t\t# del request \n\texcept KeyError:\n\t\tpass\n\treturn redirect('idx')\n","repo_name":"ben6684/PLATO","sub_path":"plato/log/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"71832942572","text":" # & * () != && @ $\n\n\ndef picker( numbers ):\n\n catagory=[]\n counter=[]\n\n for X in numbers:\n\n if X not in catagory:\n catagory.append(X)\n counter.append(1)\n else:\n counter[catagory.index(X)]+=1\n \n## print(sorted(numbers))\n## print(catagory,\"\\n\",counter)\n\n C_and_C = sorted(zip(catagory,counter))\n## print(C_and_C)\n\n maxed_pair=[ -1, -1, -1, -1 ] ## both values represent catagory\n## print(maxed_pair)\n\n i=0 \n while i<(len(C_and_C)-1):\n## print(C_and_C[i],\" \",C_and_C[i+1])\n \n if C_and_C[i+1][0] == C_and_C[i][0]+1 :\n## print(\"andar aaya\")\n## print(maxed_pair) \n if C_and_C[maxed_pair[0]][1] + C_and_C[maxed_pair[2]][1] < (C_and_C[i][1]) + (C_and_C[i+1][1]):\n maxed_pair[0]= C_and_C[i][0]\n maxed_pair[1]= C_and_C[i][1]\n maxed_pair[2]= C_and_C[i+1][0]\n maxed_pair[3]= C_and_C[i+1][1]\n i+=1\n\n## print(maxed_pair) \n if maxed_pair[1] == -1 and maxed_pair[2] == -1:\n return 0\n else:\n return maxed_pair[1]+maxed_pair[3]\n \n\n\n\n####print(picker([9, 5, 6, 2, 3, 1, 7, 8, 5, 2, 6, 4, 7, 1, 3, 6, 9, 8]))\n##print(picker([4, 6, 5, 3, 3, 1]))\nprint(picker([1, 2, 2, 3, 1, 2]))\n\n\n\n\n########Test\n","repo_name":"Commando16/hackerRankQnA","sub_path":"Picking_numbers/picking_numbers.py","file_name":"picking_numbers.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"4103736283","text":"import re\n\nif __name__ == \"__main__\":\n pat = r'[A-Z]{5}[A-Z0-9]{5}'\n\n n = int(input())\n for nl in range(n):\n line = input()\n\n isMatched = re.match(pat, line)\n\n if isMatched:\n print(\"YES\")\n else:\n print(\"NO\")","repo_name":"taeseunglee/hackerrank-Regex","sub_path":"7.Applications/valid_PAN_format.py","file_name":"valid_PAN_format.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"11016565475","text":"#!/usr/bin/env python3\n\n\nimport hashlib\nimport pathlib\nimport re\nimport sys\nimport urllib.request\n\n\nVERSION = '8.2107'\nBASE_URL_TEMPLATE = 'https://bgbilling.ru/download/{}'\nCOMPONENT_REGEX = re.compile(r'^name:(\\S*)\\s*zipName:(\\S*)\\s*checkSum:(\\S*)\\s*size:(\\d*)$')\n\n\ndef main():\n current_url = '{}/current'.format(BASE_URL_TEMPLATE.format(VERSION))\n current = download(current_url).decode('utf-8')\n set_url = '{}/sets/{}'.format(BASE_URL_TEMPLATE.format(VERSION), current)\n\n component_list_url = '{}/files.dat'.format(set_url)\n components = download(component_list_url).decode('utf-8')\n for component in components.splitlines():\n match = COMPONENT_REGEX.search(component)\n if match:\n name = match.group(1)\n zip_name = match.group(2)\n check_sum = match.group(3)\n size = match.group(4)\n\n url = '{}/{}'.format(set_url, zip_name)\n dist = download(url)\n save(dist, zip_name)\n else:\n print('Invalid component descriptor:', component, file=sys.stderr)\n continue\n\n\ndef download(url):\n print('Downloading', url, end='...', 
flush=True)\n data = urllib.request.urlopen(url).read()\n print('done', flush=True)\n return data\n\n\ndef save(data, filename):\n print('Writing', filename, end='...', flush=True)\n with open(filename, 'wb') as f:\n f.write(data)\n print('done', flush=True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alexanderfefelov/scripts","sub_path":"download/bgbilling/download-dists.py","file_name":"download-dists.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"9485737647","text":"\"\"\"\r\n-*- coding: utf-8 -*-\r\n@author: Wang Zhaohui\r\n\"\"\"\r\n\r\nimport os\r\nimport time\r\nimport xlwt\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n# crawler initialize\r\nos.chdir('/Users/xxxxxxqq/vscodeProjects/arxiv')\r\nheader = {\r\n 'Host': 'arxiv.org',\r\n 'Accept':\r\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'\r\n 'application/signed-exchange;v=b3;q=0.9',\r\n 'Accept-Encoding': 'gzip, deflate, br',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\r\n}\r\n\r\n# initialize \".txt\" and \".xls\" document to save data\r\nwith open('arxiv_text_updated.txt', 'w', encoding=\"utf-8\") as data:\r\n nothing_happen = 0\r\nworkbook = xlwt.Workbook(encoding='utf-8')\r\nworksheet = workbook.add_sheet('My Worksheet', cell_overwrite_ok=True)\r\nworksheet.write(0, 0, \"No.\")\r\nworksheet.write(0, 1, \"arXiv\")\r\nworksheet.write(0, 2, \"Title\")\r\nworksheet.write(0, 3, \"First Author\")\r\nworksheet.write(0, 4, \"Abstract\")\r\n\r\n# website crawling part\r\nnum_count = 0\r\ndef output(url_name, url_name_turn):\r\n print('begin')\r\n # initialize website crawling set\r\n global num_count\r\n url = url_name\r\n time.sleep(3)\r\n r1 = requests.get(url, headers=header)\r\n soup = BeautifulSoup(r1.content, 'lxml')\r\n text_dl = soup.select('div#dlpage > h3')\r\n total = soup.find('small')\r\n text_ds = text_dl[0].get_text()\r\n text_d = text_ds.split()\r\n date = text_d[0]\r\n total = int(total.text.split()[3])\r\n page, rest = divmod(total, 25)\r\n list_page = range(1, page + 1)\r\n\r\n # get information deposited in lists\r\n def get_all_this_page(url):\r\n\r\n header = {\r\n 'Host': 'arxiv.org',\r\n 'Accept':\r\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'\r\n 'application/signed-exchange;v=b3;q=0.9',\r\n 'Accept-Encoding': 'gzip, deflate, br',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7'\r\n }\r\n time.sleep(3)\r\n rr = requests.get(url, headers=header)\r\n soup2 = BeautifulSoup(rr.content, 'lxml')\r\n linkall0 = soup2.select('div#dlpage > dl > dt > span > a:nth-child(1)')\r\n titleall0 = soup2.select('div#dlpage > dl > dd > div > div.list-title.mathjax')\r\n number0 = soup2.select('div#dlpage > dl > dt > a')\r\n author0 = soup2.select('div#dlpage > dl > dd > div > div.list-authors > a:nth-of-type(1)')\r\n\r\n return linkall0, titleall0, number0, author0\r\n\r\n # filter information\r\n linkall = []\r\n titleall = []\r\n number = []\r\n author = []\r\n\r\n fir = get_all_this_page(url)\r\n linkfir = fir[0]\r\n titlefir = fir[1]\r\n numberfir = fir[2]\r\n authorfir = fir[3]\r\n\r\n try:\r\n for i in range(0, 25):\r\n linkall.append(linkfir[i].get('href'))\r\n titleall.append(titlefir[i].get_text())\r\n number.append(numberfir[i].get_text())\r\n author.append(authorfir[i].get_text())\r\n except:\r\n nothing = 'happened'\r\n\r\n # get information in other pages\r\n fir_page 
= int(number[0].split('[')[1].split(']')[0])\r\n for j in list_page:\r\n urlmid = url_name_turn + str(\r\n j * 25) + '&show=25'\r\n mid = get_all_this_page(urlmid)\r\n linkmid = mid[0]\r\n titlemid = mid[1]\r\n numbermid = mid[2]\r\n authormid = mid[3]\r\n\r\n if j < page:\r\n for j_1 in range(0, 25):\r\n linkall.append(linkmid[j_1].get('href'))\r\n titleall.append(titlemid[j_1].get_text())\r\n number.append(numbermid[j_1].get_text())\r\n author.append(authormid[j_1].get_text())\r\n else:\r\n for j_2 in range(0, rest):\r\n linkall.append(linkmid[j_2].get('href'))\r\n titleall.append(titlemid[j_2].get_text())\r\n number.append(numbermid[j_2].get_text())\r\n author.append(authormid[j_2].get_text())\r\n\r\n abstract = []\r\n for i in range(0, total):\r\n abstract.append(get_abstract(linkall[i][5:]))\r\n for k in range(0, total):\r\n link = 'arXiv:' + linkall[k][5:]\r\n linkall[k] = link\r\n auth = \"First Author:\" + author[k]\r\n author[k] = auth\r\n\r\n # save information\r\n with open('arxiv_text_updated.txt', 'a', encoding=\"utf-8\") as data:\r\n num_count_excel = num_count\r\n for n in range(0, total):\r\n num_count_excel += 1\r\n worksheet.write(num_count_excel, 0, num_count_excel)\r\n worksheet.write(num_count_excel, 1, linkall[n])\r\n worksheet.write(num_count_excel, 2, titleall[n])\r\n worksheet.write(num_count_excel, 3, author[n])\r\n worksheet.write(num_count_excel, 4, abstract[n])\r\n\r\n for m in range(0, total):\r\n num_count += 1\r\n data.write('[' + str(num_count) + ']' + '\\n')\r\n data.write(linkall[m])\r\n data.write(titleall[m])\r\n data.write(author[m] + '\\n')\r\n data.write(abstract[m])\r\n data.write('\\n')\r\n\r\n\r\n# get abstract by each arXiv code\r\ndef get_abstract(code):\r\n\r\n time.sleep(3)\r\n url2 = \"https://arxiv.org/abs/\" + str(code)\r\n rr2 = requests.get(url2, headers=header)\r\n soup = BeautifulSoup(rr2.content, 'lxml')\r\n abstract = soup.select('div#abs > blockquote')\r\n text = []\r\n for i in abstract:\r\n text.append(i.get_text())\r\n text = str(text)[4:-10]\r\n\r\n return text\r\n\r\n\r\ndef main():\r\n url_1 = 'https://arxiv.org/list/cs.CY/recent'\r\n url_1_t = 'https://arxiv.org/list/cs.CY/pastweek?skip='\r\n url_2 = 'https://arxiv.org/list/cs.SI/recent'\r\n url_2_t = 'https://arxiv.org/list/cs.SI/pastweek?skip='\r\n url_3 = 'https://arxiv.org/list/cs.IR/recent'\r\n url_3_t = 'https://arxiv.org/list/cs.IR/pastweek?skip='\r\n url_4 = 'https://arxiv.org/list/cs.CE/recent'\r\n url_4_t = 'https://arxiv.org/list/cs.CE/pastweek?skip='\r\n\r\n output(url_1, url_1_t)\r\n #output(url_2, url_2_t)\r\n #output(url_3, url_3_t)\r\n #output(url_4, url_4_t)\r\n\r\n workbook.save('/Users/xxxxxxqq/vscodeProjects/arxiv/arxiv_data_updated.xls')\r\n\r\n print('Complete')\r\n\r\n\r\nmain()\r\n","repo_name":"ggxxqq22/zjucss","sub_path":"public/arxiv/WEBSITE_CRAWLING.py","file_name":"WEBSITE_CRAWLING.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"72416457451","text":"import time\nimport itchat as wx\nimport gpt_api\nimport re\nfrom datetime import datetime\nimport json\n\n\ndef readjson(filename):\n with open(filename, encoding='utf-8') as f:\n data = json.load(f)\n return data\n\n\ndef write2json(filename, data):\n with open(filename, 'w', encoding=\"utf-8\") as f:\n json.dump(data, f, ensure_ascii=False, indent=4)\n\n\ndef archive(data):\n current_time = datetime.now()\n time_format = current_time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n filename = 
f'history/{time_format}.json'\n write2json(filename, data)\n\n\ndef get_whitelist():\n data = readjson('whitelist.json')\n wl = data[0].get(\"group\")\n whitelist = []\n for group, group_info in wl.items():\n name_value = group_info.get(\"name\")\n whitelist.append(name_value)\n return whitelist\n\n\nclass wxbot:\n\n def __init__(self) -> None:\n self.bot_thread = wx.Core()\n self.bot_thread.auto_login(hotReload=True)\n self.gpt_thread = gpt_api.gpt_thread()\n self.msg = {}\n self.text = ''\n self.FromUser = ''\n self.GroupName = ''\n self.NickName = ''\n self.UserName = ''\n auth_config = readjson('auth.json')\n self.config_GroupName = auth_config[0]['accept_GroupName']\n self.config_NickName = auth_config[0]['accept_NickName']\n self.config_passwd = auth_config[0]['passwd']\n self.operator = []\n self.operator_NickName = []\n self.whitelist = get_whitelist()\n self.bool_group_auth = False\n self.bool_op_auth = False\n self.count = 0\n print(self.whitelist)\n\n def start(self):\n self.bot_thread.run()\n\n def receive_msg(self, msg):\n self.msg = msg\n self.text = self.msg['Text']\n self.FromUser = self.msg['FromUserName']\n self.GroupName = self.msg['User']['NickName']\n self.NickName = self.msg['ActualNickName']\n self.UserName = self.msg['ActualUserName']\n if self.bool_whitelist():\n current_time = datetime.now()\n time_format = current_time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n print(f'{time_format} From Group {self.GroupName} {self.NickName} says \\n{self.text}')\n\n def text_reply(self, reply_msg):\n current_time = datetime.now()\n time_format = current_time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n print(f'{time_format} From Group {self.GroupName} 床爪 says \\n{reply_msg}')\n self.bot_thread.send(reply_msg, self.FromUser)\n\n def bool_whitelist(self, send_res=False):\n if self.bool_group_auth:\n return True\n if self.GroupName not in self.whitelist:\n if send_res:\n self.text_reply('Unregistered Group.')\n return False\n return True\n\n def load_op_auth(self):\n auth_config = readjson('auth.json')\n self.config_GroupName = auth_config[0]['accept_GroupName']\n self.config_NickName = auth_config[0]['accept_NickName']\n self.config_passwd = auth_config[0]['passwd']\n self.operator = []\n self.operator_NickName = []\n\n def op_auth(self, passwd=False, send_res=True):\n if self.bool_op_auth:\n return True\n if self.UserName not in self.operator:\n if passwd:\n if self.GroupName == self.config_GroupName:\n if self.NickName == self.config_NickName:\n if passwd == self.config_passwd:\n self.operator.append(self.UserName)\n self.operator_NickName.append(self.NickName)\n self.text_reply(f'Added {self.NickName} to operator group.')\n return True\n else:\n self.text_reply(f'{self.NickName} has authorization!')\n return True\n if send_res:\n self.text_reply(f'{self.NickName} has no authorization!')\n return False\n\n def help(self):\n if self.op_auth(send_res=False):\n self.text_reply('用法:\\n'\n '/help\\t查看帮助\\n'\n '/ask {你的问题}\\n'\n '/u add {content}\\n'\n '更多用法:\\n'\n '/sys su {password}\\t切换到operator\\n'\n '/sys su list\\t现有的op\\n'\n '/sys su reset\\t重置op列表\\n'\n '/sys init\\t保存并重置本轮对话\\n'\n '/sys init {prompt}\\t自定义初始prompt\\n'\n '/sys add {role} {content}\\t加入指定角色的内容\\n'\n '/sys print msg\\t输出本轮对话的所有内容\\n'\n '/sys save\\t保存本轮对话\\n'\n '/sys reload whitelist\\t重新加载群组白名单\\n'\n '/sys reload op\\t重新加载op白名单\\n'\n '/sys enable group_auth {true/false}\\t打开/关闭群组白名单\\n'\n '/sys enable op_auth {true/false}\\t打开/关闭op白名单')\n\n else:\n self.text_reply('用法:\\n'\n '/ask {你的问题}\\n'\n '/u add {content}')\n\n def 
enable_group_auth(self, _bool):\n self.bool_group_auth = _bool\n\n def enable_op_auth(self, _bool):\n self.bool_op_auth = _bool\n\n def ask(self):\n pattern = r\"/ask\\s(.*)\"\n match = re.findall(pattern, self.text, re.DOTALL)[0]\n if self.count == 10:\n self.text_reply('总结本轮对话并开启新一轮对话')\n last_session = self.gpt_thread.get_response('请总结以上对话')\n archive(self.gpt_thread.messages)\n self.gpt_thread.reset_log()\n self.gpt_thread.add_bot_content(last_session['choices'][0]['message']['content'])\n self.count = 0\n response = self.gpt_thread.get_response(match)\n reply_msg = response['choices'][0]['message']['content']\n self.text_reply(reply_msg)\n self.count = self.count + 1\n\n def u_add(self):\n pattern = r\"/u add\\s(.*)\"\n match = re.findall(pattern, self.text, re.DOTALL)[0]\n self.gpt_thread.add_content('user', match)\n self.text_reply(f'[{self.NickName}] 添加了 [{match}]')\n\n def sys(self):\n if self.op_auth(send_res=False):\n if self.text == '/sys su list':\n self.text_reply('当前operator')\n for item in self.operator_NickName:\n time.sleep(0.3)\n self.text_reply(item)\n return\n elif self.text == '/sys su reset':\n time.sleep(0.2)\n self.operator = []\n self.operator_NickName = []\n self.text_reply('已重置operator')\n return\n elif self.text == '/sys init':\n archive(self.gpt_thread.messages)\n self.gpt_thread.reset_log()\n self.text_reply('已保存本轮对话并重置')\n return\n elif self.text.startswith('/sys init'):\n archive(self.gpt_thread.messages)\n pattern = r\"/sys init\\s(.*)\"\n match = re.findall(pattern, self.text, re.DOTALL)[0]\n self.gpt_thread.reset_system_content(match)\n self.text_reply(f'已保存本轮对话并将初始prompt设置为[{match}]')\n return\n elif self.text.startswith('/sys add'):\n pattern = r\"/sys add\\s+(.*)\"\n match = re.findall(pattern, self.text)[0]\n roles = {'system': 0, 'user': 1, 'assistant': 2}\n for role in roles:\n if match.startswith(role):\n pattern = r\"[a-zA-Z]+\\s+(.*)\"\n match = re.findall(pattern, match)[0]\n self.gpt_thread.add_content(role, match)\n self.text_reply(f'Add:[{match}] as [{role}].')\n return\n elif self.text == '/sys print msg':\n for item in self.gpt_thread.messages[1:]:\n role = item['role']\n content = item['content']\n time.sleep(0.2)\n self.text_reply(f'{role} says {content}')\n return\n elif self.text == '/sys save':\n archive(self.gpt_thread.messages)\n self.text_reply('已保存本轮对话')\n return\n elif self.text == '/sys reload whitelist':\n self.whitelist = get_whitelist()\n self.text_reply('已重新加载群组白名单')\n return\n elif self.text == '/sys reload op':\n self.load_op_auth()\n self.text_reply('已重新加载op名单')\n return\n elif self.text.startswith('/sys enable group_auth'):\n pattern = r\"/sys enable group_auth\\s+(.*)\"\n match = re.findall(pattern, self.text)[0]\n if match == 'true':\n self.enable_group_auth(True)\n self.text_reply('Switch group_auth to true.')\n elif match == 'false':\n self.enable_group_auth(False)\n self.text_reply('Switch group_auth to false.')\n else:\n self.text_reply('わからない(')\n return\n elif self.text.startswith('/sys enable op_auth'):\n pattern = r\"/sys enable op_auth\\s+(.*)\"\n match = re.findall(pattern, self.text)[0]\n if match == 'true':\n self.enable_op_auth(True)\n self.text_reply('Switch op_auth to true.')\n elif match == 'false':\n self.enable_op_auth(False)\n self.text_reply('Switch op_auth to false.')\n else:\n self.text_reply('わからない(')\n return\n if self.text.startswith('/sys su'):\n pattern = r\"/sys su\\s+(.*)\"\n passwd = re.findall(pattern, self.text)[0]\n self.op_auth(passwd=passwd)\n return\n else:\n self.op_auth()\n\n def 
lex(self):\n if self.text.startswith('/'):\n if self.bool_whitelist(send_res=True):\n if self.text == '/help':\n self.help()\n elif self.text.startswith('/ask'):\n self.ask()\n elif self.text.startswith('/u add'):\n self.u_add()\n elif self.text.startswith('/sys'):\n self.sys()\n else:\n self.text_reply('わからない(')\n\n\ndef MainThread(wxbot1):\n @wxbot1.bot_thread.msg_register('Text', isGroupChat=True)\n def groupchat_reply(msg):\n wxbot1.receive_msg(msg)\n wxbot1.lex()\n\n wxbot1.start()\n\n\nif __name__ == '__main__':\n bot_instance = wxbot()\n MainThread(bot_instance)\n","repo_name":"c0rnP1ex/wxbot_w_gpt","sub_path":"re_wxbot.py","file_name":"re_wxbot.py","file_ext":"py","file_size_in_byte":11142,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"} +{"seq_id":"8749110670","text":"# ===============================================\n# IMPORTAÇÕES [Tkinter]\nfrom tkinter import *\nfrom tkinter import tix\nfrom tkinter import messagebox\n\n# IMPORTAÇÕES [pygame]\nimport pygame\nfrom pygame import mixer\n\n# IMPORTAÇÕES [datetime e time]\nfrom datetime import *\nfrom time import sleep\n\n# IMPORTAÇÕES [imagens]\nfrom PIL import Image, ImageTk\n\n# ===============================================\n# CONFIGURAÇÃO [CORES]\nlightBlack = \"#111111\"\naliceBlue = \"#f0f8ff\"\nlightGray = \"#737373\"\nlemonGreen = \"#35FF1E\"\nfreshRed = \"#AA202D\"\ncheck_in = \"No-Check!\"\n\n# ===============================================\n# CONFIGURAÇÃO [APLICAÇÃO]\nclass Functions():\n def horaAtual(self):\n self.date_now = datetime.now()\n self.hora_atual= self.date_now.strftime('%H:%M')\n\n # CONFIGURAÇÃO [LABEL - self.horas]\n self.horas.after(1000, self.horaAtual)\n self.horas['text'] = self.hora_atual\n\n dia = self.date_now.day\n mes = self.date_now.month\n ano = self.date_now.year\n self.date['text'] = f\"{dia}/{mes}/{ano}\"\n \n def saveHorario(self):\n self.semana_set = self.spin_semana.get()\n self.horas_set = int(self.spin_horas.get())\n self.minutos_set = int(self.spin_minutos.get())\n \n if self.semana_set not in self.semanas_list:\n messagebox.showerror(\n title=\"Semana Incorreta\",\n message=\"A semana selecionada não está na lista.\"\n )\n \n elif self.horas_set > 23 or self.horas_set < 0:\n messagebox.showerror(\n title=\"Horário Incorreto\",\n message=\"A hora selecionada está errada. 
Por favor, faça novamente.\"\n            )\n\n        elif self.minutos_set > 59 or self.minutos_set < 0:\n            messagebox.showerror(\n                title=\"Minuto Incorreto\",\n                message=\"A definição para o minuto está incorreta.\"\n            )\n\n        else:\n            messagebox.showinfo(\n                title=\"Alarme Definido\",\n                message=\"O alarme foi definido com sucesso!\"\n            )\n\n            self.playAlarm()\n            self.situation_situa['fg'] = lemonGreen\n            self.situation_situa['text'] = \"Ativado!\"\n\n    def consult(self):\n        self.root_consult = Toplevel()\n        self.root_consult.grab_set()\n        self.root_consult.focus_force()\n        self.root_consult.geometry(\"400x200\")\n        self.root_consult.transient(self.root)\n        self.root_consult.title(\"Horários\")\n        self.root_consult.resizable(width='false', height='false')\n\n        self.root_frame = Frame(self.root_consult, bg=lightBlack)\n        self.border = Frame(self.root_frame, bg=aliceBlue)\n        self.container = Frame(self.border, bg=lightBlack)\n\n        self.label_horario = Label(\n            self.container, text=\"00:00\", font=('Impact 20'),\n            bg=lightBlack, fg=aliceBlue, bd=0\n        )\n\n        if self.horas_set < 10 and self.minutos_set < 10:\n            self.label_horario['text'] = f\"0{str(self.horas_set)}:0{str(self.minutos_set)}\"\n        elif self.horas_set < 10 and self.minutos_set >= 10:\n            self.label_horario['text'] = f\"0{str(self.horas_set)}:{str(self.minutos_set)}\"\n        elif self.horas_set >= 10 and self.minutos_set < 10:\n            self.label_horario['text'] = f\"{str(self.horas_set)}:0{str(self.minutos_set)}\"\n        else:\n            self.label_horario['text'] = f\"{str(self.horas_set)}:{str(self.minutos_set)}\"\n\n        self.label_semana = Label(\n            self.container, text=\"Dia da semana\", font=('Arial 8 italic'),\n            bg=lightBlack, fg=lightGray, bd=0\n        )\n        self.label_semana['text'] = self.semana_set\n\n        # CONFIGURAÇÃO [PLACE]\n        self.root_frame.place(relwidth=1, relheight=1)\n        self.border.place(relx=0, rely=0.01, relwidth=1, relheight=0.4)\n        self.container.place(relx=0.01, rely=0.01, relwidth=0.98, relheight=0.98)\n        self.label_horario.place(relx=0.02, rely=0.01)\n        self.label_semana.place(relx=0.02, rely=0.45)\n\n    def playAlarm(self):\n        self.root.after(1000, self.playAlarm)\n        try:\n            self.hoursNow_set = self.date_now.strftime('%H:%M')\n            self.alarmtime = f\"{str(self.horas_set)}:{str(self.minutos_set)}\"\n            print(F'\\033[40mAlarme definido. Hora atual: {self.hoursNow_set}\\033[m')\n            if self.hoursNow_set == self.alarmtime:\n                mixer.music.load('alarm-music/alarm.wav')\n                mixer.music.play()\n\n        except AttributeError:\n            print('\\033[40mAlarme não foi definido.\\033[m')\n\n\nclass Alarm(Functions):\n    def __init__(self):\n        self.root = tix.Tk()\n        self.telaApp()\n        self.menuBar()\n        self.framesApp()\n        self.buttons()\n        self.contentLeft()\n        self.contentRight()\n        # ------------------\n        self.horaAtual()\n        self.root.mainloop()\n\n    def telaApp(self):\n        self.root.title(\"Alarme - Despertador\")\n        self.root.geometry(\"500x300\")\n        self.root.resizable(width='false', height='false')\n        self.root.iconbitmap(\"icon/alarm-bell.ico\")\n\n    def menuBar(self):\n        self.menubar = Menu(self.root)\n        self.file_menu = Menu(self.menubar)\n        self.root.config(menu=self.menubar)\n        self.menubar.add_cascade(label=\"Alarmes Definidos\", menu=self.file_menu)\n        self.file_menu.add_command(label=\"Horários\", command=self.consult)\n\n    def framesApp(self):\n        self.frameLeft = Frame(self.root, bg=aliceBlue)\n\n        self.frameRight = Frame(self.root, bg=lightBlack)\n\n        # CONFIGURAÇÃO [PLACE > FRAMES]\n        self.frameLeft.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n        self.frameRight.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n\n    def 
buttons(self):\n self.btn_save = tix.Button(\n self.frameLeft, text=\"Salvar\", font='Arial 10 bold', bg=lightBlack, fg=aliceBlue,\n relief='raised', overrelief='ridge'\n )\n self.btn_save['command'] = self.saveHorario\n self.save_ballon = tix.Balloon(self.frameLeft)\n self.save_ballon.bind_widget(self.btn_save, balloonmsg=\"Definir horário para o alarme.\")\n\n # CONFIGURAÇÃO [PLACE > Botões]\n self.btn_save.place(relx=0.01, rely=0.85)\n\n def contentLeft(self):\n self.label_horas = Label(\n self.frameLeft, text=\"Definir horas*\",\n font=('Arial 10 bold'), bg=aliceBlue, fg=lightBlack\n )\n self.spin_horas = Spinbox(\n self.frameLeft, from_=0, to=23, font=('Arial 13 bold')\n )\n\n self.label_minutos = Label(\n self.frameLeft, text=\"Definir minutos*\",\n font=('Arial 10 bold'), bg=aliceBlue, fg=lightBlack\n )\n self.spin_minutos = Spinbox(\n self.frameLeft, from_=0, to=59, font=('Arial 13 bold')\n )\n\n self.bar_div = Label(\n self.frameLeft, bg=lightBlack\n )\n\n self.semanas_list = [\"Segunda-Feira\", \"Terça-Feira\", \"Quarta-Feira\", \"Quinta-Feira\", \"Sexta-Feira\", \"Sábado\", \"Domingo\"]\n self.spin_semana = Spinbox(\n self.frameLeft, values=self.semanas_list, font=('Arial 15 bold')\n )\n\n # CONFIGURAÇÃO [PLACE]\n self.label_horas.place(relx=0.01, rely=0.04)\n self.spin_horas.place(relx=0.01, rely=0.10)\n self.label_minutos.place(relx=0.01, rely=0.25)\n self.spin_minutos.place(relx=0.01, rely=0.31)\n self.bar_div.place(relx=0, rely=0.46, relwidth=1, relheight=0)\n self.spin_semana.place(relx=0, rely=0.55, relwidth=1, relheight=0.25)\n\n def contentRight(self):\n self.horas = Label(\n self.frameRight, text=\"00:00\", font=('Impact 13'), bg=lightBlack, fg=aliceBlue\n )\n\n self.date = Label(\n self.frameRight, text=\"--/--/--\", font=('Impact 13'), bg=lightBlack, fg=aliceBlue\n )\n\n self.situation_title = Label(\n self.frameRight, text=\"Alarme:\", font=('Impact 13'),\n bg=lightBlack, fg=aliceBlue\n )\n self.situation_situa = Label(\n self.frameRight, text=\"Desativado!\", font=('Impact 13'),\n bg=lightBlack, fg=freshRed\n )\n\n self.img = Image.open('img/alarme.png')\n self.img = self.img.resize((130, 130))\n self.img = ImageTk.PhotoImage(self.img)\n self.set_image = Label(\n self.frameRight, image=self.img, bg=lightBlack\n )\n\n # CONFIGURAÇÃO [PLACE > LABEL]\n self.horas.place(relx=0.8, rely=0)\n self.date.place(relx=0.02, rely=0)\n self.situation_title.place(relx=0.22, rely=0.7)\n self.situation_situa.place(relx=0.45, rely=0.7)\n self.set_image.place(relx=0.25, rely=0.15)\n\n# ===============================================\n# CONFIGURAÇÃO [APLICAÇÃO > ATIVAÇÃO]\nif __name__ == '__main__':\n mixer.init()\n Alarm()","repo_name":"Baku-Stark/Portfolio-Projects","sub_path":"Projetos/Python/Alarme/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"23496503179","text":"\"\"\"\n This file inferred popularity of each coupon by couponkey and genre.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom util import Process\nfrom util_logger import get_logger\nLOG = get_logger()\nLOG.info(\"start b13\")\n\n# load files\ndetail_tr = pd.read_pickle(\"../model/detail_tr.pkl\")\ncoupons = pd.read_pickle(\"../model/coupons.pkl\")\ncoupon_tr = pd.read_pickle(\"../model/coupon_tr.pkl\")\nusers = pd.read_pickle(\"../model/users.pkl\")\n\n# count purchases for each coupon ---------------------------\n\n# exclude duplication\ndetail_tr = 
detail_tr.groupby([\"COUPON_ID\",\"USER_ID\"]).size().reset_index().drop(0, axis=1)\n\ndetail_cp = detail_tr.groupby(\"COUPON_ID\").size().reset_index().rename(columns={0:\"count\"})\ndetail_cp = detail_cp.merge(coupon_tr, on=\"COUPON_ID\")\ndetail_cp = detail_cp.sort(\"count\")\ndetail_cp[\"key1\"] = Process.to_key1(detail_cp)\n\n# calculate couponkey and genre popularity ----------------\n\n# couponkey popularity, calculated as the mean purchase count\npopular_key = detail_cp.groupby(\"key1\")[\"count\"].agg([np.mean, np.size])\npopular_key = popular_key.rename(columns={\"mean\":\"key_mean\", \"size\":\"key_size\"})\n\n# genre popularity, calculated as the mean purchase count\npopular_genre = detail_cp.groupby(\"genre\")[\"count\"].agg([np.mean, np.size])\npopular_genre = popular_genre.rename(columns={\"mean\":\"key_mean\", \"size\":\"key_size\"})\n\n# couponkey popularity, excluding couponkeys with only one sample\npopular_key_train = popular_key[popular_key[\"key_size\"] > 1]\n\n# set popularity for each coupon ----------------------\ncoupons[\"key1\"] = Process.to_key1(coupons)\n\n# popularity is set by couponkey for a train coupon if the couponkey has two or more samples\n# popularity is set by couponkey for a test coupon\npop_key1_train = popular_key_train.loc[coupons[\"key1\"] ][\"key_mean\"].values\npop_key1_test = popular_key.loc[coupons[\"key1\"] ][\"key_mean\"].values\nis_test = coupons.period < 0\npop_key1 = np.where(is_test, pop_key1_test, pop_key1_train)\n\n# set popularity\n# if nan, genre popularity is used\npop_genre = popular_genre.loc[coupons[\"genre\"] ][\"key_mean\"].values\ncoupons[\"pop\"] = np.where(np.isnan(pop_key1), pop_genre, pop_key1)\n\n# write\ncoupon_pop = coupons[[\"COUPON_ID\",\"pop\"]]\ncoupon_pop.to_pickle(\"../model/coupon_pop.pkl\")\n\nLOG.info(\"finished\")","repo_name":"threecourse/kaggle-coupon-purchase-prediction","sub_path":"src/b13_couponkey_popularity.py","file_name":"b13_couponkey_popularity.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"55"} +{"seq_id":"29862517230","text":"from rest_framework import serializers\n\nfrom users.models import User\n\n\ndef validate_me(data):\n    \"\"\"\n    Check that username is not 'me'.\n    \"\"\"\n    if data.get('username') == 'me':\n        raise serializers.ValidationError('Username указан неверно!')\n    return data\n\n\ndef validate_username_unique(data):\n    \"\"\"\n    Check that username is unique.\n    \"\"\"\n    username = data.get('username')\n    email = data.get('email')\n    if User.objects.filter(username=username).exists():\n        user = User.objects.get(username=username)\n        if user.email != email:\n            raise serializers.ValidationError('Не уникальное имя!')\n    return data\n\n\ndef validate_email_unique(data):\n    \"\"\"\n    Check that email is unique.\n    \"\"\"\n    username = data.get('username')\n    email = data.get('email')\n    if User.objects.filter(email=email).exists():\n        user = User.objects.get(email=email)\n        if user.username != username:\n            raise serializers.ValidationError('Не уникальный email!')\n    return data\n","repo_name":"konmin123/api_yamdb","sub_path":"api_yamdb/api/v1/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"13494052842","text":"__author__ = 'Milena Farfulowska'\r\n\r\nfrom django.conf.urls import patterns, include, url\r\n\r\n# Uncomment the next two lines to enable the admin:\r\nfrom django.contrib import admin\r\nfrom forum 
import views, viewsAdmin\n\nadmin.autodiscover()\nurlpatterns = patterns('',\n # Examples:\n #url(r'^$', 'home', name='home'),\n url(r'^$', views.home, name='home'),\n # url(r'^admin/', include(admin.site.urls)),\n\n url(r'^signIn/$', views.signIn, name='signIn'),\n url(r'^signUp/$', views.signUp, name='signUp'),\n url(r'^logOut/$', views.logOut, name='logOut'),\n\n url(r'^userProfile/(?P[a-z\\d]{24})/$', views.userProfile, name='userProfile'),\n url(r'^editUser/$', views.editUser, name='editUser'),\n url(r'^delUser/(?P[a-z\\d]{24})/$', views.delUser, name='delUser'),\n url(r'^usersAdmin/$', viewsAdmin.users, name='usersAdmin'),\n url(r'^usersAdmin/editUser/(?P[a-z\\d]{24})/$', viewsAdmin.editUser, name='editUserAdmin'),\n url(r'^usersAdmin/delUser/(?P[a-z\\d]{24})/$', viewsAdmin.delUser, name='delUserAdmin'),\n\n url(r'^signUp/success/', views.success, name='success'),\n url(r'^found/$', views.found, name='found'),\n url(r'^error/(?P[\\d]{1})/$', views.error, name='error'),\n url(r'^success/(?P[\\d]{1})/$', views.success, name='success'),\n url(r'^info/(?P[\\d]{1})/$', views.info, name='info'),\n url(r'^category/(?P[a-z\\d]{24})/', views.category, name='category'),\n\n # threads\n url(r'^thread/(?P[a-z\\d]{24})/$', views.thread, name='thread'),\n url(r'^editThread/(?P[a-z\\d]{24})/$', views.editThread, name='editThread'),\n url(r'^delThread/(?P[a-z\\d]{24})/$', views.delThread, name='delThread'),\n url(r'^reportThread/(?P[a-z\\d]{24})/$', views.reportThread, name='reportThread'),\n # url(r'^banThread/(?P[a-z\\d]{24})/$', viewsAdmin.banThread, name='banThread'),\n url(r'^setOkThread/(?P[a-z\\d]{24})/$', viewsAdmin.setOkThread, name='setOkThread'),\n url(r'^reportedThreads/$', viewsAdmin.reportedThreads, name='reportedThreads'),\n url(r'^newThread/', views.newThread, name='newThread'),\n\n #responses\n url(r'^response/(?P[a-z\\d]{24})/$', views.response, name='response'),\n url(r'^editResponse/(?P[a-z\\d]{24})/$', views.editResponse, name='editResponse'),\n url(r'^delResponse/(?P[a-z\\d]{24})/$', views.delResponse, name='delResponse'),\n url(r'^reportResponse/(?P[a-z\\d]{24})/$', views.reportResponse, name='reportResponse'),\n # url(r'^banThread/(?P[a-z\\d]{24})/$', viewsAdmin.banThread, name='banThread'),\n url(r'^setOkResponse/(?P[a-z\\d]{24})/$', viewsAdmin.setOkResponse, name='setOkResponse'),\n url(r'^reportedResponses/$', viewsAdmin.reportedResponses, name='reportedResponses'),\n\n # url(r'^response/(?P[a-z\\d]+)/$', views.response, name='response'),\n # url(r'^thread/$', views.thread, name='thread'),\n # url(r'^project/', include('project.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"milenafarf/forum","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"34873357774","text":"class DataError(Exception):\n def __init__(self, msg):\n \n self.msg = msg\n def __str__(self):\n return self.msg\ndef robust():\n \n cnt=0\n sum=0\n while 1:\n\n \n try:\n alist = input().split()\n if alist==[]:\n raise DataError(alist)\n for i in range(len(alist)):\n sum += int(alist[i])\n except ValueError:#ValueError \n print(f\"({sum})\")\n else:\n print(sum)\n \nif __name__==\"__main__\":\n 
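# robust() keeps a running total of the integers typed on each line; an empty\n    # line raises DataError, which is not caught below and so ends the program.\n    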
robust()\n","repo_name":"brown0394/university_hw_projects","sub_path":"PythonCode/dummmmymymymy.py","file_name":"dummmmymymymy.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"41087228246","text":"\"\"\"\nInput: s = \"PAYPALISHIRING\", numRows = 4\nOutput: \"PINALSIGYAHRPI\"\nExplanation:\n\nP I N\nA L S I G\nY A H R\nP I\n\"\"\"\n\nclass Solution:\n def convert(self, s, n):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n if not s:\n return \"\"\n if len(s) <= n or n == 1:\n return s\n\n # first row char indices in original s\n # 0, (n*2-2), (n*2-2)*2, ...\n # last row char indices in original s\n # n-1, n-1 + (n*2-2), n-1 + (n*2-2)*2, ...\n # rows in the middle, row 1 to row n-2\n # e.g. row 1: 1, 1+(n*2-2), 1+(n*2-2)*2, ... plus each of them has a +(n*2-2)-row*2 item after\n slen = len(s)\n result = \"\"\n for row in range(n):\n gap1 = (n*2-2)\n gap2 = (n*2-2)-row*2\n rowi = row \n while rowi < slen:\n result += s[rowi]\n result += s[rowi + gap2] if row != 0 and row != n-1 and rowi + gap2 < slen else \"\"\n rowi += gap1\n return result\n","repo_name":"balloonio/algorithms","sub_path":"leetcode/problems/6_zigzag_conversion.py","file_name":"6_zigzag_conversion.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"7257201767","text":"class Solution:\r\n def numIslands(self, grid: List[List[str]]) -> int:\r\n m=len(grid)\r\n n=len(grid[0])\r\n visited=set()\r\n res=0\r\n def search(x,y):\r\n visited.add((x,y))\r\n for i,j in ((x-1,y),(x+1,y),(x,y-1),(x,y+1)):\r\n if 0<=i None:\n \"\"\"Sets the working directory. This is the directory used to cache\n datasets and shapefiles. 
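The directory is created if it does not already exist.\n        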
If a file is passed in, the parent directory\n        will be used.\n        \"\"\"\n        input_path = Path(new_working_directory)\n\n        if input_path.is_file():\n            p = input_path.parent\n        else:\n            p = input_path\n\n        if not p.is_absolute():\n            raise ValueError(\n                f\"Working directory must be set to an absolute path, not: {p}\"\n            )\n\n        p.mkdir(parents=True, exist_ok=True)\n        self._path = p\n\n    def resolve(self, path) -> Path:\n        \"\"\"If `path` is relative, this method returns it relative to the\n        `working_directory`.\n\n        If `path` is already absolute, it will simply be returned.\n        \"\"\"\n        p = Path(path)\n        if p.is_absolute():\n            return p\n        else:\n            return self.path / path\n\n\nworking_directory = _WorkingDirectory()\n","repo_name":"bluebonnet-data/bbd","sub_path":"src/bbd/working_directory/working_directory.py","file_name":"working_directory.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"55"} +{"seq_id":"70928486570","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom bs4 import BeautifulSoup\n\ndata = \"\"\nfor line in sys.stdin:\n    data += line\nsoup = BeautifulSoup(data, 'html.parser')\n\nf = soup.find('div',id='filterHolder')\nuls = f.findAll('ul',class_='dropdown')\nlis = uls[0].findAll('li')\n\nfor l in lis:\n\tprint(l.find(text=True).replace('.','-').strip().encode('utf-8'))\n","repo_name":"gustavokira/atp-pontos","sub_path":"parser-week.py","file_name":"parser-week.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"28470522930","text":"import re\nr1 = input('Digite a resistencia 1:')\nr2 = input('Digite a resistencia 2:')\n\nif re.match(r'^[-+]?\\d*\\.?\\d+$',r1) and re.match(r'^[-+]?\\d*\\.?\\d+$',r2) :\n    r1, r2 = float(r1), float(r2)\n    if r1 > 0 and r2 > 0:\n        calculo = (input('digite se quer \"serie\" ou \"paralelo\" \\n'))\n        if calculo == 'serie':\n            serie = r1 + r2\n            print('valor em serie:{}'.format(serie))\n        elif calculo == 'paralelo':\n            paralelo = (r1*r2)/(r1+r2)\n            print('valor em paralelo:{}'.format(paralelo))\n        else:\n            print('digite a opção serie ou paralelo')\n    else:\n        print('digite numeros positivos')\n\nelse:\n    print('Digite somente numeros e positivos')","repo_name":"ThiagoGenuino/Monitoria-ALP-Python","sub_path":"Q3L3.py","file_name":"Q3L3.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"34254391464","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd.function import Function, once_differentiable\nfrom .kernels import *\nimport dgl\nimport copy\n\n\nclass sddmm_u_add_v(Function):\n    @staticmethod\n    def forward(ctx, g, U, V, scaleU_=None, scaleV_=None):\n        U_, scaleU = quantize(U, scaleU_)\n        if (id(U) == id(V)):\n            V_, scaleV = U_, scaleU\n        else:\n            V_, scaleV = quantize(V, scaleV_)\n        out, _ = mySDDMM_int8(g, 'add', U_, V_, scaleU, scaleV, False)\n        ctx.backward_cache = g\n        return out\n\n    @staticmethod\n    @once_differentiable\n    def backward(ctx, dZ):\n        g = ctx.backward_cache\n        dZ_, scaledZ = quantize(dZ)\n        dU = dV = None\n        if ctx.needs_input_grad[2]:\n            dV = incidence_SPMM(g, dZ_, scaledZ)\n        if ctx.needs_input_grad[1]:\n            dU = incidence_SPMM(g, dZ_, scaledZ, True)\n        return None, dU, dV, None, None\n","repo_name":"cctry/Tango","sub_path":"cuda/SDDMM.py","file_name":"SDDMM.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} 
+{"seq_id":"28098318457","text":"#Tic-Tac-Toe Game\n\n\"\"\"\ntic tac toe board \n[\n [-, -, -],\n [-, -, -],\n [-, -, -]\n]\n\nuser_input -> 1-9\nexception: prompt for user input again\ncheck if user_input is already taken\nadd it to the board\n\ncheck if user won: checking rows, columns and diagonals\ntoggle between users upon successful moves\n\"\"\"\n\ndef print_board(board):\n for row in board:\n for slot in row:\n print(slot, end = \" \")\n print()\n\ndef quit(user_input):\n if user_input == \"q\": \n print(\"Thanks for playing!!!\")\n return True\n else: return False\n\ndef check_input(user_input):\n #check if it is a number and within 1-9\n if not user_input.isnumeric() or int(user_input) > 9 or int(user_input) < 1:\n print(\"This is not a valid number\")\n return False\n else:\n return True\n\ndef is_taken(coords, board):\n row = coords[0]\n col = coords[1]\n if board[row][col] != \"-\": \n return True\n return False\n\ndef coordinates(user_input):\n row = int(user_input / 3)\n col = int(user_input % 3)\n return (row, col)\n\ndef add_to_board(coords, active_user, board):\n row = coords[0]\n col = coords[1]\n board[row][col] = active_user\n\ndef current_user(active_user):\n if active_user: return \"X\"\n else: return \"O\"\n\ndef is_win(user, board):\n if check_row(user, board): return True\n if check_col(user, board): return True\n if check_diagonals(user, board):return True\n \n return False\n\ndef check_row(user, board):\n for row in board:\n complete_row = True\n for slot in row:\n if slot != user:\n complete_row = False\n break\n if complete_row:\n return True\n return False\n\ndef check_col(user, board):\n for col in range(3):\n complete_col = True\n for row in range(3):\n if board[row][col] != user:\n complete_col = False\n break\n if complete_col:\n return True\n return False\n\ndef check_diagonals(user, board):\n if board[0][0] == user and board[1][1] == user and board[2][2] == user:\n return True\n elif board[0][2] == user and board[1][1] == user and board[2][0] == user:\n return True\n return False\n \nboard = [\n [\"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\"]\n ]\n\nuser = True #player 1 if True, player 2 if False\n\nturns = 0 #to track for draw\n\nwhile(True):\n if turns >= 9:\n print_board(board)\n print(\"Its a draw!!!\")\n break\n active_user = current_user(user)\n print_board(board)\n user_input = input(\"Please enter a position 1 - 9: \").lower()\n if quit(user_input): break\n if not check_input(user_input):\n continue\n user_input = int(user_input) - 1\n if is_taken(coordinates(user_input), board):\n print(\"The position is already taken, please choose another position!\")\n continue\n else:\n add_to_board(coordinates(user_input), active_user, board)\n if is_win(active_user, board):\n print_board(board)\n print(\"Player \" + active_user + \" has won!\")\n break\n\n user = not user #Change to player2's turn\n turns += 1\n","repo_name":"houzhongy/Tic-Tac-Toe","sub_path":"tic_tac_toe_2_players.py","file_name":"tic_tac_toe_2_players.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"41411550660","text":"from proxies import Proxies\nfrom bs4 import BeautifulSoup\nimport tqdm\nimport json\nimport re\nimport warnings \nfrom pathlib import Path\nfrom urllib3.exceptions import InsecureRequestWarning\n\n\nPARAM_TOTIME = {'month':2592000, 'week':604800, 'today':-2, 'hour':3600, 'all':0}\n\nclass OfferScraper:\n def __init__(self):\n self.BASE_URL 
= 'https://www.cian.ru/cat.php'\n # парамеры поиска\n self.params = {\n 'deal_type': 'sale',\n 'engine_version': '2',\n 'mintarea': '30',\n 'maxtarea': '500', \n 'offer_type': 'flat',\n 'p': '1',\n 'region': '1', \n }\n self.proxy = Proxies()\n self.progress_bar = tqdm.tqdm()\n warnings.simplefilter('ignore', InsecureRequestWarning)\n Path('./data/process/').mkdir(parents=True, exist_ok=True)\n\n def get_json_toc(self, params): \n with self.proxy.get(self.BASE_URL, params=params) as request: \n try:\n root_data = BeautifulSoup(request.content, 'lxml')\n head_data = root_data.find('head')\n tag_data = head_data.find('script', string = re.compile('ca\\(\"pageview'))\n \n # может вернуться или pageview или pageviewSite\n try:\n return re.search(r'ca\\(\"pageview\",(.*?)\\)', tag_data.text).group(1)\n except:\n try:\n return re.search(r'ca\\(\"pageviewSite\",(.*?)\\)', tag_data.text).group(1)\n except Exception as e: \n self.progress_bar.write(str(e)) \n except Exception as e: \n self.progress_bar.write('Parse TOC:' + str(e))\n\n def get_json_page(self, offer_id): \n with self.proxy.get('https://www.cian.ru/sale/flat/' + offer_id) as request: \n try: \n root_data = BeautifulSoup(request.content, 'lxml') \n tag_data = root_data.find('script', string = re.compile('offerData')) \n raw_contents = re.search(r'concat\\((.*?)\\);\\n', tag_data.text).group(1)\n \n if raw_contents:\n return json.loads(raw_contents) \n except Exception as e: \n self.progress_bar.write('Parse page: ' + str(e))\n\n def full_scrap(self, search_depth = 'day', verbose = 0):\n mintarea = int(self.params.get('mintarea', 0))\n maxtarea = int(self.params.get('maxtarea', 9999))\n areas = list(zip(\n [max(0, mintarea)] + list(range(max(15, mintarea), min(maxtarea, 250))) + [min(maxtarea, 251)], \n [max(14, mintarea)] + list(range(max(15, mintarea), min(maxtarea, 250))) + [min(maxtarea, 9999)])) \n minfloor = int(self.params.get('minfloor', 1))\n maxfloor = int(self.params.get('maxfloor', 200))\n floors = list(zip(\n [max(0, minfloor)] + list(range(max(15, minfloor), min(maxfloor, 250))) + [min(maxfloor, 251)], \n [max(14, minfloor)] + list(range(max(15, minfloor), min(maxfloor, 250))) + [min(maxfloor, 9999)])) \n \n # just not to make two fors. 
if necessary add more terms\n        iter_list = [[a, f] for a in areas for f in floors]\n\n        # init proxies\n        if verbose == 1:\n            print('init proxies')\n\n        self.proxy.check_proxies(verbose = verbose)\n\n        if verbose == 1:\n            print('init proxies finished')\n\n        params = self.params.copy()\n        params['totime'] = PARAM_TOTIME.get(search_depth, 0)\n\n        self.progress_bar.total = total = len(iter_list) * 55\n        for area, floor in iter_list:\n            params['mintarea'] = area[0]\n            params['maxtarea'] = area[1]\n            params['minfloor'] = floor[0]\n            params['maxfloor'] = floor[1]\n\n            for page in range(1, 55):\n                self.progress_bar.update(1)\n                params['p'] = page\n                # parse search result to get links to offers\n                json_toc = self.get_json_toc(params)\n                if json_toc:\n                    current_page = json.loads(json_toc).get('page').get('pageNumber')\n                    # when \"p\" param is invalid we are redirected to the first page\n                    if page > 1 and current_page == 1:\n                        break\n                    try:\n                        for item in json.loads(json_toc).get('products'):\n                            offer_id = str(item.get('id'))\n                            json_page = self.get_json_page(offer_id)\n                            if json_page:\n                                with open('./data/process/' + offer_id + '.json', 'w', encoding=\"utf-8\") as f:\n                                    json.dump(json_page, f, ensure_ascii=False)\n\n                    except Exception as e:\n                        self.progress_bar.write(str(e))\n\n                self.progress_bar.update(55 - page)\n\n\n    def finish_scrap(self):\n        self.proxy.cancel_timers()\n\nif __name__ == '__main__':\n    scraper = OfferScraper()\n    scraper.full_scrap(verbose=0)\n\n    scraper.finish_scrap()\n\n\n\n","repo_name":"alexkurov/Sber_DS_Final","sub_path":"src/offer_scraper.py","file_name":"offer_scraper.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"41334920629","text":"import numpy as np\nimport torch\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom collections import deque\nimport os\nfrom time import time, sleep\nimport gc\nimport fasteners\nimport pickle\n\n\nclass NStepMemory(dict):\n    def __init__(self, memory_size=3, gamma=0.99):\n        self.memory_size = memory_size\n        self.gamma = gamma\n\n        self.state = deque(maxlen=memory_size)\n        self.action = deque(maxlen=memory_size)\n        self.reward = deque(maxlen=memory_size)\n        self.stack_count = deque(maxlen=memory_size)\n\n    @property\n    def size(self):\n        return len(self.state)\n\n    def add(self, state, action, reward, stack_count):\n        self.state.append(state)\n        self.action.append(action)\n        self.reward.append(reward)\n        self.stack_count.append(stack_count)\n\n    def get(self):\n        state = self.state.popleft()\n        action = self.action.popleft()\n        stack_count = self.stack_count.popleft()\n        reward = sum([self.gamma ** i * r for i,r in enumerate(self.reward)])\n        return state, action, reward, stack_count\n\n    def clear(self):\n        self.state.clear()\n        self.action.clear()\n        self.reward.clear()\n        self.stack_count.clear()\n\n    def is_full(self):\n        return len(self.state) == self.memory_size\n\n\nclass ReplayMemory:\n    def __init__(self, memory_size=100000, batch_size=32, n_step=3, state_size=(84,84), action_repeat=4, n_stacks=4, alpha=0.4):#(3, 84, 84), alpha=0.4):\n        self.index = 0\n        self.memory_size = memory_size\n        self.batch_size = batch_size\n        self.n_step = n_step\n        self.state_size = (action_repeat,) + state_size\n        self.action_repeat = action_repeat\n        self.n_stacks = n_stacks // action_repeat\n        self.alpha = alpha\n        self.beta = 0.4\n        self.beta_step = 0.00025 / 4\n\n        self.memory = dict()\n        self.memory['state'] = np.zeros((self.memory_size, *self.state_size), dtype=np.uint8)\n        self.memory['action'] = 
np.zeros((self.memory_size, 1), dtype=np.int8)\n        self.memory['reward'] = np.zeros((self.memory_size, 1), dtype=np.float32)\n        self.memory['done'] = np.zeros((self.memory_size, 1), dtype=np.float32)\n        self.memory['stack_count'] = np.zeros((self.memory_size,), dtype=np.int8)\n        self.memory['priority'] = np.zeros((self.memory_size,), dtype=np.float32)\n\n    @property\n    def size(self):\n        return min(self.index, self.memory_size)\n\n    def add(self, state, action, reward, done, stack_count):\n        index = self.index % self.memory_size\n        self.memory['state'][index] = state * 255\n        self.memory['action'][index] = action\n        self.memory['reward'][index] = reward\n        self.memory['done'][index] = 1 if done else 0\n        self.memory['stack_count'][index] = stack_count\n        self.index += 1\n\n    def extend(self, memory):\n        start_index = self.index % self.memory_size\n        last_index = (start_index + memory['state'].shape[0]) % self.memory_size\n        if start_index < last_index:\n            index = [i for i in range(start_index, last_index)]\n        else:\n            index = [i for i in range(start_index, self.memory_size)] + [i for i in range(last_index)]\n        index = np.array(index)\n\n        for key in self.memory.keys():\n            self.memory[key][index] = memory[key]\n\n        self.index += memory['state'].shape[0]\n\n    def fit(self):\n        for key in self.memory.keys():\n            self.memory[key] = self.memory[key][:self.size]\n\n    def save(self, path, actor_id):\n        path = os.path.join(path, f'memory{actor_id}.pt')\n        lock = fasteners.InterProcessLock(path)\n\n        while True:\n            if os.path.isfile(path) and os.path.getsize(path) > 0:\n                if lock.acquire(blocking=False):\n                    memory = torch.load(path, map_location=lambda storage, loc: storage)\n                    self.extend(memory)\n                    self.fit()\n                    torch.save(self.memory, path)\n                    lock.release()\n                    gc.collect()\n                    return\n            else:\n                if lock.acquire(blocking=False):\n                    self.fit()\n                    torch.save(self.memory, path)\n                    lock.release()\n                    gc.collect()\n                    return\n            sleep(np.random.random()+2)\n\n\n    def load(self, path, actor_id):\n        path = os.path.join(path, f'memory{actor_id}.pt')\n        lock = fasteners.InterProcessLock(path)\n\n        while True:\n            if os.path.isfile(path) and os.path.getsize(path) > 0:\n                if lock.acquire(blocking=False):\n                    memory = torch.load(path, map_location=lambda storage, loc: storage)\n                    self.extend(memory)\n                    os.remove(path)\n                    lock.release()\n                    gc.collect()\n                    return\n            else:\n                sleep(np.random.random())\n                return\n\n    def update_priority(self, index, priority):\n        self.memory['priority'][index] = priority\n\n    def get_stacked_state(self, index):\n        stack_count = self.memory['stack_count'][index]\n        start_index = index - (self.n_stacks - stack_count)\n        if start_index < 0:\n            start_index = self.memory_size + start_index\n        stack_index = [start_index for _ in range(stack_count)] + [(start_index+1+i)%self.memory_size for i in range(self.n_stacks-stack_count)]\n        stacked_state = np.concatenate([self.memory['state'][i] for i in stack_index])\n        return stacked_state\n\n    def sample(self, device='cpu'):\n        priority = self.memory['priority'][:self.size]\n        index = WeightedRandomSampler(\n            priority / np.sum(priority),\n            self.batch_size,\n            replacement=True)\n        index = np.array(list(index))\n        #index = np.random.randint(0, self.size, self.batch_size)\n        next_index = (index + self.n_step) % self.memory_size\n\n        batch = dict()\n        batch['state'] = np.stack([self.get_stacked_state(i) for i in index])\n        batch['next_state'] = np.stack([self.get_stacked_state(i) for i in next_index])\n        batch['action'] = self.memory['action'][index]\n        batch['reward'] = self.memory['reward'][index]\n        batch['done'] = 
self.memory['done'][index]\n\n for key in ['state', 'next_state']:\n batch[key] = batch[key].astype(np.float32) / 255.\n \n for key in batch.keys():\n batch[key] = torch.FloatTensor(batch[key]).to(device)\n batch['action'] = batch['action'].long()\n\n self.beta = min(self.beta + self.beta_step, 1)\n weights = (self.size * priority[index]) ** (-self.beta)\n weights /= np.max(weights)\n weights = torch.FloatTensor(weights).to(device)\n\n return batch, index, weights\n \n def indexing_sample(self, start_index, last_index, device='cpu'):\n index = np.array([i for i in range(start_index, last_index)])\n next_index = index + self.n_step\n\n batch = dict()\n batch['state'] = np.stack([self.get_stacked_state(i) for i in index])\n batch['next_state'] = np.stack([self.get_stacked_state(i%self.memory_size) for i in next_index])\n batch['action'] = self.memory['action'][index]\n batch['reward'] = self.memory['reward'][index]\n batch['done'] = self.memory['done'][index]\n\n batch['state'] = batch['state'].astype(np.float32) / 255.\n batch['next_state'] = batch['next_state'].astype(np.float32) / 255.\n return batch, index","repo_name":"jinbeizame007/pytorch-apex","sub_path":"replay_memory.py","file_name":"replay_memory.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"33628248169","text":"import codecs\nimport os.path\n\nimport setuptools\n\n\ndef read(rel_path):\n here = os.path.abspath(os.path.dirname(__file__))\n with codecs.open(os.path.join(here, rel_path), \"r\") as fp:\n return fp.read()\n\n\ndef get_version(rel_path):\n for line in read(rel_path).splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"requests\",\n \"us\",\n \"pandas\",\n \"shapely\",\n \"folium\",\n \"branca\",\n \"pyshp\",\n \"geopy\",\n \"tqdm\",\n]\n\nlong_description = read(\"README.md\")\n\nsetuptools.setup(\n name=\"bbd\",\n version=get_version(\"src/bbd/__init__.py\"),\n author=\"Bluebonnet Data\",\n author_email=\"info@bluebonnetdata.org\",\n description=\"A toolset for political campaign data analysis\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://www.bluebonnetdata.org/\",\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(\n where=\"src\",\n exclude=[\"docs\", \"tests*\"],\n ),\n install_requires=requires,\n extras_require={\"dev\": [\"flake8\", \"black\"]},\n tests_require=[\"pytest\"],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.6\",\n project_urls={\"Source\": \"https://github.com/bluebonnet-data/bbd\"},\n)\n","repo_name":"bluebonnet-data/bbd","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"55"} +{"seq_id":"26686173257","text":"from pycinema import Filter\n\nimport numpy\nimport matplotlib.cm as cm\n\nclass ColorMapping(Filter):\n\n def __init__(self):\n super().__init__(\n inputs={\n 'map': 'plasma',\n 'nan': (1,1,1,1),\n 'range': (0,1),\n 'channel': 'depth',\n 'images': [],\n 'composition_id': -1\n },\n outputs={\n 'images': []\n }\n )\n\n def _update(self):\n\n images = self.inputs.images.get()\n iChannel = 
self.inputs.channel.get()\n results = []\n map = self.inputs.map.get()\n nan = self.inputs.nan.get()\n composition_id = self.inputs.composition_id.get()\n\n nanColor = numpy.array(tuple([f * 255 for f in nan]),dtype=numpy.uint8)\n\n if isinstance(map, tuple):\n fixedColor = numpy.array(tuple([f * 255 for f in map]),dtype=numpy.uint8)\n for image in images:\n if not iChannel in image.channels or iChannel=='rgba':\n results.append(image)\n continue\n result = image.copy()\n if composition_id>=0 and 'composition_mask' in result.channels:\n rgba = None\n if 'rgba' not in result.channels:\n rgba = numpy.full((result.shape[0],result.shape[1],4), nanColor, dtype=numpy.uint8)\n result.channels['rgba'] = rgba\n else:\n rgba = result.channels['rgba']\n mask0 = result.channels['composition_mask']==composition_id\n mask1 = None\n if iChannel == 'depth':\n mask1 = result.channels[iChannel]==1\n else:\n mask1 = numpy.isnan(result.channels[iChannel])\n rgba[mask0 & mask1] = nanColor\n rgba[mask0 & ~mask1] = fixedColor\n else:\n rgba = numpy.full((image.shape[0],image.shape[1],4), fixedColor, dtype=numpy.uint8)\n mask1 = None\n if iChannel == 'depth':\n mask1 = result.channels[iChannel]==1\n else:\n mask1 = numpy.isnan(result.channels[iChannel])\n rgba[mask1] = nanColor\n result.channels['rgba'] = rgba\n\n results.append(result)\n else:\n cmap = cm.get_cmap( map )\n cmap.set_bad(color=nan )\n r = self.inputs.range.get()\n d = r[1]-r[0]\n for image in images:\n if not iChannel in image.channels or iChannel=='rgba':\n results.append(image)\n continue\n\n normalized = (image.channels[ iChannel ]-r[0])/d\n if iChannel == 'depth':\n normalized[image.channels[iChannel]==1] = numpy.nan\n\n result = image.copy()\n if composition_id>=0 and 'composition_mask' in result.channels:\n rgba = None\n if 'rgba' not in result.channels:\n rgba = numpy.full((result.shape[0],result.shape[1],4), nanColor, dtype=numpy.uint8)\n result.channels['rgba'] = rgba\n else:\n rgba = result.channels['rgba']\n\n mask = result.channels['composition_mask']==composition_id\n rgba[mask] = cmap(normalized[mask], bytes=True)\n else:\n result.channels[\"rgba\"] = cmap(normalized, bytes=True)\n\n results.append(result)\n\n self.outputs.images.set(results)\n\n return 1\n","repo_name":"cinemascience/pycinema","sub_path":"pycinema/filters/ColorMapping.py","file_name":"ColorMapping.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"14262281895","text":"\"\"\"\r\n-------------------------------------------------------\r\nLinked version of the list ADT.\r\n-------------------------------------------------------\r\nAuthor: David Brown\r\nID: 999999999\r\nEmail: dbrown@wlu.ca\r\nSection: CP164 A\r\n__updated__ = \"2019-06-18\"\r\n-------------------------------------------------------\r\n\"\"\"\r\n# pylint: disable=W0212\r\n\r\nfrom copy import deepcopy\r\n\r\n\r\nclass _List_Node:\r\n\r\n def __init__(self, value, next_):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Initializes a list node that contains a copy of value\r\n and a link to the next node in the list.\r\n Use: node = _List_Node(value, _next)\r\n -------------------------------------------------------\r\n Parameters:\r\n _value - value value for node (?)\r\n _next - another list node (_List_Node)\r\n Returns:\r\n a new _List_Node object (_List_Node)\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._value = deepcopy(value)\r\n 
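# link to the successor node; None marks the end of the list\r\n        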
self._next = next_\r\n\r\n\r\nclass List:\r\n\r\n def __init__(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Initializes an empty list.\r\n Use: lst = List()\r\n -------------------------------------------------------\r\n Returns:\r\n a new List object (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._front = None\r\n self._rear = None\r\n self._count = 0\r\n\r\n def is_empty(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Determines if the list is empty.\r\n Use: b = lst.is_empty()\r\n -------------------------------------------------------\r\n Returns:\r\n True if the list is empty, False otherwise.\r\n -------------------------------------------------------\r\n \"\"\"\r\n return self._front is None\r\n\r\n def __len__(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Returns the number of values in the list.\r\n Use: n = len(lst)\r\n -------------------------------------------------------\r\n Returns:\r\n the number of values in the list.\r\n -------------------------------------------------------\r\n \"\"\"\r\n return self._count\r\n\r\n def prepend(self, value):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Adds a copy of value to the front of the List.\r\n Use: lst.prepend(value)\r\n -------------------------------------------------------\r\n Parameters:\r\n value - a data element. (?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n # Create the new node.\r\n node = _List_Node(value, self._front)\r\n\r\n if self._rear is None:\r\n # List is empty - update the rear of the List..\r\n self._rear = node\r\n # Update the front of the List.\r\n self._front = node\r\n self._count += 1\r\n return\r\n\r\n def append(self, value):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Adds a copy of value to the end of the List.\r\n Use: lst.append(value)\r\n -------------------------------------------------------\r\n Parameters:\r\n value - a data element (?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n # Create the new node.\r\n node = _List_Node(value, None)\r\n\r\n if self._front is None:\r\n # list is empty - update the front of the List.\r\n self._front = node\r\n else:\r\n self._rear._next = node\r\n # Update the rear of the List.\r\n self._rear = node\r\n self._count += 1\r\n return\r\n\r\n def insert(self, i, value):\r\n \"\"\"\r\n -------------------------------------------------------\r\n A copy of value is added to index i, following values are pushed right.\r\n If i outside of range of -len(list) to len(list) - 1, the value is\r\n prepended or appended as appropriate.\r\n Use: lst.insert(i, value)\r\n -------------------------------------------------------\r\n Parameters:\r\n i - index value (int)\r\n value - a data element (?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n # Negative index adjustment.\r\n if i < 0:\r\n i = self._count + i\r\n\r\n if i <= 0:\r\n # Add value to the front of the list\r\n self.prepend(value)\r\n elif i >= self._count:\r\n # Add value to the end of the list\r\n self.append(value)\r\n else:\r\n # Add elsewhere in the list - not to front or rear\r\n j = 0\r\n previous = None\r\n current = self._front\r\n\r\n while j < i:\r\n # Find the proper location in the List\r\n previous = current\r\n current = current._next\r\n 
j += 1\r\n # Create the new node.\r\n node = _List_Node(value, current)\r\n previous._next = node\r\n self._count += 1\r\n return\r\n\r\n def _linear_search(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Searches for the first occurrence of key in list.\r\n Private helper method.\r\n (iterative algorithm)\r\n Use: previous, current, index = self._linear_search(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n previous - pointer to the node previous to the node containing key (_ListNode)\r\n current - pointer to the node containing key (_ListNode)\r\n index - index of the node containing key (int)\r\n -------------------------------------------------------\r\n \"\"\"\r\n previous = None\r\n current = self._front\r\n index = 0\r\n\r\n while current is not None and current._value != key:\r\n previous = current\r\n current = current._next\r\n index += 1\r\n\r\n if current is None:\r\n index = -1\r\n\r\n return previous, current, index\r\n\r\n def remove(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds, removes, and returns the first value in list that matches key.\r\n Use: value = lst.remove(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n value - the full value matching key, otherwise None (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n # search list for key.\r\n previous, current, _ = self._linear_search(key)\r\n\r\n if current is None:\r\n # Key is not found.\r\n value = None\r\n else:\r\n value = current._value\r\n self._count -= 1\r\n\r\n if previous is None:\r\n # Remove the first node.\r\n self._front = self._front._next\r\n\r\n if self._front is None:\r\n # List is empty, update _rear.\r\n self._rear = None\r\n else:\r\n # Remove any other node.\r\n previous._next = current._next\r\n\r\n if previous._next is None:\r\n # Last node was removed, update _rear.\r\n self._rear = previous\r\n return value\r\n\r\n def remove_front(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes the first node in the list and returns its value.\r\n Use: value = lst.remove_front()\r\n -------------------------------------------------------\r\n Returns:\r\n value - the first value in the list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._front is not None, \"Cannot remove from an empty list\"\r\n\r\n value = self._front._value\r\n self._front = self._front._next\r\n self._count -= 1\r\n\r\n if self._front is None:\r\n # Last node has been removed\r\n self._rear = None\r\n return value\r\n\r\n def remove_many(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds and removes all values in the list that match key.\r\n Use: lst.remove_many(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a data element (?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n while self._front is not None and self._front._value == key:\r\n # The front node contains the value to be removed\r\n self._front = self._front._next\r\n self._count -= 1\r\n\r\n if self._front is None:\r\n # All nodes have been removed\r\n self._front = None\r\n self._rear = None\r\n self._count = 0\r\n else:\r\n previous = self._front\r\n current = 
self._front._next\r\n\r\n while current is not None:\r\n # Remove key from the rest of the list\r\n if current._value == key:\r\n # Do not update previous\r\n self._count -= 1\r\n previous._next = current._next\r\n else:\r\n previous = current\r\n current = current._next\r\n # Update the rear node\r\n self._rear = previous\r\n return\r\n\r\n def find(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds and returns a copy of the first value in list that matches key.\r\n Use: value = lst.find(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n value - a copy of the full value matching key, otherwise None (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n _, current, _ = self._linear_search(key)\r\n\r\n if current is not None:\r\n value = deepcopy(current._value)\r\n else:\r\n value = None\r\n return value\r\n\r\n def peek(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Returns a copy of the first value in list.\r\n Use: value = lst.peek()\r\n -------------------------------------------------------\r\n Returns:\r\n value - a copy of the first value in the list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._front is not None, \"Cannot peek at an empty list\"\r\n\r\n value = deepcopy(self._front._value)\r\n return value\r\n\r\n def index(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds location of a value by key in list.\r\n Use: n = lst.index(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n i - the index of the location of key in the list, -1 if\r\n key is not in the list.\r\n -------------------------------------------------------\r\n \"\"\"\r\n _, _, i = self._linear_search(key)\r\n return i\r\n\r\n def _is_valid_index(self, i):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Private helper method to validate an index value.\r\n Python index values can be positive or negative and range from\r\n -len(list) to len(list) - 1\r\n Use: assert self._is_valid_index(i)\r\n -------------------------------------------------------\r\n Parameters:\r\n i - an index value (int)\r\n Returns:\r\n True if i is a valid index, False otherwise.\r\n -------------------------------------------------------\r\n \"\"\"\r\n n = self._count\r\n return -n <= i < n\r\n\r\n def __getitem__(self, i):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Returns a copy of the nth element of the list.\r\n Use: value = l[i]\r\n -------------------------------------------------------\r\n Parameters:\r\n i - index of the element to access (int)\r\n Returns:\r\n value - the i-th element of list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._is_valid_index(i), \"Invalid index value\"\r\n\r\n current = self._front\r\n\r\n if i < 0:\r\n # negative index - convert to positive\r\n i = self._count + i\r\n j = 0\r\n\r\n while j < i:\r\n current = current._next\r\n j += 1\r\n\r\n value = deepcopy(current._value)\r\n return value\r\n\r\n def __setitem__(self, i, value):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Places a copy of value into the list at position n.\r\n Use: l[i] = value\r\n -------------------------------------------------------\r\n 
Parameters:\r\n i - index of the element to access (int)\r\n value - a data value (?)\r\n Returns:\r\n The i-th element of list contains a copy of value. The\r\n existing value at i is overwritten.\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._is_valid_index(i), \"Invalid index value\"\r\n\r\n current = self._front\r\n\r\n if i < 0:\r\n # negative index - convert to positive\r\n i = self._count + i\r\n j = 0\r\n\r\n while j < i:\r\n current = current._next\r\n j += 1\r\n\r\n current._value = deepcopy(value)\r\n return\r\n\r\n def __contains__(self, key):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Determines if the list contains key.\r\n Use: b = key in l\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n True if the list contains key, False otherwise.\r\n -------------------------------------------------------\r\n \"\"\"\r\n _, current, _ = self._linear_search(key)\r\n return current is not None\r\n\r\n def max(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds the maximum value in list.\r\n Use: value = lst.max()\r\n -------------------------------------------------------\r\n Returns:\r\n max_data - a copy of the maximum value in the list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._front is not None, \"Cannot find maximum of an empty list\"\r\n\r\n max_node = self._front\r\n current = self._front._next\r\n\r\n while current is not None:\r\n if max_node._value < current._value:\r\n max_node = current\r\n current = current._next\r\n max_data = deepcopy(max_node._value)\r\n return max_data\r\n\r\n def min(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds the minimum value in list.\r\n Use: value = lst.min()\r\n -------------------------------------------------------\r\n Returns:\r\n min_data - a copy of the minimum value in the list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._front is not None, \"Cannot find maximum of an empty list\"\r\n\r\n min_node = self._front\r\n current = self._front._next\r\n\r\n while current is not None:\r\n if min_node._value > current._value:\r\n min_node = current\r\n current = current._next\r\n min_data = deepcopy(min_node._value)\r\n return min_data\r\n\r\n def count(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds the number of times key appears in list.\r\n Use: n = lst.count(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n number - number of times key appears in list (int)\r\n -------------------------------------------------------\r\n \"\"\"\r\n number = 0\r\n current = self._front\r\n\r\n while current is not None:\r\n if key == current._value:\r\n number += 1\r\n current = current._next\r\n return number\r\n\r\n def reverse(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Reverses the order of the elements in list.\r\n (iterative algorithm)\r\n Use: source.reverse()\r\n -------------------------------------------------------\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._rear = self._front\r\n previous = None\r\n current = self._front\r\n\r\n while current is not None:\r\n temp = current._next\r\n current._next = 
previous\r\n previous = current\r\n current = temp\r\n\r\n self._front = previous\r\n return\r\n\r\n def clean(self):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Removes duplicates from the sorted list. The list contains\r\n one and only one of each value formerly present in the list.\r\n The first occurrence of each value is preserved.\r\n Use: sl.clean()\r\n -------------------------------------------------------\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n key_node = self._front\r\n\r\n while key_node is not None:\r\n # Loop through every node - compare each node with the rest\r\n previous = key_node\r\n current = key_node._next\r\n\r\n while current is not None:\r\n # Always search to the end of the list (may have > 1 duplicate)\r\n if current._value == key_node._value:\r\n # Remove the current node by connecting the node before it\r\n # to the node after it.\r\n self._count -= 1\r\n previous._next = current._next\r\n else:\r\n previous = current\r\n # Move to the _next node.\r\n current = current._next\r\n # Update the rear\r\n self._rear = previous\r\n # Check for duplicates of the _next remaining node in the list\r\n key_node = key_node._next\r\n return\r\n\r\n def pop(self, *args):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Finds, removes, and returns the value in list whose index matches args.\r\n Use: value = lst.pop(args)\r\n -------------------------------------------------------\r\n Parameters:\r\n args - an array of arguments (?)\r\n args[0], if it exists, is the index\r\n Returns:\r\n value - if args exists, the value at position args, otherwise the last\r\n value in the list, value is removed from the list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert self._front is not None, \"Cannot pop from an empty list\"\r\n assert len(args) <= 1, \"No more than 1 argument allowed\"\r\n\r\n previous = None\r\n current = self._front\r\n\r\n if len(args) == 1:\r\n\r\n if args[0] < 0:\r\n # index is negative\r\n n = self._count + args[0]\r\n else:\r\n n = args[0]\r\n j = 0\r\n\r\n while j < n:\r\n previous = current\r\n current = current._next\r\n j += 1\r\n else:\r\n # find and pop the last element\r\n j = 0\r\n\r\n while j < (self._count - 1):\r\n previous = current\r\n current = current._next\r\n j += 1\r\n\r\n value = current._value\r\n self._count -= 1\r\n\r\n if previous is None:\r\n # Remove the first node.\r\n self._front = self._front._next\r\n\r\n if self._front is None:\r\n # List is empty, update _rear.\r\n self._rear = None\r\n else:\r\n # Remove any other node.\r\n previous._next = current._next\r\n\r\n if previous._next is None:\r\n # Last node was removed, update _rear.\r\n self._rear = previous\r\n return value\r\n\r\n def _swap(self, pln, prn):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Swaps the position of two nodes. 
The nodes in pln.next and prn.next\r\n have been swapped, and all links to them updated.\r\n Use: self._swap(pln, prn)\r\n -------------------------------------------------------\r\n Parameters:\r\n pln - node before the first list node to swap (_List_Node)\r\n prn - node before the second list node to swap (_List_Node)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n if pln is not prn:\r\n # Swap only if the two nodes are not the same node\r\n\r\n if pln is None:\r\n # Make r the new front\r\n left = self._front\r\n self._front = prn._next\r\n else:\r\n left = pln._next\r\n pln._next = prn._next\r\n\r\n if prn is None:\r\n # Make l the new front\r\n right = self._front\r\n self._front = left\r\n else:\r\n right = prn._next\r\n prn._next = left\r\n\r\n # Swap next pointers\r\n # lst._next, r._next = r._next, lst._next\r\n temp = left._next\r\n left._next = right._next\r\n right._next = temp\r\n # Update the rear\r\n if right._next is None:\r\n self._rear = right\r\n elif left._next is None:\r\n self._rear = left\r\n return\r\n\r\n def is_identical(self, target):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Determines whether two lists are identical.\r\n (iterative version)\r\n Use: b = source.is_identical(target)\r\n -------------------------------------------------------\r\n Parameters:\r\n target - another list (List)\r\n Returns:\r\n identical - True if this list contains the same values as\r\n target in the same order, otherwise False.\r\n -------------------------------------------------------\r\n \"\"\"\r\n if self._count != target._count:\r\n identical = False\r\n else:\r\n source_node = self._front\r\n target_node = target._front\r\n\r\n while source_node is not None and source_node._value == target_node._value:\r\n source_node = source_node._next\r\n target_node = target_node._next\r\n\r\n identical = source_node is None\r\n return identical\r\n\r\n def split_alt(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Splits the source list into separate target lists with values\r\n alternating into the targets. At finish source list is empty.\r\n Order of source values is preserved.\r\n (iterative algorithm)\r\n Use: target1, target2 = source.split_alt()\r\n -------------------------------------------------------\r\n Returns:\r\n target1 - contains alternating values from source (List)\r\n target2 - contains other alternating values from source (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n target1 = List()\r\n target2 = List()\r\n left = True\r\n\r\n while self._front is not None:\r\n\r\n if left:\r\n target1._move_front_to_rear(self)\r\n else:\r\n target2._move_front_to_rear(self)\r\n left = not left\r\n\r\n return target1, target2\r\n\r\n def intersection(self, source1, source2):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Update the current list with values that appear in both\r\n source1 and source2. 
Values do not repeat.\r\n (iterative algorithm)\r\n Use: target.intersection(source1, source2)\r\n -------------------------------------------------------\r\n Parameters:\r\n source1 - a linked list (List)\r\n source2 - a linked list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n source1_node = source1._front\r\n\r\n while source1_node is not None:\r\n value = source1_node._value\r\n _, current, _ = source2._linear_search(value)\r\n\r\n if current is not None:\r\n # Value exists in both source lists.\r\n _, current, _ = self._linear_search(value)\r\n\r\n if current is None:\r\n # Value does not appear in target list.\r\n self.append(value)\r\n\r\n source1_node = source1_node._next\r\n return\r\n\r\n def union(self, source1, source2):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Update the current list with all values that appear in\r\n source1 and source2. Values do not repeat.\r\n (iterative algorithm)\r\n Use: target.union(source1, source2)\r\n -------------------------------------------------------\r\n Parameters:\r\n source1 - an linked list (List)\r\n source2 - an linked list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n source1_node = source1._front\r\n\r\n while source1_node is not None:\r\n value = source1_node._value\r\n _, current, _ = self._linear_search(value)\r\n\r\n if current is None:\r\n # Value does not exist in new list.\r\n self.append(value)\r\n source1_node = source1_node._next\r\n\r\n source2_node = source2._front\r\n\r\n while source2_node is not None:\r\n value = source2_node._value\r\n _, current, _ = self._linear_search(value)\r\n\r\n if current is None:\r\n # Value does not exist in current list.\r\n self.append(value)\r\n\r\n source2_node = source2_node._next\r\n return\r\n\r\n def split_th(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Splits source into two parts. target1 contains the first half,\r\n target2 the second half. Current list becomes empty.\r\n Uses Tortoise/Hare algorithm.\r\n Use: target1, target2 = source.split_th()\r\n -------------------------------------------------------\r\n Returns:\r\n target1 - a new List with >= 50% of the original List (List)\r\n target2 - a new List with <= 50% of the original List (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n target1 = List()\r\n target2 = List()\r\n # Initialize both temporary pointers to beginning of the list.\r\n hare = self._front\r\n tortoise = self._front\r\n\r\n while hare is not None and hare._next is not None:\r\n # Move hare down two nodes.\r\n hare = hare._next._next\r\n\r\n if hare is not None:\r\n # Update tortoise only if hare is not None.\r\n tortoise = tortoise._next\r\n\r\n # Split the list.\r\n if tortoise is not None:\r\n target1._front = self._front\r\n target1._rear = tortoise\r\n target2._front = tortoise._next\r\n tortoise._next = None\r\n target2._count = self._count // 2\r\n\r\n if target2._count > 0:\r\n target2._rear = self._rear\r\n target1._count = self._count - target2._count\r\n self._count = 0\r\n self._front = None\r\n self._rear = None\r\n return target1, target2\r\n\r\n def split(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Splits list into two parts. target1 contains the first half,\r\n target2 the second half. 
Current list becomes empty.\r\n Use: target1, target2 = lst.split()\r\n -------------------------------------------------------\r\n Returns:\r\n target1 - a new List with >= 50% of the original List (List)\r\n target2 - a new List with <= 50% of the original List (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n target1 = List()\r\n target2 = List()\r\n # Split\r\n middle = self._count // 2 + self._count % 2\r\n previous = None\r\n current = self._front\r\n\r\n for _ in range(middle):\r\n previous = current\r\n current = current._next\r\n\r\n # Define target1\r\n target1._front = self._front\r\n target1._rear = previous\r\n\r\n if previous is not None:\r\n previous._next = None\r\n\r\n # Set the counts and define target2\r\n target1._count = middle\r\n target2._count = self._count - middle\r\n\r\n target2._front = current\r\n\r\n if target2._count > 0:\r\n target2._rear = self._rear\r\n\r\n # Clean up self\r\n self._front = None\r\n self._rear = None\r\n self._count = 0\r\n return target1, target2\r\n\r\n def split_key(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Splits list so that target1 contains all values < key,\r\n and target2 contains all values >= key.\r\n Use: target1, target2 = lst.split_key(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a key value to split the list upon (?)\r\n Returns:\r\n target1 - a new List of values < key (List)\r\n target2 - a new List of values >= key (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n target1 = List()\r\n target2 = List()\r\n\r\n while self._front is not None:\r\n\r\n if self._front._value < key:\r\n target1._move_front_to_rear(self)\r\n else:\r\n target2._move_front_to_rear(self)\r\n return target1, target2\r\n\r\n def split_apply(self, func):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Splits list into two parts. target1 contains all the values\r\n where the result of calling func(value) is True, target2 contains\r\n the remaining values. At finish, self is empty. 
Order of values\r\n in targets is maintained.\r\n Use: target1, target2 = lst.split_apply(func)\r\n -------------------------------------------------------\r\n Parameters:\r\n func - a function that given a value in the list returns\r\n True for some condition, otherwise returns False.\r\n Returns:\r\n target1 - a new List with values where func(value) is True (List)\r\n target2 - a new List with values where func(value) is False (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n target1 = List()\r\n target2 = List()\r\n # Split\r\n while self._front is not None:\r\n\r\n if func(self._front._value):\r\n target1._move_front_to_rear(self)\r\n else:\r\n target2._move_front_to_rear(self)\r\n return target1, target2\r\n\r\n def copy(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Duplicates the current list to a new list in the same order.\r\n (iterative version)\r\n Use: target = lst.copy()\r\n -------------------------------------------------------\r\n Returns:\r\n target - a copy of self (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n target = List()\r\n\r\n if self._front is not None:\r\n # Set up the new list front.\r\n target._front = _List_Node(self._front._value, None)\r\n previous = target._front\r\n current = self._front._next\r\n\r\n while current is not None:\r\n # Add a node in the new list.\r\n new_node = _List_Node(current._value, None)\r\n previous._next = new_node\r\n previous = new_node\r\n # Move to the next node in the current list.\r\n current = current._next\r\n target._rear = previous\r\n target._count = self._count\r\n return target\r\n\r\n def reverse_pc(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Reverses a list through partitioning and concatenation.\r\n Use: lst.reverse_pc()\r\n -------------------------------------------------------\r\n Returns:\r\n The contents of the current list are reversed.\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._front = self._reverse_pc_aux(self._front)\r\n\r\n def _reverse_pc_aux(self, current):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Auxiliary function for reverse_pc.\r\n Use: node = self._reverse_pc_aux(node)\r\n -------------------------------------------------------\r\n Parameters:\r\n current - the current node to process (_List_Node)\r\n Returns:\r\n result -\r\n Recursively split_key and concatenate the list until the end\r\n of the list is reached.\r\n -------------------------------------------------------\r\n \"\"\"\r\n if current is None:\r\n result = None\r\n else:\r\n head, tail = self._partition(current)\r\n tail = self._reverse_pc_aux(tail)\r\n result = self._concantenate(tail, head)\r\n return result\r\n\r\n def _partition(self, current):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Partitions a list at the current node.\r\n -------------------------------------------------------\r\n Parameters:\r\n current - the current node to process\r\n Parameters:\r\n current - a valid node dictionary\r\n Returns:\r\n Partition a list by removing the first node as 'head' and\r\n naming the remainder of the list 'tail'. 
'node' cannot be None.\r\n -------------------------------------------------------\r\n \"\"\"\r\n tail = current._next\r\n head = current\r\n head._next = None\r\n return head, tail\r\n\r\n def _concantenate(self, tail, head):\r\n \"\"\"\r\n -------------------------------------------------------\r\n [function description]\r\n -------------------------------------------------------\r\n Parameters:\r\n head - the head node of a list\r\n tail - the tail node of a list\r\n Parameters:\r\n head - a valid node dictionary\r\n tail - a valid node dictionary\r\n Returns:\r\n Appends the 'head' node to the end of 'tail'.\r\n -------------------------------------------------------\r\n \"\"\"\r\n previous = None\r\n current = tail\r\n\r\n # Find the end of 'tail'.\r\n while current is not None:\r\n previous = current\r\n current = current._next\r\n\r\n # Append 'head' to the end of 'tail'.\r\n if previous is None:\r\n tail = head\r\n else:\r\n previous._next = head\r\n # 'tail' is the new head of the list.\r\n return tail\r\n\r\n def _move_front_to_front(self, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Moves the front node from the source List to the front\r\n of the current List. Private helper method.\r\n Use: self._move_front_to_front(source)\r\n -------------------------------------------------------\r\n Parameters:\r\n source - a non-empty linked List (List)\r\n Returns:\r\n The current List contains the old front of the source List and\r\n its count is updated. The source List front and count are updated.\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert source._front is not None, \\\r\n \"Cannot move the front of an empty List\"\r\n\r\n node = source._front\r\n # Update the source list\r\n source._count -= 1\r\n source._front = source._front._next\r\n\r\n if source._front is None:\r\n # Clean up source list if empty.\r\n source._rear = None\r\n\r\n # Update the target list\r\n node._next = self._front\r\n self._front = node\r\n\r\n if self._rear is None:\r\n # Target list is empty\r\n self._rear = node\r\n self._count += 1\r\n return\r\n\r\n def _move_front_to_rear(self, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Moves the front node from the source List to the rear\r\n of the current List. Private helper method.\r\n Use: self._move_front_to_rear(source)\r\n -------------------------------------------------------\r\n Parameters:\r\n rs - a non-empty linked List (List)\r\n Returns:\r\n The current List contains the old front of the source List and\r\n its count is updated. 
The source List front and count are updated.\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert source._front is not None, \\\r\n \"Cannot move the front of an empty List\"\r\n\r\n node = source._front\r\n # Update the source list\r\n source._count -= 1\r\n source._front = source._front._next\r\n\r\n if source._front is None:\r\n # Clean up source list if empty.\r\n source._rear = None\r\n\r\n # Update the target list\r\n if self._rear is None:\r\n self._front = node\r\n else:\r\n self._rear._next = node\r\n\r\n node._next = None\r\n self._rear = node\r\n self._count += 1\r\n return\r\n\r\n def combine(self, source1, source2):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Combines two source lists into the current target list.\r\n When finished, the contents of source1 and source2 are interlaced\r\n into target and source1 and source2 are empty.\r\n Order of source values is preserved.\r\n (iterative algorithm)\r\n Use: target.combine(source1, source2)\r\n -------------------------------------------------------\r\n Parameters:\r\n source1 - an linked list (List)\r\n source2 - an linked list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n while source1._front is not None and source2._front is not None:\r\n self._move_front_to_rear(source1)\r\n self._move_front_to_rear(source2)\r\n\r\n if source1._front is not None:\r\n self._append_list(source1)\r\n\r\n if source2._front is not None:\r\n self._append_list(source2)\r\n return\r\n\r\n def _append_list(self, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Appends the entire source list to the rear of the target list.\r\n The source list becomes empty.\r\n Use: target._append_list(source)\r\n -------------------------------------------------------\r\n Parameters:\r\n source - an linked list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n assert source._front is not None, \"Cannot append an empty list\"\r\n\r\n node = source._front\r\n # Update the target queue\r\n if self._rear is None:\r\n # Current queue is empty.\r\n self._front = node\r\n else:\r\n self._rear._next = node\r\n self._rear = source._rear\r\n self._count += source._count\r\n # Empty the source queue.\r\n source._front = None\r\n source._rear = None\r\n source._count = 0\r\n return\r\n\r\n def __iter__(self):\r\n \"\"\"\r\n USE FOR TESTING ONLY\r\n -------------------------------------------------------\r\n Generates a Python iterator. 
Iterates through the list\r\n from front to rear.\r\n Use: for v in s:\r\n -------------------------------------------------------\r\n Returns:\r\n yields\r\n value - the next value in the list (?)\r\n -------------------------------------------------------\r\n \"\"\"\r\n current = self._front\r\n\r\n while current is not None:\r\n yield current._value\r\n current = current._next\r\n\r\n def is_identical_r(self, target):\r\n \"\"\"\r\n ---------------------------------------------------------\r\n Determines whether two lists are identical.\r\n (recursive version)\r\n Use: b = source.is_identical_r(target)\r\n -------------------------------------------------------\r\n Parameters:\r\n target - another list (List)\r\n Returns:\r\n identical - True if this list contains the same values as\r\n target in the same order, otherwise False.\r\n -------------------------------------------------------\r\n \"\"\"\r\n if self._count != target._count:\r\n identical = False\r\n else:\r\n identical = self.is_identical_r_aux(self._front, target._front)\r\n return identical\r\n\r\n def is_identical_r_aux(self, node1, node2):\r\n \"\"\"\r\n -------------------------------------------------------\r\n An auxiliary function for is_identical_r\r\n -------------------------------------------------------\r\n Parameters:\r\n node1 - a List node (_ListNode)\r\n node2 - a List node (_ListNode)\r\n Returns:\r\n identical - True if node1 contains the same values as node2\r\n in the same order, otherwise False.\r\n -------------------------------------------------------\r\n \"\"\"\r\n if node1 is None:\r\n identical = True\r\n elif node1._value != node2._value:\r\n identical = False\r\n else:\r\n identical = self.is_identical_r_aux(node1._next,\r\n node2._next)\r\n return identical\r\n\r\n def split_alt_r(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Splits the source list into separate target lists with values\r\n alternating into the targets. At finish source list is empty.\r\n Order of source values is preserved.\r\n (recursive algorithm)\r\n Use: target1, target2 = source.split_alt_r()\r\n -------------------------------------------------------\r\n Returns:\r\n target1 - contains alternating values from source (List)\r\n target2 - contains other alternating values from source (List)\r\n -------------------------------------------------------\r\n \"\"\"\r\n even = List()\r\n odd = List()\r\n self._split_alt_r_aux(even, odd)\r\n return even, odd\r\n\r\n def _split_alt_r_aux(self, even, odd):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Split a list into two parts. even contains the even indexed\r\n elements, odd contains the odd numbered elements.\r\n Order of even and odd is not significant.\r\n -------------------------------------------------------\r\n Parameters:\r\n even - the even numbered elements of the source list (List)\r\n odd - the odd numbered elements of the source list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n if self._front is not None:\r\n even._move_front_to_rear(self)\r\n # Reverse the order of the arguments.\r\n self._split_alt_r_aux(odd, even)\r\n return\r\n\r\n def intersection_r(self, source1, source2):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Update the current list with values that appear in both\r\n source1 and source2. 
Values do not repeat.\r\n (recursive algorithm)\r\n Use: target.intersection_r(source1, source2)\r\n -------------------------------------------------------\r\n Parameters:\r\n source1 - a linked list (List)\r\n source2 - a linked list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._intersection_r_aux(source2, source1._front)\r\n return\r\n\r\n def _intersection_r_aux(self, source2, node1):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Auxiliary function for intersection_r.\r\n Use: self._intersection_r_aux(source2, node1)\r\n -------------------------------------------------------\r\n Parameters:\r\n source2 - the second source list to search for values (List)\r\n node1 - the current node of the first source list (_ListNode)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n if node1 is not None:\r\n value = node1._value\r\n _, current, _ = source2._linear_search(value)\r\n\r\n if current is not None:\r\n # Value exists in both lists.\r\n _, current, _ = self._linear_search(value)\r\n\r\n if current is None:\r\n # Value does not appear in new list.\r\n self.append(value)\r\n self._intersection_r_aux(source2, node1._next)\r\n return\r\n\r\n def union_r(self, source1, source2):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Update the current list with all values that appear in\r\n source1 and source2. Values do not repeat.\r\n (recursive algorithm)\r\n Use: target.union_r(source1, source2)\r\n -------------------------------------------------------\r\n Parameters:\r\n source1 - a linked list (List)\r\n source2 - a linked list (List)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._union_r_aux(source1._front)\r\n self._union_r_aux(source2._front)\r\n return\r\n\r\n def _union_r_aux(self, node):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Auxiliary function for union_r.\r\n Use: self._union_r_aux(node)\r\n -------------------------------------------------------\r\n Parameters:\r\n node - the current node of a source list (_ListNode)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n if node is not None:\r\n _, current, _ = self._linear_search(node._value)\r\n\r\n if current is None:\r\n # Value does not exist in current list.\r\n self.append(node._value)\r\n self._union_r_aux(node._next)\r\n return\r\n\r\n def reverse_r(self):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Reverses the order of the elements in list.\r\n (recursive algorithm)\r\n Use: source.reverse_r()\r\n -------------------------------------------------------\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n self._rear = self._front\r\n self._front = self._reverse_r_aux(None, self._front)\r\n return\r\n\r\n def _reverse_r_aux(self, previous, current):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Auxiliary recursive function for reverse_r.\r\n Use: previous = self._reverse_r_aux(previous, current)\r\n -------------------------------------------------------\r\n Parameters:\r\n previous - the node to link back to (_ListNode)\r\n current - the node to update the _next link (_ListNode)\r\n Returns:\r\n previous - the last node linked to\r\n -------------------------------------------------------\r\n \"\"\"\r\n if current is not None:\r\n temp = current._next\r\n current._next = previous\r\n previous = 
self._reverse_r_aux(current, temp)\r\n return previous\r\n\r\n def _linear_search_r(self, key):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Searches for the first occurrence of key in list.\r\n Private helper method.\r\n (recursive algorithm)\r\n Use: previous, current, index = self._linear_search_r(key)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n Returns:\r\n previous - pointer to the node previous to the node containing key (_ListNode)\r\n current - pointer to the node containing key (_ListNode)\r\n index - index of the node containing key (int)\r\n -------------------------------------------------------\r\n \"\"\"\r\n previous, current, index = self._linear_search_r_aux(\r\n key, None, self._front, 0)\r\n return previous, current, index\r\n\r\n def _linear_search_r_aux(self, key, previous, current, index):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Auxiliary method for _linear_search_r.\r\n Use: p, c, i = self._linear_search_r_aux(key, previous, current, index)\r\n -------------------------------------------------------\r\n Parameters:\r\n key - a partial data element (?)\r\n previous - pointer to the node previous to the node containing key (_ListNode)\r\n current - pointer to the node containing key (_ListNode)\r\n index - index of the node containing key, -1 if key not found (int)\r\n Returns:\r\n previous - pointer to the node previous to the node containing key (_ListNode)\r\n current - pointer to the node containing key (_ListNode)\r\n index - index of the node containing key, -1 if key not found (int)\r\n -------------------------------------------------------\r\n \"\"\"\r\n if current is None:\r\n index = -1\r\n elif current._value != key:\r\n previous, current, index = self._linear_search_r_aux(\r\n key, current, current._next, index + 1)\r\n\r\n return previous, current, index\r\n","repo_name":"maxkdann/CP164","sub_path":"dann4440_data_structures/src/List_linked2.py","file_name":"List_linked2.py","file_ext":"py","file_size_in_byte":51798,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"44091915163","text":"import os\nimport pprint\nimport math\nimport copy\n\nfilemap = {\"sample\": \"sample13.txt\", \"real\": \"problem13.txt\"}\n\n# we use a booleanstack: 1 is True, -1 is False,\n# so we can keep track of whether we ever saw a True or False\n# in our stack of comparisons, and we can\n# return out of our comparison function early;\n# if we see neither True nor False, we may\n# return out of the recursive call without setting\n# true or false in the stack\n\nclass Pair():\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n\n def compare(self, booleanstack):\n if 1 in booleanstack:\n return True\n elif -1 in booleanstack:\n return False\n elif isinstance(self.left, int) and isinstance(self.right, int):\n if self.left < self.right:\n booleanstack.append(1)\n return True\n elif self.left > self.right:\n booleanstack.append(-1)\n return False\n else:\n # we could add a zero here to indicate a tie but it's unnecessary\n booleanstack.append(0)\n return\n elif isinstance(self.left, list) and isinstance(self.right, list):\n print(\"both are lists\")\n if len(self.left) == 0 and len(self.right) == 0:\n return\n elif len(self.left) == 0:\n print(\"left finished first, true\")\n booleanstack.append(1)\n return True\n elif len(self.right) == 0:\n print(\"right 
finished first, false\")\n booleanstack.append(-1)\n return False\n l = self.left[0]\n r = self.right[0]\n lremain = self.left[1:] if len(self.left) > 1 else []\n rremain = self.right[1:] if len(self.right) > 1 else []\n print(f\"newleft {l}, newright {r} and {lremain}, {rremain}\")\n headpair = Pair(l, r)\n tailpair = Pair(lremain, rremain)\n\n headpair.compare(booleanstack)\n if 1 in booleanstack:\n return True\n elif -1 in booleanstack:\n return False\n\n tailpair.compare(booleanstack)\n if 1 in booleanstack:\n return True\n elif -1 in booleanstack:\n return False\n\n elif isinstance(self.left, int) and isinstance(self.right, list):\n newleft = [self.left]\n newpair = Pair(newleft, self.right)\n\n newpair.compare(booleanstack)\n if 1 in booleanstack:\n return True\n elif -1 in booleanstack:\n return False\n elif isinstance(self.right, int) and isinstance(self.left, list):\n newright = [self.right]\n newpair = Pair(self.left, newright)\n newpair.compare(booleanstack)\n if 1 in booleanstack:\n return True\n elif -1 in booleanstack:\n return False\n\n\n\n def __repr__(self):\n return f\"left: {self.left};\\t right: {self.right}\"\n\n\n\ndef run_part_1(sample_or_real):\n file = os.path.join(\"input_files\", filemap[sample_or_real])\n with open(file, \"r\") as f:\n data = f.read()\n firstsplit = data.split(\"\\n\\n\")\n allpairs = []\n for entry in firstsplit:\n left, right = entry.split(\"\\n\")\n evalleft = eval(left)\n evalright = eval(right)\n pair = Pair(evalleft, evalright)\n allpairs.append(pair)\n\n print(allpairs)\n right_order = []\n for i, pair in enumerate(allpairs, start=1):\n print(f\"comparing index {i} pair {pair}\")\n if pair.compare(booleanstack=[]):\n print(f\"index {i} in good order with pair {pair}\")\n right_order.append(i)\n\n print(right_order)\n print(sum(right_order))\n\n\ndef run_part_2(sample_or_real):\n # resolve the input file path from the chosen dataset\n file = os.path.join(\"input_files\", filemap[sample_or_real])\n with open(file, \"r\") as f:\n data = f.read()\n print(data)\n\n\nif __name__ == \"__main__\":\n while True:\n which_one_to_run = input(\"which part to run? 
enter 1 or 2:\\n\")\n if which_one_to_run in (\"1\", \"2\"):\n print(\"good choice\")\n break\n\n if which_one_to_run == \"1\":\n run_part_1(\"sample\")\n run_part_1(\"real\")\n else:\n run_part_2(\"sample\")\n # run_part_2(\"real\")\n","repo_name":"robtsai/advent-of-code-2022","sub_path":"13_distress_signal.py","file_name":"13_distress_signal.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13442891749","text":"import random as rand\nimport math\n\n\ndef numberToBase(n, b):\n if n == 0:\n return [0]\n digits = []\n while n:\n digits.append(int(n % b))\n n //= b\n return digits[::-1]\n\nfile = open('nums.txt', 'w')\nfile2 = open('nums_dec.txt', 'w')\nfor i in range(100):\n bits = rand.getrandbits(rand.randint(1, 300))\n file.write(\"{:X}\\n\".format(bits))\n file2.write(\"{}\\n\".format(bits))\nfile.close()\nfile2.close()\n\nfile = open('nums_div.txt', 'w')\nfile2 = open('nums_div_dec.txt', 'w')\nfor i in range(100):\n bits = rand.getrandbits(rand.randint(300, 400))\n bits2 = rand.getrandbits(rand.randint(1, 199))\n while(bits2 == 0):\n bits2 = rand.getrandbits(rand.randint(1, 199))\n file.write(\"{:X}\\n\".format(bits))\n file.write(\"{:X}\\n\".format(bits2))\n file2.write(\"{}\\n\".format(bits))\n file2.write(\"{}\\n\".format(bits2))\nfile.close()\nfile2.close()\n\nfile = open('nums_mont.txt', 'w')\nfile2 = open('nums_mont_dec.txt', 'w')\nfor i in range(100):\n A = rand.getrandbits(rand.randint(1, 200))\n E = rand.getrandbits(rand.randint(1, 6))\n M = rand.getrandbits(rand.randint(1, 100))\n while(M == 0 or M % 2 == 0):\n M = rand.getrandbits(rand.randint(1, 100))\n file.write(\"{:X}\\n\".format(A))\n file.write(\"{:X}\\n\".format(E))\n file.write(\"{:X}\\n\".format(M))\n file2.write(\"{}\\n\".format(A))\n file2.write(\"{}\\n\".format(E))\n file2.write(\"{}\\n\".format(M))\nfile.close()\nfile2.close()\n\nref_add = open('ref_add.txt', 'w')\nref_sub = open('ref_sub.txt', 'w')\nref_mul = open('ref_mul.txt', 'w')\nref_div = open('ref_div.txt', 'w')\nref_cmp = open('ref_cmp.txt', 'w')\nref_gcd = open('ref_gcd.txt', 'w')\nref_mont = open('ref_mont.txt', 'w')\nref_modinv = open('ref_modinv.txt', 'w')\nwith open('nums.txt') as f:\n lines = f.readlines()\n prev = int(lines[0], 16)\n for line in range(1, len(lines)):\n num = int(lines[line], 16)\n ref_add.write(\"{:X}\\n\".format(prev + num))\n ref_sub.write(\"{:X}\\n\".format(prev - num))\n ref_mul.write(\"{:X}\\n\".format(prev * num))\n if prev > num:\n ref_cmp.write(\"1\\n\")\n if prev < num:\n ref_cmp.write(\"-1\\n\")\n if prev == num:\n ref_cmp.write(\"0\\n\")\n ref_gcd.write(\"{:X}\\n\".format(math.gcd(prev, num)))\n try:\n ref_modinv.write(\"{:X}\\n\".format(pow(prev, -1, num)))\n except:\n ref_modinv.write(\"{:X}\\n\".format(0))\n prev = num\n\nwith open('nums_div.txt') as f:\n lines = f.readlines()\n for line in range(0, len(lines), 2):\n top = int(lines[line], 16)\n bottom = int(lines[line+1], 16)\n ref_div.write(\"{:X}\\n\".format(top // bottom))\n ref_div.write(\"{:X}\\n\".format(top % bottom))\n\nwith open('nums_mont.txt') as f:\n lines = f.readlines()\n for line in range(0, len(lines), 3):\n A = int(lines[line], 16)\n E = int(lines[line + 1], 16)\n M = int(lines[line + 2], 16)\n ref_mont.write(\"{:X}\\n\".format((A ** E)%M))\n\nfile = open('nums_binary.txt', 'w')\nfileref = open('ref_binary.txt', 'w')\nfor i in range(100):\n bits = rand.getrandbits(rand.randint(1, 300))\n bytes = numberToBase(bits, 256)\n s = \"\"\n for byte in 
bytes:\n if byte == 10 or byte == 14 or byte == 13: # skip control bytes (10, 13, 14) that would corrupt the line-based reference file\n i -= 1\n continue\n s += \"{:c}\".format(byte)\n s += \"\\n\"\n file.write(\"{:X}\\n\".format(bits))\n fileref.write(s)\nfile.close()\nfileref.close()\n\nref_add.close()\nref_sub.close()\nref_mul.close()\nref_div.close()\nref_cmp.close()\nref_gcd.close()\nref_mont.close()\nref_modinv.close()\n","repo_name":"emesic23/C_ssh_keygen","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32554206510","text":"import pprint\n\ninv = {'sword': 2, 'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\ndragonLoot = ['gold coin', 'dagger', 'gold coin', 'ruby', 'pin', 'pin', 'pin']\n\ndef display_inventory(stuff):\n print('Inventory:')\n total_num_items = 0\n for k, v in stuff.items():\n print(str(k) + ': ' + str(v))\n total_num_items += v\n print('Total number of items: ' + str(total_num_items))\n\ndef addToInventory(stuff, addedItems):\n for item in addedItems:\n stuff.setdefault(item, 0)\n stuff[item] += 1\n\n\n# display_inventory(inv)\naddToInventory(inv, dragonLoot)\npprint.pprint(inv)\n","repo_name":"CommitHooks/Python-Excercises","sub_path":"Page120Proj.py","file_name":"Page120Proj.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6209304215","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cadastros', '0007_auto_20150111_1513'),\n ('portaria', '0007_movimento_motivo'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='movimento',\n name='planta',\n field=models.ForeignKey(default=1, verbose_name=b'Planta de Opera\\xc3\\xa7\\xc3\\xa3o', to='cadastros.Planta'),\n preserve_default=True,\n ),\n ]\n","repo_name":"jairvercosa/onyxlog","sub_path":"onyxlog/portaria/migrations/0008_movimento_planta.py","file_name":"0008_movimento_planta.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"35379032131","text":"counter = 0 # the sum of the numbers will be stored in this variable\ntotal = 0 # the values we convert inside try are accumulated in this variable\n\nwhile True:\n text = input(\"Please enter a number or sum!\")\n if text == \"sum\":\n counter += total\n print(counter)\n break\n try:\n interger = float(text) # try to convert the input to a float\n total += interger\n\n except ValueError:\n print(\"Please enter a number or sum!\")","repo_name":"OMunya/Learn-python-Hillel-05-05","sub_path":"Lesson - 5/Write_sum_or_count.py","file_name":"Write_sum_or_count.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"938305698","text":"import logging\nimport threading\n\nimport repanier.apps\nfrom django.contrib.admin.helpers import ACTION_CHECKBOX_NAME\nfrom django.core.checks import messages\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom django.template import Context as TemplateContext, Template\nfrom django.urls import reverse_lazy, reverse, path\nfrom django.utils import timezone\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom 
repanier.admin.admin_filter import AdminFilterPermanenceInPreparationStatus\nfrom repanier.admin.forms import (\n OpenAndSendOfferForm,\n CloseAndSendOrderForm,\n GeneratePermanenceForm,\n)\nfrom repanier.admin.sale import (\n ProducerAutocomplete,\n SaleAdmin,\n)\nfrom repanier.admin.tools import (\n check_cancel_in_post,\n check_permanence,\n)\nfrom repanier.const import *\nfrom repanier.email.email import RepanierEmail\nfrom repanier.fields.RepanierMoneyField import RepanierMoney\nfrom repanier.middleware import add_filter\nfrom repanier.models.deliveryboard import DeliveryBoard\nfrom repanier.models.permanenceboard import PermanenceBoard\nfrom repanier.models.producer import Producer\nfrom repanier.models.product import Product\nfrom repanier.models.staff import Staff\nfrom repanier.task import task_order\nfrom repanier.task.task_order import open_order, close_order\nfrom repanier.tools import get_recurrence_dates, get_repanier_template_name\nfrom repanier.xlsx.xlsx_offer import export_offer\nfrom repanier.xlsx.xlsx_order import generate_producer_xlsx, generate_customer_xlsx\n\nlogger = logging.getLogger(__name__)\n\n\nclass PermanenceInPreparationAdmin(SaleAdmin):\n list_display = (\n \"get_permanence_admin_display\",\n \"get_row_actions\",\n \"get_producers_with_download\",\n \"get_customers_with_download\",\n \"get_board\",\n \"get_html_status_display\",\n )\n change_list_url = reverse_lazy(\"admin:repanier_permanenceinpreparation_changelist\")\n description = \"offer_description_v2\"\n list_filter = (AdminFilterPermanenceInPreparationStatus,)\n ordering = (\n \"invoice_sort_order\",\n \"canceled_invoice_sort_order\",\n \"permanence_date\",\n \"id\",\n )\n\n def has_delete_permission(self, request, obj=None):\n if obj is None:\n return False\n if request.user.is_order_manager:\n if obj.highest_status == SaleStatus.PLANNED.value:\n return True\n return False\n\n def has_add_permission(self, request):\n return request.user.is_order_manager\n\n def has_change_permission(self, request, permanence=None):\n return request.user.is_order_manager\n\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\n \"producer_autocomplete/\",\n ProducerAutocomplete.as_view(),\n name=\"producer-autocomplete\",\n ),\n path(\n \"/export-offer/\",\n self.admin_site.admin_view(self.export_offer),\n name=\"permanence-export-offer\",\n ),\n path(\n \"/export-customer-opened-order/\",\n self.admin_site.admin_view(self.export_customer_opened_order),\n name=\"permanence-export-customer-opened-order\",\n ),\n path(\n \"/export-customer-closed-order/\",\n self.admin_site.admin_view(self.export_customer_closed_order),\n name=\"permanence-export-customer-closed-order\",\n ),\n path(\n \"/export-producer-opened-order/\",\n self.admin_site.admin_view(self.export_producer_opened_order),\n name=\"permanence-export-producer-opened-order\",\n ),\n path(\n \"/export-producer-closed-order/\",\n self.admin_site.admin_view(self.export_producer_closed_order),\n name=\"permanence-export-producer-closed-order\",\n ),\n path(\n \"/open-order/\",\n self.admin_site.admin_view(self.open_order),\n name=\"permanence-open-order\",\n ),\n path(\n \"/close-order/\",\n self.admin_site.admin_view(self.close_order),\n name=\"permanence-close-order\",\n ),\n path(\n \"/back-to-scheduled/\",\n self.admin_site.admin_view(self.back_to_scheduled),\n name=\"permanence-back-to-scheduled\",\n ),\n path(\n \"/generate-permanence/\",\n self.admin_site.admin_view(self.generate_permanence),\n name=\"generate-permanence\",\n ),\n 
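# Every route above is wrapped in self.admin_site.admin_view(), so the standard admin login and permission checks run before each of these views.\n 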
]\n return custom_urls + urls\n\n @check_permanence(SaleStatus.PLANNED)\n def export_offer(self, request, permanence_id, permanence=None):\n wb = export_offer(permanence=permanence, wb=None)\n if wb is not None:\n response = HttpResponse(\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n response[\n \"Content-Disposition\"\n ] = \"attachment; filename={0}-{1}.xlsx\".format(\n _(\"Preview report\"), permanence\n )\n wb.save(response)\n return response\n else:\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n\n export_offer.short_description = _(\"1 --- Check the offer\")\n\n @check_cancel_in_post\n @check_permanence(SaleStatus.OPENED)\n def export_customer_opened_order(self, request, permanence_id, permanence=None):\n return self.export_customer_order(\n request, permanence, action=\"export_customer_opened_order\"\n )\n\n @check_cancel_in_post\n @check_permanence(SaleStatus.SEND)\n def export_customer_closed_order(self, request, permanence_id, permanence=None):\n return self.export_customer_order(\n request, permanence, action=\"export_customer_closed_order\"\n )\n\n def export_customer_order(self, request, permanence, action):\n if not permanence.with_delivery_point:\n # Perform the action directly. Do not ask to select any delivery point.\n response = None\n wb = generate_customer_xlsx(permanence=permanence)[0]\n if wb is not None:\n response = HttpResponse(\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n response[\n \"Content-Disposition\"\n ] = \"attachment; filename={0}-{1}.xlsx\".format(\n _(\"Customers\"), permanence\n )\n wb.save(response)\n return response\n if \"apply\" in request.POST:\n if ACTION_CHECKBOX_NAME in request.POST:\n deliveries_to_be_exported = request.POST.getlist(\"deliveries\", [])\n if len(deliveries_to_be_exported) == 0:\n user_message = _(\"You must select at least one delivery point.\")\n user_message_level = messages.WARNING\n self.message_user(request, user_message, user_message_level)\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n # Also display order without delivery point -> The customer has not selected it yet\n # deliveries_to_be_exported.append(None)\n else:\n deliveries_to_be_exported = ()\n response = HttpResponse(\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n response[\n \"Content-Disposition\"\n ] = \"attachment; filename={0}-{1}.xlsx\".format(_(\"Customers\"), permanence)\n wb = generate_customer_xlsx(\n permanence=permanence, deliveries_id=deliveries_to_be_exported\n )[0]\n if wb is not None:\n wb.save(response)\n return response\n template_name = get_repanier_template_name(\n \"admin/confirm_export_customer_order.html\"\n )\n return render(\n request,\n template_name,\n {\n **self.admin_site.each_context(request),\n \"action_checkbox_name\": ACTION_CHECKBOX_NAME,\n \"action\": \"export_customer_order\",\n \"permanence\": permanence,\n \"deliveries\": DeliveryBoard.objects.filter(permanence_id=permanence.id),\n },\n )\n\n @check_permanence(SaleStatus.OPENED)\n def export_producer_opened_order(self, request, permanence_id, permanence=None):\n return self.export_producer_order(request, permanence)\n\n @check_permanence(SaleStatus.SEND)\n def export_producer_closed_order(self, request, permanence_id, permanence=None):\n return self.export_producer_order(request, permanence)\n\n def export_producer_order(self, request, permanence):\n # The export producer order use the offer 
item qty ordered\n # So that, this export is for all deliveries points\n # Perform the action directly. Do not ask to select any delivery point.\n response = HttpResponse(\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n response[\"Content-Disposition\"] = \"attachment; filename={0}-{1}.xlsx\".format(\n _(\"Producers\"), permanence\n )\n wb = None\n producer_set = Producer.objects.filter(permanence=permanence).order_by(\n \"short_profile_name\"\n )\n for producer in producer_set:\n wb = generate_producer_xlsx(permanence, producer=producer, wb=wb)\n if wb is not None:\n wb.save(response)\n return response\n\n @check_cancel_in_post\n @check_permanence(SaleStatus.PLANNED)\n def open_order(self, request, permanence_id, permanence=None):\n if \"apply\" in request.POST or \"apply-wo-mail\" in request.POST:\n send_mail = not (\"apply-wo-mail\" in request.POST)\n # open_order(permanence.id, send_mail)\n t = threading.Thread(target=open_order, args=(permanence.id, send_mail))\n t.start()\n user_message = _(\"The offers are being generated.\")\n user_message_level = messages.INFO\n self.message_user(request, user_message, user_message_level)\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n\n template_offer_mail = []\n template_cancel_order_mail = []\n email_will_be_sent, email_will_be_sent_to = RepanierEmail.send_email_to_who()\n\n if email_will_be_sent:\n order_responsible = Staff.get_or_create_order_responsible()\n\n template = Template(\n repanier.apps.REPANIER_SETTINGS_CONFIG.offer_customer_mail_v2\n )\n offer_description = permanence.offer_description_v2\n offer_producer = \", \".join(\n [p.short_profile_name for p in permanence.producers.all()]\n )\n qs = Product.objects.filter(\n producer=permanence.producers.first(),\n is_into_offer=True,\n order_unit__lt=OrderUnit.DEPOSIT, # Don't display technical products.\n ).order_by(\"long_name_v2\")[:5]\n offer_detail = \"
<ul>{}</ul>\".format(\n \"\".join(\n \"<li>{producer}, {product}</li>\".format(\n producer=p.producer.short_profile_name,\n product=p.get_long_name_with_customer_price(),\n )\n for p in qs\n )\n )\n context = TemplateContext(\n {\n \"offer_description\": mark_safe(offer_description),\n \"offer_detail\": offer_detail,\n \"offer_recent_detail\": offer_detail,\n \"offer_producer\": offer_producer,\n \"permanence_link\": mark_safe(\n '{}'.format(permanence)\n ),\n \"signature\": order_responsible[\"html_signature\"],\n }\n )\n template_offer_mail.append(template.render(context))\n\n if settings.REPANIER_SETTINGS_CUSTOMER_MUST_CONFIRM_ORDER:\n template = Template(\n repanier.apps.REPANIER_SETTINGS_CONFIG.cancel_order_customer_mail_v2\n )\n\n context = TemplateContext(\n {\n \"name\": _(\"Long name\"),\n \"long_basket_name\": _(\"Long name\"),\n \"basket_name\": _(\"Short name\"),\n \"short_basket_name\": _(\"Short name\"),\n \"permanence_link\": mark_safe(\n '{}'.format(permanence)\n ),\n \"signature\": order_responsible[\"html_signature\"],\n }\n )\n template_cancel_order_mail.append(template.render(context))\n\n form = OpenAndSendOfferForm(\n initial={\n \"template_offer_customer_mail\": mark_safe(\n \"<br>==============<br>\".join(template_offer_mail)\n ),\n \"template_cancel_order_customer_mail\": mark_safe(\n \"<br>==============<br>
    \".join(template_cancel_order_mail)\n ),\n }\n )\n template_name = get_repanier_template_name(\"admin/confirm_open_order.html\")\n return render(\n request,\n template_name,\n {\n **self.admin_site.each_context(request),\n \"action_checkbox_name\": ACTION_CHECKBOX_NAME,\n \"action\": \"open_order\",\n \"permanence\": permanence,\n \"form\": form,\n \"email_will_be_sent\": email_will_be_sent,\n \"email_will_be_sent_to\": email_will_be_sent_to,\n },\n )\n\n @check_cancel_in_post\n @check_permanence(SaleStatus.OPENED)\n def close_order(self, request, permanence_id, permanence=None):\n\n if \"apply\" in request.POST or \"apply-wo-mail\" in request.POST:\n # request.POST.get(\"all-deliveries\") return None if not set and \"on\" if set\n everything = not permanence.with_delivery_point or (\n True if request.POST.get(\"all-deliveries\") else False\n )\n deliveries_to_be_send = request.POST.getlist(\"deliveries\", [])\n # logger.debug(\n # \"all_deliveries : {}\".format(request.POST.get(\"all-deliveries\"))\n # )\n # logger.debug(\"everything : {}\".format(everything))\n # logger.debug(\"deliveries_to_be_send : {}\".format(deliveries_to_be_send))\n if (\n permanence.with_delivery_point\n and not everything\n and len(deliveries_to_be_send) == 0\n ):\n user_message = _(\"You must select at least one delivery point.\")\n user_message_level = messages.WARNING\n self.message_user(request, user_message, user_message_level)\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n send_mail = not (\"apply-wo-mail\" in request.POST)\n # close_order(permanence.id, everything, deliveries_to_be_send, send_mail)\n t = threading.Thread(\n target=close_order,\n args=(permanence.id, everything, deliveries_to_be_send, send_mail),\n )\n t.start()\n user_message = _(\"The orders are being send.\")\n user_message_level = messages.INFO\n self.message_user(request, user_message, user_message_level)\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n\n template_order_customer_mail = []\n template_order_producer_mail = []\n template_order_staff_mail = []\n email_will_be_sent, email_will_be_sent_to = RepanierEmail.send_email_to_who()\n (\n order_customer_email_will_be_sent,\n order_customer_email_will_be_sent_to,\n ) = RepanierEmail.send_email_to_who(is_email_send=True)\n (\n order_producer_email_will_be_sent,\n order_producer_email_will_be_sent_to,\n ) = RepanierEmail.send_email_to_who(is_email_send=True)\n (\n order_board_email_will_be_sent,\n order_board_email_will_be_sent_to,\n ) = RepanierEmail.send_email_to_who(\n is_email_send=repanier.apps.REPANIER_SETTINGS_SEND_ORDER_MAIL_TO_BOARD,\n board=True,\n )\n\n if email_will_be_sent:\n\n order_responsible = Staff.get_or_create_order_responsible()\n\n if order_customer_email_will_be_sent:\n template = Template(\n repanier.apps.REPANIER_SETTINGS_CONFIG.order_customer_mail_v2\n )\n\n customer_last_balance = _(\n \"The balance of your account as of %(date)s is %(balance)s.\"\n ) % {\n \"date\": timezone.now().strftime(settings.DJANGO_SETTINGS_DATE),\n \"balance\": RepanierMoney(123.45),\n }\n customer_on_hold_movement = _(\n \"This balance does not take into account payments made after the last accounting entry (for an amount of %(bank)s), nor does it take into account orders not yet accounted (for an amount of %(other_order)s)\"\n ) % {\n \"bank\": RepanierMoney(123.45),\n \"other_order\": RepanierMoney(123.45),\n }\n\n bank_account_number = repanier.apps.REPANIER_SETTINGS_BANK_ACCOUNT\n if bank_account_number is not None:\n group_name 
= settings.REPANIER_SETTINGS_GROUP_NAME\n\n if permanence.short_name_v2:\n communication = \"{} ({})\".format(\n _(\"Short name\"), permanence.short_name_v2\n )\n else:\n communication = _(\"Short name\")\n customer_payment_needed = '{}'.format(\n _(\n \"Please pay a provision of %(payment)s to the bank account %(name)s %(number)s with communication %(communication)s.\"\n )\n % {\n \"payment\": RepanierMoney(123.45),\n \"name\": group_name,\n \"number\": bank_account_number,\n \"communication\": communication,\n }\n )\n else:\n customer_payment_needed = EMPTY_STRING\n context = TemplateContext(\n {\n \"name\": _(\"Long name\"),\n \"long_basket_name\": _(\"Long name\"),\n \"basket_name\": _(\"Short name\"),\n \"short_basket_name\": _(\"Short name\"),\n \"permanence_link\": mark_safe(\n '{}'.format(permanence)\n ),\n \"last_balance\": mark_safe(\n '{}'.format(customer_last_balance)\n ),\n \"order_amount\": RepanierMoney(123.45),\n \"on_hold_movement\": mark_safe(customer_on_hold_movement),\n \"payment_needed\": mark_safe(customer_payment_needed),\n \"delivery_point\": _(\"Delivery point\").upper(),\n \"signature\": order_responsible[\"html_signature\"],\n }\n )\n\n template_order_customer_mail.append(template.render(context))\n\n if order_producer_email_will_be_sent:\n template = Template(\n repanier.apps.REPANIER_SETTINGS_CONFIG.order_producer_mail_v2\n )\n context = TemplateContext(\n {\n \"name\": _(\"Long name\"),\n \"long_profile_name\": _(\"Long name\"),\n \"order_empty\": False,\n \"duplicate\": True,\n \"permanence_link\": format_html(\n '{}', permanence\n ),\n \"signature\": order_responsible[\"html_signature\"],\n }\n )\n\n template_order_producer_mail.append(template.render(context))\n\n if order_board_email_will_be_sent:\n board_composition = permanence.get_html_board_composition()\n template = Template(\n repanier.apps.REPANIER_SETTINGS_CONFIG.order_staff_mail_v2\n )\n context = TemplateContext(\n {\n \"permanence_link\": format_html(\n '{}', permanence\n ),\n \"board_composition\": board_composition,\n \"board_composition_and_description\": board_composition,\n \"signature\": order_responsible[\"html_signature\"],\n }\n )\n\n template_order_staff_mail.append(template.render(context))\n\n form = CloseAndSendOrderForm(\n initial={\n \"template_order_customer_mail\": mark_safe(\n \"
<br>==============<br>\".join(template_order_customer_mail)\n ),\n \"template_order_producer_mail\": mark_safe(\n \"<br>==============<br>\".join(template_order_producer_mail)\n ),\n \"template_order_staff_mail\": mark_safe(\n \"<br>==============<br>
    \".join(template_order_staff_mail)\n ),\n }\n )\n if permanence.with_delivery_point:\n deliveries = DeliveryBoard.objects.filter(\n permanence_id=permanence.id,\n status__in=[SaleStatus.OPENED, SaleStatus.CLOSED],\n )\n else:\n deliveries = DeliveryBoard.objects.none()\n template_name = get_repanier_template_name(\"admin/confirm_close_order.html\")\n return render(\n request,\n template_name,\n {\n **self.admin_site.each_context(request),\n \"action_checkbox_name\": ACTION_CHECKBOX_NAME,\n \"action\": \"close_order\",\n \"permanence\": permanence,\n \"deliveries\": deliveries,\n \"form\": form,\n \"email_will_be_sent\": email_will_be_sent,\n \"order_customer_email_will_be_sent_to\": order_customer_email_will_be_sent_to,\n \"order_producer_email_will_be_sent_to\": order_producer_email_will_be_sent_to,\n \"order_board_email_will_be_sent_to\": order_board_email_will_be_sent_to,\n },\n )\n\n @check_cancel_in_post\n @check_permanence(SaleStatus.OPENED)\n def back_to_scheduled(self, request, permanence_id, permanence=None):\n if \"apply\" in request.POST:\n task_order.back_to_scheduled(permanence)\n user_message = _('The permanence is back to \"Scheduled\".')\n user_message_level = messages.INFO\n self.message_user(request, user_message, user_message_level)\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n template_name = get_repanier_template_name(\"admin/confirm_action.html\")\n return render(\n request,\n template_name,\n {\n **self.admin_site.each_context(request),\n \"model_verbose_name_plural\": _(\"Offers in preparation\"),\n \"sub_title\": _(\"Please, confirm the action : back to scheduled\"),\n \"action\": \"back_to_scheduled\",\n \"permanence\": permanence,\n \"action_checkbox_name\": ACTION_CHECKBOX_NAME,\n },\n )\n\n @check_cancel_in_post\n @check_permanence(SaleStatus.PLANNED)\n def generate_permanence(self, request, permanence_id, permanence=None):\n if \"apply\" in request.POST:\n form = GeneratePermanenceForm(request.POST)\n if form.is_valid():\n recurrences = form.cleaned_data[\"recurrences\"]\n dates = get_recurrence_dates(permanence.permanence_date, recurrences)\n creation_counter = permanence.duplicate(dates)\n if creation_counter == 0:\n user_message = _(\"Nothing to do.\")\n elif creation_counter == 1:\n user_message = _(\"{} duplicate created.\").format(creation_counter)\n else:\n user_message = _(\"{} duplicates created.\").format(creation_counter)\n user_message_level = messages.INFO\n self.message_user(request, user_message, user_message_level)\n return HttpResponseRedirect(self.get_redirect_to_change_list_url())\n else:\n form = GeneratePermanenceForm()\n template_name = get_repanier_template_name(\n \"admin/confirm_generate_permanence.html\"\n )\n return render(\n request,\n template_name,\n {\n **self.admin_site.each_context(request),\n \"action\": \"generate_permanence\",\n \"permanence\": permanence,\n \"permanenceboard\": PermanenceBoard.objects.filter(\n permanence=permanence_id\n ).order_by(\"permanence_role\"),\n \"deliverypoint\": DeliveryBoard.objects.filter(\n permanence=permanence_id\n ).order_by(\"delivery_point\"),\n \"form\": form,\n \"action_checkbox_name\": ACTION_CHECKBOX_NAME,\n },\n )\n\n def get_row_actions(self, permanence):\n\n if permanence.status == SaleStatus.PLANNED.value:\n return format_html(\n '
<a href=\"{}\">{}</a> '\n '<a href=\"{}\">{}</a>',\n add_filter(reverse(\"admin:generate-permanence\", args=[permanence.pk])),\n _(\"Duplicate\"),\n add_filter(\n reverse(\"admin:permanence-open-order\", args=[permanence.pk])\n ),\n _(\"Open orders\"),\n )\n elif permanence.status == SaleStatus.OPENED.value:\n return format_html(\n '
<div class=\"repanier-button-row\">'\n            '<a class=\"repanier-a-tooltip\" href=\"{}\" data-repanier-tooltip=\"{}\"><i class=\"fas fa-pencil-alt\"></i></a> '\n            '<a class=\"repanier-a-tooltip\" href=\"{}\" data-repanier-tooltip=\"{}\"><i class=\"fas fa-lock\"></i></a>'\n            \"</div>
    \",\n add_filter(\n reverse(\"admin:permanence-back-to-scheduled\", args=[permanence.pk])\n ),\n _(\"Modify the offer\"),\n add_filter(\n reverse(\"admin:permanence-close-order\", args=[permanence.pk])\n ),\n _(\"Close orders\"),\n )\n return EMPTY_STRING\n\n get_row_actions.short_description = EMPTY_STRING\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.filter(status__lte=SaleStatus.SEND.value)\n","repo_name":"pcolmant/repanier","sub_path":"repanier/admin/permanence_in_preparation.py","file_name":"permanence_in_preparation.py","file_ext":"py","file_size_in_byte":28342,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"57"} +{"seq_id":"32578021146","text":"while True:\n flag = True\n num = input()\n if num == \"0\":\n break\n\n for i in range(len(num) // 2):\n if num[i] != num[-1 - i]:\n print(\"no\")\n flag = False\n break\n\n if flag:\n print(\"yes\")\n","repo_name":"yhsphd/BOJ-solutions","sub_path":"1259.py","file_name":"1259.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"10402349719","text":"N,M=map(int,input().split())\nA=[int(b) for b in input().split()]\ncnt=0\nans=sum(A)/(4*M)\n#print(ans)\n\nfor i in range(N):\n if A[i] >=ans: #未満は选べない,以上を选ぶ(最后のケース)\n cnt+=1\n#print(cnt)\n\nif cnt >= M:\n print('Yes')\nelse: print('No')\n","repo_name":"yamabook37/atcoder","sub_path":"ABC_problems/abc161_b.py","file_name":"abc161_b.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"238693125","text":"\"\"\"Conan.io recipe for pcap library\n\"\"\"\nfrom os import path\nfrom tempfile import mkdtemp\nfrom conans import AutoToolsBuildEnvironment, tools, ConanFile\nfrom conans.errors import ConanException\n\n\nclass LibPcapConan(ConanFile):\n \"\"\"Donwload pcap library, build and create package\n \"\"\"\n name = \"libpcap\"\n version = \"1.8.1\"\n generators = \"cmake\", \"txt\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"enable_dbus\": [True, False],\n \"enable_bluetooth\": [True, False],\n \"enable_usb\": [True, False],\n \"enable_packet_ring\": [True, False],\n \"disable_universal\": [True, False]\n }\n url = \"http://github.com/uilianries/conan-libpcap\"\n author = \"Uilian Ries \"\n description = \"An API for capturing network traffic\"\n license = \"https://github.com/the-tcpdump-group/libpcap/blob/master/LICENSE\"\n default_options = \"shared=True\", \"enable_dbus=False\", \"enable_bluetooth=False\", \"enable_usb=False\", \"enable_packet_ring=False\", \"disable_universal=False\"\n libpcap_dir = \"%s-%s-%s\" % (name, name, version)\n install_dir = mkdtemp(suffix=name)\n\n def build_requirements(self):\n if self.settings.os == \"Linux\":\n package_tool = tools.SystemPackageTool()\n package_tool.install(packages=\"bison flex\")\n\n def _is_amd64_to_i386(self):\n return self.settings.arch == \"x86\" and tools.detected_architecture() == \"x86_64\"\n\n def system_requirements(self):\n if self.settings.os == \"Linux\":\n arch = \":i386\" if self._is_amd64_to_i386() else \"\"\n package_list = []\n if self.options.enable_dbus:\n package_list.extend([\"libdbus-glib-1-dev%s\" % arch, \"libdbus-1-dev\"])\n if self.options.enable_bluetooth:\n package_list.append(\"libbluetooth-dev%s\" % arch)\n if self.options.enable_usb:\n 
package_list.append(\"libusb-1.0-0-dev%s\" % arch)\n if self.options.enable_packet_ring:\n package_list.append(\"libnl-genl-3-dev%s\" % arch)\n if package_list:\n package_tool = tools.SystemPackageTool()\n package_tool.install(packages=\" \".join(package_list))\n\n def source(self):\n tools.get(\"https://github.com/the-tcpdump-group/libpcap/archive/libpcap-%s.tar.gz\" % self.version)\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanException(\"For Windows use WinPcap/4.1.2@RoliSoft/stable\")\n del self.settings.compiler.libcxx\n\n def build(self):\n with tools.chdir(self.libpcap_dir):\n env_build = AutoToolsBuildEnvironment(self)\n configure_args = [\"--prefix=%s\" % self.install_dir]\n configure_args.append(\"--enable-shared\" if self.options.shared else \"--disable-shared\")\n configure_args.append(\"--disable-universal\" if not self.options.disable_universal else \"\")\n configure_args.append(\"--enable-dbus\" if self.options.enable_dbus else \"--disable-dbus\")\n configure_args.append(\"--enable-bluetooth\" if self.options.enable_bluetooth else \"--disable-bluetooth\")\n configure_args.append(\"--enable-usb\" if self.options.enable_usb else \"--disable-usb\")\n configure_args.append(\"--enable-packet-ring\" if self.options.enable_packet_ring else \"--disable-packet-ring\")\n # Cross compile x86_64 to x86 needs --with-pcap\n if self.settings.os == \"Macos\" and self.settings.arch == \"x86\":\n configure_args.append(\"--with-pcap=null\")\n env_build.fpic = True\n env_build.configure(args=configure_args)\n env_build.make(args=[\"all\"])\n env_build.make(args=[\"install\"])\n\n def package(self):\n self.copy(\"LICENSE\", src=self.libpcap_dir, dst=\".\")\n self.copy(pattern=\"*.h\", dst=\"include\", src=path.join(self.install_dir, \"include\"))\n if self.options.shared:\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=path.join(self.install_dir, \"lib\"), keep_path=False)\n self.copy(pattern=\"*.dylib\", dst=\"lib\", src=path.join(self.install_dir, \"lib\"), keep_path=False)\n else:\n self.copy(pattern=\"*.a\", dst=\"lib\", src=path.join(self.install_dir, \"lib\"), keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"pcap\"]\n if self.settings.os == \"Linux\":\n if self.options.enable_dbus:\n self.cpp_info.libs.append(\"dbus-glib-1\")\n self.cpp_info.libs.append(\"dbus-1\")\n if self.options.enable_bluetooth:\n self.cpp_info.libs.append(\"bluetooth\")\n if self.options.enable_usb:\n self.cpp_info.libs.append(\"usb-1.0\")\n if self.options.enable_packet_ring:\n self.cpp_info.libs.append(\"nl-genl-3\")\n self.cpp_info.libs.append(\"nl-3\")\n","repo_name":"uilianries/conan-libpcap","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"8992711706","text":"from collections import deque\ndef solution(maps):\n map_y = len(maps)\n map_x = len(maps[0])\n flag =False\n for idx,i in enumerate(maps):\n for jdx,j in enumerate(i):\n if j=='S':\n s_y,s_x,l_c = bfs(maps,map_y,map_x,idx,jdx,'L')\n if s_y == -1:\n return -1\n flag = True\n break\n if flag:\n break\n answer = bfs(maps,map_y,map_x,s_y,s_x,'E')\n if l_c !=-1 and answer[0]!= -1:\n return l_c+answer[0]\n else:\n return -1\n\ndef bfs(maps, map_y, map_x, start_y,start_x, target):\n dx = [1,-1,0,0]\n dy = [0,0,1,-1]\n cnt=0\n visit = [[0 for _ in range(map_x)] for _ in range(map_y)] \n queue=deque()\n queue.append((start_y,start_x,cnt))\n while queue:\n s_y 
, s_x, c = queue.popleft()\n        if maps[s_y][s_x] == target:\n            if target== 'L':\n                return (s_y,s_x, c)\n            else :\n                return (c,-1,-1)\n        for i in range(4):\n            nx = dx[i] + s_x\n            ny = dy[i] + s_y\n            if 0<= ny <map_y and 0<= nx <map_x and maps[ny][nx] != 'X' and (visit[ny][nx] == 0 or visit[ny][nx] > c+1) :\n                visit[ny][nx] = c+1 \n                queue.append((ny,nx,c+1))\n    return (-1,-1,-1)\n    \n\nprint(solution([\"SOOOL\",\"XXXXO\",\"OOOOO\",\"OXXXX\",\"OOOOE\"]))","repo_name":"denhur62/Python-Algorithm","sub_path":"programmers/level2/10회차/미로찾기.py","file_name":"미로찾기.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"11257949947","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nN, M = map(int, input().split())\r\nnohear = set()\r\nnosee = set()\r\n\r\nfor _ in range(N):\r\n    nohear.add(input().rstrip())\r\n\r\nfor _ in range(M):\r\n    nosee.add(input().rstrip())\r\n\r\nnohs = sorted(nohear & nosee)\r\nprint(len(nohs))\r\nprint(*nohs, sep=\"\\n\")","repo_name":"hi9900/algorithm","sub_path":"백준/Silver/1764. 듣보잡/듣보잡.py","file_name":"듣보잡.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"4490472164","text":"#!/usr/bin/python3\n\"\"\"Module\"\"\"\n\n\ndef append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n    \"\"\"Append after\"\"\"\n    pg = \"\"\n    if filename != \"\" and filename is not None:\n        with open(filename, encoding=\"utf-8\") as f:\n            line = None\n            while line != \"\":\n                line = f.readline()\n                pg = pg + line\n                if search_string in line:\n                    pg = pg + new_string\n\n        with open(filename, \"w\", encoding=\"utf-8\") as n:\n            n.write(pg)\n","repo_name":"Chibuzor27/alx-higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"22826911025","text":"import os\nimport pathlib\nfrom setuptools import setup, find_packages\n\nHERE = pathlib.Path(__file__).parent\n\nVERSION = '0.1'\nPACKAGE_NAME = 'mintscore'\nAUTHOR = 'Markus Dreyer'\nAUTHOR_EMAIL = 'mddreyer@amazon.com'\nURL = 'https://github.com/markusdr'\n\nLICENSE = 'MIT No Attribution'\nDESCRIPTION = 'MINT score to measure abstractiveness'\nLONG_DESCRIPTION = (HERE / \"README.md\").read_text()\nLONG_DESC_TYPE = \"text/markdown\"\n\nPATH=os.path.dirname(os.path.realpath(__file__))\nMINTLCS_PATH=os.path.realpath(f'{PATH}/../mintlcs')\n\nINSTALL_REQUIRES = [\n    \"spacy<3.4\",\n    \"scipy\",\n    \"joblib\",\n    f\"mintlcs @ file://localhost/{MINTLCS_PATH}\"\n    ]\n\nsetup(name=PACKAGE_NAME,\n      version=VERSION,\n      description=DESCRIPTION,\n      long_description=LONG_DESCRIPTION,\n      long_description_content_type=LONG_DESC_TYPE,\n      author=AUTHOR,\n      license=LICENSE,\n      author_email=AUTHOR_EMAIL,\n      url=URL,\n      install_requires=INSTALL_REQUIRES,\n      packages=find_packages(),\n      entry_points={\n          \"console_scripts\": [\n              \"mint = mintscore.mint:run\"\n          ]\n      },\n)\n","repo_name":"amazon-science/abstractive-factual-tradeoff","sub_path":"mintscore/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"57"}
{"seq_id":"3186061629","text":"import itertools\nfrom typing import List\n\ndef parse_input(file_path: str) -> List[int]:\n    with open(file_path) as f:\n        numbers = [int(l) for l in f.readlines()]\n    return numbers\n\ndef find_num(inp: List[int], length_preamble: 
int=25) -> int:\n for i, x in enumerate(inp[length_preamble:]):\n num_set = inp[i:(i + length_preamble)]\n if x not in [a + b for a, b in itertools.combinations(num_set, 2)]:\n return x\n\ndef find_summands(x: int, inp: List[int], min_range_length: int=2):\n for rl in range(min_range_length, len(inp) + 1):\n for i in range(len(inp) - rl):\n summands = inp[i:(i + rl)]\n if sum(summands) == x:\n return summands\n\nif __name__ == '__main__':\n inp = parse_input('input.txt')\n x = find_num(inp)\n print(f'{x} is not the sum of two of 25 numbers before it.')\n summands = find_summands(x, inp)\n print(f'The contigous range {summands} sums up to: {sum(summands)}')\n print(('The sum of the smallest and largest number in this range is: '\n f'{min(summands) + max(summands)}'))\n","repo_name":"SteScheller/AoC_2020","sub_path":"9/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73898791538","text":"#\n# @lc app=leetcode.cn id=24 lang=python3\n#\n# [24] 两两交换链表中的节点\n#\n# https://leetcode-cn.com/problems/swap-nodes-in-pairs/description/\n#\n# algorithms\n# Medium (64.92%)\n# Likes: 455\n# Dislikes: 0\n# Total Accepted: 88.3K\n# Total Submissions: 135.8K\n# Testcase Example: '[1,2,3,4]'\n#\n# 给定一个链表,两两交换其中相邻的节点,并返回交换后的链表。\n# \n# 你不能只是单纯的改变节点内部的值,而是需要实际的进行节点交换。\n# \n# \n# \n# 示例:\n# \n# 给定 1->2->3->4, 你应该返回 2->1->4->3.\n# \n# \n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n # 为了能够输出结果链表,设置一个空节点,指向头结点\n dummy = ListNode(-1)\n dummy.next = head\n leftNode = dummy\n\n while head and head.next:\n midNode = head\n rightNode = head.next\n\n # Swapping\n leftNode.next = rightNode\n midNode.next = rightNode.next\n rightNode.next = midNode\n\n leftNode = midNode\n head = midNode.next\n\n return dummy.next\n# @lc code=end\n\n","repo_name":"darrenzhang1007/Algorithm","sub_path":"linked-list/24_两两交换链表中的节点/24_medium_两两交换链表中的节点.py","file_name":"24_medium_两两交换链表中的节点.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"97704913","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\ntry:\n import requests\nexcept:\n print(\"please wait installing 'requests' module... \")\n import subprocess\n\n subprocess.run(\"pip install requests\")\n print(\"\\n install finished. Re-start this tool.\")\n\nimport requests\n\n\ndef findAdmin():\n file = open(\"web.txt\", \"r\");\n link = \"http://www.\" + input(\"Enter Site Name \\n(ex : example.com or www.example.com ): \")\n aa = requests.get(link).status_code\n\n try:\n print(\"\\n\\nAvilable links : \\n\")\n while True:\n sub_link = file.readline()\n if not sub_link:\n break\n req_link = link + \"/\" + sub_link\n aa = requests.get(req_link).status_code\n\n try:\n if aa == 404:\n print(\"Trying... \" + sub_link)\n elif aa == 200:\n print(\"[FOUND] \", req_link, aa)\n # input(\"Press enter to continue or press CTRL+C to stop process. \")\n except:\n print(\"Process stopped. 
\")\n # input(\"\")\n except:\n print(\"Invalid url or check your internet connection.\")\n # input(\"Enter to exit \")\n\n\ndef Credit():\n print(\"#######################################\")\n print(\"# +++ Admin Panel Finder v1 +++ #\")\n print(\"# Developed by #\")\n print(\"# DZ Hacking Gang #\")\n print(\"#######################################\")\n\n\nCredit()\nfindAdmin()\n","repo_name":"farhanibne/admin-found","sub_path":"admin_panel_finder.py","file_name":"admin_panel_finder.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"74442173297","text":"from django.db import models\nfrom django.conf import settings\nfrom mdeditor.fields import MDTextField\n\n# Create your models here.\n\n\nclass Free(models.Model):\n check = models.BooleanField(default=False)\n title = models.CharField(max_length=50)\n create_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n content = MDTextField()\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n like_free = models.ManyToManyField(\n settings.AUTH_USER_MODEL, related_name=\"like_free\"\n )\n hits = models.PositiveIntegerField(default=0, verbose_name=\"조회수\")\n q = models.CharField(max_length=5, default=\"자유\")\n\n\nclass Photo(models.Model):\n free = models.ForeignKey(Free, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=\"images/\", blank=True)\n\n\nclass Comment(models.Model):\n content = models.TextField()\n free = models.ForeignKey(Free, on_delete=models.CASCADE, related_name=\"free_user\")\n updated_at = models.DateTimeField(auto_now=True)\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name=\"free_com_user\"\n )\n unname = models.BooleanField(default=True)\n\n\nclass ReComment1(models.Model):\n comment = models.ForeignKey(\n Comment, on_delete=models.CASCADE, related_name=\"free_comment_user\"\n )\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n body = models.CharField(\"답글\", max_length=200)\n updated_at = models.DateTimeField(auto_now=True)\n unname = models.BooleanField(default=True)\n","repo_name":"YoonDii/SISEON","sub_path":"free/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"3671310799","text":"from aiogram import types, F, Router\nfrom aiogram.types import Message\nfrom aiogram.filters import Command\nfrom aiogram import Dispatcher\n\nimport keyboard as kb\n\nimport constants\n\nfrom database_engine import session\n\nfrom models.article import Article\n\n\nrouter = Router()\ndp = Dispatcher()\n\n\n@router.message(Command('start'))\nasync def start_handler(msg: Message):\n await msg.answer('Я могу помочь Вам при решении шашлычных вопросов.', reply_markup = kb.menu_kb)\n await msg.answer(constants.MENU, reply_markup=kb.menu_kb)\n\n\n@router.message(F.text == constants.MENU)\nasync def menu(msg: Message):\n await msg.answer('👀 Куда теперь?', reply_markup = kb.menu_kb)\n\n\n@router.message(F.text == constants.PICKLE_GUIDE)\nasync def pickle_handler(msg: Message):\n await msg.answer('•🫙 Маринады – подобранные ботом рецепты\\n•📒 Мои рецепты – записать свои рецепты\\n•⭐️ Избранное – Ваши любимые рецепты от бота', reply_markup = kb.pickle_kb) \n\n\n@router.message(F.text == constants.FRYING_GUIDE)\nasync def frying_handler(msg: 
Message):\n await msg.answer('•🪵 Гайды по жарке – подобранные ботом руководства\\n•📒 Мои гайды – записать свои гайды\\n•⭐️ Избранное – Ваши любимые гайды от бота', \n reply_markup = kb.frying_kb) \n\n\n@router.message(F.text == constants.GALLERY)\nasync def gallery_handler(msg: Message):\n await msg.answer('👍🏻 Здесь хранятся Ваши шедевры', reply_markup = kb.gallery_kb) \n\n\n@router.message(F.text == '🪵 Гайды по жарке')\nasync def frying_guides_handler(msg: Message):\n articles = session.query(Article).all()\n articles_list_html = ''\n for i, article in enumerate(articles):\n articles_list_html += f'{i + 1}. {article.title}\\n'\n await msg.answer(articles_list_html, reply_markup = kb.guide_kb, disable_web_page_preview = True)\n","repo_name":"WhattIsLovee/shashlyk_bot","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43002384462","text":"\"\"\" \nUtility functions\n - @author Ying Li\n - PRECONDITIONS: various\n - POSTCONDITIONS: various\n - PARAMETERS: various\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\ndef add_date_cols(baskets, date_col = \"placed_at\"):\n baskets['datetime'] = pd.to_datetime(baskets[date_col])\n baskets['year'] = baskets[\"datetime\"].dt.year\n baskets['month'] = baskets[\"datetime\"].dt.month\n baskets['date'] = baskets[\"datetime\"].dt.date\n baskets['day'] = baskets[\"datetime\"].dt.day\n baskets['hour'] = baskets[\"datetime\"].dt.hour\n baskets['weekday'] = baskets[\"datetime\"].dt.weekday\n baskets['year_month'] = baskets[\"datetime\"].apply(lambda t: t.strftime(\"%Y-%m\"))\n baskets['month_num'] = (baskets['year'] - 2021) * 12 + baskets['month']\n baskets['year_week'] = baskets[\"datetime\"].apply(lambda t: t.strftime(\"%Y-%W\")) # this makes the beginning of Jan 2022 as week 2022-00 , not 2022-52\n baskets['week_num'] = baskets[\"datetime\"].apply(lambda t: int(t.strftime(\"%W\"))) \n baskets['iso_week_num'] = baskets[\"datetime\"].dt.isocalendar().week # this returns week number 52 for Jan 1, 2021, not 0 which is what we want\n baskets['cum_week_num'] = (baskets['year'] - 2021) * 52 + baskets['week_num']\n return baskets\n\ndef get_merchant_attributes(baskets):\n merchant_attributes = baskets.groupby(['merchant_id']).agg(\n total_spent = ('spent', 'sum'), \n num_orders = ('order_id', 'nunique'), \n first_month = ('month_num', 'min'), \n last_month = ('month_num', 'max'), \n num_months = ('month_num', 'nunique'), \n num_weeks = ('week_num', 'nunique'), \n num_days = ('date', 'nunique'), \n num_skus = ('sku_id','nunique'), \n num_top_cats = ('top_cat','nunique'), \n num_sub_cats = ('sub_cat','nunique'),\n ).reset_index()\n merchant_attributes['avg_spent_per_order'] = merchant_attributes.total_spent / merchant_attributes.num_orders\n merchant_attributes['tenure_month'] = merchant_attributes.last_month - merchant_attributes.first_month +1\n return merchant_attributes\n\ndef get_sku_attributes(baskets):\n sku_attributes = baskets.groupby(['sku_id']).agg(\n total_spent = ('spent', 'sum'), \n num_orders = ('order_id', 'nunique'), \n num_merchants = ('merchant_id', 'nunique'), \n first_month = ('month_num', 'min'), \n last_month = ('month_num', 'max'), \n num_months = ('month_num', 'nunique'), \n first_week = ('week_num', 'min'), \n last_week 
= ('week_num', 'max'), \n num_weeks = ('week_num', 'nunique'), \n num_days = ('date', 'nunique'), \n ).reset_index()\n sku_attributes['avg_spent_per_order'] = sku_attributes.total_spent / sku_attributes.num_orders\n sku_attributes['tenure_month'] = sku_attributes.last_month - sku_attributes.first_month +1\n return sku_attributes\n\ndef get_skus_by_week(baskets):\n skus_by_week = baskets.groupby(['sku_id','year_week']).agg(\n avg_price_by_week = ('price','mean'),\n total_spent_by_week = ('spent', 'sum'),\n num_order_by_week = ('order_id', 'nunique'), \n num_merchants_by_week = ('merchant_id', 'nunique'),\n ).reset_index()\n return skus_by_week\n\ndef get_skus_by_day(baskets):\n skus_by_day = baskets.groupby(['sku_id','date']).agg(\n avg_price_by_day = ('price','mean'),\n total_spent_by_day = ('spent', 'sum'),\n num_order_by_day = ('order_id', 'nunique'), \n num_merchants_by_day = ('merchant_id', 'nunique'),\n ).reset_index()\n return skus_by_day\n\ndef get_top_cat_attributes(baskets):\n top_cat_attributes = baskets.groupby(['top_cat']).agg(\n avg_price = ('price', 'mean'),\n total_spent = ('spent', 'sum'),\n total_quantity = ('qty' , 'sum'),\n num_orders = ('order_id', 'nunique'), \n num_days = ('date' , 'nunique'),\n num_merchants = ('merchant_id', 'nunique')\n ).reset_index()\n return top_cat_attributes","repo_name":"yingli/applieddatascience","sub_path":"sample_code/util_transaction_data.py","file_name":"util_transaction_data.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"71734149617","text":"from collections import deque\n\ndef bfs(S):\n Q = deque([S])\n cnt = 0\n while Q:\n x = Q.popleft()\n cnt += 1\n for v in adj[x]:\n Q.append(v)\n return cnt\n\nfor tc in range(1,int(input())+1):\n E, N = map(int,input().split())\n adj = [[] for _ in range(E+2)]\n adj_in = list(map(int,input().split()))\n for i in range(E):\n adj[adj_in[2*i]].append(adj_in[2*i+1])\n print(f'#{tc}', bfs(N))","repo_name":"Jeukoh/OJ","sub_path":"SWEA/Tree/5174_subtree.py","file_name":"5174_subtree.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10174555137","text":"#coding: utf8\n\nimport hashlib\nimport logging\nimport time\n\nimport pymysql\nimport pymysql.cursors\nimport requests\nimport telebot\n\nimport config\nimport secret_config\nimport text\nimport ujson\nimport utils\n\nbot = telebot.TeleBot(token = secret_config.token)\n\nclass DB:\n def __init__(self, host, user, db, password):\n self.host = host\n self.user = user\n self.password = password\n self.db = db\n self.charset = 'utf8mb4'\n self.cursorclass = pymysql.cursors.DictCursor\n\nclass DataConn:\n def __init__(self, db_obj):\n self.host = db_obj.host\n self.user = db_obj.user\n self.password = db_obj.password\n self.db = db_obj.db\n self.charset = db_obj.charset\n self.cursorclass = db_obj.cursorclass\n\n def __enter__(self):\n self.conn = pymysql.connect(\n host = self.host,\n user = self.user,\n password = self.password,\n db = self.db,\n charset = self.charset,\n cursorclass = self.cursorclass\n )\n return self.conn\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n if exc_val:\n raise\n \ndb = DB(\n host = secret_config.host,\n user = secret_config.user,\n password = secret_config.password,\n db = secret_config.db\n)\n\nif __name__ == '__main__':\n log_name = 'logs.txt'\n f = open(log_name,'w')\n f.close()\n 
print('Log file created')\n\ntelebot_logger = logging.getLogger('telebot')\nmysql_info = logging.getLogger('mysql')\nmain_info = logging.getLogger('main_info')\nreport_info = logging.getLogger('reports')\nprint('Logger list created')\n\nlogging.basicConfig(\n    format='%(filename)s [LINE:%(lineno)-3d]# %(levelname)-8s - %(name)-9s [%(asctime)s] - %(message)-50s ',\n    datefmt='%m/%d/%Y %I:%M:%S %p',\n    filename = 'logs.txt',\n    level = logging.INFO\n    )\n\n\ndef replacer(text):\n    text_list = list(text)\n    for i in range(len(text)):\n        if text_list[i] in config.restricted_characters:\n            text_list[i] = config.restricted_characters_replace[text_list[i]]\n    return ''.join(text_list)\n\n\n\ndef register_admins(chat_obj):\n    chat_id = chat_obj.id\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        for i in bot.get_chat_administrators(chat_id):\n            sql = 'SELECT * FROM `chat_admins` WHERE `admin_id` = %s AND `chat_id` = %s'\n            cursor.execute(sql, (i.user.id, chat_id))\n            res = cursor.fetchone()\n            if res is None:\n                sql = 'INSERT INTO `chat_admins` (`chat_id`, `chat_name`, `admin_name`, `admin_id`, `status`) VALUES (%s, %s, %s, %s, %s)'\n                cursor.execute(sql, (chat_id, chat_obj.title, i.user.first_name, i.user.id, i.status))\n                conn.commit()\n    \n\ndef ban_sticker(msg, sticker_id):\n    \"\"\"\n    Bans a sticker\\n\n    :param msg:\\n\n    :param sticker_id:\\n\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s AND `sticker_id` = %s'\n        cursor.execute(sql, (msg.chat.id, sticker_id))\n        res = cursor.fetchone()\n        if res is None:\n            sql = 'INSERT INTO `banned_stickers`(`chat_id`, `chat_name`, `sticker_id`, `ban_time`) VALUES (%s, %s, %s, %s)'\n            try:\n                cursor.execute(sql, (msg.chat.id, msg.chat.title, sticker_id, int(time.time())))\n                conn.commit()\n            except Exception as e:\n                print(sql)\n                print(e)\n        else:\n            if res['chat_name'] != msg.chat.title:\n                sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s'\n                cursor.execute(sql, (msg.chat.id, ))\n                res = cursor.fetchall()\n                for i in res:\n                    sql = 'UPDATE `banned_stickers` SET `chat_name` = %s WHERE `chat_id` = %s'\n                    cursor.execute(sql, (msg.chat.title, msg.chat.id))\n                    conn.commit()\n\ndef unban_sticker(msg, sticker_id):\n    \"\"\"\n    Unbans a sticker\\n\n    :param msg:\\n\n    :param sticker_id:\\n\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s and `sticker_id` = %s'\n        cursor.execute(sql, (msg.chat.id, sticker_id))\n        res = cursor.fetchone()\n        if res is not None:\n            sql = 'DELETE FROM `banned_stickers` WHERE `chat_id` = %s and `sticker_id` = %s'\n            cursor.execute(sql, (msg.chat.id, sticker_id))\n            conn.commit()\n            return True\n        else:\n            return False\n\ndef get_creator(chat_obj):\n    \"\"\"\n    Returns the chat creator object\\n\n    :param msg:\\n\n    \"\"\"\n    creator = bot.get_chat_administrators(chat_obj.id)[0].user\n    for i in bot.get_chat_administrators(chat_obj.id):\n        if i.status == 'creator':\n            creator = i.user\n    return creator\n\ndef register_new_user(user_obj, lang):\n    \"\"\"\n    Registers a new user\\n\n    :param user_obj:\\n\n    :param lang:\\n\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM `users` WHERE `user_id` = %s'\n        cursor.execute(sql, (user_obj.id, ))\n        res = cursor.fetchone()\n        sec_name = 'None'\n        try:\n            sec_name = user_obj.second_name\n        except Exception as e:\n            sec_name = 'None'\n            logging.error(e)\n        if res is None:\n            sql = 'INSERT INTO `users` (`user_id`, `registration_time`, `first_name`, `second_name`, 
`language`) VALUES (%s, %s, %s, %s, %s)'\n            cursor.execute(sql, (user_obj.id, int(time.time()), user_obj.first_name, sec_name, lang))\n            conn.commit()\n            sql = 'INSERT INTO `user_settings` (`user_id`, `registration_time`, `language`) VALUES (%s, %s, %s)'\n            cursor.execute(sql, (user_obj.id, int(time.time()), lang))\n            conn.commit()\n            utils.notify_new_user(user_obj, lang)\n        else:\n            sql = 'UPDATE `user_settings` SET `language` = %s WHERE `user_id` = %s'\n            cursor.execute(sql, (lang, user_obj.id))\n            conn.commit()\n\ndef register_new_chat(chat_obj):\n    \"\"\"\n    Registers a new chat\\n\n    :param msg:\\n\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM chats WHERE `chat_id` = %s'\n        cursor.execute(sql, (chat_obj.id, ))\n        res = cursor.fetchone()\n        if res is None:\n            creator = get_creator(chat_obj)\n            sql = 'INSERT INTO `chats` (`chat_id`, `chat_name`, `creator_name`, `creator_id`, `chat_members_count`, `registration_time`, `settings`) VALUES (%s, %s, %s, %s, %s, %s, %s)'\n            try:\n                cursor.execute(sql, (chat_obj.id, chat_obj.title, creator.first_name, creator.id, bot.get_chat_members_count(chat_obj.id), int(time.time()), ujson.dumps(config.default_group_settings)))\n                conn.commit()\n            except Exception as e:\n                logging.error('error: {}'.format(e))\n                logging.error(sql)\n            utils.notify_new_chat(chat_obj)\n            bot.send_message(\n                chat_obj.id,\n                text.group_commands['ru']['registration']\n            )\n            register_admins(chat_obj)\n        else:\n            register_admins(chat_obj)\n\ndef get_users_count():\n    \"\"\"\n    Returns the number of users in the database\\n\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT COUNT(`user_id`) FROM `users`'\n        cursor.execute(sql)\n        res = cursor.fetchone()\n        return res['COUNT(`user_id`)']\n\ndef get_chats_count():\n    \"\"\"\n    Returns the number of chats in the database\\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT COUNT(`chat_id`) FROM `chats`'\n        cursor.execute(sql)\n        res = cursor.fetchone()\n        return res['COUNT(`chat_id`)']\n\ndef get_user_param(user_id, column):\n    \"\"\"\n    Returns a single parameter of the user settings\n    :param msg:\n    :param column:\n    \"\"\"\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT `{column}` FROM `user_settings` WHERE `user_id` = %s'.format(\n            column = column\n        )\n        cursor.execute(sql, (user_id, ))\n        res = cursor.fetchone()\n        return res[column]\n\ndef get_user_params(user_id):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT `settings` FROM `users` WHERE `user_id` = %s'\n        cursor.execute(sql, (user_id, ))\n        res = cursor.fetchone()\n        return res\n\ndef set_user_param(user_id, column, state):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'UPDATE `user_settings` SET `{column}` = %s WHERE `user_id` = %s'.format(\n            column = column\n        )\n        cursor.execute(sql, (state, user_id))\n        conn.commit()\n\ndef get_group_params(chat_id):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM `chats` WHERE `chat_id` = %s'\n        cursor.execute(sql, (chat_id, ))\n        res = cursor.fetchone()\n        try:\n            
ujson.loads(res['settings'])['get_notifications']\n\ndef change_group_params(chat_id, new_params):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'UPDATE `chats` SET `settings` = %s WHERE `chat_id` = %s'\n try:\n cursor.execute(sql, (new_params, chat_id))\n conn.commit()\n except Exception as e:\n print(e)\n print(sql)\n\n\ndef is_user_new(msg):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT * FROM users WHERE `user_id` = %s'\n cursor.execute(sql, (msg.from_user.id, ))\n r = cursor.fetchone()\n if r is None:\n res = True\n else:\n res = False\n return res\n\ndef check_sticker(sticker_id, chat_id):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT * FROM `banned_stickers` WHERE `sticker_id` = %s AND `chat_id` = %s'\n cursor.execute(sql, (sticker_id, chat_id))\n r = cursor.fetchone()\n if r is None:\n return False\n else:\n return True\n\ndef get_warns(user_id, chat_id):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT * FROM `warns` WHERE `user_id` = %s AND `chat_id` = %s'\n cursor.execute(sql, (user_id, chat_id))\n res = cursor.fetchone()\n if res is None:\n sql = 'INSERT INTO `warns`(`user_id`, `chat_id`, `warns`) VALUES (%s, %s, %s)'\n warns = 0\n cursor.execute(sql, (user_id, chat_id, warns))\n conn.commit()\n else:\n warns = int(res['warns'])\n return warns\n\ndef new_warn(user_id, chat_id):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n warns = get_warns(user_id, chat_id)\n warns += 1\n set_warns(user_id, chat_id, warns)\n\ndef zeroing_warns(user_id, chat_id):\n set_warns(user_id, chat_id, 0)\n\ndef set_warns(user_id, chat_id, warns):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'UPDATE `warns` SET `warns` = %s WHERE `user_id` = %s AND `chat_id` = %s'\n cursor.execute(sql, (warns, user_id, chat_id))\n conn.commit()\n\ndef get_chats():\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT * FROM `chats` ORDER BY `registration_time` ASC'\n cursor.execute(sql)\n res = cursor.fetchall()\n return res\n\ndef get_all():\n all_chats = []\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT * FROM `chats` ORDER BY `registration_time` ASC'\n cursor.execute(sql)\n res = cursor.fetchall()\n all_chats.extend(res)\n sql = 'SELECT * FROM `users` ORDER BY `registration_time` ASC'\n cursor.execute(sql)\n res = cursor.fetchall()\n all_chats.extend(res)\n return all_chats\n\ndef replacerr(text):\n text_list = list(text) \n for idx, word in enumerate(text):\n if word in config.restricted_characters:\n text_list[idx] = config.restricted_characters_replace[word]\n return ''.join(text_list)\n\ndef escape_string(value):\n # value = value.replace('\\\\', r'\\\\\\\\')\n # value = value.replace('\\0', r'\\\\0')\n # value = value.replace('\\n', r'\\\\n')\n # value = value.replace('\\r', r'\\\\r')\n # value = value.replace('\\032', r'\\\\Z')\n value = value.replace(\"'\", r\"\\'\")\n value = value.replace('\"', r'\\\"')\n return value\n\ndef update_stats_bot(count):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'INSERT INTO `stats` (`amount`, `check_time`) VALUES (%s, %s)'\n cursor.execute(sql, (count, int(time.time())))\n conn.commit()\n\ndef delete_pending():\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'DELETE * FROM `stats`'\n cursor.execute(sql)\n conn.commit()\n\ndef check_global_ban(user_id):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT * FROM `global_bans` WHERE `user_id` = %s'\n cursor.execute(sql, (user_id, 
))\n res = cursor.fetchone()\n if res is None:\n return False\n else:\n return True\n\ndef global_ban(user_id):\n with DataConn(db) as conn:\n if not check_global_ban(user_id):\n cursor = conn.cursor()\n sql = 'INSERT INTO `global_bans` (`user_id`) VALUES (%s)'\n cursor.execute(sql, (user_id, ))\n conn.commit()\n\ndef global_unban(user_id):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'DELETE FROM `global_bans` WHERE `user_id` = %s'\n cursor.execute(sql, (user_id, ))\n conn.commit()\n\ndef new_update(msg, end_time):\n user_id = msg.from_user.id\n chat_id = msg.chat.id\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'INSERT INTO `proceeded_updates` (`user_id`, `chat_id`, `msg_time`, `used_time`, `proceeded_at`) VALUES (%s, %s, %s, %s, %s)'\n try:\n cursor.execute(sql, (user_id, chat_id, msg.date, end_time*1000, int(time.time())))\n conn.commit()\n except Exception as e:\n logging.error(e)\n try:\n new_content(msg, end_time)\n except Exception as e:\n logging.error(e)\n try:\n update_chat_stats(msg)\n except Exception as e:\n logging.error(e)\n try:\n update_user_stats(msg)\n except Exception as e:\n logging.error(e)\n\ndef update_user_stats(msg):\n user_id = msg.from_user.id\n chat_id = msg.chat.id\n chat_name = msg.chat.title\n user_name = msg.from_user.first_name\n with DataConn(db) as conn:\n cursor = conn.cursor()\n current_updates = get_user_messages_count(user_id, chat_id)\n sql = 'SELECT * FROM `most_active_users` WHERE `user_id` = %s AND `chat_id` = %s'\n cursor.execute(sql, (user_id, chat_id))\n res = cursor.fetchone()\n if res is None:\n sql = 'INSERT INTO `most_active_users` (`user_id`, `user_name`, `chat_id`, `chat_name`, `amount`) VALUES (%s, %s, %s, %s, %s)'\n cursor.execute(sql, (user_id, user_name, chat_id, chat_name, current_updates))\n conn.commit()\n else:\n sql = 'UPDATE `most_active_users` SET `user_name` = %s, `amount` = %s WHERE `user_id` = %s AND `chat_id` = %s'\n cursor.execute(sql, (user_name, current_updates, user_id, chat_id))\n sql = 'UPDATE `most_active_users` SET `chat_name` = %s WHERE `chat_id` = %s'\n cursor.execute(sql, (chat_name, chat_id))\n conn.commit()\n \n\ndef get_user_messages_count(user_id, chat_id):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT `amount` FROM `most_active_users` WHERE `chat_id` = %s AND `user_id` = %s'\n cursor.execute(sql, (chat_id, user_id))\n res = cursor.fetchone()\n return res['amount']\n\ndef update_chat_stats(msg):\n with DataConn(db) as conn:\n cursor = conn.cursor()\n current_updates = get_chat_updates_count(msg.chat.id)\n sql = 'SELECT * FROM `most_popular_chats` WHERE `chat_id` = %s'\n cursor.execute(sql, (msg.chat.id, ))\n res = cursor.fetchone()\n if res is None:\n sql = 'INSERT INTO `most_popular_chats` (`updates_count`, `chat_id`, `chat_name`, `last_update`) VALUES (%s, %s, %s, %s)'\n cursor.execute(sql, (current_updates, msg.chat.id, msg.chat.title, msg.date))\n try:\n conn.commit()\n except Exception as e:\n logging.error(e)\n logging.error(sql)\n else:\n sql = 'UPDATE `most_popular_chats` SET `updates_count` = %s, `chat_name` = %s, `last_update` = %s WHERE `chat_id` = %s'\n cursor.execute(sql, (current_updates, msg.chat.title, msg.date, msg.chat.id))\n try:\n conn.commit()\n except Exception as e:\n logging.error(e)\n logging.error(sql)\n\ndef get_chat_updates_count(chat_id): \n with DataConn(db) as conn:\n cursor = conn.cursor()\n sql = 'SELECT `updates_count` FROM `most_popular_chats` WHERE `chat_id` = %s'\n cursor.execute(sql, (chat_id, ))\n res = 
cursor.fetchone()\n        return int(res['updates_count'])\n\ndef get_file_size(msg):\n    res = 0\n    if msg.content_type == 'audio':\n        res = msg.audio.file_size\n    elif msg.content_type == 'document':\n        res = msg.document.file_size\n    elif msg.content_type == 'photo':\n        res = msg.photo[-1].file_size\n    elif msg.content_type == 'sticker':\n        res = msg.sticker.file_size\n    elif msg.content_type == 'video':\n        res = msg.video.file_size\n    elif msg.content_type == 'video_note':\n        res = msg.video_note.file_size\n    elif msg.content_type == 'voice':\n        res = msg.voice.file_size\n    return res\n\ndef get_file_id(msg):\n    res = ''\n    if msg.content_type == 'audio':\n        res = msg.audio.file_id\n    elif msg.content_type == 'document':\n        res = msg.document.file_id\n    elif msg.content_type == 'photo':\n        res = msg.photo[-1].file_id\n    elif msg.content_type == 'sticker':\n        res = msg.sticker.file_id\n    elif msg.content_type == 'video':\n        res = msg.video.file_id\n    elif msg.content_type == 'video_note':\n        res = msg.video_note.file_id\n    elif msg.content_type == 'voice':\n        res = msg.voice.file_id\n    return res\n\n\ndef new_message(msg, end_time):\n    user_id = msg.from_user.id\n    chat_id = msg.chat.id\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'INSERT INTO `proceeded_messages` (`user_id`, `chat_id`, `msg_time`, `used_time`, `proceeded_at`, `content_type`) VALUES (%s, %s, %s, %s, %s, %s)'\n        cursor.execute(sql, (user_id, chat_id, msg.date, end_time*1000, int(time.time()), msg.content_type))\n        conn.commit()\n\ndef new_member(msg):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'INSERT INTO `new_chat_members` (`user_id`, `chat_id`, `joined_chat_at`) VALUES (%s, %s, %s)'\n        cursor.execute(sql, (msg.new_chat_member.id, msg.chat.id, msg.date))\n        conn.commit()\n\ndef new_content(msg, end_time):\n    new_message(msg, end_time)\n    if msg.content_type == 'new_chat_members':\n        new_member(msg)\n    elif msg.content_type == 'text':\n        try:\n            with DataConn(db) as conn:\n                cursor = conn.cursor()\n                sql = 'INSERT INTO `text` (`user_id`, `chat_id`, `text`, `msg_date`, `message_id`) VALUES (%s, %s, %s, %s, %s)'\n                cursor.execute(sql, (msg.from_user.id, msg.chat.id, msg.text, msg.date, msg.message_id))\n                conn.commit()\n        except Exception as e:\n            logging.error(e)\n            logging.error(sql)\n    else:\n        try:\n            with DataConn(db) as conn:\n                cursor = conn.cursor()\n                sql = 'INSERT INTO `{cont_type}` (`user_id`, `chat_id`, `file_id`, `file_size`) VALUES (%s, %s, %s, %s)'.format(\n                    cont_type = msg.content_type\n                )\n                cursor.execute(sql, (msg.from_user.id, msg.chat.id, get_file_id(msg), get_file_size(msg)))\n                conn.commit()\n        except Exception as e:\n            logging.error(e)\n            logging.error(sql)\n\ndef get_chat_users(chat_id, limit):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM `most_active_users` WHERE `chat_id` = %s ORDER BY `amount` DESC LIMIT {limit}'.format(limit = limit)\n        cursor.execute(sql, (chat_id, ))\n        r = cursor.fetchall()\n        return r\n\ndef get_chat_users_count(chat_id):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT COUNT(`user_id`) FROM `most_active_users` WHERE `chat_id` = %s ORDER BY `amount` DESC'\n        cursor.execute(sql, (chat_id, ))\n        r = cursor.fetchone()\n        return r['COUNT(`user_id`)']\n\ndef new_voteban(chat_id, chat_name, victim_id, victim_name, vote_hash):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'INSERT INTO `votebans`(`vote_hash`, `victim_id`, `victim_name`, `chat_id`, `chat_name`, `votes_count`, `votes_limit`, `started_at`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'\n        
cursor.execute(sql, (vote_hash, victim_id, victim_name, chat_id, chat_name, 0, utils.get_voteban_limit(chat_id), int(time.time())))\n        conn.commit()\n\ndef update_voteban(vote_hash):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        curr_votes = get_voteban_votes_count(vote_hash)\n        utils.set_voteban_votes_count(vote_hash, curr_votes)\n        if utils.get_voteban_limit():\n            pass\n\ndef get_voteban_votes_count(vote_hash):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT COUNT(`vote_id`) FROM `voteban` WHERE `vote_id` = %s'\n        cursor.execute(sql, (vote_hash, ))\n        r = cursor.fetchone()\n        return r['COUNT(`vote_id`)']\n\ndef set_voteban_votes_count(vote_hash, votes_count):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'UPDATE `votebans` SET `votes_count` = %s WHERE `vote_hash` = %s'\n        cursor.execute(sql, (votes_count, vote_hash))\n        conn.commit()\n\ndef get_voteban_info(vote_hash):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'SELECT * FROM `votebans` WHERE `vote_hash` = %s'\n        cursor.execute(sql, (vote_hash, ))\n        r = cursor.fetchone()\n        return r\n\ndef set_voteban_info(column, state, vote_hash):\n    with DataConn(db) as conn:\n        cursor = conn.cursor()\n        sql = 'UPDATE `votebans` SET `{column}` = %s WHERE `vote_hash` = %s'.format(column = column)\n        cursor.execute(sql, (state, vote_hash))\n        conn.commit()\n","repo_name":"SergAHell/zukuzuku-bot","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":23862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"38065919804","text":"# Linked List Nth to Last Node \n\n## Problem Statement\n### Write a function that takes a head node and an integer value **n** and then returns the nth to last node in the linked list. 
For example, given:\n\n# from the SIngly Linkled list imported the linked list creator \nfrom Implementation_Singly_LinkedList import Singly_Link_list\n\n### Method 1 # reverse the linked list and then take the value from the back \n## \n\nclass nth_to_last_node():\n\n def reverse(self,s):\n prev = None\n current = s.head \n while current is not None:\n next = current.next \n current.next = prev \n prev = current\n current = next \n s.head = prev \n\n def nth_node(self,s,val):\n self.reverse(s)\n current = s.head\n cnt = 0 \n if val == 0: \n return(s.head.value)\n else:\n while current is not None:\n current = current.next\n cnt = cnt + 1 \n if cnt == val :\n return(current.value)\n break\n\n \n\n \ns = Singly_Link_list()\ns.insert(10)\ns.insert(20)\ns.insert(30)\ns.insert(40)\ns.insert(50)\ns.insert(60)\ns.insert(70)\ns.insert(80)\np = nth_to_last_node()\nprint(p.nth_node(s,1))\n\n","repo_name":"RatnamDubey/DataStructures","sub_path":"Implementaions /Problem_LinkedList_Nth_LastNode.py","file_name":"Problem_LinkedList_Nth_LastNode.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11470258195","text":"import sys\nsys.stdin = open('sample_input.txt')\n\n\ndef dfs(V, E, graph, S, G):\n visited = [False for _ in range(V+1)]\n stack = [S]\n\n while stack:\n node = stack.pop()\n if node not in visited:\n visited[node] = True\n stack.extend(graph[node])\n return visited[G]\n\n\nT = int(input())\n\nfor tc in range(1, T+1):\n V, E = map(int, input().split())\n graph = [[] for _ in range(V + 1)]\n for _ in range(E):\n start, end = map(int, input().split())\n graph[start].append(end)\n S, G = map(int, input().split())\n print('#{} {}'.format(tc, dfs(V, E, graph, S, G)))\n\n\n\n","repo_name":"kyinl/swea","sub_path":"Stack/4871_그래프경로/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"69959273459","text":"def palindrome_age(motherAge, daughterAge):\n Age_mother = str(motherAge)\n Age_daughter = str(daughterAge)\n diff= len(Age_mother) - len(Age_daughter)\n Age_daughter = Age_daughter.zfill(len(motherAge))\n age_mother = age_mother[::-1]\n if Age_mother == Age_daughter:\n return True\n else:\n return False\ncount = 0\npreviousDiffAge = 0\nfor Age_mother in range (15, 120):\n for Age_daughter in range(1, 100):\n diffAge = Age_mother - Age_daughter\n if palindrome(Age_mother, Age_daughter) and diffAge == previousDiffAge:\n count = count + 1\n if count == 6:\n print(Age_mother)\n print(Age_daughter)\n previousDiffAge = diffAge\n","repo_name":"inwk6312fall2019/wordplay-deepukavacham","sub_path":"task9.py","file_name":"task9.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34166084232","text":"import re\nimport subprocess\nimport sys\nfrom collections import deque\n\nfrom .config import load_config\nfrom .constants import appname, defconf, is_macos, str_version\nfrom .layout import all_layouts\n\nis_macos\nOPTIONS = '''\n--class\ndest=cls\ndefault={appname}\ncondition=not is_macos\nSet the class part of the |_ WM_CLASS| window property\n\n\n--name\ncondition=not is_macos\nSet the name part of the |_ WM_CLASS| property (defaults to using the value from |_ --class|)\n\n\n--title\nSet the window title. This will override any title set by the program running inside kitty. 
So\nonly use this if you are running a program that does not set titles.\n\n\n--config\ntype=list\ndefault={config_path}\nSpecify a path to the configuration file(s) to use.\nCan be specified multiple times to read multiple configuration files in sequence, which are merged.\n\n\n--override -o\ntype=list\nOverride individual configuration options, can be specified multiple times.\nSyntax: |_ name=value|. For example: |_ -o font_size=20|\n\n\n--cmd -c\nRun python code in the kitty context\n\n\n--directory -d\ndefault=.\nChange to the specified directory when launching\n\n\n--detach\ntype=bool-set\ncondition=not is_macos\nDetach from the controlling terminal, if any\n\n\n--window-layout\ntype=choices\nchoices={window_layout_choices}\nThe window layout to use on startup.\n\n\n--session\nPath to a file containing the startup |_ session| (tabs, windows, layout, programs).\nSee the README file for details and an example.\n\n\n--single-instance -1\ntype=bool-set\nIf specified only a single instance of |_ {appname}| will run. New invocations will\ninstead create a new top-level window in the existing |_ {appname}| instance. This\nallows |_ {appname}| to share a single sprite cache on the GPU and also reduces\nstartup time. You can also have separate groups of |_ {appname}| instances by using the\n|_ --instance-group| option\n\n\n--instance-group\nUsed in combination with the |_ --single-instance| option. All |_ {appname}| invocations\nwith the same |_ --instance-group| will result in new windows being created\nin the first |_ {appname}| instance within that group\n\n\n# Debugging options\n\n--version -v\nThe current {appname} version\n\n\n--dump-commands\ntype=bool-set\nOutput commands received from child process to stdout\n\n\n--replay-commands\ntype=bool-set\nReplay previously dumped commands\n\n\n--dump-bytes\nPath to file in which to store the raw bytes received from the child process\n\n\n--debug-gl\ntype=bool-set\nDebug OpenGL commands. This will cause all OpenGL calls to check for errors instead of ignoring them. 
Useful when debugging rendering problems\n'''\n\n\ndef surround(x, start, end):\n if sys.stdout.isatty():\n x = '\\033[{}m{}\\033[{}m'.format(start, x, end)\n return x\n\n\ndef emph(x):\n return surround(x, 91, 39)\n\n\ndef cyan(x):\n return surround(x, 96, 39)\n\n\ndef green(x):\n return surround(x, 32, 39)\n\n\ndef blue(x):\n return surround(x, 34, 39)\n\n\ndef yellow(x):\n return surround(x, 93, 39)\n\n\ndef italic(x):\n return surround(x, 3, 23)\n\n\ndef bold(x):\n return surround(x, 1, 22)\n\n\ndef title(x):\n return blue(bold(x))\n\n\ndef parse_option_spec(spec=OPTIONS):\n NORMAL, METADATA, HELP = 'NORMAL', 'METADATA', 'HELP'\n state = NORMAL\n lines = spec.splitlines()\n prev_line = ''\n seq = []\n disabled = []\n mpat = re.compile('([a-z]+)=(.+)')\n current_cmd = None\n\n for line in lines:\n line = line.strip()\n if state is NORMAL:\n if not line:\n continue\n if line.startswith('# '):\n seq.append(line[2:])\n continue\n if line.startswith('--'):\n parts = line.split(' ')\n current_cmd = {'dest': parts[0][2:].replace('-', '_'), 'aliases': frozenset(parts), 'help': ''}\n state = METADATA\n continue\n raise ValueError('Invalid option spec, unexpected line: {}'.format(line))\n elif state is METADATA:\n m = mpat.match(line)\n if m is None:\n state = HELP\n current_cmd['help'] += line\n else:\n k, v = m.group(1), m.group(2)\n if k == 'condition':\n v = eval(v)\n current_cmd[k] = v\n if k == 'choices':\n current_cmd['choices'] = {x.strip() for x in current_cmd['choices'].split(',')}\n elif state is HELP:\n if line:\n current_cmd['help'] += ' ' + line\n else:\n if prev_line:\n current_cmd['help'] += '\\n'\n else:\n state = NORMAL\n (seq if current_cmd.get('condition', True) else disabled).append(current_cmd)\n current_cmd = None\n prev_line = line\n if current_cmd is not None:\n (seq if current_cmd.get('condition', True) else disabled).append(current_cmd)\n\n return seq, disabled\n\n\ndef prettify(text):\n\n def sub(m):\n t = m.group(2)\n for ch in m.group(1):\n t = {'C': cyan, '_': italic, '*': bold, 'G': green, 'T': title}[ch](t)\n return t\n\n text = re.sub(r'[|]([a-zA-Z_*]+?) 
(.+?)[|]', sub, text)\n return text\n\n\ndef version():\n return '{} {} created by {}'.format(italic(appname), green(str_version), title('Kovid Goyal'))\n\n\ndef wrap(text, limit=80):\n NORMAL, IN_FORMAT = 'NORMAL', 'IN_FORMAT'\n state = NORMAL\n last_space_at = None\n chars_in_line = 0\n breaks = []\n for i, ch in enumerate(text):\n if state is IN_FORMAT:\n if ch == 'm':\n state = NORMAL\n continue\n if ch == '\\033':\n state = IN_FORMAT\n continue\n if ch == ' ':\n last_space_at = i\n if chars_in_line < limit:\n chars_in_line += 1\n continue\n if last_space_at is not None:\n breaks.append(last_space_at)\n last_space_at = None\n chars_in_line = i - breaks[-1]\n\n lines = []\n for b in reversed(breaks):\n lines.append(text[b:].lstrip())\n text = text[:b]\n if text:\n lines.append(text)\n return reversed(lines)\n\n\ndef print_help_for_seq(seq, usage, message, appname):\n from kitty.icat import screen_size\n try:\n linesz = min(screen_size().cols, 76)\n except EnvironmentError:\n linesz = 76\n blocks = []\n a = blocks.append\n\n def wa(text, indent=0, leading_indent=None):\n if leading_indent is None:\n leading_indent = indent\n j = '\\n' + (' ' * indent)\n lines = []\n for l in text.splitlines():\n if l:\n lines.extend(wrap(l, limit=linesz - indent))\n else:\n lines.append('')\n a((' ' * leading_indent) + j.join(lines))\n\n usage = usage or '[program-to-run ...]'\n a('{}: {} [options] {}'.format(title('Usage'), bold(yellow(appname)), usage))\n a('')\n message = message or (\n 'Run the |G {appname}| terminal emulator. You can also specify the |_ program| to run inside |_ {appname}| as normal'\n ' arguments following the |_ options|. For example: {appname} /bin/sh'\n ).format(appname=appname)\n wa(prettify(message))\n a('')\n if seq:\n a('{}:'.format(title('Options')))\n for opt in seq:\n if isinstance(opt, str):\n a('{}:'.format(title(opt)))\n continue\n a(' ' + ', '.join(map(green, sorted(opt['aliases']))))\n if not opt.get('type', '').startswith('bool-'):\n blocks[-1] += '={}'.format(italic(opt['dest'].upper()))\n if opt.get('help'):\n defval = opt.get('default')\n t = opt['help'].replace('%default', str(defval))\n wa(prettify(t.strip()), indent=4)\n if defval is not None:\n wa('Default: {}'.format(defval), indent=4)\n if 'choices' in opt:\n wa('Choices: {}'.format(', '.join(opt['choices'])), indent=4)\n a('')\n\n text = '\\n'.join(blocks) + '\\n\\n' + version()\n if sys.stdout.isatty():\n p = subprocess.Popen(['less', '-isRXF'], stdin=subprocess.PIPE)\n p.communicate(text.encode('utf-8'))\n raise SystemExit(p.wait())\n else:\n print(text)\n\n\ndef defval_for_opt(opt):\n dv = opt.get('default')\n typ = opt.get('type', '')\n if typ.startswith('bool-'):\n if dv is None:\n dv = False if typ == 'bool-set' else True\n else:\n dv = dv.lower() in ('true', 'yes', 'y')\n elif typ == 'list':\n dv = []\n elif typ in ('int', 'float'):\n dv = (int if typ == 'int' else float)(dv or 0)\n return dv\n\n\nclass Options:\n\n def __init__(self, seq, usage, message, appname):\n self.alias_map = {}\n self.seq = seq\n self.names_map = {}\n self.values_map = {}\n self.usage, self.message, self.appname = usage, message, appname\n for opt in seq:\n if isinstance(opt, str):\n continue\n for alias in opt['aliases']:\n self.alias_map[alias] = opt\n name = opt['dest']\n self.names_map[name] = opt\n self.values_map[name] = defval_for_opt(opt)\n\n def opt_for_alias(self, alias):\n opt = self.alias_map.get(alias)\n if opt is None:\n raise SystemExit('Unknown option: {}'.format(emph(alias)))\n return opt\n\n def 
needs_arg(self, alias):\n if alias in ('-h', '--help'):\n print_help_for_seq(self.seq, self.usage, self.message, self.appname or appname)\n raise SystemExit(0)\n opt = self.opt_for_alias(alias)\n if opt['dest'] == 'version':\n print(version())\n raise SystemExit(0)\n typ = opt.get('type', '')\n return not typ.startswith('bool-')\n\n def process_arg(self, alias, val=None):\n opt = self.opt_for_alias(alias)\n typ = opt.get('type', '')\n name = opt['dest']\n nmap = {'float': float, 'int': int}\n if typ == 'bool-set':\n self.values_map[name] = True\n elif typ == 'bool-reset':\n self.values_map[name] = False\n elif typ == 'list':\n self.values_map.setdefault(name, [])\n self.values_map[name].append(val)\n elif typ == 'choices':\n choices = opt['choices']\n if val not in choices:\n raise SystemExit('{} is not a valid value for the {} option. Valid values are: {}'.format(\n val, emph(alias), ', '.join(choices)))\n self.values_map[name] = val\n elif typ in nmap:\n f = nmap[typ]\n try:\n self.values_map[name] = f(val)\n except Exception:\n raise SystemExit('{} is not a valid value for the {} option, a number is required.'.format(\n val, emph(alias)))\n else:\n self.values_map[name] = val\n\n\nclass Namespace:\n\n def __init__(self, kwargs):\n for name in kwargs:\n setattr(self, name, kwargs[name])\n\n\ndef parse_cmdline(oc, disabled, args=None):\n NORMAL, EXPECTING_ARG = 'NORMAL', 'EXPECTING_ARG'\n state = NORMAL\n if args is None:\n args = sys.argv[1:]\n args = deque(args)\n current_option = None\n\n while args:\n arg = args.popleft()\n if state is NORMAL:\n if arg.startswith('-'):\n if arg == '--':\n break\n parts = arg.split('=', 1)\n needs_arg = oc.needs_arg(parts[0])\n if not needs_arg:\n if len(parts) != 1:\n raise SystemExit('The {} option does not accept arguments'.format(emph(parts[0])))\n oc.process_arg(parts[0])\n continue\n if len(parts) == 1:\n current_option = parts[0]\n state = EXPECTING_ARG\n continue\n oc.process_arg(parts[0], parts[1])\n else:\n args = [arg] + list(args)\n break\n else:\n oc.process_arg(current_option, arg)\n current_option, state = None, NORMAL\n if state is EXPECTING_ARG:\n raise SystemExit('An argument is required for the option: {}'.format(emph(arg)))\n\n ans = Namespace(oc.values_map)\n for opt in disabled:\n setattr(ans, opt['dest'], defval_for_opt(opt))\n return ans, list(args)\n\n\ndef options_spec():\n if not hasattr(options_spec, 'ans'):\n options_spec.ans = OPTIONS.format(\n appname=appname, config_path=defconf,\n window_layout_choices=', '.join(all_layouts)\n )\n return options_spec.ans\n\n\ndef parse_args(args=None, ospec=options_spec, usage=None, message=None, appname=None):\n options = parse_option_spec(ospec())\n seq, disabled = options\n oc = Options(seq, usage, message, appname)\n return parse_cmdline(oc, disabled, args=args)\n\n\ndef create_opts(args):\n config = args.config or (defconf, )\n overrides = (a.replace('=', ' ', 1) for a in args.override or ())\n opts = load_config(*config, overrides=overrides)\n return opts\n","repo_name":"helinyu/kitty","sub_path":"kitty/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":13119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"41495045648","text":"from pickletools import optimize\r\nimport streamlit as st\r\nimport yfinance as yf\r\nfrom datetime import date\r\nimport pandas as pd\r\nfrom prophet import Prophet\r\nfrom prophet.plot import plot_plotly,plot_components_plotly\r\nfrom plotly import graph_objects as 
go\r\n\r\npd.set_option('display.max_columns', None)\r\npd.set_option('display.max_rows', None)\r\nst.set_page_config(\r\n    page_title=\"Previsão de Ações\",\r\n    page_icon=\"📈\",\r\n    layout=\"wide\"\r\n)\r\n\r\nDATA_INICIO = '2017-01-01'\r\nDATA_FIM = date.today().strftime('%Y-%m-%d')\r\n\r\nst.title('Análise de Ações')\r\n\r\n# creating the sidebar\r\nst.sidebar.write('Escolha a ação')\r\n\r\nn_dias = st.slider('Quantidade de dias de previsão', 30, 360)\r\n\r\n\r\ndef pegar_dados_acoes():\r\n    path = 'acoes.csv'\r\n    \r\n    return pd.read_csv(path,delimiter=';')\r\n\r\ndf = pegar_dados_acoes()\r\nacao = df['snome']\r\nnome_acao_escolhida = st.sidebar.selectbox('Escolha uma Ação: ', acao)\r\ndf_acao = df[df['snome'] == nome_acao_escolhida] # getting the stock chosen by the user\r\nacao_escolhida = df_acao.iloc[0]['sigla_acao']\r\nacao_escolhida = acao_escolhida + '.SA' \r\n\r\n@st.cache\r\ndef pegar_valores_online(sigla_acao):\r\n    df = yf.download(sigla_acao, DATA_INICIO, DATA_FIM)\r\n    df.reset_index(inplace=True)\r\n    return df\r\n\r\ndf_valores = pegar_valores_online(acao_escolhida)\r\n\r\nst.subheader('Tabela de Valores - ' + nome_acao_escolhida)\r\nst.write(df_valores.tail(10))\r\n\r\n# creating the price chart\r\nst.subheader('Gráfico de Preço')\r\nfig = go.Figure()\r\nfig.add_trace(go.Scatter(x=df_valores['Date'], y=df_valores['Open'], name=\"Preço Abertura\", line_color='blue'))\r\nfig.add_trace(go.Scatter(x=df_valores['Date'], y=df_valores['Close'], name=\"Preço Fechamento\", line_color='yellow'))\r\nst.plotly_chart(fig)\r\n\r\n# Forecast\r\ndf_treino = df_valores[['Date', 'Close']]\r\n\r\n# renaming columns to the names Prophet expects\r\ndf_treino = df_treino.rename(columns={\"Date\": \"ds\", \"Close\":\"y\"})\r\n\r\nmodelo = Prophet()\r\nmodelo.fit(df_treino)\r\nfuturo = modelo.make_future_dataframe(periods=n_dias,freq='B')\r\n\r\nprevisao = modelo.predict(futuro)\r\n\r\nst.subheader('Previsão da Ação nos Próximos dias')\r\nst.write(previsao[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(n_dias))\r\n\r\n# forecast charts\r\ngrafico1 = plot_plotly(modelo, previsao)\r\nst.plotly_chart(grafico1)\r\n\r\ngrafico2 = plot_components_plotly(modelo, previsao)\r\nst.plotly_chart(grafico2)\r\n\r\n\r\n","repo_name":"renangarciarosa/Analise_e_Predicao_Acoes","sub_path":"app_finance.py","file_name":"app_finance.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"20498683744","text":"import math\r\n\r\ndef make_graph():\r\n\tf = open(\"graph.txt\",\"r\")\r\n\tlines = f.readlines()\r\n\tgraph={}\r\n\r\n\tfor i in lines:\r\n\t\t\r\n\t\tl = i[:-1].split(\"->\")\r\n\t\tif len(l[1])==1:\r\n\t\t\tgraph[l[0]] = {l[1]}\r\n\t\telse:\r\n\t\t\tgraph[l[0]] = set(l[1].split(\",\"))\r\n\t\tgraph[l[0]].add(l[0])\r\n\t\t\r\n\r\n\treturn graph\r\n\r\ndef get_similarity(graph,u,v):\r\n\treturn len(graph[u].intersection(graph[v]))/math.sqrt((len(graph[v]))*(len(graph[u])))\r\n\r\ndef get_epsilon():\r\n\treturn 0.7\r\n\r\ndef get_popularity_threshold():\r\n\treturn 2\r\n\r\ndef get_neighbours(graph,u):\r\n\tneighbours = set()\r\n\r\n\tfor v in graph[u]:\r\n\t\tif get_similarity(graph,u,v)> get_epsilon():\r\n\t\t\tneighbours.add(v)\r\n\treturn neighbours\r\n\r\ndef is_dir_reachable(graph,cores,u,v):\r\n\t\r\n\t# if v is a neighbour of u\r\n\tif v in get_neighbours(graph,u) or u in get_neighbours(graph,v):\r\n\t\treturn True\r\n\r\n\t# if v can be reached by a neighbour chain and u is a core or vice versa\r\n\telif u in cores or v in cores:\r\n\t\t# {u}, not set(u): set() would split a multi-character node name into characters\r\n\t\tl = {u}\r\n\t\ttemp = 
set()\r\n\t\twhile l and temp!=l:\r\n\t\t\t# snapshot a copy; temp = l would alias the same set and the loop would stop after one pass\r\n\t\t\ttemp = set(l)\r\n\t\t\tfor i in list(l):\r\n\t\t\t\tif v in get_neighbours(graph,i):\r\n\t\t\t\t\treturn True\r\n\t\t\t\telse:\r\n\t\t\t\t\tl.update(get_neighbours(graph,i)) \r\n\t\t\t\r\n\t# if u and v are not cores but are reachable by the same core\t\t\t\t\r\n\telse:\t\t\t\r\n\t\tfor i in cores:\r\n\t\t\tif u in get_neighbours(graph,i) and v in get_neighbours(graph,i):\r\n\t\t\t\treturn True\r\n\r\n\t\r\n\treturn False\r\n\r\ndef dir_reachable(graph,cores,u):\r\n\treachable_nodes = []\r\n\tfor i in graph:\r\n\t\tif is_dir_reachable(graph,cores,u,i):\r\n\t\t\treachable_nodes.append(i)\r\n\treturn reachable_nodes\t\t\t\r\n\t\t\t\r\n\r\ndef scan(graph):\r\n\tclusters = {}\r\n\thubs = []\r\n\toutliers = []\r\n\tnode_queue = []\r\n\tcores = []\r\n\tnode_labels = dict(zip(list(graph.keys()),map(lambda x: \"unclassified\",graph.keys())))\r\n\r\n\tfor i in node_labels:\r\n\t\tif node_labels[i] == \"unclassified\":\r\n\t\t\tif len(get_neighbours(graph,i))>get_popularity_threshold():\r\n\t\t\t\t# i is a core point\r\n\t\t\t\tcores.append(i)\r\n\t\t\t\tnode_labels[i] = \"core\"\r\n\t\t\t\tnode_queue.extend(get_neighbours(graph,i))\r\n\t\t\t\tcid = i\r\n\t\t\t\tnode_labels[i]=i\r\n\r\n\t\t\t\twhile node_queue:\r\n\t\t\t\t\tqhead = node_queue[0]\r\n\t\t\t\t\tfor j in dir_reachable(graph,cores,qhead):\r\n\t\t\t\t\t\t# enqueue before relabelling, otherwise the \"unclassified\" check below could never fire\r\n\t\t\t\t\t\tif node_labels[j]==\"unclassified\":\r\n\t\t\t\t\t\t\tnode_queue.append(j) \r\n\t\t\t\t\t\tif node_labels[j]==\"unclassified\" or node_labels[j]==\"non-member\":\r\n\t\t\t\t\t\t\tnode_labels[j]=i\r\n\t\t\t\t\tnode_queue.remove(qhead)\r\n\t\t\telse:\r\n\t\t\t\tnode_labels[i] = \"non-member\"\r\n\r\n\t# to get hubs and outliers\r\n\tfor i in [k for k in node_labels if node_labels[k]==\"non-member\"]:\r\n\t\tfor x in graph[i]-{i}:\r\n\t\t\tfor y in graph[i]-{i,x}:\r\n\t\t\t\tif node_labels[x]!=node_labels[y]:\r\n\t\t\t\t\tnode_labels[i] = \"hub\"\r\n\t\t\t\t\tbreak\r\n\t\t\tif node_labels[i] == \"hub\":\r\n\t\t\t\tbreak\r\n\t\tif node_labels[i]!=\"hub\":\r\n\t\t\tnode_labels[i] = \"outlier\"\r\n\r\n\tfor i in node_labels:\r\n\t\tif node_labels[i] == \"hub\":\r\n\t\t\thubs.append(i)\r\n\t\telif node_labels[i] == \"outlier\":\r\n\t\t\toutliers.append(i)\r\n\t\telse:\r\n\t\t\tif node_labels[i] in clusters:\r\n\t\t\t\tclusters[node_labels[i]].add(i) \r\n\t\t\telse:\r\n\t\t\t\tclusters[node_labels[i]] = {i}\r\n\t\r\n\treturn clusters,hubs,outliers\r\n\t\t\t\r\n\t\r\n\t\t\r\n\t\t\t\t\t\t\t\r\ndef main():\r\n\tgraph = make_graph()\r\n\tclusters,hubs,outliers = scan(graph)\r\n\tprint(\"No.of clusters = \",len(clusters))\r\n\tn = 1\r\n\tfor i in clusters:\r\n\t\tprint(\"Cluster \",n,\":\",clusters[i])\r\n\t\tn = n+1\r\n\tprint(\"No.of hubs = \",len(hubs),\"\\nHubs:\",hubs)\r\n\tprint(\"No.of outliers = \",len(outliers),\"\\nOutliers:\",outliers)\r\n\t\r\nif __name__ == \"__main__\":\r\n\tmain()\t\r\n\t\t\r\n\t\r\n\t\r\n","repo_name":"durgaravi/scan-python","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"40301917611","text":"# defining the function\r\ndef calculate_rectangle_perimeter(length1,length2,length3,length4):\r\n    perimeter = length1 + length2 + length3 + length4 \r\n    return perimeter\r\n\r\n# ask the user to input the value of each side\r\nlength1 = float(input(\"Enter the length of the rectangle side 1: \"))\r\nlength2 = float(input(\"Enter the length of the rectangle side 2: \"))\r\nlength3 = 
float(input(\"Enter the length of the rectangle side 3: \"))\r\nlength4 = float(input(\"Enter the length of the rectangle side 4: \"))\r\n\r\nrectangle_perimeter = calculate_rectangle_perimeter(length1,length2,length3,length4)\r\n\r\n# display the value the user inputs\r\nprint('This is your perimeter value', rectangle_perimeter)\r\n\r\n\r\n","repo_name":"bnguy101/Perimeter","sub_path":"Perimeter.py","file_name":"Perimeter.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"14093048551","text":"import re\nimport sys\nimport traceback\nfrom os import getenv\n\nfrom flask_admin import Admin\nfrom flask_assets import Environment as Assets\nfrom flask_basicauth import BasicAuth\nfrom flask_caching import Cache\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom flask_login import LoginManager\nfrom flask_mail import Mail, Message\nfrom flask_session_captcha import FlaskSessionCaptcha\nfrom flask_sessionstore import Session\n\nfrom flaskr.models import User\n\n# Setup flask cache, that we mostly do not use, but it's there just in case\ncache = Cache()\n\n# Flask assets, for CSS, JS and such\nassets_env = Assets()\n\n# Session Store for captcha and perhaps visits counter\nsession = Session()\n\n# Captcha, eventually disabled for better UX, but it's available if needed\ncaptcha = FlaskSessionCaptcha()\n\n# Mail handler\nmail = Mail()\n\n# Debug toolbar for easy dev (disabled in prod)\ndebug_toolbar = DebugToolbarExtension()\n\n# Basic auth\nlogin_manager = LoginManager()\nlogin_manager.login_view = \"main.login\"\nlogin_manager.login_message_category = \"warning\"\nbasic_auth = BasicAuth()\n\n# Admin backoffice\nadmin = Admin()\n\n\n@login_manager.user_loader\ndef load_user(userid):\n return User.query.get(userid)\n\n\ndef send_email(to_recipient, subject, message):\n if 'production' != getenv('FLASK_ENV', 'production'):\n print(\"Skipping sending email because we are not in production.\")\n return\n\n try:\n msg = Message(\n subject=subject,\n html=message,\n sender=getenv('MAIL_DEFAULT_SENDER'),\n recipients=[to_recipient],\n bcc=[\n #'antoine@goutenoir.com', # :(|)\n ],\n )\n mail.send(msg)\n except Exception as e:\n print(\"ERROR Sending email:\\n%s\" % str(e))\n traceback.print_exc(file=sys.stderr)\n\n\ndef icon2html(text):\n icon_html = r\"\"\"\"\"\"\n return re.sub(\n \"\",\n icon_html,\n text\n )\n","repo_name":"dbxifu/travel-footprint-calculator","sub_path":"flaskr/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8861437081","text":"class Questions:\n def __init__(self):\n self.valid_options = ['A', 'B', 'C', 'D']\n self.intense_options = ['Demais','Sim','Um pouco','Não']\n self.questions = ['1. Você se considera uma pessoa preditiva?',\n '2. Você se considera uma pessoa com alto pensamento critico?',\n '3. Você se considera uma pessoa criativa?',\n '4. Você se considera uma pessoa comunicativa?']\n\n def get_answer(self):\n answer = input().capitalize().strip()\n if answer not in self.valid_options:\n raise ValueError('Opção inválida!')\n else:\n return answer\n\n def question(self, question_index):\n print(self.questions[question_index])\n for option in self.valid_options:\n print (f'{option}. 
{self.intense_options[self.valid_options.index(option)]}')\n        return self.get_answer()\n","repo_name":"Maicon-MK/MatchJobs","sub_path":"src/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"8116877295","text":"import gordoncont as gg\nimport numpy as np\n\nclass Oracle:\n    def __call__(self, env=None, state=None):\n        \"\"\"\n        All oracles must implement this function to operate on the\n        environment.\n\n        Args:\n            env: None or SequentialEnvironment\n                the environment to be acted upon. if None, state must\n                be not None\n            state: None or torch FloatTensor\n                the state to operate on. if None, env must\n                be not None.\n        \"\"\"\n        raise NotImplementedError\n\nclass NullOracle(Oracle):\n    def __call__(self, *args, **kwargs):\n        return [0,0,-1]\n\nclass RandOracle(Oracle):\n    def __init__(self, actn_min=0, actn_max=5):\n        self.brain = lambda: (np.random.random((3,))-.5)/0.5\n\n    def __call__(self, *args, **kwargs):\n        actn = self.brain()\n        grab = actn[2]>=0\n        temp = 0.05\n        return [*actn[:2], (grab-.5)/(.5+temp)]\n\nclass GordonOracle(Oracle):\n    def __init__(self, env_type, *args, **kwargs):\n        self.env_type = env_type\n        self.is_grabbing = False\n\n        if self.env_type == \"gordoncont-v0\":\n            self.brain = gg.ggames.ai.even_line_match\n        elif self.env_type == \"gordoncont-v1\":\n            self.brain = gg.ggames.ai.cluster_match\n        elif self.env_type == \"gordoncont-v2\":\n            self.brain = gg.ggames.ai.cluster_match\n        elif self.env_type == \"gordoncont-v3\":\n            self.brain = gg.ggames.ai.even_line_match\n        elif self.env_type == \"gordoncont-v4\":\n            self.brain = gg.ggames.ai.nuts_in_can\n        elif self.env_type == \"gordoncont-v5\":\n            self.brain = gg.ggames.ai.rev_cluster_match\n        elif self.env_type == \"gordoncont-v6\":\n            self.brain = gg.ggames.ai.rev_cluster_match\n        elif self.env_type == \"gordoncont-v7\":\n            self.brain = gg.ggames.ai.brief_display\n        elif self.env_type == \"gordoncont-v8\":\n            self.brain = gg.ggames.ai.nuts_in_can\n        else:\n            raise NotImplementedError\n\n    def __call__(self, env, *args, **kwargs):\n        \"\"\"\n        Args:\n            env: SequentialEnvironment\n                the environment\n        \"\"\"\n        (xycoord, grab) = self.brain(env.controller)\n        # use a temperature parameter to avoid vanishing gradients\n        temp = .05\n        actn = [*xycoord, (float(grab)-.5)/(.5+temp)]\n        if grab == env.is_grabbing:\n            return actn\n        elif self.brain == gg.ggames.ai.nuts_in_can:\n            actn = [*xycoord, .5/(.5+temp)]\n            return actn\n        else:\n            actn = [*xycoord, .5/(.5+temp)]\n            return actn\n\n","repo_name":"grantsrb/gordoncont","sub_path":"gordoncont/oracles.py","file_name":"oracles.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"41704618169","text":"import re\n\n\nclass LinkHandler:\n    '''Matches any website links in the text'''\n\n    def __init__(self):\n\n        http_protocol = r\"\"\"h[it]tps?:\"\"\"\n        # generic_protocol = r\"\"\"[a-z][\\w-]+\"\"\"\n        top_level_domain = r\"\"\"(?:com|net|org|edu|gov|mil|aero|asia|biz|\"\"\" + \\\n            r\"\"\"cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|\"\"\" + \\\n            r\"\"\"travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|\"\"\" + \\\n            r\"\"\"au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|\"\"\" + \\\n            r\"\"\"bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|\"\"\" + \\\n            r\"\"\"cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|\"\"\" + \\\n            
r\"\"\"es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|\"\"\" + \\\n r\"\"\"gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|\"\"\" + \\\n r\"\"\"il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|\"\"\" + \\\n r\"\"\"kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|\"\"\" + \\\n r\"\"\"md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|\"\"\" + \\\n r\"\"\"my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|\"\"\" + \\\n r\"\"\"pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|\"\"\" + \\\n r\"\"\"sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|\"\"\" + \\\n r\"\"\"sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|\"\"\" + \\\n r\"\"\"tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|\"\"\" + \\\n r\"\"\"ye|yt|yu|za|zm|zw)\"\"\"\n\n pattern = r\"\"\"(?i)\\b((?:\"\"\" + http_protocol + \\\n r\"\"\"(?:/{1,3}|[a-z0-9%])|[a-z0-9.\\-]+[.]\"\"\" + \\\n top_level_domain + \\\n r\"\"\"/)(?:[^\\s()<>{}\\[\\]]+|\\([^\\s()]*?\\([^\\s()]+\\)\"\"\" + \\\n r\"\"\"[^\\s()]*?\\)|\\([^\\s]+?\\))+(?:\\([^\\s()]*?\\([^\\s()]+\\)\"\"\" + \\\n r\"\"\"[^\\s()]*?\\)|\\([^\\s]+?\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’])\"\"\" + \\\n r\"\"\"|(?:(? 0:\n char_map = self.char_mapping(token_list)\n match_indices = self.return_link_index(matches,\n char_map,\n tags_list)\n\n if len(match_indices) > 0:\n for idx in match_indices:\n tok = token_list[idx]\n alpha_tok = re.sub('[^a-zA-Z]', '', tok)\n if len(alpha_tok) < 6:\n continue\n if alpha_tok.islower() or alpha_tok.isupper():\n link_indices.append(idx)\n\n if verbose and len(link_indices) > 0: # pragma: no cover <--\n print(f'\\nFinal Matches {entity}: '\n f'{[token_list[idx] for idx in link_indices]}\\n')\n return link_indices\n\n\nif __name__ == '__main__':\n import spacy\n from pprint import pprint\n nlp = spacy.load('en_core_web_sm')\n\n test = \"www.google.com/?search Search Results: ...\"\n doc = nlp(test)\n toks = [t.text for t in doc]\n toks = [t.strip() for t in toks]\n text = \" \".join(toks)\n tags = ['O']*len(doc)\n tag = \"LINK\"\n link_hdlr = LinkHandler()\n link_indices = link_hdlr.match_ref(text, toks, tags,\n entity=tag,\n verbose=True)\n tags = [tag if idx in link_indices else 'O'\n for idx in range(len(toks))]\n print(text)\n print(toks)\n print(tags)\n pprint(list(zip(toks, tags)), compact=True)\n","repo_name":"GreatLearning-NLP-Capstone-Group-9/Automatic-Ticket-Classification","sub_path":"utils/link_handler.py","file_name":"link_handler.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"10036558136","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom rdkit import Chem, RDLogger\nfrom torch import nn\nfrom torch.nn.utils import clip_grad_value_\nfrom torch.utils.data import DataLoader\n\nfrom layers import Generator, RecurrentDiscriminator\nfrom tokenizer import Tokenizer\n\nRDLogger.DisableLog('rdApp.*')\n\n\nclass MolGen(nn.Module):\n\n def __init__(self, data, hidden_dim=128, lr=1e-3, device='cpu'):\n \"\"\"[summary]\n\n Args:\n data (list[str]): [description]\n hidden_dim (int, optional): [description]. Defaults to 128.\n lr ([type], optional): learning rate. Defaults to 1e-3.\n device (str, optional): 'cuda' or 'cpu'. 
Defaults to 'cpu'.\n \"\"\"\n super().__init__()\n\n self.device = device\n\n self.hidden_dim = hidden_dim\n\n self.tokenizer = Tokenizer(data)\n\n self.generator = Generator(\n latent_dim=hidden_dim,\n vocab_size=self.tokenizer.vocab_size - 1,\n start_token=self.tokenizer.start_token - 1, # no need token\n end_token=self.tokenizer.end_token - 1,\n ).to(device)\n\n self.discriminator = RecurrentDiscriminator(\n hidden_size=hidden_dim,\n vocab_size=self.tokenizer.vocab_size,\n start_token=self.tokenizer.start_token,\n bidirectional=True\n ).to(device)\n\n self.generator_optim = torch.optim.Adam(\n self.generator.parameters(), lr=lr)\n\n self.discriminator_optim = torch.optim.Adam(\n self.discriminator.parameters(), lr=lr)\n\n self.b = 0. # baseline reward\n\n def sample_latent(self, batch_size):\n \"\"\"Sample from latent space\n\n Args:\n batch_size (int): number of samples\n\n Returns:\n torch.Tensor: [batch_size, self.hidden_dim]\n \"\"\"\n return torch.randn(batch_size, self.hidden_dim).to(self.device)\n\n def discriminator_loss(self, x, y):\n \"\"\"Discriminator loss\n\n Args:\n x (torch.LongTensor): input sequence [batch_size, max_len]\n y (torch.LongTensor): sequence label (zeros from generatoe, ones from real data)\n [batch_size, max_len]\n\n Returns:\n loss value\n \"\"\"\n\n y_pred, mask = self.discriminator(x).values()\n\n loss = F.binary_cross_entropy(\n y_pred, y, reduction='none') * mask\n\n loss = loss.sum() / mask.sum()\n\n return loss\n\n def train_step(self, x):\n \"\"\"One training step\n\n Args:\n x (torch.LongTensor): sample form real distribution\n \"\"\"\n\n batch_size, len_real = x.size()\n\n # create real and fake labels\n x_real = x.to(self.device)\n y_real = torch.ones(batch_size, len_real).to(self.device)\n\n # sample latent var\n z = self.sample_latent(batch_size)\n generator_outputs = self.generator.forward(z, max_len=20)\n x_gen, log_probs, entropies = generator_outputs.values()\n\n # label for fake data\n _, len_gen = x_gen.size()\n y_gen = torch.zeros(batch_size, len_gen).to(self.device)\n\n #####################\n # Train Discriminator\n #####################\n\n self.discriminator_optim.zero_grad()\n\n # disc fake loss\n fake_loss = self.discriminator_loss(x_gen, y_gen)\n\n # disc real loss\n real_loss = self.discriminator_loss(x_real, y_real)\n\n # combined loss\n discr_loss = 0.5 * (real_loss + fake_loss)\n discr_loss.backward()\n\n # clip grad\n clip_grad_value_(self.discriminator.parameters(), 0.1)\n\n # update params\n self.discriminator_optim.step()\n\n # ###############\n # Train Generator\n # ###############\n\n self.generator_optim.zero_grad()\n\n # prediction for generated x\n y_pred, y_pred_mask = self.discriminator(x_gen).values()\n\n # Reward (see the ref paper)\n R = (2 * y_pred - 1)\n\n # reward len for each sequence\n lengths = y_pred_mask.sum(1).long()\n\n # list of rew of each sequences\n list_rewards = [rw[:ln] for rw, ln in zip(R, lengths)]\n\n # compute - (r - b) log x\n generator_loss = []\n for reward, log_p in zip(list_rewards, log_probs):\n\n # substract the baseline\n reward_baseline = reward - self.b\n\n generator_loss.append((- reward_baseline * log_p).sum())\n\n # mean loss + entropy reg\n generator_loss = torch.stack(generator_loss).mean() - \\\n sum(entropies) * 0.01 / batch_size\n\n # baseline moving average\n with torch.no_grad():\n mean_reward = (R * y_pred_mask).sum() / y_pred_mask.sum()\n self.b = 0.9 * self.b + (1 - 0.9) * mean_reward\n\n generator_loss.backward()\n\n clip_grad_value_(self.generator.parameters(), 
0.1)\n\n        self.generator_optim.step()\n\n        return {'loss_disc': discr_loss.item(), 'mean_reward': mean_reward}\n\n    def create_dataloader(self, data, batch_size=128, shuffle=True, num_workers=5):\n        \"\"\"create a dataloader\n\n        Args:\n            data (list[str]): list of molecule smiles\n            batch_size (int, optional): Defaults to 128.\n            shuffle (bool, optional): Defaults to True.\n            num_workers (int, optional): Defaults to 5.\n\n        Returns:\n            torch.data.DataLoader: a torch dataloader\n        \"\"\"\n\n        return DataLoader(\n            data,\n            batch_size=batch_size,\n            shuffle=shuffle,\n            collate_fn=self.tokenizer.batch_tokenize,\n            num_workers=num_workers\n        )\n\n    def train_n_steps(self, train_loader, max_step=10000, evaluate_every=50):\n        \"\"\"Train for max_step steps\n\n        Args:\n            train_loader (torch.data.DataLoader): dataloader\n            max_step (int, optional): Defaults to 10000.\n            evaluate_every (int, optional): Defaults to 50.\n        \"\"\"\n\n        iter_loader = iter(train_loader)\n\n        # best_score = 0.0\n\n        for step in range(max_step):\n\n            try:\n                batch = next(iter_loader)\n            except StopIteration:\n                # the loader is exhausted, restart it\n                iter_loader = iter(train_loader)\n                batch = next(iter_loader)\n\n            # model update\n            self.train_step(batch)\n\n            if step % evaluate_every == 0:\n\n                self.eval()\n                score = self.evaluate_n(100)\n                self.train()\n\n                # if score > best_score:\n                #     self.save_best()\n                #     print('saving')\n                #     best_score = score\n\n                print(f'valid = {score: .2f}')\n\n    def get_mapped(self, seq):\n        \"\"\"Transform a sequence of ids to string\n\n        Args:\n            seq (list[int]): sequence of ids\n\n        Returns:\n            str: string output\n        \"\"\"\n        return ''.join([self.tokenizer.inv_mapping[i] for i in seq])\n\n    @torch.no_grad()\n    def generate_n(self, n):\n        \"\"\"Generate n molecules\n\n        Args:\n            n (int)\n\n        Returns:\n            list[str]: generated molecules\n        \"\"\"\n\n        z = torch.randn((n, self.hidden_dim)).to(self.device)\n\n        x = self.generator(z)['x'].cpu()\n\n        lengths = (x > 0).sum(1)\n\n        # l - 1 because we exclude end tokens\n        return [self.get_mapped(seq[:l-1].numpy()) for seq, l in zip(x, lengths)]\n\n    def evaluate_n(self, n):\n        \"\"\"Evaluation: frequency of valid molecules using rdkit\n\n        Args:\n            n (int): number of samples\n\n        Returns:\n            float: frequency of valid molecules\n        \"\"\"\n\n        pack = self.generate_n(n)\n\n        print(pack[:2])\n\n        valid = np.array([Chem.MolFromSmiles(k) is not None for k in pack])\n\n        return valid.mean()\n","repo_name":"urchade/molgen","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"}
{"seq_id":"40499952723","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt5 import QtCore, QtWidgets\r\n\r\n\r\nclass ResultTab(QtWidgets.QWidget):\r\n    def __init__(self, parent=None):\r\n        super(ResultTab, self).__init__(parent)\r\n\r\n        self.mainGui = parent\r\n        self.currentUrl = ''\r\n        self.__createUi()\r\n\r\n    def __createUi(self):\r\n        self.urlLabel = QtWidgets.QLabel('Result: ')\r\n        self.urlTable = QtWidgets.QListWidget(self)\r\n\r\n        self.contentLabel = QtWidgets.QLabel('Content: ')\r\n        self.contentTable = QtWidgets.QTextEdit(self)\r\n        self.contentTable.setReadOnly(True)\r\n\r\n        layout = QtWidgets.QVBoxLayout(self)\r\n        layout.addWidget(self.urlLabel)\r\n        layout.addWidget(self.urlTable)\r\n        layout.addWidget(self.contentLabel)\r\n        layout.addWidget(self.contentTable)\r\n\r\n        self.urlTable.itemClicked.connect(self.updateContent)\r\n\r\n    def updateResult(self, urlInput):\r\n        self.currentUrl = urlInput\r\n        self.urlLabel.setText('Result:\\n' + '\"' + urlInput + '\"')\r\n        for site in 
self.mainGui.db.getSites(urlInput):\r\n self.urlTable.addItem(site[0])\r\n\r\n def updateContent(self, item):\r\n self.contentTable.setText(self.mainGui.db.getSiteContent(self.currentUrl, item.text())[0][0].replace('\\n', ''))\r\n","repo_name":"Pedalves/WebDataTools","sub_path":"gui/resultTab.py","file_name":"resultTab.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71272271219","text":"import flet\nfrom flet import (\n Column,\n FloatingActionButton,\n Icon,\n NavigationRail,\n NavigationRailDestination,\n Page,\n Row,\n Text,\n VerticalDivider,\n icons, Ref,\n)\n\ndef main(page: Page):\n\n mTextContent = Ref[Text]()\n mNavigationRail = Ref[NavigationRail]()\n mColumn = Ref[Column]()\n\n def on_page_change(e):\n print(\"Selected destination:\", e.control.selected_index)\n mTextContent.current.value = f\"Body!{e.control.selected_index}\"\n page.update()\n\n\n def createColum(index: int) -> Column:\n print(index)\n mColumn.current.controls.pop()\n return Column([mTextContent.current], alignment=\"start\", expand=True)\n\n\n\n\n\n rail = NavigationRail(\n selected_index=0,\n label_type=\"all\",\n # extended=True,\n min_width=100,\n min_extended_width=400,\n leading=FloatingActionButton(icon=icons.CREATE, text=\"Add\"),\n group_alignment=-0.9,\n destinations=[\n NavigationRailDestination(\n icon=icons.FAVORITE_BORDER, selected_icon=icons.FAVORITE, label=\"First\"\n ),\n NavigationRailDestination(\n icon_content=Icon(icons.BOOKMARK_BORDER),\n selected_icon_content=Icon(icons.BOOKMARK),\n label=\"Second\",\n ),\n NavigationRailDestination(\n icon=icons.SETTINGS_OUTLINED,\n selected_icon_content=Icon(icons.SETTINGS),\n label_content=Text(\"Settings\"),\n ),\n ],\n on_change=on_page_change,\n )\n\n mText = Text(ref=mTextContent, value=f\"Body!{rail.selected_index}\")\n\n page.add(\n Row(\n [\n rail,\n VerticalDivider(width=1),\n mText,\n ],\n expand=True,\n )\n )\n\nflet.app(target=main)\n\n\n","repo_name":"artillerymans/FletForPython","sub_path":"FletForNavigationRail.py","file_name":"FletForNavigationRail.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39447842604","text":"import bpy\r\n\r\nclass LayerImgData(bpy.types.PropertyGroup):\r\n \"\"\"Data to be used each time layer is created.\"\"\"\r\n\r\n width = bpy.props.IntProperty(\r\n name=\"Width\",\r\n description = \"Horizontal dimension\",\r\n default=1024)\r\n \r\n height = bpy.props.IntProperty(\r\n name=\"Height\",\r\n description = \"Vertical dimension\",\r\n default=1024)\r\n \r\n color = bpy.props.FloatVectorProperty(\r\n name=\"BG color\",\r\n description='Color picker',\r\n subtype='COLOR',\r\n size=4,\r\n default=(0.5, 0.5, 0.5, 1),\r\n min=0.0,\r\n max=1.0)\r\n \r\n float = bpy.props.BoolProperty(\r\n name=\"32 bit\",\r\n description=\"Create image with 32 bit floating point bit depth\",\r\n default=False)\r\n","repo_name":"David-DiGioia/blender-paint-layers","sub_path":"data/layer_img_data.py","file_name":"layer_img_data.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"57"} +{"seq_id":"72980755377","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('builds/', views.BuildEditorView.as_view(), name='builds'),\n    path('build/', views.BuildEditorDetailView.as_view(), name='build-detail'),\n    path('alliances/', views.AlliancesView.as_view(), name='alliances'),\n    path('alliance/', views.AlliancesDetailView.as_view(), name='alliance-detail'),\n    path('races/', views.RacesView.as_view(), name='races'),\n    path('race/', views.RacesDetailView.as_view(), name='race-detail'),\n    path('classes/', views.ClassesView.as_view(), name='classes'),\n    path('class/', views.ClassesDetailView.as_view(), name='class-detail'),\n    path('skills/', views.SkillsView.as_view(), name='skills'),\n    path('skill/', views.SkillsDetailView.as_view(), name='skill-detail'),\n    path('zones/', views.ZonesView.as_view(), name='zones'),\n    path('zone/', views.ZonesDetailView.as_view(), name='zone-detail')\n]","repo_name":"chelcycuevas/python_ui","sub_path":"eso_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"9544301655","text":"from django.db import models\nfrom django.utils.translation import ugettext as _\nfrom pygov_br.camara_deputados import cd\n\n\nclass BillAuthor(models.Model):\n    name = models.CharField(max_length=255, verbose_name=_('Author Name'))\n    region = models.CharField(max_length=255, blank=True, null=True,\n                              verbose_name=_('Region'))\n    party = models.CharField(max_length=255, blank=True, null=True,\n                             verbose_name=_('Party'))\n    register_id = models.CharField(max_length=255, blank=True, null=True,\n                                   verbose_name=_('Register ID'))\n\n    class Meta:\n        verbose_name = \"Bill Author\"\n        verbose_name_plural = \"Bill Authors\"\n\n    def __str__(self):\n        details = ''\n        if self.register_id:\n            details = ' - {}({})'.format(self.party, self.region)\n        return '{}{}'.format(self.name, details)\n\n\nclass BillInfo(models.Model):\n\n    class Meta:\n        verbose_name = \"Bill Info\"\n        verbose_name_plural = \"Bill Infos\"\n\n    def __str__(self):\n        return self.bill.title\n\n    def save(self, *args, **kwargs):\n        try:\n            proposal = cd.proposals.get(\n                self.proposal_type.initials,\n                self.proposal_number,\n                self.proposal_year,\n            )\n        except KeyError:\n            raise Exception('Invalid proposal_type, proposal_number or '\n                            'proposal_year')\n        author = BillAuthor.objects.update_or_create(\n            name=proposal['Autor'],\n            region=proposal['ufAutor'],\n            party=proposal['partidoAutor'],\n            register_id=proposal['ideCadastro'],\n        )[0]\n        self.situation = proposal['Situacao']\n        self.author = author\n\n        return super(BillInfo, self).save(*args, **kwargs)\n\n    bill = models.OneToOneField('core.Bill', verbose_name=_('Bill'),\n                                related_name='infos')\n    author = models.ForeignKey('BillAuthor', verbose_name=_('Author'),\n                               null=True, blank=True)\n    reporting_member = models.ForeignKey('ReportingMember',\n                                         null=True, blank=True,\n                                         verbose_name=_('Reporting Member'))\n    proposal_type = models.ForeignKey('ProposalType',\n                                      verbose_name=_('Proposal Type'))\n    proposal_number = models.IntegerField(verbose_name=_('Proposal Number'))\n    proposal_year = models.IntegerField(verbose_name=_('Proposal Year'))\n    situation = models.CharField(max_length=400, blank=True, null=True,\n                                 verbose_name=_('Situation'))\n\n\nclass ProposalType(models.Model):\n\n    initials = models.CharField(max_length=50)\n    description = models.CharField(max_length=255)\n\n    class Meta:\n        verbose_name = \"Proposal Type\"\n        verbose_name_plural = \"Proposal Types\"\n\n    def 
__str__(self):\n        return '{} - {}'.format(self.initials, self.description)\n\n\nclass ReportingMember(models.Model):\n    id = models.IntegerField(primary_key=True,\n                             verbose_name=_('Reporting Member ID'))\n    name = models.CharField(max_length=255,\n                            verbose_name=_('Parliamentary Name'))\n    party = models.CharField(max_length=255,\n                             verbose_name=_('Party'))\n    region = models.CharField(max_length=2,\n                              verbose_name=_('Region'))\n    email = models.EmailField(verbose_name=_('Email'))\n\n    class Meta:\n        verbose_name = \"ReportingMember\"\n        verbose_name_plural = \"ReportingMembers\"\n        ordering = ('name', )\n\n    def __str__(self):\n        return self.name\n","repo_name":"labhackercd/wikilegis","sub_path":"wikilegis/plugins/camara_deputados/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"57"}
{"seq_id":"34044670947","text":"\"\"\"\nProgrammers - [Round 1] Shuttle Bus\n\"\"\"\n\n\nfrom collections import deque\n\nSTART_TIME = \"09:00\"\nMAX_NUMBER = 10\nMAX_INTERVAL = 60\n\n\ndef calc_time(time, dt):\n    hour = int(time[:2])\n    minute = int(time[3:])\n\n    dh, dm = divmod(dt, 60)\n    _dh, nm = divmod(minute + dm, 60)\n    nh = (hour + dh + _dh) % 24\n\n    return f\"{str(nh).rjust(2, '0')}:{str(nm).rjust(2, '0')}\"\n\n\ndef solution(n, t, m, timetable):\n    max_time = calc_time(START_TIME, (MAX_NUMBER-1) * MAX_INTERVAL)\n    timetable = [time for time in timetable if time <= max_time]\n    ideal_time = calc_time(START_TIME, (n-1) * t)\n\n    if not timetable:\n        return ideal_time\n\n    timetable.sort()\n\n    q = deque(timetable)\n    for i in range(n):\n        end_time = calc_time(START_TIME, i * t)\n\n        cnt = 0\n        while q and cnt < m:\n            if q[0] > end_time:\n                break\n\n            time = q.popleft()\n            cnt += 1\n\n            if i == n - 1 and cnt == m:\n                return calc_time(time, -1)\n\n    return ideal_time\n\n\nif __name__ == \"__main__\":\n    n, t, m = 1, 1, 5\n    timetable = [\"08:00\", \"08:01\", \"08:02\", \"08:03\"]\n    print(solution(n, t, m, timetable))\n","repo_name":"jonusHK/algorithm_data_structure","sub_path":"implementation/implementation_16*.py","file_name":"implementation_16*.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"25088246145","text":"import random\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageOps\n\nfrom lib.config import config\nfrom lib.file_path import FONT_PATH, TEXT_PATH\n\n\ndef create_image(size, bg_color, message):\n\n    font = ImageFont.truetype(str(FONT_PATH), config['font_size'])\n    font_color = config['font_color']\n\n    W, H = size\n\n    img = Image.new('RGB', size, bg_color)\n    draw = ImageDraw.Draw(img)\n\n    _, _, w, h = draw.textbbox((0, 0), message, font=font)\n\n    draw.text(((W - w) / 2, (H - h) / 2), message, font=font, fill=font_color)\n    return img\n\n\ndef add_border(img):\n    color = config['border_color']\n    border = config['border_size']\n    img = ImageOps.expand(img, border=border, fill=color)\n    return img\n\n\ndef get_text_length():\n    with open(TEXT_PATH, 'r', encoding='utf-8') as f:\n        text_list = f.read().splitlines()\n    return len(text_list)\n\n\ndef generate_images():\n    with open(TEXT_PATH, 'r', encoding='utf-8') as f:\n        text_list = f.read().splitlines()\n\n    for i in range(len(text_list)):\n        img = create_image(\n            config[\"image_size\"],\n            config[\"image_color\"],\n            text_list[i].replace('/', '\\n'),\n        )\n        img = add_border(img)\n        img.save(f'output/{i}.png', 'PNG')\n\n\ndef concatenate_images():\n\n    text_length = get_text_length()\n\n    
nums = random.sample([i for i in range(text_length)], k=text_length)\n\n result = None\n\n for i in range(5):\n\n temp = Image.open(f'output/{nums[i*5]}.png')\n\n for j in range(1, 5):\n\n img = Image.open(f'output/{nums[j + i*5]}.png')\n\n temp = get_concat_h(temp, img)\n\n if not result:\n result = temp\n else:\n result = get_concat_v(result, temp)\n\n return result\n\n\ndef get_concat_h(im1, im2):\n img = Image.new('RGB', (im1.width + im2.width, im1.height))\n img.paste(im1, (0, 0))\n img.paste(im2, (im1.width, 0))\n return img\n\n\ndef get_concat_v(im1, im2):\n img = Image.new('RGB', (im1.width, im1.height + im2.height))\n img.paste(im1, (0, 0))\n img.paste(im2, (0, im1.height))\n return img\n\n\ndef create_bingo_sheet(file_path):\n img = concatenate_images()\n img.save(file_path, 'PNG')\n","repo_name":"Hung-Liang/bingo_maker","sub_path":"lib/photo_lib.py","file_name":"photo_lib.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38280439157","text":"#!/usr/bin/env Python\n# coding=utf-8\n# the url structure of website\nfrom handlers.Bilibili import GetAreaListHanlder, LoginHanlder, OpenLiveHandler\nfrom handlers.index import IndexHandler\n\nurl = [\n (r'/', IndexHandler),\n (r'/getAreaList', GetAreaListHanlder),\n (r'/login', LoginHanlder),\n (r'/openLive', OpenLiveHandler),\n\n]\n","repo_name":"MRDHR/bilibili-openlive","sub_path":"url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15651274155","text":"from django.urls import path\n\nfrom . import views\n\napp_name='boltnnut'\n\nurlpatterns = [\n path('', views.index),\n path('projects',views.projects,name='projects'),\n path('uploadProject',views.upload,name='uploadProject'),\n path('search',views.search,name='search'),\n path('services',views.services,name='services'),\n]","repo_name":"KimDahui42/clone-boltnnut","sub_path":"boltnnut/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74375691698","text":"from flask import render_template, request, flash\nfrom article_query import app, db\nimport json, os\n\nfrom .utils import store_articles\nfrom .models import Articles\n\n\nif not os.path.exists('article_query\\\\article.db'):\n try:\n db.create_all()\n store_articles(db)\n except:\n print(\"database existing already\")\n\n\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef home():\n page_content = 'form'\n result = []\n total_persons = {}\n keyword = ''\n if request.method == 'POST':\n page_content = 'result'\n keyword = request.form.get('keyword').strip().lower()\n\n if keyword == '' or keyword == ' ':\n page_content = 'form'\n flash(\"Empty keyword is not allowed\", 'danger')\n return render_template('home.html', title='Article Query', page_content=page_content, result=result)\n\n else:\n\n all_articles = Articles.query.order_by(Articles.title)\n\n for item in all_articles:\n if keyword in item.title.lower():\n result.append(item)\n\n\n for item in all_articles:\n if keyword in item.keywords.lower():\n result.append(item)\n\n total_persons = {}\n for item in result:\n total_persons[item.id] = json.loads(item.persons)\n\n\n if len(result) < 1:\n page_content = 'form'\n flash(\"No result found for you keyword, try another keyword\", 'danger')\n\n \n return 
render_template('home.html', title='Article Query', page_content=page_content, result=result)\n\n        else:\n\n            all_articles = Articles.query.order_by(Articles.title)\n\n            for item in all_articles:\n                if keyword in item.title.lower():\n                    result.append(item)\n\n\n            # skip articles already matched by title so results contain no duplicates\n            for item in all_articles:\n                if keyword in item.keywords.lower() and item not in result:\n                    result.append(item)\n\n            total_persons = {}\n            for item in result:\n                total_persons[item.id] = json.loads(item.persons)\n\n\n            if len(result) < 1:\n                page_content = 'form'\n                flash(\"No result found for your keyword, try another keyword\", 'danger')\n\n    \n    return render_template('home.html', title='Article Query', page_content=page_content, result=result, total_persons=total_persons, keyword=keyword)","repo_name":"Ade-Pyaar/Article","sub_path":"article_query/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"74375691698","text":"import os\nfrom abc import ABC, abstractmethod\nfrom queue import PriorityQueue\nimport numpy as np\nimport pickle\nimport torch\n\n\nclass AgentBase(ABC):\n    def __init__(self, CONFIG, CONFIG_ENV):\n        super().__init__(CONFIG, CONFIG_ENV)\n\n        self.config = CONFIG\n        self.rng = np.random.default_rng(seed=CONFIG.SEED)\n        self.device = CONFIG.DEVICE\n        self.image_device = CONFIG.IMAGE_DEVICE\n\n        # Mode\n        self.eval = CONFIG.EVAL\n\n        # Vectorized envs\n        self.n_envs = CONFIG.NUM_CPUS\n        self.action_dim = CONFIG_ENV.ACTION_DIM\n        self.use_append = CONFIG_ENV.USE_APPEND\n        self.max_train_steps = CONFIG_ENV.MAX_TRAIN_STEPS\n        self.max_eval_steps = CONFIG_ENV.MAX_EVAL_STEPS\n        self.env_step_cnt = [0 for _ in range(self.n_envs)]\n\n        # Save\n        self.out_folder = CONFIG.OUT_FOLDER\n        self.save_top_k = CONFIG.SAVE_TOP_K\n        self.pq_top_k = PriorityQueue()\n        self.save_metric = CONFIG.SAVE_METRIC\n        self.use_wandb = CONFIG.USE_WANDB\n        # Figure folder\n        # figure_folder = os.path.join(out_folder, 'figure')\n        # os.makedirs(figure_folder, exist_ok=True)\n\n        # Save loss and eval info, key is step number\n        self.loss_record = {}\n        self.eval_record = {}\n\n        # Load tasks\n        dataset = CONFIG_ENV.DATASET\n        print(\"= Loading tasks from\", dataset)\n        with open(dataset, 'rb') as f:\n            self.task_all = pickle.load(f)\n        self.num_task = len(self.task_all)\n        print(self.num_task, \"tasks are loaded\")\n\n        # Mode\n        if self.eval:\n            self.set_eval_mode()\n        else:\n            self.set_train_mode()\n\n        # Set starting step\n        if CONFIG.CURRENT_STEP is None:\n            self.cnt_step = 0\n        else:\n            self.cnt_step = CONFIG.CURRENT_STEP\n            print(\"starting from {:d} steps\".format(self.cnt_step))\n\n    @abstractmethod\n    def learn(self):\n        raise NotImplementedError\n\n    # @abstractmethod\n    # def finish_eval(self):\n    #     raise NotImplementedError\n\n    def set_train_mode(self):\n        self.num_eval_episode = 0\n\n        self.eval_mode = False\n        self.max_env_step = self.max_train_steps\n\n    def set_eval_mode(self):\n        self.num_eval_episode = 0\n        self.num_eval_success = 0  # for calculating expected success rate\n        self.num_eval_safe = 0  # for calculating expected safety rate\n        self.eval_reward_cumulative = [0 for _ in range(self.n_envs)\n                                      ]  # for calculating cumulative reward\n        self.eval_reward_best = [0 for _ in range(self.n_envs)]\n        self.eval_reward_cumulative_all = 0\n        self.eval_reward_best_all = 0\n        self.env_step_cnt = [0 for _ in range(self.n_envs)]\n\n        self.eval_mode = True\n        self.max_env_step = self.max_eval_steps\n\n    # === Venv ===\n    def step(self, action):\n        return self.venv.step(action)\n\n    def reset_sim(self):\n        self.venv.env_method('close_pb')\n\n    def reset_env_all(self, task_ids=None, verbose=False):\n        if task_ids is None:\n            task_ids = self.rng.integers(low=0,\n                                         high=self.num_task,\n                                         size=(self.n_envs, ))\n        tasks = [self.task_all[id] for id in task_ids]\n        s = self.venv.reset(tasks)\n        if verbose:\n            for index in range(self.n_envs):\n                print(\"<-- Reset environment {} with task {}:\".format(\n                    index, task_ids[index]))\n        self.env_step_cnt = [0 for _ in range(self.n_envs)]\n        return s, task_ids\n\n    def reset_env(self, env_ind, task_id=None, verbose=False):\n        if task_id is None:\n            task_id = 
self.rng.integers(low=0, high=self.num_task)\n s = self.venv.reset_one(env_ind, self.task_all[task_id])\n if verbose:\n print(\"<-- Reset environment {} with task {}:\".format(\n env_ind, task_id))\n self.env_step_cnt[env_ind] = 0\n return s, task_id\n\n # === Models ===\n def save(self, metric=None, force_save=False):\n assert metric is not None or force_save, \\\n \"should provide metric of force save\"\n save_current = False\n if force_save:\n save_current = True\n elif self.pq_top_k.qsize() < self.save_top_k:\n self.pq_top_k.put((metric, self.cnt_step))\n save_current = True\n elif metric > self.pq_top_k.queue[0][0]: # overwrite\n # Remove old one\n _, step_remove = self.pq_top_k.get()\n for module, module_folder in zip(self.module_all,\n self.module_folder_all):\n module.remove(int(step_remove), module_folder)\n self.pq_top_k.put((metric, self.cnt_step))\n save_current = True\n\n if save_current:\n print()\n print('Saving current model...')\n for module, module_folder in zip(self.module_all,\n self.module_folder_all):\n module.save(self.cnt_step, module_folder)\n print(self.pq_top_k.queue)\n\n # TODO\n def restore(self, step, logs_path, agent_type, actor_path=None):\n \"\"\"Restore the weights of the neural network.\n\n Args:\n step (int): #updates trained.\n logs_path (str): the path of the directory, under this folder there\n should be critic/ and agent/ folders.\n \"\"\"\n model_folder = path_c = os.path.join(logs_path, agent_type)\n path_c = os.path.join(model_folder, 'critic',\n 'critic-{}.pth'.format(step))\n if actor_path is not None:\n path_a = actor_path\n else:\n path_a = os.path.join(model_folder, 'actor',\n 'actor-{}.pth'.format(step))\n self.learner.critic.load_state_dict(\n torch.load(path_c, map_location=self.device))\n self.learner.critic_target.load_state_dict(\n torch.load(path_c, map_location=self.device))\n self.learner.actor.load_state_dict(\n torch.load(path_a, map_location=self.device))\n print(' <= Restore {} with {} updates from {}.'.format(\n agent_type, step, model_folder))\n","repo_name":"allenzren/alano","sub_path":"alano/train/agent_base.py","file_name":"agent_base.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34505825723","text":"import argparse\nimport glob\nimport os\n\nfrom tqdm import tqdm\nfrom typing import List, Dict\n\nfrom mention_preprocessor import MCPreprocessor\n\n\ndef dict_2_name2cui(\n dictionary: List[str],\n preprocessor: MCPreprocessor,\n name2cui: Dict[str, str] = {}\n) -> Dict[str, str]:\n for record in tqdm(dictionary):\n cui, names = record.split('||')\n for name in names.strip().split('|'):\n name = preprocessor(name)\n\n if name not in name2cui:\n name2cui[name] = cui\n elif name in name2cui and cui not in name2cui[name]:\n name2cui[name] = name2cui[name] + '|' + cui\n\n return name2cui\n\n\ndef preprocess(\n dictionary_path: str,\n lowercase: bool,\n remove_punct: bool,\n output_path: str,\n extra_data_path: str = None,\n) -> None:\n with open(dictionary_path, 'r', encoding='utf-8') as file:\n dictionary = file.readlines()\n\n preprocessor = MCPreprocessor(\n lowercase=lowercase,\n remove_punct=remove_punct\n )\n\n name2cui = dict_2_name2cui(\n dictionary=dictionary,\n preprocessor=preprocessor\n )\n\n if extra_data_path is not None:\n add_data = []\n concept_files = glob.glob(os.path.join(extra_data_path, \"*.concept\"))\n for filename in concept_files:\n with open(filename, 'r') as file:\n concepts = 
file.readlines()\n\n            for concept in concepts:\n                _, _, _, name, cui = concept.strip().split(\"||\")\n                add_data.append('||'.join([cui, name]))\n\n        name2cui = dict_2_name2cui(\n            dictionary=add_data,\n            preprocessor=preprocessor,\n            name2cui=name2cui\n        )\n\n    print(f'Number of unique names: {len(name2cui)}')\n    dirname = os.path.dirname(output_path)\n    if not os.path.exists(dirname):\n        os.makedirs(dirname)\n\n    with open(output_path, 'w') as file:\n        for name, cui in name2cui.items():\n            print(f\"{cui}||{name}\", file=file)\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--input_dict_path', type=str, required=True,\n                        help='Path to created MEDIC dictionary.')\n    parser.add_argument('--output_path', type=str, required=True,\n                        help='Path for preprocessed dictionary.')\n\n    parser.add_argument('--extra_data_path', type=str, default=None,\n                        help='Path to other extra data.')\n\n    parser.add_argument('--lowercase', action=\"store_true\")\n    parser.add_argument('--remove_punct', action=\"store_true\")\n\n    args = parser.parse_args()\n\n    preprocess(\n        args.input_dict_path,\n        args.lowercase,\n        args.remove_punct,\n        args.output_path,\n        args.extra_data_path\n    )\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"bestasoff/adynorm","sub_path":"preprocess/dictionary_preprocess.py","file_name":"dictionary_preprocess.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"5670986796","text":"\"\"\"\nMerge Sort practice\n\"\"\"\nimport copy\nimport math\nimport random\nimport unittest\n\n\ndef merge1(data, reverse, compare_field=None):\n    \"\"\"\n    Merge sort\n    Every D&C subtask uses freshly allocated memory, which costs more space, but keeps the code relatively simple\n\n    :param data List, the list to sort; currently handles int lists and dict lists\n    :param reverse Boolean,\n        True: DESC\n        False: ASC\n    :param compare_field str, when data is a dict list, the key field name used for sorting\n\n    :return: list, the sorted result of this stage\n    \"\"\"\n    if not isinstance(reverse, bool):\n        raise Exception(\"arg 'reverse' should be boolean\")\n\n    if not isinstance(data, list):\n        raise Exception(\"arg 'data' should be list\")\n\n    length = len(data)\n    if length == 0:\n        return data\n    elif length == 1:\n        return data\n\n    # at this point, data should have at least 2 elements\n\n    # Divide - split in half\n    middle_idx = length//2\n\n    # left half\n    left_part = data[0:middle_idx]\n    # right half\n    right_part = data[middle_idx:]\n\n    # 1.1 Conquer - send the halves into subtasks\n    left_part = merge1(left_part, reverse, compare_field)\n    right_part = merge1(right_part, reverse, compare_field)\n\n    # 2. with the results back, both halves should now be individually sorted; prepare to merge\n    left_length = len(left_part)\n    right_length = len(right_part)\n\n    # new list (for merging)\n    merged_result_list = []\n\n    # index for traversing left_part\n    left_idx = 0\n    # index for traversing right_part\n    right_idx = 0\n\n    while True:\n        # keep looping over left_part and right_part; each round does the following:\n        # (1) pick the elements to compare: left_part's elem at left_idx vs. right_part's elem at right_idx\n        # (2) whichever satisfies the condition (the smaller one for ASC, and vice versa) goes into the list\n        # (3) then that side's index += 1 (e.g. if left_part[left_idx] went into the list, then left_idx += 1)\n\n        # termination condition\n        if left_idx == left_length or right_idx == right_length:\n            # one of the two sides is exhausted, so stop first\n            break\n\n        # extract the two elements to compare; the access differs by data type, currently int and dict\n        left_elem = left_part[left_idx]\n        right_elem = right_part[right_idx]\n        if isinstance(left_elem, int):\n            left_elem_value = left_elem\n            right_elem_value = right_elem\n        elif isinstance(left_elem, dict):\n            left_elem_value = left_elem.get(compare_field)\n            right_elem_value = right_elem.get(compare_field)\n        else:\n            raise Exception(\"!?\")\n\n        if reverse:\n            # DESC\n            if left_elem_value > right_elem_value:\n                # left is larger, append the left one\n                merged_result_list.append(left_part[left_idx])\n                left_idx += 1\n            elif left_elem_value == right_elem_value:\n                # equal case - only append from left_part\n                # to keep the sort stable, right-half elements must never get appended before left-half elements; once the left half is fully appended, the right half naturally follows, which keeps it stable\n                merged_result_list.append(left_part[left_idx])\n                left_idx += 1\n            elif left_elem_value < right_elem_value:\n                # right is larger, append the right one\n                merged_result_list.append(right_part[right_idx])\n                right_idx += 1\n            else:\n                # unreachable\n                raise Exception(\"!?\")\n        else:\n            # ASC\n            if left_elem_value < right_elem_value:\n                # left is smaller, append the left one\n                merged_result_list.append(left_part[left_idx])\n                left_idx += 1\n            elif left_elem_value == right_elem_value:\n                # equal case - only append from left_part\n                # to keep the sort stable, right-half elements must never get appended before left-half elements; once the left half is fully appended, the right half naturally follows, which keeps it stable\n                merged_result_list.append(left_part[left_idx])\n                left_idx += 1\n            elif left_elem_value > right_elem_value:\n                # right is smaller, append the right one\n                merged_result_list.append(right_part[right_idx])\n                right_idx += 1\n            else:\n                # unreachable\n                raise Exception(\"!?\")\n\n    # at this point, at least one of left_part / right_part has been fully appended to merged_result_list\n    # left_part and right_part can differ in length and distribution; one may be finished while the other still has a leftover run.\n    # example (assuming ASC)\n    # left_part [1, 3]\n    # right_part [2, 6, 10]\n    # with these two, the state at the end of the while loop is\n    # merged_result_list is [1, 2, 3]\n    # left_idx 2\n    # right_idx 1 -> so right_part still has [6, 10] left unplaced\n    #\n    # this case is simple: just attach the remaining [6, 10] to the right end of merged_result_list\n    # taking ASC as the example: the already-merged merged_result_list must hold the relatively small elements, while the leftovers must be the relatively large ones and are themselves already sorted, so a direct append is enough\n\n    # because the loop above ends with += 1, the index finishes one past the last position, exactly equal to the length, hence the condition is written this way\n    if left_idx < left_length:\n        # right_part has been fully merged into the new list while left_part has leftovers; attach the unfinished part directly to the end of the list\n        merged_result_list += left_part[left_idx:]\n    if right_idx < right_length:\n        # left_part has been fully merged into the new list while right_part has leftovers; attach the unfinished part directly to the end of the list\n        merged_result_list += right_part[right_idx:]\n\n    return merged_result_list\n\n\ndef bubble_sort(data, reverse=False, field_name=None):\n    \"\"\" bubble sort (used to verify stability) \"\"\"\n    result = copy.deepcopy(data)\n    length = len(result)\n    if length == 0:\n        return []\n    elif length == 1:\n        return result\n\n    # at this point, length is at least 2\n\n    if isinstance(result[0], int):\n        if reverse:\n            # DESC\n            for j in range(0, length - 1):\n                for i in range(0, length - j - 1):\n                    if result[i] < result[i + 1]:\n                        result[i], result[i + 1] = result[i + 1], result[i]\n        else:\n            # ASC\n            for j in range(0, length - 1):\n                for i in range(0, length - j - 1):\n                    if result[i] > result[i + 1]:\n                        result[i], result[i + 1] = result[i + 1], result[i]\n    elif isinstance(result[0], dict):\n        if not field_name:\n            raise Exception(\"field_name is required for dict item\")\n        if reverse:\n            # DESC\n            for j in range(0, length - 1):\n                for i in range(0, length - j - 1):\n                    if result[i].get(field_name) < result[i + 1].get(field_name):\n                        result[i], result[i + 1] = result[i + 1], result[i]\n        else:\n            # ASC\n            for j in range(0, length - 1):\n                for i in range(0, length - j - 1):\n                    if result[i].get(field_name) > result[i + 1].get(field_name):\n                        result[i], result[i + 1] = result[i + 1], result[i]\n\n    return result\n\n\n\nclass TestMethod(unittest.TestCase):\n    int_samples = []\n    dict_samples = []\n\n    # def TestUtil_for2(self, data, test_func, compare_func, order):\n    #     \"\"\" test an in-place style sort function \"\"\"\n    #     print(\" original: %s\" % str(data))\n    #     test_func(data, compare_func)\n    #     if order == \"asc\":\n    #         self.assertEqual(data, sorted(data, reverse=False))\n    #         print(\" ASC result %s\\n\" % (data,))\n    #     else:\n    #         self.assertEqual(data, sorted(data, reverse=True))\n    #         print(\" DESC result %s\\n\" % (data,))\n\n    def TestUtil_for1_int(self, data, test_func, reverse):\n        \"\"\" test a non-in-place sort function - int list\"\"\"\n        print(\" original: %s\" % str(data))\n        result = test_func(data, reverse)\n        sorted_data = sorted(data, reverse=reverse)\n        self.assertEqual(result, sorted_data)\n        print(\" %s result %s\\n\" % (\"DESC\" if reverse else \"ASC\", result))\n\n    def TestUtil_for1_dict(self, data, test_func, reverse, field_name=None):\n        \"\"\"\n        test a non-in-place sort function - dict list\n        \"\"\"\n        print(\" original: %s\" % str(data))\n        result = test_func(data, reverse, field_name)\n        if reverse:\n            sorted_data = bubble_sort(data, reverse=True, field_name=field_name)\n        else:\n            sorted_data = bubble_sort(data, reverse=False, field_name=field_name)\n        self.assertEqual(result, sorted_data)\n        print(\" %s result %s\\n\" % (\"DESC\" if reverse else \"ASC\", result))\n\n    def setUp(self):\n        \"\"\" generate the test case lists \"\"\"\n        N1 = random.randint(6, 16)\n        data_rd1 = [i for i in range(0, N1)]\n        random.shuffle(data_rd1)\n        N2 = random.randint(11, 27)\n        data_rd2 = [i for i in range(random.randint(1, 5), N2)]\n        data_rd2 += data_rd2\n        random.shuffle(data_rd2)\n        data_rd3 = [i for i in range(0, random.randint(7, 20))] + [6, 6, 6] + [7, 7, 7]\n        random.shuffle(data_rd3)\n        data_rd4_tmp1 = [i for i in range(random.randint(0, 10), random.randint(15, 20))]\n        data_rd4_tmp2 = [i for i in range(random.randint(0, 7), random.randint(12, 26))]\n        data_rd4 = data_rd4_tmp1 + data_rd4_tmp2\n        random.shuffle(data_rd4)\n        data_rd4.pop(random.randint(0, len(data_rd4) - 2))\n        data_rd5 = [i for i in range(-3, random.randint(7, 20))]\n        random.shuffle(data_rd5)\n        data_rd6 = [i for i in range(-5, random.randint(8, 16))] + [-3, -1, -1, 7, 7, 7, 7, 7]\n        random.shuffle(data_rd6)\n        self.int_samples = [\n            {\"name\": \"iTHome Case\", \"list\": [8, 6, 1, 10, 5, 3, 9, 2, 7, 4]},\n            {\"name\": \"simple sequence (odd count)\", \"list\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]},\n            {\"name\": \"simple sequence (even count)\", \"list\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},\n            {\"name\": \"empty\", \"list\": []},\n            {\"name\": \"one element\", \"list\": [4, ]},\n            {\"name\": \"two elements\", \"list\": [4, 5]},\n            {\"name\": \"three elements\", \"list\": [4, 7, 9]},\n            {\"name\": \"four elements\", \"list\": [4, 9, 8, 3]},\n            {\"name\": \"interleaved (odd count)\", \"list\": [11, 1, 10, 2, 9, 3, 8, 4, 7, 5, 6]},\n            {\"name\": \"interleaved (even count)\", \"list\": [1, 10, 2, 9, 3, 8, 4, 7, 5, 6]},\n            {\"name\": \"random unique 0 ~ %s shuffled\" % N1, \"list\": data_rd1},\n            {\"name\": \"random unique 0 ~ %s shuffled (with negatives)\" % N1, \"list\": data_rd5},\n            {\"name\": \"fixed with duplicate elems\", \"list\": [6, 3, 8, 6, 4, 6, 9, 3, 3, 3, 6, 9, 7, 5]},\n            {\"name\": \"random doubled duplicates 0~ %s\" % N2, \"list\": data_rd2},\n            {\"name\": \"random with some identical elems added\", \"list\": data_rd3},\n            {\"name\": \"random with some identical elems added (with negatives)\", \"list\": data_rd6},\n            {\"name\": \"random with some extra elems added\", \"list\": data_rd4},\n            {\"name\": \"all identical, short\", \"list\": [6 for i in range(0, random.randint(5, 10))]},\n            {\"name\": \"all identical, long\", \"list\": [11 for i in range(0, random.randint(24, 46))]},\n        ]\n\n        # build a dict version from the int test cases\n        # reset first: dict_samples is a class attribute, so appending without clearing would accumulate across tests\n        self.dict_samples = []\n        for int_sample in self.int_samples:\n            int_list = int_sample[\"list\"]\n            dict_list = []\n            sn = 1\n            for int_item in int_list:\n                dict_item = {\n                    \"id\": int_item,\n                    \"sn\": sn # used to verify stable sort\n                }\n                dict_list.append(dict_item)\n                sn += 1\n            self.dict_samples.append({\n                \"name\": int_sample[\"name\"],\n                \"list\": dict_list\n            })\n\n\n    def test_merge1_int(self):\n        \"\"\" test merge1 int \"\"\"\n        num = 1\n        for sample in self.int_samples:\n            # each case is tested under all of the following scenario combinations\n            print(\"--- merge1 int test case group %s --- START \" % num)\n            print(sample[\"name\"])\n            data = list(copy.deepcopy(sample[\"list\"]))\n            self.TestUtil_for1_int(data=data, test_func=merge1, reverse=False)\n            data = list(copy.deepcopy(sample[\"list\"]))\n            self.TestUtil_for1_int(data=data, test_func=merge1, reverse=True)\n            print(\"%s reversed\" % (sample[\"name\"],))\n            reversed_data = list(reversed(copy.deepcopy(sample[\"list\"])))\n            self.TestUtil_for1_int(data=reversed_data, test_func=merge1, reverse=False)\n            reversed_data = list(reversed(copy.deepcopy(sample[\"list\"])))\n            self.TestUtil_for1_int(data=reversed_data, test_func=merge1, reverse=True)\n            print(\"--- merge1 int test case group %s --- END\\n\" % num)\n            num += 1\n\n    def test_bubble_sort_int(self):\n        \"\"\" test bubble_sort int \"\"\"\n        num = 1\n        for sample in self.int_samples:\n            # each case is tested under all of the following scenario combinations\n            print(\"--- bubble_sort int test case group %s START ---\" % num)\n            print(sample[\"name\"])\n            data = list(copy.deepcopy(sample[\"list\"]))\n            self.TestUtil_for1_int(data=data, test_func=bubble_sort, reverse=False)\n            data = list(copy.deepcopy(sample[\"list\"]))\n            self.TestUtil_for1_int(data=data, test_func=bubble_sort, reverse=True)\n            print(\"%s reversed\" % (sample[\"name\"],))\n            reversed_data = list(reversed(copy.deepcopy(sample[\"list\"])))\n            self.TestUtil_for1_int(data=reversed_data, test_func=bubble_sort, reverse=False)\n            reversed_data = list(reversed(copy.deepcopy(sample[\"list\"])))\n            self.TestUtil_for1_int(data=reversed_data, test_func=bubble_sort, reverse=True)\n            print(\"--- bubble_sort int test case group %s END ---\\n\" % num)\n            num += 1\n\n    def test_merge1_dict(self):\n        num = 1\n        for sample in self.dict_samples:\n            # each case is tested under all of the following scenario combinations\n            print(\"--- merge1 dict test case group %s --- START \" % num)\n            print(sample[\"name\"])\n            data = list(copy.deepcopy(sample[\"list\"]))\n            self.TestUtil_for1_dict(data=data, test_func=merge1, reverse=False, field_name=\"id\")\n            data = list(copy.deepcopy(sample[\"list\"]))\n            self.TestUtil_for1_dict(data=data, test_func=merge1, reverse=True, field_name=\"id\")\n            print(\"%s reversed\" % (sample[\"name\"],))\n            reversed_data = list(reversed(copy.deepcopy(sample[\"list\"])))\n            self.TestUtil_for1_dict(data=reversed_data, test_func=merge1, reverse=False, field_name=\"id\")\n            reversed_data = list(reversed(copy.deepcopy(sample[\"list\"])))\n            self.TestUtil_for1_dict(data=reversed_data, test_func=merge1, reverse=True, field_name=\"id\")\n            print(\"--- merge1 dict test case group %s --- END\\n\" % num)\n            num += 1\n\nif __name__ == \"__main__\":\n    unittest.main()","repo_name":"lhwangweb/algorithm_practice","sub_path":"python_practice/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":15459,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"38280439157","text":"# imports\r\nimport argparse\r\nimport time\r\nimport cv2\r\nimport imutils\r\nimport numpy 
as np\nimport picamera\nimport gpiozero\nfrom collections import namedtuple\n\n\n# globals\nfrom imutils.video import VideoStream\n\nwheels = gpiozero.Robot(left=(7, 8), right=(9, 10))\ncamera = picamera.PiCamera()\nus_sensor = gpiozero.input_devices.DistanceSensor(24, 18)\n\n# simple container so a detection's bounding box can be read as box.startX, box.endX etc.\nBox = namedtuple(\"Box\", [\"startX\", \"startY\", \"endX\", \"endY\"])\n\n\ndef image_detection_setup():\n    ap = argparse.ArgumentParser()\n    ap.add_argument(\"-p\", \"--prototxt\", required=True,\n                    help=\"path to Caffe 'deploy' prototxt file\")\n    ap.add_argument(\"-m\", \"--model\", required=True,\n                    help=\"path to Caffe pre-trained model\")\n    ap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n                    help=\"minimum probability to filter weak detections\")\n    args = vars(ap.parse_args())\n    CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n               \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n               \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n               \"sofa\", \"train\", \"tvmonitor\"]\n    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n    net = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n    return CLASSES, COLORS, net, args\n\n\ndef scan_image_for_objects(vs, net, CLASSES, COLORS, args, status):\n    # read and process a single frame from the video stream\n    frame = vs.read()\n    frame = imutils.resize(frame, width=400)\n    (h, w) = frame.shape[:2]\n    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),\n                                 0.007843, (300, 300), 127.5)\n    net.setInput(blob)\n    detections = net.forward()\n    sorted_detections = []\n    # loop over the detections\n    for i in np.arange(0, detections.shape[2]):\n        confidence = detections[0, 0, i, 2]\n        # filter out weak detections by ensuring the `confidence` is\n        # greater than the minimum confidence\n        if confidence > args[\"confidence\"]:\n            idx = int(detections[0, 0, i, 1])\n            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n            (startX, startY, endX, endY) = box.astype(\"int\")\n            label = \"{}: {:.2f}%\".format(CLASSES[idx],\n                                         confidence * 100)\n            cv2.rectangle(frame, (startX, startY), (endX, endY),\n                          COLORS[idx], 2)\n            y = startY - 15 if startY - 15 > 15 else startY + 15\n            cv2.putText(frame, label, (startX, y),\n                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n            # collect [confidence, bounding box, class name] for each accepted detection\n            sorted_detections.append([confidence, Box(startX, startY, endX, endY), CLASSES[idx]])\n    return sorted_detections, status\n\n\ndef decide_and_perform_next_movement(detections, stationary, non_detection_count, status):\n    # determining closest object in frame (taken to be the one with the widest bounding box)\n    closest_object = None\n    co_difference = 0\n    for detection in detections:\n        current_co_difference = detection[1].endX - detection[1].startX\n        if current_co_difference > co_difference:\n            co_difference = current_co_difference\n            closest_object = detection\n    # reading the ultrasonic sensor; gpiozero's DistanceSensor generates the trigger\n    # pulse itself and reports metres, converted to centimetres here\n    distance = round(us_sensor.distance * 100, 2)\n    # else-if construct to determine the next move of the robot\n    if len(detections) == 0:\n        if stationary:\n            non_detection_count += 1\n            if non_detection_count >= 10:\n                status = 0\n                wheels.stop()\n        elif not stationary:\n            non_detection_count += 1\n            wheels.forward(0.8)\n    elif stationary:\n        non_detection_count += 1\n        wheels.forward(0.8)\n        stationary = False\n    elif closest_object[1].startX 
< 100 and co_difference > 120:\n        if closest_object[2] == 'cat':\n            non_detection_count = 0\n            wheels.right(0.9)\n        elif distance >= 100:\n            non_detection_count = 0\n            wheels.right(0.8)\n        else:\n            non_detection_count = 0\n            wheels.forward(0.8)\n    elif closest_object[1].startX < 100 and co_difference < 120:\n        if closest_object[2] == 'cat':\n            non_detection_count = 0\n            wheels.right(0.9)\n        else:\n            non_detection_count = 0\n            wheels.forward(0.8)\n    elif 100 <= closest_object[1].startX <= 200 and co_difference > 120:\n        if closest_object[2] == 'cat':\n            non_detection_count = 0\n            wheels.backward(0.9)\n        elif distance >= 100:\n            non_detection_count = 0\n            wheels.backward(0.8)\n        else:\n            if 101 <= closest_object[1].startX <= 150:\n                non_detection_count = 0\n                wheels.right(0.8)\n            elif 150 <= closest_object[1].startX <= 200:\n                non_detection_count = 0\n                wheels.left(0.8)\n    elif 100 <= closest_object[1].startX <= 200 and co_difference < 120:\n        if closest_object[2] == 'cat':\n            non_detection_count = 0\n            wheels.backward(0.9)\n        else:\n            non_detection_count = 0\n            wheels.forward(0.8)\n    elif closest_object[1].startX > 200 and co_difference > 120:\n        if closest_object[2] == 'cat':\n            non_detection_count = 0\n            wheels.left(0.9)\n        elif distance >= 100:\n            non_detection_count = 0\n            wheels.left(0.8)\n        else:\n            non_detection_count = 0\n            wheels.forward(0.8)\n    elif closest_object[1].startX < 200 and co_difference < 120:\n        if closest_object[2] == 'cat':\n            non_detection_count = 0\n            wheels.left(0.9)\n        else:\n            non_detection_count = 0\n            wheels.forward(0.8)\n    return stationary, non_detection_count, status\n\n\ndef main():\n    # preparing variables and performing time buffer\n    status = 1\n    stationary = True\n    non_detection_count = 0\n    vs = VideoStream(usePiCamera=True).start()\n    CLASSES, COLORS, net, args = image_detection_setup()\n    # gpiozero's DistanceSensor configures its own trigger/echo pins,\n    # so no explicit pin setup is needed here\n    time.sleep(5.0)\n    # scanning-movement cycle\n    while status == 1:\n        detections, status = scan_image_for_objects(vs, net, CLASSES, COLORS, args, status)\n        stationary, non_detection_count, status = decide_and_perform_next_movement(detections, stationary, non_detection_count, status)\n    #cleanup\n    cv2.destroyAllWindows()\n    vs.stop()\n    return\n","repo_name":"jackstorrie/Jack-Storrie-CES-Project","sub_path":"CESProjectRobot/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":6987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29769397458","text":"class SimpleTreeNode:\n\n    def __init__(self, val, parent):\n        self.NodeValue = val # value stored in the node\n        self.Parent = parent # parent, or None for the root\n        self.Children = [] # list of child nodes\n\nclass SimpleTree:\n\n    def __init__(self, root):\n        self.Root = root # the root, may be None\n\n    def AddChild(self, ParentNode, NewChild): # your code for adding a new child node to an existing ParentNode\n        if self.Root is None:\n            self.Root = NewChild\n        else:\n            ParentNode.Children.append(NewChild)\n            NewChild.Parent = ParentNode\n\n    def DeleteNode(self, NodeToDelete):# your code for deleting the existing node NodeToDelete\n        node_up = NodeToDelete.Parent\n        if node_up == None:\n            self.Root = None\n            return\n        node_up.Children.remove(NodeToDelete)\n        NodeToDelete.Parent = None\n\n    def GetAllNodes(self):# your code for returning all tree nodes in a specific order\n        vizit = [] # must handle the cases where the root is None or the tree is only the root\n        stack = []\n        node = self.Root\n        if node == None:\n            return 
vizit\n        vizit.append(node)\n        while True:\n            for i in range(len(node.Children)):\n                stack.insert(0, node.Children[i])\n            if len(stack) == 0:\n                break\n            node = stack[0]\n            vizit.insert(0, stack[0])\n            stack.pop(0)\n        return vizit\n\n    def FindNodesByValue(self, val):# your code for finding nodes by value\n        vizit = []\n        stack = []\n        result = []\n        node = self.Root\n        if node == None: # if the root node is None\n            return vizit\n        if node.NodeValue == val: # check the root node itself\n            result.append(node)\n        while True:\n            for i in range(len(node.Children)):\n                stack.insert(0, node.Children[i])\n            if len(stack) == 0:\n                break\n            node = stack[0]\n            if node.NodeValue == val:\n                result.append(node)\n            vizit.insert(0, stack[0])\n            stack.pop(0)\n        return result\n\n    def MoveNode(self, OriginalNode, NewParent):# your code for moving a node together with its subtree -- # to become a child of node NewParent\n        if self.Root == None: # if the root node is None\n            return\n        if OriginalNode == None:\n            return\n        if NewParent == None:\n            return\n        if OriginalNode.Parent == NewParent:# if the nodes being moved are already arranged this way\n            return\n        else:\n            node_up = OriginalNode.Parent\n            index = 0\n            for i in range(len(node_up.Children)):\n                if node_up.Children[i] == OriginalNode:\n                    index = i\n            node_up.Children.remove(node_up.Children[index])\n            self.AddChild(NewParent, OriginalNode)\n\n    def Count(self):# total number of nodes in the tree\n        if self.Root == None: # if the root node is None\n            return 0\n        count = len(self.GetAllNodes()) # total number of nodes in the tree\n        return count\n    def LeafCount(self):# number of leaves in the tree\n        if self.Root == None:# if the root node is None\n            return 0\n        count = 0\n        for i in self.GetAllNodes():\n            if len(i.Children) == 0 or i.Children == None:\n                count += 1# count the leaves in the tree\n        return count\n","repo_name":"Cleankz/SimpleTree","sub_path":"Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15617659915","text":"import numpy as np\nimport pandas\n\ndef load_raw_data(images_path, labels_path, train_ind_path, val_ind_path, width=56, height=56, n_channels=3):\n    # Load images\n    with open(images_path, \"rb\") as f:\n        data = np.fromfile(f, dtype=np.uint8)\n    data = data.reshape(-1, height, width, n_channels)\n    data = data.transpose(0, 3, 1, 2)\n\n    # Load labels\n    df = pandas.read_csv(labels_path, header=None)\n    labels = df.to_numpy().T[0]\n\n    # Build train set and validation set\n    train_indices = np.load(train_ind_path)\n    val_indices = np.load(val_ind_path)\n    images_train, labels_train = data[train_indices], labels[train_indices]\n    images_test, labels_test = data[val_indices], labels[val_indices]\n\n    return images_train, images_test, labels_train, labels_test\n\ndef load_test_data(images_path, width=56, height=56, n_channels=3): \n    # Load images\n    with open(images_path, \"rb\") as f:\n        data = np.fromfile(f, dtype=np.uint8)\n    data = data.reshape(-1, height, width, n_channels)\n    data = data.transpose(0, 3, 1, 2)\n\n    return data","repo_name":"Scotchy/face_cls","sub_path":"dataset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74661231857","text":"#!/usr/bin/env python\n\nimport logging\nfrom itertools import combinations, product, groupby, count\n\nimport networkx as nx\nimport numpy as np\nfrom numpy.random import uniform\n\nfrom ..struct.hetnet import HetNet\n\nlog = 
logging.getLogger()\n\nCOLOR = 'color'\n\n\ndef generate_abstract(sizes, probabilities, colors=None):\n if colors is not None:\n assert len(sizes) == len(colors)\n else:\n colors = list(range(len(sizes)))\n\n params = {color: {} for color in colors}\n probabilities = np.array(probabilities)\n\n hn = HetNet(params=params)\n\n for i, color, size in zip(count(), colors, sizes):\n for j in range(size):\n hn.add_node(\"{}:{}\".format(color, j), {COLOR: color})\n\n for u, v in product(range(size), repeat=2):\n if uniform() < probabilities[i, i]:\n hn.add_edge(\"{}:{}\".format(color, u), \"{}:{}\".format(color, v))\n\n for (i, c1), (j, c2) in combinations(enumerate(colors), 2):\n for u, v in product(range(sizes[i]), range(sizes[j])):\n if uniform() < probabilities[i, j]:\n hn.add_edge(\"{}:{}\".format(c1, u), \"{}:{}\".format(c2, v))\n\n return hn\n\n\ndef draw_abstract(hn, colors, pos=None):\n if pos is None:\n pos = nx.spring_layout(hn, iterations=175, k=(2 / len(hn)))\n\n color_dict = dict(zip(sorted(hn.colors), sorted(colors)))\n\n for i, nodes in groupby(sorted(hn, key=hn.get_color), key=hn.get_color):\n nx.draw_networkx_nodes(hn,\n pos,\n node_size=30,\n nodelist=list(nodes),\n node_color=color_dict[i])\n\n def ge(edge):\n return sorted(map(hn.get_color, edge))\n\n for (c1, c2), edges in groupby(sorted(hn.edges(), key=ge), key=ge):\n nx.draw_networkx_edges(hn,\n pos,\n edgelist=list(edges),\n edge_color=(color_dict[c1] if c1 == c2 else 'grey'),\n alpha=(0.6 if c1 == c2 else 0.4))\n","repo_name":"cthoyt/hetnetana","sub_path":"src/hetnetana/generation/generate_abstract.py","file_name":"generate_abstract.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35544964784","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[54]:\n\n\n#import data\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.metrics import accuracy_score\n\n\n# In[55]:\n\n\ndf = pd.read_csv(\"Treasury.csv\")\ndf\n\n\n# In[56]:\n\n\ndf.drop(df.columns[0:2], axis=1)\ndf_x = df.iloc[:,5:7]\ndf_y = df.iloc[:,-1]\n\n\n# In[57]:\n\n\n#Split data with test size 30% of total observations\nfrom sklearn.model_selection import train_test_split\nX_train_knn, X_test_knn, y_train_knn, y_test_knn = train_test_split(df_x, df_y, test_size=0.3,\nrandom_state=1, stratify=df_y)\n\n\n# In[58]:\n\n\n#Standardize units\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nsc.fit(X_train_knn)\nX_train_sc = sc.transform(X_train_knn)\nX_test_sc = sc.transform(X_test_knn)\n\n\n# In[59]:\n\n\nfrom sklearn.metrics import accuracy_score\nfrom matplotlib.colors import ListedColormap\ndef plot_decision_regions(X, y, classifier, test_idx = None,\nresolution = 0.02):\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],\n alpha=0.8, 
c=colors[idx],\n marker=markers[idx], label=cl,\n edgecolor='black')\n if test_idx:\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0], X_test[:, 1],\n c='', edgecolor='black', alpha=1.0,\n linewidth=1, marker='o',\n s=100, label='test set')\n\n\n# In[60]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nk_range = range(1,26)\nscore = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors= k)\n knn.fit(X_train_sc, y_train_knn)\n y_pred = knn.predict(X_test_sc)\n score.append(accuracy_score(y_test_knn, y_pred))\n\n\n# In[61]:\n\n\noptimal_k = score.index(np.max(score)) + 1\n\n\n# In[62]:\n\n\noptimal_k\n\n\n# In[63]:\n\n\nscore\n\n\n# In[64]:\n\n\nknn_nk = KNeighborsClassifier(n_neighbors=optimal_k)\nknn_nk.fit(X_train_sc, y_train_knn)\nX_combined_std = np.vstack((X_train_sc, X_test_sc))\ny_combined = np.hstack((y_train_knn, y_test_knn))\nplot_decision_regions(X_combined_std, y_combined, classifier=knn_nk, test_idx=range(105,150))\nplt.xlabel('ctd_last_first')\nplt.ylabel('ctd1_percent')\nplt.legend(loc='upper left')\nplt.show()\n\n\n# In[65]:\n\n\n#Establish decision tree \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\ndf_x_dt = df.iloc[:,2:4]\ndf_y_dt = df.iloc[:,-1]\nX_train, X_test, y_train, y_test = train_test_split(df_x_dt, df_y_dt, test_size=0.3, random_state=1, stratify=df_y_dt)\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_sc_tree = sc.transform(X_train)\nX_test_sc_tree = sc.transform(X_test)\ntree = DecisionTreeClassifier(criterion='gini',max_depth=4, random_state\n=1)\ntree.fit(X_train_sc_tree, y_train)\nX_combined = np.vstack((X_train_sc_tree, X_test_sc_tree))\ny_combined = np.hstack((y_train, y_test))\nplot_decision_regions(X_combined ,y_combined, classifier=tree, test_idx=\nrange(105, 150))\nplt.xlabel('roll_start')\nplt.ylabel('roll_heart')\nplt.legend(loc='upper left')\nplt.show()\n\n\n# In[53]:\n\n\nprint(\"My name is Zihan Yu\")\nprint(\"My NetID is: zihanyu3\")\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Zihanyu619/IE598_F19_HW2","sub_path":"HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39568556360","text":"from typing import List\n\ndef parent(label):\n i = len(bin(label)) - 3\n return 2 ** (i - 1) + 2 ** i - 1 - label // 2\n\nclass Solution:\n def pathInZigZagTree(self, label: int) -> List[int]:\n\n ans = []\n while label >= 1:\n ans.append(label)\n label = parent(label)\n\n return ans[::-1]\n\nif __name__ == '__main__':\n s = Solution()\n ans = s.pathInZigZagTree(14)\n print(ans)\n\n # for i in range(1, 16):\n # print(i, parent(i))\n","repo_name":"Xupeiyi/leetcode","sub_path":"tree/medium/1104.py","file_name":"1104.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"33258324902","text":"from __future__ import unicode_literals\nfrom fipy.viewers.viewer import AbstractViewer\n\n__all__ = [\"MultiViewer\"]\nfrom future.utils import text_to_native_str\n__all__ = [text_to_native_str(n) for n in __all__]\n\nclass MultiViewer(AbstractViewer):\n \"\"\"\n Treat a collection of different viewers (such for different 2D plots\n or 1D plots with different axes) as a single viewer that will `plot()`\n all subviewers simultaneously.\n 
\"\"\"\n def __init__(self, viewers):\n \"\"\"\n Parameters\n ----------\n viewers : :obj:`list` of ~fipy.viewers.viewer.Viewer\n the viewers to bind together\n \"\"\"\n if type(viewers) not in [type([]), type(())]:\n viewers = [viewers]\n self.viewers = viewers\n\n def setLimits(self, limits={}, **kwlimits):\n kwlimits.update(limits)\n for viewer in self.viewers:\n viewer.setLimits(**kwlimits)\n\n def plot(self):\n for viewer in self.viewers:\n viewer.plot()\n","repo_name":"usnistgov/fipy","sub_path":"fipy/viewers/multiViewer.py","file_name":"multiViewer.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":436,"dataset":"github-code","pt":"57"} +{"seq_id":"71748567217","text":"# ------------------------------------------------------------------------------\n# Python Scripts scriptarium/[list-words.py]\n# (c) balarabe@protonmail.com\n# ------------------------------------------------------------------------------\n\n# list-words lists the words in every text file\n# in the current folder and its subfolders.\n\nimport re\n\nfrom typing import List\n\nfrom sys import argv as sys_argv\n\nfrom constants import text_file_exts\nfrom functions import list_files\n\ndef split_words(s: str) -> List[str]:\n mode = ''\n ar = []\n w = ''\n for c in s:\n if c.isupper():\n if mode == 'l' or mode == 'd':\n ar.append(w)\n w = c\n else:\n w += c\n mode = 'u'\n\n elif c.islower():\n mode = 'l'\n w += c\n\n elif c.isdigit():\n mode = 'd'\n w += c\n\n else:\n mode = ''\n if w != '':\n ar.append(w)\n w = ''\n\n if w != '':\n ar.append(w)\n\n return ar\n\ndef test_split_words():\n test_cases = [\n ['Hello', ['Hello']],\n ['HelloWorld', ['Hello', 'World']],\n ['helloWorld', ['hello', 'World']],\n ['yard_number', ['yard', 'number']],\n ]\n for tc in test_cases:\n input = tc[0]\n want = tc[1]\n have = split_words(input)\n if have != want:\n print('ok')\n raise Exception(f'words(\\'{input}\\') -> {have} expected: {want}')\n\ntest_split_words()\n\n#-------------------------------------------------------------------------------\n\nprint('\\n'*10)\n\ndictionary = {}\nif len(sys_argv) > 1:\n for fname in sys_argv:\n if fname == sys_argv[0]:\n continue\n with open(fname, mode='r', encoding='utf-8') as fl:\n for s in fl.read().splitlines():\n i = s.find('#')\n if i != -1:\n s = s[:i]\n s = s.strip()\n if s != '':\n if s in dictionary:\n print('duplicate:', s)\n else:\n dictionary[s] = True\n u = s.upper()\n if u != s:\n if u in dictionary:\n print('duplicate:', u)\n else:\n dictionary[u] = True\n print('loaded dictionary')\n\nfor fname in list_files('.'):\n\n # skip file types not listed in text_file_exts\n if not next((ext for ext in text_file_exts if fname.endswith(ext)), False):\n continue\n\n print('\\n'*2 + '-'*80 + '\\n' + fname)\n words = []\n ln = ''\n\n with open(fname, mode='r', encoding='utf-8') as fl:\n s = fl.read()\n ln = ''\n\n for c in s:\n if (c == '_') \\\n or (c >= '0' and c <= '9') \\\n or (c >= 'a' and c <= 'z') \\\n or (c >= 'A' and c <= 'Z'):\n ln += c\n continue\n if ln != '' and not ln in words:\n words.append(ln)\n ln = ''\n\n if ln != '' and not ln in words:\n words.append(ln)\n\n words.sort()\n for ln in words:\n\n if ln == '':\n continue\n if ln[0].isdigit():\n continue\n if ln in dictionary:\n continue\n if ln[:1].lower()+ln[1:] in dictionary:\n continue\n\n allWordsInDict = True\n for w in split_words(ln):\n lw = w[:1].lower()+w[1:]\n if (w not in dictionary) and (lw not in dictionary):\n allWordsInDict = False\n break\n if 
allWordsInDict:\n continue\n\n print(ln)\n\nquit()\n\n# end\n","repo_name":"balacode/scriptarium","sub_path":"list-words.py","file_name":"list-words.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1376030511","text":"# \"infinte\" while with escape mechanism\n# keep reading and adding numbers until the number entered is 0\ns = 0\nwhile True:\n try:\n num = int(input(\"Please give me a number (0 to quit) \"))\n if num == 0:\n break\n s += num\n except ValueError:\n print(\"That was not a valid number\")\n continue\n print(f\"Intermediate sum so far is {s}\")\n\nprint(s)\n","repo_name":"itb-ie/DrivenetsAutomationCourse2","sub_path":"whille_examples2.py","file_name":"whille_examples2.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22678995993","text":"#the main for the gui\r\n\r\nimport gui\r\nimport sys\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\ndef main():\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = gui.Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"gff127/cmpsc487_hw1","sub_path":"admin_view.py","file_name":"admin_view.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"25063062290","text":"class Settings:\r\n \"\"\"A class to store all settings for Alien Invasion.\"\"\"\r\n\r\n def __init__(self):\r\n # Screen settings\r\n self.screen_width = 1200\r\n self.screen_height = 800\r\n self.bg_color = (0, 0, 0)\r\n self.ship_limit = 3\r\n # Maximum amount of lives is 3!\r\n if self.ship_limit > 3:\r\n raise Exception(\"Maximum amount of ships reached\")\r\n\r\n # Bullet settings\r\n self.bullet_speed = 2\r\n self.bullet_width = 8\r\n self.bullet_height = 17\r\n self.bullet_color = (255, 255, 255)\r\n if self.bullet_color != (255, 255, 255):\r\n raise Exception(\"Wrong Bullet color used!\")\r\n self.bullets_allowed = 10\r\n if self.bullets_allowed >= 11:\r\n raise Exception(\"Maximum amount of Bullets reached!\")\r\n\r\n # Alien settings\r\n self.fleet_drop_speed = 10\r\n\r\n # How quickly the game speeds up\r\n self.speedup_scale = 1.3\r\n if self.speedup_scale >= 1.8:\r\n raise Exception(\"Speedup_Scale Too High\")\r\n\r\n # How quickly the alien point values increase\r\n self.score_scale = 1.5\r\n\r\n self.initialize_dynamic_settings()\r\n\r\n def initialize_dynamic_settings(self):\r\n \"\"\"Settings that change throughout the game.\"\"\"\r\n self.ship_speed = 4\r\n # Maximum ship starting speed is 5\r\n if self.ship_speed > 5:\r\n raise Exception(\"Starting Speed too High!\")\r\n self.bullet_speed = 3.0\r\n self.alien_speed = 1.0\r\n\r\n # fleet_direction of 1 represents right; -1 represents left\r\n self.fleet_direction = 1\r\n\r\n # Scoring\r\n self.alien_points = 50\r\n\r\n def increase_speed(self):\r\n \"\"\"Increase speed settings.\"\"\"\r\n self.ship_speed *= self.speedup_scale\r\n self.bullet_speed *= self.speedup_scale\r\n self.alien_speed *= self.speedup_scale\r\n\r\n self.alien_points = int(self.alien_points * 
self.score_scale)","repo_name":"Steve-v427/Aliens","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"3031082604","text":"import socket\nimport struct\n\n\ndef sendInt(s, n):\n data = struct.pack('!i', n)\n s.send(data)\n\n\nif __name__ == '__main__':\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n a = int(input(\"Latura 1: \"))\n b = int(input(\"Latura 2: \"))\n c = int(input(\"Latura 3: \"))\n sock.connect(('localhost', 1921))\n sendInt(sock, a)\n sendInt(sock, b)\n sendInt(sock, c)\n data = sock.recv(100)\n isTriangle = struct.unpack('!?', data)[0]\n print(isTriangle)\n except (ConnectionRefusedError, ConnectionAbortedError):\n print(\"Connection was refused\")\n exit(0)\n except struct.error:\n print('Received data was different than expected')\n finally:\n sock.close()\n","repo_name":"Halex193/Computer-Networks-Lab","sub_path":"Triangle/clientTCP.py","file_name":"clientTCP.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7171307394","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 8 12:09:09 2019\r\n\r\n@author: Cool\r\n\"\"\"\r\n\r\nimport wget\r\nimport os\r\nimport zipfile\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras import Model\r\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\r\nfrom tensorflow.keras.optimizers import RMSprop\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\n\r\n#%%\r\n\"\"\" Traning Data \"\"\"\r\n'''\r\nurl = \"https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip\"\r\n\r\nwget.download(url, out = \"C:/Users/cool/Documents/Analytics/tensorFlow/horse-or-human.zip\")\r\n\r\nlocal_zip = 'horse-or-human.zip'\r\nzip_ref = zipfile.ZipFile(local_zip, 'r')\r\nzip_ref.extractall('horse_or_humans/training')\r\nzip_ref.close()\r\n\r\n## Validation data\r\n\r\nurl1 = \"https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip\"\r\n\r\nwget.download(url1, out = \"C:/Users/cool/Documents/Analytics/tensorFlow/validation-horse-or-human.zip\")\r\n\r\nlocal_zip = 'validation-horse-or-human.zip'\r\nzip_ref = zipfile.ZipFile(local_zip, 'r')\r\nzip_ref.extractall('horse_or_humans/validation')\r\nzip_ref.close()\r\n'''\r\n#%%\r\n\"\"\" Get data length \"\"\"\r\n\r\ntrain_horses_dir = \"horse_or_humans/training/horses/\"\r\ntrain_humans_dir = \"horse_or_humans/training/humans/\"\r\nvalidation_horses_dir = \"horse_or_humans/validation/horses/\"\r\nvalidation_humans_dir = \"horse_or_humans/validation/humans/\"\r\n\r\ntrain_horses_fnames = os.listdir(train_horses_dir)\r\ntrain_humans_fnames = os.listdir(train_humans_dir)\r\nvalidation_horses_fnames = os.listdir(validation_horses_dir)\r\nvalidation_humans_fnames = os.listdir(validation_humans_dir)\r\n\r\nprint(len(train_horses_fnames))\r\nprint(len(train_humans_fnames))\r\nprint(len(validation_horses_fnames))\r\nprint(len(validation_humans_fnames))\r\n\r\n#%%\r\n\"\"\" Get the previously trained model \"\"\"\r\n\r\n#url2 = \"https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5\"\r\n\r\n#wget.download(url2, out = \"inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5\")\r\n#import h5py\r\n\r\n\r\n\r\nlocal_weights_file = 
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'\r\n\r\npre_trained_model = InceptionV3(input_shape = (150, 150, 3), \r\n include_top = False, \r\n weights = None)\r\n\r\npre_trained_model.load_weights(local_weights_file)\r\n\r\nfor layer in pre_trained_model.layers:\r\n layer.trainable = False\r\n\r\n#pre_trained_model.summary() #its huge, dont print\r\n\r\nlast_layer = pre_trained_model.get_layer('mixed3')\r\nprint('last layer output shape: ', last_layer.output_shape)\r\nlast_output = last_layer.output\r\n\r\n#%%\r\n\r\n\r\n# Flatten the output layer to 1 dimension\r\nx = layers.Flatten()(last_output)\r\n# Add a fully connected layer with 1,024 hidden units and ReLU activation\r\nx = layers.Dense(1024, activation='relu')(x)\r\n# Add a dropout rate of 0.2\r\nx = layers.Dropout(0.2)(x) \r\n# Add a final sigmoid layer for classification\r\nx = layers.Dense (1, activation='sigmoid')(x) \r\n\r\nmodel = Model( pre_trained_model.input, x) \r\n\r\nmodel.compile(optimizer = RMSprop(lr=0.0001), \r\n loss = 'binary_crossentropy', \r\n metrics = ['acc'])\r\n\r\n#%%\r\n# Define our example directories and files\r\nbase_dir = 'C:/Users/cool/Documents/Analytics/tensorFlow/horse_or_humans/'\r\n\r\ntrain_dir = os.path.join( base_dir, 'training/')\r\nvalidation_dir = os.path.join( base_dir, 'validation/')\r\n\r\n\r\n# Add our data-augmentation parameters to ImageDataGenerator\r\ntrain_datagen = ImageDataGenerator(rescale = 1./255.,\r\n rotation_range = 40,\r\n width_shift_range = 0.2,\r\n height_shift_range = 0.2,\r\n shear_range = 0.2,\r\n zoom_range = 0.2,\r\n horizontal_flip = True)\r\n\r\n# Note that the validation data should not be augmented!\r\ntest_datagen = ImageDataGenerator( rescale = 1.0/255. )\r\n\r\n# Flow training images in batches of 20 using train_datagen generator\r\ntrain_generator = train_datagen.flow_from_directory(train_dir,\r\n batch_size = 20,\r\n class_mode = 'binary', \r\n target_size = (150, 150)) \r\n\r\n# Flow validation images in batches of 20 using test_datagen generator\r\nvalidation_generator = test_datagen.flow_from_directory( validation_dir,\r\n batch_size = 20,\r\n class_mode = 'binary', \r\n target_size = (150, 150))\r\n\r\n#%%\r\n\"\"\" Fit the model \"\"\"\r\n\r\nhistory = model.fit_generator(\r\n train_generator,\r\n validation_data = validation_generator,\r\n steps_per_epoch = 100,\r\n epochs = 10,\r\n validation_steps = 50,\r\n verbose = 2)\r\n\r\n#%%\r\n\r\nimport matplotlib.pyplot as plt\r\nacc = history.history['acc']\r\nval_acc = history.history['val_acc']\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\n\r\nepochs = range(len(acc))\r\n\r\nplt.plot(epochs, acc, 'r', label='Training accuracy')\r\nplt.plot(epochs, val_acc, 'b', label='Validation accuracy')\r\nplt.title('Training and validation accuracy')\r\nplt.legend(loc=0)\r\nplt.figure()\r\nplt.show()\r\n\r\n#%%","repo_name":"ranjitmishra/CNN-Deep-Learning-TensorFlow-Keras","sub_path":"transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"33741217564","text":"import urllib.request\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nimport json\nimport logging\n\nclass MessageHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n #Игнорируем favicon.ico и ставим уровень логгирования INFO\n if self.path.endswith('favicon.ico'):\n return\n \n logging.info(\"GET request,\\nPath: %s\\nHeaders:\\n%s\\n\",str(self.path), 
str(self.headers))\n        '''\n        Handle the GET request. If the supplied value is less than or equal\n        to zero, an error message is returned.\n        '''\n        try:\n            m=self.path.replace('/','')\n            \n            if float(m) <= 0:\n                self.send_error(400,'Wrong input: %s' % self.path)\n                return\n            \n            self.send_response(200)\n            self.send_header('Content-type', 'application/json; charset=utf-8')\n            self.end_headers()\n            a = urllib.request.urlopen(\"https://www.cbr-xml-daily.ru/daily_json.js\").read()\n            jsonResponse = json.loads(a.decode('utf-8'))\n            USD = jsonResponse['Valute']['USD']\n            z = USD[\"Value\"]*float(m)\n            # return JSON with the conversion result\n            self.wfile.write(json.dumps(\n                {'currency':'USD',\n                 'requestedValue':m,\n                 'resultingValue':round(z,2), # round the value\n                 'requestVersion':self.request_version,\n                 'protocolVersion':self.protocol_version},\n                ensure_ascii=False).encode('utf-8'))\n        except ValueError:\n            self.send_error(400,'Wrong input: %s' % self.path)\n        \n        \nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO) \n    server_address = ('', 80)\n    httpd = HTTPServer(server_address, MessageHandler)\n    httpd.serve_forever()\n","repo_name":"BonBaron/converter","sub_path":"src/Converter_USD-RUB.py","file_name":"Converter_USD-RUB.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32845703038","text":"import datetime\nfrom math import inf \nimport os\nimport time \n\nfrom astropy.io import fits\nimport numpy as np\n\"\"\"\nGiven a camera in video mode, acquire a logical grouping of frames.\n\nIn many experiments, it is necessary to logically group together a series of \nimages which are acquired by a camera which is \"rolling\" and triggered at arbitrary \ntimes, potentially unknown by software. It is likewise necessary to gracefully handle missed \ntriggers, avoiding situations where the logical grouping of shots becomes \"shifted by one\". \n\nThis function addresses this problem. Given a camera cam which is in video mode, it scans for a set of \nframes of length num_frames, returning a tuple (frame_1, frame_2, ... frame_n) if all frames are found, \nand None otherwise. \n\nREMARK: This function is in general blocking while waiting for frame input, and is not suitable for cases where \nother code is to be executed in the same loop. For these cases, see acquire_rolling_frame_sequence_nonblocking. \nIt is likewise not applicable to cases where the time spacing between the first and last frame in a logical sequence \nis smaller than the time spacing between the last frame in one logical sequence and the first frame in the next. \n\n\nParameters:\n\ncam: The camera object. It is assumed that the camera has been initialized as necessary for acquiring the logical \ngrouping of frames, including being put into video mode with an appropriate buffer\nand configuring the trigger as necessary.\n\nnum_frames: (int) The number of frames to be acquired. If at any point the camera has _more_ than this number of frames available in its \nvideo buffer, it is assumed that something has gone wrong, and the buffer is flushed.\n\nframe_timeout: (float) The timeout for \"abandoning\" acquisition of a sequence of frames. If the camera has a number \nof frames greater than 0 but less than num_frames, the function will block for a time given by frame_timeout while \nwaiting for the correct number of frames to come in. If they do not come in, the function flushes the camera buffer, \nassuming that a frame has been missed. 
This timeout should be set longer than the time between the first frame in the sequence \nand the last, but shorter than the time between the last frame in one logical sequence and the first frame in the next.\n\nReturns:\n\nframes: A tuple of the acquired frames, in the order of acquisition, or else None if the frames were not found. \n\"\"\"\ndef acquire_rolling_frame_sequence(cam, num_frames, frame_timeout):\n    frames_available = cam.get_video_buffer_num_available_frames()\n    if frames_available == 0:\n        return None\n    else:\n        sequence_start_time = time.time() \n        sequence_run_time = 0.0\n        while sequence_run_time <= frame_timeout:\n            if frames_available == num_frames:\n                frame_list = [] \n                for i in range(num_frames):\n                    frame_list.append(cam.get_video_frame())\n                return tuple(frame_list)\n            elif frames_available > num_frames:\n                cam.flush_video_buffer()\n                return None \n            sequence_run_time = time.time() - sequence_start_time\n            # re-poll the buffer on each pass; the count read before the loop\n            # would otherwise never change and new frames would be missed\n            frames_available = cam.get_video_buffer_num_available_frames()\n        return None\n\n\n\"\"\"\nAcquires frames analogously to the above, but in a (more complicated) non-blocking implementation.\n\nParameters:\n\nt: A float representing the time that the function began waiting for frames. When the function \nis first called, this should be passed via a time.time() call - subsequently, it should be the value \nreturned by this function.\n\ncam: Identical to acquire_rolling_frame_sequence, above. \n\nnum_frames: Ditto. \n\nframe_timeout: Ditto.\n\nReturns: A tuple (t, frames), where t is a time when the camera began waiting for frames and frames \nis the tuple of frames - or None - as described above. Note that t will be equal to inf if the camera \nis _not_ waiting for frames.\"\"\"\n\n\ndef acquire_rolling_frame_sequence_nonblocking(t, cam, num_frames, frame_timeout):\n    frames_available = cam.get_video_buffer_num_available_frames()\n    if frames_available == 0 or frames_available > num_frames:\n        frames = None \n        t = inf \n        cam.flush_video_buffer()\n        return (t, frames)\n    elif frames_available == num_frames:\n        frames_list = [] \n        for i in range(num_frames):\n            frames_list.append(cam.get_video_frame())\n        frames = tuple(frames_list)\n        t = inf \n        return (t, frames)\n    else:\n        frames = None\n        current_time = time.time()\n        if current_time - t > frame_timeout:\n            t = inf \n            cam.flush_video_buffer()\n        return (t, frames)\n\n\n\"\"\"\nRegroup a series of numpy-formatted frames. \n\nGiven a list of frames - either 2D or 3D numpy arrays, depending on whether there are \nmultiple exposures per frame - regroup them according to the indices in exposure_indices.\n\nParameters:\n\nframes_list: An iterable of frames from a camera. Frames are assumed to be in numpy array format, \nbeing 2D numpy arrays if there is only one exposure per frame, and 3D arrays otherwise.\n\nexposure_indices_list: An iterable of iterables, each of them a set of indices to group into one logical \ngrouping of frames. If, for example, frames is of length 3, with each frame containing 2 exposures, then \nsetting exposure_indices_list to [[1, 3, 5], [2, 4, 6]] would group the odd- and even-numbered exposures together, \nwhere the exposure index is incremented first within a frame, then between frames.\n\nWARNING: Observe that exposure indices index frames from 1, not 0. This choice was made for consistency with an \nexternal standard for frame labeling. 
\n\"\"\"\ndef regroup_numpy_frames(frames, exposure_indices_list):\n first_frame = frames[0]\n if len(first_frame.shape) == 2:\n exposures = frames\n elif len(first_frame.shape) == 3:\n exposures = [] \n for frame in frames:\n for exposure in frame:\n exposures.append(exposure)\n else:\n raise ValueError(\"Incorrect shape for frames.\") \n exposure_array = np.array(exposures)\n return_tuple_list = []\n for exposure_indices in exposure_indices_list:\n exposure_indices = np.array(exposure_indices)\n #Convert to zero indexing\n zero_indexed_exposure_indices = exposure_indices - 1\n regrouped_exposures = exposure_array[zero_indexed_exposure_indices] \n regrouped_exposures_tuple = tuple(regrouped_exposures)\n return_tuple_list.append(regrouped_exposures_tuple)\n return tuple(return_tuple_list)\n\n\n\"\"\"\nSave frames to disk. \n\nGiven a tuple of frames as provided by the functions above, save the data to disk at a specified path and \nin a specified format.\n\nParameters:\n\nframes: A tuple of frames in numpy array format. \npath_sans_extension: A path at which to save the data.\n\"\"\"\ndef save_frames(frames, save_path):\n frame_numpy_stack = np.stack(frames)\n save_format = save_path.split(\".\")[-1] \n if save_format == \"fits\":\n fits.writeto(save_path, frame_numpy_stack)\n else:\n raise ValueError(\"Unsupported saving format.\")","repo_name":"ewolf20/satyendra","sub_path":"code/instruments/cameras/rolling_camera_functions.py","file_name":"rolling_camera_functions.py","file_ext":"py","file_size_in_byte":7048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7263563568","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom . import views\n\n# addfilm_url = reverse('admin:filmbase_film_add')\napp_name = 'filmscrap'\n\nurlpatterns = [\n url(r'^search/$', views.search, name='search'),\n url(r'^searching/$', views.searching, name='searching'),\n # url(r'^addfilm/$', addfilm_url, name = 'addfilm'),\n url(r'film/new/$', views.film_new, name='film_new'),\n url(r'^video/dechaos/$', views.video_dechaos, name='video_dechaos'),\n url(r'^video/addbylink/$', views.video_addbylink, name='video_addbylink'),\n url(r'^video/link2form/$', views.video_link2form, name='video_link2form'),\n\n url('', views.filmscrap, name='filmscrap'),\n # url('^post/new/', admin.\n # views.post_new, name='post_new'),\n\n\n # url(r'^get_suggs/$', views.get_suggested, name='get_suggested'),\n # url(r'^get_videos/$', views.get_videos, name='get_videos'),\n # url(r'^get_hidden/$', views.get_hidden, name='get_hidden'),\n\n # url(r'^$', views.suggested, name='suggested'),\n\n # url(r'^post/(?P\\d+)/$', views.post_detail, name='post_detail'),\n # url(r'lists/$', views.lists, name='lists'),\n # url(r'lists/(?P[\\w-]+)/$', views.list, name='list'),\n # url('', views.lists, name='lists' ),\n\n # (?P[\\w-]+)/$\n# url(r'lists//', views.list, name='list'),\n\n# path('lists/(?P[-\\w]+)/$', views.list, name='list'),\n\n]\n","repo_name":"chapaeffff/drugoekinotop","sub_path":"filmscrap/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"2965174976","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 5 19:15:09 2018\r\n\r\n@author: Prongs\r\n\"\"\"\r\nimport itertools \r\nfh=open('rosalind_lexf.txt','r')\r\nline=fh.readline().strip('\\n')\r\nlist=[]\r\nfor i in range(len(line)):\r\n if(line[i]==' '):\r\n 
continue\r\n    else:\r\n        list.append(line[i])\r\n\r\n\r\norder=[]\r\nn=3\r\nfor word in itertools.product(list,repeat=n):\r\n    order.append(''.join(word))\r\n    \r\norder.sort()\r\nfor i in range(len(order)):\r\n    print(order[i])","repo_name":"ChaoticMarauder/Genomics","sub_path":"lexicography.py","file_name":"lexicography.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34997318376","text":"# Write a program that takes a real number as input and prints the sum of its digits.\n# Example:\n# - 6782 -> 23\n# - 0.56 -> 11\n\ndef sum_elements(a):\n    sp = list(str(a))\n    for i in sp:\n        if i == '.':\n            sp.remove('.')\n    sum = 0\n    for i in sp:\n        sum += int(i)\n    return sum\n\n\nx = (input('Enter a real number: '))\ntry:\n    x = float(x)\n    print(sum_elements(x))\nexcept:\n    print('Invalid input; only real numbers are accepted')\n\n\n\n# Write a program that takes a number N as input and outputs the set of products of the numbers from 1 to N.\n# Example:\n# - for N = 4 the result is [ 1, 2, 6, 24 ] (1, 1*2, 1*2*3, 1*2*3*4)\n\nfrom math import factorial\n\n\ndef set_multiplications(n):\n    sp = []\n    for i in range(1, n+1):\n        sp.append(factorial(i))\n    return sp\n\n\nx = int(input('Enter the number N: '))\nprint(set_multiplications(x))\n\n","repo_name":"Challenger07022022/MyHomeWork_Python","sub_path":"HomeWork2.py","file_name":"HomeWork2.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11826441228","text":"from scipy.signal import lfilter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport serial\nimport time\n\n# Take N calibration samples and fit a straight line from them.\narduino = serial.Serial('/dev/ttyACM0', baudrate=9600, timeout=1.0)\n\nN = 200\nx = 0\n\nprint('Taking %i samples for calibration' % N)\nwith arduino:\n    i = 0\n    while i < N:\n        try:\n            line = arduino.readline()\n            if not line:\n                continue\n            p = np.fromstring(line.decode('ascii', errors='replace'), sep=' ')\n            try:\n                x += float(p)\n                i += 1\n            except TypeError:\n                print('Retrying reading %i' % i)\n        except KeyboardInterrupt:\n            print('Calibration interrupted. 
Retry...')\n            break\n\na = float(x/N)\nm = (1023-a)/13.5\nn = -m*a\n\nprint('Calibration done with a = %f' % a)\nprint('m: ', m)\nprint('n: ', n)\ninput('ENTER to start the measurement')\n\n# Each sample is appended to a file until the user interrupts the program\nf = open('Data', 'a')\nwith arduino:\n    while True:\n        try:\n            line = arduino.readline()\n            if not line:\n                continue\n            p = np.fromstring(line.decode('ascii', errors='replace'), sep=' ')\n            try:\n                f.write('\\n'+str(p)+',')\n            except TypeError:\n                print(\"Retrying\")\n        except KeyboardInterrupt:\n            break\n\nf.close()\nprint('Done')\n","repo_name":"EduCarrasco/Codigos-utilizados-en-Trabajo-de-Titulo","sub_path":"BatteryACS712.py","file_name":"BatteryACS712.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"9787924196","text":"\"\"\" This module reads\n    - order data\n    - license data\n    - installation instructions\n    and sends emails with all available licenses to those parents and pupils who paid.\n    These emails contain installation instructions from the publishers (if available)\n    and hints if not all licenses are available yet.\n    This means that you can use this script several times, if you do not get all licenses\n    at once.\n\n    TODO:\n    THIS MODULE CONTAINS MUCH INFORMATION THAT IS NOT PASSED WELL\n    (E-MAIL CONFIG NOT AS PARAMETERS, TEXT HARD-CODED HERE RATHER THAN IN CONFIG-FILE).\n\"\"\"\n\nimport ebook_config\nimport ebook_functions\nimport json\nimport getpass\nimport smtplib\nfrom email.message import EmailMessage\nimport mimetypes\n\n#######################################################################################\ndry_run = False  # if true, no emails will be sent, but printed on the screen; if false, emails will be sent\n\n\n#######################################################################################\ndef get_json_data(filename):\n    data = {}\n    with open(filename, \"r\") as f:\n        data = json.loads(f.read())\n\n    return data\n\n\n#######################################################################################\ndef send_licenses(order_data, license_data, installation_info):\n    # ask for password only, if e-mails will be sent\n    password = \"\"\n    if not dry_run:\n        password = getpass.getpass()\n\n    for pupil, data in order_data.items():\n        numberNewLicenses = (\n            0  # a mail will only be sent if there is a new license for this pupil\n        )\n        try:\n            data[\"bezahlt\"]\n        except:\n            print(\"Skipping pupil \" + pupil + \"... 
(did not pay)\")\n else:\n try:\n data[\"Lizenzen zugewiesen\"]\n except:\n data[\"Lizenzen zugewiesen\"] = {}\n finally:\n print(\"==============================================\")\n print(pupil)\n print(\"----------------------------------------------\")\n msg = EmailMessage()\n msg[\"Subject\"] = \"E-Book-Lizenzen für \" + pupil\n msg[\"From\"] = ebook_config.EMAIL_SENDER_ADDRESS\n msg[\"To\"] = data[\"E-Mail\"]\n msg[\"Cc\"] = ebook_config.EMAIL_SENDER_ADDRESS\n text = \"Liebe(r) \" + data[\"Erziehungsberechtigte/r\"] + \",\" + \"\\n\"\n text += \"\\n\"\n text += (\n 'mit dieser E-Mail bekommen Sie E-Book-Lizenzen für Ihr Kind \"'\n + pupil\n + '\".\\n'\n )\n text += \"\\n\"\n publishers = []\n for lic in data[\"Lizenzen\"]:\n if lic not in list(\n data[\"Lizenzen zugewiesen\"]\n ): # just look at licenses that have not been sent yet\n publisher = license_data[lic][\"Verlag\"]\n if license_data[lic][\"vergebeneLizenzen\"] < len(\n license_data[lic][\"Lizenzschluessel\"]\n ): # licenses are available\n numberNewLicenses += 1\n if (\n not publisher in publishers\n ): # just send installation instructions of publishers, from which licenses are sent\n publishers.append(publisher)\n data[\"Lizenzen zugewiesen\"][lic] = {}\n data[\"Lizenzen zugewiesen\"][lic][\n \"Lizenzschluessel\"\n ] = license_data[lic][\"Lizenzschluessel\"][\n license_data[lic][\"vergebeneLizenzen\"]\n ]\n license_data[lic][\"vergebeneLizenzen\"] += 1\n text += (\n \"- \"\n + lic\n + \": \"\n + license_data[lic][\"Buchbezeichnung\"]\n + \"\\n\"\n )\n text += (\n \" Lizenzschlüssel: \"\n + data[\"Lizenzen zugewiesen\"][lic][\"Lizenzschluessel\"]\n )\n text += \"\\n\"\n else:\n print(\"There are not enough licenses for \" + lic + \" !\")\n text += (\n \"- \"\n + lic\n + \": \"\n + license_data[lic][\"Buchbezeichnung\"]\n + \"\\n\"\n )\n text += \" Lizenzschlüssel fehlt noch - wird in einer separaten E-Mail verschickt, sobald er uns vorliegt.\"\n text += \"\\n\"\n\n text += \"\\n\\nUnten finden Sie Installationshinweise der verschiedenen Verlage, im Anhang Nutzungsbedingungen.\\n\"\n text += \"\\n\"\n text += \"Viele Grüße,\\n\"\n text += \"Vorname Nachname\"\n text += \"\\n\\n\"\n for publisher in publishers:\n text += (\n \"*** \"\n + publisher\n + \" ***********************************************\\n\"\n )\n for zeile in installation_info[publisher][\"Installation\"]:\n text += zeile + \"\\n\"\n text += \"\\n\\n\"\n\n msg.set_content(text)\n\n for publisher in publishers:\n ## Attachments\n # https://pythoncircle.com/post/719/sending-email-with-attachments-using-python-built-in-email-module/\n for attachment_filename in installation_info[publisher][\"Dateien\"]:\n mime_type, _ = mimetypes.guess_type(attachment_filename)\n mime_type, mime_subtype = mime_type.split(\"/\", 1)\n with open(attachment_filename, \"rb\") as ap:\n msg.add_attachment(\n ap.read(),\n maintype=mime_type,\n subtype=mime_subtype,\n filename=attachment_filename,\n )\n\n if numberNewLicenses > 0:\n if dry_run:\n print(msg)\n else:\n s = smtplib.SMTP(\n ebook_config.EMAIL_SERVER, port=ebook_config.EMAIL_PORT\n )\n s.starttls()\n s.ehlo()\n s.login(ebook_config.EMAIL_LOGIN, password)\n s.send_message(msg)\n s.quit()\n print(\"Sent to \" + msg[\"To\"])\n\n\n#######################################################################################\ndef main():\n order_data = ebook_functions.get_order_data_json(ebook_config.ORDER_DATA_JSON)\n license_data = get_json_data(ebook_config.EBOOK_LICENSES_JSON)\n installation_info = 
get_json_data(ebook_config.INSTALLATION_INSTRUCTIONS)\n send_licenses(order_data, license_data, installation_info)\n ebook_functions.dump_order_data(order_data, ebook_config.ORDER_DATA_JSON)\n\n # save updated license file\n json_string = json.dumps(license_data, indent=4)\n with open(ebook_config.EBOOK_LICENSES_JSON, \"w\") as f:\n print(json_string, file=f)\n\n\n#######################################################################################\nif __name__ == \"__main__\":\n main()\n","repo_name":"laicuda/elia","sub_path":"src/05_send_licenses.py","file_name":"05_send_licenses.py","file_ext":"py","file_size_in_byte":7885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43356138363","text":"# Embedded file name: scripts/client/gui/HUD2/features/Respawn/RespawnSilentCheckManager.py\nfrom debug_utils import LOG_DEBUG\nimport GlobalEvents\nfrom Event import EventManager, Event\nfrom gui.HUD2.hudFeatures import Feature\nREST_DELAY = 15\n\nclass RespawnSilentCheckManager(object):\n\n def __init__(self, features, needState):\n self._bigWorld = features.require(Feature.BIG_WORLD)\n self._timer = features.require(Feature.TIMER_SERVICE)\n self._state = needState\n self._eventManager = EventManager()\n self.eRestDelay = Event(self._eventManager)\n self._lastCheckTime = 0\n self._currentCheckTime = 0\n self._isChecking = False\n self._subscribe()\n\n def startCheck(self, value = True):\n LOG_DEBUG(' RespawnSilentCheckManager: startCheck ', value)\n self._isChecking = value\n self._currentCheckTime = self._bigWorld.serverTime()\n self._lastCheckTime = self._bigWorld.serverTime()\n\n def _subscribe(self):\n self._timer.eUpdate1Sec += self._onUpdateTime\n GlobalEvents.onMouseEvent.insert(0, self.handleMouseEvent)\n GlobalEvents.onKeyEvent += self.handleKeyEvent\n GlobalEvents.onAxisEvent += self.handleAxisEvent\n\n def _unsubscribe(self):\n self._timer.eUpdate1Sec -= self._onUpdateTime\n GlobalEvents.onMouseEvent.remove(self.handleMouseEvent)\n GlobalEvents.onKeyEvent -= self.handleKeyEvent\n GlobalEvents.onAxisEvent -= self.handleAxisEvent\n\n def changeState(self, newValue):\n if newValue & self._state:\n self.startCheck()\n else:\n self.startCheck(False)\n\n def handleKeyEvent(self, event):\n self._onUpdateControlEvent()\n\n def handleMouseEvent(self, event):\n self._onUpdateControlEvent()\n\n def handleAxisEvent(self, event):\n self._onUpdateControlEvent()\n\n def _onUpdateTime(self):\n LOG_DEBUG(' RespawnSilentCheckManager: _onUpdateTime ', self._isChecking)\n if self._isChecking:\n self._currentCheckTime = self._bigWorld.serverTime()\n restTime = self._currentCheckTime - self._lastCheckTime\n if restTime > REST_DELAY:\n self.startCheck(False)\n self.eRestDelay()\n\n def _onUpdateControlEvent(self):\n if self._isChecking:\n self._lastCheckTime = self._bigWorld.serverTime()\n\n def dispose(self):\n self._unsubscribe()\n self._bigWorld = None\n self._timer = None\n self._eventManager = None\n return","repo_name":"SEA-group/wowp_scripts","sub_path":"client/gui/HUD2/features/Respawn/RespawnSilentCheckManager.py","file_name":"RespawnSilentCheckManager.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20172744451","text":"import argparse\nimport os\nimport glob\nimport json\nimport pandas as pd\nfrom subprocess import Popen, PIPE\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--pointcloud', type=str, help='Path of the 
area point cloud.')\nparser.add_argument('--detection_dir', type=str, help='Path of the directory that contains 2D detection and ground truth files.')\nparser.add_argument('--area_dir', type=str, help='2D-3D-S area directory path.')\nparser.add_argument('--output_csv', type=str, help='Output CSV file to be created that includes 3D mappings.')\n\nargs = parser.parse_args()\ndetection_files = glob.glob(os.path.join(args.detection_dir, 'detections', '*txt'))\n\nresults = pd.DataFrame(columns=['2d_Detection_File', 'Class', 'Confidence', 'x_min', 'y_min', 'x_max', 'y_max', 'x', 'y', 'z'])\nheader = True\nprint('Number of Detections: ', len(detection_files))\nfor i, d in enumerate(detection_files):\n print('{}/{} {}'.format(i, len(detection_files), d))\n with open(d) as d_file:\n dets = d_file.read().split('\\n')[:-1]\n\n for i in range(len(dets)):\n dets[i] = dets[i].split(' ')\n\n for j in range(1, 6):\n dets[i][j] = float(dets[i][j])\n #print(dets)\n pose_file_name = ('_').join(os.path.basename(d).split('_')[:-1]) + '_pose.json'\n pose_file_path = os.path.join(args.area_dir, 'data/pose', pose_file_name)\n \n with open(pose_file_path) as p_file:\n pose = json.load(p_file)\n \n x = pose['camera_location'][0]\n y = pose['camera_location'][1]\n z = pose['camera_location'][2]\n roll = pose['final_camera_rotation'][1]\n pitch = 1.57 - pose['final_camera_rotation'][0]\n yaw = 1.57 + pose['final_camera_rotation'][2]\n focal_length = pose['camera_k_matrix'][0][0]\n \n #print(x, y, z, roll, pitch, yaw, focal_length)\n center_points = []\n \n '''for det in dets:\n center_point_x = ((det[2] + det[4]) / 2) / 4\n center_point_y = ((det[3] + det[5]) / 2) / 4\n print('\\n\\n\\n')\n print(det)\n \n args_2d_to_3d = ['/home/ubuntu/stanford-detectron/pcl_mapping/center-point/convert_coordinates', args.pointcloud, str(x), str(y), str(z), str(roll), str(pitch), str(yaw), str(center_point_x), str(center_point_y), '1080', '1080', '0', '0.2', '0', str(focal_length), str(focal_length), 'range.png']\n print(center_point_x, center_point_y)\n p = Popen(args_2d_to_3d, stdout=PIPE, stderr=PIPE)\n output, err = p.communicate()\n point = [float(j) for j in output.split()]\n print(output)\n print(point)'''\n \n for det in dets:\n center_points.append(str((det[2] + det[4]) / 2))\n center_points.append(str((det[3] + det[5]) / 2))\n \n \n args_2d_to_3d = ['center-point-multi/convert_coordinates', args.pointcloud, str(x), str(y), str(z), str(roll), str(pitch), str(yaw), '1080', '1080', '0', '0.2', '0', str(focal_length), str(focal_length), 'range.png'] + center_points\n \n #print(args_2d_to_3d)\n p = Popen(args_2d_to_3d, stdout=PIPE, stderr=PIPE)\n output = p.stdout.read()\n center_points = [cp.split() for cp in output.split(b'\\n')[:-1]]\n #print(center_points)\n #print(err)\n \n for i in range(len(center_points)):\n results = results.append({'2d_Detection_File': os.path.basename(d), 'Class': dets[i][0], 'Confidence': float(dets[i][1]), 'x_min': float(dets[i][2]), 'y_min': float(dets[i][3]), 'x_max': float(dets[i][4]), 'y_max': float(dets[i][5]), 'x': float(center_points[i][0]), 'y': float(center_points[i][1]), 'z': float(center_points[i][2])}, ignore_index=True)\n \n if len(results.index) == 50:\n results = results.astype({'Confidence': float, 'x_min': float, 'y_min': float, 'x_max': float, 'y_max': float, 'x': float, 'y': float, 'z': float})\n results.to_csv(args.output_csv, index=False, header=header, mode = 'a')\n header = False\n results = results.iloc[0:0]\n \n \nresults = results.astype({'Confidence': float, 'x_min': 
float, 'y_min': float, 'x_max': float, 'y_max': float, 'x': float, 'y': float, 'z': float})\nresults.to_csv(args.output_csv, index=False, header=header, mode = 'a')\n\n \n \n \n \n","repo_name":"onatsahin/3d-object-detection-from-2d-dets-2d3dSemantics","sub_path":"src/3d_mapping/full_3d_inference.py","file_name":"full_3d_inference.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28458396433","text":"import logging\n\nimport boto3\nimport click\nfrom botocore.exceptions import ClientError, NoCredentialsError\n\nfrom s3do.utils import do_for_all_objects\n\n\ndef _tags_to_tagset(tags):\n result = []\n for t in tags:\n parts = t.split('=', 1)\n result.append({'Key': parts[0], 'Value': parts[1]})\n return result\n\n\ndef _get_callback(client, bucket, tagset):\n def tag_object(o):\n retries = 3\n while retries > 0:\n try:\n if 'VersionId' in o:\n client.put_object_tagging(\n Bucket=bucket,\n Key=o['Key'],\n VersionId=o['VersionId'],\n Tagging={\n 'TagSet': tagset\n }\n )\n else:\n client.put_object_tagging(\n Bucket=bucket,\n Key=o['Key'],\n Tagging={\n 'TagSet': tagset\n }\n )\n return\n except Exception as e:\n print(e)\n if retries > 0:\n retries -= 1\n logging.warning('Tagging failed for object: ' + bucket + '/' + o['Key'])\n\n return tag_object\n\n\ndef _tag_objects(client, bucket, prefix, tagset):\n do_for_all_objects(client, bucket, prefix, _get_callback(client, bucket, tagset))\n\n\n@click.command()\n@click.argument('bucket')\n@click.argument('prefix', required=False)\n@click.option('--tag', '-t', required=True, multiple=True)\ndef tag(bucket, prefix, tag):\n try:\n client = boto3.client('s3')\n tagset = _tags_to_tagset(tag)\n _tag_objects(client, bucket, prefix, tagset)\n except (ClientError, NoCredentialsError) as e:\n logging.error(e)\n","repo_name":"robvanderleek/s3do","sub_path":"s3do/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"26783707340","text":"import os\nfrom charmhelpers.core import hookenv\nfrom charmhelpers.core import host\nimport yaml\n\n\nMOUNT_BASE = '/srv/juju/volumes'\n\n\nclass VolumeConfigurationError(Exception):\n '''Volume configuration data is missing or invalid'''\n pass\n\n\ndef get_config():\n '''Gather and sanity-check volume configuration data'''\n volume_config = {}\n config = hookenv.config()\n\n errors = False\n\n if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):\n volume_config['ephemeral'] = True\n else:\n volume_config['ephemeral'] = False\n\n try:\n volume_map = yaml.safe_load(config.get('volume-map', '{}'))\n except yaml.YAMLError as e:\n hookenv.log(\"Error parsing YAML volume-map: {}\".format(e),\n hookenv.ERROR)\n errors = True\n if volume_map is None:\n # probably an empty string\n volume_map = {}\n elif not isinstance(volume_map, dict):\n hookenv.log(\"Volume-map should be a dictionary, not {}\".format(\n type(volume_map)))\n errors = True\n\n volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])\n if volume_config['device'] and volume_config['ephemeral']:\n # asked for ephemeral storage but also defined a volume ID\n hookenv.log('A volume is defined for this unit, but ephemeral '\n 'storage was requested', hookenv.ERROR)\n errors = True\n elif not volume_config['device'] and not volume_config['ephemeral']:\n # asked for permanent storage but did not define volume 
ID\n hookenv.log('Ephemeral storage was requested, but there is no volume '\n 'defined for this unit.', hookenv.ERROR)\n errors = True\n\n unit_mount_name = hookenv.local_unit().replace('/', '-')\n volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)\n\n if errors:\n return None\n return volume_config\n\n\ndef mount_volume(config):\n if os.path.exists(config['mountpoint']):\n if not os.path.isdir(config['mountpoint']):\n hookenv.log('Not a directory: {}'.format(config['mountpoint']))\n raise VolumeConfigurationError()\n else:\n host.mkdir(config['mountpoint'])\n if os.path.ismount(config['mountpoint']):\n unmount_volume(config)\n if not host.mount(config['device'], config['mountpoint'], persist=True):\n raise VolumeConfigurationError()\n\n\ndef unmount_volume(config):\n if os.path.ismount(config['mountpoint']):\n if not host.umount(config['mountpoint'], persist=True):\n raise VolumeConfigurationError()\n\n\ndef managed_mounts():\n '''List of all mounted managed volumes'''\n return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())\n\n\ndef configure_volume(before_change=lambda: None, after_change=lambda: None):\n '''Set up storage (or don't) according to the charm's volume configuration.\n Returns the mount point or \"ephemeral\". before_change and after_change\n are optional functions to be called if the volume configuration changes.\n '''\n\n config = get_config()\n if not config:\n hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)\n raise VolumeConfigurationError()\n\n if config['ephemeral']:\n if os.path.ismount(config['mountpoint']):\n before_change()\n unmount_volume(config)\n after_change()\n return 'ephemeral'\n else:\n # persistent storage\n if os.path.ismount(config['mountpoint']):\n mounts = dict(managed_mounts())\n if mounts.get(config['mountpoint']) != config['device']:\n before_change()\n unmount_volume(config)\n mount_volume(config)\n after_change()\n else:\n before_change()\n mount_volume(config)\n after_change()\n return config['mountpoint']\n","repo_name":"juju/juju","sub_path":"acceptancetests/repository/trusty/haproxy/hooks/charmhelpers/contrib/charmsupport/volumes.py","file_name":"volumes.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":2190,"dataset":"github-code","pt":"57"} +{"seq_id":"26107051292","text":"import unittest\nimport pandas as pd\nfrom main import clean_dataframe\n\nclass TestMain(unittest.TestCase):\n\n def test_clean_dataframe(self):\n raw_data = {\n \"Âge\": [\"25\", \"30\", \"\"],\n \"Taille\": [\"185\", \"190\", \"\"],\n \"Poids\": [\"80\", \"85\", \"\"],\n \"Salaire\": [\"€10M\", \"€20K\", \"€100K\"]\n }\n df = pd.DataFrame(raw_data)\n cleaned_df = clean_dataframe(df)\n\n expected_data = {\n \"Âge\": [25, 30],\n \"Taille\": [185, 190],\n \"Poids\": [80, 85],\n \"Salaire\": [10000000, 20000],\n }\n expected_df = pd.DataFrame(expected_data)\n\n try:\n pd.testing.assert_frame_equal(cleaned_df, expected_df)\n print(\"Test clean_dataframe : réussi\")\n \n except AssertionError:\n print(\"Test clean_dataframe : échoué\")\n raise","repo_name":"raphacarr/Python-avancee","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"40805047136","text":"from os import path\nfrom sphinxcontrib.confluencebuilder.logger import ConfluenceLogger as logger\nimport re\nimport requests\nimport zlib\n\n# inventory 
filename to hold intersphinx information\nINVENTORY_FILENAME = 'objects.inv'\n\n\ndef build_intersphinx(builder):\n \"\"\"\n build intersphinx information from the state of the builder\n\n Attempt to build a series of entries for an intersphinx inventory resource\n for Confluence builder generated content. This is only supported after\n processing a publishing event where page identifiers are cached to build URI\n entries.\n\n Args:\n builder: the builder\n \"\"\"\n def escape(string):\n return re.sub(\"\\\\s+\", ' ', string)\n\n if builder.cloud:\n pages_part = 'pages/{}/'\n else:\n pages_part = 'pages/viewpage.action?pageId={}'\n\n with open(path.join(builder.outdir, INVENTORY_FILENAME), 'wb') as f:\n # header\n f.write((\n '# Sphinx inventory version 2\\n'\n '# Project: {}\\n'\n '# Version: {}\\n'\n '# The remainder of this file is compressed using zlib.\\n'.format(\n escape(builder.env.config.project),\n escape(builder.env.config.version))).encode())\n\n # contents\n compressor = zlib.compressobj(9)\n\n for domainname, domain in sorted(builder.env.domains.items()):\n for name, dispname, typ, docname, raw_anchor, prio in sorted(\n domain.get_objects()):\n\n page_id = builder.state.upload_id(docname)\n if not page_id:\n continue\n\n target_name = f'{docname}#{raw_anchor}'\n target = builder.state.target(target_name)\n\n if raw_anchor and target:\n title = builder.state.title(docname)\n anchor = 'id-' + title + '-' + target\n anchor = anchor.replace(' ', '')\n\n # confluence will convert quotes to right-quotes for\n # anchor values; replace and encode the anchor value\n anchor = anchor.replace('\"', '”')\n anchor = anchor.replace(\"'\", '’') # noqa: RUF001\n anchor = requests.utils.quote(anchor.encode('utf-8'))\n else:\n anchor = ''\n\n uri = pages_part.format(page_id)\n if anchor:\n uri += '#' + anchor\n if dispname == name:\n dispname = '-'\n entry = ('%s %s:%s %s %s %s\\n' %\n (name, domainname, typ, prio, uri, dispname))\n logger.verbose('(intersphinx) ' + entry.strip())\n f.write(compressor.compress(entry.encode('utf-8')))\n\n f.write(compressor.flush())\n","repo_name":"sphinx-contrib/confluencebuilder","sub_path":"sphinxcontrib/confluencebuilder/intersphinx.py","file_name":"intersphinx.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"57"} +{"seq_id":"19687444238","text":"\"\"\"\n=============================================\nFind Regular Segments Using Compact Watershed\n=============================================\n\nThe watershed transform is commonly used as a starting point for many\nsegmentation algorithms. However, without a judicious choice of seeds, it\ncan produce very uneven fragment sizes, which can be difficult to deal with\nin downstream analyses.\n\nThe *compact* watershed transform remedies this by favoring seeds that are\nclose to the pixel being considered.\n\nBoth algorithms are implemented in the :func:`skimage.segmentation.watershed`\nfunction. 
To use the compact form, simply pass a ``compactness`` value greater\nthan 0.\n\"\"\"\n\nimport numpy as np\nfrom skimage import data, util, filters, color\nfrom skimage.segmentation import watershed\nimport matplotlib.pyplot as plt\n\ncoins = data.coins()\nedges = filters.sobel(coins)\n\ngrid = util.regular_grid(coins.shape, n_points=468)\n\nseeds = np.zeros(coins.shape, dtype=int)\nseeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1\n\nw0 = watershed(edges, seeds)\nw1 = watershed(edges, seeds, compactness=0.01)\n\nfig, (ax0, ax1) = plt.subplots(1, 2)\n\nax0.imshow(color.label2rgb(w0, coins, bg_label=-1))\nax0.set_title('Classical watershed')\n\nax1.imshow(color.label2rgb(w1, coins, bg_label=-1))\nax1.set_title('Compact watershed')\n\nplt.show()\n","repo_name":"scikit-image/scikit-image","sub_path":"doc/examples/segmentation/plot_compact_watershed.py","file_name":"plot_compact_watershed.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":5678,"dataset":"github-code","pt":"57"} +{"seq_id":"34961223948","text":"def calcInterest(Balance, AnnRate, MinPay):\n    minMonthPay = round(Balance * MinPay,2)\n    paidPrinciple = round(minMonthPay - (Balance * (AnnRate/12)),2)\n    Balance = round(Balance - paidPrinciple,2)\n    print ('minMonthPay', minMonthPay)\n    print (\"paidPrinciple\", paidPrinciple)\n    print (\"Balance\", Balance)\n    return Balance\n\n    \n\ndef getVariables():\n    inpBalance = float(input(\"Tell me your balance\"))\n    inpAnnRate = float(input(\"Annual Rate\"))\n    inpMinPay = float(input(\"Minimal Payment\"))\n    Balance = inpBalance\n\n    for i in range(1,13):\n        print (\"Month\",i)\n        Balance = calcInterest(Balance, inpAnnRate, inpMinPay)\n    \ngetVariables()\n\n\n\n\n    \n    \n","repo_name":"weiguxp/pythoncode","sub_path":"ProblemSets/CompoundInterest.py","file_name":"CompoundInterest.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29103609072","text":"arr = [0, 4, 1, 3, 1, 2, 4, 1]\n# positive integers; we need to know the maximum value\n# maximum value = 4\ncnt = [0] * 5  # array index n-1 = 4\n# count the frequencies\nfor val in arr:\n    cnt[val] += 1\nprint(cnt)\n# compute cumulative frequencies\nfor i in range(1, len(cnt)):\n    cnt[i] = cnt[i - 1] + cnt[i]\nprint(cnt)\n\n# for val in arr:\n#     cnt[val] += 1\n\n# # compute cumulative frequencies\n# numbers = []\n# for i in range(len(cnt)):\n#     for j in range(cnt[i]):\n#         # i is repeated cnt[i] times\n#         numbers.append(i)\n# print(numbers)\n\nlst = [1, 25, 3, 7, 6, 5, 8, 9, 1]\ncnt = [0]*26\nfor i in lst:\n    cnt[i] += 1\nnumbers = []\nfor j in range(len(cnt)):\n    for num in range(cnt[j]):\n        numbers.append(j) \nprint(numbers)","repo_name":"kimjy392/TIL","sub_path":"algorithms/day1/counting sort.py","file_name":"counting sort.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70541511540","text":"import torch\nimport torch_npu\nfrom torch_npu.npu import clear_npu_overflow_flag\nfrom deepspeed.runtime.fp16 import loss_scaler, unfused_optimizer\nfrom . 
import FLAG_SUPPORT_INF_NAN\n\n\n# loss_scaler============\ndef backward(self, loss, retain_graph=False):\n if not FLAG_SUPPORT_INF_NAN:\n clear_npu_overflow_flag()\n scaled_loss = loss * self.loss_scale\n scaled_loss.backward(retain_graph=retain_graph)\n\n\ndef has_overflow_serial(self, params):\n if not FLAG_SUPPORT_INF_NAN:\n grads = [p.grad.data for p in params if p.grad is not None]\n return torch_npu.npu.utils.npu_check_overflow(grads)\n\n for p in params:\n if p.grad is not None and self._has_inf_or_nan(p.grad.data):\n return True\n return False\n\n\nloss_scaler.LossScalerBase.backward = backward\nloss_scaler.DynamicLossScaler.has_overflow_serial = has_overflow_serial\n","repo_name":"Ascend/DeepSpeed","sub_path":"deepspeed_npu/adaptor_runtime_fp16_loss_scaler.py","file_name":"adaptor_runtime_fp16_loss_scaler.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"32895656412","text":"import torch\nimport numpy as np\nimport math\nimport onnx\nimport pydot\n\nfrom fpgaconvnet_optimiser.models.modules import ReLU\nfrom fpgaconvnet_optimiser.models.layers import Layer\n\nclass ReLULayer(Layer):\n def __init__(\n self,\n rows: int,\n cols: int,\n channels: int,\n coarse: int = 1,\n data_width: int = 16\n ):\n\n # initialise parent class\n super().__init__(rows, cols, channels, coarse, coarse,\n data_width=data_width)\n\n # save parameters\n self._coarse = coarse\n\n # init modules\n self.modules[\"relu\"] = ReLU(self.rows_in(), self.cols_in(), self.channels_in()/self.coarse)\n self.update()\n\n @property\n def coarse(self) -> int:\n return self._coarse\n\n @property\n def coarse_in(self) -> int:\n return self._coarse\n\n @property\n def coarse_out(self) -> int:\n return self._coarse\n\n @coarse.setter\n def coarse(self, val: int) -> None:\n self._coarse = val\n self._coarse_in = val\n self.coarse_out = val\n self.update()\n\n @coarse_in.setter\n def coarse_in(self, val: int) -> None:\n self._coarse = val\n self._coarse_in = val\n self._coarse_out = val\n self.update()\n\n @coarse_out.setter\n def coarse_out(self, val: int) -> None:\n self._coarse = val\n self._coarse_in = val\n self._coarse_out = val\n self.update()\n\n def layer_info(self,parameters,batch_size=1):\n Layer.layer_info(self, parameters, batch_size)\n parameters.coarse = self.coarse\n\n def update(self):\n self.modules['relu'].rows = self.rows_in()\n self.modules['relu'].cols = self.cols_in()\n self.modules['relu'].channels = int(self.channels_in()/self.coarse)\n\n def visualise(self,name):\n cluster = pydot.Cluster(name,label=name)\n\n for i in range(self.coarse):\n cluster.add_node(pydot.Node( \"_\".join([name,\"relu\",str(i)]), label=\"relu\" ))\n\n # get nodes in and out\n nodes_in = [ \"_\".join([name,\"relu\",str(i)]) for i in range(self.streams_in()) ]\n nodes_out = [ \"_\".join([name,\"relu\",str(i)]) for i in range(self.streams_out()) ]\n\n return cluster, nodes_in, nodes_out\n\n def functional_model(self,data,batch_size=1):\n\n batched_flag=False\n print(data.shape)\n if len(data.shape) > 3:\n batched_flag=True\n assert data.shape[1] == self.rows_in() , \"ERROR (data): invalid row dimension\"\n assert data.shape[2] == self.cols_in() , \"ERROR (data): invalid column dimension\"\n assert data.shape[3] == self.channels_in(), \"ERROR (data): invalid channel dimension\"\n else:\n assert data.shape[0] == self.rows_in() , \"ERROR (data): invalid row dimension\"\n assert data.shape[1] == self.cols_in() , \"ERROR (data): 
invalid column dimension\"\n assert data.shape[2] == self.channels_in(), \"ERROR (data): invalid channel dimension\"\n\n # instantiate relu layer\n relu_layer = torch.nn.ReLU()\n\n # return output featuremap\n if batched_flag:\n data = np.moveaxis(data, -1, 1)\n print(data.shape)\n else:\n data = np.moveaxis(data, -1, 0)\n # FIXME clean up use of batch size here\n data = np.repeat(data[np.newaxis,...], batch_size, axis=0)\n return relu_layer(torch.from_numpy(data)).detach().numpy()\n\n","repo_name":"biggsbenjamin/ATHEENA","sub_path":"optimiser/fpgaconvnet_optimiser/models/layers/ReLULayer.py","file_name":"ReLULayer.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"23171152951","text":"\"\"\"\nThe :mod:`sklearn.utils` module includes various utilities.\n\"\"\"\nfrom collections import Sequence\n\nimport numpy as np\nfrom scipy.sparse import issparse\nimport warnings\n\nfrom .murmurhash import murmurhash3_32\nfrom .validation import (as_float_array,\n assert_all_finite,\n check_random_state, column_or_1d, check_array,\n check_consistent_length, check_X_y, indexable,\n check_symmetric)\nfrom .deprecation import deprecated\nfrom .class_weight import compute_class_weight, compute_sample_weight\nfrom ..externals.joblib import cpu_count\nfrom ..exceptions import ConvergenceWarning as _ConvergenceWarning\nfrom ..exceptions import DataConversionWarning\n\n\n@deprecated(\"ConvergenceWarning has been moved into the sklearn.exceptions \"\n \"module. It will not be available here from version 0.19\")\nclass ConvergenceWarning(_ConvergenceWarning):\n pass\n\n\n__all__ = [\"murmurhash3_32\", \"as_float_array\",\n \"assert_all_finite\", \"check_array\",\n \"check_random_state\",\n \"compute_class_weight\", \"compute_sample_weight\",\n \"column_or_1d\", \"safe_indexing\",\n \"check_consistent_length\", \"check_X_y\", 'indexable',\n \"check_symmetric\", \"indices_to_mask\"]\n\n\ndef safe_mask(X, mask):\n \"\"\"Return a mask which is safe to use on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Data on which to apply mask.\n\n mask: array\n Mask to be used on X.\n\n Returns\n -------\n mask\n \"\"\"\n mask = np.asarray(mask)\n if np.issubdtype(mask.dtype, np.int):\n return mask\n\n if hasattr(X, \"toarray\"):\n ind = np.arange(mask.shape[0])\n mask = ind[mask]\n return mask\n\n\ndef axis0_safe_slice(X, mask, len_mask):\n \"\"\"\n This mask is safer than safe_mask since it returns an\n empty array, when a sparse matrix is sliced with a boolean mask\n with all False, instead of raising an unhelpful error in older\n versions of SciPy.\n\n See: https://github.com/scipy/scipy/issues/5361\n\n Also note that we can avoid doing the dot product by checking if\n the len_mask is not zero in _huber_loss_and_gradient but this\n is not going to be the bottleneck, since the number of outliers\n and non_outliers are typically non-zero and it makes the code\n tougher to follow.\n \"\"\"\n if len_mask != 0:\n return X[safe_mask(X, mask), :]\n return np.zeros(shape=(0, X.shape[1]))\n\n\ndef safe_indexing(X, indices):\n \"\"\"Return items or rows from X using indices.\n\n Allows simple indexing of lists or arrays.\n\n Parameters\n ----------\n X : array-like, sparse-matrix, list.\n Data from which to sample rows or items.\n\n indices : array-like, list\n Indices according to which X will be subsampled.\n \"\"\"\n if hasattr(X, \"iloc\"):\n # Pandas Dataframes and Series\n try:\n return X.iloc[indices]\n 
except ValueError:\n # Cython typed memoryviews internally used in pandas do not support\n # readonly buffers.\n warnings.warn(\"Copying input dataframe for slicing.\",\n DataConversionWarning)\n return X.copy().iloc[indices]\n elif hasattr(X, \"shape\"):\n if hasattr(X, 'take') and (hasattr(indices, 'dtype') and\n indices.dtype.kind == 'i'):\n # This is often substantially faster than X[indices]\n return X.take(indices, axis=0)\n else:\n return X[indices]\n else:\n return [X[idx] for idx in indices]\n\n\ndef resample(*arrays, **options):\n \"\"\"Resample arrays or sparse matrices in a consistent way\n\n The default strategy implements one step of the bootstrapping\n procedure.\n\n Parameters\n ----------\n *arrays : sequence of indexable data-structures\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n replace : boolean, True by default\n Implements resampling with replacement. If False, this will implement\n (sliced) random permutations.\n\n n_samples : int, None by default\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays.\n If replace is False it should not be larger than the length of\n arrays.\n\n random_state : int or RandomState instance\n Control the shuffling for reproducible behavior.\n\n Returns\n -------\n resampled_arrays : sequence of indexable data-structures\n Sequence of resampled views of the collections. The original arrays are\n not impacted.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import resample\n >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)\n >>> X\n array([[ 1., 0.],\n [ 2., 1.],\n [ 1., 0.]])\n\n >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n <3x2 sparse matrix of type '<... 
'numpy.float64'>'\n with 4 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[ 1., 0.],\n [ 2., 1.],\n [ 1., 0.]])\n\n >>> y\n array([0, 1, 0])\n\n >>> resample(y, n_samples=2, random_state=0)\n array([0, 1])\n\n\n See also\n --------\n :func:`sklearn.utils.shuffle`\n \"\"\"\n random_state = check_random_state(options.pop('random_state', None))\n replace = options.pop('replace', True)\n max_n_samples = options.pop('n_samples', None)\n if options:\n raise ValueError(\"Unexpected kw arguments: %r\" % options.keys())\n\n if len(arrays) == 0:\n return None\n\n first = arrays[0]\n n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)\n\n if max_n_samples is None:\n max_n_samples = n_samples\n elif (max_n_samples > n_samples) and (not replace):\n raise ValueError(\"Cannot sample %d out of arrays with dim %d\"\n \"when replace is False\" % (max_n_samples,\n n_samples))\n\n check_consistent_length(*arrays)\n\n if replace:\n indices = random_state.randint(0, n_samples, size=(max_n_samples,))\n else:\n indices = np.arange(n_samples)\n random_state.shuffle(indices)\n indices = indices[:max_n_samples]\n\n # convert sparse matrices to CSR for row-based indexing\n arrays = [a.tocsr() if issparse(a) else a for a in arrays]\n resampled_arrays = [safe_indexing(a, indices) for a in arrays]\n if len(resampled_arrays) == 1:\n # syntactic sugar for the unit argument case\n return resampled_arrays[0]\n else:\n return resampled_arrays\n\n\ndef shuffle(*arrays, **options):\n \"\"\"Shuffle arrays or sparse matrices in a consistent way\n\n This is a convenience alias to ``resample(*arrays, replace=False)`` to do\n random permutations of the collections.\n\n Parameters\n ----------\n *arrays : sequence of indexable data-structures\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n random_state : int or RandomState instance\n Control the shuffling for reproducible behavior.\n\n n_samples : int, None by default\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays.\n\n Returns\n -------\n shuffled_arrays : sequence of indexable data-structures\n Sequence of shuffled views of the collections. The original arrays are\n not impacted.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import shuffle\n >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)\n >>> X\n array([[ 0., 0.],\n [ 2., 1.],\n [ 1., 0.]])\n\n >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n <3x2 sparse matrix of type '<... 
'numpy.float64'>'\n with 3 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[ 0., 0.],\n [ 2., 1.],\n [ 1., 0.]])\n\n >>> y\n array([2, 1, 0])\n\n >>> shuffle(y, n_samples=2, random_state=0)\n array([0, 1])\n\n See also\n --------\n :func:`sklearn.utils.resample`\n \"\"\"\n options['replace'] = False\n return resample(*arrays, **options)\n\n\ndef safe_sqr(X, copy=True):\n \"\"\"Element wise squaring of array-likes and sparse matrices.\n\n Parameters\n ----------\n X : array like, matrix, sparse matrix\n\n copy : boolean, optional, default True\n Whether to create a copy of X and operate on it or to perform\n inplace computation (default behaviour).\n\n Returns\n -------\n X ** 2 : element wise square\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)\n if issparse(X):\n if copy:\n X = X.copy()\n X.data **= 2\n else:\n if copy:\n X = X ** 2\n else:\n X **= 2\n return X\n\n\ndef gen_batches(n, batch_size):\n \"\"\"Generator to create slices containing batch_size elements, from 0 to n.\n\n The last slice may contain less than batch_size elements, when batch_size\n does not divide n.\n\n Examples\n --------\n >>> from sklearn.utils import gen_batches\n >>> list(gen_batches(7, 3))\n [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]\n >>> list(gen_batches(6, 3))\n [slice(0, 3, None), slice(3, 6, None)]\n >>> list(gen_batches(2, 3))\n [slice(0, 2, None)]\n \"\"\"\n start = 0\n for _ in range(int(n // batch_size)):\n end = start + batch_size\n yield slice(start, end)\n start = end\n if start < n:\n yield slice(start, n)\n\n\ndef gen_even_slices(n, n_packs, n_samples=None):\n \"\"\"Generator to create n_packs slices going up to n.\n\n Pass n_samples when the slices are to be used for sparse matrix indexing;\n slicing off-the-end raises an exception, while it works for NumPy arrays.\n\n Examples\n --------\n >>> from sklearn.utils import gen_even_slices\n >>> list(gen_even_slices(10, 1))\n [slice(0, 10, None)]\n >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS\n [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]\n >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS\n [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]\n >>> list(gen_even_slices(10, 3))\n [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]\n \"\"\"\n start = 0\n if n_packs < 1:\n raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\"\n % n_packs)\n for pack_num in range(n_packs):\n this_n = n // n_packs\n if pack_num < n % n_packs:\n this_n += 1\n if this_n > 0:\n end = start + this_n\n if n_samples is not None:\n end = min(n_samples, end)\n yield slice(start, end, None)\n start = end\n\n\ndef _get_n_jobs(n_jobs):\n \"\"\"Get number of jobs for the computation.\n\n This function reimplements the logic of joblib to determine the actual\n number of jobs depending on the cpu count. If -1 all CPUs are used.\n If 1 is given, no parallel computing code is used at all, which is useful\n for debugging. 
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.\n    Thus for n_jobs = -2, all CPUs but one are used.\n\n    Parameters\n    ----------\n    n_jobs : int\n        Number of jobs stated in joblib convention.\n\n    Returns\n    -------\n    n_jobs : int\n        The actual number of jobs as positive integer.\n\n    Examples\n    --------\n    >>> from sklearn.utils import _get_n_jobs\n    >>> _get_n_jobs(4)\n    4\n    >>> jobs = _get_n_jobs(-2)\n    >>> assert jobs == max(cpu_count() - 1, 1)\n    >>> _get_n_jobs(0)\n    Traceback (most recent call last):\n    ...\n    ValueError: Parameter n_jobs == 0 has no meaning.\n    \"\"\"\n    if n_jobs < 0:\n        return max(cpu_count() + 1 + n_jobs, 1)\n    elif n_jobs == 0:\n        raise ValueError('Parameter n_jobs == 0 has no meaning.')\n    else:\n        return n_jobs\n\n\ndef tosequence(x):\n    \"\"\"Cast iterable x to a Sequence, avoiding a copy if possible.\"\"\"\n    if isinstance(x, np.ndarray):\n        return np.asarray(x)\n    elif isinstance(x, Sequence):\n        return x\n    else:\n        return list(x)\n\n\ndef indices_to_mask(indices, mask_length):\n    \"\"\"Convert list of indices to boolean mask.\n\n    Parameters\n    ----------\n    indices : list-like\n        List of integers treated as indices.\n    mask_length : int\n        Length of boolean mask to be generated.\n\n    Returns\n    -------\n    mask : 1d boolean nd-array\n        Boolean array that is True where indices are present, else False.\n    \"\"\"\n    if mask_length <= np.max(indices):\n        raise ValueError(\"mask_length must be greater than max(indices)\")\n\n    mask = np.zeros(mask_length, dtype=np.bool)\n    mask[indices] = True\n\n    return mask\n","repo_name":"catboost/catboost","sub_path":"contrib/python/scikit-learn/py2/sklearn/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","stars":7463,"dataset":"github-code","pt":"57"} +{"seq_id":"27809525461","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 28 18:46:07 2021\r\n\r\n@author: Barry\r\n\"\"\"\r\n\"\"\"\r\nhttps://stackoverflow.com/questions/40451300/tkinter-in-spyder\r\n\"\"\"\r\nimport datetime as dt\r\n# from tkcalendar import Calendar, DateEntry\r\nfrom tkinter import Tk, Button, Label, Entry\r\n\r\nroot = Tk()\r\nroot.title(\"Sum Calculator\")\r\nroot.geometry('600x300')\r\n\r\n# date_entry= DateEntry(root)\r\n# date_entry.pack()\r\n# now= date_entry.get()\r\n\r\ndef quitbtn_clicked():\r\n    root.destroy()\r\nquitbtn = Button(root, text=\"Quit\",\\\r\n                 command=quitbtn_clicked)\r\nquitbtn.grid(row=10,column=4)\r\n\r\nStartYearLbl = Label(root, text=\"Starting Year\")\r\nStartYearLbl.grid(row=0,column=0)\r\nStartYear = Entry(root,width=4)\r\nStartYear.insert(0, \"1944\")\r\nStartYear.grid(row=0,column=1)\r\n\r\nendYearLbl = Label(root, text=\"Ending Year\")\r\nendYearLbl.grid(row=1,column=0)\r\nendYear = Entry(root,width=4)\r\nendYear.insert(0, str(dt.datetime.now().date())[0:4])\r\nendYear.grid(row=1,column=1)\r\n\r\nstockSourceLbl = Label(root, text=\"Stock Source\")\r\nstockSourceLbl.grid(row=2,column=0)\r\nstockSource = Entry(root,width=10)\r\nstockSource.insert(0, \"Yahoo\")\r\nstockSource.grid(row=2,column=1)\r\n\r\nstockNameLbl = Label(root, text=\"Stock Name\")\r\nstockNameLbl.grid(row=3,column=0)\r\nstockName = Entry(root,width=10)\r\nstockName.insert(0, \"NFLX\")\r\nstockName.grid(row=3,column=1)\r\n\r\nyear_range_lbl=Label(root, text=\"Year Range:\")\r\nyear_range_lbl.grid(row=4,column=0)\r\n\r\ndef rangebtn_clicked():\r\n    # range between the two year entries, not the stock source\r\n    year_range = int(endYear.get())- int(StartYear.get())\r\n    year_range_text=\"Range = \" + str(year_range)\r\n    year_range_lbl.configure(text= year_range_text)\r\n\r\nrangebtn = Button(root, text=\"Calc Range\", \\\n 
command=rangebtn_clicked)\nrangebtn.grid(row=5,column=0)\n\nroot.mainloop()","repo_name":"nealkuperman/Python","sub_path":"_projects/gui/try/tkinter_dev0.py","file_name":"tkinter_dev0.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"69927812978","text":"from animais import Cachorro\n\ntoto = Cachorro()\ntoto.nome = 'Cleber'\ntoto.raca = 'Street-dog'\ntoto.tamanho = 'P'\ntoto.pelo = 'baixo'\n\ntoto.escrever_quem_sou()\n\ntoto.dormir()\nprint(toto.status)\ntoto.dormir()\ntoto.acordar()\nprint(toto.status)\ntoto.dormir()\ntoto.acordar()\ntoto.comer()\ntoto.correr()\ntoto.latir()\ntoto.dormir()\ntoto.correr()\ntoto.latir()\n\n\n\n\n\n","repo_name":"BernardoPacheco94/RSTI-algoritmos","sub_path":"poo_cachorro/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39847894094","text":"from django.http.response import HttpResponse\nfrom django.shortcuts import render\nimport stripe\nfrom django.conf import settings\nfrom .models import Produto, Pedido\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\n@csrf_exempt\ndef create_payment(request, id):\n produto = Produto.objects.get(id = id)\n\n email = json.loads(request.body)['email']\n\n # Create a PaymentIntent with the order amount and currency\n intent = stripe.PaymentIntent.create(\n amount=int(produto.preco),\n currency='BRL',\n metadata={\n 'produto_id': produto.id,\n 'email': email\n }\n\n\n )\n return JsonResponse({\n 'clientSecret': intent['client_secret']\n })\n \n\ndef home(request):\n produto = Produto.objects.get(id = 1)\n\n return render(request, 'home.html', {'produto': produto, 'STRIPE_PUBLIC_KEY': settings.STRIPE_PUBLIC_KEY})\n\n\ndef sucesso(request):\n return HttpResponse('Sucesso!')\n\ndef erro(request):\n return HttpResponse('Erro!')\n\n@csrf_exempt\ndef stripe_webhook(request):\n payload = request.body\n sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n event = None\n endpoint_secret = settings.STRIPE_WEBHOOK_SECRET\n\n try:\n event = stripe.Webhook.construct_event(\n payload, sig_header, endpoint_secret\n )\n except ValueError as e:\n # Invalid payload\n return HttpResponse(status=400)\n except stripe.error.SignatureVerificationError as e:\n # Invalid signature\n return HttpResponse(status=400)\n\n if event['type'] == 'charge.succeeded':\n session = event['data']['object']\n x = Pedido(produto_id = session['metadata']['produto_id'],\n payment_intent = session['payment_intent'],\n email = session['metadata']['email'],\n valor_pago = session['amount'],\n status = session['status'])\n x.save()\n \n \n \n\n return HttpResponse(status=200)","repo_name":"Pythonando/Checkout-STRIPE-Python-Full","sub_path":"produtos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"16515181042","text":"# written by Nathan Boley, from https://github.com/nboley/GGR_code\n\nimport sys\nimport gzip\n\nVERBOSE = False\n\nadapters = {\n 'Illumina': b'AGATCGGAAGAGC',\n 'Nextera ': b'CTGTCTCTTATA',\n 'smallRNA': b'TGGAATTCTCGG'\n}\n\n\ndef open_gz(fname):\n return gzip.open(fname) if fname.endswith('.gz') else open(fname, 'rb')\n\n\ndef detect_adapters_and_cnts(fname, max_n_lines=1000000):\n 
adapter_cnts = {\n 'Illumina': 0,\n 'Nextera ': 0,\n 'smallRNA': 0\n }\n\n with open_gz(fname) as fp:\n # read the first million sequences or to the end of the while -- whichever\n # comes first, and then use the adapter for trimming which was found to\n # occur most often\n for seq_index, line in enumerate(fp):\n if seq_index >= max_n_lines:\n break\n if seq_index % 4 != 1:\n continue\n for key in adapters:\n if line.find(adapters[key]) > -1:\n adapter_cnts[key] += 1\n\n observed_adapters = [\n adapter for adapter, cnt in sorted(\n adapter_cnts.items(), key=lambda x: -x[1])\n if cnt > 0\n ]\n return observed_adapters, adapter_cnts, seq_index//4\n\n\ndef detect_most_likely_adapter(fname):\n observed_adapters, adapter_cnts, n_obs_adapters = detect_adapters_and_cnts(\n fname)\n if observed_adapters:\n best_adapter = observed_adapters[0]\n else:\n best_adapter = \"\"\n\n if VERBOSE:\n print(\"\\n\\nAUTO-DETECTING ADAPTER TYPE\\n===========================\")\n print(\"Attempting to auto-detect adapter type from the first 1 million sequences of the first file (>> {} <<)\\n\".format(\n fname)\n )\n print(\"Found perfect matches for the following adapter sequences:\")\n print(\"Adapter type\\tCount\\tSequence\\tSequences analysed\\tPercentage\")\n for adapter in observed_adapters:\n print(\"{}\\t{}\\t{}\\t{}\\t\\t\\t{:.2%}\".format(\n adapter,\n adapter_cnts[adapter],\n adapters[adapter].decode(),\n n_obs_adapters,\n adapter_cnts[adapter]/n_obs_adapters)\n )\n if best_adapter:\n return adapters[best_adapter].decode()\n else:\n return \"\"\n\n\ndef main():\n global VERBOSE\n VERBOSE = False\n best_adapter = detect_most_likely_adapter(sys.argv[1])\n print(best_adapter)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ENCODE-DCC/atac-seq-pipeline","sub_path":"src/detect_adapter.py","file_name":"detect_adapter.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":339,"dataset":"github-code","pt":"57"} +{"seq_id":"2535353395","text":"import numpy as np\n\nfrom config import cfg\n\n\ndef image_show(tensor_image, plt, title=None):\n image = tensor_image.numpy().transpose([1, 2, 0])\n image = np.array(cfg.transform_params['std']) * image + np.array(cfg.transform_params['mean'])\n image = np.clip(image, 0, 1)\n\n plt.imshow(image)\n\n if title is not None:\n plt.set_title(title)\n plt.grid(False)\n","repo_name":"m-petuhov/vk-bot-dog-breed-identification","sub_path":"utils/data/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16870688880","text":"from math import pi, cos, sin, sqrt\nfrom random import choice, randint\n\nimport pygame\nfrom .drawable import Drawable\nfrom .tile_objects.grass import Grass\nfrom ..renderer import Renderer\n\n\nclass Hexagon(Drawable):\n types = {\n \"WATER\": (0, 0, 255),\n \"GRASSLAND\": (0, 255, 0),\n \"DIED_GRASSLAND\": (100, 100, 0),\n \"SAND\": (255, 255, 100),\n \"DIRT\": (50, 50, 0)\n }\n\n def __init__(self, x, y, rad, color=(0, 100, 0)):\n super().__init__(x, y, color)\n\n self.entities = []\n self.lifetime = 0\n\n self.rad = rad\n self.type = choice(list(self.types.keys()))\n self.color = self.types[self.type]\n\n # self.generate_entities()\n self.obj_type = \"TILE\"\n\n def draw(self, screen):\n pygame.draw.polygon(screen, self.color, [\n ((self.x + Renderer.camera.x_offset) + self.rad *\n (cos(2 * pi / 6 * i + self.rotation)),\n (self.y + 
Renderer.camera.y_offset) + self.rad *\n (sin(2 * pi / 6 * i + self.rotation)))\n for i in range(6)\n ])\n\n pygame.draw.polygon(screen, self.secondary_color, [\n ((self.x + Renderer.camera.x_offset) + self.rad *\n (cos(2 * pi / 6 * i + self.rotation)),\n (self.y + Renderer.camera.y_offset) + self.rad *\n (sin(2 * pi / 6 * i + self.rotation)))\n for i in range(6)\n ], 5)\n\n @staticmethod\n def create_grid(w, h, rad, logic):\n for j in range(w):\n x = (j * (rad * 2)) * (3 / 4)\n y = j * rad % (rad * 2)\n if j % 2 == 1:\n y -= 4\n\n for i in range(h):\n if randint(0, 100) < 80:\n logic.tiles.append(Hexagon(x, y + i * (sqrt(3) * rad), rad))\n\n def update(self, dt):\n self.lifetime += 1 * dt\n if self.type == \"DIRT\" \\\n and randint(0, 20) > 19 \\\n and int(self.lifetime) % randint(8, 12) == 0:\n self.dirt_update()\n\n if self.type == \"GRASSLAND\":\n self.grassland_update()\n\n if self.type == \"WATER\":\n self.water_update()\n\n def click(self):\n self.type = self.next_type()\n self.handle_type_change()\n\n # Renderer.camera.x_offset = 400 - self.x\n # Renderer.camera.y_offset = 300 - self.y\n\n def on_me(self, mousepos):\n mousepos = mousepos[0] - Renderer.camera.x_offset, mousepos[1] - Renderer.camera.y_offset\n bounds = self.calculate_boundaries()\n if bounds[\"horizontal\"][0] < mousepos[0] < bounds[\"horizontal\"][1]:\n if bounds[\"vertical\"][0] < mousepos[1] < bounds[\"vertical\"][1]:\n return True\n return False\n\n def calculate_boundaries(self):\n return {\n \"horizontal\": (int(self.x - (self.rad * .5)), int(self.x + (self.rad * .5))),\n \"vertical\": (int(self.y - (self.rad * .5)), int(self.y + (self.rad * .5)))\n }\n\n def next_type(self):\n keyList = sorted(self.types.keys())\n for i, v in enumerate(keyList):\n if v == self.type:\n return keyList[(i + 1) % len(keyList)]\n\n def generate_entities(self):\n bounds = self.calculate_boundaries()\n if self.type == \"GRASSLAND\":\n for i in range(randint(0, 20)):\n self.entities.append(\n Grass(\n randint(bounds[\"horizontal\"][0], bounds[\"horizontal\"][1]),\n randint(bounds[\"vertical\"][0], bounds[\"vertical\"][1]),\n )\n )\n\n def dirt_update(self):\n bounds = self.calculate_boundaries()\n self.entities.append(\n Grass(\n randint(bounds[\"horizontal\"][0], bounds[\"horizontal\"][1]),\n randint(bounds[\"vertical\"][0], bounds[\"vertical\"][1]),\n )\n )\n\n grass_count = 0\n\n for entity in self.entities:\n if entity.obj_type == \"GRASS\":\n grass_count += 1\n\n if grass_count > 5:\n self.type = \"GRASSLAND\"\n self.handle_type_change()\n for entity in self.entities:\n if entity.obj_type is \"GRASS\":\n entity.grown()\n\n def handle_type_change(self):\n self.color = self.types[self.type]\n self.lifetime = 0\n if self.type in [\"WATER\", \"DIED_GRASSLAND\"]:\n self.entities = []\n\n def grassland_update(self):\n\n if self.lifetime > randint(15, 30):\n self.type = \"DIED_GRASSLAND\"\n self.handle_type_change()\n\n i = 0\n while i < len(self.entities):\n try:\n entity = self.entities[i]\n except IndexError:\n continue\n\n if entity.obj_type == \"GRASS\":\n if entity.lifetime >= randint(16, 20):\n self.entities.pop(self.entities.index(entity))\n entity.destruct()\n else:\n i += 1\n\n def water_update(self):\n if self.lifetime > randint(15, 30):\n self.type = \"SAND\"\n 
self.handle_type_change()","repo_name":"RafayelGardishyan/HexagonGame","sub_path":"game/objects/hexagon.py","file_name":"hexagon.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"762129599","text":"#!/usr/bin/env python\nimport json\n\ninput_filename='TestFile.cfg'\n#input_filename='BelleCertification.cfg'\noutput_filename='BelleCertification.json'\nf_in=open(input_filename,'r')\nf_out=open(output_filename,'w')\n#f_in.next()\n\ndef line_parser(l_in):\n if l_in.find(\"{\")!=-1 or l_in.find(\"}\")!=-1:\n print(\"keep the line the same\")\n l_out=l_in\n else:\n l_out=l_in.replace('=',':')\n l_out=l_out.replace(',','+:')\n #if l_out.find(\"+:\")\n # l_out=l_out.replace(',','+:')\n if l_out.find(\":\") == -1 or l_out.find(\"#\")!=-1:\n print (\"line without semicolon\\n\")\n l_out=\"'\"+l_out+\"'\"+\":\"\n #return(l_out)\n else:\n print(\"spliting by semicolon\")\n l_out=l_out.split(':',1)\n l_out=\"'\" + l_out[0] + \"'\" + \":\" + \"'\"+l_out[1]+\"'\"\n return(l_out)\n\nfor line in f_in.readline():\n print(\"printing line input for testing\")\n print(line)\n output_tmp=line_parser(line)\n f_out.write(output_tmp)\n\nf_in.close()\nf_out.close()\n","repo_name":"ahandresf/Belle_tools","sub_path":"IntermedianScripts/Belltest.py","file_name":"Belltest.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70772236657","text":"from socket import socket, AF_INET, SOCK_STREAM\r\nfrom threading import Thread\r\nfrom datetime import datetime\r\n\r\nHOST = \"127.0.0.1\" # Enter your server's IP\r\nPORT = 32500\r\nMAX_USERS = 100\r\n\r\nBUFFER_SIZE = 1024\r\n\r\nclients = {}\r\naddresses = {}\r\nSOCK = socket(AF_INET, SOCK_STREAM)\r\nSOCK.bind((HOST, PORT))\r\n\r\n# Security\r\ndef isSpam(msg, IP):\r\n is_spam = True\r\n msg_str = \"\"\r\n try:\r\n msg_str = msg.decode(\"utf8\")\r\n except:\r\n print(f\"[Security] Couldn't decode the message ({IP})\")\r\n return is_spam\r\n \r\n if msg_str.strip() == \"\": # is Empty\r\n print(f\"[Security] Empty message ({IP})\")\r\n elif len(msg_str) >= BUFFER_SIZE:\r\n print(f\"[Security] Overly long message ({IP})\")\r\n else:\r\n is_spam = False\r\n return is_spam\r\n\r\ndef verifyClient(msg_b, IP):\r\n try:\r\n msg_str = msg_b.decode(\"utf8\")\r\n except:\r\n print(f\"[Security] Couldn't decode the message ({IP})\")\r\n return False\r\n\r\n if msg_str == \"CHAT\":\r\n return True \r\n else:\r\n print(f\"[Security] Client verification failed ({IP})\")\r\n return False\r\n\r\n# Connectivity\r\ndef closeConnection(conn):\r\n if conn in clients:\r\n del clients[conn]\r\n try:\r\n conn.send(\"/quit\".encode(\"utf8\"))\r\n conn.close()\r\n except:\r\n pass\r\n\r\ndef broadcast(msg_str):\r\n print(f\"[Broadcast] {msg_str}\")\r\n msg = bytes(msg_str+'\\n',\"utf8\")\r\n try: # Fixes server crashes\r\n for sock in clients:\r\n sock.send(msg)\r\n except:\r\n pass\r\n\r\ndef log(msg, type=\"Info\"):\r\n now = datetime.now().strftime(\"[%H:%M:%S]\")\r\n print(f\"[{type}][{now}] {msg}\")\r\n \r\n# Submain server function\r\ndef handleClient(conn, address):\r\n # Receive and check\r\n while True:\r\n conn.send(\"Enter your nickname:\\n\".encode(\"utf8\"))\r\n try:\r\n name = conn.recv(BUFFER_SIZE)\r\n except:\r\n print(f\"[Security] Couldn't receive the message ({address[0]})\")\r\n closeConnection(conn)\r\n return\r\n\r\n if isSpam(name, address[0]):\r\n 
closeConnection(conn)\r\n return\r\n\r\n # Process the first request\r\n name = name.decode(\"utf8\")\r\n\r\n if name == \"/quit\":\r\n closeConnection(conn)\r\n print(f\"[Info] {address[0]}:{address[1]} has left the chat\")\r\n return\r\n\r\n # Check for a similar name\r\n if name in clients.values():\r\n print(f\"[Info] Duplicate name found ({name})\")\r\n conn.send(\"Nickname taken\\n\".encode(\"utf8\"))\r\n continue\r\n else:\r\n clients[conn] = name\r\n break\r\n\r\n print(f\"[Info] {address[0]}:{address[1]} chose username \\\"{name}\\\"\")\r\n msg = f\"{name} has joined the chat\"\r\n broadcast(msg)\r\n\r\n # Keep processing requests\r\n while True:\r\n # Receive and check\r\n try:\r\n msg = conn.recv(BUFFER_SIZE)\r\n except:\r\n print(f\"[Security] Couldn't receive the message ({address[0]})\")\r\n closeConnection(conn)\r\n return\r\n\r\n if isSpam(msg, address[0]):\r\n closeConnection(conn)\r\n return\r\n\r\n # Handle the message\r\n msg = msg.decode(\"utf8\")\r\n\r\n if msg == \"/quit\":\r\n closeConnection(conn)\r\n broadcast(f\"{name} has left the chat\")\r\n break\r\n\r\n now = datetime.now().strftime(\"[%H:%M:%S]\")\r\n broadcast(f\"{name} {now}: \"+ msg)\r\n\r\n# Main server function\r\ndef acceptIncomingConnections():\r\n while True:\r\n conn, address = SOCK.accept()\r\n\r\n # Verify\r\n try:\r\n header = conn.recv(BUFFER_SIZE)\r\n except:\r\n print(f\"[Security] Couldn't receive the message ({address[0]})\")\r\n closeConnection(conn)\r\n continue\r\n \r\n if not verifyClient(header, address[0]):\r\n closeConnection(conn)\r\n continue\r\n\r\n # Start the loop\r\n print(f\"[Info] {address[0]}:{address[1]} has connected\")\r\n addresses[conn] = address\r\n Thread(target=handleClient, args=(conn, address)).start()\r\n\r\nSOCK.listen(MAX_USERS)\r\nprint(f\"Chat Server is running at {HOST}:{PORT}\")\r\naccept_connections = Thread(target=acceptIncomingConnections)\r\naccept_connections.start()\r\naccept_connections.join()\r\nSOCK.close()","repo_name":"JacobDev1/chat_server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"23723426093","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bet', '0013_auto_20150907_2102'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='comment',\n old_name='song',\n new_name='position',\n ),\n migrations.AlterField(\n model_name='comment',\n name='votes',\n field=models.ManyToManyField(related_name='votes', null=True, through='bet.Vote', to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"igorpejic/songbet","sub_path":"apps/bet/migrations/0014_auto_20150907_2114.py","file_name":"0014_auto_20150907_2114.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"34350306122","text":"\nimport sqlite3\n\n#from . import ops\n#from . 
import stats\nfrom .geometry import geometry\nfrom .raster import raster\n\n\nsqlite3.enable_callback_tracebacks(True)\n\n\ndef connect(path, *args, **kwargs):\n\n kwargs['detect_types'] = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES # overrides...\n conn = sqlite3.connect(path, *args, **kwargs)\n\n # make faster (SOMEHOW ACTUALLY MUCH SLOWER FOR EXECUTEMANY...)\n #conn.isolation_level = None\n\n # register custom functions\n #stats.register_funcs(self.db)\n #ops.register_funcs(self.db)\n geometry.register_funcs(conn)\n geometry.register_aggs(conn)\n raster.register_funcs(conn)\n raster.register_aggs(conn)\n\n return conn\n","repo_name":"karimbahgat/postqlite","sub_path":"postqlite/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"1821429945","text":"\"\"\" This script provides some utils for the purpose of displaying tweets on website.\"\"\"\n\nimport json\nimport os\n\nfrom utils import get_tweetid_content\n\n# For Named Entity Recognition\nimport spacy\nnlp = spacy.load(\"en_core_web_sm\")\n\n\ndef get_city2latlng(datafile):\n city2latlng = dict()\n with open(datafile, 'r', encoding='utf8') as f:\n for iLine, line in enumerate(f):\n if iLine == 0:\n continue\n content = line.strip().split(',')\n city_name = content[0][1:-1].lower()\n lat, lng = float(content[6][1:-1]), float(content[7][1:-1])\n city2latlng[city_name] = (lat, lng)\n return city2latlng\n\n\ndef get_state2latlng(datafile):\n state2latlng = dict()\n with open(datafile, 'r', encoding='utf8') as f:\n for line in f:\n content = line.strip().split('\\t')\n if len(content) != 4:\n continue\n state_abrr_name = content[0].lower()\n state_full_name = content[-1].lower()\n lat, lng = float(content[1]), float(content[2])\n state2latlng[state_abrr_name] = (lat, lng)\n state2latlng[state_full_name] = (lat, lng)\n return state2latlng\n\n\ndef get_info_for_display(tweet_content_list, keep_no_img_tweet=False, keep_no_latlng_tweet=False):\n \"\"\"\n Parse the raw data, where each line is a json string containing all information for a tweet.\n What we need for display (currently) is: [id, full_text, media, location, created_at].\n Then the latitude and longitude could be derived from the location + full_text location entity recognition.\n\n :param tweet_content_list: A list of content where each content is a dict load by json.\n :param keep_no_img_tweet: If we need to keep the tweet that without the image.\n :param keep_no_latlng_tweet: If we need to keep the tweet that without the latitude and longitude information.\n\n :return: A list of dict, which could be written as a json file later.\n \"\"\"\n def get_coordinate(location2latlng, location, full_text):\n \"\"\"If the location cannot be parsed as a valid location, we extract location entity from full_text.\"\"\"\n result_latlng = None\n # Try to find the lat and lng from the location string.\n loc_list = location.strip().split(',')\n for loc in loc_list:\n loc = loc.strip().lower()\n if loc in location2latlng:\n result_latlng = location2latlng[loc]\n if result_latlng is not None:\n return result_latlng\n # Try to find the lat and lng from the GPE entity in text string.\n doc = nlp(full_text, disable=[\"tagger\", \"parser\"])\n for ent in doc.ents:\n if ent.label_ == 'GPE':\n loc = ent.text.lower()\n if loc in location2latlng:\n result_latlng = location2latlng[loc]\n break\n return (None, None) if result_latlng is None else result_latlng\n\n city2latlng 
= get_city2latlng(\"data/UScity2latlong.csv\")\n state2latlng = get_state2latlng(\"data/USstate2latlong.csv\")\n location2latlng = {**city2latlng, **state2latlng}\n location_missing_count = 0\n result = []\n for content in tweet_content_list:\n key2val = dict()\n key2val[\"id\"] = content[\"id_str\"]\n key2val[\"full_text\"] = content[\"full_text\"]\n\n # For those tweets which have images.\n if \"media\" in content[\"entities\"] and len(content[\"entities\"][\"media\"]) > 0:\n if \"media_url\" in content[\"entities\"][\"media\"][0]:\n key2val[\"media\"] = content[\"entities\"][\"media\"][0][\"media_url\"]\n elif \"mediaURL\" in content[\"entities\"][\"media\"][0]:\n key2val[\"media\"] = content[\"entities\"][\"media\"][0][\"mediaURL\"]\n else:\n raise ValueError(\"The format is invalid: {}\".format(content[\"entities\"][\"media\"][0]))\n elif not keep_no_img_tweet:\n continue\n\n # If this tweet is a re-tweet from others, we should use the original location and time.\n if \"retweeted_status\" in content:\n if \"location\" in content[\"retweeted_status\"][\"user\"]:\n key2val[\"location\"] = content[\"retweeted_status\"][\"user\"][\"location\"]\n else:\n key2val[\"location\"] = \"\"\n location_missing_count += 1\n if \"created_at\" in content[\"retweeted_status\"]:\n key2val[\"created_at\"] = content[\"retweeted_status\"][\"created_at\"]\n elif \"createdAt\" in content[\"retweeted_status\"]:\n key2val[\"created_at\"] = content[\"retweeted_status\"][\"createdAt\"]\n else:\n raise ValueError(\"The format is invalid: {}\".format(content[\"retweeted_status\"]))\n else:\n if \"location\" in content[\"user\"]:\n key2val[\"location\"] = content[\"user\"][\"location\"]\n else:\n key2val[\"location\"] = \"\"\n location_missing_count += 1\n if \"created_at\" in content:\n key2val[\"created_at\"] = content[\"created_at\"]\n elif \"createdAt\" in content:\n key2val[\"created_at\"] = content[\"createdAt\"]\n else:\n raise ValueError(\"The format is invalid: {}\".format(content))\n\n lat, lng = get_coordinate(location2latlng, key2val[\"location\"], key2val[\"full_text\"])\n # In current setting we just omit all tweets that cannot be labeled on the map.\n if lat is None and not keep_no_latlng_tweet:\n continue\n key2val[\"latitude\"] = lat\n key2val[\"longitude\"] = lng\n result.append(key2val)\n print(\"There are {} tweets don't have location info\".format(location_missing_count))\n print(\"The final tweets number for display is {}\".format(len(result)))\n return result\n\n\ndef write_display_info(display_info, outfile):\n \"\"\"\n Write the display info to a file, and currently we use the js file which could be loaded directly by JavaScript.\n TODO(junpeiz): Write the data into a json file and import it to database.\n\n :param display_info: A list of dict containing the information needed to display.\n :param outfile: The path to the output file.\n\n :return: None\n \"\"\"\n with open(outfile, 'w', encoding='utf8') as fout:\n fout.write(\"const tweets = \")\n fout.write(json.dumps(display_info, sort_keys=True, indent=2))\n fout.write(\";\\n\")\n fout.write(\"\\nexport default tweets;\\n\")\n\n\nif __name__ == '__main__':\n tweetid_list, tweet_content_list = get_tweetid_content([\"data/all-tweets-2019.txt\"])\n display_info = get_info_for_display(tweet_content_list)\n write_display_info(display_info, \"out/posts_full.js\")\n # Filter out those tweets that has been predicted high scores by our model\n predict_output = os.path.join('eval', 'rf.run')\n threshold_score = 0.5\n valid_tweetid = set()\n with 
open(predict_output, 'r', encoding='utf8') as f:\n        for line in f:\n            line = line.strip().split('\\t')\n            tweetid = line[2]\n            score = float(line[4])\n            if score >= threshold_score:\n                valid_tweetid.add(tweetid)\n    filtered_tweet_content_list = [content for idx, content in enumerate(tweet_content_list)\n                                   if tweetid_list[idx] in valid_tweetid]\n    display_info = get_info_for_display(filtered_tweet_content_list, keep_no_img_tweet=False, keep_no_latlng_tweet=False)\n    write_display_info(display_info, \"out/posts_filtered.js\")\n","repo_name":"berniebear/trec_is","sub_path":"utils_display.py","file_name":"utils_display.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35870871732","text":"import logging\nimport os\n\nimport pmb.build\nimport pmb.chroot.apk\nimport pmb.config\nimport pmb.helpers.run\n\n\ndef clone(args, name_repo, shallow=True, chown_to_user=False):\n    # Check for repo name in the config\n    if name_repo not in pmb.config.git_repos:\n        raise ValueError(\"No git repository configured for \" + name_repo)\n\n    # Skip if already checked out\n    if os.path.exists(args.work + \"/cache_git/\" + name_repo):\n        return\n\n    # Check out to temp folder\n    name_temp = name_repo + \".temp\"\n    if not os.path.exists(args.work + \"/cache_git/\" + name_temp):\n        # Set up chroot and install git\n        pmb.chroot.apk.install(args, [\"git\"])\n        logging.info(\"(native) git clone \" + pmb.config.git_repos[name_repo])\n\n        # git options\n        options = []\n        if shallow:\n            options += [\"--depth=1\"]\n\n        # Run the command\n        pmb.chroot.user(args, [\"git\", \"clone\"] + options +\n                        [pmb.config.git_repos[name_repo], name_temp],\n                        working_dir=\"/home/pmos/git/\", check=False,\n                        output=\"stdout\")\n        if not os.path.exists(args.work + \"/cache_git/\" + name_temp):\n            logging.info(\"NOTE: cloning from git is known to fail when the\"\n                         \" host linux kernel is older than 3.17:\"\n                         \" \")\n            raise RuntimeError(\"git clone failed!\")\n\n        # Chown to user's UID and GID\n        if chown_to_user:\n            uid_gid = \"{}:{}\".format(os.getuid(), os.getgid())\n            pmb.helpers.run.root(args, [\"chown\", \"-R\", uid_gid, args.work +\n                                        \"/cache_git/\" + name_temp])\n\n    # Rename the temp folder\n    pmb.helpers.run.root(args, [\"mv\", name_temp, name_repo],\n                         args.work + \"/cache_git\")\n\n\ndef rev_parse(args, revision=\"HEAD\"):\n    rev = pmb.helpers.run.user(args, [\"git\", \"rev-parse\", revision],\n                               args.aports, output_return=True, check=False)\n    if rev is None:\n        logging.warning(\"WARNING: Failed to determine revision of git repository at \" + args.aports)\n        return \"\"\n    return rev.rstrip()\n","repo_name":"LibrePhone/pmbootstrap","sub_path":"pmb/helpers/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"2467083751","text":"# author: Mark\n\n# 0. Write an anonymous function that checks whether a given year is a leap year\n\"\"\"\nLeap year: divisible by 4 but not by 100, or divisible by 400\n\"\"\"\nisleapyear = lambda year: (year % 4 == 0 and year % 100 != 0) or year % 400 == 0\nprint(isleapyear(2012))\nprint(isleapyear(2008))\nprint(isleapyear(2018))\nprint(isleapyear(2000))\n\n\n# 1. Write a function that reverses the elements of a given list (e.g. [1, 2, 3] -> [3, 2, 1]) (note: do not use the list's built-in reverse function)\ndef reverse(list1: list):\n    length = len(list1)\n    for index in range(length//2):\n        list1[index], list1[length-1-index] = list1[length-1-index], list1[index]\n\n\nmy_list = [1, 2, 3]  # index = 0 list1[index] = list1[len(list1)-index-1]\nreverse(my_list)\nprint(my_list)\n\n\n# 
+{"seq_id":"2467083751","text":"# author: Mark\r\n\r\n# 0. Write an anonymous function that checks whether a given year is a leap year\r\n\"\"\"\r\nLeap year: divisible by 4 but not by 100, or divisible by 400\r\n\"\"\"\r\nisleapyear = lambda year: (year % 4 == 0 and year % 100 != 0) or year % 400 == 0\r\nprint(isleapyear(2012))\r\nprint(isleapyear(2008))\r\nprint(isleapyear(2018))\r\nprint(isleapyear(2000))\r\n\r\n\r\n# 1. Write a function that reverses the elements of a given list in place (e.g. [1, 2, 3] -> [3, 2, 1]) (note: do not use the list's built-in reverse method)\r\ndef reverse(list1: list):\r\n    length = len(list1)\r\n    for index in range(length//2):\r\n        list1[index], list1[length-1-index] = list1[length-1-index], list1[index]\r\n\r\n\r\nmy_list = [1, 2, 3]   # swap list1[index] with list1[len(list1)-index-1]\r\nreverse(my_list)\r\nprint(my_list)\r\n\r\n\r\n# 2. Write a function that extracts the characters at all odd indices of a string\ndef get_substr(str1: str):\n    strstr = ''\n    for index in range(len(str1)):\n        if index & 1:\n            strstr += str1[index]\n    return strstr\n\n\nprint(get_substr('abcdefg'))\n\n\n# 3. Write a function that counts how many times a given element occurs in a given list (without the list's built-in count method)\ndef get_count(list1, item):\n    count = 0\n    for x in list1:\n        if x == item:\n            count += 1\n    return count\n\n\nprint(get_count([1, 23, 3, 4, 4, 23], 4))\n\n\n# 7. Write a function that replaces a given substring in a string with another given substring (without using the string's replace method)\n# e.g.: func1('abcdaxyz', 'a', '//') - returns: '//bcd//xyz'\ndef replace(str1: str, old, new):\n    # split on the old substring first\n    str_list = str1.split(old)\n\n    strstr = str_list[0]\n    for index in range(1, len(str_list)):\n        strstr += new + str_list[index]\n\n    return strstr\n\n\nprint(replace('abcabchasjhsabchjshdjfabcsjhdfjhabc123', 'abc', '+'))","repo_name":"MarkRainbows/Python-BaseSkills","sub_path":"Chapter-9/06-homework.py","file_name":"06-homework.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
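# --- Added example (editorial, not part of the original records) ---
# The hand-rolled replace() above (exercise 7, fixed to use `new` instead of a
# hard-coded '+') is the classic split/join idiom: split on the old substring, then
# join the pieces with the new one. A quick equivalence check against the expected
# output given in the exercise:
def replace_via_join(s, old, new):
    return new.join(s.split(old))

assert replace_via_join('abcdaxyz', 'a', '//') == '//bcd//xyz'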
+{"seq_id":"16651876212","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 11 11:51:52 2019\r\n\r\n@author: BSI80086\r\n\"\"\"\r\n\r\nimport streamlit as st\r\nimport pandas as pd \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport plotly.express as px\r\nimport time\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrawdata=pd.read_csv(\"emp_attrition.csv\")\r\ndata = pd.read_csv(\"Employee Attrition.csv\")\r\ndata_numeric= data.drop(['Department','EducationField','Gender','JobRole','MaritalStatus'],axis = 1)\r\ndata_numeric_short=data_numeric.drop(['BusinessTravel','attrition encode','gender encode','marital status encode','Human Resources',\r\n                          'Life Sciences','Marketing','Medical','Other','Technical Degree'],axis = 1)\r\ndataset=pd.DataFrame({'Age':[0.0],'JobLevel':[0.0],'MonthlyIncome':[0.0],'NumCompaniesWorked':[0.0],'StockOptionLevel':[0.0],'TotalWorkingYears':[0.0],'Business Travel encode':[0.0]})\r\n\r\ndef main():\r\n    st.sidebar.header('Welcome to Attrition Analysis, Visualization and Prediction!')\r\n    page = st.sidebar.selectbox(\"What do you want?\", [\"Dataset\",\"Dashboard\",\"Prediction\"], index=0,key=0)\r\n    if page == \"Dataset\":\r\n        st.image('attrition image.png')\r\n        st.title(\"Dataset of Attrition\")\r\n        st.header(\"Here you can see both the raw dataset and the dataset after pre-processing\")\r\n        if st.checkbox(\"Show raw dataset?\"):\r\n            show_rawdata=st.slider(\"How much raw data do you want to see?\" , 1,100,5)\r\n            st.write(rawdata.sample(show_rawdata))\r\n        if st.checkbox(\"Show dataset after pre-processing?\"):\r\n            show_data=st.slider(\"How much data do you want to see?\" , 1,100,5)\r\n            st.write(data.sample(show_data))\r\n        if st.checkbox(\"I want to check whether our data is balanced. How do resigned and retained employees compare?\"):\r\n            st.write(pd.DataFrame(data['Attrition'].value_counts()))\r\n            proportion = data.Attrition.value_counts()[0] / data.Attrition.value_counts()[1]\r\n            st.write(\"So, the imbalance can still be tolerated since the proportion is %.2f\" %proportion , \": 1\")\r\n    \r\n    elif page == \"Dashboard\":\r\n        st.image('HR-Analytics.jpg')\r\n        st.title(\"Dashboard of Attrition Data\")\r\n        st.header(\"Here you can see data visualizations and my analysis of each feature, so you can understand why I pre-processed the data the way I did\")\r\n        data_type = st.sidebar.selectbox(\"What type of data do you want to visualize and analyze?\", [\"Numerical\",\"Categorical\"])\r\n        if data_type == \"Numerical\":\r\n            if st.checkbox(\"Wanna know how I analyze the data and the conclusion from my analysis?\"):\r\n                st.write(\"I use these steps to analyze how much each numerical factor affects attrition: \")\r\n                st.write(\"1. I sort the table based on the values of the column I want to analyze \")\r\n                st.write(\"2. I pick only the top/bottom 10% of the data (depending on the column I analyze), i.e. rows ranked 1 to 147 or 1323 to 1470 \")\r\n                st.write(\"3. I count how many employees in that 10% of the data resigned\")\r\n                st.write(\"4. I divide the number from step 3 by 147 in order to get a percentage\")\r\n                st.write(\"5. If the percentage from step 4 is low, then the correlation between that column and attrition is also low\")\r\n                feature = st.selectbox('Choose category : ',data_numeric_short.columns)\r\n                analytics(feature)\r\n            \r\n            \r\n            if st.checkbox(\"Wanna see numerical data visualization?\"):\r\n                show_columns_x= st.selectbox(\"Select your x axis\" , data_numeric.columns)\r\n                show_columns_y= st.selectbox(\"Select your y axis\", data_numeric.columns)\r\n                show_color=st.selectbox(\"What data do you want as color?\",data_numeric.columns)\r\n                if st.button(\"Let's go!\") :\r\n                    with st.spinner('Wait a sec'):\r\n                        time.sleep(2)\r\n                        st.success('Here it is!')\r\n                    fig = px.scatter(data,x= show_columns_x, y=show_columns_y, color= show_color, title='Distribution of attrition data')\r\n                    st.plotly_chart(fig)\r\n        \r\n        if data_type == \"Categorical\":\r\n            subject = st.radio('Choose category : ',['Department','EducationField',\r\n                                        'Gender','JobRole',\r\n                                        'MaritalStatus'])\r\n            \r\n            if st.button('Visualize!'):\r\n                with st.spinner('Wait a sec'):\r\n                    time.sleep(2)\r\n                    st.success('Here it is!')\r\n                viz1(subject)\r\n        \r\n        \r\n    elif page == \"Prediction\":\r\n        st.image('prediction.jpg')\r\n        st.title(\"Attrition Prediction\")\r\n        st.header('Any human capital team can use this website to predict whether an employee will resign or not')\r\n        \r\n        name = st.text_input('Write the name of the employee whose attrition you want to predict:')\r\n        \r\n        age=st.slider('Age :',17,61,18)\r\n        dataset.Age[0]=age\r\n        \r\n        joblevel=st.slider('What is his/her job level?', 1,6,2)\r\n        dataset.JobLevel[0]=joblevel\r\n        \r\n        income = st.slider('How much is his/her monthly income?' ,1000,20000,1001)\r\n        dataset.MonthlyIncome[0]=income\r\n        \r\n        numcompaniesworked=st.slider('How many companies has he/she worked for before?' , 0,10,1)\r\n        dataset.NumCompaniesWorked[0]=numcompaniesworked\r\n        \r\n        stock=st.slider('What stock option level does he/she have in this company?',0,4,1)\r\n        dataset.StockOptionLevel[0]=stock\r\n        \r\n        total_working_years=st.slider('How many years has he/she been working in total?',0,50,1)\r\n
        dataset.TotalWorkingYears[0]=total_working_years\r\n        \r\n        bistrip= st.radio('How often does he/she travel for business reasons?',['Never','Rarely','Often'])\r\n        if bistrip=='Never':\r\n            dataset['Business Travel encode'][0]=0.0\r\n        elif bistrip=='Rarely':\r\n            dataset['Business Travel encode'][0]=1.0\r\n        else:\r\n            dataset['Business Travel encode'][0]=2.0\r\n        \r\n        x = data[['TotalWorkingYears','Age','MonthlyIncome','JobLevel','StockOptionLevel','NumCompaniesWorked','Business Travel encode']]\r\n        y = data['attrition encode'] \r\n        x_train, x_test, y_train, y_test= train_test_split(x,y,test_size=0.3,random_state=45) \r\n        \r\n        st.write('If you do not know which model to choose, the links down below explain each model')\r\n        st.write('Learn more about logistic regression : https://towardsdatascience.com/introduction-to-logistic-regression-66248243c148 ')\r\n        st.write('Learn more about Naive Bayes Theorem : https://towardsdatascience.com/naive-bayes-in-machine-learning-f49cc8f831b4 ')\r\n        st.write('Learn more about Decision Tree : https://towardsdatascience.com/decision-trees-in-machine-learning-641b9c4e8052 ')\r\n        st.write('Learn more about Random Forest : https://towardsdatascience.com/understanding-random-forest-58381e0602d2 ')\r\n        \r\n        st.write('But I suggest using logistic regression for prediction since it has the highest accuracy compared to decision tree, SVM, naive bayes, and even random forest')\r\n        predict = st.selectbox(\"But in case you want to check, I made it for you. What model do you want to use?\", ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest'])\r\n        if predict=='Logistic Regression':\r\n            log_modelling(name)\r\n        elif predict=='Naive Bayes':\r\n            naivebayes_modelling(name)\r\n        elif predict=='Decision Tree':\r\n            decisiontree_modelling(name)\r\n        elif predict=='Random Forest':\r\n            randomforest_modelling(name)\r\n        \r\n\r\n### This is the start of function for data analytics\r\ndef analytics(feature):\r\n    feature_head=data_numeric_short.sort_values(by = feature ).head(147)\r\n    feature_tail=data_numeric_short.sort_values(by = feature ).tail(147)\r\n    top_10= (feature_head[feature_head['Attrition'] == 'Yes'].count()[0]/147)*100\r\n    bottom_10= (feature_tail[feature_tail['Attrition'] == 'Yes'].count()[0]/147)*100\r\n    st.write(\"So there are \" , top_10 , \" % resigned employees in the top 10% rank and \" , bottom_10 , \" % resigned employees in the bottom 10% rank \")\r\n    if (top_10 < 20.0) & (bottom_10 < 20.0) :\r\n        st.write(\"It means \" , feature ,\" doesn't really drive people to resign\")\r\n    elif (top_10 >= 20.0) :\r\n        st.write(\"It means \" , feature ,\" is one of the factors of attrition\")\r\n    elif (top_10 < 20.0) & (bottom_10 >=20.0) :\r\n        st.write(\"It means \" , feature ,\" is one of the factors of attrition but the correlation is negative\")\r\n    \r\n    if st.button('Conclusion?'):\r\n        st.write('I use only 7 features (TotalWorkingYears, Age, MonthlyIncome, JobLevel, StockOptionLevel, NumCompaniesWorked and Business Travel) to predict attrition. This decision is based on 3 reasons:')\r\n        st.write('1. I am wary of using a model that I cannot explain. So instead of just using feature importance in decision tree/random forest, I did the feature analysis by myself. Check out the data analytics part to see how I did that')\r\n        st.write('2. I already validated the importance of these features using machine learning, and these features have pretty good importance scores too!')\r\n        st.write('3. These chosen features are things that HC can fill in by themselves. Features like satisfaction index have to be filled in by the employee')\r\n
        st.write('If you already have data like satisfaction index, I could add the feature here. Just contact me!')\r\n### This is the end of function for data analytics\r\n    \r\n### This is the start of function for categorical data visualization    \r\ndef viz1(subject):    \r\n    if subject=='Department':\r\n        subject_x = 'Department_x'\r\n        subject_y = 'Department_y'\r\n    elif subject=='EducationField':\r\n        subject_x = 'EducationField_x'\r\n        subject_y = 'EducationField_y'\r\n    elif subject=='Gender':\r\n        subject_x = 'Gender_x'\r\n        subject_y = 'Gender_y'\r\n    elif subject=='JobRole':\r\n        subject_x = 'JobRole_x'\r\n        subject_y = 'JobRole_y'\r\n    elif subject=='MaritalStatus':\r\n        subject_x = 'MaritalStatus_x'\r\n        subject_y = 'MaritalStatus_y'\r\n    \r\n    resign = pd.DataFrame(data.groupby([subject]).sum()['attrition encode'].reset_index())\r\n    sub_all = pd.DataFrame(data[subject].value_counts().reset_index())\r\n\r\n    sub_data = pd.merge(resign,sub_all,right_on='index', left_on= subject)\r\n    sub_data['Not Resign'] = sub_data[subject_y] - sub_data['attrition encode']\r\n    sub_data['Resign Percentage'] = (sub_data['attrition encode']/sub_data[subject_y])*100\r\n    sub_data['Not resign percentage'] = (sub_data['Not Resign']/sub_data[subject_y])*100\r\n\r\n    \r\n    plt.figure(figsize=[15,10])\r\n    plt.bar(x=sub_data['index'], height=sub_data['attrition encode'])\r\n    plt.bar(x=sub_data['index'], height=(sub_data['Not Resign']),bottom=sub_data['attrition encode'])\r\n    \r\n    plt.title(\"{} vs Number of Employees who Resigned or Not\".format(subject))\r\n    plt.legend(['Resign','Not Resign'])\r\n    plt.xlabel(subject)\r\n    plt.ylabel(\"Number of People\")\r\n    st.pyplot()    \r\n    pd.options.display.float_format = '{:,.2f}'.format\r\n    st.write(sub_data[[subject_x,subject_y,'Resign Percentage','Not resign percentage']].sort_values('Resign Percentage'))\r\n###This is the end of function for categorical data visualization\r\n\r\n###This is the definition of variable that we're using \r\nx = data[['TotalWorkingYears','Age','MonthlyIncome','JobLevel','StockOptionLevel','NumCompaniesWorked','Business Travel encode']]\r\ny = data['attrition encode'] \r\nx_train, x_test, y_train, y_test= train_test_split(x,y,test_size=0.3,random_state=45) \r\n###This is the end of variable definition \r\n\r\n###This is the start of function for logistic regression\r\ndef log_modelling(name):\r\n    model_lr = LogisticRegression()\r\n    model_lr.fit(x_train, y_train)\r\n    accuracy = model_lr.score(x_test,y_test)\r\n    st.write(\"Your employee with name \" , name , \" has \", int(model_lr.predict_proba(dataset)[0][1] *100), \"% chance to resign\")\r\n    st.write('This logistic regression model has accuracy :',accuracy*100,'%') \r\n###This is the end of function for logistic regression\r\n    \r\n###This is the start of function for naive bayes theorem\r\ndef naivebayes_modelling(name):\r\n    model_nb = GaussianNB()\r\n    model_nb.fit(x_train, y_train)\r\n    accuracy1 = model_nb.score(x_test,y_test)\r\n    st.write(\"Your employee with name \" , name , \" has \", int(model_nb.predict_proba(dataset)[0][1] *100), \"% chance to resign\")\r\n    st.write('This naive bayes model has accuracy :',accuracy1*100,'%') \r\n###This is the end of function for naive bayes theorem\r\n    \r\n###This is the start of function for decision tree \r\ndef decisiontree_modelling(name):\r\n    model_dt = DecisionTreeClassifier()\r\n    model_dt.fit(x_train, y_train)\r\n    accuracy2 = model_dt.score(x_test,y_test)\r\n    if model_dt.predict(dataset)[0] == 0:\r\n        status = 'will not resign'\r\n    else:\r\n        status = 'will resign'\r\n    st.write(\"Your employee with name \" , name , status)\r\n    st.write('This decision tree model has accuracy :',accuracy2*100,'%') \r\n    st.write(\"Oh yeah, and just in case you want to check whether the features I used are also important based on feature importance in the decision tree, here you go :\")\r\n    featimp = pd.DataFrame(list(model_dt.feature_importances_), columns = ['Importances'])\r\n    featcol = pd.DataFrame(list(x_train.columns), columns = ['Parameter'])\r\n    featimp = featimp.join(featcol)\r\n    featimp = pd.DataFrame(featimp.sort_values(by = ['Importances'], ascending = False))\r\n    st.write(\"Feature importances : \\n\" , featimp)\r\n###This is the end of function for decision tree\r\n\r\n###This is the start of function for random forest\r\ndef randomforest_modelling(name):\r\n    model_rf = RandomForestClassifier()\r\n    model_rf.fit(x_train, y_train)\r\n    accuracy3 = model_rf.score(x_test,y_test)\r\n    if model_rf.predict(dataset)[0] == 0:\r\n        status = 'will not resign'\r\n    else:\r\n        status = 'will resign'\r\n    st.write(\"Your employee with name \" , name , status)\r\n    st.write('This random forest model has accuracy :',accuracy3*100,'%') \r\n    st.write(\"Oh yeah, and just in case you want to check whether the features I used are also important based on feature importance in the random forest, here you go :\")\r\n    \r\n    feat_importances = pd.Series(model_rf.feature_importances_, index=x.columns)\r\n    feat_importances = feat_importances.nlargest(20)\r\n    feat_importances.plot(kind='barh')\r\n    st.pyplot()\r\n###This is the end of function for random forest\r\n    \r\nmain()","repo_name":"zakkipuar23/human_resource_attrition_analytics","sub_path":"Attrition_Spyder.py","file_name":"Attrition_Spyder.py","file_ext":"py","file_size_in_byte":15205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
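# --- Added example (editorial, not part of the original records) ---
# The four *_modelling() functions above differ only in the estimator they fit. A
# sketch of the same accuracy comparison as one loop (compare_models is a hypothetical
# name; the train/test arrays are assumed to come from train_test_split as above):
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

def compare_models(x_train, x_test, y_train, y_test):
    models = {
        'Logistic Regression': LogisticRegression(),
        'Naive Bayes': GaussianNB(),
        'Decision Tree': DecisionTreeClassifier(),
        'Random Forest': RandomForestClassifier(),
    }
    # Fit each model once and record its mean accuracy on the held-out split.
    return {name: model.fit(x_train, y_train).score(x_test, y_test)
            for name, model in models.items()}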
+{"seq_id":"41246505790","text":"# This problem was recently asked by Facebook:\n\n# Given a number n, find the least number of squares needed to sum up to the number.\n\n\ndef square_sum(n):\n    # Try every perfect square x*x <= n and recurse on the remainder. Note this\n    # naive recursion is exponential; memoizing it (e.g. with functools.lru_cache)\n    # would avoid recomputing the same subproblems.\n    if n <= 3:\n        return n\n\n    res = n\n\n    for x in range(1, n + 1):\n        temp = x * x\n        if temp > n:\n            break\n        else:\n            res = min(res, 1 + square_sum(n - temp))\n\n    return res\n\n\nprint(square_sum(13))\n# Min sum is 3^2 + 2^2\n# 2\n","repo_name":"vishrutkmr7/DailyPracticeProblemsDIP","sub_path":"2020/01 January/dp01012020.py","file_name":"dp01012020.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"7679964869","text":"from os import environ\nimport time\nimport asyncio\nfrom functools import partial\nfrom autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner\nimport json\nimport argparse\n\nPATH = '/vagrant/etc/tunfish/'\nCERTPATH = '/vagrant/certs/'\n\n\nclass Component(ApplicationSession):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.data = None\n\n    async def onJoin(self, details):\n\n        def got(started, msg, ff):\n            res = ff.result()\n            duration = 1000. * (time.process_time() - started)\n            print(\"{}: {} in {}\".format(msg, res, duration))\n\n            if msg == \"ADD NETWORK\":\n                print(f\"Network added... done.\")\n\n            if msg == \"ADD GATEWAY\":\n                print(f\"Gateway added... done.\")\n\n            if msg == \"ADD CLIENT\":\n                print(f\"Client added... 
done.\")\n\n print(f\"extra: {self.config.extra['v1']}\")\n args = json.loads(self.config.extra['v1'])\n\n if args['addNetwork'] is not None:\n print(f\"self.data.addNetwork: {args['addNetwork']}\")\n d = {}\n for i in args['addNetwork']:\n k = i.split('=', 1)[0]\n v = i.split('=', 1)[1]\n d[k] = v\n print(f\"DICT: {d}\")\n\n t1 = time.process_time()\n task = self.call(u'com.portier.add_network', d)\n task.add_done_callback(partial(got, t1, \"ADD NETWORK\"))\n await asyncio.gather(task)\n\n if args['addGW'] is not None:\n print(f\"self.data.addGW: {args['addGW']}\")\n d = {}\n for i in args['addGW']:\n k = i.split('=', 1)[0]\n v = i.split('=', 1)[1]\n d[k] = v\n print(f\"DICT: {d}\")\n\n t1 = time.process_time()\n task = self.call(u'com.portier.add_gateway', d)\n task.add_done_callback(partial(got, t1, \"ADD GATEWAY\"))\n await asyncio.gather(task)\n\n if args['addClient'] is not None:\n print(f\"self.data.addClient: {args['addClient']}\")\n d = {}\n for i in args['addClient']:\n k = i.split('=', 1)[0]\n v = i.split('=', 1)[1]\n d[k] = v\n print(f\"DICT: {d}\")\n t1 = time.process_time()\n task = self.call(u'com.portier.add_client', d)\n task.add_done_callback(partial(got, t1, \"ADD CLIENT\"))\n await asyncio.gather(task)\n\n self.leave()\n\n def onDisconnect(self):\n # delete interface\n # reconnect\n asyncio.get_event_loop().stop()\n\n\nclass TunfishControl:\n\n controldata = None\n\n def tf_parse(self, clargs):\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--addNetwork\",\n metavar=\"KEY=VALUE\",\n nargs=\"+\",\n help=\"add new network\"\n \"name=unique name for the network (required)\"\n \"(not yet implemented) services=service1,service2,... \\\n which service should be available at the network \")\n\n parser.add_argument(\"--addGW\",\n metavar=\"KEY=VALUE\",\n nargs=\"+\",\n help=\"add new gateway\"\n \"name=unique name for the gateway (required)\"\n \"ip=publicIP (required)\"\n \"network=network to which the gateway should be added\")\n\n parser.add_argument(\"--addClient\",\n metavar=\"KEY=VALUE\",\n nargs=\"+\",\n help=\"add new client\"\n \"name=unique name for the client (required)\"\n \"network=network to which the client should be added (required)\")\n\n print(f\"1\")\n args = parser.parse_args(args=clargs[1:])\n print(f\"2\")\n # if args.addGW:\n # print(f\"add Gateway... 
{args}\")\n # return args.addGW\n print(f\"ARGS: {args}\")\n print(f\"ARGS.__dict__: {args.__dict__}\")\n return json.dumps(args.__dict__)\n\n def start(self, conf):\n\n print(f\"ARGS: {conf}\")\n data = self.tf_parse(conf)\n import six\n import ssl\n\n with open(PATH + 'tf-ctl' + '.json', 'r') as f:\n self.controldata = json.load(f)\n\n cf = CERTPATH + self.controldata['cf']\n kf = CERTPATH + self.controldata['kf']\n caf = CERTPATH + self.controldata['caf']\n\n client_ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n client_ctx.verify_mode = ssl.CERT_REQUIRED\n client_ctx.options |= ssl.OP_SINGLE_ECDH_USE\n client_ctx.options |= ssl.OP_NO_COMPRESSION\n client_ctx.load_cert_chain(certfile=cf, keyfile=kf)\n client_ctx.load_verify_locations(cafile=caf)\n client_ctx.set_ciphers('ECDH+AESGCM')\n\n # url = environ.get(\"AUTOBAHN_DEMO_ROUTER\", u\"wss://127.0.0.1:8080/ws\")\n url = environ.get(\"AUTOBAHN_DEMO_ROUTER\", u\"wss://172.16.42.2:8080/ws\")\n print(f\"URL: {url}\")\n if six.PY2 and type(url) == six.binary_type:\n url = url.decode('utf8')\n realm = u\"tf_cb_router\"\n runner = ApplicationRunner(url, realm, ssl=client_ctx, extra={'v1': data})\n runner.run(Component)\n","repo_name":"tunfish/tunfish-sandbox","sub_path":"src/tunfish/tfctl.py","file_name":"tfctl.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"}