diff --git "a/1737.jsonl" "b/1737.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1737.jsonl"
@@ -0,0 +1,35 @@
+{"seq_id":"39243118844","text":"#!/usr/bin/python\n#\n# Use the script after program modifications to check for regressions\n\nimport sys\nimport os\nimport numpy as np\n\n# Make sure we import the right code\npwd = os.getcwd()\ncardoonPath = pwd[:pwd.rfind('test')]\nsys.path.insert(0, cardoonPath)\nimport cardoon.simulator as cs\nimport cardoon.circuit as cir\n\nflag = False\nwarningflag = False\n# Check arguments\nif len(sys.argv) >= 2:\n    if sys.argv[1] == '-g':\n        if len(sys.argv) == 2:\n            print('\\nUsage: run_tests.py [-g] ')\n            exit(-1)\n        else:\n            # Re-generate output data\n            netlists = sys.argv[2:]\n            flag = True\n            print('\\n=======================================================')\n            print(' Re-generating reference results.')\n            print(' *Existing results are being erased*')\n            print('\\n=======================================================')\n    else:\n        if sys.argv[1] == '*.net':\n            # argument was not expanded (we are probably on Windows)\n            files = os.listdir(os.curdir)\n            netlists = [f for f in files if f[-4:]=='.net']\n        else:\n            netlists = sys.argv[1:]\nelse:\n    print('\\nNo netlist specified, exiting ...')\n    exit(-1)\n\nfor net in netlists:\n    circuit = cir.get_mainckt()\n    analysisQueue = cs.parse_net(net, circuit)\n    cs.run_analyses(analysisQueue)\n    basename = net.split('.net')[0]\n    if flag:\n        # Move output data to reference file names\n        if circuit.saveReqList:\n            for outreq in circuit.saveReqList:\n                oldname = basename + '_' + outreq.type + '.npz'\n                newname = basename + '_' + outreq.type + '_ref.npz'\n                try:\n                    os.rename(oldname, newname)\n                except OSError:\n                    pass\n    else:\n        warningflag = False\n        # Look for *_ref.npz files and compare with corresponding *.npz\n        files = [name for name in os.listdir('.')\n                 if (name.find(basename) == 0) and (name.find('_ref.npz') > 0)]\n        residual = 0.\n        if not files:\n            print('\\nFatal: no reference files for {0}'.format(net))\n            print('\\n*** Test Failed! ***')\n            exit(-1)\n        for reffile in files:\n            delta = 0.\n            # Look for corresponding output file\n            outfile = reffile.replace('_ref','')\n            try:\n                refResult = np.load(reffile)\n            except IOError:\n                print('\\nProblem reading reference file: {0}'.format(reffile))\n                print('\\n*** Test Failed! ***')\n                exit(-1)\n            try:\n                result = np.load(outfile)\n            except IOError:\n                print('\\nProblem reading output file: {0}'.format(outfile))\n                print('\\n*** Test Failed! ***')\n                exit(-1)\n\n            for var in refResult.files:\n                try:\n                    delta = refResult[var] - result[var]\n                except KeyError:\n                    print('\\nError: \"{0}\" not found in {1}'.format(\n                        var, outfile))\n                    print('\\n*** Test Failed! 
***')\n                    exit(-1)\n                res = np.average(abs(delta))\n                residual += res\n                if not np.all(abs(delta) < (abs(cs.glVar.reltol *\n                                                refResult[var])\n                                            + cs.glVar.abstol)):\n                    message = \"\"\"\nReference file: {0}\nVariable: {1}\nResidual: {2}\"\"\".format(reffile, var, res)\n                    if res < 1e-3:\n                        print('\\n=============================================')\n                        message = \"Warning: \\n\" + message\n                        print(message)\n                        print('=============================================')\n                        warningflag = True\n                    else:\n                        message = \"\\nTest failed: \\n\" + message\n                        raise Exception(message)\n        print('\\n=======================================================')\n        print(' Success: {0}'.format(net))\n        print(' Sum of residuals: {0}'.format(residual))\n        print('=======================================================\\n')\n\n    # Reset everything\n    cs.reset_all()\n\nif not flag:\n    if warningflag:\n        print('\\n============================================================')\n        print(' All tests completed (with Warnings)')\n        print('============================================================\\n')\n    else:\n        print('\\n============================================================')\n        print(' All tests successfully completed!')\n        print('============================================================\\n')\n\n\n","repo_name":"cechrist/cardoon","sub_path":"test/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"12"}
+{"seq_id":"27783536008","text":"print('\"There are 10 types of people in this world; those who don\\'t understand binary, those who do, and those that understand a base three joke.\"')\n\ndef tertiar(zahl):\n    ganz = zahl // 3\n    rest = zahl % 3\n    if rest == 0:\n        zeichen = '0'\n    elif rest == 2:\n        zeichen = '2'\n    else:\n        zeichen = '1'\n\n    if ganz == 0:\n        return zeichen\n    else:\n        return tertiar(ganz) + zeichen\n\n\nk=3  # number of disks 3:26 4:80\nm=3**k\nfor n in range(1,m):\n    print (tertiar(n))\n\n\n# https://www.youtube.com/watch?v=bdMfjfT0lKk","repo_name":"Xilefix/CMDShaker","sub_path":"Hanoi_Constrained_Tertary/constrainedHanoi.py","file_name":"constrainedHanoi.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"}
+{"seq_id":"25705360455","text":"import utils.gflags as gflags\nfrom utils.logger import GetLog\nfrom utils.config import Config, WindowTopModeFlag\n\nimport re\nimport os\nimport time\nimport asyncio\nimport threading\nimport win32pipe  # named pipes\nimport win32file\nimport tkinter as tk\n\nLog = GetLog()\n\n# ======================= Argument parsing ===================================\n\nFlags = gflags.FLAGS\n# Settings\ngflags.DEFINE_integer('language', -1, 'Change the recognition language. Pass an index (from 0) to switch to the language with that index on the settings page.')\ngflags.DEFINE_integer('window_top_mode', -1, 'Window-on-top mode. 0 for silent mode, 1 for automatic pop-up.')\ngflags.DEFINE_string('output_file_path', '', 'Specify the directory (folder) of the output file.')\ngflags.DEFINE_string('output_file_name', '', 'Specify the output file name (without extension).')\n# Commands\ngflags.DEFINE_bool('hide', False, 'If true, hide the window and minimize it to the tray.')\ngflags.DEFINE_bool('show', False, 'If true, bring the main window to the front.')\ngflags.DEFINE_bool('exit', False, 'If true, exit Umi-OCR.')\n# Tasks\ngflags.DEFINE_bool('clipboard', False, 'If true, read the clipboard once and run recognition on it.')\ngflags.DEFINE_bool('screenshot', False, 'If true, take a screenshot once and run recognition on it.')\ngflags.DEFINE_string('img', '', 'Pass local image paths. Wrap paths containing spaces in quotes \"\". Multiple paths can be joined with commas.')\n\n\nDictDefault = Flags.FlagValuesDict()  # build the default-value dict\n\n\ndef Parse(args):  # Parse arguments. Takes an argument list, returns the parsed dict.\n    try:\n        Flags.Reset()  # clear old arguments\n        Flags(args)  # parse arguments\n        f = Flags.FlagValuesDict()  # convert to a dict\n        if 
f['img']:  # handle image paths\n            if ',' in f['img']:  # split multiple paths\n                f['img'] = f['img'].split(',')\n            else:  # wrap a single path\n                f['img'] = [f['img']]\n        return f\n    except Exception as e:\n        return {'error': f'Failed to parse command-line arguments.\\nArguments: {args}\\nError: {e}', **DictDefault}\n\n\ndef Mission(flags):\n    '''Execute the tasks described by the parsed argument dict.'''\n\n    # Settings & commands\n    if flags['exit']:  # exit\n        Config.main.win.event_generate('<>')\n        return\n    if flags['show']:  # show the main window\n        Config.main.gotoTop(isForce=True)\n    elif flags['hide']:  # hide the main window\n        # if there is a tray icon and minimize-to-tray is enabled\n        if Config.get('isTray') and Config.get('isBackground'):\n            Config.main.onCloseWin()  # close the window\n        else:  # otherwise\n            Config.main.win.iconify()  # minimize\n    if flags['language'] > -1:  # switch language\n        lans, index = list(Config.get(\"ocrConfig\").keys()), flags['language']\n        if index < len(lans):\n            Config.set(\"ocrConfigName\", lans[index])\n    if flags['window_top_mode'] == 0:  # window pop-up mode\n        Config.set(\"WindowTopMode\", WindowTopModeFlag.never)\n    elif flags['window_top_mode'] == 1:\n        Config.set(\"WindowTopMode\", WindowTopModeFlag.finish)\n    if flags['output_file_path']:  # output file directory\n        Config.set(\"outputFilePath\", flags['output_file_path'])\n    if flags['output_file_name']:  # output file name prefix\n        Config.set(\"outputFileName\", flags['output_file_name'])\n\n    # Tasks\n    if not Config.main.isMsnReady():\n        tk.messagebox.showerror(\n            'Ran into a small problem', 'A task is already in progress.')\n        return\n    if flags['img']:\n        Config.main.clearTable()  # clear the table\n        Config.main.addImagesList(flags['img'])  # import the paths\n        Config.main.run()  # start running\n    elif flags['clipboard']:\n        Config.main.runClipboard()\n    elif flags['screenshot']:\n        Config.main.openScreenshot()\n\n\ndef ParseStr(strin):  # Parse arguments from a string and execute them directly.\n    # match everything inside double quotes\n    pattern = r'\"[^\"]*\"'\n    matches = re.findall(pattern, strin)\n    # replace the matched content with special markers\n    for i, match in enumerate(matches):\n        strin = strin.replace(match, f'__QUOTE_MARK_{i}__')\n    # split on whitespace\n    result = strin.split()\n    # put the quoted content back in place of the markers\n    for i, match in enumerate(matches):\n        result = [\n            x.replace(f'__QUOTE_MARK_{i}__', match[1:-1]) for x in result]\n    args = ['', *result]  # the first element is empty (program-name slot)\n    flags = Parse(args)\n    if 'error' in flags:\n        tk.messagebox.showerror(\n            'Ran into a small problem', flags['error'])\n        return\n    Mission(flags)\n\n# ======================= Command listener ===================================\n\n\npipeName = r'\\\\.\\pipe\\umiocr'  # named pipe\npipeBufferSize = 65535  # pipe buffer size\n\n\nclass Listener:\n\n    def __init__(self):\n        # start the thread that listens for external commands\n        def runLoop():  # start the event loop\n            asyncio.set_event_loop(self.__runMissionLoop)\n            self.__runMissionLoop.run_forever()\n\n        # create an event loop in the current thread\n        self.__runMissionLoop = asyncio.new_event_loop()\n        # start a new thread and run the event loop inside it\n        threading.Thread(target=runLoop).start()\n        # hand the listener coroutine to the event loop running in the new thread\n        asyncio.run_coroutine_threadsafe(\n            self.__listener(), self.__runMissionLoop)\n\n    async def __listener(self):  # listener\n        # check whether the named pipe already exists\n        if os.path.exists(pipeName):\n            Log.error(f'Named pipe {pipeName} already exists!')\n            return\n        # create the named pipe\n        pipe = win32pipe.CreateNamedPipe(\n            pipeName,  # pipe name\n            win32pipe.PIPE_ACCESS_DUPLEX,  # open mode: read/write\n            # pipe mode: message type, block the thread while waiting\n            win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_READMODE_MESSAGE | win32pipe.PIPE_WAIT,\n            win32pipe.PIPE_UNLIMITED_INSTANCES,  # maximum number of instances: unlimited\n            pipeBufferSize,  # output buffer size\n            pipeBufferSize,  # input buffer size\n            0,  # default timeout\n            None  # security attributes\n        )\n\n        # wait for the program to finish initializing\n        while not Config.isInit():\n            time.sleep(0.1)\n\n        while True:\n            try:\n                # connect the named pipe\n                Log.info(f\"Named pipe {pipeName} waiting for a connection\")\n                win32pipe.ConnectNamedPipe(pipe, None)\n\n                # loop, listening for messages from the pipe\n                while True:\n                    try:\n                        # read data from the named pipe\n                        indata = win32file.ReadFile(\n                            pipe, pipeBufferSize)\n                        print(f\"==============data read:\\n{indata}\")\n                        data = indata[1].decode()\n                        ParseStr(data)  # parse and execute\n                    except 
Exception as e:\n                        print(f\"Error reading data: {e}\")\n                        break\n            finally:  # a client disconnected\n                try:\n                    win32pipe.DisconnectNamedPipe(pipe)  # close the pipe so it can reopen next time\n                except:\n                    pass\n\n\nListener()  # start listening\n\n# echo hello > \\\\.\\pipe\\umiocr\n","repo_name":"hiroi-sora/Umi-OCR","sub_path":"utils/command_arg.py","file_name":"command_arg.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"zh","doc_type":"code","stars":12403,"dataset":"github-code","pt":"12"}
+{"seq_id":"16962120535","text":"import requests\nimport json\n\n#Open the file with all the urls\nfile = open('input.txt','r')\n\n#Get a list of all the urls in the file\nurls = file.read().splitlines()\n\n#Close the file\nfile.close()\n\n#Open or create the file to save the data\nfile = open('output.txt', 'w')\n\n#Loop through the urls, saving the data to the file each time\nfor url in urls:\n    response = requests.get(url)\n    file.write(json.dumps(response.json()) + \"\\n\")\n\n#Close the file\nfile.close()","repo_name":"rpennin/URL-File-Requests","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"73773177941","text":"import bisect\n\nclass TimeMap:\n\n    def __init__(self):\n        self.time_val = dict[int, str]()\n        self.data = dict[str, list[int]]()\n\n    def set(self, key: str, value: str, timestamp: int) -> None:\n        if key not in self.data:\n            self.data[key] = list[int]()\n        self.data[key].append(timestamp)\n        self.time_val[timestamp] = value\n\n    def get(self, key: str, timestamp: int) -> str:\n        if key not in self.data:\n            return \"\"\n        time_list = self.data[key]\n        idx = bisect.bisect_right(time_list, timestamp) - 1\n        if idx == -1:\n            return \"\"\n        else:\n            return self.time_val[time_list[idx]]\n\n\nd = [1,4,8,12,9]\nr = bisect.bisect_right(d, 7)\nprint(r)","repo_name":"huangweijing/weo_leetcode","sub_path":"981_Time_Based_Key-Value_Store.py","file_name":"981_Time_Based_Key-Value_Store.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"}
+{"seq_id":"37139186456","text":"'''\nLiam Adams - lbadams2\n'''\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\nfrom qiskit import Aer, execute\nfrom qiskit.tools.visualization import plot_histogram\nfrom math import pi\nimport matplotlib.pyplot as plt\n\n\n# 1/2 in exponent rotates pi/2, 1/4 in exponent rotates pi/4\ndef fredkin(qc, control, targets):\n    qc.rz(pi/4, control[0])\n\n    qc.cx(targets[1], targets[0])\n    qc.ry(-pi/2, targets[1])\n\n    qc.rz(pi/4, targets[0])\n    qc.rz(pi/4, targets[1])\n\n    qc.cx(control[0], targets[0])\n    qc.cx(targets[0], targets[1])\n\n    qc.rz(-pi/4, targets[0])\n    qc.rz(pi/4, targets[1])\n\n    qc.cx(control[0], targets[0])\n    qc.cx(targets[0], targets[1])\n    qc.cx(control[0], targets[0])\n\n    qc.rz(-pi/4, targets[1])\n    qc.cx(targets[0], targets[1])\n\n    qc.rx(pi/2, targets[0])\n    qc.rz(-pi/4, targets[1])\n\n    qc.cx(control[0], targets[0])\n    qc.cx(targets[0], targets[1])\n\n    qc.rz(pi/2, targets[1])\n    qc.rx(pi/2, targets[0])\n    qc.rx(-pi/2, targets[1])\n\n    return qc\n\n\ndef create_circuit(control, targets, classical, vals):\n    qc = QuantumCircuit(control,targets,classical)\n    if vals[0]:  # control val\n        qc.x(control[0])\n    if vals[1]:  # target 0 val\n        qc.x(targets[0])\n    if vals[2]:  # target 1 val\n        qc.x(targets[1])\n\n    qc = fredkin(qc, control, targets)\n    qc.measure(control[0],classical[0])\n    qc.measure(targets[0],classical[1])\n    
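# when the control qubit is |1>, the two target values have been swapped before this readout\n    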
qc.measure(targets[1],classical[2])\n return qc\n\n\ndef create_circuits():\n control = QuantumRegister(1) # input\n targets = QuantumRegister(2) # output\n c = ClassicalRegister(3) \n backend = Aer.get_backend('qasm_simulator')\n\n first_qc = None\n for i in range(8):\n bin_string = bin(i)[2:].zfill(3)\n vals = [int(b) for b in bin_string]\n qc = create_circuit(control, targets, c, vals)\n if i == 0:\n first_qc = qc\n job = execute(qc, backend, shots=100)\n result = job.result()\n print(result.get_counts())\n #plot_histogram(result.get_counts())\n\n first_qc.draw(output='mpl')\n plt.show() # big number in qubit figure is \"version\"\n\n\nif __name__ == '__main__':\n create_circuits() ","repo_name":"lbadams2/csc_qc","sub_path":"hw3/fredkin.py","file_name":"fredkin.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30122482849","text":"import numpy as np\r\nimport random\r\nimport copy\r\nimport sys\r\nfrom pathlib import Path\r\nsys.path.append(str(Path(__file__).resolve().parent))\r\nimport Parameters.GA as GA\r\nfrom Plot import plot\r\n\r\nfbest = -1*sys.maxsize\r\nwbest = []\r\n\r\nu_lim = GA.u_lim\r\nl_lim = GA.l_lim\r\n\r\nalpha = GA.alpha\r\nP = GA.parents\r\nC = GA.children\r\nL = GA.elites\r\nG = GA.G\r\n\r\nclass GA:\r\n\tdef __init__(self,func,N,**kwargs):\r\n\t\tself.game = func\r\n\t\tself.R = 1/N\r\n\t\tself.w_list = [[random.uniform(l_lim/10,u_lim/10) for i in range(N)] for j in range(C)]\r\n\t\tself.save = kwargs['save']\r\n\r\n\tclass Parent:\r\n\t\tdef __init__(self, w, f):\r\n\t\t\tself.w = w\r\n\t\t\tself.f = f\r\n\r\n\tdef _mutation(self,r_list):\r\n\t\tfor n in range(len(r_list)):\r\n\t\t\tif self.R > random.random(): r_list[n] = random.uniform(l_lim,u_lim)\r\n\t\treturn r_list\r\n\r\n\tdef elite_judge(self,p_list,l_list):\r\n\t\tr_list = copy.copy(l_list)\r\n\t\tfor p in p_list:\r\n\t\t\tr_list = sorted(r_list,key=lambda r:r.f,reverse=True)\r\n\t\t\tif r_list[L-1].f < p.f: r_list[L-1] = p\r\n\t\treturn r_list\r\n\r\n\tdef blx_alpha(self,j_list):\r\n\t\tr_list = random.sample(j_list,2)\r\n\t\tA = copy.copy(r_list[0].w)\r\n\t\tB = copy.copy(r_list[1].w)\r\n\t\tC = []\r\n\t\tfor a,b in zip(A,B):\r\n\t\t\tL = abs(a-b)\r\n\t\t\tLa = L*alpha\r\n\t\t\tlower = l_lim if b-La < l_lim else b-La\r\n\t\t\tupper = u_lim if a+La > u_lim else a+La\r\n\t\t\tC.append(random.uniform(lower,upper))\r\n\t\treturn self._mutation(C)\r\n\t\t\t\r\n\tdef train(self):\r\n\t\tglobal fbest,wbest\r\n\t\tp_list,f_list = [],[]\r\n\t\tl_list = [self.Parent([],-1*sys.maxsize) for i in range(L)]\r\n\r\n\t\tfor i in range(G):\r\n\t\t\tfor w in self.w_list:\r\n\t\t\t\tp = self.game(w)\r\n\t\t\t\tif p[1] > fbest:\r\n\t\t\t\t\tfbest = p[1]\r\n\t\t\t\t\twbest = p[0]\r\n\t\t\t\tp_list.append(self.Parent(p[0],p[1]))\r\n\t\t\t\tf_list.append(p[1])\r\n\r\n\t\t\t# 評価値の高い順にソート\r\n\t\t\tp_list = sorted(p_list,key=lambda p: p.f,reverse=True)\r\n\t\t\t\r\n\t\t\t# エリート更新\r\n\t\t\tl_list = self.elite_judge(p_list,l_list)\r\n\t\t\tprint(f\"{i+1}: \",max(f_list))\r\n\r\n\t\t\t# 淘汰\r\n\t\t\tdel p_list[P:]\r\n\r\n\t\t\t# エリートと親のリスト\r\n\t\t\tjoin_list = p_list + l_list\r\n\r\n\t\t\t# 新たな子のリストを作成\r\n\t\t\tself.w_list.clear()\r\n\t\t\tfor _ in range(C):\r\n\t\t\t\tc = self.blx_alpha(join_list)\r\n\t\t\t\tself.w_list.append(c)\r\n\t\t\t\r\n\t\t\tp_list.clear()\r\n\r\n\t\t\tif i == int(G/10) -1: self.save(wbest,i+1)\r\n\t\t\tif i == int(G/2) -1: 
self.save(wbest,i+1)\r\n\t\t\t\r\n\t\tprint(fbest)\r\n\t\t#plot(f_list)\r\n\t\treturn wbest\r\n\r\n\r\n","repo_name":"yutarou-1204/LabTask","sub_path":"Algorithm/GA.py","file_name":"GA.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"35292961527","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTitle: COMP9417, 2019S2 \nName: z5141730 Bohan Zhao, z5212483 Kunxing Zhang \nTopic: KDD99 Dataset - Create a Classifier for Intrusion Detection\n!! For more details you can look at notbook and report !!\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.utils import get_file\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\ntry:\n path = get_file('kddcup.data_10_percent.gz', origin = 'http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz')\nexcept:\n print('Error downloading')\n raise\n\n# Download from: http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html\ndf = pd.read_csv(path, header = None)\n\nprint(\"This file datasets have {} rows.\\n\".format(len(df)))\n\ndf.dropna(inplace = True,axis = 1)\n\ndf.columns = [\n 'duration',\n 'protocol_type',\n 'service',\n 'flag',\n 'src_bytes',\n 'dst_bytes',\n 'land',\n 'wrong_fragment',\n 'urgent',\n 'hot',\n 'num_failed_logins',\n 'logged_in',\n 'num_compromised',\n 'root_shell',\n 'su_attempted',\n 'num_root',\n 'num_file_creations',\n 'num_shells',\n 'num_access_files',\n 'num_outbound_cmds',\n 'is_host_login',\n 'is_guest_login',\n 'count',\n 'srv_count',\n 'serror_rate',\n 'srv_serror_rate',\n 'rerror_rate',\n 'srv_rerror_rate',\n 'same_srv_rate',\n 'diff_srv_rate',\n 'srv_diff_host_rate',\n 'dst_host_count',\n 'dst_host_srv_count',\n 'dst_host_same_srv_rate',\n 'dst_host_diff_srv_rate',\n 'dst_host_same_src_port_rate',\n 'dst_host_srv_diff_host_rate',\n 'dst_host_serror_rate',\n 'dst_host_srv_serror_rate',\n 'dst_host_rerror_rate',\n 'dst_host_srv_rerror_rate',\n 'outcome'\n]\n\ndf.head()\n\nnumberic_feature = ['duration', 'src_bytes', 'dst_bytes', 'wrong_fragment', \n 'urgent', 'hot', 'num_failed_logins', 'num_compromised',\n 'root_shell', 'su_attempted', 'num_root', 'num_file_creations',\n 'num_shells', 'num_access_files', 'num_outbound_cmds',\n 'count', 'srv_count', 'serror_rate', 'srv_serror_rate',\n 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate',\n 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',\n 'dst_host_srv_count', 'dst_host_same_srv_rate',\n 'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate',\n 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',\n 'dst_host_srv_serror_rate', 'dst_host_rerror_rate',\n 'dst_host_srv_rerror_rate']\n\ncategirical_features = ['protocol_type', 'service', 'flag', 'land', 'logged_in',\n 'is_host_login', 'is_guest_login']\n\nfor f in numberic_feature:\n df[f] = (df[f] - df[f].mean()) / df[f].std()\n\nfor f in categirical_features:\n dummies = pd.get_dummies(df[f])\n \n for x in dummies.columns:\n df[f\"{f}-{x}\"] = dummies[x]\n df.drop(f, axis = 1, inplace = True)\n\ndf.dropna(inplace = True, axis = 1)\n\ndf.head()\n\nx_columns = df.columns.drop('outcome')\nx = df[x_columns].values\ndummies = pd.get_dummies(df['outcome']) # Classification\noutcomes = dummies.columns\nnum_classes = len(outcomes)\ny = dummies.values\n\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size = 0.25, random_state = 42)\n\n# DNN\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, 
Activation\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nprint(\"DNN:\")\nmodel = Sequential()\nmodel.add(Dense(10, input_dim = x.shape[1], kernel_initializer = 'normal', activation = 'relu'))\nmodel.add(Dense(50, input_dim = x.shape[1], kernel_initializer = 'normal', activation = 'relu'))\nmodel.add(Dense(10, input_dim = x.shape[1], kernel_initializer = 'normal', activation = 'relu'))\nmodel.add(Dense(1, kernel_initializer = 'normal'))\nmodel.add(Dense(y.shape[1],activation = 'softmax'))\nmodel.compile(loss = 'categorical_crossentropy', optimizer = 'adam')\nmonitor = EarlyStopping(monitor = 'val_loss', min_delta = 1e-3, patience = 5, verbose = 1, mode = 'auto')\nmodel.fit(x_train, y_train, validation_data = (x_test, y_test), callbacks = [monitor], verbose = 2, epochs = 100)\n\npred = model.predict(x_test)\npred = np.argmax(pred,axis = 1)\ny_eval = np.argmax(y_test,axis = 1)\nscore = metrics.accuracy_score(y_eval, pred)\nprint(\"Accuracy: {}\\n\".format(score))\n\n# Random Forest\nfrom sklearn.ensemble import RandomForestClassifier\n\nprint(\"Random Forest:\")\nrfc = RandomForestClassifier(n_estimators = 100, random_state = 90)\nrfc.fit(x_train,y_train)\nscore = rfc.score(x_test,y_test)\nprint(\"Accuracy: %s\\n\"%score)\n\n# k-NN\nfrom sklearn.neighbors import KNeighborsClassifier\n\nprint(\"k-NN:\")\nknn = KNeighborsClassifier()\nknn.fit(x_train,y_train)\nscore = knn.score(x_test,y_test)\nprint(\"Accuracy: %s\\n\"%score)","repo_name":"Leooook/AI_Kdd99","sub_path":"kdd99.py","file_name":"kdd99.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"41160523946","text":"import numpy as np\nfrom pathlib import Path\nfrom PIL import Image\n\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass Maskset(Dataset):\n def __init__(self, mask_root, transform, shuffle=True):\n super().__init__()\n self.transform = transform\n root = Path(mask_root)\n self.paths = np.array([path.as_posix() for path in root.iterdir()])\n if shuffle:\n np.random.shuffle(self.paths)\n\n def _mask(self, path):\n with Image.open(path) as mask:\n mask = self.transform(mask.convert('RGB'))\n return mask\n\n def __getitem__(self, index):\n paths = self.paths[index]\n if isinstance(index, slice):\n n_mask = len(self)\n # masks = (self._mask(path) for path in paths)\n masks = [self._mask(path) for path in paths]\n else:\n masks = self._mask(paths)\n return masks\n\n def __len__(self):\n return len(self.paths)\n\n\ndef mask_iter(mask_root, fine_size):\n transform = transforms.Compose([transforms.Resize((fine_size, fine_size)),\n transforms.ToTensor()])\n return Maskset(mask_root, transform)\n","repo_name":"SanstyleLab/pytorch-book","sub_path":"apps/tools/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"4580260352","text":"from http.server import HTTPServer,BaseHTTPRequestHandler\nhostname = \"0.0.0.0\"\nportnumber = 8080\npublish_this = \" \"\n\n\nclass servername (BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n print (self.path)\n global publish_this\n \n publish_this += \"
\"+self.path\n publish_this + publish_this.replace(\"favicon.ico\",\" \")\n publish_this + publish_this.replace(\"/\",\" \")\n publish_this + publish_this.replace(\"%20\",\" \")\n self.wfile.write(bytes(publish_this ,\"utf-8\"))\n\nif __name__==\"__main__\":\n webserver = HTTPServer ((hostname,portnumber),servername)\n print(\"Yes, you have the webserver running as http://%s:%s\" %(hostname,portnumber))\n\n try: webserver.serve_forever()\n\n except KeyboardInterrupt:\n pass\nwebserver.server_close()\nprint(\"Hey, looks like you stopped the server from running. Nice try\")\n","repo_name":"oluteo/Personal-Projects","sub_path":"python projects/classweb.py","file_name":"classweb.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"45998268538","text":"#!/usr/bin/python3\n\"\"\"\nThis module defines the Student class\n\"\"\"\n\n\nclass Student:\n \"\"\"\n Defines Student class\n\n Args:\n\n first_name (str): first name of the student\n last_name (str): last name of the student\n age (int): age of the student\n \"\"\"\n\n def __init__(self, first_name, last_name, age):\n \"\"\"Initialize class\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"\n Retrieves a dictionary representation of a Student instance\n\n Args:\n attrs (list of strings)\n \"\"\"\n if attrs is None:\n return self.__dict__\n\n new_dict = {}\n\n if type(attrs) is not list:\n raise TypeError(\"attrs is not a list\")\n else:\n for i in attrs:\n if i in self.__dict__:\n new_dict[i] = self.__dict__[i]\n\n return new_dict\n","repo_name":"jyuly12/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36612610878","text":"import numpy\nimport matplotlib\nimport math\n\nportion_down_payment = 0.25\nrate_of_return = 0.04\nmonthly_rate_of_return = rate_of_return / 12\ntotal_cost = 1000000\ndown_payment = total_cost * portion_down_payment\nsemi_annual_raise = 0.07\nmonths = 36\ncurrent_savings = 0.0\n\n# Error margin\nepsilon = 100\n\n# Data supplied by the user\nbase_annual_salary = float(input(\"Enter your annual salary: \"))\nmonthly_salary = base_annual_salary / 12\n\n# Initialising Bisection Search variables\ninitial_high = 10000\nhigh = initial_high\nlow = 0\nguess = (high + low) // 2\nsteps = 0\n\n# Bisection Search Algorithm\nwhile abs(current_savings - down_payment) > epsilon:\n steps += 1\n current_savings = 0.0\n annual_salary = base_annual_salary\n monthly_deposit = monthly_salary * (guess/10000)\n for month in range(1, months + 1):\n current_savings *= 1 + monthly_rate_of_return\n current_savings += monthly_deposit\n if month % 6 == 0:\n annual_salary *= (1+semi_annual_raise)\n monthly_salary = annual_salary/12\n monthly_deposit = monthly_salary*(guess/10000)\n prev_guess = guess\n if current_savings > down_payment:\n high = guess\n else:\n low = guess\n guess = (high + low)//2\n # Check if outside the search space, therefore break infinite loop\n if prev_guess == guess:\n break\n\n# Checking if possible to obtain downpayment in 3 years\nif prev_guess == guess and guess == initial_high:\n print(\"It is not possible to pay the down payment in three years.\")\nelse:\n print(\"Best savings rate: {}\".format(guess / 10000))\n print(\"Steps in 
bisection search: {}\".format(steps))","repo_name":"amaankahmad/MIT6.0001Python","sub_path":"PS1 - House Finance Calculator/ps1codeC.py","file_name":"ps1codeC.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"39482152417","text":"from sys import stdin\ninput = stdin.readline\n\ndef compute(n: int, m: int, relations: list[tuple[int]]) -> list[int]:\n rel_map = { i: 0 for i in range(1, n + 1) }\n for relation in relations:\n a, b = relation\n rel_map[a] += 1\n rel_map[b] += 1\n return rel_map.values()\n\nif __name__ == '__main__':\n n, m = map(int, input().split())\n relations: list[tuple[int]] = [tuple(map(int, input().split())) for _i in range(m)]\n print('\\n'.join(map(str, compute(n, m, relations))))\n","repo_name":"ShapeLayer/training","sub_path":"tasks/online_judge/baekjoon/python/10865.py","file_name":"10865.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"74039263700","text":"import gym\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nenv = gym.make('FrozenLake-v0')\n\n# Define up the network\ntf.reset_default_graph()\n\nq_net_input = tf.placeholder(shape=[1, 16], dtype=tf.float32)\nq_net_W = tf.Variable(tf.random_uniform([16, 4], 0, 0.01))\nq_net_output = tf.matmul(q_net_input, q_net_W)\nq_net_greedy_prediction = tf.argmax(q_net_output, 1)\n\nq_net_next_q = tf.placeholder(shape=[1, 4], dtype=tf.float32)\nloss = tf.reduce_sum(tf.square(q_net_next_q - q_net_output))\ntrainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\nupdate_step = trainer.minimize(loss)\n\ninit = tf.initialize_all_variables()\n\n# Hyperparameters\nreward_discount = 0.99\nexploration_threshold = 0.1\nnum_episodes = 10000\n\nepisode_length_list = []\nreward_list = []\n\nwith tf.Session() as sess:\n sess.run(init)\n\n for episode in range(1, num_episodes + 1):\n # Reset the environment\n s = env.reset()\n total_reward = 0.0\n done = False\n step = 0\n\n while step < 99 and not done:\n step += 1\n chosen_action, allQ = sess.run([q_net_greedy_prediction, q_net_output],\n feed_dict={\n q_net_input: np.identity(16)[s:s+1]\n })\n # With some probability, take a random step instead\n if np.random.rand(1) < exploration_threshold:\n chosen_action[0] = env.action_space.sample()\n\n # Step the environment\n s1, reward, done, _ = env.step(chosen_action[0])\n\n Q1 = sess.run(q_net_output,\n feed_dict={\n q_net_input: np.identity(16)[s1:s1+1]\n })\n\n # Get the target for our supervised learning\n maxQ1 = np.max(Q1)\n targetQ = allQ # This means that we aren't even going to train the other output neurons\n targetQ[0, chosen_action[0]] = reward + reward_discount*maxQ1\n\n # Take a training step\n _, W1 = sess.run([update_step, q_net_W],\n feed_dict={\n q_net_input: np.identity(16)[s:s+1],\n q_net_next_q: targetQ\n })\n\n total_reward += reward\n s = s1\n\n if episode % 1000 == 0:\n print(\"Episode: \" + str(episode))\n\n episode_length_list.append(step)\n reward_list.append(total_reward)\n\n# Plot the smoothed performance\nsmoothed_running_reward = 0.0\nsmoothing_parameter = 0.01\nsmoothed_reward_list = [0.0] * len(reward_list)\nfor (i, r) in enumerate(reward_list):\n smoothed_running_reward *= (1.0 - smoothing_parameter)\n smoothed_running_reward += smoothing_parameter * r\n smoothed_reward_list[i] = 
smoothed_running_reward\n\nplt.plot(smoothed_reward_list)\nplt.ylabel('Episode reward')\nplt.show()\n","repo_name":"harrybraviner/self_directed_rl","sub_path":"frozen_lake/q_network_tf.py","file_name":"q_network_tf.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6111271249","text":"#!/usr/bin/python\n\nimport sys\nimport csv\n\n#move data from csv to 2D list\ndata = list(csv.reader(open('Meteorite_Landings_Cleaned.csv','r',encoding='utf-8')))\nheader = data.pop(0)\n\n#split sitename and name-ID into seperate list elements\nfor row in data:\n #find last \"word\" in name value\n last_word = row[0].rfind(\" \")\n if(last_word>0):\n #see if that \"word\" is actually an ID\n foundID = row[0][last_word+1:].isnumeric() or row[0][last_word+2:].isnumeric()\n if(foundID):\n #split\n nid = row[0][last_word+1:]\n name = row[0][:last_word]\n row[0] = name\n row.insert(1,nid)\n else:\n row.insert(1,\"1\")\n #default name ID = 1\n else:\n row.insert(1, \"1\")\n\nheader.insert(1,\"nid\")\nwith open(\"splitname_out.csv\", 'w', newline='',encoding='utf-8') as csvout:\n csvw = csv.writer(csvout)\n csvw.writerow(header)\n csvw.writerows(data)","repo_name":"sqh404/UniProjects","sub_path":"Meteorite Analysis/Python data transformation/venv/splitname.py","file_name":"splitname.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73077516502","text":"import scraper\nimport scraper.models.response as models\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.orm import sessionmaker\nimport pytest\n\n\n@pytest.fixture\ndef cfg() -> scraper.Config:\n return scraper.create_config(\"/home/bab14/develop/python/scraper/urls.yaml\")\n\n\n@pytest.fixture\ndef engine() -> Engine:\n return create_engine(scraper.DATABASE_URI)\n\n\ndef test_config_type(cfg: scraper.Config) -> None:\n assert isinstance(cfg, scraper.Config)\n\n\ndef test_config_sites_len(cfg: scraper.Config) -> None:\n assert len(cfg.sites) == 3\n\n\ndef test_connection(engine: Engine) -> None:\n assert models.Base.metadata.create_all(engine) is None\n\n\ndef test_insert(engine: Engine) -> None:\n sr = models.ScraperResult(\n site=\"test_site\",\n product=\"test_product\",\n in_stock=True,\n price=39.99,\n purchase_url=\"test.url\",\n )\n\n response = None\n Session = sessionmaker(bind=engine)\n session = Session()\n session.add(sr)\n response = session.commit()\n session.close()\n\n assert response is None\n","repo_name":"bab014/piscraper","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28969516653","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n pre = None\n cur = head\n while cur:\n nex = cur.next\n cur.next, pre, cur = pre, cur, nex\n\n return pre\n\n\nhead = ListNode(1)\nc = head\nfor i in range(2, 6):\n temp = ListNode(i)\n c.next = temp\n c = c.next\n\ns2 = 
Solution().reverseList(head)\nprint(s2)\n","repo_name":"pursue-wind/leetcode","sub_path":"leetcode/LeetCode_206_reverse.py","file_name":"LeetCode_206_reverse.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2825151020","text":"class Solution(object):\n def reverse(self, x):\n sm = 0\n sign = 1\n\n if x<0:\n sign = -1\n x = x*-1\n\n while x>0:\n rem=x%10\n sm = sm*10 + rem\n x = x //10\n\n if not -2147483648x else 'Yes')\n ","repo_name":"RandyGen/AtCoder_Beginner_Contest_209","sub_path":"problem_B/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70660049941","text":"import LineNumber\nfrom Rule import Rule\nfrom TList import TList\n\nclass Justification:\n def __init__(self, rule:Rule=Rule(), references:TList=TList(LineNumber,[])):\n if rule and not isinstance(rule,Rule):\n print('error')\n else:\n self.rule = rule\n\n if references and (not isinstance(references,TList) or not references.T==LineNumber):\n print('Error')\n else:\n self.references = references\n \n def __repr__(self):\n class_name = type(self).__name__\n return f'{class_name}({self.rule!r},{self.references!r})'\n \n def __str__(self):\n s = f'{self.rule}'\n for r in self.references:\n s += f' {r}'\n return s","repo_name":"MarkNisarg/proof-buddy","sub_path":"python-server/Justification.py","file_name":"Justification.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"33074478090","text":"# Databricks notebook source\n# MAGIC %md The purpose of this notebook is to explore how we might leverage association rules to build product recommendations. This notebook should be run on a **Databricks 8.4 ML** cluster. \n\n# COMMAND ----------\n\n# DBTITLE 1,Get Configuration Values\n# MAGIC %run \"./MB 01: Configuration\"\n\n# COMMAND ----------\n\n# DBTITLE 1,Import Required Libraries\nimport pandas as pd\nimport itertools\n\nfrom pyspark.sql.types import *\nimport pyspark.sql.functions as f\n\n# COMMAND ----------\n\n# MAGIC %md ## Step 1: Access Association Rules\n# MAGIC \n# MAGIC We now have a set of rules generated from our *prior* period transactions. We can retrieve these along with scores that tell us something about the reliability of these them:\n\n# COMMAND ----------\n\n# DBTITLE 1,Retrieve Rules & Scores\nrules = (\n spark\n .table('instacart.assoc_rules')\n .selectExpr('antecedent', 'consequent[0] as consequent', 'lift_norm', 'confidence')\n )\n\ndisplay(rules)\n\n# COMMAND ----------\n\n# MAGIC %md ## Step 2: Construct Recommendations\n# MAGIC \n# MAGIC We now should consider how we might use these rules to make recommendations. In general, we will recommend products based on items added to a shopper's cart:\n# MAGIC \n# MAGIC \n# MAGIC \n# MAGIC But will we make those recommendations based on the whole cart or just the last few items added to it? Will we require the whole cart or just some portion of it to match the antecedent to a rule? Should we consider rules where only a portion of the antecedent is matched by the contents of the cart? How will we prioritize rules when the antecedent only matches a subset of the cart or when only a subset of the antecedent is matched? How should we consider the impact of multiple rules recommending the same products? 
While on the surface, the notion that the *if this, then that* rules generated by a market basket analysis could be used to make recommendations seems clear, the reality of how we go about assembling those recommendations gets complicated quickly.\n# MAGIC \n# MAGIC In this [white paper](https://www.sciencedirect.com/science/article/pii/S095741741830441X?via%3Dihub) a team of researchers explored some of these choices as well as a few others to construct a recommender of this type. While they arrived at an ideal solution for their scenario, it's not clear that the particular approach they took is universally ideal. With this in mind, we'll borrow a few items from some of their algorithms and insert our own. The basic algorithm we will use is as follows:

\n# MAGIC \n# MAGIC 1. Assemble a shopper's cart at a point in time during the shopping process\n# MAGIC 2. Retrieve all rules with antecedents match some or all of the product or product combinations assembled in the previous step\n# MAGIC 3. Discard any rules with consequents already in the shopping cart\n# MAGIC 4. Assign a weight to each rule based on the number of products in the antecedent relative to the number of products in the original cart\n# MAGIC 5. Multiply each rule's confidence by this weight to arrive at a rule-specific score\n# MAGIC 6. Sum the rule-scores for each unique consequent product\n# MAGIC 7. Present products in order from highest to lowest summed scores\n# MAGIC \n# MAGIC A number of variations on these rules including the use of normalized lift over confidence were explored before arriving at this algorithm. This algorithm provided the best results based on our evaluation metric to be discussed in the next step.\n# MAGIC \n# MAGIC To tackle the first step, we'll assemble each basket as it existed at a point in time. The dataset provides access to a field named *add_to_cart_order* with which we can assemble these carts. The items that will be later added to the cart to complete the order are captured as well to enable later evaluation: \n\n# COMMAND ----------\n\n# DBTITLE 1,Retrieve Cart as of Point in Time\n# retrieve basket contents\nbasket = (\n spark\n .table('instacart.orders')\n .filter(f.expr(\"eval_set='train'\"))\n .join( spark.table('instacart.order_products'), on='order_id')\n .selectExpr('order_id','add_to_cart_order as position', 'product_id')\n )\n\n# assemble basket as of point in time\nbasket_at_position = (\n basket.alias('x')\n .join(basket.alias('y'), on=f.expr('x.order_id=y.order_id AND x.position>=y.position'))\n .groupBy('x.order_id','x.position')\n .agg(f.collect_list('y.product_id').alias('basket'))\n )\n\n# place downstream products in \"next\"\nbasket_and_next = (\n basket_at_position.alias('m')\n .join(basket.alias('n'), on=f.expr('m.order_id=n.order_id AND m.position\n# MAGIC \n# MAGIC To support this kind of deployment pattern, we need the ability to publish rules data to a relevant data store or database. 
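A minimal, illustrative sketch of such a publish (assuming a JDBC destination; the `jdbc_url` value is a hypothetical placeholder, and the array-valued `antecedent` column is flattened first because JDBC cannot store arrays directly) might be:\n# MAGIC \n# MAGIC     rules.withColumn('antecedent', f.concat_ws(',', 'antecedent')).write.format('jdbc').option('url', jdbc_url).option('dbtable', 'assoc_rules_out').mode('overwrite').save()\n# MAGIC \n# MAGIC 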
Documentation on how to write data to a variety of such destinations can be found [here](https://docs.databricks.com/data/data-sources/index.html).\n","repo_name":"ktmrmshk/dbdoc","sub_path":"notebooks/src/Solutions Accelerator/Market Basket Analysis/mb_04:_make_recommendations.py","file_name":"mb_04:_make_recommendations.py","file_ext":"py","file_size_in_byte":14372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"21483763049","text":"import guardianbot, logging, asyncio, time, sqlite3 as sql, database.db_access as db\nfrom flask import Flask, render_template, request, g\nfrom timeloop import Timeloop\nfrom datetime import timedelta\nimport subprocess\n\napp = Flask(__name__)\nlogging.basicConfig(filename='logs/webapp.log', level=logging.DEBUG, format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')\nDATABASE = r'database/dopplegaenger.db'\ntl = Timeloop()\n\n@app.route('/')\ndef home():\n conn = db.get_db(DATABASE)\n conn.row_factory = sql.Row\n cur = db.sql_full_report(conn)\n rows = cur.fetchall(); \n return render_template(\"list.html\",rows = rows)\n\n@app.route('/comments/')\ndef comments(title):\n conn = db.get_db(DATABASE)\n conn.row_factory = sql.Row\n cur = db.sql_return_comments_from_title(conn,title)\n rows = cur.fetchall(); \n return render_template('comments.html',rows = rows)\n\n@app.route('/user/<username>')\ndef profile(username):\n conn = db.get_db(DATABASE)\n conn.row_factory = sql.Row\n cur = db.sql_select_all_comments_from_user(conn,username)\n rows = cur.fetchall(); \n return render_template('profile.html', rows = rows)\n\n@tl.job(interval=timedelta(seconds = 60))\ndef spider_run():\n # list_files = subprocess.run([\"watch\", \"-n60\", \"python3\", -r, \"guardianbot.py\"])\n logging.log(logging.INFO, \"Running Bot\")\n\nif __name__== \"__main__\":\n tl.start()\n while True:\n try:\n app.run()\n except KeyboardInterrupt:\n tl.stop()\n db.close_connection()\n break\n","repo_name":"deverickapollo/doppelgaenger-detection","sub_path":"webserver/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32431011350","text":"import os\nimport os.path as osp\nfrom collections import defaultdict\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom mmengine import print_log\nfrom mmengine.utils import digit_version\nfrom mmengine.utils.dl_utils import TORCH_VERSION\nfrom PIL import Image\nfrom torch.nn import functional as F\nfrom torchvision.utils import make_grid\n\nfrom mmagic.structures import DataSample\nfrom mmagic.utils import ForwardInputs, try_import\nfrom .base_mmagic_inferencer import BaseMMagicInferencer, InputsType, PredType\nfrom .inference_functions import calculate_grid_size\n\nimageio = try_import('imageio')\nimageio_ffmpeg = try_import('imageio_ffmpeg')\n\n\nclass EG3DInferencer(BaseMMagicInferencer):\n\n func_kwargs = dict(\n preprocess=['inputs'],\n forward=['num_images', 'interpolation'],\n visualize=[\n 'result_out_dir', 'vis_mode', 'save_img', 'save_video',\n 'img_suffix', 'video_suffix'\n ],\n postprocess=[])\n\n extra_parameters = dict(num_batches=4, sample_model='ema', add_noise=False)\n\n def preprocess(self, inputs: InputsType = None) -> ForwardInputs:\n \"\"\"Process the inputs into a model-feedable format.\n\n Args:\n inputs (List[Union[str, np.ndarray]]): The conditional inputs for\n the 
inferencer. Defaults to None.\n\n Returns:\n ForwardInputs: The preprocessed inputs and data samples.\n \"\"\"\n if isinstance(inputs, Sequence):\n assert all([type(inputs[0]) == type(lab) for lab in inputs\n ]), ('All label inputs must have the same type.')\n if isinstance(inputs[0], list):\n for lab in inputs:\n assert all([isinstance(l_, float) for l_ in lab])\n inputs = np.array(inputs).astype(np.float32)\n elif isinstance(inputs[0], np.ndarray):\n assert all([lab.ndim == 1 for lab in inputs])\n inputs = [input_.astype(np.float32) for input_ in inputs]\n else:\n raise ValueError(\n 'EG3D only support ndarry or list as label input.')\n\n data_sample_list = []\n for lab in inputs:\n data_sample = DataSample()\n data_sample.set_gt_label(lab)\n data_sample_list.append(data_sample.to(self.device))\n self.extra_parameters['num_batches'] = len(inputs)\n else:\n data_sample_list = None\n\n num_batches = self.extra_parameters['num_batches']\n sample_model = self.extra_parameters['sample_model']\n add_noise = self.extra_parameters['add_noise']\n inputs = dict(\n num_batches=num_batches,\n sample_model=sample_model,\n add_noise=add_noise)\n\n if data_sample_list is None:\n data_samples = None\n else:\n data_samples = DataSample.stack(data_sample_list)\n\n return inputs, data_samples\n\n def forward(self,\n inputs: ForwardInputs,\n interpolation: Optional[str] = 'both',\n num_images: int = 100) -> Union[dict, List[dict]]:\n \"\"\"Forward the inputs to the model.\n\n Args:\n inputs (ForwardInputs): Model inputs. If data sample (the second\n element of `inputs`) is not passed, will generate a sequence\n of images corresponding to passed `interpolation` mode.\n interpolation (str): The interpolation mode. Supported choices\n are 'both', 'conditioning', and 'camera'. Defaults to 'both'.\n num_images (int): The number of frames of interpolation.\n Defaults to 500.\n\n Returns:\n Union[dict, List[dict]]: Output dict corresponds to the input\n condition or the list of output dict of each frame during the\n interpolation process.\n \"\"\"\n inputs, data_sample = inputs # unpack the tuple\n # forward as the passed input\n if data_sample is not None:\n outputs = self.model(inputs, data_sample)\n output_dict = defaultdict(list)\n # return outputs\n for output in outputs:\n fake_img = output.fake_img.data\n depth_img = output.depth\n lr_img = output.lr_img.data\n ray_origins = output.ray_origins\n ray_directions = output.ray_directions\n output_dict['fake_img'].append(fake_img)\n output_dict['depth'].append(depth_img)\n output_dict['lr_img'].append(lr_img)\n output_dict['ray_origins'].append(ray_origins)\n output_dict['ray_directions'].append(ray_directions)\n\n for k in output_dict.keys():\n output_dict[k] = torch.stack(output_dict[k], dim=0)\n\n return output_dict\n\n num_batches = inputs['num_batches']\n output_list = self.model.interpolation(num_images, num_batches,\n interpolation)\n return output_list\n\n def visualize(self,\n preds: Union[PredType, List[PredType]],\n vis_mode: str = 'both',\n save_img: bool = True,\n save_video: bool = True,\n img_suffix: str = '.png',\n video_suffix: str = '.mp4',\n result_out_dir: str = 'eg3d_output') -> None:\n \"\"\"Visualize predictions.\n\n Args:\n preds (Union[PredType, List[PredType]]): Prediction os model.\n vis_mode (str, optional): Which output to visualize. Supported\n choices are 'both', 'depth', and 'img'. Defaults to 'all'.\n save_img (bool, optional): Whether save images. Defaults to True.\n save_video (bool, optional): Whether save videos. 
Defaults to True.\n img_suffix (str, optional): The suffix of saved images.\n Defaults to '.png'.\n video_suffix (str, optional): The suffix of saved videos.\n Defaults to '.mp4'.\n result_out_dir (str, optional): The save director of image and\n videos. Defaults to 'eg3d_output'.\n \"\"\"\n if save_video:\n assert imageio is not None, (\n 'Please install imageio by \\'pip install '\n 'imageio\\' to save video.')\n assert imageio_ffmpeg is not None, (\n 'Please install imageio-ffmpeg by \\'pip install '\n 'imageio-ffmpeg\\' to save video.')\n\n os.makedirs(result_out_dir, exist_ok=True)\n assert vis_mode.upper() in ['BOTH', 'DEPTH', 'IMG']\n if vis_mode.upper() == 'BOTH':\n vis_mode = ['DEPTH', 'IMG']\n if not isinstance(vis_mode, list):\n vis_mode = [vis_mode]\n\n if not isinstance(preds, list):\n preds = [preds]\n if save_video:\n save_video = False\n print_log('Only one frame of output is generated and cannot '\n 'save video. Set \\'save_video\\' to \\'False\\' '\n 'automatically.')\n if not save_img:\n save_img = True\n print_log('Only one frame of output is generated can only save'\n 'image. Set \\'save_img\\' to \\'True\\' automatically.')\n\n # save video\n batch_size = preds[0]['fake_img'].shape[0]\n\n img_dict = {}\n for target in vis_mode:\n target = 'fake_img' if target.upper() == 'IMG' else target\n if target.lower() == 'fake_img':\n imgs = self.preprocess_img(preds)\n else:\n imgs = self.preprocess_depth(preds)\n img_dict[target.lower()] = imgs\n\n nrow = calculate_grid_size(batch_size)\n\n if save_video:\n video_path = osp.join(\n result_out_dir,\n f'{target.lower()}_seed{self.seed}{video_suffix}')\n video_writer = imageio.get_writer(\n video_path,\n mode='I',\n fps=60,\n codec='libx264',\n bitrate='10M')\n\n frame_list = torch.split(imgs, batch_size)\n for idx, frame in enumerate(frame_list):\n # frame: [bz, C, H, W]\n frame_grid = make_grid(\n frame, nrow=nrow).permute(1, 2, 0)[..., (2, 1, 0)]\n frame_grid = frame_grid.numpy().astype(np.uint8)\n if save_video:\n video_writer.append_data(frame_grid)\n\n if save_img:\n if len(frame_list) != 1:\n img_name = (f'{target.lower()}_frame{idx}_'\n f'seed{self.seed}{img_suffix}')\n else:\n img_name = (f'{target.lower()}_seed{self.seed}'\n f'{img_suffix}')\n img_path = osp.join(result_out_dir, img_name)\n Image.fromarray(frame_grid).save(img_path)\n\n if save_video:\n video_writer.close()\n print_log(f'Save video to \\'{video_path}\\'.', 'current')\n\n if len(vis_mode) > 1:\n fake_img = img_dict['fake_img']\n depth_img = img_dict['depth']\n # [num_frame * bz, 3, H, W * 2]\n imgs = torch.cat([fake_img, depth_img], dim=-1)\n nrow = calculate_grid_size(batch_size, aspect_ratio=2)\n\n if save_video:\n video_path = osp.join(\n result_out_dir, f'combine_seed{self.seed}{video_suffix}')\n video_writer = imageio.get_writer(\n video_path,\n mode='I',\n fps=60,\n codec='libx264',\n bitrate='10M')\n\n frame_list = torch.split(imgs, batch_size)\n for idx, frame in enumerate(frame_list):\n frame_grid = make_grid(\n frame, nrow=nrow).permute(1, 2, 0)[..., (2, 1, 0)]\n frame_grid = frame_grid.numpy().astype(np.uint8)\n\n if save_video:\n video_writer.append_data(frame_grid)\n\n if save_img:\n if len(frame_list) != 1:\n img_name = (f'combine_frame{idx}_'\n f'seed{self.seed}{img_suffix}')\n else:\n img_name = (f'combine_seed{self.seed}' f'{img_suffix}')\n img_path = osp.join(result_out_dir, img_name)\n Image.fromarray(frame_grid).save(img_path)\n\n if save_video:\n video_writer.close()\n print_log(f'Save video to \\'{video_path}\\'.', 'current')\n\n 
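# NOTE: both helpers below assume fake images scaled to [-1, 1] and raw, unnormalized depth maps.\n    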
def preprocess_img(self, preds: List[dict]) -> torch.Tensor:\n \"\"\"Preprocess images in the predictions.\n\n Args:\n preds (List[dict]): List of prediction dict of each frame.\n\n Returns:\n torch.Tensor: Preprocessed image tensor shape like\n [num_frame * bz, 3, H, W].\n \"\"\"\n imgs = [p['fake_img'].cpu() for p in preds]\n imgs = torch.cat(imgs, dim=0) # [num_frame * bz, 3, H, W]\n imgs = ((imgs + 1) / 2 * 255.).clamp(0, 255)\n return imgs\n\n def preprocess_depth(self, preds: List[dict]) -> torch.Tensor:\n \"\"\"Preprocess depth in the predictions.\n\n Args:\n preds (List[dict]): List of prediction dict of each frame.\n\n Returns:\n torch.Tensor: Preprocessed depth tensor shape like\n [num_frame * bz, 3, H, W].\n \"\"\"\n depth = [p['depth'].cpu() for p in preds]\n\n depth = torch.cat(depth, dim=0)\n depth = -depth\n depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.\n depth = depth.clamp(0, 255).repeat(1, 3, 1, 1)\n\n img_size = preds[0]['fake_img'].shape[-1]\n if img_size != depth.shape[-1]:\n interpolation_kwargs = dict(\n size=img_size, mode='bilinear', align_corners=False)\n if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):\n interpolation_kwargs['antialias'] = True\n depth = F.interpolate(depth, **interpolation_kwargs)\n return depth\n\n def postprocess(self,\n preds: PredType,\n imgs: Optional[List[np.ndarray]] = None,\n is_batch: bool = False,\n get_datasample: bool = False) -> Dict[str, torch.tensor]:\n \"\"\"Postprocess predictions.\n\n Args:\n preds (List[Dict]): Predictions of the model.\n imgs (Optional[np.ndarray]): Visualized predictions.\n is_batch (bool): Whether the inputs are in a batch.\n Defaults to False.\n get_datasample (bool): Whether to use Datasample to store\n inference results. If False, dict will be used.\n\n Returns:\n Dict[str, torch.Tensor]: Inference results as a dict.\n \"\"\"\n if isinstance(preds[0], dict):\n keys = preds[0].keys()\n outputs = defaultdict(list)\n for pred in preds:\n for k in keys:\n outputs[k].append(pred[k])\n for k in keys:\n outputs[k] = torch.stack(outputs[k], dim=0)\n return outputs\n # directly return the dict\n return preds\n","repo_name":"open-mmlab/mmagic","sub_path":"mmagic/apis/inferencers/eg3d_inferencer.py","file_name":"eg3d_inferencer.py","file_ext":"py","file_size_in_byte":13552,"program_lang":"python","lang":"en","doc_type":"code","stars":5963,"dataset":"github-code","pt":"12"} +{"seq_id":"30655412291","text":"import turtle\nn = 4\nN = 10\nr = 360 / n\nf = 100 / n\nturtle.shape('turtle')\nfor k in range(1, N+1):\n for i in range(0, n):\n turtle.forward(f*k)\n turtle.left(r)\n turtle.penup()\n turtle.left(r/2)\n turtle.backward(f / 2 * 2**0.5)\n turtle.right(r/2)\n turtle.pendown()\n \n","repo_name":"vkomyagin/Education","sub_path":"MFTI_Python_Alghoritms/Lab1/lab1_5.py","file_name":"lab1_5.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41177705060","text":"from flask import Flask, redirect, url_for, session, request, jsonify\nfrom flask_oauthlib.client import OAuth\n\nfrom flask import Flask, jsonify, g, render_template, request, redirect, url_for, session\nfrom flask_github import GitHub\nfrom urlparse import urlparse, urljoin\nfrom requests_oauthlib import OAuth2Session\nfrom flask.json import jsonify\nimport os\nimport json\n\napp = Flask(__name__)\napp.debug = True\napp.secret_key = 'development'\noauth = OAuth(app)\n\ngithub = oauth.remote_app(\n 'github',\n 
consumer_key='9432818cf3f23b850773',\n    consumer_secret='8814480dcbb1018338d5f46a026261e9be06199c',\n    request_token_params={'scope': 'user:email'},\n    base_url='https://api.github.com/',\n    request_token_url=None,\n    access_token_method='POST',\n    access_token_url='https://github.com/login/oauth/access_token',\n    authorize_url='https://github.com/login/oauth/authorize'\n)\n\n\n@app.route('/')\ndef index():\n    if 'github_token' in session:\n        target = urlparse(request.url).query\n        if target.startswith(\"search_term=\"):\n            target = target[12:]\n        lan = 'https://api.github.com/search/repositories?q='+target + '+language:assembly&sort=stars&order=desc'\n        me = github.get(lan)\n        return jsonify(me.data)\n\n@app.route('/navigator/<name>')\ndef success(name):\n    target = name\n    return 'w %s' % target\n@app.route('/navigator')\ndef login():\n    target = urlparse(request.url).query\n    if target.startswith(\"search_term=\"):\n        target = target[12:]\n    lan = 'https://api.github.com/search/repositories?q='+target + '+language:assembly&sort=stars&order=desc'\n    return github.authorize(callback=url_for('authorized', lan=lan, _external=True))\n\n\n@app.route('/logout')\ndef logout():\n    session.pop('github_token', None)\n    return redirect(url_for('index'))\n\n\n@app.route('/navigator/authorized')\ndef authorized():\n    resp = github.authorized_response()\n    if resp is None or resp.get('access_token') is None:\n        return 'Access denied: reason=%s error=%s resp=%s' % (\n            request.args['error'],\n            request.args['error_description'],\n            resp\n        )\n    session['github_token'] = (resp['access_token'], '')\n    target = urlparse(request.url).query\n    if target.startswith(\"search_term=\"):\n        target = target[12:]\n    lan = 'https://api.github.com/search/repositories?q='+target + '+language:assembly&sort=stars&order=desc'\n    me = github.get(lan)\n    return redirect(url_for('index'))\n\n\n@github.tokengetter\ndef get_github_oauth_token():\n    return session.get('github_token')\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"kocicjelena/Auth-at-Github-Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"74261823062","text":"from linked_list import ListNode\n\n\nclass Solution:\n    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n        # dummy head; the result list starts at head.next\n        head = ListNode()\n        res = head\n        r = 0  # carry\n\n        # keep going while either list still has digits or a carry remains\n        while l1 or l2 or r != 0:\n            a = 0 if not l1 else l1.val\n            b = 0 if not l2 else l2.val\n\n            c = a + b + r  # include the carry from the previous digit\n            if c >= 10:\n                c = c - 10\n                r = 1\n            else:\n                r = 0\n            res.next = ListNode(c)\n            res = res.next\n            l1 = l1.next if l1 else l1\n            l2 = l2.next if l2 else l2\n        return head.next\n\n\nsol = Solution()\ni = sol.addTwoNumbers(ListNode(2, ListNode(4, ListNode(3))), ListNode(5, ListNode(6, ListNode(4))))\nwhile i:\n    print(i.val)\n    i = i.next\n","repo_name":"eliotfgn/dsa","sub_path":"data-structures/challenges/linked_list/add_two_numbers.py","file_name":"add_two_numbers.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"}
+{"seq_id":"30525676192","text":"##\n##Write a function that returns the largest element in a list.\n##a = [10,100,80,1,1000]\n##def madmax(a):\n##    n = a[0]\n##    for i in a:\n##        if n < i:\n##            n = i\n##    return n\n##print (madmax(a))\n\n\n##def upsidedown(a):\n##    n = []\n##    for t in a:\n##        n.append(a[-t])\n##\n##    return 
n\n##\n##\n##print(upsidedown(a))\n\ndef upsidedown(a):\n    # Reverse the list by prepending each element to a new list.\n    n = []\n    for i in a:\n        n.insert(0, i)\n    return n\n\na = [0,1,2,3,4]\n\nprint(upsidedown(a))\n","repo_name":"pedross3/Ubuntu","sub_path":"DPDessoft/fon2.py","file_name":"fon2.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"30311374733","text":"from flask import Flask,jsonify,request,render_template\nfrom GPT2LM import GPT2Class\nfrom log import LogClass\nfrom Config import ConfigClass\n\n\napp = Flask(__name__)\n\n# calling the Classes\nModelObj = GPT2Class()\nconfigObj = ConfigClass(\"params.yaml\")\nconfigData = configObj.Loading_Config()\nLogObj = LogClass(configData['LoggingFileName'])\n\n@app.route(\"/\",methods = [\"GET\"])\ndef HomePage():\n    return render_template(\"index.html\")\n\n@app.route(\"/predict\",methods=[\"POST\"])\ndef PredictionRoute():\n    if request.json is not None:\n        text = request.json['text']\n        LogObj.Logger(\"Get the Text from User: \"+str(text))\n        generated_output = ModelObj.Prediction(text)\n        dic = {\n            \"Input Text\" : text,\n            \"Generated Output\" : generated_output\n        }\n        LogObj.Logger(\"Get prediction Successfully\")\n        return jsonify(dic)\n    # Guard against non-JSON requests; without this the view returns None and Flask raises an error\n    return jsonify({\"error\": \"request body must be JSON with a 'text' field\"}), 400\n\n\n\n\nif __name__ == \"__main__\":\n    app.run()","repo_name":"sarangtamrakar/CasualLanguageModeling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"}
+{"seq_id":"42956125929","text":"import time\nimport os\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport requests\nimport json\nimport math\nimport keras\nimport datetime\nfrom datetime import date\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\n\nAllCsv = os.listdir('/home/moumita_das2820/StockPredict/data')\nAllCsv1 = [ s[:-4] for s in AllCsv]\n\ndef createModel(stock):\n    new = pd.read_csv('/home/moumita_das2820/StockPredict/data/'+stock+'.csv')\n    # Create a new dataframe with only the 'Close' column\n    data = new.filter(['close'])\n    #Converting the dataframe to a numpy array\n    dataset = data.values\n    #Get /Compute the number of rows to train the model on\n    training_data_len = math.ceil( len(dataset) *.8) \n    #Scale the all of the data to be values between 0 and 1 \n    scaler = MinMaxScaler(feature_range=(0, 1)) \n    scaled_data = scaler.fit_transform(dataset)\n    #Create the scaled training data set \n    train_data = scaled_data[0:training_data_len , : ]\n    #Split the data into x_train and y_train data sets\n    x_train=[]\n    y_train = []\n    for i in range(60,len(train_data)):\n        x_train.append(train_data[i-60:i,0])\n        y_train.append(train_data[i,0])\n    #Convert x_train and y_train to numpy arrays\n    x_train, y_train = np.array(x_train), np.array(y_train)\n    #Reshape the data into the shape accepted by the LSTM\n    x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))\n    #Build the LSTM network model\n    model = Sequential()\n    model.add(LSTM(units=50, return_sequences=True,input_shape=(x_train.shape[1],1)))\n    model.add(LSTM(units=50, return_sequences=False))\n    model.add(Dense(units=25))\n    model.add(Dense(units=1))\n    #Compile the model\n    model.compile(optimizer='adam', loss='mean_squared_error')\n    #Train the model\n    model.fit(x_train, y_train, batch_size=2, epochs=20)\n    model.save('/home/moumita_das2820/StockPredict/models/'+stock+'_model')\n\ntickers = 
pd.read_csv('/home/moumita_das2820/StockPredict/data/ticker.csv')\ndata = os.listdir('/home/moumita_das2820/StockPredict/models')\ndata1 = [s[:-6] for s in data]\n\ntickers = tickers.set_index(\"symbol\")\ntickers = tickers.drop(data1, axis=0)\nnew_tickers = tickers.reset_index()\nnew_tickers.to_csv(\"/home/moumita_das2820/StockPredict/data/Newtickers.csv\")\n\nnewTickers = pd.read_csv(\"/home/moumita_das2820/StockPredict/data/Newtickers.csv\")\n#Save each model\nfor stock in newTickers['symbol']:\n print(stock)\n if stock in AllCsv1:\n createModel(stock)\n\n","repo_name":"moumita-das-7019/StockPredict","sub_path":"scripts/CreateModel.py","file_name":"CreateModel.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30065339267","text":"import pytest\nimport time\nfrom unittest.mock import Mock\n\n\n@pytest.fixture\ndef mock_local_time_zone(monkeypatch: pytest.MonkeyPatch):\n \"\"\"A pytest fixture that simulates a given local system time zone.\n\n Args:\n seconds: The number of seconds UTC offset for the base time zone.\n dst_seconds: The number of seconds UTC offset for the daylight savings\n time zone.\n\n Note: these args are positive for time zones East of the prime meridian\n and negative for West.\n \"\"\"\n def _mock_local_time_zone(seconds: int, dst_seconds: int = 0) -> None:\n is_dst = 1 if dst_seconds else 0\n mock_time = Mock(spec=time)\n mock_time.timezone = -seconds\n mock_time.altzone = -dst_seconds\n mock_time.daylight = is_dst\n mock_time.localtime.return_value = Mock(tm_isdst=is_dst)\n monkeypatch.setattr('metomi.isodatetime.timezone.time', mock_time)\n return _mock_local_time_zone\n","repo_name":"metomi/isodatetime","sub_path":"metomi/isodatetime/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"12"} +{"seq_id":"72368261460","text":"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = \"postgresql://tylercitrin:9nv683g@localhost/microcalc\"\n#'postgresql:///' + os.path.join(basedir, 'microcalc.db')\n#\"postgresql://tylercitrin:9nv683g@localhost/microcalcdb\"\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n\t#postgresql[+driver]://<user>:<pass>@<host>/<dbname>\n\t#'postgresql://' + os.path.join(basedir, 'app.db')\n\n\n\nWTF_CSRF_ENABLED = True\nSECRET_KEY = 'you-will-never-guess'\n\"\"\"\n# cross-site request forgery prevention \n#(note that this setting is enabled by default in current versions of Flask-WTF). \n# In most cases you want to have this option enabled as it makes your app more secure.\n\"\"\"\n\n\n#SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'\n\t\t\t#\"postgresql://tcitrin:9nv683g@localhost:5432/microcalcdb\"\n\t\t\t#'postgres://username:password@localhost:5432/dbname'\n#app.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://postgres:postgres@localhost/DBNAME\"","repo_name":"tylerthedeveloper/DerivativeCalculator-FlaskWebApp","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"11460247395","text":"from . 
import views\nfrom django.urls import path, include\nfrom rest_framework import routers\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom .views import UserRegistrationView, LogoutUser\n\nrouter = routers.DefaultRouter()\n\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path('login/', obtain_auth_token, name='api_login'),\n    # path('logout/', UserLogoutView.as_view(), name='api_logout'),\n    path('logoutuser/', LogoutUser.as_view(), name='logout'),\n    path('register/', UserRegistrationView.as_view(), name='api_register'),\n    path(\n        \"employee/<int:pk>/edit/\",\n        views.EmployeeUpdateView.as_view(),\n    ),\n    path(\n        \"employee/<int:pk>/delete/\",\n        views.EmployeeDeleteView.as_view(),\n    ),\n    \n    path(\"employeelist/\",views.EmployeeListView.as_view(),),\n    \n\n    path(\n        \"employeedetail/<int:pk>/\",\n        views.EmployeDetail.as_view()\n    ),\n\n    path(\n        \"employee/leavecreate\",\n        views.LeaveCreateView.as_view(),\n        \n    ),\n    path(\n        \"employee/leave/list/\",\n        views.LeaveListView.as_view(),\n    ),\n    path(\n        \"leave/status/<int:pk>/change\",\n        views.LeaveRejectView.as_view(),\n    ),\n\n]\n","repo_name":"suryakiranp33/Emp_management_system","sub_path":"emp_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"14965704752","text":"import threading\r\nimport time\r\nimport tkinter as tk\r\nfrom datetime import datetime\r\nfrom typing import Optional\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nclass Parser:\r\n    TIME_OUT = 60\r\n    TARGET_LINK = \"https://www.bileter.ru/afisha/building/teatr_dojdey.html\"\r\n    ENABLED = True\r\n    PROCESS = None\r\n    DATE_ENTRY_WIDTH = 50\r\n    DATE_TIME_ENTRY_WIDTH = 10\r\n    NAME_ENTRY_WIDTH = 30\r\n    PRICE_ENTRY_WIDTH = 30\r\n    TICKET_COUNT_TEXT_ENTRY_WIDTH = 30\r\n    LINK_TEXT_ENTRY_WIDTH = 50\r\n\r\n    def __init__(self):\r\n        self.new_thread = None\r\n        # Default polling interval in seconds; start_thread re-reads it from the entry field.\r\n        self.time_out = self.TIME_OUT\r\n\r\n        self.window = tk.Tk()\r\n\r\n        self._init_main_button()\r\n        self._init_exit_button()\r\n        self._init_target_link_entry()\r\n        self._init_time_out_entry()\r\n        self._init_results_labels()\r\n\r\n        self.result_columns = []\r\n\r\n    def _init_main_button(self) -> None:\r\n        self.main_button = tk.Button(\r\n            width=10,\r\n            height=2,\r\n            bg=\"grey\",\r\n            fg=\"black\",\r\n        )\r\n        self.main_button.pack()\r\n        self._resetbutton()\r\n\r\n    def _init_exit_button(self) -> None:\r\n        self.exit_button = tk.Button(\r\n            text=\"Выход\",\r\n            width=10,\r\n            height=2,\r\n            bg=\"grey\",\r\n            fg=\"black\",\r\n        )\r\n        self.exit_button.pack()\r\n        self.exit_button.config(command=self.exit_program)\r\n\r\n    def _init_target_link_entry(self) -> None:\r\n        self.link_label = tk.Label(text=\"Ссылка\")\r\n        self.link_label.pack()\r\n        self.target_link = tk.Entry(fg=\"black\", bg=\"white\", width=50)\r\n        self.target_link.insert(0, self.TARGET_LINK)\r\n        self.target_link.pack()\r\n\r\n    def _init_time_out_entry(self) -> None:\r\n        self.time_out_label = tk.Label(\r\n            text=\"Частота парсинга, c (не чаще 1 раз в минуту)\"\r\n        )\r\n        self.time_out_label.pack()\r\n        self.time_out_entry = tk.Entry(fg=\"black\", bg=\"white\", width=50)\r\n        self.time_out_entry.insert(0, str(self.TIME_OUT))\r\n        self.time_out_entry.pack()\r\n\r\n    def _init_results_labels(self) -> None:\r\n        self.results_time_update_label = tk.Label(text=\"Время обновления\")\r\n        self.results_time_update_label.pack()\r\n        self.results_time_update = tk.Entry(\r\n            fg=\"black\", bg=\"white\", width=50, state=tk.DISABLED\r\n        )\r\n        
self.results_time_update.pack()\r\n\r\n        frame = tk.Frame()\r\n        frame.pack()\r\n        date_label = tk.Label(\r\n            frame,\r\n            text=\"Дата\",\r\n            width=self.DATE_ENTRY_WIDTH,\r\n        )\r\n        date_label.pack(side=\"left\")\r\n        date_time_label = tk.Label(\r\n            frame, text=\"Время\", width=self.DATE_TIME_ENTRY_WIDTH\r\n        )\r\n        date_time_label.pack(side=\"left\")\r\n        name_label = tk.Label(frame, text=\"Название\", width=self.NAME_ENTRY_WIDTH)\r\n        name_label.pack(side=\"left\")\r\n        price_label = tk.Label(frame, text=\"Цена\", width=self.PRICE_ENTRY_WIDTH)\r\n        price_label.pack(side=\"left\")\r\n        ticket_count_text_label = tk.Label(\r\n            frame, text=\"Количество\", width=self.TICKET_COUNT_TEXT_ENTRY_WIDTH\r\n        )\r\n        ticket_count_text_label.pack(side=\"left\")\r\n        link_text_label = tk.Label(\r\n            frame, text=\"Ссылка\", width=self.LINK_TEXT_ENTRY_WIDTH\r\n        )\r\n        link_text_label.pack(side=\"left\")\r\n\r\n    def start(self) -> None:\r\n        self.window.mainloop()\r\n\r\n    def exit_program(\r\n        self,\r\n    ) -> None:\r\n        if self.new_thread:\r\n            self.new_thread.join(timeout=0.01)\r\n        exit(1)\r\n\r\n    def _resetbutton(self) -> None:\r\n        self.running = False\r\n        self.main_button.config(text=\"Старт\", command=self.start_thread)\r\n\r\n    def start_thread(self) -> None:\r\n        try:\r\n            try:\r\n                self.time_out = max(int(self.time_out_entry.get()), self.TIME_OUT)\r\n            except ValueError:\r\n                self.time_out_entry.config(bg=\"red\")\r\n                raise\r\n            self.time_out_entry.config(bg=\"white\")\r\n            self.running = True\r\n            self.new_thread = threading.Thread(target=self.loop_parse, daemon=True)\r\n            self.new_thread.start()\r\n        except Exception:\r\n            return\r\n        self.main_button.config(text=\"Стоп\", command=self._resetbutton)\r\n\r\n    def loop_parse(self) -> None:\r\n        try:\r\n            while self.running:\r\n                self.parse()\r\n                time.sleep(self.time_out)\r\n        except RuntimeError:\r\n            pass\r\n        except Exception:\r\n            self.results_time_update.config(state=tk.NORMAL)\r\n            self.results_time_update.delete(0, len(self.results_time_update.get()))\r\n            self.results_time_update.insert(\r\n                0,\r\n                \"Что-то пошло не так\",\r\n            )\r\n            self._resetbutton()\r\n\r\n    def _make_response(self) -> str:\r\n        target_link_str = self.target_link.get() or self.TARGET_LINK\r\n        response = requests.get(target_link_str)\r\n        response.raise_for_status()\r\n        return response.text\r\n\r\n    def _get_performance_divs(self) -> list:\r\n        soup = BeautifulSoup(self._make_response(), \"html.parser\")\r\n        performance_divs = soup.find_all(\r\n            name=\"div\", attrs={\"class\": \"building-schedule-item hasTickets\"}\r\n        )\r\n        performance_divs.extend(\r\n            soup.find_all(\r\n                name=\"div\",\r\n                attrs={\r\n                    \"class\": \"building-schedule-item building-schedule-item-hidden hasTickets\"\r\n                },\r\n            )\r\n        )\r\n        return performance_divs\r\n\r\n    def _clear_resul_columns(self) -> None:\r\n        for column in self.result_columns:\r\n            for entry in column:\r\n                entry.destroy()\r\n        self.result_columns = []\r\n\r\n    def _parse_date(self, performance: BeautifulSoup) -> str:\r\n        date_div = performance.find(\r\n            name=\"div\", attrs={\"class\": \"building-schedule-item-date-col\"}\r\n        )\r\n        date_date = date_div.find(\r\n            name=\"div\", attrs={\"class\": \"schedule-date_date\"}\r\n        ).text.strip()\r\n        date_month = date_div.find(\r\n            name=\"div\", attrs={\"class\": \"schedule-date_month\"}\r\n        ).text.strip()\r\n        date_day = date_div.find(\r\n            name=\"div\", attrs={\"class\": \"schedule-date_day\"}\r\n        ).text.strip()\r\n        return f\"{date_day} - {date_date} {date_month}\"\r\n\r\n    def _parse_date_time(self, performance: BeautifulSoup) -> str:\r\n        return 
performance.find(\r\n name=\"div\", attrs={\"class\": \"building-schedule-session-time\"}\r\n ).text.strip()\r\n\r\n def _parse_name(self, performance: BeautifulSoup) -> str:\r\n return performance.find(\r\n name=\"span\", attrs={\"class\": \"show-link-title\"}\r\n ).text.strip()\r\n\r\n def _parse_div_with_tickets(self, performance: BeautifulSoup) -> list:\r\n return performance.find_all(\r\n name=\"div\", attrs={\"class\": \"building-schedule-item-ticket-col\"}\r\n )\r\n\r\n def _parse_link_text(self, div: BeautifulSoup) -> Optional[str]:\r\n link_tag = div.find(name=\"a\", attrs={\"class\": \"item\"})\r\n if link_tag:\r\n link = link_tag.get(\"href\")\r\n return f\"https://www.bileter.ru{link}\"\r\n\r\n def _parse_price(self, div: BeautifulSoup) -> str:\r\n return div.find(name=\"div\", attrs={\"class\": \"price\"}).text.strip()\r\n\r\n def _parse_ticket_count(self, div: BeautifulSoup) -> str:\r\n ticket_count = (\r\n div.find(name=\"div\", attrs={\"class\": \"ticket-count\"})\r\n .find(name=\"span\")\r\n .text.strip()\r\n )\r\n return f\"В наличии {ticket_count} билетов\"\r\n\r\n def _update_result_columns(self, performance, div):\r\n link_text = self._parse_link_text(div=div)\r\n if link_text:\r\n date_time = self._parse_date_time(performance=performance)\r\n name = self._parse_name(performance=performance)\r\n date_string = self._parse_date(performance=performance)\r\n price = self._parse_price(div=div)\r\n ticket_count = self._parse_ticket_count(div=div)\r\n frame = tk.Frame()\r\n date_entry = tk.Entry(\r\n frame, fg=\"black\", bg=\"white\", width=self.DATE_ENTRY_WIDTH\r\n )\r\n date_entry.insert(0, date_string)\r\n date_time_entry = tk.Entry(\r\n frame, fg=\"black\", bg=\"white\", width=self.DATE_TIME_ENTRY_WIDTH\r\n )\r\n date_time_entry.insert(0, date_time)\r\n name_entry = tk.Entry(\r\n frame, fg=\"black\", bg=\"white\", width=self.NAME_ENTRY_WIDTH\r\n )\r\n name_entry.insert(0, name)\r\n price_entry = tk.Entry(\r\n frame, fg=\"black\", bg=\"white\", width=self.PRICE_ENTRY_WIDTH\r\n )\r\n price_entry.insert(0, price)\r\n ticket_count_text_entry = tk.Entry(\r\n frame, fg=\"black\", bg=\"white\", width=self.TICKET_COUNT_TEXT_ENTRY_WIDTH\r\n )\r\n ticket_count_text_entry.insert(0, ticket_count)\r\n link_text_entry = tk.Entry(\r\n frame, fg=\"black\", bg=\"white\", width=self.LINK_TEXT_ENTRY_WIDTH\r\n )\r\n link_text_entry.insert(0, link_text)\r\n self.result_columns.append(\r\n [\r\n frame,\r\n date_entry,\r\n date_time_entry,\r\n name_entry,\r\n price_entry,\r\n ticket_count_text_entry,\r\n link_text_entry,\r\n ]\r\n )\r\n\r\n def _paint_results(self):\r\n for column in self.result_columns:\r\n for i, entry in enumerate(column):\r\n side = \"top\" if i == 0 else \"left\"\r\n entry.pack(side=side)\r\n\r\n self.results_time_update.config(state=tk.NORMAL)\r\n self.results_time_update.delete(0, len(self.results_time_update.get()))\r\n self.results_time_update.insert(0, datetime.now().isoformat())\r\n self.results_time_update.config(state=tk.DISABLED)\r\n\r\n def parse(self) -> None:\r\n performance_divs = self._get_performance_divs()\r\n self._clear_resul_columns()\r\n\r\n for performance in performance_divs:\r\n ticket_divs = self._parse_div_with_tickets(performance=performance)\r\n\r\n for div in ticket_divs:\r\n self._update_result_columns(performance=performance, div=div)\r\n\r\n self._paint_results()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = Parser()\r\n 
parser.start()\r\n","repo_name":"OkhotnikovFN/bileter_parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"21720665972","text":"##\n## Programming with Pandas\n## ===========================================================================\n##\n## Build a table that contains _c0 and a comma-separated\n## list of the values of column _c4\n## from the file `tbl1.tsv`.\n##\n## Answer:\n##      _c0  lista\n## 0      0  b,f,g\n## 1      1  a,c,f\n## ...\n## 38    38    d,e\n## 39    39  a,d,f\n## \n## >>> Write your code from this point on <<<\n##\nimport pandas as pd\ndata = pd.read_csv('tbl1.tsv' , sep='\\t')\nxn = sorted(pd.unique(data._c0))\nserie = pd.Series(xn , name = '_c0')\nlistas = []\nfor n in xn:\n    temp = sorted(data[data['_c0'] == n]._c4)\n    # Join the sorted _c4 values into one comma-separated string\n    listas.append(\",\".join(str(let) for let in temp))\nlista = pd.Series(listas, name = 'lista')\ntabla = pd.concat([serie , lista] , axis = 1)\nprint(tabla)\n\n","repo_name":"anagaonag/-evaluacion-del-curso-anagaonag","sub_path":"03-pandas/q09/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"20811523984","text":"#!/usr/bin/env python3\n\nimport requests # need this for Get/Post/Delete\nimport json\nimport operator\nimport sys\nfrom prettytable import PrettyTable\n\n# Initial set of variables to define, so we can get started.\nmy_token = \"\"\nORGID = \"\"\nSDDCID = \"\"\n\n\n# Here we have defined the function to retrieve our access token\ndef get_access_token(my_token):\n    params = {'refresh_token': my_token}\n    headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n    response = requests.post('https://console.cloud.vmware.com/csp/gateway/am/api/auth/api-tokens/authorize',\n                             params=params, headers=headers)\n    json_response = response.json()\n    access_token = json_response['access_token']\n    return access_token\n\n\n# Here we define the function to retrieve the NSX-T Reverse Proxy URL\ndef get_nsxt_proxy(ORGID, SDDCID, access_token):\n    my_header = {'csp-auth-token': access_token}\n    my_url = f\"https://vmc.vmware.com/vmc/api/orgs/{ORGID}/sddcs/{SDDCID}\"\n    response = requests.get(my_url, headers=my_header)\n    json_response = response.json()\n    if response.status_code == 200:\n        proxy_url = json_response['resource_config']['nsx_api_public_endpoint_url']\n        return proxy_url\n    else:\n        print(\"There was an error. Check the syntax.\")\n        print(f'API call failed with status code {response.status_code}. 
URL: {my_url}.')\n print(json_response['error_message'])\n\n\n# Here we are calling the function to retrieve the access token and NSX proxy URL and store them as variables.\naccess_token = get_access_token(my_token)\nnsx_proxy = get_nsxt_proxy(ORGID, SDDCID, access_token)\n\n\n# Retrieving information is just a GET request, so we'll send a GET request to the API for the NSX T0 route table\nmy_header = {'csp-auth-token': access_token}\nmy_url = f'{nsx_proxy}/policy/api/v1/infra/tier-0s/vmc/routing-table?enforcement_point_path=' \\\n f'/infra/sites/default/enforcement-points/vmc-enforcementpoint'\nresponse = requests.get(my_url, headers=my_header)\njson_response = response.json()\nif response.status_code == 200:\n t0_routes = json_response['results'][1]['route_entries']\n print(t0_routes)\n\n # pretty_data = json.dumps(json_response, indent=4)\n # print(pretty_data)\n #\n # route_table = PrettyTable(['Route Type', 'Network', 'Admin Distance', 'Next Hop'])\n # for routes in t0_routes:\n # route_table.add_row([routes['route_type'], routes['network'], routes['admin_distance'], routes['next_hop']])\n # print('T0 Routes')\n # print('Route Type Legend:')\n # print('t0c - Tier-0 Connected\\nt0s - Tier-0 Static\\nb - BGP\\nt0n - Tier-0 NAT\\nt1s - Tier-1 Static\\nt1c - '\n # 'Tier-1 Connected\\nisr: Inter-SR')\n # print(route_table.get_string(sort_key=operator.itemgetter(1, 0), sortby=\"Network\", reversesort=True))\n\nelse:\n print(\"There was an error. Check the syntax.\")\n print(f'API call failed with status code {response.status_code}. URL: {my_url}.')\n sys.exit(json_response['error_message'])\n","repo_name":"maulepilot117/vmwareexplore2022","sub_path":"5_show-routes.py","file_name":"5_show-routes.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}