diff --git "a/3878.jsonl" "b/3878.jsonl" new file mode 100644--- /dev/null +++ "b/3878.jsonl" @@ -0,0 +1,675 @@ +{"seq_id":"243001408","text":"from flask import request\nfrom flask_restful import Resource\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\nfrom database.mongodb import book_lists\nfrom models.booklist import BookListModel\nfrom models.book import BookModel\n\nclass BookList(Resource):\n @jwt_required\n def post(self):\n posted_data = request.get_json()\n posted_data[\"playlist_author\"] = get_jwt_identity()[\"username\"]\n\n if get_jwt_identity()[\"access level\"] <= 1:\n return {\"message\":\"only admins\", \"user\":get_jwt_identity() ,\"status code\": 401}\n\n if BookListModel.validate_list(**posted_data):\n return BookListModel.validate_list(**posted_data)\n\n booklist_instance = BookListModel(**posted_data)\n\n book_lists.insert(booklist_instance.to_dict())\n\n return {\"message\":\"book list added succesfuly\", \"user\":get_jwt_identity() ,\"status code\": 200}\n\n @jwt_required\n def get(self):\n posted_data = request.get_json()\n\n booklist = book_lists.find_one({'name':posted_data[\"name\"]}, {\"_id\":0} )\n\n if booklist == None:\n return {\"message\":\"book list not found\", \"status code\": 404}\n\n return {\"book list\":booklist, \"user\":get_jwt_identity(), \"message\":\"succesful\", \"status code\": 200}\n\nclass AddToList(Resource):\n @jwt_required\n def post(self):\n posted_data = request.get_json()\n\n if get_jwt_identity()[\"access level\"] <= 1:\n return {\"message\":\"only admins\", \"user\":get_jwt_identity() ,\"status code\": 401}\n\n if BookModel.validate_book(**posted_data[\"book\"]):\n return BookModel.validate_book(**posted_data[\"book\"])\n\n if book_lists.count_documents({'name':posted_data[\"booklist name\"]}, limit = 1) == 0 :\n return {\"message\":\"booklist doesn't exist\", \"status code\":404}\n\n book_lists.update({\"name\": posted_data[\"booklist name\"]},{\"$push\": {'booklist': posted_data[\"book\"]}})\n\n return {\"message\":\"book added succesfuly\", \"user\":get_jwt_identity(), \"status code\": 200}\n\nclass SearchList(Resource):\n @jwt_required\n def get(self):\n posted_data = request.get_json()\n\n return_data = list(book_lists.find({ \"$or\" : [ {'name':{\"$regex\":posted_data[\"search\"]}} , {'playlist_author':{\"$regex\":posted_data[\"search\"]}} ] }, {\"_id\":0} ))\n\n if len(return_data) == 0:\n return {\"message\":\"no booklists found\", \"status code\":404}\n\n return {\"book lists\": return_data, \"user\":get_jwt_identity(), \"message\":\"succesful\", \"status code\":200}\n","sub_path":"resources/booklist.py","file_name":"booklist.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105772324","text":"from setuptools import find_packages, setup\n\ninstall_requires = [\n \"numpy\",\n \"posix-ipc\",\n \"pyarrow\",\n \"docker\",\n \"six\", # because of docker\n \"spython\",\n \"pydantic[dotenv]\",\n \"click\",\n \"click_plugins\",\n \"coloredlogs\",\n \"msgpack\",\n]\n\nsetup(\n name=\"oremda\",\n use_scm_version=True,\n description=\"Open Reproducible Electron Microscopy Data Analysis\",\n long_description=\"Open Reproducible Electron Microscopy Data Analysis\",\n url=\"https://github.com/OpenChemistry/oremda\",\n author=\"Kitware Inc\",\n license=\"BSD 3-Clause\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3\",\n ],\n 
keywords=\"\",\n packages=find_packages(),\n install_requires=install_requires,\n entry_points={\n \"console_scripts\": [\n \"oremda = oremda.cli:main\",\n ],\n \"oremda.cli.plugins\": [\n \"run = oremda.cli.run:main\",\n \"engine = oremda.cli.engine:main\",\n \"pull = oremda.cli.pull:main\",\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83203522","text":"a = float(input(\"请输入三角形边1(正整数):\"))\nb = float(input(\"请输入三角形边2(正整数):\"))\nc = float(input(\"请输入三角形边3(正整数):\"))\nif a > 0 and b > 0 and c > 0:\n if a + b > c and a + c > b and b + c > a:\n if a == b and a == c and b == c:\n print(\"构成等边三角形\")\n elif a == b and b != c and a != c:\n print(\"构成等腰三角形\")\n elif a == c and a != b and b !=c:\n print(\"构成等腰三角形\")\n elif b == c and a != b and a !=c:\n print(\"构成等腰三角形\")\n else:\n print(\"构成普通三角形\")\n else:\n print(\"不能构成三��形\")\n\nelse:\n print(\"请输入正整数!\")","sub_path":"day02/demo8.py","file_name":"demo8.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414418422","text":"import re\nimport pandas as pd\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport requests\nimport pymysql\nfrom datetime import datetime, timedelta\nimport time\n\nnowtime = datetime.utcnow() + timedelta(hours=9)\n\n# MySQL Connection 연결\nconn = pymysql.connect(host='aa.co.kr', port=1, user='1', password='1!', db='python', charset='utf8')\n\ncurs = conn.cursor(pymysql.cursors.DictCursor)\n\n\n# url = 'http://land.naver.com/article/articleList.nhn?rletTypeCd=A01&tradeTypeCd=B1&hscpTypeCd=A01%3AA03%3AA04&cortarNo=1117012000' # 도원삼성레미안\n# url = 'http://land.naver.com/article/articleList.nhn?rletTypeCd=A01&tradeTypeCd=B1&hscpTypeCd=A01%3AA03%3AA04&cortarNo=1117012000' #\n# url = 'http://land.naver.com/article/articleList.nhn?rletTypeCd=A01&tradeTypeCd=B1&hscpTypeCd=A01%3AA03%3AA04&cortarNo=1117012000' #\n# url = 'http://land.naver.com/article/articleList.nhn?rletTypeCd=A01&tradeTypeCd=B1&hscpTypeCd=A01%3AA03%3AA04&cortarNo=1117012000' #\nr = requests.get(url)\nsoup = BeautifulSoup(r.text, 'lxml')\n\ntable = soup.find('table')\ntrs = table.tbody.find_all('tr')\n\n# 거래, 종류, 확인일자, 매물명, 면적(㎡), 층, 매물가(만원), 연락처\nfor tr in trs[::2]:\n tds = tr.find_all('td')\n cols = [' '.join(td.text.strip().split()) for td in tds]\n\n if '_thumb_image' not in tds[3]['class']: # 현장확인 날짜와 이미지가 없는 행\n cols.insert(3, '')\n\n 거래 = cols[0]\n 종류 = cols[1]\n 확인일자 = datetime.strptime(cols[2], '%y.%m.%d.')\n# 확인일자 = 확인일자.replace(\"00:00:00\", \"\")\n 현장확인 = cols[3]\n 매물명 = cols[4]\n 매물명 = 매물명.replace(\"네이버부동산에서 보기\", \"\")\n 매물명 = 매물명.replace(\"집주인 \", \"\")\n 면적 = cols[5]\n 공급면적 = re.findall('공급면적(.*?)㎡', 면적)[0].replace(',', '')\n 전용면적 = re.findall('전용면적(.*?)㎡', 면적)[0].replace(',', '')\n 공급면적 = float(공급면적)\n 전용면적 = float(전용면적)\n 층 = cols[6]\n 매물가 = int(cols[7].replace(',', ''))\n 연락처 = cols[8]\n 입력날짜 = str(nowtime)\n\n if float(전용면적) < 50 or float(전용면적) > 80:\n continue\n\n print(거래, 종류, 확인일자, 현장확인, 매물명, 공급면적, 전용면적, 층, 매물가, str(nowtime))\n\n## # ==== insert example ====\n with conn.cursor() as curs:\n sql = \"\"\"insert into tb_naverland(구분, 종류, 확인일자, 현장확인, 아파트명, 공급면적, 전용면적, 층, 매물가, 비고, 입력날짜) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )\"\"\"\n curs.execute(sql, (거래, 종류, 확인일자, 현장확인, 매물명, 공급면적, 전용면적, 층, 매물가, '', 입력날짜))\n\n 
conn.commit()\n","sub_path":"APT_NAVER_verygood.py","file_name":"APT_NAVER_verygood.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191156620","text":"import ot\n\nthread = ot.Instance(2)\n\nthread.set_panid(0x1234)\nthread.set_channel(11)\n\ndef main():\n if thread.is_initialized():\n thread.cli_uart_init()\n thread.autostart()\n while True:\n thread.process()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/ping/main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"557593937","text":"#! /usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import String\nimport time\n\npub = None\ncommand=0\nsub_side=None\n\ndef callback(msg):\n\tn = len(msg.ranges)\n\t#distribute total region (half a circle) into 3 parts\n\tfront = min(msg.ranges[n//3:2*n//3])\n\tgo_front(front)\n\t#rospy.loginfo(ranges)\n\ndef go_front(value):\n\tmsg = Twist()\n\tstate_description = \"pranav\" #for personal info\n\tif(value>1.5):\n\t\trospy.loginfo(state_description)\n\t\tmsg.linear.x = 50\n\t\tpub.publish(msg)\n\t\tprint(\"yo\")\n\telse:\n\t\tmsg.linear.x=0\n\t\tpub.publish(msg)\n\ndef clbk_laser(msg):\n\tglobal left\n\tn = len(msg.ranges)\n\t#distribute total region (half a circle) into 3 parts\n\tleft= min(msg.ranges[n//3:2*n//3])\n\ttake_action(left)\n\n\ndef take_action(value):\n\tmsg = Twist()\n\tstate_description = '' #for personal info\n\tif(value>1):\n\t\trospy.loginfo(state_description)\n\t\tmsg.linear.y = 8\n\t\tpub.publish(msg)\n\telse:\n\t\tmsg.linear.y=0\n\t\tpub.publish(msg)\n\t\tcommand=1\n\t\tsub_side.unregister()\n\t\t\n\n\n\ndef main():\n\tglobal pub\n\tglobal sub_side\n\tglobal command\n\trospy.init_node('all_together')\n\tpub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\tsub_side = rospy.Subscriber('/hexbot/laser_side/scan', LaserScan, clbk_laser)\n\tif command==1:\n\t\tsub_front = rospy.Subscriber('/hexbot/laser/scan', LaserScan, callback)\n\t\n\n\trospy.spin()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"harshil_tut/src/hexbot/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277234580","text":"\n\ndef linear_eq(a, b):\n \"\"\" linear equation \"\"\"\n xa, ya = map(float, a)\n xb, yb = map(float, b)\n m = (yb - ya) / ((xb / xa) or 1.0)\n return lambda x: (m*(x - xa)) + ya\n\n\ndef plot_factory(*points, **kwargs):\n \"\"\" takes a few linear equations then makes a vector\n and if the x is in the x range of the vector then\n it will be able tell if the y value is out of bounds \"\"\"\n\n below = kwargs.get('below', True)\n pairs = []\n\n for i in xrange(0, len(points)-1):\n a, b = i, i+1\n pair = (points[a][0], points[b][0], linear_eq(points[a], points[b]))\n pairs.append(pair)\n\n def _impl(x, y):\n if points[0][0] <= x <= points[-1][0]:\n for start, end, fn in pairs:\n if start <= x <= end:\n if below and y > fn(x):\n return False\n elif not below and y < fn(x):\n return False\n return True\n return _impl\n\n","sub_path":"python/g_process.py","file_name":"g_process.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424187550","text":"import 
socket\nimport threading\nfrom Setting import *\n\nclass Server:\n def __init__(self, parent=None):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.parent = parent\n self.data_tmp = []\n\n def connect(self, host, port):\n self.thread = threading.Thread(target=self.connect_client, args=(host, port, ))\n self.thread.start()\n\n def connect_client(self, host, port):\n self.socket.bind((host, port))\n self.socket.listen(2)\n self.conn = []\n while len(self.conn) < 2:\n conn, addr = self.socket.accept()\n self.conn.append(conn)\n print(\"Connected to \", addr)\n s0 = threading.Thread(target=self.recv_data0)\n s1 = threading.Thread(target=self.recv_data1)\n s0.start()\n s1.start()\n\n def send(self, data):\n for conn in self.conn:\n conn.sendall(data.encode())\n print(\"send: \", data)\n\n def recv_data0(self):\n while True:\n #thang 1\n try:\n data_recv = self.conn[0].recv(1024).decode()\n except:\n self.close()\n break\n if data_recv == \"QUIT\":\n print(\"Quit request accepted!\")\n self.close()\n break\n if not data_recv:\n self.close()\n print(\"Recv:\", data_recv)\n self.conn[1].sendall(data_recv.encode())\n def recv_data1(self):\n while True:\n #thang 2\n try:\n data_recv = self.conn[1].recv(1024).decode()\n except:\n self.close()\n break\n if data_recv == \"QUIT\":\n print(\"Quit request accepted!\")\n self.close()\n break\n if not data_recv:\n self.close()\n print(\"Recv:\", data_recv)\n self.conn[0].sendall(data_recv.encode())\n\n def close(self):\n self.conn[0].close()\n self.conn[1].close()\n self.socket.close()\n\n def disconnect(self):\n self.socket.close()\n\n# s = Server()\n# s.connect(HOST, PORT)\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445891063","text":"def main():\n import sys\n sys.setrecursionlimit(10**6)\n input = sys.stdin.readline\n H, W = map(int, input().split())\n a = [tuple(map(int, input().split())) for i in range(H)]\n g = [[0]*W for i in range(H)]\n mod = 10**9 + 7\n\n def dfs(i, j):\n if g[i][j] > 0:\n return(g[i][j])\n else:\n v = a[i][j]\n ret = 1\n if i > 0 and v < a[i-1][j]:\n ret += dfs(i-1, j)\n if i < H-1 and v < a[i+1][j]:\n ret += dfs(i+1, j)\n if j > 0 and v < a[i][j-1]:\n ret += dfs(i, j-1)\n if j < W-1 and v < a[i][j+1]:\n ret += dfs(i, j+1)\n ret %= mod\n g[i][j] = ret\n return(ret)\n\n ans = 0\n for i in range(H):\n for j in range(W):\n ans += dfs(i, j)\n # print(g)\n print(ans % mod)\n\n\nmain()\n","sub_path":"abc037/abc037_d/13690057.py","file_name":"13690057.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334575363","text":"#!/usr/bin/env python\n# -*-coding:utf-8 -*-\n\"\"\"\n* Create by shylock on 2016/1/4\n\"\"\"\n\nimport sqlite3\nfrom bottle import route, run, template, debug\n\n\n@route('/')\n@route('/todo')\ndef todo_list():\n conn = sqlite3.connect('todo.db')\n cursor = conn.cursor()\n cursor.execute(\"SELECT id, task FROM todo\")\n result = cursor.fetchall()\n cursor.close()\n conn.close()\n output = template(\"templates/make_table\", rows=result)\n return output\n\n\ndebug(True)\nrun(port=80, host=\"localhost\", reloader=True)\n","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"26554810","text":"\"\"\"\n @author: Matko 
Gabriel\n @email: ytgabi98@gmail.com\n @date: 12/19/2017 12:11\n\"\"\"\nfrom domain.entities.address import Address\nfrom domain.entities.driver import Driver\nfrom domain.repository.repo import Repository\nfrom ui.ui import UI\n\n\nclass Controller(object):\n\n def __init__(self):\n self.fileAddresses = \"addresses.txt\"\n self.fileDrivers = \"drivers.txt\"\n self.addresToCalculate = None\n\n def startApp(self):\n \"\"\"\n Main function for starting the app\n :return:\n \"\"\"\n #\n # with open(self.fileAddresses, \"w\") as f:\n # for i in range(10):\n # f.write(str(Address(i, \"Address\" + str(i), str(i*3), str(i*4))))\n # with open(self.fileDrivers, \"w\") as f:\n # for i in range(10):\n # f.write(str(Driver(\"Name\" + str(i), str(i*5), str(i*6))))\n repoAddresses = Repository(self.fileAddresses, Address)\n repoDrivers = Repository(self.fileDrivers, Driver)\n while True:\n UI.printMenu()\n try:\n cmd = UI.readCmd()\n if cmd == 1:\n UI.printAll(repoAddresses, repoDrivers)\n if cmd == 2:\n address = UI.readAddress()\n listOfDrivers = self.calculateDistance(address, repoDrivers)\n UI.printList(listOfDrivers)\n if cmd == 3:\n pairs = self.getPairOfDrivers(repoDrivers)\n UI.printPairs(pairs)\n except Exception as ex:\n print(ex)\n\n def calculateDistance(self, address, repoDrivers):\n \"\"\"\n Sort the drivers by their distance to the given address\n :param address:\n :param repoDrivers:\n :return:\n \"\"\"\n drivers = repoDrivers.getAll()\n self.addresToCalculate = address\n for i in range(len(drivers)):\n for j in range(len(drivers)):\n if self.distanceFrom(drivers[i], drivers[j]):\n tmp = drivers[i]\n drivers[i] = drivers[j]\n drivers[j] = tmp\n return drivers\n #print(*drivers)\n\n def getPairOfDrivers(self, repoDrivers):\n \"\"\"\n Returns the pairs of closest drivers\n :param repoDrivers:\n :return:\n \"\"\"\n drivers = repoDrivers.getAll()\n pairs = []\n poz = 0\n dist = self.manhattan(drivers[0], drivers[1])\n self.addresToCalculate = Address(1, \"abc\", 0, 0)\n for i in range(len(drivers)):\n for j in range(len(drivers)):\n if self.distanceFrom(drivers[i], drivers[j]):\n tmp = drivers[i]\n drivers[i] = drivers[j]\n drivers[j] = tmp\n for i in range(0, len(drivers), 2):\n pairs.append([drivers[i], drivers[i+1]])\n return pairs\n\n\n def distanceFrom(self, d1, d2):\n \"\"\"\n Return the boolean value resulting from comparison of two distances\n :param d1: driver1\n :param d2: driver2\n :return: True if driver1 is closer to the address\n \"\"\"\n return self.manhattanDistanceAddressDriver(self.addresToCalculate, d1) < self.manhattanDistanceAddressDriver(self.addresToCalculate, d2)\n\n def manhattanDistanceAddressDriver(self, address, driver):\n \"\"\"\n Distance between an address and a driver\n :param address:\n :param driver:\n :return:\n \"\"\"\n return abs(address.dx - driver.dx) + abs(address.dy - driver.dy)\n\n def manhattan(self, d1, d2):\n \"\"\"\n distance between two drivers\n :param d1:\n :param d2:\n :return:\n \"\"\"\n return abs(d1.dx - d2.dx) + abs(d1.dy - d2.dy)\n","sub_path":"partialTestW12/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"139783337","text":"import sqlite3\n\nf=\"discobandit.db\"\n\ndb = sqlite3.connect(f)\nc = db.cursor()\n\ndef getName(x):\n student = \"SELECT name FROM peeps WHERE id=%d;\" % x\n name = c.execute (student)\n for record in name:\n return record[0]\n\ndef getAvg(x):\n grades = \"SELECT mark FROM 
courses, peeps WHERE peeps.id = %d AND courses.id = %d;\" % (x,x)\n c.execute(grades)\n data = c.fetchall()\n total = 0;\n count = 0;\n for row in data:\n total += row[0]\n count= count + 1\n if not count == 0:\n return total/count\n\ndef getResults():\n x = 1\n while(x < 11):\n print (\"name: %s id: %d average: %d\" % (getName(x), x, getAvg(x)))\n x = x+1\n\n#print getAvg(4)\ngetResults()\nc.close()\ndb.close()\n","sub_path":"get_averages.py","file_name":"get_averages.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"242317832","text":"from typing import ContextManager\nfrom django.shortcuts import redirect, render\n\nfrom . import models\n\n\ndef index(request):\n\n\n context = {\n \"all_dojo\" : models.Dojo.objects.all().values(),\n \"all_ninja\" : models.Ninja.objects.all(),\n\n \n }\n\n return render(request, \"index.html\" , context )\n\ndef index2(request):\n if request.POST[\"forms\"] == \"dojo\":\n models.result(request.POST)\n \n if request.POST[\"forms\"] == \"ninja\":\n models.result2(request.POST)\n\n return redirect(\"/\")","sub_path":"django/django_orm/ninja_dojo/app_one/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"153308812","text":"import igraph as ig\nimport plotly.offline as py\nfrom plotly.graph_objs import *\nfrom colorhash import ColorHash\nfrom copra_lpa import *\nimport time\n\nvertex_index = {}\n\n\ndef convert_vertex(v):\n if type(v) is int:\n reverse_index = dict((v, k) for k, v in vertex_index.items())\n return reverse_index[v]\n return vertex_index[v]\n\n\ndef process_file(filename):\n \"\"\"\n Process an input file for its vertices, edges, and weights.\n :param filename: Name of file to process.\n :return: List of vertices, edges, and weights parsed from the file.\n \"\"\"\n\n def parse_line_vertices(ln_list):\n \"\"\"\n Parse list of vertices from input file into a list.\n :param ln_list: List of lines in input file.\n :return: List of vertices in graph.\n \"\"\"\n vertex_map = {}\n edge_list = []\n val_count = 0\n for ln in ln_list:\n parts = ln.split()\n src = parts[0]\n dst = parts[1]\n if src not in vertex_map:\n vertex_map[src] = val_count\n val_count = val_count + 1\n if dst not in vertex_map:\n vertex_map[dst] = val_count\n val_count = val_count + 1\n edge_list.append((src, dst))\n global vertex_index\n vertex_index = vertex_map\n return vertex_map.keys()\n\n def parse_line_edges(ln_list):\n \"\"\"\n Parse list of edges from file into a list.\n :param ln_list: List of lines in input file.\n :return: List of edges in graph.\n \"\"\"\n edge_list = []\n for ln in ln_list:\n parts = ln.split()\n src = convert_vertex(parts[0])\n dst = convert_vertex(parts[1])\n edge_list.append((src, dst))\n return edge_list\n\n def parse_weights(ln_list):\n \"\"\"\n Parse weights of each line into a list.\n :param ln_list: List of lines in input file.\n :return:\n \"\"\"\n weights = []\n for ln in ln_list:\n weights.append(ln.split()[2])\n return weights\n\n edge_file = open(filename)\n line_list = edge_file.readlines()\n edge_file.close()\n proc_vertices = parse_line_vertices(line_list)\n proc_edges = parse_line_edges(line_list)\n proc_weights = parse_weights(line_list)\n print(str(len(proc_vertices)) + ' vertices')\n print(str(len(proc_edges)) + ' edges')\n return proc_vertices, proc_edges, proc_weights\n\n\ndef plot_data(vertices, 
edges, weights, f_name, cscale=0):\n \"\"\"\n Plot network data in three-dimensional space.\n :param vertices: List of vertices in the network.\n :param edges: List of connections in the network.\n :param f_name: Name of HTML file to generate.\n :param cscale: Color scale for network, blue by default. [(0, Blue), (1, Green), (2, Orange)]\n \"\"\"\n graph = ig.Graph(edges, directed=False)\n graph.es[\"weight\"] = [weights[e] for e in range(len(graph.es))]\n\n # calculate_edge_betweenness(graph, weights)\n\n def build_colorscale(c, vert_list):\n \"\"\"\n Build the colorscale for vertices in the visualization.\n :param c: Parameter to decide colorscale.\n :param vert_list: List of vertices to color.\n :return: List of colors to associate with each vertex.\n \"\"\"\n clrs = []\n for v in vert_list:\n q = ColorHash(v)\n col_str = 'rgb('\n if c == 0:\n col_str += '%s,%s,255)' % (q.rgb[0], q.rgb[1])\n elif c == 1:\n col_str += '%s,255,%s)' % (q.rgb[0], q.rgb[2])\n elif c == 2:\n col_str += '255,%s,%s)' % (q.rgb[1], q.rgb[2])\n else:\n col_str += '%s,%s,%s)' % tuple(c.rgb)\n clrs.append(col_str)\n return clrs\n\n colors = build_colorscale(cscale, vertices)\n\n # Spatial layout of graph components\n spatial = graph.layout('kk', dim=3)\n\n def trace_edges(edge_list, layt):\n \"\"\"\n Build the plot.ly trace to map edges on three-dimensional plane.\n :param edge_list: List of edges to draw.\n :param layt: Three-dimensional layout for visualization.\n :return: Edge trace for plot.ly visualization.\n \"\"\"\n x_edge, y_edge, z_edge = ([] for i in range(3))\n for e in edge_list:\n x_edge += [layt[e[0]][0], layt[e[1]][0], None] # x-coordinates of edge ends\n y_edge += [layt[e[0]][1], layt[e[1]][1], None] # y-coordinates of edge ends\n z_edge += [layt[e[0]][2], layt[e[1]][2], None] # z-coordinates of edge ends\n\n edge_trace = Scatter3d(x=x_edge, y=y_edge, z=z_edge,\n mode='lines',\n line=Line(color='rgb(0,0,0)', width=1),\n hoverinfo='none')\n return edge_trace\n\n def trace_vertices(vertex_list, layt):\n \"\"\"\n Build the plot.ly trace to map vertices on three-dimensional plane.\n :param vertex_list: List of vertices to place.\n :param layt: Three-dimensional layout for visualization.\n :return: Vertex trace for plot.ly visualization.\n \"\"\"\n vertex_trace = Scatter3d(x=[layt[k][0] for k in range(len(vertex_list))], # x-coordinates of nodes\n y=[layt[k][1] for k in range(len(vertex_list))], # y-coordinates of nodes\n z=[layt[k][2] for k in range(len(vertex_list))], # z-coordinates of nodes\n mode='markers', name='cortical areas',\n marker=Marker(symbol='dot', size=9, color=colors,\n line=Line(color='rgb(50,50,50)', width=0.5)),\n text=vertex_list, hoverinfo='text')\n return vertex_trace\n\n def build_layout():\n \"\"\"\n Build the plot.ly layout object for the visualization.\n :return: Plot.ly layout object.\n \"\"\"\n axis = dict(showbackground=False, showline=False, zeroline=False,\n showgrid=False, showticklabels=False, title='')\n\n layout = Layout(title=\"3D visualization of a \" + f_name + \" cortical area network\",\n showlegend=False,\n scene=Scene(xaxis=XAxis(axis), yaxis=YAxis(axis), zaxis=ZAxis(axis), ),\n margin=Margin(t=100),\n hovermode='closest')\n return layout\n\n data = Data([trace_edges(edges, spatial),\n trace_vertices(vertices, spatial)])\n fig = Figure(data=data, layout=build_layout())\n py.plot(fig, filename='output/' + f_name + '.html')\n\n\ndef calculate_edge_betweenness(graph, weights):\n print('\\n+------------------+')\n print('| Edge Betweenness |')\n 
print('+------------------+')\n\n start = time.time()\n int_weights = [int(w) for w in weights]\n lpa = graph.community_edge_betweenness(weights=int_weights)\n clus = lpa.as_clustering()\n cluster_index = 0\n for c in clus:\n if cluster_index == 0:\n cluster_index += 1\n continue\n clist = []\n for rep in c:\n if rep == 0:\n continue\n clist.append(convert_vertex(rep))\n print(' Cluster # %d' % cluster_index)\n cluster_index += 1\n ones = [one.split(\"_\")[1] for one in clist if one.startswith('one_')]\n print('\\tSide 1:\\n\\t ' + str(ones))\n twos = [two.split(\"_\")[1] for two in clist if two.startswith('two_')]\n print('\\tSide 2:\\n\\t ' + str(twos) + '\\n')\n end = time.time()\n print('Modularity of clusters is: ' + str(clus.modularity))\n print('Edge betweenness took ' + str(end-start) + ' seconds.')\n\n\ndef print_header(filename):\n \"\"\"\n Formatted printing of file name in header of output.\n :param filename: Name of file to visualize.\n \"\"\"\n raw = filename.split('/')\n filler = ''\n for x in range(len(raw[1])):\n filler += '-'\n border = '+-----------------------------%s-+' % filler\n text = '| Processing data for file -> %s |' % raw[1]\n print(border + '\\n' + text + '\\n' + border)\n\n\nif __name__ == '__main__':\n mouse = 'data/final_mouse_weighted.txt'\n print_header(mouse)\n vertices, edges, weights = process_file(mouse)\n plot_data(vertices, edges, weights, 'Mouse', 2)\n","sub_path":"visualize_network.py","file_name":"visualize_network.py","file_ext":"py","file_size_in_byte":8352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"465887537","text":"\"\"\"\nMichael S. Emanuel\nSat Oct 22 10:59:57 2016\n\nAmicable numbers\nProblem 21\nLet d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\nIf d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n\nFor example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10000.\n\"\"\"\n\nfrom Euler.Primes import PrimeTable\nfrom typing import List, Dict\n\n\ndef isAmicablePair(a: int, b: int, pdsA: int, pdsB: int) -> bool:\n \"\"\"Indicate whether (a,b) is a pair of distinct amicable numbers.\"\"\"\n # If the proper divisor sum of b equals a, then (a, b) is an amicable pair\n # However, only distinct pairs a != b\n # don't take perfect numbers where a = b (i.e. 
pds(a) = a)\n return (pdsA == b and pdsB == a and b != a)\n\n\ndef main() -> int:\n # Desired cap on amicable numbers\n N: int = 10000\n # Instantiate prime table\n pt: PrimeTable = PrimeTable()\n # Build primes up to n\n pt.buildPrimesUpTo(N)\n \n pds: Dict[int, int] = {0:0}\n # For all ints up to n, get their proper divisor in a list\n n: int\n for n in range(1, N+1):\n # Proper divisor sum of n\n pds[n] = pt.properDivisorSum(n)\n # Now ProperDivisorSum[n] = pds[n] for all n up to N\n \n # Also need the proper divisor sum for every proper divisor sum from\n # last step that isn't already in the table\n pds2: Dict[int, int] = {}\n for n in pds.values():\n if n > N:\n # Proper divisor sum of n\n pds2[n] = pt.properDivisorSum(n)\n \n # Merge the additional entries into original dict\n pds.update(pds2)\n \n # Search for amicable number pairs\n amicable: List[int] = []\n amicableSum: int = 0\n for a in range(2, N+1):\n # Get the proper divisor sum of a\n pdsA: int = pds[a]\n # Now check the proper divisor sum for b = pdsA\n b: int = pdsA\n pdsB: int = pds[b]\n # Tabulate amicable pairs\n if isAmicablePair(a, b, pdsA, pdsB):\n amicable.append(a)\n amicableSum += a\n print('Amicable numbers up to %d.' % N)\n print(amicable)\n print('Sum of amicable numbers:')\n print(amicableSum)\n return amicableSum\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"Prob021_AmicableNumbers.py","file_name":"Prob021_AmicableNumbers.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"453517939","text":"def isdifferent(str):\n list1=list(str)\n for i in list1: \n n=list1.count(i)\n if n >=2:\n return True\n if n==1:\n return False\n\ns=str(input(\"请输入一组整数:\"))\nprint(isdifferent(s))\n\n \n","sub_path":"30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"562468514","text":"import os\nfilePath =r\"./\"\n\n\n\ndef native_cp(str_log_open,str_new):\n import os\n import shutil\n str_log_open = str_log_open ###文件名写死!!!防止出错\n shutil.copyfile(str_log_open, str_new)\n\n\ndef jsknb():\n import os\n try:\n os.makedirs('./gophase')\n except:\n print(\"already done\")\n\n\n t=[]\n v=[]\n filename1=os.listdir(filePath)\n filename=[]\n #print(filename)\n #m=0\n for i in filename1:\n #print(i)\n if i[0:2]==\"ve\":\n filename.append(i)\n #print(filename)\n filesunxu=[filename[0]]\n for i in filename:\n k=0\n for j in filesunxu:\n if float(i.split('-')[2])>float(j.split('-')[2]) or (float(i.split('-')[2])==float(j.split('-')[2]) and float(i.split('-')[1])!=float(j.split('-')[1])) :\n filesunxu.insert(k,i)\n break\n else:\n if float(i.split('-')[2])==float(j.split('-')[2]) and float(i.split('-')[1])==float(j.split('-')[1]) :\n break\n else:\n if k>=len(filesunxu)-1:\n filesunxu.append(i)\n k=k+1\n #print(filesunxu)\n flag=1\n while flag==1:\n m = 0\n flag=0\n for i in filesunxu:\n if m > 0:\n if filesunxu[m].split('-')[2] == filesunxu[m - 1].split('-')[2]:\n if float(filesunxu[m].split('-')[1]) > float(filesunxu[m - 1].split('-')[1]):\n flag=1\n filesunxu[m], filesunxu[m - 1] = filesunxu[m - 1], filesunxu[m]\n break\n m = m + 1\n\n #print(filesunxu)\n record=[]\n #print(\"shunxu\",filesunxu)\n for i in filesunxu:\n path=filePath+i\n #print(path)\n # print(path)\n f=open(path,'r+')\n line=f.readlines()\n record.append(line[-1])\n f.close()\n #print(\"record\",record)\n 
print(\"shunxu\",filesunxu)\n m=0\n namelist=[filesunxu[0].split('-')[2]]\n file=filesunxu[0].split('-')[2]\n filenew = open(file, \"w\")\n \n import os\n for i in filesunxu:\n if file != i.split('-')[2]:\n filenew.close()\n file=i.split('-')[2]\n filenew=open(file,\"w\")\n namelist.append(i.split('-')[2])\n filenew.write(record[m])\n \n m=m+1\n \n filenew.close()\n \n return namelist\ndef cpdos(Vlist,Tlist = [100,500,750,1000,1500,2000]):\n Vlist = Vlist\n Tlist = Tlist\n \n for v in Vlist:\n for t in Tlist:\n native_cp(\"./ph.dos-\"+str(v)+\"-\"+str(t),\"./gophase/\"+\"ph.dos-\"+str(v)+\"-\"+str(t))\n return print(\"done\")\n \n\n\ndef trans_pdos(alist,vlist,Tlist=[100,500,750,1000,1500,2000]):\n import os\n os.chdir(\"./gophase\")\n alist = alist\n vlist = vlist\n Tlist=Tlist\n import pandas as pd\n df = pd.DataFrame()\n df[\"v1\"] = alist\n df[\"v2\"] = vlist\n dict1 = dict(zip(df['v1'],df['v2']))\n dict1\n import os\n for a in alist:\n for t in Tlist:\n os.rename(\"ph.dos-\"+str(a)+\"-\"+str(t),\"ph.dos-\"+str(dict1[a])+\"000-\"+str(t))\n\n\n\n\n\n\n\n\n\n\n\n\n#namelist = jsknb()\n#for i in namelist:\nnmlist = []\nfor i in jsknb(): native_cp(\"./\"+str(i),\"./gophase/ve-\"+str(i))\n\nfor i in jsknb(): nmlist.append(str(i))\n #print(i,\"done\")\n#print(nmlist)\nimport pandas as pd\ndf = pd.read_csv(\"./\"+nmlist[-1],header = None,delim_whitespace = True)\nvlist = []\nfor i in df.iloc[:,0]: vlist.append(i)\n\nprint(vlist)\n\ncpdos(Vlist=[2.6,2.7,2.8]) ###这里传入alist,也就是w1和w2的vlist,最开始的参数搜索\n\ntrans_pdos(alist =[2.6,2.7,2.8],\n vlist = vlist )","sub_path":"new_2021.8.3_wf/gotophase-alpha.py","file_name":"gotophase-alpha.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471933019","text":"import sys\nfrom pathlib import Path\nhome = str(Path.home())\nsys.path.append(home + '/Cytnx_lib')\nfrom cytnx import *\n\nA = Storage(15)\n\nB = A\nC = A.clone()\n\nprint(B is A)\nprint(C is A)\n\n","sub_path":"example/Storage/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398892500","text":"# Задача - 1\n# Запросите у пользователя имя, фамилию, email. 
Теперь необходимо совершить проверки, имя и фамилия должны иметь заглавные первые буквы.\n# email - не должен иметь заглавных букв и должен быть в формате: текст в нижнем регистре, допускается нижнее подчеркивание и цифры, потом @, потом текст, допускаются цифры, точка, ru или org или com.\n# Например:\n# Пупкин василий - неверно указано имя, te$T@test.net - неверно указан email (спецсимвол, заглавная буква, .net), te_4_st@test.com - верно указан.\n\nimport re\npatternnamesurmane = '[A-Z][a-z]+'\npatternmail = '[a-z_0-9]+@[a-z0-9]+\\.(ru|com|org)'\nname = str(input('Введите ваше имя: '))\nname1 = name\nre.search(patternnamesurmane, name1)\nif re.search(patternnamesurmane, name1) is None:\n print('Неверно введено имя!!')\nsurname = str(input('Введите вашу фамилию: '))\nsurname1 = surname\nre.search(patternnamesurmane, surname1)\nif re.search(patternnamesurmane, surname1) is None:\n print('Неправильно введена фамилия!!')\nmail = input('Введите вашу электронную почту: ')\nmail1 = mail\nre.search(patternmail, mail1)\nif re.search(patternmail, mail1) is None:\n print('Некорректно введен e-mail!!')\n","sub_path":"homework_lesson04_normal_only1exercise/homework_lesson04_normal1.py","file_name":"homework_lesson04_normal1.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232028719","text":"from tkinter import *\n# from tkinter.ttk import *\nfrom PIL import Image, ImageTk\nfrom login_dash import LoginDash\n\nclass SigninDash:\n def __init__(self, window):\n self.window = window\n self.window.geometry(\"1366x720+70+50\")\n self.main_black_color = '#0f0f0f'\n self.main_white_color = '#f8f8f8'\n self.window['bg'] = self.main_black_color\n self.window.title(\"Signin Dashboard\")\n # self.window.iconbitmap(\"\")\n self.window.resizable(False, False)\n\n # Login Dashboard Text \n login_dash_text = Label(window, text='Signin Dashboard',\n font=(\"Roboto Regular\", 36),\n fg=self.main_white_color,bg=self.main_black_color)\n login_dash_text.place(x=0,y=0)\n\n # ADMIN LOGIN BUTTON\n self.admin_image_open = Image.open('images/adminbtn.png')\n self.admin_image_open = self.admin_image_open.resize((380, 100), Image.ANTIALIAS)\n self.admin_login_img = ImageTk.PhotoImage(self.admin_image_open)\n\n self.admin_login_btn = Button(window, image=self.admin_login_img,cursor='hand2', \n borderwidth=0,border=0,bg=self.main_black_color,\n command=self.open_login_window)\n self.admin_login_btn.image = self.admin_login_img\n self.admin_login_btn.place(x=490, y=180)\n\n # USER LOGIN BUTTON\n self.user_image_open = Image.open('images/userloginbtn.png')\n self.user_image_open = self.user_image_open.resize((380, 100), Image.ANTIALIAS)\n self.user_login_img = ImageTk.PhotoImage(self.user_image_open)\n\n self.user_login_btn = Button(window, image=self.user_login_img,cursor='hand2',\n borderwidth=0,border=0,bg=self.main_black_color,\n command=self.open_login_window)\n self.user_login_btn.image = self.user_login_img\n self.user_login_btn.place(x=490, y=340)\n\n def open_login_window(self):\n self.newWindow = Toplevel(self.window)\n self.app = LoginDash(self.newWindow)\n\ndef run_func():\n window = Tk()\n SigninDash(window)\n window.mainloop()\n \nif __name__ == '__main__':\n run_func()","sub_path":"sqlite_tkinter/signin_dash.py","file_name":"signin_dash.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273946358","text":"import 
datetime\nimport csv\nimport nmap\nimport time\nimport os\n\ndef isNotEmpty(path):\n return os.path.isfile(path) and os.path.getsize(path) > 0\n\nprint(\"Date: \",datetime.datetime.now())\n\nprint(time.time())\n\nnm = nmap.PortScanner()\n\nwith open ('conexiones.csv','wt') as csvfile:\n \n filewriter = csv.writer(csvfile,delimiter=',')\n\n if(not isNotEmpty('conexiones.csv')):\n filewriter.writerow(['Date','MAC Address'])\n \n\n while True:\n nm.scan('192.168.0.0/24', arguments='-sP')\n for h in nm.all_hosts():\n if 'mac' in nm[h]['addresses']:\n print(datetime.datetime.now(),nm[h]['addresses']['mac'])\n filewriter.writerow([datetime.datetime.now(),nm[h]['addresses']['mac']])\n time.sleep(10*60)\n \n\n","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529035910","text":"import time\nfrom datetime import datetime\n\nfrom config import DURATIONS\nfrom const import TRIAL_PHASES\nfrom gui import gui\nfrom utils import prepare_out\n\n\ndef do_trial(trial, send):\n now = datetime.now()\n\n print(trial, now)\n\n gui.show_text('+')\n time.sleep(DURATIONS['DISPLAY_FIXATION_CROSS'])\n gui.remove_text()\n\n gui.show_image(trial[\"stim\"])\n\n # this \"callback\" is used to notify the server to start recording as image has loaded and displayed\n if (trial['phase'] == TRIAL_PHASES['SPEAKER']):\n send(\n prepare_out(\n \"prime and image shown\"\n )\n )\n\n time.sleep(DURATIONS['DISPLAY_IMAGE_TIME'])\n gui.remove_image()\n","sub_path":"client/trial/do_trial.py","file_name":"do_trial.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532280837","text":"from django.urls import path, re_path\nfrom apps.account import views\n\napp_name = 'account'\n\nurlpatterns = [\n path('login/', views.user_login, name='login'),\n path('logout/', views.user_logout, name='logout'),\n path('dashboard/', views.dashboard, name='dashboard'),\n # re_path('dashboard/', views.dashboard, name='dashboard'),\n path('edit/', views.edit_profile, name=\"edit\"),\n # users\n path('users/', views.user_list, name='user_list'),\n # user follow\n path('users/follow', views.user_follow, name='user_follow'),\n # re_path(r'^users/(?P[-\\w]+)/$', views.user_detail, name='user_detail'),\n path('users//', views.user_detail, name='user_detail'),\n]\n","sub_path":"apps/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329746862","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom typing import List\n\nimport pandas as pd\n\nfrom zvt.api.rules import iterate_timestamps, is_open_time, is_in_finished_timestamps, is_close_time\nfrom zvt.charts.business import draw_account_details, draw_order_signals\nfrom zvt.domain import SecurityType, TradingLevel, Provider\nfrom zvt.selectors.selector import TargetSelector\nfrom zvt.trader import TradingSignal, TradingSignalType\nfrom zvt.trader.account import SimAccountService\nfrom zvt.utils.time_utils import to_pd_timestamp\n\nlogger = logging.getLogger(__name__)\n\n\n# overwrite it to custom your selector comparator\nclass SelectorsComparator(object):\n\n def __init__(self, selectors: List[TargetSelector]) -> None:\n self.selectors: List[TargetSelector] = selectors\n\n def make_decision(self, timestamp, trading_level: TradingLevel):\n 
raise NotImplementedError\n\n\n# a selector comparator that selects the targets ordered by score and limits the number of targets\nclass LimitSelectorsComparator(SelectorsComparator):\n\n def __init__(self, selectors: List[TargetSelector], limit=10) -> None:\n super().__init__(selectors)\n self.limit = limit\n\n def make_decision(self, timestamp, trading_level: TradingLevel):\n df_result = pd.DataFrame()\n for selector in self.selectors:\n if selector.level == trading_level:\n df = selector.get_targets(timestamp)\n if not df.empty:\n df = df.sort_values(by=['score', 'security_id'])\n if len(df.index) > self.limit:\n df = df.iloc[list(range(self.limit)), :]\n df_result = df_result.append(df)\n return df_result\n\n\nclass TargetsSlot(object):\n\n def __init__(self) -> None:\n self.level_map_targets = {}\n\n def input_targets(self, level: TradingLevel, targets: List[str]):\n logger.info('level:{},old targets:{},new targets:{}'.format(level,\n self.get_targets(level), targets))\n self.level_map_targets[level.value] = targets\n\n def get_targets(self, level: TradingLevel):\n return self.level_map_targets.get(level.value)\n\n\nclass Trader(object):\n logger = logging.getLogger(__name__)\n\n def __init__(self, security_type=SecurityType.stock, exchanges=['sh', 'sz'], codes=None,\n start_timestamp=None,\n end_timestamp=None,\n provider=Provider.JOINQUANT,\n trading_level=TradingLevel.LEVEL_1DAY,\n trader_name=None) -> None:\n if trader_name:\n self.trader_name = trader_name\n else:\n self.trader_name = type(self).__name__.lower()\n self.trading_signal_listeners = []\n self.state_listeners = []\n\n self.selectors: List[TargetSelector] = None\n\n self.security_type = security_type\n self.exchanges = exchanges\n self.codes = codes\n # make sure the min level selector corresponds to the provider and level\n self.provider = provider\n self.trading_level = trading_level\n\n if start_timestamp and end_timestamp:\n self.start_timestamp = to_pd_timestamp(start_timestamp)\n self.end_timestamp = to_pd_timestamp(end_timestamp)\n else:\n assert False\n\n self.account_service = SimAccountService(trader_name=self.trader_name,\n timestamp=self.start_timestamp,\n provider=self.provider,\n level=self.trading_level)\n\n self.add_trading_signal_listener(self.account_service)\n\n self.init_selectors(security_type=self.security_type, exchanges=self.exchanges, codes=self.codes,\n start_timestamp=self.start_timestamp, end_timestamp=self.end_timestamp)\n\n self.selectors_comparator = LimitSelectorsComparator(self.selectors)\n\n self.trading_level_asc = list(set([TradingLevel(selector.level) for selector in self.selectors]))\n self.trading_level_asc.sort()\n\n self.trading_level_desc = list(self.trading_level_asc)\n self.trading_level_desc.reverse()\n\n self.targets_slot: TargetsSlot = TargetsSlot()\n\n def init_selectors(self, security_type, exchanges, codes, start_timestamp, end_timestamp):\n \"\"\"\n implement this to init selectors\n\n :param security_type:\n :type security_type:\n :param exchanges:\n :type exchanges:\n :param codes:\n :type codes:\n :param start_timestamp:\n :type start_timestamp:\n :param end_timestamp:\n :type end_timestamp:\n \"\"\"\n raise NotImplementedError\n\n def add_trading_signal_listener(self, listener):\n if listener not in self.trading_signal_listeners:\n self.trading_signal_listeners.append(listener)\n\n def remove_trading_signal_listener(self, listener):\n if listener in self.trading_signal_listeners:\n self.trading_signal_listeners.remove(listener)\n\n def handle_targets_slot(self, timestamp):\n 
# the default behavior is selecting the targets in all levels\n selected = None\n for level in self.trading_level_desc:\n targets = self.targets_slot.get_targets(level=level)\n if not targets:\n targets = {}\n\n if not selected:\n selected = targets\n else:\n selected = selected & targets\n\n if selected:\n self.logger.info('timestamp:{},selected:{}'.format(timestamp, selected))\n\n self.send_trading_signals(timestamp=timestamp, selected=selected)\n\n def send_trading_signals(self, timestamp, selected):\n # current position\n account = self.account_service.latest_account\n current_holdings = [position['security_id'] for position in account['positions']]\n\n if selected:\n # just long the security not in the positions\n longed = selected - set(current_holdings)\n if longed:\n position_pct = 1.0 / len(longed)\n order_money = account['cash'] * position_pct\n\n for security_id in longed:\n trading_signal = TradingSignal(security_id=security_id,\n the_timestamp=timestamp,\n trading_signal_type=TradingSignalType.trading_signal_open_long,\n trading_level=self.trading_level,\n order_money=order_money)\n for listener in self.trading_signal_listeners:\n listener.on_trading_signal(trading_signal)\n\n # just short the security not in the selected but in current_holdings\n if selected:\n shorted = set(current_holdings) - selected\n else:\n shorted = set(current_holdings)\n\n for security_id in shorted:\n trading_signal = TradingSignal(security_id=security_id,\n the_timestamp=timestamp,\n trading_signal_type=TradingSignalType.trading_signal_close_long,\n position_pct=1.0,\n trading_level=self.trading_level)\n for listener in self.trading_signal_listeners:\n listener.on_trading_signal(trading_signal)\n\n def on_finish(self):\n draw_account_details(trader_name=self.trader_name)\n draw_order_signals(trader_name=self.trader_name)\n\n def run(self):\n # iterate timestamp of the min level\n for timestamp in iterate_timestamps(security_type=self.security_type, exchange=self.exchanges[0],\n start_timestamp=self.start_timestamp, end_timestamp=self.end_timestamp,\n level=self.trading_level):\n # on_trading_open to setup the account\n if self.trading_level == TradingLevel.LEVEL_1DAY or (\n self.trading_level != TradingLevel.LEVEL_1DAY and is_open_time(security_type=self.security_type,\n exchange=self.exchanges[0],\n timestamp=timestamp)):\n self.account_service.on_trading_open(timestamp)\n\n # the time always move on by min level step and we could check all level targets in the slot\n self.handle_targets_slot(timestamp=timestamp)\n\n for level in self.trading_level_asc:\n # in every cycle, all level selector do its job in its time\n if (is_in_finished_timestamps(security_type=self.security_type, exchange=self.exchanges[0],\n timestamp=timestamp, level=level)):\n df = self.selectors_comparator.make_decision(timestamp=timestamp,\n trading_level=level)\n if not df.empty:\n selected = set(df['security_id'].to_list())\n else:\n selected = {}\n\n self.targets_slot.input_targets(level, selected)\n\n # on_trading_close to calculate date account\n if self.trading_level == TradingLevel.LEVEL_1DAY or (\n self.trading_level != TradingLevel.LEVEL_1DAY and is_close_time(security_type=self.security_type,\n exchange=self.exchanges[0],\n timestamp=timestamp)):\n self.account_service.on_trading_close(timestamp)\n\n self.on_finish()\n","sub_path":"zvt/trader/trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":10039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"496007967","text":"import numpy as np \nimport pandas as pd \nimport os\nimport re\n\n\n'''\n上海租房租金预测第一部分:数据初步清洗\n\n通过探查数据发现数据问题:\n 面积和房租字段含有字符,为字符型;\n 租赁方式字段相同类别部分含有字符;\n 租赁方式中分整租和合租,合租所对应的房屋面积部分为真实合租面积,部分为整屋面积\n 公交站字段含有多种信息,可以提取单独成列\n 户型可以分割单独成列\n 地址中含有非上海房源信息,数据量非常小\n 朝向中大部分数据朝南\n 面积、朝向等含有缺失值\n\n初步清洗中清洗字段:\n 面积、房租字段提取数字,转为浮点型\n 统一租赁方式字段中的类别\n 提取公交车站中的地铁线、地铁站名和距离信息\n 分割户型字段\n 填充面积缺失值\n 将朝向字段制作为哑变量\n 为了方便获取地理编码,对地址做一下处理\n'''\n\ndef area_rent(s):\n '''\n 清洗面积和房租字段,提取数字,转为浮点型\n '''\n if pd.isnull(s):\n return s\n else:\n return float(re.match(r'(\\d*)', s).group(1))\n\n\ndef stations(s):\n '''\n 清洗公交站字段,提取地铁线路/地铁站/距离\n '''\n if pd.notnull(s):\n station_match = re.match(r'距(\\d+)号线(\\D+)站约(\\d+)米', s)\n if station_match:\n return station_match.groups()\n else:\n return np.nan\n else:\n return s\n\n\ndef house_type(s):\n '''\n 清洗户型字段,提取数字\n '''\n ht_match = re.match(r'(\\d+)室(\\d+)厅', s)\n if ht_match:\n return ht_match.groups()\n\n\n\nif __name__ == '__main__':\n\n os.chdir(r'D:\\工作\\数据分析\\微专业\\项目数据\\练习10_住宅租金预测')\n\n # 建立空dataframe\n df_all = pd.DataFrame()\n\n # pd.set_option('display.max_columns', None)\n\n # 读取文件\n for f in os.walk(r'data'):\n flis = f[2]\n \n # 循环读取单个csv文件,清洗数据,合并\n print('cleaning...')\n for f in flis:\n df = pd.read_csv('data/' + f)\n\n # 清洗面积字段\n df['area'] = df['面积'].map(area_rent)\n \n # 清洗房租字段\n df['rent_month'] = df['房租'].map(area_rent)\n\n # 清洗户型字段\n df['rooms'] = df['户型'].map(house_type).str[0]\n df['halls'] = df['户型'].map(house_type).str[1]\n\n # 清洗朝向字段\n df['sorth_ori'] = df['朝向'].map(lambda x: 1 if x == '朝南' else 0)\n\n # 清洗公交站字段\n df['sub_line'] = df['公交站'].map(stations).str[0]\n df['sub_station'] = df['公交站'].map(stations).str[1]\n df['sub_distance'] = df['公交站'].map(stations).str[2].astype('float')\n\n # 清洗租赁方式字段\n df['lease_mode'] = df['租赁方式'].str.rstrip('㎡')\n\n # 地址\n df['address'] = df['城市'] + '市' + df['区域'] + '区' + df['地段']\n\n # 填充面积缺失值\n df['area'].fillna(df['area'].median(), inplace=True)\n\n\n # 提取需要的字段\n df_sel = df.copy()[['area', 'rent_month', 'rooms', 'halls', 'lease_mode', 'sorth_ori', 'address', '区域', 'sub_line', 'sub_station', 'sub_distance']]\n\n # 合并dataframe\n df_all = pd.concat([df_sel, df_all])\n\n # 保存到本地\n df_all.to_csv('data_cleaned.csv', index=False)\n print('clean done!')\n","sub_path":"pra_houserentpre_datapro.py","file_name":"pra_houserentpre_datapro.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"626979779","text":"# Day 1 HDF File set on arc:\nfrom db import dbfunctions as dbfn\nfiles = dict()\n\nfiles['0'] = dict(\npath = '/home/lab/preeya/fa_analysis/grom_data/ratbmi_data/',\nfiles = ['grom060616_1627.hdf', 'grom060616_1642.hdf', 'grom060616_1708.hdf', \n\t'grom060616_1719.hdf', 'grom060616_1730.hdf', 'grom060616_1741.hdf', 'grom060616_1752.hdf'],\nbaseline = 5402,\ndecoder = 'grom20160606_06_grom20160606_07_te5402_rat_bmi_decoder_0.pkl',\n)\n\nte_list = [5417, 5419, 5421, 5422, 5423, 5424]\nfiles['1'] = dict(\n#Day 2 TE_list: \nte_list = te_list,\npath = '',\nfiles = [dbfn.TaskEntry(te).hdf_filename for te in te_list],\nbasline = 5416,\n)\n\nte_list = [5426, 5427, 5428]\nfiles['2'] = dict(\n#Day 3 TE_list: \nte_list = te_list,\npath = '',\nfiles = [dbfn.TaskEntry(te).hdf_filename for te in te_list],\nbaseline = 5425,\n)\n\nte_list = [5434, 5435]\nfiles['3'] = dict(\n#Day 4 TE_List:\nte_list = te_list,\npath = '',\nfiles = [dbfn.TaskEntry(te).hdf_filename for te in te_list],\nbaseline = 5431,\n)\n\nte_list = 
[5436, 5438, 5439, 5440, 5441]\nfiles['4'] = dict(\n#Day 4 TE_LIST reward t2: \nte_list = te_list,\npath = '',\nfiles = [dbfn.TaskEntry(te).hdf_filename for te in te_list],\nbaseline = 5431,\n)","sub_path":"rat_bmi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647100151","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom blogApp.models import Post, blogComment\nfrom django.contrib import messages\nfrom blogApp.templatetags import extras\n\n# Create your views here.\ndef blogHome(request):\n # To pull all the objects\n allPosts = Post.objects.all()\n context = {'allPosts' : allPosts}\n return render(request, 'blogApp/blogHome.html', context)\n\ndef blogPost(request, slug):\n # To get the object from the query set\n post = Post.objects.filter(slug = slug).first()\n # Get the comments for the corresponding posts\n comments = blogComment.objects.filter(post = post, parent = None)\n replies = blogComment.objects.filter(post = post).exclude(parent = None)\n # counting the views\n post.views = post.views + 1\n post.save()\n\n replyDict = {}\n for reply in replies:\n if reply.parent.sno not in replyDict.keys():\n replyDict[reply.parent.sno] = [reply]\n else:\n replyDict[reply.parent.sno].append(reply)\n\n\n context = {'post' : post, 'comments': comments, 'user': request.user, 'replyDict': replyDict}\n return render(request, 'blogApp/blogPost.html', context)\n\ndef postComment(request):\n if request.method == 'POST':\n comment = request.POST.get(\"comment\")\n user = request.user\n postSno = request.POST.get(\"postSno\")\n post = Post.objects.get(sno = postSno)\n parentSno = request.POST.get(\"parentSno\")\n\n if parentSno == \"\" :\n comment = blogComment(comment = comment, user = user, post = post)\n comment.save()\n messages.success(request, \"Your comment has been posted\")\n else:\n parent = blogComment.objects.get(sno = parentSno)\n comment = blogComment(comment = comment, user = user, post = post, parent = parent)\n comment.save()\n messages.success(request, \"Your reply has been posted\")\n\n \n # Redirect back to the blog post\n return redirect(f'/blog/{post.slug}')\n \n\n","sub_path":"blogApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"283581657","text":"import plotly.graph_objects as go\r\nimport pandas, module\r\n\r\ndata = pandas.read_csv(\"C:/Users/Usuario/Documents/Visual Studio Code/dashboard/twitch_data/top_games_twitch_stats.csv\")[:8]\r\ngames = list(data[\"Nome\"]) #lista com os nomes\r\ngames.reverse() #o reverse se faz necessário para que o plotly disponha os primeiros colocados na parte de cima\r\naverage_views = list(data[\"Visualização Média\"])\r\naverage_views.reverse()\r\naverage_views = list(map(lambda view : \"Visualização Média: \" + str(view), average_views))\r\npercentages = list(map(lambda percentage : float(percentage[:-1]), data[\"Frequência no Top 3\"])) #fazer lista com as porcentagens,\r\npercentages.reverse() #eliminando o \"%\" e convertendo em float\r\n\r\n\r\nfig = go.Figure()\r\n\r\nfig.add_trace(go.Bar(\r\n y= games, #os nomes comporão a ordenada e as porcentagens, a abscissa\r\n x=percentages,\r\n text = average_views,\r\n orientation='h', #vertical ou horizontal\r\n marker=dict(\r\n color='#9147ff', #cor da barra\r\n line=dict(color='#9147ff', 
width=3) #cor da borda da barra\r\n )\r\n))\r\n\r\nfig.update_layout(title = \"Top games da twitch por frequência no top 3\",\r\n paper_bgcolor = 'white',\r\n plot_bgcolor = 'white')\r\n\r\n\r\nfor i in range(8):\r\n module.layout(fig, percentages[i], games[i], 5)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n fig.show()\r\n","sub_path":"module_approach/twitch/top_games_twitch_plot.py","file_name":"top_games_twitch_plot.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499995425","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport copy\nimport numpy as np\nfrom torchvision import datasets, transforms\nimport torch\nimport random\nimport csv\n\nfrom utils.sampling import mnist_iid, mnist_noniid, cifar_iid\nfrom utils.options import args_parser\nfrom models.Update import LocalUpdate\nfrom models.Nets import MLP, CNNMnist, CNNCifar, customCNNCifar\nfrom models.Fed import FedAvg\nfrom models.test import test_img\nfrom collections import OrderedDict,defaultdict\n\nfrom torch.autograd import Variable\n\nif __name__ == '__main__':\n # parse args\n args = args_parser()\n args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')\n\n # load dataset and split users\n if args.dataset == 'mnist':\n trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)\n dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)\n # sample users\n if args.iid:\n dict_users = mnist_iid(dataset_train, args.num_users)\n else:\n dict_users = mnist_noniid(dataset_train, args.num_users)\n elif args.dataset == 'cifar':\n trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n dataset_train = datasets.CIFAR10('data/cifar', train=True, download=True, transform=trans_cifar)\n dataset_test = datasets.CIFAR10('data/cifar', train=False, download=True, transform=trans_cifar)\n if args.iid:\n dict_users = cifar_iid(dataset_train, args.num_users)\n else:\n exit('Error: only consider IID setting in CIFAR10')\n else:\n exit('Error: unrecognized dataset')\n img_size = dataset_train[0][0].shape\n\n # build model\n if args.model == 'cnn' and args.dataset == 'cifar':\n net_glob = CNNCifar(args=args).to(args.device)\n net_glob = customCNNCifar(args=args).to(args.device)\n net_glob1 = customCNNCifar(args=args).to(args.device)\n net_glob5 = customCNNCifar(args=args).to(args.device)\n net_glob10 = customCNNCifar(args=args).to(args.device)\n net_glob15 = customCNNCifar(args=args).to(args.device)\n net_glob20 = customCNNCifar(args=args).to(args.device)\n net_glob25 = customCNNCifar(args=args).to(args.device)\n net_glob30 = customCNNCifar(args=args).to(args.device)\n\n elif args.model == 'cnn' and args.dataset == 'mnist':\n net_glob = CNNMnist(args=args).to(args.device)\n\n elif args.model == 'mlp':\n len_in = 1\n for x in img_size:\n len_in *= x\n net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)\n else:\n exit('Error: unrecognized model')\n print(net_glob)\n net_glob.train()\n net_glob1.train()\n net_glob5.train()\n net_glob10.train()\n net_glob15.train()\n net_glob20.train()\n net_glob25.train()\n 
net_glob30.train()\n\n # copy weights\n w_glob = net_glob.state_dict()\n w_glob1 = net_glob1.state_dict()\n w_glob5 = net_glob5.state_dict()\n w_glob10 = net_glob10.state_dict()\n w_glob15 = net_glob15.state_dict()\n w_glob20 = net_glob20.state_dict()\n w_glob25 = net_glob25.state_dict()\n w_glob30 = net_glob30.state_dict()\n\n # training - NO ATTACK\n loss_train = []\n cv_loss, cv_acc = [], []\n val_loss_pre, counter = 0, 0\n net_best = None\n best_loss = None\n val_acc_list, net_list = [], []\n\n #VIVEK constant attack experiment - 1 MALICIOUS\n loss_train_1 = []\n fixed_agent_1 = random.randint(0,31) #random agent between 0 and 31 is fixed\n updates_recorded_1 = False\n fixed_agent_storage_1 = None\n count_array_1 = []\n selection_count_1 = 0\n\n #VIVEK constant attack experiment - 5 MALICIOUS\n loss_train_5 = []\n fixed_agent_5 = random.sample(range(32),5)\n updates_recorded_mapping_5 = defaultdict(bool)\n selection_count_5 = defaultdict(int)\n for i in fixed_agent_5:\n updates_recorded_mapping_5[i] = False #KEY = agent no. & VAL = boolean\n selection_count_5[i] = 0\n fixed_agent_storage_mapping_5 = {} #KEY = agent no. & VAL = Fixed Updates\n count_array_5 = []\n\n\n #VIVEK constant attack experiment - 10 MALICIOUS\n loss_train_10 = []\n fixed_agent_10 = random.sample(range(32),10)\n updates_recorded_mapping_10 = defaultdict(bool)\n selection_count_10 = defaultdict(int)\n for i in fixed_agent_10:\n updates_recorded_mapping_10[i] = False\n selection_count_10[i] = 0\n fixed_agent_storage_mapping_10 = {}\n count_array_10 = []\n\n #VIVEK constant attack experiment - 15 MALICIOUS\n loss_train_15 = []\n fixed_agent_15 = random.sample(range(32),15)\n updates_recorded_mapping_15 = defaultdict(bool)\n selection_count_15 = defaultdict(int)\n for i in fixed_agent_15:\n updates_recorded_mapping_15[i] = False\n selection_count_15[i] = 0\n fixed_agent_storage_mapping_15 = {}\n count_array_15 = []\n\n #VIVEK constant attack experiment - 20 MALICIOUS\n loss_train_20 = []\n fixed_agent_20 = random.sample(range(32),20)\n updates_recorded_mapping_20 = defaultdict(bool)\n selection_count_20 = defaultdict(int)\n for i in fixed_agent_20:\n updates_recorded_mapping_20[i] = False\n selection_count_20[i] = 0\n fixed_agent_storage_mapping_20 = {}\n count_array_20 = []\n\n #VIVEK constant attack experiment - 25 MALICIOUS\n loss_train_25 = []\n fixed_agent_25 = random.sample(range(32),25)\n updates_recorded_mapping_25 = defaultdict(bool)\n selection_count_25 = defaultdict(int)\n for i in fixed_agent_25:\n updates_recorded_mapping_25[i] = False\n selection_count_25[i] = 0\n fixed_agent_storage_mapping_25 = {}\n count_array_25 = []\n\n #VIVEK constant attack experiment - 30 MALICIOUS\n loss_train_30 = []\n fixed_agent_30 = random.sample(range(32),30)\n updates_recorded_mapping_30 = defaultdict(bool)\n selection_count_30 = defaultdict(int)\n for i in fixed_agent_30:\n updates_recorded_mapping_30[i] = False\n selection_count_30[i] = 0\n fixed_agent_storage_mapping_30 = {}\n count_array_30 = []\n\n malicious_dict = {1:[fixed_agent_1],5:fixed_agent_5,10:fixed_agent_10,15:fixed_agent_15,20:fixed_agent_20,25:fixed_agent_25,30:fixed_agent_30}\n malicious_count ={1:0,5:0,10:0,15:0,20:0,25:0,30:0}\n\n for iter in range(args.epochs):\n #agent_found_count = 0\n w_locals, loss_locals = [], [] #w_locals = array of local_weights\n w_locals_1, loss_locals_1 = [],[]\n w_locals_5, loss_locals_5 = [],[]\n w_locals_10, loss_locals_10 = [],[]\n w_locals_15, loss_locals_15 = [],[]\n w_locals_20, loss_locals_20 = [],[]\n w_locals_25, 
loss_locals_25 = [],[]\n w_locals_30, loss_locals_30 = [],[]\n m = max(int(args.frac * args.num_users), 1) #m = number of users used in one ROUND/EPOCH, check utils.options for more clarity on this\n idxs_users = np.random.choice(range(args.num_users), m, replace=False) #Randomly selecting m users out of 32 users. NEED TO REPLACE THIS WITH OUR SAMPLING MECHANISM\n\n for idx in idxs_users:\n local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local1 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local5 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local10 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local15 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local20 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local25 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local30 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n\n w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))\n w1, loss1 = local1.train(net=copy.deepcopy(net_glob1).to(args.device))\n w5, loss5 = local5.train(net=copy.deepcopy(net_glob5).to(args.device))\n w10, loss10 = local10.train(net=copy.deepcopy(net_glob10).to(args.device))\n w15, loss15 = local15.train(net=copy.deepcopy(net_glob15).to(args.device))\n w20, loss20 = local20.train(net=copy.deepcopy(net_glob20).to(args.device))\n w25, loss25 = local25.train(net=copy.deepcopy(net_glob25).to(args.device))\n w30, loss30 = local30.train(net=copy.deepcopy(net_glob30).to(args.device))\n print(\"***BLAH BLAH BLAH***\")\n\n\n if idx==fixed_agent_1:\n if w_glob1:\n noise_conv1_weight = Variable(w_glob1['conv1.weight'].data.new(w_glob1['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob1['conv1.bias'].data.new(w_glob1['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob1['conv2.weight'].data.new(w_glob1['conv2.weight'].size()).normal_(0,0.01))\n noise_conv2_bias = Variable(w_glob1['conv2.bias'].data.new(w_glob1['conv2.bias'].size()).normal_(0,0.01))\n noise_fc1_weight = Variable(w_glob1['fc1.weight'].data.new(w_glob1['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob1['fc1.bias'].data.new(w_glob1['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob1['fc2.weight'].data.new(w_glob1['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob1['fc2.bias'].data.new(w_glob1['fc2.bias'].size()).normal_(0,0.01))\n\n w1['conv1.weight']=w_glob1['conv1.weight']+noise_conv1_weight\n w1['conv1.bias']=w_glob1['conv1.bias']+noise_conv1_bias\n w1['conv2.weight']=w_glob1['conv2.weight']+noise_conv2_weight\n w1['conv2.bias']=w_glob1['conv2.bias']+noise_conv2_bias\n w1['fc1.weight']=w_glob1['fc1.weight']+noise_fc1_weight\n w1['fc1.bias']=w_glob1['fc1.bias']+noise_fc1_bias\n w1['fc2.weight']=w_glob1['fc2.weight']+noise_fc2_weight\n w1['fc2.bias']=w_glob1['fc2.bias']+noise_fc2_bias\n\n\n if idx in fixed_agent_5:\n malicious_count[5]+=1\n if w_glob5:\n noise_conv1_weight = Variable(w_glob5['conv1.weight'].data.new(w_glob5['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob5['conv1.bias'].data.new(w_glob5['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob5['conv2.weight'].data.new(w_glob5['conv2.weight'].size()).normal_(0,0.01))\n noise_conv2_bias = Variable(w_glob5['conv2.bias'].data.new(w_glob5['conv2.bias'].size()).normal_(0,0.01))\n 
noise_fc1_weight = Variable(w_glob5['fc1.weight'].data.new(w_glob5['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob5['fc1.bias'].data.new(w_glob5['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob5['fc2.weight'].data.new(w_glob5['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob5['fc2.bias'].data.new(w_glob5['fc2.bias'].size()).normal_(0,0.01))\n\n w5['conv1.weight']=w_glob5['conv1.weight']+noise_conv1_weight\n w5['conv1.bias']=w_glob5['conv1.bias']+noise_conv1_bias\n w5['conv2.weight']=w_glob5['conv2.weight']+noise_conv2_weight\n w5['conv2.bias']=w_glob5['conv2.bias']+noise_conv2_bias\n w5['fc1.weight']=w_glob5['fc1.weight']+noise_fc1_weight\n w5['fc1.bias']=w_glob5['fc1.bias']+noise_fc1_bias\n w5['fc2.weight']=w_glob5['fc2.weight']+noise_fc2_weight\n w5['fc2.bias']=w_glob5['fc2.bias']+noise_fc2_bias\n\n if idx in fixed_agent_10:\n malicious_count[10]+=1\n if w_glob10:\n noise_conv1_weight = Variable(w_glob10['conv1.weight'].data.new(w_glob10['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob10['conv1.bias'].data.new(w_glob10['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob10['conv2.weight'].data.new(w_glob10['conv2.weight'].size()).normal_(0,0.01))\n noise_conv2_bias = Variable(w_glob10['conv2.bias'].data.new(w_glob10['conv2.bias'].size()).normal_(0,0.01))\n noise_fc1_weight = Variable(w_glob10['fc1.weight'].data.new(w_glob10['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob10['fc1.bias'].data.new(w_glob10['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob10['fc2.weight'].data.new(w_glob10['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob10['fc2.bias'].data.new(w_glob10['fc2.bias'].size()).normal_(0,0.01))\n\n w10['conv1.weight']=w_glob10['conv1.weight']+noise_conv1_weight\n w10['conv1.bias']=w_glob10['conv1.bias']+noise_conv1_bias\n w10['conv2.weight']=w_glob10['conv2.weight']+noise_conv2_weight\n w10['conv2.bias']=w_glob10['conv2.bias']+noise_conv2_bias\n w10['fc1.weight']=w_glob10['fc1.weight']+noise_fc1_weight\n w10['fc1.bias']=w_glob10['fc1.bias']+noise_fc1_bias\n w10['fc2.weight']=w_glob10['fc2.weight']+noise_fc2_weight\n w10['fc2.bias']=w_glob10['fc2.bias']+noise_fc2_bias\n\n if idx in fixed_agent_15:\n malicious_count[15]+=1\n if w_glob15:\n noise_conv1_weight = Variable(w_glob15['conv1.weight'].data.new(w_glob15['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob15['conv1.bias'].data.new(w_glob15['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob15['conv2.weight'].data.new(w_glob15['conv2.weight'].size()).normal_(0,0.01))\n noise_conv2_bias = Variable(w_glob15['conv2.bias'].data.new(w_glob15['conv2.bias'].size()).normal_(0,0.01))\n noise_fc1_weight = Variable(w_glob15['fc1.weight'].data.new(w_glob15['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob15['fc1.bias'].data.new(w_glob15['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob15['fc2.weight'].data.new(w_glob15['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob15['fc2.bias'].data.new(w_glob15['fc2.bias'].size()).normal_(0,0.01))\n\n w15['conv1.weight']=w_glob15['conv1.weight']+noise_conv1_weight\n w15['conv1.bias']=w_glob15['conv1.bias']+noise_conv1_bias\n w15['conv2.weight']=w_glob15['conv2.weight']+noise_conv2_weight\n w15['conv2.bias']=w_glob15['conv2.bias']+noise_conv2_bias\n 
w15['fc1.weight']=w_glob15['fc1.weight']+noise_fc1_weight\n w15['fc1.bias']=w_glob15['fc1.bias']+noise_fc1_bias\n w15['fc2.weight']=w_glob15['fc2.weight']+noise_fc2_weight\n w15['fc2.bias']=w_glob15['fc2.bias']+noise_fc2_bias\n\n if idx in fixed_agent_20:\n malicious_count[20]+=1\n if w_glob20:\n noise_conv1_weight = Variable(w_glob20['conv1.weight'].data.new(w_glob20['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob20['conv1.bias'].data.new(w_glob20['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob20['conv2.weight'].data.new(w_glob20['conv2.weight'].size()).normal_(0,0.01))\n noise_conv2_bias = Variable(w_glob20['conv2.bias'].data.new(w_glob20['conv2.bias'].size()).normal_(0,0.01))\n noise_fc1_weight = Variable(w_glob20['fc1.weight'].data.new(w_glob20['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob20['fc1.bias'].data.new(w_glob20['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob20['fc2.weight'].data.new(w_glob20['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob20['fc2.bias'].data.new(w_glob20['fc2.bias'].size()).normal_(0,0.01))\n\n w20['conv1.weight']=w_glob20['conv1.weight']+noise_conv1_weight\n w20['conv1.bias']=w_glob20['conv1.bias']+noise_conv1_bias\n w20['conv2.weight']=w_glob20['conv2.weight']+noise_conv2_weight\n w20['conv2.bias']=w_glob20['conv2.bias']+noise_conv2_bias\n w20['fc1.weight']=w_glob20['fc1.weight']+noise_fc1_weight\n w20['fc1.bias']=w_glob20['fc1.bias']+noise_fc1_bias\n w20['fc2.weight']=w_glob20['fc2.weight']+noise_fc2_weight\n w20['fc2.bias']=w_glob20['fc2.bias']+noise_fc2_bias\n\n if idx in fixed_agent_25:\n malicious_count[25]+=1\n if w_glob25:\n noise_conv1_weight = Variable(w_glob25['conv1.weight'].data.new(w_glob25['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob25['conv1.bias'].data.new(w_glob25['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob25['conv2.weight'].data.new(w_glob25['conv2.weight'].size()).normal_(0,0.01))\n noise_conv2_bias = Variable(w_glob25['conv2.bias'].data.new(w_glob25['conv2.bias'].size()).normal_(0,0.01))\n noise_fc1_weight = Variable(w_glob25['fc1.weight'].data.new(w_glob25['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob25['fc1.bias'].data.new(w_glob25['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob25['fc2.weight'].data.new(w_glob25['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob25['fc2.bias'].data.new(w_glob25['fc2.bias'].size()).normal_(0,0.01))\n\n w25['conv1.weight']=w_glob25['conv1.weight']+noise_conv1_weight\n w25['conv1.bias']=w_glob25['conv1.bias']+noise_conv1_bias\n w25['conv2.weight']=w_glob25['conv2.weight']+noise_conv2_weight\n w25['conv2.bias']=w_glob25['conv2.bias']+noise_conv2_bias\n w25['fc1.weight']=w_glob25['fc1.weight']+noise_fc1_weight\n w25['fc1.bias']=w_glob25['fc1.bias']+noise_fc1_bias\n w25['fc2.weight']=w_glob25['fc2.weight']+noise_fc2_weight\n w25['fc2.bias']=w_glob25['fc2.bias']+noise_fc2_bias\n\n if idx in fixed_agent_30:\n malicious_count[30]+=1\n #orig_glob = copy.deepcopy(w_glob)\n if w_glob30:\n noise_conv1_weight = Variable(w_glob30['conv1.weight'].data.new(w_glob30['conv1.weight'].size()).normal_(0,0.01))\n noise_conv1_bias = Variable(w_glob30['conv1.bias'].data.new(w_glob30['conv1.bias'].size()).normal_(0,0.01))\n noise_conv2_weight = Variable(w_glob30['conv2.weight'].data.new(w_glob30['conv2.weight'].size()).normal_(0,0.01))\n 
noise_conv2_bias = Variable(w_glob30['conv2.bias'].data.new(w_glob30['conv2.bias'].size()).normal_(0,0.01))\n noise_fc1_weight = Variable(w_glob30['fc1.weight'].data.new(w_glob30['fc1.weight'].size()).normal_(0,0.01))\n noise_fc1_bias = Variable(w_glob30['fc1.bias'].data.new(w_glob30['fc1.bias'].size()).normal_(0,0.01))\n noise_fc2_weight = Variable(w_glob30['fc2.weight'].data.new(w_glob30['fc2.weight'].size()).normal_(0,0.01))\n noise_fc2_bias = Variable(w_glob30['fc2.bias'].data.new(w_glob30['fc2.bias'].size()).normal_(0,0.01))\n\n w30['conv1.weight']=w_glob30['conv1.weight']+noise_conv1_weight\n w30['conv1.bias']=w_glob30['conv1.bias']+noise_conv1_bias\n w30['conv2.weight']=w_glob30['conv2.weight']+noise_conv2_weight\n w30['conv2.bias']=w_glob30['conv2.bias']+noise_conv2_bias\n w30['fc1.weight']=w_glob30['fc1.weight']+noise_fc1_weight\n w30['fc1.bias']=w_glob30['fc1.bias']+noise_fc1_bias\n w30['fc2.weight']=w_glob30['fc2.weight']+noise_fc2_weight\n w30['fc2.bias']=w_glob30['fc2.bias']+noise_fc2_bias\n\n\n\n\n #NO ATTACK\n w_locals.append(copy.deepcopy(w))\n loss_locals.append(copy.deepcopy(loss))\n\n #1 MALICIOUS\n w_locals_1.append(copy.deepcopy(w1))\n loss_locals_1.append(copy.deepcopy(loss1))\n\n #5 MALICIOUS\n w_locals_5.append(copy.deepcopy(w5))\n loss_locals_5.append(copy.deepcopy(loss5))\n\n #10 MALICIOUS\n w_locals_10.append(copy.deepcopy(w10))\n loss_locals_10.append(copy.deepcopy(loss10))\n\n #15 MALICIOUS\n w_locals_15.append(copy.deepcopy(w15))\n loss_locals_15.append(copy.deepcopy(loss15))\n\n #20 MALICIOUS\n w_locals_20.append(copy.deepcopy(w20))\n loss_locals_20.append(copy.deepcopy(loss20))\n\n #25 MALICIOUS\n w_locals_25.append(copy.deepcopy(w25))\n loss_locals_25.append(copy.deepcopy(loss25))\n\n #30 MALICIOUS\n w_locals_30.append(copy.deepcopy(w30))\n loss_locals_30.append(copy.deepcopy(loss30))\n\n # update global weights\n w_glob = FedAvg(w_locals)\n w_glob1 = FedAvg(w_locals_1)\n w_glob5 = FedAvg(w_locals_5)\n w_glob10 = FedAvg(w_locals_10)\n w_glob15 = FedAvg(w_locals_15)\n w_glob20 = FedAvg(w_locals_20)\n w_glob25 = FedAvg(w_locals_25)\n w_glob30 = FedAvg(w_locals_30)\n\n\n # copy weight to net_glob\n net_glob.load_state_dict(w_glob)\n net_glob1.load_state_dict(w_glob1)\n net_glob5.load_state_dict(w_glob5)\n net_glob10.load_state_dict(w_glob10)\n net_glob15.load_state_dict(w_glob15)\n net_glob20.load_state_dict(w_glob20)\n net_glob25.load_state_dict(w_glob25)\n net_glob30.load_state_dict(w_glob30)\n\n # print loss\n loss_avg = sum(loss_locals) / len(loss_locals)\n loss_avg_1 = sum(loss_locals_1) / len(loss_locals_1)\n loss_avg_5 = sum(loss_locals_5) / len(loss_locals_5)\n loss_avg_10 = sum(loss_locals_10) / len(loss_locals_10)\n loss_avg_15 = sum(loss_locals_15) / len(loss_locals_15)\n loss_avg_20 = sum(loss_locals_20) / len(loss_locals_20)\n loss_avg_25 = sum(loss_locals_25) / len(loss_locals_25)\n loss_avg_30 = sum(loss_locals_30) / len(loss_locals_30)\n\n\n print('NO ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))\n print('C1 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_1))\n print('C5 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_5))\n print('C10 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_10))\n print('C15 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_15))\n print('C20 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_20))\n print('C25 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_25))\n print('C30 ATTACK 
---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_30))\n #count_array.append(agent_found_count)\n loss_train.append(loss_avg)\n loss_train_1.append(loss_avg_1)\n loss_train_5.append(loss_avg_5)\n loss_train_10.append(loss_avg_10)\n loss_train_15.append(loss_avg_15)\n loss_train_20.append(loss_avg_20)\n loss_train_25.append(loss_avg_25)\n loss_train_30.append(loss_avg_30)\n\n # plot loss curve\n #plt.figure()\n #plt.subplots()\n #attack_no = plt.plot(range(len(loss_train)), loss_train)\n #attack_1 = plt.plot(range(len(loss_train_1)),loss_train_1)\n #plt.ylabel('train_loss')\n #plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))\n #print(\"COUNT DATA\",str(count_array))\n print(\"NO ATTACK DATA=\",loss_train)\n print(\"1 ATTACK DATA=\",loss_train_1)\n print(\"5 ATTACK DATA=\",loss_train_5)\n print(\"10 ATTACK DATA=\",loss_train_10)\n print(\"15 ATTACK DATA=\",loss_train_15)\n print(\"20 ATTACK DATA=\",loss_train_20)\n print(\"25 ATTACK DATA=\",loss_train_25)\n print(\"30 ATTACK DATA=\",loss_train_30)\n\n print(malicious_dict)\n print(malicious_count)\n # testing\n net_glob.eval()\n #print(\"Agent_Found_Count\",agent_found_count)\n acc_train, loss_train = test_img(net_glob, dataset_train, args)\n acc_test, loss_test = test_img(net_glob, dataset_test, args)\n print(\"Training accuracy (NO ATTACK): {:.2f}\".format(acc_train))\n print(\"Testing accuracy (NO ATTACK): {:.2f}\".format(acc_test))\n\n net_glob1.eval()\n acc_train1, loss_train_1 = test_img(net_glob1, dataset_train, args)\n acc_test1, loss_test_1 = test_img(net_glob1, dataset_test, args)\n print(\"Training accuracy (LAZY ATTACK 1): {:.2f}\".format(acc_train1))\n print(\"Testing accuracy (LAZY ATTACK 1): {:.2f}\".format(acc_test1))\n\n net_glob5.eval()\n acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)\n acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)\n print(\"Training accuracy (LAZY ATTACK 5): {:.2f}\".format(acc_train5))\n print(\"Testing accuracy (LAZY ATTACK 5): {:.2f}\".format(acc_test5))\n\n net_glob10.eval()\n acc_train10, loss_train_10 = test_img(net_glob10, dataset_train, args)\n acc_test10, loss_test_10 = test_img(net_glob10, dataset_test, args)\n print(\"Training accuracy (LAZY ATTACK 10): {:.2f}\".format(acc_train10))\n print(\"Testing accuracy (LAZY ATTACK 10): {:.2f}\".format(acc_test10))\n\n net_glob15.eval()\n acc_train15, loss_train_15 = test_img(net_glob15, dataset_train, args)\n acc_test15, loss_test_15 = test_img(net_glob15, dataset_test, args)\n print(\"Training accuracy (LAZY ATTACK 15): {:.2f}\".format(acc_train15))\n print(\"Testing accuracy (LAZY ATTACK 15): {:.2f}\".format(acc_test15))\n\n net_glob20.eval()\n acc_train20, loss_train_20 = test_img(net_glob20, dataset_train, args)\n acc_test20, loss_test_20 = test_img(net_glob20, dataset_test, args)\n print(\"Training accuracy (LAZY ATTACK 20): {:.2f}\".format(acc_train20))\n print(\"Testing accuracy (LAZY ATTACK 20): {:.2f}\".format(acc_test20))\n\n net_glob25.eval()\n acc_train25, loss_train_25 = test_img(net_glob25, dataset_train, args)\n acc_test25, loss_test_25 = test_img(net_glob25, dataset_test, args)\n print(\"Training accuracy (LAZY ATTACK 25): {:.2f}\".format(acc_train25))\n print(\"Testing accuracy (LAZY ATTACK 25): {:.2f}\".format(acc_test25))\n\n net_glob30.eval()\n acc_train30, loss_train_30 = test_img(net_glob30, dataset_train, args)\n acc_test30, loss_test_30 = test_img(net_glob30, dataset_test, args)\n print(\"Training accuracy (LAZY 
ATTACK 30): {:.2f}\".format(acc_train30))\n print(\"Testing accuracy (LAZY ATTACK 30): {:.2f}\".format(acc_test30))\n\n #write to csv\n with open(\"../CIFAR_Results/benchmarks/lazy_attack_data/lazy_n_data_1.csv\",\"w+\") as csv_file:\n my_writer = csv.writer(csv_file,delimiter=',')\n my_writer.writerow((\"NUM_ATTACKS\",\"TRAIN_ACCURACY\",\"TEST_ACCURACY\"))\n my_writer.writerow((0,acc_train.item(),acc_test.item()))\n my_writer.writerow((1,acc_train1.item(),acc_test1.item()))\n my_writer.writerow((5,acc_train5.item(),acc_test5.item()))\n my_writer.writerow((10,acc_train10.item(),acc_test10.item()))\n my_writer.writerow((15,acc_train15.item(),acc_test15.item()))\n my_writer.writerow((20,acc_train20.item(),acc_test20.item()))\n my_writer.writerow((25,acc_train25.item(),acc_test25.item()))\n my_writer.writerow((30,acc_train30.item(),acc_test30.item()))\n\n","sub_path":"federated-learning-master/lazy_attack_CIFAR.py","file_name":"lazy_attack_CIFAR.py","file_ext":"py","file_size_in_byte":28644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608114308","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nimport sh\nimport shutil\nimport tempfile\n\nfrom six.moves import configparser\n\nfrom dlrn.config import ConfigOptions\nfrom dlrn.drivers.gitrepo import GitRepoDriver\nfrom dlrn.shell import default_options\nfrom dlrn.tests import base\n\n\n@mock.patch.object(sh.Command, '__call__', autospec=True)\nclass TestDriverGit(base.TestCase):\n def setUp(self):\n super(TestDriverGit, self).setUp()\n config = configparser.RawConfigParser(default_options)\n config.read(\"projects.ini\")\n self.config = ConfigOptions(config)\n self.config.gitrepo_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n super(TestDriverGit, self).tearDown()\n shutil.rmtree(self.config.gitrepo_dir)\n\n @mock.patch('dlrn.drivers.gitrepo.refreshrepo')\n def test_getinfo(self, refresh_mock, sh_mock):\n refresh_mock.return_value = [None, None, None]\n driver = GitRepoDriver(cfg_options=self.config)\n package = {'upstream': 'test', 'name': 'test'}\n info = driver.getinfo(package=package, project=\"test\", dev_mode=True)\n self.assertEqual(info, [])\n\n @mock.patch('os.listdir')\n def test_getpackages(self, listdir_mock, sh_mock):\n listdir_mock.return_value = []\n driver = GitRepoDriver(cfg_options=self.config)\n packages = driver.getpackages(dev_mode=True)\n self.assertEqual(packages, [])\n","sub_path":"dlrn/tests/test_driver_git.py","file_name":"test_driver_git.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490731742","text":"import os\n\nfrom PySide import QtCore, QtGui\n\nfrom cgev.common.graph import FIELD_STATES\nfrom cgev.ui.window import Menu\nfrom cgev.ui.graph import Field\n\nfrom string import string\n\n\nclass stringUi(Field, string):\n def __init__(self, scene, node, entry=None):\n self._resolveDisplay = False\n\n 
super(stringUi, self).__init__(scene, node, entry=entry)\n\n @property\n def value(self):\n return super(stringUi, self.__class__).value.fget(self)\n\n @value.setter\n def value(self, value):\n super(stringUi, self.__class__).value.fset(self, value)\n\n '''\n def aeWidget(self, parent):\n ae_widget = super(stringUi, self).aeWidget(parent)\n\n self.valueEdit = ae_widget.widget.valueEdit\n self.valueEdit.textChanged.connect(self._editValue)\n self.valueEdit.editingFinished.connect(self._updateValue)\n self.valueEdit.setText(str(self._value))\n\n self.switchBtn = ae_widget.widget.switchBtn\n self.switchBtn.clicked.connect(self._switchResolveDisplay)\n\n self.insertBtn = ae_widget.widget.insertBtn\n self.insertBtn.clicked.connect(self._pickVariable)\n\n self._widgets.append(self.valueEdit)\n self._widgets.append(self.switchBtn)\n self._widgets.append(self.insertBtn)\n\n return ae_widget\n '''\n\n def _editValue(self):\n if self.valueEdit.text() == self._value:\n self.delState(FIELD_STATES.MODIFIED)\n elif self.valueEdit.isEnabled():\n self.addState(FIELD_STATES.MODIFIED)\n\n def _updateValue(self):\n self.delState(FIELD_STATES.MODIFIED)\n self._value = self.valueEdit.text()\n\n def _switchResolveDisplay(self):\n self._resolveDisplay = not self._resolveDisplay\n\n self.valueEdit.setEnabled(not self._resolveDisplay)\n\n value = str(self._value)\n if self._resolveDisplay:\n value = self.value\n\n label = 'S'\n if self._resolveDisplay:\n label = 'O'\n self.switchBtn.setText(label)\n\n self.valueEdit.setText(value)\n\n def _pickVariable(self):\n # save current position\n\n insertPos = self.valueEdit.cursorPosition()\n self._updateValue()\n\n # populate context menu\n\n base = QtGui.QMenu()\n menu = Menu(self.insertBtn, menu=base)\n\n root = 'Env'\n keys = os.environ.keys()\n keys.sort()\n\n for name in keys:\n first = name[0].upper()\n label = \"{1}/{2}\".format(root, first, name)\n args = [root, name, insertPos]\n menu.add(label, self._gotVariable, args=args)\n\n # open at button position\n\n pos = self.insertBtn.mapToGlobal(QtCore.QPoint(10, 10))\n base.exec_(pos)\n\n def _gotVariable(self, root, name, pos):\n value = self._value\n name = \"${\" + name + \"}\"\n value = value[:pos] + name + value[pos:]\n self.valueEdit.setText(value)\n self._updateValue()\n","sub_path":"nodal/old/fields/std/string/stringUi.py","file_name":"stringUi.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52275532","text":"import numpy as np\nimport pygeos\n\npoint_polygon_testdata = (\n pygeos.points(np.arange(6), np.arange(6)),\n pygeos.box(2, 2, 4, 4),\n)\npoint = pygeos.points(2, 2)\nline_string = pygeos.linestrings([[0, 0], [1, 0], [1, 1]])\nlinear_ring = pygeos.linearrings(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))\npolygon = pygeos.polygons(((0.0, 0.0), (0.0, 2.0), (2.0, 2.0), (2.0, 0.0), (0.0, 0.0)))\nmulti_point = pygeos.multipoints([[0.0, 0.0], [1.0, 2.0]])\nmulti_line_string = pygeos.multilinestrings([[[0.0, 0.0], [1.0, 2.0]]])\nmulti_polygon = pygeos.multipolygons(\n [\n ((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)),\n ((0.1, 0.1), (0.1, 0.2), (0.2, 0.2), (0.2, 0.1)),\n ]\n)\ngeometry_collection = pygeos.geometrycollections(\n [pygeos.points(51, -1), pygeos.linestrings([(52, -1), (49, 2)])]\n)\npoint_z = pygeos.points(1.0, 1.0, 1.0)\n\nall_types = (\n point,\n line_string,\n linear_ring,\n polygon,\n multi_point,\n multi_line_string,\n multi_polygon,\n geometry_collection,\n 
pygeos.Empty,\n)\n","sub_path":"pygeos/test/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"312522220","text":"\"\"\"Here we link between html, js pages and python processing\"\"\"\r\nimport os\r\nimport subprocess\r\nfrom geopy.geocoders import Nominatim\r\nfrom flask import Flask, render_template, url_for, request, jsonify\r\n\r\napp = Flask(__name__)\r\n\r\nimport urllib.request\r\nfrom PIL import Image\r\nimport requests\r\nimport wikipediaapi\r\nimport random\r\nimport time\r\nfrom .parsage import *\r\nfrom .phrase_schema import *\r\nfrom .phrase_anecdocte import *\r\nfrom .convers import *\r\n\r\nLIST = []\r\nLIST_SENTENCE = []\r\nLIST_WIKI = []\r\nLOCALISATION_WIKI = []\r\nCONVERSATION = []\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n    \"\"\"Here we just display home page\"\"\"\r\n    return render_template(\"home.html\")\r\n\r\n\r\n@app.route('/tchat')\r\ndef tchat():\r\n    \"\"\"Here we just display the tchat page\"\"\"\r\n    return render_template(\"tchat.html\")\r\n\r\n\r\n@app.route('/effacer', methods=[\"GET\", \"POST\"])\r\ndef effacer():\r\n    eff = request.form['data']\r\n    # clear the shared list in place; rebinding a local name left the global untouched\r\n    del CONVERSATION[:]\r\n    return jsonify({'data':CONVERSATION})\r\n\r\n\r\nlist_yes_no = [\"phrase schema\"]\r\n@app.route('/answer', methods=[\"GET\", \"POST\"])\r\ndef answer():\r\n    \"\"\"Here we request methods and we send into html page\"\"\"\r\n\r\n    question = request.form['data']\r\n    question1 = question.lower()\r\n    sentence_schema = read_list(question1, list_yes_no)\r\n    print(list_yes_no)\r\n    sentence = '' + str(question) + '' + str(sentence_schema) + ''\r\n\r\n    CONVERSATION.append(sentence)\r\n\r\n    return jsonify({'data':CONVERSATION})\r\n\r\n\r\n@app.route('/img', methods=[\"GET\", \"POST\"])\r\ndef img():\r\n\r\n    img = request.form['data']\r\n\r\n    liste = ['aurevoir.jpg', 'bonjourcava.jpg',\r\n             'casentbizzardtontruk.jpg', 'cki.jpg',\r\n             'cmonpoterico.jpg', 'connarquestion.jpg',\r\n             'cquoicasemange.jpg', 'croooooooo.jpg',\r\n             'croquetteebd.jpg', 'croquettequestion.jpg',\r\n             'croquettttttte.jpg', 'evitequestion.jpg',\r\n             'girencompris.jpg', 'griencapte.jpg', 'grosmot.jpg',\r\n             'gstrictemenrriencdompris.jpg', 'heouiii.jpg',\r\n             'iladitcroquette.jpg', 'imaginehinquoi.jpg',\r\n             'injure.jpg', 'jaipascompris.jpg', 'moiaussicava.jpg',\r\n             'nannn.jpg', 'nannnn.jpg', 'nannnnnnnnnnnnnnn.jpg',\r\n             'nonrep.jpg', 'ohhctpasdumiel.jpg', 'one',\r\n             'onestcopain.jpg', 'ontecoute.jpg', 'ontecoute2.jpg',\r\n             'onvacasserlagueuleaqui.jpg', 'ouaisetquestion.jpg',\r\n             'ouitjpire.jpg', 'pasdekeumhumhum.jpg',\r\n             'posemoiuneadresse.jpg',\r\n             'poussezvousjesuisinfirmier.jpg',\r\n             'putinjaitropbouffer.jpg', 'questquya.jpg',\r\n             'quette.jpg', 'quoi.jpg', 'quoiiii.jpg', 'quoitadisquoi.jpg',\r\n             'tasunkeum.jpg', 'tasvrmntditcroquette.jpg',\r\n             'taunecbsurtoi.jpg', 'taunemeuf.jpg',\r\n             'tuveuxcasserlesgenouxaqui.jpg',\r\n             'yauratoujourspire.jpg',\r\n             'yvamedemanderuneadresseoui.jpg']\r\n\r\n    a = random.choice(liste)\r\n    return jsonify({'data':\"\".format(a)})\r\n\r\n\r\n@app.route('/geo', methods=[\"GET\", \"POST\"])\r\ndef geo():\r\n    \"\"\"Here we send longitude data\"\"\"\r\n\r\n    data_wiki = request.form['data']\r\n    print(data_wiki)\r\n    data_no_ponctuation = no_ponctuation(data_wiki)\r\n    cleaning = apostrohpe(data_no_ponctuation)\r\n    last_word = parsing_text(cleaning)\r\n    search = searching(last_word)\r\n    page = search[1]\r\n    print(page)\r\n    return jsonify({'data':page})\r\n\r\n\r\n@app.route('/geo2', methods=[\"GET\", \"POST\"])\r\ndef geo2():\r\n    \"\"\"Here we send latitude data\"\"\"\r\n\r\n    data_wiki = request.form['data']\r\n    data_no_ponctuation = no_ponctuation(data_wiki)\r\n    cleaning = apostrohpe(data_no_ponctuation)\r\n    last_word = parsing_text(cleaning)\r\n    search = searching(last_word)\r\n    page = search[2]\r\n    print(page,'00000000000000000000000000000000')\r\n    return jsonify({'data':page})\r\n\r\n\r\n@app.route('/wiki', methods=[\"GET\", \"POST\"])\r\ndef wiki():\r\n    \"\"\"Here we send wikipedia stuff\"\"\"\r\n\r\n    b = \"\"\r\n    liste = [[], [], [], [], [], [], [], [],\r\n             [], [], [], [], [], [], [], [],\r\n             [], [], []]\r\n    c = 0\r\n    data_wiki = request.form['data']\r\n    print(data_wiki)\r\n    if data_wiki == \"\":\r\n        page = \"\"\r\n        return jsonify({'data':page})\r\n    else:\r\n        data_no_ponctuation = no_ponctuation(data_wiki)\r\n        cleaning = apostrohpe(data_no_ponctuation)\r\n        last_word = parsing_text(cleaning)\r\n        search = searching(last_word)\r\n        if search == \"None\":\r\n            page = \"Rien trouvé désolé\"\r\n            return jsonify({'data':page})\r\n        else:\r\n            addresse = []\r\n            addresse.append(search[0])\r\n            for i in addresse:\r\n                for j in i:\r\n                    liste[c].append(j)\r\n                    if j == ',':\r\n                        c += 1\r\n\r\n            sentence_from_grandpy = [\"Savez tu que :\",\r\n                                     \"T'avais-je dis que :\",\r\n                                     \"Une fois on m'a dit que :\"]\r\n\r\n            choice = random.choice(sentence_from_grandpy)\r\n\r\n            element = \"\".join(liste[2][:-1])\r\n            print(element,\"0000000000000000000000elemennnnnnnnnnnnnnnnnnt\")\r\n            wiki_wiki = wikipediaapi.Wikipedia('fr')\r\n            page_py = wiki_wiki.page('{}'.format(element))\r\n            existe = page_py.exists()\r\n            if existe:\r\n                page = (\"\" + str(choice) + \"\" + str(page_py.sections[0:200]) + \"...\")\r\n\r\n                return jsonify({'data':page})\r\n\r\n            else:\r\n                page = \"Oups je n'ai rien trouvé\"\r\n                return jsonify({'data':page})\r\n\r\n\r\n@app.route('/data', methods=[\"GET\", \"POST\"])\r\ndef data():\r\n    \"\"\"Here, we just recup data with request form\"\"\"\r\n    \"\"\"from jquery function() who define content from input\"\"\"\r\n\r\n    data = request.form['data']\r\n    data_no_ponctuation = no_ponctuation(data)\r\n    cleanning = apostrohpe(data_no_ponctuation)\r\n\r\n    last_word = parsing_text(cleanning)\r\n\r\n    date = time.strftime(\"%A %d %B %Y %H:%M:%S\")\r\n    date = str(date)\r\n\r\n    LIST.append(\"A {}\".format(date))\r\n\r\n    LIST.append(\"Votre question:\")\r\n\r\n    LIST.append(\"{}\".format(data))\r\n\r\n    if data:\r\n        var = searching(last_word)\r\n        if var == \"None\":\r\n            LIST.append(\"Chat alors pas une addresse trouvée: \")\r\n\r\n            return jsonify({'data':LIST})\r\n\r\n        else:\r\n            LIST.append(\"Chat alors une addresse trouvée: \")\r\n\r\n            LIST.append(\"{}\".format(var))\r\n\r\n            LIST.append(\"\")\r\n\r\n            return jsonify({'data':LIST})\r\n\r\n    return jsonify({'error':'...'})\r\n\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(error):\r\n    return render_template(\"errors/404.html\"), 404\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n","sub_path":"env/grandcat/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"346981616","text":"\"\"\"\nAuthor: Allen B. Liu\nProgram: strategy.py\nDate: Friday, January 16, 2015\nDescription: Context manager\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nfrom core.event import *\nfrom core.order_manager import *\nfrom core.portfolio import *\n\nclass Strategy(EventListener):\n    \"\"\"\n    Strategy is a base class providing an interface for all subsequent\n    (inherited) strategies.\n    \"\"\"\n    __metaclass__ = ABCMeta\n\n    def __init__(self, context, config):\n\n        # save the following variables from context\n        self.name = config['name']\n        self.type = config['type']\n        self.context = context\n        self.portfolio = Portfolio(context, self)\n        self.order_manager = OrderManager(context, self)\n        self.limit_manager = None\n        self.safety_checker = None\n\n    def handle_event(self, event):\n        if event.type == 'EXECUTION':\n            self.portfolio.add(event)\n\n    @abstractmethod\n    def close(self):\n        self.portfolio.writeEOD()\n        self.portfolio.writePNL()","sub_path":"trading/strategies/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"125509301","text":"\"\"\"\nScript to extract chk profiles en-masse using multithreading\n\nUsage:\n    python extract_profiles <run> <model> <model_set>\n\"\"\"\nimport sys\nimport multiprocessing as mp\nimport time\n\n# flashbang\nfrom flashbang import load_save\nfrom flashbang.tools import str_to_bool\n\n\ndef main(run, model, model_set, multithread=True, reload=False, save=True,\n         config='default', threads=4):\n    \"\"\"\n    Parameters\n    ----------\n    run : str\n    model : str\n    model_set : str\n    multithread : bool\n    reload : bool\n    save : bool\n    config : str\n    threads : int\n    \"\"\"\n    t0 = time.time()\n\n    multithread = str_to_bool(multithread)\n    reload = str_to_bool(reload)\n    save = str_to_bool(save)\n    threads = int(threads)\n\n    chk_list = load_save.find_chk(run=run, model=model, model_set=model_set)\n    conf = load_save.load_config(name=config)\n\n    params = conf['profiles']['params'] + conf['profiles']['isotopes']\n    derived_params = conf['profiles']['derived_params']\n\n    if multithread:\n        args = []\n        for chk in chk_list:\n            args.append((chk, run, model, model_set,\n                         reload, save, params, derived_params))\n\n        with mp.Pool(processes=threads) as pool:\n            pool.starmap(extract_profiles, args)\n    else:\n        for chk in chk_list:\n            extract_profiles(chk=chk,\n                             run=run,\n                             model=model,\n                             model_set=model_set,\n                             reload=reload,\n                             save=save,\n                             params=params,\n                             derived_params=derived_params)\n\n    t1 = time.time()\n    print(f'Time taken: {t1-t0:.2f} s')\n\n\ndef extract_profiles(chk, run, model, model_set, reload, save, params, derived_params):\n    \"\"\"Function for multithread pool\n    \"\"\"\n    load_save.get_profile(chk=chk,\n                          run=run,\n                          model=model,\n                          model_set=model_set,\n                          reload=reload,\n                          save=save,\n                          params=params,\n                          derived_params=derived_params)\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 4:\n        print('Parameters:'\n              + '\\n1. run'\n              + '\\n2. model'\n              + '\\n3. 
model_set'\n )\n sys.exit(0)\n if len(sys.argv) == 4:\n main(sys.argv[1], sys.argv[2], sys.argv[3])\n else:\n main(sys.argv[1], sys.argv[2], sys.argv[3],\n **dict(arg.split('=') for arg in sys.argv[4:]))\n","sub_path":"flashbang/scripts/extract_profiles.py","file_name":"extract_profiles.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566891217","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import fft\nimport cv2\nimport imutils\nimport math\n\n\ncap = cv2.VideoCapture('Tag1.mp4')\n#cap = cv2.VideoCapture('Tag0.mp4')\n#cap = cv2.VideoCapture('Tag2.mp4')\n\nout = cv2.VideoWriter('Testudo.avi',cv2.VideoWriter_fourcc(*'XVID'), 30, (400,300))\n\n# FFT to subract the background to get the tag\ndef Fourier(gray):\n \n y1 = fft.fft2(gray)\n \n y2 = fft.fftshift(y1)\n \n (w, h) = gray.shape\n half_w, half_h = int(w/2), int(h/2)\n \n # high pass filter\n n = 5\n y2[half_w-n:half_w+n+1,half_h-n:half_h+n+1] = 0\n\n y3 = fft.ifftshift(y2)\n \n y4 = fft.ifft2(y3)\n \n y = np.uint8(np.abs(y4))\n \n return y\n \n# Detecting the ID and Orientation\ndef orientation(newatag):\n \n gr = cv2.cvtColor(newatag, cv2.COLOR_BGR2GRAY)\n \n width, height = gr.shape\n \n #anti-clockwise\n a = gr[int(height/3.2)][int(height/3.2)]\n b = gr[int(height/1.4545)][int(height/3.2)]\n c = gr[int(height/1.4545)][int(height/1.4545)]\n d = gr[int(height/3.2)][int(height/1.4545)]\n \n e = gr[int(height/2.285)][int(height/2.285)] \n f = gr[int(height/1.777)][int(height/2.285)] \n g = gr[int(height/1.777)][int(height/1.777)] \n h = gr[int(height/2.285)][int(height/1.777)] \n \n orient = str()\n data = []\n \n if f > 200:#MSB\n f = 1\n else:\n f = 0\n if g > 200:\n g = 1\n else:\n g = 0\n if h > 200:\n h = 1\n else:\n h = 0\n if e > 200:#LSB\n e = 1\n else:\n e = 0\n \n if c > 200:\n orient = '0 Degrees'\n data.append(f)\n data.append(g)\n data.append(h)\n data.append(e)\n elif b > 200:\n orient = '270 Degrees'\n data.append(e)\n data.append(f)\n data.append(g)\n data.append(h)\n elif a > 200:\n orient = '180 Degrees'\n data.append(h)\n data.append(e)\n data.append(f)\n data.append(g)\n elif d > 200:\n orient = '90 Degrees'\n data.append(g)\n data.append(h)\n data.append(e)\n data.append(f) \n \n #cv2.imshow('Tag',gr)\n \n return orient,data\n\n# Finding the corners of the tag from world co-ordinates\ndef contours(y):\n \n edged = cv2.Canny(y, 90, 800)\n \n contours = cv2.findContours(edged,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n contours = sorted(contours, key = cv2.contourArea, reverse = True)[:5]\n \n #Finding the corners of the tag\n for cnt in contours:\n perimeter = cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,0.09*perimeter,True)\n \n if cv2.contourArea(cnt) < 5000 and cv2.contourArea(cnt) > 100 and len(approx) == 4:\n \n apx = approx\n apx = apx.reshape(4,2)\n \n corn = np.zeros((4,2))\n \n add_cnt = apx.sum(axis=1)\n \n corn[0] = apx[np.argmin(add_cnt)]\n corn[2] = apx[np.argmax(add_cnt)]\n \n diff_cnt = np.diff(apx,axis=1)\n \n corn[1] = apx[np.argmin(diff_cnt)]\n corn[3] = apx[np.argmax(diff_cnt)]\n break\n return corn\n\n\ndef homography(cornlist,wlist):\n Alist = []\n for i in range(len(cornlist)):\n u, v = cornlist[i][0],cornlist[i][1]\n X, Y = wlist[i][0],wlist[i][1]\n Alist.append([X , Y , 1 , 0 , 0 , 0 , - X * u , - Y * u , - u])\n Alist.append([0 , 0 , 0 , X , Y , 1 , - X * v , - Y * v , - v]) \n A = np.array(Alist)\n \n U, sigma, VT 
= np.linalg.svd(A)\n \n v= VT.T\n \n rv = v[:,8]/v[8][8]\n rv = rv.reshape((3,3))\n \n return rv\n\n# Reorienting the Tag\ndef reorient():\n \n atag = [[0,0],[0,160],[160,160],[160,0]]\n newatag = np.zeros((160,160,3))\n #sh = newatag.shape\n #A = np.zeros(shape=(8,9))\n Alist = []\n for i in range(4):\n u, v = atag[i][0],atag[i][1]\n X, Y = corn[i][0],corn[i][1]\n Alist.append([X , Y , 1 , 0 , 0 , 0 , - X * u , - Y * u , - u])\n Alist.append([0 , 0 , 0 , X , Y , 1 , - X * v , - Y * v , - v]) \n A = np.array(Alist)\n U, sigma, VT = np.linalg.svd(A)\n \n v= VT.T\n \n rv = v[:,8]/v[8][8]\n rv = rv.reshape((3,3))\n \n #r = homography(atag,reclist)\n \n rv_inv = np.linalg.inv(rv)\n \n for i in range(160):\n for j in range(160):\n wcoors=np.array([i,j,1])\n C = np.dot(rv_inv,wcoors)\n C = C/C[2]\n if (640 > C[0] > 0) and (480 > C[1] > 0):\n newatag[i][j] = b[int(C[1])][int(C[0])]\n \n newatag = newatag.astype(np.uint8)\n \n return newatag\n \n\nwhile True:\n \n ret, frame = cap.read()\n \n if ret == True:\n \n b = cv2.resize(frame,(640,480),fx=0,fy=0, interpolation = cv2.INTER_CUBIC) \n b1 = b.copy() \n b2 = b.copy()\n b3 = b.copy() \n gray = cv2.cvtColor(b, cv2.COLOR_BGR2GRAY) \n \n y = Fourier(gray)\n try:\n corn = contours(y)\n except:\n continue\n \n cornlist = corn.tolist()\n cornlist2 = corn.tolist().copy()\n wlist = list()\n \n newatag = reorient()\n \n cv2.imshow('AR TAG',newatag)\n \n # Calling the Orientation Function\n Orientation,ID = orientation(newatag)\n \n print('ID : ', ID)\n print('Orientation : ', Orientation)\n \n cv2.imshow('Frame',b)\n \n if Orientation == '270 Degrees':\n wlist = [[500,0],[0,0],[0,500],[500,500]]\n \n elif Orientation == '0 Degrees':\n wlist = [[0,0],[0,500],[500,500],[500,0]]\n \n elif Orientation == '90 Degrees':\n wlist = [[0,500],[500,500],[500,0],[0,0]]\n \n elif Orientation == '180 Degrees':\n wlist = [[500,500],[500,0],[0,0],[0,500]]\n \n rv1 = homography(cornlist,wlist)\n \n #Placing Testudo on the Tag\n img2 = cv2.imread('testudo.jpg')\n \n test = img2.shape\n\n new_img = np.zeros((test[0],test[1]))\n \n new_coor = []\n \n for i in range(test[0]):\n for j in range(test[1]):\n coor = np.array([[i],[j],[1]])\n q = np.dot(rv1,coor)\n new_x = q[0]/q[2]\n new_y = q[1]/q[2]\n pixel = img2[i][j]\n new_coor.append([int(new_x),int(new_y),pixel])\n \n for i in range(len(new_coor)):\n if (640 > new_coor[i][0] > 0) and (480 > new_coor[i][1] > 0):\n b[new_coor[i][1]][new_coor[i][0]] = new_coor[i][2]\n \n cv2.imshow('TESTUDO',b)\n \n out.write(b)\n \n if cv2.waitKey(1) & 0xFF == ord('q'): \n break\n else:\n break \n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"Problem_2_A.py","file_name":"Problem_2_A.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183270343","text":"import json\nfrom collections import OrderedDict\n\n\nraws = \"\"\"\n一日\tついたち\n二日\tふつか\n三日\tみっか\n四日\tよっか\n五日\tいつか\n六日\tむいか\n七日\tなのか\n八日\tようか\n九日\tここのか\n十日\tとおか\n十一日\tじゅういちにち\n十二日\tじゅうににち\n十三日\tじゅうさんにち\n十四日\tじゅうよっか\n十五日\tじゅうごにち\n十六日\tじゅうろくにち\n十七日\tじゅうしちにち\n十八日\tじゅうはちにち\n十九日\tじゅうくにち\n二十日\tはつか\n二十一日\tにじゅういちにち\n二十二日\tにじゅうににち\n二十三日\tにじゅうさんにち\n二十四日\tにじゅうよっか\n二十五日\tにじゅうごにち\n二十六日\tにじゅうろくにち\n二十七日\tにじゅうしちにち\n二十八日\tにじゅうはちにち\n二十九日\tにじゅうくにち\n三十日\tさんじゅう\n三十一日\tさんじゅういちにち\n\"\"\"\n\nday_read = OrderedDict()\n\nfor day_num, l in enumerate(raws.strip().splitlines(), 1):\n line = l.strip()\n num_day, _read = line.split()\n num_read = num_day[:-1]\n\n day_read[str(day_num)] = num_read\n\nres = 
json.dumps(day_read, indent=2, ensure_ascii=False)\nprint(res)\n","sub_path":"tool/numtozhgen.py","file_name":"numtozhgen.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47318939","text":"#\n# @lc app=leetcode id=215 lang=python3\n#\n# [215] Kth Largest Element in an Array\n#\n# https://leetcode.com/problems/kth-largest-element-in-an-array/description/\n#\n# algorithms\n# Medium (45.49%)\n# Total Accepted: 318.5K\n# Total Submissions: 697.3K\n# Testcase Example: '[3,2,1,5,6,4]\\n2'\n#\n# Find the kth largest element in an unsorted array. Note that it is the kth\n# largest element in the sorted order, not the kth distinct element.\n# \n# Example 1:\n# \n# \n# Input: [3,2,1,5,6,4] and k = 2\n# Output: 5\n# \n# \n# Example 2:\n# \n# \n# Input: [3,2,3,1,2,4,5,5,6] and k = 4\n# Output: 4\n# \n# Note: \n# You may assume k is always valid, 1 ≤ k ≤ array's length.\n# \n#\nfrom typing import List\n\nclass Solution:\n def findKthLargest(self, nums: 'List[int]', k: 'int') -> 'int':\n return self.findKthLargest_3(nums, k)\n\n def findKthLargest_3(self, nums: List[int], k: int) -> int:\n import random\n random.shuffle(nums)\n\n def quickSelect(l: int, r: int) -> int:\n pivot = nums[r]\n left, right = l, r\n #collapse wall\n while left < right:\n while nums[left] > pivot and left < right: left += 1\n while nums[right] <= pivot and left < right: right -= 1\n nums[left], nums[right] = nums[right], nums[left]\n nums[left], nums[r] = nums[r], nums[left]\n if left == k-1:\n return nums[left]\n elif left < k-1:\n return quickSelect(left+1, r)\n else:\n return quickSelect(l, left-1)\n \n return quickSelect(0, len(nums)-1)\n \n def findKthLargest_2(self, nums, k):\n import heapq\n min_heap = [-float('inf')] * k\n for num in nums:\n if num > min_heap[0]:\n heapq.heappushpop(min_heap, num)\n return heapq.heappop(min_heap)\n\n def findKthLargest_1(self, nums, k):\n import heapq\n nums = [-num for num in nums]\n heapq.heapify(nums)\n k -= 1\n for _ in range(k): \n heapq.heappop(nums)\n return -nums[0]\n \n def findKthLargest_0(self, nums, k):\n nums.sort()\n return nums[-k]\n\n","sub_path":"leetcode/215.kth-largest-element-in-an-array.py","file_name":"215.kth-largest-element-in-an-array.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339069616","text":"import numpy as np\nimport random\nfrom matplotlib import pyplot as plt\n\n# Normal Barnsley fern\nfunc_coef = np.array([\n # a b c d e f\n [0.0, 0.0, 0.0, 0.16, 0.0, 0.0], # f1 coefficients\n [0.85, 0.004, -0.04, 0.85, 0.0, 1.6], # f2 coefficients\n [0.2, -0.26, 0.23, 0.22, 0.0, 1.6], # f3 coefficients\n [-0.15, 0.28, 0.26, 0.24, 0.0, 0.44] # f4 coefficients\n])\n\nfunc_probability = np.array([0.02, 0.84, 0.07, 0.07])\n\n# Barnsley fern mutated into a Thelypteridaceae fern\nmutant_func_coef = np.array([\n [0.0, 0.0, 0.0, 0.25, 0.0, -0.4, 0.02],\n [0.95, 0.005, -0.005, 0.93, -0.002, 0.5, 0.84],\n [0.035, -0.2, 0.16, 0.04, -0.09, 0.02, 0.07],\n [-0.04, 0.2, 0.16, 0.04, 0.083, 0.12, 0.07]\n])\n\n\ndef affine_transformation(x, y, coefficients):\n \"\"\"\n Returns result of the following transformations:\n [a b] [x] [e]\n result = [c d] * [y] + [f]\n\n :param x: x value of previous point\n :param y: y value of previous point\n :param coefficients: coefficients a, b, c, d, e, f from func_coef\n :return: next point [x, y]\n \"\"\"\n return np.add(\n np.dot(\n 
np.array([[coefficients[0], coefficients[1]], [coefficients[2], coefficients[3]]]),\n np.array([[x], [y]])\n ), np.array([[coefficients[4]], [coefficients[5]]])).flatten()\n\n\ndef barnsley_fern(iterations):\n \"\"\"\n Generates array of points to build Barnsley fern\n\n :param iterations: number of generated points\n :return: np.array of shape (2, iterations) that contains x and y values\n \"\"\"\n points = [[None] * iterations, [None] * iterations]\n points[0][0] = 0\n points[1][0] = 0\n for i in range(1, iterations - 1):\n probability = random.uniform(0, 1)\n if func_probability[1] >= probability > 0.0:\n func = 1\n elif func_probability[1] + func_probability[2] >= probability > func_probability[1]:\n func = 2\n elif func_probability[1] + func_probability[2] + func_probability[3] >= probability > func_probability[1] + func_probability[2]:\n func = 3\n else:\n func = 0\n next_point = affine_transformation(points[0][i-1], points[1][i-1], func_coef[func])\n points[0][i] = next_point[0]\n points[1][i] = next_point[1]\n return points\n\n\nif __name__ == '__main__':\n fern = barnsley_fern(1000000)\n plt.style.use('dark_background')\n fig, ax = plt.subplots()\n ax.plot(fern[0], fern[1], 'g,')\n ax.set(xlabel='x', ylabel='y', title='Barnsley fern')\n fig.savefig(\"barnsley_fern.png\")\n plt.show()\n","sub_path":"barnsley_fern.py","file_name":"barnsley_fern.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336708775","text":"import numpy as np\n\n\ndef read_file(path):\n # read matrix\n with open(path, 'r') as file:\n matrix = file.readlines()\n matrix = [[np.round(float(n), 3) for n in x.split()] for x in matrix]\n # Check input data\n for i in range(1, (len(matrix))):\n if len(matrix[i]) != len(matrix[0]):\n raise IOError(\"Wrong input data!!!\")\n return matrix\n\n\ndef print_matrix(matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n print('{:8}'.format(round(matrix[i][j], 3).__str__()), end=' ')\n print(\"\\n\")\n\n\ndef print_matrix_solution(matrix, err):\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n print(\"x\", i+1, sep='', end=\" =\")\n if err:\n print('{:8}'.format(round(matrix[i][j], 25).__str__()))\n else:\n print('{:8}'.format(round(matrix[i][j], 5).__str__()))\n\n\ndef write_matrix(matrix, path):\n with open(path, \"w\") as file:\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n file.write('{:7}'.format(round(matrix[i][j], 3).__str__()))\n file.write(\" \")\n file.write(\"\\n\")\n\n\ndef errors(mtr, sol, mtr2):\n print(\"Errors:\")\n for i in range(len(mtr)):\n print(\"error in x\", i+1, \" =\", end=\" \", sep='')\n mtr_sum = 0\n for j in range(len(mtr)):\n mtr_sum += sol[j][0] * mtr[i][j]\n print('{:7}'.format(round(mtr2[i][0] - mtr_sum, 20).__str__()), end='\\n')\n","sub_path":"lab_2/input_output.py","file_name":"input_output.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363808624","text":"#coding=utf-8\nconst = \"\"\"\n\n\n\n \n\n\n\n\n

TodoList应用演示\n\n\t\t{0}\n任务\n管理\n\"\"\"\n\n\nfrom bottle import static_file,route, run, post, request, redirect, error\nimport os, urllib,re,json,time\nRoot = os.path.split(os.path.realpath(__file__))[0]+\"/static/\"\nimport SQLIO\n\n@route('/todo')\ndef index():\n\treturn const.format(SQLIO.PageMake(),)\n@post('/todo')\ndef Accept():\n\tReq = request.body.read()\n\tL = re.split(\"&\",Req)\n\tM = {}\n\tfor i in L:\n\t\tA = re.split(\"=\",i)\n\t\tM[A[0]] = urllib.unquote(A[1])\n\tfor j in M.keys():\n\t\tif re.findall(\"id-\",j):\n\t\t\tSQLIO.SQL_del(int(j[3:]))\n\t\t\tredirect('/todo', 302)\n\ttry:\n\t\ttype = M[\"new\"]\n\t\tnewtask = M[\"newtask\"]\n\texcept:\n\t\tredirect('/error', 404)\n\tif newtask != \"\":\n\t\tSQLIO.SQL_in(newtask)\n\t\tredirect('/todo', 302)\n\telse:\n\t\treturn \"=.=所以你想添加什么任务呀\"\n\n@route('/error')\ndef err():\n\treturn \"虽然不知道你在干什么但是触发了服务器错误呢\"\n@route('/static/<filename>')\ndef server_static(filename):\n\treturn static_file(filename, root=Root)\nrun(host='localhost',port=8080)","sub_path":"NKUCodingCat/0024/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"179740063","text":"import pandas as pd\nfrom pprint import pprint\nfrom collections import Counter\nfrom random import randint\nimport copy, sys\n\ndef entropy_calculator(probs):\n    import math\n    return sum( [-prob*math.log(prob, 2) for prob in probs] )\n\ndef calculate_entire_set_entropy(dataset):\n    count_pos_neg = Counter(x for x in dataset)\n    class_total_instances = len(dataset)*1.0\n    probs = [x / class_total_instances for x in count_pos_neg.values()]\n    return entropy_calculator(probs)\n\ndef calculate_information_gain(dataset,information_gain_attr,final_attr):\n    df_split = dataset.groupby(information_gain_attr)\n    nobs = len(dataset.index) * 1.0\n    df_agg_ent = df_split.agg({final_attr : [calculate_entire_set_entropy, lambda x: len(x)/nobs] })[final_attr]\n    df_agg_ent.columns = ['Entropy', 'PropObservations']\n    new_entropy = sum( df_agg_ent['Entropy'] * df_agg_ent['PropObservations'] )\n    old_entropy = calculate_entire_set_entropy(dataset[final_attr])\n    return old_entropy - new_entropy\n\ndef unique(seq, return_counts=False, id=None):\n    found = set()\n    if id is None:\n        for x in seq:\n            found.add(x)\n    else:\n        for x in seq:\n            x = id(x)\n            if x not in found:\n                found.add(x)\n    found = list(found)\n    counts = [seq.count(0),seq.count(1)]\n    if return_counts:\n        return found,counts\n    else:\n        return found\n\ndef addition(data):\n    sum = 0\n    for i in data:\n        sum = sum + i\n    return sum\n\ndef calculate_variance(target_values):\n    values = list(target_values)\n    elements,counts = unique(values,True)\n    variance_impurity = 0\n    sum_counts = addition(counts)\n    for i in elements:\n        variance_impurity += (-counts[i]/sum_counts*(counts[i]/sum_counts))\n    return variance_impurity\n\ndef calculate_variance_information_gain(dataset,information_gain_attr,final_attr):\n    df_split = dataset.groupby(information_gain_attr)\n    nobs = len(dataset.index) * 1.0\n    df_agg_ent = df_split.agg({final_attr : [calculate_variance, lambda x: len(x)/nobs] })[final_attr]\n    df_agg_ent.columns = ['Variance', 'VarObservation']\n    new_varianegain = sum( df_agg_ent['Variance'] * df_agg_ent['VarObservation'] )\n    old_variancegain = calculate_variance(dataset[final_attr])\n    return old_variancegain - new_varianegain\n\ndef id3_algorithm(df, target_attribute_name, attribute_names,impurity,default_class=None):\n\n    from collections import Counter\n    cnt = Counter(x for x in 
df[target_attribute_name])\n global node_number_info\n if len(cnt) == 1:\n return next(iter(cnt)) \n elif df.empty or (not attribute_names):\n return default_class \n else:\n default_class = max(cnt.keys())\n gainz = 0\n if(impurity == \"IG\"):\n gainz = [calculate_information_gain(df, attr, target_attribute_name) for attr in attribute_names] \n else:\n gainz = [calculate_variance_information_gain(df, attr, target_attribute_name) for attr in attribute_names]\n index_of_max = gainz.index(max(gainz)) \n best_attr = attribute_names[index_of_max]\n tree = {best_attr:{}} \n positiveCount = df['Class'].value_counts()[1];\n negativeCount = df['Class'].value_counts()[0];\n if positiveCount>negativeCount :\n best_class = 1\n elif positiveCount to-print:{yes,no} prune:{yes:no}\")\nelse:\n ifprint = sys.argv[4]\n ifprune = sys.argv[5]\n print(\"Please wait until you see END\")\n print(\"D1 or D2\")\n dataset = pd.read_csv(sys.argv[1])\n node_number_info = 0\n attribute_names = list(dataset.columns)\n attribute_names.remove('Class') \n tree = id3_algorithm(dataset,'Class',attribute_names,\"IG\")\n if(ifprint == \"yes\"):\n print(\"Printing tree with IG\")\n pprint(tree)\n dataset['predicted'] = dataset.apply(classify, axis=1, args=(tree,'No') ) \n print('\\n H1 NP Train Accuracy is: ' + str( sum(dataset['Class']==dataset['predicted'] ) / (1.0*len(dataset.index)) ))\n #print(tree)\n #H1 NP Test Accuracy\n dataset = pd.read_csv(sys.argv[3])\n node_number_info = 0\n attribute_names = list(dataset.columns)\n attribute_names.remove('Class') \n tree = id3_algorithm(dataset,'Class',attribute_names,\"IG\")\n dataset['predicted'] = dataset.apply(classify, axis=1, args=(tree,'No') ) \n print('\\n H1 NP Test Accuracy is: ' + str( sum(dataset['Class']==dataset['predicted'] ) / (1.0*len(dataset.index)) ))\n \n #H1 NP validation Accuracy\n dataset = pd.read_csv(sys.argv[2])\n node_number_info = 0\n attribute_names = list(dataset.columns)\n attribute_names.remove('Class') \n tree = id3_algorithm(dataset,'Class',attribute_names,\"IG\")\n dataset['predicted'] = dataset.apply(classify, axis=1, args=(tree,'No') ) \n print('\\n H1 NP Validation Accuracy is: ' + str( sum(dataset['Class']==dataset['predicted'] ) / (1.0*len(dataset.index)) ))\n \n #H2 NP validation Accuracy\n dataset = pd.read_csv(sys.argv[1])\n node_number_info = 0\n attribute_names = list(dataset.columns)\n attribute_names.remove('Class') \n tree = id3_algorithm(dataset,'Class',attribute_names,\"VI\")\n dataset['predicted'] = dataset.apply(classify, axis=1, args=(tree,'No') ) \n print('\\n H2 NP Train Accuracy is: ' + str( sum(dataset['Class']==dataset['predicted'] ) / (1.0*len(dataset.index)) ))\n \n #H2 NP validation Accuracy\n dataset = pd.read_csv(sys.argv[2])\n node_number_info = 0\n attribute_names = list(dataset.columns)\n attribute_names.remove('Class') \n tree = id3_algorithm(dataset,'Class',attribute_names,\"VI\")\n dataset['predicted'] = dataset.apply(classify, axis=1, args=(tree,'No') ) \n print('\\n H2 NP Test Accuracy is: ' + str( sum(dataset['Class']==dataset['predicted'] ) / (1.0*len(dataset.index)) ))\n \n #H2 NP validation Accuracy\n dataset = pd.read_csv(sys.argv[3])\n node_number_info = 0\n attribute_names = list(dataset.columns)\n attribute_names.remove('Class') \n tree = id3_algorithm(dataset,'Class',attribute_names,\"VI\")\n dataset['predicted'] = dataset.apply(classify, axis=1, args=(tree,'No') ) \n print('\\n H2 NP Validation Accuracy is: ' + str( sum(dataset['Class']==dataset['predicted'] ) / (1.0*len(dataset.index)) ))\n \n 
if(ifprint == \"yes\"):\n print(\"Printing tree with VI\")\n pprint(tree)\n print(\"END\")\n #print(tree)\n# tree = id3_algorithm(dataset,'Class',attribute_names,\"IG\")\n# test_data = pd.read_csv('test_set.csv')\n# tree3 = post_prune(2,4,tree)\n# print(tree3)\n#\n# test_data['predicted3'] = test_data.apply(classify, axis=1, args=(tree3,'1') ) \n# print( 'Accuracy with pruned Information gain tree ' + (str( sum(test_data['Class']==test_data['predicted3'] ) / (1*len(test_data.index)) )))\n \n\n\n\n\n\n\n\n\n\n\n ","sub_path":"assignment1-Decision Tree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458335350","text":"## 1. Introduction ##\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nmovie_reviews = pd.read_csv(\"fandango_score_comparison.csv\")\n\nfig = plt.figure(figsize = (5,12))\nax1 = fig.add_subplot(4,1,1)\nax2 = fig.add_subplot(4,1,2)\nax3 = fig.add_subplot(4,1,3)\nax4 = fig.add_subplot(4,1,4)\n\nax1.set_xlim(0,5)\nax2.set_xlim(0,5)\nax3.set_xlim(0,5)\nax4.set_xlim(0,5)\n\nmovie_reviews[\"RT_user_norm\"].hist(ax=ax1)\nmovie_reviews[\"Metacritic_user_nom\"].hist(ax=ax2)\nmovie_reviews[\"Fandango_Ratingvalue\"].hist(ax=ax3)\nmovie_reviews[\"IMDB_norm\"].hist(ax=ax4)\n\n## 2. Mean ##\n\ndef calc_mean(column):\n return sum(column.tolist())/len(column.tolist())\n\nuser_reviews = movie_reviews[['RT_user_norm', 'Metacritic_user_nom', 'Fandango_Ratingvalue', 'IMDB_norm']]\n\nuser_reviews_means = user_reviews.apply(calc_mean)\n\nrt_mean = user_reviews_means['RT_user_norm']\n\nmc_mean = user_reviews_means['Metacritic_user_nom']\n\nfg_mean = user_reviews_means['Fandango_Ratingvalue']\n\nid_mean = user_reviews_means['IMDB_norm']\n\n\n## 3. Variance and standard deviation ##\n\ndef calc_mean(series):\n vals = series.values\n mean = sum(vals) / len(vals)\n return mean\n\ndef var(column):\n mean = calc_mean(column)\n v = 0\n for i in column.tolist():\n diff_sqr = (i - mean)**2\n v += diff_sqr\n return v/len(column)\n\n\ncolumns = [\"RT_user_norm\", \"Metacritic_user_nom\", \"Fandango_Ratingvalue\", \"IMDB_norm\"]\nuser_reviews = movie_reviews[columns]\nuser_reviews_variances = user_reviews.apply(var)\n\nrt_var = user_reviews_variances[\"RT_user_norm\"]\nmc_var = user_reviews_variances[\"Metacritic_user_nom\"]\nfg_var = user_reviews_variances[\"Fandango_Ratingvalue\"]\nid_var = user_reviews_variances[\"IMDB_norm\"]\n\nrt_stdev = rt_var ** (1/2)\nmc_stdev = mc_var ** (1/2)\nfg_stdev = fg_var ** (1/2)\nid_stdev = id_var ** (1/2)\n\n## 4. Scatter plots ##\n\nfig = plt.figure(figsize=(4,8))\nax1 = fig.add_subplot(3,1,1)\nax2 = fig.add_subplot(3,1,2)\nax3 = fig.add_subplot(3,1,3)\n\nax1.set_xlim(0,5.0)\nax2.set_xlim(0,5.0)\nax3.set_xlim(0,5.0)\n\nax1.scatter(movie_reviews[\"RT_user_norm\"], movie_reviews[\"Fandango_Ratingvalue\"])\nax2.scatter(movie_reviews[\"Metacritic_user_nom\"], movie_reviews[\"Fandango_Ratingvalue\"])\nax3.scatter(movie_reviews[\"IMDB_norm\"], movie_reviews[\"Fandango_Ratingvalue\"])\n\n## 5. 
Covariance ##\n\ndef calc_mean(series):\n vals = series.values\n mean = sum(vals) / len(vals)\n return mean\n\ndef cov(series1, series2):\n mean_1 = sum(series1.tolist())/len(series1.tolist())\n mean_2 = sum(series2.tolist())/len(series2.tolist())\n diff_1 = [i - mean_1 for i in series1.tolist()]\n diff_2 = [i - mean_2 for i in series2.tolist()]\n total = 0\n for i in range(len(series1)):\n total += diff_1[i]*diff_2[i]\n return total/len(series1)\n\nrt_fg_covar = cov(movie_reviews['RT_user_norm'], movie_reviews['Fandango_Ratingvalue'])\n\nmc_fg_covar = cov(movie_reviews['Metacritic_user_nom'], movie_reviews['Fandango_Ratingvalue'])\n\nid_fg_covar = cov(movie_reviews['IMDB_norm'], movie_reviews['Fandango_Ratingvalue'])\n\n## 6. Correlation ##\n\ndef calc_mean(series):\n vals = series.values\n mean = sum(vals) / len(vals)\n return mean\n\ndef calc_variance(series):\n mean = calc_mean(series)\n squared_deviations = (series - mean)**2\n mean_squared_deviations = calc_mean(squared_deviations)\n return mean_squared_deviations\n\ndef calc_covariance(series_one, series_two):\n x = series_one.values\n y = series_two.values\n x_mean = calc_mean(series_one)\n y_mean = calc_mean(series_two)\n x_diffs = [i - x_mean for i in x]\n y_diffs = [i - y_mean for i in y]\n codeviates = [x_diffs[i] * y_diffs[i] for i in range(len(x))]\n return sum(codeviates) / len(codeviates)\n\nrt_fg_covar = calc_covariance(movie_reviews[\"RT_user_norm\"], movie_reviews[\"Fandango_Ratingvalue\"])\nmc_fg_covar = calc_covariance(movie_reviews[\"Metacritic_user_nom\"], movie_reviews[\"Fandango_Ratingvalue\"])\nid_fg_covar = calc_covariance(movie_reviews[\"IMDB_norm\"], movie_reviews[\"Fandango_Ratingvalue\"])\n\ndef corr(s1, s2):\n return calc_covariance(s1,s2)/(calc_variance(s1)*calc_variance(s2))**(1/2)\n\nrt_fg_corr = corr(movie_reviews[\"RT_user_norm\"], movie_reviews[\"Fandango_Ratingvalue\"])\nmc_fg_corr = corr(movie_reviews[\"Metacritic_user_nom\"], movie_reviews[\"Fandango_Ratingvalue\"])\nid_fg_corr = corr(movie_reviews[\"IMDB_norm\"], movie_reviews[\"Fandango_Ratingvalue\"])\n\n","sub_path":"probability and statistics in python/Challenge: Descriptive Statistics-199.py","file_name":"Challenge: Descriptive Statistics-199.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"98087983","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/1/3 16:19\r\n# @Author : zhoujun\r\nimport torch\r\nfrom torch import nn\r\n\r\n\r\nclass PSELoss(nn.Module):\r\n def __init__(self, Lambda, ratio=3, reduction='mean'):\r\n \"\"\"Implement PSE Loss.\r\n \"\"\"\r\n super(PSELoss, self).__init__()\r\n assert reduction in ['mean', 'sum'], \" reduction must in ['mean','sum']\"\r\n self.Lambda = Lambda\r\n self.ratio = ratio\r\n self.reduction = reduction\r\n\r\n def forward(self, preds: torch.Tensor, labels, training_mask):\r\n \"\"\"\r\n loss 由两部分组成 L_c ,L_s\r\n L_c 表示对完整文本实例的loss,会根据OHEM算法选择背景类\r\n L_s 是针对缩放文本实例的loss\r\n :param preds: 预测结果\r\n :param labels: ground truth\r\n :return: loss 值\r\n \"\"\"\r\n bs, n, w, h = preds.size()\r\n preds = preds.contiguous().view(bs, n, w * h) # b,c,h,w -> b,c,h*w\r\n labels = labels.contiguous().view(bs, n, w * h)\r\n training_mask = training_mask.contiguous().view(bs, w * h).to(torch.float)\r\n\r\n all_loss_c = []\r\n all_loss_s = []\r\n all_loss = []\r\n for pred, label, mask in zip(preds, labels, training_mask):\r\n L_c, L_s, loss = self.single_sample_loss(pred, label, mask)\r\n all_loss_c.append(L_c)\r\n 
all_loss_s.append(L_s)\r\n            all_loss.append(loss)\r\n        all_loss_c = torch.stack(all_loss_c)\r\n        all_loss_s = torch.stack(all_loss_s)\r\n        all_loss = torch.stack(all_loss)\r\n        if self.reduction == 'mean':\r\n            all_loss_c = all_loss_c.mean()\r\n            all_loss_s = all_loss_s.mean()\r\n            all_loss = all_loss.mean()\r\n        elif self.reduction == 'sum':\r\n            all_loss_c = all_loss_c.sum()\r\n            all_loss_s = all_loss_s.sum()\r\n            all_loss = all_loss.sum()\r\n        return all_loss_c, all_loss_s, all_loss\r\n\r\n    def single_sample_loss(self, pred, label, training_mask):\r\n        pred = pred * training_mask\r\n        label = label * training_mask\r\n        M = self.cal_M(pred[-1], label[-1])\r\n        L_c = 1 - self.dice_coefficient(pred[-1] * M, label[-1] * M)\r\n        # 计算L_s\r\n        W = (pred[-1] >= 0.5).float()\r\n        L_s = 0\r\n        for i in range(0, len(label) - 1):\r\n            dice = self.dice_coefficient(pred[i] * W, label[i] * W)\r\n            L_s += dice\r\n        L_s = 1 - L_s / (len(label) - 1)\r\n        all_loss = self.Lambda * L_c + (1 - self.Lambda) * L_s\r\n        return L_c, L_s, all_loss\r\n\r\n    def dice_coefficient(self, S, G):\r\n        \"\"\"\r\n        计算每个样本的D\r\n        :param S:\r\n        :param G:\r\n        :return:\r\n        \"\"\"\r\n        eps = 1e-5\r\n        intersection = torch.sum(S * G)\r\n        union = torch.sum(S * S) + torch.sum(G * G) + eps\r\n        dice = 2 * intersection / union\r\n        return dice\r\n\r\n    def cal_M(self, pred_n, label_n):\r\n        \"\"\"\r\n        使用OHEM算法选择参与计算loss的文本和背景像素矩阵,单个样本\r\n        :param pred_n: 完整文本实例的预测结果\r\n        :param label_n: 完整文本实例的ground truth\r\n        :return:\r\n        \"\"\"\r\n        # 计算选择背景区域像素点个数\r\n        pos_mask = (label_n >= 0.5)\r\n        neg_mask = (label_n < 0.5)\r\n        n_pos = pos_mask.sum().int().item()\r\n        n_neg = neg_mask.sum().int().item()\r\n        if n_pos > 0 and n_neg > n_pos * self.ratio:\r\n            n_neg = n_pos * self.ratio\r\n            # 从预测图里拿到背景像素的分数\r\n            zero_predict_score = pred_n.masked_select(neg_mask)\r\n            # 按照OHEM的比例选取背景像素的x,y索引\r\n            value, _ = zero_predict_score.topk(n_neg)\r\n            threshold = value[-1]\r\n            M = pred_n >= threshold\r\n        else:\r\n            M = torch.ones_like(pred_n).to(pred_n.device)\r\n        return M.float()\r\n","sub_path":"model/loss_tf.py","file_name":"loss_tf.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"243017436","text":"import probeinterface as pi\n\nfrom .neobaseextractor import NeoBaseRecordingExtractor, NeoBaseSortingExtractor\n\n\nclass MEArecRecordingExtractor(NeoBaseRecordingExtractor):\n    \"\"\"\n    Class for reading data from a MEArec simulated data.\n\n    Based on :py:class:`neo.rawio.MEArecRawIO`\n\n    Parameters\n    ----------\n    file_path: str\n        The file path to load the recordings from.\n    all_annotations: bool, default: False\n        Load exhaustively all annotations from neo.\n    \"\"\"\n    mode = 'file'\n    NeoRawIOClass = 'MEArecRawIO'\n    name = \"mearec\"\n\n    def __init__(self, file_path, all_annotations=False):\n        neo_kwargs = self.map_to_neo_kwargs(file_path)\n        NeoBaseRecordingExtractor.__init__(self, \n                                           all_annotations=all_annotations,\n                                           **neo_kwargs)\n\n        self.extra_requirements.append('mearec')\n\n        probe = pi.read_mearec(file_path)\n        probe.annotations[\"mearec_name\"] = str(probe.annotations[\"mearec_name\"])\n        self.set_probe(probe, in_place=True)\n        self.annotate(is_filtered=True)\n\n        if hasattr(self.neo_reader._recgen, \"gain_to_uV\"):\n            self.set_channel_gains(self.neo_reader._recgen.gain_to_uV)\n\n        self._kwargs.update({'file_path': str(file_path)})\n\n    @classmethod\n    def map_to_neo_kwargs(cls, file_path):\n        neo_kwargs = {'filename': str(file_path)}\n        return neo_kwargs\n\n\nclass 
MEArecSortingExtractor(NeoBaseSortingExtractor):\n mode = 'file'\n NeoRawIOClass = 'MEArecRawIO'\n handle_spike_frame_directly = False\n name = \"mearec\"\n\n def __init__(self, file_path):\n neo_kwargs = self.map_to_neo_kwargs(file_path)\n NeoBaseSortingExtractor.__init__(self,\n sampling_frequency=None, # auto guess is correct here\n use_natural_unit_ids=True,\n **neo_kwargs)\n\n self._kwargs = {'file_path': str(file_path)}\n\n @classmethod\n def map_to_neo_kwargs(cls, file_path):\n neo_kwargs = {'filename': str(file_path)}\n return neo_kwargs\n\n\ndef read_mearec(file_path):\n \"\"\"Read a MEArec file.\n\n Parameters\n ----------\n file_path: str or Path\n Path to MEArec h5 file\n\n Returns\n -------\n recording: MEArecRecordingExtractor\n The recording extractor object\n sorting: MEArecSortingExtractor\n The sorting extractor object\n \"\"\"\n recording = MEArecRecordingExtractor(file_path)\n sorting = MEArecSortingExtractor(file_path)\n return recording, sorting\n","sub_path":"spikeinterface/extractors/neoextractors/mearec.py","file_name":"mearec.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573732073","text":"\"\"\" SKD-Views\n\"\"\"\n\nfrom django.views.generic import ListView\nfrom skd.models import Configuration\n\n\nclass SkdListView(ListView):\n \"\"\"SKD-specific List view\n\n Creates a ListView, but with benefits:\n\n Row-specific actions, a default output, when no rows are returned from\n the queryset and a list of basic actions\n\n This relies on three additional template tags inside skd_listview (see\n there for more information).\n \"\"\"\n\n template_name = \"skd/listbase.html\"\n\n # Current category for menu\n current_category = \"\"\n\n # Text for empty lists\n no_rows = \"\"\n\n # Column-Headers\n list_columns = []\n\n # Context of this list\n list_context = \"\"\n\n # Paginator\n paginate_by = 10\n\n def __init__(self):\n\n super(SkdListView, self).__init__()\n\n if Configuration.objects.filter(key=\"pagination_max\").exists():\n\n self.paginate_by = Configuration.objects.get(\n key=\"pagination_max\"\n ).value\n\n def get_actions(self):\n \"\"\"Returns an array of dicts with \"title\" and \"url\" for the general\n actions under the displayed table\"\"\"\n\n pass\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(SkdListView, self).get_context_data(**kwargs)\n\n context[\"current_category\"] = self.current_category\n context[\"no_rows\"] = self.no_rows\n context[\"actions\"] = self.get_actions()\n context[\"columns\"] = self.list_columns\n context[\"list_context\"] = self.list_context\n\n return context\n","sub_path":"views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470799954","text":"class Solution:\n def titleToNumber(self, s: str) -> int:\n d = {}\n for i, v in enumerate(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\n d[v] = i + 1\n \n val = 0\n exp = 1\n for i in range(len(s))[::-1]:\n val += d[s[i]] * exp\n exp *= 26\n \n return val","sub_path":"leetcode/171-Excel-Sheet-Column-Number.py","file_name":"171-Excel-Sheet-Column-Number.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203662734","text":"# -*- coding: utf-8 -*-\nimport re\n\nimport arrow\n\n\nclass 
TimeConverter(object):\n human_exps = [\n ('convert_human_style_1', re.compile(u\"(\\d+) (?:giờ|phút|giây|ngày) trước\")),\n ('convert_human_style_2', re.compile(\"(?:Today|Yesterday),\\s*\\d+:\\d+\")),\n ('convert_human_style_3', re.compile(u\"\\d+:\\d+\\s+(?:AM|PM)\\s+(?:Hôm nay|Hôm qua)\")),\n ]\n\n def time_now(self):\n return arrow.utcnow()\n\n @staticmethod\n def convert_str_to_tmp(time_str, format=None):\n if format:\n date_time = arrow.get(time_str, format)\n else:\n date_time = arrow.get(time_str)\n return date_time.timestamp\n\n def convert_human_style_1(self, m, time_str):\n t = int(m.group(1))\n PLUS = 7 * 60 * 60\n time_now = self.time_now()\n if u\"giờ\" in time_str:\n return time_now.shift(hours=-t).timestamp + PLUS\n elif u\"phút\" in time_str:\n return time_now.shift(minutes=-t).timestamp + PLUS\n elif u\"giây\" in time_str:\n return time_now.shift(seconds=-t).timestamp + PLUS\n elif u\"ngày\" in time_str:\n return time_now.shift(days=-t).timestamp + PLUS\n\n def convert_human_style_2(self, m, time_str):\n time_now = self.time_now()\n if \"Yesterday\" in time_str:\n normalized_time_str = time_str.replace(\"Yesterday\", time_now.shift(days=-1).format(\"DD-MM-YYYY\"))\n return self.convert_str_to_tmp(normalized_time_str, format='DD-MM-YYYY, HH:mm')\n elif \"Today\" in time_str:\n normalized_time_str = time_str.replace(\"Today\", time_now.format(\"DD-MM-YYYY\"))\n return self.convert_str_to_tmp(normalized_time_str, format='DD-MM-YYYY, HH:mm')\n\n def convert_human_style_3(self, m, time_str):\n time_now = self.time_now()\n normalized_time_str = \"\"\n if u\"Hôm qua\" in time_str:\n normalized_time_str = time_str.replace(u\"Hôm qua\", time_now.shift(days=-1).format(\"DD-MM-YYYY\"))\n elif u\"Hôm nay\" in time_str:\n normalized_time_str = time_str.replace(u\"Hôm nay\", time_now.format(\"DD-MM-YYYY\"))\n return self.convert_str_to_tmp(normalized_time_str, format='HH:mm A DD-MM-YYYY')\n\n def convert_human_str_to_tmp(self, time_str):\n for fun, human_exp in self.human_exps:\n m = human_exp.search(time_str)\n if m:\n return getattr(self, fun)(m, time_str)\n","sub_path":"timviec365/helpers/time_converter.py","file_name":"time_converter.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556730542","text":"# Euler problem 10: Summation of primes\r\n# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\r\n# Find the sum of all the primes below two million.\r\n#\r\n\r\nimport math\r\n\r\nprimes = [2, 3, 5, 7]\r\nnum = 9\r\n\r\n# get all primes below 2 million\r\nwhile primes[-1] < 2000000:\r\n #assume num is prime\r\n prime = True\r\n\r\n # Test vs off numbers 3 to sqrt(num) + 1\r\n for i in range(3, math.ceil(math.sqrt(num))+1, 2):\r\n if num%i == 0:\r\n prime = False\r\n \r\n if prime == True:\r\n\r\n primes.append(num)\r\n\r\n num += 2\r\n\r\nprint(primes)\r\n\r\n#sum all primes, excluding the final prime as it will be over 2 mil b.c while condition\r\nprint( sum(primes[:-1]))","sub_path":"10_sum_primes.py","file_name":"10_sum_primes.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388298642","text":"\"\"\"Script for visualizing CIFAR10 data\"\"\"\n\nimport sys\nfrom utils.logger import init_logger\nfrom utils.cifar10 import load_data\nfrom utils.plot import plot_images\n\nlogger = init_logger()\n\n\ndef select_random(img, cls):\n \"\"\"Select set of 10 random images for each 
class\"\"\"\n import numpy as np\n permutation = np.random.permutation(len(img))\n index = 0\n\n images = []\n for image_class in range(10):\n class_images = []\n while len(class_images) < 10:\n if cls[permutation[index]] == image_class:\n class_images.append(img[permutation[index]])\n index = (index + 1) % len(permutation)\n images.append(class_images)\n return images\n\n\ndef main(argv):\n \"\"\"Main function\"\"\"\n if len(argv) != 3:\n logger.error('Usage: %s data-path output-filename', argv[0])\n exit(1)\n\n data_path = argv[1]\n output_filename = argv[2]\n\n train_img, train_cls, _, _ = load_data(data_path)\n plot_images(select_random(train_img, train_cls), output_filename)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/plot_images.py","file_name":"plot_images.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240182597","text":"startString = '{\"title\":\"Is '\nendString = ' a good company?\", \"tags\": [\"business\", \"america\", \"finance\"], \"options\": [\"yes\", \"no\", \"not sure\"]}'\n\nf = open(\"smp500.txt\", \"r\")\n\nw = open(\"smp500.json\", \"w\")\n\nw.write(\"[\")\n\nfor x in f:\n w.write(startString + f.readline()[:-1] + endString + \",\")\n\nw.write(\"]\")\nw.close()","sub_path":"smp500/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403728133","text":"#calcolatrice in python\n#FILIPPO DI TOPPA\n\n#IMPORTAZIONI DI LIBRERIE\nfrom math import *\n\n# FUNZIONI DELLA CALCOLATRICE\n\n#SOMMA\ndef somma():\n print(\"\\nHAI SCELTO ADDIZIONE\\n\")\n x=int(input(\"inserire il primo numero\\n\"))\n y=int(input(\"inserisci il secondo numero\\n\"))\n somma=x+y\n print(\"risultato: \"+str(somma))\n#SOTTRAZIONE \ndef sottrazione():\n print(\"\\nHAI SCELTO SOTTRAZIONE\\n\")\n x=int(input(\"inserire il primo numero\\n\"))\n y=int(input(\"inserisci il secondo numero\\n\"))\n sottrazione=x-y\n print(\"risultato: \"+str(sottrazione))\n#MOLTIPLICAZIONE\ndef moltiplicazione():\n print(\"\\nHAI SCELTO MOLTIPLICAZIONE\\n\")\n x=int(input(\"inserire il primo numero\\n\"))\n y=int(input(\"inserisci il secondo numero\\n\"))\n moltiplicazione=x*y\n print(\"risultato: \"+str(moltiplicazione))\n#DIVISIONE\ndef divisione():\n print(\"\\nHAI SCELTO DIVISIONE\\n\")\n x=int(input(\"inserire il primo numero\\n\"))\n y=int(input(\"inserisci il secondo numero\\n\"))\n divisione=x/y\n print(\"risultato: \"+str(divisione)) \n#POTENZA\ndef potenza():\n print(\"\\nHAI SCELTO POTENZA\\n\")\n x=int(input(\"inserire il primo numero\\n\"))\n y=int(input(\"inserisci il secondo numero\\n\"))\n potenza=pow(x,y)\n print(\"risultato: \"+str(potenza))\n#RADICE QUADRATA\ndef radicequadrata():\n print(\"\\nHAI SCELTO RADICE QUADRATA\\n\")\n x=int(input(\"inserire il numero\\n\"))\n print(sqrt(x))\n#SENO\ndef seno():\n print(\"\\nHAI SCELTO SENO\\n\")\n x=int(input(\"inserire il numero\\n\"))\n print(sin(x))\n#COSENO\ndef coseno():\n print(\"\\nHAI SCELTO COSENO\\n\")\n x=int(input(\"inserire il numero\\n\"))\n print(cos(x))\n#USCITA\ndef uscita():\n print(\"\"\"\n ++++++++++++++++++++++++++++++\n |LA CALCOLATRICE VERRÀ CHIUSA|\n ++++++++++++++++++++++++++++++\n \"\"\")\n \n\n# MAIN DELLA CALCOLATRICE\nwhile True:\n print(\"\"\"\n ++++++++++++++++++++++++++++\n BENVENUTO NELLA CALCOLATRICE\n ++++++++++++++++++++++++++++\n \n QUI PUOI EFFETTUARE LA TUA SCELTA:\n \n 
[inserire il numero corrispondente alla scelta]\n\n +------------------+\n |addizione(1) |\n |sottrazione(2) |\n |moltiplicazione(3)|\n |divisione(4) |\n |potenza(5) |\n |radice quadrata(6)|\n |seno(7) |\n |coseno(8) |\n +------------------+----+\n |PER USCIRE SCRIVERE ESC|\n +------------------+----+\n \"\"\")\n scelta=input()\n if scelta=='1':\n somma()\n elif scelta=='2':\n sottrazione()\n elif scelta=='3':\n moltiplicazione()\n elif scelta=='4':\n divisione()\n elif scelta=='5':\n potenza()\n elif scelta=='6':\n radicequadrata()\n elif scelta=='7':\n seno()\n elif scelta=='8':\n coseno()\n elif scelta=='Esc' or scelta=='ESC' or scelta=='esc':\n uscita()\n break\n #LOOP DI FINE PROGRAMMA\n loop=input(\"\\n DESIDERI CONTINUARE? S/n\\n\")\n if loop=='s' or loop=='S':\n continue\n else:\n print(\"ARRIVEDERCI\")\n break\n","sub_path":"Calcolatrice.py","file_name":"Calcolatrice.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476424444","text":"import os\r\nimport sys\r\nimport time\r\n\r\nfrom oslo_log import log as logging\r\n\r\nfrom tempest import command_argument_string\r\nfrom tempest import config\r\nfrom tempest import reporting\r\nfrom tempest import tvaultconf\r\nfrom tempest.api.workloadmgr import base\r\nfrom tempest.lib import decorators\r\nfrom tempest.util import cli_parser\r\nfrom tempest.util import query_data\r\n\r\nsys.path.append(os.getcwd())\r\n\r\nLOG = logging.getLogger(__name__)\r\nCONF = config.CONF\r\n\r\n\r\nclass RestoreTest(base.BaseWorkloadmgrTest):\r\n\r\n credentials = ['primary']\r\n\r\n @classmethod\r\n def setup_clients(cls):\r\n super(RestoreTest, cls).setup_clients()\r\n reporting.add_test_script(str(__name__))\r\n\r\n @decorators.attr(type='smoke')\r\n @decorators.idempotent_id('9fe07175-912e-49a5-a629-5f52eeada4c9')\r\n @decorators.attr(type='workloadmgr_cli')\r\n def test_tvault1037_list_restore(self):\r\n try:\r\n # Prerequisites\r\n self.created = False\r\n self.workload_instances = []\r\n\r\n # Launch instance\r\n self.vm_id = self.create_vm()\r\n LOG.debug(\"VM ID: \" + str(self.vm_id))\r\n\r\n # Create volume\r\n self.volume_id = self.create_volume()\r\n LOG.debug(\"Volume ID: \" + str(self.volume_id))\r\n\r\n # Attach volume to the instance\r\n self.attach_volume(self.volume_id, self.vm_id)\r\n LOG.debug(\"Volume attached\")\r\n\r\n # Create workload\r\n self.workload_instances.append(self.vm_id)\r\n self.wid = self.workload_create(\r\n self.workload_instances,\r\n tvaultconf.parallel,\r\n workload_name=tvaultconf.workload_name)\r\n LOG.debug(\"Workload ID: \" + str(self.wid))\r\n time.sleep(5)\r\n\r\n # Create snapshot\r\n self.snapshot_id = self.workload_snapshot(\r\n self.wid, True, tvaultconf.snapshot_name)\r\n LOG.debug(\"Snapshot ID: \" + str(self.snapshot_id))\r\n self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)\r\n\r\n # Delete instance\r\n self.delete_vm(self.vm_id)\r\n LOG.debug(\"Instance deleted successfully\")\r\n\r\n # Delete corresponding volume\r\n self.delete_volume(self.volume_id)\r\n LOG.debug(\"Volume deleted successfully\")\r\n\r\n # Create one-click restore\r\n self.restore_id = self.snapshot_restore(\r\n self.wid, self.snapshot_id, tvaultconf.restore_name)\r\n LOG.debug(\"Restore ID: \" + str(self.restore_id))\r\n\r\n # Wait till restore is complete\r\n wc = query_data.get_snapshot_restore_status(\r\n tvaultconf.restore_name, self.snapshot_id)\r\n LOG.debug(\"Snapshot restore status: \" + str(wc))\r\n while 
(True):  # poll until the restore is \"available\" or \"error\" (exits via break)\r\n                time.sleep(5)\r\n                wc = query_data.get_snapshot_restore_status(\r\n                    tvaultconf.restore_name, self.snapshot_id)\r\n                LOG.debug(\"Snapshot restore status: \" + str(wc))\r\n                if (str(wc) == \"available\"):\r\n                    LOG.debug(\"Snapshot Restore successfully completed\")\r\n                    self.created = True\r\n                    break\r\n                else:\r\n                    if (str(wc) == \"error\"):\r\n                        break\r\n\r\n            if (self.created == False):\r\n                reporting.add_test_step(\"One click Restore\", tvaultconf.FAIL)\r\n                raise Exception(\"Snapshot Restore did not get created\")\r\n\r\n            # List Restores using CLI command\r\n            rc = cli_parser.cli_returncode(\r\n                command_argument_string.restore_list)\r\n            if rc != 0:\r\n                reporting.add_test_step(\r\n                    \"Execute restore-list command\", tvaultconf.FAIL)\r\n                raise Exception(\"Command did not execute correctly\")\r\n            else:\r\n                reporting.add_test_step(\r\n                    \"Execute restore-list command\", tvaultconf.PASS)\r\n                LOG.debug(\"Command executed correctly\")\r\n\r\n            wc = query_data.get_available_restores(CONF.identity.tenant_id)\r\n            out = cli_parser.cli_output(command_argument_string.restore_list)\r\n            if (int(wc) == int(out)):\r\n                reporting.add_test_step(\r\n                    \"Verification with DB\", tvaultconf.PASS)\r\n                LOG.debug(\r\n                    \"Restore list command listed available restores correctly\")\r\n            else:\r\n                reporting.add_test_step(\r\n                    \"Verification with DB\", tvaultconf.FAIL)\r\n                raise Exception(\r\n                    \"Restore list command did not list available restores correctly\")\r\n            reporting.test_case_to_write()\r\n\r\n        except Exception as e:\r\n            LOG.error(\"Exception: \" + str(e))\r\n            reporting.set_test_script_status(tvaultconf.FAIL)\r\n            reporting.test_case_to_write()\r\n","sub_path":"tempest/api/workloadmgr/restore/test_tvault1037_list_restore.py","file_name":"test_tvault1037_list_restore.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"628629463","text":"#Run this file with the test cases in the same folder to produce the desired output files\n\nfrom readFile import readFile\nfrom writeFile import writeFile\nfrom item import item\nfrom writeFile import replace_line\n\ndef Part1(file_name):\n    lines = readFile(file_name)\n    i=0\n    items =[]\n    for line in lines:\n        if line[0] =='(':\n            myitem = item(line)\n            items.append(myitem)\n            i+=1\n    items.sort()\n    a=file_name.rstrip(\".in\")\n    a=a+\".out\"\n    f=open(a,'r+')\n    f.truncate(0)\n    f.close()\n    past=item('(-1,-1,-1,-1)')\n    num =0\n    line = 0\n    \n    for y in items:\n        if past==y:\n            num+=1\n            z=\"Product Number: \" + y.content[2]+\"; Weight: \"+y.content[3]+\"; Qty: \"+str(num)+\"; Location: (\"+y.content[0]+\", \"+y.content[2]+\")\\n\"\n            b=line-1\n            replace_line(a,b,z)\n        else:\n            num = 1\n            z=\"Product Number: \" + y.content[2]+\"; Weight: \"+y.content[3]+\"; Qty: \"+str(num)+\"; Location: (\"+y.content[0]+\", \"+y.content[2]+\")\\n\"\n            writeFile(a,z)\n        past = y\n        line +=1\n    \nPart1('1a.in')\nPart1('1b.in')\nPart1('1c.in')","sub_path":"Part1.py","file_name":"Part1.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"581080027","text":"# Crie um programa onde o usuário possa digitar cinco valores numéricos e\n# cadastre-os em uma lista, já na posição correta de inserção (sem usar o sort()).\n# No final, mostre a lista ordenada na tela.\n\nlista = []\nfor c in range(5):\n    n = int(input('Digite um valor: '))\n    if c == 0 or n > lista[-1]: #Teste se for o primeiro valor (c == 0) 
ou n for maior do que o último valor da lista (lista[-1])\n lista.append(n) # Adiciona n na última posição da lista\n print('Adicionado ao final da lista...')\n else:\n pos = 0 # Variável pos na primeira posição\n while pos < len(lista): #Laço posição for menor que a quantidade de itens na lista (len(lista)\n if n <= lista[pos]: #Teste se n for menor ou igual a valor na lista na posição (lista[pos])\n lista.insert(pos, n) #Inserir n na lista na posição pos\n print(f'Adicionado na posição {pos} da lista.')\n break\n pos += 1\nprint('-=' * 30)\nprint(f'Os valores digitados em ordem foram {lista}.')\n","sub_path":"python_exercicios/desafio080.py","file_name":"desafio080.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"639155582","text":"import json\nimport socket\nimport base64\nimport time\nfrom json.decoder import JSONDecodeError\nfrom uuid import uuid4\nfrom collections import OrderedDict\nfrom traceback import format_exc\n\nfrom tornado.tcpserver import TCPServer\nfrom tornado.tcpclient import TCPClient\nfrom tornado.iostream import StreamClosedError\nfrom tornado.util import TimeoutError\nfrom coincurve import verify_signature\n\nfrom yadacoin.core.config import get_config, Config\nfrom yadacoin.core.chain import CHAIN\n\n\nclass BaseRPC:\n async def write_result(self, stream, method, data, req_id):\n await self.write_as_json(stream, method, data, 'result', req_id)\n\n async def write_params(self, stream, method, data):\n await self.write_as_json(stream, method, data, 'params')\n\n async def write_as_json(self, stream, method, data, rpc_type, req_id=None):\n\n rpc_data = {\n 'id': req_id if req_id else str(uuid4()),\n 'method': method,\n 'jsonrpc': 2.0,\n rpc_type: data\n }\n if rpc_type == 'params':\n stream.message_queue[rpc_data['id']] = rpc_data\n await stream.write('{}\\n'.format(json.dumps(rpc_data)).encode())\n\nclass RPCSocketServer(TCPServer, BaseRPC):\n inbound_streams = {}\n inbound_pending = {}\n config = None\n\n async def handle_stream(self, stream, address):\n stream.synced = False\n stream.message_queue = {}\n while True:\n try:\n data = await stream.read_until(b\"\\n\")\n body = json.loads(data)\n method = body.get('method')\n if 'result' in body:\n if body['id'] in stream.message_queue:\n del stream.message_queue[body['id']]\n else:\n continue\n await getattr(self, method)(body, stream)\n except StreamClosedError:\n if hasattr(stream, 'peer'):\n await self.remove_peer(stream.peer)\n self.config.app_log.warning('Disconnected from {}: {}'.format(stream.peer.__class__.__name__, stream.peer.to_json()))\n break\n except:\n if hasattr(stream, 'peer'):\n self.config.app_log.warning('Bad data from {}: {}'.format(stream.peer.__class__.__name__, stream.peer.to_json()))\n stream.close()\n #self.config.app_log.warning(\"{}\".format(format_exc()))\n break\n\n async def remove_peer(self, peer):\n id_attr = getattr(peer, peer.id_attribute)\n if id_attr in self.inbound_streams[peer.__class__.__name__]:\n del self.inbound_streams[peer.__class__.__name__][id_attr]\n if id_attr in self.inbound_pending[peer.__class__.__name__]:\n del self.inbound_pending[peer.__class__.__name__][id_attr]\n\n\nclass RPCSocketClient(TCPClient):\n outbound_streams = {}\n outbound_pending = {}\n outbound_ignore = {}\n config = None\n\n async def connect(self, peer):\n try:\n id_attr = getattr(peer, peer.id_attribute)\n if id_attr in self.outbound_ignore[peer.__class__.__name__]:\n return\n if id_attr in 
self.outbound_pending[peer.__class__.__name__]:\n return\n if id_attr in self.outbound_streams[peer.__class__.__name__]:\n return\n if id_attr in self.config.nodeServer.inbound_pending[peer.__class__.__name__]:\n return\n if id_attr in self.config.nodeServer.inbound_streams[peer.__class__.__name__]:\n return\n if self.config.peer.identity.username_signature == peer.identity.username_signature:\n return\n if (self.config.peer.host, self.config.peer.host) == (peer.host, peer.port):\n return\n self.outbound_pending[peer.__class__.__name__][id_attr] = peer\n stream = await super(RPCSocketClient, self).connect(peer.host, peer.port, timeout=1)\n stream.synced = False\n stream.message_queue = {}\n stream.peer = peer\n stream.last_activity = int(time.time())\n try:\n result = verify_signature(\n base64.b64decode(stream.peer.identity.username_signature),\n stream.peer.identity.username.encode(),\n bytes.fromhex(stream.peer.identity.public_key)\n )\n if not result:\n self.config.app_log.warning('new {} peer signature is invalid'.format(peer.__class__.__name__))\n stream.close()\n return\n self.config.app_log.info('new {} peer is valid'.format(peer.__class__.__name__))\n except:\n self.config.app_log.warning('invalid peer identity signature')\n stream.close()\n return\n if id_attr in self.outbound_pending[peer.__class__.__name__]:\n del self.outbound_pending[peer.__class__.__name__][id_attr]\n self.outbound_streams[peer.__class__.__name__][id_attr] = stream\n self.config.app_log.info('Connected to {}: {}'.format(peer.__class__.__name__, peer.to_json()))\n return stream\n except StreamClosedError:\n await self.remove_peer(peer)\n self.config.app_log.warning('Streamed closed for {}: {}'.format(peer.__class__.__name__, peer.to_json()))\n except TimeoutError:\n await self.remove_peer(peer)\n self.config.app_log.warning('Timeout connecting to {}: {}'.format(peer.__class__.__name__, peer.to_json()))\n\n async def wait_for_data(self, stream):\n while True:\n try:\n body = json.loads(await stream.read_until(b\"\\n\"))\n if 'result' in body:\n if body['id'] in stream.message_queue:\n del stream.message_queue[body['id']]\n else:\n continue\n stream.last_activity = int(time.time())\n await getattr(self, body.get('method'))(body, stream)\n except StreamClosedError:\n del self.outbound_streams[stream.peer.__class__.__name__][stream.peer.rid]\n break\n\n async def remove_peer(self, peer):\n if peer.rid in self.outbound_streams[peer.__class__.__name__]:\n del self.outbound_streams[peer.__class__.__name__][peer.rid]\n if peer.rid in self.outbound_pending[peer.__class__.__name__]:\n del self.outbound_pending[peer.__class__.__name__][peer.rid]\n","sub_path":"yadacoin/tcpsocket/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"361626644","text":"from item_module.models import *\n\nfrom player_module.item_system.cont_items import HumanPackItem\n\nimport battle_module.item_system.containers as Containers\n\n\n# ===================================================\n# 对战物资槽项\n# ===================================================\n@ItemManager.registerSlotContItem(\"对战物资槽项\",\n\tContainers.BattleItemSlot, pack_item=HumanPackItem)\nclass BattleItemSlotItem(SlotContItem):\n\n\tdef isUsable(self) -> bool:\n\t\t\"\"\"\n\t\t能否使用\n\t\tReturns:\n\t\t\t返回能否使用\n\t\t\"\"\"\n\t\treturn True\n\n\tdef useItem(self, **kwargs):\n\t\t\"\"\"\n\t\t使用物品\n\t\tArgs:\n\t\t\t**kwargs (**dict): 
拓展参数\n\t\t\"\"\"\n\t\tsuper().useItem(ItemUseOccasion.Battle, **kwargs)\n\t\tself.dequip(index=0)\n\n\n","sub_path":"Server/ExermonServer/battle_module/item_system/cont_items.py","file_name":"cont_items.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171233332","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 24 22:17:31 2019\n\n@author: nafis\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport os\nimport sys\n\n\n#import a folder of images and create an image array\ndef load_images_from_folder(folder):\n images = []\n file_list = []\n\n #first get only the filenames\n for filename in os.listdir(folder):\n file_list.append(filename)\n \n #sort them lexicographically, appraently this will create problem in Submitty\n file_list.sort()\n \n #for sorted file list, now import as image files\n for file in file_list:\n img = cv2.imread(os.path.join(folder,file))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n if img is not None:\n images.append(img)\n \n return images, file_list\n\n#this function will print everything according to format\ndef print_stuff(x, y, finalImage, E):\n counter=0\n print(\"\")\n print(\"Energies at ({:.0f}\".format(x)+\", {:.0f})\".format(y))\n for E_of_image in E:\n print(\"{:.0f}: \".format(counter)+\"{:.1f}\".format(E_of_image[x][y]))\n counter = counter + 1\n print(\"RGB: ({:.0f}\".format(finalImage[x,y,0])+\", {:.0f}\".format(finalImage[x, y, 1])+\", {:.0f})\".format(finalImage[x, y, 2]))\n \n\nif __name__=='__main__':\n args = sys.argv\n image_dir = args[1]\n output_file = args[2]\n sigma = float(args[3])\n p = int(args[4]) \n \n\n images, files = load_images_from_folder(image_dir)\n h = np.floor(2.5*sigma)\n ksize = int(2*h+1)\n gray_images = []\n E = []\n \n #calculate Energy for each image\n for img in (images): \n #create sobelx and sobely for image\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n gray_images.append(gray)\n sobelx = cv2.Sobel(gray,cv2.CV_64F,1,0)\n sobely = cv2.Sobel(gray,cv2.CV_64F,0,1)\n gradient_magnitude = (sobelx**2) + (sobely**2)\n weighted_function = cv2.GaussianBlur(gradient_magnitude, (ksize,ksize), sigma)\n E.append(weighted_function)\n \n #apply energy matrix on each of the channel of a RGB image \n prod_sum_R = 0\n prod_sum_G = 0\n prod_sum_B = 0\n e_sum = 0\n M,N = images[0].shape[0], images[0].shape[1]\n finalImage = np.zeros((M,N,3))\n for i in range(len(images)):\n prod_sum_R = prod_sum_R + ((E[i]**p)*images[i][:,:,0])\n prod_sum_G = prod_sum_G + ((E[i]**p)*images[i][:,:,1])\n prod_sum_B = prod_sum_B + ((E[i]**p)*images[i][:,:,2])\n e_sum = e_sum + (E[i]**p)\n finalImage[:,:,0] = prod_sum_R/e_sum\n finalImage[:,:,1] = prod_sum_G/e_sum\n finalImage[:,:,2] = prod_sum_B/e_sum\n \n finalImage = np.round(finalImage).astype('uint8')\n \n #printing stuff\n print(\"Results:\")\n print_stuff(M//4, N//4, finalImage, E)\n print_stuff(M//4, (3*N)//4, finalImage, E)\n print_stuff((3*M)//4, N//4, finalImage, E)\n print_stuff((3*M)//4, (3*N)//4, finalImage, E)\n \n print(\"Wrote \"+output_file+\"\")\n \n cv2.imwrite(output_file, finalImage)\n \n\n \n \n \n","sub_path":"hw2/p4_sharp_focus.py","file_name":"p4_sharp_focus.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183180284","text":"# Project settings, Create new Python SDK, Virtual Environment\n# source ../venv/bin/activate\n# pip install numpy 
matplotlib h5py scipy Image\n# Right click lab2.py, Run\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n\nm_train = train_set_x_orig.shape[0]\nm_test = test_set_x_orig.shape[0]\nnum_px = train_set_x_orig.shape[2]\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\n\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1)\ntrain_set_x_flatten = train_set_x_flatten.T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))\n\nindex = 25\nplt.imshow(train_set_x_orig[index])\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")\nplt.show()\n\ntrain_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\ndef initialize_with_zeros(dim):\n w = np.zeros((dim, 1))\n b = 0\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n return w, b\n\ndef propagate(w, b, X, Y):\n m = X.shape[1]\n A = sigmoid(np.dot(w.T, X) + b) # compute activation\n cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m # compute cost\n dw = np.dot(X, (A - Y).T) / m # 2,3 * 3,1 => 2,1\n db = np.sum(A - Y) / m\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n grads = {\"dw\": dw, \"db\": db}\n return grads, cost\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs = []\n\n for i in range(num_iterations):\n grads, cost = propagate(w, b, X, Y)\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n w = w - learning_rate * dw\n b = b - learning_rate * db\n if i % 100 == 0:\n costs.append(cost)\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n params = {\"w\": w,\n \"b\": b}\n grads = {\"dw\": dw,\n \"db\": db}\n return params, grads, costs\n\ndef predict(w, b, X):\n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n A = sigmoid(np.dot(w.T, X) + b)\n\n for i in range(A.shape[1]):\n thresholder = lambda t: 1 if t > 0.5 else 0\n vfunc = np.vectorize(thresholder)\n Y_prediction = vfunc(A)\n\n assert(Y_prediction.shape == (1, m))\n return Y_prediction\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n w, b = initialize_with_zeros(X_train.shape[0])\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n w = parameters[\"w\"]\n b = 
parameters[\"b\"]\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test,\n \"Y_prediction_train\" : Y_prediction_train,\n \"w\" : w,\n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n\n return d\n\nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)\n\n# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[d[\"Y_prediction_test\"][0,index]].decode(\"utf-8\") + \"\\\" picture.\")\n\n# Plot learning curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()\n\nlearning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations (hundreds)')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()\n\n\nfname = \"cat.jpg\"\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573124024","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\ndef update(apps, schema_editor):\n # Can't figure out how to get allauth models with apps.get_model\n SocialAccount = [m for m in apps.get_models() if m.__name__=='SocialAccount'][0]\n SocialApp = [m for m in apps.get_models() if m.__name__=='SocialApp'][0]\n SocialToken = [m for m in apps.get_models() if m.__name__=='SocialToken'][0]\n\n for acc in SocialAccount.objects.all():\n acc.extra_data_encrypted = acc.extra_data\n acc.save()\n\n for app in SocialApp.objects.all():\n app.client_id_encrypted = app.client_id\n app.key_encrypted = app.key\n app.secret_encrypted = app.secret\n app.save()\n\n for tok in SocialToken.objects.all():\n tok.token_encrypted = tok.token\n tok.token_secret_encrypted = tok.token_secret\n tok.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('socialaccount', '0005_auto_20200209_1049'),\n ]\n\n operations = [\n migrations.RunPython(update)\n 
]\n","sub_path":"allauth/socialaccount/migrations/0006_migrate_encrypted_fields.py","file_name":"0006_migrate_encrypted_fields.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376168549","text":"from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileAllowed\nfrom wtforms import (\n StringField, SubmitField, BooleanField, FloatField,\n SelectField, SelectMultipleField, DateField, HiddenField\n)\nfrom wtforms.validators import (\n DataRequired, Optional, ValidationError, NumberRange\n)\nfrom wtforms_sqlalchemy.fields import QuerySelectField\nfrom flask_uploads import UploadSet, IMAGES\n\nfrom app import db, images\nfrom database.models import Region, Developer\n\n\nclass TranslatedDateTime(DateField):\n\n def gettext(self, string):\n return \"Nieprawidłowy format daty.\"\n\n\nclass TranslatedFloadField(FloatField):\n\n def gettext(self, string):\n return \"Nieprawidłowa wartość liczbowa.\"\n\n\nclass NonValidatingQuerySelectField(QuerySelectField):\n\n def pre_validate(self, form):\n pass\n\n\nclass OffersFilterForm(FlaskForm):\n region_id = QuerySelectField(\n \"Region\", query_factory=lambda: db.session.query(Region).all()\n )\n developer_id = QuerySelectField(\n \"Developer\", query_factory=lambda: db.session.query(Developer).all()\n )\n query = StringField(\"Query\")\n submit = SubmitField(\"Submit\")\n\n\n\nclass OfferRequestForm(FlaskForm):\n title = StringField(\"Tytuł\", validators=[DataRequired(\"To pole jest wymagane.\")])\n www = StringField(\"WWW\")\n imgfile = FileField(\n \"Wizualizacja\", \n validators=[\n FileAllowed(images, \"Nieprawidłowy format pliku (dopuszczalne formaty: .jpg, .jpe, .jpeg, .png, .gif, .svg, oraz .bmp).\")]\n )\n\n # Use non-validation field in order to allow users to pass new regions,\n # that are not in db (through hidden field region_). Users with \n # create/update privileges can create new regions with modal. Users \n # without those privileges can pass name of new region.\n region = NonValidatingQuerySelectField(\n \"Region\", allow_blank=True,\n query_factory=lambda: db.session.query(Region).all(),\n validators=(Optional(),)\n )\n\n address = StringField(\"Adres\")\n latitude = TranslatedFloadField(\n validators=(\n Optional(), \n NumberRange(-90, 90, \"Szerokość geograficzna musi się zawierać w przedziale od -90 do 90.\")\n )\n )\n longitude = FloatField(\n validators=(\n Optional(), \n NumberRange(-180, 180, \"Długość geograficzna musi się zawierać w przedziale od -180 do 180.\")\n )\n )\n\n # Use non-validation field in order to allow users to pass new developers,\n # that are not in db (through hidden field developer_). Users with \n # create/update privileges can create new developers with modal. Users \n # without those privileges can pass name of new developer.\n developer = NonValidatingQuerySelectField(\n \"Deweloper\", \n allow_blank=True,\n query_factory=lambda: db.session.query(Developer).all(),\n validators=(Optional(),)\n )\n\n start_date = TranslatedDateTime(\n \"Data Rozpoczęcia\", format=\"%Y-%m-%d\", validators=(Optional(),)\n )\n end_date = TranslatedDateTime(\n \"Data Zakończenia\", format=\"%Y-%m-%d\", validators=(Optional(),)\n )\n\n # Fields use for passing new regions/developers by users without\n # create/update privileges. 
\n region_ = HiddenField(\"_Region\")\n developer_ = HiddenField(\"_Developer\")\n\n parser = StringField(\"Parser\")\n parser_active = BooleanField(\"Parser Status\")\n\n active = BooleanField(\"Status (Aktywna)\")\n\n comment = StringField()\n submit = SubmitField(\"Wyślij\")","sub_path":"app/offers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537030847","text":"STACK_ENV_VERSIONINFO=\"20190416.1\"\n\nimport boto3\nimport botocore\nimport os, sys, datetime\nfrom os.path import dirname, abspath\nimport subprocess\nimport zipfile\nimport shutil\nfrom shutil import copyfile\nimport shlex\nimport argparse\n\nERRORCOUNT=0\n\nprint(\"STARTING IMPORT VERSION \" + STACK_ENV_VERSIONINFO)\n\n#Parse all the arguments\nparser = argparse.ArgumentParser(description='Import application database')\nparser.add_argument('APP_PLATFORM_NAME', help='AUTOMATIC App platform name (e.g., drupal)')\nparser.add_argument('APP_PLATFORM_MAJOR_VERSION', help='AUTOMATIC App platform major version number (e.g., 7, 8 or 9)')\nparser.add_argument('SRC_FOLDERNAME', nargs='?', default='?', help='REQUIRED subfolder name (environment name from the export operation, ignored if -lc flag is used)')\nparser.add_argument('SRC_FILENAME', nargs='?', default='?', help='REQUIRED filename to import (or ? to get listing of available files)')\nparser.add_argument('--localcopy', '-lc', help='Import from local file system instead of s3', action='store_true')\nparser.add_argument('--keepzip', '-kz', help='Keep the downloaded zip file instead of deleting it at the end', action='store_true')\nargs = parser.parse_args()\n\n#Assign arguments to our internal variables\nAPP_PLATFORM_NAME=args.APP_PLATFORM_NAME\nAPP_PLATFORM_MAJOR_VERSION=args.APP_PLATFORM_MAJOR_VERSION\nsrc_env_name = args.SRC_FOLDERNAME\nfile_name = args.SRC_FILENAME\nLOCAL_FILES=args.localcopy\nKEEP_ZIP=args.keepzip\n\nprint(\"... LOCAL_FILES = \" + str(LOCAL_FILES))\nprint(\"... 
KEEP_ZIP = \" + str(KEEP_ZIP))\n\nif LOCAL_FILES and KEEP_ZIP:\n raise Exception(\"The --keepzip flag is only relevant for s3 downloads!\")\n\nAPP_PLATFORM_ID = APP_PLATFORM_NAME+APP_PLATFORM_MAJOR_VERSION\n\nprint(\"## Importing database for \" + APP_PLATFORM_ID)\n\nif not LOCAL_FILES:\n print(\"## From environment \" + src_env_name)\n\nprint(\"## From file name \" + file_name)\n\nfrom hostlib import environment\n\nmyenv = environment.Environment()\n\n#Set some important values now\nDOCKER_WEBSERVER = myenv.env_profile_vars['WEB_CONTAINERNAME'].strip()\n\nBUCKET_NAME = myenv.env_profile_vars['SHARED_S3_STAGING_BUCKET_NAME']\nBUCKET_PATH = myenv.env_profile_vars['S3_DBDUMPS_FILEDIR'] + \"/\" + APP_PLATFORM_ID\n\nENVNAME = myenv.env_profile_vars['ENVIRONMENT_NAME']\nPROJECT = myenv.env_profile_vars['PROJECT_NAME']\nBUCKET_ENV_PATH = BUCKET_PATH + '/' + ENVNAME\n\nDRUSH_PATH_TO_WEBROOT = myenv.env_profile_vars['DOCROOT_PATH']\n\nREPO_FOLDER_NAME = myenv.env_profile_vars['REPO_FOLDER_NAME']\nWEB_DOCROOT_PATH = myenv.env_profile_vars['WEB_DOCROOT_PATH']\nSTACK_FOLDER_PATH = myenv.env_profile_vars['FOLDER_PATH']\n\nWEB_HOST_SHARED_TMP_PATH=myenv.env_profile_vars['WEB_HOST_SHARED_TMP_PATH']\nWEB_INTERNAL_SHARED_TMP_PATH=myenv.env_profile_vars['WEB_INTERNAL_SHARED_TMP_PATH']\n\nprint(\"## WEB_HOST_SHARED_TMP_PATH = \" + WEB_HOST_SHARED_TMP_PATH)\nprint(\"## WEB_INTERNAL_SHARED_TMP_PATH = \" + WEB_INTERNAL_SHARED_TMP_PATH)\n\n#Check the platform name and version parameters\nif APP_PLATFORM_NAME=='drupal':\n if APP_PLATFORM_MAJOR_VERSION == '7':\n DRUSH_DROP_CMD='sql-drop'\n elif APP_PLATFORM_MAJOR_VERSION == '8':\n DRUSH_DROP_CMD='sql:drop'\n elif APP_PLATFORM_MAJOR_VERSION == '9':\n DRUSH_DROP_CMD='sql:drop'\n else:\n raise Exception(\"INVALID APP_PLATFORM_MAJOR_VERSION=\"+APP_PLATFORM_MAJOR_VERSION)\nelse:\n raise Exception(\"INVALID APP_PLATFORM_NAME=\"+APP_PLATFORM_NAME)\n\nSHARED_TMP_DBBACKUPS = WEB_HOST_SHARED_TMP_PATH + '/db-backups'\nprint(\"## SHARED_TMP_DBBACKUPS=\"+SHARED_TMP_DBBACKUPS)\n\nSCHEMA_FILE_PATH=WEB_INTERNAL_SHARED_TMP_PATH + '/db-backups/db-schema.sql'\nDATA_FILE_PATH=WEB_INTERNAL_SHARED_TMP_PATH+'/db-backups/db-data.sql'\n\n#Find the real running container name\nrealContainerName = myenv.getRunningDockerContainerName(DOCKER_WEBSERVER)\nif not realContainerName:\n print(\"FAILED to find a running container matching name \" + DOCKER_WEBSERVER)\n exit(1)\nDOCKER_WEBSERVER=realContainerName\nprint(\"## DOCKER_WEBSERVER = \" + DOCKER_WEBSERVER)\n\nIS_PRODUCTION = myenv.env_profile_vars['IS_PRODUCTION']\nif ( IS_PRODUCTION != 'YES' and IS_PRODUCTION != 'NO' ):\n print('ENVIRONMENT ERROR: Missing or invalid value for IS_PRODUCTION!')\n print(' IS_PRODUCTION=\"' + IS_PRODUCTION + '\"')\n print(' TIP: Fix this by editing your environment.txt file.')\n exit(2)\n\nDB_BACKUPS_DIRECTORY = STACK_FOLDER_PATH + '/host-utils/db-backups'\nDB_BACKUPS_DIRECTORY_LOCAL = STACK_FOLDER_PATH + '/host-utils/db-backups-local'\n\nif not os.path.isdir(SHARED_TMP_DBBACKUPS):\n os.makedirs(SHARED_TMP_DBBACKUPS)\n print(\"Created SHARED_TMP_DBBACKUP directory %s was created.\" %SHARED_TMP_DBBACKUPS)\n\nBUCKET_ENV_PATH = BUCKET_PATH + '/' + src_env_name\ns3_key = BUCKET_ENV_PATH + '/' + file_name\n\ndef downloadS3File(fileS3Path):\n '''\n Download the file from S3 and return True or fail and return False\n '''\n print(\"Will download \" + fileS3Path)\n try:\n\n s3client = boto3.client('s3')\n s3client.download_file(BUCKET_NAME, fileS3Path, DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped/' + file_name)\n 
print(\"Downloaded \" + fileS3Path)\n return True\n\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"Did not find file\",fileS3Path,\"in\",BUCKET_NAME)\n return False\n else:\n raise\n\ndef showFolders(prefixFilter):\n '''\n Show all the S3 folders in the path\n '''\n print(\"## START Available Folders in\", prefixFilter)\n prefixLen1=len(prefixFilter)+1\n s3client = boto3.resource('s3')\n THEBUCKET=s3client.Bucket(BUCKET_NAME)\n THEBUCKET.objects.filter(Prefix=prefixFilter)\n thingsfound={}\n for object in THEBUCKET.objects.filter(Prefix=prefixFilter):\n foundthing=object.key[prefixLen1:]\n slashpos=foundthing.find('/')\n if slashpos > -1:\n nicename=object.key[prefixLen1:prefixLen1 + slashpos]\n thingsfound[nicename]=nicename\n\n thenum=0\n for nicename in thingsfound:\n thenum+=1\n print(thenum,\")\\t\",nicename)\n\n print(\"## END Available Folders in\", prefixFilter)\n\ndef showS3Files(prefixFilter):\n '''\n Show all the S3 files in the path\n '''\n print(\"## START Available Files in\", prefixFilter)\n prefixLen1=len(prefixFilter)+1\n s3client = boto3.resource('s3')\n THEBUCKET=s3client.Bucket(BUCKET_NAME)\n THEBUCKET.objects.filter(Prefix=prefixFilter)\n thenum=0\n for object in THEBUCKET.objects.filter(Prefix=BUCKET_ENV_PATH):\n thenum+=1\n print(thenum,\")\\t\",object.key[prefixLen1:])\n print(\"## END Available Files in\", prefixFilter)\n\ndef showLocalFiles(dirpath, suffixFilter):\n '''\n Show all the files in the path\n '''\n print(\"## START Available Files in\", dirpath)\n if not os.path.isdir(dirpath):\n print(\"ERROR directory is MISSING!\") \n else: \n files = os.listdir(dirpath)\n thenum=0\n for file in files:\n if suffixFilter == None or file.endswith(suffixFilter):\n thenum+=1\n print(thenum,\")\\t\",file)\n\n print(\"## END Available Files in\", dirpath)\n\nif ( IS_PRODUCTION == 'YES' ):\n\n print(\"WARNING: You are going to replace database in PRODUCTION!\")\n\n yyyymmdd = datetime.datetime.today().strftime('%Y-%m-%d')\n magicText = \"OVERWRITE_PRODUCTION_NOW_\" + yyyymmdd\n\n print('Type \"' + magicText + '\" to proceed')\n userinput = input()\n\n print( 'debug userinput=' + userinput )\n\n if ( userinput != magicText):\n #Too bad!\n print(\"Wrong input --- aborting now!!!\")\n exit(2)\n\n print()\n print(\"OK --- we will now OVERWRITE existing PRODUCTION database!\")\n print()\n\n#Just show listing?\nif LOCAL_FILES:\n #From local filesystem\n if file_name == '?':\n print(\"Missing the FILENAME argument!\")\n showLocalFiles(DB_BACKUPS_DIRECTORY_LOCAL, '.zip')\n print(\"NOTE: Use the --help argument to see full usage information\")\n exit(2)\n\n if os.path.isfile(DB_BACKUPS_DIRECTORY_LOCAL + '/' + file_name):\n print(\"Will import from \" + DB_BACKUPS_DIRECTORY_LOCAL + '/' + file_name)\n else:\n print(\"ERROR no file found with name\", file_name)\n showLocalFiles(DB_BACKUPS_DIRECTORY_LOCAL, '.zip')\n print(\"NOTE: Use the --help argument to see full usage information\")\n exit(2)\n\nelse:\n #From s3\n if src_env_name == '?':\n print(\"Missing the FOLDER NAME (source environment) argument!\")\n showFolders(BUCKET_PATH)\n print(\"NOTE: Use the --help argument to see full usage information\")\n exit(2)\n if file_name == '?':\n print(\"Missing the FILENAME argument!\")\n showS3Files(BUCKET_ENV_PATH)\n print(\"NOTE: Use the --help argument to see full usage information\")\n exit(2)\n\n#Create local folder if missing\nif not os.path.exists(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped'):\n os.makedirs(DB_BACKUPS_DIRECTORY_LOCAL 
+ '/unzipped')\n    print(\"Created \", DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n\n# GET THE BACKUP FILE\ntry:\n    if LOCAL_FILES:\n        zip_ref = zipfile.ZipFile(DB_BACKUPS_DIRECTORY_LOCAL + '/' + file_name, 'r')\n        zip_ref.extractall(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n        print(\"Extracted files to \" + DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n        zip_ref.close()\n    else:\n        # DOWNLOAD ZIPPED DB-BACKUP FROM S3\n        if not downloadS3File(s3_key):\n            showS3Files(BUCKET_ENV_PATH)\n            exit(3)\n\n        # UNZIP THE BACKUP FILE\n        zip_ref = zipfile.ZipFile(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped/' + file_name, 'r')\n        zip_ref.extractall(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n        print(\"Extracted files to \" + DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n        zip_ref.close()\n        print(\"Downloaded zip file extracted\")\n\n        DOWNLOADEDZIP=DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped/' + file_name\n        if KEEP_ZIP:\n            print(\"Keeping \" + file_name)\n            copyfile(DOWNLOADEDZIP, DB_BACKUPS_DIRECTORY_LOCAL + '/' + file_name)\n        else:\n            os.remove(DOWNLOADEDZIP)\n            print(\"Deleted \" + DOWNLOADEDZIP)\n\nexcept:\n    print(\"ERROR FAILED GETTING \" + file_name)\n    print(\"DETAILS:\", sys.exc_info()[0])\n    exit(2)\n\ntry:\n    # DROP THE CURRENT DATABASE\n    print('Dropping all the tables in the current database.')\n    resultdropcode = subprocess.call(\n        ['docker', 'exec', '--user=web.mgmt', DOCKER_WEBSERVER, 'drush', DRUSH_DROP_CMD, '-y', '--root=' + DRUSH_PATH_TO_WEBROOT])\n    if (resultdropcode != 0):\n        # TODO WRITE TO LOG\n        print(\"DETECTED ERROR DROPPING DATABASE TABLES!\")\n    else:\n        print('Current database has been flushed.')\n\nexcept:\n    e = sys.exc_info()[0]\n    print(\"ERROR FAILED DROPPING CURRENT DATABASE \" + str(e))\n    print(\"DETAILS:\", sys.exc_info()[0])\n    exit(2)\n\n\n# MOVE db-schema.sql FILE INTO THE SHARED SPACE\ntry:\n\n    print(\"Will now copy SCHEMA file to \", SHARED_TMP_DBBACKUPS + '/db-schema.sql')\n    copyfile(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped/db-schema.sql', SHARED_TMP_DBBACKUPS + '/db-schema.sql')\n    print(\"Finished copy SCHEMA file to \", SHARED_TMP_DBBACKUPS + '/db-schema.sql')\n\nexcept Exception as ex:\n    print(\"ERROR FAILED COPY! 
\", ex)\n print(\"DETAILS:\", sys.exc_info()[0])\n exit(2)\n\n# Import the schema file\nprint(\"Will now import SCHEMA file from \" + SCHEMA_FILE_PATH)\nRUNTHIS=shlex.split('docker exec --user=web.mgmt ' + DOCKER_WEBSERVER + ' sh -c \"drush --root=' + DRUSH_PATH_TO_WEBROOT + ' sql-cli < ' + SCHEMA_FILE_PATH + '\"')\nresultschemacode = subprocess.call(RUNTHIS)\nif (resultschemacode != 0):\n # TODO WRITE TO LOG\n print(\"DETECTED ERROR IMPORTING DATABASE SCHEMA!\")\n ERRORCOUNT+=1\nelse:\n print('Database schema imported')\n\n# MOVE db-data.sql FILE INTO THE SHARED SPACE\nprint(\"Will now copy DATA file to \", SHARED_TMP_DBBACKUPS + '/db-data.sql')\ncopyfile(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped/db-data.sql', SHARED_TMP_DBBACKUPS + '/db-data.sql')\nprint(\"Finished copy DATA file to \", SHARED_TMP_DBBACKUPS + '/db-data.sql')\n\n# Import the data file\nprint(\"Will now import DATA file from \" + DATA_FILE_PATH)\nRUNTHIS=shlex.split('docker exec --user=web.mgmt ' + DOCKER_WEBSERVER + ' sh -c \"drush --root=' + DRUSH_PATH_TO_WEBROOT + ' sql-cli < ' + DATA_FILE_PATH + '\"')\nresultDataCode = subprocess.call(RUNTHIS)\n\nif (resultDataCode != 0):\n # TODO WRITE TO LOG\n print(\"DETECTED ERROR IMPORTING DATABASE DATA!\")\n ERRORCOUNT+=1\nelse:\n print('Database data imported')\n\nif ERRORCOUNT == 0:\n # Only remove these files if we did not have errors -- so easier to debug\n os.remove(SHARED_TMP_DBBACKUPS + '/db-schema.sql')\n os.remove(SHARED_TMP_DBBACKUPS + '/db-data.sql')\n print(\"Removed SQL files from \" + SHARED_TMP_DBBACKUPS)\n shutil.rmtree(DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n print(\"Removed unzipped files from \" + DB_BACKUPS_DIRECTORY_LOCAL + '/unzipped')\n\nif(ERRORCOUNT > 0):\n print('## Import of ' + APP_PLATFORM_ID + ' data finished with ' + str(ERRORCOUNT) + ' errors')\n exit(2)\n\nprint('## Import of ' + APP_PLATFORM_ID + ' data finished with ' + str(ERRORCOUNT) + ' errors')\n","sub_path":"devutils/zcmd_python/stack/db-import.py","file_name":"db-import.py","file_ext":"py","file_size_in_byte":13127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334159138","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\nfrom django.utils import timezone\r\nfrom django.db.models.signals import post_save\r\nfrom PIL import Image\r\n\r\n\r\n# # Create your models here.\r\n\r\nCOLLOR_AFTER_WHATCH =(\r\n ('R','red'),\r\n ('B','grey')\r\n)\r\n\r\nclass Comments(models.Model):\r\n user = models.ForeignKey(User, on_delete=models.CASCADE)\r\n comment = models.CharField(max_length=300)\r\n po = models.ForeignKey('Post', on_delete=models.CASCADE, related_name='comments', null = True , blank = True)\r\n date = models.DateTimeField(default=timezone.now)\r\n\r\n def __str__(self):\r\n return f\"{self.user} comments on {self.po.author}\\'s image\"\r\n class Meta:\r\n ordering = ('-date',)\r\n\r\nclass Like(models.Model):\r\n user = models.ForeignKey(User, on_delete=models.CASCADE)\r\n po = models.ForeignKey('Post', on_delete=models.CASCADE, related_name='likes_related_name', null = True)\r\n date = models.DateTimeField(default=timezone.now)\r\n def __str__(self):\r\n return f\"{self.user} Likes {self.po}\"\r\n class Meta:\r\n ordering = ('-date',)\r\n\r\n\r\nclass Post(models.Model):\r\n author = models.ForeignKey(User, on_delete=models.CASCADE)\r\n image = models.ImageField(upload_to='posts_imgs' )\r\n discription = models.TextField(max_length=1000)\r\n post_likes_number = models.IntegerField(default=0)\r\n 
date = models.DateTimeField(default=timezone.now)\r\n\r\n def __str__(self):\r\n return f\"{self.author.username} post\"\r\n\r\n class Meta:\r\n ordering = ('-date',)\r\n\r\ndef add24hs():\r\n return timezone.now() + timezone.timedelta(days=1)\r\n \r\nclass Story(models.Model):\r\n author = models.ForeignKey(User, on_delete=models.CASCADE)\r\n image = models.ImageField(upload_to='posts_imgs', null=True,blank=True )\r\n discription = models.TextField(max_length=1000, null=True,blank=True)\r\n views = models.IntegerField(default=0)\r\n collor = models.CharField(max_length=1,choices=COLLOR_AFTER_WHATCH,default='R')\r\n posting_date = models.DateTimeField(default=timezone.now)\r\n end_date = models.DateTimeField(default=add24hs)\r\n active = models.BooleanField(default=True)\r\n\r\n def __str__(self):\r\n return f\"{self.author.username} story\"\r\n\r\n\r\n class Meta:\r\n ordering = ('-posting_date',)\r\n \r\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424808548","text":"from typing import Callable, List\nfrom functools import wraps\nimport hashlib\n\n\nLOCAL = dict()\n\ndef _hashargs(fname, argnames):\n h = f'{fname};' + ','.join(argnames)\n digest = hashlib.sha256(h.encode()).hexdigest()\n\n return digest\n\ndef partial_memoize(hash_names: List[str], store: str = 'local'):\n def func_decorator(f: Callable):\n @wraps(f)\n def wrapper(*args, **kwargs):\n argnames = [str(a) for a in args if a in hash_names]\n argnames.extend([str(v) for k, v in kwargs.items() if k in hash_names])\n \n # get the parameter hash\n h = _hashargs(f.__name__, argnames)\n \n # check if result exists\n if h in LOCAL:\n return LOCAL.get(h)\n else:\n # process\n result = f(*args, **kwargs)\n LOCAL[h] = result\n return result\n return wrapper\n return func_decorator\n","sub_path":"ruins/core/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106042657","text":"import uuid\nfrom django.db import models\nfrom django.contrib import admin\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom core.models import (\n Store,\n User\n)\n\n\nclass MessagingSettings(models.Model):\n \"\"\"\n\n \"\"\"\n CHAT_DISPLAY_AS_CHOICES = (\n (\"STORE\", \"Name of store\"),\n (\"USER\", \"User's full name\"),\n (\"CUSTOM\", \"Custom value\")\n )\n\n CHAT_STATUS_CHOICES = (\n (\"ONLINE\", \"Online\"),\n (\"AWAY\", \"Away\"),\n (\"HIDDEN\", \"Hidden\")\n )\n\n CHAT_AVAILABLE_TO_CHOICES = (\n (\"NOBODY\", \"Nobody\"),\n (\"CUSTOMER\", \"Customers\"),\n (\"STORE\", \"Everybody\")\n )\n\n uuid = models.UUIDField(default=uuid.uuid4, editable=False)\n store = models.OneToOneField(Store, on_delete=models.CASCADE)\n\n chat_display_as = models.CharField(\n max_length=8, choices=CHAT_DISPLAY_AS_CHOICES, default=\"STORE\")\n chat_display_name = models.CharField(max_length=64, null=True, blank=True)\n default_status = models.CharField(\n max_length=8, choices=CHAT_STATUS_CHOICES, default=\"ONLINE\")\n chat_available_to = models.CharField(\n max_length=8, choices=CHAT_AVAILABLE_TO_CHOICES, default=\"CUSTOMER\")\n play_sounds = models.BooleanField(default=True)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n\n@receiver(post_save, sender=Store)\ndef store_post_save(sender, 
instance, **kwargs):\n    if kwargs[\"created\"]:\n        MessagingSettings.objects.create(store=instance)\n\n\nclass Chat(models.Model):\n    \"\"\"\n\n    \"\"\"\n    uuid = models.UUIDField(default=uuid.uuid4, editable=False)\n    store = models.ForeignKey(Store, on_delete=models.CASCADE)\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n\n    @property\n    def messages(self):\n        return Message.objects.filter(chat=self).order_by(\"created_at\")\n\n    class Meta:\n        unique_together = [\"store\", \"user\"]\n\n\n\nclass Message(models.Model):\n    SENDER_CHOICES = (\n        (\"STORE\", \"Store\"),\n        (\"USER\", \"User\")\n    )\n\n    uuid = models.UUIDField(default=uuid.uuid4, editable=False)\n    chat = models.ForeignKey(Chat, on_delete=models.CASCADE)\n    sender = models.CharField(max_length=8, choices=SENDER_CHOICES, default=\"USER\")\n    message = models.TextField()\n    seen = models.BooleanField(default=False)\n    acknowledged = models.BooleanField(default=False)\n\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        ordering = [\"-created_at\"]\n\n\n\nadmin.site.register(MessagingSettings)\nadmin.site.register(Chat)\nadmin.site.register(Message)\n","sub_path":"messaging/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448344253","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom posenet.params import SCALE_FACTOR\r\nfrom posenet.pose_constants import LOCAL_MAXIMUM_RADIUS, NUM_KEYPOINTS, CONNECTED_PART_INDICES  # CONNECTED_PART_INDICES is used by get_adjacent_keypoints below\r\nfrom posenet.decode import decode_pose\r\n\r\ndef within_nms_radius_fast(pose_coords, squared_nms_radius, point):\r\n    if not pose_coords.shape[0]:\r\n        return False\r\n    return np.any(np.sum((pose_coords - point) ** 2, axis=1) <= squared_nms_radius)\r\n\r\ndef get_instance_score_fast(exist_pose_coords, squared_nms_radius, keypoint_scores, keypoint_coords):\r\n    if exist_pose_coords.shape[0]:\r\n        s = np.sum((exist_pose_coords - keypoint_coords) ** 2, axis=2) > squared_nms_radius\r\n        not_overlapped_scores = np.sum(keypoint_scores[np.all(s, axis=0)])\r\n    else:\r\n        not_overlapped_scores = np.sum(keypoint_scores)\r\n    return not_overlapped_scores / len(keypoint_scores)\r\n\r\ndef build_part_with_score_torch(score_threshold, local_max_radius, heatmaps_result):\r\n    lmd = 2 * local_max_radius + 1\r\n    max_vals = F.max_pool2d(heatmaps_result, lmd, stride=1, padding=1)\r\n    max_loc = (heatmaps_result == max_vals) & (heatmaps_result >= score_threshold)\r\n    max_loc_idx = max_loc.nonzero()\r\n    scores_vec = heatmaps_result[max_loc]\r\n    sort_idx = torch.argsort(scores_vec, descending=True)\r\n    return scores_vec[sort_idx], max_loc_idx[sort_idx]\r\n\r\ndef decode_multiple_poses(heatmaps_result, offsets, displacements_fwd, displacements_bwd, output_stride,\r\n                          max_pose_detections, score_threshold, nms_radius, min_pose_score):\r\n\r\n    part_scores, part_idx = build_part_with_score_torch(score_threshold, LOCAL_MAXIMUM_RADIUS, heatmaps_result)\r\n\r\n    part_scores = part_scores.cpu().numpy()\r\n    part_idx = part_idx.cpu().numpy()\r\n\r\n    heatmaps_result = heatmaps_result.cpu().numpy()\r\n    height = heatmaps_result.shape[1] #68\r\n    width = heatmaps_result.shape[2] #121\r\n\r\n    # change dimensions from (x, h, w) to (x//2, h, w, 2) to allow return of complete coord array\r\n    offsets = 
offsets.cpu().numpy().reshape(2, -1, height, width).transpose((1, 2, 3, 0))\r\n displacements_fwd = displacements_fwd.cpu().numpy().reshape(2, -1, height, width).transpose((1, 2, 3, 0))\r\n displacements_bwd = displacements_bwd.cpu().numpy().reshape(2, -1, height, width).transpose((1, 2, 3, 0))\r\n\r\n squared_nms_radius = nms_radius ** 2\r\n pose_count = 0\r\n boxs = []\r\n pose_scores = np.zeros(max_pose_detections)\r\n pose_keypoint_scores = np.zeros((max_pose_detections, NUM_KEYPOINTS))\r\n pose_keypoint_coords = np.zeros((max_pose_detections, NUM_KEYPOINTS, 2))\r\n\r\n for root_score, (root_id, root_coord_y, root_coord_x) in zip(part_scores, part_idx):\r\n root_coord = np.array([root_coord_y, root_coord_x])\r\n root_image_coords = root_coord * output_stride + offsets[root_id, root_coord_y, root_coord_x]\r\n\r\n if within_nms_radius_fast(\r\n pose_keypoint_coords[:pose_count, root_id, :], squared_nms_radius, root_image_coords):\r\n continue\r\n\r\n keypoint_scores, keypoint_coords = decode_pose(root_score,\r\n root_id, root_image_coords,\r\n heatmaps_result, offsets, output_stride,\r\n displacements_fwd, displacements_bwd)\r\n\r\n pose_score = get_instance_score_fast(\r\n pose_keypoint_coords[:pose_count, :, :], squared_nms_radius, keypoint_scores, keypoint_coords)\r\n\r\n if min_pose_score == 0. or pose_score >= min_pose_score:\r\n pose_scores[pose_count] = pose_score\r\n pose_keypoint_scores[pose_count, :] = keypoint_scores\r\n pose_keypoint_coords[pose_count, :, :] = keypoint_coords\r\n pose_count += 1\r\n boxs.append(getBoundingBoxPoints(keypoint_coords))\r\n\r\n if pose_count >= max_pose_detections:\r\n break\r\n\r\n return pose_scores, pose_keypoint_scores, pose_keypoint_coords, boxs\r\n\r\ndef getBoundingBoxPoints(keypoint_coords):\r\n keypoint_coords = keypoint_coords/SCALE_FACTOR\r\n keypoint_coords = keypoint_coords.astype(np.int32)\r\n maxY = keypoint_coords[:,0].max()\r\n minY = keypoint_coords[:,0].min()\r\n maxX = keypoint_coords[:,1].max()\r\n minX = keypoint_coords[:,1].min()\r\n return [(minX, minY), (minX, maxY), (maxX, minY), (maxX, maxY)]\r\n\r\ndef draw_skel_and_kp(draw_image, pose_scores, keypoint_scores, keypoint_coords, min_pose_score, min_part_score):\r\n\r\n out_img = draw_image\r\n adjacent_keypoints = []\r\n cv_keypoints = []\r\n for ii, score in enumerate(pose_scores):\r\n if score < min_pose_score:\r\n continue\r\n\r\n new_keypoints = get_adjacent_keypoints(\r\n keypoint_scores[ii, :], keypoint_coords[ii, :, :]/SCALE_FACTOR, min_part_score)\r\n adjacent_keypoints.extend(new_keypoints)\r\n\r\n for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):\r\n if ks < min_part_score:\r\n continue\r\n cv_keypoints.append(cv2.KeyPoint(kc[1]/SCALE_FACTOR, kc[0]/SCALE_FACTOR, 10. 
* ks))\r\n\r\n if cv_keypoints:\r\n out_img = cv2.drawKeypoints(\r\n out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),\r\n flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))\r\n return out_img, cv_keypoints\r\n\r\ndef draw_keypoint(draw_image, pose_scores, keypoint_scores, keypoint_coords, min_pose_score, min_part_score):\r\n out_img = draw_image\r\n cv_keypoints = []\r\n\r\n keypoint_coords[:, :, :] = keypoint_coords[:, :, :]/SCALE_FACTOR\r\n\r\n for ii, score in enumerate(pose_scores):\r\n if score < min_pose_score:\r\n continue\r\n\r\n for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):\r\n if ks < min_part_score:\r\n continue\r\n cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10.))\r\n\r\n if cv_keypoints:\r\n out_img = cv2.drawKeypoints(\r\n out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),\r\n flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n return out_img, cv_keypoints\r\n\r\ndef get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):\r\n results = []\r\n for left, right in CONNECTED_PART_INDICES:\r\n if keypoint_scores[left] < min_confidence or keypoint_scores[right] < min_confidence:\r\n continue\r\n results.append(\r\n np.array([keypoint_coords[left][::-1], keypoint_coords[right][::-1]]).astype(np.int32),\r\n )\r\n return results\r\n\r\ndef lines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0)):\r\n return out_img\r\n","sub_path":"production/opticalflow_package/posenet/decode_multi.py","file_name":"decode_multi.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"607933244","text":"l=[]\ndef soinsu(n):\n for i in range(2,n+1):\n if n%i==0:\n l.append(i)\n if n/i!=1:\n soinsu(n//i)\n break\nsoinsu(600851475143)\nprint(sorted(list(set(l)))[-1])\n","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330200378","text":"from django.urls import path\n\nfrom .views import *\n\nurlpatterns = [\n path('', home_page),\n path('/', post_with_comments),\n path('new-post/', post_creation_page),\n path('login-goto-news/', LoginView.as_view()),\n path('login/', RegisterView.as_view()),\n path('comments/', comments_page)\n]","sub_path":"src/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430272719","text":"import paho.mqtt.client as mqtt\n\nfrom ltr559 import LTR559\nltr559 = LTR559()\n\nfrom bme280 import BME280\nfrom pms5003 import PMS5003\nfrom enviroplus import gas\n\nimport collections, traceback\n\n\nclass EnvLogger:\n def __init__(self, client_id, host, port, username, password, prefix, use_pms5003, num_samples):\n self.bme280 = BME280()\n self.pms5003 = use_pms5003 and PMS5003() or None\n\n self.prefix = prefix\n\n self.connection_error = None\n self.client = mqtt.Client(client_id=client_id)\n self.client.on_connect = self.__on_connect\n self.client.username_pw_set(username, password)\n self.client.connect(host, port)\n\n self.samples = collections.deque(maxlen=num_samples)\n \n\n def __on_connect(self, client, userdata, flags, rc):\n errors = {\n 1: \"incorrect MQTT protocol version\",\n 2: \"invalid MQTT client identifier\",\n 3: \"server 
unavailable\",\n 4: \"bad username or password\",\n 5: \"connection refused\"\n }\n\n if rc > 0:\n self.connection_error = errors.get(rc, \"unknown error\")\n\n\n def __take_pm_readings(self):\n if self.pms5003 is None:\n return {}\n \n try:\n pm_data = self.pms5003.read()\n return {\n \"particulate/1.0\": pm_data.pm_ug_per_m3(1.0),\n \"particulate/2.5\": pm_data.pm_ug_per_m3(2.5),\n \"particulate/10.0\": pm_data.pm_ug_per_m3(10),\n }\n except:\n print(\"Failed to read from PMS5003. Resetting sensor.\")\n traceback.print_exc()\n self.pms5003.reset()\n return {}\n\n\n def take_readings(self):\n gas_data = gas.read_all()\n readings = {\n \"proximity\": ltr559.get_proximity(),\n \"lux\": ltr559.get_lux(),\n \"temperature\": self.bme280.get_temperature(),\n \"pressure\": self.bme280.get_pressure(),\n \"humidity\": self.bme280.get_humidity(),\n \"gas/oxidising\": gas_data.oxidising,\n \"gas/reducing\": gas_data.reducing,\n \"gas/nh3\": gas_data.nh3,\n }\n\n readings.update(self.__take_pm_readings())\n \n return readings\n\n\n def publish(self, topic, value):\n topic = self.prefix.strip(\"/\") + \"/\" + topic\n self.client.publish(topic, str(value))\n\n\n def update(self, publish_readings=True):\n self.samples.append(self.take_readings())\n\n if publish_readings:\n for topic in self.samples[0].keys():\n value_sum = sum([d[topic] for d in self.samples])\n value_avg = value_sum / len(self.samples)\n self.publish(topic, value_avg)\n\n self.client.loop()\n","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586282751","text":"\"\"\"example-02.py reimplements FileOpen from example-01.py as a\nfunction-based context manager\"\"\"\nimport contextlib\n\n\n@contextlib.contextmanager\ndef file_open(filename, mode):\n print(\"Opening file\")\n f = open(filename, mode)\n print(\"Starting with... 
block\")\n yield f # yield keyword will be discussed more closely in 'Generators' module\n print(\"Closing file\")\n f.close()\n\n\nif __name__ == \"__main__\":\n path = \"05-context-managers/example-01.py\"\n with file_open(path, \"r\") as f:\n print(f\"{path} has {len(f.readlines())} lines\")\n\n print(\"Finished\")\n","sub_path":"python-advanced-features-master/05-context-managers/example-02.py","file_name":"example-02.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340207969","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Wikipedia channel for IFTTT\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Copyright 2015 Ori Livneh \n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\nimport flask\nimport lxml.html\n\nfrom .utils import select, snake_case\nfrom .views import FeaturedFeedTriggerView\n\n\napp = flask.Flask(__name__)\napp.config.from_pyfile('../ifttt.cfg', silent=True)\n\n\n@app.errorhandler(401)\ndef unauthorized(e):\n \"\"\"Issue an HTTP 401 Unauthorized response with a JSON body.\"\"\"\n error = {'message': 'Unauthorized'}\n return flask.jsonify(errors=[error]), 401\n\n\n@app.after_request\ndef force_content_type(response):\n \"\"\"RFC 4627 stipulates that 'application/json' takes no charset parameter,\n but IFTTT expects one anyway. We have to twist Flask's arm to get it to\n break the spec.\"\"\"\n response.headers['Content-Type'] = 'application/json; charset=utf-8'\n return response\n\n\n@app.before_request\ndef validate_channel_key():\n \"\"\"Verify that the 'IFTTT-Channel-Key' header is present on each request\n and that its value matches the channel key we got from IFTTT. 
If a request\n fails this check, we reject it with HTTP 401.\"\"\"\n channel_key = flask.request.headers.get('IFTTT-Channel-Key')\n if not app.debug and channel_key != app.config.get('CHANNEL_KEY'):\n flask.abort(401)\n\n\n@app.route('/v1/test/setup', methods=['POST'])\ndef test_setup():\n \"\"\"Required by the IFTTT endpoint test suite.\"\"\"\n return flask.jsonify(data={'samples': {}})\n\n\n@app.route('/v1/status')\ndef status():\n \"\"\"Return HTTP 200 and an empty body, as required by the IFTTT spec.\"\"\"\n return ''\n\n\nclass PictureOfTheDay(FeaturedFeedTriggerView):\n \"\"\"Trigger view for Wikimedia Commons Picture of the Day\"\"\"\n\n feed = 'potd'\n wiki = 'commons.wikimedia.org'\n\n def parse_entry(self, entry):\n \"\"\"Scrape each PotD entry for its description and URL.\"\"\"\n item = super(PictureOfTheDay, self).parse_entry(entry)\n summary = lxml.html.fromstring(entry.summary)\n image_node = select(summary, 'a.image')\n desc_node = select(summary, '.description.en')\n item['picture_url'] = image_node.get('href')\n item['description'] = desc_node.text_content().strip()\n return item\n\n\nclass ArticleOfTheDay(FeaturedFeedTriggerView):\n \"\"\"Trigger view for English Wikipedia's Today's Featured Article.\"\"\"\n\n feed = 'featured'\n wiki = 'en.wikipedia.org'\n\n def parse_entry(self, entry):\n \"\"\"Scrape each AotD entry for its URL and title.\"\"\"\n item = super(ArticleOfTheDay, self).parse_entry(entry)\n summary = lxml.html.fromstring(entry.summary)\n read_more = select(summary, 'p:first-of-type > a:last-of-type')\n item['url'] = read_more.get('href')\n item['title'] = read_more.get('title')\n return item\n\n\nclass WordOfTheDay(FeaturedFeedTriggerView):\n \"\"\"Trigger view for English Wiktionary's Word of the Day.\"\"\"\n\n feed = 'wotd'\n wiki = 'en.wiktionary.org'\n\n def parse_entry(self, entry):\n \"\"\"Scrape each WotD entry for the word, article URL, part of speech,\n and definition.\"\"\"\n item = super(WordOfTheDay, self).parse_entry(entry)\n summary = lxml.html.fromstring(entry.summary)\n div = summary.get_element_by_id('WOTD-rss-description')\n anchor = summary.get_element_by_id('WOTD-rss-title').getparent()\n item['word'] = anchor.get('title')\n item['url'] = anchor.get('href')\n item['part_of_speech'] = anchor.getparent().getnext().text_content()\n item['definition'] = div.text_content().strip()\n return item\n\n\nfor view_class in (ArticleOfTheDay, PictureOfTheDay, WordOfTheDay):\n slug = snake_case(view_class.__name__)\n app.add_url_rule('/v1/triggers/%s' % slug,\n view_func=view_class.as_view(slug))\n","sub_path":"ifttt/ifttt.py","file_name":"ifttt.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230229821","text":"#Scikit-learn incorporation\n\nimport pickle\nimport nltk\nimport random\nfrom nltk.corpus import movie_reviews\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\n\n# a list of tuples --- features\ndocuments = [(list(movie_reviews.words(fileid)),category)\n for category in movie_reviews.categories()\n for fileid in movie_reviews.fileids(category)]\n\nrandom.shuffle(documents)\n\n# print(documents[1])\n\nall_words = []\nfor w in movie_reviews.words():\n all_words.append(w.lower())\n\nall_words = nltk.FreqDist(all_words) # the words here may be 
useless\n\nword_features = list(all_words.keys())[:3000] # we need more words that we commonly use\n\ndef find_features(document):\n words = set(document) # delete duplicates\n features = {} # dictionary\n for w in word_features:\n features[w] = (w in words)\n return features\n\n# we use a negative words repository to check the top 3000 words\n# false: not negative, ture: is negative\n# print((find_features(movie_reviews.words('neg/cv000_29416.txt'))))\n\nfeaturesets = [(find_features(rev),category) for (rev, category) in documents]\n\ntraining_set = featuresets[:1900]\ntesting_set = featuresets[1900:]\n\n# posterior = prior occurences * liklihood / evidence\nclassifier = nltk.NaiveBayesClassifier.train(training_set)\n# uncomment when you want to train the module\n\n# then open the saved module here\n# classifier_f = open(\"naivebyes.pickle\")\n# classifier = pickle.load(classifier_f)\n# classifier_f.close()\n\n# * 100 to get percent\n# now we can change other classifier\nprint(\"Naive Bayes Algo accuracy percent:\", (nltk.classify.accuracy(classifier,testing_set))*100)\n# the most valuable words are when it comes to positive or negative reviews\n# given by ratio\nclassifier.show_most_informative_features(15)\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(training_set)\nprint(\"MultinomialNB accuracy percent:\",nltk.classify.accuracy(MNB_classifier, testing_set)*100)\n\nBNB_classifier = SklearnClassifier(BernoulliNB())\nBNB_classifier.train(training_set)\nprint(\"BernoulliNB accuracy percent:\",nltk.classify.accuracy(BNB_classifier, testing_set)*100)\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(training_set)\nprint(\"LogisticRegression_classifier accuracy percent:\", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)\n\nSGDClassifier_classifier = SklearnClassifier(SGDClassifier())\nSGDClassifier_classifier.train(training_set)\nprint(\"SGDClassifier_classifier accuracy percent:\", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set))*100)\n\nSVC_classifier = SklearnClassifier(SVC())\nSVC_classifier.train(training_set)\nprint(\"SVC_classifier accuracy percent:\", (nltk.classify.accuracy(SVC_classifier, testing_set))*100)\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(training_set)\nprint(\"LinearSVC_classifier accuracy percent:\", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)\n\nNuSVC_classifier = SklearnClassifier(NuSVC())\nNuSVC_classifier.train(training_set)\nprint(\"NuSVC_classifier accuracy percent:\", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100)","sub_path":"E15.py","file_name":"E15.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53064563","text":"import numpy as np\nimport cv2\nimport pickle\nimport glob\nimport matplotlib.pyplot as plt\nimport os\nimport Lane_find_functions as Lff\nimport Image_processing_functions as IPF\nimport sys\nimport function_parameters as FP\n\ndef main():\n count = FP.frame\n dashcam_image_path = FP.dashcam_image_path\n #dashcam_image_path = './Test_images/dashcam_driving/'\n #dashcam_image_path = './Test_images/challnege_video/'\n #dashcam_image_path = './Test_images/harder_challenge_video/'\n #dashcam_image_path = './Test_images/project_video/'\n img_arg=\"frame\"\n #count = 138\n #count = 139\n #count = 170\n #count = 60\n #count = 290\n #count = 688\n 
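# the commented-out frame numbers above and below are alternative start frames used while testing\n    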
#count = 0\n    #count = 822\n\n    k=0\n    #cv2.namedWindow('prikaz', cv2.WINDOW_NORMAL)\n    while k != 27:  # loop until Esc (key code 27) is pressed\n\n        imgOriginal = cv2.imread(dashcam_image_path+img_arg+str(count)+\".jpg\") # open image\n        #cv2.imshow(img_arg+str(count)+\".jpg\", imgOriginal)\n        cv2.namedWindow(img_arg+str(count)+\".jpg\", cv2.WINDOW_NORMAL)\n        cv2.resizeWindow(img_arg+str(count)+\".jpg\",1280,720)\n        if imgOriginal is None:  # if image was not read successfully\n            print(\"error: image not read from file \\n\\n\")  # print error message to std out\n            os.system(\"pause\")  # pause so user can see error message\n            return\n        # imgOriginal = cv2.resize(imgOriginal,(640,360))\n        processed_image = Lff.process_image_4lanes(imgOriginal, fullscreen=False)\n        # processed_image = dynamic_calibration_information(processed_image)\n#--------------------------------------------------------------------------------------------------------------------\n        cv2.imshow(img_arg+str(count)+\".jpg\", processed_image)\n        #print(final_image.shape)\n        #cv2.imwrite('Output_'+img_arg+str(count)+\".jpg\",processed_image)\n        k = cv2.waitKey()  # hold window open until user presses a key\n        if k == 83:\n            count = int(count) + 1\n        elif k == 81:\n            if count != 0:\n                count = int(count) - 1\n    cv2.destroyAllWindows()  # remove windows from memory\n    return\n\n###################################################################################################\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Test_with_function.py","file_name":"Test_with_function.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"312550997","text":"import logging\nimport requests\n\nfrom tools.system_tools import get_value_from_env\n\n\nclass SkypeBot:\n\n    def __init__(self):\n\n        self._bot_name = get_value_from_env('BOT_NAME')\n        self._token = None\n        self._set_token_attempts = 5\n\n    def _set_token(self):\n        client_id = get_value_from_env('APP_ID')\n        client_secret = get_value_from_env('APP_PASSWORD')\n\n        url = get_value_from_env('BOT_CON_AUTH_URI')\n        payload_workpiece = get_value_from_env('BOT_CON_PAYLOAD_WORKPIECE')\n        payload = payload_workpiece.format(client_id=client_id,\n                                           client_secret=client_secret)\n        headers = {'Content-Type': 'application/x-www-form-urlencoded', }\n        r = requests.post(url, headers=headers, data=payload)\n        r_data = r.json()\n        self._token = r_data.get('access_token')\n\n    @property\n    def name(self):\n        return self._bot_name\n\n    def send_message(self, payload):\n        service = payload['serviceUrl']\n        conversation_id = payload['conversation']['id']\n        activity_id = payload[\"replyToId\"]\n\n        url_workpiece = get_value_from_env('CONVERSATION_ENDP_WORKPIECE')\n        if url_workpiece is not None:\n            url = url_workpiece.format(service=service,\n                                       conversation_id=conversation_id,\n                                       activity_id=activity_id)\n\n            i = 0\n            while (self._token is None) and (i <= self._set_token_attempts):\n                self._set_token()\n                i += 1  # count attempts so a failed login cannot loop forever\n\n            auth_token = 'Bearer {0}'.format(self._token)\n            headers = {\n                'Authorization': auth_token,\n                'Content-Type': 'Application/json',\n            }\n\n            r = requests.post(url, headers=headers, json=payload)\n\n            if 400 <= r.status_code <= 599:\n                logging.error(f'error: {r.text}')\n        else:\n            logging.error('Environment error! 
No conversation endpoint!')\n","sub_path":"skypebot.py","file_name":"skypebot.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"38679476","text":"#!/usr/bin/env python3\n#\n# References:\n# GPS - NMEA sentence information (http://aprs.gids.nl/nmea)\n# NMEA data (http://www.gpsinformation.org/dale/nmea.htm)\n\nimport time\nimport datetime\nimport threading\nfrom fake_interfaces import fake_serial\n\nclass FakeGpsSensor(threading.Thread):\n \"\"\"\n Fake GPS sensor implementation.\n \"\"\"\n\n def __init__(self, port):\n \"\"\"\n Initialize and start fake gps sensor thread.\n \"\"\"\n threading.Thread.__init__(self)\n self.__stop_event = threading.Event()\n self.__fake_serial = fake_serial.FakeSerial()\n self.__fake_serial.port = port\n self.start()\n\n def __del__(self):\n self.stop()\n\n def stop(self):\n \"\"\"\n Stop thread loop.\n \"\"\"\n self.__stop_event.set()\n\n def run(self):\n \"\"\"\n Thread loop.\n \"\"\"\n self.__fake_serial.open()\n while not self.__stop_event.is_set():\n self.__fake_serial.write(self.__get_gps_data())\n time.sleep(1)\n self.__fake_serial.close()\n\n def __nmea_checksum(self, msg):\n \"\"\"\n The checksum field consists of a '*' and two hex digits representing\n an 8 bit exclusive OR of all characters between, but not including,\n the '$' and '*'.\n \"\"\"\n checksum = 0\n for char in msg:\n checksum ^= ord(char)\n return hex(checksum).upper()[2:]\n\n def __get_gps_data(self):\n \"\"\"\n Get GPS data.\n\n $GPRMC,hhmmss.ss,A,llll.ll,a,yyyyy.yy,a,x.x,x.x,ddmmyy,x.x,a*hh\n 1 = UTC of position fix\n 2 = Data status (V=navigation receiver warning)\n 3 = Latitude of fix\n 4 = N or S\n 5 = Longitude of fix\n 6 = E or W\n 7 = Speed over ground in knots\n 8 = Track made good in degrees True\n 9 = UT date\n 10 = Magnetic variation degrees (Easterly var. subtracts from true\n course)\n 11 = E or W\n 12 = Checksum\n \"\"\"\n\n # Get current time in UTC\n now = datetime.datetime.now(datetime.timezone.utc)\n now_time = now.strftime('%H%M%S')\n now_date = now.strftime('%d%m%y')\n\n gprmc = 'GPRMC,' + now_time + \\\n '.00,A,2736.12493,S,04834.61709,W,0.234,,' + now_date + ',,,A'\n gprmc_checksum = self.__nmea_checksum(gprmc)\n msg = '$' + gprmc + '*' + gprmc_checksum + fake_serial.FakeSerial.CRLF \\\n + '$GPVTG,,,,,,,,,N*30' + fake_serial.FakeSerial.CRLF + \\\n '$GPGGA,140601.00,,,,,0,00,99.99,,,,,,*64' + \\\n fake_serial.FakeSerial.CRLF + \\\n '$GPGSV,3,1,10,05,34,266,,07,37,125,,08,14,133,18,09,34,049,33*79' \\\n + fake_serial.FakeSerial.CRLF + \\\n '$GPGLL,,,,,140601.00,V,N*48' + fake_serial.FakeSerial.CRLF\n\n return msg\n","sub_path":"implementacao/fake_devices/fake_gps_sensor.py","file_name":"fake_gps_sensor.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378830894","text":"try:\n from . import _reader\nexcept ImportError as e:\n from . 
import _reader_py as _reader\n\nfrom ._reader_common import HEADER_SCHEMA, SYNC_SIZE, MAGIC\nfrom ._reader_common import SchemaResolutionError\n\nacquaint_schema = _reader.acquaint_schema\nreader = iter_avro = _reader.iter_avro\nschemaless_reader = _reader.schemaless_reader\nread_data = _reader.read_data\nis_avro = _reader.is_avro\n\nREADERS = _reader.READERS\nLOGICAL_READERS = _reader.LOGICAL_READERS\n\n__all__ = [\n 'acquaint_schema', 'reader', 'schemaless_reader', 'read_data', 'is_avro',\n 'HEADER_SCHEMA', 'SYNC_SIZE', 'MAGIC', 'SchemaResolutionError',\n 'LOGICAL_READERS', 'READERS',\n]\n","sub_path":"fastavro/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486379286","text":"from django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom rest_framework_jwt import utils\n\n\nclass ServicesAPITestCase(APITestCase):\n\n fixtures = ['core-services.json', ]\n\n def setUp(self):\n\n user = User.objects.create_superuser(username='tester',\n email='tester@mail.com',\n password='t&s3TEr3')\n\n payload = utils.jwt_payload_handler(user)\n token = utils.jwt_encode_handler(payload)\n\n self.auth = 'JWT {0}'.format(token)\n\n def test_get_services(self):\n \"\"\"GET /api/v1/registries/ must return status code 200\"\"\"\n\n resp = self.client.get(reverse('service-list'),\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_200_OK, resp.status_code)\n self.assertEqual(len(resp.data), 6)\n\n def test_add_service(self):\n \"\"\"\n POST /api/v1/registries/ must return status code 201\n JSON response\n {\"service\": \"test3\", \"version\": \"0.0.1\", \"change\": \"created\"}\n \"\"\"\n\n resp = self.client.post(reverse('service-list'),\n {\"service\": \"test3\", \"version\": \"0.0.1\"},\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_201_CREATED, resp.status_code)\n # JSON Response\n self.assertJSONEqual(\n resp.content,\n {\"service\": \"test3\", \"version\": \"0.0.1\", \"change\": \"created\"})\n\n def test_find_service(self):\n \"\"\"\n GET /api/v1/search/ must return status code 200 and\n JSON response {\"service\": \"test\", \"version\": \"0.0.1\", \"count\":2}\n \"\"\"\n\n resp = self.client.get(reverse('search'),\n {\"service\": \"test\", \"version\": \"0.0.1\"},\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_200_OK, resp.status_code)\n # JSON Response\n self.assertJSONEqual(\n resp.content,\n {\"service\": \"test\", \"version\": \"0.0.1\", \"count\": 2})\n\n def test_find_non_existing_service(self):\n \"\"\"\n GET /api/v1/search/ must return status code 404 and\n JSON response {\"service\": \"test\", \"version\": \"0.0.4\", \"count\":0}\n \"\"\"\n\n resp = self.client.get(reverse('search'),\n {\"service\": \"test\", \"version\": \"0.0.4\"},\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_404_NOT_FOUND, resp.status_code)\n # JSON Response\n self.assertJSONEqual(\n resp.content,\n {\"service\": \"test\", \"version\": \"0.0.4\", \"count\": 0})\n\n def test_find_service_without_version(self):\n \"\"\"\n GET /api/v1/search/ must return status code 200 and\n JSON response {\"service\": \"test\", \"count\":4}\n \"\"\"\n\n resp = self.client.get(reverse('search'),\n {\"service\": \"test\"},\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_200_OK, resp.status_code)\n # JSON Response\n 
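# (per the fixtures, four \"test\" services exist across all versions)\n        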
self.assertJSONEqual(resp.content, {\"service\": \"test\", \"count\": 4})\n\n def test_search_without_parameter(self):\n \"\"\"GET /api/v1/search/ must return status code 500\"\"\"\n\n resp = self.client.get(reverse('search'),\n {\"version\": \"0.0.1\"},\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(\n status.HTTP_500_INTERNAL_SERVER_ERROR, resp.status_code)\n # JSON Response\n self.assertJSONEqual(\n resp.content,\n {\"detail\": \"Search parameter (service) could not be found\"})\n\n def test_search_without_service(self):\n \"\"\"GET /api/v1/search/ must return status code 500\"\"\"\n\n resp = self.client.get(reverse('search'),\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(\n status.HTTP_500_INTERNAL_SERVER_ERROR, resp.status_code)\n # JSON Response\n self.assertJSONEqual(\n resp.content,\n {\"detail\": \"Search parameters (service or version) \"\n \"could not be found\"})\n\n def test_update_service(self):\n \"\"\"\n PUT /api/v1/search/ must return status code 200 and\n JSON response {\"change\": \"changed\"}\n \"\"\"\n\n resp = self.client.put(reverse('update', args=[12]),\n {\"service\": \"test\", \"version\": \"0.0.4\"},\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_200_OK, resp.status_code)\n # JSON Response\n self.assertJSONEqual(resp.content, {\"change\": \"changed\"})\n\n def test_remove_service(self):\n \"\"\"\n DELETE /api/v1/search/ must return status code 200 and\n JSON response {\"service\": \"test2\", \"change\": \"removed\"}\n \"\"\"\n\n resp = self.client.delete(reverse('remove', args=[16]),\n HTTP_AUTHORIZATION=self.auth)\n self.assertEqual(status.HTTP_200_OK, resp.status_code)\n # JSON Response\n self.assertJSONEqual(\n resp.content, {\"service\": \"test2\", \"change\": \"removed\"})\n","sub_path":"registry/services/tests/test_service_registry_api.py","file_name":"test_service_registry_api.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305384987","text":"######################################## BIBLIOTECAS ####################################\nimport random\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras import backend as K #importa backend para clear_session()\nimport tensorflow as tf\n\nclass DQNAgent:\n ########################### INICIALIZA ###########################################\n def __init__(self, state_size, action_size, epsilon, janela, n_neuronios, n_variaveis):\n self.state_size = state_size\n self.n_neuronios = n_neuronios\n self.action_size = action_size\n self.janela = janela\n self.n_variaveis = n_variaveis\n self.limpa_memoria()\n self.gamma = 0.99 # discount rate\n self.epsilon = epsilon # exploration rate\n self.learning_rate = 0.000001\n self.model = self.cria_modelo()\n self.model.summary()\n\n################################# REDE NEURAL ###########################################\n def cria_modelo(self):\n # Neural Net for Deep-Q learning Model\n model = Sequential()\n \n model.add(Dense(128, input_dim=self.state_size, activation='relu')) #camada de entrada (escondida)\n model.add(Dense(64, activation='relu')) #camada escondida\n model.add(Dense(32, activation='relu')) #camada escondida\n model.add(Dense(self.action_size, activation='softmax')) #camada de saida\n model.compile(loss='categorical_crossentropy', optimizer=tf.train.AdamOptimizer(learning_rate=self.learning_rate)) #compilador\n \n return model\n\n def 
limpa_memoria(self):\n        self.state = np.empty((0,))\n        self.next_state = np.empty((0,))\n\n    def toma_acao(self, valores_ant, teste):\n        if not teste and np.random.rand() <= self.epsilon: # if the random draw falls below epsilon\n            return random.randrange(self.action_size) # return a random action (exploration)\n        estado = np.array([np.append(self.state, valores_ant)]) # build the current state\n        act_values = self.model.predict(estado, batch_size=1) # score every action\n        return np.argmax(act_values[0]) # returns action\n\n    def treina_modelo(self, acao, reward, valores_ant, valores_dps, batch_size=1):\n        prox_estado = np.array([np.append(self.next_state, valores_dps)]) # build the next state\n        target = (reward + self.gamma * np.amax(self.model.predict(prox_estado, batch_size=batch_size)[0])) # target value to move towards\n\n        estado = np.array([np.append(self.state, valores_ant)]) # build the current state\n        target_f = self.model.predict(estado, batch_size=batch_size) # current predictions\n        target_f[0][acao] = target # set the desired value for the chosen action\n\n        self.model.fit(estado, target_f, epochs=1, verbose=0, batch_size=batch_size) # train the model\n\n    def tira_ultimo_state(self):\n        if self.state.shape[0] > self.janela * self.n_variaveis:\n            self.state = self.state[self.n_variaveis:] # drop the oldest prices\n        if self.next_state.shape[0] > self.janela * self.n_variaveis:\n            self.next_state = self.next_state[self.n_variaveis:] # drop the oldest prices\n\n    def carrega_pesos(self, name):\n        self.model.load_weights(name) # load weights\n\n    def salva_pesos(self, name):\n        self.model.save_weights(name) # save weights","sub_path":"brunao/tensorflow_2.0/DQNModel.py","file_name":"DQNModel.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585157182","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^(?P<pk>[0-9]+)$', views.ProfileView.as_view(), name='profile'),\n    url(r'^login/$', views.Login.as_view(), name='login'),\n    url(r'^login/auth_user/$', views.auth_user, name='auth_user'),\n    url(r'^signup/$', views.SignUp.as_view(), name='signup'),\n    url(r'^logout/$', views.logout_view, name='logout'),\n    url(r'^search/$', views.user_search, name='search'),\n    url(r'^notfound/$', views.UserNotFound.as_view(), name='notfound'),\n    url(r'^update_info/(?P<pk>[0-9]+)/$', views.update_info, name='update_info'),\n]\n","sub_path":"UserProfile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"416382544","text":"import unittest\nfrom elevator import *\n\nclass TestMethods(unittest.TestCase):\n    def test_constructor_set_invalid_door_status(self):\n        #arrange\n        with self.assertRaises(ValueError):\n            e = Elevator(1, \"D\", 0, 10)\n    def test_constructor_set_invalid_current_floor(self):\n        #arrange\n        with self.assertRaises(ValueError):\n            e = Elevator(-1, \"C\", 0, 10)\n    def test_constructor_invalid_current_floor(self):\n        #arrange\n        e = Elevator(1, \"C\", 0, 10)\n        self.assertEqual(e.currentFloor, 1)\n    def test_travese_traverse_correctly(self):\n        e = Elevator(1, \"C\", 0, 10)\n        e.traverse(8)\n        self.assertEqual(e.currentFloor, 8)\n        e.traverse(4)\n        self.assertEqual(e.currentFloor, 4)\n        e.traverse(10)\n        self.assertEqual(e.currentFloor, 10)\n\n    def test_travese_check_boundary(self):\n        e = Elevator(1, \"C\", 0, 10)\n        e.traverse(10)\n        self.assertEqual(e.currentFloor, 10)\n\n    def test_travese_check_invalid_floor(self):\n        e = Elevator(1, \"C\", 0, 10)\n        with self.assertRaises(ValueError):\n            e.traverse(11)\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"elevator_test.py","file_name":"elevator_test.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614193718","text":"\"\"\"\nCreated on Wed Apr 22 15:15:16 2015\n\nQuiz 2 code.\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pickle\n\nfrom compute_sta import compute_sta\n\n\nFILENAME = 'c1p8.pickle'\n\nwith open(FILENAME, 'rb') as f:\n    data = pickle.load(f)\n\nstim = data['stim']\nrho = data['rho']\n\n# print(len(stim), len(rho))\n\n\n\n# Fill in these values\nsampling_rate = 500\n# Convert to ms\nsampling_period = int((1/sampling_rate) * 1000)\nprint(f\"Sampling Period: {sampling_period}\")\n\n\nwindow = 300\nnum_timesteps = int(window / sampling_period)\nprint(f\"Number of Timesteps: {num_timesteps}\")\n\n\n# Compute STA\nsta = compute_sta(stim, rho, num_timesteps)\n\ntime = (np.arange(-num_timesteps, 0) + 1) * sampling_period\n# print(time, len(time))\n\nplt.plot(time, sta)\nplt.xlabel('Time (ms)')\nplt.ylabel('Stimulus')\nplt.title('Spike-Triggered Average')\n\nplt.show()","sub_path":"coursera-assignments/quiz2/quiz2.py","file_name":"quiz2.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180656240","text":"tc=int(input())\r\nfor _ in range(tc):\r\n    li=[]\r\n    n=int(input())\r\n    li.append(tuple(map(int, input().split())))\r\n    while True:\r\n        tl=[]\r\n        t=0\r\n        for i in range(len(li[-1])):\r\n            if i==0:\r\n                t=abs(li[-1][i-1]-li[-1][i])\r\n            else:\r\n                tl.append(abs(li[-1][i-1]-li[-1][i]))\r\n        tl.append(t)\r\n        if sum(tl)==0:\r\n            print(\"ZERO\")\r\n            break\r\n        
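# a difference-tuple seen before means the sequence has entered a cycle\r\n        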
elif tuple(tl) in li:\r\n print(\"LOOP\")\r\n break\r\n else:\r\n li.append(tuple(tl))","sub_path":"powerful104/8922.py","file_name":"8922.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"558855381","text":"from django.contrib import admin\nfrom .models import Request, Comment\n\n\nclass RequestAdmin(admin.ModelAdmin):\n list_display = ('user', 'topic', 'priority', 'progress')\n list_display_links = ('user', 'topic')\n search_fields = ('user', 'topic', )\n\n\nadmin.site.register(Request, RequestAdmin)\nadmin.site.register(Comment)\n","sub_path":"help_desk/main_desk/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"127740370","text":"# Copyright (C) 2021 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom http import HTTPStatus\nimport pytest\nfrom .utils.config import get_method, patch_method, delete_method\nfrom deepdiff import DeepDiff\n\nclass TestGetOrganizations:\n _ORG = 2\n\n @pytest.mark.parametrize('privilege, role, is_member, is_allow', [\n ('admin', None, None, True),\n ('user', None, False, False),\n ('business', None, False, False),\n ('worker', None, False, False),\n (None, 'owner', True, True),\n (None, 'maintainer', True, True),\n (None, 'worker', True, True),\n (None, 'supervisor', True, True),\n ])\n def test_can_see_specific_organization(self, privilege, role, is_member,\n is_allow, find_users, organizations):\n exclude_org = None if is_member else self._ORG\n org = self._ORG if is_member else None\n user = find_users(privilege=privilege, role=role, org=org,\n exclude_org=exclude_org)[0]['username']\n\n response = get_method(user, f'organizations/{self._ORG}')\n if is_allow:\n assert response.status_code == HTTPStatus.OK\n assert DeepDiff(organizations(self._ORG), response.json()) == {}\n else:\n assert response.status_code == HTTPStatus.NOT_FOUND\n\nclass TestPatchOrganizations:\n _ORG = 2\n\n @pytest.fixture(scope='class')\n def request_data(self):\n return {'slug': 'new', 'name': 'new', 'description': 'new',\n 'contact': {'email': 'new@cvat.org'}}\n\n @pytest.fixture(scope='class')\n def expected_data(self, organizations, request_data):\n data = organizations(self._ORG).copy()\n data.update(request_data)\n return data\n\n @pytest.mark.parametrize('privilege, role, is_member, is_allow', [\n ('admin', None, None, True),\n ('user', None, False, False),\n ('business', None, False, False),\n ('worker', None, False, False),\n (None, 'owner', True, True),\n (None, 'maintainer', True, True),\n (None, 'worker', True, False),\n (None, 'supervisor', True, False),\n ])\n def test_can_update_specific_organization(self, privilege, role, is_member,\n is_allow, find_users, request_data, expected_data):\n exclude_org = None if is_member else self._ORG\n org = self._ORG if is_member else None\n user = find_users(privilege=privilege, role=role, org=org,\n exclude_org=exclude_org)[0]['username']\n\n response = patch_method(user, f'organizations/{self._ORG}', request_data)\n\n if is_allow:\n assert response.status_code == HTTPStatus.OK\n assert DeepDiff(expected_data, response.json(),\n exclude_paths=\"root['updated_date']\") == {}\n else:\n assert response.status_code != HTTPStatus.OK\n\nclass TestDeleteOrganizations:\n _ORG = 2\n\n @pytest.mark.parametrize('privilege, role, is_member, is_allow', [\n ('admin', None, None, True),\n (None, 'owner', True, 
True),\n (None, 'maintainer', True, False),\n (None, 'worker', True, False),\n (None, 'supervisor', True, False),\n ('user', None, False, False),\n ('business', None, False, False),\n ('worker', None, False, False),\n ])\n def test_can_delete(self, privilege, role, is_member,\n is_allow, find_users):\n exclude_org = None if is_member else self._ORG\n org = self._ORG if is_member else None\n user = find_users(privilege=privilege, role=role, org=org,\n exclude_org=exclude_org)[0]['username']\n\n response = delete_method(user, f'organizations/{self._ORG}')\n\n if is_allow:\n assert response.status_code == HTTPStatus.NO_CONTENT\n else:\n assert response.status_code != HTTPStatus.OK\n","sub_path":"tests/rest_api/test_0002_organizations.py","file_name":"test_0002_organizations.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358030652","text":"#!/usr/bin/env python\nimport select, subprocess, shlex\nfrom ipaddress import ip_address, ip_network\nfrom walt.common.tools import do, succeeds\nfrom walt.server import const, conf\nfrom walt.server.threads.main.snmp import Proxy\n\ndef ip(ip_as_str):\n return ip_address(str(ip_as_str))\n\ndef net(net_as_str):\n return ip_network(str(net_as_str), strict=False)\n\ndef get_walt_subnet():\n return net(conf['network']['walt-net']['ip'])\n\ndef get_walt_adm_subnet():\n walt_adm_conf = conf['network'].get('walt-adm', None)\n if walt_adm_conf is None:\n return None\n else:\n return net(walt_adm_conf['ip'])\n\ndef find_free_ip_near(ip, intf, increment):\n target_ip = ip\n while True:\n target_ip += increment\n if succeeds('arping -D -w 1 -I %s %s' % (intf, target_ip)):\n return target_ip\n\ndef smallest_subnet_for_these_ip_addresses(ip1, ip2):\n # start with /31, then /30 etc.\n # until is in this network too.\n for netrange in range(31,0,-1):\n net = ip_network('%s/%d' % (ip1, netrange), strict=False)\n if ip2 in net:\n return net\n\ndef get_mac_address(intf):\n with open('/sys/class/net/' + intf +'/address') as f:\n return f.read().strip()\n\ndef add_ip_to_interface(ip, subnet, intf):\n do('ip addr add %s/%d dev %s' % (ip, subnet.prefixlen, intf))\n\ndef del_ip_from_interface(ip, subnet, intf):\n do('ip addr del %s/%d dev %s' % (ip, subnet.prefixlen, intf))\n\ndef check_if_we_can_reach(remote_ip):\n return succeeds('ping -c 1 -w 1 %s' % remote_ip)\n\ndef is_walt_address(ip):\n return ip in get_walt_subnet()\n\ndef assign_temp_ip_to_reach_neighbor(neighbor_ip, callback, intf, *args):\n reached = False\n callback_result = None\n for increment in [ 1, -1 ]:\n free_ip = find_free_ip_near(neighbor_ip, intf, increment)\n subnet = smallest_subnet_for_these_ip_addresses(neighbor_ip, free_ip)\n print(free_ip, subnet)\n add_ip_to_interface(free_ip, subnet, intf)\n if check_if_we_can_reach(neighbor_ip):\n callback_result = callback(free_ip, neighbor_ip, intf, *args)\n reached = True\n del_ip_from_interface(free_ip, subnet, intf)\n if reached:\n break\n return (reached, callback_result)\n\ndef set_static_ip_on_switch(switch_ip, snmp_conf):\n p = Proxy(switch_ip, snmp_conf, ipsetup=True)\n p.ipsetup.record_current_ip_config_as_static()\n\ndef lldp_update():\n do('lldpcli update')\n\ndef get_server_ip():\n return conf['network']['walt-net']['ip'].split('/')[0]\n\ndef ip_in_walt_network(input_ip):\n subnet = get_walt_subnet()\n return ip(input_ip) in subnet\n\ndef ip_in_walt_adm_network(input_ip):\n subnet = get_walt_adm_subnet()\n if subnet is None:\n return False\n else:\n 
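# walt-adm subnet is configured; test membership in it\n        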
return ip(input_ip) in subnet\n\ndef get_dns_servers():\n local_server_is_dns_server = False\n dns_list = []\n with open('/etc/resolv.conf', 'r') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n if line[0] == '#':\n continue\n if line.startswith('nameserver'):\n for dns_ip in line.split(' ')[1:]:\n if dns_ip.startswith('127.'):\n local_server_is_dns_server = True\n continue\n dns_list.append(dns_ip)\n # If walt server is a DNS server, and no other DNS is available, let the\n # walt nodes use it (but not with its localhost address!)\n if local_server_is_dns_server and len(dns_list) == 0:\n dns_list.append(get_server_ip())\n # Still no DNS server... Hope that this one is reachable\n if len(dns_list) == 0:\n dns_list.append('8.8.8.8')\n return dns_list\n","sub_path":"server/walt/server/threads/main/network/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614312466","text":"\"\"\"\nAgnocomplete views.\n\"\"\"\nfrom six import with_metaclass\nfrom abc import abstractmethod, ABCMeta\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.utils.functional import cached_property\nfrom django.views.generic import View\n\nfrom .register import get_agnocomplete_registry\nfrom .exceptions import (\n AuthenticationRequiredAgnocompleteException,\n ImproperlyConfiguredView\n)\n\n\ntry:\n from django.http import JsonResponse\nexcept ImportError:\n # JsonResponse for Django 1.6.\n # Source: https://gist.github.com/philippeowagner/3179eb475fe1795d6515\n import json\n from django.http import HttpResponse\n\n class JsonResponse(HttpResponse):\n \"\"\"\n JSON response\n \"\"\"\n def __init__(self, content,\n status=None, content_type=None):\n super(JsonResponse, self).__init__(\n content=json.dumps(content),\n status=status, content_type=content_type)\n\n\nclass AgnocompleteJSONView(with_metaclass(ABCMeta, View)):\n \"\"\"\n Generic toolbox for JSON-returning views\n \"\"\"\n\n @property\n def content_type(self):\n \"\"\"\n Return content-type of the response.\n For a JSONResponseMixin, the obvious answer is ``application/json``.\n But Internet Explorer v8 can't handle this content-type and instead\n of processing it as a normal AJAX data response, it tries to download\n it.\n We're tricking this behaviour by sending back a ``text/html``\n content-type header instead.\n \"\"\"\n if 'HTTP_X_REQUESTED_WITH' in self.request.META:\n return \"application/json;charset=utf-8\"\n else:\n return \"text/html\"\n\n @abstractmethod\n def get_dataset(self):\n pass\n\n def get(self, *args, **kwargs):\n return JsonResponse(\n {'data': self.get_dataset()},\n content_type=self.content_type,\n )\n\n\nclass RegistryMixin(object):\n \"\"\"\n This mixin is able to return the agnocomplete registry.\n \"\"\"\n @cached_property\n def registry(self):\n \"\"\"\n Return the agnocomplete registry (cached)\n \"\"\"\n return get_agnocomplete_registry()\n\n\nclass UserContextFormViewMixin(object):\n \"\"\"\n This mixin is injecting the context variable into the form kwargs\n \"\"\"\n\n def get_agnocomplete_context(self):\n \"\"\"\n Return the view current user.\n\n You may want to change this value by overrding this method.\n \"\"\"\n return self.request.user\n\n def get_form_kwargs(self):\n \"\"\"\n Return the form kwargs.\n\n This method injects the context variable, defined in\n :meth:`get_agnocomplete_context`. 
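The value is passed to the form under the ``user`` keyword. 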
Override this method to adjust it to\n        your needs.\n        \"\"\"\n        data = super(UserContextFormViewMixin, self).get_form_kwargs()\n        data.update({\n            'user': self.get_agnocomplete_context(),\n        })\n        return data\n\n\nclass CatalogView(RegistryMixin, AgnocompleteJSONView):\n    \"\"\"\n    The catalog view displays every available Agnocomplete slug available in\n    the registry.\n    \"\"\"\n    def get_dataset(self):\n        \"\"\"\n        Return the registry key set.\n        \"\"\"\n        return tuple(self.registry.keys())\n\n\nclass AgnocompleteGenericView(AgnocompleteJSONView):\n    def get_klass(self):\n        \"\"\"\n        Return the agnocomplete class to be used with the eventual query.\n        \"\"\"\n        # Return the instance if it's defined in the class properties\n        if hasattr(self, 'klass') and self.klass:\n            return self.klass\n        raise ImproperlyConfiguredView(\"Undefined autocomplete class\")\n\n    def get_dataset(self):\n        klass = self.get_klass()\n        # Query passed via the argument\n        query = self.request.GET.get('q', \"\")\n        if not query:\n            # Empty set, no value to complete\n            return []\n\n        # Optional page size; the parameter may be absent (TypeError)\n        # or non-numeric (ValueError)\n        try:\n            page_size = int(self.request.GET.get('page_size', None))\n        except (TypeError, ValueError):\n            page_size = None\n\n        # Agnocomplete instance is ready\n        try:\n            instance = klass(user=self.request.user, page_size=page_size)\n            return instance.items(query=query)\n        except AuthenticationRequiredAgnocompleteException:\n            raise PermissionDenied(\n                \"Unauthorized access to this Autocomplete\")\n        except:\n            # re-raise the unknown exception\n            raise\n\n\nclass AgnocompleteView(RegistryMixin, AgnocompleteGenericView):\n\n    def get_klass(self):\n        \"\"\"\n        Return the agnocomplete class to be used with the eventual query.\n        \"\"\"\n        # Extract the klass name from the URL arguments\n        klass_name = self.kwargs.get('klass', None)\n        klass = self.registry.get(klass_name, None)\n        if not klass:\n            raise Http404(\"Unknown autocomplete class `{}`\".format(klass_name))\n        return klass\n","sub_path":"agnocomplete/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
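# A minimal wiring sketch for AgnocompleteGenericView above. Names are
# illustrative: `AgnocompletePerson` stands for some agnocomplete class
# defined by the application.
#
#     class PersonAutocompleteView(AgnocompleteGenericView):
#         klass = AgnocompletePerson  # picked up by get_klass()
#
# A request such as GET /autocomplete/person/?q=ali&page_size=5 would then
# return {"data": [...]} as serialized by AgnocompleteJSONView.get().
{"seq_id":"444215781","text":"#!/usr/bin/env python\n\n# Copyright (c) 2015 SpinPunch. 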
All rights reserved.\n# Use of this source code is governed by an MIT-style license that can be\n# found in the LICENSE file.\n\n# automatically generate Alloy SKU tables for Kongregate\n\nimport SpinJSON\nimport SpinConfig\nimport AtomicFileWrite\nimport sys, re, traceback, os, string, getopt\n\nimport locale # for pretty number printing only\nlocale.setlocale(locale.LC_ALL, '')\n\n# regular expression that matches C++-style comments\ncomment_remover = re.compile('//.*?$') # |/\\*.*?/*/\nverbose = True\n\nif __name__ == '__main__':\n\n\n game_id = SpinConfig.game()\n\n opts, args = getopt.gnu_getopt(sys.argv[1:], 'uv', ['game-id=',])\n for key, val in opts:\n if key == '--game-id':\n game_id = val\n elif key == '-u':\n verbose = False\n\n out_fd = AtomicFileWrite.AtomicFileWrite(args[0], 'w', ident=str(os.getpid()))\n print >> out_fd.fd, \"// AUTO-GENERATED BY make_ai_bases_client.py\"\n\n out = {}\n\n COMMENTS = [None, None, 'Most Popular', None, None, None]\n\n SLATES = { \"P100M\": { \"level\": 100, \"slate\": \"M\",\n \"currency\": \"kgcredits\", \"ui_warning\": \"Limited time special offers:\",\n \"requires\": [{\"predicate\": \"ALWAYS_FALSE\"}], # disabled at 1403467211\n \"skus\": [{'alloy': 20000, 'kgcredits':2000},\n {'alloy': 10000, 'kgcredits': 1000},\n {'alloy': 5000, 'kgcredits': 500},\n {'alloy': 2500, 'kgcredits': 250},\n {'alloy': 1000, 'kgcredits': 100},\n {'alloy': 500, 'kgcredits': 50}\n ] },\n \"P100D1\": { \"level\": 100, \"slate\": \"D1\",\n \"currency\": \"kgcredits\", \"ui_warning\": \"Limited time special offers:\",\n \"requires\": [{\"predicate\": \"ALWAYS_TRUE\"}], # enabled at 1403467211\n \"skus\": [{'alloy': 24000, 'kgcredits':1999, 'nominal_alloy': 20000, 'ui_comment': 'Best Value', 'ui_pile_size': 5},\n {'alloy': 11500, 'kgcredits': 999, 'nominal_alloy': 10000, 'ui_comment': None, 'ui_pile_size': 4},\n {'alloy': 5500, 'kgcredits': 499, 'nominal_alloy': 5000, 'ui_comment': 'Most Popular', 'ui_pile_size': 3},\n {'alloy': 1050, 'kgcredits': 99, 'nominal_alloy': 1000, 'ui_comment': None, 'ui_pile_size': 2},\n {'alloy': 500, 'kgcredits': 49, 'ui_comment': None, 'ui_pile_size': 0}\n ] },\n }\n\n for slate_name, val in SLATES.iteritems():\n check_level = None\n last_level = None\n last_sku = None\n\n sorted_skus = sorted(val['skus'], key = lambda x: -x['alloy']) # from high to low price\n\n biggest_sku = sorted_skus[0]\n second_biggest_sku = sorted_skus[1]\n\n for sku_index in xrange(len(val['skus'])):\n data = val['skus'][sku_index]\n comment = val.get('comments',COMMENTS)[sku_index]\n\n sku_name = 'BUY_GAMEBUCKS_%d' % data['alloy'] + '_KG_'+slate_name\n\n # perform some sanity checks on pricing\n if last_sku is not None:\n # make sure SKUs are listed in descending order\n assert data['alloy'] < last_sku['alloy']\n\n # check for big (>50%) deviations in alloy exchange rate across SKUs\n this_level = float(data['alloy'])/data[val['currency']]\n if check_level is not None:\n delta = abs((this_level - check_level)/check_level)\n if delta > 0.20:\n raise Exception('big deviation on SKU %s %d vs %d: check_level %f this_level %f' % (sku_name, last_sku['alloy'], data['alloy'], check_level, this_level))\n check_level = this_level\n\n # make sure discount factor does not decline for larger purchases\n if last_level is not None:\n if this_level > last_level:\n raise Exception('alloy exchange rate declines on SKU: '+sku_name)\n last_level = this_level\n last_sku = data\n\n assert sku_name not in out\n pretty_alloy_amount = locale.format('%d', data['alloy'], True)\n sku = 
{\n 'quantity': data['alloy'],\n 'ui_name': '%GAMEBUCKS_QUANTITY %GAME_NAME %GAMEBUCKS_NAME',\n 'ui_description': \"%GAMEBUCKS_QUANTITY %GAME_NAME %GAMEBUCKS_NAME, which can be spent in game on speed-ups, resources, and special items\",\n 'activation': 'instant', 'icon': 'store_icon_grow_perimeter',\n 'paid': 1,\n 'currency': val['currency'],\n 'price_formula': 'constant',\n 'price': data[val['currency']],\n }\n if 'ui_pile_size' in data: sku['ui_pile_size'] = data['ui_pile_size']\n if 'ui_warning' in val: sku['ui_warning'] = val['ui_warning']\n if 'nominal_alloy' in data: sku['nominal_quantity'] = data['nominal_alloy']\n if comment: sku['ui_comment'] = comment\n\n ui_bonus_list = []\n if ui_bonus_list:\n sku['ui_bonus'] = '\\n'.join(ui_bonus_list)\n\n # how the predicate works:\n # SKU is enabled if:\n # EITHER kgcredits_sku_slate_override matches the slate_name\n # OR\n # kgcredits_sku_slate_override has not been set\n\n pred = {'predicate': 'AND', 'subpredicates':[\n {'predicate': 'FRAME_PLATFORM', 'platform': 'kg'},\n {'predicate': 'OR', 'subpredicates': [{'predicate': \"ANY_ABTEST\", 'key': 'kgcredits_sku_slate_override', 'value': slate_name, 'default': 0 },\n {'predicate': \"AND\", 'subpredicates': [{'predicate': \"ANY_ABTEST\", 'key': 'kgcredits_sku_slate_override', 'value': 'DEFAULT', 'default': 'DEFAULT' },\n ] + val.get('requires',[]) + data.get('requires',[]) }\n ] }\n ] }\n\n # prevent player from seeing biggest sku until has purchased at least as much as second-biggest\n if data is biggest_sku: # and len(val['skus']) >= 6:\n pred['subpredicates'].append({'predicate':'PLAYER_HISTORY', 'key':'money_spent', 'method':'>=',\n 'value': 0.07*second_biggest_sku['kgcredits'] - 0.01})\n\n sku['requires'] = pred\n out[sku_name] = sku\n\n out_keys = sorted(out.keys(), key = lambda x: -int(x.split('_')[2]))\n for name in out_keys:\n data = out[name]\n print >>out_fd.fd, '\"%s\":' % name, SpinJSON.dumps(data, pretty = False),\n if name != out_keys[-1]:\n print >>out_fd.fd, ','\n else:\n print >>out_fd.fd\n\n out_fd.complete()\n","sub_path":"gamedata/make_kgcredits_skus.py","file_name":"make_kgcredits_skus.py","file_ext":"py","file_size_in_byte":7137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23550463","text":"from examples.XMLParser import getorders, getdict, commcost, getvm\nimport networkx as nx\n# from examples.my_scheduler import getorders, getdag, commcost\nfrom heft.util import reverse_dict\nimport matplotlib.pyplot as plt\nimport math\nfrom examples.listoperation import getVmList, recruise\n# Unresolved import:nx\norders = getorders()\nG = []\nsl = []\nel = []\njl = []\nsL = {}\neL = {}\njL = {}\n# print(len(orders))\nvm = getvm()\n# print(vm[0])\nfor i in range(0, len(orders), 1):\n G.append(nx.DiGraph())\n\n\nfor eachP in orders:\n\n for i in orders[eachP]:\n jl.append(orders[eachP][orders[eachP].index(i)][0])\n sl.append(orders[eachP][orders[eachP].index(i)][1])\n el.append(orders[eachP][orders[eachP].index(i)][2])\n sL.update({eachP: sl})\n eL.update({eachP: el})\n jL.update({eachP: jl})\n sl = []\n el = []\n jl = []\nresult = {}\nfor i in range(0, len(G), 1):\n for j in range(0, len(jL[vm[i]]), 1):\n result.update({jL[vm[i]][j]: {'vm': vm[i], 'pos': j, 'starttime': sL[vm[i]][j], 'endtime': eL[vm[i]][j]}})\nprint(result)\n\n\"\"\"Add waitcost into the schedule result\"\"\"\n\ndag = getdict()\nreverse_dag = reverse_dict(dag)\nfor i in range(0, len(G), 1):\n for j in range(1, len(jL[vm[i]]), 1):\n if jL[vm[i]][j] in 
dag:\n temp = 999999\n for k in range(0, len(dag[jL[vm[i]][j]]), 1):\n if result[dag[jL[vm[i]][j]][k]]['endtime'] < temp:\n temp = result[dag[jL[vm[i]][j]][k]]['endtime']\n sL[vm[i]][j] = temp\n\n\"\"\"Add communication cost to the schedule\"\"\"\nfor i in range(0, len(G), 1):\n for j in range(0, len(jL[vm[i]]) - 1, 1):\n if jL[vm[i]][j] in reverse_dag:\n temp = 0\n for k in range(0, len(reverse_dag[jL[vm[i]][j]]), 1):\n if temp < commcost(jL[vm[i]][j], reverse_dag[jL[vm[i]][j]][k]\n , result[jL[vm[i]][j]]['vm'], result[reverse_dag[jL[vm[i]][j]][k]]['vm']):\n temp = commcost(jL[vm[i]][j], reverse_dag[jL[vm[i]][j]][k]\n , result[jL[vm[i]][j]]['vm'], result[reverse_dag[jL[vm[i]][j]][k]]['vm'])\n\n eL[vm[i]][j] += temp\n\nprint('-------------------------------------')\nprint('Print the original list:')\nfor i in range(0, len(G), 1):\n print('vm[', vm[i], ']:', '\\n', jL[vm[i]], '\\n', sL[vm[i]], '\\n', eL[vm[i]])\n\nfor i in range(0, len(G), 1):\n sL[vm[i]], eL[vm[i]] = recruise(getVmList, sL[vm[i]], eL[vm[i]], jL[vm[i]])\n\nprint('------------------------------------')\nprint('Refresh the list:')\nfor i in range(0, len(G), 1):\n print('vm[', vm[i], ']:', '\\n', jL[vm[i]], '\\n', sL[vm[i]], '\\n', eL[vm[i]])\n\n\"\"\"Build a new DAG for computing InstanceHour\"\"\"\n\"\"\"Set nodes aG\"\"\"\n# //0-45 75-115 123-188 250-285 290-330 335-370\n\nprint('Print each node[starttime,endtime]:')\n\nfor i in range(0, len(G), 1):\n for j in range(0, len(sL[vm[i]]) + 1, 1):\n if j == 0:\n G[i].add_node(j, starttime=sL[vm[i]][0], endtime=sL[vm[i]][0])\n print(G[i].node[j][\"starttime\"], G[i].node[j][\"endtime\"])\n continue\n if j == len(sL[vm[i]]):\n G[i].add_node(j, starttime=eL[vm[i]][j - 1], endtime=eL[vm[i]][j - 1])\n print(G[i].node[j][\"starttime\"], G[i].node[j][\"endtime\"])\n continue\n G[i].add_node(j, starttime=eL[vm[i]][j - 1], endtime=sL[vm[i]][j])\n print(G[i].node[j][\"starttime\"], G[i].node[j][\"endtime\"])\n\"\"\"Set edges aG\"\"\"\nfor i in range(0, len(G), 1):\n for j in range(len(sL[vm[i]]), 0, -1):\n for k in range(0, j, 1):\n G[i].add_edge(k, j, weight=math.ceil((G[i].node[j][\"starttime\"] - G[i].node[k][\"endtime\"]) / 60))\n\n\"\"\"Compute the Instance Hour\"\"\"\npath = []\nfor i in range(0, len(G), 1):\n path.append(nx.dijkstra_path(G[i], 0, len(sL[vm[i]]), weight='weight'))\n print(vm[i], \":\", path[i])\ntemp = []\nfor i in range(len(G)):\n temp.append(0)\n\n\"\"\"print vm start time and end time\"\"\"\nfor i in range(0, len(G), 1):\n for j in range(0, len(path[i]) - 1, 1):\n print('vm[', vm[i], ']start time:', G[i].node[path[i][j]]['endtime'], 'end time:',\n G[i].node[path[i][j + 1]]['starttime']\n , G[i].get_edge_data(path[i][j], path[i][j + 1]))\n temp[i] += G[i].get_edge_data(path[i][j], path[i][j + 1])['weight']\n\ntotal = 0\nfor i in range(0, len(temp), 1):\n total += temp[i]\nprint('Total instance hour(With Commcost):', temp, \"\\n\", \"total:\", total)\n\n\n","sub_path":"examples/IHWithCommcost.py","file_name":"IHWithCommcost.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501727572","text":"import storm\nfrom transaction_tracker import TransactionTracker\nimport abc\n\n\nclass GlobalDetector(storm.BasicBolt):\n MIN_NUMBER_OF_TRANSACTIONS = 5\n\n def __init__(self):\n # only one entity to keep track of global\n # Storm topology will make sure this is the only Bolt\n self.entity = TransactionTracker()\n self.bolt_name = type(self).__name__\n\n @abc.abstractmethod\n 
def get_stream(self):\n return\n\n def process(self, tup):\n account_id, merchant_id, transaction_amount, should_alert = tup.values\n\n if should_alert:\n if (self.entity.number_of_transactions > self.MIN_NUMBER_OF_TRANSACTIONS and\n self.entity.zscore(transaction_amount) >= 3):\n storm.emitBolt([account_id, merchant_id, transaction_amount, self.bolt_name],\n stream=self.get_stream())\n\n self.entity.update_statistics(transaction_amount)\n","sub_path":"multilang/resources/global_fraud_detector.py","file_name":"global_fraud_detector.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392115679","text":"# -*- coding: utf-8 -*-\n\"\"\"forms\"\"\"\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.core.exceptions import ValidationError\nfrom django.utils.timezone import now as dt_now\nfrom django.utils.translation import gettext as _, gettext_lazy\n\nimport floppyforms as floppyforms\n\nfrom ..bs_forms import Form as BsForm\nfrom ..models import Newsletter, NewsletterSending, NewsletterItem\nfrom ..settings import get_article_class, get_newsletter_templates\nfrom ..widgets import ChosenSelectMultiple\n\nfrom .base import InlineHtmlEditableModelForm\n\n\nclass NewsletterItemAdminForm(forms.ModelForm):\n \"\"\"admin form for NewsletterItem\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NewsletterItemAdminForm, self).__init__(*args, **kwargs) # pylint: disable=E1002\n self.item = kwargs.get('instance', None)\n article_choices = [(a.id, '{0}'.format(a)) for a in get_article_class().objects.all()]\n self.fields['object_id'] = forms.ChoiceField(\n choices=article_choices, required=True, help_text=_(\"Select an article\")\n )\n self.fields['content_type'].required = False\n self.fields['content_type'].widget = forms.HiddenInput()\n\n def clean_content_type(self):\n \"\"\"validation\"\"\"\n return ContentType.objects.get_for_model(get_article_class())\n\n\nclass NewsletterSettingsForm(forms.ModelForm):\n \"\"\"Newsletter creation form\"\"\"\n\n class Meta:\n model = Newsletter\n fields = ('subject', 'template', 'newsletter_date', 'items', 'site')\n\n class Media:\n css = {\n 'all': ('chosen/chosen.css', ),\n }\n js = (\n 'chosen/chosen.jquery.js',\n )\n\n def __init__(self, user, *args, **kwargs):\n super(NewsletterSettingsForm, self).__init__(*args, **kwargs) # pylint: disable=E1002\n tpl_choices = get_newsletter_templates(None, user)\n if tpl_choices:\n self.fields[\"template\"] = forms.ChoiceField(choices=tpl_choices)\n else:\n self.fields[\"template\"] = forms.CharField()\n self.fields[\"subject\"].widget = forms.TextInput(attrs={'size': 30})\n self.fields[\"items\"].widget.attrs[\"class\"] = \"chosen-select\"\n choices = list(self.fields['items'].choices)\n sites_choices = []\n current_site = Site.objects.get_current()\n for choice in choices:\n obj_id = choice[0]\n obj = NewsletterItem.objects.get(id=obj_id)\n try:\n has_sites = getattr(obj.content_object, 'sites', None)\n except AttributeError:\n has_sites = False\n if has_sites:\n if current_site in obj.content_object.sites.all():\n sites_choices.append(choice)\n else:\n sites_choices.append(choice)\n self.fields['items'].choices = sites_choices\n self.fields['items'].widget = ChosenSelectMultiple(\n choices=self.fields['items'].choices, force_template=True\n )\n\n def clean_items(self):\n \"\"\"check 
items\"\"\"\n items = self.cleaned_data[\"items\"]\n choice_ids = [choice[0] for choice in self.fields['items'].choices]\n for item in items:\n if item.id not in choice_ids:\n raise ValidationError(_(\"Invalid choice\"))\n return items\n\n\nclass PublishArticleForm(forms.ModelForm):\n \"\"\"Publish article form\"\"\"\n class Meta:\n model = get_article_class()\n fields = ('publication',)\n widgets = {\n 'publication': forms.HiddenInput(),\n }\n\n\nclass NewsletterForm(InlineHtmlEditableModelForm):\n \"\"\"form for newsletter edition\"\"\"\n\n class Meta:\n model = Newsletter\n fields = ('subject', 'content', )\n\n\nclass NewsletterSchedulingForm(floppyforms.ModelForm):\n \"\"\"Newsletter scheduling\"\"\"\n class Meta:\n model = NewsletterSending\n fields = ('scheduling_dt',)\n\n def clean_scheduling_dt(self):\n \"\"\"validation\"\"\"\n sch_dt = self.cleaned_data['scheduling_dt']\n\n if not sch_dt:\n raise ValidationError(_(\"This field is required\"))\n\n if sch_dt < dt_now():\n raise ValidationError(_(\"The scheduling date must be in future\"))\n\n return sch_dt\n\n\nclass NewsletterTemplateForm(forms.Form):\n \"\"\"Newsletter template\"\"\"\n\n def __init__(self, newsletter, user, *args, **kwargs):\n super(NewsletterTemplateForm, self).__init__(*args, **kwargs) # pylint: disable=E1002\n choices = get_newsletter_templates(newsletter, user)\n if choices:\n self.fields[\"template\"] = forms.ChoiceField(choices=choices)\n else:\n self.fields[\"template\"] = forms.CharField()\n self.fields[\"template\"].initial = newsletter.template\n \n\nclass NewsletterAdminForm(forms.ModelForm):\n \"\"\"newsletter admin form\"\"\"\n def __init__(self, *args, **kwargs):\n super(NewsletterAdminForm, self).__init__(*args, **kwargs) # pylint: disable=E1002\n self.newsletter = kwargs.get('instance', None)\n choices = get_newsletter_templates(self.newsletter, getattr(self, \"current_user\", None))\n if choices:\n self.fields[\"template\"] = forms.ChoiceField(choices=choices)\n else:\n self.fields[\"template\"] = forms.CharField()\n self.fields[\"items\"].widget.attrs[\"class\"] = \"chosen-select\"\n\n class Meta:\n model = Newsletter\n fields = ('subject', 'content', 'template', 'source_url', 'items', 'newsletter_date', 'site')\n widgets = {}\n\n class Media:\n css = {\n 'all': ('css/admin-tricks.css', 'chosen/chosen.css', ),\n }\n js = (\n 'chosen/chosen.jquery.js',\n )\n\n \nclass NewsletterHandleRecipients(BsForm):\n email_help_text = gettext_lazy(\"Enter another address to send to someone who is not in the list\")\n email_label = gettext_lazy(\"Email\")\n\n emails = forms.MultipleChoiceField(\n widget=forms.CheckboxSelectMultiple(), required=False,\n label=_(gettext_lazy('Emails')), help_text=gettext_lazy('Check the address if you want to send it the test'),\n choices=[]\n )\n additional_email1 = forms.EmailField(required=False, label=email_label, help_text=email_help_text)\n additional_email2 = forms.EmailField(required=False, label=email_label, help_text=email_help_text)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = []\n for elt in getattr(settings, 'COOP_CMS_TEST_EMAILS', []):\n if isinstance(elt, str):\n choices.append((elt, elt))\n else:\n # tuple or list\n choices.append(elt)\n self.fields['emails'].choices = choices\n\n def clean(self):\n super().clean()\n emails = self.cleaned_data['emails']\n additional_email1 = self.cleaned_data['additional_email1']\n additional_email2 = self.cleaned_data['additional_email2']\n if not emails and not additional_email1 and 
not additional_email2:\n            raise forms.ValidationError(_('Please select or enter at least one email'))\n","sub_path":"coop_cms/forms/newsletters.py","file_name":"newsletters.py","file_ext":"py","file_size_in_byte":7234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"347662818","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nNAME\n    patch_geocachers.py\n\nDESCRIPTION\n    Patch geocachers data by sql queries\n\"\"\"\nimport os\nimport requests\nfrom pprint import pprint\nfrom django.db import connection\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom gpsfun.main.GeoCachSU.models import Geocacher\nfrom gpsfun.main.models import log, UPDATE_TYPE\nfrom gpsfun.geocaching_su_stat.utils import (\n    LOGIN_DATA, logged, get_found_caches_countries, set_country_code,\n    get_found_caches_oblast, set_oblast_code)\n\n\ndef patch_it(name):\n    pathtofile = os.path.join(settings.SCRIPTS_ROOT, name)\n    # read the batch file and close it promptly\n    with open(pathtofile, 'r') as f:\n        text = f.read()\n    queries = text.split(';')\n    for sql in queries:\n        sql = sql.strip()\n\n        if sql.startswith('SELECT') or sql.startswith('select') \\\n           or not sql or sql.startswith('--') or sql.startswith('#'):\n            continue\n        else:\n            print()\n            print('execute', sql)\n            with connection.cursor() as cursor:\n                cursor.execute(sql)\n\n\nclass Command(BaseCommand):\n    help = 'Patch geocachers data by sql queries'\n\n    def handle(self, *args, **options):\n\n        sql_batches = (\n            'set_country_iso_for_geocachers.sql',\n            'set_admin_code_to_geocacher.sql',\n            'crimea.sql'\n        )\n\n        for name in sql_batches:\n            patch_it('sql/' + name)\n            print(name, ' processed')\n\n        with requests.Session() as session:\n            post = session.post(\n                'https://geocaching.su',\n                data=LOGIN_DATA\n            )\n            r = session.get('https://geocaching.su')\n            if not logged(r.text):\n                print('Authorization failed')\n            else:\n                for uid in Geocacher.objects.filter(\n                        country_iso3__isnull=True).values_list('uid', flat=True):\n                    r = session.get(\n                        'http://www.geocaching.su/site/popup/userstat.php',\n                        params={'s': 2, 'uid': uid}\n                    )\n                    country = get_found_caches_countries(uid, r.text)\n                    set_country_code(uid, country)\n                names = {''}\n                for uid in Geocacher.objects.filter(\n                        admin_code__isnull=True).values_list('uid', flat=True):\n                    r = session.get(\n                        'http://www.geocaching.su/site/popup/userstat.php',\n                        params={'s': 2, 'uid': uid}\n                    )\n                    oblast = get_found_caches_oblast(uid, r.text)\n                    names.add(oblast)\n                    set_oblast_code(uid, oblast)\n\n\n        log(UPDATE_TYPE.geocacher_patch, 'OK')\n\n        return 'Geocachers data are updated'\n","sub_path":"gpsfun/gpsfun/geocaching_su_stat/management/commands/patch_geocachers.py","file_name":"patch_geocachers.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"163569806","text":"#calculate the number of days between two dates\n# years months days\nimport sys\n\nargLength = len(sys.argv)\ndiffList = ['','','']\nif argLength == 7:\n    for i in range(1, 4):\n        diffList[i-1] = abs(int(sys.argv[i]) - int(sys.argv[i+3]))\n    print('Years: {}\\nMonths: {}\\nDays: {}'.format(diffList[0], diffList[1], diffList[2]))\n","sub_path":"exercises/dateDiff.py","file_name":"dateDiff.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
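# Note on dateDiff.py above: the component-wise abs() subtraction never borrows
# between fields, so it does not yield a true day count. For an exact difference
# in days, datetime.date arithmetic can be used (dates are illustrative):
#
#     from datetime import date
#     delta = date(2020, 3, 1) - date(2020, 2, 1)
#     print(delta.days)  # 29 -- February 2020 had 29 days
{"seq_id":"432580996","text":"#vjezba2_zd05.py\n#Jure Glavan, 4,11,2017\n\nsekunde = int(input(\"upisi vrijeme u sekundama: \"))\n\nminute, sekunde = 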
divmod(sekunde, 60)\nsati, minute = divmod(minute, 60)\ndani, sati = divmod(sati, 24)\n\nprint(\"preracunato vrijeme iznosi {} dana, {} sati, {} minuta i {} sekundi\".format(dani, sati, minute, sekunde))\n\n# the divmod function takes two arguments, computes the \"div //\" and \"mod %\" values of the arguments and returns them as a pair of variables","sub_path":"PYTHON/IT/Vježba_2_Jure_Glavan/vjezba2_zd05.py","file_name":"vjezba2_zd05.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"640072927","text":"from flask import Flask, render_template, url_for\n\napp = Flask(__name__)\n\n\n@app.route('/index/<title>/<prof>')\ndef mission(title, prof):\n    if \"инженер\" in prof or \"строитель\" in prof:\n        prof = \"Инженерные тренажеры\"\n        img = url_for('static', filename=\"images/ship1.jpg\")\n    else:\n        prof = \"Научные симуляторы\"\n        img = url_for('static', filename=\"images/sh1p2.jpg\")\n    return render_template('base.html', title=title, profession=prof, image=img)\n\n\nif __name__ == '__main__':\n    app.run(port=8080, host='127.0.0.1')\n","sub_path":"WEB. Шаблоны. flask-wtf/Тренировки в полёте/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"310426579","text":"\"\"\"\nProblem Description\nGiven a matrix of integers A of size N x M consisting of 0 or 1.\n\nFor each cell of the matrix find the distance of nearest 1 in the matrix.\n\nDistance between two cells (x1, y1) and (x2, y2) is defined as |x1 - x2| + |y1 - y2|.\n\nFind and return a matrix B of size N x M which defines for each cell in A distance of nearest 1 in the matrix A.\n\nNOTE: There is at least one 1 present in the matrix.\n\"\"\"\n\nclass Solution:\n    # @param A : list of list of integers\n    # @return a list of list of integers\n    def solve(self, A):\n        rows, cols = len(A), len(A[0])\n        # create ans matrix of same dimensions as A and initialise it with 0\n        ans = [[0]*(cols) for _ in range(rows)]\n\n        from collections import deque\n        dq = deque([])\n        visited = set()\n        # first add all the 1's to the Q with distance 0\n        for r in range(rows):\n            for c in range(cols):\n                if A[r][c]==1:\n                    dq.append(((r,c), 0))\n                    visited.add((r,c))\n\n        # keep going until the Q is empty\n        while dq:\n            curr = dq.popleft()\n            curr_co_ord, curr_dist = curr[0], curr[1]\n            x_offset = [-1, 0, 1, 0]\n            y_offset = [0, -1, 0, 1]\n            for k in range(4):\n                # neighbour cell\n                neighbour_co_ord = (curr_co_ord[0]+x_offset[k], curr_co_ord[1]+y_offset[k])\n                # check if the neighbour cell is within matrix boundaries\n                if neighbour_co_ord[0]>=0 and neighbour_co_ord[0]<rows and \\\n                   neighbour_co_ord[1]>=0 and neighbour_co_ord[1]<cols and \\\n                   A[neighbour_co_ord[0]][neighbour_co_ord[1]]!=1 and \\\n                   neighbour_co_ord not in visited:\n                    # add to visited\n                    visited.add(neighbour_co_ord)\n                    # update ans matrix with distance,\n                    # |x1-x2|+|y1-y2| will be +1 for all 4 neighbours\n                    ans[neighbour_co_ord[0]][neighbour_co_ord[1]] = curr_dist+1\n                    # add to Q\n                    dq.append((neighbour_co_ord, curr_dist+1))\n\n        return ans","sub_path":"graph/day_2/distance_nearest_cell.py","file_name":"distance_nearest_cell.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
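# Quick check of Solution.solve above (matrix is hypothetical; distances were
# verified by hand). Seeding the queue with every 1 at distance 0 is what lets
# a single BFS pass produce the nearest-1 distance for all cells at once:
#
#     matrix = [[0, 0, 0],
#               [0, 1, 0],
#               [1, 1, 1]]
#     Solution().solve(matrix)  # -> [[2, 1, 2], [1, 0, 1], [0, 0, 0]]
{"seq_id":"143977690","text":"# -*- coding:utf-8 -*-\n\"\"\"File management business logic\"\"\"\nfrom flask import request\nfrom app.db import MONGO_DB as mongo\nfrom app.util.response import 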
ResponseHelper\nfrom app.util.logger import create_logger\nfrom ws.main import socketio, emit\n\nclass FileManager:\n    \"\"\"File management\"\"\"\n\n    def __init__(self):\n        self.logger = create_logger('SOURCE_DATA')\n\n    def document_query(self):\n        \"\"\"Document files\"\"\"\n        try:\n            query = request.args\n            name = query.get('name')\n            page = query.get('page')\n            limit = query.get('limit')\n            if name:\n                # a name (md5) lookup returns a single document\n                documents = list(mongo.db.Document.find({'md5': name}, {'_id':0}))\n                count = 1\n            elif not all([page, limit]):\n                return ResponseHelper.return_false_data(msg='参数错误')\n            else:\n                page = int(page)\n                limit = int(limit)\n                skip = (page - 1) * limit\n                documents = list(mongo.db.Document.find({}, {'_id':0}).skip(skip).limit(limit))\n                count = mongo.db.Document.find({}).count()\n            data = {\n                'documents': documents,\n                'count': count\n            }\n            # emit('message_server', {'data': count}, namespace='/MESSAGE_TIP')\n            # socketio.emit('message_server', {'data': count, 'userName': 'anonymity'}, namespace='/MESSAGE_TIP')\n            return ResponseHelper.return_true_data(data)\n        except Exception as err:\n            self.logger.error('Server Error : %s' % str(err))\n            return ResponseHelper.return_server_error(), 500\n\n    def picture_query(self):\n        \"\"\"Picture files\"\"\"\n\n        try:\n            query = request.args\n            print(query)\n            pictures = list(mongo.db.Picture.find({}, {'_id':0}))\n            count = mongo.db.Picture.find({}).count()\n            data = {\n                'pictures': pictures,\n                'count': count\n            }\n            return ResponseHelper.return_true_data(data)\n        except Exception as err:\n            self.logger.error('Server Error : %s' % str(err))\n            return ResponseHelper.return_server_error(), 500\n\n    def video_query(self):\n        \"\"\"Video files\"\"\"\n        try:\n            query = request.args\n            print(query)\n            videos = list(mongo.db.Video.find({}, {'_id':0}))\n            count = mongo.db.Video.find({}).count()\n            data = {\n                'videos': videos,\n                'count': count\n            }\n            return ResponseHelper.return_true_data(data)\n        except Exception as err:\n            self.logger.error('Server Error : %s' % str(err))\n            return ResponseHelper.return_server_error(), 500\n\n","sub_path":"app/business/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"35953448","text":"from math import inf\n\n\ndef lower_bound(a, val, lo, hi):\n    \"\"\"\n    find insertion position for val using binary search\n    lower (left) and upper (right) bounds of 3:\n    1 2 3 3 3 4 5\n        ^     ^\n    if there is no val in [lo, hi) then lower_bound == upper_bound\n    \"\"\"\n    while lo < hi:\n        mid = (lo + hi) // 2\n        if a[mid] < val:\n            lo = mid + 1\n        else:\n            hi = mid\n    return lo\n\n\n
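# Doctest-style check, reusing the example from the docstring above:\n#\n#   >>> a = [1, 2, 3, 3, 3, 4, 5]\n#   >>> lower_bound(a, 3, 0, len(a))\n#   2\n#   >>> upper_bound(a, 3, 0, len(a))   # defined below\n#   5\n#\n# so equal_range(a, 3, 0, len(a)) == (2, 5), the half-open run of 3s.\n\ndef lower_bound2(a, val, start, end):\n    \"\"\"This implementation of binary search is less error-prone\n    while still working in O(log n) time. 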
It uses simple linear search\n    with decreasing step size.\n    \"\"\"\n    step = end - start\n    pos = start - 1\n    while step > 0:\n        while pos + step < end and a[pos + step] < val:\n            pos += step\n        step //= 2\n    return pos + 1\n\n\ndef upper_bound(a, val, lo, hi):\n    while lo < hi:\n        mid = (lo + hi) // 2\n        if val < a[mid]:\n            hi = mid\n        else:\n            lo = mid + 1\n    return lo\n\n\ndef equal_range(a, val, start, end):\n    lo = lower_bound(a, val, start, end)\n    hi = upper_bound(a, val, start, end)\n    return lo, hi\n\n\ndef longest_increasing_subsequence(a):\n    # d[i] is the lowest number that increasing subsequence of length i ends\n    # with. One extra slot is needed so that a strictly increasing input can\n    # reach length len(a) without indexing past the end of d.\n    d = [inf] * (len(a) + 1)\n    p = [-1] * len(a)  # index predecessor of a[i]\n    p_i = [-1] * (len(a) + 1)  # index of element d[i]\n    d[0] = -inf\n    n_largest = 0\n    for i in range(len(a)):\n        k = upper_bound(d, a[i], 0, len(d))\n        if d[k - 1] < a[i] < d[k]:\n            d[k] = a[i]\n            p_i[k] = i\n            p[i] = p_i[k - 1]\n            n_largest = max(n_largest, k)\n\n    path = []\n    i = p_i[n_largest]\n    while i != -1:\n        path.append(a[i])\n        i = p[i]\n    path.reverse()\n    return path\n\n\ndef maximum_sum_subarray(a):\n    \"\"\"returns subarray [l, r) with maximal sum s\"\"\"\n    s = 0  # prefix sum\n    min_s, min_i = 0, -1  # minimum on s[0..r - 1]\n    l, r, max_s = 0, 1, a[0]\n    for i, e in enumerate(a):\n        s += e\n        # suppose i is right boundary,\n        # then l - 1 is the minimum on s[0..r - 1]\n        if s - min_s > max_s:\n            l = min_i + 1\n            r = i + 1\n            max_s = s - min_s\n        if s < min_s:\n            min_i = i\n            min_s = s\n\n    return l, r, max_s\n\n\ndef maximum_sum_subarray2(a):\n    \"\"\"another algorithm for O(n) max sum subarray\"\"\"\n    s, cur_l = 0, 0  # current sum\n    l, r, max_s = 0, 1, a[0]\n    for i, e in enumerate(a):\n        s += e\n        # better to reset l index\n        if s < e:\n            s = e\n            cur_l = i\n        if s > max_s:\n            max_s = s\n            l = cur_l\n            r = i + 1\n    return l, r, max_s\n\n\n
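# Sanity check for both subarray variants (classic Kadane example; the\n# expected triple was traced by hand):\n#\n#   >>> maximum_sum_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4])\n#   (3, 7, 6)\n#   >>> maximum_sum_subarray2([-2, 1, -3, 4, -1, 2, 1, -5, 4])\n#   (3, 7, 6)\n#\n# i.e. the slice a[3:7] == [4, -1, 2, 1] sums to 6.\n\ndef longest_common_subsequence(a, b):\n    \"\"\"Returns longest common subsequence of a and b in O(n*m)\n    Z is a subsequence of a if Z == a with some elements removed.\n\n    properties of Z (optimal substructure):\n    1. a[-1] == b[-1] => Z[:-1] == LCS(a[:-1], b[:-1])\n    2. a[-1] != b[-1] and a[-1] != Z[-1] => Z == LCS(a[:-1], b)\n    3. 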
a[-1] != b[-1] and b[-1] != Z[-1] => Z == LCS(a, b[:-1])\n \"\"\"\n n, m = len(a), len(b)\n lcs = [[0] * (m + 1) for _ in range(n + 1)] # lcs of prefixes\n p = [[(0, 0)] * (m + 1) for _ in range(n + 1)]\n\n # lcs[n][m] == 0\n for i in range(1, n + 1):\n for j in range(1, m + 1):\n if a[i - 1] == b[j - 1]:\n lcs[i][j] = lcs[i - 1][j - 1] + 1\n p[i][j] = (i - 1, j - 1)\n elif lcs[i - 1][j] > lcs[i][j - 1]:\n lcs[i][j] = lcs[i - 1][j]\n p[i][j] = (i - 1, j)\n else:\n lcs[i][j] = lcs[i][j - 1]\n p[i][j] = (i, j - 1)\n\n z = []\n i, j = n, m\n while i > 0 and j > 0:\n if p[i][j] == (i - 1, j - 1):\n z.append(a[i - 1])\n i, j = p[i][j]\n z.reverse()\n return z\n","sub_path":"algorithms/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188366725","text":"import requests\nfrom decrypt521 import get_gt_challenge\n\ndef get_data(searchword, challenge, validate):\n data = {\n \"searchword\": searchword,\n \"geetest_challenge\": challenge,\n \"token\": \"83860651\",\n \"tab\": \"ent_tab\",\n \"geetest_seccode\": validate + \"|jordan\",\n \"geetest_validate\": validate,\n }\n\n\n\n","sub_path":"gsxt/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192054480","text":"def first_ones(word_one, word_two):\n num = 0\n if word_one[0] == word_two[0]:\n num = 10\n return num\n\ndef last_ones(word_one,word_two):\n num = 0\n if word_one[-1] == word_two[-1]:\n num = 10\n return num\n\ndef length_comparison(word_one,word_two):\n if len(word_one) <= len(word_two):\n num = 30*(float(len(word_one))/float(len(word_two)))\n else:\n num = 30*(float(len(word_two))/float(len(word_one)))\n return num\n\ndef unique_letters(word_one,word_two):\n list_one = list(word_one)\n list_two =list(word_two)\n uni = set(list_one).union(set(list_two))\n inters = set(list_one).intersection(set(list_two))\n num = float(len(inters))/float(len(uni))*50\n return num\n\ndef finder(outlist):\n funky = 0\n chunky = \"\"\n keys = list(outlist.keys())\n values = list(outlist.values())\n for i in range(0,len(keys)):\n if values[i] >= funky:\n funky = values[i]\n chunky = keys[i]\n \n return chunky\n \ndef find_word(sentence):\n sentence = sentence.replace(\".\", \"\").replace(\",\", \"\").lower()\n words = sentence.split(\" \")\n i = j = 0\n outlist = {}\n for one in words:\n outcome = 0\n for two in words:\n score = first_ones(one, two) + last_ones(one, two) + length_comparison(one, two) + unique_letters(one, two)\n outcome = outcome + score\n j = j + 1\n outlist[one] = outcome - 100 \n i = i +1\n return finder(outlist)\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert find_word(\"Speak friend and enter.\") == \"friend\", \"Friend\"\n assert find_word(\"Beard and Bread\") == \"bread\", \"Bread is Beard\"\n assert find_word(\"The Doors of Durin, Lord of Moria. Speak friend and enter. \"\n \"I Narvi made them. 
Celebrimbor of Hollin drew these signs\") == \"durin\", \"Durin\"\n    assert find_word(\"Aoccdrnig to a rscheearch at Cmabrigde Uinervtisy.\"\n                     \" According to a researcher at Cambridge University.\") == \"according\", \"Research\"\n    assert find_word(\"One, two, two, three, three, three.\") == \"three\", \"Repeating\"\n","sub_path":"Electric_Station_Find_Word.py","file_name":"Electric_Station_Find_Word.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"207606239","text":"from models import Bookmark\nfrom rest_framework import viewsets\nfrom serializers import BookmarkSerializer\n\nclass BookmarkViewSet(viewsets.ModelViewSet):\n    queryset = Bookmark.objects.all()\n    serializer_class = BookmarkSerializer\n\n    def list(self, request):\n        if not request.user.is_superuser:\n            self.queryset = self.queryset.filter(user=request.user)\n\n        return super(BookmarkViewSet, self).list(request)\n","sub_path":"bookmarks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"558479866","text":"\"\"\"\n.. _mlflow-regression-pipeline:\n\nThe MLflow Regression Pipeline is an MLflow Pipeline for developing high-quality regression models.\nIt is designed for developing models using scikit-learn and frameworks that integrate with\nscikit-learn, such as the ``XGBRegressor`` API from XGBoost. The corresponding pipeline\ntemplate repository is available at https://github.com/mlflow/mlp-regression-template, and the\n:py:class:`RegressionPipeline API Documentation <RegressionPipeline>` provides instructions for\nexecuting the pipeline and inspecting its results.\n\nThe training pipeline contains the following sequential steps:\n\n**ingest** -> **split** -> **transform** -> **train** -> **evaluate** -> **register**\n\nThe batch scoring pipeline contains the following sequential steps:\n\n**ingest_scoring** -> **predict**\n\nThe pipeline steps are defined as follows:\n\n    - **ingest**\n      - The **ingest** step resolves the dataset specified by the |'data' section in pipeline.yaml|\n        and converts it to parquet format, leveraging the custom dataset parsing code defined in\n        |steps/ingest.py| if necessary. Subsequent steps convert this dataset into training,\n        validation, & test sets and use them to develop a model.\n\n        .. note::\n            If you make changes to the dataset referenced by the **ingest** step (e.g. by adding\n            new records or columns), you must manually re-run the **ingest** step in order to\n            use the updated dataset in the pipeline. The **ingest** step does *not* automatically\n            detect changes in the dataset.\n\n    .. _mlflow-regression-pipeline-split-step:\n\n    - **split**\n      - The **split** step splits the ingested dataset produced by the **ingest** step into\n        a training dataset for model training, a validation dataset for model performance\n        evaluation & tuning, and a test dataset for model performance evaluation. The fraction\n        of records allocated to each dataset is defined by the ``split_ratios`` attribute of the\n        |'split' step definition in pipeline.yaml|. The **split** step also preprocesses the\n        datasets using logic defined in |steps/split.py|. 
Subsequent steps use these datasets\n        to develop a model and measure its performance.\n\n    - **transform**\n      - The **transform** step uses the training dataset created by **split** to fit\n        a transformer that performs the transformations defined in |steps/transform.py|. The\n        transformer is then applied to the training dataset and the validation dataset, creating\n        transformed datasets that are used by subsequent steps for estimator training and model\n        performance evaluation.\n\n    .. _mlflow-regression-pipeline-train-step:\n\n    - **train**\n      - The **train** step uses the transformed training dataset output from the **transform**\n        step to fit an estimator with the type and parameters defined in |steps/train.py|. The\n        estimator is then joined with the fitted transformer output from the **transform** step\n        to create a model pipeline. Finally, this model pipeline is evaluated against the\n        transformed training and validation datasets to compute performance metrics; custom\n        metrics are computed according to definitions in |steps/custom_metrics.py| and the\n        |'metrics' section of pipeline.yaml|. The model pipeline and its associated parameters,\n        performance metrics, and lineage information are logged to MLflow Tracking, producing\n        an MLflow Run.\n\n    - **evaluate**\n      - The **evaluate** step evaluates the model pipeline created by the **train** step on\n        the test dataset output from the **split** step, computing performance metrics and\n        model explanations. Performance metrics are compared against configured thresholds to\n        compute a ``model_validation_status``, which indicates whether or not a model is good\n        enough to be registered to the MLflow Model Registry by the subsequent **register**\n        step. Custom performance metrics are computed according to definitions in\n        |steps/custom_metrics.py| and the |'metrics' section of pipeline.yaml|. Model\n        performance thresholds are defined in the\n        |'validation_criteria' section of the 'evaluate' step definition in pipeline.yaml|. Model\n        performance metrics and explanations are logged to the same MLflow Tracking Run used by\n        the **train** step.\n\n    - **register**\n      - The **register** step checks the ``model_validation_status`` output of the preceding\n        **evaluate** step and, if model validation was successful\n        (as indicated by the ``'VALIDATED'`` status), registers the model pipeline created by\n        the **train** step to the MLflow Model Registry. If the ``model_validation_status`` does\n        not indicate that the model passed validation checks (i.e. its value is ``'REJECTED'``),\n        the model pipeline is not registered to the MLflow Model Registry.\n        If the model pipeline is registered to the MLflow Model Registry, a\n        ``registered_model_version`` is produced containing the model name and the model version.\n\n        .. note::\n            The model validation status check can be disabled by specifying\n            ``allow_non_validated_model: true`` in the\n            |'register' step definition of pipeline.yaml|, in which case the model pipeline is\n            always registered with the MLflow Model Registry when the **register** step is\n            executed.\n\n    - **predict**\n      - The **predict** step uses the ingested dataset for scoring created by the\n        **ingest_scoring** step and applies the specified model to the dataset, producing\n        a scored dataset.\n\n
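For example, once the training DAG has produced a registered model, the batch scoring\nDAG can be exercised end to end (a minimal sketch; the profile name is illustrative):\n\n.. code-block:: python\n\n    from mlflow.pipelines import Pipeline\n\n    pipeline = Pipeline(profile=\"local\")\n    pipeline.run(step=\"predict\")  # runs ingest_scoring -> predict\n    scored_df = pipeline.get_artifact(\"scored_data\")\n\n.. |'split' step definition in pipeline.yaml| replace:: `'split' step definition in pipeline.yaml <https://github.com/mlflow/mlp-regression-template/blob/35f6f32c7a89dc655fbcfcf731cc1da4685a8ebb/pipeline.yaml#L36-L40>`__\n.. 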
|'register' step definition of pipeline.yaml| replace:: `'register' step definition of pipeline.yaml <https://github.com/mlflow/mlp-regression-template/blob/35f6f32c7a89dc655fbcfcf731cc1da4685a8ebb/pipeline.yaml#L57-L63>`__\n.. |'data' section in pipeline.yaml| replace:: `'data' section in pipeline.yaml <https://github.com/mlflow/mlp-regression-template/blob/35f6f32c7a89dc655fbcfcf731cc1da4685a8ebb/pipeline.yaml#L15-L32>`__\n.. |'metrics' section of pipeline.yaml| replace:: `'metrics' section of pipeline.yaml <https://github.com/mlflow/mlp-regression-template/blob/35f6f32c7a89dc655fbcfcf731cc1da4685a8ebb/pipeline.yaml#L64-L73>`__\n.. |'validation_criteria' section of the 'evaluate' step definition in pipeline.yaml| replace:: `'validation_criteria' section of the 'evaluate' step definition in pipeline.yaml <https://github.com/mlflow/mlp-regression-template/blob/35f6f32c7a89dc655fbcfcf731cc1da4685a8ebb/pipeline.yaml#L47-L56>`__\n.. |steps/ingest.py| replace:: `steps/ingest.py <https://github.com/mlflow/mlp-regression-template/blob/main/steps/ingest.py>`__\n.. |steps/split.py| replace:: `steps/split.py <https://github.com/mlflow/mlp-regression-template/blob/main/steps/split.py>`__\n.. |steps/train.py| replace:: `steps/train.py <https://github.com/mlflow/mlp-regression-template/blob/main/steps/train.py>`__\n.. |steps/transform.py| replace:: `steps/transform.py <https://github.com/mlflow/mlp-regression-template/blob/main/steps/transform.py>`__\n.. |steps/custom_metrics.py| replace:: `steps/custom_metrics.py <https://github.com/mlflow/mlp-regression-template/blob/main/steps/custom_metrics.py>`__\n\"\"\"\n\nimport os\nimport logging\n\nfrom mlflow.pipelines.regression.v1 import dag_help_strings\nfrom mlflow.tracking.client import MlflowClient\nfrom mlflow.pipelines.pipeline import _BasePipeline\nfrom mlflow.pipelines.steps.ingest import IngestStep, IngestScoringStep\nfrom mlflow.pipelines.steps.split import (\n SplitStep,\n _OUTPUT_TRAIN_FILE_NAME,\n _OUTPUT_VALIDATION_FILE_NAME,\n _OUTPUT_TEST_FILE_NAME,\n)\nfrom mlflow.pipelines.steps.transform import TransformStep\nfrom mlflow.pipelines.steps.train import TrainStep\nfrom mlflow.pipelines.steps.evaluate import EvaluateStep\nfrom mlflow.pipelines.steps.predict import (\n PredictStep,\n _SCORED_OUTPUT_FILE_NAME,\n)\nfrom mlflow.pipelines.steps.register import RegisterStep, RegisteredModelVersionInfo\nfrom mlflow.pipelines.step import BaseStep\nfrom typing import List, Any, Optional\nfrom mlflow.pipelines.utils import get_pipeline_root_path\nfrom mlflow.pipelines.utils.execution import get_or_create_base_execution_directory\nfrom mlflow.pipelines.utils.execution import get_step_output_path\nfrom mlflow.exceptions import MlflowException, INVALID_PARAMETER_VALUE\nfrom mlflow.tracking._tracking_service.utils import _use_tracking_uri\nfrom mlflow.utils.annotations import experimental\n\n_logger = logging.getLogger(__name__)\n\n\n@experimental\nclass RegressionPipeline(_BasePipeline):\n \"\"\"\n A pipeline for developing high-quality regression models. The pipeline is designed for\n developing models using scikit-learn and frameworks that integrate with scikit-learn,\n such as the ``XGBRegressor`` API from XGBoost. 
The corresponding pipeline\n template repository is available at https://github.com/mlflow/mlp-regression-template.\n The training pipeline contains the following sequential steps:\n\n **ingest** -> **split** -> **transform** -> **train** -> **evaluate** -> **register**\n\n while the batch scoring pipeline contains this set of sequential steps:\n\n **ingest_scoring** -> **predict**\n\n .. code-block:: python\n :caption: Example\n\n import os\n from mlflow.pipelines import Pipeline\n\n os.chdir(\"~/mlp-regression-template\")\n regression_pipeline = Pipeline(profile=\"local\")\n # Display a visual overview of the pipeline graph\n regression_pipeline.inspect()\n # Run the full pipeline\n regression_pipeline.run()\n # Display a summary of results from the 'train' step, including the trained model\n # and associated performance metrics computed from the training & validation datasets\n regression_pipeline.inspect(step=\"train\")\n # Display a summary of results from the 'evaluate' step, including model explanations\n # computed from the validation dataset and metrics computed from the test dataset\n regression_pipeline.inspect(step=\"evaluate\")\n \"\"\"\n\n _TRAIN_DAG_NAME = \"train_dag\"\n _TRAIN_DAG_STEPS = (\n # Training data ingestion DAG\n IngestStep,\n # Model training DAG\n SplitStep,\n TransformStep,\n TrainStep,\n EvaluateStep,\n RegisterStep,\n )\n\n _SCORING_DAG_NAME = \"scoring_dag\"\n _SCORING_DAG_STEPS = (\n # Batch scoring DAG\n IngestScoringStep,\n PredictStep,\n )\n\n _PIPELINE_STEPS = _TRAIN_DAG_STEPS + _SCORING_DAG_STEPS\n\n _STEPS_SUBGRAPH_MAP = dict()\n for _step_class in _TRAIN_DAG_STEPS:\n _STEPS_SUBGRAPH_MAP[_step_class] = _TRAIN_DAG_NAME\n for _step_class in _SCORING_DAG_STEPS:\n _STEPS_SUBGRAPH_MAP[_step_class] = _SCORING_DAG_NAME\n\n _SUBGRAPH_INDICES_MAP = {\n _TRAIN_DAG_NAME: (\n _PIPELINE_STEPS.index(_TRAIN_DAG_STEPS[0]),\n _PIPELINE_STEPS.index(_TRAIN_DAG_STEPS[-1]),\n ),\n _SCORING_DAG_NAME: (\n _PIPELINE_STEPS.index(_SCORING_DAG_STEPS[0]),\n _PIPELINE_STEPS.index(_SCORING_DAG_STEPS[-1]),\n ),\n }\n\n _DEFAULT_STEP_INDEX = _PIPELINE_STEPS.index(RegisterStep)\n\n def _get_step_classes(self):\n return self._PIPELINE_STEPS\n\n def _get_subgraph_for_target_step(self, target_step: BaseStep) -> List[BaseStep]:\n target_step_class = type(target_step)\n\n subgraph_name = self._STEPS_SUBGRAPH_MAP[target_step_class]\n s, e = self._SUBGRAPH_INDICES_MAP[subgraph_name]\n return self._steps[s : e + 1]\n\n def _get_default_step(self) -> BaseStep:\n return self._steps[self._DEFAULT_STEP_INDEX]\n\n def _get_pipeline_dag_file(self) -> str:\n import jinja2\n\n j2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n pipeline_dag_template = j2_env.get_template(\"resources/pipeline_dag_template.html\").render(\n {\n \"pipeline_yaml_help\": {\n \"help_string_type\": \"yaml\",\n \"help_string\": dag_help_strings.PIPELINE_YAML,\n },\n \"ingest_step_help\": {\n \"help_string\": dag_help_strings.INGEST_STEP,\n \"help_string_type\": \"text\",\n },\n \"ingest_user_code_help\": {\n \"help_string\": dag_help_strings.INGEST_USER_CODE,\n \"help_string_type\": \"python\",\n },\n \"ingested_data_help\": {\n \"help_string\": dag_help_strings.INGESTED_DATA,\n \"help_string_type\": \"text\",\n },\n \"split_step_help\": {\n \"help_string\": dag_help_strings.SPLIT_STEP,\n \"help_string_type\": \"text\",\n },\n \"split_user_code_help\": {\n \"help_string\": dag_help_strings.SPLIT_USER_CODE,\n \"help_string_type\": \"python\",\n },\n \"training_data_help\": {\n 
\"help_string\": dag_help_strings.TRAINING_DATA,\n \"help_string_type\": \"text\",\n },\n \"validation_data_help\": {\n \"help_string\": dag_help_strings.VALIDATION_DATA,\n \"help_string_type\": \"text\",\n },\n \"test_data_help\": {\n \"help_string\": dag_help_strings.TEST_DATA,\n \"help_string_type\": \"text\",\n },\n \"transform_step_help\": {\n \"help_string\": dag_help_strings.TRANSFORM_STEP,\n \"help_string_type\": \"text\",\n },\n \"transform_user_code_help\": {\n \"help_string\": dag_help_strings.TRANSFORM_USER_CODE,\n \"help_string_type\": \"python\",\n },\n \"fitted_transformer_help\": {\n \"help_string\": dag_help_strings.FITTED_TRANSFORMER,\n \"help_string_type\": \"text\",\n },\n \"transformed_training_and_validation_data_help\": {\n \"help_string\": dag_help_strings.TRANSFORMED_TRAINING_AND_VALIDATION_DATA,\n \"help_string_type\": \"text\",\n },\n \"train_step_help\": {\n \"help_string\": dag_help_strings.TRAIN_STEP,\n \"help_string_type\": \"text\",\n },\n \"train_user_code_help\": {\n \"help_string\": dag_help_strings.TRAIN_USER_CODE,\n \"help_string_type\": \"python\",\n },\n \"fitted_model_help\": {\n \"help_string\": dag_help_strings.FITTED_MODEL,\n \"help_string_type\": \"text\",\n },\n \"mlflow_run_help\": {\n \"help_string\": dag_help_strings.MLFLOW_RUN,\n \"help_string_type\": \"text\",\n },\n \"custom_metrics_user_code_help\": {\n \"help_string\": dag_help_strings.CUSTOM_METRICS_USER_CODE,\n \"help_string_type\": \"python\",\n },\n \"evaluate_step_help\": {\n \"help_string\": dag_help_strings.EVALUATE_STEP,\n \"help_string_type\": \"text\",\n },\n \"model_validation_status_help\": {\n \"help_string\": dag_help_strings.MODEL_VALIDATION_STATUS,\n \"help_string_type\": \"text\",\n },\n \"register_step_help\": {\n \"help_string\": dag_help_strings.REGISTER_STEP,\n \"help_string_type\": \"text\",\n },\n \"registered_model_version_help\": {\n \"help_string\": dag_help_strings.REGISTERED_MODEL_VERSION,\n \"help_string_type\": \"text\",\n },\n \"predict_step_help\": {\n \"help_string\": dag_help_strings.PREDICT_STEP,\n \"help_string_type\": \"text\",\n },\n }\n )\n\n pipeline_dag_file = os.path.join(\n get_or_create_base_execution_directory(self._pipeline_root_path), \"pipeline_dag.html\"\n )\n with open(pipeline_dag_file, \"w\") as f:\n f.write(pipeline_dag_template)\n\n return pipeline_dag_file\n\n def run(self, step: str = None) -> None:\n \"\"\"\n Runs the full pipeline or a particular pipeline step, producing outputs and displaying a\n summary of results upon completion. Step outputs are cached from previous executions, and\n steps are only re-executed if configuration or code changes have been made to the step or\n to any of its dependent steps (e.g. changes to the pipeline's ``pipeline.yaml`` file or\n ``steps/ingest.py`` file) since the previous execution.\n\n :param step: String name of the step to run within the regression pipeline. The step and\n its dependencies are executed sequentially. If a step is not specified, the\n entire pipeline is executed. 
Supported steps, in their order of execution, are:\n\n - ``\"ingest\"``: resolves the dataset specified by the ``data/training`` section\n in the pipeline's configuration file (``pipeline.yaml``) and converts it to\n parquet format.\n\n - ``\"ingest_scoring\"``: resolves the dataset specified by the ``data/scoring``\n section in the pipeline's configuration file (``pipeline.yaml``) and converts\n it to parquet format.\n\n - ``\"split\"``: splits the ingested dataset produced by the **ingest** step into\n a training dataset for model training, a validation dataset for model\n performance evaluation & tuning, and a test dataset for model performance\n evaluation.\n\n - ``\"transform\"``: uses the training dataset created by the **split** step to\n fit a transformer that performs the transformations defined in the\n pipeline's ``steps/transform.py`` file. Then, applies the transformer to the\n training dataset and the validation dataset, creating transformed datasets\n that are used by subsequent steps for estimator training and model\n performance evaluation.\n\n - ``\"train\"``: uses the transformed training dataset output from the\n **transform** step to fit an estimator with the type and parameters defined\n in in the pipeline's ``steps/train.py`` file. Then, joins the estimator with\n the fitted transformer output from the **transform** step to create a model\n pipeline. Finally, evaluates the model pipeline against the transformed\n training and validation datasets to compute performance metrics.\n\n - ``\"evaluate\"``: evaluates the model pipeline created by the **train** step\n on the validation and test dataset outputs from the **split** step, computing\n performance metrics and model explanations. Then, compares performance\n metrics against thresholds configured in the pipeline's ``pipeline.yaml``\n configuration file to compute a ``model_validation_status``, which indicates\n whether or not the model is good enough to be registered to the MLflow Model\n Registry by the subsequent **register** step.\n\n - ``\"register\"``: checks the ``model_validation_status`` output of the\n preceding **evaluate** step and, if model validation was successful (as\n indicated by the ``'VALIDATED'`` status), registers the model pipeline\n created by the **train** step to the MLflow Model Registry.\n\n - ``\"predict\"``: uses the ingested dataset for scoring created by the\n **ingest_scoring** step and applies the specified model to the dataset.\n\n .. code-block:: python\n :caption: Example\n\n import os\n from mlflow.pipelines import Pipeline\n\n os.chdir(\"~/mlp-regression-template\")\n regression_pipeline = Pipeline(profile=\"local\")\n # Run the 'train' step and preceding steps\n regression_pipeline.run(step=\"train\")\n # Run the 'register' step and preceding steps; the 'train' step and all steps\n # prior to 'train' are not re-executed because their outputs are already cached\n regression_pipeline.run(step=\"register\")\n # Run all pipeline steps; equivalent to running 'register'; no steps are re-executed\n # because the outputs of all steps are already cached\n regression_pipeline.run()\n \"\"\"\n return super().run(step=step)\n\n @experimental\n def get_artifact(self, artifact_name: str) -> Optional[Any]:\n \"\"\"\n Reads an artifact from the pipeline's outputs. Supported artifact names can be obtained by\n examining the pipeline graph visualization displayed by\n :py:func:`RegressionPipeline.inspect()`.\n\n :param artifact_name: The string name of the artifact. 
Supported artifact values are:\n\n - ``\"ingested_data\"``: returns the ingested dataset created in the\n **ingest** step as a pandas DataFrame.\n\n - ``\"training_data\"``: returns the training dataset created in the\n **split** step as a pandas DataFrame.\n\n - ``\"validation_data\"``: returns the validation dataset created in the\n **split** step as a pandas DataFrame.\n\n - ``\"test_data\"``: returns the test dataset created in the **split** step\n as a pandas DataFrame.\n\n - ``\"ingested_scoring_data\"``: returns the scoring dataset created in the\n **ingest_scoring** step as a pandas DataFrame.\n\n - ``\"transformed_training_data\"``: returns the transformed training dataset\n created in the **transform** step as a pandas DataFrame.\n\n - ``\"transformed_validation_data\"``: returns the transformed validation\n dataset created in the **transform** step as a pandas DataFrame.\n\n - ``\"model\"``: returns the MLflow Model pipeline created in the **train**\n step as a :py:class:`PyFuncModel <mlflow.pyfunc.PyFuncModel>` instance.\n\n - ``\"transformer\"``: returns the scikit-learn transformer created in the\n **transform** step.\n\n - ``\"run\"``: returns the\n :py:class:`MLflow Tracking Run <mlflow.entities.Run>` containing the\n model pipeline created in the **train** step and its associated\n parameters, as well as performance metrics and model explanations created\n during the **train** and **evaluate** steps.\n\n - ``\"registered_model_version``\": returns the MLflow Model Registry\n :py:class:`ModelVersion <mlflow.entities.model_registry.ModelVersion>`\n created by the **register** step.\n\n - ``\"scored_data\"``: returns the scored dataset created in the\n **predict** step as a pandas DataFrame.\n\n :return: An object representation of the artifact corresponding to the specified name,\n as described in the ``artifact_name`` parameter docstring. If the artifact is\n not present because its corresponding step has not been executed or its output\n cache has been cleaned, ``None`` is returned.\n\n .. 
code-block:: python\n :caption: Example\n\n import os\n import pandas as pd\n from mlflow.pipelines import Pipeline\n from mlflow.pyfunc import PyFuncModel\n\n os.chdir(\"~/mlp-regression-template\")\n regression_pipeline = Pipeline(profile=\"local\")\n regression_pipeline.run()\n train_df: pd.DataFrame = regression_pipeline.get_artifact(\"training_data\")\n trained_model: PyFuncModel = regression_pipeline.get_artifact(\"model\")\n \"\"\"\n import mlflow.pyfunc\n\n (\n ingest_step,\n split_step,\n transform_step,\n train_step,\n _,\n register_step,\n ingest_scoring_step,\n predict_step,\n ) = self._steps\n\n def log_artifact_not_found_warning(artifact_name, step_name):\n _logger.warning(\n f\"The artifact with name '{artifact_name}' was not found.\"\n f\" Re-run the '{step_name}' step to generate it.\"\n )\n\n def read_run_id():\n train_output_dir = get_step_output_path(self._pipeline_root_path, train_step.name, \"\")\n run_id_file_path = os.path.join(train_output_dir, \"run_id\")\n if os.path.exists(run_id_file_path):\n with open(run_id_file_path, \"r\") as f:\n return f.read().strip()\n else:\n return None\n\n train_step_tracking_uri = train_step.tracking_config.tracking_uri\n pipeline_root_path = get_pipeline_root_path()\n\n def read_dataframe_from_path(artifact_path, step_name):\n import pandas as pd\n\n if os.path.exists(artifact_path):\n return pd.read_parquet(artifact_path)\n else:\n log_artifact_not_found_warning(artifact_name, step_name)\n return None\n\n artifact_path = self._get_artifact_path(\n artifact_name\n ) # path may or may not exist, error handling is in this function\n\n if artifact_name == \"ingested_data\":\n return read_dataframe_from_path(artifact_path, ingest_step.name)\n\n elif artifact_name == \"training_data\":\n return read_dataframe_from_path(artifact_path, split_step.name)\n\n elif artifact_name == \"validation_data\":\n return read_dataframe_from_path(artifact_path, split_step.name)\n\n elif artifact_name == \"test_data\":\n return read_dataframe_from_path(artifact_path, split_step.name)\n\n elif artifact_name == \"transformed_training_data\":\n return read_dataframe_from_path(artifact_path, transform_step.name)\n\n elif artifact_name == \"transformed_validation_data\":\n return read_dataframe_from_path(artifact_path, transform_step.name)\n\n elif artifact_name == \"model\":\n run_id = read_run_id()\n if run_id:\n with _use_tracking_uri(train_step_tracking_uri, pipeline_root_path):\n return mlflow.pyfunc.load_model(f\"runs:/{run_id}/{train_step.name}/model\")\n else:\n log_artifact_not_found_warning(\"model\", train_step.name)\n return None\n\n elif artifact_name == \"transformer\":\n run_id = read_run_id()\n if run_id:\n with _use_tracking_uri(train_step_tracking_uri, pipeline_root_path):\n return mlflow.sklearn.load_model(\n f\"runs:/{run_id}/{transform_step.name}/transformer\"\n )\n else:\n log_artifact_not_found_warning(\"transformer\", train_step.name)\n return None\n\n elif artifact_name == \"run\":\n run_id = read_run_id()\n if run_id:\n with _use_tracking_uri(train_step_tracking_uri, pipeline_root_path):\n return MlflowClient().get_run(run_id)\n else:\n log_artifact_not_found_warning(\"mlflow run\", train_step.name)\n return None\n\n elif artifact_name == \"registered_model_version\":\n if os.path.exists(artifact_path):\n registered_model_info = RegisteredModelVersionInfo.from_json(path=artifact_path)\n with _use_tracking_uri(train_step_tracking_uri, pipeline_root_path):\n return MlflowClient().get_model_version(\n name=registered_model_info.name, 
version=registered_model_info.version\n )\n else:\n log_artifact_not_found_warning(\"registered_model_version\", register_step.name)\n return None\n\n elif artifact_name == \"ingested_scoring_data\":\n return read_dataframe_from_path(artifact_path, ingest_scoring_step.name)\n\n elif artifact_name == \"scored_data\":\n return read_dataframe_from_path(artifact_path, predict_step.name)\n\n else:\n raise MlflowException(\n f\"The artifact with name '{artifact_name}' is not supported.\",\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n def _get_artifact_path(self, artifact_name: str) -> Optional[str]:\n \"\"\"\n Returns a path to an artifact, which may or may not exist depending on whether or not the\n corresponding pipeline step has been run.\n \"\"\"\n (\n ingest_step,\n split_step,\n transform_step,\n train_step,\n _,\n register_step,\n ingest_scoring_step,\n predict_step,\n ) = self._steps\n\n if artifact_name == \"ingested_data\":\n ingest_output_dir = get_step_output_path(self._pipeline_root_path, ingest_step.name, \"\")\n return os.path.join(ingest_output_dir, IngestStep._DATASET_OUTPUT_NAME)\n elif artifact_name == \"training_data\":\n split_output_dir = get_step_output_path(self._pipeline_root_path, split_step.name, \"\")\n return os.path.join(split_output_dir, _OUTPUT_TRAIN_FILE_NAME)\n elif artifact_name == \"validation_data\":\n split_output_dir = get_step_output_path(self._pipeline_root_path, split_step.name, \"\")\n return os.path.join(split_output_dir, _OUTPUT_VALIDATION_FILE_NAME)\n elif artifact_name == \"test_data\":\n split_output_dir = get_step_output_path(self._pipeline_root_path, split_step.name, \"\")\n return os.path.join(split_output_dir, _OUTPUT_TEST_FILE_NAME)\n elif artifact_name == \"transformed_training_data\":\n transform_output_dir = get_step_output_path(\n self._pipeline_root_path, transform_step.name, \"\"\n )\n return os.path.join(transform_output_dir, \"transformed_training_data.parquet\")\n elif artifact_name == \"transformed_validation_data\":\n transform_output_dir = get_step_output_path(\n self._pipeline_root_path, transform_step.name, \"\"\n )\n return os.path.join(transform_output_dir, \"transformed_validation_data.parquet\")\n elif artifact_name == \"model\":\n train_output_dir = get_step_output_path(self._pipeline_root_path, train_step.name, \"\")\n return os.path.join(train_output_dir, \"model\", \"model.pkl\")\n elif artifact_name == \"transformer\":\n transform_output_dir = get_step_output_path(\n self._pipeline_root_path, transform_step.name, \"\"\n )\n return os.path.join(transform_output_dir, \"transformer.pkl\")\n elif artifact_name == \"run\":\n train_output_dir = get_step_output_path(self._pipeline_root_path, train_step.name, \"\")\n return os.path.join(train_output_dir, \"run_id\")\n elif artifact_name == \"registered_model_version\":\n register_output_dir = get_step_output_path(\n self._pipeline_root_path, register_step.name, \"\"\n )\n return os.path.join(register_output_dir, \"registered_model_version.json\")\n elif artifact_name == \"ingested_scoring_data\":\n ingest_scoring_output_dir = get_step_output_path(\n self._pipeline_root_path, ingest_scoring_step.name, \"\"\n )\n return os.path.join(ingest_scoring_output_dir, IngestScoringStep._DATASET_OUTPUT_NAME)\n elif artifact_name == \"scored_data\":\n predict_output_dir = get_step_output_path(\n self._pipeline_root_path, predict_step.name, \"\"\n )\n return os.path.join(predict_output_dir, _SCORED_OUTPUT_FILE_NAME)\n else:\n raise MlflowException(\n f\"The artifact with name 
'{artifact_name}' is not supported.\",\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n def clean(self, step: str = None) -> None:\n \"\"\"\n Removes all pipeline outputs from the cache, or removes the cached outputs of a particular\n pipeline step if specified. After cached outputs are cleaned for a particular step, the\n step will be re-executed in its entirety the next time it is run.\n\n :param step: String name of the step to clean within the pipeline. If not specified,\n cached outputs are removed for all pipeline steps.\n\n .. code-block:: python\n :caption: Example\n\n import os\n from mlflow.pipelines import Pipeline\n\n os.chdir(\"~/mlp-regression-template\")\n regression_pipeline = Pipeline(profile=\"local\")\n # Run the 'train' step and preceding steps\n regression_pipeline.run(step=\"train\")\n # Clean the cache of the 'transform' step\n regression_pipeline.clean(step=\"transform\")\n # Run the 'split' step; outputs are still cached because 'split' precedes\n # 'transform' & 'train'\n regression_pipeline.run(step=\"split\")\n # Run the 'train' step again; the 'transform' and 'train' steps are re-executed because:\n # 1. the cache of the preceding 'transform' step was cleaned and 2. 'train' occurs after\n # 'transform'. The 'ingest' and 'split' steps are not re-executed because their outputs\n # are still cached\n regression_pipeline.run(step=\"train\")\n \"\"\"\n super().clean(step=step)\n\n def inspect(self, step: str = None) -> None:\n \"\"\"\n Displays a visual overview of the pipeline graph, or displays a summary of results from\n a particular pipeline step if specified. If the specified step has not been executed,\n nothing is displayed.\n\n :param step: String name of the pipeline step for which to display a results summary. If\n unspecified, a visual overview of the pipeline graph is displayed.\n\n .. 
code-block:: python\n :caption: Example\n\n import os\n from mlflow.pipelines import Pipeline\n\n os.chdir(\"~/mlp-regression-template\")\n regression_pipeline = Pipeline(profile=\"local\")\n # Display a visual overview of the pipeline graph.\n regression_pipeline.inspect()\n # Run the 'train' pipeline step\n regression_pipeline.run(step=\"train\")\n # Display a summary of results from the preceding 'transform' step\n regression_pipeline.inspect(step=\"transform\")\n \"\"\"\n super().inspect(step=step)\n","sub_path":"mlflow/pipelines/regression/v1/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":35827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498695171","text":"import copy\n\nfrom dlgo.ttt.ttttypes import Player, Point\n\n__all__ = [\n 'Board',\n 'GameState',\n 'Move',\n]\n\n\nclass IllegalMoveError(Exception):\n pass\n\n\nBOARD_SIZE = 3\nROWS = tuple(range(1, BOARD_SIZE + 1))\nCOLS = tuple(range(1, BOARD_SIZE + 1))\n# Top left to lower right diagonal\nDIAG_1 = (Point(1, 1), Point(2, 2), Point(3, 3))\n# Top right to lower left diagonal\nDIAG_2 = (Point(1, 3), Point(2, 2), Point(3, 1))\n\n\nclass Board:\n def __init__(self):\n self._grid = {}\n\n def place(self, player, point):\n assert self.is_on_grid(point)\n assert self._grid.get(point) is None\n self._grid[point] = player\n\n @staticmethod\n def is_on_grid(point):\n return 1 <= point.row <= BOARD_SIZE and \\\n 1 <= point.col <= BOARD_SIZE\n\n def get(self, point):\n \"\"\"Return the content of a point on the board.\n\n Returns None if the point is empty, or a Player if there is a\n stone on that point.\n \"\"\"\n return self._grid.get(point)\n\n def move_pawn(self, player, from_point, to_point):\n assert self.is_on_grid(from_point)\n assert self.is_on_grid(to_point)\n self._grid[from_point] = None\n self._grid[to_point] = player\n\n\nclass Move:\n def __init__(self, from_point, to_point):\n # Update Move class to now have a from and to point for Hexapawn\n self.from_point = from_point\n self.to_point = to_point\n\n\nclass GameState:\n def __init__(self, board, next_player, move):\n self.board = board\n self.next_player = next_player\n self.last_move = move\n\n def apply_move(self, move):\n \"\"\"Return the new GameState after applying the move.\"\"\"\n next_board = copy.deepcopy(self.board)\n next_board.move_pawn(self.next_player, move.from_point, move.to_point)\n return GameState(next_board, self.next_player.other, move)\n\n @classmethod\n def new_game(cls):\n board = Board()\n # Initialize starting Hexapawn pieces:\n board.place(Player.o, Point(1, 1))\n board.place(Player.o, Point(1, 2))\n board.place(Player.o, Point(1, 3))\n\n board.place(Player.x, Point(3, 2))\n board.place(Player.x, Point(3, 3))\n board.place(Player.x, Point(3, 1))\n\n return GameState(board, Player.x, None)\n\n def is_valid_move(self, move):\n # Check from and to point:\n # If 1 in front is blank\n # If diagonal is black\n\n return (\n # Can move to empty spot\n # self.board.get(move.to_point) is None or\n # Must use your piece as origin\n # self.board.get(move.from_point) == self.next_player.other and\n # Capture other player logic:\n \n # not (self.board.get(move.from_point) is None)\n # Can't move straight if other player is there\n # not (self.board.get(move.to_point) == Player.o and move.to_point[0] == move.from_point[0]) or\n # not (self.board.get(move.to_point) == Player.x and move.to_point[1] == move.from_point[1]) and\n # Starting point must be one of your own 
pieces:\n # Destination must be within 1 square radius:\n # ((move.to_point[0] == move.from_point[0] - 1) or\n # (move.to_point[0] == move.from_point[0] + 1) or\n # (move.to_point[1] == move.from_point[1] - 1) or\n # (move.to_point[1] == move.from_point[1] + 1)) and\n # Game isn't over\n not self.is_over())\n\n def legal_moves(self):\n moves = []\n for row_1 in ROWS:\n for col_1 in COLS:\n #For each point, also loop through every other point as a possible move\n for row_2 in ROWS:\n for col_2 in COLS:\n move = Move(Point(row_1, col_1), Point(row_2, col_2))\n if self.is_valid_move(move):\n moves.append(move)\n return moves\n\n def is_over(self):\n if self._reached_end_white(Player.x):\n return True\n if self._reached_end_black(Player.o):\n return True\n return False\n\n def _reached_end_white(self, player):\n \"\"\"Check to see if white player has moved a piece to end\n of the board.\"\"\"\n if self.board.get(Point(1, 1)) == player or \\\n self.board.get(Point(1, 2)) == player or \\\n self.board.get(Point(1, 3)) == player:\n return True\n return False\n\n def _reached_end_black(self, player):\n \"\"\"Check to see if black player has moved a piece to end\n of the board.\"\"\"\n if self.board.get(Point(3, 1)) == player or \\\n self.board.get(Point(3, 2)) == player or \\\n self.board.get(Point(3, 3)) == player:\n return True\n return False\n\n def winner(self):\n if self._reached_end_white(Player.x):\n return Player.x\n if self._reached_end_black(Player.o):\n return Player.o\n return None\n","sub_path":"code/dlgo/ttt/tttboard.py","file_name":"tttboard.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186868082","text":"import spotipy\nimport pickle\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nimport pandas as pd\nimport pydotplus as pydot\nfrom sklearn import tree\nfrom sklearn.externals.six import StringIO\n\nsp = spotipy.Spotify(auth='BQAiHZxR6CoyiJut9-UXuhGa6Uys1a5hOuDRAlwqyln3jA9zjMlBkCb-O8YqvRQKdSfcV7ytF81LIc5oL5GMyGpivcQ9XjQLZDysyf-yD9raABbYJo3zzOun8ZoVmuteoKC1TvFe3-kXvEZZkBdt4hp4KLC8g1CRWsFnuqe1rcaKT5pmuUV9gsqeXBKoGrSIeclJ-HO3-ZVJs5XCuU8JvxN6Iiw_CW-fNjGjIMtFVJkoyjV21KkK4f1LbUBV2GaJWfmnczI_FA0iMfL-WQs4htNwFDiZfx0w07BiiV0rHkeKgA')\n\ntrackFeatures = pickle.load(open(\"Pickles/Kau_AllTrackFeatures_v1_0612.pkl\",\"rb\"))\n\nx = sp.user_playlists('122054818')\ndata = trackFeatures\ndataframe = pd.DataFrame(data)\nremove_columns = ['album',\n 'analysis_url',\n 'artists',\n 'available_markets',\n 'disc_number',\n 'external_ids',\n 'external_urls',\n 'href',\n 'id',\n 'label',\n 'linked_from',\n 'mode',\n 'name',\n 'playlist_id',\n 'playlis_name',\n 'playlist_owner_id',\n 'preview_url',\n 'time_signature',\n 'track_href',\n 'track_number',\n 'type',\n 'uri',\n 'artist_genres_summary',\n 'explicit',\n 'artist_followers_average_summary',\n 'artist_followers_max_summary',\n 'artist_popularity_average_summary',\n 'artist_popularity_max_summary',\n 'duration_ms',\n 'key',\n 'popularity'\n ]\n\ntemp = list(dataframe.columns)\nfeature_names = [cols for cols in dataframe.columns if cols not in remove_columns]\n\nfor feature in feature_names:\n dataframe = dataframe[np.isfinite(dataframe[feature])]\n\nX_train, X_test, y_train, y_test = train_test_split(dataframe, dataframe['playlist_id'], test_size=0.33, random_state=25343254)\n\nclf = 
DecisionTreeClassifier(random_state=342342,max_depth=5)\nclf.fit(np.array(X_train[feature_names]),np.array(y_train))\n\n\ndot_data = StringIO()\ntree.export_graphviz(clf, out_file=dot_data, feature_names=feature_names, class_names=clf.classes_)\ngraph = pydot.graph_from_dot_data(dot_data.getvalue())\ngraph.write_pdf(\"Kau_MaxDepth5DecisionTree.pdf\")\n#graph.write_pdf(\"DecisionTreesClassifyPlaylist_maxdepth5_withgenres.pdf\")\n\n\nimportance_threshold = 0.0001\nimportant_features = [(index,importance) for (index,importance) in enumerate(clf.feature_importances_) if importance > importance_threshold]\nimportant_features_meaning = [(index[0],index[1],feature_names[index[0]]) for index in important_features]\n\naccuracy = clf.score(np.array(X_test[feature_names]),y_test)\nprobabs = clf.predict_proba(np.array(X_test[feature_names]))\nmydict = {}\nmydict[\"clf\"] = clf\nmydict[\"X_test\"] = X_test\nmydict[\"y_test\"] = y_test\nmydict[\"feature_names\"] = feature_names\n\npickle.dump(mydict,open(\"MultiClassModel.pkl\",\"wb\"))\n\nprint(accuracy)\npass","sub_path":"MultiClassDecisionTree.py","file_name":"MultiClassDecisionTree.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28596761","text":"\"\"\"\nAuthor: chiu cam minh\nDate: 05/09/2021\nProgram: project_03_page_203.py\nProblem:\n Convert Newton’s method for approximating square roots in Project 1 to a recursive function named newton. \n (Hint: The estimate of the square root should be passed as a second argument to the function.)\nSolution:\n \n\"\"\"\nimport math\n\n\ndef newton(x, estimate=1.0):\n if abs(x - estimate ** 2) <= 0.000001:\n return estimate\n else:\n estimate = newton(x, (estimate + x / estimate) / 2)\n return estimate\n\n\nif __name__ == \"__main__\":\n while True:\n x = input(\"Enter a positive number or enter/return key to quit: \")\n if x == \"\":\n break\n print(\"The program's estimate: \", newton(float(x)))\n print(\"Python's estimate: \", math.sqrt(float(x)))\n","sub_path":"workshop06/project_page_203/project_03.py","file_name":"project_03.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444971301","text":"# -*- coding: utf-8 -*-\n# __author__ = 'qinjincheng'\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\n[[6.9, 10, 10.5],\n [7.8, 12, 11.6],\n [8.3, 14, 12.9],\n [9.1, 16, 14.3],\n [9.5, 18, 17.5]]\n[7, 9, 13, 17.5, 18]\n\nX = np.array([[6.9, 10, 10.5],\n [7.8, 12, 11.6],\n [8.3, 14, 12.9],\n [9.1, 16, 14.3],\n [9.5, 18, 17.5]])\nprint('explanatory variable: {}'.format(X))\ny = np.array([7, 9, 13, 17.5, 18])\nprint('response variable: {}'.format(y))\n\nmodel = LinearRegression()\nmodel.fit(X, y)\nprint('model: {}'.format(model))\n\nxt = np.array([[10, 20, 20]])\nyt = model.predict(xt)\nprint('yt: {}'.format(yt))\n","sub_path":"scikit_learn/slm/2.3.py","file_name":"2.3.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585487707","text":"import os\nimport os.path as osp\nimport argparse\nimport numpy as np \nimport time\nimport torch\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom data import AudioDataset\nfrom model import 
alignment\n\nimport sys\n# imresize is used by activation() below; it is only available in older\n# SciPy releases (< 1.3), where scipy.misc.imresize still existed.\nfrom scipy.misc import imresize\n\nsys.path.append('data/process/')\n\nparser = argparse.ArgumentParser(description='PyTorch Audio-Visual')\n\nparser.add_argument('model', help='output directory to save models & results')\n\nparser.add_argument('-g', '--gpu', type=int, default=0,\\\n help='gpu device id')\n\nparser.add_argument('-t', '--is_train', type=int, default=1,\\\n help='use 1 to train model')\n\nparser.add_argument('-e', '--epochs', type=int, default=500,\\\n help='number of training epochs')\n\nparser.add_argument('-b', '--batchsize', type=int, default=16,\\\n help='number of samples per training batch')\n\nparser.add_argument('-m', '--nthreads', type=int, default=4,\\\n help='pytorch data loader threads')\n\nparser.add_argument('-lr', '--learning_rate', type=float, default=1e-5,\\\n help='learning rate')\n\nparser.add_argument('-vf', '--val_freq', type=float, default=25,\\\n help='number of epochs before testing validation set')\n\nargs = parser.parse_args()\n\n\ndef train(epoch, train_loader, optimizer_align, model_align, loss_fn):\n accs = []\n losses = []\n for batch_idx, (images, sounds, labels) in enumerate(train_loader):\n images_v = Variable(images.type(torch.FloatTensor)).cuda()\n sounds_v = Variable(sounds.type(torch.FloatTensor)).cuda()\n labels_v = Variable(labels).cuda()\n \n optimizer_align.zero_grad()\n aligned_res, _ = model_align(sounds_v, images_v)\n loss = loss_fn(aligned_res, labels_v)\n loss.backward()\n optimizer_align.step()\n losses.append(loss.item())\n accs.append(np.mean((torch.argmax(aligned_res,1) == labels_v).detach().cpu().numpy()))\n print(\"Epoch :\", epoch, np.mean(losses), np.mean(accs))\n\n\ndef test(epoch, test_loader, model_align, loss_fn, repeat = 5):\n accs = []\n losses = []\n for i in range(repeat):\n for batch_idx, (images, sounds, labels) in enumerate(test_loader):\n with torch.no_grad():\n images_v = Variable(images.type(torch.FloatTensor)).cuda()\n sounds_v = Variable(sounds.type(torch.FloatTensor)).cuda()\n labels_v = Variable(labels).cuda()\n\n aligned_res, _ = model_align(sounds_v, images_v)\n loss = loss_fn(aligned_res, labels_v)\n losses.append(loss.item())\n accs.append(np.mean((torch.argmax(aligned_res,1) == labels_v).detach().cpu().numpy()))\n print(\"Validation :\", epoch, np.mean(losses), np.mean(accs))\n\ndef activation(feature_map, weights, label):\n output = np.zeros((224,224))\n for i in range(512):\n output += imresize(feature_map[i], (224,224))*weights[label,i]\n return output\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu) # gpu device\n\n transform = transforms.Compose([\n transforms.ToPILImage(),\n # transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(224),\n transforms.ToTensor()])\n\n train_dataset = AudioDataset(train=True,transform=transform, h5_file='/media/jeff/Backup/CS598PS/data.h5')\n test_dataset = AudioDataset(train=False,transform=transform, h5_file='/media/jeff/Backup/CS598PS/data.h5')\n\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.batchsize, shuffle=True, num_workers=4)\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.batchsize, shuffle=False, num_workers=4)\n\n model_align = alignment().cuda()\n# checkpoint = torch.load(\"fixed_500.pth\")\n# model_align.load_state_dict(checkpoint.state_dict())\n\n loss_fn = nn.CrossEntropyLoss()\n optimizer_align = optim.Adam(model_align.parameters(), lr = args.learning_rate)\n \n if (args.is_train == 1): \n for epoch in range(args.epochs):\n 
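# one optimisation pass over the whole training loader per epoch;\n # validation runs every val_freq epochs (see below)\n 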
train(epoch, train_loader, optimizer_align, model_align, loss_fn)\n if (epoch + 1)%args.val_freq == 0:\n test(epoch, test_loader, model_align, loss_fn)\n torch.save(model_align, args.model + '.pth')\n \n # NOTE: 'feature_maps' and 'weight' are never defined in this script, so the\n # call below would raise a NameError; it is left disabled.\n # output = activation(feature_maps[0,:,0].detach().cpu().numpy(), weight.detach().cpu().numpy(),0)\n\n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61642121","text":"from django.contrib import admin\n# messages is needed by ModelAdmin.save_model below\nfrom django.contrib import messages\n\n\n# Register your models here.\nfrom .models import (\n Redirection\n)\n\nclasses = [Redirection]\n\n\nclass ModelAdmin(admin.ModelAdmin):\n def save_model(self, request, obj, form, change):\n if not request.user.is_superuser:\n messages.error(request, \"Only superusers can change models\")\n return False\n return super(ModelAdmin, self).save_model(request, obj, form, change)\n\n\nfor c in classes:\n admin.site.register(c, ModelAdmin)\n","sub_path":"nginx_syntax/redirection/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645114634","text":"# Author: Wanjiru Wang'ondu\n\n# W2Q9 instructions\n# Input 2 lists - listA with n elements and listB which has all elements of listA except\n# one (but the rest are in the same order). Outputs the missing element. E.g.\n# f([8,1,2,3],[8,1,3]) outputs 2\n\n\nlistA = [\"apple\", \"banana\", \"grape\", 3, 5, 77, 12]\nlistB = [\"apple\", \"banana\", \"grape\", 3, 5, 77, 12, 44444]\n\n# to find the element that list b has that is unique, we use the difference method used on sets\n# to do this, we have to first convert the two lists to sets\nsetA = set(listA)\nsetB = set(listB)\n\n# then, we use the difference operator to find the difference and print to see the element that is in setB but not setA\n# the set which has an element the other doesn't have comes first\n# I printed the unique element out as a list item\n\nprint(list(setB.difference(setA)))\n\n\n\n\n\n","sub_path":"question9.py","file_name":"question9.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141024310","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 4 11:39:48 2019\n\n@author: p6001\n\"\"\"\n\nimport pandas as pd\nimport geopandas as gpd\nimport shapely\nimport numpy as np\nimport pyproj\nimport time\nimport os\n\n\n\ndef point_to_domain(emis_file,output_dir, name, def_emis, projection, inProj, grid_params, mask_out=False):\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n \n XORIG=grid_params['XORIG']\n YORIG=grid_params['YORIG']\n XCELL=grid_params['XCELL']\n nj=grid_params['nj']\n ni=grid_params['ni']\n \n \n start_time = time.time()\n \n outProj = pyproj.Proj(projection)\n \n x,y =pyproj.transform(inProj,outProj,list(emis_file['x'][:]), list(emis_file['y'][:]))\n \n emis_file['x']=x\n emis_file['y']=y\n\n emis_file=emis_file[(emis_file['x']>=XORIG)]\n emis_file=emis_file[(emis_file['y']>=YORIG)]\n emis_file=emis_file[(emis_file['x']<=XORIG+(nj-1)*XCELL)]\n emis_file=emis_file[(emis_file['y']<=YORIG+(ni-1)*XCELL)]\n \n if mask_out != False:\n mask=gpd.read_file(mask_out)\n mask=mask.to_crs(projection)\n emis_file['Point'] = list(zip(emis_file.x, emis_file.y))\n \n emis_file['Point'] = emis_file['Point'].apply(shapely.geometry.Point) \n 
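# build a GeoDataFrame so each source point can be tested against the\n # mask polygon with shapely's within() below\n 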
emis_file=gpd.GeoDataFrame(emis_file, geometry='Point')\n emis_file=emis_file[emis_file['Point'].within(mask['geometry'].iloc[0])!=True]\n \n print(\"Data are masked by {}\".format(mask_out))\n \n dic_out={} \n for sn in emis_file['cat_internal'].unique():\n for de in def_emis.values():\n de='{0}_{1}'.format(de,sn)\n dic_out[de]=np.zeros([ni,nj])\n \n for sn in emis_file['cat_internal'].unique():\n \n print('Processing cat: {}'.format(sn))\n emis_que=emis_file[(emis_file['cat_internal']==sn)]\n \n for index, em in emis_que.iterrows():\n \n j=int((em['x']-XORIG)/(XCELL))\n i=int((em['y']-YORIG)/(XCELL))\n \n for de in def_emis.values(): \n dic_out['{0}_{1}'.format(de,sn)][i,j]+=em[de]\n \n print(\"cat {0} is regrided in {1:.3f} seconds ---\".format(sn,time.time() - start_time))\n \n for sn in emis_file['cat_internal'].unique():\n for de in def_emis.values():\n np.save('{0}/{1}-{2}-{3}'.format(output_dir,sn,de,name), dic_out['{0}_{1}'.format(de,sn)])\n\n \n \n \n print(\"Data for {0} are regrided in {1:.3f} seconds\".format(name,time.time() - start_time))\n\n\n\n\ndef regridding_control(emis_file, output_dir, name, def_emis,projection, inProj, grid_params):\n \n \n XORIG=grid_params['XORIG']\n YORIG=grid_params['YORIG']\n XCELL=grid_params['XCELL']\n nj=grid_params['nj']\n ni=grid_params['ni']\n \n\n emis_file=emis_file[(emis_file['x']>=XORIG)]\n emis_file=emis_file[(emis_file['y']>=YORIG)]\n emis_file=emis_file[(emis_file['x']<=XORIG+(nj-1)*XCELL)]\n emis_file=emis_file[(emis_file['y']<=YORIG+(ni-1)*XCELL)]\n \n dic_con={}\n for sn in emis_file['cat_internal'].unique():\n for de in def_emis.values():\n de='{0}_{1}'.format(de,sn)\n dic_con[de]=0\n \n \n for sn in emis_file['cat_internal'].unique():\n for de in def_emis.values():\n sk2=emis_file[(emis_file['cat_internal']==sn)]\n dic_con['{0}_{1}'.format(de,sn)]+=np.sum(sk2[de])\n \n print('################# CHECKING regriding of {0} inventory'.format(name))\n \n for sn in emis_file['cat_internal'].unique():\n for de in def_emis.values(): \n if '{0}_{1}'.format(de,sn) in dic_con.keys():\n \n sk=np.load('{0}/{1}-{2}-{3}.npy'.format(output_dir,sn,de,name)) \n \n a=np.sum(sk)\n \n b=np.sum(dic_con['{0}_{1}'.format(de,sn)])\n \n per=0\n if b > 0:\n per=np.round((a-b)/b*100,1) \n \n print('cat {0}, pollutant {1}, diference {2:.2f} in percent {3}'.format(sn,de, a-b, per))\n print('value in grid {0:.3f}'.format(a))\n print('value in source file {0:.3f}'.format(b)) \n \n \n","sub_path":"emPY/to_domain/src_to_domain/point_to_domain_zero.py","file_name":"point_to_domain_zero.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65102738","text":"# Usage\n# python build_balloons_vs_sky.py\n\n# Import packages\nfrom config import balloons_vs_sky_config as config\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom src.preprocessing import AspectAwarePreprocessor\nfrom src.io import HDF5DatasetWriter\nfrom imutils import paths\nimport numpy as np\nimport progressbar\nimport argparse\nimport json\nimport cv2\nimport os\nimport h5py\n\n# Construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--debug\", type=int, default=0,\n\thelp=\"Add debug files\")\nargs = vars(ap.parse_args())\n\n# Grab the paths to the images, extract the labels and encode them\nimagePaths = []\nimageLabels = []\npaths = list(paths.list_images(config.IMAGES_PATH))\nfor path in 
paths:\n\tlabel = path.split(os.path.sep)[-2]\n\tif label != \"origin\":\n\t\timagePaths.append(path);\n\t\timageLabels.append(label);\n\nif args[\"debug\"]:\n\tprint(\"[DEBUG] Labels {}\".format(imageLabels))\n\nle = LabelEncoder()\nimageLabels = le.fit_transform(imageLabels)\n\nif args[\"debug\"]:\n\tprint(\"[DEBUG] Labels encoded {}\".format(imageLabels))\n\tprint(\"[DEBUG] Dataset size {}\".format(len(imageLabels)))\n\n# Perform stratified sampling from the images set to build the\n# testing split\nsplit = train_test_split(imagePaths, imageLabels,\n\ttest_size=config.TEST_IMAGES_PCT, stratify=imageLabels,\n\trandom_state=42)\n(trainPaths, testPaths, trainLabels, testLabels) = split\n\n# Perform another stratified sampling, this time to build the\n# validation data and training data\nsplit = train_test_split(trainPaths, trainLabels,\n\ttest_size=config.VAL_IMAGES_PCT, stratify=trainLabels,\n\trandom_state=42)\n(trainPaths, valPaths, trainLabels, valLabels) = split\n\n# Construct a list pairing the training, validation, and testing\n# image paths along with their corresponding labels and output HDF5\n# files\ndatasets = [\n\t(\"train\", trainPaths, trainLabels, config.TRAIN_HDF5),\n\t(\"val\", valPaths, valLabels, config.VAL_HDF5),\n\t(\"test\", testPaths, testLabels, config.TEST_HDF5)]\n\n# Initialize the image preprocessor and the lists of RGB channel averages\naap = AspectAwarePreprocessor(config.IMAGE_WIDTH, config.IMAGE_HEIGHT)\n(R, G, B) = ([], [], [])\n\n# Loop over the dataset tuples\nfor (type, paths, labels, outputPath) in datasets:\n\t# Create HDF5 writer\n\tprint(\"[INFO] Building {}...\".format(outputPath))\n\twriter = HDF5DatasetWriter((len(paths), config.IMAGE_WIDTH, config.IMAGE_HEIGHT, 3), outputPath)\n\n\t# Initialize the progress bar\n\twidgets = [\"Building Dataset: \", progressbar.Percentage(), \" \",\n\t\tprogressbar.Bar(), \" \", progressbar.ETA()]\n\tpbar = progressbar.ProgressBar(maxval=len(paths),\n\t\twidgets=widgets).start()\n\n\t# Loop over the image paths\n\tfor (idx, (path, label)) in enumerate(zip(paths, labels)):\n\t\t# Load the image and process it\n\t\timage = cv2.imread(path)\n\t\timage = aap.preprocess(image)\n\n\t\t# If we are building the training dataset, then compute the\n\t\t# mean of each channel in the image, then update the\n\t\t# respective lists\n\t\tif type == \"train\":\n\t\t\t(b, g, r) = cv2.mean(image)[:3]\n\t\t\tR.append(r)\n\t\t\tG.append(g)\n\t\t\tB.append(b)\n\n\t\t# Add the image and label to the HDF5 dataset\n\t\twriter.add([image], [label])\n\t\tpbar.update(idx)\n\n\t# Close the HDF5 writer\n\tpbar.finish()\n\twriter.close()\n\n# Construct a dictionary of averages, then serialize the means to a\n# JSON file\nprint(\"[INFO] Serializing means...\")\nD = {\"R\": np.mean(R), \"G\": np.mean(G), \"B\": np.mean(B)}\nf = open(config.DATASET_MEAN, \"w\")\nf.write(json.dumps(D))\nf.close()\n\n# Display the shape of the HDF5 files created\nfilenames = [config.TRAIN_HDF5, config.VAL_HDF5, config.TEST_HDF5]\nif args[\"debug\"]:\n\tprint(\"[DEBUG] HDF5 files {}\".format(filenames))\nfor filename in filenames:\n\tdb = h5py.File(filename, \"r\")\n\tprint(\"[INFO] Created HDF5 file {} shape {}\".format(filename, db[\"images\"].shape))\n\tdb.close()\n","sub_path":"build_balloons_vs_sky.py","file_name":"build_balloons_vs_sky.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198471611","text":"#import math\nimport numpy as np \nfrom download_mnist 
import load\n#import operator \nimport time\nfrom collections import Counter\n# classify using kNN \n#x_train = np.load('../x_train.npy')\n#y_train = np.load('../y_train.npy')\n#x_test = np.load('../x_test.npy')\n#y_test = np.load('../y_test.npy')\nx_train, y_train, x_test, y_test = load()\nx_train = x_train.reshape(60000,28,28)\nx_test = x_test.reshape(10000,28,28)\nx_train = x_train.astype(float)\nx_test = x_test.astype(float)\ndef kNNClassify(newInput, dataSet, labels, k): \n result=[]\n ########################\n # Input your code here #\n ########################\n for i in newInput:\n L2_dist=0\n L2=[]\n for j in dataSet:\n '''\n This kind of method is low_efficient and low_accurate\n for row in range(28):\n for column in range(28):\n L2_dist+=np.sqrt((i[row][column]-j[row][column])**2)\n '''\n # true Euclidean (L2) distance: square the differences, sum them,\n # then take a single square root\n L2_dist=np.sqrt(np.sum((i-j)**2))\n L2.append(L2_dist)\n Min_list=[]\n for a in range(k):\n Min_list.append(L2.index(min(L2)))\n L2[L2.index(min(L2))]=float(\"inf\")\n classifier=[]\n for b in Min_list:\n classifier.append(labels[b])\n #print(classifier)\n result.append(Counter(classifier).most_common(1)[0][0]) \n \n \n \n ####################\n # End of your code #\n ####################\n return result\n\nstart_time = time.time()\noutputlabels=kNNClassify(x_test[0:20],x_train,y_train,10)\nresult = y_test[0:20] - outputlabels\nresult = (1 - np.count_nonzero(result)/len(outputlabels))\nprint (\"---classification accuracy for knn on mnist: %s ---\" %result)\nprint (\"---execution time: %s seconds ---\" % (time.time() - start_time))\n","sub_path":"DeepLearning/Homework1/Homework1_P3/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221761502","text":"import sys\nimport numpy\nfilename=sys.argv[1]\nimage_num=0\naverage=0\ncolor_average=0\nfeature_average=0\nweighted_average=0\nweighted_data =[]\ndata=[]\ncolor_data=[]\nfeature_data=[]\nwith open('%s' % filename, 'r') as input:\n content = input.readlines()\n content = [x.strip('\\n') for x in content]\n for line in content:\n line_input = line.split(\" \");\n weighted_average += float(line_input[1])\n average += float(line_input[2])\n weighted_data.append(float(line_input[1]))\n data.append(float(line_input[2]))\n \n color_average += float(line_input[3])\n feature_average += float(line_input[4])\n color_data.append(float(line_input[3]))\n feature_data.append(float(line_input[4]))\n image_num += 1\n\n# print('Average weighted flip metric mean: {}'.format(weighted_average/image_num))\n# print('flip metric weighted std: {}'.format(numpy.std(weighted_data)))\n# print('max: {}'.format(numpy.amax(weighted_data)))\nprint('### Average flip metric mean: {}'.format(1- average/image_num))\nprint('### flip metric std: {}'.format(numpy.std(data)))\n# print('max: {}'.format(numpy.amax(data)))\n\n# print('color diff mean: {}'.format(color_average/image_num))\n# print('color diff std: {}'.format(numpy.std(color_data)))\n# print('feature diff mean: {}'.format(feature_average/image_num))\n# print('feature diff std: {}'.format(numpy.std(feature_data)))\n","sub_path":"ILLIXR_SSIM_FLIP/FLIP/cal_average_weighted_mean.py","file_name":"cal_average_weighted_mean.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61108955","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\n\n# Sorts and extracts the individuals and their related information according to the population they belong to
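\n# Example invocation (panel file name and population code are hypothetical):\n# python3 extract_pop.py -p integrated_call_samples.panel -k GBR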
\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-p', '--panel', required=True, type=str,\n dest=\"p\", metavar=\"<panel>\",\n help=\"The relative name of the .panel file\")\n parser.add_argument('-k', '--key', required=True, type=str,\n dest=\"k\", metavar=\"<keyword>\",\n help=\"The keyword that must match each line of the .panel file.\")\n args = parser.parse_args()\n\n path = os.getcwd()\n call_samples = open(\"{0}/{1}\".format(path, args.p), 'r')\n call_samples.readline()\n output_file = open(\"{0}/{1}.txt\".format(path, args.k), 'w')\n nbr_indiv = 0\n for line in call_samples:\n if args.k in line.replace(\"\\n\", \"\").split(\"\\t\"):\n nbr_indiv += 1\n output_file.write(line.split(\"\\t\")[0]+\"\\n\")\n output_file.close()\n call_samples.close()\n print(\"{0} individuals extracted from {1} population\".format(nbr_indiv, args.k))\n","sub_path":"extract_pop.py","file_name":"extract_pop.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"92934963","text":"import numpy as np\nfrom PIL import Image\nimport argparse\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--file_name', type=str, required=True)\n parser.add_argument('-s', '--size', type=int)\n parser.add_argument('-k', type=int, required=True)\n args = parser.parse_args()\n return args\n\n\ndef load_image(path):\n img = np.array(Image.open(path).convert('L'))\n return img\n\n\ndef export_image(M_k, k):\n img2 = Image.fromarray(np.uint8(M_k))\n out_name = f'out_{k}.jpg'\n img2.save(out_name)\n print(f'{out_name} was generated.')\n\n\ndef matrix2singulars(M):\n import numpy.linalg as LA\n u, s, v = LA.svd(M)\n return u, s, v\n\n\ndef low_rank_approximation(k, u, s, v):\n # keep the first k singular components for a rank-k approximation\n ur = u[:, :k]\n sr = np.diag(s[:k])\n vr = v[:k, :]\n Mk = np.dot(np.dot(ur, sr), vr)\n return Mk\n\n\ndef main():\n args = parse_args()\n image_path = args.file_name\n img = load_image(image_path)\n u, s, v = matrix2singulars(img)\n k = args.k\n Mk = low_rank_approximation(k, u, s, v)\n export_image(Mk, k)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"R9.py","file_name":"R9.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594081757","text":"\"\"\"\nSerialize the classifier object and trained model.\nIt creates a model file for storing learned parameters.\n\"\"\"\n\nimport sys\nimport h5py\nimport time\nimport numpy as np\nimport json\n\nimport sklearn\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC, LinearSVC, NuSVC, OneClassSVM, SVR\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, RandomForestClassifier, BaggingClassifier, BaggingRegressor, AdaBoostRegressor, \\\n ExtraTreesRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge, RidgeCV, Lasso, MultiTaskLasso, ElasticNet, SGDClassifier, RidgeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, ExtraTreeClassifier, ExtraTreeRegressor\nimport xgboost\n\nfrom sklearn import svm\nfrom sklearn.datasets 
import samples_generator\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.pipeline import Pipeline\nimport importlib\nfrom deepdish import io\nimport jsonpickler\n\n\nclass SerializeClass:\n\n @classmethod\n def __init__(self):\n \"\"\" Init method. \"\"\"\n self.model_file = \"model.h5\"\n\n @classmethod\n def compute_prediction_score(self, classifier, X_test, y_test):\n \"\"\"\n Evaluate classifier\n \"\"\"\n print(classifier)\n predictions = classifier.predict(X_test)\n match = [1 for x,y in zip(predictions, y_test) if x == y]\n prediction = len(match) / float(len(predictions))\n print(\"Prediction score: %.2f\" % prediction)\n\n @classmethod\n def train_model(self, classifier):\n \"\"\"\n Get a trained model\n \"\"\"\n # Loading the dataset\n digits = datasets.load_digits()\n n_samples = len(digits.images)\n X = digits.images.reshape((n_samples, -1))\n y = digits.target\n # Split the dataset in two equal parts\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\n # Fit and return the classifier\n classifier.fit(X_train, y_train)\n self.compute_prediction_score(classifier, X_test, y_test)\n return classifier, X_test, y_test, X_train\n \n @classmethod\n def serialize_class(self):\n \"\"\"\n Convert to hdf5\n \"\"\"\n clf = SVC(C=3.0, kernel='poly', degree=5)\n clf = SVR()\n clf = LinearSVC(loss='hinge', tol=0.001, C=2.0)\n clf = LinearRegression(fit_intercept=True, n_jobs=2)\n clf = GaussianNB()\n clf = SGDClassifier(loss='hinge', learning_rate='optimal', alpha=0.0001)\n clf = KNeighborsClassifier(n_neighbors=6, weights='uniform', algorithm='ball_tree', leaf_size=32)\n #clf = RadiusNeighborsClassifier()\n clf = GradientBoostingClassifier(n_estimators=100)\n clf = ExtraTreeClassifier()\n clf = DecisionTreeClassifier(criterion='entropy', random_state=42)\n clf = DecisionTreeRegressor()\n # clf = ExtraTreeRegressor()\n #clf = GradientBoostingClassifier(n_estimators=10)\n clf = AdaBoostClassifier(n_estimators=2)\n #clf = AdaBoostRegressor()\n #clf = BaggingClassifier()\n #clf = BaggingRegressor()\n #clf = ExtraTreesClassifier(n_estimators=1)\n #clf = ExtraTreesRegressor()\n #clf = RandomForestClassifier()\n classifier, X_test, y_test, X = self.train_model(clf)\n print(\"Serializing...\")\n self.save_model(classifier)\n \n return X_test, y_test, classifier\n \n @classmethod\n def save_model(self, model):\n \"\"\"\n Save the dictionary to hdf5 file\n \"\"\"\n se_model = jsonpickler.dump(model)\n print(se_model)\n h5file = h5py.File(self.model_file, 'w')\n def recursive_save_model(h5file_obj, dictionary):\n for model_key, model_value in dictionary.items():\n type_name = type(model_value).__name__\n try:\n if type_name in ['ndarray']:\n h5file_obj.create_dataset(model_key, (model_value.shape), data=model_value)\n elif type_name in ['list']:\n if len(model_value) > 0:\n list_obj = all(isinstance(x, dict) for x in model_value)\n if list_obj is False:\n h5file_obj.create_dataset(model_key, data=json.dumps(model_value))\n else:\n for index, model_item in enumerate(model_value):\n model_key_item = model_key + \"/\" + str(index)\n if model_item is not None:\n if model_key_item in h5file_obj:\n # recurse into the existing group object, not its key string\n recursive_save_model(h5file_obj[model_key_item], model_item)\n else:\n group = h5file_obj.create_group(model_key_item)\n recursive_save_model(group, model_item)\n else:\n h5file_obj.create_dataset(model_key_item, data=json.dumps(model_item))\n else:\n h5file_obj.create_dataset(model_key, data=model_value)\n elif 
type_name in ['int', 'int32', 'int64', 'float', 'float32', 'float64', 'str', 'tuple', 'bool', 'None', 'NoneType']:\n if type_name in ['None', 'NoneType']:\n h5file_obj.create_dataset(model_key, data=json.dumps(model_value))\n else:\n h5file_obj.create_dataset(model_key, data=model_value)\n elif type_name in ['dict']:\n if model_key in h5file_obj:\n recursive_save_model(h5file_obj[model_key], model_value)\n else:\n group = h5file_obj.create_group(model_key)\n recursive_save_model(group, model_value)\n except Exception as exp:\n print(model_key, exp, model_value)\n continue\n recursive_save_model(h5file, se_model)\n print(\"--------------\")\n \n\nclass DeserializeClass:\n\n @classmethod\n def __init__(self, model_file):\n \"\"\" Init method. \"\"\"\n self.model_file = model_file\n \n @classmethod\n def recurse_list_items(self, file_obj, list_dict):\n \"\"\"\n Recurse list items\n \"\"\"\n for key in file_obj.keys():\n if file_obj.get(key).__class__.__name__ == 'Group':\n if str.isnumeric(key) is True:\n dict_key = list_dict\n else:\n list_dict[key] = dict()\n dict_key = list_dict[key]\n self.recurse_list_items(file_obj[key], dict_key)\n else:\n try:\n key_value = file_obj.get(key).value\n list_dict[key] = json.loads(key_value)\n except Exception as exp:\n if type(key_value).__name__ in ['ndarray']:\n list_dict[key] = key_value.tolist()\n else:\n list_dict[key] = key_value\n continue\n return list_dict\n \n @classmethod\n def load_model(self):\n \"\"\"\n Read the hdf5 file recursively\n \"\"\"\n print(\"Deserializing...\")\n model_obj = dict()\n h5file = h5py.File(self.model_file, 'r')\n counter = 0\n list_dict = dict()\n def recursive_load_model(h5file_obj, model_obj, list_dict={}, counter=0):\n for key in h5file_obj.keys():\n if h5file_obj.get(key).__class__.__name__ == 'Group':\n list_key = key + '/0'\n if list_key in h5file_obj:\n model_obj[key] = list()\n while True:\n list_key_iter = key + '/' + str(counter)\n if list_key_iter in h5file_obj:\n file_obj = h5file_obj[list_key_iter]\n for recurse_key in file_obj.keys():\n if file_obj.get(recurse_key).__class__.__name__ == 'Group':\n model_obj[recurse_key] = dict()\n recursive_load_model(file_obj[recurse_key], model_obj[recurse_key], list_dict, counter)\n else:\n try:\n key_value = file_obj.get(recurse_key).value\n list_dict[recurse_key] = json.loads(key_value)\n except Exception as exp:\n if type(key_value).__name__ in ['ndarray']:\n list_dict[recurse_key] = key_value.tolist()\n else:\n list_dict[recurse_key] = key_value\n continue\n \n model_obj[key].append(list_dict)\n else:\n break\n counter += 1\n \n else:\n model_obj[key] = dict()\n recursive_load_model(h5file_obj[key], model_obj[key])\n else:\n model_obj[key] = dict()\n try:\n key_value = h5file_obj.get(key).value\n model_obj[key] = json.loads(key_value)\n except Exception as exp:\n if type(key_value).__name__ in ['ndarray']:\n model_obj[key] = key_value.tolist()\n else:\n model_obj[key] = key_value\n continue\n return model_obj\n reconstructed_model = recursive_load_model(h5file, model_obj)\n print(reconstructed_model)\n #unloaded_model = jsonpickler.load(reconstructed_model)\n #return unloaded_model\n\n '''@classmethod\n def load_model(self):\n \"\"\"\n Read the hdf5 file recursively\n \"\"\"\n print(\"Deserializing...\")\n model_obj = dict()\n h5file = h5py.File(self.model_file, 'r')\n def recursive_load_model(h5file_obj, model_obj, counter=0):\n for key in h5file_obj.keys():\n if h5file_obj.get(key).__class__.__name__ == 'Group':\n list_key = key + '/0'\n counter = 0\n 
model_obj[key] = dict()\n if list_key in h5file_obj:\n list_dict = dict()\n def recurse_list_items(file_obj, list_dict, counter):\n model_obj[key] = list()\n while True:\n list_key_iter = key + '/' + str(counter)\n if list_key_iter in h5file_obj:\n for recurse_key in file_obj.keys():\n if file_obj.get(recurse_key).__class__.__name__ == 'Group':\n #print(key, file_obj.get(recurse_key))\n if str.isnumeric(recurse_key) is True:\n dict_key = list_dict\n \n else:\n list_dict[recurse_key] = dict()\n dict_key = list_dict[recurse_key]\n list_dict[recurse_key] = dict()\n recursive_load_model(file_obj[recurse_key], list_dict, counter)\n #recurse_list_items(file_obj[recurse_key], dict_key)\n else:\n try:\n key_value = file_obj.get(recurse_key).value\n list_dict[recurse_key] = json.loads(key_value)\n #print(key, key_value, file_obj.get(recurse_key))\n except Exception as exp:\n if type(key_value).__name__ in ['ndarray']:\n list_dict[recurse_key] = key_value.tolist()\n else:\n list_dict[recurse_key] = key_value\n #print(recurse_key, key_value, file_obj.get(recurse_key))\n continue\n else:\n break\n counter += 1 \n item_dict = recurse_list_items(h5file_obj[key], list_dict, counter)\n model_obj[key].append(item_dict)\n else:\n recursive_load_model(h5file_obj[key], model_obj[key])\n else:\n model_obj[key] = dict()\n try:\n key_value = h5file_obj.get(key).value\n model_obj[key] = json.loads(key_value)\n except Exception as exp:\n if type(key_value).__name__ in ['ndarray']:\n model_obj[key] = key_value.tolist()\n else:\n model_obj[key] = key_value\n continue\n return model_obj\n reconstructed_model = recursive_load_model(h5file, model_obj)\n print(reconstructed_model)\n unloaded_model = jsonpickler.load(reconstructed_model)\n return unloaded_model'''\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 1:\n print(\"Usage: python todictrecurr.py\")\n exit(1)\n start_time = time.time()\n serialize_clf = SerializeClass()\n X_test, y_test, classifier = serialize_clf.serialize_class()\n se_classifier = jsonpickler.dump(classifier)\n deserialize = DeserializeClass(serialize_clf.model_file)\n de_classifier = deserialize.load_model()\n #serialize_clf.compute_prediction_score(de_classifier, X_test, y_test)\n end_time = time.time()\n print (\"Program finished in %s seconds\" % str( end_time - start_time ))\n\n\n","sub_path":"backup_best.py","file_name":"backup_best.py","file_ext":"py","file_size_in_byte":15144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529915348","text":"class Solution:\n def countPrimeSetBits(self, L, R):\n \"\"\"\n :type L: int\n :type R: int\n :rtype: int\n \"\"\"\n primes = [2,3,5,7,11,13,17,19,23,29]\n count = 0\n for num in range(L,R+1):\n if bin(num)[2:].count('1') in primes:\n count += 1\n return count\n\nprint(Solution().countPrimeSetBits(2,1000))","sub_path":"countPrimeSetBits.py","file_name":"countPrimeSetBits.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"649766702","text":"import _thread\nimport sys\nfrom threading import Thread\n\nfrom je_auto_control.wrapper.auto_control_keyboard import keys_table\nfrom je_auto_control.wrapper.platform_wrapper import keyboard_check\n\n\nclass CriticalExit(Thread):\n \"\"\"\n use to make program interrupt\n \"\"\"\n\n def __init__(self, default_daemon: bool = True):\n \"\"\"\n default interrupt is keyboard F7 key\n :param default_daemon bool thread setDaemon\n \"\"\"\n 
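# daemon threads die with the interpreter, so by default this watcher\n # never keeps the process alive on shutdown\n 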
super().__init__()\n self.daemon = default_daemon\n self._exit_check_key: int = keys_table.get(\"f7\")\n\n def set_critical_key(self, keycode: [int, str] = None) -> None:\n \"\"\"\n set interrupt key\n :param keycode interrupt key\n \"\"\"\n if isinstance(keycode, int):\n self._exit_check_key = keycode\n else:\n self._exit_check_key = keys_table.get(keycode)\n\n def run(self) -> None:\n \"\"\"\n listen for keycode _exit_check_key and interrupt the main thread\n \"\"\"\n try:\n while True:\n if keyboard_check.check_key_is_press(self._exit_check_key):\n _thread.interrupt_main()\n except Exception as error:\n print(repr(error), file=sys.stderr)\n\n def init_critical_exit(self) -> None:\n \"\"\"\n should only be used to start critical exit;\n more functionality may be added to this function later\n \"\"\"\n critical_thread = self\n critical_thread.start()\n","sub_path":"je_auto_control/utils/critical_exit/critcal_exit.py","file_name":"critcal_exit.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307090647","text":"import numpy as np\n\nfrom flask import Flask, request, abort\nfrom linebot import (LineBotApi, WebhookHandler)\nfrom linebot.exceptions import (InvalidSignatureError)\nfrom linebot.models import (MessageEvent, TextMessage, TextSendMessage, ImageMessage, ImageSendMessage)\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n\napp = Flask(__name__)\n\nACCESS_TOKEN = \"ZWqC9dOrT8Rpi8YHLYkZiPT7IMB0TTiOlhgEM3qeQrEMwInbLhRAqo3wqbesJea5KIuUoa/9+TdFcxMeNo/g0VyiOKEm7pgq41jeYVy+gsqX8aVNyvkkJoP0pqiAhStUvWGK1MfATE6lzHhsIvIZDAdB04t89/1O/w1cDnyilFU=\"\nSECRET = \"818b6f3efb27d959f5e315aaa7886864\"\n\nFQDN = \"https://kinoko-takenoko.herokuapp.com\"\n\n\nline_bot_api = LineBotApi(ACCESS_TOKEN)\nhandler = WebhookHandler(SECRET)\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n signature = request.headers['X-Line-Signature']\n\n body = request.get_data(as_text=True)\n app.logger.info(\"Requestbody: \" + body)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n# Handler fired when an image message is sent to the LINE bot\n@handler.add(MessageEvent, message=ImageMessage)\ndef handle_image_message(event):\n message_content = line_bot_api.get_message_content(event.message.id)\n with open(\"static/\"+event.message.id+\".jpg\", \"wb\") as f:\n f.write(message_content.content)\n\n test_url = \"./static/\"+event.message.id+\".jpg\"\n########## AI model starts here #################################\n img = image.load_img(test_url, target_size=(150, 150))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n\n model = load_model('kinotake_mix_0_01_ep50.h5')\n result_predict = model.predict(x)\n #res = result_predict[0]\n\n res = np.array(result_predict)\n if res[0] < 0.5:\n res[0] = 1 - res[0]\n print(res[0])\n okashi = \"きのこの山\"\n per = res[0] * 100\n \n elif res[0]>=0.5:\n okashi = \"たけのこの里\"\n per = res[0] * 100\n np.set_printoptions(precision=1)\n text = \"これは\"+ str(per).strip(\"[]\") + \"%の確率で\" + okashi + \"です。\"\n\n##############################################################\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=text))\n \nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331152200","text":"# coding=utf-8\n\"\"\"Performs face detection in realtime.\n\nBased on code 
from https://github.com/shanren7/real_time_face_recognition\n\"\"\"\n# MIT License\n#\n# Copyright (c) 2017 François Gervais\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport cv2\nimport face\nimport os\nimport time\nimport numpy\nfrom PIL import Image, ImageDraw, ImageFont\ndef add_overlays(image, faces):\n if faces is not None:\n img_PIL = Image.fromarray(image)\n font = ImageFont.truetype('simsun.ttc', 40)\n # font colors\n fillColor1 = (255, 0, 0)\n fillColor2 = (0, 255, 0)\n draw = ImageDraw.Draw(img_PIL)\n for face in faces:\n face_bb = face.bounding_box.astype(int)\n draw.line([face_bb[0], face_bb[1], face_bb[2], face_bb[1]], \"green\")\n draw.line([face_bb[0], face_bb[1], face_bb[0], face_bb[3]], fill=128)\n draw.line([face_bb[0], face_bb[3], face_bb[2], face_bb[3]], \"yellow\")\n draw.line([face_bb[2], face_bb[1], face_bb[2], face_bb[3]], \"black\")\n if face.name is not None:\n if face.name == 'unknown':\n draw.text((face_bb[0], face_bb[1]), '陌生人', font=font, fill=fillColor2)\n else:\n draw.text((face_bb[0], face_bb[1]), face.name, font=font, fill=fillColor1)\n image = numpy.asarray(img_PIL)\n return image\n\n\ndef main():\n testdata_path = '../images'\n face_recognition = face.Recognition()\n start_time = time.time()\n for images in os.listdir(testdata_path):\n print(images)\n filename = os.path.splitext(os.path.split(images)[1])[0]\n file_path = testdata_path + \"/\" + images\n image = cv2.imread(file_path)\n frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n faces = face_recognition.identify(frame)\n image = add_overlays(image, faces)\n cv2.imwrite('../images_result/' + filename + '.jpg', image)\n end_time = time.time()\n spend_time = float('%.2f' % (end_time - start_time))\n print('spend_time:',spend_time)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/Images_face_recognition-more-chinese.py","file_name":"Images_face_recognition-more-chinese.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570199276","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport keras.backend as K\n\ndef imageStatistics (resultImage, groundTruth):\n resultImage = np.asarray(Image.open(resultImage))\n groundTruth = np.asarray(Image.open(groundTruth))\n if resultImage.shape!=groundTruth.shape:\n print('Cannot compare images with different dimensions')\n return\n# resultImage.setflags(write=1)\n# 
groundTruth.setflags(write=1)\n# resultImage[resultImage<0.5] = 0\n# resultImage[resultImage>=0.5] = 1\n# groundTruth[groundTruth<0.5] = 0\n# groundTruth[groundTruth>=0.5] = 1\n# \n# TP = 0\n# FP = 0\n# TN = 0\n# FN = 0\n# for i in range(0, resultImage.shape[0]):\n# for j in range (0, resultImage.shape[1]):\n# if resultImage[i,j]==groundTruth[i,j]==1:\n# TP += 1\n# if resultImage[i,j]==1 and resultImage[i,j]!=groundTruth[i,j]:\n# FP += 1\n# if resultImage[i,j]==groundTruth[i,j]==0:\n# TN += 1\n# if resultImage[i,j]==0 and resultImage[i,j]!=groundTruth[i,j]:\n# FP += 1\n# if (TP+FN)>0 and (TN+FP)>0:\n# sensitivity = TP/(TP + FN)\n# specificity = TN/(TN + FP)\n# dice = (2*TP)/(2*TP+FP+FN)\n# else:\n# sensitivity = 1\n# specificity = 1\n# dice = 1\n# print(TN)\n# sensitivity = TP/(TP + FN + 0.0000000000001)\n# specificity = TN/(TN + FP + 0.0000000000001)\n# dice = (2*TP)/(2*TP+FP+FN + 0.0000000000001)\n# resultImage = np.asarray(resultImage).astype(np.bool)\n# groundTruth = np.asarray(groundTruth).astype(np.bool)\n# intersection = np.logical_and(resultImage, groundTruth)\n# dice = 2.*intersection.sum()/(resultImage.sum() + groundTruth.sum())\n \n true_mask = np.asarray(groundTruth).astype(np.bool)\n pred_mask = np.asarray(resultImage).astype(np.bool)\n \n # If both segmentations are all zero, the dice will be 1. (Developer decision)\n im_sum = true_mask.sum() + pred_mask.sum()\n if im_sum == 0:\n return (1.0)\n \n # Compute Dice coefficient\n intersection = np.logical_and(true_mask, pred_mask)\n return 2. * intersection.sum() / im_sum\n #return (sensitivity, specificity, dice)\n","sub_path":"imageStatistics.py","file_name":"imageStatistics.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455804318","text":"import logging\nfrom typing import Optional\n\nimport click\nfrom cookiecutter.main import cookiecutter\n\nfrom .root import Root\nfrom .utils import command, group\n\n\nlog = logging.getLogger(__name__)\n\n\n@group()\ndef project() -> None:\n \"\"\"\n Project operations.\n \"\"\"\n\n\n@command()\n@click.argument(\"slug\", type=str, required=False)\nasync def init(root: Root, slug: Optional[str]) -> None:\n \"\"\"\n Initialize an empty project.\n\n Examples:\n\n # Initializes a scaffolding for the new project with the recommended project\n # structure (see http://github.com/neuromation/cookiecutter-neuro-project)\n neuro project init\n\n # Initializes a scaffolding for the new project with the recommended project\n # structure and sets default project folder name to \"example\"\n neuro project init my-project-id\n \"\"\"\n _project_init(slug)\n\n\ndef _project_init(slug: Optional[str], *, no_input: bool = False) -> None:\n extra_context = None\n if slug:\n extra_context = {\"project_slug\": slug}\n cookiecutter(\n \"gh:neuromation/cookiecutter-neuro-project\",\n checkout=\"release\",\n extra_context=extra_context,\n no_input=no_input,\n )\n\n\nproject.add_command(init)\n","sub_path":"neuromation/cli/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219360797","text":"import speedtest\nimport logging\n\nlogger = logging.getLogger('speedtest')\n\ndef get_results():\n\n sptest = speedtest.Speedtest()\n\n # Get the best server\n logger.info('Getting best server...')\n sptest.get_best_server()\n logger.debug(f'Server found: {sptest.results.server[\"sponsor\"]} 
({sptest.results.server[\"name\"]})')\n\n # Get download speed\n logger.info('Getting download speed.')\n try:\n download = sptest.download()\n logger.debug(f'Download speed acquired: {download}')\n except:\n logger.error('Unable to obtain download speed.')\n download = 0.0\n\n # Get upload speed\n logger.info('Getting upload speed.')\n try:\n upload = sptest.upload(pre_allocate=False)\n logger.debug(f'Upload speed acquired: {upload}')\n except:\n logger.error('Unable to obtain upload speed.')\n upload = 0.0\n\n return download, upload, f'{sptest.results.server[\"sponsor\"]} ({sptest.results.server[\"name\"]})'","sub_path":"Speedtest/speedtest.py","file_name":"speedtest.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21980856","text":"#!/usr/bin/env python3\n\nimport config\nimport os\nimport shutil\nfrom datetime import datetime\n\nimport re\n\nhome_pattern = re.compile(r'^~')\n\n\ndef get_backup_sets():\n \"\"\"\n Get backup sets from config.py file\n \"\"\"\n\n result = dict()\n for e in dir(config):\n if e.startswith('SET_'):\n result[e.replace('SET_', '')] = getattr(config, e)\n return result\n \n\ndef backup_set(name, data):\n \"\"\"\n Backup files path from set\n \"\"\"\n src = os.path.join(name, datetime.now().strftime(config.TIMESTAMP_DIR))\n if os.path.exists(src):\n shutil.rmtree(src)\n for src in data:\n src = home_pattern.sub(os.path.expanduser('~'), src)\n dst = os.path.join(name, datetime.now().strftime(config.TIMESTAMP_DIR), *src.split(os.sep))\n copy_data(src, dst)\n\n\ndef copy_data(src, dst): \n \"\"\"\n Copy file or folder from src to dst\n \"\"\"\n\n if os.path.isdir(src):\n shutil.copytree(src, dst)\n elif os.path.isfile(src):\n try:\n os.makedirs(os.path.split(dst)[0])\n except:\n pass\n shutil.copyfile(src, dst)\n\n\nif __name__ == '__main__':\n for e in get_backup_sets():\n backup_set(e, get_backup_sets()[e])\n","sub_path":"home_backup.py","file_name":"home_backup.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70774546","text":"from django.core.exceptions import ValidationError\n\nimport requests\n\ndef isValidDockerhubImage(value: str):\n # example https://index.docker.io/v1/repositories/eyra/comic/tags/123\n try:\n image, tag = value.split(':')\n except ValueError:\n raise ValidationError(\"Invalid image format, use 'image:tag'\")\n resp = requests.get(f\"https://index.docker.io/v1/repositories/{image}/tags/{tag}\")\n\n if not resp.status_code == 200:\n raise ValidationError(\n f\"Id {value} does not exist in Dockerhub. 
\"\n )","sub_path":"app/comic/eyra/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164308911","text":"#!/usr/bin/python3\n\nimport time\nimport RPi.GPIO as GPIO\n\n\nINV = 11\t#pin that controls the inverting relay\nLATCH = 12\t#pin that controls the latching relay\n\n# setup\nGPIO.setup(INV, GPIO.OUT)\nGPIO.setup(LATCH, GPIO.OUT)\n\nGPIO.output(LATCH, 1)\nGPIO.output(INV, 0)\n\ncounter = 0\n\nwhile True:\n\t\n\t#operate inverter\n\tif counter == 9:\n\t\tGPIO.output(INV, 1) \n\telse:\n\t\tGPIO.output(INV, 0)\n\n\t#operate latch\t\n\tGPIO.output(LATCH, 1)\n\ttime.sleep(50)\n\tGPIO.output(LATCH, 0)\n\ttime.sleep(10)\n\t\n\tcounter = (counter + 1) % 10\n","sub_path":"ppip/durabilidade_fast.py","file_name":"durabilidade_fast.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541553352","text":"import json\nimport logging\nfrom .models import Order\nfrom .models import Payment\nfrom .models import Purchase\nfrom wechat.api import wx_order_query\n\nlogger = logging.getLogger('tscast.term')\n\ndef payment_status_update():\n unchecked_payments = Payment.objects.filter(status='wait-for-payment')\n for wait_pay in unchecked_payments:\n if not wait_pay.receipt:\n continue\n pay_receipt = json.loads(wait_pay.receipt)\n nonce_str = pay_receipt['prepay']['nonce_str']\n out_trade_no = wait_pay.order.uuid.get_hex()\n sign = pay_receipt['prepay']['sign']\n state = wx_order_query(nonce_str, out_trade_no, sign)\n if state:\n wait_pay.status = 'succeeded'\n logger.info('payment %s is %s' % (wait_pay.uuid, wait_pay.status))\n else:\n wait_pay.status = 'failed'\n wait_pay.save(force_update=True)\n Order.objects.filter(uuid=wait_pay.uuid).update(\n status=wait_pay.status\n )\n","sub_path":"src/tscast/term/crontask.py","file_name":"crontask.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398424719","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 8 15:18:52 2021\n\n@author: silicon\n\"\"\"\n\nfrom Read_DB import read_db \nfrom collections import Counter\nfrom plt_pic import plot_bar_pie\nimport csv\nimport sys\n\nrd = read_db()\npp = plot_bar_pie()\nmole_inchi_key = rd.get_compond_inchi()\n\n#all_ele_inchi_1 = (('AAAAJHGLNDAXFP-VNKVACROSA-N',), ('AAAAKTROWFNLEP-UHFFFAOYSA-N',), \n# ('AAAATQFUBIBQIS-IRXDYDNUSA-N',), ('AAAAZQPHATYWOK-JXMROGBWSA-N',))\n\ndef count_eles(the_set):\n eles_list_0 = []\n eles_list_1 = []\n eles_list_2 = []\n eles_list_3 = []\n eles_list_more = []\n \n for ele in the_set:\n #print(\"the\",ele)\n inchi_key = ele[0]\n num_ring = ele[1]\n try:\n num = rd.get_chembl_bb(inchi_key) \n except:\n try:\n num = rd.get_mole_num_baba(inchi_key) \n #num = rd.get_chembl_bb(inchi_key)\n except:\n print(inchi_key,\" does not get himself father\")\n continue\n #if source == \"ChEMBL_Drugs\":\n # pass\n #else:\n # continue\n if num_ring ==0:\n eles_list_0.append((inchi_key,num))\n elif num_ring ==1:\n eles_list_1.append((inchi_key,num))\n elif num_ring ==2:\n eles_list_2.append((inchi_key,num))\n elif num_ring ==3:\n eles_list_3.append((inchi_key,num))\n else:\n eles_list_more.append((inchi_key,num))\n \n return [eles_list_0, eles_list_1, eles_list_2, eles_list_3, eles_list_more] \n\n\ndef sort_plt(inchi_list,task='ele'): \n \n res = 
count_eles(inchi_list) \n name_1 = [[\"ele_chain_freq.csv\",\"ele_chain.jpg\"],[\"ele_single_freq.csv\",\"ele_single.jpg\"],\n [\"ele_double_freq.csv\",\"ele_double.jpg\"],[\"ele_triple_freq.csv\",\"ele_triple.jpg\"],\n [\"ele_more_freq.csv\",\"ele_more.jpg\"]] \n name_2 = [[\"sec_chain_freq.csv\",\"sec_chain.jpg\"],[\"sec_single_freq.csv\",\"sec_single.jpg\"],\n [\"sec_double_freq.csv\",\"sec_double.jpg\"],[\"sec_triple_freq.csv\",\"sec_triple.jpg\"],\n [\"sec_more_freq.csv\",\"sec_more.jpg\"]] \n \n title = \"elementary fragments vs themselves compounds number \"\n name = name_1\n if task == 'sec':\n name = name_2\n title = \"secondary fragments vs themselves compounds number \"\n #print('i am sec')\n for i in range(len(res)):\n res_i = res[i]\n if len(res_i)>0:\n pass\n else:\n continue\n file_name = name[i][0]\n pic_name = name[i][1]\n \n d_order = sorted(res_i,key=lambda x:x[1],reverse=True) \n x = [] \n y = [] \n \n f = open(file_name,\"w\") \n csv_w = csv.writer(f) \n for d_o in d_order: \n x.append(d_o[0]) \n y.append(d_o[1]) \n csv_w.writerow([d_o[0],d_o[1]]) \n f.close() \n \n title = title \n picture_name = pic_name \n #pp.plt_h_bar(x,y,title,picture_name) \n dict_count = {'<1':0,'1-2':0,\"2-3\":0,'3-4':0,'4-5':0,'5-6':0,\">6\":0}\n for y_i in y:\n if y_i < 10:\n dict_count['<1'] +=1\n elif y_i >= 10 and y_i <100:\n dict_count['1-2'] +=1\n elif y_i >= 100 and y_i <1000:\n dict_count['2-3'] +=1\n elif y_i >= 1000 and y_i <10000:\n dict_count['3-4'] +=1\n elif y_i >= 10000 and y_i <100000:\n dict_count['4-5'] +=1\n elif y_i >= 100000 and y_i <1000000:\n dict_count['5-6'] +=1\n else:\n dict_count['>6'] +=1\n \n x_1 = dict_count.keys()\n y_1 = dict_count.values()\n pp.plt_bar(x_1,y_1,title,picture_name)\n \ndef get_frag_heavy_atoms_num(frag_level):\n sum_n = 0\n nums = rd.get_mol_heavy_atom_number(frag_level) \n length = len(nums)\n if frag_level == 'sec':\n file_name = \"sec_frag_heavy_num.csv\"\n picture_name = 'sec_heavy_num.jpg'\n title = 'secondary_heavy_atomic_number'\n else:\n picture_name = 'ele_heavy_num.jpg'\n file_name = 'ele_frag_heavy_num.csv'\n title = 'elementary_heavy_atomic_number'\n f = open(file_name,'w')\n csv_w = csv.writer(f)\n dict_count = {'<10':0,'10-20':0,\"20-30\":0,'30-40':0,'40-50':0,'50-60':0,\">60\":0}\n for num in nums:\n sum_n += int(num[0])\n csv_w.writerow([int(num[0])])\n if int(num[0]) < 10:\n dict_count['<10'] +=1\n elif int(num[0]) >= 10 and int(num[0]) <20:\n dict_count['10-20'] +=1\n elif int(num[0]) >= 20 and int(num[0]) <30:\n dict_count['20-30'] +=1\n elif int(num[0]) >= 30 and int(num[0]) <40:\n dict_count['30-40'] +=1\n elif int(num[0]) >= 40 and int(num[0]) <50:\n dict_count['40-50'] +=1\n elif int(num[0]) >= 50 and int(num[0]) <60:\n dict_count['50-60'] +=1\n else:\n dict_count['>60'] +=1\n x_1 = dict_count.keys()\n y_1 = dict_count.values()\n pp.plt_bar_1(x_1,y_1,title,picture_name)\n \n print(\"number of fragments\",length)\n print('sum',sum_n)\n print('average',sum_n/length) \n \ndef get_calculated_molecules():\n pass\n \nif __name__ == \"__main__\":\n \n job_type = sys.argv[1]\n parameter = sys.argv[2]\n if job_type == 'freq':\n if parameter == 'ele':\n all_ele_inchi = rd.get_all_ele_frags_inchikey()\n sort_plt(all_ele_inchi,'ele') \n \n elif parameter == 'sec':\n all_sec_inchi = rd.get_all_sec_frags_inchikey()\n sort_plt(all_sec_inchi,'sec') \n else:\n print('you input an error parameter')\n sys.exit()\n \n elif job_type == 'atoms_num':\n if parameter == 'ele':\n get_frag_heavy_atoms_num(frag_level=\"ele\")\n elif parameter == 
'sec':\n get_frag_heavy_atoms_num(frag_level=\"sec\")\n else:\n print('you input an error parameter')\n sys.exit()\n else:\n print('you input an error job_type')\n sys.exit() \n \n \n \n \n\n\n\n\n\n\n\n\n\n","sub_path":"count_fragments_frequences/count_ele_sec_freq.py","file_name":"count_ele_sec_freq.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"413501614","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 9 10:55:36 2019\n\n@author: chengyu\n\"\"\"\nimport requests\nimport json\n\ndef get_intent(message_data, url = 'http://13.124.42.223:9999/api/chat/',\n access_token=None):\n \"\"\" Makes use of Send API:\n https://developers.facebook.com/docs/messenger-platform/send-api-reference\n \"\"\"\n headers = {\n 'Content-Type': 'application/json',\n }\n# params = {\n# 'access_token': access_token,\n# }\n \n payload= {\n \"fromUser\": {\n \"id\": \"string\",\n \"name\": \"string\"\n },\n \"toUser\": {\n \"id\": \"string\",\n \"name\": \"string\"\n },\n \"conversation\": {\n \"history\": [\n {\n \"text\": {\n \"data\": \"string\",\n \"isUser\": True,\n \"timestamp\": \"string\"\n }\n }\n ]\n },\n \"current\": message_data,\n \"mode\": \"string\",\n \"sessionId\": \"string\",\n \"messageType\": \"string\",\n \"event\": \"string\"\n }\n \n \n url = url\n response = requests.post(url, headers=headers, params=None,\n data=json.dumps(payload))\n response.raise_for_status()\n return response.json() \n\n#%%\n\nif __name__ == '__main__':\n print(get_intent(\"你是笨蛋么\"))","sub_path":"src/libs/request_util.py","file_name":"request_util.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290925515","text":"from django.conf.urls import url\n\nfrom . 
import views\nfrom .admin import BrandAdmin\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^little_parse', views.little_parse, name='little_parse'),\n url(r'^parse', views.parse, name='parse'),\n url(r'^brand_exp', views.brand_exp, name='brand_exp'),\n url(r'^product_exp', views.product_exp, name='product_exp'),\n url(r'^attribute_exp', views.attribute_exp, name='attribute_exp'),\n # url(r'^csv_exp_snippet', views.csv_exp_snippet, name='csv_exp_snippet'),\n url(r'^export_csv_brand', BrandAdmin.export_csv_brand, name='export_csv_brand'),\n]\n","sub_path":"petshop/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"638731682","text":"\"\"\"Simple helper to paginate query\n\"\"\"\nfrom flask import url_for, request\nfrom flask_restful import inputs\n\n\nDEFAULT_PAGE_SIZE = 50\nDEFAULT_PAGE_NUMBER = 1\n\n\ndef paginate(query, schema):\n # HACK: change 'tenant' back to slug\n if request.view_args.get('tenant'):\n request.view_args['tenant'] = request.view_args.get('tenant').slug\n paginate = request.args.get('paginate', default=True, type=inputs.boolean)\n if not paginate:\n return schema.dump(query).data\n page = request.args.get('page', default=DEFAULT_PAGE_NUMBER, type=int)\n per_page = request.args.get('per_page', default=DEFAULT_PAGE_SIZE, type=int)\n page_obj = query.paginate(page=page, per_page=per_page)\n link_next = url_for(\n request.endpoint,\n page=page_obj.next_num if page_obj.has_next else page_obj.page,\n per_page=per_page,\n _external=True,\n **request.view_args\n )\n link_prev = url_for(\n request.endpoint,\n page=page_obj.prev_num if page_obj.has_prev else page_obj.page,\n per_page=per_page,\n _external=True,\n **request.view_args\n )\n\n return {\n 'total': page_obj.total,\n 'pages': page_obj.pages,\n 'next': link_next,\n 'prev': link_prev,\n 'results': schema.dump(page_obj.items).data\n }\n","sub_path":"amodys/commons/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"575569445","text":"print(\"Scraping COSTCO\")\n# This fails if requests has not been installed; install it first with: pip install requests\nimport requests \n \n# URL of the page to scrape\nurl = \"https://www.costco.com.tw/Appliances/Cooling-Heating-Dehumidifiers/c/306\"\nurl = \"https://www.costco.com.tw/Electronics/Apple-Devices/iPhone/c/10704\"\n \n# use requests.get() to fetch the page content\nr = requests.get(url)\n \n# requests.get() returns a response object\n# on success the page source is in its text attribute; store it in the variable 'web_content'\nweb_content = r.text\n \n#print(web_content) shows the same markup as the browser's view-source\n# import the BeautifulSoup package; if missing, install it first: pip install beautifulsoup4\nfrom bs4 import BeautifulSoup\n\n# parse the HTML with Beautiful Soup:\nsoup = BeautifulSoup(web_content, 'lxml')\n\n# find all div elements holding the product names\nlisterNameElements = soup.find_all('div', class_=\"product-name-container\")\nlisterNameElements\n\nlisterNames = [e.text for e in listerNameElements]\nlisterNames\n# inspecting the page source shows\n# that the data we want is wrapped in a <span> inside the div,\n# but .text extracts the contained text directly\npriceElements = soup.find_all('div', class_=\"product-price\")\n# the extracted text is a string; it can be converted to a number with int()\nprice = [ e.text for e in priceElements]\nprice\nprint(len(listerNames), len(price))\n# 128 128\n \nfor bn, popu in zip(listerNames, price):\n print(popu,
bn)\n","sub_path":"costco.py","file_name":"costco.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"214312463","text":"from selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.common.by import By\nimport time\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom PIL import Image\nimport csv\n\n# 初始化\n# 防止打印一些无用的日志\noption = webdriver.ChromeOptions()\noption.add_experimental_option(\"excludeSwitches\", ['enable-automation', 'enable-logging'])\nweb = Chrome(options=option)\n# 设置等待超时\nwait = WebDriverWait(web, 20)\n\n\n# 登录\ndef login():\n web.get(url)\n web.maximize_window() # 窗口最大化\n time.sleep(2)\n # 登录\n web.find_element(By.ID, 'loginname').send_keys('账号')\n web.find_element(By.ID, 'password').send_keys('密码')\n web.find_element(By.ID, 'isread_em').click()\n web.find_element(By.ID, 'login_btn_withPwd').click()\n time.sleep(2)\n\n\n# 对某元素截图\ndef save_pic(obj, name):\n try:\n pic_url = web.save_screenshot('.\\\\51job.png')\n print(\"%s:截图成功!\" % pic_url)\n\n # 获取元素位置信息\n left = obj.location['x'] * 1.25 # 自己通过原图与实际图片对比得出的系数\n top = obj.location['y'] * 1.25\n right = left + obj.size['width'] * 1.25\n bottom = top + obj.size['height'] * 1.25\n\n print('图:' + name)\n print('Left %s' % left)\n print('Top %s' % top)\n print('Right %s' % right)\n print('Bottom %s' % bottom)\n print('')\n\n im = Image.open('.\\\\51job.png')\n im = im.crop((left, top, right, bottom)) # 元素裁剪\n file_name = '51job_' + name + '.png'\n im.save(file_name) # 元素截图\n except BaseException as msg:\n print(\"%s:截图失败!\" % msg)\n\n\n# 设置元素可见\ndef show_element(element):\n web.execute_script(\"arguments[0].style=arguments[1]\", element, \"display: block;\")\n\n\n# 设置元素不可见\ndef hide_element(element):\n web.execute_script(\"arguments[0].style=arguments[1]\", element, \"display: none;\")\n\n\ndef cut():\n c_background = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, 'canvas.geetest_canvas_bg.geetest_absolute')))\n c_slice = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, 'canvas.geetest_canvas_slice.geetest_absolute')))\n c_full_bg = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, 'canvas.geetest_canvas_fullbg.geetest_fade.geetest_absolute')))\n hide_element(c_slice)\n save_pic(c_background, 'back') # 隐藏滑块\n show_element(c_slice)\n save_pic(c_slice, 'slice') # 所有的\n show_element(c_full_bg)\n save_pic(c_full_bg, 'full') # 隐藏所有的\n\n\n# 判断像素是否相同\ndef is_pixel_equal(bg_image, fullbg_image, x, y):\n \"\"\"\n :param bg_image: (Image)缺口图片\n :param fullbg_image: (Image)完整图片\n :param x: (Int)位置x\n :param y: (Int)位置y\n :return: (Boolean)像素是否相同\n \"\"\"\n # 获取缺口图片的像素点(按照RGB格式)\n bg_pixel = bg_image.load()[x, y]\n # 获取完整图片的像素点(按照RGB格式)\n fullbg_pixel = fullbg_image.load()[x, y]\n # 设置一个判定值,像素值之差超过判定值则认为该像素不相同\n threshold = 20\n # 判断像素的各个颜色之差,abs()用于取绝对值\n if (abs(bg_pixel[0] - fullbg_pixel[0] < threshold) and abs(bg_pixel[1] - fullbg_pixel[1] < threshold) and abs(\n bg_pixel[2] - fullbg_pixel[2] < threshold)):\n # 如果差值在判断值之内,返回是相同像素\n return True\n else:\n # 如果差值在判断值之外,返回不是相同像素\n return False\n\n\n# 计算滑块移动距离\ndef get_distance(bg_image, fullbg_image):\n '''\n :param bg_image: (Image)缺口图片\n :param fullbg_image: (Image)完整图片\n :return: (Int)缺口离滑块的距离\n '''\n # 滑块的初始位置\n distance = 60\n # 遍历像素点横坐标\n for i in range(distance, fullbg_image.size[0]):\n # 遍历像素点纵坐标\n for j in 
range(fullbg_image.size[1]):\n # 如果不是相同像素\n if not is_pixel_equal(fullbg_image, bg_image, i, j):\n # 返回此时横轴坐标就是滑块需要移动的距离\n return i\n\n\n# 破解滑块验证\ndef slide():\n distance = get_distance(Image.open('.\\\\51job_back.png'), Image.open('.\\\\51job_full.png')) / 1.25 # 要将原图与实际图对比的系数除掉\n try:\n slider = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.geetest_slider_button'))) # 找到滑块\n if slider:\n print(\"====有滑块验证=====\")\n action_chains = webdriver.ActionChains(web)\n # 点击,准备拖拽\n action_chains.click_and_hold(slider)\n action_chains.pause(0.2)\n action_chains.move_by_offset(distance - 10, 0)\n action_chains.pause(0.6)\n action_chains.move_by_offset(10, 0) # 添加修正过程\n action_chains.pause(0.6)\n action_chains.release()\n action_chains.perform() # 释放滑块\n time.sleep(5)\n else:\n print(\"===没有滑块验证===\")\n except Exception as e:\n print(\"===\" + str(e))\n\n\n# 取的页面职位信息\ndef get_data(page_nums):\n lists = web.find_element(By.CLASS_NAME, 'j_joblist').find_elements(By.CLASS_NAME, 'e')\n with open('data_job.csv', mode='a', encoding='utf-8', newline='') as f:\n csv_w = csv.writer(f)\n for list in lists:\n job_name = list.find_element(By.CLASS_NAME, 'jname').text\n com_name = list.find_element(By.CLASS_NAME, 'cname').text\n sal_val = list.find_element(By.CLASS_NAME, 'sal').text\n din = list.find_element(By.CLASS_NAME, 'd').text\n csv_w.writerow([job_name, com_name, sal_val, din])\n print(page_nums, 'over!!')\n\n\n# 搜索框输入python开始搜索前max_page页面\ndef search(input_str, max_page):\n web.find_element(By.ID, 'kwdselectid').send_keys(input_str)\n web.find_element(By.XPATH, '/html/body/div[3]/div/div[1]/div/button').click()\n time.sleep(0.5)\n page_nums = 1\n while page_nums <= max_page:\n get_data(page_nums)\n page_nums += 1\n web.find_element(By.CLASS_NAME, 'p_in').click()\n time.sleep(1)\n\n\nif __name__ == '__main__':\n url = 'https://login.51job.com/login.php?loginway=0&isjump=0&lang=c&from_domain=i&url=http%3A%2F%2Fwww.51job.com%2F'\n login()\n cut()\n slide()\n search('python', 10)\n web.close()","sub_path":"tools/spiders/51job_spiders/slide.py","file_name":"slide.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"442440076","text":"# Copyright 2019-2020 Wingify Software Pvt. 
Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" Validate methods and parameters passed to the SDK \"\"\"\n\nimport sys\nimport json\nimport jsonschema\nfrom ..schemas.settings_file_schema import SETTINGS_FILE_SCHEMA\n\nservices = {\n 'logger': ['log'],\n 'event_dispatcher': ['dispatch'],\n 'user_storage': ['get', 'set']\n}\n\n\ndef is_valid_settings_file(settings_file):\n \"\"\" Validates the settings_file\n\n Args:\n settings_file (json):\n JSON object received from our server or somewhere else,\n must be json string representation.\n\n Returns:\n bool: Whether the settings file is valid or not\n \"\"\"\n try:\n settings_file = json.loads(settings_file)\n except Exception:\n return False\n try:\n jsonschema.validate(\n instance=settings_file,\n schema=SETTINGS_FILE_SCHEMA\n )\n except Exception:\n return False\n return True\n\n\ndef is_valid_service(service, service_name):\n \"\"\" Checks whether the service passed by the user\n contains the necessary methods or not\n\n Args:\n service (classobj): User defined class instance\n service_name (string): Name of the service\n\n Returns:\n bool: Whether the class instance provided is valid or not\n \"\"\"\n service_attributes = services.get(service_name)\n if not service_attributes:\n return False\n for attr in service_attributes:\n if getattr(service, attr, None) is None:\n return False\n return True\n\n\ndef is_valid_log_level(level):\n string_levels = [\n 'CRITICAL',\n 'FATAL',\n 'ERROR',\n 'WARN',\n 'WARNING',\n 'INFO',\n 'DEBUG',\n 'NOTSET',\n ]\n # a string level is valid only if it is a recognised level name;\n # any other type must be a numeric level\n if isinstance(level, str):\n return level in string_levels\n return is_valid_number(level)\n\n\ndef is_valid_dict(val):\n return type(val) is dict\n\n\ndef is_valid_value(val):\n return val is not None and bool(val)\n\n\ndef is_valid_non_zero_number(val):\n return type(val) == int and is_valid_value(val)\n\n\ndef is_valid_number(val):\n return type(val) == int\n\n\ndef is_valid_unicode(val):\n if sys.version_info[0] < 3:\n return type(val) is unicode\n return False\n\n\ndef is_valid_string(val):\n return (type(val) == str or is_valid_unicode(val)) and is_valid_value(val)\n\n\ndef is_valid_basis_data_type(val):\n return type(val) in [int, float, bool, str]\n","sub_path":"vwo/helpers/validate_util.py","file_name":"validate_util.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"611063313","text":"from js9 import j\nfrom .Doc import Doc\nfrom .DocWatchdog import DocWatchdog\nJSBASE = j.application.jsbase_get_class()\n\nimport copy\n\nimport imp\nimport sys\n\ndef loadmodule(name, path):\n parentname = \".\".join(name.split(\".\")[:-1])\n sys.modules[parentname] = __package__\n mod = imp.load_source(name, path)\n return mod\n\n\n\nclass DocSite(JSBASE):\n \"\"\"\n \"\"\"\n\n def __init__(self, path,name=\"\"):\n JSBASE.__init__(self)\n\n self.docgen = j.tools.docgenerator\n\n # initial arguments\n\n config_path = j.sal.fs.joinPaths(path,\"docs_config.toml\")\n
config_path2 = j.sal.fs.joinPaths(path,\"docs/docs_config.toml\")\n if not j.sal.fs.exists(config_path) and j.sal.fs.exists(config_path2):\n config_path=config_path2\n path = j.sal.fs.joinPaths(path,\"docs\")\n if j.sal.fs.exists(config_path):\n self.config = j.data.serializer.toml.load(config_path)\n else:\n raise RuntimeError(\"cannot find docs_config in %s\"%config_path)\n\n self.path = path\n if not j.sal.fs.exists(path):\n raise RuntimeError(\"Cannot find path:%s\"%path)\n\n\n if not name:\n if \"name\" not in self.config: \n self.name = j.sal.fs.getBaseName(self.path.rstrip(\"/\")).lower()\n else:\n self.name = self.config[\"name\"].lower()\n else:\n self.name = name.lower()\n\n self.name = j.data.text.strip_to_ascii_dense(self.name)\n\n self.defs = {}\n self.content_default = {} # key is relative path in docsite where default content found\n\n # need to empty again, because was used in config\n self.data_default = {} # key is relative path in docsite where default content found\n\n self.docs = {}\n self.htmlpages = {}\n self.others = {}\n self.files = {}\n self.sidebars = {}\n \n\n \n # check if there are dependencies\n if 'docs' in self.config:\n for item in self.config['docs']:\n if \"name\" not in item or \"url\" not in item:\n raise RuntimeError(\"config docs item:%s not well defined in %s\"%(item,self))\n name = item[\"name\"].strip().lower()\n url = item[\"url\"].strip()\n path = j.clients.git.getContentPathFromURLorPath(url)\n j.tools.docgenerator.load(path,name=name)\n\n self.logger_enable()\n self.logger.level=1\n\n self._git=None\n self._loaded = False\n\n self.logger.info(\"loaded:%s\"%self)\n \n\n @property\n def git(self):\n if self._git is None:\n gitpath = j.clients.git.findGitPath(self.path,die=False)\n if not gitpath:\n return\n if gitpath not in self.docgen._git_repos:\n self._git = j.tools.docgenerator._git_get(gitpath)\n self.docgen._git_repos[gitpath] = self.git \n return self._git \n\n @property\n def urls(self):\n urls = [item for item in self.docs.keys()]\n urls.sort()\n return urls\n\n def _processData(self, path):\n ext = j.sal.fs.getFileExtension(path).lower()\n if ext == \"\":\n # try yaml & toml\n self._processData(path + \".toml\")\n self._processData(path + \".yaml\")\n return\n\n if not j.sal.fs.exists(path):\n return {}\n\n if ext == \"toml\":\n data = j.data.serializer.toml.load(path)\n elif ext == \"yaml\":\n data = j.data.serializer.yaml.load(path)\n else:\n raise j.exceptions.Input(message=\"only toml & yaml supported\", level=1, source=\"\", tags=\"\", msgpub=\"\")\n\n if not j.data.types.dict.check(data):\n raise j.exceptions.Input(message=\"cannot process toml/yaml on path:%s, needs to be dict.\" %\n path, level=1, source=\"\", tags=\"\", msgpub=\"\")\n\n # dont know why we do this? 
something todo probably with mustache and dots?\n keys = [str(key) for key in data.keys()]\n for key in keys:\n if key.find(\".\") != -1:\n data[key.replace(\".\", \"_\")] = data[key]\n data.pop(key)\n\n fulldirpath = j.sal.fs.getDirName(path)\n rdirpath = j.sal.fs.pathRemoveDirPart(fulldirpath, self.path)\n rdirpath = rdirpath.strip(\"/\").strip().strip(\"/\")\n self.data_default[rdirpath] = data\n\n def load(self):\n \"\"\"\n walk in right order over all files which we want to potentially use (include)\n and remember their paths\n\n if duplicate only the first found will be used\n\n \"\"\"\n if self._loaded:\n return\n\n j.sal.fs.remove(self.path + \"errors.md\")\n path = self.path\n if not j.sal.fs.exists(path=path):\n raise j.exceptions.NotFound(\"Cannot find source path in load:'%s'\" % path)\n\n\n def clean(name):\n return j.data.text.strip_to_ascii_dense(name)\n\n def callbackForMatchDir(path, arg):\n base = j.sal.fs.getBaseName(path).lower()\n if base.startswith(\".\"):\n return False\n if base.startswith(\"_\"):\n return False\n return True\n\n def callbackForMatchFile(path, arg):\n base = j.sal.fs.getBaseName(path).lower()\n if base == \"_sidebar.md\":\n return True\n if base.startswith(\"_\"):\n return False\n ext = j.sal.fs.getFileExtension(path)\n if ext == \"md\" and base[:-3] in [\"summary\", \"default\"]:\n return False\n return True\n\n def callbackFunctionDir(path, arg):\n # will see if ther eis data.toml or data.yaml & load in data structure in this obj\n self._processData(path + \"/data\")\n dpath = path + \"/default.md\"\n if j.sal.fs.exists(dpath, followlinks=True):\n C = j.sal.fs.fileGetContents(dpath)\n rdirpath = j.sal.fs.pathRemoveDirPart(path, self.path)\n rdirpath = rdirpath.strip(\"/\").strip().strip(\"/\")\n self.content_default[rdirpath] = C\n return True\n\n def callbackFunctionFile(path, arg):\n self.logger.debug(\"file:%s\"%path)\n ext = j.sal.fs.getFileExtension(path).lower()\n base = j.sal.fs.getBaseName(path)\n if ext == \"md\":\n self.logger.debug(\"found md:%s\"%path)\n base = base[:-3] # remove extension\n doc = Doc(path, base, docsite=self)\n # if base not in self.docs:\n # self.docs[base.lower()] = doc\n self.docs[doc.name_dot_lower] = doc\n elif ext in [\"html\",\"htm\"]:\n self.logger.debug(\"found html:%s\"%path)\n raise RuntimeError()\n # l = len(ext)+1\n # base = base[:-l] # remove extension\n # doc = HtmlPage(path, base, docsite=self)\n # # if base not in self.htmlpages:\n # # self.htmlpages[base.lower()] = doc\n # self.htmlpages[doc.name_dot_lower] = doc\n else:\n \n if ext in [\"png\", \"jpg\", \"jpeg\", \"pdf\", \"docx\", \"doc\", \"xlsx\", \"xls\", \\\n \"ppt\", \"pptx\", \"mp4\",\"css\",\"js\"]:\n self.logger.debug(\"found file:%s\"%path)\n if base in self.files:\n raise j.exceptions.Input(message=\"duplication file in %s,%s\" %\n (self, path), level=1, source=\"\", tags=\"\", msgpub=\"\")\n self.files[base.lower()] = path\n # else:\n # self.logger.debug(\"found other:%s\"%path)\n # l = len(ext)+1\n # base = base[:-l] # remove extension\n # doc = DocBase(path, base, docsite=self)\n # if base not in self.others:\n # self.others[base.lower()] = doc\n # self.others[doc.name_dot_lower] = doc\n \n\n callbackFunctionDir(self.path, \"\") # to make sure we use first data.yaml in root\n\n j.sal.fswalker.walkFunctional(\n self.path,\n callbackFunctionFile=callbackFunctionFile,\n callbackFunctionDir=callbackFunctionDir,\n arg=\"\",\n callbackForMatchDir=callbackForMatchDir,\n callbackForMatchFile=callbackForMatchFile)\n\n self._loaded=True\n\n # 
def file_add(self, path):\n # if not j.sal.fs.exists(path, followlinks=True):\n # raise j.exceptions.Input(message=\"Cannot find path:%s\" % path, level=1, source=\"\", tags=\"\", msgpub=\"\")\n # base = j.sal.fs.getBaseName(path).lower()\n # self.files[base] = path\n\n # def files_copy(self, destination=None):\n # if not destination:\n # if self.hugo:\n # destination = \"static/files\"\n # else:\n # destination = \"files\"\n # dpath = j.sal.fs.joinPaths(self.outpath, destination)\n # j.sal.fs.createDir(dpath)\n # for name, path in self.files.items():\n # j.sal.fs.copyFile(path, j.sal.fs.joinPaths(dpath, name))\n\n # def process(self):\n # for key, doc in self.docs.items():\n # doc.process()\n # self._processed = True\n\n def error_raise(self, errormsg, doc=None):\n if doc is not None:\n errormsg2 = \"## ERROR: %s\\n\\n- in doc: %s\\n\\n%s\\n\" % (j.data.time.getLocalTimeHR(), doc, errormsg)\n j.sal.fs.writeFile(filename=self.path + \"errors.md\", contents=errormsg2, append=True)\n self.logger.error(errormsg2)\n doc.errors.append(errormsg)\n else:\n from IPython import embed\n self.logger.error(\"DEBUG NOW raise error\")\n embed()\n raise RuntimeError(\"stop debug here\")\n\n\n def file_get(self, name, die=True):\n self.load()\n \n for key, val in self.files.items():\n if key.lower() == name.lower():\n return val\n if die:\n raise j.exceptions.Input(message=\"Did not find file:%s in %s\" %\n (name, self), level=1, source=\"\", tags=\"\", msgpub=\"\")\n return None\n\n def doc_get(self, name, cat=\"\", die=True):\n \n self.load()\n\n if j.data.types.list.check(name):\n name = \"/\".join(name)\n\n name = name.strip(\"/\")\n name = name.lower() \n\n if name.endswith(\".md\"):\n name=name[:-3] #remove .md\n \n if \"/\" in name:\n name = name.replace(\"/\",\".\")\n\n name = name.strip(\".\") # make sure it's clean again\n\n\n #let caching work\n if name in self.docs:\n if self.docs[name] is None and die:\n raise j.exceptions.Input(message=\"Cannot find doc with name:%s\" % name, level=1, source=\"\", tags=\"\", msgpub=\"\") \n return self.docs[name]\n\n #build candidates to search\n candidates = [name]\n if name.endswith(\"readme\"):\n candidates.append(name[:-6]+\"index\")\n else:\n candidates.append(name+\".readme\")\n\n if name.endswith(\"index\"): \n nr,res = self._doc_get(name[:-5]+\"readme\",cat=cat)\n if nr==1:\n return 1,res\n name = name[:-6]\n else:\n candidates.append(name+\".index\")\n\n #look for $fulldirname.$dirname as name of doc\n if \".\" in name: \n name0 = name+\".\"+name.split(\".\")[-1]\n candidates.append(name0)\n\n for cand in candidates:\n nr,res = self._doc_get(cand,cat=cat)\n if nr == 1:\n self.docs[name] = res #remember for caching\n return self.docs[name]\n\n if die:\n raise j.exceptions.Input(message=\"Cannot find doc with name:%s\" % name, level=1, source=\"\", tags=\"\", msgpub=\"\")\n else:\n return None\n\n def _doc_get(self, name, cat=\"\"):\n \n if name in self.docs:\n if cat == \"\":\n return 1, self.docs[name]\n else:\n if self.docs[name] == cat:\n return 1, self.docs[name]\n\n else:\n \n res = []\n for key,item in self.docs.items():\n if name in item.name_dot_lower:\n res.append(key)\n if len(res)>0:\n return len(res),self.docs[res[0]]\n else:\n return 0,None \n\n def sidebar_get(self, url):\n \"\"\"\n Calculate the sidebar; returns None if the url has no sidebar.\n \"\"\"\n self.load() \n if j.data.types.list.check(url):\n url = \"/\".join(url)\n self.logger.debug(\"sidebar_get:%s\"%url) \n if url in self.sidebars:\n
return self.sidebars[url]\n\n url_original = copy.copy(url)\n url = url.strip(\"/\")\n url = url.lower()\n\n if url.endswith(\".md\"):\n url = url[:-3]\n\n url = url.replace(\"/\",\".\")\n url = url.strip(\".\")\n\n if url == \"\":\n self.sidebars[url_original]=None\n return None\n\n if \"_sidebar\" not in url:\n self.sidebars[url_original]=None\n return None #did not find sidebar just return None\n\n\n if url in self.docs:\n self.sidebars[url_original] = self._sidebar_process(self.docs[url].content,url_original=url_original)\n return self.sidebars[url_original] \n \n #did not find the usual location, lets see if we can find the doc allone\n url0=url.replace(\"_sidebar\",\"\").strip().strip(\".\").strip()\n if \".\" in url0: #means we can \n name=url0.split(\".\")[-1]\n doc=self.doc_get(name,die=False)\n if doc:\n #we found the doc, so can return the right sidebar\n possiblepath = doc.path_dir_rel.replace(\"/\",\".\").strip(\".\")+\"._sidebar\"\n if not possiblepath == url:\n return self.get(possiblepath)\n\n #lets look at parent\n \n if url0==\"\":\n raise RuntimeError(\"cannot be empty\")\n \n newurl = \".\".join(url0.split(\".\")[:-1])+\"._sidebar\"\n return self.sidebar_get(newurl)\n \n self.sidebars[url_original] = self._sidebar_process(self.docs[url].content,url_original=url_original)\n return self.sidebars[url_original]\n\n def _sidebar_process(self,c,url_original):\n \n def clean(c):\n out= \"\"\n state = \"start\"\n for line in c.split(\"\\n\"):\n lines = line.strip()\n if lines.startswith(\"*\"):\n lines=lines[1:]\n if lines.startswith(\"-\"):\n lines=lines[1:]\n if lines.startswith(\"+\"):\n lines=lines[1:]\n lines = lines.strip()\n if lines == \"\":\n continue\n if line.find(\"(/)\") is not -1:\n continue\n if line.find(\"---\") is not -1:\n if state == \"start\":\n continue\n state=\"next\"\n out+=line+\"\\n\"\n return out\n\n c=clean(c)\n\n out= \"* [Home](/)\\n\"\n \n for line in c.split(\"\\n\"):\n if line.strip()==\"\":\n out+=\"\\n\\n\"\n continue\n \n if \"(\" in line and \")\" in line:\n url = line.split(\"(\",1)[1].split(\")\")[0]\n else:\n url = \"\"\n if \"[\" in line and \"]\" in line:\n descr = line.split(\"[\",1)[1].split(\"]\")[0]\n pre = line.split(\"[\")[0]\n pre = pre.replace(\"* \",\"\").replace(\"- \",\"\")\n else:\n descr = line\n pre = \"<<\"\n\n if url:\n doc = self.doc_get(url,die=False)\n if doc is None:\n out+=\"%s* NOTFOUND:%s\"%(pre,url) \n else:\n out+=\"%s* [%s](/%s)\\n\"%(pre,descr,doc.name_dot_lower.replace(\".\",\"/\"))\n\n else: \n if not pre:\n pre = \" \"\n if pre is not \"<<\":\n out+=\"%s* %s\\n\"%(pre,descr)\n else:\n out+=\"%s\\n\"%(descr)\n\n res = self.doc_get(\"_sidebar_parent\",False)\n if res:\n out+=res.content \n else:\n out+=\"----\\n\\n\"\n for key,val in j.tools.docgenerator.docsites.items():\n if key.startswith(\"www\"):\n continue\n out+=\"[%s](../%s/)\\n\"%(key,key)\n\n return out\n\n def __repr__(self):\n return \"docsite:%s\" % ( self.path)\n\n __str__ = __repr__\n\n # def write(self):\n # if self._config:\n # j.sal.fs.removeDirTree(self.outpath)\n # dest = j.sal.fs.joinPaths(self.outpath, \"content\")\n # j.sal.fs.createDir(dest)\n # # source = self.path\n # # j.do.copyTree(source, dest, overwriteFiles=True, ignoredir=['.*'], ignorefiles=[\n # # \"*.md\", \"*.toml\", \"_*\", \"*.yaml\", \".*\"], rsync=True, recursive=True, rsyncdelete=False)\n\n # for key, doc in self.docs.items():\n # doc.process()\n\n # # find the defs, also process the aliases\n # for key, doc in self.docs.items():\n # if \"tags\" in doc.data:\n # tags 
= doc.data[\"tags\"]\n # if \"def\" in tags:\n # name = doc.name.lower().replace(\"_\", \"\").replace(\"-\", \"\").replace(\" \", \"\")\n # self.defs[name] = doc\n # if \"alias\" in doc.data:\n # for alias in doc.data[\"alias\"]:\n # name = alias.lower().replace(\"_\", \"\").replace(\"-\", \"\").replace(\" \", \"\")\n # self.defs[name] = doc\n\n # for key, doc in self.docs.items():\n # # doc.defs_process()\n # doc.write()\n\n # self.generator.generate(self)\n\n # if j.sal.fs.exists(j.sal.fs.joinPaths(self.path, \"static\"), followlinks=True):\n # j.sal.fs.copyDirTree(j.sal.fs.joinPaths(self.path, \"static\"), j.sal.fs.joinPaths(self.outpath, \"public\"))\n # else:\n # self.logger.info(\"no need to write:%s\"%self.path) ","sub_path":"JumpScale9Lib/tools/docgenerator/DocSite.py","file_name":"DocSite.py","file_ext":"py","file_size_in_byte":18759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55932245","text":"from sklearn.datasets import load_iris\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score,confusion_matrix\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nwart = pd.read_csv(\"/home/ai3/Desktop/common/ML/Day2/Questions/Immunotherapy.csv\")\nwarts = wart.as_matrix()\n\nX = warts[:,0:6]\ny = warts[:,7]\n\n\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)\n\n\nscores = []\nfor i in range(1,25):\n\tknn = KNeighborsClassifier (n_neighbors = i)\n\tknn.fit(X_train,y_train)\n\tp = knn.predict(X_test)\n\t\n\tconfusion_matrix(y_test,p)\n\ta = accuracy_score(y_test,p)\n\tscores.append(a)\n#print score\nplt.plot(range(1,25),scores)\nplt.show()","sub_path":"ML ans/day2/ml3.py","file_name":"ml3.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"3159745","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMIT License\n\nCopyright (c) 2016 Arnaud Aliès\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport sys\nimport re\n\nif (sys.version_info[0] < 3):\n import urllib2\n import urllib\n import HTMLParser\nelse:\n import html.parser\n import urllib.request\n import urllib.parse\n\nagent = {'User-Agent':\n\"Mozilla/4.0 (\\\ncompatible;\\\nMSIE 6.0;\\\nWindows NT 5.1;\\\nSV1;\\\n.NET CLR 1.1.4322;\\\n.NET CLR 2.0.50727;\\\n.NET CLR 3.0.04506.30\\\n)\"}\n\ngoogle_languages = \\\n{\"Afrikaans\":\"af\",\n\"Albanian\":\"sq\",\n\"Amharic\":\"am\",\n\"Arabic\":\"ar\",\n\"Armenian\":\"hy\",\n\"Azeerbaijani\":\"az\",\n\"Basque\":\"eu\",\n\"Belarusian\":\"be\",\n\"Bengali\":\"bn\",\n\"Bosnian\":\"bs\",\n\"Bulgarian\":\"bg\",\n\"Catalan\":\"ca\",\n\"Cebuano\":\"ceb\",\n\"Simplified_Chinese\":\"zh-CN\",\n\"Chinese\":\"zh-CN\",\n\"Traditional_Chinese\":\"zh-TW\",\n\"Corsican\":\"co\",\n\"Croatian\":\"hr\",\n\"Czech\":\"cs\",\n\"Danish\":\"da\",\n\"Dutch\":\"nl\",\n\"English\":\"en\",\n\"Esperanto\":\"eo\",\n\"Estonian\":\"et\",\n\"Finnish\":\"fi\",\n\"French\":\"fr\",\n\"Frisian\":\"fy\",\n\"Galician\":\"gl\",\n\"Georgian\":\"ka\",\n\"German\":\"de\",\n\"Greek\":\"el\",\n\"Gujarati\":\"gu\",\n\"Haitian Creole\":\"ht\",\n\"Hausa\":\"ha\",\n\"Hawaiian\":\"haw\",\n\"Hebrew\":\"iw\",\n\"Hindi\":\"hi\",\n\"Hmong\":\"hmn\",\n\"Hungarian\":\"hu\",\n\"Icelandic\":\"is\",\n\"Igbo\":\"ig\",\n\"Indonesian\":\"id\",\n\"Irish\":\"ga\",\n\"Italian\":\"it\",\n\"Japanese\":\"ja\",\n\"Javanese\":\"jw\",\n\"Kannada\":\"kn\",\n\"Kazakh\":\"kk\",\n\"Khmer\":\"km\",\n\"Korean\":\"ko\",\n\"Kurdish\":\"ku\",\n\"Kyrgyz\":\"ky\",\n\"Lao\":\"lo\",\n\"Latin\":\"la\",\n\"Latvian\":\"lv\",\n\"Lithuanian\":\"lt\",\n\"Luxembourgish\":\"lb\",\n\"Macedonian\":\"mk\",\n\"Malagasy\":\"mg\",\n\"Malay\":\"ms\",\n\"Malayalam\":\"ml\",\n\"Maltese\":\"mt\",\n\"Maori\":\"mi\",\n\"Marathi\":\"mr\",\n\"Mongolian\":\"mn\",\n\"Myanmar\":\"my\",\n\"Burmese\":\"my\",\n\"Nepali\":\"ne\",\n\"Norwegian\":\"no\",\n\"Nyanja\":\"ny\",\n\"Chichewa\":\"ny\",\n\"Pashto\":\"ps\",\n\"Persian\":\"fa\",\n\"Polish\":\"pl\",\n\"Portuguese\":\"pt\",\n\"Portugese\":\"pt\",\n\"Punjabi\":\"pa\",\n\"Romanian\":\"ro\",\n\"Russian\":\"ru\",\n\"Samoan\":\"sm\",\n\"Scots Gaelic\":\"gd\",\n\"Serbian\":\"sr\",\n\"Sesotho\":\"st\",\n\"Shona\":\"sn\",\n\"Sindhi\":\"sd\",\n\"Sinhala (Sinhalese)\":\"si\",\n\"Slovak\":\"sk\",\n\"Slovenian\":\"sl\",\n\"Somali\":\"so\",\n\"Spanish\":\"es\",\n\"Sundanese\":\"su\",\n\"Swahili\":\"sw\",\n\"Swedish\":\"sv\",\n\"Tagalog (Filipino)\":\"tl\",\n\"Tajik\":\"tg\",\n\"Tamil\":\"ta\",\n\"Telugu\":\"te\",\n\"Thai\":\"th\",\n\"Turkish\":\"tr\",\n\"Ukrainian\":\"uk\",\n\"Urdu\":\"ur\",\n\"Uzbek\":\"uz\",\n\"Vietnamese\":\"vi\",\n\"Welsh\":\"cy\",\n\"Xhosa\":\"xh\",\n\"Yiddish\":\"yi\",\n\"Yoruba\":\"yo\",\n\"Zulu\":\"zu\",\n}\n\n\ndef unescape(text):\n if (sys.version_info[0] < 3):\n parser = HTMLParser.HTMLParser()\n else:\n parser = html.parser.HTMLParser()\n return (parser.unescape(text))\n\n\ndef translate(to_translate, to_language=\"auto\", from_language=\"auto\"):\n \"\"\"Returns the translation using google translate\n you must shortcut the language you define\n (French = fr, English = en, Spanish = es, etc...)\n if not defined it will detect it or use english by default\n\n Example:\n print(translate(\"salut tu vas bien?\", \"en\"))\n hello you alright?\n \"\"\"\n 
base_link = \"http://translate.google.com/m?hl=%s&sl=%s&q=%s\"\n tmp_language = google_languages[to_language]\n if tmp_language == \"\":\n to_language = \"auto\"\n else:\n to_language = tmp_language\n tmp_language = google_languages[from_language]\n if tmp_language == \"\":\n from_language = \"auto\"\n else:\n from_language = tmp_language\n if (sys.version_info[0] < 3):\n to_translate = urllib.quote_plus(to_translate)\n link = base_link % (to_language, from_language, to_translate)\n request = urllib2.Request(link, headers=agent)\n raw_data = urllib2.urlopen(request).read()\n else:\n to_translate = urllib.parse.quote(to_translate)\n link = base_link % (to_language, from_language, to_translate)\n request = urllib.request.Request(link, headers=agent)\n raw_data = urllib.request.urlopen(request).read()\n data = raw_data.decode(\"utf-8\")\n expr = r'class=\"t0\">(.*?)<'\n re_result = re.findall(expr, data)\n if (len(re_result) == 0):\n result = \"\"\n else:\n result = unescape(re_result[0])\n return (result)\n","sub_path":"googleTranslate.py","file_name":"googleTranslate.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477117810","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\n\nclass Aggregator:\n def __init__(self, aggregated_size, reduction=tf.reduce_max, activation=tf.nn.relu, dropout=0.7,\n initializer=tf.contrib.layers.xavier_initializer(), regularizer=layers.l2_regularizer(scale=0.1),\n name=None):\n \"\"\"\n :param aggregated_size: the number of elements in the representation created\n :param reduction: order-independent method of pooling the response for each neighbour\n :param activation: activation function for the included dense layer\n :param dropout: quantity of dropout regularisation on the output of the included dense layer\n :param regularizer: regularisation for the dense layer\n :param initializer: initializer for the weights of the dense layer\n :param name: Name for the operation (optional).\n \"\"\"\n self._aggregated_size = aggregated_size\n self._reduction = reduction\n self._activation = activation\n self._dropout = dropout\n self._initializer = initializer\n self._regularizer = regularizer\n self._name = name\n\n def __call__(self, neighbour_features):\n \"\"\"\n Take a tensor that describes the features (aggregated or otherwise) of a set of neighbours and aggregate\n them through a dense layer and order-independent pooling/reduction\n\n :param neighbour_features: the neighbours' features, shape (num_neighbours, neighbour_feat_size)\n :return: aggregated representation of neighbours, shape (1, aggregated_size)\n \"\"\"\n\n with tf.name_scope(self._name, default_name=\"aggregate\") as scope:\n\n dense_layer = tf.layers.Dense(units=self._aggregated_size, activation=self._activation, use_bias=True,\n kernel_initializer=self._initializer, kernel_regularizer=self._regularizer,\n name=f'dense_layer_{self._name}')\n\n dense_output = dense_layer(neighbour_features)\n\n # tf.summary.histogram(self._name + '/dense/weights', dense_layer.weights)\n tf.summary.histogram(self._name + '/dense/bias', dense_layer.bias)\n tf.summary.histogram(self._name + '/dense/kernel', dense_layer.kernel)\n\n tf.summary.histogram(self._name + '/dense_output', dense_output)\n\n # Use dropout on output from the dense layer to prevent overfitting\n regularised_output = tf.nn.dropout(dense_output, self._dropout)\n tf.summary.histogram(self._name + '/regularised_output', regularised_output)\n\n # Use max-pooling (or similar) to aggregate the results for each neighbour. 
This is an important operation\n # since the order of the neighbours isn't considered, which is a property we need Note that this is reducing\n # not pooling, which is equivalent to having a pool size of num_neighbours\n reduced_output = self._reduction(regularised_output, axis=1)\n tf.summary.histogram(self._name + '/reduced_output', reduced_output)\n\n # If reducing reduced rank to 1, then add a dimension so that we continue to deal with matrices not vectors\n rank = tf.rank(reduced_output)\n if tf.executing_eagerly():\n evaluated_rank = rank.numpy()\n else:\n evaluated_rank = rank\n\n if evaluated_rank == 1:\n reduced_output = tf.expand_dims(reduced_output, 0)\n\n # # Get the output from shape (1, neighbour_feat_length) to (neighbour_feat_length, 1)\n # final_output = tf.transpose(reduced_output)\n\n return reduced_output\n","sub_path":"kglib/kgcn/core/nn/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213792638","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Class to do stateful video calibration of a mono camera. \"\"\"\n\nimport copy\nimport logging\n\nimport sksurgeryimage.calibration.point_detector as pd\n\nimport sksurgerycalibration.video.video_calibration_data as cd\nimport sksurgerycalibration.video.video_calibration_driver_base as vdb\nimport sksurgerycalibration.video.video_calibration_params as cp\nimport sksurgerycalibration.video.video_calibration_utils as cu\nimport sksurgerycalibration.video.video_calibration_wrapper as vc\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass MonoVideoCalibrationDriver(vdb.BaseVideoCalibrationDriver):\n \"\"\" Class to do stateful video calibration of a mono camera. \"\"\"\n def __init__(self,\n point_detector: pd.PointDetector,\n minimum_points_per_frame: int\n ):\n \"\"\"\n Stateful class for mono video calibration.\n\n :param point_detector: Class derived from PointDetector\n :param minimum_points_per_frame: Minimum number to accept frame\n \"\"\"\n super().__init__(minimum_points_per_frame)\n\n self.point_detector = point_detector\n\n # Create data holders, and parameter holders, specific to Mono.\n calibration_data = cd.MonoVideoData()\n calibration_params = cp.MonoCalibrationParams()\n\n # Pass them to base class, so base class can access them.\n self._init_internal(calibration_data, calibration_params)\n\n def grab_data(self,\n image,\n device_tracking=None,\n calibration_object_tracking=None):\n \"\"\"\n Extracts points, by passing it to the PointDetector.\n\n This will throw various exceptions if the input data is invalid,\n but will return empty arrays if no points were detected.\n So, no points is not an error. 
Its an expected condition.\n\n :param image: RGB image.\n :param device_tracking: transformation for the tracked device\n :param calibration_object_tracking: transformation of tracked\n calibration object\n :return: The number of points grabbed.\n \"\"\"\n number_of_points = 0\n\n ids, object_points, image_points = \\\n self.point_detector.get_points(image)\n\n if ids is not None and ids.shape[0] >= self.minimum_points_per_frame:\n\n ids, image_points, object_points = \\\n cu.convert_pd_to_opencv(ids,\n object_points,\n image_points)\n\n self.video_data.push(image,\n ids,\n object_points,\n image_points)\n\n self.tracking_data.push(device_tracking,\n calibration_object_tracking)\n\n number_of_points = image_points.shape[0]\n\n LOGGER.info(\"Grabbed: Returning %s points.\", str(number_of_points))\n return number_of_points\n\n def calibrate(self, flags=0):\n \"\"\"\n Do the video calibration, returning RMS re-projection error.\n\n :param flags: OpenCV calibration flags, eg. cv2.CALIB_FIX_ASPECT_RATIO\n :return: RMS projection\n \"\"\"\n rms_proj_err, camera_matrix, dist_coeffs, rvecs, tvecs = \\\n vc.mono_video_calibration(\n self.video_data.object_points_arrays,\n self.video_data.image_points_arrays,\n (self.video_data.images_array[0].shape[1],\n self.video_data.images_array[0].shape[0]),\n flags\n )\n\n self.calibration_params.set_data(camera_matrix,\n dist_coeffs,\n rvecs,\n tvecs)\n\n LOGGER.info(\"Mono calibration: rms_proj_err=%s.\", str(rms_proj_err))\n return rms_proj_err, copy.deepcopy(self.calibration_params)\n\n def iterative_calibration(self,\n number_of_iterations: int,\n reference_ids,\n reference_image_points,\n reference_image_size,\n flags: int = 0):\n \"\"\"\n Does iterative calibration, like Datta 2009,\n returning RMS re-projection error.\n :return: RMS projection\n \"\"\"\n rms_proj_err, param_copy = self.calibrate(flags=flags)\n cached_images = copy.deepcopy(self.video_data.images_array)\n\n for i in range(0, number_of_iterations):\n images = copy.deepcopy(cached_images)\n cu.detect_points_in_canonical_space(\n self.point_detector,\n self.minimum_points_per_frame,\n self.video_data,\n images,\n self.calibration_params.camera_matrix,\n self.calibration_params.dist_coeffs,\n reference_ids,\n reference_image_points,\n reference_image_size)\n\n rms_proj_err, param_copy = self.calibrate(flags=flags)\n\n self.point_detector.set_camera_parameters(\n self.calibration_params.camera_matrix,\n self.calibration_params.dist_coeffs)\n\n LOGGER.info(\"Iterative calibration: %s: rms_proj_err=%s.\",\n str(i), str(rms_proj_err))\n\n return rms_proj_err, param_copy\n\n def handeye_calibration(self,\n override_pattern2marker=None,\n use_opencv: bool = True,\n do_bundle_adjust: bool = False):\n \"\"\"\n Do handeye calibration, returning RMS re-projection error.\n\n Note: This handeye_calibration on this class assumes you are\n tracking both the calibration pattern (e.g. chessboard) and the\n device (e.g. laparoscope). So, the calibration routines calibrate\n for hand2eye and pattern2marker. 
If you want something more customised,\n work with video_calibration_hand_eye.py.\n\n :param override_pattern2marker: If provided a 4x4 pattern2marker\n that is taken as constant.\n :param use_opencv: If True we use OpenCV based methods, if false,\n Guofang Xiao's method.\n :param do_bundle_adjust: If True we do an additional bundle adjustment\n at the end.\n\n :return: RMS reprojection error\n :rtype: float\n \"\"\"\n\n rms_proj_err, handeye, pattern2marker = \\\n vc.mono_handeye_calibration(\n self.video_data.object_points_arrays,\n self.video_data.image_points_arrays,\n self.calibration_params.camera_matrix,\n self.calibration_params.dist_coeffs,\n self.tracking_data.device_tracking_array,\n self.tracking_data.calibration_tracking_array,\n self.calibration_params.rvecs,\n self.calibration_params.tvecs,\n override_pattern2marker=override_pattern2marker,\n use_opencv=use_opencv,\n do_bundle_adjust=do_bundle_adjust\n )\n\n self.calibration_params.set_handeye(handeye, pattern2marker)\n\n return rms_proj_err, copy.deepcopy(self.calibration_params)\n","sub_path":"sksurgerycalibration/video/video_calibration_driver_mono.py","file_name":"video_calibration_driver_mono.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"531313317","text":"import OpenGL\nfrom OpenGL.GL import *\n\nimport numpy\n\nimport glfw\n\ndef ParseShader(filepath):\n\n vertexShader, fragmentShader = ('', '')\n currentShader = None\n\n with open(filepath, 'r') as src:\n for line in src:\n\n if '#shader' in line:\n if 'vertex' in line:\n currentShader = 'vertex'\n elif 'fragment' in line:\n currentShader = 'fragment'\n else:\n raise ValueError('Wrong Shader Format!!!')\n else:\n if currentShader == 'vertex':\n vertexShader += line\n elif currentShader == 'fragment':\n fragmentShader += line\n else:\n raise ValueError('Wrong Shader Format!!!')\n\n return vertexShader, fragmentShader\n\n\n\ndef CompileShader(shaderType, source):\n source = [source]\n\n id = glCreateShader(shaderType)\n glShaderSource(id, source)\n glCompileShader(id)\n\n result = glGetShaderiv(id, GL_COMPILE_STATUS)\n if result == GL_FALSE:\n message = glGetShaderInfoLog(id)\n print(message)\n glDeleteShader(id)\n return 0\n\n return id\n\ndef CreateShader(vertexShader, fragmentShader):\n program = glCreateProgram()\n vs = CompileShader(GL_VERTEX_SHADER, vertexShader)\n fs = CompileShader(GL_FRAGMENT_SHADER, fragmentShader)\n\n glAttachShader(program, vs)\n glAttachShader(program, fs)\n glLinkProgram(program)\n glValidateProgram(program)\n\n glDeleteShader(vs)\n glDeleteShader(fs)\n\n return program\n\n \n\n\ndef main():\n if not glfw.init():\n print('glfw.init error')\n return\n \n window = glfw.create_window(640, 480, 'Basic', None, None)\n if not window:\n glfw.terminate()\n print('glfw.create_window error')\n return\n\n glfw.make_context_current(window)\n\n print(glGetString(GL_VERSION))\n\n\n\n\n # vertex buffer\n positions = numpy.array([-0.5, -0.5,\n 0.5, -0.5,\n 0.5, 0.5,\n -0.5, 0.5], numpy.float32)\n\n\n buffer = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, buffer)\n glBufferData(GL_ARRAY_BUFFER, positions.itemsize * len(positions), positions, GL_STATIC_DRAW)\n\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, positions.itemsize * 2, None)\n\n\n\n\n\n # index buffer\n indecies = numpy.array([0, 1, 2,\n 2, 3, 0], numpy.uintc)\n\n ibo = glGenBuffers(1)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)\n 
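# copy the index data into the bound GL_ELEMENT_ARRAY_BUFFER so glDrawElements can use it\n 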
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indecies.itemsize * len(indecies), indecies ,GL_STATIC_DRAW)\n \n\n\n\n\n # shader\n vertexShader, fragmentShader = ParseShader('./cppRedo/Basic.shader')\n\n shader = CreateShader(vertexShader, fragmentShader)\n\n glUseProgram(shader)\n\n\n\n\n\n\n while not glfw.window_should_close(window):\n glClear(GL_COLOR_BUFFER_BIT)\n\n\n # draw call\n # glDrawArrays(GL_TRIANGLES, 0, 3)\n glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)\n\n glfw.swap_buffers(window)\n\n glfw.poll_events()\n\n\n # glDeleteProgram(shader)\n\n\nif __name__=='__main__':\n main()","sub_path":"cppRedo/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54387952","text":"\"\"\".\"\"\"\n\nimport functools\nimport math\n\nimport user\nimport boxbot\nimport floor\nimport shaft\nimport state\nimport util\n\ndef _check_locks(_name, _locks, _length, _time):\n if len(_locks) != _length:\n raise Exception(\"len(locks) \"+str(len(_locks))+\" != _length \"+str(_length))\n for _lock in _locks:\n if _lock is not None:\n util.check_number(\"_lock\", _lock, float, _time, False, math.inf, False)\n\ndef _clear_locks(_locks, _time):\n for _i, _lock in enumerate(_locks):\n if (_lock is not None) and (_lock <= _time):\n _locks[_i] = None\n\ndef create_simulator(_config, _created, _users):\n return Simulator(\\\n boxbot.create_boxbot(_config.boxbot_config, _created),\n [None for _floor in range(25)],\\\n _created,\\\n [floor.create_floor(_created, _floor) for _floor in range(25)],\\\n [None for _floor in range(4)],\\\n [shaft.create_shaft(_config.shaft_config, _created) for _shaft in range(4)],\\\n _users)\n\ndef _get_load(_shaft, _time, _users):\n _load = 0.0\n for _user in _users:\n _load += _user.get_load(_shaft, _time)\n return _load\n\ndef _locks_timeout(_locks):\n result = math.inf\n for _lock in _locks:\n if _lock is not None:\n result = min(result, _lock)\n return result\n\nclass Simulator(state.State):\n def __init__(self, _boxbot, _button_locks, _created, _floors, _shaft_locks, _shafts, _users):\n super(Simulator, self).__init__(_created,\\\n min(state.states_timeout(_boxbot, _floors, _shafts, _users),\\\n min(_locks_timeout(_button_locks), _locks_timeout(_shaft_locks))))\n util.check_type(\"_boxbot\", _boxbot, boxbot.Boxbot)\n _check_locks(\"_button_locks\", _button_locks, 25, _created)\n util.check_list(\"_floors\", _floors, floor.Floor)\n _check_locks(\"_shaft_locks\", _shaft_locks, 4, _created)\n util.check_list(\"_shafts\", _shafts, shaft.Shaft)\n util.check_list(\"_users\", _users, user.User)\n self._boxbot = _boxbot\n self._button_locks = _button_locks\n self._floors = _floors\n self._shaft_locks = _shaft_locks\n self._shafts = _shafts\n self._users = _users\n def __str__(self):\n return \"Simulator(id: \"+hex(id(self))\\\n +\", _boxbot: \"+str(self._boxbot)\\\n +\", _button_locks: [\"+functools.reduce(\\\n lambda x, y: y if x == \"\" else str(x)+\", \"+str(y), self._button_locks)\\\n +\"], _created: \"+str(self._created)\\\n +\", _floors: [\"+functools.reduce(\\\n lambda x, y: y if x == \"\" else str(x)+\", \"+str(y), self._floors)\\\n +\"], _shaft_locks: [\"+functools.reduce(\\\n lambda x, y: y if x == \"\" else str(x)+\", \"+str(y), self._shaft_locks)\\\n +\"], _shafts: [\"+functools.reduce(\\\n lambda x, y: y if x == \"\" else str(x)+\", \"+str(y), self._shafts)\\\n +\"], _users: [\"+functools.reduce(\\\n lambda x, y: y if x == \"\" else 
str(x)+\", \"+str(y), self._users)\\\n +\"], _timeout: \"+str(self._timeout)+\")\"\n def _create(self, _boxbot, _button_locks, _created, _floors, _shaft_locks, _shafts, _users):\n _clear_locks(_button_locks, _created)\n _clear_locks(_shaft_locks, _created)\n if (self._boxbot == _boxbot)\\\n and (self._button_locks == _button_locks)\\\n and (self._floors == _floors)\\\n and (self._shaft_locks == _shaft_locks)\\\n and (self._shafts == _shafts)\\\n and (self._users == _users):\n return self\n return Simulator(_boxbot, _button_locks, _created, _floors, _shaft_locks, _shafts, _users)\n def get_chime_direction(self, _floor, _shaft, _time):\n return self._floors[_floor].get_chime_direction(_shaft)\n def get_destination_indicator(self, _floor, _time):\n return self._floors[_floor].get_destination_indicator(_time)\n def get_destination_indicator_shaft(self, _floor, _time):\n return self._floors[_floor].get_destination_indicator_shaft(_time)\n def get_goals(self, _shaft, _time):\n return self._boxbot.get_goals(_shaft)\n def get_load(self, _shaft, _time):\n return _get_load(_shaft, _time, self._users)\n def get_open(self, _shaft, _time):\n return self._shafts[_shaft].get_open(_time)\n def get_position(self, _shaft, _time):\n return self._shafts[_shaft].get_position(_get_load(_shaft, _time, self._users), _time)\n def get_requested_destination(self, _floor, _time):\n return self._floors[_floor].get_requested_destination(_time)\n def get_users_in_shaft(self, _shaft, _time):\n result = []\n for _user in self._users:\n _in = _user.get_in_shaft(_shaft, _time)\n if _in is not None:\n util.check_type(\"_in[0]\", _in[0], str)\n util.check_number(\"_in[1]\", _in[1], float, 0.0, True, 1.0, True)\n result.append(_in)\n return result\n def find_joe(self, _name):\n for _user in self._users:\n if isinstance(_user, user.Joe) and (_user.get_name() == _name):\n return _user\n return None\n def is_blocked(self, _shaft, _time):\n return self._shafts[_shaft].is_blocked(_time)\n def is_closing(self, _shaft, _time):\n return self._shafts[_shaft].is_closing(_time)\n def is_moving_down(self, _shaft, _time):\n return self._shafts[_shaft].is_moving_down(_time)\n def is_moving_up(self, _shaft, _time):\n return self._shafts[_shaft].is_moving_up(_time)\n def is_open(self, _shaft, _time):\n return self._shafts[_shaft].is_open(_time)\n def is_opening(self, _shaft, _time):\n return self._shafts[_shaft].is_opening(_time)\n def is_overweight_indicator_set(self, _shaft, _time):\n return self._shafts[_shaft].is_overweight_indicator_set(_time)\n def short_description(self, _time, no_time=False):\n result = \"\"\n if not no_time:\n result += \"T %5.1f # \"%(_time)\n result += \"E \"\n for _s in range(4):\n if _s > 0:\n result += \" \"\n result += str(_s)+\"(\"\\\n +self._shafts[_s].short_description(_get_load(_s, _time, self._users), _time)\\\n +\")\"\n result += \" # F\"\n for _f in range(25):\n desc = self._floors[_f].short_description()\n if desc:\n result += \" \"+str(_f)+\"(\"+desc+\")\"\n result += \" # B\"+self._boxbot.short_description()\n result += \" # U\"\n for _user in self._users:\n desc = _user.short_description(_time)\n if desc:\n result += \" \"+desc\n return result\n def short_description_floor(self, _floor, _time):\n return self._floors[_floor].short_description()\n def short_description_shaft(self, _shaft, _time):\n return self._shafts[_shaft].short_description(_get_load(_shaft, _time, self._users), _time)\n def _time_impl(self, _input, _output, _time):\n _floors = list(self._floors)\n _shafts = list(self._shafts)\n 
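# take copies of the per-tick state; the Input/Output helpers below mutate these lists in place\n 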
_button_locks = list(self._button_locks)\n _shaft_locks = list(self._shaft_locks)\n _users = list(self._users)\n _new_users = []\n for _floor in range(25):\n _floors[_floor] = _floors[_floor].time(None, None, _time)\n for _shaft in range(4):\n _shafts[_shaft] =\\\n _shafts[_shaft].time(_SimulatorShaftInput(_shaft, _users), None, _time)\n _boxbot = self._boxbot.time(\\\n _SimulatorBoxbotInput(_floors, _shafts, _time, _users),\\\n _SimulatorBoxbotOutput(_floors, _shafts, _time, _users),\\\n _time)\n for _user_index, _x in enumerate(_users):\n _users[_user_index] = _users[_user_index].time(\n _SimulatorUserInput(_floors, _shafts, _time),\\\n _SimulatorUserOutput(\\\n _button_locks, _floors, _new_users, _shaft_locks, _shafts, _time),\\\n _time)\n _users.extend(_new_users)\n _users = list(filter(lambda _user: not isinstance(_user, user.NoOp), _users))\n return self._create(_boxbot, _button_locks, _time, _floors, _shaft_locks, _shafts, _users)\n\nclass _SimulatorBoxbotInput(boxbot.BoxbotInput):\n def __init__(self, _floors, _shafts, _time, _users):\n self._floors = _floors\n self._shafts = _shafts\n self._time = _time\n self._users = _users\n def get_load(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return _get_load(_shaft, _time, self._users)\n def get_position(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].get_position(_get_load(_shaft, _time, self._users), _time)\n def is_at_floor(self, _floor, _shaft, _time):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_at_floor(_floor, _time)\n def is_blocked(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_blocked(_time)\n def is_closed(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_closed(_time)\n def is_closing(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_closing(_time)\n def is_moving(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_moving(_time)\n def is_moving_to_floor(self, _destination, _shaft, _time):\n util.check_number(\"_destination\", _destination, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_moving_to_floor(_destination, _time)\n def is_open(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_open(_time)\n def is_opening(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, 
True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_opening(_time)\n def is_requested(self, _departure, _destination, _time):\n util.check_number(\"_departure\", _departure, int, 0, True, 24, True)\n util.check_number(\"_destination\", _destination, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._floors[_departure].is_requested(_destination, _time)\n\nclass _SimulatorBoxbotOutput(boxbot.BoxbotOutput):\n def __init__(self, _floors, _shafts, _time, _users):\n self._floors = _floors\n self._shafts = _shafts\n self._time = _time\n self._users = _users\n def clear_blocked_indicator(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].clear_blocked_indicator(_time)\n def clear_chime(self, _floor, _shaft, _time):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[_floor] = self._floors[_floor].clear_chime(_shaft, _time)\n def clear_destination_indicator(self, _departure, _time):\n util.check_number(\"_departure\", _departure, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[_departure] = self._floors[_departure].clear_destination_indicator(_time)\n def clear_error_indicator(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].clear_error_indicator(_time)\n def clear_overweight_indicator(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].clear_overweight_indicator(_time)\n def clear_shaft_indicator(self, _departure, _time):\n util.check_number(\"_departure\", _departure, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[_departure] = self._floors[_departure].clear_shaft_indicator(_time)\n def close(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].close(_time)\n def move_to_floor(self, _destination, _shaft, _time):\n util.check_number(\"_destination\", _destination, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].move_to_floor(\\\n _destination, _get_load(_shaft, _time, self._users), _time)\n def open2(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].open2(_time)\n def set_blocked_indicator(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = 
self._shafts[_shaft].set_blocked_indicator(_time)\n def set_chime(self, _direction, _floor, _shaft, _time):\n util.check_direction(\"_direction\", _direction, True)\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[_floor] = self._floors[_floor].set_chime(_direction, _shaft, _time)\n def set_destination_indicator(self, _departure, _destination, _time):\n util.check_number(\"_departure\", _departure, int, 0, True, 24, True)\n util.check_number(\"_destination\", _destination, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[_departure] =\\\n self._floors[_departure].set_destination_indicator(_destination, _time)\n def set_error_indicator(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].set_error_indicator(_time)\n def set_overweight_indicator(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].set_overweight_indicator(_time)\n def set_shaft_indicator(self, _departure, _shaft, _time):\n util.check_number(\"_departure\", _departure, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[_departure] = self._floors[_departure].set_shaft_indicator(_shaft, _time)\n\nclass SimulatorConfig(object):\n def __init__(self, boxbot_config=boxbot.BoxbotConfig(), shaft_config=shaft.ShaftConfig()):\n util.check_type(\"boxbot_config\", boxbot_config, boxbot.BoxbotConfig)\n util.check_type(\"shaft_config\", shaft_config, shaft.ShaftConfig)\n self.boxbot_config = boxbot_config\n self.shaft_config = shaft_config\n\nclass _SimulatorShaftInput(shaft.ShaftInput):\n def __init__(self, _shaft, _users):\n self._shaft = _shaft\n self._users = _users\n def get_load(self, _time):\n return _get_load(self._shaft, _time, self._users)\n\nclass _SimulatorUserInput(user.UserInput):\n def __init__(self, _floors, _shafts, _time):\n self._floors = _floors\n self._shafts = _shafts\n self._time = _time\n def get_destination_indicator_dir(self, _floor, _time):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._floors[_floor].get_destination_indicator_dir(_time)\n def get_destination_indicator_shaft(self, _floor, _time):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._floors[_floor].get_destination_indicator_shaft(_time)\n def get_open(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].get_open(_time)\n def is_at_floor(self, _floor, _shaft, _time):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return 
self._shafts[_shaft].is_at_floor(_floor, _time)\n def is_chiming(self, _direction, _floor, _shaft, _time):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._floors[_floor].is_chiming(_direction, _shaft, _time)\n def is_overweight_indicator_set(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n return self._shafts[_shaft].is_overweight_indicator_set(_time)\n\nclass _SimulatorUserOutput(user.UserOutput):\n def __init__(self, _button_locks, _floors, _new_users, _shaft_locks, _shafts, _time):\n self._button_locks = _button_locks\n self._floors = _floors\n self._new_users = _new_users\n self._shaft_locks = _shaft_locks\n self._shafts = _shafts\n self._time = _time\n def add_user(self, _user, _time):\n util.check_type(\"_user\", _user, user.User)\n self._new_users.append(_user)\n def clear_blocked(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].clear_blocked(_time)\n def clear_requested(self, departure, destination, _time):\n util.check_number(\"departure\", departure, int, 0, True, 24, True)\n util.check_number(\"destination\", destination, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[departure] = self._floors[departure].clear_requested(destination, _time)\n def lock_buttons(self, _floor, _time, _timeout):\n util.check_number(\"_floor\", _floor, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n util.check_number(\"_timeout\", _timeout, float, self._time, False, math.inf, False)\n if self._button_locks[_floor] is not None:\n return False\n self._button_locks[_floor] = _timeout\n return True\n def lock_shaft(self, _shaft, _time, _timeout):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n util.check_number(\"_timeout\", _timeout, float, self._time, False, math.inf, False)\n if self._shaft_locks[_shaft] is not None:\n return False\n self._shaft_locks[_shaft] = _timeout\n return True\n def set_blocked(self, _shaft, _time):\n util.check_number(\"_shaft\", _shaft, int, 0, True, 3, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._shafts[_shaft] = self._shafts[_shaft].set_blocked(_time)\n def set_requested(self, departure, destination, _time):\n util.check_number(\"departure\", departure, int, 0, True, 24, True)\n util.check_number(\"destination\", destination, int, 0, True, 24, True)\n util.check_number(\"_time\", _time, float, self._time, True, self._time, True)\n self._floors[departure] = self._floors[departure].set_requested(destination, _time)\n","sub_path":"src/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":21949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"49896171","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nCommon functionalities for AMT Driver\n\"\"\"\nimport logging\nimport pywsman\nimport xmltodict\nfrom ast import literal_eval\nfrom xml.etree import ElementTree\nfrom wry import data_structures\nfrom wry import exceptions\nfrom wry.decorators import retry, add_client_options\nfrom wry.config import RESOURCE_URIs, SCHEMAS\nfrom wry.data_structures import _strip_namespace_prefixes, WryDict\nfrom collections import OrderedDict\n\n\n\n_SOAP_ENVELOPE = 'http://www.w3.org/2003/05/soap-envelope'\n\nLOG = logging.getLogger(__name__)\n\nAMT_PROTOCOL_PORT_MAP = {\n 'http': 16992,\n 'https': 16993,\n}\n\n\ndef _validate(doc, silent=False):\n if doc is None:\n raise exceptions.AMTConnectFailure\n if not silent:\n if doc.is_fault():\n raise exceptions.WSManFault(doc)\n return doc\n\n\ndef get_options_copy(options):\n new_options = pywsman.ClientOptions()\n for attr in dir(options):\n if attr.startswith('get_'):\n setter = attr.replace('get_', 'set_')\n value = getattr(options, attr)()\n getattr(options, setter)(value)\n if options.get_flags() == 16:\n new_options.set_dump_request()\n return new_options\n\n\n@add_client_options\n@retry\ndef wsman_get(client, resource_uri, options=None, silent=False):\n '''Get target server info'''\n doc = client.get(options, resource_uri)\n return _validate(doc, silent=silent)\n\n\n@add_client_options\n@retry\ndef wsman_pull(client, resource_uri, options=None, wsman_filter=None, context=None, silent=False):\n '''Pull the next batch of enumeration results from the target server'''\n doc = client.pull(options, wsman_filter, resource_uri, context)\n return _validate(doc, silent=silent)\n\n\n@add_client_options\n@retry\ndef wsman_enumerate(client, resource_uri, options=None, wsman_filter=None, silent=False):\n '''Start an enumeration on the target server'''\n doc = client.enumerate(options, wsman_filter, resource_uri)\n return _validate(doc, silent=silent)\n\n\n@add_client_options\n@retry\ndef wsman_put(client, resource_uri, data, options=None, silent=False):\n '''Put a resource document to the target server\n :param silent: Ignore WSMan errors, and return the document anyway. Does not\n ignore the endpoint being down.\n '''\n doc = client.put(options, resource_uri, str(data), len(data))\n return _validate(doc, silent=silent)\n\n@add_client_options\n@retry\ndef wsman_invoke(client, resource_uri, method, data=None, options=None, silent=False):\n '''Invoke method on target server.'''\n doc = client.invoke(options, resource_uri, str(method), pywsman.create_doc_from_string(str(data)))\n return _validate(doc, silent=silent)\n\n\ndef get_resource(client, resource_name, options=None, as_xmldoc=False):\n '''\n Get a single resource by name, returning it as a WryDict, or as the raw\n XML document if as_xmldoc is True.\n '''\n uri = RESOURCE_URIs[resource_name]\n doc = wsman_get(client, uri, options=options)\n if as_xmldoc:\n return doc\n return WryDict(doc)\n \n\ndef enumerate_resource(client, resource_name, wsman_filter=None, options=None):\n '''\n Enumerate all instances of the named resource, pulling until the\n enumeration reports EndOfSequence.\n '''\n uri = RESOURCE_URIs[resource_name]\n doc = wsman_enumerate(client, uri, options=options) # Add in relevant kwargs... 
filter?\n doc = WryDict(doc)\n context = doc['EnumerateResponse']['EnumerationContext']\n ended = False\n output = {resource_name: []}\n while ended is False:\n doc = wsman_pull(client, uri, context=str(context), options=options)\n response = WryDict(doc)['PullResponse']\n ended = response.pop('EndOfSequence', False)\n output[resource_name].append(response['Items'][resource_name])\n return output\n\n\ndef put_resource(client, indict, options=None, uri=None, silent=False):\n '''\n Given a dict describing a wsman resource, post this resource to the client.\n :returns: data_structures.WryDict\n :param indict: A dictionary or dictionary-like object whose top-level\n key names a resource in common.RESOURCE_URIs.\n :param uri: If a mapping does not exist in common.RESOURCE_URIs, the resource URI can be specified manually here.\n :param mappings: A dictionary providing extra mappings between resource names and URIs.\n '''\n if not uri:\n uri = RESOURCE_URIs[indict.keys()[0]] # Possible to support multiple simply here?\n data = indict.as_xml()\n doc = wsman_put(client, uri, data, options=options, silent=silent)\n return WryDict(doc)\n\n\ndef invoke_method(service_name, method_name, options, client, resource_name=None, affected_item=None, selector=None, args_before=(), args_after=(), anonymous=False):\n '''\n selector should be a dictionary in the form:\n {selector_name: {element_name: element_value}} ???\n Change this for a tuple, I think, it will make things easier.\n '''\n if anonymous:\n address_schema = 'addressing_anonymous'\n else:\n address_schema = 'addressing'\n options = get_options_copy(options)\n service_uri = RESOURCE_URIs[service_name]\n\n def add_arguments(data_dict, argument_pairs=()):\n for arg_name, arg_value in argument_pairs:\n data_dict[method_name + '_INPUT'][arg_name] = {\n '#text': arg_value,\n '@xmlns': service_uri,\n }\n\n data = {method_name + '_INPUT': OrderedDict({'@xmlns': service_uri})}\n add_arguments(data, args_before)\n if resource_name:\n data[method_name + '_INPUT'].update(OrderedDict([\n (affected_item, OrderedDict([\n ('@xmlns', service_uri),\n ('Address', {\n '#text': SCHEMAS[address_schema],\n '@xmlns': SCHEMAS['addressing'],\n }),\n ('ReferenceParameters', {\n 'ResourceURI': {\n '#text': RESOURCE_URIs[resource_name],\n '@xmlns': SCHEMAS['wsman'],\n },\n '@xmlns': SCHEMAS['addressing'],\n }),\n ]))\n ]))\n add_arguments(data, args_after)\n if selector:\n data[method_name + '_INPUT'][affected_item]['ReferenceParameters']['SelectorSet'] = {\n 'Selector': {\n '#text': selector[1],\n '@Name': selector[0],\n },\n '@xmlns': SCHEMAS['wsman'],\n }\n if len(selector) > 2:\n assert len(selector) == 3\n options.add_selector(selector[0], selector[-1])\n\n xml = xmltodict.unparse(data, full_document=False, pretty=True)\n doc = wsman_invoke(client, service_uri, method_name, xml, options=options)\n returned = WryDict(doc)\n return_value = returned[method_name + '_OUTPUT']['ReturnValue']\n if return_value != 0:\n raise exceptions.NonZeroReturn(return_value)\n return not return_value\n\n","sub_path":"wry/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"125707934","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.shortcuts import render,HttpResponse\nimport smtplib\nfrom email.mime.text import MIMEText\nimport random,pymysql,json,time\nfrom cmdb.models import userinfo\n# 
Create your views here.\n\ndef homepage(request):\n return render(request,'index.html')\n\ndef registry(request):\n if request.method == \"POST\":\n\n\n\n print(request.POST.get('username'))\n print(request.POST.get('email'))\n print(request.POST.get('password'))\n email = request.POST.get('email')\n username = request.POST.get('username')\n password = request.POST.get('password')\n conn = pymysql.connect(host=\"172.18.3.189\", user=\"root\", passwd=\"xinwei\", db=\"utob\")\n cur = conn.cursor()\n sql = \"insert cmdb_userinfo(email,username,password,createtime) VALUES('%s','%s','%s',now())\"%(email,username,password)\n mous = cur.execute(sql)\n conn.commit()\n\n return HttpResponse(json.dumps({\"status\": 1, \"result\": \"registration failed\"}))\n\n\n\n\ndef sendEmail(to_email):\n\n random_code = makecode(6)\n _user = \"894513081@qq.com\"\n _pwd = \"gzpsdvdyzgusbchi\"\n _to = to_email\n msg = MIMEText(\"your code is %s\"%random_code)\n msg[\"Subject\"] = \"NEW Bee CMDB\"\n msg[\"From\"] = _user\n msg[\"To\"] = _to\n\n try:\n s = smtplib.SMTP_SSL(\"smtp.qq.com\", 465)\n s.login(_user, _pwd)\n s.sendmail(_user, _to, msg.as_string())\n s.quit()\n print(\"Success!\")\n except smtplib.SMTPException as e:\n print(\"Failed,%s\" % e)\n return random_code\n\ndef makecode(num):\n '''Randomly generate a verification code of the requested length, containing letters and digits'''\n res = ''\n for i in range(num):\n zimu = chr(random.randint(97,122))\n digt = str(random.randint(0,9))\n res += random.choice([zimu,digt])\n return res\n\n#write cache to redis\ndef write_to_cache(email,random_code):\n\n print('sssssssssssss')\n cache.set(\"code_\"+email,{\"email\":email,\"random_code\":random_code})\n print(\"aaaaaaa\")\n\n\n##select cache to redis\n\ndef select_to_cache(email):\n ss = cache.get(email)\n if ss == None:\n cache.set(email,0)\n cache.expire(email, 86400)\n res = 0\n return res\n else:\n res = cache.get(email)+1\n cache.set(email,res)\n cache.expire(email, 86400)\n return res\n\n#write info to mysql\n\n# def check_email(to_email):\n#\n# conn = pymysql.connect(host=\"172.18.3.189\", user=\"root\", passwd=\"xinwei\", db=\"utob\")\n# cur = conn.cursor()\n# sql = \"select id from cmdb_userinfo where email= '%s'\"%to_email\n# res = cur.execute(sql)\n# conn.commit()\n# return res\n\ndef check(request):\n if request.method == \"POST\":\n res = request.POST\n column = list(res.keys())[0]\n to_something = list(res.values())[0]\n conn = pymysql.connect(host=\"172.18.3.189\", user=\"root\", passwd=\"xinwei\", db=\"utob\")\n cur = conn.cursor()\n sql = \"select id from cmdb_userinfo where %s= '%s'\"%(column,to_something)\n mous = cur.execute(sql)\n conn.commit()\n if not mous:\n if cache.get(to_something) is None or cache.get(to_something) < 3 :\n # random_code = sendEmail(to_something)\n select_to_cache(to_something)\n write_to_cache(to_something,'123456')\n status = 0\n result = \"\"\n return HttpResponse(json.dumps({\"status\": status, \"result\": result}))\n else:\n status = 1\n result = \"more than 3 registration attempts\"\n return HttpResponse(json.dumps({\"status\": status, \"result\": result}))\n else:\n status = 1\n result = 'email already in use'\n return HttpResponse(json.dumps({\"status\": status, \"result\": result}))\n\n\ndef validate_name(request):\n if request.method == \"POST\":\n res = request.POST\n column = list(res.keys())[0]\n to_something = list(res.values())[0]\n conn = pymysql.connect(host=\"172.18.3.189\", user=\"root\", passwd=\"xinwei\", db=\"utob\")\n cur = conn.cursor()\n sql = \"select id from cmdb_userinfo where %s= '%s'\"%(column,to_something)\n mous = cur.execute(sql)\n conn.commit()\n 
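# cur.execute returns the number of matching rows, so zero means the name is still free\n 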
if not mous:\n status = 0\n result = \"\"\n return HttpResponse(json.dumps({\"status\": status, \"result\": result}))\n else:\n status = 1\n result = 'username already in use'\n return HttpResponse(json.dumps({\"status\": status, \"result\": result}))\n\n\ndef validate_code(request):\n js_email = request.POST.get(\"email\")\n js_code = request.POST.get(\"code\")\n print(js_email)\n print(js_code)\n key = cache.get(\"code_\"+js_email)\n print(key)\n if js_email and key:\n\n back_code = key.get(\"random_code\")\n if back_code == js_code:\n return HttpResponse(json.dumps({\"status\": 0, \"result\": \"ok\"}))\n else:\n return HttpResponse(json.dumps({\"status\": 1, \"result\": \"incorrect verification code\"}))\n\n else:\n return HttpResponse(json.dumps({\"status\": 1, \"result\": \"invalid email\"}))","sub_path":"cmdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"36185139","text":"import sys\nimport string\nimport math\nimport collections\n\n\nclass NbClassifier(object):\n \"\"\"\n BEFORE TUNING THE CLASSIFIER:\n\n train.txt: 0.993\n dev.txt: 0.966\n test.txt: 0.982\n \n AFTER TUNING THE CLASSIFIER:\n\n train.txt: 0.997\n dev.txt: 0.980\n test.txt: 0.984\n\n *accuracy improved further after improving pre-processing*\n \"\"\"\n \"\"\"\n A Naive Bayes classifier object has three parameters, all of which are populated during initialization:\n - a set of all possible attribute types\n - a dictionary of the probabilities P(Y), labels as keys and probabilities as values\n - a dictionary of the probabilities P(F|Y), with (feature, label) pairs as keys and probabilities as values\n \"\"\"\n \n def __init__(self, training_filename, stopword_file):\n self.attribute_types = set()\n self.label_prior = {} \n self.word_given_label = {} \n self.collect_attribute_types(training_filename)\n if stopword_file is not None:\n self.remove_stopwords(stopword_file)\n self.train(training_filename)\n\n \"\"\"\n\n A helper function to transform a string into a list of word strings.\n You should not need to modify this unless you want to improve your classifier in the extra credit portion.\n \"\"\"\n\n def extract_words(self, text):\n \n \n #punc = [\"%\", \"*\", \"+\", \"@\", \"{\", \"}\", \"|\", \"~\" ] # changes the punctuation to be removed\n no_punct_text = \"\".join([x for x in text.lower() if not x in string.punctuation])\n return [word for word in no_punct_text.split()]\n\n \"\"\"\n Given a stopword_file, read in all stop words and remove them from self.attribute_types\n Implement this for extra credit.\n \"\"\"\n\n def remove_stopwords(self, stopword_file):\n with open(stopword_file, 'r') as f:\n sw = self.extract_words(f.read())\n\n stop = set(sw)\n self.attribute_types = (self.attribute_types - stop)\n f.close()\n\n \"\"\"\n\n Given a training datafile, add all features that appear at least m times to self.attribute_types\n \"\"\"\n\n def collect_attribute_types(self, training_filename, m=1):\n\n longstr = \"\"\n with open(training_filename, 'r') as f:\n longstr = self.extract_words(f.read())\n\n wordcounter = collections.Counter(longstr)\n for word in longstr:\n if not word == \"spam\" and not word == \"ham\":\n if wordcounter[word] >= m:\n #if not word == \"spam\" or not word == \"ham\":\n self.attribute_types.add(word)\n #elif wordcounter[word] >= m+1:\n #self.attribute_types.add(word)\n\n f.close()\n print(len(self.attribute_types))\n \n \"\"\"\n Given a training datafile, estimate the model probability parameters 
P(Y) and P(F|Y).\n Estimates should be smoothed using the smoothing parameter k.\n \"\"\"\n\n def train(self, training_filename, k=1):\n\n self.label_prior = {}\n self.word_given_label = {}\n cardinality = len(self.attribute_types)\n totalwords = totalmsg = spamwords = spammsg = hamwords = hammsg = 0.0\n\n for word in self.attribute_types:\n self.word_given_label[word, \"ham\"] = 0.0\n self.word_given_label[word, \"spam\"] = 0.0\n \n with open(training_filename, 'r') as f:\n for line in f.readlines():\n words = self.extract_words(line) \n label = words[0]\n words.remove(label)\n totalwords += len(words)\n totalmsg += 1\n \n if label == \"ham\":\n hammsg += 1\n hamwords += len(words)\n elif label == \"spam\":\n spammsg += 1\n spamwords += len(words)\n \n for word in words:\n if word in self.attribute_types:\n self.word_given_label[word, label] += 1\n \n self.label_prior[\"ham\"] = hammsg/totalmsg\n self.label_prior[\"spam\"] = spammsg/totalmsg\n \n \n for j,v in self.word_given_label.items():\n count = v\n label = j[1] \n if label == \"spam\":\n self.word_given_label[j] = (count + k)/(spamwords + k*cardinality)\n elif label == \"ham\":\n self.word_given_label[j] = (count + k)/(hamwords + k*cardinality)\n \n for j,v in self.word_given_label.items():\n if v <= 0.0001:\n print(\"less\")\n \n \n f.close()\n\n \"\"\"\n Given a piece of text, return a relative belief distribution over all possible labels.\n The return value should be a dictionary with labels as keys and relative beliefs as values.\n The probabilities need not be normalized and may be expressed as log probabilities. \n \"\"\"\n\n def predict(self, text):\n\n beliefs = {}\n spamp = math.log(self.label_prior[\"spam\"])\n hamp = math.log(self.label_prior[\"ham\"])\n \n for word in text:\n if (word, \"ham\") in self.word_given_label.keys():\n hamp += math.log(self.word_given_label[(word, \"ham\")])\n\n if (word, \"spam\") in self.word_given_label.keys():\n spamp += math.log(self.word_given_label[(word, \"spam\")])\n\n beliefs[\"ham\"] = hamp\n beliefs[\"spam\"] = spamp\n\n return beliefs\n\n \"\"\"\n Given a datafile, classify all lines using predict() and return the accuracy as the fraction classified correctly.\n \"\"\"\n\n def evaluate(self, test_filename):\n correct = total = 0.0\n\n with open(test_filename, 'r') as f:\n for l in f.readlines():\n line = self.extract_words(l)\n label = line[0]\n line.remove(label)\n p = self.predict(line)\n #p = self.predict('BIG BROTHER ALERT! The computer has selected u for 10k cash or #150 voucher. Call 09064018838. 
NNT PO Box CRO1327 18+ BT Landline Cost 150ppm mobiles vary')\n #print(p)\n predicted = max(p.keys(), key=(lambda k: p[k]))\n if predicted == label:\n correct += 1\n total += 1\n \n f.close()\n return (correct/total)\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 3 or len(sys.argv) > 4:\n print(\"\\nusage: ./hmm.py [training data file] [test or dev data file] [(optional) stopword file]\")\n exit(0)\n\n elif len(sys.argv) == 3:\n classifier = NbClassifier(sys.argv[1], None)\n\n else:\n #sys.argv[3]\n classifier = NbClassifier(sys.argv[1], sys.argv[3])\n print(classifier.evaluate(sys.argv[2]))","sub_path":"AI/AI/Naive Bayes Classifier/nb-classifier.py","file_name":"nb-classifier.py","file_ext":"py","file_size_in_byte":6844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"175499660","text":"import math\n\ndef calculateFuel(mass):\n return math.floor(int(mass) / 3) - 2\n\ndef calculateFuelRecursive(mass):\n fuel = calculateFuel(mass)\n if fuel <= 0:\n return 0\n return fuel + calculateFuelRecursive(fuel)\n\nwith open ('../../../input/Advent2019/Dec01.txt', 'r') as f:\n records = [record for record in f.readlines()]\n\ndef partOne():\n fuelRequired = 0\n for record in records:\n fuelRequired += calculateFuel(record)\n return fuelRequired\n\ndef partTwo():\n fuelRequired = 0\n for record in records:\n fuelRequired += calculateFuelRecursive(record)\n return fuelRequired\n\n\nprint(\"Part One Result:\")\nprint(partOne())\nprint(\"Part Two Result:\")\nprint(partTwo())","sub_path":"python/src/Advent2019/Dec01.py","file_name":"Dec01.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"60780816","text":"from turtle import Turtle\r\n\r\nclass Scoreboard(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.color(\"white\")\r\n self.penup()\r\n self.hideturtle()\r\n self.lscore = 0\r\n self.rscore = 0\r\n self.update()\r\n\r\n def update(self):\r\n self.clear()\r\n self.goto(-50, 200)\r\n self.write(self.lscore, align=\"center\", font=(\"Courier\", 80, \"normal\"))\r\n self.goto(50, 200)\r\n self.write(self.rscore, align=\"center\", font=(\"Courier\", 80, \"normal\"))\r\n\r\n def left_score(self):\r\n self.lscore += 1\r\n self.update()\r\n\r\n def right_score(self):\r\n self.rscore += 1\r\n self.update()\r\n","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"205211263","text":"# -*- coding: utf-8 -*-\ndef get_constellation(month, day):\n \"\"\"\n Compute the zodiac sign from a birthday\n :param month: month\n :param day: day of the month\n :return:\n \"\"\"\n dates = (21, 20, 21, 21, 22, 22, 23, 24, 24, 24, 23, 22)\n constellations = (\"摩羯座\", \"水瓶座\", \"双鱼座\", \"白羊座\", \"金牛座\", \"双子座\", \"巨蟹座\", \"狮子座\", \"处女座\", \"天秤座\", \"天蝎座\", \"射手座\", \"摩羯座\")\n if day < dates[month - 1]:\n return constellations[month - 1]\n else:\n return constellations[month]\n\n\ndef zodiac(month, day):\n constellations = ('摩羯座', '水瓶座', '双鱼座', '白羊座', '金牛座', '双子座',\n '巨蟹座', u'狮子座', '处女座', '天秤座', '天蝎座', '射手座')\n\n d = ((1, 20), (2, 19), (3, 21), (4, 21), (5, 21), (6, 22), (7, 23), (8, 23), (9, 23), (10, 23), (11, 23), (12, 23))\n return constellations[len(tuple(filter(lambda y: y <= (month, day), d))) % 12]\n\n\nprint(get_constellation(2, 19))\nprint(zodiac(2, 
19))\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"574343799","text":"import math\n\n\ndef multi_gcd(lst):\n if len(lst) == 1:\n return lst[0]\n # fold the gcd across the whole list, starting from the first element\n gcd = lst[0]\n for i in range(1, len(lst)):\n gcd = math.gcd(gcd, lst[i])\n return gcd\n\n\nn = int(input())\na = list(map(int, input().split()))\nprint(multi_gcd(a))\n","sub_path":"algorithm/gcm.py","file_name":"gcm.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"236265974","text":"import argparse\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport yaml\n\nfrom lerot.query import load_queries\nfrom lerot.utils import get_class\n\n\nclass GenericExperiment:\n def __init__(self, args_str=None):\n # parse arguments\n parser = argparse.ArgumentParser(description=\"\"\"\n Construct and run a learning experiment. Provide either the name\n of a config file from which the experiment configuration is\n read, or provide all arguments listed under Command line. If\n both are provided the config file is ignored.\"\"\",\n prog=self.__class__.__name__)\n\n # option 1: use a config file\n file_group = parser.add_argument_group(\"FILE\")\n file_group.add_argument(\"-f\", \"--file\", help=\"Filename of the config \"\n \"file from which the experiment details\"\n \" should be read.\")\n\n # option 2: specify all experiment details as arguments\n detail_group = parser.add_argument_group(\"DETAILS\")\n detail_group.add_argument(\"-i\", \"--training_queries\",\n help=\"File from which to load the training queries (svmlight \"\n \"format).\")\n detail_group.add_argument(\"-j\", \"--test_queries\",\n help=\"File from which to load the test queries (svmlight format).\")\n detail_group.add_argument(\"-c\", \"--feature_count\", type=int,\n help=\"The number of features included in the data.\")\n detail_group.add_argument(\"-r\", \"--num_runs\", type=int,\n help=\"Number of runs (how many times to repeat the experiment).\")\n detail_group.add_argument(\"-q\", \"--num_queries\", type=int,\n help=\"Number of queries in each run.\")\n detail_group.add_argument(\"-u\", \"--user_model\",\n help=\"Class implementing a user model.\")\n detail_group.add_argument(\"-v\", \"--user_model_args\",\n help=\"Arguments for initializing the user model.\")\n # the retrieval system maintains ranking functions, accepts queries and\n # generates result lists, and in return receives user clicks to learn\n # from\n detail_group.add_argument(\"-s\", \"--system\",\n help=\"Which system to use (e.g., pairwise, listwise).\")\n detail_group.add_argument(\"-a\", \"--system_args\", help=\"Arguments for \"\n \"the system (comparison method, learning \"\n \"algorithm and parameters...).\")\n detail_group.add_argument(\"-o\", \"--output_dir\",\n help=\"(Empty) directory for storing output generated by this\"\n \" experiment. Subdirectory for different folds will be generated\"\n \" automatically.\")\n detail_group.add_argument(\"--output_dir_overwrite\", default=\"False\")\n detail_group.add_argument(\"-p\", \"--output_prefix\",\n help=\"Prefix to be added to output filenames, e.g., the name of \"\n \"the data set, fold, etc. 
Output files will be stored as \"\n \"OUTPUT_DIR/PREFIX-RUN_ID.txt\")\n detail_group.add_argument(\"-e\", \"--experimenter\", help=\"Experimenter type.\")\n detail_group.add_argument(\"-sd\", \"--seed\", type=int)\n # run the parser\n if args_str:\n args = parser.parse_known_args(args_str.split())[0]\n else:\n args = parser.parse_known_args()[0]\n\n # determine whether to use config file or detailed args\n self.experiment_args = None\n self.args_file = args.file\n if args.file:\n config_file = open(args.file)\n self.experiment_args = yaml.load(config_file)\n config_file.close()\n # overwrite with command-line options if given\n for arg, value in vars(args).items():\n if value:\n self.experiment_args[arg] = value\n else:\n self.experiment_args = vars(args)\n\n # workaround - check if we have all the arguments needed\n if not (\"training_queries\" in self.experiment_args and\n \"test_queries\" in self.experiment_args and\n \"feature_count\" in self.experiment_args and\n \"num_runs\" in self.experiment_args and\n \"num_queries\" in self.experiment_args and\n \"user_model\" in self.experiment_args and\n \"user_model_args\" in self.experiment_args and\n \"system\" in self.experiment_args and\n \"system_args\" in self.experiment_args and\n \"output_dir\" in self.experiment_args):\n parser.print_help()\n sys.exit(\"Missing required arguments, please check the program\"\n \" arguments or configuration file. %s\" %\n self.experiment_args)\n\n # set default values for optional arguments\n if \"query_sampling_method\" not in self.experiment_args:\n self.experiment_args[\"query_sampling_method\"] = \"random\"\n if \"output_dir_overwrite\" not in self.experiment_args:\n self.experiment_args[\"output_dir_overwrite\"] = False\n if \"experimenter\" not in self.experiment_args:\n self.experiment_args[\"experimenter\"] = \\\n \"experiment.LearningExperiment.LearningExperiment\"\n if \"evaluation\" not in self.experiment_args:\n self.experiment_args[\"evaluation\"] = \"evaluation.NdcgEval\"\n if \"processes\" not in self.experiment_args:\n self.experiment_args[\"processes\"] = 0\n if \"seed\" not in self.experiment_args:\n np.random.seed(42)\n else:\n np.random.seed(self.experiment_args['seed'])\n\n # locate or create directory for the current fold\n if not os.path.exists(self.experiment_args[\"output_dir\"]):\n os.makedirs(self.experiment_args[\"output_dir\"])\n elif not (self.experiment_args[\"output_dir_overwrite\"]) and \\\n os.listdir(self.experiment_args[\"output_dir\"]):\n # make sure the output directory is empty\n raise Exception(\n \"Output dir %s is not an empty directory. 
Please\"\n \" use a different directory, or move contents out of the way.\"\n % self.experiment_args[\"output_dir\"])\n\n logging.basicConfig(format='%(levelname)s %(module)s %(asctime)s: %(message)s',\n level=logging.INFO)\n logging.info(\"Arguments: %s\" % self.experiment_args)\n\n # Printing out arguments that are used in execution\n for k, v in sorted(self.experiment_args.items()):\n logging.info(\"\\t%s: %s\" % (k, v))\n config_bk = os.path.join(self.experiment_args[\"output_dir\"],\n \"config_bk.yml\")\n logging.info(\"Backing up configuration to: %s\" % config_bk)\n with open(config_bk, \"w\") as config_bk_file:\n yaml.dump(self.experiment_args, config_bk_file, default_flow_style=False)\n\n # load training and test queries\n training_file = self.experiment_args[\"training_queries\"]\n test_file = self.experiment_args[\"test_queries\"]\n self.feature_count = self.experiment_args[\"feature_count\"]\n logging.info(\"Loading training data: %s \" % training_file)\n self.training_queries = load_queries(training_file, self.feature_count)\n logging.info(\"... found %d queries.\" %\n self.training_queries.get_size())\n logging.info(\"Loading test data: %s \" % test_file)\n self.test_queries = load_queries(test_file, self.feature_count)\n logging.info(\"... found %d queries.\" % self.test_queries.get_size())\n\n # initialize and run the experiment num_run times\n self.num_runs = self.experiment_args[\"num_runs\"]\n self.output_dir = self.experiment_args[\"output_dir\"]\n self.output_prefix = self.experiment_args[\"output_prefix\"]\n self.experimenter = get_class(self.experiment_args[\"experimenter\"])\n\n def run(self):\n if self.experiment_args[\"processes\"] > 1:\n from multiprocessing import Pool\n pool = Pool(processes=self.experiment_args[\"processes\"])\n results = [\n pool.apply_async(self._run, (run_count,))\n for run_count in range(self.num_runs)\n ]\n pool.close()\n pool.join()\n for result in results:\n logging.info(\"Ready: {}\".format(result.ready()))\n logging.info(\"Successful: {}\".format(result.successful()))\n return [result.get() for result in results]\n else:\n # Run the experiment num_runs times and return the list of results\n return [self._run(run_id) for run_id in range(self.num_runs)]\n\n def _run(self, run_id):\n logging.info(\"run %d starts\" % run_id)\n aux_log_file = os.path.join(self.output_dir, \"_%s-%d.txt\" %\n (self.output_prefix, run_id))\n with open(aux_log_file, \"w\") as aux_log_fh:\n # Returns summary after running an experiment\n summarized_experiment = self.run_experiment(aux_log_fh)\n\n # Setup result log file\n log_file = os.path.join(self.output_dir, \"%s-%d.txt\" %\n (self.output_prefix, run_id))\n with open(log_file, \"w\") as log_fh:\n # todo: switch to csv\n yaml.dump(summarized_experiment, log_fh, default_flow_style=False)\n logging.info(\"run %d done \\n\" % run_id)\n\n return summarized_experiment\n\n def run_experiment(self, aux_log_fh):\n # Run an experiment with given parameters\n experiment = self.experimenter(self.training_queries, self.test_queries, self.feature_count, aux_log_fh,\n self.experiment_args)\n\n return experiment.run()\n","sub_path":"lerot/experiment/GenericExperiment.py","file_name":"GenericExperiment.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317196112","text":"import logging\nimport fcntl\nimport os\nimport re\nimport select\nimport signal\nimport subprocess\nimport sys\nimport threading\n\nfrom typing import List, 
Optional\n\nfrom . import linux\n\n\nclass QemuOutputProxy:\n quiet: bool\n linux_has_started: bool\n buffer: bytes\n\n def __init__(self):\n self.quiet = False\n self.buffer = b''\n self.linux_has_started = False\n\n def silence(self):\n self.quiet = True\n\n def start(self, proc_handle: 'subprocess.Popen[bytes]'):\n assert(proc_handle.stdout is not None)\n\n # Set the stdout stream to be non-blocking, as we don't want to read in chunks\n stdout_handle = proc_handle.stdout\n fl = fcntl.fcntl(stdout_handle, fcntl.F_GETFL)\n fcntl.fcntl(stdout_handle, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n # In order to avoid trashing the console with ANSI escape sequences\n # from grub/whatever is running before the kernel, we wait for output\n # that looks like a timestamp before we start actually proxying anything\n timestamp_matcher = re.compile(br'\\[[ \\d]+\\.\\d+\\]')\n\n while not self.quiet:\n ready, _, err = select.select([stdout_handle], [], [])\n\n if len(err) > 0:\n raise RuntimeError(\"Error during select in QemuOutputProxy\")\n\n ready_handle = ready[0]\n\n # Just read whatever is available\n raw_content = ready_handle.read()\n\n # In theory this should never happen due to the select, but just to\n # be safe\n if raw_content is None:\n continue\n\n if self.linux_has_started is False:\n combined = self.buffer + raw_content\n position = timestamp_matcher.search(combined)\n if position is not None:\n self.linux_has_started = True\n\n # Strip everything in the buffer before the match\n raw_content = combined[position.span()[0]:]\n self.buffer = b''\n else:\n self.buffer = combined\n continue\n\n # We cannot hold the print lock, because this thread may die at any\n # point (for example, if a user ctrl-c's). When that happens, if\n # we hold the print lock, the main thread will crash in an ugly way.\n sys.stdout.buffer.write(raw_content)\n sys.stdout.buffer.flush()\n\n # We are no longer proxying, so close stdout.\n proc_handle.stdout.close()\n\n\nclass QemuRunner:\n bin_name: str\n args: List[str]\n quiet: bool\n proxy: Optional[QemuOutputProxy]\n\n # As far as I can tell, this _has_ to be quoted. Otherwise, it will\n # fail at runtime because I guess something is actually run here and\n # subprocess.Popen is not actually subscriptable.\n proc_handle: 'Optional[subprocess.Popen[bytes]]'\n\n def __init__(self, args: List[str], *, bin_name: Optional[str] = None,\n quiet: bool = False, silenceable: bool = False) -> None:\n self.bin_name = bin_name or self.__find_qemu_bin_name()\n self.quiet = quiet\n self.args = args\n self.proc_handle = None\n self.proxy = None\n\n if silenceable is True:\n self.proxy = QemuOutputProxy()\n\n def __find_qemu_bin_name(self) -> str:\n return 'qemu-system-x86_64'\n\n def start(self) -> None:\n logging.info(\"Starting qemu process '{}' with arguments '{}'\".format(\n self.bin_name, self.args))\n\n # By default, perform no redirection\n stdin, stdout, stderr = None, None, None\n\n if self.quiet is True:\n stdin, stdout, stderr = subprocess.DEVNULL, subprocess.DEVNULL, None\n if self.proxy is not None:\n stdin, stdout, stderr = subprocess.DEVNULL, subprocess.PIPE, None\n\n self.proc_handle = subprocess.Popen(\n [self.bin_name] + self.args,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n\n # Automatically send SIGTERM to this process when the main Transient\n # process dies\n preexec_fn=lambda: linux.set_death_signal(signal.SIGTERM))\n\n if self.proxy is not None:\n # Start and drop this reference. 
Because the thread is a daemon it will\n # be killed when python dies\n thread = threading.Thread(target=self.proxy.start, args=(self.proc_handle,))\n thread.daemon = True\n thread.start()\n\n def silence(self):\n if self.quiet is True:\n return\n elif self.proxy is not None:\n self.proxy.silence()\n else:\n raise RuntimeError(\"Attempt to silence QemuRunner that is not silenceable\")\n\n def wait(self) -> int:\n if self.proc_handle is None:\n raise RuntimeError(\"QemuRunner cannot wait without being started\")\n\n logging.info(\"Waiting for qemu process to terminate\")\n self.proc_handle.wait()\n return self.proc_handle.returncode\n\n def terminate(self) -> None:\n if self.proc_handle is None:\n raise RuntimeError(\"QemuRunner cannot terminate without being started\")\n self.proc_handle.terminate()\n\n def kill(self) -> None:\n if self.proc_handle is None:\n raise RuntimeError(\"QemuRunner cannot be killed without being started\")\n self.proc_handle.kill()\n\n def returncode(self) -> int:\n if self.proc_handle is None:\n raise RuntimeError(\"QemuRunner cannot get a returncode without being started\")\n elif self.proc_handle.poll() is None:\n raise RuntimeError(\"QemuRunner cannot get a returncode without being exited\")\n else:\n return self.proc_handle.returncode\n","sub_path":"transient/qemu.py","file_name":"qemu.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430285747","text":"# -*- coding: utf-8 -*-\nimport tweepy\nimport json\nimport csv\n\nimport tweepbotconfig as config\n\n#Credentials for Tweepy\nCONSUMER_KEY = config.data['CONSUMER_KEY']\nCONSUMER_SECRET = config.data['CONSUMER_SECRET']\nACCESS_TOKEN = config.data['ACCESS_TOKEN']\nACCESS_SECRET = config.data['ACCESS_SECRET']\nBITLY_KEY = config.data['BITLY_KEY']\n\n\ndef get_tweets(api, screen_name):\n\t#http://docs.tweepy.org/en/v3.5.0/api.html#timeline-methods\n\t#API.user_timeline([id/user_id/screen_name][, since_id][, max_id][, count][, page])\n\t#structure of status object https://gist.github.com/dev-techmoe/ef676cdd03ac47ac503e856282077bf2\n\ttweets = []\n\tnew_tweets = api.user_timeline(screen_name, count=200, tweet_mode=\"extended\")\n\tif not new_tweets:\n\t\treturn 1\n\toldest = new_tweets[-1].id - 1\n\ttweets.extend(new_tweets)\n\n\t#save and remember last tweet id\n\t#next query max_id = last tweet id - 1\n\t#keep grabbing tweets until there are no tweets left to grab\n\t#https://gist.github.com/yanofsky/5436496\n\twhile True:\n\n\t\t#all subsequent requests use the max_id param to prevent duplicates\n\t\tnew_tweets = api.user_timeline(\n\t\t\tscreen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\t\tif len(new_tweets) == 0:\n\t\t\tbreak\n\t\t#save most recent tweets\n\t\ttweets.extend(new_tweets)\n\n\t\t#update the id of the oldest tweet less one\n\t\toldest = tweets[-1].id - 1\n\n\t#transform the tweepy tweets into a 2D array that will populate the csv\n\touttweets = [[tweet.id_str, tweet.created_at,\n\t\t\t (' '.join(tweet.full_text.splitlines())).lower()] for tweet in tweets]\n\n\t#write the csv\n\t#with open('{}_tweets.json'.format(screen_name), 'w+', encoding='utf-8') as f:\n\t\t#json.dump(outtweets, f)\n\twith open('{}_output.tsv'.format(screen_name),'w+', encoding='utf-8', newline='') as f:\n\t\twriter = csv.writer(f, delimiter='\\t')\n\t\twriter.writerow(['id', 'created_at', 'full_text'])\n\t\twriter.writerows(outtweets)\n\n\nif __name__ == '__main__':\n\t#authentication\n\tauth = 
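`QemuRunner.start` above passes `preexec_fn=lambda: linux.set_death_signal(signal.SIGTERM)` so the QEMU child is terminated when the main Transient process dies. The project's `linux` helper is not shown in this file; the sketch below is an *assumed* equivalent using the Linux-only `prctl(PR_SET_PDEATHSIG)` call via ctypes, not the module's actual code:

```python
# Hedged sketch of what a set_death_signal helper typically does on
# Linux: ask the kernel to deliver a signal to this process when its
# parent dies. Assumed equivalent of linux.set_death_signal.
import ctypes
import signal
import subprocess

PR_SET_PDEATHSIG = 1  # from <sys/prctl.h>

def set_death_signal(sig):
    libc = ctypes.CDLL("libc.so.6", use_errno=True)
    if libc.prctl(PR_SET_PDEATHSIG, sig, 0, 0, 0) != 0:
        raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")

proc = subprocess.Popen(
    ["sleep", "60"],
    # preexec_fn runs in the child between fork and exec, so the flag
    # applies to the spawned process only.
    preexec_fn=lambda: set_death_signal(signal.SIGTERM),
)
```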
tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n\tauth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\tapi = tweepy.API(auth)\n\t#pass in screen name\n\tname_list = ['sewnsew1515', 'JosieBliss_']\n\tfor name in name_list:\n\t\tget_tweets(api, name)\n","sub_path":"scraping/twitterscrapper.py","file_name":"twitterscrapper.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77324538","text":"import collections\r\nimport operator\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport pickle\r\nimport datetime\r\nimport time\r\n\r\n# Special vocabulary symbols - we always put them at the start.\r\n_PAD = b\"_PAD\"\r\n_GO = b\"_GO\"\r\n_EOS = b\"_EOS\"\r\n_UNK = b\"_UNK\"\r\n# _START_VOCAB = [_PAD, _GO, _EOS, _UNK]\r\n_START_VOCAB_ENC = [_PAD, _EOS, _UNK]\r\n_START_VOCAB_DEC = [_PAD, _EOS, _UNK, _GO]\r\n\r\n# PAD_ID = 0\r\n# EOS_ID = 1\r\n# UNK_ID = 2\r\n# GO_ID = 3\r\n\r\n# Regular expressions used to tokenize.\r\n_WORD_SPLIT = re.compile(b\"([.,!?\\\"':;)(])\")\r\n\r\n\r\nvocab_list_file = \"../output/rnn_vocab_list_file.p\"\r\n\r\ndef _basic_tokenizer(sentence):\r\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\r\n words = []\r\n for space_separated_fragment in sentence.strip().split():\r\n words.extend(_WORD_SPLIT.split(space_separated_fragment))\r\n return [w for w in words if w]\r\n\r\ndef _compute_vocab_size_stats(sorted_vocab_counts, vocab_size):\r\n pass\r\n\r\ndef _build_vocab(lines, vocab_size, source, tokenizer=None):\r\n tokens = tokenizer(lines) if tokenizer else _basic_tokenizer(lines)\r\n\r\n vocab_counts = {}\r\n for w in tokens:\r\n w = str(w).strip()\r\n if w in vocab_counts:\r\n vocab_counts[w] += 1\r\n else:\r\n vocab_counts[w] = 1\r\n\r\n # Write word dsitributions to a file\r\n date_str = datetime.datetime.now().strftime(\"%d_%m_%Y\")\r\n\r\n sorted_vocab_counts = sorted(list(vocab_counts.items()), key=operator.itemgetter(1), reverse=True)\r\n print(\"======================================================\")\r\n print(\"Source type: \", str(source))\r\n print(\"Actual Vocab Size: \", len(sorted_vocab_counts))\r\n\r\n # import pdb; pdb.set_trace()\r\n\r\n with open(\"vocab_data/\"+str(source)+\"_vocab_count_distribution_first_response_jan_2018_p1_\" + date_str +\".tsv\", \"w\") as f:\r\n for word, cnt in sorted_vocab_counts:\r\n f.write(word + \":\" + str(cnt) + '\\n')\r\n\r\n _compute_vocab_size_stats(sorted_vocab_counts, vocab_size)\r\n\r\n if source == 'user':\r\n word_order = _START_VOCAB_ENC + sorted(vocab_counts, key=vocab_counts.get, reverse=True)\r\n elif source == 'agent':\r\n word_order = _START_VOCAB_DEC + sorted(vocab_counts, key=vocab_counts.get, reverse=True)\r\n\r\n if vocab_size is not None:\r\n if len(word_order) > vocab_size:\r\n word_order = word_order[:vocab_size]\r\n else:\r\n print(\"Vocab Size is higher than the num unique words in the file \")\r\n print(\"Unique words: \", len(word_order))\r\n print(\"Vocab size: \", vocab_size)\r\n\r\n word_2_id_dict = dict([(x, y) for (y, x) in enumerate(word_order)])\r\n id_2_word_dict = dict([(y, x) for (y, x) in enumerate(word_order)])\r\n print(\"Word2ID Dict Len: \", len(word_2_id_dict))\r\n print(\"ID2Word Dict Len \", len(id_2_word_dict))\r\n\r\n # pickle.dump([word_order, word_2_id_dict, id_2_word_dict],open(\"data/dictionary_\"+ date_str +\"_dds_ddc_ce.p\", \"wb\"))\r\n pickle.dump([word_order, word_2_id_dict, 
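The hand-rolled counting loop in `_build_vocab` above is equivalent to `collections.Counter`; this toy sketch mirrors its sort-by-frequency, prepend-special-tokens, and truncate-to-`vocab_size` steps (data and size here are made up):

```python
# Counter-based equivalent of the frequency loop in _build_vocab.
from collections import Counter

_START_VOCAB = ["_PAD", "_EOS", "_UNK"]

def build_vocab(tokens, vocab_size=None):
    counts = Counter(tokens)
    # most_common() sorts by descending count; ties keep first-seen
    # order on Python 3.7+, much like sorted(..., key=counts.get).
    word_order = _START_VOCAB + [w for w, _ in counts.most_common()]
    if vocab_size is not None:
        word_order = word_order[:vocab_size]
    return word_order

tokens = "the cat sat on the mat the end".split()
print(build_vocab(tokens, vocab_size=5))
# ['_PAD', '_EOS', '_UNK', 'the', 'cat']
```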
id_2_word_dict],open(\"vocab_data/\"+str(source)+\"_first_response_jan_2018_p1_\"+ date_str +\".p\", \"wb\"))\r\n print(\"======================================================\")\r\n\r\ndef main(file_name, col_sep=\" +++$+++ \", combined_dict=True, vocab_size_enc=None, vocab_size_dec=None):\r\n\r\n user_txt = \"\"\r\n agent_txt = \"\"\r\n\r\n with open(file_name, 'r') as read_handler:\r\n for line in read_handler:\r\n cols = line.split(col_sep)\r\n user_txt = user_txt + \" \" + str(cols[2])\r\n agent_txt = agent_txt + \" \" + str(cols[3])\r\n\r\n if combined_dict:\r\n total_txt = user_txt + \" \" + agent_txt\r\n _build_vocab(total_txt, vocab_size)\r\n\r\n else:\r\n _build_vocab(user_txt, vocab_size_enc, source='user')\r\n _build_vocab(agent_txt, vocab_size_dec, source='agent')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start_time = time.time()\r\n\r\n VOCAB_SIZE_ENC = None\r\n VOCAB_SIZE_DEC = None\r\n # VOCAB_SIZE_ENC = 1687 # (USER) Words with at least a frequency of 3\r\n # VOCAB_SIZE_DEC = 1627 # (AGENT) Words with at least a frequency of 3\r\n\r\n file_name = \"../data/first-response-jan-2018-processed-p1.txt\"\r\n # file_name = \"data/first-response-jan-2018-processed-p1-p2-train.txt\"\r\n main(file_name, col_sep=\" +++$+++ \", combined_dict=False, vocab_size_enc = VOCAB_SIZE_ENC, vocab_size_dec = VOCAB_SIZE_DEC)\r\n\r\n print(\"Script Time: \", round(time.time()-start_time, 0), ' Seconds')\r\n","sub_path":"one-turn-predict-v1-master/seq2seq_with_attention/helper_scripts/build_vocabulary.py","file_name":"build_vocabulary.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455006138","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport pandas as pd\n\n#consumer key, consumer secret, access token, access secret.\nckey=\"QIqgjITOfksfMW4lRLDacQ\"\ncsecret=\"R8x0xN9iSKXGNxUtGKA2hgnlIhh5INZIOdgEfxzk\"\natoken=\"1401204486-BeLUAuruh294KeJX8NXvdqjCeZOQcLl6HWmMlgA\"\nasecret=\"pwjiLF42TbORaXtkCS5Oc24qywOU0eFN0esVcibA\"\n\nclass listener(StreamListener):\n\n def on_data(self, data):\n print(data)\n return(True)\n\n def on_error(self, status):\n print(status)\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\ntwitterStream = Stream(auth, listener())\ntwitterStream.filter(track=[\"Madrid\"])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478381140","text":"from flask import Flask,request\n\napp=Flask(__name__)\n\n@app.route(\"/\", methods=['GET','POST'])\ndef form():\n formularz=\"\"\"\n <form action=\"/\" method=\"POST\">\n <label>Wpisz zdanie: </label>\n <input type=\"text\" name=\"sentence\"/>\n <button type=\"submit\">Podaj długość</button>\n </form>\n \"\"\"\n if request.method==\"POST\":\n z=str(request.form['sentence'])\n words = z.split()\n words.sort()\n\n # # display the sorted words\n #\n # print(\"The sorted words are:\")\n # for word in words:\n # print(word)\n\n return \"Wyrazy w kolejności alfabetycznej: {}\".format(str(words))\n else:\n return formularz\n\napp.run(debug=True)","sub_path":"ex21.py","file_name":"ex21.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318980604","text":"\"\"\"\nID: sashrik1\nLANG: PYTHON3\nTASK: race\n\"\"\"\n\nfin = 
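`_build_vocab` finishes by pickling `word_order` together with the two lookup tables, which are inverses of each other. A toy round trip showing how such tables are typically consumed, with `_UNK` as the out-of-vocabulary fallback (the vocabulary here is illustrative):

```python
# Word <-> id round trip over a fixed word_order, mirroring the
# word_2_id_dict / id_2_word_dict construction above.
word_order = ["_PAD", "_EOS", "_UNK", "the", "cat", "sat"]
word_2_id = {w: i for i, w in enumerate(word_order)}
id_2_word = {i: w for i, w in enumerate(word_order)}
UNK_ID = word_2_id["_UNK"]

def encode(words):
    return [word_2_id.get(w, UNK_ID) for w in words]

def decode(ids):
    return [id_2_word[i] for i in ids]

ids = encode("the cat sat on".split())
print(ids)          # [3, 4, 5, 2]  ('on' falls back to _UNK)
print(decode(ids))  # ['the', 'cat', 'sat', '_UNK']
```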
open('race.in', 'r')\nfout = open('race.out', 'w')\n\ninput_list = []\nfor i in fin:\n input_list.append(i.split('\\n')[0])\ntotal_meters = int(input_list[0].split(' ')[0])\nnum_tests = int(input_list[0].split(' ')[1]) # number values of x to be tested\nmax_speed_list = []\ni = 1\nwhile i < len(input_list):\n max_speed_list.append(int(input_list[i]))\n i += 1\nlaps_list = []\n\nx_count = 0\n# change 1 to num_tests\nwhile x_count < 1:\n laps = 1\n speed = 1\n meters_rem = total_meters - 1\n max_speed = max_speed_list[x_count]\n while meters_rem > 0:\n if speed < max_speed:\n speed += 1\n meters_rem -= speed\n laps += 1\n else:\n if (speed-max_speed)*2 < meters_rem:\n speed += 1\n meters_rem -= speed\n laps += 1\n else:\n speed -= 1\n meters_rem -= speed\n laps += 1\n print(speed)\n\n laps_list.append(laps)\n print(laps)\n x_count += 1\n\nprint(laps_list)\n\n# print(total_meters)\n# print(num_tests)\n# print(max_speed_list)\n\n# fin = open('race.in', 'r')\n# fout = open('race.out', 'w')\n\n\n\n","sub_path":"Jan2020_bronze/race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501408928","text":"'''documentation: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n'''\nfrom __future__ import print_function\nimport pickle\nimport os\nimport subprocess\nimport pandas as pd\nimport numpy as np\nimport pydotplus \nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nfrom IPython.display import Image\nfrom sklearn.externals.six import StringIO\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\nfrom sklearn import svm\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\n\n# get data from absolute path\ndef get_data():\n\tdf = pd.read_csv('data/limited_rel_matriz.csv', index_col=0)\n\treturn df\n\ndf = get_data()\n\n# head and tail\nprint('* df.head()', df.head(), sep='\\n', end='\\n\\n')\nprint('* df.head()', df.head(), sep='\\n', end='\\n\\n')\n\n# use pandas to show the relation types\nprint('* relation types:', df['parent_rel_EDU1'].unique(), sep='\\n')\n\n# delete unnecessary columns with strings and other irrelevant information\ndel df['spk_EDU1']\n\ndel df['conv_EDU1']\ndel df['parent_rel_EDU2']\ndel df['spk_EDU2']\ndel df['conv_EDU2']\ndel df['starttime_EDU2']\ndel df['parent.id_EDU2']\ndel df['endtime_EDU2']\ndel df['nwords_EDU1']\ndel df['nwords_EDU2']\n\n# Preprocessing: to pass data into scikit-learn, encode relation_types to integers\n# write function and return modified data frame and list of class names\n# maps target names to numbers according to the order they appear in df\ndef encode_target(df, target_column):\n\t\"\"\"Add column to df with integers for the target.\n\n\tArgs\n\t----\n\tdf -- pandas DataFrame.\n\ttarget_column -- column to map to int, producing\n\t\t new Target column.\n\n\tReturns\n\t-------\n\tdf_mod -- modified DataFrame.\n\ttargets -- list of target names.\n\t\"\"\"\n\tdf_mod = df.copy()\n\ttargets = df_mod[target_column].unique()\n\tmap_to_int = {name: n for n, name in enumerate(targets)} #???\n\tdf_mod['Target'] = df_mod[target_column].replace(map_to_int)\n\t\n\treturn (df_mod, targets)\n\n# show name and target column\ndf2, targets = encode_target(df, 'parent_rel_EDU1')\nprint('* df2.head()', df2[['Target', 
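`encode_target` in svm.py above assigns each relation label the integer of its first appearance and writes it to a new `Target` column. A tiny self-contained demonstration on made-up labels:

```python
# Demonstration of the encode_target mapping on invented labels.
import pandas as pd

df = pd.DataFrame({"parent_rel_EDU1": ["elab", "contrast", "elab", "cause"]})
targets = df["parent_rel_EDU1"].unique()           # ['elab', 'contrast', 'cause']
map_to_int = {name: n for n, name in enumerate(targets)}
df["Target"] = df["parent_rel_EDU1"].replace(map_to_int)
print(df)
#   parent_rel_EDU1  Target
# 0            elab       0
# 1        contrast       1
# 2            elab       0
# 3           cause       2
```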
'parent_rel_EDU1']].head(),\n sep=\"\\n\", end='\\n\\n')\nprint('* df2.tail()', df2[['Target', 'parent_rel_EDU1']].tail(),\n sep='\\n', end='\\n\\n')\nprint('* targets', targets, sep='\\n', end='\\n\\n')\n\n# get the names of the feature columns\nfeatures = list(df2.columns[7:])\nfeatures.remove('Target')\n\n# Preparo datos para clasificar. X = features, y = class\ny = df2['Target'] #target\nX = df2[features] #data\n\ntotal = y.shape[0]\nprint(y.value_counts())\nfor i in y.value_counts():\n\tprint(100*(float(i)/float(total)))\n\n# Binarize the output\ny = label_binarize(y, classes=[0,1,2,3,4,5,6,7,8,9,10,11])\nn_classes = y.shape[1]\n\t\n# Split into training and test set (e.g., 80/20)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\n\n# Learn to predict each class against the other\nclf = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, verbose = 1))\n\ny_score = clf.fit(X_train, y_train).decision_function(X_test)\n\n# Compute ROC curve and ROC area for each class \nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(n_classes):\n\tfpr[i], tpr[i], _ = roc_curve(y_test[:, 1], y_score[:, 1])\n\troc_auc[i] = auc(fpr[i], tpr[i])\n\n# Compute micro-average ROC curve and ROC area\nfpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\nroc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n\n\n# Plot ROC curves\n\n# Compute macro-average ROC curve and ROC area\n\n# First aggregate all false positive rates\nall_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\nlw = 2 #?????????????\n\n# Then interpolate all ROC curves at this points\nmean_tpr = np.zeros_like(all_fpr)\nfor i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n# Finally average it and compute AUC\nmean_tpr /= n_classes\n\nfpr[\"macro\"] = all_fpr\ntpr[\"macro\"] = mean_tpr\nroc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n# Plot all ROC curves\nplt.figure()\nplt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\nplt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\ncolors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\nfor i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(i, roc_auc[i]))\n\nplt.plot([0, 1], [0, 1], 'k--', lw=lw)\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Some extension of Receiver operating characteristic to multi-class')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n\n\n\n\n\nwith open(\"final_models_svm.p\", \"w\") as f:\n\tpickle.dump(models, f)\n\n# models = pickle.load(open(\"final_models1.p\"))\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625872176","text":"# Author: Fernando Zuher\n# Place: Brazil\n# Date: February 2020\n# Book: Python Crash Course, 2nd Edition. Author: ERIC MATTHES.\n# About: Exercise, Chapter 5 - if Statements\n\n# 5-8. Hello Admin: Make a list of five or more usernames, including the name\n# 'admin'. 
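Two genuine problems in the ROC section of svm.py just shown: the per-class loop indexes column `1` instead of `i`, so every "class i" curve is really the class-1 curve, and `cycle` is used in the plotting loop without ever being imported. (The final `pickle.dump(models, f)` also references an undefined `models` and should open the file in binary mode.) A corrected fragment for the loop — it assumes `n_classes`, `y_test`, and `y_score` from the surrounding script:

```python
# Corrected per-class ROC loop: index column i, not the hard-coded 1.
from itertools import cycle  # needed later by the colour loop
from sklearn.metrics import roc_curve, auc

fpr, tpr, roc_auc = {}, {}, {}
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
```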
Imagine you are writing code that will print a greeting to each user\n# after they log in to a website. Loop through the list, and print a greeting to\n# each user:\n\n# • If the username is 'admin' , print a special greeting, such as Hello admin,\n# would you like to see a status report?\n# • Otherwise, print a generic greeting, such as Hello Jaden, thank you for\n# logging in again.\n\nusernames = ['matheus', 'marcos', 'joão', 'paulo', 'pedro', 'admin']\n\nfor username in usernames:\n\tif username != 'admin':\n\t\tprint(f\"Hello {username.title()}, thank you for logging in again.\\n\")\n\telse:\n\t\tprint(f\"Hello {username}, would you like to see a status report?\\n\")\n","sub_path":"Python/Chapters/Part I - Basics/Chapter 5 - if Statements/5_8_hello_admin.py","file_name":"5_8_hello_admin.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66830721","text":"from hype.cloud.cloudprovider import CloudProvider\nfrom boto import ec2\n\n\nclass AmazonEC2(CloudProvider):\n def __init__(self, cfg):\n CloudProvider.__init__(self, cfg)\n self.conn = ec2.connect_to_region(str(self.cfg[\"region\"]),\n aws_access_key_id=str(self.cfg[\"access-key-id\"]),\n aws_secret_access_key=str(self.cfg[\"secret-access-key\"]))\n\n def __del__(self):\n self.conn.close()\n\n def get_started_nodes(self):\n running = []\n for i in self.pending:\n try:\n i.update()\n except boto.exception.EC2ResponseError:\n continue\n for node in [i for i in self.pending if i.state == \"running\"]:\n self.pending.remove(node)\n running.append(node)\n self.running.extend(running)\n return [i.public_dns_name for i in running]\n\n def start_nodes(self, n, bootstrap):\n if not n: return []\n reservation = self.conn.run_instances(self.cfg[\"image-id\"],\n min_count=n, max_count=n, key_name=self.cfg[\"key-pair\"],\n instance_type=self.cfg[\"instance-type\"],\n security_groups=[self.cfg[\"security-group\"]],\n user_data=bootstrap)\n self.pending.extend(reservation.instances)\n\n def stop_nodes(self, n):\n m = min(n, len(self.running))\n if not m: return []\n stopped = self.running[-m:]\n self.conn.terminate_instances([i.id for i in stopped])\n for node in stopped:\n self.running.remove(node)\n return [i.public_dns_name for i in stopped]\n","sub_path":"hype/cloud/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640263614","text":"\"\"\"My module datadicts in my elements module for CS131A:Wostner:Fall2016.\n\nThe first few lines of elements.DATAFILE look like this ...\n\n $ head ~/wostnotes/datafiles/chemical_elements.txt \n # CS131A:Wostner:Fall2016\n # The Chemical Elements, based on Wikipedia information.\n # The data is colon-separated and the headers are on the line below.\n\n symbol:element:atomic_number:density\n\n H:Hydrogen:1:0.00008988\n He:Helium:2:0.0001785\n Li:Lithium:3:0.534\n Be:Beryllium:4:1.85\n\n>>> elements.DATAFILE\n'/users/uwostner/wostnotes/datafiles/chemical_elements.txt'\n\n>>> elements.datareaders.getheaderline(elements.DATAFILE)\n'symbol:element:atomic_number:density'\n\n\n\"\"\"\n\nimport elements\nimport elements.datareaders\n\ndef datafile2dicts(datafile):\n \"\"\"Returns a list with one dict for each data line in datafile.\n\n >>> elements.datareaders.getheaderline(elements.DATAFILE)\n 'symbol:element:atomic_number:density'\n \n >>> mylist = datafile2dicts(elements.DATAFILE)\n\n >>> 
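In amazon.py above, `get_started_nodes` catches `boto.exception.EC2ResponseError`, but the module only does `from boto import ec2`, so entering that handler raises `NameError` instead of skipping the instance. One fix — assuming boto 2.x — is to import the exception class directly; a sketch of the repaired loop:

```python
# Repaired update loop: import the exception explicitly so the except
# clause actually works. Assumes boto 2.x is installed.
from boto.exception import EC2ResponseError

def refresh_instances(pending):
    """Keep only instances whose update() succeeds against the EC2 API."""
    alive = []
    for instance in pending:
        try:
            instance.update()
        except EC2ResponseError:
            continue
        alive.append(instance)
    return alive
```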
type(mylist)\n <class 'list'>\n\n >>> len(mylist)\n 118\n\n >>> type(mylist[0])\n <class 'dict'>\n\n >>> mylist[0] == {'element': 'Hydrogen', 'symbol': 'H', 'atomic_number': 1, 'density': 8.988e-05}\n True\n\n >>> mylist[-1] == {'symbol': 'Uuo', 'atomic_number': 118, 'element': 'Ununoctium', 'density': 5.0}\n True\n \n \"\"\"\n\n # replace the pass below with your own code\n mylist = list()\n\n headerline = elements.datareaders.getheaderline(elements.DATAFILE)\n #print(headerline)\n\n for dataline in open(datafile,'r'):\n line = dataline.strip()\n if (not line or line.startswith('#') or line.startswith('symbol')):\n continue\n else:\n #myline = str(elements.datareaders.dataline2list(line))\n #myline = elements.datareaders.dataline2list(line)\n #print(myline)\n #print(line)\n #print(type(elements.datareaders.dataline2list(line)))\n #print(type(myline))\n mylist.append(elements.datareaders.dataline2dict(line,headerline))\n\n #mylist.append(elements.datareaders.dataline2dict)\n #pass\n return mylist\n #print(mylist)\n #print(type(mylist))\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(optionflags=1068)\n\n","sub_path":"elements/datadicts.py","file_name":"datadicts.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529403808","text":"from .VigenereChiper import VigenereChiper\nfrom .SteganoText import SteganoText\nfrom .Stegano import Stegano\nimport os\n\n## TEXT + IMAGE -> STEGANOIMAGE\ndef put_txt_to_img(data):\n\ttext = os.path.join(data['basepath'],data['txtname'])\n\timg = os.path.join(data['basepath'],data['imgname'])\n\n\tkey = data['key']\n\t\n\t#Vigenere Process\n\tvc = VigenereChiper()\n\tcontent = vc.read(text)\n\tvc.encrypt(content,key)\n\tvc.save(text)\n\n\t## convert content to binary and save to the same file\n\tst = SteganoText()\n\tcontent = st.read(text)\n\tst.save_to_binary(content,text)\n\n\t## put binary content from text file to binary pixels of image file\n\tstegano = Stegano()\n\tstegano.save_text_image(text,img)\n\n## STEGANOIMAGE -> TEXT + IMAGE \ndef get_txt_from_img(data):\n\tkey = data['key']\n\ttext = os.path.join(data['basepath'],data['txtname'])\n\timg = os.path.join(data['basepath'],data['imgname'])\n\n\tstegano = Stegano()\n\tvc = VigenereChiper()\n\tstegano.expand_image(img,text)\n\n\t# # decrypt \n\tcontent = vc.read(text)\n\tvc.decrypt(content,key)\n\tvc.save(text)","sub_path":"server/app/machine/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"578861944","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport multiselectfield.db.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('claim', '0012_claimtype_icon'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='claim',\n name='moderation',\n field=models.CharField(default='not_moderated', choices=[('not_moderated', 'Not moderated'), ('suspicious', 'Suspicious'), ('anonymous', 'From anonymous'), ('valid', 'Moderated')], max_length=50),\n ),\n migrations.RemoveField(\n model_name='moderator',\n name='show_claims',\n ),\n migrations.AddField(\n model_name='moderator',\n name='show_claims',\n field=multiselectfield.db.fields.MultiSelectField(default='not_moderated,suspicious,anonymous,valid', choices=[('not_moderated', 'Not moderated'), ('suspicious', 
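`datafile2dicts` above works, but it is buried in commented-out debug prints, and it always reads the header from `elements.DATAFILE` rather than from its own `datafile` argument. A cleaned-up equivalent, assuming the same course-provided `elements.datareaders` helpers:

```python
# Cleaned-up sketch of datafile2dicts; same skip rules (blank lines,
# '#' comments, and the 'symbol...' header line).
import elements.datareaders

def datafile2dicts(datafile):
    """Return one dict per data line in datafile."""
    headerline = elements.datareaders.getheaderline(datafile)  # use the argument
    dicts = []
    with open(datafile) as fh:
        for raw in fh:
            line = raw.strip()
            if not line or line.startswith(('#', 'symbol')):
                continue
            dicts.append(elements.datareaders.dataline2dict(line, headerline))
    return dicts
```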
'Suspicious'), ('anonymous', 'From anonymous'), ('valid', 'Moderated')], max_length=40),\n ),\n migrations.DeleteModel(\n name='ModerationStatus',\n ),\n ]\n","sub_path":"claim/migrations/0013_auto_20151105_1941.py","file_name":"0013_auto_20151105_1941.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96151522","text":"import wx\nfrom types import ModuleType\nfrom math import ceil\n\nimport Settings\nimport AutoSelfieTaker\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self, var_dict=None, title='', rows=0, columns=2):\n super().__init__(parent=None, title=title, style=wx.STAY_ON_TOP | wx.MAXIMIZE_BOX | wx.CLIP_CHILDREN | wx.CAPTION)\n rows = int(ceil(len(var_dict)))\n self.__buildUI(rows, columns)\n self.__initUI(var_dict)\n self.__show()\n\n\n def __buildUI(self, rows=1, columns=2):\n self.panel = wx.Panel(self)\n self.panel.SetBackgroundColour(wx.Colour(160,200,240))\n self.box_sizer = wx.BoxSizer(wx.VERTICAL) \n self.grid_sizer = wx.FlexGridSizer(rows, columns, 1,1)\n self.grid_sizer.AddGrowableCol(0)\n self.grid_sizer.AddGrowableCol(1)\n self.box_sizer.Add(self.grid_sizer, flag=wx.EXPAND | wx.ALL, border=5)\n button_grid = wx.GridSizer(1, 4, 1, 1)\n self.start_btn = wx.Button(self.panel, label='Start Application')\n self.start_btn.Bind(wx.EVT_BUTTON, self.on_start)\n self.write_btn = wx.Button(self.panel, label='Write Values')\n self.write_btn.Bind(wx.EVT_BUTTON, self.on_write)\n self.reset_btn = wx.Button(self.panel, label='Reset Values')\n self.reset_btn.Bind(wx.EVT_BUTTON, self.on_reset)\n self.close_btn = wx.Button(self.panel, label='Close Application')\n self.close_btn.Bind(wx.EVT_BUTTON, self.on_close)\n button_grid.Add(self.start_btn, flag=wx.EXPAND | wx.ALL, border=5)\n button_grid.Add(self.write_btn, flag=wx.EXPAND | wx.ALL, border=5)\n button_grid.Add(self.reset_btn, flag=wx.EXPAND | wx.ALL, border=5)\n button_grid.Add(self.close_btn, flag=wx.EXPAND | wx.ALL, border=5)\n self.box_sizer.Add(button_grid, flag=wx.EXPAND | wx.ALL, border=5)\n self.panel.SetSizerAndFit(self.box_sizer)\n\n\n def __initUI(self, var_dict):\n offset = len(max(var_dict.keys(), key=len)) * 7\n self.var_dict = var_dict\n self.text_ctrl_dict = dict()\n self.label_value_dict = dict()\n\n for var, val in var_dict.items():\n if isinstance(val, dict):\n if val['_EDITABLE']:\n parent_sizer = wx.BoxSizer(wx.HORIZONTAL)\n dict_sizer = wx.BoxSizer(wx.VERTICAL)\n var_sizer = wx.BoxSizer(wx.VERTICAL)\n val_sizer = wx.BoxSizer(wx.VERTICAL)\n\n label = wx.StaticText(self.panel, label=var)\n variable = wx.Choice(self.panel, choices=[val for val in val.keys() if not val.startswith('_')])\n variable.Insert('Select', 0)\n variable.Append('Add New Range')\n variable.SetSelection(0)\n value = wx.TextCtrl(self.panel, value=val['_PLACEHOLDER'])\n dict_sizer.Add(label, flag=wx.EXPAND | wx.ALL, border=10)\n var_sizer.Add(variable, flag=wx.EXPAND | wx.ALL, border=10)\n val_sizer.Add(value, flag=wx.EXPAND | wx.ALL, border=10)\n\n size = dict_sizer.GetSize().Get()\n dict_sizer.SetMinSize(wx.Size(size[0]+offset, size[1]))\n size = var_sizer.GetSize().Get()\n var_sizer.SetMinSize(wx.Size(size[0]+150, size[1]))\n size = val_sizer.GetSize().Get()\n val_sizer.SetMinSize(wx.Size(size[0]+250, size[1]))\n\n parent_sizer.Add(dict_sizer, flag=wx.EXPAND)\n parent_sizer.Add(var_sizer, flag=wx.EXPAND)\n parent_sizer.Add(val_sizer, flag=wx.EXPAND)\n\n self.grid_sizer.Add(parent_sizer, flag=wx.EXPAND)\n\n self.text_ctrl_dict[value.GetId()] = 
str(val)\n self.label_value_dict[label.GetId()] = value.GetId()\n\n else:\n parent_sizer = wx.BoxSizer(wx.HORIZONTAL)\n var_sizer = wx.BoxSizer(wx.VERTICAL)\n val_sizer = wx.BoxSizer(wx.VERTICAL)\n\n label = wx.StaticText(self.panel, label=var)\n value = wx.TextCtrl(self.panel, value=str(val))\n var_sizer.Add(label, flag=wx.EXPAND | wx.ALL, border=10)\n val_sizer.Add(value, flag=wx.EXPAND | wx.ALL, border=10)\n\n size = var_sizer.GetSize().Get()\n var_sizer.SetMinSize(wx.Size(size[0]+offset, size[1]))\n size = val_sizer.GetSize().Get()\n val_sizer.SetMinSize(wx.Size(size[0]+400, size[1]))\n\n parent_sizer.Add(var_sizer, flag=wx.EXPAND)\n parent_sizer.Add(val_sizer, flag=wx.EXPAND)\n\n self.grid_sizer.Add(parent_sizer, flag=wx.EXPAND)\n\n self.text_ctrl_dict[value.GetId()] = str(val)\n self.label_value_dict[label.GetId()] = value.GetId()\n\n\n def __show(self):\n width = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X)\n height = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)\n dimensions = self.box_sizer.ComputeFittingWindowSize(self).Get()\n pos=((width/2) - (dimensions[0]/2), (height/2) - (dimensions[1]/2))\n self.SetMinSize(self.box_sizer.ComputeFittingWindowSize(self))\n self.SetMaxSize(self.box_sizer.ComputeFittingWindowSize(self))\n self.SetPosition(wx.Point(pos[0], pos[1]))\n # print (self.text_ctrl_dict)\n self.Show()\n\n\n def on_start(self, event):\n self.Hide()\n AutoSelfieTaker.main(True)\n\n\n def on_write(self, event):\n for label_id, value_id in self.label_value_dict.items():\n label = self.FindWindowById(label_id).GetLabel()\n value = self.FindWindowById(value_id).GetValue()\n self.var_dict[label] = value\n\n # print (f'{label} = {value}')\n\n try:\n float(value) \n cmd_str = f'Settings.{label} = {value}'\n except ValueError:\n try:\n cmd_str = f'Settings.{label} = \\\"{value}\\\"'\n except SyntaxError:\n cmd_str = f'Settings.{label} = \\\"{value}\\\"'\n\n exec(cmd_str)\n\n cmd_str = f'print (type(Settings.{label}))'\n cmd_str = f'print (Settings.{label})'\n exec(cmd_str)\n\n print (type(Settings.blur_threshold))\n\n\n def on_reset(self, event):\n for ctrl_id, ctrl_val in self.text_ctrl_dict.items():\n text_ctrl = self.FindWindowById(ctrl_id)\n text_ctrl.SetValue(ctrl_val)\n\n\n def on_close(self, event):\n self.Close(True)\n self.Destroy()\n\n\nif __name__ == '__main__':\n\n variables = {key:value for key, value in vars(Settings).items() if not key.startswith('__') and not isinstance(value, ModuleType)}\n\n app = wx.App()\n frame = MyFrame(variables)\n app.MainLoop()\n","sub_path":"AST_GUI.py","file_name":"AST_GUI.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447924552","text":"#now they are arbitrary values\nCORRELATION_THRESHOLD = 50\nGAUSSIAN_MEAN = 30\nGAUSSIAN_STDEV = 3\n\nfrom CommonDataTypes import *\nfrom scipy import signal\nimport numpy as np\nimport PeakDetection\n\nclass CandidateSelector:\n \"\"\"\n Selects Candidates for later detection using correlation-\n for each point in the tomogram, try all the different templates and tilts, assign a max_correlation to each point.\n apply a blurring transformation to the max_correlation image (to unite close peaks), and then search for peaks\n \"\"\"\n def __init__(self, templates):\n self.templates = templates\n self.kernel = np.outer(signal.gaussian(GAUSSIAN_MEAN, GAUSSIAN_STDEV), signal.gaussian(GAUSSIAN_MEAN, GAUSSIAN_STDEV))\n #these are for debug\n self.max_correlation_per_3loc = None\n self.blurred_correlation_array 
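`on_write` in AST_GUI.py above builds Python source strings and `exec`s them to push edited values back onto the `Settings` module; `setattr` does the same job without code generation or the string-quoting pitfalls. A hedged equivalent of that loop's core, using a stand-in for the real Settings module:

```python
# setattr-based replacement for the exec() calls in on_write().
def write_value(settings_module, name, raw_text):
    try:
        value = float(raw_text)   # numeric fields come back from the UI as text
    except ValueError:
        value = raw_text          # keep everything else as a string
    setattr(settings_module, name, value)

class FakeSettings:               # stand-in for the Settings module in this sketch
    blur_threshold = 0.0

write_value(FakeSettings, "blur_threshold", "12.5")
print(FakeSettings.blur_threshold)  # 12.5
```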
= None\n self.positions = None\n\n def find_local_maxima(self, correlation_array):\n self.blurred_correlation_array = signal.fftconvolve(correlation_array[:,:,0], self.kernel, mode='same')\n res = np.nonzero(PeakDetection.detect_peaks(self.blurred_correlation_array)) #### 2D\n return [(x[0], x[1], 0) for x in zip(res[0], res[1]) if\n self.blurred_correlation_array[x] > CORRELATION_THRESHOLD]\n\n\n def select(self, tomogram):\n \"\"\"\n Find candidates for the template positions using max correlation.\n :param tomogram: The tomogram to search in\n :param debug: If this is set to True, return is a tuple of (orig_return, (correlation_dm, blurred_correlation dm) )\n :return: a list of candidates\n \"\"\"\n self.max_correlation_per_3loc = np.empty(tomogram.density_map.shape)\n for template_tuple in self.templates:\n for tilted in template_tuple:\n #max_correlation_per_3loc is an array representing the maximum on all correlations generated by all the templates and tilts for each 3-position\n self.max_correlation_per_3loc = np.maximum(self.max_correlation_per_3loc, signal.fftconvolve(tomogram.density_map, tilted.density_map, mode='same'))\n\n self.positions = self.find_local_maxima(self.max_correlation_per_3loc)\n return [Candidate(SixPosition(position, None), None) for position in self.positions]\n\n\n\nif __name__ == '__main__':\n\n from TemplateGenerator import generate_tilted_templates\n from TomogramGenerator import generate_tomogram\n import matplotlib.pyplot as plt\n\n templates = generate_tilted_templates()\n tomogram = generate_tomogram(templates, None)\n\n fig, ax = plt.subplots()\n ax.imshow(tomogram.density_map)\n\n correlation = signal.fftconvolve(tomogram.density_map, templates[1][2].density_map, mode='same')\n\n fig, ax = plt.subplots()\n ax.imshow(correlation)\n\n positions = CandidateSelector.find_local_maxima(None, correlation)\n maximums = np.zeros(correlation.shape)\n for position in positions:\n maximums[position] = correlation[position]\n fig, ax = plt.subplots()\n print(len(positions))\n ax.imshow(maximums)\n\n #plt.show()\n","sub_path":"src/CandidateSelector.py","file_name":"CandidateSelector.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21917953","text":"from __future__ import print_function\n\"\"\"-------------------------------------------------------------------------------------------------------\nMODULE\n FXMLUtil - Utilities for producint XML output\n\n (c) Copyright 2011 by SunGard FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n See the test code at the end of the file for sample usage. 
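The selector above takes, per voxel, the maximum of `fftconvolve` responses over all templates and tilts before blurring and peak-picking. A self-contained 2-D sketch of one such correlation step and its peak (note `fftconvolve` computes convolution, which coincides with correlation here only because the toy template is symmetric):

```python
# One correlation step of the candidate selector, on toy data.
import numpy as np
from scipy import signal

tomogram = np.zeros((32, 32))
tomogram[10:13, 20:23] = 1.0             # a 3x3 blob to find
template = np.ones((3, 3))               # symmetric, so conv == corr

correlation = signal.fftconvolve(tomogram, template, mode='same')
idx = np.argmax(correlation)
peak = tuple(int(v) for v in np.unravel_index(idx, correlation.shape))
print(peak)  # (11, 21) -- the blob centre
```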
\n\nNOTE\n XMLElement & XMLDocument originally from the ASPN Python Cookbook\n Extended with a couple of methods.\n \n Nasty replace of writexml method to get nicer formating \n\n-------------------------------------------------------------------------------------------------------\"\"\"\n\nfrom xml.dom.minidom import Document, parse, parseString, Element, _write_data, Node\nimport string\n\ntry:\n unicode\nexcept NameError:\n unicode = str\n\nenc = \"iso-8859-1\"\n\ndef writexml(self, writer, indent=\"\", addindent=\"\", newl=\"\"):\n # indent = current indentation\n # addindent = indentation to add to higher levels\n # newl = newline string\n writer.write(indent+\"<\" + self.tagName)\n\n attrs = self._get_attributes()\n a_names = attrs.keys()\n a_names.sort()\n\n for a_name in a_names:\n writer.write(\" %s=\\\"\" % a_name)\n _write_data(writer, attrs[a_name].value)\n writer.write(\"\\\"\")\n compact = 0\n if len(self.childNodes) == 1 and self.childNodes[0].nodeType == Node.TEXT_NODE:\n compact = 1\n if self.childNodes:\n if compact:\n writer.write(\">\") \n else:\n writer.write(\">%s\"%(newl))\n for node in self.childNodes:\n if compact:\n node.writexml(writer, \"\", \"\", \"\")\n else:\n node.writexml(writer, indent+addindent, addindent, newl)\n if compact:\n writer.write(\"</%s>%s\" % (self.tagName, newl))\n else:\n writer.write(\"%s</%s>%s\" % (indent, self.tagName, newl))\n else:\n writer.write(\"/>%s\"%(newl))\n\nElement.writexml = writexml\n\ndef _encode(v):\n if isinstance(v, unicode):\n v = v.encode(enc)\n return v\n\nclass XMLElement:\n\n def __init__(self, doc, el):\n self.doc = doc\n self.el = el\n\n def __getitem__(self, name):\n a = self.el.getAttributeNode(name)\n if a:\n return _encode(a.value)\n return None\n\n def __setitem__(self, name, value):\n self.el.setAttribute(name, _encode(value))\n\n def __delitem__(self, name):\n self.el.removeAttribute(name)\n\n def __str__(self):\n return _encode(self.doc.toprettyxml(indent=\" \"))\n\n def toString(self):\n return _encode(self.doc.toxml())\n\n def _inst(self, el):\n return XMLElement(self.doc, el)\n\n def get(self, name, default=None):\n a = self.el.getAttributeNode(name)\n if a:\n return _encode(a.value)\n return _encode(default)\n\n def add(self, tag, **kwargs):\n if isinstance(tag, int) or isinstance(tag, float): tag = str(tag)\n el = self.doc.createElement(tag)\n for k, v in kwargs.items():\n el.setAttribute(k, _encode(str(v)))\n return self._inst(self.el.appendChild(el))\n\n def addText(self, data):\n return self._inst(\n self.el.appendChild(\n self.doc.createTextNode(_encode(data))))\n\n def addComment(self, data):\n return self._inst(\n self.el.appendChild(\n self.doc.createComment(data)))\n\n def getText(self, sep=\" \"):\n rc = []\n for node in self.el.childNodes:\n if node.nodeType == node.TEXT_NODE:\n rc.append(node.data)\n return _encode(string.join(rc, sep))\n\n def getAll(self, tag):\n return map(self._inst, self.el.getElementsByTagName(tag))\n\nclass _Document(Document):\n\n def writexml(self, writer, indent=\"\", addindent=\"\", newl=\"\",encoding = None):\n if not encoding: encoding = enc\n writer.write('<?xml version=\"1.0\" encoding=\"%s\" ?>\\n' % encoding)\n for node in self.childNodes:\n node.writexml(writer, indent, addindent, newl)\n\nclass XMLDocument(XMLElement):\n\n def __init__(self, tag=None, **kwargs):\n self.doc = _Document()\n XMLElement.__init__(self, self.doc, self.doc)\n if tag:\n self.el = self.add(tag, **kwargs).el\n\n def parse(self, d):\n self.doc = self.el = parse(d)\n return self\n\n 
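One portability bug in the monkeypatched `writexml` above: `a_names = attrs.keys()` followed by `a_names.sort()` fails on Python 3, where dict views have no `.sort()` method, even though the module otherwise shims for both interpreters (`unicode = str`). The portable spelling:

```python
# sorted() returns a list on both Python 2 and 3, unlike .sort() on a
# dict view.
attrs = {"b": 1, "a": 2}
a_names = sorted(attrs.keys())
print(a_names)  # ['a', 'b']
```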
def parseString(self, d):\n self.doc = self.el = parseString(_encode(d))\n return self\n\n\n\n\n\n\"\"\"-------------------------------------------------------------------------------------------------------\n\nMODULE TEST CODE\n\n-------------------------------------------------------------------------------------------------------\"\"\"\n\nif __name__==\"__main__\":\n\n # Example of dumping a database structure\n doc = XMLDocument(\"database\", name=\"testdb\")\n table = doc.add(\"table\", name=\"test\")\n table.add(\"field\", name=\"counter\", type=\"int\")\n table.add(\"field\", name=\"name\", type=\"varchar\")\n table.add(\"field\", name=\"info\", type=\"text\")\n #print (doc)\n\n # Simulate reading a XML file\n ndoc = XMLDocument()\n ndoc.parseString(str(doc))\n root = ndoc.getAll(\"database\")\n if root:\n db = root[0]\n #print (\"Database:\", db[\"name\"])\n #for table in db.getAll(\"table\"):\n #print (\" Table:\", table[\"name\"])\n #for field in db.getAll(\"field\"):\n # print (\" Field:\", field[\"name\"], \"- Type:\", field[\"type\"])\n \n # It's object oriented\n d = XMLDocument(\"notice\").add(\"text\", format=\"plain\").addText(\"Some text\")\n print (d)\n","sub_path":"Extensions/Default/FPythonCode/FXMLUtil.py","file_name":"FXMLUtil.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571629434","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n pip_services_commons.validate.ArraySchema\r\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n \r\n Array schema implementation\r\n \r\n :copyright: Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.\r\n :license: MIT, see LICENSE for more details.\r\n\"\"\"\r\n\r\nfrom .Schema import Schema\r\nfrom .ValidationResultType import ValidationResultType\r\nfrom .ValidationResult import ValidationResult\r\nfrom ..reflect.ObjectReader import ObjectReader\r\n\r\nclass ArraySchema(Schema):\r\n value_type = None\r\n\r\n def __init__(self, value_type):\r\n self.value_type = value_type\r\n\r\n def _perform_validation(self, path, value, results):\r\n name = path if path != None else \"value\"\r\n value = ObjectReader.get_value(value)\r\n\r\n super(ArraySchema, self)._perform_validation(path, value, results)\r\n\r\n if value == None:\r\n return\r\n\r\n if isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple):\r\n index = 0\r\n for element in value:\r\n element_path = str(index) if path == None or len(path) == 0 else path + \".\" + str(index)\r\n self._perform_type_validation(element_path, self.value_type, element, results)\r\n index += 1\r\n else:\r\n results.append(\r\n ValidationResult(\r\n path,\r\n ValidationResultType.Error,\r\n \"VALUE_ISNOT_ARRAY\",\r\n name + \" type must be List or Array\",\r\n \"List\",\r\n type(value)\r\n )\r\n )\r\n","sub_path":"pip_services_commons/validate/ArraySchema.py","file_name":"ArraySchema.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586745839","text":"from Component import Component\nfrom List import List\nfrom upisi import List\n\n\nkolone = [\"RB;\",\n \"DUZINA;\",\n \"KT;\",\n \"SIRINA;\",\n \"KT;\",\n \"OZNAKA;\",\n \"T;\",\n \"KOM;\",\n \"Materijal;\",\n \"Kvadratura materijala;\"\n \"Duzni metar materijala;\",\n \"Duzni metar trake\"\n ]\ndef preuredi():\n with open('ulaz.csv', \"a\", encoding=\"utf-8\") as file:\n file.write(\"\\n\")\n\ndef upisi():\n with 
open('Izlaz.csv', \"w\", encoding=\"utf-8\") as file:\n file.writelines(kolone)\n for line in List.component_lista:\n file.write('\\n' + str(line))\n file.write('\\n')\n for k in List.material:\n file.write('\\n' + k + \";\" + str(List.hash[k]).replace(\".\", \",\"))\n","sub_path":"upisi.py","file_name":"upisi.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620002409","text":"from background_task import background\r\nfrom django.utils.timezone import localtime, now\r\nimport math\r\n\r\n# Lets you easily log things when in production, by calling\r\n# log(asdf='hello world') to display {'asdf': 'hello world'}\r\n@background(schedule=24*60*60)\r\ndef log():\r\n return\r\n\r\n\r\ndef miniround_sub(competitor, problem, miniround):\r\n time = problem.exam.miniround_end_time(miniround)\r\n return competitor.submissions.filter(problem=problem, submit_time__lte=time).order_by(\"-submit_time\").first()\r\n\r\n\r\n\r\n# score.problem.exam and score.competitor.exam should always match\r\n# do we need to check for invalid scores?\r\n# TODO: what if problems, tasks, or minrounds get added/removed?\r\ndef update_scores(comp):\r\n from website.models import Score, TaskScore, MiniRoundScore, MiniRoundTotal\r\n exam = comp.exam\r\n for problem in exam.problems.all():\r\n s = Score.objects.filter(problem=problem, competitor=comp).first()\r\n if s is None:\r\n s = Score(problem=problem, competitor=comp)\r\n s.save()\r\n\r\n if exam.is_optimization:\r\n # create taskscores if they don't exist yet\r\n for task in problem.tasks.all():\r\n ts = TaskScore.objects.filter(task=task, score=s).first()\r\n if ts is None:\r\n ts = TaskScore(task=task, score=s)\r\n ts.save()\r\n\r\n elif exam.is_ai:\r\n # create miniroundscores if they don't exist\r\n for i in range(exam.num_minirounds+1):\r\n mrs = MiniRoundScore.objects.filter(score=s, miniround=i).first()\r\n if mrs is None:\r\n mrs = MiniRoundScore(score=s, miniround=i)\r\n mrs.save()\r\n if exam.is_ai:\r\n for i in range(exam.num_minirounds):\r\n mrt = MiniRoundTotal.objects.filter(competitor=comp, miniround=i).first()\r\n if mrt is None:\r\n mrt = MiniRoundTotal(competitor=comp, miniround=i)\r\n mrt.save()\r\n\r\n\r\n\r\n\r\ndef update_competitors(team):\r\n from website.models import Competitor, DivChoice\r\n for exam in team.contest.exams.all():\r\n # Django guarantees at most one competitor for each\r\n # (exam, team, mathlete) triple, so there are no duplicates\r\n\r\n # delete any invalid competitors\r\n comps = Competitor.objects.filter(exam=exam, team=team)\r\n for c in comps:\r\n if exam.is_team_exam:\r\n # this shouldn't happen, unless manually set in Django Admin\r\n if c.mathlete is not None:\r\n c.delete()\r\n else:\r\n # team members might have been kicked,\r\n # so delete the corresponding competitor\r\n if c.mathlete not in team.mathletes.all():\r\n c.delete()\r\n if exam.exampair is not None:\r\n dc = DivChoice.objects.filter(exampair=exam.exampair, mathlete=c.mathlete).first()\r\n if dc is None or dc.division != exam.division:\r\n c.delete()\r\n\r\n\r\n # make sure all valid competitors exist, and call update_scores\r\n if exam.is_team_exam:\r\n c = Competitor.objects.filter(exam=exam, team=team, mathlete=None).first()\r\n if c is None:\r\n c = Competitor(exam=exam, team=team, mathlete=None)\r\n c.save()\r\n update_scores(c)\r\n else:\r\n for m in team.mathletes.all():\r\n if exam.exampair is not None:\r\n dc = 
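`upisi()` above writes semicolon-separated rows by hand with string concatenation; the standard `csv` module handles the joining (and quoting of values containing the delimiter) for you. A hedged equivalent with toy data — the real `kolone` headers and component rows come from the surrounding modules:

```python
# csv.writer equivalent of the manual ';'-joined output in upisi().
import csv

kolone = ["RB", "DUZINA", "KT"]          # illustrative headers
rows = [[1, 600, 2], [2, 450, 1]]        # illustrative data

with open("Izlaz.csv", "w", encoding="utf-8", newline="") as fh:
    writer = csv.writer(fh, delimiter=";")
    writer.writerow(kolone)
    writer.writerows(rows)
```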
DivChoice.objects.filter(exampair=exam.exampair, mathlete=m).first()\r\n if dc is None or dc.division != exam.division:\r\n continue\r\n c = Competitor.objects.filter(exam=exam, team=team, mathlete=m).first()\r\n if c is None:\r\n c = Competitor(exam=exam, team=team, mathlete=m)\r\n c.save()\r\n update_scores(c)\r\n\r\n# initializes all Competitors, Scores, and TaskScores\r\n# ensures exactly one score for each (problem, competitor) pair,\r\n# and exactly one taskscore for each (task, score) pair\r\ndef update_contest(contest):\r\n log(starting='update_contest')\r\n from website.models import MiniRoundQueue\r\n try:\r\n for team in contest.teams.all():\r\n update_competitors(team)\r\n for exam in contest.exams.all():\r\n if exam.is_ai:\r\n for i in range(exam.num_minirounds+1):\r\n mrq = MiniRoundQueue.objects.filter(exam=exam, miniround=i).first()\r\n if mrq is None:\r\n mrq = MiniRoundQueue(exam=exam, miniround=i)\r\n if i == 0:\r\n mrq.num_games = 0\r\n mrq.save()\r\n except Exception as e:\r\n log(error=str(e), during='update_contest')\r\n log(finished='update_contest')\r\n\r\n\r\n\r\ndef reset_contest(contest):\r\n from website.models import MiniRoundQueue\r\n for team in contest.teams.all():\r\n for c in team.competitors.all():\r\n c.delete()\r\n if contest.locked and team.mathletes.count() == 0:\r\n team.delete()\r\n for exam in contest.exams.all():\r\n if exam.is_ai:\r\n for p in exam.problems.all():\r\n aiprob = p.aiproblem.first()\r\n for g in aiprob.aigames.all():\r\n g.delete()\r\n for i in range(exam.num_minirounds+1):\r\n mrq = MiniRoundQueue.objects.get(exam=exam, miniround=i)\r\n if i == 0:\r\n mrq.num_games = 0\r\n else:\r\n mrq.num_games = -1\r\n mrq.save()\r\n exam.display_miniround = 0\r\n exam.save()\r\n if exam.is_optimization:\r\n for p in exam.problems.all():\r\n for t in p.tasks.all():\r\n t.best_raw_points = None\r\n t.save()\r\n update_contest(contest)\r\n\r\n\r\ndef reset_exam(exam):\r\n from website.models import MiniRoundQueue\r\n if exam.is_ai:\r\n for p in exam.problems.all():\r\n aiprob = p.aiproblem.first()\r\n log(aiprob=str(aiprob), p=str(p))\r\n for g in aiprob.aigames.all():\r\n g.delete()\r\n for i in range(exam.num_minirounds+1):\r\n mrq = MiniRoundQueue.objects.get(exam=exam, miniround=i)\r\n if i == 0:\r\n mrq.num_games = 0\r\n else:\r\n mrq.num_games = -1\r\n mrq.save()\r\n exam.display_miniround = 0\r\n exam.save()\r\n if exam.is_optimization:\r\n for p in exam.problems.all():\r\n for t in p.tasks.all():\r\n t.best_raw_points = None\r\n t.save()\r\n\r\n\r\n\r\ndef compute_weighted_avg(score, m):\r\n from website.models import MiniRoundScore\r\n grace = score.problem.exam.num_grace_minirounds\r\n if m < grace:\r\n return 0\r\n else:\r\n num = 0.0\r\n den = 0.0\r\n for i in range(m+1):\r\n mrs = MiniRoundScore.objects.get(score=score, miniround=i)\r\n w = math.sqrt(max(i-grace+1, 0))\r\n num += w * mrs.avg_points\r\n den += w\r\n return num / den\r\n\r\n\r\ndef update_ai_leaderboard(exam, m):\r\n from website.models import Score, MiniRoundScore, MiniRoundTotal\r\n\r\n max_dict = {}\r\n for p in exam.problems.all():\r\n max_w_avg = 0.0\r\n for c in exam.competitors.all():\r\n s = Score.objects.get(problem=p, competitor=c)\r\n mrs = MiniRoundScore.objects.get(score=s, miniround=m)\r\n mrs.weighted_avg = compute_weighted_avg(s, m)\r\n mrs.save()\r\n max_w_avg = max(max_w_avg, mrs.weighted_avg)\r\n max_dict[p.problem_number] = max_w_avg\r\n\r\n for c in exam.competitors.all():\r\n mrt = MiniRoundTotal.objects.get(competitor=c, miniround=m)\r\n 
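utils.py leans heavily on the `filter(...).first()` / create-if-`None` pattern (Scores, TaskScores, MiniRoundScores, MiniRoundQueues, DivChoices). Django's `get_or_create` expresses the same lookup-or-create step in one call; a fragment that assumes the same models as above:

```python
# Drop-in for the lookup-or-create pattern used throughout utils.py;
# get_or_create returns (object, created_flag).
s, _ = Score.objects.get_or_create(problem=problem, competitor=comp)
mrs, _ = MiniRoundScore.objects.get_or_create(score=s, miniround=i)
```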
mrt.total_score = 0\r\n\r\n for p in exam.problems.all():\r\n max_w_avg = max_dict[p.problem_number]\r\n s = Score.objects.get(problem=p, competitor=c)\r\n mrs = MiniRoundScore.objects.get(score=s, miniround=m)\r\n if max_w_avg > 0:\r\n mrs.norm_w_avg = mrs.weighted_avg / max_w_avg * 100\r\n else:\r\n mrs.norm_w_avg = 0\r\n mrs.save()\r\n mrt.total_score += mrs.norm_w_avg\r\n mrt.save()\r\n exam.display_miniround = m\r\n exam.save()\r\n\r\n\r\ndef regrade_games():\r\n from website.models import AIGame\r\n games = AIGame.objects.filter(status=3)\r\n for g in games:\r\n g.status = 0\r\n g.history = None\r\n g.save()\r\n\r\ndef recheck_games():\r\n from website.models import AIGame\r\n from website.tasks import init_all_tasks\r\n games = AIGame.objects.filter(status=-1)\r\n for g in games:\r\n g.status = 0\r\n g.save()\r\n init_all_tasks()\r\n\r\n\r\n\r\n# temporary\r\ndef scores_from_csv(text):\r\n from website.models import Team, Problem, Competitor, Score, TaskScore\r\n lines = text.splitlines()\r\n data = [line.split(',') for line in lines]\r\n n = len(data)\r\n for i in range(n):\r\n team_id, prob_name, task_num, score = data[i][0], data[i][1], data[i][2], data[i][3]\r\n team_id = int(team_id)\r\n team = Team.objects.get(pk=team_id)\r\n prob = Problem.objects.get(name=prob_name)\r\n task_num = int(task_num)\r\n task = prob.tasks.get(task_number=task_num)\r\n comp = Competitor.objects.get(exam=prob.exam, team=team, mathlete=None)\r\n if score == '':\r\n score = None\r\n else:\r\n score = int(score)\r\n s = Score.objects.get(problem=prob, competitor=comp)\r\n ts = TaskScore.objects.get(task=task, score=s)\r\n g = prob.grader\r\n if g is None:\r\n log(BAD='g is None')\r\n if g.better(score, ts.raw_points):\r\n ts.raw_points = score\r\n ts.save()\r\n \r\n\r\ndef recompute_leaderboard(exam):\r\n log(msg='start recomputing')\r\n for p in exam.problems.all():\r\n g = p.grader\r\n for t in p.tasks.all():\r\n t.best_raw_points = None # reset\r\n for ts in t.taskscores.all():\r\n if g.better(ts.raw_points, t.best_raw_points):\r\n t.best_raw_points = ts.raw_points\r\n t.save()\r\n for ts in t.taskscores.all():\r\n if ts.raw_points is not None:\r\n ts.norm_points = g.normalize(ts.raw_points, t.best_raw_points)\r\n ts.save()\r\n for c in exam.competitors.all():\r\n c.total_score = 0 # reset\r\n for s in c.scores.all():\r\n s.points = 0 # reset\r\n for ts in s.taskscores.all():\r\n s.points += ts.norm_points\r\n s.points /= s.taskscores.count()\r\n s.save()\r\n c.total_score += s.points\r\n c.save()\r\n log(msg='done recomputing')\r\n\r\n\r\ndef reset_problem(p):\r\n for s in p.scores.all():\r\n s.points = 0\r\n s.save()\r\n for ts in s.taskscores.all():\r\n ts.raw_points = None\r\n ts.norm_points = 0\r\n ts.save()\r\n for sub in p.submissions.all():\r\n sub.points = None\r\n sub.status = 0\r\n sub.save()\r\n\r\n\r\ndef per_page(n):\r\n return 50\r\n\r\n\r\ndef default_div1(contest):\r\n from website.models import DivChoice\r\n log(starting='default_div1')\r\n try:\r\n for team in contest.teams.all():\r\n for m in team.mathletes.all():\r\n for exampair in contest.exampairs.all():\r\n dc = DivChoice.objects.filter(exampair=exampair, mathlete=m).first()\r\n if dc is None:\r\n dc = DivChoice(exampair=exampair, mathlete=m, division=1)\r\n dc.save()\r\n elif dc.division is None:\r\n dc.division = 1\r\n dc.save()\r\n except Exception as e:\r\n log(error=str(e), during='default_div1')\r\n log(finished='default_div1')\r\n\r\n\r\ndef exam_results_from_csv(exam, text):\r\n from website.models import Team, 
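`compute_weighted_avg`, used per competitor by `update_ai_leaderboard` above, is a square-root-weighted mean: minirounds inside the grace period get weight 0, and round i afterwards gets weight sqrt(i - grace + 1). A pure-Python check of the formula on made-up scores:

```python
# Pure-Python check of the sqrt-weighted mean in compute_weighted_avg.
import math

def weighted_avg(points, grace):
    num = den = 0.0
    for i, p in enumerate(points):
        w = math.sqrt(max(i - grace + 1, 0))
        num += w * p
        den += w
    return num / den if den else 0.0

print(weighted_avg([10, 20, 30, 40], grace=2))  # rounds 0-1 ignored
# = (1*30 + sqrt(2)*40) / (1 + sqrt(2)) ~= 35.86
```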
Problem, Competitor, Score, TaskScore\r\n log(start='exam_results_from_csv')\r\n try:\r\n lines = text.splitlines()\r\n data = [line.split(',') for line in lines]\r\n n = len(data)\r\n problems = exam.problem_list\r\n if exam.is_team_exam and exam.is_math:\r\n offset = 0\r\n else:\r\n offset = 1\r\n\r\n for i in range(n-1):\r\n team_name = data[i][0]\r\n code = data[i][1]\r\n if exam.is_power:\r\n team = Team.objects.filter(contest=exam.contest, invite_code=code).first()\r\n else:\r\n team = Team.objects.filter(contest=exam.contest, team_name=team_name).first()\r\n if team is None:\r\n log(error=f'could not find team with name {team_name} in exam_results_from_csv')\r\n continue\r\n if exam.is_team_exam:\r\n c = Competitor.objects.get(exam=exam, team=team, mathlete=None)\r\n else:\r\n name = data[i][1]\r\n c = None\r\n for cc in team.competitors.filter(exam=exam):\r\n names = [cc.name.lower().strip()]\r\n if cc.mathlete is not None:\r\n names += [cc.mathlete.user.full_name.lower().strip(), cc.mathlete.user.long_name.lower().strip()]\r\n if name.lower().strip() in names:\r\n if c is not None:\r\n log(duplicate_name=name, team=team_name, during='exam_results_from_csv')\r\n else:\r\n c = cc\r\n if c is None:\r\n log(error=f'could not find {name} on team {team_name} in exam_results_from_csv')\r\n continue\r\n for p in problems:\r\n s = Score.objects.get(competitor=c, problem=p)\r\n s.points = float(data[i][p.problem_number + offset])\r\n s.save()\r\n c.total_score = float(data[i][-1])\r\n c.save()\r\n if exam.is_math:\r\n for p in problems:\r\n p.weight = float(data[-1][p.problem_number + offset])\r\n p.save()\r\n except Exception as e:\r\n log(error=str(e), during='exam_results_from_csv')\r\n log(finished='exam_results_from_csv')\r\n \r\n\r\ndef calc_indiv_sweepstakes(contest):\r\n from website.models import IndivSweepstake\r\n log(start='calc_indiv_sweepstakes')\r\n for team in contest.teams.all():\r\n for mathlete in team.mathletes.all():\r\n iss = IndivSweepstake.objects.filter(team=team, mathlete=mathlete).first()\r\n if iss is None:\r\n iss = IndivSweepstake(team=team, mathlete=mathlete)\r\n iss.save()\r\n iss.update_total_score()\r\n log(finished='calc_indiv_sweepstakes')\r\n\r\n\r\ndef calc_sweepstakes(contest):\r\n from website.models import Sweepstake, Exam\r\n from django.db.models import Max\r\n log(start='calc_sweepstakes')\r\n for team in contest.teams.all():\r\n ss = Sweepstake.objects.filter(team=team).first()\r\n if ss is None:\r\n ss = Sweepstake(team=team)\r\n ss.save()\r\n ss.update_indiv_total()\r\n\r\n power_exam = contest.exams.filter(is_team_exam=True, show_results=True, exam_type=Exam.POWER).first()\r\n if power_exam:\r\n max_power = power_exam.competitors.aggregate(m=Max('total_score'))['m']\r\n team_exam = contest.exams.filter(is_team_exam=True, show_results=True, exam_type=Exam.MATH).first()\r\n if team_exam:\r\n max_team = team_exam.competitors.aggregate(m=Max('total_score'))['m']\r\n max_indiv = Sweepstake.objects.filter(team__contest=contest).aggregate(m=Max('indiv_total'))['m']\r\n\r\n for team in contest.teams.all():\r\n ss = team.sweepstake\r\n if power_exam:\r\n power_comp = team.competitors.filter(exam=power_exam).first()\r\n if power_comp is not None and max_power > 0:\r\n ss.norm_power = power_comp.total_score / max_power * 200\r\n if team_exam:\r\n team_comp = team.competitors.filter(exam=team_exam).first()\r\n if team_comp is not None and max_team > 0:\r\n ss.norm_team = team_comp.total_score / max_team * 200\r\n if max_indiv > 0:\r\n ss.norm_indiv = 
ss.indiv_total / max_indiv * 600\r\n ss.total_score = ss.norm_power + ss.norm_team + ss.norm_indiv\r\n ss.save()\r\n log(finished='calc_sweepstakes')\r\n\r\n","sub_path":"website/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430367037","text":"from .provider_test import ProviderTest\nfrom gunpowder import *\nimport numpy as np\n\nclass TestSourcePad(BatchProvider):\n\n def setup(self):\n\n self.provides(\n ArrayKeys.TEST_LABELS,\n ArraySpec(\n roi=Roi((200, 20, 20), (1800, 180, 180)),\n voxel_size=(20, 2, 2)))\n\n self.provides(\n PointsKeys.TEST_POINTS,\n PointsSpec(\n roi=Roi((200, 20, 20), (1800, 180, 180))))\n\n def provide(self, request):\n\n batch = Batch()\n\n roi_array = request[ArrayKeys.TEST_LABELS].roi\n roi_voxel = roi_array//self.spec[ArrayKeys.TEST_LABELS].voxel_size\n\n data = np.zeros(roi_voxel.get_shape(), dtype=np.uint32)\n data[:,::2] = 100\n\n spec = self.spec[ArrayKeys.TEST_LABELS].copy()\n spec.roi = roi_array\n batch.arrays[ArrayKeys.TEST_LABELS] = Array(\n data,\n spec=spec)\n\n return batch\n\nclass TestPad(ProviderTest):\n\n def test_output(self):\n\n points = PointsKey('TEST_POINTS')\n labels = ArrayKey('TEST_LABELS')\n\n pipeline = (\n TestSourcePad() +\n Pad(labels, Coordinate((20, 20, 20)), value=1) +\n Pad(points, Coordinate((10, 10, 10))))\n\n with build(pipeline):\n\n self.assertTrue(\n pipeline.spec[labels].roi == Roi(\n (180, 0, 0),\n (1840, 220, 220)))\n self.assertTrue(\n pipeline.spec[points].roi == Roi(\n (190, 10, 10),\n (1820, 200, 200)))\n\n batch = pipeline.request_batch(\n BatchRequest({\n labels: ArraySpec(\n Roi((180, 0, 0), (20, 20, 20)))\n }))\n\n self.assertEqual(np.sum(batch.arrays[labels].data), 1*10*10)\n","sub_path":"tests/cases/pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592404113","text":"# uncompyle6 version 3.3.5\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]\n# Embedded file name: c:\\Jenkins\\live\\output\\win_64_static\\Release\\python-bundle\\MIDI Remote Scripts\\Push2\\echo.py\n# Compiled at: 2019-04-23 16:19:13\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom ableton.v2.base import EventObject\nfrom ableton.v2.control_surface import EnumWrappingParameter, LiveObjectDecorator, NotifyingList, get_parameter_by_name\nfrom .device_decoration import DeviceOnOffOption, DeviceSwitchOption\nfrom .device_component import ButtonRange, DeviceComponentWithTrackColorViewData\nfrom .visualisation_settings import VisualisationGuides\n\nclass EchoDeviceDecorator(LiveObjectDecorator, EventObject):\n\n class EchoChannelName(int):\n pass\n\n EchoChannelName.left = EchoChannelName(0)\n EchoChannelName.right = EchoChannelName(1)\n\n def __init__(self, *a, **k):\n super(EchoDeviceDecorator, self).__init__(*a, **k)\n self._channel_names_provider = NotifyingList(available_values=[\n b'Left', b'Right'], default_value=EchoDeviceDecorator.EchoChannelName.left)\n self.channel_switch_parameter = EnumWrappingParameter(name=b'Channel Toggle', parent=self, values_host=self._channel_names_provider, index_property_host=self._channel_names_provider, values_property=b'available_values', index_property=b'index', value_type=EchoDeviceDecorator.EchoChannelName)\n self._additional_parameters = (\n 
self.channel_switch_parameter,)\n self.link_option = DeviceOnOffOption(name=b'Link', property_host=get_parameter_by_name(self, b'Link'))\n self.sync_l_option = DeviceOnOffOption(name=b'L Sync', property_host=get_parameter_by_name(self, b'L Sync'))\n self.sync_r_option = DeviceOnOffOption(name=b'R Sync', property_host=get_parameter_by_name(self, b'R Sync'))\n self.sync_m_option = DeviceOnOffOption(name=b'M Sync', property_host=get_parameter_by_name(self, b'L Sync'))\n self.sync_s_option = DeviceOnOffOption(name=b'S Sync', property_host=get_parameter_by_name(self, b'R Sync'))\n self.clip_dry_option = DeviceOnOffOption(name=b'Clip Dry', property_host=get_parameter_by_name(self, b'Clip Dry'))\n self.filter_on_option = DeviceOnOffOption(name=b'Filter', property_host=get_parameter_by_name(self, b'Filter On'))\n self.feedback_inv_option = DeviceOnOffOption(name=b'Invert', property_host=get_parameter_by_name(self, b'Feedback Inv'))\n self.modulation_times_four_option = DeviceOnOffOption(name=b'Mod 4x', property_host=get_parameter_by_name(self, b'Mod 4x'))\n self.reverb_loc_option = DeviceSwitchOption(name=b'Reverb Loc', parameter=get_parameter_by_name(self, b'Reverb Loc'))\n self.duck_option = DeviceOnOffOption(name=b'Duck', property_host=get_parameter_by_name(self, b'Duck On'))\n self.gate_option = DeviceOnOffOption(name=b'Gate', property_host=get_parameter_by_name(self, b'Gate On'))\n self.wobble_option = DeviceOnOffOption(name=b'Wobble', property_host=get_parameter_by_name(self, b'Wobble On'))\n self.noise_option = DeviceOnOffOption(name=b'Noise', property_host=get_parameter_by_name(self, b'Noise On'))\n self.channel_switch_lr_option = DeviceSwitchOption(name=b'L/R Switch', parameter=self.channel_switch_parameter, labels=[\n b'Left', b'Right'])\n self.channel_switch_ms_option = DeviceSwitchOption(name=b'M/S Switch', parameter=self.channel_switch_parameter, labels=[\n b'Mid', b'Side'])\n self.register_disconnectables(self._additional_parameters)\n self.register_disconnectables(self.options)\n\n @property\n def parameters(self):\n return tuple(self._live_object.parameters) + self._additional_parameters\n\n @property\n def options(self):\n return (\n self.channel_switch_lr_option,\n self.channel_switch_ms_option,\n self.link_option,\n self.sync_l_option,\n self.sync_r_option,\n self.sync_m_option,\n self.sync_s_option,\n self.clip_dry_option,\n self.filter_on_option,\n self.feedback_inv_option,\n self.modulation_times_four_option,\n self.reverb_loc_option,\n self.duck_option,\n self.gate_option,\n self.wobble_option,\n self.noise_option)\n\n\nclass EchoDeviceComponent(DeviceComponentWithTrackColorViewData):\n TUNNEL_VISUALISATION_CONFIGURATION_IN_BANKS = {0: ButtonRange(0, 3), \n 1: ButtonRange(2, 5)}\n FILTER_VISUALISATION_CONFIGURATION_IN_BANKS = {0: ButtonRange(4, 5), \n 3: ButtonRange(1, 4)}\n LFO_VISUALISATION_CONFIGURATION_IN_BANKS = {4: ButtonRange(0, 3)}\n\n def _parameter_touched(self, parameter):\n self._update_visualisation_view_data(self._adjustment_view_data)\n\n def _parameter_released(self, parameter):\n self._update_visualisation_view_data(self._adjustment_view_data)\n\n @property\n def _adjustment_view_data(self):\n is_linked = bool(get_parameter_by_name(self.device(), b'Link').value)\n adjusting_tunnel_left = adjusting_tunnel_right = False\n adjusting_filter_hp = adjusting_filter_lp = False\n adjusting_lfo = adjusting_lfo_phase = False\n touched_parameters = [ self.parameters[button.index] for button in self.parameter_touch_buttons if button.is_pressed\n ]\n for parameter in 
touched_parameters:\n if parameter.name == b'Feedback':\n adjusting_tunnel_left = adjusting_tunnel_right = True\n elif parameter.name.startswith(b'L '):\n adjusting_tunnel_left = True\n if parameter.name != b'L Offset' and is_linked:\n adjusting_tunnel_right = True\n elif parameter.name == b'R Offset' or parameter.name.startswith(b'R ') and not is_linked:\n adjusting_tunnel_right = True\n elif parameter.name in ('HP Freq', 'HP Res'):\n adjusting_filter_hp = True\n elif parameter.name in ('LP Freq', 'LP Res'):\n adjusting_filter_lp = True\n elif parameter.name == b'Mod Phase':\n adjusting_lfo_phase = True\n elif parameter.name.startswith(b'Mod '):\n adjusting_lfo = True\n\n return {b'AdjustingTunnelLeft': adjusting_tunnel_left, \n b'AdjustingTunnelRight': adjusting_tunnel_right, \n b'AdjustingFilterHighPass': adjusting_filter_hp, \n b'AdjustingFilterLowPass': adjusting_filter_lp, \n b'AdjustingLfo': adjusting_lfo, \n b'AdjustingLfoPhase': adjusting_lfo_phase}\n\n def _set_bank_index(self, bank):\n super(EchoDeviceComponent, self)._set_bank_index(bank)\n self._update_visualisation_view_data(self._configuration_view_data)\n self._update_visualisation_view_data(self._adjustment_view_data)\n self.notify_visualisation_visible()\n self.notify_shrink_parameters()\n\n @property\n def _visualisation_visible(self):\n return self._bank.index in self.TUNNEL_VISUALISATION_CONFIGURATION_IN_BANKS or self._bank.index in self.FILTER_VISUALISATION_CONFIGURATION_IN_BANKS or self._bank.index in self.LFO_VISUALISATION_CONFIGURATION_IN_BANKS\n\n @property\n def _shrink_parameters(self):\n if self._visualisation_visible:\n tunnel_config = self.TUNNEL_VISUALISATION_CONFIGURATION_IN_BANKS.get(self._bank.index, ButtonRange(-1, -1))\n filter_config = self.FILTER_VISUALISATION_CONFIGURATION_IN_BANKS.get(self._bank.index, ButtonRange(-1, -1))\n lfo_config = self.LFO_VISUALISATION_CONFIGURATION_IN_BANKS.get(self._bank.index, ButtonRange(-1, -1))\n return [ tunnel_config.left_index <= index <= tunnel_config.right_index or filter_config.left_index <= index <= filter_config.right_index or lfo_config.left_index <= index <= lfo_config.right_index for index in range(8)\n ]\n else:\n return [False] * 8\n\n @property\n def _configuration_view_data(self):\n tunnel_left, tunnel_right = self._calculate_view_size(self.TUNNEL_VISUALISATION_CONFIGURATION_IN_BANKS)\n filter_left, filter_right = self._calculate_view_size(self.FILTER_VISUALISATION_CONFIGURATION_IN_BANKS)\n lfo_left, lfo_right = self._calculate_view_size(self.LFO_VISUALISATION_CONFIGURATION_IN_BANKS)\n return {b'TunnelLeft': tunnel_left, \n b'TunnelRight': tunnel_right, \n b'FilterLeft': filter_left, \n b'FilterRight': filter_right, \n b'LfoLeft': lfo_left, \n b'LfoRight': lfo_right}\n\n def _initial_visualisation_view_data(self):\n view_data = super(EchoDeviceComponent, self)._initial_visualisation_view_data()\n view_data.update(self._configuration_view_data)\n view_data.update(self._adjustment_view_data)\n return view_data\n\n def _calculate_view_size(self, configuration):\n if self._bank.index not in configuration:\n return (0, 0)\n config = configuration[self._bank.index]\n return (\n VisualisationGuides.light_left_x(config.left_index),\n VisualisationGuides.light_right_x(config.right_index))","sub_path":"MIDI Remote Scripts/Push2/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":9222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229645615","text":"import os\nimport sys\nimport re\nimport 
inspect\nimport json\nimport pickle\nimport gzip\nfrom os.path import join as pjoin\nimport cv2\nimport numpy as np\nfrom scipy.io import loadmat\nfrom sklearn.preprocessing import LabelEncoder\nimport h5py\nimport pdb\nfrom scipy import io\n\ndef loadall(datadir, data):\n return zip(*[[imread(pjoin(datadir, name)), lbl, name] for lbl, files in data.items() for name in files])\n\ndef imread(fname, resize=None):\n im = cv2.imread(fname, flags=cv2.IMREAD_COLOR)\n if im is None:\n raise ValueError(\"Couldn't load image \" + fname)\n\n if resize is not None and im.shape[:2] != resize:\n im = cv2.resize(im, resize, interpolation=cv2.INTER_LANCZOS4)\n\n # In OpenCV, color dimension is last, but theano likes it to be first.\n # (That's map of triplets vs three maps philosophy.)\n # Also convert BGR to RGB while we're at it. Not that it makes any difference.\n im = np.rollaxis(im[:,:,::-1], 2, 0)\n return im.astype(np.float32) / 256\n\ndef load_HOCoffee(datadir,datafile='HOCoffee.json'):\n data = json.load(open(pjoin(datadir, datafile)))\n\n tr_imgs, tr_lbls, tr_names = loadall(datadir, data['train'])\n te_imgs, te_lbls, te_names = loadall(datadir, data['test'])\n\n le = LabelEncoder().fit(tr_lbls)\n return (\n np.array(tr_imgs), le.transform(tr_lbls).astype(np.int32), tr_names,\n np.array(te_imgs), le.transform(te_lbls).astype(np.int32), te_names,\n le\n )\n\ndef load_towncentre(datadir, normalize_angles=True):\n panre = re.compile('pan = ([+-]?\\d+\\.\\d+)\\n')\n valre = re.compile('valid = ([01])\\n')\n angles = []\n images = []\n names = []\n for father in os.listdir(datadir):\n try:\n for son in os.listdir(pjoin(datadir, father)):\n if not son.endswith('.txt'):\n continue\n\n lpan, lval = open(pjoin(datadir, father, son)).readlines()\n if int(valre.match(lval).group(1)) == 0:\n continue\n\n angles.append(float(panre.match(lpan).group(1)))\n # Now search for the corresponding filename, unfortunately, it has more numbers encoded...\n fnames = [f for f in os.listdir(pjoin(datadir, father)) if f.startswith(son.split('.')[0]) and not f.endswith('.txt')]\n assert len(fnames) == 1, \"lolwut\"\n names.append(fnames[0])\n images.append(cv2.imread(pjoin(datadir, father, fnames[0]), flags=cv2.IMREAD_COLOR))\n except NotADirectoryError:\n pass\n\n if normalize_angles:\n angles = [(a + 360*2) % 360 for a in angles]\n\n return images, angles, names\n\ndef load_cgdata(datadir):\n\timages = []\n\tangles = []\n\tnames = []\n\ttry:\n\t\tfor mother in os.listdir(datadir):\n\t\t\tsondir = pjoin(datadir,mother)\n\t\t\tfor son in os.listdir(sondir):\n\t\t\t\tfname = son.split('.')[0]\n\t\t\t\t# pdb.set_trace()\n\t\t\t\tangles.append(float(fname.split('_')[1]))\n\t\t\t\timages.append(cv2.imread(pjoin(sondir, son), flags=cv2.IMREAD_COLOR))\n\t\t\t\tnames.append(fname.split('_')[0])\n\n\t\timages = np.array(images)\n\t\tangles = np.array(angles)\n\n\texcept NotADirectoryError:\n\t\tpass\n\n\treturn images, angles, names\n\n# directryに名前を付けてしまった時\ndef load_new_cgdata(datadir):\n images = []\n angles = []\n names = []\n try:\n for mother in os.listdir(datadir):\n sondir = pjoin(datadir,mother)\n #pdb.set_trace()\n for son in os.listdir(sondir):\n fname = son.split('.')[0]\n angles.append(float(fname.split('_')[1]))\n images.append(cv2.imread(pjoin(sondir, son), flags=cv2.IMREAD_COLOR))\n names.append(mother)\n\n images = np.array(images)\n angles = np.array(angles)\n\n except NotADirectoryError:\n pass\n\n return images,angles,names\n\n# images:[w,h,c,n] -> [n,w,h,c]\n# labels:[n,1]\ndef load_mat(datadir):\n dataset = 
io.loadmat(datadir)\n # [w,h,c,n] -> [n,w,h,c]\n images = dataset[\"X\"].transpose(3,0,1,2)\n labels = dataset[\"y\"]\n return images,labels\n\n# SVHN(cropped) or MNIST\n# rate1: miner images cutrate\n# rate2: major images cutrate\n# sep: separate\ndef load_SVHN(x,y,n=None,minlab=[6,7,8,9,10],rate1=0.8,rate2=0.1,sep=False):\n\n minnums = []\n mininds = []\n majnums = []\n majinds = []\n \n #pdb.set_trace()\n\n for l in range(10):\n ind = np.where(y==l)[0]\n if l in minlab:\n mininds.append(ind)\n minnums.append(ind.shape[0])\n else:\n majinds.append(ind)\n majnums.append(ind.shape[0])\n\n minnums = (np.array(minnums)*(1-rate1)//1).astype(\"int\")\n majnums = (np.array(majnums)*(1-rate2)//1).astype(\"int\")\n\n if sep:\n leftinds = [mininds[i][num:] for i,num in enumerate(minnums)]\n leftinds += [majinds[i][num:] for i,num in enumerate(majnums)]\n\n mininds = [mininds[i][:num] for i,num in enumerate(minnums)]\n majinds = [majinds[i][:num] for i,num in enumerate(majnums)] \n\n #pdb.set_trace()\n\n inds = []\n for ind in mininds:\n inds += ind.tolist()\n \n for ind in majinds:\n inds += ind.tolist()\n inds = np.array(inds) \n\n #pdb.set_trace()\n\n if sep:\n lefts = []\n for ind in leftinds:\n lefts += ind.tolist()\n lefts = np.array(lefts)\n return x[inds], y[inds], x[lefts], y[lefts]\n else:\n if n is not None:\n return x[inds], y[inds], [n[ind] for ind in inds]\n else:\n return x[inds], y[inds]\n\n\n#===========================================\n## 前処理系\ndef flipped_classes(X, y, n, le, old, new):\n \"\"\"\n Horizontally flips all images in `X` which are labeled as `old` and label them as `new`.\n Returns the flipped X, y, n.\n \"\"\"\n indices = np.where(y == le.transform([old]))[0]\n return (\n flipany(X[indices], dim=3),\n np.full(len(indices), le.transform([new]), dtype=y.dtype),\n tuple(n[i] for i in indices)\n )\n\n\ndef flipall_classes(X, y, n, le, flips):\n \"\"\"\n Applies all `flips` to the whole dataset X, y, n and returns the augmented dataset.\n \"\"\"\n fx, fy, fn = [], [], []\n for old, new in flips:\n a, b, c = flipped_classes(X, y, n, le, old, new)\n fx.append(a) ; fy.append(b) ; fn.append(c)\n return np.concatenate([X] + fx), np.concatenate([y] + fy), n + sum(fn, tuple())\n\ndef flipany(a, dim):\n \"\"\"\n `flipany(a, 0)` is equivalent to `flipud(a)`,\n `flipany(a, 1)` is equivalent to `fliplr(a)` and the rest follows naturally.\n \"\"\"\n # Put the axis in front, flip that axis, then move it back.\n return np.swapaxes(np.swapaxes(a, 0, dim)[::-1], 0, dim)\n\ndef flip_images(images):\n return [flipany(img,dim=1) for img in images]\n\ndef flip_angles(angles):\n return [360 - ang for ang in angles]\n\ndef scale_all(images, size=(50, 50)):\n return [cv2.resize(im, size, interpolation=cv2.INTER_LANCZOS4) for im in images]\n\n#===========================================\n\ndef prepare_dump(real_use = False,cg_use = False,hocoffee = False):\n data_path = \"data\"\n towncentre_path = pjoin(data_path,\"TownCentreHeadImages\")\n cgdata_path = pjoin(data_path,\"cgdata\")\n if real_use:\n img, angle, name = load_towncentre(towncentre_path)\n img = scale_all(img,(50,50))\n x = np.array(img + flip_images(img))\n y = np.array(angle + flip_angles(angle))\n n = name + name\n f = gzip.open(pjoin(data_path,'TownCentre.pkl.gz'),'wb+')\n pickle.dump((x,y,n),f)\n if cg_use:\n cg_img, cg_angle, cg_name = load_cgdata(cgdata_path)\n f = gzip.open(pjoin(data_path,'CGData.pkl.gz'),'wb+')\n pickle.dump((cg_img,cg_angle,cg_name),f)\n if hocoffee:\n print(\"Augmenting HOCoffee... 
\")\n        Xtr, ytr, ntr, Xte, yte, nte, le = load_HOCoffee(data_path, 'HOCoffee.json')\n        Xtr, ytr, ntr = flipall_classes(Xtr, ytr, ntr, le, flips=[\n            ('frnt', 'frnt'),\n            ('rear', 'rear'),\n            ('left', 'rght'),\n            ('rght', 'left'),\n            ('frlf', 'frrg'),\n            ('frrg', 'frlf'),\n        ])\n        pickle.dump((Xtr, Xte, ytr, yte, ntr, nte, le),\n                    gzip.open(pjoin(data_path, 'HOCoffee-wflip.pkl.gz'), 'wb+'))\n","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"362846323","text":"import requests\nimport time\nimport threading\nimport env\n\nfrom discord_webhook import DiscordWebhook\nfrom datetime import datetime\n\nliste_tdc = {}\n\n# the {value, value, ...} object defines a set (in the mathematical sense)\n# advantages: faster, no duplicate values, operations such as intersection, union, etc.\n\nurls = {} # the webhooks go here\nliste_allemands = {\"arfore\", \"tellybridger\", \"austriangamingg\",\"blizzor\", \"caravasyt\", \"castcrafter\", \"nxtfake\", \"realbenex\", \"stegi\", \"Fabo\", \"tjantv\", \"verweisunq\", \"wichtiger\", \"x_sus\",\"xlymex\", \"zombiezocktyt\", \"priimme\", \"Trustcn\", \"xpieps_\",}\nliste_esp = {\"aurigas\", \"axozer\", \"barcagamer\", \"bobicraftmc\", \"cibergun\", \"conterstine\", \"crisgreen\", \"esvandal\", \"geramc\", \"giaantv\", \"hasvik\", \"icraktv\", \"lakshartnia\", \"luh\", \"mayichi\", \"serpias\", \"shadoune666\", \"suwie\", \"soypandih\",}\nliste_ita = {\"redd223\",\"ringoatm\", \"dere_x\",\"francycoso\", \"giankoextreme\", \"giacomoinsano\", \"guerrareturns\", \"iskandert\", \"micha3l_tv\", \"sonomrdomi\", \"MICHA3L_tv\", \"napo7700\", \"nolifegabbo\", \"nonsonolink\", \"nonsonolink\", \"novaxciv\", \"tech4play\", \"triton_707\", \"ultimateita\", \"alphatvlive\",}\nliste_US = {\"572_\", \"romulu5_\", \"a6doff\", \"antfrost\", \"bekyamon\", \"brumin\",\"golriver__\",\"graecie\",\"ragetrain\",\"reddoons\",\"velvetiscake\",\"sammygreen\",\"realsdslayer\",\"seapeekay\",\"spideyarmy\",\"spifeyy\",\"theorionsound\",\"tryhord_\",\"turbopiggyyt\",\"vgumiho\",\"vrax\",\"zachplaysanlive\",}\n\nliste_france = {\"aypierre\",\"bichard\",\"frigiel\",\"fukano\",\"fuzeiii\",\"guep\",\"jimmyboyyy\",\"mathieulapin\",\"luccio\",\"magicknup\",\"mathox\",\"nems\",\"niimbustv\",\"ninjaxxu\",\"nino_mssclick\",\"redtoxx_\",\"soulravenn\",\"thatdamngirll\",\"theguill84\",\"tityyy\",}\n\nexceptions = [\n    {\"aypierre\", \"572_\"},\n    {\"bichard\", \"a6doff\"},\n]\n\ntime.sleep(2)\n\ndef check(streamer, nb):\n    # no need for \"global\" everywhere, the lists are objects so their scope is global to the file \n    # If that were not the case: create an \"env\" file (e.g. env.liste_allemands after 'import env', done below) or pass them as parameters\n    \n    # The text returned by the Twitch API is in JSON format, so use it directly as such (.json() instead of .text)\n    # f\"string {var}\" is interpolation, it makes the whole thing clearer\n    text_json_from_twitch_api = requests.get(f\"https://tmi.twitch.tv/group/user/{streamer}/chatters?v={nb}\").json()\n    json_from_twitch_api = text_json_from_twitch_api[\"chatters\"][\"viewers\"]\n    \n\n    # There is room to optimize all this even further, but I want to keep it clear\n    intersection = liste_allemands.intersection(json_from_twitch_api)\n    if len(intersection) > 0: \n        has_spotted_viewer(\n            streamer=streamer, \n            set_of_spotted_viewers=intersection, \n            viewers_country = \"Allemand\"\n        )\n\n    
intersection = liste_esp.intersection(json_from_twitch_api)\n    if len(intersection) > 0: \n        has_spotted_viewer(\n            streamer=streamer, \n            set_of_spotted_viewers=intersection, \n            viewers_country = \"Espagnol\"\n        )\n\n    intersection = liste_ita.intersection(json_from_twitch_api)\n    if len(intersection) > 0: \n        has_spotted_viewer(\n            streamer=streamer, \n            set_of_spotted_viewers=intersection, \n            viewers_country = \"italien\"\n        )\n\n    intersection = liste_US.intersection(json_from_twitch_api)\n    if len(intersection) > 0: \n        has_spotted_viewer(\n            streamer=streamer, \n            set_of_spotted_viewers=intersection, \n            viewers_country = \"américain\"\n        )\n    \n\ndef has_spotted_viewer(streamer, set_of_spotted_viewers, viewers_country):\n    for viewer_spotted in set_of_spotted_viewers:\n\n        # Skip this pair if it is listed in the exceptions\n        if (set({streamer, viewer_spotted}) in exceptions) : continue\n        \n        # The viewer has already been spotted\n        if viewer_spotted in list(liste_tdc.keys()):\n            viewer_spotted_datetime = liste_tdc[viewer_spotted]\n            \n            # check whether the viewer is still within the delay: whether the alert must be sent again or not\n            if (datetime.now()-viewer_spotted_datetime).total_seconds() >= env.time_before_reset:\n                \n                liste_tdc[viewer_spotted] = datetime.now()\n                send_alert(streamer, viewer_spotted, viewers_country)\n\n        # The viewer has not been spotted yet\n        else: \n            send_alert(streamer, viewer_spotted, viewers_country)\n\n\ndef send_alert(streamer, viewer_spotted, viewers_country):\n    message_spotted = f\"{datetime.now()} : Le streamer {streamer} a sur son chat : {viewer_spotted} ({viewers_country})\"\n    print(message_spotted)\n\n    liste_tdc[viewer_spotted] = datetime.now()\n\n    data = {\n        \"token\" : env.token,\n        \"team\" : viewers_country,\n        \"pseudo\" : viewer_spotted,\n        \"streamer\": streamer,\n    }\n    print(requests.post(\"https://dadodasyra.fr/city/api/streamhack.php\", data=data).text)\n\n    for url in urls:\n        DiscordWebhook(url=url, content=message_spotted).execute()\n\nstarted_at = datetime.now()\nnb = 0\nwhile True:\n    nb += 1\n    now = datetime.now()\n    hours_difference = round((now - started_at).total_seconds()/3600, 2)\n    print(f\"{now} boucle numéro : {nb} ça fait {hours_difference} heures que le bot tourne\")\n    for streamer in liste_france:\n        x = threading.Thread(target=check, args=(streamer, nb), daemon=True)\n        x.start()\n    time.sleep(5)","sub_path":"StreamHacker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"228713658","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\npath = \"../data/\"\nfrom os import listdir\nfrom os.path import isfile, join\nfilenames = [f for f in listdir(path) if isfile(join(path, f))]\n\n\nfilenames = [x_ for x_ in filenames if x_[-10:] == \"jacobi.txt\"]\nfor i_ in range(len(filenames)):\n    filenames[i_] = eval(filenames[i_][1:-11])\n\nfilenames.sort()\n\nfor i in range(len(filenames)):\n    filenames[i] = \"N\" + str(filenames[i]) + \"_jacobi.txt\"\n\nn = []\niter = []\nfor file in filenames:\n    infile = open(path+file,\"r\")\n    first_line = infile.readline().split()\n    n.append(eval(first_line[2][2:]))\n    iter.append(eval(first_line[1][11:]))\n    infile.close()\nn = np.asarray(n)\niter = np.asarray(iter)\n\n\nplt.figure(figsize=(9, 6))\n\n# Remove the plot frame lines. 
They are unnecessary chartjunk.\nax = plt.subplot(111)\nax.spines[\"top\"].set_visible(False)\nax.spines[\"bottom\"].set_visible(False)\nax.spines[\"right\"].set_visible(False)\nax.spines[\"left\"].set_visible(False)\n\n\nax.get_xaxis().tick_bottom()\nax.get_yaxis().tick_left()\n\nrange_ = range(n[0],n[-1])\nfor y in range(iter[2], iter[-1], int((iter[-1]-iter[0])/5)):\n plt.plot(range_, [y] * len(range_), \"--\", lw=1.0, color=\"black\", alpha=0.3)\n\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\n\nplt.tick_params(axis=u\"both\", which=u\"both\", bottom=\"off\",length=0)\n\n\nplt.xlabel(\"n\",fontsize=14)\nplt.ylabel(\"Iterations\",fontsize=14)\n\nclr_2 = (0,0.5,1)\nplt.plot(n,iter,\"o\",color=clr_2)\nplt.plot(n,iter,\"--\",lw=2,color=clr_2)\nplt.text(n[-1]+10, iter[-1]-10, \"Datapoints\", fontsize=14, color=clr_2)\n\nclr_2 = (0.5,0,0.5)\nplt.plot(n,n**2,lw=2,color=clr_2)\nplt.text(n[-1]+10, n[-1]**2-10, r\"$n^2$\", fontsize=14, color=clr_2)\n\nplt.savefig(\"../figures/iteration_mot_n.png\", bbox_inches=\"tight\")\nplt.clf()\n","sub_path":"code/iteration_mot_n.py","file_name":"iteration_mot_n.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294551938","text":"import matplotlib.pyplot as plt\nimport data_parser\nimport numpy as np\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.metrics import mean_squared_error\n\n\ndef execute(model, data, savepath, *args, **kwargs):\n # Train the model using the training sets\n model.fit(data.get_x_data(), np.asarray(data.get_y_data()).ravel())\n overall_rms = np.sqrt(mean_squared_error(model.predict(data.get_x_data()), np.asarray(data.get_y_data()).ravel()))\n datasets = ['IVAR', 'ATR-1', 'ATR-2']\n colors = ['#BCBDBD', '#009AFF', '#FF0A09']\n fig, ax = plt.subplots()\n #calculate rms for each dataset\n for dataset in range(max(np.asarray(data.get_data(\"Data Set code\")).ravel()) + 1):\n data.remove_all_filters()\n data.add_inclusive_filter(\"Data Set code\", '=', dataset)\n Ypredict = model.predict(data.get_x_data())\n Ydata = np.asarray(data.get_y_data()).ravel()\n # calculate rms\n rms = np.sqrt(mean_squared_error(Ypredict, Ydata))\n # graph outputs\n ax.scatter(Ydata, Ypredict, s=7, color=colors[dataset], label= datasets[dataset], lw = 0)\n ax.text(.05, .83 - .05*dataset, '{} RMS: {:.3f}'.format(datasets[dataset],rms), fontsize=14, transform=ax.transAxes)\n\n ax.legend()\n ax.plot(ax.get_ylim(), ax.get_ylim(), ls=\"--\", c=\".3\")\n ax.set_xlabel('Measured (MPa)')\n ax.set_ylabel('Predicted (MPa)')\n ax.set_title('Full Fit')\n ax.text(.05, .88, 'Overall RMS: %.4f' % (overall_rms), fontsize=14, transform=ax.transAxes)\n fig.savefig(savepath.format(ax.get_title()), dpi=300, bbox_inches='tight')\n plt.clf()\n plt.close()\n","sub_path":"FullFit.py","file_name":"FullFit.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319972992","text":"from setuptools import setup\nimport snimpy\n\ntry:\n import multiprocessing\n import pysnmp\nexcept ImportError:\n pass\n\nif __name__ == \"__main__\":\n # MIB module\n try:\n import snimpy.mib\n ext_modules = [ snimpy.mib.ffi.verifier.get_extension() ]\n except ImportError:\n ext_modules = []\n\n readme = open('README.rst').read()\n history = open('HISTORY.rst').read().replace('.. 
:changelog:', '')\n\n    setup(name=\"snimpy\",\n          version=snimpy.__version__,\n          classifiers = [\n            'Development Status :: 4 - Beta',\n            'Environment :: Console',\n            'Intended Audience :: System Administrators',\n            'License :: OSI Approved :: ISC License (ISCL)',\n            'Operating System :: POSIX',\n            'Programming Language :: Python :: 2',\n            'Programming Language :: Python :: 3',\n            'Topic :: System :: Networking',\n            'Topic :: Utilities',\n            'Topic :: System :: Monitoring'\n            ],\n          url='https://github.com/vincentbernat/snimpy',\n          description=snimpy.__doc__,\n          long_description=readme + '\n\n' + history,\n          author=snimpy.__author__,\n          author_email=snimpy.__email__,\n          packages=[\"snimpy\"],\n          entry_points = {\n              'console_scripts': [\n                  'snimpy = snimpy.main:interact',\n              ],\n          },\n          data_files = [('share/man/man1', ['man/snimpy.1'])],\n          ext_modules = ext_modules,\n          zip_safe = False,\n          install_requires = [ \"cffi\", \"pysnmp >= 4\" ],\n          setup_requires = [ \"cffi\" ],\n          tests_require = [ \"cffi\", \"pysnmp >= 4\", \"nose\", \"mock\" ],\n          test_suite=\"nose.collector\"\n          )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"208990547","text":"import os\n\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.sql import desc\n\nfrom flask import Flask, request\nfrom flask_restful import Resource\nfrom flask_jwt_extended import jwt_required\n\nfrom connection import Base, engine\nfrom models import User, Post, Tag, Comment\n\n\nclass postsAPI(Resource):\n    @jwt_required\n    def get(self):\n        session = sessionmaker(bind=engine)()\n        posts = []\n        for post in session.query(Post).order_by(desc(Post.created_at)):\n            posts.append(post.toJson())\n        return posts\n\n    @jwt_required\n    def post(self):\n        session = sessionmaker(bind=engine)()\n        data = request.json\n\n        tags = []\n        for etiqueta in data['tags']:\n            tag = session.query(Tag).filter(\n                Tag.name == etiqueta['name']).first()\n\n            if tag is None:\n                tag = Tag().fromJson(etiqueta)\n                session.add(tag)\n            tags.append(tag)\n\n        comments = []\n        for comentario in data['comments']:\n            comment = Comment().fromJson(comentario)\n            session.add(comment)\n            comments.append(comment)\n\n        post = Post().fromJson(data)\n        post.tags = tags\n        post.comments = comments\n\n        session.add(post)\n        session.commit()\n\n        return post.toJson()\n\n\nclass postsTagAPI(Resource):\n    @jwt_required\n    def get(self, tag):\n        session = sessionmaker(bind=engine)()\n        posts = []\n        for post in session.query(Post).filter(Tag.name == tag):\n            posts.append(post.toJson())\n        return posts\n\n\nclass postsIdAPI(Resource):\n    @jwt_required\n    def get(self, id):\n        session = sessionmaker(bind=engine)()\n        post = session.query(Post).filter(Post.id == id).one_or_none()\n\n        if post is not None:\n            return post.toJson()\n        else:\n            return {}\n\n    @jwt_required\n    def put(self, id):\n        session = sessionmaker(bind=engine)()\n        data = request.json\n        post = session.query(Post).filter(\n            Post.id == id).one_or_none()\n\n        if post is not None:\n            post.text = data['text']\n            post.likes = data['likes']\n            session.commit()\n\n            return post.toJson()\n        else:\n            return {}\n\n    @jwt_required\n    def delete(self, id):\n        session = sessionmaker(bind=engine)()\n        post = session.query(Post).filter(Post.id == id).first()\n        session.delete(post)\n        session.commit()\n        return {'success': 'true'}\n\n\nclass likeAPI(Resource):\n    @jwt_required\n    def post(self, id):\n        session = sessionmaker(bind=engine)()\n        post = 
session.query(Post).filter(Post.id == id).first()\n post.likes = post.likes + 1\n session.commit()\n return {'success': 'true'}\n\n @jwt_required\n def delete(self, id):\n session = sessionmaker(bind=engine)()\n post = session.query(Post).filter(Post.id == id).first()\n post.likes = post.likes - 1\n session.commit()\n return {'success': 'true'}\n","sub_path":"controllers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444608814","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm, UserUpdateForm\nfrom django.contrib.auth.decorators import login_required\n\n# reate your views here.\ndef register(request):\n\tif request.method == 'POST':\n\t\tform = UserRegisterForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tmessages.success(request, f'Your account has been created! You are now able to Login {username}!')\n\t\t\treturn redirect('login')\n\telse:\n\t\tform = UserRegisterForm()\n\treturn render(request, 'users/register.html', {'form': form})\n\n@login_required\ndef account(request):\n\tif request.method == 'POST':\n\t\tu_form = UserUpdateForm(request.POST, instance=request.user)\n\t\tif u_form.is_valid():\n\t\t\tu_form.save()\n\t\t\tmessages.success(request, f'Your account has been updated!')\n\t\t\treturn redirect('account')\n\telse:\n\t\tu_form = UserUpdateForm(instance=request.user)\n\tcontext = {\n 'u_form': u_form,\n }\n\n\treturn render(request, 'users/account.html', context)\n\n# Create your views here.\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273666747","text":"#!/usr/bin/env python3 -B\nimport unittest\n\nfrom cromulent import vocab\n\nfrom tests import TestSalesPipelineOutput, classified_identifier_sets\n\nvocab.add_attribute_assignment_check()\n\nclass PIRModelingTest_AR128(TestSalesPipelineOutput):\n def test_modeling_ar128(self):\n '''\n AR-128: Link prov entry to object set\n '''\n output = self.run_pipeline('ar128')\n activities = output['model-activity']\n \n tx = activities['tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#PROV,B-A136,1773-07-20,0090']\n self.assertIn('used_specific_object', tx)\n obj_sets = tx['used_specific_object']\n self.assertEquals(len(obj_sets), 1)\n obj_set = obj_sets[0]\n self.assertEquals(obj_set['_label'], 'Lot B-A136 0090')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_sales_issue_ar128.py","file_name":"test_sales_issue_ar128.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562971167","text":"import re\nfrom collections import namedtuple\n\nimport polib\n\nfrom dennis.tools import VariableTokenizer, parse_dennis_note\n\n\nTranslatedString = namedtuple(\n 'TranslatedString',\n ('msgid_fields', 'msgid_strings', 'msgstr_field', 'msgstr_string'))\n\n\nclass LintedEntry(object):\n def __init__(self, poentry):\n self.poentry = poentry\n self.msgid = poentry.msgid\n\n strs = []\n\n if not poentry.msgid_plural:\n strs.append(\n TranslatedString(\n ['msgid'], [poentry.msgid], 'msgstr', poentry.msgstr))\n\n else:\n msgid_fields = ('msgid', 'msgid_plural')\n msgid_strings = (poentry.msgid, 
poentry.msgid_plural)\n\n            for key in sorted(poentry.msgstr_plural.keys()):\n                strs.append(\n                    TranslatedString(\n                        msgid_fields,\n                        msgid_strings,\n                        'msgstr[{0}]'.format(key),\n                        poentry.msgstr_plural[key]))\n\n        # List of (msgid fields, msgid strings, msgstr field, msgstr\n        # string) namedtuples\n        self.strs = strs\n\n        self.warnings = []\n        self.errors = []\n\n    def add_warning(self, code, trstr, msg):\n        self.warnings.append((code, trstr, msg))\n\n    def add_error(self, code, trstr, msg):\n        self.errors.append((code, trstr, msg))\n\n    def has_problems(self, errorsonly=False):\n        if errorsonly:\n            return bool(self.errors)\n        return bool(self.warnings or self.errors)\n\n\nclass LintRule(object):\n    num = ''\n    name = ''\n    desc = ''\n\n    def lint(self, vartok, linted_entry):\n        \"\"\"Takes a linted entry and adds errors and warnings\n\n        :arg vartok: the variable tokenizer to use for tokenizing\n            on variable tokens\n        :arg linted_entry: the LintedEntry to work on\n\n        \"\"\"\n        raise NotImplementedError\n\n\nclass MalformedNoTypeLintRule(LintRule):\n    num = 'E101'\n    name = 'notype'\n    desc = '%(count) with no type at the end'\n\n    def lint(self, vartok, linted_entry):\n        # This only applies if one of the variable tokenizers\n        # is PythonPercentVar.\n        if not vartok.contains('pysprintf'):\n            return\n\n        malformed_re = re.compile(r'(?:%[\\(][^\\)\\s]+[\\)](?:\\s|$))')\n\n        for trstr in linted_entry.strs:\n            if not trstr.msgstr_string:\n                continue\n\n            malformed = malformed_re.findall(trstr.msgstr_string)\n            if not malformed:\n                continue\n\n            malformed = [item.strip() for item in malformed]\n            linted_entry.add_error(\n                self.num,\n                trstr,\n                u'type missing: {0}'.format(u', '.join(malformed)))\n\n\nclass MalformedMissingRightBraceLintRule(LintRule):\n    num = 'E102'\n    name = 'missingrightbrace'\n    desc = '{foo with missing }'\n\n    def lint(self, vartok, linted_entry):\n        # This only applies if one of the variable tokenizers\n        # is PythonFormatVar.\n        if not vartok.contains('pyformat'):\n            return\n\n        malformed_re = re.compile(r'(?:\\{[^\\}]+(?:\\{|$))')\n\n        for trstr in linted_entry.strs:\n            if not trstr.msgstr_string:\n                continue\n\n            malformed = malformed_re.findall(trstr.msgstr_string)\n            if not malformed:\n                continue\n\n            malformed = [item.strip() for item in malformed]\n            linted_entry.add_error(\n                self.num,\n                trstr,\n                u'missing right curly-brace: {0}'.format(\n                    u', '.join(malformed)))\n\n\nclass MalformedMissingLeftBraceLintRule(LintRule):\n    num = 'E103'\n    name = 'missingleftbrace'\n    desc = 'foo} with missing {'\n\n    def lint(self, vartok, linted_entry):\n        # This only applies if one of the variable tokenizers\n        # is PythonFormatVar.\n        if not vartok.contains('pyformat'):\n            return\n\n        malformed_re = re.compile(r'(?:(?:^|\\})[^\\{]+\\})')\n\n        for trstr in linted_entry.strs:\n            if not trstr.msgstr_string:\n                continue\n\n            malformed = malformed_re.findall(trstr.msgstr_string)\n            if not malformed:\n                continue\n\n            malformed = [item.strip() for item in malformed]\n            linted_entry.add_error(\n                self.num,\n                trstr,\n                u'missing left curly-brace: {0}'.format(\n                    u', '.join(malformed)))\n\n\nclass MissingVarsLintRule(LintRule):\n    num = 'W202'\n    name = 'missingvars'\n    desc = 'Checks for variables in msgid, but missing in msgstr'\n\n    def lint(self, vartok, linted_entry):\n        for trstr in linted_entry.strs:\n            if not trstr.msgstr_string:\n                continue\n\n            msgid_tokens = vartok.extract_tokens(' '.join(trstr.msgid_strings))\n            msgstr_tokens = vartok.extract_tokens(trstr.msgstr_string)\n\n            missing = msgid_tokens.difference(msgstr_tokens)\n\n            if missing:\n                linted_entry.add_warning(\n                    
self.num,\n trstr,\n u'missing variables: {0}'.format(\n u', '.join(sorted(missing))))\n\n\nclass InvalidVarsLintRule(LintRule):\n num = 'E201'\n name = 'invalidvars'\n desc = 'Checks for variables not in msgid, but in msgstr'\n\n def lint(self, vartok, linted_entry):\n for trstr in linted_entry.strs:\n if not trstr.msgstr_string:\n continue\n\n msgid_tokens = vartok.extract_tokens(' '.join(trstr.msgid_strings))\n msgstr_tokens = vartok.extract_tokens(trstr.msgstr_string)\n\n invalid = msgstr_tokens.difference(msgid_tokens)\n\n if invalid:\n linted_entry.add_error(\n self.num,\n trstr,\n u'invalid variables: {0}'.format(\n u', '.join(sorted(invalid))))\n\n\ndef get_available_lint_rules(name_and_num=False):\n lint_rules = {}\n\n for name, thing in globals().items():\n try:\n if issubclass(thing, LintRule) and thing.num:\n lint_rules[thing.num] = thing\n if name_and_num:\n lint_rules[thing.name] = thing\n except TypeError:\n pass\n\n return lint_rules\n\n\nclass InvalidRulesSpec(Exception):\n pass\n\n\ndef convert_rules(rules_spec):\n # This removes empty strings from the rules_spec.\n rules_spec = [rule for rule in rules_spec if rule]\n\n if not rules_spec:\n lint_rules = get_available_lint_rules()\n return [rule() for num, rule in lint_rules.items()]\n\n try:\n lint_rules = get_available_lint_rules(name_and_num=True)\n rules = [lint_rules[rule]() for rule in rules_spec]\n except KeyError:\n raise InvalidRulesSpec(rules_spec)\n\n return rules\n\n\nclass Linter(object):\n def __init__(self, vars_, rules_spec):\n self.vartok = VariableTokenizer(vars_)\n self.rules_spec = rules_spec\n self.rules = convert_rules(self.rules_spec)\n\n def lint_poentry(self, poentry):\n linted_entry = LintedEntry(poentry)\n\n skip = parse_dennis_note(poentry.comment)\n\n # Check the comment to see if what we should ignore.\n for lint_rule in self.rules:\n if skip == '*' or lint_rule.num in skip:\n continue\n\n lint_rule.lint(self.vartok, linted_entry)\n\n return linted_entry\n\n def verify_file(self, filename_or_string):\n \"\"\"Verifies strings in file.\n\n :arg filename_or_string: filename to verify or the contents of\n a pofile as a string\n\n :returns: list of LintedEntry objects each with errors and\n warnings\n\n :raises IOError: if the file is not a valid .po file or\n doesn't exist\n \"\"\"\n po = polib.pofile(filename_or_string)\n return [\n self.lint_poentry(entry) for entry in po.translated_entries()\n ]\n","sub_path":"dennis/linter.py","file_name":"linter.py","file_ext":"py","file_size_in_byte":7996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553049970","text":"\"\"\"lunar URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\n# Python imports\n# Django imports\nfrom django.contrib import admin\nfrom django.urls import include, path\n# from django.conf.urls import include\nfrom django.conf.urls.i18n import i18n_patterns\n# Third-Party imports\n# Project imports\nfrom lunarapp import views as lunarviews\nfrom termsconditions import views as termsconditions_views\n\nurlpatterns = [\n path('i18n/', include('django.conf.urls.i18n')),\n path('admin/', admin.site.urls),\n path('health/', lunarviews.HealthView.as_view(), name='health'),\n path('accounts/login', include('django.contrib.auth.urls'), name='login'),\n path('accounts/logout', include('django.contrib.auth.urls'), name='logout'),\n path('unsubscribe/<unsubscribe_token>',\n lunarviews.Unsubscribe.as_view(), name='unsubscribe'),\n path('captcha/', include('captcha.urls')),\n]\n\nurlpatterns += i18n_patterns(\n path('', lunarviews.IndexView.as_view(), name='home'),\n path('termsandconditions/', termsconditions_views.terms, name='terms'),\n path('privacypolicy/', termsconditions_views.privacy, name='terms'),\n path('tell-a-friend/', lunarviews.TellAFriendView.as_view(), name='tellafriend'),\n path('monthly-lunar-horoscope/<horoscope_id>',\n lunarviews.MonthlyHoroscope.as_view(), name='monthly_horoscope'),\n)\n","sub_path":"lunar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652597071","text":"from sympy import *\n\n\"\"\"\nPDF Page 32, Chapter V, Example 7\n\nIf lt and l0 be the lengths of a rod of iron at the temperatures\nt Centigrades. and 0 Centigrades. respectively, then lt = l0 (1+0.000012t). \nFind the change of length of the rod per degree Centigrade.\n\"\"\"\n\nl0 = Symbol('l0')\nlt = Symbol('lt')\nt = Symbol('t')\n\nexpr = l0*(1+Float(0.000012)*t)\nresult = Derivative(expr, t).doit() # 1.2e-5*l0\n\n","sub_path":"exercises/02_7.py","file_name":"02_7.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232979111","text":"import setuptools\r\n\r\nwith open(\"README.md\", \"r\") as fh:\r\n long_description = fh.read()\r\nfh.close()\r\n\r\nsetuptools.setup(\r\n name=\"PyInsta-Scrape\",\r\n version=\"1.0\",\r\n author=\"Akash Pattnaik\",\r\n author_email=\"akashjio66666@gmail.com\",\r\n description=\"A Python Package Which Scrapes Instagram Data\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"https://github.com/BLUE-DEVIL1134/PyInsta\",\r\n packages=setuptools.find_packages(),\r\n classifiers=[\r\n \"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n python_requires='>=3.6',\r\n)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538191781","text":"# dataset settings\ndataset_type = 'REFUGEDataset'\ndata_root = 'data/REFUGE'\ntrain_img_scale = (2056, 2124)\nval_img_scale = (1634, 1634)\ntest_img_scale = (1634, 1634)\ncrop_size = (512, 512)\n\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', reduce_zero_label=False),\n dict(\n type='RandomResize',\n scale=train_img_scale,\n ratio_range=(0.5, 2.0),\n keep_ratio=True),\n dict(type='RandomCrop', 
crop_size=crop_size, cat_max_ratio=0.75),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PhotoMetricDistortion'),\n    dict(type='PackSegInputs')\n]\nval_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='Resize', scale=val_img_scale, keep_ratio=True),\n    # add loading annotation after ``Resize`` because ground truth\n    # does not need to do resize data transform\n    dict(type='LoadAnnotations', reduce_zero_label=False),\n    dict(type='PackSegInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='Resize', scale=test_img_scale, keep_ratio=True),\n    # add loading annotation after ``Resize`` because ground truth\n    # does not need to do resize data transform\n    dict(type='LoadAnnotations', reduce_zero_label=False),\n    dict(type='PackSegInputs')\n]\nimg_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]\ntta_pipeline = [\n    dict(type='LoadImageFromFile', backend_args=dict(backend='local')),\n    dict(\n        type='TestTimeAug',\n        transforms=[\n            [\n                dict(type='Resize', scale_factor=r, keep_ratio=True)\n                for r in img_ratios\n            ],\n            [\n                dict(type='RandomFlip', prob=0., direction='horizontal'),\n                dict(type='RandomFlip', prob=1., direction='horizontal')\n            ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]\n        ])\n]\ntrain_dataloader = dict(\n    batch_size=4,\n    num_workers=4,\n    persistent_workers=True,\n    sampler=dict(type='InfiniteSampler', shuffle=True),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        data_prefix=dict(\n            img_path='images/training', seg_map_path='annotations/training'),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=4,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        data_prefix=dict(\n            img_path='images/validation',\n            seg_map_path='annotations/validation'),\n        pipeline=val_pipeline))\ntest_dataloader = dict(\n    batch_size=1,\n    num_workers=4,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        data_prefix=dict(\n            img_path='images/test', seg_map_path='annotations/test'),\n        pipeline=val_pipeline))\n\nval_evaluator = dict(type='IoUMetric', iou_metrics=['mDice'])\ntest_evaluator = val_evaluator\n","sub_path":"configs/_base_/datasets/refuge.py","file_name":"refuge.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"461844176","text":"\"\"\"\n@Author : liuwei\n@Version : 0.0.1\n------------------------------------\n@File : settings.py.py\n@Description : \n@CreateTime : 2020/3/14 19:48\n\"\"\"\nPORTAL_SETTINGS = {\n\t\"settings\": {\n\t\t\"analysis\": {\n\t\t\t\"filter\": {\n\t\t\t\t\"edge_ngram_filter\": {\n\t\t\t\t\t\"type\": \"edge_ngram\",\n\t\t\t\t\t\"min_gram\": 1,\n\t\t\t\t\t\"max_gram\": 50\n\t\t\t\t},\n\t\t\t\t\"pinyin_simple_filter\": {\n\t\t\t\t\t\"type\": \"pinyin\",\n\t\t\t\t\t\"keep_first_letter\": True,\n\t\t\t\t\t\"keep_separate_first_letter\": False,\n\t\t\t\t\t\"keep_full_pinyin\": False,\n\t\t\t\t\t\"keep_original\": False,\n\t\t\t\t\t\"limit_first_letter_length\": 50,\n\t\t\t\t\t\"lowercase\": True\n\t\t\t\t},\n\t\t\t\t\"pinyin_full_filter\": {\n\t\t\t\t\t\"type\": \"pinyin\",\n\t\t\t\t\t\"keep_first_letter\": False,\n\t\t\t\t\t\"keep_separate_first_letter\": False,\n\t\t\t\t\t\"keep_full_pinyin\": True,\n\t\t\t\t\t\"none_chinese_pinyin_tokenize\": True,\n\t\t\t\t\t\"keep_original\": False,\n\t\t\t\t\t\"limit_first_letter_length\": 
50,\n\t\t\t\t\t\"lowercase\": True\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"analyzer\": {\n\t\t\t\t\"ngramIndexAnalyzer\": {\n\t\t\t\t\t\"type\": \"custom\",\n\t\t\t\t\t\"tokenizer\": \"keyword\",\n\t\t\t\t\t\"filter\": [\"edge_ngram_filter\", \"lowercase\"]\n\t\t\t\t},\n\t\t\t\t\"ngramSearchAnalyzer\": {\n\t\t\t\t\t\"type\": \"custom\",\n\t\t\t\t\t\"tokenizer\": \"keyword\",\n\t\t\t\t\t\"filter\": [\"lowercase\"]\n\t\t\t\t},\n\t\t\t\t\"pinyiSimpleIndexAnalyzer\": {\n\t\t\t\t\t\"tokenizer\": \"keyword\",\n\t\t\t\t\t\"filter\": [\"pinyin_simple_filter\", \"edge_ngram_filter\", \"lowercase\"]\n\t\t\t\t},\n\t\t\t\t\"pinyiSimpleSearchAnalyzer\": {\n\t\t\t\t\t\"tokenizer\": \"keyword\",\n\t\t\t\t\t\"filter\": [\"pinyin_simple_filter\", \"lowercase\"]\n\t\t\t\t},\n\t\t\t\t\"pinyiFullIndexAnalyzer\": {\n\t\t\t\t\t\"tokenizer\": \"keyword\",\n\t\t\t\t\t\"filter\": [\"pinyin_full_filter\", \"lowercase\"]\n\t\t\t\t},\n\t\t\t\t\"pinyiFullSearchAnalyzer\": {\n\t\t\t\t\t\"tokenizer\": \"keyword\",\n\t\t\t\t\t\"filter\": [\"pinyin_full_filter\", \"lowercase\"]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t},\n\t\"mappings\": {\n\t\t\"properties\": {\n\t\t\t\"id\": {\n\t\t\t\t\"boost\": 1.0,\n\t\t\t\t\"index\": False,\n\t\t\t\t\"store\": True,\n\t\t\t\t\"type\": \"keyword\"\n\t\t\t},\n\t\t\t\"title\": {\n\t\t\t\t\"boost\": 1.0,\n\t\t\t\t\"index\": True,\n\t\t\t\t\"store\": True,\n\t\t\t\t\"type\": \"text\",\n\t\t\t\t\"analyzer\": \"ik_max_word\",\n\t\t\t\t\"search_analyzer\": \"ik_smart\",\n\t\t\t\t\"term_vector\": \"with_positions_offsets\"\n\t\t\t},\n\t\t\t\"url\": {\n\t\t\t\t\"boost\": 1.0,\n\t\t\t\t\"index\": True,\n\t\t\t\t\"store\": True,\n\t\t\t\t\"type\": \"text\",\n\t\t\t\t\"analyzer\": \"ik_max_word\",\n\t\t\t\t\"search_analyzer\": \"ik_smart\",\n\t\t\t\t\"term_vector\": \"with_positions_offsets\"\n\t\t\t},\n\t\t\t\"desc\": {\n\t\t\t\t\"boost\": 1.0,\n\t\t\t\t\"index\": True,\n\t\t\t\t\"store\": True,\n\t\t\t\t\"type\": \"text\",\n\t\t\t\t\"analyzer\": \"ik_max_word\",\n\t\t\t\t\"search_analyzer\": \"ik_smart\",\n\t\t\t\t\"term_vector\": \"with_positions_offsets\"\n\t\t\t},\n\t\t\t\"permissions\": {\n\t\t\t\t\"type\": \"text\",\n\t\t\t\t\"store\": True,\n\t\t\t\t\"index\": True\n\t\t\t},\n\t\t\t\"breadcrumb\": {\n\t\t\t\t\"type\": \"nested\",\n\t\t\t\t\"properties\": {\n\t\t\t\t\t\"id\": {\n\t\t\t\t\t\t\"index\": False,\n\t\t\t\t\t\t\"store\": True,\n\t\t\t\t\t\t\"type\": \"keyword\"\n\t\t\t\t\t},\n\t\t\t\t\t\"title\": {\n\t\t\t\t\t\t\"store\": True,\n\t\t\t\t\t\t\"index\": True,\n\t\t\t\t\t\t\"type\": \"keyword\"\n\t\t\t\t\t},\n\t\t\t\t\t\"url\": {\n\t\t\t\t\t\t\"store\": False,\n\t\t\t\t\t\t\"index\": False,\n\t\t\t\t\t\t\"type\": \"keyword\"\n\t\t\t\t\t},\n\t\t\t\t\t\"desc\": {\n\t\t\t\t\t\t\"store\": False,\n\t\t\t\t\t\t\"index\": False,\n\t\t\t\t\t\t\"type\": \"text\"\n\t\t\t\t\t},\n\t\t\t\t\t\"permissions\": {\n\t\t\t\t\t\t\"type\": \"text\",\n\t\t\t\t\t\t\"store\": False,\n\t\t\t\t\t\t\"index\": False\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}","sub_path":"builder/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
os.path.exists(filename):\n with open(filename, 'w', encoding='utf8') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=HEADER)\n writer.writeheader()\n return None\n\n\ndef read_csv():\n \"\"\"\n Opens and reads 'work_log.csv'\n :return: List of OrderDicts\n \"\"\"\n with open(filename, encoding='utf8') as infile:\n data = [row for row in csv.DictReader(infile)]\n return data\n\n\ndef write_csv(data):\n \"\"\"\n Complete re-write of 'work_log.csv' with updated data\n :param data: List of OrderDicts\n :return: None\n \"\"\"\n with open(filename, 'w', encoding='utf8', newline='') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=HEADER)\n writer.writerows(data)\n return None\n\n\ndef append_csv(task):\n \"\"\"\n Appends a task to the end of 'work_log.csv'\n :param task: Dict\n :return: None\n \"\"\"\n with open(filename, 'a', encoding='utf8', newline='') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=HEADER)\n writer.writerow(task)\n return None\n\n\ndef clear():\n \"\"\"Clear the screen\"\"\"\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef exit_program():\n \"\"\"Exit work log\"\"\"\n # add any clean up that needs to happen here\n sys.exit()\n\n\ndef work_log_by_date():\n \"\"\"\n Gets data from 'work_log.csv' iterates through the List of dicts creating a\n new dict with the dates as the key and list of tasks completed on that date\n as the value\n :return: dict\n \"\"\"\n data = read_csv()\n log = dict()\n for task in data:\n if task['date'] in log:\n log[task['date']].append(task)\n else:\n log[task['date']] = [task]\n return log\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"589843346","text":"#!/usr/bin/env python\n\n\"\"\"9anime common code\"\"\"\n\n# general constants\nTITLE = '9anime'\nPREFIX = '/video/' + TITLE\nBASE_URL = 'http://{}.to'.format(TITLE)\nLIST_VIEW_CLIENTS = ['Android', 'iOS']\n\n####################################################################################################\ndef ParseVersion(version):\n try:\n return tuple(map(int, (version.split('.'))))\n except:\n return version\n","sub_path":"Contents/Code/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328596922","text":"def tosurfix(str):\n stack = []\n res = \"\"\n for i in str:\n if isFH(i):\n if i == \")\":\n while stack[-1] != \"(\":\n res += stack.pop(-1)\n stack.pop(-1)\n else:\n while len(stack) != 0 and getStack(stack[-1]) >= getOut(i):\n res += stack.pop(-1)\n stack.append(i)\n else:\n res += i\n while len(stack) != 0:\n res += stack.pop(-1)\n print(res)\n\n\ndef isFH(ch):\n temp = [\"+\", \"-\", \"*\", \"/\", \"(\", \")\", \"^\"]\n return ch in temp\n\n\ndef getOut(ch):\n if ch == \"+\" or ch == \"-\":\n return 1\n elif ch == \"*\" or ch == \"/\":\n return 2\n elif ch == \"^\":\n return 3\n elif ch == \"(\":\n return 4\n\n\ndef getStack(ch):\n if ch == \"+\" or ch == \"-\":\n return 1\n elif ch == \"*\" or ch == \"/\":\n return 2\n elif ch == \"^\":\n return 3\n elif ch == \"(\":\n return 0\n\n\nnum = int(input())\nfor j in range(num):\n infix = list(input())\n tosurfix(infix)\n","sub_path":"Code/CodeRecords/2172/60769/259778.py","file_name":"259778.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"450654535","text":"N = int(input())\ndx = [1,2,2,1,-1,-2,-2,-1]\ndy = [2,1,-1,-2,-2,-1,1,2]\n\ndef bfs(st, de):\n queue = [st]\n time = [[0]*L for _ in range(L)]\n time[st[0]][st[1]] = 1\n while queue:\n node = queue.pop(0)\n if node==de:\n return time[de[0]][de[1]]\n \n for x,y in zip(dx,dy):\n ny = node[0]+y\n nx = node[1]+x\n if 0<=nx<L and 0<=ny<L and not time[ny][nx]:\n queue.append([ny,nx])\n time[ny][nx] = time[node[0]][node[1]]+1 \n\n\nfor i in range(N):\n L = int(input())\n st = list(map(int,input().split()))\n de = list(map(int,input().split()))\n print(bfs(st,de)-1)\n \n\n ","sub_path":"python_algorithm/Baekjoon/silver/7562.py","file_name":"7562.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74926961","text":"from os import listdir\nfrom os.path import join\n\nignore = {\"Ancient_Greek\", \"German\", \"Ancient_Greek-PROIEL\", \"Estonian\",\n \"Finnish-FTB\", \"French\", \"German\", \"Hungarian\", \"Indonesian\",\n \"Irish\", \"Japanese-KTC\", \"Latin\", \"Latin-ITT\", \"Latin-PROIEL\",\n \"Romanian\", \"Tamil\", \"Basque\", \"Dutch\", \"Arabic\", \"Croatian\", \"English\", \"Gothic\", \"Greek\", \"Hebrew\", \"Hindi\", \"Italian\", \"Old_Church_Slavonic\", \"Persian\", \"Polish\", \"Slovenian\", \"Bulgarian\", \"Danish\", \"Finnish\", \"Polish\", \"Portuguese\", \"Spanish\", \"Swedish\"}\n\nudname2lang = {\"UD_Ancient_Greek\": \"Ancient_Greek\",\n \"UD_Danish\": \"Danish\",\n \"UD_German\": \"German\",\n \"UD_Irish\": \"Irish\",\n \"UD_Old_Church_Slavonic\": \"Old_Church_Slavonic\",\n \"UD_Ancient_Greek-PROIEL\": \"Ancient_Greek-PROIEL\",\n \"UD_Dutch\": \"Dutch\",\n \"UD_Gothic\": \"Gothic\",\n \"UD_Italian\": \"Italian\",\n \"UD_Persian\": \"Persian\",\n \"UD_Arabic\": \"Arabic\",\n \"UD_English\": \"English\",\n \"UD_Greek\": \"Greek\",\n \"UD_Japanese-KTC\": \"Japanese-KTC\",\n \"UD_Basque\": \"Basque\",\n \"UD_Estonian\": \"Estonian\",\n \"UD_Hebrew\": \"Hebrew\",\n \"UD_Latin\": \"Latin\",\n \"UD_Bulgarian\": \"Bulgarian\",\n \"UD_Finnish\": \"Finnish\",\n \"UD_Hindi\": \"Hindi\",\n \"UD_Latin-ITT\": \"Latin-ITT\",\n \"UD_Croatian\": \"Croatian\",\n \"UD_Finnish-FTB\": \"Finnish-FTB\",\n \"UD_Hungarian\": \"Hungarian\",\n \"UD_Latin-PROIEL\": \"Latin-PROIEL\",\n \"UD_Czech\": \"Czech\",\n \"UD_French\": \"French\",\n \"UD_Indonesian\": \"Indonesian\",\n \"UD_Norwegian\": \"Norwegian\",\n \"UD_Portuguese\": \"Portuguese\",\n \"UD_Polish\": \"Polish\",\n \"UD_Romanian\": \"Romanian\",\n \"UD_Slovenian\": \"Slovenian\",\n \"UD_Tamil\": \"Tamil\",\n \"UD_Spanish\": \"Spanish\",\n \"UD_Swedish\": \"Swedish\"}\n\nlang2udname = {y: x for x,y in udname2lang.items()}\n\nlang2code = {\"Ancient_Greek\": \"grc\",\n \"Ancient_Greek-PROIEL\": \"grc_proiel\",\n \"Old_Church_Slavonic\": \"cu\",\n \"Gothic\": \"got\",\n \"Arabic\": \"ar\",\n \"Japanese-KTC\": \"ja_ktc\",\n \"Latin\": \"la\",\n \"Hindi\": \"hi\",\n \"Latin-ITT\": \"la_itt\",\n \"Latin-PROIEL\": \"la_proiel\",\n \"Norwegian\": \"no\",\n \"Dutch\": \"nl\",\n \"Estonian\": \"et\",\n \"Basque\": \"eu\",\n \"Croatian\": \"hr\",\n \"Danish\": \"da\",\n \"Finnish\": \"fi\",\n \"French\": \"fr\",\n \"Greek\": \"el\",\n \"Hungarian\": \"hu\",\n \"Irish\": \"ga\",\n \"Persian\": \"fa\",\n \"Swedish\": \"sv\",\n \"Bulgarian\": \"bg\",\n \"Czech\": \"cs\",\n \"English\": \"en\",\n \"Finnish-FTB\": \"fi_ftb\",\n \"German\": \"de\",\n \"Hebrew\": \"he\",\n \"Indonesian\": \"id\",\n \"Italian\": \"it\",\n \"Spanish\": \"es\",\n 
\"Portuguese\": \"pt\",\n \"Polish\": \"pl\",\n \"Romanian\": \"ro\",\n \"Slovenian\": \"sl\",\n \"Tamil\": \"ta\"\n }\n\ncode2lang = {y: x for x,y in lang2code.items()}\n\ndef get_ud_paths(base_path, type_, format_, coarse):\n assert type_ in {'dev', 'train', 'test'}\n assert format_ in {'conllu', 'conllx'}\n treebanks = [tb for tb in listdir(base_path) if not tb.startswith(\".\") and udname2lang[tb] not in ignore]\n coarse_grained_path = \"\"\n if coarse:\n coarse_grained_path = \".coarse_deprels\"\n path_format = lambda tb: join(base_path, tb, \"{}-ud-{}{}.{}\".format(lang2code[udname2lang[tb]], type_, coarse_grained_path, format_))\n tb_paths = {udname2lang[lang]: [path_format(lang)] for lang in treebanks}\n # this is such a hack, but i don't have time to do it correctly right now\n if format_ == \"conllu\" and type_ == \"train\":\n tb_paths[\"Czech\"] = [join(base_path, \"UD_Czech\", f) for f in ['cs-ud-train-c.conllu',\n 'cs-ud-train-m.conllu',\n 'cs-ud-train-v.conllu',\n 'cs-ud-train-l.conllu']]\n return tb_paths\n\ndef get_system_output_paths(base_path, type_, format_):\n assert type_ in {'dev', 'test'}\n assert format_ in {'conllu', 'conllx'}\n treebanks = [tb for tb in listdir(base_path) if not tb.startswith(\".\") and tb.endswith(format_) and tb.split(\".\")[0] not in ignore]\n return {lang.split(\".\")[0]: join(base_path, lang) for lang in treebanks}\n","sub_path":"tools/lang_utils.py","file_name":"lang_utils.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257471629","text":"class Solution:\n def longestCommonPrefix(self, strs):\n result = ''\n for x in zip(*strs):\n if len(set(x))==1:\n result +=list(set(x))[0]\n else:\n break\n return result\n\nanswer = Solution()\nprint(answer.longestCommonPrefix(['flower', 'flame', 'floor']))\n\n\n\n\n","sub_path":"practice_algorithm_test/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436151855","text":"# coding: utf-8\r\nfrom threading import Thread\r\nfrom pymongo import MongoClient\r\nfrom db import DATABASE\r\n\r\nthreads = []\r\n\r\n\r\npredlogi = ['без', 'безо', 'близ', 'в', 'во', 'вместо', 'вне', 'для', 'до', 'за', 'из', 'изо', 'к', 'ко',\r\n 'кроме', 'между', 'меж', 'на', 'над', 'надо', 'о', 'об', 'обо', 'от', 'ото', 'перед', 'передо',\r\n 'пред', 'предо', 'пo', 'под', 'подо', 'при', 'про', 'ради', 'с', 'со', 'сквозь', 'среди', 'у',\r\n 'через', 'чрез', 'и']\r\n\r\n\r\nokonchanie1 = ['ой', 'ую', 'юю', 'ого', 'ому', 'ым', 'ом', 'его', 'ему', 'им', 'ем', 'ого', 'ому',\r\n 'ое', 'ым', 'ом', 'его', 'ему', 'ее', 'им', 'ем', 'ых', 'ым', 'ыми', 'их', 'им', 'ими', 'ах',\r\n 'ях', 'ой', 'ою', 'ею', 'ом', 'ем', 'ами', 'ями', 'ая', 'яя', 'ый', 'ой', 'ий', 'ее',\r\n 'ые', 'ия', 'но', 'ов']\r\n\r\n\r\nokonchanie2 = ['ах', 'ях', 'ою', 'ею', 'ом', 'ем', 'ами', 'ями', 'ей', 'ую', 'юю', 'ого', 'ому', 'ым',\r\n 'ом', 'его', 'ему', 'им', 'ем', 'ого', 'ому', 'ым', 'ом', 'его', 'ему', 'им', 'ем', 'ых', 'ым',\r\n 'ыми', 'их', 'им', 'ими']\r\n\r\n\r\ndef analyzer(k):\r\n if len(k) <= 2:\r\n return\r\n w = k.split()\r\n if w[0][0].isdigit():\r\n return\r\n if w[0] in predlogi or w[len(w)-1] in predlogi:\r\n return\r\n if len(w) == 1 and any([w[0].endswith(i) for i in okonchanie1]):\r\n return\r\n if len(w) > 1 and any([w[0].endswith(i) for i in okonchanie2]):\r\n return\r\n else:\r\n return 
True\r\n\r\n\r\ndef saver(items):\r\n db1 = MongoClient(DATABASE)['allbiz_goods']\r\n for item in items:\r\n try:\r\n if len(item['words']) < 5:\r\n keys = item['all_keys']\r\n else:\r\n keys = list(set(item['all_keys']).union(set(item['some_keys'])))\r\n\r\n data = [{'_id': key} for key in keys if analyzer(key)]\r\n db1['keys'].insert(data, continue_on_error=True)\r\n except Exception:\r\n # skip malformed items instead of killing the worker thread\r\n continue\r\n\r\n\r\ndef run(buf):\r\n\r\n global threads\r\n if len(threads) <= 10:\r\n t = Thread(target=saver, args=(buf, ))\r\n t.start()\r\n threads.append(t)\r\n else:\r\n for t in threads:\r\n t.join()\r\n threads = []\r\n t = Thread(target=saver, args=(buf, ))\r\n t.start()\r\n threads.append(t)\r\n\r\n\r\ndef main():\r\n client = MongoClient(DATABASE)\r\n db = client['allbiz_goods']\r\n buf = []\r\n col = db.ukraine.find(no_cursor_timeout=True)[1466000:]\r\n for key in col:\r\n buf.append(key)\r\n if len(buf) >= 100:\r\n run(list(buf))\r\n buf = []\r\n print('100 goods added to analyze')\r\n\r\nif __name__ == '__main__':\r\n '''\r\n import cProfile\r\n import pstats\r\n cProfile.run('main()', 'restats')\r\n p = pstats.Stats('restats')\r\n p.sort_stats('time').print_stats(20)'''\r\n main()\r\n print('All Finished')\r\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"351295409","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 Joachim Muth <joachim.henri.muth@gmail.com>, Gael Lederrey <gael.lederrey@epfl.ch>,\n# Stefano Savare <stefano.savare@epfl.ch>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nCross Validation class for blending modeling.\n\n\nUSAGE FOR NEW CROSS-VALIDATOR:\n cv = CrossValidator()\n cv.new_validator(df, k) # define indices and ground truth\n cv.k_fold_predictions(df, als, 'als', **kwargs) # compute predictions for model\n cv.evaluate_model('als') # cross-validation on model\n (add more models)\n cv.evaluate_blending({'als': 0.4, 'mf_sgd': 0.6}) # cross-validation of a blending\n\nUSAGE FOR STORED MODEL PREDICTIONS:\n cv = CrossValidator()\n cv.load_indices() # load indices from folder 'CV/indices'\n model_names = ['slope_one_rescaled',\n 'movie_mean_deviation_user',\n 'knn_ib']\n cv.load_predictions(model_names) # load predictions from folder 'CV/'\n cv.define_ground_truth(df) # define the ground truth\n cv.evaluation_all_models()\n dic_blend = {'slope_one_rescaled': 0.038461538461538464,\n 'movie_mean_deviation_user': 0.038461538461538464,\n 'knn_ib': 0.038461538461538464}\n cv.evaluate_blending(dic_blend) # cross-validation of a blending\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport sys\nfrom helpers import create_folder, evaluate\nimport os\n\n\ndef elements_in_folder(folder):\n return len([name for name in os.listdir(folder)])\n\n\nclass CrossValidator:\n \"\"\"\n k-fold cross-validation helper for blending models.\n It splits the dataset into k folds, stores each model's per-fold predictions,\n and evaluates single models or weighted blends of them against the held-out folds.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Store internally :\n predictions_dictionary: dict of predictions per model (k fold predictions per model for CV)\n indices_dictionary: indices used for the k predictions (train and test)\n truth_dictionary: ground truth for the k predictions\n \"\"\"\n self.indices_dictionary = None\n self.truth_dictionary = None\n 
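# maps model name -> {fold index: DataFrame of predictions}; filled by k_fold_predictions() / load_predictions()\n 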
self.predictions_dictionary = {}\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \"\"\" CROSS VALIDATION \"\"\"\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n def new_validator(self, df, k, store=False):\n if store:\n self.indices_dictionary = self.define_indices_and_store(df, k)\n else:\n self.indices_dictionary = self.define_indices(df, k)\n\n self.truth_dictionary = self.define_ground_truth(df)\n self.predictions_dictionary = {}\n\n def define_indices(self, df, k):\n \"\"\" take a pandas.DataFrame and calculate k array of shuffled indices for cross-validation\"\"\"\n\n n = df.shape[0]\n cut = int(n / k)\n indices = list(range(n))\n\n np.random.seed(42)\n\n np.random.shuffle(indices)\n\n list_indices = []\n for i in range(k):\n list_indices.append(indices[cut * i: cut * (i + 1)])\n\n fold_dictionary = {}\n for i in range(k):\n train = np.array([x for j, x in enumerate(list_indices) if j != i]).flat\n train = list(train)\n train.sort()\n test = list_indices[i]\n test.sort()\n fold_dictionary[i] = {'train': train, 'test': test}\n\n self.indices_dictionary = fold_dictionary\n return fold_dictionary\n\n def k_fold_predictions(self, df, model, model_name, **kwargs):\n \"\"\"\n add a model to the predictions dictionary containing k pandas.DataFrame for each model\n\n Args:\n df (pandas.DataFrame): dataset, will be split according to indices_dictionary\n model (function): model to be used\n model_name (str): name given to the model in the dictionary\n **kwargs: arguments to be passed to the model function\n\n Returns:\n dict: key n_fold; value train/test dictionnary; sub_value: array of indices\n e.g. dict['name'][0]: prediction for first fold\n \"\"\"\n if self.indices_dictionary is None:\n print(\"[ERROR] first define fold_indices dictionary\")\n sys.exit()\n\n predictions_dict = {}\n for i in range(len(self.indices_dictionary)):\n train = df.loc[self.indices_dictionary[i]['train']].sort_index()\n test = df.loc[self.indices_dictionary[i]['test']].sort_index()\n\n predictions = model(train, test, **kwargs)\n predictions_dict[i] = predictions\n\n self.predictions_dictionary[model_name] = predictions_dict\n\n return predictions_dict\n\n def define_ground_truth(self, df):\n dic_truth = {}\n for i in self.indices_dictionary.keys():\n dic_truth[i] = df.loc[self.indices_dictionary[i]['test']]\n\n self.truth_dictionary = dic_truth\n return dic_truth\n\n def print_models(self):\n print(list(self.predictions_dictionary.keys()))\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \"\"\" EVALUATION \"\"\"\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n def evaluate_model(self, model_name):\n \"\"\" cross validation \"\"\"\n if model_name not in self.predictions_dictionary.keys():\n print(\"[ERROR] Model not defined in class: \", model_name)\n\n rmse = self.__inner_evaluate_model(self.predictions_dictionary[model_name])\n return rmse\n\n def __inner_evaluate_model(self, predictions_dict):\n if self.truth_dictionary is None:\n print(\"[ERROR] No ground truth dictionary defined\")\n sys.exit()\n\n rmse_list = []\n for i in predictions_dict.keys():\n pred = predictions_dict[i]\n truth = self.truth_dictionary[i]\n\n rmse = evaluate(pred, truth)\n rmse_list.append(rmse)\n return np.mean(rmse_list)\n\n def evaluation_all_models(self):\n for model_name in self.predictions_dictionary.keys():\n rmse = self.evaluate_model(model_name)\n print(\"RMSE for \", model_name, \" : \", rmse)\n\n 
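# Blending: per-fold weighted sum of several models' predictions; see blend() and evaluate_blending() below.\n\n 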
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \"\"\" BLENDING \"\"\"\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n def blend(self, weights):\n \"\"\" produce blended prediction with weights dictionary\"\"\"\n\n # initial predictions df\n random_name = list(self.predictions_dictionary.keys())[0]\n pred = {}\n\n # produce a new prediction DataFrame, based on any model DF (just in order to have indices)\n for i in self.predictions_dictionary[random_name].keys():\n pred[i] = pd.DataFrame.copy(self.predictions_dictionary[random_name][i])\n pred[i]['Rating'] = 0.0\n\n # add one by one weighted models\n for model_name in weights.keys():\n if model_name not in self.predictions_dictionary.keys():\n print(\"[WARNING] Model does not exist in class: \", model_name)\n else:\n for i in self.predictions_dictionary[model_name].keys():\n pred[i]['Rating'] += \\\n weights[model_name] * self.predictions_dictionary[model_name][i]['Rating']\n\n return pred\n\n def evaluate_blending(self, weights):\n \"\"\" cross-validate blended prediction \"\"\"\n if self.truth_dictionary is None:\n print(\"[ERROR] No ground truth dictionary defined\")\n\n blend_dict = self.blend(weights)\n\n rmse = self.__inner_evaluate_model(blend_dict)\n\n return rmse\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \"\"\" STORE AND LOAD \"\"\"\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n def k_fold_predictions_and_store(self, df, model, model_name, override, **kwargs):\n \"\"\" produce k-fold predictions AND store the prediction in files \"\"\"\n \n # check folder or create\n folder_name = './CV/' + model_name \n \n compute = True\n if not override:\n if os.path.isdir(folder_name):\n compute = False\n \n if compute:\n create_folder(folder_name)\n\n pred_dict = self.k_fold_predictions(df, model, model_name, **kwargs)\n\n for i in pred_dict.keys():\n file_name = folder_name + '/' + str(i) + '.csv'\n pred_dict[i].to_csv(file_name, index=False)\n\n def store_predictions(self):\n \"\"\" dump predictions_dictionary in file \"\"\"\n for model_name in self.predictions_dictionary.keys():\n folder_name = './CV/' + model_name\n create_folder(folder_name)\n\n for j in self.predictions_dictionary[model_name].keys():\n file_name = folder_name + '/' + str(j) + '.csv'\n self.predictions_dictionary[model_name][j].to_csv(file_name, index=False)\n\n def load_predictions(self, model_names):\n \"\"\" load models from list of name and add it (replace if already existing) to predictions_dictionary \"\"\"\n list_files = os.listdir('./CV/')\n for model_name in model_names:\n if model_name not in list_files:\n print(\"[ERROR] \" + model_name + \" does not exist\")\n sys.exit()\n\n n = elements_in_folder('./CV/' + model_name)\n pred_dict = {}\n for i in range(n):\n pred_dict[i] = pd.read_csv('./CV/' + model_name + \"/\" + str(i) + '.csv')\n\n self.predictions_dictionary[model_name] = pred_dict\n\n return self.predictions_dictionary\n\n def clean_predictions(self):\n \"\"\" clear all predictions \"\"\"\n self.predictions_dictionary = {}\n\n def define_indices_and_store(self, df, k):\n \"\"\" create indices_dictionary AND store it in file \"\"\"\n dic = self.define_indices(df, k)\n self.store_indices()\n\n return dic\n\n def store_indices(self):\n \"\"\" dump indices_dictionary in file \"\"\"\n folder_name = './CV/' + 'indices'\n create_folder(folder_name)\n\n for i in self.indices_dictionary.keys():\n file_name = folder_name + '/' + 
str(i)\n with open(file_name + '_train.csv', 'w') as file:\n for item in self.indices_dictionary[i]['train']:\n file.write(\"%s\\n\" % item)\n\n with open(file_name + '_test.csv', 'w') as file:\n for item in self.indices_dictionary[i]['test']:\n file.write(\"%s\\n\" % item)\n\n def load_indices(self):\n \"\"\" load indices and replace indices_dictionary by it \"\"\"\n # clear indices dictionary\n self.indices_dictionary = {}\n\n # simple version for working with CWD\n n = elements_in_folder('./CV/indices/')\n n = int(n / 2)\n\n for i in range(n):\n f = open('./CV/indices/' + str(i) + '_train.csv', 'r')\n lines = f.readlines()\n train = [int(i) for i in lines]\n\n f = open('./CV/indices/' + str(i) + '_test.csv', 'r')\n lines = f.readlines()\n test = [int(i) for i in lines]\n\n dic = {'train': train, 'test': test}\n self.indices_dictionary[i] = dic\n\n return self.indices_dictionary\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \"\"\" Variations \"\"\"\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n \n def models_ceil(self, model_array):\n \"\"\" Create the ceil of the model predictions \"\"\"\n model_var = []\n for idx, name in enumerate(model_array):\n print(\"Ceil Variation for model %s\"%name)\n n = elements_in_folder('./CV/' + name + '/')\n folder_name = './CV/' + name + '_ceil/'\n create_folder(folder_name)\n model_var.append(name + '_ceil')\n for i in range(n):\n file_ = './CV/' + name + '/' + str(i) + '.csv'\n df = pd.read_csv(file_)\n ratings = np.array(df.Rating)\n ratings = np.ceil(ratings)\n df.Rating = ratings\n df.to_csv(folder_name + str(i) + '.csv', index=False)\n return model_var\n \n def models_round(self, model_array):\n \"\"\" Create the round of the model predictions \"\"\"\n model_var = []\n for idx, name in enumerate(model_array):\n print(\"Round Variation for model %s\"%name)\n n = elements_in_folder('./CV/' + name + '/')\n folder_name = './CV/' + name + '_round/'\n create_folder(folder_name)\n model_var.append(name + '_round')\n for i in range(n):\n file_ = './CV/' + name + '/' + str(i) + '.csv'\n df = pd.read_csv(file_)\n ratings = np.array(df.Rating)\n ratings = np.round(ratings)\n df.Rating = ratings\n df.to_csv(folder_name + str(i) + '.csv', index=False) \n return model_var\n \n def models_floor(self, model_array):\n \"\"\" Create the floor of the model predictions \"\"\"\n model_var = [] \n for idx, name in enumerate(model_array):\n print(\"Floor Variation for model %s\"%name)\n n = elements_in_folder('./CV/' + name + '/')\n folder_name = './CV/' + name + '_floor/'\n create_folder(folder_name)\n model_var.append(name + '_floor') \n for i in range(n):\n file_ = './CV/' + name + '/' + str(i) + '.csv'\n df = pd.read_csv(file_)\n ratings = np.array(df.Rating)\n ratings = np.floor(ratings)\n df.Rating = ratings\n df.to_csv(folder_name + str(i) + '.csv', index=False) \n return model_var \n\n\n","sub_path":"draft/blender2/cross_validator.py","file_name":"cross_validator.py","file_ext":"py","file_size_in_byte":13882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326168423","text":"from .models import Product, product_statuses\nfrom users.serializers import UserSerializer\nfrom rest_framework import serializers\nfrom auctions.serializers import AuctionSerializer\nfrom auctions.models import Auction\nfrom common.fields import Base64ImageField\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n last_auction = AuctionSerializer(\n 
source='get_last_auction',\n read_only=True\n )\n auction = AuctionSerializer(write_only=True)\n image = Base64ImageField(\n max_length=None, use_url=True,\n )\n\n class Meta:\n model = Product\n fields = ('id', 'created', 'title', 'description',\n 'status', 'available_for_sale',\n 'last_auction', 'auction', 'image'\n )\n read_only_fields = ('status',)\n\n def create(self, validated_data):\n auction_data = validated_data.pop('auction')\n product_item = Product.objects.create(**validated_data)\n auction_data.update(product=product_item)\n print(auction_data)\n Auction.objects.create(**auction_data)\n return product_item\n","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"322023161","text":"import sqlite3 as lite\r\n\r\n# Managing the Database and the CRUD(Create,Retrieve,Update and Delete) operations\r\n\r\n\r\nclass Database_Management():\r\n # TODO: Connecting to the database and creating the Table\r\n def __init__(self):\r\n global con\r\n try:\r\n con = lite.connect(\"sqlite.db\")\r\n with con:\r\n cur = con.cursor()\r\n cur.execute('''CREATE TABLE IF NOT EXISTS students (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, roll INTEGER NOT NULL UNIQUE, dept TEXT NOT NULL, year INTEGER NOT NULL, subjects TEXT NOT NULL, under_grad BOOLEAN NOT NULL DEFAULT 1)''')\r\n con.commit()\r\n print(\"Database successfully created\")\r\n except Exception:\r\n print(\"Oops Something Went Wrong\")\r\n # TODO: Insertion of data into the database\r\n\r\n def insert_data(self, data):\r\n try:\r\n con = lite.connect(\"sqlite.db\")\r\n with con:\r\n cur = con.cursor()\r\n cur.execute(\r\n '''INSERT INTO students (name,roll,dept,year,subjects,under_grad) VALUES (?,?,?,?,?,?);''', data)\r\n con.commit()\r\n return True\r\n except Exception:\r\n return False\r\n # TODO: Update the data into the database\r\n\r\n def update_data(self, id, data):\r\n try:\r\n con = lite.connect(\"sqlite.db\")\r\n with con:\r\n cur = con.cursor()\r\n cur.execute(\r\n '''UPDATE students set roll= ?,dept= ?, year= ?, subjects= ?,under_grad= ? 
where id = ?''', (data[0], data[1], data[2], data[3], data[4], id))\r\n con.commit()\r\n return True\r\n except Exception:\r\n return False\r\n # TODO: Fetch data from the database by id\r\n\r\n def fetch_by_id(self, id):\r\n try:\r\n con = lite.connect(\"sqlite.db\")\r\n with con:\r\n cur = con.cursor()\r\n cur.execute('''SELECT * FROM students WHERE id = ?''', (id,))\r\n con.commit()\r\n record = cur.fetchall()\r\n return record\r\n except Exception:\r\n return False\r\n # TODO: Fetch all the data from the database\r\n\r\n def fetch_data(self):\r\n try:\r\n con = lite.connect(\"sqlite.db\")\r\n with con:\r\n cur = con.cursor()\r\n cur.execute('''SELECT * FROM students''')\r\n con.commit()\r\n records = cur.fetchall()\r\n return records\r\n except Exception:\r\n return False\r\n # TODO: Delete data from the database\r\n\r\n def delete_data(self, id):\r\n try:\r\n con = lite.connect(\"sqlite.db\")\r\n with con:\r\n cur = con.cursor()\r\n cur.execute('''DELETE FROM students WHERE id = ?''', [id])\r\n con.commit()\r\n return True\r\n except Exception:\r\n return False\r\n\r\n# User Interface\r\n\r\n\r\ndef main():\r\n db = Database_Management()\r\n print(\"#\"*40)\r\n print(\"\\n\")\r\n print(\":: Student Management ::\\n\")\r\n print(\"#\"*40)\r\n print(\"\\n\")\r\n print(\"1: To Insert Data Into the Database\\n\")\r\n print(\"2: To Update Data In the Database\\n\")\r\n print(\"3: To Fetch Data from the Database by id\\n\")\r\n print(\"4: To Delete Data from the Database\\n\")\r\n print(\"5: To Fetch all the data from the database\\n\")\r\n print(\"6: To Quit Managing the Database\\n\")\r\n\r\n while True:\r\n command = int(input(\"Command --> \"))\r\n print(\"\\n\")\r\n if command == 1:\r\n print(\"You can't change your Name later\\n\")\r\n name = input(\"Name: \")\r\n roll = int(input(\"Roll: \"))\r\n dept = input(\"Dept: \")\r\n year = int(input(\"Year: \"))\r\n subjects = input(\r\n \"Enter Your Subjects Separated by Commas: \").split(\",\")\r\n under_grad = int(input(\"Under_Grad --> 1 For Yes and 0 For No: \"))\r\n data = [name, roll, dept, year, str(subjects), under_grad]\r\n if db.insert_data(data):\r\n print(\"Data Inserted Successfully\\n\")\r\n else:\r\n print(\"Data can't be inserted properly\\n\")\r\n elif command == 2:\r\n id = int(input(\"Enter the id whose data you want to update: \"))\r\n print(\"\\n\")\r\n roll = int(input(\"Roll: \"))\r\n dept = input(\"Dept: \")\r\n year = int(input(\"Year: \"))\r\n subjects = input(\r\n \"Enter Your Subjects Separated by Spaces: \").split(\" \")\r\n under_grad = int(input(\"1 For Yes and 0 For No: \"))\r\n data = [roll, dept, year, str(subjects), under_grad]\r\n if db.update_data(id, data):\r\n print(\"Data Updated Successfully\\n\")\r\n else:\r\n print(\"Data Not Updated, Something Went Wrong\\n\")\r\n elif command == 3:\r\n id = int(input(\"Enter the id whose data you want to fetch: \"))\r\n print(\"\\n\")\r\n if db.fetch_by_id(id):\r\n record = db.fetch_by_id(id)\r\n for val in record:\r\n print(f\"Id: {val[0]}\")\r\n print(f\"Name: {val[1]}\")\r\n print(f\"Roll: {val[2]}\")\r\n print(f\"Dept: {val[3]}\")\r\n print(f\"Year: {val[4]}\")\r\n print(f\"Subjects: {val[5]}\")\r\n Under_Grad = 'Yes' if val[6] == 1 else 'No'\r\n print(f\"Under_Grad: {Under_Grad}\")\r\n print(\"\\n\")\r\n else:\r\n print(\"Can't Fetch data\")\r\n elif command == 4:\r\n id = int(input(\"Enter the id whose data you want to delete: \"))\r\n print(\"\\n\")\r\n if db.delete_data(id):\r\n print(\"Successfully Deleted\\n\")\r\n else:\r\n print(\"Not 
Deleted\\n\")\r\n elif command == 5:\r\n if db.fetch_data():\r\n for pos, val in enumerate(db.fetch_data()):\r\n print(f\"Id: {val[0]}\")\r\n print(f\"Name: {val[1]}\")\r\n print(f\"Roll: {val[2]}\")\r\n print(f\"Dept: {val[3]}\")\r\n print(f\"Year: {val[4]}\")\r\n print(f\"Subjects: {val[5]}\")\r\n Under_Grad = 'Yes' if val[6] == 1 else 'No'\r\n print(f\"Under_Grad: {Under_Grad} \\n\")\r\n\r\n else:\r\n print(\"Fetching all the data can't be done\")\r\n elif command == 6:\r\n return False\r\n else:\r\n print(\"Wrong Choice\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"sqlite project3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"84788160","text":"#!/usr/bin/env python\n\nimport sys,xmlrpclib\n\ndef koji(url,channel,pkg,behavior):\n\n\tsp = xmlrpclib.ServerProxy(url, allow_none=1)\n\t\t\n\tif behavior == 'latest':\n\t\tbuilds = sp.getLatestBuilds(channel,None,pkg)\n#\t\tprint build['nvr'] + '\\t' + str(build['package_id']) + '\\t' + build['completion_time']\n\telse:\n\t\t###\n\t\t# tag, event=None, inherit=False, prefix=None, latest=False, package=None, owner=None\n\t\t###\n\t\tbuilds = sp.listTagged(channel, None, False, None, False, pkg, None)\n\treturn builds\n","sub_path":"bin/kojiact.py","file_name":"kojiact.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262858177","text":"import logging\nfrom typing import Optional, Union\n\nimport dpkt\nfrom dpkt.ip import IP\nfrom dpkt.ntp import NTP\nfrom dpkt.udp import UDP\nfrom munch import Munch\n\nfrom core.configuration.data import ConfigurationData\nfrom core.packet_parsers.base import PacketParserInterface\nfrom core.lib.ip_utils import IpAddrUtils\n\n\nclass NtpPacketParser(PacketParserInterface):\n def __init__(self, config: ConfigurationData):\n self.config = config\n self.ip_utils = IpAddrUtils()\n\n @staticmethod\n def load_ntp_packet_from_ip_packet(ip_packet: IP) -> Optional[NTP]:\n try:\n udp_packet = UDP(ip_packet.data)\n return NtpPacketParser.load_ntp_packet_from_udp_packet(udp_packet)\n\n except BaseException as ex:\n logging.warning('Can not extract NTP packet from UDP packet. Error: `%s`', ex)\n raise ex\n\n @staticmethod\n def load_ntp_packet_from_udp_packet(udp_packet: UDP) -> Optional[NTP]:\n try:\n return NTP(udp_packet.data)\n\n except dpkt.dpkt.NeedData:\n logging.warning('Not enough data to extract NTP packet from UDP packet')\n\n except BaseException as ex:\n logging.warning('Can not extract NTP packet from UDP packet. Error: `%s`', ex)\n raise ex\n\n def extract_data(self, packet: NTP) -> Munch:\n data = Munch()\n try:\n data.ntp_mode = packet.mode\n data.ntp_interval = packet.interval\n data.ntp_stratum = packet.stratum\n data.ntp_reference_id = self.resolve_ntp_reference(packet)\n\n except BaseException as ex:\n logging.warning('Unable to extract NTP from `%s`. 
Error: `%s`', type(packet), ex)\n raise ex\n\n return data\n\n def resolve_ntp_reference(self, packet: NTP) -> Union[int, str]:\n reference_id = self.ip_utils.inet_to_str(packet.id)\n if reference_id is None:\n # Could not parse NTP REFID, probably it is a string\n return packet.id\n\n if reference_id == '0.0.0.0':\n # REFID is NULL but dpkt considers it as b'\\x00\\x00\\x00\\x00'\n return ''\n\n if self.config.use_numeric_values is True:\n return self.ip_utils.ip_to_int(reference_id)\n\n return reference_id\n","sub_path":"core/packet_parsers/ntp_parser.py","file_name":"ntp_parser.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"30693023","text":"import os, sys, re\n\ndef ListFiles(path, extension=''):\n # ListFiles creates a list of files (and their absolute paths) from the given dir and extension;\n # hidden files are excluded and subfolders are included. The default extension '' matches every file.\n\tfilelist = []\n\tfileabslist = []\n\tfor directory, dir_names, file_names in os.walk(path):\n\t\t\n\t\tfor file_name in file_names:\n\t\t\tif (not file_name.startswith('.')) and file_name.endswith(extension):\n\t\t\t\tfile_name_base = file_name.replace(extension, '')\n\t\t\t\tfilepath_tmp = os.path.join(directory, file_name)\n\t\t\t\tfilelist.append(file_name_base)\n\t\t\t\tfileabslist.append(filepath_tmp)\n\t\n\treturn filelist, fileabslist\n\ndef DirCheck(targetpaths):\n\t\"\"\"\n\tdircheck checks the target folder and creates the folder if it does not exist.\n\ttargetdirlist: list of folderpath\n\t\"\"\"\n\t# print(type(targetpaths))\n\tif isinstance(targetpaths, str): \n\t\t# print(os.path.exists(targetpaths))\n\t\tif not os.path.exists(targetpaths):\n\t\t\tos.makedirs(targetpaths)\n\telif isinstance(targetpaths, list): \n\t\tfor path in targetpaths:\n\t\t\tif not os.path.exists(path):\n\t\t\t\tos.makedirs(path)\n\ndef SplitAll(path):\n # SplitAll splits a file path into its individual components. 
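\n # e.g. SplitAll('/a/b/c.txt') -> ['/', 'a', 'b', 'c.txt']; handles absolute and relative paths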
\n allparts = []\n while 1:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n allparts.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts\n\ndef SortByFolder(ipfilepaths, inputpattern = None): \n # input\n # 'Tile_'\n\n class imgfile:\n def __init__(self, filename, filepath, index):\n self.filename = filename\n self.filepath = filepath\n self.index = index\n\n cleaned_filelist = [] \n for ipfilepath in ipfilepaths:\n if inputpattern is not None: \n x = re.search(inputpattern, ipfilepath) # search the string by the given pattern\n try: \n ipfilepath_tmp = x.group(0)\n file_temp = SplitAll(ipfilepath) \n filename = os.path.split(ipfilepath)[1]\n cleaned_filelist.append(imgfile(filename, ipfilepath, int(file_temp[-2]))) \n except AttributeError:\n found = ''\n else:\n ipfilepath_tmp = ipfilepath\n file_temp = SplitAll(ipfilepath_tmp) \n filename = os.path.split(ipfilepath_tmp)[1]\n cleaned_filelist.append(imgfile(filename, ipfilepath_tmp, int(file_temp[-2]))) \n \n cleaned_filelist.sort(key =lambda x: x.index)\n\n return cleaned_filelist\n\ndef ListFolders(path):\n\tdirlist = []\n\tfor dir_name in os.listdir(path):\n\t\tif (not dir_name.startswith('.')):\n\t\t\tdirlist.append(dir_name)\n\treturn dirlist\n","sub_path":"core/fileop.py","file_name":"fileop.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238247641","text":"class Solution:\n def getRow(self, rowIndex):\n l=[[1]*i for i in range(1,rowIndex+2)]\n if rowIndex < 2:\n return l[-1]\n for i in range(2,len(l)):\n for j in range(1,i):\n l[i][j]=l[i-1][j-1] +l[i-1][j]\n return l[-1]\n \n \n\ns=Solution()\nprint(s.getRow(0))","sub_path":"Pascal_Triangle_RowIndex.py","file_name":"Pascal_Triangle_RowIndex.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314116842","text":"import readchar\nfrom random import randint, random\nimport os \n\npos_x = 0\npos_y = 1\nnumber_of_map_objects = 11\n\nobstacle_definition = \"\"\"\\\n#### #### ##### ###\n#### #### ##### \n #### \n###### #### ### \n###### #### ### ###\n###### #### ### ###\n # #### ### \n # #### ### ###\n \n ##### ########### ###\n ##### ## \n ##### ## ###### ###\n######## ## ### ###\n ## ### \\\n\"\"\"\n\nmy_position = [6, 3]\ntail_lenght = 0\ntail = []\nmap_objects = []\n\nend_game = False\ndied = False\n\n# Create obstacle map\nobstacle_definition = [list(row) for row in obstacle_definition.split(\"\\n\")]\nmap_width = len(obstacle_definition[0])\nmap_height = len(obstacle_definition)\n\n\n# Main loop\nwhile not end_game:\n os.system(\"cls\")\n \n # Generate random onjects on the map\n while len(map_objects) < number_of_map_objects:\n new_position = [randint(0, map_width - 1), randint(0, map_height - 1)]\n \n if new_position not in map_objects and new_position != my_position and \\\n obstacle_definition[new_position[pos_y]][new_position[pos_x]] != \"#\":\n map_objects.append(new_position)\n\n #Draw Map (borders)\n print(\"+\" + \"-\"*(map_width*3) + \"+\")\n\n for coordinate_y in range(map_height):\n print(\"|\", end=\"\")\n \n for cordinate_x in range(map_width):\n \n char_to_draw = \" \"\n object_in_cell = None\n tail_in_cell = None\n \n for mobject in map_objects:\n if mobject[pos_x] == cordinate_x and 
mobject[pos_y] == coordinate_y:\n char_to_draw = \"¿*?\"\n object_in_cell = mobject\n \n for tail_piece in tail:\n if tail_piece[pos_x] == cordinate_x and tail_piece[pos_y] == coordinate_y:\n char_to_draw = \"=@=\"\n tail_in_cell = tail_piece\n \n if my_position[pos_x] == cordinate_x and my_position[pos_y] == coordinate_y:\n char_to_draw = \"ºwº\"\n \n if object_in_cell:\n map_objects.remove(object_in_cell)\n tail_lenght += 1\n \n if tail_in_cell:\n end_game = True\n died = True\n \n if obstacle_definition[coordinate_y][cordinate_x] == \"#\":\n char_to_draw = \"[#]\"\n \n print(\"{}\".format(char_to_draw), end=\"\")\n print(\"|\")\n\n print(\"+\" + \"-\"*(map_width*3) + \"+\")\n print(\"Tail lenght {}\".format(tail_lenght))\n #print(\"Tail position {}\".format(tail))\n\n # Ask user where he/she wants to move\n #direction = input(\"Where do you want to move? [WASD]\\n\")\n direction = readchar.readchar().decode()\n new_position = None\n\n if direction == \"w\":\n new_position = [my_position[pos_x], (my_position[pos_y] - 1) % map_height]\n \n elif direction == \"s\":\n new_position = [my_position[pos_x], (my_position[pos_y] + 1) % map_height]\n \n elif direction == \"a\":\n new_position = [(my_position[pos_x] - 1) % map_width, my_position[pos_y]]\n \n elif direction == \"d\":\n new_position = [(my_position[pos_x] + 1) % map_width, my_position[pos_y]]\n \n elif direction == \"q\":\n end_game = True\n \n if new_position:\n if obstacle_definition[new_position[pos_y]][new_position[pos_x]] != \"#\":\n tail.insert(0, my_position.copy())\n tail = tail[:tail_lenght]\n my_position = new_position\n\nif died == True:\n print(\"You died!\")","sub_path":"maze_snake.py","file_name":"maze_snake.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"280336968","text":"# coding=utf-8\n\nimport math\n\n\ndef quadratic(a, b, c):\n if not isinstance(a, (int, float)) or not isinstance(b, (int, float))\\\n or not isinstance(c, (int, float)):\n raise TypeError('输入必须是整型或浮点型')\n if a == 0:\n if b != 0:\n r = -c / b\n print('x = ', r)\n else:\n print('无解')\n else:\n D = (b * b - 4 * a * c)\n if D >= 0:\n d = math.sqrt(D)\n x1 = (-b + d) / (2 * a)\n x2 = (-b - d) / (2 * a)\n return x1, x2\n else:\n print('无解')\n\n\nif __name__ == '__main__':\n a = float(input('input a number: '))\n b = float(input('input a number: '))\n c = float(input('input a number: '))\n print('计算公式为:%dx * x + %dx + %d = 0' % (a, b, c))\n print(quadratic(a, b, c))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142917044","text":"\"\"\"Tests for RBTools help command and rbt command help options.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom rbtools.utils.process import execute\nfrom rbtools.utils.testbase import RBTestBase\n\n\nclass HelpCommandTests(RBTestBase):\n \"\"\"Tests for RBT help command and rbt command help options.\"\"\"\n\n def test_help_command(self):\n \"\"\"Testing RBT commands when running 'rbt help <command>'\"\"\"\n self._check_help_output(['rbt', 'help', 'alias'], 'alias')\n\n def test_help_options_before(self):\n \"\"\"Testing RBT commands when running 'rbt --help <command>' or 'rbt\n -h <command>'\n \"\"\"\n self._check_help_output(['rbt', '--help', 'alias'], 'alias')\n self._check_help_output(['rbt', '-h', 'alias'], 'alias')\n\n def test_help_options_after(self):\n \"\"\"Testing 
RBT commands when running 'rbt <command> --help' or 'rbt\n <command> -h'\n \"\"\"\n self._check_help_output(['rbt', 'alias', '--help'], 'alias')\n self._check_help_output(['rbt', 'alias', '-h'], 'alias')\n\n def test_help_invalid_command(self):\n \"\"\"Testing RBT commands when running '--help' or '-h' with an\n invalid command\n \"\"\"\n self._check_help_output(['rbt', 'invalid', '--help'],\n 'invalid', invalid=True)\n self._check_help_output(['rbt', 'invalid', '-h'], 'invalid',\n invalid=True)\n self._check_help_output(['rbt', 'help', 'invalid'], 'invalid',\n invalid=True)\n\n def test_help_multiple_args(self):\n \"\"\"Testing RBT commands when running the help command or help\n options with multiple arguments present\n \"\"\"\n self._check_help_output(['rbt', 'alias', 'extra_arg', '--help'],\n 'alias')\n self._check_help_output(['rbt', 'alias', 'extra_arg', '-h'], 'alias')\n self._check_help_output(['rbt', 'alias', '--help', 'extra_arg'],\n 'alias')\n self._check_help_output(['rbt', 'alias', '-h', 'extra_arg'], 'alias')\n self._check_help_output(['rbt', '--help', 'alias', 'extra_arg'],\n 'alias')\n self._check_help_output(['rbt', '-h', 'alias', 'extra_arg'], 'alias')\n self._check_help_output(['rbt', 'help', 'alias', 'extra_arg'], 'alias')\n\n def _check_help_output(self, command, subcommand, invalid=False):\n \"\"\"Check if a specific rbt command's output exists in test output.\n\n Args:\n command (list of unicode):\n The rbt command used for testing.\n\n subcommand (unicode)\n The unicode string of the rbt command type.\n\n invalid (bool, optional):\n If ``True``, check if output matches what is expected after\n running an invalid command. Otherwise, check if output\n matches what is expected after running a valid rbt command.\n \"\"\"\n try:\n output = execute(command)\n except Exception as e:\n self.fail(e)\n\n if invalid:\n self.assertIn('No help found for %s' % subcommand, output)\n else:\n self.assertIn('usage: rbt %s [options]' % subcommand, output)\n","sub_path":"rbtools/commands/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316424620","text":"import numpy as np\nimport time\nfrom modules import timer\n\n# список всех веществ\nsubstances = [\n 'H2','Li','Na','K','Cu','Rb','Ag','Cs','Au','Fr','Ca','Zn','Sr','Cd','Ba','Hg','Ra','Cn','Cl2','O2','S','C','N','P','Al',\n 'HCl','H2SO3','H2SO4','H2CO3',\n 'Cu(OH)2','LiOH','NaOH',\n 'CuO', 'SO3','SO2','CO2','LiO2','Na2O',\n 'NaI', 'K2SO4', 'K2CO3', 'Ba(NO3)2',\n]\n\n# список всех реакций\nreactions = [\n # 0 lvl\n [['Li'], ['water'], 'LiOH', '2Li + 2H2O -> 2LiOH + H2', 'Мне нужен гидроксид лития. Я его буду использовать для моего нового щелочного аккумулятора', 'Гидроксид лития можно получить в окислительно-восстановительной реакции. Для этого нужна вода', ['Li', 'OH']],\n [['SO3'], ['water'], 'H2SO4', 'SO3 + H2O -> H2SO4', 'Мне нужна серная кислота. Я буду ее использовать в минеральных удобрениях для моего сада', 'Серную кислоту можно получить, смешав оксид с водой', ['SO4']],\n [['CaCO3'], ['gorelka'], 'CaO', 'CaCO3 -> CaO + CO2', 'Мне нужен оксид кальция. Я его буду использовать для нейтрализации избыточной кислоты', 'Оксид кальция можно получить в реакции разложения при увеличении температуры', ['O', 'Ca']],\n [['SO2'], ['water'], 'H2SO3', 'SO2 + H2O -> H2SO3', 'Мне нужна сернистая кислота. 
С помощью нее я смогу законсервировать овощи из моего сада и отбелить шелковую рубашку', 'Сернистую кислоту можно получить, смешав оксид с водой', ['SO3']],\n [['H2', 'Cl2'], [''], 'HCl', 'H2 + Cl2 -> 2HCl', 'Мне нужна соляная кислота. Она мне поможет при дезинфекции помещения', 'Соляную кислоту можно получить в окислительно-восстановительной реакции. Необходимо два простых вещества', ['H', 'Cl']],\n [['CaO'], ['water'], 'Ca(OH)2', 'CaO + H2O -> Ca(OH)2', 'Мне нужен гидроксид кальция. Я хочу с помощью него побелить забор и деревья', 'Гидроксид кальция можно получить в реакции соединения с водой', ['Ca', 'O']],\n [['NaOH', 'HCl'], [''], 'NaCl', 'NaOH + HCl -> NaCl + H2O', 'Мне нужна поваренная соль, дома у меня она закончилась, а в магазине пусто', 'Поваренная соль - хлорид натрия, ее можно получить в реакции обмена', ['Na', 'Cl']],\n [['NaBr', 'H2'], [''], 'NaBr', '2HBr + 2Na -> 2NaBr + H2', 'Мне нужен бромид натрия для устранения бессонницы и раздражительности', 'Бромид натрия можно получить в окислительно-восстановительной реакции с кислотой', ['Na', 'Br']],\n # [[], ['gorelka'], '', '', 'Мне нужен ', '', []],\n # [[], ['gorelka'], '', '', 'Мне нужен ', '', []],\n # [[], [''], '', '', 'Мне нужен ', '', []],\n # [[], [''], '', '', 'Мне нужен ', '', []],\n # [[], [''], '', '', 'Мне нужен ', '', []],\n # [[], [''], '', '', 'Мне нужен ', '', []],\n # [[], [''], '', '', 'Мне нужен ', '', []],\n\n # 1 lvl\n [['NaI', 'H2SO4'], [''], 'I2', 'NaI + H2SO4 -> H2S + H2O + I2 + Na2SO4', 'Мне нужен йод для раствора, дезинфецирующего раны', 'Йод можно получить в окислительно-восстановительной реакции с использованием кислоты', ['I', 'SO3', 'SO4']],\n [['K2CO3', 'H2SO4'], [''], 'K2SO4', 'H2SO4 + K2CO3 -> K2SO4 + H2O + CO2', 'Мне нужен сульфат калия для удобрений', 'Сульфат калия можно получить в кислотно-щелочной реакции с использованием кислоты', ['K', 'SO4']],\n [['Ba(NO3)2', 'H2SO4'], [''], 'BaSO4', 'Ba(NO3)2 + H2SO4 -> BaSO4 + 2HNO3', 'Мне нужен сульфат бария для рентгенологических исследований пищевода', 'Сульфат бария можно получить в реакции обмена с использованием кислоты', ['Ba', 'SO4']],\n [['NaCl', 'AgNO3'], ['water'], 'NaNO3', 'NaCl + AgNO3 -> NaNO3 + AgCl', 'Мне нужна натриевая селитра для моих удобрений', 'Нитрат натрия можно получить в реакции обмена с поваренной солью в растворе', ['Ag', 'Cl', 'Na']],\n [['NaCl', 'AgNO3'], ['water'], 'AgCl', 'NaCl + AgNO3 -> NaNO3 + AgCl', 'Мне нужен хлорид серебра. Я его использую в моем экспериментальном радаре', 'Хлорид серебра можно получить в реакции обмена с поваренной солью в растворе', ['Ag', 'Cl', 'Na']],\n # [[], [''], '', '', 'Мне нужен йод для раствора, дезинфецирующего раны. 
Его можно получить в окислительно-восстановительной реакции', '', []],\n\n # 2 lvl\n [['NaI', 'MnO2', 'H2SO4'], [''], 'NaHSO4', '2NaI + MnO2 + 3H2SO4 -> 2NaHSO4 + MnSO4 + I2 + 2H2O', 'Мне нужен гидросульфат натрия для понижения pH в моем бассейне', 'Гидросульфат натрия можно получить в окислительно-восстановительной реакции с тремя реагентами', ['Na', 'H', 'SO4']],\n [['H2S', 'Cl2'], ['water'], 'H2SO4 и HCl', 'H2S + 4Cl2 + 4H2O -> H2SO4 + 8HCl', 'Мне нужны серная и соляная кислоты для моих минеральных удобрений и дезинфекции', 'Нужные кисслоты можно получить в окислительно-восстановительной реакции с газами и водой', ['H', 'S', 'Cl']],\n # [[], [''], '', '', 'Мне нужен йод для раствора, дезинфецирующего раны. Его можно получить в окислительно-восстановительной реакции', '', []],\n # [[], [''], '', '', 'Мне нужен йод для раствора, дезинфецирующего раны. Его можно получить в окислительно-восстановительной реакции', '', []],\n # [[], [''], '', '', 'Мне нужен йод для раствора, дезинфецирующего раны. Его можно получить в окислительно-восстановительной реакции', '', []],\n]\n\n# к каждой реакции добавляем укороченный список элементов, чтобы был возможен только однозначный ответ\nfor reaction in reactions:\n substances_special = []\n for i in range(len(substances)):\n metric = True\n for el in reaction[6]:\n if el in substances[i]:\n metric = False\n if metric:\n substances_special.append(substances[i])\n reaction.append(substances_special)\n\n# количество реакций, из которого можно выбирать реакцию для игрока с определенным рангом\nlvl_length = {\n 0: 8,\n 1: 13,\n 2: 15,\n}\n\n# шаблоны для ответа на неправильные реакции\nincorrect = {'reaction': 'incorrect', 'substance': 'Попробуйте еще раз, у Вас все получится)', 'money': 0}\nincorrect_multiplayer = {'reaction': 'incorrect', 'substance': 'Попробуйте еще раз, у Вас все получится)', 'store': 0}\n\nclass reaction_processing():\n def play(self, data, type): # выбираем реакцию для пользователя\n reactions_copy = reactions.copy()\n index_main = np.random.randint(0, lvl_length[data['rang']])\n substances_copy = reactions_copy[index_main][7].copy()\n list = reactions_copy[index_main][0].copy() # берем те вещества, которые точно должны быть\n for i in range(10 - len(list)): # добираем до 10 веществ произвольными веществами\n index = np.random.randint(0, len(substances_copy))\n while substances_copy[index] in reactions_copy[index_main][0]:\n index = np.random.randint(0, len(substances_copy))\n list.append(substances_copy[index])\n del substances_copy[index]\n list_to_send = []\n np.random.shuffle(list) # перемешиваем массив\n while len(list) != 0: # и еще раз перемешиваем массив другим способом\n index = np.random.randint(0, len(list))\n list_to_send.append(list[index])\n del list[index]\n res = {'substances': list_to_send, 'question': reactions_copy[index_main][4], 'index': index_main}\n if type == 'play_alone': # если игра одиночная, добавляем подсказку\n res['hint'] = reactions_copy[index_main][5]\n return res\n\n def play_alone_answer(self, data): # проверяем ответ в одиночной игре\n data['list_of_substances'] = data['list_of_substances'][1:-1].split(', ')\n data['list_of_objects'] = data['list_of_objects'][1:-1].split(', ')\n if len(data['list_of_substances']) != len(reactions[data['index']][0]) or len(data['list_of_objects']) != len(reactions[data['index']][1]):\n return incorrect\n for i in data['list_of_substances']:\n if i not in reactions[data['index']][0]:\n return incorrect\n for i in data['list_of_objects']:\n if i not in 
reactions[data['index']][1]:\n return incorrect\n sum = np.random.randint(25, 31)\n return {'reaction': reactions[data['index']][2], 'substance': reactions[data['index']][3], 'money': sum}\n\n def multi_play_answer(self, data): # проверяем ответ в мультиплеере\n data['list_of_substances'] = data['list_of_substances'][1:-1].split(', ')\n data['list_of_objects'] = data['list_of_objects'][1:-1].split(', ')\n if 'water' in data['list_of_objects']: # если выбрана вода, замедляем реакцию в зависимости от уровня прокачки воды\n time.sleep(data['water_vlv'])\n if 'gorelka' in data['list_of_objects']: # если выбрана горелка, замедляем реакцию в зависимости от уровня прокачки горелки\n time.sleep(data['gorelka_vlv'])\n if len(data['list_of_substances']) != len(reactions[data['index']][0]) or len(data['list_of_objects']) != len(reactions[data['index']][1]):\n return incorrect_multiplayer\n for i in data['list_of_substances']:\n if i not in reactions[data['index']][0]:\n return incorrect_multiplayer\n for i in data['list_of_objects']:\n if i not in reactions[data['index']][1]:\n return incorrect_multiplayer\n store = np.random.randint(50, 61)\n return {'reaction': reactions[data['index']][2], 'substance': reactions[data['index']][3], 'store': store}\n\nrep = reaction_processing()","sub_path":"reaction_processing.py","file_name":"reaction_processing.py","file_ext":"py","file_size_in_byte":13237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243905220","text":"import numpy as np\r\nimport config as p\r\n\r\nclass Memory():\r\n \r\n def __init__(self, capacity):\r\n \r\n super(Memory, self).__init__()\r\n \r\n self.ref_size = p.REF_IMG_RESO\r\n self.box_num = p.BOX_NUM\r\n self.capacity = capacity\r\n self.max_step=p.MAX_STEP\r\n \r\n self.s_mem=np.zeros((self.capacity, 1, self.ref_size, self.ref_size))\r\n self.obj_mem=np.zeros((self.capacity, self.box_num, 6))\r\n self.step_mem=np.zeros((self.capacity, self.max_step))\r\n \r\n self.a_mem=np.zeros((self.capacity, 1))\r\n self.r_mem=np.zeros((self.capacity, 1))\r\n \r\n self._s_mem=np.zeros((self.capacity, 1, self.ref_size, self.ref_size))\r\n self._obj_mem=np.zeros((self.capacity, self.box_num, 6))\r\n self._step_mem=np.zeros((self.capacity, self.max_step))\r\n \r\n self.memory_counter=0\r\n \r\n \r\n def store(self, s, obj, step, a, r, s_, obj_, step_):\r\n\r\n index = self.memory_counter % self.capacity \r\n \r\n self.s_mem[index, :] = s\r\n self.obj_mem[index,:]= obj\r\n self.step_mem[index, :]=step\r\n \r\n self.a_mem[index, :] = a\r\n self.r_mem[index, :] = r\r\n \r\n self._s_mem[index, :]= s_\r\n self._obj_mem[index,:]= obj_\r\n self._step_mem[index, :]=step_\r\n\r\n self.memory_counter += 1\r\n \r\n \r\n def clear(self):\r\n self.memory_counter=0\r\n \r\n \r\n def sample(self,num):\r\n \r\n if self.memory_counter<self.capacity:\r\n indices = np.random.choice(self.memory_counter, size=num)\r\n else:\r\n indices = np.random.choice(self.capacity, size=num)\r\n \r\n bs = self.s_mem[indices, :]\r\n bobj = self.obj_mem[indices, :]\r\n bstep = self.step_mem[indices, :]\r\n\r\n ba = self.a_mem[indices, :]\r\n br = self.r_mem[indices, :]\r\n \r\n bs_ = self._s_mem[indices, :]\r\n bobj_ = self._obj_mem[indices, :]\r\n bstep_ = self._step_mem[indices, :]\r\n \r\n return bs, bobj, bstep, ba, br, bs_, bobj_, bstep_\r\n \r\n \r\n\r\n \r\n \r\n 
","sub_path":"Prim-Agent/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170222004","text":"teller5_3 = 0\nwith open('kaartnummers.txt') as k:\n for gegevens in k:\n teller5_3 += 1\n print('Deze file telt ' + str(teller5_3) + ' regels')\n\ninfile = open('kaartnummers.txt', 'r')\nlst = []\nfor lineList in infile.readlines():\n item = lineList.split(',')\n kaartnr = item[0]\n lst.append(kaartnr)\ngrootsteKaartnr = max(lst)\nregelKaartnr = lst.index(grootsteKaartnr) + 1\nprint('Het grootste kaartnummer is: ' + str(grootsteKaartnr) + ' en dat staat op regel ' + str(regelKaartnr))","sub_path":"pe7_3.py","file_name":"pe7_3.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"20395796","text":"import grpc\nimport os\nimport sys\nimport json\n\nsys.path.append(\"/cache/proto/\")\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn import metrics\n\nimport inference_pb2\nimport inference_pb2_grpc\n\nfrom data_generator import make_dataloader\nfrom persia.prelude import PyPersiaBatchData\n\n\ndef get_inference_stub():\n channel = grpc.insecure_channel(\"localhost:7070\")\n stub = inference_pb2_grpc.InferenceAPIsServiceStub(channel)\n return stub\n\n\ndef infer(stub, model_name, model_input):\n input_data = {\"batch\": model_input}\n response = stub.Predictions(\n inference_pb2.PredictionsRequest(model_name=model_name, input=input_data)\n )\n try:\n prediction = response.prediction.decode(\"utf-8\")\n prediction = prediction.splitlines()\n prediction = [x.strip() for x in prediction]\n prediction = [x.replace(\",\", \"\") for x in prediction]\n prediction = prediction[1:-1]\n prediction = [float(x) for x in prediction]\n return prediction\n except:\n exit(1)\n\n\nif __name__ == \"__main__\":\n\n test_filepath = os.path.join(\"/data/\", \"test.npz\")\n _, loader = make_dataloader(test_filepath, batch_size=1024)\n all_pred = []\n all_target = []\n\n for (dense, batch_sparse_ids, target) in tqdm(loader, desc=\"gen batch data...\"):\n batch_data = PyPersiaBatchData()\n batch_data.add_dense([dense])\n batch_data.add_sparse(batch_sparse_ids, False)\n\n model_input = batch_data.to_bytes()\n prediction = infer(get_inference_stub(), \"adult_income\", model_input)\n\n assert len(prediction) == len(\n target\n ), f\"miss results {len(prediction)} vs {len(target)}\"\n\n all_target.append(target)\n all_pred.append(prediction)\n\n all_pred, all_target = np.concatenate(all_pred), np.concatenate(all_target)\n\n fpr, tpr, th = metrics.roc_curve(all_target, all_pred)\n infer_auc = metrics.auc(fpr, tpr)\n\n print(f\"infer_auc = {infer_auc}\")\n\n result_filepath = os.environ[\"RESULT_FILE_PATH\"]\n with open(result_filepath, \"r\") as f:\n result = f.read()\n result = json.loads(result)\n\n eval_auc = result[\"eval_auc\"]\n auc_diff = abs(eval_auc - infer_auc)\n assert (\n auc_diff < 1e-6\n ), f\"infer error, expect auc diff < 1e-6 but got {auc_diff}\"\n","sub_path":"e2e/adult_income/serve_client.py","file_name":"serve_client.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351822323","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : maxpayyne\nDate : 2020-03-19\nPurpose: Maxpayyne is Programming\n\"\"\"\n\nimport argparse\nimport os\n\n# 
--------------------------------------------------\ndef get_args():\n \"\"\"Argparse: Getting command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Transcribing DNA into RNA\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument(\n \"File\",\n help=\"Input file(s)\",\n metavar=\"FILE\",\n nargs=\"+\",\n type=argparse.FileType(\"r\"),\n )\n parser.add_argument(\n \"-o\",\n \"--outdir\",\n help=\"Output directory\",\n type=str, # argparse.FileType('wt'),\n metavar=\"DIR\",\n default=\"out\",\n )\n\n args = parser.parse_args()\n\n if not os.path.isdir(args.outdir):\n os.makedirs(args.outdir)\n\n return args\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"The core of the Program\"\"\"\n\n args = get_args()\n out_dir = args.outdir\n\n num_files, tot_lin = 0, 0\n for f_h in args.File:\n num_files += 1\n out_file = os.path.join(out_dir, os.path.basename(f_h.name))\n out_f_h = open(out_file, \"wt\")\n lines, num_lines = [], 0\n for line in f_h:\n num_lines += 1\n lines.append(line.strip().replace(\"T\", \"U\"))\n tot_lin += num_lines\n line_s = \"\\n\".join(lines) # or print(line, file=out_f_h)\n out_f_h.write(line_s)\n out_f_h.close()\n sequence = \"sequence\" if tot_lin == 1 else \"sequences\"\n files = \"file\" if num_files == 1 else \"files\"\n\n print(\n f'Done, wrote {tot_lin} {sequence} in {num_files} {files} to directory \"{args.outdir}\".'\n )\n\n\n# --------------------------------------------------\nif __name__ == \"__main__\":\n main()\n","sub_path":"assignments/07_rna/transcribe.py","file_name":"transcribe.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"330313196","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.core.mail import EmailMessage\nfrom content.models import Update, Page, Menu\nfrom content import forms\n\ndef home(request):\n\tupdates = Update.objects.all().order_by('-post_date')[:4]\n\tctx = {'latest_big_news': updates,\n\t\t\t'social': True}\n\treturn basicPage('home.htm', ctx)\n\ndef page(request, slug):\n\tp = get_object_or_404(Page, slug=slug)\n\tctx = {'page': p}\n#\tif ctx['page'].get('title') == \"Talk The Talk\":\n#\t\treturn HttpResponse(\"hi\")\n\treturn basicPage('page.htm', ctx)\n\ndef basicPage(template, ctx):\n\tmenuPage(ctx)\n\treturn render_to_response(template, ctx)\n\t\ndef menuPage(ctx):\n\tmenuItems = Menu.objects.all().order_by('weight')\n\tmenu = [[x, [y for y in menuItems if y.parent == x]] for x in menuItems if not x.parent]\n\tctx['menu'] = menu\n\t\ndef contact(request):\n\tp = Page.objects.get(slug='contact')\n\tc = RequestContext(request)\n\tc['page'] = p\n\t\n\tif request.method == \"POST\":\t# form submitted\n\t\tcform = forms.ContactForm(request.POST)\n\t\tif cform.is_valid():\n\t\t\trecips = ['uniphil@gmail.com', 'alexcguest@gmail.com']\n\t\t\tif request.POST.get('confirm', False):\n\t\t\t\trecips += [request.POST['email']]\n\t\t\t\t\n\t\t\te = EmailMessage(\n\t\t\t\tsubject='[CSC Contact Form] ' + request.POST['subject'],\n\t\t\t\tbody=\"Message from \" + request.POST['email'] + \":\\n\" \\\n\t\t\t\t\t+ request.POST['message'],\n\t\t\t\tfrom_email=\"CSC Website Contact <info@childsoldiercycle.ca>\",\n\t\t\t\theaders = {'Reply-To': 
request.POST['email']},\n\t\t\t\tto=recips,\n\t\t\t)\n\t\t\te.send()\n\t\t\tc['thanks'] = True\n\t\telse:\t# invalid form\n\t\t\tc['tform'] = cform\n\telse:\t# first load\n\t\tc['tform'] = forms.ContactForm()\n\t\n\t\n\tmenuPage(c)\n\treturn render_to_response('contact.htm', c)\n\t\n\t\n\t\n\t\n\t\ndef ttt(request):\n\tp = Page.objects.get(slug='talk-the-talk')\n\tc = RequestContext(request)\n\tc['page'] = p\n\tif request.GET.get('success', False):\n\t\te = EmailMessage(\n\t\t\tsubject='[Child Soldier Cycle] Thanks for your click!',\n\t\t\tbody=\"\"\"\n\n<p><strong>Hello \"\"\" + request.session['name'] + \"\"\"!</strong></p>\n\n<p>Thanks for your interest in our Talk the Talk campaign! We'll soon be starting to accept video endorsements of <a href=\"http://new.childsoldiercycle.ca/talk-the-talk\">our message</a> to the Canadian government to join the international effort to disarm the LRA, and stop Joseph Kony. And we'll let you know right when that's ready!</p>\n\n<p>If you would like to know all there is to know about us, or do some of your own research on the LRA conflict, we have great resources here:</p>\n\n<p><strong><a href=\"https://docs.google.com/document/d/19brkLyLKg5RGtRQxEpyoaob-Gyhnz8_i23V6uMvg1Xw/edit\">Child Soldier Cycle Information Package</a></strong></p>\n\n<p>If you would like to talk to us about ANYTHING else, including how you can get more involved, hit \"reply\" to this email (<a href=\"mailto:info@childsoldiercycle.ca\">info@childsoldiercycle.ca</a>) and let us know!</p>\n\n<p><em>Sincerely,</em></p>\n\n<p>The Child Soldier Cycle Team</p>\n\n<p class=\"font-size:0.8em;color:#999\">To unsubscribe at any time, just <a href=\"mailto:info@childsoldiercycle.ca\">reply</a> with \"unsubscribe\" as the subject.</p>\n\n<p>P.S. Check us out on <a href=\"http://www.facebook.com/ChildSoldierCycle\">Facebook</a>, <a href=\"https://twitter.com/#!/CSCTalkTheTalk\">Twitter</a>, and <a href=\"https://plus.google.com/u/0/110082044185831966517/posts\">Google+</a>!</p>\n\n\t\t\t\"\"\",\n\t\t\tfrom_email='Child Soldier Cycle <info@childsoldiercycle.ca>',\n\t\t\tto=[request.session['email']],\n\t\t\tbcc=['info@childsoldiercycle.ca'],\n\t\t)\n\t\te.content_subtype = \"html\"\n\t\te.send()\n\t\tc['thanks'] = True\n\telse:\n\t\tif request.method == \"POST\":\n\t\t\tpform = forms.TalkerForm(request.POST)\n\t\t\tif pform.is_valid():\n\t\t\t\tpform.save()\n\t\t\t\trequest.session['email'] = request.POST['email']\n\t\t\t\trequest.session['name'] = request.POST['name']\n\t\t\t\treturn redirect(\"/talk-the-talk?success=true\")\n\t\t\telse:\n\t\t\t\tc['tform'] = pform\n\t\telse:\n\t\t\tc['tform'] = forms.TalkerForm()\n\t\n\tmenuPage(c)\n\treturn render_to_response(\"talker.htm\", c)\n","sub_path":"csc2/content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65304311","text":"import openFile\nimport networkx as nx\n\ndef insertNodes():\n colnames = ['latitude', 'longtitude', 'searchval', 'blk_no', 'road_name', 'buidling', 'address']\n\n punggol = nx.Graph()\n data = openFile.openfile()\n Nodes = openFile.getNodes(data)\n latitude = openFile.getLatitude(data)\n longtitude = openFile.getlongtitude(data)\n blk_no=openFile.getblk_no(data)\n road_name = openFile.getroad_name(data)\n address = openFile.getroad_name(data)\n\n openFile.serachLatitude(\"latitude\", \"1.397946168\")\n\n for i in range(1,len(Nodes)):\n punggol.add_node(Nodes[i],\n latitude=latitude[i],\n 
longtitude=longtitude[i],\n blk_no=blk_no[i],\n road_name=road_name[i],\n address=address[i])\n\n return punggol\n\n\n\n","sub_path":"1008project/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52216400","text":"class Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n if not matrix:\n return 0\n row = len(matrix)\n col = len(matrix[0])\n DP = [[0]*col for _ in range(row)]\n maxSide = 0\n for i in range(row):\n for j in range(col): \n if matrix[i][j] =='1':\n if i==0 or j==0:\n #DP[i][j] =matrix[i][j]\n DP[i][j] = 1\n #print(\"matrix value:\",matrix[i][j])\n #print(\"DP value:\",DP[i][j])\n else:\n DP[i][j] = min(DP[i-1][j],DP[i-1][j-1],DP[i][j-1])+1\n\n maxSide = max(DP[i][j],maxSide)\n maxArea = maxSide*maxSide\n return maxArea","sub_path":"Week_04/homework/maximalSquare.py","file_name":"maximalSquare.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433764712","text":"import ssl, json, datetime\r\nimport pandas as pd\r\nimport urllib.request\r\nfrom pandas import DataFrame\r\n\r\nprint('집객시설 주소 찾기')\r\nfilename = 'jibhak_long_lat.csv' \r\n\r\ndf_raw=pd.read_csv(filename,encoding='cp949',index_col=0)\r\nprint(df_raw.head())\r\n\r\nprint(len(df_raw['longitude']))\r\n\r\ndef get_request_url(url):\r\n client_id = \"\"\r\n client_secret = \"\"\r\n \r\n req=urllib.request.Request(url)\r\n req.add_header('X-NCP-APIGW-API-KEY-ID', client_id)\r\n req.add_header('X-NCP-APIGW-API-KEY', client_secret)\r\n \r\n try:\r\n context=ssl._create_unverified_context()\r\n response=urllib.request.urlopen(req,context=context)\r\n if response.getcode()==200:\r\n# print('[%s] url request success' % datetime.datetime.now())\r\n return response.read().decode('utf-8')\r\n \r\n except Exception as err:\r\n print(err)\r\n print('[%s] error for url : %s' % (datetime.datetime.now(), url))\r\n pass\r\n \r\ndef getGeoData( address1, address2 ):\r\n url = 'https://naveropenapi.apigw.ntruss.com/map-reversegeocode/v2/gc'\r\n aaa = urllib.parse.quote(address1)\r\n bbb = urllib.parse.quote(address2)\r\n url += '?request=coordsToaddr&coords=%s' % ( aaa + ',' + bbb )\r\n url += '&sourcecrs=epsg:4326&output=json&orders=admcode'\r\n \r\n result = get_request_url( url )\r\n \r\n if ( result == None ):\r\n print(address1+','+address2)\r\n return None \r\n else :\r\n return json.loads( result ) # dict로 반환\r\n \r\nmylist=[]\r\nfor idx in range(len(df_raw['longitude'])):\r\n lat=df_raw.iloc[idx]['latitude']\r\n long=df_raw.iloc[idx]['longitude']\r\n address2=str(lat)\r\n address1=str(long)\r\n jsonResult=getGeoData(address1, address2)\r\n# print(address2,address1)\r\n# print(jsonResult)\r\n \r\n jsonlist=jsonResult['results'][0]['region']\r\n sido=jsonlist['area1']['name']\r\n gu=jsonlist['area2']['name']\r\n dong=jsonlist['area3']['name']\r\n# print(sido,gu,dong)\r\n \r\n \r\n mylist.append((address2,address1,sido,gu,dong))\r\n\r\nmycolumn=['latitude','longitude','sido','gu','dong']\r\n\r\nmyframe=DataFrame(mylist,columns=mycolumn)\r\n\r\nfilename='jibgak_address.csv'\r\n\r\nmyframe.to_csv(filename,encoding='cp949')\r\nprint(filename+'저장완료')","sub_path":"jibgak_find_address.py","file_name":"jibgak_find_address.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138124591","text":"import 
argparse\nimport random\nfrom pathlib import Path\n\n\ndef read_dimacs(cnf_file, compressed: bool):\n if compressed:\n import bz2\n with bz2.open(cnf_file) as fin:\n data = fin.read().decode()\n lines = data.splitlines(keepends=True)\n for idx, line in enumerate(lines):\n tokens = line.split()\n if tokens[0] == 'c':\n continue\n if tokens[0] == 'p':\n nv, nc = int(tokens[-2]), int(tokens[-1])\n lines.pop(idx)\n return nv, nc, lines\n else:\n with open(cnf_file) as fin:\n lines = fin.readlines()\n for idx, line in enumerate(lines):\n tokens = line.split()\n if tokens[0] == 'c':\n continue\n if tokens[0] == 'p':\n nv, nc = int(tokens[-2]), int(tokens[-1])\n lines.pop(idx)\n return nv, nc, lines\n raise Exception('invalid CNF file')\n\n\ndef main():\n Path.mkdir(Path(args.output_folder), exist_ok=True)\n nvs, ncs = [], []\n\n if args.strategy == 'random':\n for fin in Path(args.input_folder).rglob('*.cnf*'):\n compressed = 'bz2' in fin.suffix\n nv, nc, lines = read_dimacs(fin, compressed)\n nvs.append(nv)\n ncs.append(nc)\n assign = []\n add_stat = max(0, nv - args.size)\n\n assign.append('p cnf ' + str(nv) + ' ' + str(nc + add_stat) + '\\n')\n vars = random.choices(range(1, nv + 1), k=add_stat)\n for var in vars:\n assign.append(('-' if random.random() < 0.5 else '') + str(var) + ' 0' + '\\n')\n assign += lines\n fout = Path(args.output_folder) / (fin.parent.name + '_' + fin.name.split('.')[0] + '.cnf')\n with open(fout, 'w') as new_file:\n new_file.writelines(assign)\n\n print('---Original statistics---')\n print('Vars: [{}, {}], mean = {}'.format(min(nvs), max(nvs), sum(nvs) / len(nvs)))\n print('Clauses: [{}, {}], mean = {}'.format(min(ncs), max(ncs), sum(ncs) / len(ncs)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-S', '--strategy', type=str, required=True, help='random')\n parser.add_argument('-I', '--input_folder', type=str, required=True)\n parser.add_argument('-O', '--output_folder', type=str, required=True)\n parser.add_argument('-N', '--size', type=int, default=1000)\n args = parser.parse_args()\n\n main()\n","sub_path":"python/simplify.py","file_name":"simplify.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139503244","text":"import json\nfrom praw.models import Comment\nfrom webbrowser import open as webopen\nimport urllib.request\nimport time\nimport CGCommons\n\ndef find_winner_thread(meta, winner_no):\n # Get the thread link of the winner's comment\n for thread in meta['threads']:\n winner_no -= thread['trunc_length'] if meta['WinnerFromFile'] == \"Truncated\" else thread['length']\n if winner_no <= 0:\n return thread['link']\n\n\ndef get_winner_name(reddit=None, url=None, cid=None):\n # Get the winner's name\n return Comment(reddit, id=cid).author.name\n\n\ndef api_json(link):\n # Request data from API and load as a json-dict\n with urllib.request.urlopen(link) as url:\n try:\n return json.loads(url.read().decode())\n except ValueError:\n return None\n\n\ndef get_win_hash(meta=None):\n # Wait till 30 seconds after draw time first\n wait_time = meta['DrawTime'] - time.time() + 30 if (meta['DrawTime'] - time.time()) + 30 > 0 else 0\n print(\"Waiting {:.2f} seconds till draw!\".format(wait_time))\n time.sleep(wait_time)\n\n # Request the blocks of the entire day\n resp = api_json('https://blockchain.info/blocks/{}000?format=json'.format(meta['DrawTime'] - 1))\n block0 = 0\n # Iterate through each block, and save the height of the 
last block mined BEFORE the draw time\n for block in resp['blocks']:\n if block['time'] < meta['DrawTime']:\n block0 = block['height']\n\n # Calculate the winning block height/number\n win_block = block0 + meta['WaitTillBlock']\n print(\"Winning Block Number: {}\".format(win_block))\n\n # Keep requesting for the winner block's data till a valid reponse is received (i.e. it is mined)\n # while not (block := api_json('https://blockchain.info/block-height/{}?format=json'.format(str(win_block)))):\n block = api_json('https://blockchain.info/block-height/{}?format=json'.format(str(win_block)))\n while not block:\n height = api_json('https://blockchain.info/latestblock')['height']\n # print(\"Awaiting Block {}.... Current Block: {}\".format(win_block, height := api_json('https://blockchain.info/latestblock')['height']))\n print(\"Awaiting Block {}.... Current Block: {}\".format(win_block, height))\n if win_block - height <= 1:\n time.sleep(10)\n elif win_block - height == 2: # Sometimes 2 are mined near simultaneously\n time.sleep(30)\n else:\n time.sleep(90)\n block = api_json('https://blockchain.info/block-height/{}?format=json'.format(str(win_block)))\n\n # Return the winning block's hash and print the time it was mined, as a double-check\n print(\"\\nBlock Found!\\nBlock Time: {} UTC\\n\".format(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(block['blocks'][0]['time']))))\n return block['blocks'][0]['hash']\n\n\ndef main():\n with open('meta.json', 'r') as f:\n meta = json.load(f)\n\n file_name = meta['CID_Filename']\n\n if meta['WinnerFromFile'] == \"Truncated\": # Defaults to original file\n file_name = file_name.rstrip('.txt') + '_Truncated.txt'\n\n print(\"Drawing winner from {}\\nAbort if this is not correct!\\n\".format(file_name))\n\n with open(file_name, 'r') as f:\n comment_ids = [line.strip() for line in f]\n\n # Set winning hash, obtains from API if it is not provided\n win_hash = get_win_hash(meta) if meta['Win_Hash'] == '' else meta['Win_Hash']\n meta['Win_Hash'] = win_hash\n\n # Get winner and his/her details\n total = (len(comment_ids))\n winner_no = (1 + (int(win_hash, 16) % total))\n winner_id = comment_ids[winner_no - 1]\n winner_link = ''.join((find_winner_thread(meta, winner_no), winner_id))\n winner = get_winner_name(reddit=CGCommons.init_reddit(), cid=winner_id)\n\n # Print winner details\n print(\"Using {} comment list!\\n\".format(meta['WinnerFromFile']))\n print(\"Total Participants: {}\\nWinner: {}\\nHash: {}\\n\".format(total, winner_no, win_hash))\n print(\"Winner Comment ID: {}\".format(winner_id))\n print(\"Winning Comment URL: {}\".format(winner_link))\n print(\"Winner: {}\".format(winner))\n\n # Save winner details to meta\n meta['Total Participants'] = total\n meta['Winner_Number'] = winner_no\n meta['Winner_ID'] = winner_id\n meta['Winner_Link'] = winner_link\n meta['Winner'] = winner\n with open('meta.json', 'w') as outfile:\n json.dump(meta, outfile, indent=4)\n\n # Opens link to comment if user chooses so\n x = input(\"\\nEnter Y/y to open winning comment...\")\n if x.upper() == \"Y\":\n webopen(winner_link)\n x = input(\"Draw complete! 
Press Enter to exit...\")\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"2020 Feb~Oct Draws/getWinner.py","file_name":"getWinner.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76166531","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 13:57:31 2018\n\n@author: aryman\n\"\"\"\nfrom tkinter import *\nimport sqlite3\nimport pyttsx3\n\n#connect with DB\nconn = sqlite3.connect('appointment.kexi')\nc = conn.cursor()\n\n#empty list\nnumber = []\npatients = []\n\nsql = \"SELECT * FROM donar\"\nres = c.execute(sql)\nfor r in res:\n    ids = r[0]\n    name = r[1]\n    number.append(ids)\n    patients.append(name)\n    \n\n#window\nclass Application:\n    def __init__(self, master):\n        self.master = master\n        \n        self.x = 0\n        \n        self.heading = Label(master, text=\"Appointment\", font=('arial 40 bold'))\n        self.heading.place(x=250, y=2)\n        \n        #button to change patients\n        self.change = Button(master, text=\"Next Patient\", width=25, bg='steelblue', command=self.func)\n        self.change.place(x=600, y=600)\n        \n        #empty \n        self.n = Label(master, text=\"\", font=('arial 150 bold'))\n        self.n.place(x=500, y=200)\n        \n        #patient\n        self.pname = Label(master, text=\"\", font=('arial 150 bold'))\n        self.pname.place(x=200, y=480)\n        \n    #function to speak\n    def func(self):\n        self.n.config(text=str(number[self.x]))\n        self.pname.config(text=str(patients[self.x]))\n        engine = pyttsx3.init()\n        voices = engine.getProperty('voice')\n        rate = engine.getProperty('rate')\n        engine.setProperty('rate', 50)\n        engine.say('Patient Number ' + str(number[self.x]) + str(patients[self.x]))\n        engine.runAndWait()\n        self.x += 1\n#creating the object\nroot = Tk()\n\n#title of App\nroot.title(\"Appointment\")\nb = Application(root)\n\n#resolution of the window\nroot.geometry('1200x720+0+0')\n\n#preventing the resize feature\nroot.resizable(True, True)\n\n#end the loop\nroot.mainloop()\n    ","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472503690","text":"import os\nimport glob\n\nfrom experiments.utils import pickle_load, pickle_dump, get_IBM_backend, separate_multi_counts, jsd\n\n\nfrom pprint import pprint\n\ndef result_sched(dir_path, backend_name, save_path=None): \n    # get path to this file and parent dir\n    job_files = glob.glob(dir_path+'/*.pickle')\n    # job_files = job_dir\n\n\n    # open job files\n    job_id_set = []\n    bench_name_list = []\n    for job_file in job_files:\n        job_data = pickle_load(job_file)\n        job_id_set.append(job_data[\"job\"])\n        bench_name_list.append(job_data[\"bench_names\"])\n    \n\n    # load ibmq backend\n    backend = get_IBM_backend(backend_name)\n    simulator = get_IBM_backend('ibmq_qasm_simulator')\n\n    # retrieve jobs and get load results\n    \n    # simulator\n    counts_sim_set = _retrieve_load_result(job_id_set, bench_name_list, device=simulator, type='simulator')\n    # pprint(counts_sim_set)\n\n    # nonsched layout\n    counts_set_nonsched = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='nonsched')\n    # pprint(counts_set_nonsched)\n    jsd_nonsched = _analyze_results(counts_sim_set, counts_set_nonsched)\n    pprint(jsd_nonsched)\n\n    # alap adaptive transpiler\n    counts_set_alap = _retrieve_load_result(job_id_set, bench_name_list, device=backend, type='alap')\n    # print(counts_set_alap)\n    jsd_alap = _analyze_results(counts_sim_set, 
counts_set_alap)\n    pprint(jsd_alap)\n\n    eval_dict = {\n        'nonsched': jsd_nonsched,\n        'alap': jsd_alap\n    }\n    if save_path: \n        dir_path = os.path.dirname(save_path)\n        if not os.path.exists(dir_path):\n            print('make directory: ', dir_path)\n            os.mkdir(dir_path)\n        pickle_dump(eval_dict, save_path)\n\ndef _retrieve_load_result(job_id_set, bench_name_list, device, type): \n    counts_set = []\n    for job_ids, name_list in zip(job_id_set, bench_name_list): \n        job_s = device.retrieve_job(job_ids[type]['job_id']['single'])\n        job_m = device.retrieve_job(job_ids[type]['job_id']['multi'])\n        counts_s_dict = {}\n        for i, qc in enumerate(job_ids[type]['qc']['single']):\n            counts_s_dict[qc.name] = job_s.result().get_counts(i)\n        counts_m = job_m.result().get_counts()\n        counts_set.append((counts_s_dict, counts_m, name_list))\n\n    return counts_set\n\ndef _analyze_results(counts_sim_set, counts_set): \n    jsd_dict_list = []\n    for sim_set, counts in zip(counts_sim_set, counts_set):\n        counts_sim_s, counts_sim_m, _ = sim_set\n        counts_s, counts_m, name_list = counts\n\n        counts_m_list, num_clbits = separate_multi_counts(counts_m)\n        print('num_clbits: ', num_clbits)\n        jsd_dict = {}\n        i = 0\n        for qc_name, _counts_m, _bits in zip(name_list, counts_m_list[::-1], num_clbits[::-1]): \n            jsd_s = jsd(counts_sim_s[qc_name], counts_s[qc_name], _bits)\n            jsd_m = jsd(counts_sim_s[qc_name], _counts_m, _bits)\n\n            jsd_u = jsd(counts_sim_s[qc_name], 'uni', _bits)\n            jsd_dict[qc_name] = {'single': jsd_s, 'multi': jsd_m, 'uniform': jsd_u}\n            i += 1\n        jsd_dict_list.append(jsd_dict)\n    return jsd_dict_list\n\n","sub_path":"scheduling/result_sched.py","file_name":"result_sched.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307236657","text":"import os\n\nclass Config:\n    DEBUG = True\n    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:123456@127.0.0.1:3306/news'\n    SQLALCHEMY_TRACK_MODIFICATIONS = False\n    SECRET_KEY = 'DH39HIDNNGS9'\n    BASE_PATH = os.path.dirname(os.path.abspath(__file__))\n    STATIC_PATH = os.path.join(BASE_PATH,'static')\n    UPLOAD_PATH = os.path.join(STATIC_PATH,'upload')\n    UPLOAD_ICON_PATH = os.path.join(UPLOAD_PATH,'icon')\n    UPLOAD_IMAGES_PATH = os.path.join(UPLOAD_PATH,'images')\n\nclass Development(Config):\n    ENV = 'development'\n\nclass Production(Config):\n    ENV = 'production'\n    DEBUG = False\n\n\n\nif __name__ == '__main__':\n    print(Config.UPLOAD_ICON_PATH)","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472378297","text":"# lst = [5,7,6,12,1,13,9,18,5]\n# # lst.sort() #sort is a method of list\n# # print(lst)\n#\n# l1 = sorted(lst) #built-in function that returns a new list\n# print(l1)\n# l2 = sorted(lst,reverse=True)\n# print(l2)\n\n# sort the list by the length of the strings\n# lst = [\"大秧歌a\",\"尼古拉斯aaa\",\"赵四aaaaaa\",\"谢大脚\",\"艾泽拉斯\"]\n#\n# a = sorted(lst,key= lambda s : s.count(\"a\"))\n# print(a)\n\nlst = [\n    {'id':1,'name':'alex','age':'12'},\n    {'id':2,'name':'taibai','age':'19'},\n    {'id':3,'name':'wusir','age':'48'},\n    {'id':4,'name':'ritian','age':'28'},\n    {'id':5,'name':'nvshen','age':'18'},\n    ]\n# a = lambda s:s['age']\na = sorted(lst,key=lambda dic:dic['age'])\nprint(a)\n","sub_path":"day15/sorted.py","file_name":"sorted.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319573504","text":"from flask import Flask, render_template, 
request, redirect, url_for, Response\nfrom flask_login import LoginManager, UserMixin, login_required, login_user, current_user, logout_user\nfrom random import choice\nfrom DatabaseInterface import DatabaseInterface\nfrom functions import generate_all_body_part_from_db_lst, generate_body_parts_lst_from_checkbox_lst, \\\n    generate_select_string_for_random\n\napp = Flask(__name__)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\napp.config[\"SECRET_KEY\"] = \"secret\"\n\nusers = {'root': {'password': 'secret'}}\n\n\nclass User(UserMixin):\n    pass\n\n\n@login_manager.user_loader\ndef user_loader(email):\n    if email not in users:\n        return\n\n    user = User()\n    user.id = email\n    return user\n\n\n@login_manager.request_loader\ndef request_loader(request):\n    email = request.form.get('email')\n    if email not in users:\n        return\n\n    user = User()\n    user.id = email\n\n    return user\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    if request.method == 'GET':\n        return render_template('login.html')\n    email = request.form['email']\n    try:\n        if request.form['password'] == users[email]['password']:\n            user = User()\n            user.id = email\n            login_user(user)\n            return redirect(url_for('index_page'))\n    except KeyError:\n        return render_template('bad_login.html')\n    return render_template('bad_login.html')\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n    logout_user()\n    return redirect(url_for('login'))\n\n\n@login_manager.unauthorized_handler\ndef unauthorized_handler():\n    return redirect(url_for('login'))\n\n\n@app.route('/')\n@login_required\ndef index_page():\n    return render_template('index.html')\n    # return redirect(url_for('login'))\n\n\n@app.route('/table')\n@login_required\ndef show_all():\n    connect_to_db = DatabaseInterface('exercise.db')\n    return render_template('index.html', table=1, items=connect_to_db.select_from_db())\n\n\n@app.route('/body_parts')\n@login_required\ndef select_parts():\n    connect_to_db = DatabaseInterface('exercise.db')\n    body_parts_lst = []\n    for i in connect_to_db.select_parts():\n        split_body_part = i[0].split(',')\n        for j in split_body_part:\n            body_parts_lst.append(j)\n    body_parts_lst = set(body_parts_lst)\n    body_parts_lst = list(body_parts_lst)\n    body_parts_lst.sort()\n    number_of_ids = []\n    for i in range(1, len(body_parts_lst) + 1):\n        number_of_ids.append(i)\n    a = list(zip(number_of_ids, body_parts_lst))\n    return render_template('index.html', table=2, items=a)\n\n\n@app.route('/random', methods=['post', 'get'])\n@login_required\ndef random():\n    message = ''\n    all_body_part_from_db = generate_all_body_part_from_db_lst('exercise.db')\n    if request.method == 'POST':\n        '''all_body_part_from_db - a list of all body_part values from the database, split one by one and sorted'''\n        body_parts_lst_from_checkbox = generate_body_parts_lst_from_checkbox_lst(all_body_part_from_db)\n        '''body_parts_lst_from_checkbox - a list of the body_part checkboxes that were ticked'''\n        select_str = generate_select_string_for_random(body_parts_lst_from_checkbox)\n        '''select_str - a SELECT query built from the ticked body_part checkboxes'''\n        '''Check that amount is valid'''\n        amount = request.form.get('amount')\n        if amount.isdigit() and amount != '0':\n            connect_to_db = DatabaseInterface('exercise.db')\n            table = connect_to_db.select_query(select_str)\n            s = ''\n            rand_lst = []\n            if int(amount) <= len(table):\n                while len(rand_lst) != int(amount):\n                    rand_lst.append(choice(table))\n                rand_lst = set(rand_lst)\n                rand_lst = list(rand_lst)\n                return render_template('random.html', items=all_body_part_from_db, 
random_list=rand_lst, table=1)\n else:\n message = f'IN DATABASE YOU HAVE ONLY {len(table)} STRINGS. PLEASE ENTER LESS THAN {len(table) + 1}.'\n else:\n message = \"You need to enter only positive digits\"\n return render_template('random.html', items=all_body_part_from_db, message=message)\n\n\n@app.route('/insert', methods=['post', 'get'])\n@login_required\ndef insert_exercise():\n message = ''\n if request.method == 'POST':\n name = request.form.get('name')\n body_part = request.form.get('body_part')\n about = request.form.get('about')\n pic_link = request.form.get('pic_link')\n connect_to_db = DatabaseInterface('exercise.db')\n all_rows = connect_to_db.select_for_insert()\n if name == '' or body_part == '' or about == '' or pic_link == '':\n message = 'You need to enter all values'\n return render_template('insert.html', message=message)\n else:\n if (name, body_part, about, pic_link) not in all_rows:\n connect_to_db.add_exercise(name, body_part, about, pic_link)\n message = f'String with {name}, {body_part}, {about}, {pic_link} added to DB'\n return render_template('insert.html', message=message)\n else:\n message = 'This data is already in table'\n return render_template('insert.html', message=message)\n return render_template('insert.html', message=message)\n\n\n@app.route('/delete', methods=['post', 'get'])\n@login_required\ndef delete_exercise():\n message = ''\n if request.method == 'POST':\n id_number = request.form.get('id_number')\n connect_to_db = DatabaseInterface('exercise.db')\n connect_to_db.select_id()\n if id_number.isdigit():\n if (int(id_number),) in connect_to_db.select_id():\n connect_to_db.delete_exercise(id_number)\n message = f'String with ID = {id_number} deleted from DB'\n else:\n message = f'String with ID = {id_number} does not exists!'\n else:\n message = 'You need to enter positive digit!'\n return render_template('delete.html', message=message)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297120707","text":"import sys\nsys.path.insert(1, 'final_task/rss_reader')\nfrom rss_parser import *\nfrom database_functions import *\nimport unittest\nfrom io import StringIO\nfrom unittest.mock import patch\n\nclass TestParseFunctions(unittest.TestCase):\n def setUp(self):\n\n self.dict_of_args = {\n \"url\": \"https://news.yahoo.com/rss/\",\n \"lim\": 1,\n \"json\": False,\n \"date\": \"20191122\",\n \"path\": r\"C:\\Users\\Lenovo\\PycharmProjects\\final_task\\FinalTaskRssParser\\final_task\\tests\",\n \"html\": True,\n \"pdf\": False}\n\n def test_read_news(self):\n with patch('sys.stdout', new=StringIO()) as fake_out_put:\n print_cache(self.dict_of_args)\n self.assertEqual(fake_out_put.getvalue().strip(), 'No results\\nTry to enter another date or url')\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"final_task/tests/test_rss_parser.py","file_name":"test_rss_parser.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80532606","text":"\"\"\"\n Find the greatest common divisor between 2 numbers\n\n Questions:\n - negtive numbers?\n - zeros?\n\"\"\"\n\n\n\"\"\"\n common way:\n\n findGcd(100, 40)\n \n -> 40, 100%40\n findGcd(40, 20)\n \n -> 40, 40%20\n findGcd(20, 0)\n\n so gcd = 20\n\"\"\"\n\n\ndef findGcd(a, b):\n if b == 0:\n return a\n return findGcd(b, a % b)\n\n\nclass 
Solution(object):\n def gcd(self, arr):\n \"\"\"\n Euclidian Algorithm\n dividend = divisor*quotient+remainder\n 100=45*2+10\n 45=10*4+5\n 10=5*2+0\n in the last row, the remainder is 0, therefore 5 is the common divisor\n\n Time O(n)\n Space O(1)\n\n amcat amazon: passed 13 out of 13 testcases\n \"\"\"\n if len(arr) == 0:\n return 0\n res = arr[0]\n for i in range(1, len(arr)):\n res = self.findGcd(res, arr[i])\n return res\n\n def findGcd(self, a, b):\n if a == 0 or b == 0:\n return 0\n dividend = max(a, b)\n divisor = min(a, b)\n while divisor != 0:\n remainder = dividend % divisor\n if remainder == 0:\n break\n dividend = divisor\n divisor = remainder\n return divisor\n\n\nprint(Solution().gcd([1206, 3768, 366]))\nprint(Solution().gcd(\n [2 * 11 * 13 * 17 * 19 * 23, 13 * 17 * 23 * 3, 2 * 13 * 17]))\nprint(Solution().gcd([17, 19, 11]))\nprint(Solution().gcd([-1701, 3768])) # ?\nprint(Solution().gcd([1701, -3768])) # ?\nprint(Solution().gcd([-1701, -3768])) # ?\nprint(Solution().gcd([1, 2]))\nprint(Solution().gcd([]))\n","sub_path":"glassdoor/amazon/gcd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548188462","text":"import maya.cmds as cmds\nimport os\n\nimport System.utils as utils\nimport System.blueprint as blueprintMod\n#reload(blueprintMod)\n\nCLASS_NAME = \"Wing\"\n\nTITLE = \"Wing\"\nDESCRIPTION = \"Creates 3 joints (the middle joint acting as a hinge joint). Suggested use: Arm\"\nICON = os.environ[\"GEPPETTO\"] + \"/Icons/armMod_button.bmp\"\n\nclass Wing(blueprintMod.Blueprint):\n def __init__(self, userSpecifiedName, hookObject):\n jointInfo = [ [\"wing_1_joint\", [0.0, 0.0, 0.0]], [\"wing_2_joint\", [5, 0.0, -0.2]], [\"wing_3_joint\", [10, 0.0, 0.0]] ] \n \n gameJntNames = [\"wing1_\", \"wing2_\", \"wing3_\"]\n\n blueprintMod.Blueprint.__init__(self, CLASS_NAME, userSpecifiedName, jointInfo, hookObject, gameJntNames, altCtrl=0 )\n\n # 107\n def install_custom(self, joints):\n cmds.select(clear=True)\n ikJoints=[]\n \n if not self.mirrored:\n index = 0\n for joint in self.jointInfo:\n ikJoints.append(cmds.joint(n=self.moduleNamespace+\":IK_\"+joint[0], p=joint[1], absolute=True, rotationOrder=\"xyz\"))\n \n cmds.setAttr(ikJoints[index]+\".visibility\", 0)\n \n if index != 0:\n \tcmds.joint(ikJoints[index -1], edit=True, oj=\"xyz\", sao=\"yup\")\n \t\n index += 1\n else:\n rootJointName = self.jointInfo[0][0]\n tempDuplicateNodes = cmds.duplicate(self.originalModule+\":IK_\"+rootJointName, renameChildren=True)\n \n cmds.delete(tempDuplicateNodes.pop())\n \n mirrorXY = False\n mirrorYZ = False\n mirrorXZ = False\n if self.mirrorPlane == \"XY\":\n mirrorXY = True\n elif self.mirrorPlane == \"YZ\":\n mirrorYZ = True\n elif self.mirrorPlane == \"XZ\":\n mirrorXZ = True\n \n \n mirrorBehavior = False\n if self.rotationFunction == \"behavior\":\n mirrorBehavior = True\n \n mirrorJoints = cmds.mirrorJoint(tempDuplicateNodes[0], mirrorXY=mirrorXY, mirrorXZ=mirrorXZ, mirrorYZ=mirrorYZ, mirrorBehavior=mirrorBehavior)\n \n cmds.delete(tempDuplicateNodes)\n \n cmds.xform(mirrorJoints[0], ws=True, a=True, translation=cmds.xform(self.moduleNamespace+\":\"+rootJointName, q=True, ws=True, t=True))\n \n for i in range(3):\n jointName = self.jointInfo[i][0]\n newName = cmds.rename(mirrorJoints[i], self.moduleNamespace+\":IK_\"+jointName)\n ikJoints.append(newName)\n \t\n utils.addNodeToContainer(self.containerName, ikJoints)\n \n for joint in ikJoints:\n jointName = 
utils.stripAllNamespaces(joint)[1]\n cmds.container(self.containerName, edit=True, publishAndBind=[joint+\".rotate\", jointName+\"_R\"])\n \n cmds.setAttr(ikJoints[0]+\".preferredAngleY\", -50.0)\n cmds.setAttr(ikJoints[1]+\".preferredAngleY\", 50.0)\n\n # Call on the stretchy ik function from utils\n ikNodes = utils.RP_2segment_stretchy_IK(ikJoints[0], ikJoints[1], ikJoints[2], self.containerName)\n locators = (ikNodes[0], ikNodes[1], ikNodes[2])\n distanceNodes = ikNodes[3]\n \n # Point constraint stretch locators to transform control objects\n constraints = []\n for i in range(3):\n constraints.append(cmds.pointConstraint(self.getTranslationControl(joints[i]), locators[i], maintainOffset=False)[0])\n cmds.parent(locators[i], self.moduleNamespace+\":module_grp\", absolute=True)\n cmds.setAttr(locators[i]+\".visibility\", 0)\n \n utils.addNodeToContainer(self.containerName, constraints)\n \n scaleTarget = self.getTranslationControl(joints[1])\n paRepresentation = self.createPreferredAngleRepresentation(ikJoints[1], scaleTarget)\n \n cmds.setAttr(paRepresentation+\".axis\", lock=True)\n \n \n def UI_custom(self):\n joints = self.getJoints()\n\n self.createRotationOrderUIControl(joints[0])\n self.createRotationOrderUIControl(joints[1])\n \n \n def lock_phase1(self):\n # GAther and return all required information from this modules control objects.\n # Joint Positions = list of joint positions from the root down the hierarchy\n # Joint orientations = a list of orientations, or a list of axis information ( orient joint and secondaryAxisOrient from\n # # These are passed in the following tuple: ( orientations, None) or ( NOne, axis info)\n # JointRotationOrders = a list of joint rotation orders ( integer values gathered with getAttr)\n # jointPreferred Angles = a list of joint preferred angles, optional (can pass None)\n # hookObjedct = self.findHookObjectForLock()\n # rootTransform = a bool, either True or False. True = R, T, and S on root joint. 
False = R only.\n # \n # moduleInfo = (jointPositions , jointOrientations, jointRotationOrders, jointPreferredAngles, hookObject, rootTransform\n # Return moduleInfo\n \n jointPositions = []\n jointOrientationValues = []\n jointRotationOrders =[]\n jointPreferredAngles = []\n\n # Unlock container and delete ik handle.\n cmds.lockNode(self.containerName, lock=False, lockUnpublished=False)\n ikHandle = self.moduleNamespace+\":IK_\"+self.jointInfo[0][0]+\"_ikHandle\"\n cmds.delete(ikHandle)\n \n # Freeze transforms on the joints before lock\n for i in range(3):\n jointName = self.jointInfo[i][0]\n ikJointName = self.moduleNamespace+\":IK_\"+jointName\n cmds.makeIdentity(ikJointName, rotate=True, translate=False, scale=False, apply=True)\n\n jointPositions.append(cmds.xform(ikJointName, q=True, ws=True, t=True))\n \n jointRotationOrders.append(cmds.getAttr(self.moduleNamespace+\":\"+jointName+\".rotateOrder\"))\n \n if i < 2:\n jointOrientX = cmds.getAttr(ikJointName+\".jointOrientX\")\n jointOrientY = cmds.getAttr(ikJointName+\".jointOrientY\")\n jointOrientZ = cmds.getAttr(ikJointName+\".jointOrientZ\")\n \n jointOrientationValues.append( (jointOrientX, jointOrientY, jointOrientZ) )\n \n joint_paX = cmds.getAttr(ikJointName+\".preferredAngleX\")\n joint_paY = cmds.getAttr(ikJointName+\".preferredAngleY\")\n joint_paZ = cmds.getAttr(ikJointName+\".preferredAngleZ\")\n \n jointPreferredAngles.append( (joint_paX, joint_paY, joint_paZ) )\n \n\n jointOrientations= (jointOrientationValues, None)\n \n hookObject = self.findHookObjectForLock()\n rootTransform = False\n \n moduleNamespace = self.moduleNamespace\n \n moduleInfo = (jointPositions, jointOrientations, jointRotationOrders, jointPreferredAngles, hookObject, rootTransform, moduleNamespace)\n \n return moduleInfo\n \n \n \n\n \n \n \n \n\n","sub_path":"Modules/Blueprint/oldMods/wing.py","file_name":"wing.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150329575","text":"# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n def sortedArrayToBST(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: TreeNode\r\n \"\"\"\r\n def bi(nums, l, r):\r\n if l > r:\r\n return None \r\n middle = (l + r) // 2\r\n root = TreeNode(nums[middle])\r\n root.left = bi(nums, l, middle - 1) \r\n root.right = bi(nums, middle + 1, r) \r\n return root \r\n\r\n length = len(nums)\r\n if length == 0:\r\n return None \r\n l, r = 0, length - 1\r\n return bi(nums, l, r)\r\n\r\nif __name__ == '__main__':\r\n from templates.TreeNode import *\r\n # inputs\r\n nums = [-10,-3,0,5,9]\r\n nums = list(range(6))\r\n print('-' * 30)\r\n res = Solution().sortedArrayToBST(nums)\r\n print_tree(res, 3)\r\n\r\n\r\n ","sub_path":"108_sortedArrayToBST.py","file_name":"108_sortedArrayToBST.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455952365","text":"from django.urls import path\nfrom django.conf.urls import url, include\nfrom . 
import views\n\napp_name='sightings'\n\nurlpatterns = [\n path('', views.index, name = \"index\"),\n path('add/',views.add,name = \"add\"),\n path('stats/',views.stats,name='stats'),\n path('update/',views.update,name='update'),\n]\n","sub_path":"squirrel/sightings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177032513","text":"\n# Django\nfrom django.contrib import admin\n\n# Local\nfrom .models import Human\nfrom .models import Join\nfrom .models import Membership\nfrom .models import Role\nfrom .models import Status\nfrom .models import Structure\nfrom .models import Subscription\n\n\nclass SubscriptionInline(admin.TabularInline):\n model = Subscription\n fields = [\n 'items_editable',\n 'current_through',\n 'status',\n 'human',\n ]\n readonly_fields = [\n 'items_editable',\n 'current_through',\n 'status',\n 'human',\n ]\n show_change_link = True\n extra = 0\n # classes = [\n # 'collapse',\n # ]\n max_num = 0\n can_delete = False\n\n\nclass JoinInline(admin.TabularInline):\n model = Join\n fields = [\n '__str__',\n 'subscription',\n 'membership',\n 'structure',\n # 'human',\n 'vocal_part',\n 'inactive_date',\n 'status',\n ]\n readonly_fields = [\n '__str__',\n 'subscription',\n 'membership',\n 'structure',\n # 'human',\n 'vocal_part',\n 'inactive_date',\n 'status',\n ]\n show_change_link = True\n extra = 0\n # classes = [\n # 'collapse',\n # ]\n max_num = 0\n can_delete = False\n\n\nclass MembershipInline(admin.TabularInline):\n model = Membership\n fields = [\n '__str__',\n 'status',\n ]\n readonly_fields = [\n '__str__',\n 'status',\n ]\n show_change_link = True\n extra = 0\n # classes = [\n # 'collapse',\n # ]\n max_num = 0\n can_delete = False\n\n\nclass RoleInline(admin.TabularInline):\n model = Role\n fields = [\n '__str__',\n 'start_date',\n 'end_date',\n ]\n readonly_fields = [\n '__str__',\n 'start_date',\n 'end_date',\n ]\n show_change_link = True\n extra = 0\n # classes = [\n # 'collapse',\n # ]\n max_num = 0\n can_delete = False\n","sub_path":"project/bhs/inlines.py","file_name":"inlines.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297929755","text":"\n#%%\nimport rebound\nimport matplotlib.pyplot as plt\nimport pandas as pd \nimport numpy as np\nfrom ReBound_Propogation_2functions import trajectory_from_rebound\nfrom ReBound_Propogation_2functions import trajectory_rv_from_rebound\nfrom matplotlib import cm\nimport multiprocessing\nfrom joblib import Parallel, delayed\nimport time,os\nimport datetime, julian\nfrom cislunar_constant import *\n\nfolder = \"../Data/\"\n# date\ndate = \"2010-03-26 02:05\"\ndate_info = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M\")\nMJD_model = julian.to_jd(date_info,fmt='mjd')\n# rebound simulation\n# sim = rebound.Simulation()\n# sim.add('Geocenter',date = date)\n# sim.add('Luna')\n\n# save to binary setup file\nsimfile = f\"Cislunar_{MJD_model}.bin\"\n# sim.save(simfile)\nsim = rebound.Simulation(simfile)\nprint(f'{sim.units}')\nstart_time = time.time()\n\nearth = sim.particles[0]\nmoon = sim.particles[1]\nmoon_orbit = moon.calculate_orbit(primary=sim.particles[0]) # j2000 \nprint(moon_orbit)\n\n# create meshgrid from self defined values\nSMA_min, SMA_max = 20 * Re / AU, 80 * Re / AU #AU\nECC_min, ECC_max = 0.00001, 0.99999\nNo_steps_SMA = 100\nNo_steps_ECC = 20\nSMA_linspace = 
np.linspace(SMA_min,SMA_max,No_steps_SMA) \nECC_linspace = np.linspace(ECC_min,ECC_max,No_steps_ECC) \nSMA_grid,ECC_grid = np.meshgrid(SMA_linspace,ECC_linspace)\n\n# flatten for calculation\nSMA_flatlist = SMA_grid.flatten().tolist()\nECC_flatlist = ECC_grid.flatten().tolist()\nprint(len(SMA_flatlist))\n\n# take same INC,RAAN,AOP,MA in rad\nINC_moon,RAAN_moon,AOP_moon,MA_moon = moon_orbit.inc,moon_orbit.Omega,moon_orbit.omega,moon_orbit.M\n\n# for Trojan case only\n#MA_config = -60\n# for Greek case only\nMA_config = 60\n# for custom case\n#MA_config = 0\n\n# custom INC,RAAN,AOP,MA\nINC,RAAN,AOP= INC_moon,RAAN_moon,AOP_moon\nMA = MA_moon + np.deg2rad(MA_config)\n\n# rebound propagation detail\nsimintegrator = 'ias15' #'mercurius'\nsimdt = 0.0001 # 2pi is 1 year, time follows equation t = simdt/(2*pi)*1*365.25 (day)\nsim_ias15dt = 0 # ensure that close encounters do not stall the integration\nepsilon = 1e-8 # control accuracy for ias15\ntspan = 9/12 # 1 year is 1 year on Earth 365.25 solar days/earth days\nNo_output = 1000\nrb_detail = {'simintegrator':simintegrator, 'simdt':simdt, 'sim_ias15dt':sim_ias15dt,\n 'epsilon':epsilon, 'No_output':No_output, 'tspan':tspan}\n\n# Count number of cores\nnumCores = multiprocessing.cpu_count()\n# numCores = 11\n\n# trajectory write function\ndef trajectory_write(folder,simfile,MJD,SMA,ECC,INC,RAAN,AOP,MA,rb_detail):\n print(SMA)\n oe_extend_pd = trajectory_from_rebound(simfile,MJD,SMA,ECC,INC,RAAN,AOP,MA,**rb_detail)\n # rv_pd = trajectory_rv_from_rebound(simfile,MJD,SMA,ECC,INC,RAAN,AOP,MA)\n filename = simfile[0:-4] # delet string '.bin'\n trajectory_foldername = folder + f\"Trajectory_GreekCase_{filename}_{No_steps_SMA}x{No_steps_ECC}_{simintegrator}\"\n if not os.path.exists(trajectory_foldername): # create trajectory folder if not existed\n os.makedirs(trajectory_foldername)\n \n trajectory_filename=\"trajectory_\"+\"SMA\"+str(SMA)+\"_\"+\"ECC\"+str(ECC)+\"_oe.csv\"\n# trajectory_filename=\"trajectory_\"+\"SMA\"+str(SMA)+\"_\"+\"ECC\"+str(ECC)+\"_oe.h5\"\n oe_extend_pd.to_csv(trajectory_foldername+\"/\"+trajectory_filename)\n# oe_extend_pd.to_hdf(trajectory_foldername+\"/\"+trajectory_filename, key = 'trajectory', mode='w', format='table')\n\n # trajectory_rv_filename=\"trajectory_\"+\"SMA\"+str(SMA)+\"_\"+\"ECC\"+str(ECC)+\"_rv.csv\"\n# trajectory_rv_filename=\"trajectory_\"+\"SMA\"+str(SMA)+\"_\"+\"ECC\"+str(ECC)+\"_rv.h5\"\n # rv_pd.to_csv(trajectory_foldername+\"/\"+trajectory_rv_filename)\n# rv_pd.to_hdf(trajectory_foldername+\"/\"+trajectory_rv_filename, key = 'trajectory', mode='w', format='table')\n return\n\n# Run processes trajectory write functions\nParallel(n_jobs = numCores)(delayed(trajectory_write)(folder,simfile,MJD_model,SMA,ECC,INC,RAAN,AOP,MA,rb_detail) for SMA,ECC in zip(SMA_flatlist,ECC_flatlist))\n\n# time\nprint(time.time()-start_time)\n","sub_path":"Greek_ReBound_Earth_Moon.py","file_name":"Greek_ReBound_Earth_Moon.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326226506","text":"def build(code,create={},temp=[]):\r\n for posi,char in enumerate(code):\r\n if char==\"[\":temp.append(posi)\r\n if char==\"]\":value=temp.pop();create[posi]=value;create[value]=posi\r\n return create\r\ndef fuck_the_girl(text):\r\n text=[i for i in text if i in \"<>-+[],.\"]\r\n cells,code,cellptr,Map=[0],0,0,build(text)\r\n while code < len(text):\r\n char=text[code]\r\n if char == \">\":\r\n cellptr += 1\r\n if cellptr == len(cells): 
cells.append(0)\r\n if char == \"<\":cellptr = 0 if cellptr <= 0 else cellptr - 1\r\n if char == \"+\":cells[cellptr] = cells[cellptr]+1 if cells[cellptr] < 255 else 0\r\n if char == \"-\":cells[cellptr] = cells[cellptr]-1 if cells[cellptr] > 0 else 255\r\n if char == \"[\" and cells[cellptr]==0:code=Map[code]\r\n if char == \"]\" and cells[cellptr]!=0:code=Map[code]\r\n if char == \".\":\r\n try:print(chr(cells[cellptr]),end=\"\")\r\n except UnicodeEncodeError:pass\r\n if char == \",\": cells[cellptr] = ord(input())\r\n code+=1\r\nimport sys\r\nwith open(sys.argv[1],\"r\") as f:lines=list(filter(None, f.read().strip().splitlines()))\r\nfor line in lines:fuck_the_girl(line);print(\"\")\r\n\r\n","sub_path":"Brainfuck.py","file_name":"Brainfuck.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521171838","text":"import picamera\n\n# Configuration file to hold all PiCamera settings for video and images\ncamera = picamera.PiCamera()\ncamera.ISO = 800\ncamera.brightness = 55\ncamera.resolution = (720,480)\n\n# Settings below allow the camera to record upside down\ncamera.hflip = True\ncamera.vflip = True\n","sub_path":"Capture/captureconfig.py","file_name":"captureconfig.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"69942939","text":"import os\nfrom XmlParser import XmlParser\n\nsrc_filename = 'plugins.xml'\nworking_dir = os.getcwd()\nfile_path = working_dir + '\\\\src\\\\' + src_filename\nresult_path = working_dir + '\\\\out\\\\'\n\nif __name__ == \"__main__\":\n xml_parser = XmlParser(file_path)\n xml_parser.write_to_file(result_path)\n \n ","sub_path":"CrmPluginXmlParser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"118762442","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\noptions = webdriver.ChromeOptions()\n\nprefs = {'profile.default_content_settings.popups': 0, 'download.default_directory': r'D:\\pdf\\books\\books'}\noptions.add_experimental_option('prefs', prefs)\n\ndriver = webdriver.Chrome(chrome_options=options)\n\ndef start():\n\twith open('download_url.txt', 'r', encoding='utf-8') as f:\n\t\tfor line in f.readlines():\n\t\t\titems = line.split(' ')\n\n\t\t\turl = items[-2][:-1]\n\t\t\tpwd = items[-1]\n\n\t\t\t# if 'pan.baidu.com' in url:\n\t\t\t# \tprint(url + ' ' + pwd)\n\t\t\ttry:\n\t\t\t\tdownload(url, pwd)\n\t\t\texcept Exception:\n\t\t\t\tprint('something goes wrong!')\n\n\t\t\ttime.sleep(10)\n\ndef start_one():\n\twith open('download_url.txt', 'r', encoding='utf-8') as f:\n\t\tline = f.readline()\n\n\t\titems = line.split(' ')\n\t\turl = items[-2][:-1]\n\t\tpwd = items[-1]\n\n\t\ttry:\n\t\t\tdownload(url, pwd)\n\t\texcept Exception:\n\t\t\tprint('something goes wrong!')\n\ndef download(url, pwd):\n\tdriver.get(url)\n\tdriver.refresh()\n\n\tif pwd == '免费下载':\n\t\tpass\n\telse:\n\t\tinput = driver.find_element_by_css_selector('#accessCode')\n\n\t\tinput.send_keys(pwd)\n\t\tinput.send_keys(Keys.RETURN)\n\n\ttime.sleep(5)\n\n\tdownload = driver.find_elements_by_css_selector('.g-button')[1]\n\tdownload.click()\n\nif __name__ == '__main__':\n\tstart()\n\t# 
start_one()","sub_path":"ProjectCode/GrapComputerBooks/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104254945","text":"#!/usr/bin/python\n##########################################################################\n# If not stated otherwise in this file or this component's Licenses.txt\n# file the following copyright and licenses apply:\n#\n# Copyright 2015 RDK Management\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##########################################################################\n\n#######################################################################\n# Copyright [2014] [Cisco Systems, Inc.]\n# \n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#######################################################################\n\nimport optparse\nimport os\nimport sys\nimport unittest\nimport platform\n\n# Import HDK modules\nsys.path.append(os.path.join(os.path.dirname(sys.argv[0]), \"..\", \"..\", \"bin\", \"lib\"))\nfrom hdk.testutil import BuildSuite, UnittestSuite\n\n\n#\n# Main\n#\ndef main():\n\n # Command line options\n cmdParser = optparse.OptionParser()\n cmdParser.add_option(\"-r\", action = \"store_true\", dest = \"bNoClean\",\n help = \"No clean build\")\n cmdParser.add_option(\"-m\", action = \"store_true\", dest = \"bCheckMallocStats\",\n help = \"Check malloc statistics\")\n cmdParser.add_option(\"-t\", action = \"append\", dest = \"testsIncluded\",\n help = \"Run specified tests\", metavar = \"test\")\n cmdParser.add_option(\"-u\", action = \"store_true\", dest = \"bUpdateExpected\",\n help = \"Update expected output\")\n cmdParser.add_option(\"--debug\", action = \"store_true\", dest = \"bDebug\",\n help = \"Build debug binaries\")\n (cmdOptions, cmdArgs) = cmdParser.parse_args()\n\n # The unittest directory\n unittestDir = os.path.dirname(sys.argv[0])\n if not unittestDir:\n unittestDir = '.'\n\n # Create the test runner\n runner = unittest.TextTestRunner(verbosity = 2)\n\n # Build test suite\n buildDirs = []\n buildSuite = BuildSuite(unittestDir, buildDirs, None,\n not cmdOptions.bNoClean, cmdOptions.bDebug, cmdOptions.bUpdateExpected)\n if not runner.run(buildSuite).wasSuccessful():\n return 1\n\n # C unit test suite\n unittestSuite = UnittestSuite()\n unittestSuite.addTest(unittestDir, os.path.join(\"build\", \"unittest\"), \"unittest\",\n cmdOptions.bCheckMallocStats, cmdOptions.bUpdateExpected)\n\n if not 
runner.run(unittestSuite).wasSuccessful():\n return 1\n\n # Success\n return 0\n\n\n######################################################################\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"source/hdk2/hdk2/hdkcli_cpp/unittest/runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33087938","text":"def binarySearch(a,start,end):\n while start<=end:\n mid=start+(end-start)//2\n print(\"mid\",mid,\"end\",end)\n if a[mid]>=a[end]:\n start=mid+1\n else:\n ans=mid\n end=mid-1\n return ans\n \n \n\na=list(map(int,input().split()))\nprint(a[binarySearch(a,0,len(a)-1)])","sub_path":"Basics/SmallestInRotated.py","file_name":"SmallestInRotated.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99509163","text":"class Solution:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str: \n mydict = {}\n \n paragraph = paragraph.lower()\n \n for p in \"!?',;.\":\n paragraph = paragraph.replace(p, ' ')\n \n words = [x for x in paragraph.split() if x not in banned]\n \n for word in words:\n if word not in mydict:\n mydict[word] = 1\n else:\n mydict[word] += 1\n \n print(mydict)\n for k, v in mydict.items():\n if v == max(mydict.values()):\n return k","sub_path":"array_and_string/819.py","file_name":"819.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539189509","text":"from django.contrib import admin\r\nfrom django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.home, name=\"home\"),\r\n path('action/', views.action, name=\"action\"),\r\n path('comedy/', views.comedy, name=\"comedy\"),\r\n path('drama/', views.drama, name=\"drama\"),\r\n path('fantasy/', views.fantasy, name=\"fantasy\"),\r\n path('oscars/', views.oscars, name=\"oscars\"),\r\n path('popular/', views.popular, name=\"popular\"),\r\n path('romance/', views.romance, name=\"romance\"),\r\n path('thriller/', views.thriller, name=\"thriller\"),\r\n path('top/', views.top, name=\"top\"),\r\n\r\n]\r\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505615064","text":"#!/usr/bin/env python\n\nimport unittest\n\nfrom schematics.models import Model\nfrom schematics.types import IntType, StringType\nfrom schematics.types.compound import ModelType, ListType\nfrom schematics.exceptions import ValidationError\n\n\nclass TestModelType(unittest.TestCase):\n\n def test_simple_embedded_models(self):\n class Location(Model):\n country_code = StringType()\n\n class Player(Model):\n id = IntType()\n location = ModelType(Location)\n\n p = Player(dict(id=1, location={\"country_code\": \"US\"}))\n\n self.assertEqual(p.id, 1)\n self.assertEqual(p.location.country_code, \"US\")\n\n p.location = Location({\"country_code\": \"IS\"})\n\n self.assertIsInstance(p.location, Location)\n self.assertEqual(p.location.country_code, \"IS\")\n\n def test_simple_embedded_models_is_none(self):\n class Location(Model):\n country_code = StringType()\n\n class Player(Model):\n id = IntType()\n location = ModelType(Location)\n\n p = Player(dict(id=1))\n\n self.assertEqual(p.id, 1)\n self.assertIsNone(p.location)\n\n def 
test_simple_embedded_model_is_none_within_listtype(self):\n class QuestionResources(Model):\n type = StringType()\n\n class Question(Model):\n id = StringType()\n resources = ModelType(QuestionResources)\n\n class QuestionPack(Model):\n id = StringType()\n questions = ListType(ModelType(Question))\n\n question_pack = QuestionPack({\n \"id\": \"1\",\n \"questions\": [\n {\n \"id\": \"1\"\n },\n ]\n })\n\n self.assertIsNone(question_pack.questions[0].resources)\n\n def test_raises_validation_error_on_init_with_partial_submodel(self):\n class User(Model):\n name = StringType(required=True)\n age = IntType(required=True)\n\n class Card(Model):\n user = ModelType(User)\n\n u = User({'name': 'Arthur'})\n c = Card({'user': u})\n\n with self.assertRaises(ValidationError):\n c.validate()\n\n def test_model_type(self):\n class User(Model):\n name = StringType()\n\n class Card(Model):\n user = ModelType(User)\n\n c = Card({\"user\": {'name': u'Doggy'}})\n self.assertIsInstance(c.user, User)\n self.assertEqual(c.user.name, \"Doggy\")\n\n def test_equality_with_embedded_models(self):\n class Location(Model):\n country_code = StringType()\n\n class Player(Model):\n id = IntType()\n location = ModelType(Location)\n\n p1 = Player(dict(id=1, location={\"country_code\": \"US\"}))\n p2 = Player(dict(id=1, location={\"country_code\": \"US\"}))\n\n self.assertNotEqual(id(p1.location), id(p2.location))\n self.assertEqual(p1.location, p2.location)\n\n self.assertEqual(p1, p2)\n\n def test_default_value_when_embedded_model(self):\n class Question(Model):\n question_id = StringType(required=True)\n\n type = StringType(default=\"text\")\n\n class QuestionPack(Model):\n\n question = ModelType(Question)\n\n pack = QuestionPack({\n \"question\": {\n \"question_id\": 1\n }\n })\n\n self.assertEqual(pack.question.question_id, \"1\")\n self.assertEqual(pack.question.type, \"text\")\n","sub_path":"tests/test_model_type.py","file_name":"test_model_type.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295443231","text":"import torch\nimport torch.nn as nn\nimport time\n\nfrom components.encoder import Encoder\n\ntextEncoderConfig = {\n \"input_dim\": 300,\n \"hidden_dim\": 256,\n \"drop_prob\": 0.3,\n \"num_layers\": 1,\n \"is_bidirectional\": True,\n \"rnn_drop_prob\": 0.0,\n \"batch_size\": 64\n}\n\nconfig = {\n \"label_dim\": 184,\n \"max_length\": 100,\n \"batch_size\": 64,\n \"kernel_size\": 3,\n \"padding\": 1,\n \"max_size\": 10000,\n \"hidden_dim\": 256\n}\n\n\ndef func_k(x):\n return torch.relu(1 - torch.abs(x)).float()\n\n\ndef func_b(x):\n return torch.clamp_max(torch.relu(x), 1).float()\n\n\n# def construct_feature_map(labels, boxes, box_numbers):\n# feature_map = torch.zeros(config[\"batch_size\"], config[\"label_dim\"],\n# config[\"max_length\"], config[\"max_length\"]).to(device=labels.device)\n# x0 = (boxes[:, :, 0] * config['max_length']).long()\n# y0 = (boxes[:, :, 1] * config['max_length']).long()\n# x1 = (boxes[:, :, 2] * config['max_length']).long() + x0\n# y1 = (boxes[:, :, 3] * config['max_length']).long() + y0\n# for batch_idx in range(config[\"batch_size\"]):\n# for box_idx in range(box_numbers[batch_idx]):\n# label = labels[batch_idx, box_idx]\n# left = x0[batch_idx, box_idx]\n# right = x1[batch_idx, box_idx]\n# top = y0[batch_idx, box_idx]\n# bottom = y1[batch_idx, box_idx]\n# feature_map[batch_idx, label, left:right, top:bottom] = 1\n# return feature_map\n\ndef generateFeatureMap(labels, boxes, 
box_numbers):\n final_feature_map = torch.zeros(config[\"batch_size\"], config[\"label_dim\"],\n config[\"max_length\"], config[\"max_length\"]).to(device=labels.device)\n coordinate_base = torch.arange(config[\"max_length\"]).float().unsqueeze(0).unsqueeze(0).to(device=labels.device)\n # 1 * 1 * max_length\n coordinate_x = coordinate_base.unsqueeze(-2).repeat(labels.size(0), labels.size(1), config[\"max_length\"], 1)\n # 1 * 1 * max_length * max_length\n coordinate_y = coordinate_base.unsqueeze(-1).repeat(labels.size(0), labels.size(1), 1, config[\"max_length\"])\n # 1 * 1 * max_length * max_length\n boxes = (boxes * config[\"max_length\"]).unsqueeze(-2).unsqueeze(-2).repeat(1, 1, config[\"max_length\"], config[\"max_length\"], 1)\n # batch_size * box_numbers * 1 * 1\n x_l = coordinate_x - boxes[:, :, :, :, 0]\n y_t = coordinate_y - boxes[:, :, :, :, 1]\n x_r = coordinate_x - (boxes[:, :, :, :, 0] + boxes[:, :, :, :, 2])\n y_b = coordinate_y - (boxes[:, :, :, :, 1] + boxes[:, :, :, :, 3])\n feature_map = torch.max(\n torch.max(func_k(x_l), func_k(x_r)) * func_b(y_t) * func_b(-y_b),\n torch.max(func_k(y_t), func_k(y_b)) * func_b(x_l) * func_b(-x_r)\n )\n for batch_idx in range(config['batch_size']):\n label = labels[batch_idx][:box_numbers[batch_idx]]\n feature_maps = feature_map[batch_idx][:box_numbers[batch_idx]]\n final_feature_map[batch_idx][label] = torch.max(final_feature_map[batch_idx][label], feature_maps)\n return final_feature_map\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super().__init__()\n self.text_encoder = Encoder(textEncoderConfig)\n self.convolution_layers = nn.Sequential(\n nn.BatchNorm2d(num_features=config[\"label_dim\"]),\n nn.ReLU(),\n nn.Conv2d(in_channels=config[\"label_dim\"],\n out_channels=config[\"label_dim\"],\n kernel_size=config[\"kernel_size\"],\n padding=config[\"padding\"]),\n nn.BatchNorm2d(num_features=config[\"label_dim\"]),\n nn.ReLU(),\n nn.Conv2d(in_channels=config[\"label_dim\"],\n out_channels=config[\"label_dim\"],\n kernel_size=config[\"kernel_size\"],\n padding=config[\"padding\"]),\n nn.BatchNorm2d(num_features=config[\"label_dim\"]),\n nn.ReLU(),\n nn.Conv2d(in_channels=config[\"label_dim\"],\n out_channels=1,\n kernel_size=config[\"kernel_size\"],\n padding=config[\"padding\"])\n )\n self.transform_layout = nn.Linear(config[\"max_size\"], config['hidden_dim'])\n self.transform_text = nn.Linear(config['hidden_dim'] * 2, config['hidden_dim'])\n self.fuse = nn.Linear(config['hidden_dim'] * 2, config['hidden_dim'])\n self.mlp = nn.Sequential(\n nn.Linear(config[\"hidden_dim\"], config[\"hidden_dim\"]),\n nn.ReLU(),\n nn.Linear(config[\"hidden_dim\"], config[\"hidden_dim\"]),\n nn.ReLU()\n )\n self.final = nn.Linear(config[\"hidden_dim\"], 1)\n\n def forward(self, captions, caption_lengths, labels, boxes, box_numbers, label_order):\n # last_time = time.time()\n\n # 1. Extract Text Information\n encoder_state = self.text_encoder.init_state().to(device=captions.device)\n text_outputs, text_final_state = self.text_encoder(captions, caption_lengths, encoder_state)\n text_embedding = self.transform_text(torch.cat((text_final_state[0], text_final_state[1]), dim=-1))\n # 2. 
Construct Feature Map and make transformation\n        # print(\"PART1: \", time.time() - last_time)\n        # last_time = time.time()\n\n        feature_map = generateFeatureMap(labels, boxes, box_numbers)\n        # print(feature_map.size())\n        # print(\"PART2: \", time.time() - last_time)\n        # last_time = time.time()\n        flatten_feature_map = self.convolution_layers(feature_map).squeeze().contiguous().view(config[\"batch_size\"], -1)\n        layout_embedding = self.transform_layout(flatten_feature_map)\n        # 3. Fuse inputs and pass it to mlp\n        fuse_input = self.fuse(torch.cat((layout_embedding, text_embedding), dim=-1))\n        result = self.mlp(fuse_input) + fuse_input\n        output = torch.sigmoid(self.final(result))\n        # print(\"PART3: \", time.time() - last_time)\n        return output\n","sub_path":"components/discriminator_CNN.py","file_name":"discriminator_CNN.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616736420","text":"import time\nimport sys\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n              'new york city': 'new_york_city.csv',\n              'washington': 'washington.csv'}\n\ndef get_filters():\n    \"\"\"\n    Asks user to specify a city, month, and day to analyze.\n\n    Returns:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    \"\"\"\n    print('Hello! Let\\'s explore some US bikeshare data!')\n    # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n    print('Cities to explore Chicago, New York City or Washington')\n    print('Valid entries are: ')\n    print('                   Chicago / C ')\n    print('                   New York City / NYC / N')\n    print('                   Washington / W ')\n    city = None\n    while city not in ['chicago', 'new york city', 'washington']:\n        city = input('Please enter a city name: ').lower()\n        if city == 'chicago' or city == 'c':\n            city = 'chicago'\n        elif city == 'new york city' or city == 'nyc' or city == 'n':\n            city = 'new york city'\n        elif city == 'washington' or city == 'w':\n            city = 'washington'\n        else:\n            print('Invalid Entry - Chicago / C, New York City / NYC / N or Washington / W')\n            city = input(\"Please enter a valid city name: \").lower()\n\n    # get user input for month (all, january, february, ... , june)\n    print('Please enter a number for the Month you would like to explore')\n    print(\"January - 1 \\nFebruary - 2 \\nMarch - 3 \\nApril - 4 \\nMay - 5 \\nJune - 6 \\nAll - 0\")\n    inputMonth = -1\n    monthsDictionary = {1: 'january', 2: 'february', 3: 'march', 4: 'april', 5: 'may', 6: 'june', 0: 'all'}\n    while inputMonth not in monthsDictionary.keys():\n        inputMonth = eval(input(\"Please enter the month value for data required: \"))\n        if inputMonth not in monthsDictionary.keys():\n            print('That was a invalid entry, please try again\\n')\n    month = monthsDictionary[inputMonth]\n\n    # get user input for day of week (all, monday, tuesday, ... 
sunday)\n print('Please enter a number for the Day you would like to explore')\n print(\"Sunday - 1 \\nMonday - 2 \\nTuesday - 3 \\nWednesday - 4 \\nThursday - 5 \\nFriday - 6 \\nSaturday - 7 \\nAll - 0\")\n inputDay = -1\n dayDictionary = {1: 'sunday', 2: 'monday', 3: 'tuesday', 4: 'wednesday', 5: 'thursday', 6: 'friday', 7: 'saturday', 0: 'all'}\n while inputDay not in dayDictionary.keys():\n inputDay = eval(input(\"Please enter the day value for data required: \"))\n if inputDay not in dayDictionary.keys():\n print('That was a invalid entry, please try again: \\n')\n day = dayDictionary[inputDay]\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(\"{}.csv\".format(city.replace(\" \",\"_\")))\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n df['month'] = df['Start Time'].apply(lambda m: m.month)\n df['day_of_week'] = df['Start Time'].apply(lambda d: d.strftime('%A').lower())\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n df = df.loc[df['month'] == month,:]\n\n if day != 'all':\n df = df.loc[df['day_of_week'] == day,:]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n print(\"The most common month is {}\".format(\n str(df['month'].mode().values[0]))\n )\n\n # display the most common day of week\n print(\"The most common day of the week is {}\".format(\n str(df['day_of_week'].mode().values[0]))\n )\n\n # display the most common start hour\n df['start_hour'] = df['Start Time'].dt.hour\n print(\"The most common start hour is {}\".format(\n str(df['start_hour'].mode().values[0]))\n )\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(\"The most common start station is {} \".format(\n df['Start Station'].mode().values[0])\n )\n\n # display most commonly used end station\n print(\"The most common end station is {}\".format(\n df['End Station'].mode().values[0])\n )\n\n # display most frequent combination of start station and end station trip\n df['routes'] = df['Start Station'] + \" \" + df['End Station']\n print(\"The most common start and end station combo is {}\".format(\n df['routes'].mode().values[0])\n )\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n df['duration'] = df['End Time'] - df['Start Time']\n\n # display total travel time\n print(\"The total travel time is {}\".format(\n str(df['duration'].sum()))\n 
)\n\n    # display mean travel time\n    print(\"The mean travel time is {}\".format(\n        str(df['duration'].mean()))\n    )\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef user_stats(df, city):\n    \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n    print('\\nCalculating User Stats...\\n')\n    start_time = time.time()\n\n    # Display counts of user types\n    print(\"Here are the counts of various user types:\")\n    print(df['User Type'].value_counts())\n\n    if city != 'washington':\n        # Display counts of gender\n        print(\"Gender Counts:\")\n        print(df['Gender'].value_counts())\n\n        # Display earliest, most recent, and most common year of birth\n        print(\"The earliest birth year: {}\".format(\n            str(int(df['Birth Year'].min())))\n        )\n        print(\"The latest birth year: {}\".format(\n            str(int(df['Birth Year'].max())))\n        )\n        print(\"The most common birth year: {}\".format(\n            str(int(df['Birth Year'].mode().values[0])))\n        )\n    else:\n        print(\"Washington file does not contain Gender and Birth Years\")\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef main():\n    while True:\n        city, month, day = get_filters()\n        df = load_data(city, month, day)\n\n        time_stats(df)\n        station_stats(df)\n        trip_duration_stats(df)\n        user_stats(df, city)\n\n        restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n        if restart.lower() != 'yes':\n            break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"src/bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":7538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"569018656","text":"import logging\nimport os\nfrom sys import platform\nimport re\nfrom datetime import datetime\nfrom modules.configuration import Integration\nimport warnings\n\n# Suppressing DeprecationWarnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef logging_config(integration_config: Integration, logging_mode: str = 'INFO', log_to_file: bool = False, executable_path: str = __file__) -> logging:\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    logger_inst = logging.getLogger()\n    logger_inst.setLevel(logging_mode)\n    if log_to_file is True:\n        regex = r\"[^\\\\\\/](.+[\\\\\\/])*(.+.+)$\"\n        matches = re.search(regex, executable_path)\n        if matches:\n            file_name = matches.group(2).replace('.py', '')\n        else:\n            file_name = 'unknown_process'\n        log_name = integration_config.log_location + file_name + '_' + str(datetime.now().strftime(\"%Y-%m-%d_%H_%M_%S\")) + '.log'\n        try:\n            previous_log_location = os.environ['sio_atm'].encode('latin1')\n        except Exception:\n            previous_log_location = 'none'\n        if platform == \"linux\" or platform == \"linux2\":\n            with open(os.path.expanduser(\"~/.bashrc\"), \"a\") as outfile:\n                # 'a' stands for \"append\"; terminate each export with a newline so the two lines don't run together in .bashrc\n                outfile.write(\"export sio_atm_old_log=\" + str(previous_log_location) + \"\\n\")\n                outfile.write(\"export sio_atm_log=\" + log_name + \"\\n\")\n        elif platform == \"win32\":\n            # do nothing :)\n            pass\n        fh = logging.FileHandler(log_name)\n        fh.setLevel(logging_mode)\n        fh.setFormatter(formatter)\n        logger_inst.addHandler(fh)\n    ch = logging.StreamHandler()\n    ch.setLevel(logging_mode)\n    ch.setFormatter(formatter)\n    logger_inst.addHandler(ch)\n    # Turning off the paramiko.transport DEBUG\n    logging.getLogger(\"paramiko.transport\").setLevel(logging.WARNING)\n    logging.getLogger(\"NodeGlobal\").setLevel(logging.WARNING)\n    logging.getLogger(\"PhysNode_ssh_execute\").setLevel(logging.WARNING)\n    return logger_inst","sub_path":"modules/Logger/logger_init.py","file_name":"logger_init.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"590704226","text":"class Configuration:\n    def __init__(self, population_size=None, chromosome_size=None, crossingover_probability=None,\n                 mutation_probability=None,\n                 max_generation_number=None, y=None, representation=None, range=None, extremum_sign=None):\n        # hardcoded according to 3rd variant\n        self.y = y\n        self.range = range\n        self.extremum_sign = extremum_sign\n\n        self.population_size = population_size\n        self.chromosome_size = chromosome_size\n        self.crossingover_probability = crossingover_probability\n        self.mutation_probability = mutation_probability\n        self.max_generation_number = max_generation_number\n        self.representation = representation\n\n    def __repr__(self):\n        return \"\"\"\nConfiguration {{\n    Population size: {},\n    Chromosome size: {},\n    Crossingover probability: {},\n    Mutation probability: {},\n    Max generation number: {}\n}}\n    \"\"\".format(self.population_size, self.chromosome_size, self.crossingover_probability, self.mutation_probability,\n               self.max_generation_number)\n","sub_path":"common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"334995735","text":"\"\"\"\ndata_preparation_event_number_model_training.py\n\nThis script is used to load diffuseness files and create a consolidated array for event_number_model\nIt takes significant time since it is not parallelized...several improvements could be made\n\nThe output arrays (x , y) are stored in /models/event_number/input_data\n\n\"\"\"\nfrom baseline import parameter\nimport os\nimport numpy as np\nimport pandas as pd\n\nparams = parameter.get_params()\ndata_rootfolder_path = os.path.join(params['dataset_dir'], 'num_sources_reduced/') # path to folders\ndata_output_path = os.path.join(params['dataset_dir'], 'models/event_number/input_data_reduced') # path to arrays\n\n# Import data and parse in pandas dataframes\n\nfor subdir, dirs, files in os.walk(data_rootfolder_path):\n    for file in files:\n        os.chdir(subdir)\n        if file == 'diffuseness.npy':\n            x = np.load(file).T\n            if 'framedata_x' in locals():\n                framedata_x = np.vstack((framedata_x, x))\n            else:\n                framedata_x = x\n        if file == 'num_sources.npy':\n            y = np.load(file)\n            if 'framedata_y' in locals():\n                framedata_y = np.concatenate((framedata_y, y))\n            else:\n                framedata_y = y\ni = 0\ncolumns = []\nfor value in x[0]:\n    i += 1\n    column = 'v' + str(i)\n    columns.append(column)\n\ndff_x = pd.DataFrame(data=framedata_x, columns=columns)\ndff_y = pd.DataFrame(data=framedata_y, columns=['target'])\n\nprint(\"Features dataset size : \",dff_x.shape)\nprint(\"Targets dataset size : \",dff_y.shape)\ndff_x.to_pickle(os.path.join(data_output_path, 'training_x_event_number.pkl'))\ndff_y.to_pickle(os.path.join(data_output_path, 'training_y_event_number.pkl'))\n\n\n\n","sub_path":"APRI/data_prepatation_event_number_model.py","file_name":"data_prepatation_event_number_model.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"456385536","text":"#!/usr/bin/env python3\n\nfrom csv import DictReader, DictWriter\nfrom random import randint\n\n\ndef simulate_hit_results(hit_path, results_path):\n    with open(hit_path) as in_f, open(results_path, 'w', 
newline='') as out_f:\n        num_items_per_hit = 0\n        writer = None\n        for in_row in DictReader(in_f):\n            if writer is None:\n                while 'id{}'.format(num_items_per_hit + 1) in in_row:\n                    num_items_per_hit += 1\n                writer = DictWriter(out_f, fieldnames=[\n                    '{}{}'.format(prefix, i + 1)\n                    for prefix in ('Input.id', 'Answer.range')\n                    for i in range(num_items_per_hit)])\n                writer.writeheader()\n            out_row = dict()\n            for i in range(num_items_per_hit):\n                out_row['Input.id{}'.format(i + 1)] = in_row['id{}'.format(i + 1)]\n                out_row['Answer.range{}'.format(i + 1)] = randint(0, 100)\n            writer.writerow(out_row)\n\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n    parser = ArgumentParser(\n        description='Simulate HIT results, given HIT batch data.',\n        formatter_class=ArgumentDefaultsHelpFormatter\n    )\n    parser.add_argument('hit_path', help='Path to HIT batch data CSV file')\n    parser.add_argument('results_path', help='Path to HIT results CSV file')\n    args = parser.parse_args()\n    simulate_hit_results(args.hit_path, args.results_path)\n","sub_path":"tests/simulate_hit_results.py","file_name":"simulate_hit_results.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"583493009","text":"import sqlite3\nfrom flask import Flask, request, render_template\n\napp = Flask(__name__)\n\n\ndef connecting_with_db():\n    conn = sqlite3.connect('flask_dojo.db')\n    cur = conn.cursor()\n    conn.commit()\n\n\n@app.route('/')\ndef index():\n    return 'Hey!'\n\n\n@app.route('/request-counter', methods=['GET', 'POST'])\ndef counter():\n    if request.method == 'GET':\n        add_method_to_db('GET')\n        return render_template('counter.html')\n    elif request.method == 'POST':\n        add_method_to_db('POST')\n        return render_template('counter.html')\n\n# if request.method:\n#     add_method_to_db(request.method)\n\n\ndef add_method_to_db(request):\n    conn = sqlite3.connect('flask_dojo.db')\n    cur = conn.cursor()\n    cur.execute(\"INSERT INTO requests (`method`) VALUES (?)\", (request,))\n    conn.commit()\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"151433946","text":"# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\"\"\"Output class.\n\nRepresents a widget that can be used to display output within the widget area.\n\"\"\"\n\nimport sys\nfrom functools import wraps\n\nfrom .domwidget import DOMWidget\nfrom .trait_types import TypedTuple\nfrom .widget import register\nfrom .._version import __jupyter_widgets_output_version__\n\nfrom traitlets import Unicode, Dict\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython.display import clear_output\nfrom IPython import get_ipython\nimport traceback\n\n@register\nclass Output(DOMWidget):\n    \"\"\"Widget used as a context manager to display output.\n\n    This widget can capture and display stdout, stderr, and rich output. To use\n    it, create an instance of it and display it.\n\n    You can then use the widget as a context manager: any output produced while in the\n    context will be captured and displayed in the widget instead of the standard output\n    area.\n\n    You can also use the .capture() method to decorate a function or a method. Any output\n    produced by the function will then go to the output widget. 
This is useful for\n debugging widget callbacks, for example.\n\n Example::\n import ipywidgets as widgets\n from IPython.display import display\n out = widgets.Output()\n display(out)\n\n print('prints to output area')\n\n with out:\n print('prints to output widget')\n\n @out.capture()\n def func():\n print('prints to output widget')\n \"\"\"\n _view_name = Unicode('OutputView').tag(sync=True)\n _model_name = Unicode('OutputModel').tag(sync=True)\n _view_module = Unicode('@jupyter-widgets/output').tag(sync=True)\n _model_module = Unicode('@jupyter-widgets/output').tag(sync=True)\n _view_module_version = Unicode(__jupyter_widgets_output_version__).tag(sync=True)\n _model_module_version = Unicode(__jupyter_widgets_output_version__).tag(sync=True)\n\n msg_id = Unicode('', help=\"Parent message id of messages to capture\").tag(sync=True)\n outputs = TypedTuple(trait=Dict(), help=\"The output messages synced from the frontend.\").tag(sync=True)\n\n __counter = 0\n\n def clear_output(self, *pargs, **kwargs):\n \"\"\"\n Clear the content of the output widget.\n\n Parameters\n ----------\n\n wait: bool\n If True, wait to clear the output until new output is\n available to replace it. Default: False\n \"\"\"\n with self:\n clear_output(*pargs, **kwargs)\n\n # PY3: Force passing clear_output and clear_kwargs as kwargs\n def capture(self, clear_output=False, *clear_args, **clear_kwargs):\n \"\"\"\n Decorator to capture the stdout and stderr of a function.\n\n Parameters\n ----------\n\n clear_output: bool\n If True, clear the content of the output widget at every\n new function call. Default: False\n\n wait: bool\n If True, wait to clear the output until new output is\n available to replace it. This is only used if clear_output\n is also True.\n Default: False\n \"\"\"\n def capture_decorator(func):\n @wraps(func)\n def inner(*args, **kwargs):\n if clear_output:\n self.clear_output(*clear_args, **clear_kwargs)\n with self:\n return func(*args, **kwargs)\n return inner\n return capture_decorator\n\n def __enter__(self):\n \"\"\"Called upon entering output widget context manager.\"\"\"\n self._flush()\n ip = get_ipython()\n kernel = None\n if ip and getattr(ip, \"kernel\", None) is not None:\n kernel = ip.kernel\n elif self.comm is not None and getattr(self.comm, 'kernel', None) is not None:\n kernel = self.comm.kernel\n\n if kernel:\n parent = None\n if hasattr(kernel, \"get_parent\"):\n parent = kernel.get_parent()\n elif hasattr(kernel, \"_parent_header\"):\n # ipykernel < 6: kernel._parent_header is the parent *request*\n parent = kernel._parent_header\n\n if parent and parent.get(\"header\"):\n self.msg_id = parent[\"header\"][\"msg_id\"]\n self.__counter += 1\n\n def __exit__(self, etype, evalue, tb):\n \"\"\"Called upon exiting output widget context manager.\"\"\"\n kernel = None\n if etype is not None:\n ip = get_ipython()\n if ip:\n kernel = ip\n ip.showtraceback((etype, evalue, tb), tb_offset=0)\n elif (self.comm is not None and\n getattr(self.comm, \"kernel\", None) is not None and\n # Check if it's ipykernel\n getattr(self.comm.kernel, \"send_response\", None) is not None):\n kernel = self.comm.kernel\n kernel.send_response(kernel.iopub_socket,\n u'error',\n {\n u'traceback': [\"\".join(traceback.format_exception(etype, evalue, tb))],\n u'evalue': repr(evalue.args),\n u'ename': etype.__name__\n })\n self._flush()\n self.__counter -= 1\n if self.__counter == 0:\n self.msg_id = ''\n # suppress exceptions when in IPython, since they are shown above,\n # otherwise let someone else handle it\n 
return True if kernel else None\n\n def _flush(self):\n \"\"\"Flush stdout and stderr buffers.\"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n\n def _append_stream_output(self, text, stream_name):\n \"\"\"Append a stream output.\"\"\"\n self.outputs += (\n {'output_type': 'stream', 'name': stream_name, 'text': text},\n )\n\n def append_stdout(self, text):\n \"\"\"Append text to the stdout stream.\"\"\"\n self._append_stream_output(text, stream_name='stdout')\n\n def append_stderr(self, text):\n \"\"\"Append text to the stderr stream.\"\"\"\n self._append_stream_output(text, stream_name='stderr')\n\n def append_display_data(self, display_object):\n \"\"\"Append a display object as an output.\n\n Parameters\n ----------\n display_object : IPython.core.display.DisplayObject\n The object to display (e.g., an instance of\n `IPython.display.Markdown` or `IPython.display.Image`).\n \"\"\"\n fmt = InteractiveShell.instance().display_formatter.format\n data, metadata = fmt(display_object)\n self.outputs += (\n {\n 'output_type': 'display_data',\n 'data': data,\n 'metadata': metadata\n },\n )\n","sub_path":"contrib/python/ipywidgets/py3/ipywidgets/widgets/widget_output.py","file_name":"widget_output.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634329265","text":"\"\"\"\nTests doing a spectrogram using an AudioSegment.\n\"\"\"\nimport numpy as np\nimport read_from_file\nimport sys\nimport visualize\n\ndef test(seg):\n print(\"Removing silence...\")\n result = seg.filter_silence()\n outname_silence = \"results/nosilence.wav\"\n result.export(outname_silence, format=\"wav\")\n visualize.visualize(result[:min(visualize.VIS_MS, len(result))], title=\"After Silence Removal\")\n print(\"After removal:\", outname_silence)\n\n # Now try again, but with massive threshold for silence removal\n # This will strip almost every sample in the file, leaving a practically empty\n # WAV file, which Pydub chokes on.\n _ = seg.filter_silence(threshold_percentage=99.9)\n\n return result\n\nif __name__ == \"__main__\":\n seg = read_from_file.test(sys.argv[1])\n test(seg)\n","sub_path":"tests/silence.py","file_name":"silence.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615876866","text":"# -*- coding: utf-8 -*-\n# flake8: noqa\n# isort:skip_file\n\"\"\"Library to validate, clean, transform and get metadata of ISBN strings (for devs).\"\"\"\n\n# Define isbnlib API and set lib environment\n\nimport logging as _logging\n\nfrom ._exceptions import (quiet_errors, ISBNLibException,\n NotRecognizedServiceError, NotValidISBNError,\n PluginNotLoadedError)\n\n# configuration\nfrom . 
import config # <-- first import\n\n# main modules\nfrom ._core import (is_isbn10, is_isbn13, to_isbn10, to_isbn13, check_digit10,\n check_digit13, clean, canonical, notisbn, get_isbnlike,\n get_canonical_isbn, GTIN13, EAN13, RE_ISBN10, RE_ISBN13,\n RE_LOOSE, RE_NORMAL, RE_STRICT)\nfrom ._ext import (cover, desc, mask, meta, info, editions, isbn_from_words,\n doi, ren)\nfrom ._goom import query as goom\nfrom ._doitotex import doi2tex\n\n# Ranges Database date\nfrom ._data.data4mask import RDDATE\n\n# config _logging for lib\n_nh = _logging.NullHandler()\n_logging.getLogger('isbnlib').addHandler(_nh)\n\n# alias\nISBN13 = EAN13\nean13 = EAN13\n\n# dunders\n__all__ = ('is_isbn10', 'is_isbn13', 'clean', 'check_digit10', 'check_digit13',\n 'mask', 'info', 'meta', 'to_isbn10', 'to_isbn13', 'get_isbnlike',\n 'notisbn', 'ean13', 'EAN13', 'cover', 'desc', 'canonical',\n 'get_canonical_isbn', 'editions', 'isbn_from_words', 'quiet_errors',\n 'config', '__version__', '__support__', 'doi', 'ren', 'ISBN13',\n 'GTIN13', 'ISBNLibException', 'NotRecognizedServiceError',\n 'NotValidISBNError', 'PluginNotLoadedError', 'goom', 'doi2tex',\n 'RDDATE')\n\n__version__ = '3.9.10'\n__support__ = 'py27, py34, py35, py36, py37, pypy, pypy3'\n","sub_path":"venv/Lib/site-packages/isbnlib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289115913","text":"import tkinter as tk\nfrom itertools import chain\nfrom cell import Cell\n\n\nclass Window:\n def __init__(self, width=720, height=720, title='Game of life'):\n self.root = tk.Tk()\n self.size = (width, height)\n self.width, self.height = self.size\n self.root.title(title)\n self.game_id = None\n self.create_layout()\n self.init_buttons()\n self.bind_mouse_click()\n\n def create_layout(self):\n \"\"\"\n Creates `<class 'tkinter.Frame'>` within window boundaries\n and `<class 'CanvasGrid'>` inside that frame. 
Canvas grid is used to\n display cell states in Game of Life.\n \"\"\"\n self.frame = tk.Frame(self.root, width=self.width, height=self.height)\n self.frame.pack()\n self.canvas = CanvasGrid(self.frame, width=self.width,\n height=self.height)\n self.canvas.pack()\n\n def init_buttons(self):\n \"\"\"\n Create and place buttons inside window.\n \"\"\"\n start_button = tk.Button(self.root, text='Start', command=self.update)\n stop_button = tk.Button(self.root, text='Stop', command=self.stop)\n start_button.pack(side=tk.LEFT)\n stop_button.pack(side=tk.RIGHT)\n\n def bind_mouse_click(self, callback=None):\n \"\"\"\n Use this method to specify callback for handling mouse clicks.\n :param callback: `<class 'function'>` if None is given then\n it uses `<class 'CanvasGrid.change_colour_on_click'>`\n \"\"\"\n if callback is None:\n callback = self.change_colour_on_click\n self.canvas.bind(\"<Button-1>\", callback)\n\n def update(self):\n \"\"\"\n Updates game screen.\n \"\"\"\n self.canvas.update_cells()\n self.canvas.paint_grid()\n self.game_id = self.root.after(100, self.update)\n\n\n def stop(self):\n self.root.after_cancel(self.game_id)\n\n\n def change_colour_on_click(self, event, color='green'):\n \"\"\"\n Changes colour of clicked element on `<class 'CanvasGrid'>`\n :param event: `<class 'tkinter.Event'>`\n :param color: str\n \"\"\"\n self.canvas.change_colour(event, color)\n\n\nclass CanvasGrid(tk.Canvas):\n SIZE = 70\n START_X = 10\n START_Y = 10\n STEP = 10\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.cells = []\n self.grid = []\n self.fill_grid()\n\n def update_cells(self):\n \"\"\"\n Applies Conway rules for every cell in grid.\n \"\"\"\n for cell in chain.from_iterable(self.cells):\n cell_dies = cell.apply_conway_rules(self.cells)\n if cell_dies:\n cell.next_status = not cell.alive\n else:\n cell.next_status = cell.alive\n\n @classmethod\n def find_rect_coordinates(cls, x, y):\n \"\"\"\n Translates coordinates of click event to rectangle boundaries.\n :param x: int\n :param y: int\n :return: tuple(int, int)\n \"\"\"\n x -= x % cls.START_X\n y -= y % cls.START_Y\n return x, y\n\n def fill_grid(self, color='white'):\n \"\"\"\n Fills `<class 'tkinter.Canvas'>` with recangles that represent cells.\n :param color: str\n \"\"\"\n x = CanvasGrid.START_X\n y = CanvasGrid.START_Y\n\n for row in range(CanvasGrid.SIZE):\n self.cells.append([])\n self.grid.append([])\n\n for column in range(CanvasGrid.SIZE):\n rect = self.create_rectangle(x, y, x + CanvasGrid.STEP,\n y + CanvasGrid.STEP, fill=color)\n self.grid[row].append(rect)\n self.cells[row].append(Cell(row, column))\n x += CanvasGrid.START_X\n\n x = CanvasGrid.START_X\n y += CanvasGrid.START_Y\n\n def change_colour(self, event, color):\n \"\"\"\n Changes colour of a cell.\n :param event: `<class 'tkinter.Event'>`\n \"\"\"\n x, y = CanvasGrid.find_rect_coordinates(event.x, event.y)\n try:\n iy = x // 10 - 1\n ix = y // 10 - 1\n if ix == -1 or iy == -1:\n raise IndexError\n if self.cells[ix][iy].alive:\n self.itemconfig(self.grid[ix][iy], fill='white')\n else:\n self.itemconfig(self.grid[ix][iy], fill=color)\n self.cells[ix][iy].toggle_status()\n except IndexError:\n return\n\n def paint_grid(self, alive_color='red'):\n \"\"\"\n Color rectangles to specified color that mimics its state.\n \"\"\"\n for cell in chain.from_iterable(self.cells):\n current_status = cell.alive\n if cell.next_status != current_status:\n if cell.next_status:\n self.itemconfig(self.grid[cell.x][cell.y], fill=alive_color)\n 
else:\n self.itemconfig(self.grid[cell.x][cell.y], fill='white')\n cell.alive = cell.next_status\n\n\nif __name__ == '__main__':\n window = Window()\n window.root.mainloop()\n","sub_path":"Python/game_of_life/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"405615786","text":"import math\nimport time\nimport pandas as pd\nimport threading\nimport matplotlib.pyplot as plt\nimport numpy\ntheta0 = 0\ntheta1 = 0\nl = 0.3\ndf = pd.read_csv(\"house.csv\")\ndef data():\n global x\n global y\n x = df['SqFt']\n y = df['Price']\n\ndef repair():\n global theta0\n global theta1\n global l\n i = 0\n sum = 0\n for i in range(x.size):\n sum = sum + ((theta0 + (theta1*x[i])) - y[i])\n sum1 = sum/x.size;\n sum = 0\n i = 0\n for i in range(x.size):\n sum = sum + (((theta0+(theta1*x[i]))-y[i]) * x[i])\n sum = sum/x.size; \n if abs(l*sum) > 1: \n theta0 = int(theta0 - (l*sum1))\n theta1 = int(theta1 - (l*sum))\n return 1\n return 2\n\n\ndef the():\n while True:\n print(theta0)\n print(theta1)\n\n\ndef run():\n i = 0\n try:\n threading.Thread(target=the).start()\n except:\n print(\"error stating thread\")\n while True:\n r = repair()\n if r==2:\n break;\n print(theta0+(theta1*1000))\n\n\ndata()\nrun()\n \n","sub_path":"reg.py","file_name":"reg.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337883820","text":"import json\nfrom example.tests import TestBase\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.conf import settings\n\n\nclass MultipleIDMixin(TestBase):\n \"\"\"\n Test usage with MultipleIDMixin\n\n [<RegexURLPattern user-list ^user-viewsets/$>,\n <RegexURLPattern user-detail ^user-viewsets/(?P<pk>[^/]+)/$>]\n \"\"\"\n list_url = reverse_lazy('user-list')\n\n def test_single_id_in_query_params(self):\n \"\"\"\n Ensure single ID in query params returns correct result\n \"\"\"\n url = '/user-mixin-viewset/?ids[]={0}'.format(self.miles.pk)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n expected = {\n u'user': [{\n u'id': self.miles.pk,\n u'first_name': self.miles.first_name,\n u'last_name': self.miles.last_name,\n u'email': self.miles.email\n }]\n }\n\n json_content = json.loads(response.content)\n meta = json_content.get('meta')\n\n self.assertEquals(expected.get('user'), json_content.get('user'))\n self.assertEquals(meta.get('count', 0), 1)\n self.assertEquals(meta.get('next'), None)\n self.assertEqual(None, meta.get('next_link'))\n self.assertEqual(meta.get('page'), 1)\n\n def test_multiple_ids_in_query_params(self):\n \"\"\"\n Ensure multiple IDs in query params return correct result\n \"\"\"\n url = '/user-mixin-viewset/?ids[]={0}&ids[]={1}'.format(\n self.miles.pk, self.john.pk)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n expected = {\n u'user': [{\n u'id': self.john.pk,\n u'first_name': self.john.first_name,\n u'last_name': self.john.last_name,\n u'email': self.john.email\n }]\n }\n\n json_content = json.loads(response.content)\n meta = json_content.get('meta')\n\n self.assertEquals(expected.get('user'), json_content.get('user'))\n self.assertEquals(meta.get('count', 0), 2)\n self.assertEquals(meta.get('next'), 2)\n self.assertEqual(\n 'http://testserver/user-mixin-viewset/?ids%5B%5D=2&ids%5B%5D=1&page=2',\n 
meta.get('next_link'))\n self.assertEqual(meta.get('page'), 1)\n\n","sub_path":"example/tests/test_multiple_id_mixin.py","file_name":"test_multiple_id_mixin.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229843264","text":"from collections import namedtuple\nfrom ibanity import Ibanity\nfrom ibanity.Flatten import flatten_json\n\n\ndef create(financial_institution_id, payment_initiation_request_id, attributes, customer_access_token):\n uri = Ibanity.client.api_schema[\"customer\"][\"financialInstitution\"][\"paymentInitiationRequest\"][\"authorizations\"] \\\n .replace(\"{financialInstitutionId}\", financial_institution_id) \\\n .replace(\"{paymentInitiationRequestId}\", payment_initiation_request_id)\n\n body = {\n \"data\": {\n \"type\": \"authorization\",\n \"attributes\": attributes\n }\n }\n response = Ibanity.client.post(uri, body, {}, \"Bearer \" + str(customer_access_token))\n return flatten_json(response[\"data\"])\n\n","sub_path":"ibanity/api/Xs2a/PaymentInitiationRequestAuthorization.py","file_name":"PaymentInitiationRequestAuthorization.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"442975654","text":"import cv2 as cv\nimport numpy as np\n\nvideo = cv.VideoCapture(0)\n\n\nwhile True:\n check, frame = video.read() \n cv.imshow(\"capture\", frame)\n\n key = cv.waitKey(1)\n if key == ord('q'):\n break\n\nvideo.release()\ncv.destroyAllWindows\n\n","sub_path":"opencv/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576870938","text":"from django.conf.urls import url\r\nfrom marvelhp import views\r\n\r\nurlpatterns = [\r\n # /marvel/\r\n url(r'^$', views.index, name='index'),\r\n url(r'(wallpapers)/$', views.wallpapers, name='wallpapers'),\r\n url(r'(videos)/$', views.videos, name='videos'),\r\n url(r'^(register)/$', views.Userformview.as_view(), name='Userformview'),\r\n\r\n]\r\n","sub_path":"marvelhp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637323656","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# The run data for each experiment's fittest solution was scraped from the log files and put in the following files for easy access\nsoln_data_files = ['random_gen_soln_data.txt', 'website_puzzle_soln_data.txt']\n\n# Formatting parameters\nsoln_data_titles = ['Randomly Generated Puzzles', 'Provided Puzzle']\nsoln_scales = [250, 300]\n\nfor index, file_name in enumerate(soln_data_files):\n with open(file_name, 'r') as file:\n eval_list = []\n fitness_list = []\n \n # Read the input file into the above lists\n line_count = -1\n\n for line in file:\n line_count += 1\n\n if line_count == 1:\n split_line = [int(_) for _ in line.split('\\t')]\n\n eval_list.append(split_line[0])\n fitness_list.append(split_line[1])\n\n elif line_count == 2:\n # Reset the line count\n line_count = -1\n \n # Graph evaluations vs. 
fitness\n plt.plot(eval_list, fitness_list, '-ro', linewidth = 2.0)\n\n # Set axis display parameters\n plt.xticks(np.arange(0, max(eval_list) + 500, soln_scales[index]))\n plt.yticks(np.arange(0, max(fitness_list) + 10, 10))\n plt.xlim(0, eval_list[-1] + (len(eval_list) * 20))\n\n # Include necessary labels\n plt.xlabel('evaluations')\n plt.ylabel('fitness')\n plt.title('Evaluations vs. Fitness for ' + soln_data_titles[index] + '\\n(without enforcing black cell number constraint)')\n plt.annotate('Maximum fitness: ' + str(fitness_list[-1]) + '\\nEvaluation number: ' + str(eval_list[-1]), xy = (eval_list[-1], fitness_list[-1]),\n xytext=(1, -60), ha='right', textcoords='offset points', arrowprops=dict(arrowstyle = 'simple', shrinkA = 0))\n\n # Save and close the plot\n plt.savefig(file_name[:file_name.find('data')] + 'graph.png')\n plt.close()\n","sub_path":"gen_graphs/gen_graphs.py","file_name":"gen_graphs.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610774027","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\nfrom datetime import datetime\n\n# set dict for ip\nip_dict = {}\nurl_dict = {}\n# set re pattern for request, ip, url\nrequest_p = re.compile('GET')\nip_p = re.compile(\n '[1-9]{1}\\d{0,2}\\.[1-9]{1}\\d{0,2}\\.[1-9]{1}\\d{0,2}\\.[1-9]{1}\\d{0,2}')\nurl_p = re.compile('\\\"GET .* HTTP\\/1\\.1\\\"')\ndt_p = re.compile('\\[\\d{1}.*\\d{1}\\]')\nresponse_p = re.compile('HTTP\\/1\\.1\\\" \\d{3}')\n# open log file\nwith open('nginx.log', 'r', encoding='utf-8') as log_file:\n # file can be listed for each line\n for line in log_file:\n match_request = request_p.search(line)\n if match_request is not None and match_request.group() == 'GET':\n match_dt = dt_p.search(line)\n if match_dt is not None and datetime.strptime(match_dt.group().strip('[,]'), '%d/%b/%Y:%H:%M:%S %z').date() == datetime(2017, 1, 11).date():\n match_ip = ip_p.search(line)\n if match_ip.group() not in ip_dict:\n ip_dict[match_ip.group()] = 1\n else:\n ip_dict[match_ip.group()] += 1\n else:\n continue\n match_response = response_p.search(line)\n if match_response is not None and match_response.group().split()[1] == '404':\n match_url = url_p.search(line)\n if match_url.group().split()[1] not in url_dict:\n url_dict[match_url.group().split()[1]] = 1\n else:\n url_dict[match_url.group().split()[1]] += 1\n else:\n continue\n else:\n continue\n\nlargest_ip = max(ip_dict, key=ip_dict.get)\nprint(largest_ip, ip_dict.get(largest_ip))\nlargest_url = max(url_dict, key=url_dict.get)\nprint(largest_url, url_dict.get(largest_url))\n","sub_path":"challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358273592","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/dataclay/util/management/classmgr/MetaClass.py\n# Compiled at: 2019-11-11 07:06:07\n# Size of source mod 2**32: 5308 bytes\n\"\"\" Class description goes here. 
\"\"\"\nfrom lru import LRU\nfrom dataclay.util.MgrObject import ManagementObject\nfrom .Type import Type\nfrom .Utils import STATIC_ATTRIBUTE_FOR_EXTERNAL_INIT, stub_only_def, py_code\n\nclass MetaClass(ManagementObject):\n _fields = [\n 'dataClayID',\n 'namespace',\n 'name',\n 'parentType',\n 'properties',\n 'operations',\n 'isAbstract',\n 'languageDepInfos',\n 'ecas']\n _internal_fields = [\n 'namespaceID',\n '_implementation_id_to_operation_cache']\n _typed_fields = {'parentType': Type}\n\n def get_operation_from_name(self, op_name):\n \"\"\"Return the operation from its name.\"\"\"\n for op in self.operations:\n if op.name == op_name:\n return op\n\n raise KeyError('Operation with name %s was not found in dataClay class %s' % (\n op_name, self.name))\n\n def get_operation(self, implementation_id):\n \"\"\"Return an Operation (management object) from an ImplementationID\n\n :param uuid.UUID implementation_id: The requested ImplementationID\n\n Given the UUID for a certain Implementation, lookup and return the\n corresponding Operation. Note that this method is memoized (cached) in\n order to improve performance (given that the lookup is slow).\n \"\"\"\n if not hasattr(self, '_implementation_id_to_operation_cache'):\n setattr(self, '_implementation_id_to_operation_cache', LRU(50))\n try:\n return self._implementation_id_to_operation_cache[implementation_id]\n except KeyError:\n pass\n\n for op in self.operations:\n for imp in op.implementations:\n if imp.dataClayID == implementation_id:\n self._implementation_id_to_operation_cache[implementation_id] = op\n return op\n\n raise KeyError('Operation for ImplementationID {%s} not found in class %s (in namespace %s)' % (\n implementation_id, self.name, self.namespace))\n\n def juxtapose_code(self, exeenv_flag=False):\n \"\"\"Return the complete source code for the current MetaClass.\n\n :param exeenv_flag: Set to true to generate code for the ExecutionEnvironment.\n :return: A valid source for this class.\n\n Note that this class will use the \"local_implementation\" of its\n operations > implementations when available. Undefined behaviour if\n not available.\n\n In scenarios where implementations are *not* `PythonImplementation` a\n pure-stub (intended for only persistent work mode) function is used. 
If\n the constructor is one of those non-Python methods, then the class is\n flagged as EXTERNAL_INIT only (see ExecutionGateway for further info).\n \"\"\"\n import dataclay.util.management.classmgr.python.PythonImplementation as PythonImplementation\n imp_codes = list()\n ops_done = set()\n for op in self.operations:\n if op.name.startswith('$$'):\n continue\n else:\n if op.name in ops_done:\n continue\n ops_done.add(op.name)\n if len(op.implementations) != 1:\n raise NotImplementedError('Found %d operations, but currently I only support one' % op.implementations)\n imp = op.implementations[0]\n if isinstance(imp, PythonImplementation):\n imp_codes.append(imp.code)\n if not op.name == '__init__':\n if op.name == '<init>':\n imp_codes.append('\\n %s = %s' % (\n STATIC_ATTRIBUTE_FOR_EXTERNAL_INIT, str(True)))\n op.name = '__init__'\n imp_codes.append(stub_only_def.render({'func_name':op.name, \n 'param_names':op.paramsOrder}))\n\n return py_code.render({'class_name':self.name.rsplit('.', 1)[-1], \n 'parent_name':self.parentType.typeName if self.parentType else 'DataClayObject', \n 'metaclass':self, \n 'imp_codes':imp_codes})","sub_path":"pycfiles/dataClay-2.1-py3.7/MetaClass.cpython-37.py","file_name":"MetaClass.cpython-37.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146225619","text":"corr = int(input())\na = input()\nb = input()\nn = len(a)\nincorr = n - corr\nans = 0\nfor x, y in zip(a, b):\n if x == y and corr > 0:\n ans += 1\n corr -= 1\n elif x != y and incorr > 0:\n ans += 1\n incorr -= 1\n\nprint(ans)\n\n","sub_path":"kattis/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603351335","text":"from flask import Flask, request, render_template\nfrom jinja2 import FileSystemLoader\nimport os\n\nfrom app_logging import get_logger\nfrom hotspots.seqpeek.view import seqpeek as seqpeek_view\nfrom hotspots.pathway_assoc_view import pathway_assoc_view\n\nlog = get_logger()\n\napp = Flask(__name__)\n\nTEMPLATE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n\n@app.route('/')\ndef landing_page():\n return(render_template(\"hotspots/landing.html\"))\n\n\n@app.route('/seqpeek/', defaults={'gene': '', 'tumor': ''})\ndef seqpeek(gene, tumor):\n request_gene = request.args.get('gene')\n request_tumor_list = [str(t) for t in request.args.getlist('tumor')]\n return(seqpeek_view(request_gene, request_tumor_list))\n\n\n@app.route('/pathway/', defaults={'gene': '', 'tumor': '', 'cluster': ''})\ndef pathway_assoc(gene, tumor, cluster):\n request_gene = request.args.get('gene')\n request_tumor = request.args.get('tumor')\n request_cluster = request.args.get('cluster')\n log.debug(\"{0} {1} {2}\".format(request_gene, request_tumor, request_cluster))\n return(pathway_assoc_view(request_gene, request_tumor, request_cluster))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n app.jinja_loader = FileSystemLoader(TEMPLATE_DIR)\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106422986","text":"1# -*- coding: utf-8 -*-\n\n#COMECE AQUI ABAIXO\nimport time\nnome = input('digite seu nome: ')\nnum= int(input('digite um número: '))\nfor i in range (10,0,-1):\n print(i)\n time.sleep(1)\nprint(nome + ' é 
gay')\n ","sub_path":"moodledata/vpl_data/380/usersdata/326/107185/submittedfiles/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501337520","text":"#!/usr/bin/env python3\n\nimport tkinter\n\nfrom functools import partial\n\nroot = tkinter.Tk() #创建顶层窗口\nlb1 = tkinter.Label(root, text=\"hello world!\", font = \"Aria 16 bold\")#创建标签\nb1 = tkinter.Button(root, bg='blue', fg='white', text=\"Button 1\")#创建按钮\nmybutton = partial(tkinter.Button, root, bg='blue', fg='white')\n\n#调用新的函数时,给出改变的参数即可\nb2 = mybutton(text='Button 2')\nb3 = tkinter.Button(root, bg='red', fg='red', text='QUIT', command=root.quit) #创建按钮,绑定了root.quit命令\n\nlb1.pack() #填充到界面\nb1.pack()\nb2.pack()\nb3.pack()\nroot.mainloop() #运行这个GUI应用","sub_path":"20190218/mygui.py","file_name":"mygui.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329355256","text":"from datetime import datetime\nimport os.path\ndef main():\n ClientFileName=raw_input(\"Enter client file name:\")\n DataFileName=raw_input(\"Enter data file name:\")\n try:\n file=open(ClientFileName,'r')\n except IOError:\n error=raw_input(\"Client file \\\"\"+ClientFileName+\"\\\" not found, press any key to exit\")\n return\n clients=[]\n for line in file:\n clients.append([line.strip()])\n if len(clients)==0:\n error=raw_input(\"Client file \\\"\"+ClientFileName+\"\\\" is empty, press any key to exit\")\n return\n file.close()\n try:\n file=open(DataFileName,'r')\n except IOError:\n error=raw_input(\"Data file \\\"\"+DataFileName+\"\\\" not found, press any key to exit\")\n return\n for line in file:\n line=line.split(',')\n \n for client in clients:\n if client[0]==line[0]:\n client.append(line)\n file.close()\n for client in clients:\n if os.path.isfile(client[0]+\" \"+str(datetime.now().year)+\"-\"+str(datetime.now().month).zfill(2)+\".csv\"):\n override=raw_input(\"This will overwrite an existing file, press '1' then 'Enter' to continue, or Press 'Enter' to Exit\")\n if override !='1':\n return\n file=open(client[0]+\" \"+str(datetime.now().year)+\"-\"+str(datetime.now().month).zfill(2)+\".csv\",'w')\n file.write('')\n file.close()\n file=open(client[0]+\" \"+str(datetime.now().year)+\"-\"+str(datetime.now().month).zfill(2)+\".csv\",'a')\n for entry in client[1:]:\n file.write(','.join(entry))\n #print ','.join(entry)\n file.close() \n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"ClientBillCreatorScript no Overflow no header.py","file_name":"ClientBillCreatorScript no Overflow no header.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310149357","text":"from .types import t_scheduler\nfrom .manager import Manager\nimport pretty_cron\n\n\nclass Refresh:\n naas = None\n role = t_scheduler\n\n def __init__(self):\n self.manager = Manager()\n\n def current_raw(self):\n json_data = self.manager.get_naas()\n for item in json_data:\n if item[\"type\"] == self.role:\n print(item)\n\n def currents(self):\n json_data = self.manager.get_naas()\n for item in json_data:\n kind = None\n if item[\"type\"] == self.role:\n cron_string = pretty_cron.prettify_cron(item[\"value\"])\n kind = f\"refresh {cron_string}\"\n print(f\"File ==> {item['path']} is {kind}\")\n\n def add(self, path=None, reccurence=None, params=None, silent=False):\n if 
not self.manager.notebook_path():\n print(\"No add done you are in already in naas folder\\n\")\n return\n if not reccurence:\n print(\"No reccurence provided\\n\")\n return\n cron_string = pretty_cron.prettify_cron(reccurence)\n current_file = self.manager.get_path(path)\n prod_path = self.manager.get_prod_path(current_file)\n if silent is False:\n print(\n f\"[Naas from Jupyter] => i have copied this {current_file} here: {prod_path} \\n\"\n )\n print(f\"it will refresh it {cron_string}\\n\")\n print(\n f'If you want to remove the refresh capability, just call .delete({path if path is not None else \"\" })) in this file'\n )\n return self.manager.add_prod(\n {\n \"type\": self.role,\n \"path\": current_file,\n \"params\": {},\n \"value\": reccurence,\n },\n silent,\n )\n\n def get(self, path=None):\n current_file = self.manager.get_path(path)\n self.manager.get_prod(current_file)\n\n def clear_output(self, path=None):\n current_file = self.manager.get_path(path)\n self.manager.clear_output(current_file)\n\n def get_output(self, path=None):\n current_file = self.manager.get_path(path)\n self.manager.get_output(current_file)\n\n def get_history(self, path=None, histo=None):\n if not histo:\n print(\"No histo provided\\n\")\n return\n current_file = self.manager.get_path(path)\n self.manager.get_history(current_file, histo)\n\n def list_history(self, path=None):\n current_file = self.manager.get_path(path)\n self.manager.list_history(current_file)\n\n def clear_history(self, path=None, histo=None):\n current_file = self.manager.get_path(path)\n self.manager.clear_history(current_file, histo)\n\n def delete(self, path=None, all=False, silent=False):\n if not self.manager.notebook_path():\n print(\"No delete done you are in already in naas folder\\n\")\n return\n current_file = self.manager.get_path(path)\n self.manager.del_prod({\"type\": self.role, \"path\": current_file}, silent)\n if all is True:\n self.manager.clear_history(current_file)\n self.manager.clear_output(current_file)\n\n def help(self):\n print(f\"=== {type(self).__name__} === \\n\")\n print(\n f\".add(path, params) => add path to the prod {type(self).__name__} server\\n\"\n )\n print(\n f\".delete(path) => delete path to the prod {type(self).__name__} server\\n\"\n )\n print(\n \".clear_history(histonumber, path) => clear history, history number and path are optionel, \\\n if you don't provide them it will erase full history of current file \\n\"\n )\n print(\n \".list_history(path) => list history, of a path or if not provided the current file \\n\"\n )\n print(\n \".get_history(histonumber, path) => get history file, of a path or if not provided the current file \\n\"\n )\n print(\n \".get(path) => get current prod file of a path, or if not provided the current file \\n\"\n )\n print(f\".currents() => get current list of {type(self).__name__} prod file\\n\")\n print(\n f\".current_raw() => get json current list of {type(self).__name__} prod file\\n\"\n )\n","sub_path":"naas/refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35287244","text":"# John Flaherty HW6\n\nfrom Tkinter import *\nimport random\n\ntankWidth = 500\ntankHeight = 400\n\nroot = Tk()\nroot.title(\"Fish Tank\")\n\ntank = Canvas(root, width=tankWidth, height=tankHeight)\ntank.grid(row=0, column=0)\n\nfishLeft = PhotoImage(file=\"fishLeft.gif\")\n\nfishRight = PhotoImage(file=\"fishRight.gif\")\n\ntankPhoto = 
PhotoImage(file=\"tank.gif\")\ntank.create_image(tankWidth/2, tankHeight/2, image=tankPhoto)\n\ndef updateFish(xCoord, yCoord, dxCoord, dyCoord, image):\n\n if xCoord < 0 or xCoord > tankWidth:\n dxCoord *= -1\n if yCoord < 0 or yCoord > tankHeight:\n dyCoord *= -1\n \n xCoord += dxCoord\n yCoord += dyCoord\n tank.coords(image, xCoord, yCoord)\n\n if dxCoord < 0:\n tank.itemconfig(image,image=fishLeft)\n else:\n tank.itemconfig(image,image=fishRight)\n\n tank.after(20, updateFish, xCoord, yCoord, dxCoord, dyCoord, image)\n\ndef createFish():\n x = random.randrange(0, tankWidth+1)\n y = random.randrange(0, tankHeight+1)\n dx = random.randrange(-3, 4)\n dy = random.randrange(-3, 4)\n image = tank.create_image(x, y, image=fishRight)\n updateFish(x, y, dx, dy, image)\n\nfishButton = Button(root, text=\"Add New Fish\", command=createFish)\nfishButton.grid(row=1, column=0)\n\nroot.mainloop()\n\n","sub_path":"HW6/HW6.py","file_name":"HW6.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148351216","text":"#! python\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport geckodriver_autoinstaller\nfrom gad_db import *\nimport time\nimport urllib.request\nimport re\nimport os\nimport datetime\nfrom retrying import retry\nfrom pprint import pprint\n\ngeckodriver_autoinstaller.install()\n\n# Scraper for the Google Assistant Directory (Web version)\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn\n\ndef db_create_action(conn, action):\n \"\"\" Create a new action into the actions table\n if it does not already exist.\n The action parameter contains a list with\n the input values\n :param conn:\n :param action:\n :return: id of new action in the table\n \"\"\"\n sql = ''' INSERT OR IGNORE INTO actions(name, company, description, devices, actions, no_proposed_actions, ratings, number_ratings, claim) VALUES(?,?,?,?,?,?,?,?,?) '''\n print(action)\n cur = conn.cursor()\n cur.execute(sql, action)\n return cur.lastrowid\n\n\ndef db_create_category(conn, category):\n \"\"\" Create a new category into the categories table.\n :param conn:\n :param category:\n :return: id of new category in the table\n \"\"\"\n sql = ''' INSERT OR IGNORE INTO categories(name,parent) VALUES(?,?) '''\n print(category)\n cur = conn.cursor()\n cur.execute(sql, category)\n conn.commit()\n return cur.lastrowid\n\ndef db_create_action_category_relation(conn, actioncategory):\n \"\"\" Create a new action-category-relationship into\n the action_category table. The parameter actioncategory\n is a list containing action_id and category_id.\n :param conn:\n :param actioncategory:\n :return: id of new action-category-relationship in the table\n \"\"\"\n sql = ''' INSERT OR IGNORE INTO action_category(action_id,category_id) VALUES(?,?) '''\n cur = conn.cursor()\n cur.execute(sql, actioncategory)\n conn.commit()\n return cur.lastrowid\n\n@retry\ndef save_html(url, filename):\n \"\"\" Saves the url to filename into a new folder named according to the\n timestamp of the current scrape. 
In order to load all contents\n a browser is simulated in which the user scrolls down to the bottom\n of the website.\n :param url:\n :param filename:\n \"\"\"\n browser = webdriver.Firefox(options=options)\n browser.get(url)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n browser.execute_script(\"document.querySelector('.y3IDJd').scrollTop=10000000\")\n time.sleep(3)\n file_in_directory = os.path.join(scrape_directory, filename)\n with open(file_in_directory, \"w+\") as f:\n sourcecode = browser.page_source\n f.write(sourcecode)\n browser.close()\n print(\"saved successfully \" + filename)\n\ndef open_from_directory(filename):\n \"\"\" Opens a file from the folder in which save_html() saves files.\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n file_in_directory = os.path.join(scrape_directory, filename)\n with open(file_in_directory, 'r') as f:\n html_string = f.read()\n return html_string\n\ndef create_filename(string_filename, string_topcategory=False, string_subcategory = False, string_type = False):\n \"\"\" Deletes all special characters and lowers capital letters from a string\n and makes a html filename out of it.\n If topcategory is set, the filename will include \"-subcategory\", if not\n it will just say \"-category)\n :param string_filename:\n :return:\n \"\"\"\n filename = re.sub(\"[^A-Za-z0-9]+\", \"\", str(string_filename))\n filename = filename.lower()\n\n if string_topcategory == False:\n filename += \"-category.html\"\n else:\n if string_type == 'service':\n topcategory = re.sub(\"[^A-Za-z0-9]+\", \"\", str(string_topcategory))\n topcategory = topcategory.lower()\n subcategory = re.sub(\"[^A-Za-z0-9]+\", \"\", str(string_subcategory))\n subcategory = subcategory.lower()\n filename += \"-\"\n filename += topcategory\n filename += \"-\"\n filename += subcategory\n filename += \"-service.html\"\n else:\n topcategory = re.sub(\"[^A-Za-z0-9]+\", \"\", str(string_topcategory))\n topcategory = topcategory.lower()\n filename += \"-\"\n filename += topcategory\n filename += \"-subcategory.html\"\n return filename\n return filename\n\ndef make_url(url_piece):\n \"\"\" makes a full url out of the suburls found in\n Google's sourcecode\n :param url_piece:\n :return: full_url\n \"\"\"\n full_url = \"https://assistant.google.com/\"\n full_url += url_piece\n return full_url\n\n# Initialize global variables\nglobal scrape_directory\nglobal name_topcategory\nglobal name_subcategory\nglobal soup_topcategory\nglobal index\n\noptions = Options()\noptions.headless = True\n\n# Create timestamp for scrape\ndt_date = datetime.datetime.now()\ntimestamp = dt_date.strftime('%d-%m-%Y-%I-%M')\n\n# Create a new directory for the html_files\nscrape_directory = ''.join(['./', timestamp, '/'])\ndirname = os.path.dirname(scrape_directory)\nif not os.path.exists(dirname):\n os.mkdir(dirname)\n\n# Set a sqlite database in the directory (always create a new db with gad_db.py before scraping)\ndatabase = 
\"google_assistant_directory.db\"\n\n# Create a database connection\nconn = create_connection(database)\n\nif conn is not None:\n with conn:\n\n # Save the Google Assistant Directory start page\n save_html(\"https://assistant.google.com/explore\", \"start.html\")\n start = open_from_directory(\"start.html\")\n soup_start = BeautifulSoup(start, \"html.parser\")\n\n # Browse the start page for categories and extract their names\n for a in soup_start.find_all(\"a\", \"hSRGPd\", href=True, jslog=True)[1:19]:\n name_topcategory = a['aria-label']\n name_topcategory = \"\".join(name_topcategory)\n url = make_url(a['href'])\n filename = create_filename(name_topcategory)\n\n # Save the categories to the database\n category = (name_topcategory, 'no parent')\n category_id = db_create_category(conn, category)\n\n # Save the html site belonging to each category\n save_html(url, filename)\n\n # Open the new html files\n sourcecode_topcategory = open_from_directory(filename)\n soup_topcategory = BeautifulSoup(sourcecode_topcategory, \"html.parser\")\n\n # Browse for subcategories\n for b in soup_topcategory.find_all(\"div\", \"dLQiFb\"):\n\n # Save the html site belonging to each subcategory\n name_subcategory = b['data-title']\n name_subcategory = \"\".join(name_subcategory)\n filename_subcategory = create_filename(name_subcategory, name_topcategory)\n url = make_url(b['data-link'])\n print(url)\n\n # Save the subcategories to the database\n category = (name_subcategory, name_topcategory)\n category_id = db_create_category(conn, category)\n\n # Save the html site belonging to each subcategory\n save_html(url, filename_subcategory)\n\n # Browse for actions on \"View All\" pages\n # Open the new html files\n sourcecode_subcategory = open_from_directory(filename_subcategory)\n soup_subcategory = BeautifulSoup(sourcecode_subcategory, \"html.parser\")\n\n # search for all links pointing to actions (those including the string \"/services/\")\n for c in soup_subcategory.find_all(\"a\", href=re.compile(r'services/')):\n\n # search for all action titles and convert to labels and filenames\n div_tags = c.find_all(\"div\", \"FdWgBb\")\n name_service = div_tags[0].contents\n name_service = name_service[0]\n filename_service = create_filename(name_service, name_topcategory, name_subcategory, 'service')\n\n print(filename_service)\n url = make_url(c['href'])\n print(\"service url: \" + url)\n\n # Save the html site belonging to each action\n save_html(url, filename_service)\n\n # Open the new html files\n sourcecode_service = open_from_directory(filename_service)\n soup_service = BeautifulSoup(sourcecode_service, \"html.parser\")\n\n x = soup_service.find(\"div\", \"VTLJT\")\n\n # extract company name\n company_tags = x.find_all(\"div\", \"lUcxUb CbqDob\")\n if not company_tags:\n company = str()\n else:\n company = company_tags[0].contents\n company = \"\".join(company)\n\n # extract description of action\n description_tags = x.find_all(\"div\", \"IB9ccf\")\n if description_tags is not None:\n description = description_tags[0].contents\n description = \"\".join(description)\n else:\n description = str()\n\n # extract devices and make string\n devices_tags = x.find_all(\"div\", \"rkJR4e CdFZQ\")\n deviceslist = \"\"\n for i in devices_tags[1:len(devices_tags)]:\n devices = str(i.contents)\n deviceslist += devices\n print(\"devices: \" + deviceslist)\n\n # extract actions\n action_tags = x.find_all(\"span\", \"bCHKrf\")\n actionlist = \"\"\n for a in action_tags:\n action = str(a.contents)\n actionlist += action\n 
print(\"actions: \" + actionlist)\n no_actions = len(action_tags)\n print(no_actions, type(no_actions))\n\n # extract rating and number of ratings if available\n rating_tag = x.find(\"div\", \"NRNQAb\")\n if rating_tag is not None:\n rating_int = int(re.sub('[^0-9]','', ''.join(rating_tag.contents)))\n else:\n rating_int = int()\n\n number_of_user_ratings = x.find(\"div\", \"rriIab\")\n if number_of_user_ratings is not None:\n number_of_user_ratings = ''.join(number_of_user_ratings.contents)\n number_of_user_ratings_int = int(re.sub('[^0-9]','', number_of_user_ratings))\n else:\n number_of_user_ratings = int()\n\n # check if there is a \"Claim this page\" button\n claim = x.find(\"span\", \"VfPpkd-vQzf8d\")\n if claim is not None:\n claim = \"true\"\n else:\n claim = \"false\"\n\n # Save the services to the database\n db_action = (name_service, company, description, deviceslist, actionlist, no_actions, rating_int, number_of_user_ratings_int, claim)\n action_id = db_create_action(conn, db_action)\n\n # Save the category-action relationship\n action_category = (action_id, category_id)\n category_action_id = db_create_action_category_relation(conn, action_category)\n\n # Browse for proposed actions for each intent displayed in category overview\n\n for d in b.find_all(\"a\", href = re.compile(r'services/')):\n\n # Save the name of each action displayed in the overview\n for e in d.find_all(\"div\", \"FdWgBb\"):\n\n # overview_actions_tags = b.find_all(\"div\", \"FdWgBb\")\n # name_service_overview = overview_actions_tags[0].contents\n name_service_overview = e.contents\n name_service_overview = name_service_overview[0]\n filename_service_overview = create_filename(name_service_overview, name_topcategory, name_subcategory, 'service')\n print(filename_service_overview)\n url_overview = make_url(d['href'])\n print(\"service url_overview: \" + url_overview)\n\n # Save the html site belonging to each action\n save_html(url_overview, filename_service_overview)\n\n # Open the new html files\n sourcecode_service_overview = open_from_directory(filename_service_overview)\n soup_service_overview = BeautifulSoup(sourcecode_service_overview, \"html.parser\")\n\n #index_x = 0\n x = soup_service_overview.find(\"div\", \"VTLJT\")\n\n # extract company name\n company_tags = x.find_all(\"div\", \"lUcxUb CbqDob\")\n if not company_tags:\n company = str()\n else:\n company = company_tags[0].contents\n company = \"\".join(company)\n\n # extract description of action\n description_tags = x.find_all(\"div\", \"IB9ccf\")\n if description_tags is not None:\n description = description_tags[0].contents\n description = \"\".join(description)\n else:\n description = str()\n\n # extract devices and make string\n devices_tags = x.find_all(\"div\", \"rkJR4e CdFZQ\")\n deviceslist = \"\"\n for i in devices_tags[1:len(devices_tags)]:\n devices = str(i.contents)\n deviceslist += devices\n print(\"devices: \" + deviceslist)\n\n # extract actions\n action_tags = x.find_all(\"span\", \"bCHKrf\")\n actionlist = \"\"\n for a in action_tags:\n action = str(a.contents)\n actionlist += action\n print(\"actions: \" + actionlist)\n no_actions = len(action_tags)\n\n # extract actions\n action_tags = x.find_all(\"span\", \"bCHKrf\")\n actionlist = \"\"\n for a in action_tags:\n action = str(a.contents)\n actionlist += action\n print(\"actions: \" + actionlist)\n no_actions = len(action_tags)\n print(no_actions, type(no_actions))\n\n # extract rating and number of ratings if available\n rating_tag = x.find(\"div\", \"NRNQAb\")\n if 
rating_tag is not None:\n rating_int = int(re.sub('[^0-9]', '', ''.join(rating_tag.contents)))\n else:\n rating_int = int()\n\n number_of_user_ratings = x.find(\"div\", \"rriIab\")\n if number_of_user_ratings is not None:\n number_of_user_ratings = ''.join(number_of_user_ratings.contents)\n number_of_user_ratings_int = int(re.sub('[^0-9]', '', number_of_user_ratings))\n else:\n number_of_user_ratings_int = 0\n\n # check if there is a \"Claim this page\" button\n claim = x.find(\"span\", \"VfPpkd-vQzf8d\")\n if claim is not None:\n claim = \"true\"\n else:\n claim = \"false\"\n\n # Save the services from the overview to the database\n db_action = (name_service_overview, company, description, deviceslist, actionlist, no_actions, rating_int, number_of_user_ratings_int, claim)\n action_id = db_create_action(conn, db_action)\n\n # Save the category-action relationship\n action_category = (action_id, category_id)\n category_action_id = db_create_action_category_relation(conn, action_category)\n\n conn.close()\n\nelse:\n print(\"Error! Cannot create the database connection.\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238004172","text":"import collections\n\nclass Solution:\n def killProcess(self, pids, ppids, kill):\n pid_ppid_map = collections.defaultdict(list)\n\n for (pid, ppid) in zip(pids, ppids):\n pid_ppid_map[ppid].append(pid)\n\n res = []\n stack = [kill]\n while stack:\n id = stack.pop()\n res.append(id)\n stack.extend(pid_ppid_map[id])\n\n return res","sub_path":"leetcode/py/582.py","file_name":"582.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140555417","text":"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport gidgethub.routing\n\nfrom . 
import labels\n\nrouter = gidgethub.routing.Router()\n\n\ndef is_opened(event):\n return event.data[\"issue\"][\"state\"] == \"open\"\n\n\ndef has_classify(event):\n return any(\n label[\"name\"] == labels.Status.classify.value\n for label in event.data[\"issue\"][\"labels\"]\n )\n\n\nasync def add_classify_label(gh, event):\n await gh.post(\n event.data[\"issue\"][\"labels_url\"],\n data={\"labels\": [labels.Status.classify.value]},\n )\n\n\ndef has_labels(event):\n return event.data[\"issue\"][\"labels\"]\n\n\n# Removing 'classify' from closed issues is taken care of in the 'closed' submodule.\n@router.register(\"issues\", action=\"opened\")\n@router.register(\"issues\", action=\"reopened\")\nasync def classify_new_issue(event, gh, *args, **kwargs):\n \"\"\"Add the 'classify' label.\n\n If any labels already exist then don't apply the label as that implies that\n the issue has already been triaged.\n\n \"\"\"\n issue = event.data[\"issue\"]\n if issue[\"labels\"]:\n # Teammate pre-classified the issue when creating it.\n return\n await add_classify_label(gh, event)\n\n\n@router.register(\"issues\", action=\"labeled\")\nasync def added_label(event, gh, *args, **kwargs):\n if not is_opened(event):\n return\n elif event.data[\"label\"][\"name\"] == labels.Status.classify.value:\n return\n elif has_classify(event):\n await gh.delete(\n event.data[\"issue\"][\"labels_url\"], {\"name\": labels.Status.classify.value}\n )\n\n\n@router.register(\"issues\", action=\"unlabeled\")\nasync def no_labels(event, gh, *args, **kwargs):\n if not is_opened(event):\n return\n elif not has_labels(event):\n await add_classify_label(gh, event)\n","sub_path":"pvscbot/github/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146714813","text":"\n\nfrom xai.brain.wordbase.nouns._miscreant import _MISCREANT\n\n#calss header\nclass _MISCREANTS(_MISCREANT, ):\n\tdef __init__(self,): \n\t\t_MISCREANT.__init__(self)\n\t\tself.name = \"MISCREANTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"miscreant\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_miscreants.py","file_name":"_miscreants.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196212204","text":"\"\"\" \"\"\"\n\n# Standard library modules.\nimport unittest\nimport logging\nimport xml.etree.ElementTree as etree\nfrom io import StringIO\n\n# Third party modules.\n\n# Local modules.\nfrom pyhmsa.fileformat.xmlhandler.xmlhandler import _XMLHandler\nfrom pyhmsa.util.parameter import \\\n Parameter, NumericalAttribute, TextAttribute, XRayLineAttribute\nfrom pyhmsa.type.language import langstr\nfrom pyhmsa.type.xrayline import xrayline\n\n# Globals and constants variables.\nfrom pyhmsa.type.xrayline import NOTATION_IUPAC, NOTATION_SIEGBAHN\n\nclass MockParameter(Parameter):\n\n value1 = NumericalAttribute('s', True, 'Value1')\n value2 = NumericalAttribute('m', False, 'Value2')\n value3 = TextAttribute(False, 'Value3')\n value4 = XRayLineAttribute(NOTATION_SIEGBAHN, False, 'Value4')\n\n def __init__(self, value1, value2=None, value3=None, value4=None):\n self.value1 = value1\n self.value2 = value2\n self.value3 = value3\n self.value4 = value4\n\nclass Test_XMLHandler(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.h = _XMLHandler(1.0)\n self.obj = MockParameter(2.0)\n\n def tearDown(self):\n 
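# delegate to the base-class teardown so default fixture cleanup still runs\n 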
unittest.TestCase.tearDown(self)\n\n def test_parse_parameter_value1(self):\n source = StringIO(u'<Mock><Value1 DataType=\"double\" Unit=\"s\">2.0</Value1></Mock>')\n element = etree.parse(source)\n obj = self.h._parse_parameter(element, MockParameter)\n self.assertAlmostEqual(2.0, obj.value1, 2)\n self.assertEqual('s', obj.value1.unit)\n self.assertIsNone(obj.value2)\n\n def test_parse_parameter_value2(self):\n source = StringIO(u'<Mock><Value1 DataType=\"double\" Unit=\"s\">2.0</Value1><Value2 DataType=\"double\" Unit=\"nm\">4.0</Value2></Mock>')\n element = etree.parse(source)\n obj = self.h._parse_parameter(element, MockParameter)\n self.assertAlmostEqual(4.0, obj.value2, 2)\n self.assertEqual('nm', obj.value2.unit)\n\n def test_parse_parameter_value3(self):\n source = StringIO(u'<Mock><Value1 DataType=\"double\" Unit=\"s\">2.0</Value1><Value3>ABC</Value3></Mock>')\n element = etree.parse(source)\n obj = self.h._parse_parameter(element, MockParameter)\n self.assertEqual('ABC', obj.value3)\n\n source = StringIO(u'<Mock><Value1 DataType=\"double\" Unit=\"s\">2.0</Value1><Value3 alt-lang-en-US=\"abc\" alt-lang-ru=\"def\">ABC</Value3></Mock>')\n element = etree.parse(source)\n obj = self.h._parse_parameter(element, MockParameter)\n self.assertEqual('ABC', obj.value3)\n self.assertEqual('abc', obj.value3.alternatives['en-US'])\n self.assertEqual('def', obj.value3.alternatives['ru'])\n\n def test_parse_parameter_value4(self):\n source = StringIO(u'<Mock><Value1 DataType=\"double\" Unit=\"s\">2.0</Value1><Value4 Notation=\"IUPAC\" alt-Siegbahn=\"Ma\">M5-N6,7</Value4></Mock>')\n element = etree.parse(source)\n obj = self.h._parse_parameter(element, MockParameter)\n self.assertEqual('M5-N6,7', obj.value4)\n self.assertEqual(NOTATION_IUPAC, obj.value4.notation)\n self.assertEqual('Ma', obj.value4.alternative)\n self.assertEqual(NOTATION_SIEGBAHN, obj.value4.alternative.notation)\n\n def test_convert_parameter_value1(self):\n element = self.h._convert_parameter(self.obj, etree.Element('Mock'))\n self.assertEqual('2.0', element.find('Value1').text)\n self.assertEqual('s', element.find('Value1').get('Unit'))\n self.assertIsNone(element.find('Value2'))\n\n def test_convert_parameter_value2(self):\n self.obj.value2 = (4.0, 'nm')\n element = self.h._convert_parameter(self.obj, etree.Element('Mock'))\n self.assertEqual('4.0', element.find('Value2').text)\n self.assertEqual('nm', element.find('Value2').get('Unit'))\n\n def test_convert_parameter_value3(self):\n self.obj.value3 = langstr('ABC', {'en-US': 'abc', 'ru': 'def'})\n element = self.h._convert_parameter(self.obj, etree.Element('Mock'))\n self.assertEqual('ABC', element.find('Value3').text)\n self.assertEqual('abc', element.find('Value3').get('alt-lang-en-US'))\n self.assertEqual('def', element.find('Value3').get('alt-lang-ru'))\n\n def test_convert_parameter_value4(self):\n self.obj.value4 = xrayline('M5-N6,7', NOTATION_IUPAC, 'Ma')\n element = self.h._convert_parameter(self.obj, etree.Element('Mock'))\n self.assertEqual('M5-N6,7', element.find('Value4').text)\n self.assertEqual('IUPAC', element.find('Value4').get('Notation'))\n self.assertEqual('Ma', element.find('Value4').get('alt-Siegbahn'))\n\nif __name__ == '__main__': # pragma: no cover\n logging.getLogger().setLevel(logging.DEBUG)\n unittest.main()\n","sub_path":"pyhmsa/fileformat/xmlhandler/test_xmlhandler.py","file_name":"test_xmlhandler.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"455979133","text":"__author__ = 'student'\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nx = np.arange(-10, 10.01, 0.01)\nplt.plot(x, x**2)\n#plt.show()\nx = (1, 10, 1000)\n#x=input()\n#y=np.log((1+x**2)[1/(np.sin(x)+1)/(5/4+1/x**15)])\ndef f(x):\n# for i in x\n a = math.exp(1/(math.sin(x)+1))\n b = 5/4+1/x**15\n y = math.log(a/b,1+x**2)\n return(y)\nprint(f(1))\nprint(f(10))\nprint(f(1000))\n","sub_path":"lab_2/ex_1.py","file_name":"ex_1.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617225916","text":"# -*- coding: utf-8 -*-\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom scrapy import log\nimport urlparse\nfrom dynamic.items import DynamicItem\n\nclass DynamicSpider(BaseSpider):\n\n\tname = 'my_spider'\n\tstart_urls = [\n\t\t\"http://www.aizhan.com\"\n\t]\n\tallowed_domains = ['www.aizhan.com']\n\n\tdef parse(self, response):\n\t\thxs = HtmlXPathSelector(response)\n\t\turls = hxs.select('//a/@href').extract()\n\t\tfor url in urls:\n\t\t\turl = urlparse.urljoin(response.url, url)\n\t\t\tself.log('Found category url: %s' %url)\n\t\t\tyield Request(url, callback= self.parseCategory)\n\n\tdef parseCategory(self, response):\n\t\t#sel = Selector(response)\n\t\titem = DynamicItem()\n\t\titem['url'] = response.url\n\n\t\tyield item\n\t\thxs = HtmlXPathSelector(response)\n\t\tlinks = hxs.select(\"//a/@href\").extract()\n\t\tfor link in links:\n\t\t\titemLink = urlparse.urljoin(response.url, link)\n\t\t\tself.log('Found item link %s' %itemLink, log.DEBUG)\n\t\t\tyield Request(itemLink, callback = self.parseItem)\n\n\tdef parseItem(self,response):\n\t\t#sel = Selector(response)\n\t\titem = DynamicItem()\n\t\titem['url'] = response.url # sel.xpath('//title').extract()\n\n\t\treturn item","sub_path":"dynamic/spiders/my_spider.py","file_name":"my_spider.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512798399","text":"import pytest\nimport requests\nfrom pytest_bdd import scenarios, when, then\n\n# public api to get random single or two-part jokes of a given category\nJOKES_API = \"https://v2.jokeapi.dev/joke\"\n\n# path to feature file\nscenarios(\"../features/two_part_jokes.feature\", example_converters=dict(category=str, joke_type=str, response_category=str,\n response_joke_type=str))\n\n\n# 'when' behavior test\n# calls jokes api with category and joke type query\n@pytest.fixture\n@when('the Jokes API is queried with \"<category>\" and \"<joke_type>\"')\ndef joke_response(category, joke_type):\n params = {'format': 'json', 'type': joke_type}\n url_ext = \"/\" + category\n response = requests.get(JOKES_API+url_ext, params=params)\n return response\n\n\n# 'then' behavior test\n# tests the data responded from the jokes api call with the category and joke type query\n@pytest.fixture\n@then('the response contains a category of \"<response_category>\", a joke type of \"<response_joke_type>\", and a setup '\n 'and delivery')\ndef test_joke_response_data(joke_response, response_category, response_joke_type):\n # actual\n json_response = joke_response.json()\n # check actual vs expected for category\n is_correct_response_category = response_category == json_response[\"category\"]\n # check actual vs expected for joke type\n is_correct_joke_type = 
response_joke_type == json_response[\"type\"]\n # check actual vs expected for setup and delivery\n contains_setup_and_delivery = \"setup\" in json_response and \"delivery\" in json_response\n # assert all actual vs expected comparisons\n assert is_correct_response_category and is_correct_joke_type and contains_setup_and_delivery\n\n\n# 'then' behavior test\n# tests that the response status code from the jokes api was success (200)\n@pytest.fixture\n@then(\"the response status code is 200\")\ndef test_joke_response_success(joke_response):\n # check actual vs expected and assert\n assert joke_response.status_code == 200\n","sub_path":"step_definitions/test_two_part_jokes.py","file_name":"test_two_part_jokes.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644666799","text":"\ndef count_pos_neg(basecamp, compcamp):\n c_pos_neg = {}\n c_pos_neg[\"basecamp_p\"] = (basecamp[\"polarity\"] == \"positive\").sum()\n c_pos_neg[\"basecamp_n\"] = (basecamp[\"polarity\"] == \"negative\").sum()\n c_pos_neg[\"compcamp_p\"] = (compcamp[\"polarity\"] == \"positive\").sum()\n c_pos_neg[\"compcamp_n\"] = (compcamp[\"polarity\"] == \"negative\").sum()\n return c_pos_neg\n\n","sub_path":"server/get_pos_neg.py","file_name":"get_pos_neg.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140446538","text":"#coding = utf-8\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport requests\nimport os\nimport time\nimport wx\nfrom threading import Thread\nfrom wx.lib.pubsub import pub\nimport datetime\n\n\nclass WxFrame(wx.Frame):\n def __init__(self,parent=None,title=\"Image Fetcher\"):\n super(WxFrame,self).__init__(parent, title = title, style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MAXIMIZE_BOX)\n panel = wx.Panel(self)\n hbox = wx.BoxSizer(wx.HORIZONTAL)\n self.s1 = wx.StaticText(panel, -1, \"URL:\")\n hbox.Add(self.s1, wx.EXPAND | wx.ALIGN_LEFT | wx.ALL , 5)\n\n self.tc1 = wx.TextCtrl(panel,size=(280,30),value = \"http://www.lanhi.com.cn\")\n hbox.Add(self.tc1, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.ALL, 5)\n\n self.bt1 = wx.Button(panel, -1, \"Fetch\")\n hbox.Add(self.bt1, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.ALL, 5)\n self.bt1.Bind(wx.EVT_BUTTON, self.OnClick)\n\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\n self.gu = wx.Gauge(panel,-1, 100, (100,20),(385,23))\n hbox2.Add(self.gu)\n\n hbox3 = wx.BoxSizer(wx.HORIZONTAL)\n self.consoleBox = wx.TextCtrl(panel,size = (385, 200), style= wx.TE_MULTILINE | wx.CB_READONLY)\n self.consoleBox.SetBackgroundColour(panel.BackgroundColour)\n hbox3.Add(self.consoleBox)\n\n vBox = wx.BoxSizer(wx.VERTICAL)\n vBox.Add(hbox)\n vBox.Add(hbox2)\n vBox.Add(hbox3)\n\n panel.SetSizer(vBox)\n self.Center()\n self.Show()\n self.Fit()\n pub.subscribe(self.updateGauge, \"update\")\n pub.subscribe(self.echo, \"echo\")\n def updateGauge(self, msg):\n t = msg\n if isinstance(t, int): # if it is a number, the worker thread is still running, so show the progress\n self.gu.SetValue(t)\n\n else: # otherwise the thread has finished, so re-enable the button\n self.btn.Enable()\n def OnClick(self,event):\n self.btn = event.GetEventObject()\n self.btn.Disable()\n beauty = BeautifulPicture(self.tc1.Value)\n #btn.Enable()\n def echo(self,str):\n s = str\n self.consoleBox.AppendText(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S-') + s + \"\\n\")\nclass BeautifulPicture(Thread, WxFrame):\n def __init__(self, url):\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; 
x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'} # set a request header so the request mimics the Chrome browser\n self.url = url\n self.folder_path = os.getcwd() + \"\\BeautifulPicture\"\n Thread.__init__(self)\n self.start()\n\n def run(self):\n print(\"Parsed successfully\")\n r = self.request(self.url)\n print(\"Start fetching img tags\")\n all_img = BeautifulSoup(r.text, \"html.parser\").find_all('img')\n self.mkdir(self.folder_path)\n os.chdir(self.folder_path)\n img_len = len(all_img)\n for i in range(img_len):\n src = all_img[i]['src']\n msg = int(((i + 1) / img_len) * 100)\n if i == img_len - 1:\n msg = 100\n wx.CallAfter(pub.sendMessage, \"update\", msg=msg)\n self.save_img(src)\n os.chdir(\"..\")\n\n def request(self, url):\n r = requests.get(url)\n return r\n\n def mkdir(self, path):\n path = path.strip()\n isExists = os.path.exists(path)\n if not isExists:\n os.mkdir(path)\n def save_img(self, url):\n url = self.url + \"/\" + url\n filename = url.split(\"/\")[-1]\n img = self.request(url)\n f = open(filename, 'ab')\n f.write(img.content)\n f.close()\n print(filename + \" downloaded\")\n\napp = wx.App()\nWxFrame()\napp.MainLoop()","sub_path":"website/gethtml.py","file_name":"gethtml.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"116727843","text":"import argparse\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nfrom tqdm import tqdm\nimport os\nimport random\nimport gc\nfrom scipy.special import softmax\nimport spacy\nfrom spacy.attrs import ORTH\n# from spacy.tokenizer import Tokenizer\n\nfrom transformers import AdamW\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, BertTokenizer\n\n# set random seeds\ntorch.backends.cudnn.deterministic = True\nrandom.seed(1234)\nnp.random.seed(1234)\ntorch.manual_seed(1234)\ntorch.cuda.manual_seed(1234)\n\nnlp = spacy.load('en')\nnlp.tokenizer.add_special_case(\"<pre>\", [{ORTH: \"<pre>\"}])\nnlp.tokenizer.add_special_case(\"</pre>\", [{ORTH: \"</pre>\"}])\nnlp.tokenizer.add_special_case(\"<event>\", [{ORTH: \"<event>\"}])\nnlp.tokenizer.add_special_case(\"</event>\", [{ORTH: \"</event>\"}])\n# nlp.tokenizer = Tokenizer(nlp.vocab)\n\n# Model locations\nCLF_MODEL = \"models/PrecondCLFModel.pt\"\nES_CTX_0 = \"models/EventSampler_Ctx_0.pt\"\nES_CTX_2 = \"models/EventSampler_Ctx_2.pt\"\nES_CTX_5 = \"models/EventSampler_Ctx_5.pt\"\n\n\n# Model for precondition classifier (reranking purpose)\nclass Model(nn.Module):\n def __init__(self, tokenizer, encoder, embedding_dim, hidden_dim, n_class):\n super(Model, self).__init__()\n self.use_cuda = True if torch.cuda.is_available() else False\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n\n self.tokenizer = tokenizer\n self.encoder = encoder\n\n self.output = nn.Linear(self.embedding_dim*2, n_class)\n self.softmax = nn.LogSoftmax(dim=-1)\n\n def get_var(self, tensor):\n if self.use_cuda:\n return Variable(tensor.cuda())\n else:\n return Variable(tensor)\n\n def encode(self, indexed_tokens):\n max_len = max([len(ids) for ids in indexed_tokens]) + 2\n tokens_tensor = []\n token_type_ids = []\n attention_mask = []\n for instance in indexed_tokens:\n encoded_input = self.tokenizer.prepare_for_model(\n instance, max_length=max_len, pad_to_max_length=True)\n tokens_tensor.append(encoded_input['input_ids'])\n token_type_ids.append(encoded_input['token_type_ids'])\n attention_mask.append(encoded_input['attention_mask'])\n\n 
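# stack the padded per-example id lists into batch tensors for the encoder\n 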
tokens_tensor = torch.tensor(tokens_tensor)\n token_type_ids = torch.tensor(token_type_ids)\n attention_mask = torch.tensor(attention_mask)\n\n if self.use_cuda:\n tokens_tensor = tokens_tensor.cuda()\n token_type_ids = token_type_ids.cuda()\n attention_mask = attention_mask.cuda()\n\n return self.encoder(\n input_ids=tokens_tensor,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask)[0]\n\n def forward(self, sentences, relation):\n\n sent_output = self.encode(sentences)\n\n batch_size, seq_len, dim = sent_output.size()\n\n # rel_repr = []\n rel_repr = None\n for para, rels in zip(sent_output, relation):\n e1, e2 = rels\n e1_idx = torch.arange(e1[0], e1[1])\n e2_idx = torch.arange(e2[0], e2[1])\n e1_repr = torch.sum(\n para.index_select(0, self.get_var(e1_idx)),\n dim=0)\n e2_repr = torch.sum(\n para.index_select(0, self.get_var(e2_idx)),\n dim=0)\n\n e_repr = torch.cat((e1_repr, e2_repr)).unsqueeze(0)\n if rel_repr is None:\n rel_repr = e_repr\n else:\n rel_repr = torch.cat((rel_repr, e_repr), dim=0)\n\n logits = self.output(rel_repr)\n\n return self.softmax(logits)\n\n\ndef load_data(files, max_len=50, context=-1, eos='<eos>'):\n # If context is set as -1\n # data is loaded for Precondition Generator\n # Or (context >= 0), data is loaded for Event Sampler\n # Default value: 0\n dataset = {'train': {}, 'dev': {}}\n for set_info, f in files.items():\n with open(f) as fin:\n input_data = []\n target = []\n generation_seeds = []\n line_tqdm = tqdm(fin)\n for line in line_tqdm:\n row = line.strip().split(\"\\t\")\n if len(row[0].split()) > max_len:\n continue\n if \"<event>\" not in row[0]:\n continue\n\n precond = row[1].split(\"<pre> \")[1].split(\" </pre>\")[0]\n\n if context != -1:\n fcontext = row[0].split(\" <event> \")\n if len(fcontext) != 2:\n continue\n bcontext = fcontext[1].split(\" </event> \")\n if len(bcontext) != 2:\n continue\n event = bcontext[0]\n bcontext = bcontext[1].split()\n fcont = []\n fcontext = fcontext[0].split()[::-1]\n for i in range(context):\n if i > len(fcontext)-1 or fcontext[i] == '[BLANK]':\n break\n else:\n fcont.append(fcontext[i])\n bcont = []\n for i in range(context):\n if i > len(bcontext)-1 or bcontext[i] == '[BLANK]':\n break\n else:\n bcont.append(bcontext[i])\n\n if context != 0:\n event = fcont[::-1] + ['<event>'] \\\n + [event] + ['</event>'] + bcont\n else:\n event = [event]\n input_data.append(event + ['<sep>'] + [precond] + [eos])\n generation_seeds.append(event + ['<sep>'])\n target.append([precond] + [eos])\n\n else:\n input_data.append(\n row[0].split()\n + ['<E>', precond, '<sep>']\n + row[1].split() + [eos]\n )\n generation_seeds.append(\n row[0].split()\n + ['<E>', precond, '<sep>']\n )\n target.append(row[1].split() + [eos])\n\n dataset[set_info]['input'] = input_data\n dataset[set_info]['target'] = target\n dataset[set_info]['seed'] = generation_seeds\n\n return dataset\n\n\ndef prepare(dataset, tokenizer):\n data_input = {}\n gen_seed = {}\n target = {}\n for set_info, data in dataset.items():\n data_input[set_info] = []\n gen_seed[set_info] = []\n target[set_info] = []\n for input_text in data['input']:\n data_input[set_info].append(tokenizer.encode(\" \".join(input_text)))\n for input_text in data['seed']:\n gen_seed[set_info].append(tokenizer.encode(\" \".join(input_text)))\n for input_text in data['target']:\n target[set_info].append(tokenizer.encode(\" \".join(input_text)))\n\n return data_input, gen_seed, target\n\n\ndef clf_prepare(data, tokenizer):\n paragraphs = []\n relations = []\n for rid, row in 
enumerate(data):\n\n sent = row['sent'].split()\n\n tokens = tokenizer.tokenize(\" \".join(sent))\n if len(tokens) > 512:\n continue\n i, j, start_idx = 0, 0, 0\n new_idxs = []\n text_buf = []\n while i < len(sent):\n if sent[i] == \" \"*len(sent[i]):\n i += 1\n new_idxs.append(0)\n else:\n break\n while i < len(sent) and j < len(tokens):\n text_buf.append(tokens[j])\n if tokenizer.convert_tokens_to_string(text_buf) \\\n == tokenizer.convert_tokens_to_string(\n tokenizer.tokenize(sent[i])\n ):\n i += 1\n new_idxs.append(start_idx)\n start_idx = j+1\n text_buf = []\n j += 1\n new_idxs.append(len(tokens))\n\n paragraphs.append(tokenizer.convert_tokens_to_ids(tokens))\n\n relations.append(\n [[new_idxs[ii]+1 for ii in row['source']['idx']],\n [new_idxs[ii]+1 for ii in row['target']['idx']]]\n )\n\n return paragraphs, relations\n\n\ndef get_input_for_model(tokenizer, indexed_tokens, event_lens):\n lengths = [len(ids) for ids in indexed_tokens]\n max_len = min(max(lengths), 1024)\n tokens_tensor = []\n token_type_ids = []\n attention_mask = []\n targets = []\n for instance, l in zip(indexed_tokens, event_lens):\n # This returns:\n # padded input\n # token_type_ids\n # attention mask\n encoded_input = tokenizer.prepare_for_model(\n instance, max_length=max_len, pad_to_max_length=True)\n\n tokens_tensor.append(encoded_input['input_ids'])\n token_type_ids.append(encoded_input['token_type_ids'])\n attention_mask.append(encoded_input['attention_mask'])\n\n # Masked out token labels before the <sep> (inclusive)\n # and all <PAD> tokens\n # This makes loss calculated only on the precondition part\n # (after <sep> before <PAD>)\n targets.append(\n [-100]*l\n + instance[l:]\n + [-100]*(max_len - len(instance))\n )\n\n tokens_tensor = torch.tensor(tokens_tensor)\n token_type_ids = torch.tensor(token_type_ids)\n attention_mask = torch.tensor(attention_mask)\n targets = torch.tensor(targets)\n\n if torch.cuda.is_available():\n tokens_tensor = tokens_tensor.cuda()\n token_type_ids = token_type_ids.cuda()\n attention_mask = attention_mask.cuda()\n targets = targets.cuda()\n\n return (tokens_tensor, attention_mask, token_type_ids, targets)\n\n\ndef finetuning(args):\n\n torch.cuda.set_device(args.device)\n print(\"Load Data\")\n print(args.train_data, args.dev_data)\n files = {'train': args.train_data, 'dev': args.dev_data}\n\n model_name = 'gpt2'\n tokenizer = GPT2Tokenizer.from_pretrained(model_name, pad_token='<PAD>')\n # Add new tokens\n # <sep>: separator, a cue for model to generate\n # [BLANK]: a masked-out precondition part\n # <pre> ... </pre>: precondition markers\n # <event> ... 
</event>: target event markers\n # <E>: precondition candidate marker\n tokenizer.add_tokens(['<sep>', '[BLANK]',\n '<pre>', '</pre>', '<event>', '</event>', '<E>'])\n\n dataset = load_data(\n files,\n max_len=args.max_len,\n context=args.context,\n eos=tokenizer.eos_token\n )\n if args.load_model is not None:\n model = torch.load(args.load_model, map_location=f'cuda:{args.device}')\n\n else:\n model = GPT2LMHeadModel.from_pretrained(model_name)\n # Resize model according to the updated vocab\n model.resize_token_embeddings(len(tokenizer))\n\n if torch.cuda.is_available():\n model.cuda()\n\n # tokenize and get generatin seeds from data\n data_input, gen_seed, target = prepare(dataset, tokenizer)\n\n save_model_path = os.path.join(args.save_model_path, args.experiment)\n if not os.path.exists(save_model_path):\n os.makedirs(save_model_path)\n\n # add parameters to optimizer\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {\n 'params':\n [p for n, p in model.named_parameters()\n if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0\n },\n {\n 'params':\n [p for n, p in model.named_parameters()\n if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=1e-8\n )\n\n n_params = sum([np.prod(p.size()) for p in model.parameters()])\n print(\"#parameters: {}\".format(n_params))\n\n N = len(data_input['train'])\n print(N//args.batch_size)\n best_dev_loss = 9999\n\n for epoch in range(1, args.epochs+1):\n print(\"Epoch {}:\".format(epoch))\n batch_idxs = np.random.permutation(N//args.batch_size+1)\n line_tqdm = tqdm(batch_idxs, dynamic_ncols=True)\n total_loss = []\n\n for i, batch_idx in enumerate(line_tqdm):\n model.train()\n enc_input = data_input['train'][batch_idx*args.batch_size:min((batch_idx+1)*args.batch_size, N)]\n tmp = gen_seed['train'][batch_idx*args.batch_size:min((batch_idx+1)*args.batch_size, N)]\n event_lens = [len(s) for s in tmp]\n\n # get adjusted input for training\n input_feed = get_input_for_model(tokenizer, enc_input, event_lens)\n\n model.zero_grad()\n\n # train the model\n loss = model(\n input_ids=input_feed[0],\n attention_mask=input_feed[1],\n token_type_ids=input_feed[2],\n labels=input_feed[3]\n )[0]\n\n loss.backward()\n total_loss.append(loss.data.cpu().numpy().tolist())\n optimizer.step()\n gc.collect()\n torch.cuda.empty_cache()\n\n # print(\"Loss: {}\".format(sum(total_loss)/len(total_loss)))\n if i != 0 and (i % 3000 == 0 or i == len(batch_idxs)-1):\n model.eval()\n with torch.no_grad():\n # example generation\n for d, t in zip(gen_seed['dev'][:5], target['dev'][:5]):\n test = torch.tensor(d).unsqueeze(0).cuda()\n sent = model.generate(\n input_ids=test,\n max_length=100,\n top_p=0.95,\n repetition_penalty=1.2)\n print(\"Seed: \", tokenizer.decode(d))\n text = tokenizer.decode(sent[0][len(d):])\n text = text.split(tokenizer.eos_token)[0]\n print(\"Generated: \", text)\n print(\"Referece: \", tokenizer.decode(t))\n\n # check loss on dev set\n for set_info in ['dev']:\n NN = len(data_input[set_info])\n total_loss = []\n for idx in range(NN//args.batch_size):\n enc_input = data_input[set_info][idx*args.batch_size:min((idx+1)*args.batch_size, NN)]\n tmp = gen_seed[set_info][idx*args.batch_size:min((idx+1)*args.batch_size, NN)]\n event_lens = [len(s) for s in tmp]\n\n input_feed = get_input_for_model(\n tokenizer,\n enc_input,\n event_lens\n )\n\n loss = model(\n input_ids=input_feed[0],\n attention_mask=input_feed[1],\n 
token_type_ids=input_feed[2],\n labels=input_feed[3]\n )[0]\n\n total_loss.append(loss.data.cpu().numpy().tolist())\n\n loss = sum(total_loss) / len(total_loss)\n print(\"Test on {} set:\".format(set_info))\n print(\"\\tLoss: {}\".format(loss))\n if set_info == 'dev':\n if best_dev_loss > loss:\n best_dev_loss = loss\n torch.save(\n model,\n os.path.join(\n save_model_path,\n \"DevBest.pt\"\n )\n )\n\n return\n\n\ndef topk_generate(model, context, k=10, max_len=50):\n logits = model(\n input_ids=context\n )[0]\n\n logits = logits[:, -1, :]\n probs = F.softmax(logits, dim=-1)\n scores, idxs = torch.topk(probs, k)\n scores = scores.cpu().numpy().tolist()\n\n context = context.repeat(k, 1)\n context = torch.cat((context, idxs.view(k, 1)), dim=-1)\n\n for i in range(max_len-1):\n logits = model(\n input_ids=context\n )[0]\n\n logits = logits[:, -1, :]\n probs = F.softmax(logits, dim=-1)\n\n next_tokens = torch.argmax(probs, dim=1, keepdim=True)\n\n context = torch.cat([context, next_tokens], dim=1)\n\n return scores, context\n\n\ndef get_event(data, context=0):\n\n fcontext = data.split(\" <event> \")\n bcontext = fcontext[1].split(\" </event> \")\n event = bcontext[0]\n bcontext = bcontext[1].split()\n fcont = []\n fcontext = fcontext[0].split()[::-1]\n for i in range(context):\n if i > len(fcontext)-1 or fcontext[i] == '[BLANK]':\n break\n else:\n fcont.append(fcontext[i])\n bcont = []\n for i in range(context):\n if i > len(bcontext)-1 or bcontext[i] == '[BLANK]':\n break\n else:\n bcont.append(bcontext[i])\n\n event = fcont[::-1] + ['<event>'] + [event] + ['</event>'] + bcont\n if len(event) == 3:\n event = [event[1]]\n\n event += ['<sep>']\n\n return \" \".join(event)\n\n\ndef clf_test(model, test_para, test_relation, thr=0.5):\n model.eval()\n\n score = model(test_para, test_relation)\n score, idxs = torch.max(F.softmax(score, dim=-1), dim=-1)\n pred = idxs.cpu().tolist()\n score = score.cpu().tolist()\n\n for i in range(len(pred)):\n if pred[i] == 1:\n if score[i] < thr:\n pred[i] = 0\n score[i] = 1-score[i]\n\n # Apply softmax to scores to make them\n # in the same range of scores from Event Sampler\n out = (pred, softmax(score))\n\n return out\n\n\ndef precond_rerank(generated_precond, alpha=1.):\n alpha = 0.99\n rerank_score = [\n alpha*data['precond_score']\n + (1-alpha)*data['event_score']\n for data in generated_precond]\n sorted_idxs = np.argsort(rerank_score)[::-1]\n\n sorted_precond = []\n for idx in sorted_idxs:\n generated_precond[idx]['rerank_score'] = rerank_score[idx]\n sorted_precond.append(generated_precond[idx])\n\n return sorted_precond\n\n\ndef similarity_filter(clf_tokenizer, clf_model, sorted_precond, k=10):\n\n paragraphs = []\n for data in sorted_precond:\n paragraphs.append(clf_tokenizer.encode(data['precondition_text']))\n\n sent_encoding = clf_model.encode(paragraphs)\n cls_tokens = sent_encoding[:, 0]\n\n magnitude = torch.sqrt(\n torch.sum(\n torch.mul(\n cls_tokens,\n cls_tokens\n ),\n dim=-1,\n keepdim=True\n )\n )\n cls_tokens /= magnitude\n cos_sim = torch.matmul(cls_tokens, cls_tokens.transpose(1, 0))\n cos_sim = cos_sim.cpu().numpy()\n\n cos_mean = np.mean(cos_sim)\n cos_std = np.std(cos_sim)\n thr = cos_mean + cos_std\n\n result = []\n flag = [1]*len(cos_sim)\n for i in range(len(cos_sim)):\n if flag[i] == 0:\n continue\n else:\n result.append(sorted_precond[i])\n if len(result) == k:\n break\n # Filter out similar preconditions\n for j in range(i+1, len(cos_sim)):\n if flag[j] and cos_sim[i, j] >= thr:\n flag[j] = 0\n\n return result\n\n\ndef 
generation(args):\n\n torch.cuda.set_device(args.device)\n\n pretrain_model_name = 'gpt2'\n tokenizer = GPT2Tokenizer.from_pretrained(\n pretrain_model_name,\n pad_token='<PAD>'\n )\n tokenizer.add_tokens(['<sep>', '[BLANK]',\n '<pre>', '</pre>', '<event>', '</event>', '<E>'])\n\n model = torch.load(args.load_model, map_location=f'cuda:{args.device}')\n model.eval()\n if args.context == 0:\n event_sampler = torch.load(\n ES_CTX_0,\n map_location=f'cuda:{args.device}'\n )\n elif args.context == 3:\n event_sampler = torch.load(\n ES_CTX_2,\n map_location=f'cuda:{args.device}'\n )\n elif args.context == 5:\n event_sampler = torch.load(\n ES_CTX_5,\n map_location=f'cuda:{args.device}'\n )\n\n clf_model_name = 'bert-base-cased'\n clf_tokenizer = BertTokenizer.from_pretrained(\n clf_model_name,\n pad_token='<PAD>'\n )\n clf_model = torch.load(CLF_MODEL)\n\n if torch.cuda.is_available():\n model.cuda()\n event_sampler.cuda()\n clf_model.cuda()\n\n with torch.no_grad():\n if args.val:\n with open(\"data/val_multi_auto.txt\", \"r\") as fin, \\\n open(f\"val_{pretrain_model_name}_c={args.context}.txt\", \"w\") \\\n as fout:\n header = [\"Target Event\", \"Generated Precondition\"]\n fout.write(\"\\t\".join(header) + \"\\n\")\n for lid, line in enumerate(fin):\n generated_precond = []\n row = line.strip().split(\"\\t\")\n\n print(\"Target Event: \", row[0])\n\n event = get_event(row[0], context=args.context)\n print(event)\n event_ids = tokenizer.encode(event)\n test = torch.tensor(event_ids).unsqueeze(0)\n if torch.cuda.is_available():\n test = test.cuda()\n scores, pre_events = topk_generate(\n event_sampler,\n test,\n k=100\n )\n\n line_tqdm = tqdm(\n enumerate(zip(pre_events, scores[0])),\n dynamic_ncols=True\n )\n for i, (e, score) in line_tqdm:\n e_text = tokenizer.decode(e[len(event_ids):])\n e_text = e_text.split(tokenizer.eos_token)[0]\n token_ids = tokenizer.encode(\n row[0]\n + f\" <E> {e_text} <sep>\"\n )\n gen_input = torch.tensor(token_ids).unsqueeze(0)\n if torch.cuda.is_available():\n gen_input = gen_input.cuda()\n\n sent = model.generate(\n input_ids=gen_input,\n max_length=150,\n top_p=0.95,\n repetition_penalty=1.2)\n print(\"Seed: \", tokenizer.decode(token_ids))\n text = tokenizer.decode(sent[0][len(token_ids):])\n text = text.split(tokenizer.eos_token)[0]\n print(\"Generated: \", text)\n\n sent = row[0].replace(\"[BLANK]\", text)\n doc = nlp(sent)\n sent = [t.text for t in doc if t.text != \" \"]\n\n doc = nlp(text)\n precond = [\n t.text for t in doc if t.text != \" \"\n and t.text != \"<pre>\"\n and t.text != \"</pre>\"\n ]\n\n sent_list = []\n pre_idx, post_idx = [], []\n for tid, t in enumerate(sent):\n if t in [\"<pre>\", \"<event>\", \"</pre>\", \"</event>\"]:\n length = len(sent_list)\n if t == \"<pre>\" or t == \"</pre>\":\n pre_idx.append(length)\n else:\n post_idx.append(length)\n else:\n sent_list.append(t)\n\n data = {}\n pre = \" \".join(sent_list[pre_idx[0]:pre_idx[1]])\n post = \" \".join(sent_list[post_idx[0]:post_idx[1]])\n event = event.split()[0]\n data['sent_id'] = f\"{event}{lid:03d}_{i:03d}\"\n data['source'] = {'event': pre, 'idx': pre_idx}\n data['target'] = {'event': post, 'idx': post_idx}\n data['label'] = 0\n data['event_score'] = score\n data['sent'] = \" \".join(sent_list)\n data['precondition_text'] = \" \".join(precond)\n\n generated_precond.append(data)\n\n paragraphs, relations = clf_prepare(\n generated_precond,\n clf_tokenizer\n )\n\n pred, scores = clf_test(clf_model, paragraphs, relations)\n for data, p, s in zip(generated_precond, pred, scores):\n 
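# attach the classifier's label and confidence score to each generated candidate\n 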
data['prediction'] = p\n data['precond_score'] = s\n\n sorted_precond = precond_rerank(\n generated_precond,\n alpha=0.99\n )\n filtered_precond = similarity_filter(\n clf_tokenizer,\n clf_model,\n sorted_precond,\n k=10\n )\n\n for data in filtered_precond:\n print(data)\n fout.write(json.dumps(data) + \"\\n\")\n\n else:\n with open(\"data/test_gen_peko_blank_target.txt\", \"r\") as fin, \\\n open(f\"DiP_c={args.context}_eventsampling.txt\", \"w\") as eout, \\\n open(f\"DiP_c={args.context}_reranking.txt\", \"w\") as fout, \\\n open(f\"DiP_c={args.context}_reranking_filtering.txt\", \"w\") as ffout:\n # header = [\"Target Event\", \"Reference\", \"Generated Precondition\"]\n # fout.write(\"\\t\".join(header) + \"\\n\")\n inputs = []\n for line in fin:\n row = line.strip().split(\"\\t\")\n inputs.append(row)\n\n # Generate preconditions from 500 examples\n idxs = np.random.permutation(len(inputs))[:5]\n for lid, idx in enumerate(idxs):\n generated_precond = []\n row = inputs[idx]\n\n print(f\"{lid}\\t Target Event: {row[0]}\")\n\n event = get_event(row[0], context=args.context)\n print(f\"\\t Event trigger with context: {event}\")\n event_ids = tokenizer.encode(event)\n test = torch.tensor(event_ids).unsqueeze(0)\n if torch.cuda.is_available():\n test = test.cuda()\n\n # Generate TopK (K = 100) precondition events\n # using Event Sampler\n scores, pre_events = topk_generate(\n event_sampler,\n test,\n k=100\n )\n\n line_tqdm = tqdm(\n enumerate(zip(pre_events, scores[0])),\n dynamic_ncols=True\n )\n for i, (e, score) in line_tqdm:\n e_text = tokenizer.decode(e[len(event_ids):])\n e_text = e_text.split(tokenizer.eos_token)[0]\n token_ids = tokenizer.encode(\n row[0]\n + f\" <E> {e_text} <sep>\"\n )\n gen_input = torch.tensor(token_ids).unsqueeze(0)\n if torch.cuda.is_available():\n gen_input = gen_input.cuda()\n\n # Precondition Generation\n sent = model.generate(\n input_ids=gen_input,\n max_length=150,\n top_p=0.95,\n repetition_penalty=1.2)\n text = tokenizer.decode(sent[0][len(token_ids):])\n text = text.split(tokenizer.eos_token)[0]\n\n sent = row[0].replace(\"[BLANK]\", text)\n doc = nlp(sent)\n sent = [t.text for t in doc if t.text != \" \"]\n sent_list = []\n\n doc = nlp(text)\n precond = [\n t.text for t in doc if t.text != \" \"\n and t.text != \"<pre>\"\n and t.text != \"</pre>\"\n ]\n\n pre_idx, post_idx = [], []\n for tid, t in enumerate(sent):\n if t in [\"<pre>\", \"<event>\", \"</pre>\", \"</event>\"]:\n length = len(sent_list)\n if t == \"<pre>\" or t == \"</pre>\":\n pre_idx.append(length)\n else:\n post_idx.append(length)\n else:\n sent_list.append(t)\n\n data = {}\n if len(pre_idx) < 2 or len(post_idx) < 2:\n continue\n pre = \" \".join(sent_list[pre_idx[0]:pre_idx[1]])\n post = \" \".join(sent_list[post_idx[0]:post_idx[1]])\n event = event.split()[0]\n data['sent_id'] = f\"{event}{lid:03d}_{i:03d}\"\n data['source'] = {'event': pre, 'idx': pre_idx}\n data['target'] = {'event': post, 'idx': post_idx}\n data['label'] = 0\n data['event_score'] = score\n data['sent'] = \" \".join(sent_list)\n data['precondition_text'] = \" \".join(precond)\n\n generated_precond.append(data)\n\n # Top 10 preconditions after\n # event sampling + candidate generation\n for data in generated_precond[:10]:\n eout.write(json.dumps(data) + \"\\n\")\n\n paragraphs, relations = clf_prepare(\n generated_precond,\n clf_tokenizer\n )\n\n pred, scores = clf_test(clf_model, paragraphs, relations)\n for data, p, s in zip(generated_precond, pred, scores):\n data['prediction'] = p\n data['precond_score'] = 
s\n\n # Precondition Reranking\n sorted_precond = precond_rerank(\n generated_precond,\n alpha=0.99\n )\n\n # Top 10 preconditions after reranking\n for data in sorted_precond[:10]:\n fout.write(json.dumps(data) + \"\\n\")\n\n # Similarity Filter\n filtered_precond = similarity_filter(\n clf_tokenizer,\n clf_model,\n sorted_precond,\n k=10\n )\n\n # Top 10 preconditions after\n # filtering based on similarity score\n for data in filtered_precond:\n ffout.write(json.dumps(data) + \"\\n\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--train_data', type=str, default=\"data/train_gen_peko_blank_target.txt\")\n parser.add_argument('--dev_data', type=str, default=\"data/dev_gen_peko_blank_target.txt\")\n parser.add_argument('--test_data', type=str, default=\"../\")\n\n parser.add_argument('-ep', '--epochs', type=int, default=100)\n parser.add_argument('-bs', '--batch_size', type=int, default=32)\n parser.add_argument('-lr', '--learning_rate', type=float, default=1e-5)\n\n parser.add_argument('--load_model', type=str, default=None)\n parser.add_argument('-bin', '--save_model_path', type=str, default='data/PrecondGen/')\n parser.add_argument('-ex', '--experiment', type=str, default='test')\n parser.add_argument('--test', action='store_true')\n parser.add_argument('-c', '--context', type=int, default=0)\n parser.add_argument('--val', action='store_true')\n parser.add_argument('-d', '--device', type=int, default=0)\n parser.add_argument('--max_len', type=int, default=100)\n\n args = parser.parse_args()\n\n if args.test:\n generation(args)\n else:\n finetuning(args)\n","sub_path":"dip_main.py","file_name":"dip_main.py","file_ext":"py","file_size_in_byte":34338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302132642","text":"'''\nimport inspect\n\n\ndef lineno():\n return inspect.currentframe().f_back.f_lineno\n\n\ndef caller():\n return inspect.currentframe().f_back.f_code.co_name\n\ndef f():\n print(caller())\n\ndef g():\n f()\n\nprint(lineno())\nprint(lineno())\ng()\n'''\n\n\"\"\"\nclass A:\n DIR = 0\n FILE = 1\n\n def __init__(self, name, type):\n self.name = name\n self.type = type\n\n def is_dir(self):\n return self.type == A.DIR\n\n def is_file(self):\n return self.type == A.FILE\n\n def __eq__(self, other):\n return self.name == other.name and self.type == other.type\n\n def __str__(self):\n return '{}'.format(self.name)\n\n\nclass B(A):\n def __init__(self, name):\n super().__init__(name, A.DIR)\n\n def __str__(self):\n return '[ DIR ] {}'.format(self.name)\n\n\nclass C(A):\n def __init__(self, name, id):\n super().__init__(name, A.FILE)\n self.id = id\n\n def __eq__(self, other):\n return (\n self.name == other.name and\n self.type == other.type and\n self.id == other.id)\n\n def __str__(self):\n return '[ FILE ] {} ({})'.format(self.name, self.id)\n\n\nl = [B('0'), C('1.o', 1212)]\nprint(l)\nprint(l[0].is_dir())\nprint(l[1].is_dir())\nfor i in l:\n print(i)\nprint(l[0] == l[1])\nprint(l[1] == l[0])\n\"\"\"\n\n\"\"\"\nimport itertools\n\nclass A:\n def __init__(self, x):\n self.x = x\n\nd = {1: {A(0), A(1)}, 2: {A(1), A(2), A(4)}}\nfor k, v in d.items():\n groups = itertools.groupby(v, lambda n: n.x % 2)\n for key, group in groups:\n if len(list(group)) == 1:\n continue\n for node in group:\n node.x = 0\nfor k, v in d.items():\n print(k, end=' ')\n for i in v:\n print(i.x, end=' ')\n print()\n\"\"\"\n\"\"\"\ndef groupby(seq, key):\n groups = {}\n for i in seq:\n 
groups.setdefault(key(i), set()).add(i)\n return groups\n\nclass A:\n def __init__(self, x):\n self.x = x\n\nd = {1: {A(0), A(1)}, 2: {A(1), A(2), A(4)}}\nfor k, v in d.items():\n print(k, end=': ')\n for i in v:\n print(i.x, end=' ')\n print()\nfor k, v in d.items():\n groups = groupby(v, lambda n: n.x % 2)\n for key, group in groups.items():\n if len(group) == 1:\n continue\n for node in group:\n node.x = 0\nfor k, v in d.items():\n print(k, end=': ')\n for i in v:\n print(i.x, end=' ')\n print()\n\"\"\"\n\n'''\nclass A:\n def __init__(self, x):\n self.x = x\n\n def __eq__(self, other):\n return self.x == other.x\n\n def __hash__(self):\n return hash((self.x))\n\na = A(1)\nb = A(2)\nc = A(3)\n\nd = {a: b, b: c}\nprint(d)\nfor k, v in d.items():\n print(k.x, v.x)\nfor k in d:\n if k.x == 2:\n k.x = 12\n print('-', k in d) # should be true, but false\n################################\nd = dict(d.items()) #\n################################\n\nprint(d)\nfor k, v in d.items():\n print(k.x, v.x)\nprint(A(2) in d)\nprint(A(12) in d)\nprint(A(12).__hash__())\nfor k in d.keys():\n print('=', k in d, k in d.keys(), k.__hash__())\n# print(d[A(1)])\n\n'''\n\nclass A:\n def __init__(self, x):\n self.x = x\n\n def __eq__(self, other):\n return self.x == other.x\n\n def __hash__(self):\n return hash((self.x))\n\n\nclass D(dict):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def refresh(self):\n # return D(self.items())\n return D([(k, {i for i in v}) for k, v in self.items()])\n\n\na = A(1)\nb = A(2)\nc = A(3)\nd = D({a: {b}, b: {c}})\nfor k, v in d.items():\n print('{}: {}'.format(k.x, ', '.join([str(i.x) for i in v])))\nfor k in d:\n if k.x == 2:\n k.x = 12\nfor k, v in d.items():\n print('{}: {}'.format(k.x, ', '.join([str(i.x) for i in v])))\nfor k in d.keys():\n print('0:', k in d, k in d.keys())\nprint('{}'.format(', '.join([str(i.x) for i in d[A(1)]])))\nprint('0:', A(12) in d[A(1)])\nd = d.refresh()\nfor k in d.keys():\n print('1:', k in d, k in d.keys())\nprint('1:', A(12) in d[A(1)])\nprint('{}'.format(', '.join([str(i.x) for i in d[A(1)]])))\n","sub_path":"AFS/expr.py","file_name":"expr.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430844184","text":"#-*- coding: utf-8 -*-\n\n\ndef convert_index(path1,path2,p):\n\tdelete = [\"!\",\"@\",\"#\",\"$\",\"%\",\"^\",\"&\",\"*\",\"(\",\")\",\"-\",\"=\",\"_\",\"+\",\"~\",\",\",\".\",\"?\",\"/\",\">\",\"<\",\" \",\"\\t\"]\n\n\tmax_indexs = []\n\thead = []\n\tcount = []\n\tread = \"\"\n\twith open(path1, 'r', encoding='utf8') as f:\n\t\tread = f.read()\n\t\tfor d in delete:\n\t\t\tread = read.replace(d, '')\n\n\twith open(path2, 'w', encoding='utf8') as f:\n\t\tf.write(read)\n\n\twith open(path2, 'r', encoding='utf8') as f:\n\t\tfor read in iter(lambda: f.readline(), ''):\n\t\t\tfor word in read:\n\t\t\t\tif word is '\\n':\n\t\t\t\t\t1+1\n\t\t\t\telif not word in head:\n\t\t\t\t\thead.append(word)\n\t\t\t\t\tcount.append(1)\n\t\t\t\telse:\n\t\t\t\t\tcount[head.index(word)] += 1\n\n\twhile True:\n\t\tif max(count) >= (len(head)*p):\n\t\t\tmax_index = count.index(max(count))\n\t\t\tmax_indexs.append(max_index)\n\t\t\tcount[max_index] = -1\n\t\telse:\n\t\t\tbreak\n\tresult = [head[value] for value in max_indexs]\n\treturn result\n\n\nif __name__ == 
'__main__':\n\tprint(convert_index(0.3))\n\n","sub_path":"FFP/module/frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17125802","text":"#!C:\\Python36\\python.exe\n\nimport process as p\nimport sys\n\n\n\nfile = sys.argv[1]\nfileName = sys.argv[2]\nall_words = p.get_words(file)\n\nword_scores = p.get_word_scores(all_words)\n\nall_sentences = p.get_sentences(file)\n\nall_sentences = p.omit_transition_sentences(all_sentences)\n\nsentence_scores = p.get_sentence_scores_list(all_sentences, word_scores)\nmaxi = len(all_sentences)\nimport random\nr = random.randint(4,int(maxi/2))\nnum_of_sentences =r\nif num_of_sentences > len(all_sentences):\n print(\"The summary cannot be longer than the text.\")\n\nthreshold = p.x_highest_score(sentence_scores, num_of_sentences)\n\ntop_sentences = p.top_sentences(all_sentences,sentence_scores,threshold)\n\nsummary = \"\"\nfor sentence in top_sentences:\n summary += sentence + \" \"\nsummary = summary[:-1]\n\ntext_file = open(\"summary/%s.txt\"%fileName, \"w\")\nprint(\"%s.txt\"%fileName)\ntext_file.write(summary)\ntext_file.close()\n\n\n\n","sub_path":"summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216776940","text":"def fibonacci(x):\n times = int(x)\n\n # first two terms\n n1, n2 = 0, 1\n i = 0\n\n # check if the number of terms is valid\n if times <= 0:\n print(\"Please enter a positive integer\")\n elif times == 1:\n print(\"Fibonacci series:\")\n print(n1)\n else:\n print(\"Fibonacci series:\")\n while i < times:\n print(n1)\n nth = n1 + n2\n # update values\n n1 = n2\n n2 = nth\n i += 1\nfibonacci(input(\"How many times: \"))\n","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138693152","text":"import network, util\nfrom gettext import lgettext as _\n\nPROTOCOL_INFO = {\n \"name\": \"Digg\",\n \"version\": \"1.0\",\n \n \"config\": [\n \"receive_enabled\",\n \"username\",\n \"color\",\n ],\n\n \"authtype\": \"none\",\n \"color\": \"#E5E025\",\n\n \"features\": [\n \"receive\",\n ],\n\n \"default_streams\": [\n \"receive\",\n ],\n}\n\nURL_PREFIX = \"http://services.digg.com\"\n\nclass Client:\n def __init__(self, acct):\n self.account = acct\n\n def _story(self, data):\n m = {}; \n m[\"mid\"] = str(data[\"id\"])\n m[\"service\"] = \"digg\"\n m[\"account\"] = self.account[\"id\"]\n m[\"time\"] = data[\"submit_date\"]\n\n m[\"text\"] = data[\"title\"] + \"\\n\" + data[\"description\"]\n m[\"content\"] = \"<b>%(title)s</b><br />%(description)s\" % data\n m[\"html\"] = \"<b>%(title)s</b><br />%(description)s\" % data\n user = data[\"friends\"][\"users\"][0]\n\n m[\"sender\"] = {}\n m[\"sender\"][\"nick\"] = user[\"name\"]\n m[\"sender\"][\"id\"] = user[\"name\"]\n m[\"sender\"][\"image\"] = user[\"icon\"]\n m[\"sender\"][\"url\"] = \"http://digg.com/users/%s\" % user[\"name\"]\n m[\"sender\"][\"is_me\"] = user[\"name\"] == self.account[\"username\"]\n if user.get(\"fullname\", 0): m[\"sender\"][\"name\"] = user[\"fullname\"]\n \n m[\"url\"] = data[\"link\"]\n m[\"likes\"] = {\"count\": data[\"diggs\"]}\n\n m[\"html\"] = util.linkify(m[\"text\"],\n ((util.PARSE_HASH, '#<a class=\"hash\" href=\"%s#search?q=\\\\1\">\\\\1</a>' % URL_PREFIX),\n 
(util.PARSE_NICK, '@<a class=\"nick\" href=\"%s/\\\\1\">\\\\1</a>' % URL_PREFIX)))\n\n        m[\"content\"] = util.linkify(m[\"text\"],\n            ((util.PARSE_HASH, '#<a class=\"hash\" href=\"gwibber:/tag?acct=%s&query=\\\\1\">\\\\1</a>' % m[\"account\"]),\n            (util.PARSE_NICK, '@<a class=\"nick\" href=\"gwibber:/user?acct=%s&name=\\\\1\">\\\\1</a>' % m[\"account\"])))\n\n        return m\n\n    def _get(self, path, parse=\"story\", post=False, single=False, **args):\n        url = \"/\".join((URL_PREFIX, path)) + \"?appkey=http://gwibber.com&type=json\"\n        \n        data = network.Download(url, util.compact(args) or None, post).get_json()[\"stories\"]\n        if single: return [getattr(self, \"_%s\" % parse)(data)]\n        if parse: return [getattr(self, \"_%s\" % parse)(m) for m in data]\n        else: return []\n\n    def __call__(self, opname, **args):\n        return getattr(self, opname)(**args)\n\n    def receive(self):\n        return self._get(\"user/%s/friends/dugg\" % self.account[\"username\"])\n","sub_path":"rootfs/usr/share/pyshared/gwibber/microblog/digg.py","file_name":"digg.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448831784","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n\nclass Worker(QThread):\n    x = pyqtSignal(int)\n    y = pyqtSignal(int)\n\n    def run(self):\n        for i in range(10):\n            print('Iteration %d' % i)\n            self.x.emit(i)\n            k = False\n            if k is True:\n                print(\"Success!!!\")\n                # self.y.emit(i)\n            self.sleep(1)\n        pass\n\n\nclass MainWidget(QWidget):\n    def __init__(self, parent=None):\n        global k\n        super(MainWidget, self).__init__(parent)\n        self.setWindowTitle(\"Proceed?\")\n        self.setMinimumSize(400, 300)\n        self.thread = Worker()\n        self.line = QLabel()\n        self.btn1 = QPushButton('YES')\n        self.btn2 = QPushButton('NO')\n        layout = QGridLayout(self)\n        layout.addWidget(self.line, 0, 0, 1, 2)\n        layout.addWidget(self.btn1, 1, 0)\n        layout.addWidget(self.btn2, 1, 1)\n        self.th = Worker(self)\n        self.btn1.clicked.connect(self.slotStart)\n        self.btn2.clicked.connect(self.slotJump)\n\n    def slotStart(self):\n        self.th.x.connect(self.ShowX)\n        # th.y.connect(self.ShowY)\n        self.th.start()\n\n    def slotJump(self):\n        global k\n        # self.th.wait()\n        self.k = True\n\n    def ShowX(self, i):\n        self.line.setText(\"Iteration %d\" % i)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    test = MainWidget()\n    test.show()\n    sys.exit(app.exec_())","sub_path":"references/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"386777361","text":"class Txnz:\n    def __init__(txn, sno, narration, date, withAmt, deposit, balance):\n        txn.sno = sno\n        txn.narration = narration\n        txn.date = date\n        txn.withAmt = withAmt\n        txn.deposit = deposit\n        txn.balance = balance\n\n    def myfunc(txn):\n        print(\"Sno - \" , txn.sno)\n        print(\"Desc - \" , txn.narration)\n        print(\"Date - \" , txn.date)\n        print(\"Withdraw - \" , txn.withAmt)\n        print(\"deposit - \" , txn.deposit)\n        print(\"balance - \" , txn.balance)\n\nimport PyPDF2\n\nfile = open(\"C:\\\\Python38\\\\zYpython\\\\BankRepos\\\\Axis\\\\CreditCardStatement_AUG_2019.pdf\",'rb')\nreading = PyPDF2.PdfFileReader(file)\nif reading.isEncrypted:\n    reading.decrypt('')\n\npages = reading.getNumPages()\nprint(pages)\ni=0\nwhile i<pages:\n    pg_content = reading.getPage(i)\n    content = pg_content.extractText()\n    contentss=content.split('\\n')\n    print(contentss)\n 
i=i+1\n\n","sub_path":"BankRepoPArse/HDFCAnalyse/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459169526","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------\n# Name: matplotlib camembert\n# Purpose:\n# https://matplotlib.org/gallery/index.html\n# Author: Jean\n# http://apprendre-python.com/page-creer-graphiques-scientifiques-python-apprendre\n# Created: 23/01/2018\n# Copyright: (c) Jean 2018\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\n\"\"\"\n============\nBezier Curve\n============\n\nThis example showcases the PathPatch object to create a Bezier polycurve path\npatch.\n\"\"\"\n\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nPath = mpath.Path\n\nfig, ax = plt.subplots()\npp1 = mpatches.PathPatch(\n Path([(0, 0), (1, 0), (1, 1), (0, 0)],\n [Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),\n fc=\"none\", transform=ax.transData)\n\nax.add_patch(pp1)\nax.plot([0.75], [0.25], \"ro\")\nax.set_title('The red point should be on the path')\n\nplt.show()\n","sub_path":"mathplotlib quad_bezier.py","file_name":"mathplotlib quad_bezier.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331152438","text":"#!/usr/bin/env python\n\nimport sys\nimport rospy\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nimport actionlib\nfrom actionlib_msgs.msg import *\n# from geometry import rotate_pose_msg_by_euler_angles as rotate \nfrom math import pi\n# from geometry_msgs.msg import PoseArray, PoseStamped, PoseWithCovarianceStamped, Point, Quaternion, Twist\n\nclass GoToPose():\n def __init__(self):\n # rospy.init_node('nav_test', anonymous=False)\n\n #what to do if shut down (e.g. 
ctrl + C or failure)\n rospy.on_shutdown(self._shutdown)\n \n #tell the action client that we want to spin a thread by default\n self.move_base = actionlib.SimpleActionClient(\"move_base\", MoveBaseAction)\n rospy.loginfo(\"waiting for the action server to come up...\")\n #allow up to 5 seconds for the action server to come up\n self.move_base.wait_for_server(rospy.Duration(5))\n\n #we'll send a goal to the robot to tell it to move to a pose that's near the docking station\n self.goal = MoveBaseGoal()\n self.goal.target_pose.header.frame_id = 'odom'\n self.goal.target_pose.header.stamp = rospy.Time.now()\n \n \n def move_to_pose(self, x1, y1):\n # Goal\n self.goal.target_pose.pose.position.x = x1\n self.goal.target_pose.pose.position.y = y1\n self.goal.target_pose.pose.position.z = 0.0\n self.goal.target_pose.pose.orientation.x = 0.0\n self.goal.target_pose.pose.orientation.y = 0.0\n self.goal.target_pose.pose.orientation.z = -0.5\n self.goal.target_pose.pose.orientation.w = 0.1\n \n #start moving\n self.move_base.send_goal(self.goal)\n rospy.loginfo(\"Moving to desired position...\")\n #allow TurtleBot up to 60 seconds to complete task\n self.success = self.move_base.wait_for_result(rospy.Duration(60)) \n\n if not self.success:\n self.move_base.cancel_goal()\n rospy.loginfo(\"The base failed to reach the desired position :(\")\n else:\n # We made it!\n state = self.move_base.get_state()\n if state == GoalStatus.SUCCEEDED:\n rospy.loginfo(\"Destination reached!\")\n\n def _shutdown(self):\n rospy.loginfo(\"Stopped\")\n\n\nif __name__ == '__main__':\n try:\n GoToPose()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Exception thrown\")","sub_path":"scripts/move_to_pose.py","file_name":"move_to_pose.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"101193269","text":"import os\nimport sys\n\ntry:\n __import__('imp').find_module('pyopenreil')\n from pyopenreil.REIL import *\n from pyopenreil.utils import asm\n reiluse = True\nexcept ImportError:\n reiluse = False\n\n\nclass Reil:\n def __init__(self, mode, *args, **kwargs):\n self.mode = mode\n self.reiluse = reiluse\n return\n\n def symprint(self,app):\n self.widget.Symrwidget.editor.append(app)\n return\n\n def entry(self):\n self.widget.Symrwidget.editor.clear()\n self.widget.commandWidget.stopButton.setDisabled(False)\n code = self.widget.codeWidget.getCleanCodeAsByte(as_string=True, parse_string=True)\n code = tuple(synt for synt in code.split('\\n') if synt)\n if reiluse:\n try:\n viasyntax = asm.Reader(ARCH_X86,(code),addr = 0)\n store = CodeStorageTranslator(viasyntax)\n irl = store.get_func(0)\n for func in irl.bb_list: self.symprint(str(func).replace(\" \"*4,\" \"))\n except (ReadError,OSError):\n self.symprint(\"An error occurred when converting instructions into the symbolic form\")\n return\n else:\n self.symprint(\"pyopenREIL does not exist\")\n return\n","sub_path":"cemu/reil.py","file_name":"reil.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197388463","text":"def editDistance(str1, str2):\n \"\"\"creates the edit distance matrix between str1 and str2\n the actual edit distance will be at distances[-1][-1]\n @str1: the first string\n @str2: the second string\n @returns: the edit distance between str1 and str2\n \"\"\"\n distances = [] #create the distances matrix\n rowDim = len(str1) + 1\n 
colDim = len(str2) + 1\n for i in range(rowDim):\n distances.append([0] * colDim)\n\n #initialize the distances matrix\n #the first row shows how much it costs to convert str2[:i] to ''\n #the first column shows how much it costs to convert str1[:j] to ''\n# distances[0] = list(range(colDim))\n for i in range(rowDim):\n distances[i][0] = i\n for j in range(colDim):\n distances[0][j] = j\n\n for i in range(1, rowDim):\n for j in range(1, colDim):\n if(str1[i-1] == str2[j-1]): #characters at this position are the same\n distances[i][j] = distances[i-1][j-1] #so no cost to change\n else:\n distances[i][j] = min(\n distances[i-1][j], #deletion\n distances[i][j-1], #insertion\n distances[i-1][j-1]) + 1 #substitution\n\n return distances[-1][-1]\n#end edit distance\n","sub_path":"ECS_50/p2/editDist/editDist.py","file_name":"editDist.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273036888","text":"import serial\nimport os.path\nimport time\nimport csv\nimport sys\n\npath = os.path.join(os.getcwd(),'train.tsv')\n\ndef serial_read(start_time):\n current_time = time.time()\n elapsed_time = current_time - start_time\n elapsed_time = round(elapsed_time,1)\n if elapsed_time > 20:\n sys.exit()\n\n bytes_data = ser.readline()\n str_data = bytes_data.decode('utf-8')\n str_data = str_data.rstrip('\\r\\n')\n str_data = str(elapsed_time) + '\\t' + str_data \n print(str_data)\n write_csv(str_data)\n\n\ndef write_csv(str_data):\n #path = os.path.join(os.getcwd(),'train2.tsv')\n file = open(path, 'a')\n writer = csv.writer(file, lineterminator='\\n')\n csvlist = []\n csvlist.append(str_data)\n writer.writerow(csvlist)\n file.close()\n\nif __name__ == '__main__':\n ser = serial.Serial('/dev/cu.usbmodem1451',9600) #Connection to Arduino\n start_time = time.time()\n\n if not(os.path.exists(path)):\n file = open(path, 'w')\n file.write('Time'+'\\t'+'Sensor'+'\\n')\n file.close()\n\n while True:\n serial_read(start_time)\n\n\n\n\n","sub_path":"LeapSDK/src/model3.py","file_name":"model3.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462565096","text":"# -*- coding: utf-8 -*-\n\n# standard library imports\nimport argparse\nimport inspect\nimport ipaddress\nimport logging\nimport logging.config\nimport os\nimport sys\nimport textwrap\n\n# app imports\nfrom .__version__ import __author__, __version__\n\n\ndef setup_logger(args: argparse.Namespace) -> logging.Logger:\n # default to INFO so logging_level is always defined; the original used two\n # independent ifs, which reset a requested debug level back to INFO\n logging_level = logging.INFO\n if args.logging == \"debug\":\n logging_level = logging.DEBUG\n elif args.logging == \"warning\":\n logging_level = logging.WARNING\n\n default_logging = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"standard\": {\"format\": \"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"}\n },\n \"handlers\": {\n \"default\": {\n \"level\": logging_level,\n \"formatter\": \"standard\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\"\": {\"handlers\": [\"default\"], \"level\": logging_level}},\n }\n logging.config.dictConfig(default_logging)\n return logging.getLogger(__name__)\n\n\ndef setup_parser() -> argparse:\n \"\"\"Setup the parser for arguments passed into the module from the CLI.\n\n Returns:\n argparse object.\n \"\"\"\n parser = argparse.ArgumentParser(\n 
formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent(\n \"\"\"\nexploratory project to run a command across a list of Aruba controllers and save the responses locally\n \"\"\"\n ),\n epilog=f\"Made with Python by {__author__}\",\n fromfile_prefix_chars=\"2\",\n )\n parser.add_argument(\n \"-logging\",\n help=\"change logging output\",\n nargs=\"?\",\n choices=(\"debug\", \"warning\"),\n )\n group_cmd = parser.add_mutually_exclusive_group(required=True)\n group_cmd.add_argument(\"-cmd\", help=\"command to run\")\n group_cmd.add_argument(\"-cmdlist\", help=\"file containing commands to run\")\n\n group_ip = parser.add_mutually_exclusive_group(required=True)\n group_ip.add_argument(\"-ip\", help=\"IPv4 address of controller\")\n group_ip.add_argument(\n \"-iplist\", help=\"file containing IPv4 addresses of controllers\"\n )\n\n parser.add_argument(\n \"--syn\",\n dest=\"syn\",\n help=\"connect to controllers one at a time\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--decrypt\",\n dest=\"decrypt\",\n help=\"runs encrypt disable before desired command\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=\"%(prog)s {v}\".format(v=__version__),\n )\n parser.set_defaults(syn=False, decrypt=False)\n return parser\n\n\ndef is_valid_ipv4_address(ip_address: str) -> bool:\n try:\n ipaddress.ip_address(ip_address)\n except ValueError:\n return False\n return True\n\n\ndef validateinput(args) -> bool:\n log = logging.getLogger(inspect.stack()[0][3])\n if args.cmd:\n if not validate_cmd(args.cmd):\n log.error(f\"invalid command {args.cmd}\")\n sys.exit(-1)\n if args.cmdlist:\n if not os.path.isfile(args.cmdlist):\n log.error(f\"command list file {args.cmdlist} doesn't exist\")\n sys.exit(-1)\n return True\n\n\ndef validate_cmd(cmd: str) -> bool:\n # type check first: cmd.strip() would raise on non-strings, so the original\n # isinstance check at the bottom could never be reached for bad input\n if not isinstance(cmd, str):\n return False\n if cmd.strip() == \"\":\n return False\n if len(cmd.split(\" \")) == 1:\n return False\n return True\n","sub_path":"runcommand/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387429974","text":"\n# read a txt file\ndef read_txt(filename):\n # build the file path\n filepath = '../data/'+filename\n # read the file contents and return them\n with open(filepath,'r',encoding='utf-8') as f:\n return f.readlines()\n\nif __name__ == '__main__':\n read_txt('login.txt')\n print('_________'*10)\n '''walk every row returned by read_txt and convert it into a tuple, collecting them in a list'''\n arrs =[]\n for data in read_txt('login.txt'):\n arrs.append(tuple(data.strip().split(',')))\n print(arrs[1:])","sub_path":"tools/read_txt.py","file_name":"read_txt.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19266978","text":"# coding=utf-8\n\"\"\"\ncreated: 12/13\n\"\"\"\nimport base64\nimport os\nfrom io import BytesIO\nimport cv2\nfrom PIL import Image\nfrom psutil import disk_usage\nfrom common.msg_def import IMAGE_SUFFIX, PAGESIZE\nfrom handlers.basehd import BaseHandler, check_token, check_authenticated\nfrom tornado.log import app_log as weblog\nfrom common.global_func import get_user_info\nimport json\n\nshort_cut_size = (25, 25)\n\n\ndef get_disk_usage(self, path):\n if not path.startswith(\"/opt/data\"):\n path = os.path.join(self.settings.get('top_path'), path)\n if not os.path.exists(path):\n return u\"路径不存在\"\n use_info = disk_usage(path)\n total = round(use_info.total / 1024 / 1024 / 1024, 
2)\n # used = round(use_info.used / 1024 / 1024 / 1024, 2)\n free = round(use_info.free / 1024 / 1024 / 1024, 2)\n output = u\"已用{} %, 可用{} / 共{} G\".format(use_info.percent, free, total)\n return output\n\n\ndef get_imgshortcut_base64(realpath, suffix):\n if suffix == \"jpg\":\n suffix = \"jpeg\"\n img = Image.open(realpath)\n # img_size = os.path.getsize(realpath)\n # img = img.resize(short_cut_size, Image.ANTIALIAS)\n img.thumbnail(short_cut_size)\n # weblog.info(\"image wh: {} realpath:{}\".format(img.size, realpath))\n output_buffer = BytesIO()\n img.save(output_buffer, format=suffix)\n binary_data = output_buffer.getvalue()\n base64_data = base64.b64encode(binary_data).decode()\n prefix = \"data:image/gif;base64,\"\n return prefix + base64_data\n\n\ndef get_videoshortcut_base64(realpath, suffix=\"jpeg\"):\n\n cap = cv2.VideoCapture(realpath)\n if cap.isOpened():\n ret, frame = cap.read()\n else:\n ret, frame = False, None\n cap.release()\n if ret:\n # only build the thumbnail when a frame was actually read; the original\n # called Image.fromarray(frame) before this check and crashed on None\n img = Image.fromarray(frame)\n img.thumbnail(short_cut_size)\n output_buffer = BytesIO()\n img.save(output_buffer, format=suffix)\n\n weblog.info(\"video wh:{} realpath:{}\".format(img.size, realpath))\n binary_data = output_buffer.getvalue()\n base64_data = base64.b64encode(binary_data).decode()\n\n # img.save(\"E:\\\\test2.jpg\")\n prefix = \"data:image/gif;base64,\"\n\n return prefix + base64_data\n else:\n return None\n\n\ndef get_paths_app(file_path):\n # file_path = os.path.join('/opt/data', file_path)\n if \"\\\\\" in file_path:\n # normalize Windows separators; the old code stored this in an unused variable\n file_path = file_path.replace(\"\\\\\", \"/\")\n dir_list = list()\n file_list = list()\n shortcut_list = list()\n if os.path.exists(file_path):\n content = sorted(os.listdir(file_path))\n else:\n content = list()\n for name in content:\n all_name = os.path.join(file_path, name)\n if os.path.isdir(all_name):\n if name not in dir_list:\n dir_list.append(name)\n elif os.path.isfile(all_name):\n if name not in file_list:\n file_list.append(name)\n # suffix = all_name.split(\".\")[-1]\n # if suffix in [\"mp4\"]:\n # shortcut_list.append(get_videoshortcut_base64(all_name, \"jpeg\"))\n # elif suffix in IMAGE_SUFFIX:\n # shortcut_list.append(get_imgshortcut_base64(all_name, suffix))\n # else:\n # shortcut_list.append(None)\n\n dir_list.sort()\n # file_list.sort()\n return dir_list, file_list, shortcut_list\n\n\ndef get_paths(file_path, index=1):\n\n if \"\\\\\" in file_path:\n # normalize Windows separators; the old code stored this in an unused variable\n file_path = file_path.replace(\"\\\\\", \"/\")\n dir_list = list()\n file_list = list()\n shortcut_list = list()\n if os.path.exists(file_path):\n content = sorted(os.listdir(file_path))\n else:\n content = list()\n index = index - 1\n total = len(content) // PAGESIZE if len(content) % PAGESIZE == 0 else len(content) // PAGESIZE + 1\n\n if len(content) >= index * PAGESIZE:\n endsize = (index + 1) * PAGESIZE if (index + 1) * PAGESIZE < len(content) else len(content)\n content = content[index * PAGESIZE: endsize]\n\n for name in content:\n\n all_name = os.path.join(file_path, name)\n if os.path.isdir(all_name):\n if name not in dir_list:\n dir_list.append(name)\n elif os.path.isfile(all_name):\n if name not in file_list:\n file_list.append(name)\n\n suffix = all_name.split(\".\")[-1]\n if suffix in [\"mp4\"]:\n shortcut_list.append(get_videoshortcut_base64(all_name, \"jpeg\"))\n elif suffix in IMAGE_SUFFIX:\n shortcut_list.append(get_imgshortcut_base64(all_name, suffix))\n else:\n shortcut_list.append(None)\n # dir_list.sort()\n return dir_list, file_list, shortcut_list, total\n\n\nclass FSMainHandler(BaseHandler):\n\n # @authenticated\n 
@check_authenticated\n def get(self):\n curpath = self.get_argument(\"curpath\", None)\n action = self.get_argument(\"action\", None)\n curpage = int(self.get_argument(\"page\", '1'))\n # curpath = unquote_plus(curpath)\n if action is not None and action != \"APP\":\n curpath = os.path.dirname(curpath)\n # print(\"curpath:\", curpath)\n userinfo = get_user_info(self)\n # upload_path = self.settings.get('upload_path')\n # if curpath is None or curpath == \"\" or curpath == \"/\":\n # curpath = os.path.basename(upload_path)\n\n # new 20200821\n total = 1\n if curpage < 1:\n curpage = 1\n if curpath is not None and curpath.startswith(\"/\"):\n curpath = curpath[1:]\n if curpath is None or curpath == \"\" or curpath == \"/\":\n # curpath = os.path.basename(upload_path)\n curpath = \"\"\n dir_list = [\"public\", \"private\"]\n file_list = []\n shortcut_list = []\n elif curpath.startswith(\"private\"):\n private_path = os.path.join(self.top_path, \"private\", self.current_user)\n if not os.path.exists(private_path):\n os.makedirs(private_path)\n # private_curpath = curpath[0:7] + \"/\" + self.current_user + curpath[7:]\n if curpath == \"private\":\n dir_list = [self.current_user]\n file_list = []\n shortcut_list = []\n else:\n real_path = os.path.join(self.top_path, curpath)\n dir_list, file_list, shortcut_list, total = get_paths(real_path, curpage)\n else:\n real_path = os.path.join(self.top_path, curpath)\n dir_list, file_list, shortcut_list, total = get_paths(real_path, curpage)\n\n # real_path = os.path.join(self.top_path, curpath)\n # dir_list, file_list, shortcut_list = get_paths(real_path)\n if curpage > total:\n curpage = total\n weblog.info(\"page:{}/{}\".format(curpage, total))\n return self.render(\"fsmain.html\", userinfo=userinfo, curpath=curpath, dirs=dir_list, files=file_list,\n shortcut_list=shortcut_list, useage=get_disk_usage(self, curpath), page=curpage, total=total)\n\n @check_authenticated\n def post(self):\n pass\n\n def delete(self):\n pass\n\n\nclass AppFSMainHandler(BaseHandler):\n\n @check_token\n def get(self):\n curpath = self.get_argument(\"curpath\", None)\n action = self.get_argument(\"action\", None)\n loginname = self.get_argument(\"loginname\", None)\n # curpath = unquote_plus(curpath)\n if curpath is None:\n curpath = \"\"\n if loginname is None:\n return self.write(json.dumps({\"error_code\": 1, \"msg\": u\"用户未登陆\"}))\n if action is not None:\n curpath = os.path.dirname(curpath)\n # print(\"curpath:\", curpath)\n\n # userinfo = get_user_info(self)\n # upload_path = self.settings.get('upload_path')\n # if curpath is None or curpath == \"\" or curpath == \"/\":\n # curpath = os.path.basename(upload_path)\n #\n # real_path = os.path.join(self.top_path, curpath)\n # dir_list, file_list, shortcut_list = get_paths_app(real_path)\n\n # new 20200821\n if curpath is not None and curpath.startswith(\"/\"):\n curpath = curpath[1:]\n if curpath is None or curpath == \"\" or curpath == \"/\":\n # curpath = os.path.basename(upload_path)\n curpath = \"\"\n dir_list = [\"public\", \"private\"]\n file_list = []\n shortcut_list = []\n elif curpath.startswith(\"private\"):\n private_path = os.path.join(self.top_path, \"private\", loginname)\n if not os.path.exists(private_path):\n os.makedirs(private_path)\n # private_curpath = curpath[0:7] + \"/\" + self.current_user + curpath[7:]\n if curpath == \"private\":\n dir_list = [loginname]\n file_list = []\n shortcut_list = []\n else:\n real_path = os.path.join(self.top_path, curpath)\n dir_list, file_list, shortcut_list = 
get_paths_app(real_path)\n else:\n real_path = os.path.join(self.top_path, curpath)\n dir_list, file_list, shortcut_list = get_paths_app(real_path)\n\n return self.write(json.dumps({\"error_code\": 0, \"dirs\": dir_list, \"files\": file_list,\n \"curpath\": curpath, \"shortcut_list\": shortcut_list,\n \"useage\": get_disk_usage(self, curpath)}))\n","sub_path":"FSTornado/handlers/author/hd_main.py","file_name":"hd_main.py","file_ext":"py","file_size_in_byte":9435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"649005074","text":"from backbone_server.errors.duplicate_key_exception import DuplicateKeyException\n\nfrom backbone_server.individual.edit import IndividualEdit\nfrom backbone_server.individual.fetch import IndividualFetch\n\nfrom openapi_server.models.individual import Individual\n\nimport psycopg2\n\nimport logging\nimport uuid\n\nclass IndividualPost(IndividualEdit):\n\n def __init__(self, conn):\n self._logger = logging.getLogger(__name__)\n self._connection = conn\n\n\n def post(self, individual):\n\n with self._connection:\n with self._connection.cursor() as cursor:\n\n IndividualEdit.check_for_duplicate(cursor, individual, None)\n\n uuid_val = uuid.uuid4()\n\n stmt = '''INSERT INTO individuals\n (id)\n VALUES (%s)'''\n args = (uuid_val,)\n\n cursor.execute(stmt, args)\n\n IndividualEdit.add_attrs(cursor, uuid_val, individual)\n\n individual = IndividualFetch.fetch(cursor, uuid_val)\n\n return individual\n\n","sub_path":"server/backbone_server/individual/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78825351","text":"import threading\nimport socket\nname = input('Choose a name >>> ')\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(('127.0.0.1', 6789))\n\n\ndef client_receive():\n while True:\n try:\n message = client.recv(1024).decode('utf-8')\n if message == \"Enter your name : \":\n client.send(name.encode('utf-8'))\n else:\n print(message)\n except:\n print('Error!')\n client.close()\n break\n\n\ndef client_send():\n while True:\n message = f'{name:15}: {input()}'\n client.send(message.encode('utf-8'))\n\n\nreceive_thread = threading.Thread(target=client_receive)\nreceive_thread.start()\n\nsend_thread = threading.Thread(target=client_send)\nsend_thread.start()\n","sub_path":"Lab-1/Q4/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"395540834","text":"import unittest\nfrom unittest import mock\nimport cherry\n\n\nclass ApiTest(unittest.TestCase):\n\n def setUp(self):\n self.model = 'foo'\n self.text = 'random string'\n\n @mock.patch('cherry.api.Classify')\n def test_classify_api(self, mock_classify):\n cherry.classify(model=self.model, text=self.text)\n mock_classify.assert_called_once_with(model=self.model, text=self.text)\n\n @mock.patch('cherry.api.Trainer')\n def test_train_api(self, mock_train):\n cherry.train(model=self.model)\n mock_train.assert_called_once_with(\n self.model, categories=None, clf=None, clf_method='MNB',\n encoding=None, language='English', preprocessing=None, vectorizer=None,\n vectorizer_method='Count', x_data=None, y_data=None)\n\n @mock.patch('cherry.api.Performance')\n def test_performance_api(self, mock_performance):\n cherry.performance(model=self.model)\n 
mock_performance.assert_called_once_with(\n self.model, categories=None, clf=None, clf_method='MNB', encoding=None,\n language='English', n_splits=10, output='Stdout', preprocessing=None,\n vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)\n\n @mock.patch('cherry.api.Performance')\n def test_performance_api_model_clf_vectorizer(self, mock_performance):\n cherry.performance('foo', clf='clf', vectorizer='vectorizer')\n mock_performance.assert_called_with(\n 'foo', categories=None, clf='clf', clf_method='MNB',\n encoding=None, language='English', n_splits=10,\n output='Stdout', preprocessing=None, vectorizer='vectorizer',\n vectorizer_method='Count', x_data=None, y_data=None)\n\n # @mock.patch('cherry.api.Search')\n # def test_search_api(self, mock_search):\n # cherry.search(model='harmful', parameters={})\n # mock_search.assert_called_once_with(\n # 'harmful', clf=None, clf_method=None, cv=3, iid=False, method='RandomizedSearchCV',\n # n_jobs=1, parameters={}, vectorizer=None, vectorizer_method=None, x_data=None, y_data=None)\n\n @mock.patch('cherry.api.Display')\n def test_display_api(self, mock_display):\n cherry.display(model=self.model)\n mock_display.assert_called_once_with(\n self.model, categories=None, clf=None, clf_method='MNB',\n encoding=None, language='English', preprocessing=None,\n vectorizer=None, vectorizer_method='Count', x_data=None, y_data=None)\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17838712","text":"from time import time\nfrom tkinter import Canvas\nimport math\n\nfrom utils.config import ConfigValues\nfrom utils.internationalization import Internationalization\n\n\nclass FlatButton(Canvas):\n\n def __init__(self, parent, callback, arg=None, color=None, pressColor=None, fontSize=None, timeout=None, pressable=True):\n Canvas.__init__(self, parent, width=0, height=0, bd=-2, bg=color, highlightthickness=0, relief='ridge')\n\n Internationalization()\n\n self.config = ConfigValues()\n self.color = color\n if pressColor:\n self.pressColor = pressColor\n else:\n self.pressColor = self.config.values['colors']['mediumBlue']\n\n\n self.callback = callback\n self.arg = arg\n\n if fontSize:\n self.fontSize = fontSize\n else:\n self.fontSize = 13\n\n self.textColor = \"White\"\n self.text = \"\"\n self.oldText = \"\"\n self.bind(\"<Button-1>\", self.pressEvent)\n self.bind(\"<ButtonRelease-1>\", self.releaseEvent)\n self.bind(\"<Configure>\", self.centerText)\n self.timeout = 0\n\n if timeout:\n self.timeout = timeout\n\n self.timestamp = time()\n self.time_diff = 0\n self.counting = False\n self.press_arg = None\n\n self.pressable = pressable\n\n self.enabled = True\n\n def checkTimeout(self):\n if self.counting:\n self.time_diff = time() - self.timestamp\n if self.time_diff > self.timeout and self.oldText != \"\":\n self.setText(self.oldText)\n self.text = self.oldText\n self.oldText = \"\"\n self.callback(self.arg)\n else:\n self.text = _(\"Hold for\") + \"\\n\" + str(math.ceil(self.timeout - self.time_diff)) + \" s\"\n self.setText(self.text)\n return\n\n def setEnabled(self, state):\n if state == self.enabled:\n return\n self.enabled = state\n if not self.enabled:\n self.configure(bg=self.pressColor)\n else:\n self.configure(bg=self.color)\n\n def pressEvent(self, event):\n if not self.pressable:\n return\n\n if self.timeout > 0 and self.enabled:\n self.timestamp = time()\n self.oldText = 
self.text\n self.text = _(\"Hold for\") + \"\\n\" + str(self.timeout) + \" s\"\n self.setText(self.text)\n self.counting = True\n\n if self.callback and self.enabled:\n self.configure(bg=self.pressColor)\n if self.press_arg:\n self.callback(self.press_arg)\n\n def setCustomPressArgument(self, press_arg):\n self.press_arg = press_arg\n\n def releaseEvent(self, event):\n if not self.pressable:\n return\n\n if self.enabled:\n self.configure(bg=self.color)\n\n if self.counting:\n self.counting = False\n self.setText(self.oldText)\n self.text = self.oldText\n self.oldText = \"\"\n\n elif self.callback and self.enabled:\n arg = self.arg\n self.callback(arg)\n\n def setBackground(self, color=None):\n if not color:\n color = self.color\n self.configure(bg=color)\n\n def centerText(self, event):\n self.delete(\"all\")\n self.textId = self.create_text(0, 0, anchor=\"nw\", fill=self.textColor,font=\"HelveticaNeue \" + str(self.fontSize),\n text=self.text)\n xOffset = self.findXCenter(self.textId)\n yOffset = self.findYCenter(self.textId)\n self.move(self.textId, xOffset, yOffset)\n\n def findXCenter(self, item):\n coords = self.bbox(item)\n if coords is not None:\n xOffset = (self.winfo_width() / 2) - ((coords[2] - coords[0]) / 2)\n return xOffset\n return 0\n\n def findYCenter(self, item):\n coords = self.bbox(item)\n if coords is not None:\n yOffset = (self.winfo_height()/ 2) - ((coords[3] - coords[1]) / 2)\n return yOffset\n return 0\n\n def setText(self, text, color=None):\n if self.oldText == text:\n return\n\n self.text = text\n if color:\n self.textColor = color\n\n self.centerText(None)\n","sub_path":"src/utils/flatButton.py","file_name":"flatButton.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613495254","text":"from __future__ import unicode_literals\n\nimport logging\nimport pykka\n\nfrom mopidy import backend\nfrom .manager import GPIOManager\n\nlogger = logging.getLogger(__name__)\n\nclass GPIOBackend(pykka.ThreadingActor, backend.Backend):\n\n def __init__(self, config, audio):\n super(GPIOBackend, self).__init__()\n self.audio = audio\n self.manager = GPIOManager(self, config)\n\t\t\n def on_start(self):\n logger.info('Mopidy uses GPIO')\n\t\t\n def on_receive(self, message):\n logger.info('GPIO: on_receive started')\n action = message['action']\n if action == 'set_volume':\n value = message['value']\n if value < 0:\n value = 0\n elif value > 100:\n value = 100\n self.audio.set_volume(value)\n\n def mute(self):\n logger.info('GPIO: mute started')\n #self.audio.set_mute(True)\n self.audio.prepare_change()\n self.audio.set_uri(\"http://somafm.com/groovesalad.pls\")\n self.audio.start_playback()","sub_path":"mopidy_gpio/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453083383","text":"import math\nimport sys\nimport os\nimport argparse\nimport numpy as np\nfrom os.path import join as opj\nfrom matplotlib import pyplot as plt\nfrom collections import defaultdict\nfrom Bio import Seq, SeqIO, SeqRecord\nfrom itertools import chain\nfrom utils import *\n# from DNA.src.utils import *\nsys.setrecursionlimit(100000) # set the maximum depth as 1500\n\nclass PairEnd:\n def __init__(self,length,reads1,reads2,pair_dis=500):\n self.length = length\n self.pairs = {}\n self.pair_dis = pair_dis\n for i in range(len(reads1)):\n self.pairs[reads1[i]] = twin(reads2[i])\n 
self.pairs[twin(reads2[i])] = reads1[i]\n\n self.pairs[twin(reads1[i])] = reads2[i]\n # was keyed on twin(reads2[i]) again, which silently overwrote the entry\n # above; key reads2 itself so each read and reverse complement has a mate\n self.pairs[reads2[i]] = twin(reads1[i])\n \n def contain_pairs(self,contig):\n ans = []\n seqset = set()\n locmap = {}\n c_size = len(contig)\n for i in range(c_size - self.length+1):\n subc = contig[i : i+self.length]\n seqset.add(subc)\n locmap[subc] = i\n for read in self.pairs:\n if read in seqset:\n ans.append((read,locmap[read]))\n return ans\n \n def get_pair(self,p1):\n return self.pairs[p1]\n\n\ndef longest_path(graph, root,visited,depth,cached):\n if len(graph[root][1])==0:\n return [root]\n best = [root]\n cur = [root]\n # print(\"cur depth:{}\".format(depth))\n for child in graph[root][1]:\n if child not in visited:\n visited.add(child)\n if child in cached:\n cur = [root] + cached[child]\n else:\n tmp = longest_path(graph,child,visited,depth+1,cached)\n cur = [root] + tmp\n cached[child] = tmp\n visited.remove(child)\n if len(\"\".join(cur)) > len(\"\".join(best)) :\n best = cur\n if len(graph[root][1])>0 and best==[root]:\n # circle occur caused terminate\n # print(\"!!!!\")\n pass\n return best\n\ndef nodes_combine(nodes,k):\n assert(len(nodes) > 0)\n return nodes[0] + \"\".join(n[k-1:] for n in nodes[1:])\n\ndef choose_path(cur_path,cur_node,graph,k,target_pairs,pair_length):\n count = 0\n # iterate over a copy, since satisfied pairs are removed inside the loop\n for p,loc in list(target_pairs):\n if len(cur_path)>=loc:\n if p == cur_path[loc-pair_length:loc]:\n return cur_path\n else:\n target_pairs.remove( (p,loc) )\n count += 1\n if len(target_pairs) == 0:\n return \"\"\n else:\n for node in graph[cur_node][1]:\n next_path = cur_path + node[k-1:]\n ans = choose_path( next_path, node, graph, k, target_pairs, pair_length)\n if ans:\n # keep trying the remaining children instead of returning\n # after the first one, as the original did\n return ans\n return \"\"\n\ndef likily_nodes(couple):\n assert isinstance(couple,list)\n assert (len(couple)==2)\n n1 = couple[0]\n n2 = couple[1]\n if len(n1) == len(n2):\n length = len(n1)\n same_count = sum( [ n1[i]==n2[i] for i in range(length) ] )\n if same_count/length > 0.9:\n return True\n else:\n if len(n1) > len(n2):\n # swap so n1 is the shorter sequence; the original three-step swap\n # assigned n2 back to n1 and lost one of the strings\n n1, n2 = n2, n1\n i = 0\n j = 0\n same_count = 0\n length = len(n1)\n while i<len(n1) and j < len(n2):\n if n1[i] == n2[j]:\n same_count += 1\n i += 1\n j += 1\n else:\n j += 1\n if same_count/length > 0.9:\n return True\n return False\n \n\ndef bouble(graph,couple):\n if is_end(graph,couple[0]) or is_end(graph,couple[1] ):\n return False\n elif graph[couple[0]][1][0] != graph[couple[1]][1][0]:\n return False\n elif not is_2merge(graph,graph[couple[0]][1][0]):\n return False\n else:\n return likily_nodes(couple)\n\ndef tips(graph,couple):\n if is_end(graph,couple[0]) or is_end(graph,couple[1]):\n s_len = min(len(couple[0]),len(couple[1]))\n s0 = couple[0][:s_len+1]\n s1 = couple[1][:s_len+1]\n couple = [s0,s1]\n return likily_nodes(couple)\n return False\n\ndef is_signle(graph,node):\n return (len(graph[node][0]) == 0 and len(graph[node][1]) == 0)\n\ndef is_start(graph,node):\n return (len(graph[node][0]) == 0)\n\ndef is_end(graph,node):\n return (len(graph[node][1]) == 0)\n\ndef is_2split(graph,node):\n return (len(graph[node][1]) == 2)\n\ndef is_2merge(graph,node):\n return (len(graph[node][0]) == 2)\n\ndef is_2hub(graph,node):\n return is_2split(graph,node) and is_2merge(graph,node)\n\n# def is_nsplit(graph,node):\n# return len(graph[node][1])\n\n# def is_nmerge(graph,node):\n# return len(graph[node][0])\n\ndef compressed_graph_mining(graph, discription,mode=0):\n # using pair ends info\n k = discription['k']\n in_degree = discription['in_degree']\n out_degree = discription['out_degree']\n PES = 
discription['pair_ends']\n count_dict = discription['count_dict']\n results = []\n start_nodes = []\n end_nodes = []\n single_nodes = []\n split_nodes = []\n merge_nodes = []\n hub_nodes = []\n total_length = 0\n for node in graph:\n if len(graph[node][0]) == 0 : # in degree is 0\n start_nodes.append(node)\n assert in_degree[node] == 0, in_degree[node]\n if len(graph[node][1]) == 0 : # out degree is 0\n end_nodes.append(node) # both in-out degree is 0\n assert out_degree[node] == 0, out_degree[node]\n if len(graph[node][0]) == 0 and len(graph[node][1]) == 0:\n single_nodes.append(node)\n if len(graph[node][1]) > 1:\n # assert(len(graph[node][1]) == 2)\n split_nodes.append(node)\n if len(graph[node][0]) > 1:\n # assert(len(graph[node][0]) == 2)\n hub_nodes.append(node)\n if len(graph[node][0]) > 1:\n # assert(len(graph[node][0]) == 2)\n merge_nodes.append(node)\n total_length += len(node)\n print(\"All asserts passed.\")\n print(\"Split nodes number:{}\".format(len(split_nodes)))\n print(\"Merge nodes number:{}\".format(len(merge_nodes)))\n print(\"Hub nodes number:{}\".format(len(hub_nodes)))\n print(\"Start nodes number:{}\".format(len(start_nodes)))\n print(\"End nodes number:{}\".format(len(end_nodes)))\n print(\"Sigle nodes number:{}\".format(len(single_nodes)))\n print(\"The Start node and End nodes maybe same(single node case),Let's remove them.\")\n for node in single_nodes:\n # print(len(node))\n results.append(node)\n total_length -= len(node)\n start_nodes.remove(node)\n end_nodes.remove(node)\n print(\"Length:{}\".format(total_length))\n # assert(len(start_nodes) == 4)\n \n if mode==0:\n #### using pair end\n visited = set()\n # for node in start_nodes[0:2]:\n # visited.add(node)\n # path = longest_path(graph,node,visited,0)\n # for p in path:\n # visited.add(p)\n # ans = nodes_combine(path,k)\n # print(ans)\n # results.append(ans)\n for node in split_nodes:\n single_pairs = PES.contain_pairs(node)\n target_pairs = []\n for pair,loc in single_pairs:\n target_pairs.append(( PES.get_pair(pair),loc+PES.pair_dis ))\n right_path = choose_path(node,node,graph,k,target_pairs,PES.length)\n checked_len = len(node) - (k-1)\n cur_check = node\n nxt_check = node\n while right_path is not None and (checked_len < len(right_path)- (k-1)):\n choice = 0\n if len(graph[cur_check][1]) == 0:\n break\n # nxt_check = graph[cur_check][1][0]\n for i in range( len(graph[cur_check][1]) ):\n cur = graph[cur_check][1][i]\n if cur == right_path[checked_len:checked_len+len(cur)]:\n choice = i\n nxt_check = cur\n break\n checked_len += len(graph[cur_check][1][choice]) - (k-1)\n graph[cur_check][1] = [ graph[cur_check][1][choice] ]\n cur_check = nxt_check\n\n for i,node in enumerate(start_nodes[0:]):\n visited.add(node)\n cached = {}\n path = longest_path(graph,node,visited,0,cached)\n for p in path:\n visited.add(p)\n ans = nodes_combine(path,k)\n print(\"get and answer from star_node{}\".format(i))\n results.append(ans)\n elif mode == 1:\n ##### not using pairend, just simple search, considering the circle between two nodes\n visited = set()\n for node in start_nodes:\n cur_node = node\n ans = cur_node\n while len(graph[cur_node][1]) > 0:\n childs = graph[cur_node][1]\n nxt_node = \"\"\n if len(childs) == 1:\n if childs[0] not in visited:\n nxt_node = childs[0]\n elif len(childs) == 0:\n break\n else:\n if len(childs)==2 and likily_nodes(childs):\n for child in childs:\n if child not in visited:\n nxt_node = child\n break\n else:\n max_len = 0\n for child in childs:\n if child not in visited:\n if 
has_kmer(graph[child][1],cur_node):\n ans += child[k-1:]\n ans += cur_node[k-1:]\n # print(ans)\n visited.add(child)\n continue\n if len(child) > max_len:\n nxt_node = child\n if nxt_node == \"\":\n break\n else:\n ans += nxt_node[k-1:]\n visited.add(nxt_node)\n cur_node = nxt_node\n results.append(ans)\n else:\n ####### delete edge only, keep node alive.\n visited = set()\n for node in start_nodes:\n cur_node = node\n ans = cur_node\n while len(graph[cur_node][1]) > 0:\n childs = graph[cur_node][1]\n nxt_node = \"\"\n if len(childs) == 1:\n nxt_node = childs[0]\n elif len(childs) == 0:\n break\n else:\n if len(childs)==2 and likily_nodes(childs):\n for child in childs:\n if child not in visited:\n nxt_node = child\n break\n else:\n max_len = 0\n for child in childs:\n if has_kmer(graph[child][1],cur_node):\n ans += child[k-1:]\n ans += cur_node[k-1:]\n # print(ans)\n graph[child][1].remove(cur_node)\n graph[cur_node][1].remove(child)\n continue\n if len(child) > max_len:\n nxt_node = child\n if nxt_node == \"\":\n break\n else:\n ans += nxt_node[k-1:]\n graph[cur_node][1].remove(nxt_node)\n cur_node = nxt_node\n results.append(ans)\n return results\n\n\ndef kmerize(seq,k,count_dict, graph, in_degree, out_degree):\n for i in range(len(seq)-k +1 -1):\n kmer = seq[i:i+k]\n nxtkmer = seq[i+1:i+1+k]\n if kmer not in count_dict:\n graph[kmer] = [[],[]] # 0 is in and 1 is out\n out_degree[kmer] = 0\n in_degree[kmer] = 0\n count_dict[kmer] = 0\n if nxtkmer not in count_dict:\n graph[nxtkmer] = [[],[]]\n out_degree[nxtkmer] = 0\n in_degree[nxtkmer] = 0\n count_dict[nxtkmer] = 0\n \n if nxtkmer not in graph[kmer][1]:\n graph[kmer][1].append(nxtkmer)\n if kmer not in graph[nxtkmer][0]:\n graph[nxtkmer][0].append(kmer)\n\n out_degree[kmer] += 1\n in_degree[nxtkmer] += 1\n \n count_dict[kmer] += 1\n \n count_dict[seq[-k:]] += 1\n\ndef has_kmer(kmerlist,kmer):\n for km in kmerlist:\n if km == kmer:\n return True\n return False\n\ndef little_check(G):\n for node in G:\n for nxtnode in G[node][1]:\n assert( has_kmer(G[nxtnode][0],node) )\n for befnode in G[node][0]:\n assert( has_kmer(G[befnode][1],node) )\n\nclass DBG():\n \"\"\" The DBG Algorithm to implement the de novo problem. 
\"\"\"\n def __init__(self,k=29,step=1,limit=1):\n print(\"Initializing...\")\n self.k = k\n self.step = step\n self.limit = limit\n self.count_dict = None\n self.nodes = None\n self.graph = None\n self.in_degree = None\n self.out_degree = None\n self.pair_ends = None\n self.avg_coverage = 0\n\n def load_data(self,data_dir='../data/data1',file_type='short'):\n filenames = os.listdir(data_dir)\n rlist = []\n for fid,fname in enumerate(filenames):\n file_prefix = fname.split('.')[0].split('_')[0]\n if file_type!=\"all\" and file_prefix != file_type:\n continue\n print(\"Get data from {}\".format(fname))\n file_path = opj(data_dir,fname)\n reads = SeqIO.parse(file_path,'fasta')\n rlist.append(reads)\n print(\"Load the sequences in {} done\".format(data_dir))\n print(\"Building counting dict for them...\")\n return rlist\n\n def build_graph(self,reads_list,freq_limit=0):\n assert(isinstance(reads_list,list))\n graph={}\n k = self.k\n count_dict = defaultdict(int)\n in_degree = defaultdict(int)\n out_degree = defaultdict(int)\n cache1 = []\n cache2 = []\n for r_id,reads in enumerate(reads_list):\n for read in reads:\n seq = str(read.seq)\n kmerize(seq,k,count_dict,graph,in_degree,out_degree)\n kmerize(twin(seq),k,count_dict,graph,in_degree,out_degree)\n # kmerize(reverse(seq),k,count_dict,graph,in_degree,out_degree)\n # kmerize(reverse(twin(seq) ),k,count_dict,graph,in_degree,out_degree)\n if r_id == 0:\n cache1.append(seq)\n else:\n cache2.append(seq)\n if freq_limit > 1:\n new_dict = {}\n new_graph = {}\n new_in_degree = {}\n new_out_degree = {}\n for kmer in count_dict:\n if count_dict[kmer] >= freq_limit:\n new_dict[kmer] = count_dict[kmer]\n visited = set()\n for kmer in new_dict:\n new_graph[kmer] = [[],[]]\n new_in_degree[kmer] = 0\n new_out_degree[kmer] = 0\n for node in front_seq(kmer):\n if node in new_dict:\n new_graph[kmer][1].append(node)\n new_out_degree[kmer] += 1\n for node in back_seq(kmer):\n if node in new_dict:\n new_graph[kmer][0].append(node)\n new_in_degree[kmer] += 1\n count_dict = new_dict\n graph = new_graph\n in_degree = new_in_degree\n out_degree = new_out_degree\n \n self.pair_ends = PairEnd(len(seq),cache1,cache2)\n\n print(\"Size of count dict:{}\".format(len(count_dict)))\n print(\"Number of the graph nodes:{}\".format(len(graph)))\n self.graph = graph\n self.in_degree = in_degree\n self.out_degree = out_degree\n self.count_dict = count_dict\n count = 0\n for kmer in self.in_degree:\n if self.in_degree[kmer] == 0:\n count += 1\n print(\"Before Compress, 0 Degree count:{}\".format(count))\n \n def graph_simplify(self):\n old_graph = self.graph\n new_graph = {}\n new_in = defaultdict(int)\n new_out = defaultdict(int)\n new_dict = defaultdict(int)\n done = set()\n k = self.k\n node_num = 0\n nodes = []\n for kmer in old_graph:\n if kmer in done:\n continue\n node_num += 1\n new_node = kmer\n cur_kmer = kmer\n done.add(kmer)\n size = self.count_dict[kmer]\n\n kmer = cur_kmer\n while len(old_graph[kmer][0]) == 1:\n befkmer = old_graph[kmer][0][0]\n if len(old_graph[befkmer][1]) == 1:\n if befkmer in done:\n break\n done.add(befkmer)\n new_node = \"\".join(befkmer[:-k+1]) + new_node\n # new_node = befkmer\n kmer = befkmer\n size += self.count_dict[kmer]\n else:\n break\n\n kmer = cur_kmer\n while len(old_graph[kmer][1]) == 1:\n nxtkmer = old_graph[kmer][1][0]\n if len(old_graph[nxtkmer][0]) == 1:\n if nxtkmer in done:\n break # it will be ok without this judgement\n done.add(nxtkmer)\n new_node = new_node + \"\".join(nxtkmer[k-1:] )\n kmer = nxtkmer\n size += 
self.count_dict[kmer]\n else:\n break\n \n if new_node not in nodes:\n nodes.append(new_node)\n new_graph[new_node] = [[],[]]\n new_dict[new_node] = 0\n new_out[new_node] = 0\n new_in[new_node] = 0\n new_dict[new_node] += size/(len(new_node) - k + 1)\n \n total_coverage = 0\n print(\"Node Num:{}\".format(node_num))\n for i in range(node_num):\n node1 = nodes[i]\n total_coverage += new_dict[node1]\n for j in range(i+1,node_num):\n node2 = nodes[j]\n if node1[-k+1:] == node2[:k-1]:\n new_graph[node1][1].append(node2)\n new_out[node1] += 1\n new_graph[node2][0].append(node1)\n new_in[node2] += 1\n if node2[-k+1:] == node1[:k-1]:\n new_graph[node2][1].append(node1)\n new_out[node2] += 1\n new_graph[node1][0].append(node2)\n new_in[node1] += 1\n \n self.avg_coverage = total_coverage/len(new_dict)\n little_check(new_graph)\n self.graph = new_graph\n self.in_degree = new_in\n self.out_degree = new_out\n self.count_dict = new_dict\n print(\"Compressed Graph size:{}\".format(len(self.graph)))\n\n def problem_handling(self):\n graph = self.graph\n start_nodes = []\n for node in graph:\n if is_start(graph,node):\n start_nodes.append(node)\n visited = set()\n for node in self.count_dict:\n if (node in visited) or (node not in graph):\n continue\n cur_node = node\n visited.add(cur_node)\n while( len(graph[cur_node][1]) > 0 ):\n childs = graph[cur_node][1]\n if len(childs) == 2:\n if tips(graph,childs) or bouble(graph,childs):\n if self.count_dict[childs[0]] < self.count_dict[childs[1]]:\n remove_id = 0\n else :\n remove_id = 1\n \n removed_node = childs[remove_id]\n for grand_father in graph[removed_node][0]:\n graph[grand_father][1].remove(removed_node)\n for grand_child in graph[removed_node][1]:\n graph[grand_child][0].remove(removed_node)\n graph.pop(removed_node)\n\n # graph[cur_node][1] = [childs[1-remove_id]]\n # for grand_child in graph[childs[1-remove_id]][1]:\n # graph[grand_child][0] = graph[cur_node][1]\n # cur_node = grand_child\n else:\n cur_node = childs[0]\n else:\n break\n visited.add(cur_node)\n print(\"Re processed Graph size:{}\".format(len(graph)))\n\n def show_graph(self,dotpath,idtable=None):\n node2id = {}\n id2node = {}\n G = self.graph\n g_size = len(G)\n for id,node in enumerate(G):\n node2id[node] = id\n id2node[id] = node\n lines = [\"digraph G {\", \"graph [rankdir=LR, fontname=\\\"Courier\\\"];\", \"node [shape=record];\"]\n for id in range(g_size):\n node = id2node[id]\n lines.append(\"{}[label=\\\"{}({})\\\"];\".format(id,id,len(node)) )\n for id in range(g_size):\n node = id2node[id]\n for child in G[node][1]:\n child_id = node2id[child]\n lines.append(\"{} -> {} ;\".format(id,child_id))\n # for child in G[node][0]:\n # child_id = node2id[child]\n # lines.append(\"{} -> {} ;\".format(child_id,id))\n lines.append(\"}\")\n with open(dotpath,'w') as f:\n f.write('\\n'.join(lines))\n if idtable is not None:\n with open(idtable,'w') as f:\n for id in id2node:\n f.write(\"{} : {} \\n\".format(id,id2node[id]))\n return '\\n'.join(lines)\n\n def fit(self,data_dir='../data/data1',file_type='short',freq_limit=0):\n rlist = self.load_data(data_dir,file_type)\n self.build_graph(rlist,freq_limit)\n self.graph_simplify()\n self.problem_handling()\n self.graph_simplify()\n # node_len_lst = []\n # for node in self.graph:\n # node_len_lst.append(len(node))\n # plt.hist(node_len_lst,bins=100)\n # plt.show() \n\n def get_answers(self,mode='simple',graph_path=None,table_path=None):\n discription = { 'in_degree':self.in_degree,'count_dict':self.count_dict,\n 
'out_degree':self.out_degree,'k':self.k,'pair_ends':self.pair_ends }\n if (graph_path is not None) :\n self.show_graph(graph_path,table_path)\n output = compressed_graph_mining(self.graph,discription)\n print(\"Number of results:{}\".format(len(output)))\n # print(output)\n return output\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n dbg = DBG(k=args.k,step=args.step,limit=args.limit)\n dbg.fit(args.data_dir,args.file_type,args.limit)\n res = dbg.get_answers( mode=args.mode,\n graph_path='./graph_2.dot',table_path='./id2node_2.txt')\n if args.top == 0:\n n = len(res)\n else:\n n = args.top\n seq_len = []\n for i,seq in enumerate(res):\n seq_len.append((i,len(seq)))\n seq_len = sorted(seq_len,key=lambda x: -x[1])\n ans = [res[x[0]] for x in seq_len[:n]]\n # print(ans)\n result_name = args.result_name\n with open(opj(args.result_dir, result_name) ,'w') as fout:\n for i,seq in enumerate(ans):\n fout.write(\">short_read_{}/1\".format(i))\n fout.write(\"\\n\")\n fout.write(seq)\n fout.write(\"\\n\")\n","sub_path":"src/dbg2.py","file_name":"dbg2.py","file_ext":"py","file_size_in_byte":23850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471392720","text":"#-------------------------------------------------------------------------------\n# Name: district LC extraction\n# Purpose: This python-based script for arcGIS toolbox of extracting built-up\n# area for each district in 2000 and 2010.\n#\n# Author: Ace\n#\n# Created: 09/03/2015\n# Copyright: (c) Ace 2015\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\nimport arcpy, os, sys\nfrom arcpy.sa import *\n\nrawPath=sys.argv[1] #The work directory of the file\nc2000workspace=sys.argv[2] #2000 land cover map path\nc2010workspace=sys.argv[3] #2010 land cover map path\nwantedClass=sys.argv[4] #The land cover scheme number for built-up\nstudyarea=sys.argv[5] #The dataset for storing the extracting info\noutputClassClip=sys.argv[6] #The input shapefile (features layer) name\n\narcpy.CheckOutExtension(\"Spatial\")\narcpy.env.overwriteOutput=True\n\nenv.workspace=rawPath\n\ndef extractClass(Img,wantclass):\n # extract the built-up from a raster land cover map based on the land cover scheme number\n fileName=os.path.basename(Img).rstrip(os.path.splitext(Img)[1])\n outName=\"Bu\"+fileName[1:5]\n outCon= Con(Img==wantclass,1,0)\n outCon.save(rawPath+\"\\\\\"+outName)\n return outName\n\ndef countBuPx(Img,district):\n # count the number of pixel for built-up with a zonal statistic tool\n outZonalName=Img+\"_dist\"\n outZonal=ZonalStatistics(district, \"OBJECTID_1\", Img, \"SUM\", \"DATA\")\n outZonal.save(outZonalName)\n\n #convert roadBuf to points to extract zonalPx values\n outPntName=Img+\"_distPnt\"\n arcpy.FeatureToPoint_management(district, outPntName, \"INSIDE\")\n ExtractMultiValuesToPoints(outPntName, [[outZonalName]], \"NONE\")\n return outPntName\n\ndef assignBuCount(district,Pnt):\n # update the pixel counts of built-up in the output shapefile based on the zonal statistics layers\n\n # Use ListFields to get a list of field objects\n fieldObjList = arcpy.ListFields(Pnt)\n\n # Create an empty list that will be populated with field names\n fieldNameList = []\n\n # For each field in the object list, add the field name to the\n # name list. 
If the field is required, exclude it, to prevent errors\n for field in fieldObjList:\n if field.name==\"distance\" or field.name==\"Bu2000\" or field.name==\"Bu2010\" or field.name==\"District\":\n continue\n if not field.required:\n fieldNameList.append(field.name)\n\n # Execute DeleteField to delete all fields in the field list.\n arcpy.DeleteField_management(Pnt, fieldNameList)\n\n fieldObj= arcpy.ListFields(Pnt)\n fieldName=[]\n for field in fieldObj:\n fieldName.append(field.name)\n\n # Update Bu areas within the districts\n for field in fieldName:\n if field==\"Bu2000\":\n BuField=\"Bu2000\"\n\n elif field==\"Bu2010\":\n BuField=\"Bu2010\"\n\n else:\n continue\n\n arcpy.AddField_management(district, BuField, \"FLOAT\")\n rows1=arcpy.UpdateCursor(district,sort_fields=\"District A\")\n for row1 in rows1:\n rows2 = arcpy.SearchCursor(Pnt,sort_fields=\"District A\")\n for row2 in rows2:\n if row2.getValue(\"District\")==row1.getValue(\"District\"):\n row1.setValue(BuField,float(row2.getValue(BuField))*12.5*12.5)\n rows1.updateRow(row1)\n break\n del row2\n del rows2\n del row1\n del rows1\n\n#-------------------------------------------------------------------------------\n# driver section: the original called helpers from a different script\n# (processbuf, countBufPx, assignBufCount) and undefined variables; call the\n# functions defined above on the input study area instead\ncopyStudyarea=studyarea+\"_LCData\"\narcpy.Copy_management(studyarea, copyStudyarea)\nBuLayer1=extractClass(c2000workspace,wantedClass)\nBuLayer2=extractClass(c2010workspace,wantedClass)\n\nzonalStat1=countBuPx(BuLayer1,copyStudyarea)\nassignBuCount(copyStudyarea,zonalStat1)\n\nzonalStat2=countBuPx(BuLayer2,copyStudyarea)\nassignBuCount(copyStudyarea,zonalStat2)\n\n","sub_path":"dist_LCLU.py","file_name":"dist_LCLU.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512801104","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, ReLU, Softmax\n\nfrom models.utils import load_mnist\n\nimport sys\nimport numpy as np\n\nnp.set_printoptions(threshold=sys.maxsize)\n\nfrom numpy.random import seed\nseed(42)# keras seed fixing\n# import tensorflow as tf\n# tf.random.set_seed(42)# tensorflow seed fixing\n\nmodel = Sequential()\n\nmodel.add(Conv2D(20, (5, 5), input_shape=(28, 28, 1), name='conv1'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Conv2D(50, (5, 5), name='conv2'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Flatten())\n\nmodel.add(Dense(units=500, name='ip1'))\nmodel.add(ReLU())\n\nmodel.add(Dense(units=10, name='ip2'))\nmodel.add(Softmax())\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()\n\nprint(x_test[:10])\n\n# model.fit(x_train, y_train, batch_size=128, epochs=10)\n\nprint(model.evaluate(x_test, y_test))\n\n# print(model.layers[5].get_weights()[0])\n\nmodel.load_weights('leNet5_weights_sparse.h5', by_name=True)\n\nprint(model.evaluate(x_test, y_test))\n\n\n","sub_path":"old_experiments/convertor.py","file_name":"convertor.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"208308494","text":"# coding:utf-8\n\nfrom . 
import api, formart_json\nfrom flask import request, current_app, session\nfrom Travel.utils.response_code import RET\nfrom flask import jsonify\nfrom Travel import redis_store, db, constans\nfrom Travel.models import User\nfrom sqlalchemy.exc import IntegrityError # database integrity errors\nimport re\n\n@api.route(\"/login\", methods=[\"GET\"])\ndef login():\n \"\"\"User login\n params: name and mobile number, or a user id\n \"\"\"\n req_dict = request.args\n user_id = req_dict.get(\"id\")\n mobile = req_dict.get(\"mobile\")\n name = req_dict.get(\"name\")\n\n if all([user_id]):\n # check the number of failed login attempts\n user_ip = request.remote_addr\n\n try:\n # key must match the one incremented below (was \"acccess_nums_%s\")\n access_nums = redis_store.get(\"access_nums_%s\" % user_ip)\n except Exception as e:\n current_app.logger.error(e)\n else:\n if access_nums is not None and int(access_nums) >= constans.LOGIN_ERROR_MAX_TIMES:\n return formart_json.formattingjson(data={}, errno=RET.REQERR, errmsg=\"错误次数过多,请稍后重试\")\n\n # query the user object from the database by id\n user_id_int = int(user_id)\n try:\n user = User.query.filter_by(id=user_id_int).first()\n except Exception as e:\n current_app.logger.error(e)\n db.session.rollback()\n return formart_json.formattingjson(data={}, errno=RET.DBERR, errmsg=\"获取用户信息失败\")\n if user is None:\n return formart_json.formattingjson(data={}, errno=RET.DATAERR, errmsg=\"请输出正确id\")\n # verified successfully, save the session\n session[\"name\"] = user.name\n session[\"mobile\"] = user.mobile\n session[\"user_id\"] = user.id\n user_info = formart_json.serialize(model=user)\n return formart_json.formattingjson(data=user_info, errno=RET.OK, errmsg=\"登录成功\")\n\n else:\n if not all([name, mobile]):\n return formart_json.formattingjson(data={}, errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n if not re.match(r\"1[123456789]\\d{9}\", mobile):\n return formart_json.formattingjson(data={}, errno=RET.PARAMERR, errmsg=\"手机合格式错误\")\n\n # check the number of failed login attempts\n user_ip = request.remote_addr\n\n try:\n access_nums = redis_store.get(\"access_nums_%s\" % user_ip)\n except Exception as e:\n current_app.logger.error(e)\n else:\n if access_nums is not None and int(access_nums) >= constans.LOGIN_ERROR_MAX_TIMES:\n return formart_json.formattingjson(data={}, errno=RET.REQERR, errmsg=\"错误次数过多,请稍后重试\")\n\n # query the user object from the database by mobile number\n try:\n user = User.query.filter_by(mobile=mobile).first()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return formart_json.formattingjson(data={}, errno=RET.DBERR, errmsg=\"获取用户信息失败\")\n\n if user is None or not user.cheack_password(mobile):\n try:\n redis_store.incr(\"access_nums_%s\" % user_ip)\n redis_store.expire(\"access_nums_%s\" % user_ip, constans.LOGIN_ERROR_FORBID_TIME)\n except Exception as e:\n current_app.logger.error(e)\n return formart_json.formattingjson(data={}, errno=RET.DATAERR, errmsg=\"用户名或密码错误\")\n\n # verified successfully, save the session\n session[\"name\"] = user.name\n session[\"mobile\"] = user.mobile\n session[\"user_id\"] = user.id\n\n user_info = formart_json.serialize(model=user)\n return formart_json.formattingjson(data=user_info, errno=RET.OK, errmsg=\"登录成功\")\n\n\n@api.route(\"/register\", methods=[\"POST\"])\ndef register():\n \"\"\"Register a new user\"\"\"\n req_dict = request.get_json()\n\n user_id = req_dict.get(\"userId\")\n name = req_dict.get(\"name\")\n mobile = req_dict.get(\"mobile\")\n\n if not all([mobile, name, user_id]):\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n if not re.match(r\"1[123456789]\\d{9}\", mobile):\n return jsonify(errno=RET.PARAMERR, errmsg=\"手机合格式错误\")\n\n # insert into the database\n user = User(id=int(user_id), name=name, mobile=mobile)\n try:\n db.session.add(user)\n db.session.commit()\n except 
IntegrityError as e:\n        # Roll back the failed database operation\n        db.session.rollback()\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DATAEXIST, errmsg=\"手机号已存在\")\n    except Exception as e:\n        db.session.rollback()\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=\"查询数据库异常\")\n\n    # Verification succeeded - save the session\n    session[\"name\"] = user.name\n    session[\"mobile\"] = user.mobile\n    session[\"user_id\"] = user.id\n\n    user_info = formart_json.serialize(model=user)\n    return formart_json.formattingjson(data=user_info, errno=RET.OK, errmsg=\"注册成功\")\n\n\n","sub_path":"Travel/api_1_0/passport.py","file_name":"passport.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183225339","text":"# _*_ coding: utf_8 _*_\n\"\"\"\nCreated on 11/30/2016\n\n@author: Eddie\n\"\"\"\nfrom pyshgp.utils import merge_sets\nfrom pyshgp.push.interpreter import PushInterpreter\nfrom pyshgp.push.registered_instructions import get_instructions_by_pysh_type\nfrom pyshgp.gp.evolvers import SimplePushGPEvolver\nfrom pyshgp.gp.variation import (UniformMutation, Alternation,\n                                 VariationOperatorPipeline)\n\ncases = [(False, False, False, False),\n         (False, False, False, True),\n         (False, False, True, False),\n         (False, False, True, True),\n         (False, True, False, False),\n         (False, True, False, True),\n         (False, True, True, False),\n         (False, True, True, True),\n         (True, False, False, False),\n         (True, False, False, True),\n         (True, False, True, False),\n         (True, False, True, True),\n         (True, True, False, False),\n         (True, True, False, True),\n         (True, True, True, False),\n         (True, True, True, True)]\n\n\ndef one_bit_adder(c, a, b):\n    xor_1 = not a == b\n    s = not xor_1 == c\n\n    and_1 = b and c\n    and_2 = a and c\n    and_3 = a and b\n    c_out = and_1 or and_2 or and_3\n    return (s, c_out)\n\n\ndef two_bit_adder(a_1, b_1, a_2, b_2):\n    tmp_1 = one_bit_adder(0, a_1, b_1)\n    s_1 = tmp_1[0]\n\n    tmp_2 = one_bit_adder(tmp_1[1], a_2, b_2)\n    s_2 = tmp_2[0]\n    c_out = tmp_2[1]  # the final carry is the carry out of the second adder stage\n    return (s_1, s_2, c_out)\n\n\ndef error_function(program):\n    errors = []\n    for case in cases:\n        interpreter = PushInterpreter()\n        outputs = interpreter.run(program, case, ['_boolean', '_boolean', '_boolean'])\n        target = two_bit_adder(case[0], case[1], case[2], case[3])\n        e = 0\n\n        if outputs[0] is None:\n            e += 1e4\n        elif outputs[0] != target[0]:  # penalize a mismatching output\n            e += 1\n\n        if outputs[1] is None:\n            e += 1e4\n        elif outputs[1] != target[1]:\n            e += 1\n\n        if outputs[2] is None:\n            e += 1e4\n        elif outputs[2] != target[2]:\n            e += 1\n\n        errors.append(e)\n    return errors\n\n\natom_generators = list(merge_sets(get_instructions_by_pysh_type('_boolean'),\n                                  get_instructions_by_pysh_type('_exec')))\nmut = UniformMutation(rate=0.1)\nalt = Alternation(rate=0.1, alignment_deviation=10)\nops = [(alt, 0.2), (mut, 0.3), (VariationOperatorPipeline((mut, alt)), 0.5)]\n\n\nif __name__ == \"__main__\":\n    evo = SimplePushGPEvolver(n_jobs=-1, verbose=1, operators=ops,\n                              atom_generators=atom_generators,\n                              initial_max_genome_size=300,\n                              population_size=500, max_generations=300,\n                              simplification_steps=5000)\n    evo.fit(error_function, 3, ['_boolean', '_boolean'])\n","sub_path":"examples/low-level/two_bit_adder.py","file_name":"two_bit_adder.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491554247","text":"from src.bot.Commands import Commands\nfrom src.command.Idle import Idle\nfrom src.utils.Pathfinder import Pathfinder\n\n\nclass 
Bot:\n\n    def __init__(self):\n        self.player_id = None\n        self.game_state = None\n        self.character_state = None\n        self.other_bots = None\n        self.commands = None\n        self.pathfinder = Pathfinder()\n\n    def set_player_id(self, player_id):\n        self.player_id = player_id\n        self.commands = Commands(player_id)\n\n    def get_name(self):\n        raise NotImplementedError\n\n    def turn(self, game_state, character_state, other_bots):\n        self.game_state = game_state\n        self.character_state = character_state\n        self.other_bots = other_bots\n        self.pathfinder.set_game_state(game_state, other_bots)\n        return Idle(self.player_id)\n","sub_path":"src/bot/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490225977","text":"try:\n    from scrappyepfo.data_scraper import get_comp_list, get_comp_list_mca\nexcept ImportError:\n    from data_scraper import get_comp_list, get_comp_list_mca\nfrom fuzzywuzzy import process\nimport pprint\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\n@app.get(\"/epfo/{companyName}\")\nasync def readEpfo(companyName):\n    '''api call epfo'''\n    return perform_epfo(companyName)\n\n@app.get(\"/mca/{companyName}\")\nasync def readMca(companyName):\n    '''api call mca'''\n    return perform_mca(companyName)\n\n@app.get(\"/\")\nasync def root():\n    '''api call root'''\n    return {\"Status\":\"OK\" }\n\n@app.get(\"/devInfo/\")\nasync def devInfo():\n    '''api call dev'''\n    data = {\n        \"Name\" : \"Sagar Paul\",\n        \"Email\" : \"paul.sagar@yahoo.com\",\n        \"Github\" : \"https://github.com/KB-perByte\",\n    }\n    return data\n\ndef perform_epfo(name):\n    comp_list = get_comp_list(name)\n    pprint.pprint(comp_list[0])\n    return comp_list[0]\n\ndef perform_mca(name):\n    comp_list = get_comp_list_mca(name)\n    pprint.pprint(comp_list)\n    return comp_list\n\n    \n\n\n\n\n","sub_path":"scrappyepfo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368131261","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\nimport codecs\r\nimport sqlite3\r\ndef getStopWordS(StopWordPath):\r\n    fileread = None\r\n    try:\r\n        fileread = open(StopWordPath,\"r\")\r\n        while 1:\r\n            astr = fileread.readline()\r\n            if not astr:\r\n                break\r\n            print(astr)\r\n    finally:\r\n        if fileread:\r\n            fileread.close()\r\n\r\ndef DAO():\r\n    conn = sqlite3.connect('test.db')\r\n    conn.executemany(\"INSERT INTO WordFreq (Word, WordFreq) VALUES (?,1);\",add)\r\n    conn.commit()\r\n    print(\"新增条 %d 条\" %len(add))\r\n    conn.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    getStopWordS(\"E:\\\\文本处理\\\\算法相关\\\\哈工大停用词表.txt\")","sub_path":"word/freq/stopWords.py","file_name":"stopWords.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637633489","text":"'''\nRegular expression:\n\nIdentifier: looking for things\n\n\\d = any number\n\\D = anything but a number\n\\s = space\n\\S = anything but a space\n\\w = any word character (letter, digit or underscore)\n\\W = anything but a word character\n. = anything but a newline\n\\. = period\n\\b = word boundary (the zero-width edge of a word)\n\nModifier:\n{a,b} means in range == \\d{1,3} means we are expecting 1-3\n+ means match 1 or more\n? 
means match 0 or 1\n* means match 0 or more == can even match nothing (blank)\n$ match the end of the String\n^ match the beginning of the String\n| means either or == \d{1,3} | \d{5,7} means expecting 1-3 or 5-7\n[] means a character class == [A-Za-z] matches one letter, either uppercase or lowercase\n{x} expecting \"x\" amount (exactly x repeats)\n\n\nWhitespace characters: you don't see them, but they exist\n\n\\n new line\n\\s space\n\\t tab\n\\e escape\n\\f form feed\n\\r return\n\nDon't forget: these are special and need escaping when matched literally\n. + * ? [] $ () {}\n'''\n\nimport re\n\nexampleString = '''\nJessica is 15 years old, and Daniel is 27 years old.\nEdward is 96, and his grandfather, Oscar, is 202.\n'''\n\n'''now find regex for name, then find regex for age\nr'' just means it is a raw string, so backslashes are passed through unchanged'''\nages = re.findall(r'\d{1,3}', exampleString)\nnames = re.findall(r'[A-Z][a-z]*', exampleString)\n\nprint(ages)\nprint(names)\n\ncounter = 0\npeopleDict = {}\nfor name in names:\n    peopleDict[name] = ages[counter]\n    counter+=1\n\nprint(peopleDict)","sub_path":"Basic/test 27 - Regular Expression.py","file_name":"test 27 - Regular Expression.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"480441532","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTask 6.2b\n\nMake a copy of the script from task 6.2a.\n\nExtend the script:\nif the address was entered incorrectly, ask for the address again.\n\nRestriction: all tasks must be solved using only the topics covered so far.\n\"\"\"\na = input(\"Enter IP: \" )\nb = a.split(\".\")\n\nwhile True:\n    if len(b)==4 and b[0].isdigit() and b[1].isdigit() and b[2].isdigit() and b[3].isdigit() and 0<=int(b[0])<=255 and 0<=int(b[1])<=255 and 0<=int(b[2])<=255 and 0<=int(b[3])<=255:\n        if 1 <= int(b[0]) <= 223:\n            print(\"unicast\")\n        elif 224 <= int(b[0]) <= 239:\n            print(\"multicast\")\n        elif a == \"255.255.255.255\":\n            print(\"local broadcast\")\n        elif a == \"0.0.0.0\":\n            print(\"unassigned\")\n        else:\n            print(\"unused\")\n        break\n\n    a = input(\"Enter IP address again: \" )\n    b = a.split(\".\")\n\n","sub_path":"exercises/06_control_structures/task_6_2b.py","file_name":"task_6_2b.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544014224","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nfor num in range(101):\n    # special case for prime: 3 and 5 are prime, so label them instead of Fizz/Buzz\n    if num == 3 or num == 5:\n        print(num, \"is prime\")\n    elif num % 3 == 0 and num % 5 == 0:\n        print(\"FizzBuzz\")\n    elif num % 3 == 0 and num % 5 != 0:\n        print(\"Fizz\")\n    elif num % 5 == 0 and num % 3 != 0:\n        print(\"Buzz\")\n    else:\n        if num > 1:\n            for i in range(2,num):\n                if (num % i) == 0 :\n                    print(num)\n                    break\n            else:\n                print(num, \"is prime\")\n","sub_path":"Homework 5.py","file_name":"Homework 5.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265657520","text":"from sys import argv\nfrom os import remove\nfrom os.path import dirname, realpath, exists\n\npath = dirname(realpath(__file__))\n\ndef func():\n    if len(argv) < 3:\n        print(\"You forgot the vhost that you want to delete.\")\n        print('> cp vhost del {vhost}')\n    else:\n        url = '%s/../../../conf/vhost/%s.json' % (path, argv[2])\n        if exists(url):\n            remove(url)\n            print(\"Vhost \" + argv[2] + \" successfully deleted.\")\n        else:\n            print(\"Vhost \" + argv[2] + \" not 
found.\")\n","sub_path":"lib/controlers/vhost/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521382687","text":"import numpy as np\nimport sys\n\n\n\n\nno_of_args = len(sys.argv)\nfor i in range(1,no_of_args):\n if sys.argv[i] == \"--grid\":\n i = i + 1\n input_grid_path = sys.argv[i]\n elif sys.argv[i] == \"--value_policy\":\n i = i+1\n policy =sys.argv[i] \n\n\n\n\nwith open(input_grid_path) as inp:\n maze_data = inp.readlines() \nmaze_data = [x.strip() for x in maze_data]\n\ngrid_row = np.int(len(maze_data))\na = maze_data[1].split()\ngrid_col = np.int(len(a))\n\ni = 0\nj = 0\n\ngrid = np.zeros((grid_row,grid_col))\n\nfor i in range(grid_row):\n b = maze_data[i].split()\n for j in range(grid_col):\n grid[i,j] = b[j] \n j = j+1\n i = i +1\n\n\nstart = 0\nend = 0\nno_of_states = np.int((grid_row)*(grid_col))\n\nfor k in range(no_of_states):\n row =np.int(k / (grid_col))\n col = k-row*(grid_col) \n if (1<=row<=grid_row-2 and 1<=col<=grid_col-2):\n if grid[row,col] == 2:\n start = k \n if grid[row,col] == 3:\n end = k \n\n\n\n\n\n\n\n\n\n\n\nwith open(policy) as inp:\n policy_data = inp.readlines()\n \npolicy_data = [x.strip() for x in policy_data]\n\n\n# print(policy_data[0])\n# print(policy_data[1])\n\n\npath = []\n\n# print(policy_data)\n\nb = 0\na1 = 0\nb1 = 0\ns = start\ns = np.int(s)\n\nend = np.int(end)\n# print(start)\nout = str('')\n\nwhile s!= end:\n f =2*(s)\n a1 = policy_data[f].split()\n # print(a1)\n b2 = a1[1]\n #North\n # print(type(b1))\n b2 =b2[0]\n b1 = int(b2)\n\n if b1 == 0:\n s= s-grid_col\n path.append('N')\n out = out+str('N ')\n # print(s)\n if b1 == 1:\n s = s+grid_col\n path.append('S')\n out = out+str('S ')\n # print(s)\n if b1 == 2:\n s = s+1\n path.append('E')\n out = out+str('E ')\n # print(s)\n if b1 == 3:\n s = s-1\n # print(s)\n path.append('W')\n out = out+str('W ')\n s =np.int(s)\n\n # a = policy_data[k].split()\n # b = a[1]\n# i = np.int(len(out)) \n# out = out - str(out[i-1])\nprint(out[:-1])\n","sub_path":"Assignment-2/base/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"303606466","text":"import matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nrcParams['font.family'] = ['Heiti TC', 'Apple Color Emoji']\ncmap = plt.get_cmap(\"Set3\").colors\n\nlabels = ['村上春树', '渡边淳一', '马克西姆·高尔基', '冯友兰', '鲁迅', '赫尔曼·黑塞', '当年明月', 'Others']\n\nsizes = [10, 10, 3, 3, 3, 3, 3, 98]\n\nexplode = [.1, .1, .1, 0, 0, 0, 0, 0]\n\nfig1, ax1 = plt.subplots()\nfig1.patch.set_alpha(0.)\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=cmap)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()\n\n","sub_path":"archive/plt/authors.py","file_name":"authors.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615887334","text":"\"\"\"\nCopyright 2020 Goldman Sachs.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT 
WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\n\nimport numpy as np\nfrom scipy.integrate import odeint\nfrom lmfit import minimize, Parameters, report_fit\n\n\"\"\"\nStatistical models for the transmission of infectious diseases\n\"\"\"\n\n\nclass SIR:\n \"\"\"SIR Model\"\"\"\n\n @classmethod\n def calibrate(cls, xs: tuple, t: float, parameters: [Parameters, tuple]) -> tuple:\n \"\"\"\n SIR model derivatives at t.\n\n :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved\n :param t: time parameter, inactive for this model\n :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N\n :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\n \"\"\"\n s, i, r = xs\n\n if isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n N = parameters['N'].value\n elif isinstance(parameters, tuple):\n beta, gamma, N = parameters\n else:\n raise ValueError(\"Cannot recognize parameter input\")\n\n dSdt = - beta * s * i / N\n dIdt = beta * s * i / N - gamma * i\n dRdt = gamma * i\n\n return dSdt, dIdt, dRdt\n\n @classmethod\n def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float = 0.2, gamma: float = 0.1,\n beta_max: float = 10, gamma_max: float = 1, S0_fixed: bool = True, S0_max: float = 1e6,\n R0_fixed: bool = True, R0_max: float = 1e6, I0_fixed: bool = True, I0_max: float = 1e6) \\\n -> tuple:\n \"\"\"\n Produce a set of parameters for the SIR model.\n\n :param S0: initial number of susceptible in the population\n :param I0: initial number of infected in the population, usually set to 1\n :param R0: initial number of recovered/removed in the population, usually set to 0\n :param N: size of the population\n :param beta: transmission rate parameter\n :param gamma: recovery rate parameter\n :param beta_max: maximum value to consider for beta during parameter fitting\n :param gamma_max: maximum value of gamma to consider during parameter fitting\n :param S0_fixed: whether to keep S0 fixed during fitting\n :param S0_max: maximum value of S0 to consider during parameter fitting\n :param R0_fixed: whether to keep R0 fixed during fitting\n :param R0_max: maximum value of R0 to consider during parameter fitting\n :param I0_fixed: whether to keep I0 fixed during fitting\n :param I0_max: maximum value of I0 to consider during parameter fitting\n :return: tuple[Parameters, list]: (parameters, a list of the names of the variables for initial conditions)\n \"\"\"\n parameters = Parameters()\n parameters.add('N', value=N, min=0, max=N, vary=False)\n parameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\n parameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\n parameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\n parameters.add('beta', value=beta, min=0, max=beta_max)\n parameters.add('gamma', value=gamma, min=0, max=gamma_max)\n initial_conditions = ['S0', 'I0', 'R0']\n\n return parameters, initial_conditions\n\n\nclass SEIR:\n \"\"\"SEIR Model\"\"\"\n\n @classmethod\n def calibrate(cls, xs: tuple, t: float, parameters: [Parameters, tuple]) -> tuple:\n \"\"\"\n SEIR model derivatives at t.\n\n :param xs: variables that we are solving for, i.e. 
[S]usceptible, [E]xposed, [I]nfected, [R]emoved\n :param t: time parameter, inactive for this model\n :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, sigma, N\n :return: tuple, the derivatives dSdt, dEdt, dIdt, dRdt of each of the S, E, I, R variables\n \"\"\"\n s, e, i, r = xs\n\n if isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n sigma = parameters['sigma'].value\n N = parameters['N'].value\n elif isinstance(parameters, tuple):\n beta, gamma, sigma, N = parameters\n else:\n raise ValueError(\"Cannot recognize parameter input\")\n\n dSdt = -beta * s * i / N\n dEdt = beta * s * i / N - sigma * e\n dIdt = sigma * e - gamma * i\n dRdt = gamma * i\n\n return dSdt, dEdt, dIdt, dRdt\n\n @classmethod\n def get_parameters(cls, S0: float, E0: float, I0: float, R0: float, N: float, beta: float = 0.2, gamma: float = 0.1,\n sigma: float = 0.2, beta_max: float = 10, gamma_max: float = 1, sigma_max: float = 1,\n S0_fixed: bool = True, S0_max: float = 1e6, R0_fixed: bool = True, R0_max: float = 1e6,\n I0_fixed: bool = True, I0_max: float = 1e6, E0_fixed: bool = True, E0_max: float = 1e6) -> tuple:\n \"\"\"\n Produce a set of parameters for the SIR model.\n\n :param S0: initial number of susceptible in the population\n :param E0: initial number of exposed in the population\n :param I0: initial number of infected in the population, usually set to 1\n :param R0: initial number of recovered/removed in the population, usually set to 0\n :param N: size of the population\n :param beta: transmission rate parameter\n :param gamma: recovery rate parameter\n :param sigma: parameter controlling transition from exposed to infectious\n :param beta_max: maximum value to consider for beta during parameter fitting\n :param gamma_max: maximum value of gamma to consider during parameter fitting\n :param S0_fixed: whether to keep S0 fixed during fitting\n :param S0_max: maximum value of S0 to consider during parameter fitting\n :param E0_fixed: whether to keep E0 fixed during fitting\n :param E0_max: maximum value of E0 to consider during parameter fitting\n :param R0_fixed: whether to keep R0 fixed during fitting\n :param R0_max: maximum value of R0 to consider during parameter fitting\n :param I0_fixed: whether to keep I0 fixed during fitting\n :param I0_max: maximum value of I0 to consider during parameter fitting\n :return: tuple[Parameters, list]: (parameters, a list of the names of the variables for initial conditions)\n \"\"\"\n parameters = Parameters()\n parameters.add('N', value=N, min=0, max=N, vary=False)\n parameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\n parameters.add('E0', value=E0, min=0, max=E0_max, vary=not E0_fixed)\n parameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\n parameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\n parameters.add('beta', value=beta, min=0, max=beta_max)\n parameters.add('gamma', value=gamma, min=0, max=gamma_max)\n parameters.add('sigma', value=sigma, min=0, max=sigma_max)\n initial_conditions = ['S0', 'E0', 'I0', 'R0']\n\n return parameters, initial_conditions\n\n\nclass EpidemicModel:\n\n \"\"\"Class to perform solutions and parameter-fitting of epidemic models\"\"\"\n\n def __init__(self, model, parameters: tuple = None, data: np.array = None, initial_conditions: list = None,\n fit_method: str = 'leastsq', error: callable = None):\n \"\"\"\n A class to standardize fitting and solving epidemiological models.\n\n 
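        Note: residuals are computed between the odeint solution and `data`, so `data` must have shape (timesteps, n_state_variables), with columns in the model's state order (e.g. S, I, R for SIR).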
:param model: the model to use, currently a class in the form of SIR, SEIR above\n :param parameters: tuple, parameters to use for the model, defaults to the output of [model].get_parameters\n :param data: np.array, data that can be used to calibrate the model\n :param initial_conditions: list, initial conditions for the model\n :param fit_method: str, the method to use to minimize the (given) error. Available methods are those in the\n lmfit.minimizer.minimize function. Default is Levenberg-Marquardt least squares minimization.\n :param error: callable, control which residuals (and in what form) to minimize for fitting.\n \"\"\"\n self.model = model\n self.parameters = parameters\n self.data = data\n self.initial_conditions = initial_conditions\n self.fit_method = fit_method\n self.error = error\n self.result = None\n self.fitted_parameters = None\n\n def solve(self, time_range: np.ndarray, initial_conditions: list, parameters):\n \"\"\"\n Integrate the model ODEs to get a solution.\n\n :param time_range: the time range to solve for\n :param initial_conditions: the initial conditions for the solution\n :param parameters: the parameters for the solution\n :return:\n \"\"\"\n x = odeint(self.model.calibrate, initial_conditions, time_range, args=(parameters,))\n return x\n\n def residual(self, parameters: Parameters, time_range: np.arange, data: np.array):\n \"\"\"\n Obtain fit error (to minimize).\n\n :param parameters: parameters to use (which we are usually minimizing the residual for)\n :param time_range: time range for solution (over which we obtain the residual)\n :param data: data to fit the models too (i.e. compute residuals in terms of)\n :return:\n \"\"\"\n initial_conditions = []\n for variable in self.initial_conditions:\n initial_conditions.append(parameters[variable].value)\n\n # obtain solution given current initial conditions and parameters\n solution = self.solve(time_range, initial_conditions, parameters)\n\n # compute residual, using custom error function if it has been passed in\n residual = (solution - data).ravel() if self.error is None else self.error(solution, data, parameters)\n return residual\n\n def fit(self, time_range: np.arange = None, parameters: [Parameters, tuple] = None, initial_conditions: list = None,\n residual=None, verbose: bool = False, data: np.array = None):\n \"\"\"\n Fit the model based on data in the form np.array([X1,...,Xn])\n \"\"\"\n if data is None:\n if self.data is None:\n raise ValueError(\"No data to fit the model on!\")\n data = self.data\n if initial_conditions is not None:\n self.initial_conditions = initial_conditions\n if self.initial_conditions is None:\n raise ValueError(\"No initial conditions to fit the model with!\")\n if parameters is None:\n if self.parameters is None:\n raise ValueError(\"No parameters to fit the model with!\")\n parameters = self.parameters\n if time_range is None:\n time_range = np.arange(data.shape[0])\n if residual is None:\n residual = self.residual\n\n result = minimize(residual, parameters, args=(time_range, data), method=self.fit_method)\n self.result = result\n self.fitted_parameters = result.params.valuesdict()\n\n if verbose:\n report_fit(result)\n\n return result\n","sub_path":"gs_quant/models/epidemiology.py","file_name":"epidemiology.py","file_ext":"py","file_size_in_byte":11754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131759624","text":"import itertools\nfrom collections import defaultdict\n\nlines = open('input.txt', 
'r').read().strip().splitlines()\ndraws = lines[0].split(\",\")\n\nboards = []\nlookup = defaultdict(list)\nmarkers = []\nfor i in range(1, len(lines), 6):\n board = [l.split() for l in lines[i+1:i+6]]\n boards.append(board)\n markers.append([r[:] for r in [[False] * 5] * 5])\n for j, k in itertools.product(range(5), repeat=2):\n lookup[board[j][k]].append((i / 6, j, k))\n\n\nwinner = None\nwinners = set()\nfor number in draws:\n boards_with_number = lookup[number]\n for board_idx, i, j in boards_with_number:\n markers[board_idx][i][j] = True\n if all(markers[board_idx][i]) or all(m[j] for m in markers[board_idx]):\n winner = board_idx\n winners.add(board_idx)\n \n if len(winners) == len(boards):\n break\n else:\n continue\n \n break\n\nsum_unmarked = sum(int(boards[winner][i][j]) for i, j in itertools.product(range(5), repeat=2) if not markers[winner][i][j])\n\nresult = sum_unmarked * int(number)\nprint(\"Result: {}\".format(result))\n","sub_path":"2021/04/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46727195","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\n\n\nclass DataGenerator(tf.keras.utils.Sequence):\n def __init__(self, data, batch_size=6, dim=(768, 1024), n_channels=3, shuffle=True):\n \"\"\"\n Initialization\n \"\"\"\n self.data = data\n self.indices = self.data.index.tolist()\n self.dim = dim\n self.n_channels = n_channels\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.min_depth = 0.1\n self.on_epoch_end()\n\n def __len__(self):\n return int(np.ceil(len(self.data) / self.batch_size))\n\n def __getitem__(self, index):\n if (index + 1) * self.batch_size > len(self.indices):\n self.batch_size = len(self.indices) - index * self.batch_size\n # Generate one batch of data\n # Generate indices of the batch\n index = self.indices[index * self.batch_size : (index + 1) * self.batch_size]\n # Find list of IDs\n batch = [self.indices[k] for k in index]\n x, y = self.data_generation(batch)\n\n return x, y\n\n def on_epoch_end(self):\n\n \"\"\"\n Updates indexes after each epoch\n \"\"\"\n self.index = np.arange(len(self.indices))\n if self.shuffle == True:\n np.random.shuffle(self.index)\n\n def load(self, image_path, depth_map, mask):\n \"\"\"Load input and target image.\"\"\"\n\n image_ = cv2.imread(image_path)\n image_ = cv2.cvtColor(image_, cv2.COLOR_BGR2RGB)\n image_ = cv2.resize(image_, self.dim)\n image_ = tf.image.convert_image_dtype(image_, tf.float32)\n\n depth_map = np.load(depth_map).squeeze()\n\n mask = np.load(mask)\n mask = mask > 0\n\n max_depth = min(300, np.percentile(depth_map, 99))\n depth_map = np.clip(depth_map, self.min_depth, max_depth)\n depth_map = np.log(depth_map, where=mask)\n\n depth_map = np.ma.masked_where(~mask, depth_map)\n\n depth_map = np.clip(depth_map, 0.1, np.log(max_depth))\n depth_map = cv2.resize(depth_map, self.dim)\n depth_map = np.expand_dims(depth_map, axis=2)\n depth_map = tf.image.convert_image_dtype(depth_map, tf.float32)\n\n return image_, depth_map\n\n def data_generation(self, batch):\n\n x = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n for i, batch_id in enumerate(batch):\n x[i,], y[i,] = self.load(\n self.data[\"image\"][batch_id],\n self.data[\"depth\"][batch_id],\n self.data[\"mask\"][batch_id],\n )\n\n return x, 
y","sub_path":"depth_estimation/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31294466","text":"import random\nimport re, time\nimport string\n\nimport requests, bs4\n\n# 测试目录页面\ndef getAll():\n url = 'https://www.douban.com/group/haizhuzufang/discussion?start=0'\n time.sleep(6) # 豆瓣爬虫要求的最低间隔为5\n discussPage = requests.get(url)\n soup = bs4.BeautifulSoup(discussPage.text, \"html.parser\")\n if soup.prettify().find(\"403 Forbidden\") > 0:\n print(\"禁止访问403\")\n for i in soup.find_all('a',href=re.compile(r'https://www.douban.com/group/topic/\\d*/')):\n # print(type(i))\n # print(i.name,i.attrs)\n detailUrl = i.attrs['href']\n detailSoup = bs4.BeautifulSoup(requests.get(detailUrl).text, \"html.parser\")\n for j in detailSoup.find_all('p',\n class_=None,# 回复内容的标签有class,此处过滤掉回复\n text=re.compile(r'.*(中山大学|中大|晓港|鹭江).*')):\n print(detailUrl)\n print(\"\\t\",j.text)\n print(\"END\")\n\n# 测试细节页面\ndef getP():\n url = 'https://www.douban.com/group/topic/114162360/'\n detailSoup = bs4.BeautifulSoup(requests.get(url).text, \"html.parser\")\n for j in detailSoup.find_all('p',class_=None):\n print(\"\\t\",j.text)\n\n print('----------------------------->')\n #\n for j in detailSoup.find_all('p',text=re.compile(r'.*(中山大学|中大|晓港|鹭江|不限).*')):\n print(\"\\t\",j.text)\n\n# getAll()\n# getP()\n# 从abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789中选11个字符\ns = \"bid=%s\" % \"\".join(random.sample(string.ascii_letters + string.digits, 11))\n# print(s)\ns2 = random.sample(['1','2','3','4','5'],3)\nprint(\"A\".join(s2))","sub_path":"jingle/test/BSTest.py","file_name":"BSTest.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301743603","text":"class Solution:\n def letterCombinations(self, digits: str) -> List[str]: \n '''\n :type: digits: str\n :rtype: List[str]\n '''\n if len(digits) == 0:\n return []\n _dict = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',\n '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n thres = len(digits)\n res = []\n self.dfs(_dict, digits, 0, thres, '', res)\n return res\n \n def dfs(self, _dict, digits, level, thres, substr, res):\n if level == thres:\n res.append(substr)\n return \n for char in _dict[digits[level]]:\n self.dfs(_dict, digits, level+1, thres, substr + char, res)","sub_path":"Python/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"531966774","text":"import time\nfrom typing import Dict, List, Optional, cast\n\nimport pendulum\nfrom dagster import check\nfrom dagster.core.events import DagsterEvent, EngineEventData, MetadataEntry, log_step_event\nfrom dagster.core.execution.context.system import PlanOrchestrationContext\nfrom dagster.core.execution.plan.plan import ExecutionPlan\nfrom dagster.core.execution.plan.step import ExecutionStep\nfrom dagster.core.execution.retries import RetryMode\nfrom dagster.core.executor.step_delegating.step_handler.base import StepHandler, StepHandlerContext\nfrom dagster.grpc.types import ExecuteStepArgs\n\nfrom ..base import Executor\n\n\nclass StepDelegatingExecutor(Executor):\n def __init__(\n self,\n step_handler: StepHandler,\n retries: RetryMode,\n sleep_seconds: Optional[float] = None,\n check_step_health_interval_seconds: Optional[int] = None,\n 
should_verify_step: bool = False,\n ):\n self._step_handler = step_handler\n self._retries = retries\n self._sleep_seconds = cast(\n float, check.opt_float_param(sleep_seconds, \"sleep_seconds\", default=0.1)\n )\n self._check_step_health_interval_seconds = cast(\n int,\n check.opt_int_param(\n check_step_health_interval_seconds, \"check_step_health_interval_seconds\", default=20\n ),\n )\n self._should_verify_step = should_verify_step\n\n @property\n def retries(self):\n return self._retries\n\n def _pop_events(self, instance, run_id) -> List[DagsterEvent]:\n events = instance.logs_after(run_id, self._event_cursor)\n self._event_cursor += len(events)\n return [event.dagster_event for event in events if event.is_dagster_event]\n\n def _get_step_handler_context(\n self, plan_context, steps, active_execution\n ) -> StepHandlerContext:\n return StepHandlerContext(\n instance=plan_context.plan_data.instance,\n execute_step_args=ExecuteStepArgs(\n pipeline_origin=plan_context.reconstructable_pipeline.get_python_origin(),\n pipeline_run_id=plan_context.pipeline_run.run_id,\n step_keys_to_execute=[step.key for step in steps],\n instance_ref=plan_context.plan_data.instance.get_ref(),\n retry_mode=self.retries.for_inner_plan(),\n known_state=active_execution.get_known_state(),\n should_verify_step=self._should_verify_step,\n ),\n step_tags={step.key: step.tags for step in steps},\n pipeline_run=plan_context.pipeline_run,\n )\n\n def _log_new_events(self, events, plan_context, running_steps):\n # Note: this could lead to duplicated events if the returned events were already logged\n # (they shouldn't be)\n for event in events:\n log_step_event(\n plan_context.for_step(running_steps[event.step_key]),\n event,\n )\n\n def execute(self, plan_context: PlanOrchestrationContext, execution_plan: ExecutionPlan):\n check.inst_param(plan_context, \"plan_context\", PlanOrchestrationContext)\n check.inst_param(execution_plan, \"execution_plan\", ExecutionPlan)\n\n self._event_cursor = -1 # pylint: disable=attribute-defined-outside-init\n\n yield DagsterEvent.engine_event(\n plan_context,\n f\"Starting execution with step handler {self._step_handler.name}\",\n EngineEventData(),\n )\n\n with execution_plan.start(retry_mode=self.retries) as active_execution:\n running_steps: Dict[str, ExecutionStep] = {}\n\n if plan_context.resume_from_failure:\n yield DagsterEvent.engine_event(\n plan_context,\n \"Resuming execution from failure\",\n EngineEventData(),\n )\n\n prior_events = self._pop_events(\n plan_context.instance,\n plan_context.run_id,\n )\n for dagster_event in prior_events:\n yield dagster_event\n\n possibly_in_flight_steps = active_execution.rebuild_from_events(prior_events)\n for step in possibly_in_flight_steps:\n\n yield DagsterEvent.engine_event(\n plan_context,\n \"Checking on status of possibly launched steps\",\n EngineEventData(),\n step.handle,\n )\n\n # TODO: check if failure event included. For now, hacky assumption that\n # we don't log anything on successful check\n if self._step_handler.check_step_health(\n self._get_step_handler_context(plan_context, [step], active_execution)\n ):\n # health check failed, launch the step\n self._log_new_events(\n self._step_handler.launch_step(\n self._get_step_handler_context(\n plan_context, [step], active_execution\n )\n ),\n plan_context,\n {step.key: step for step in possibly_in_flight_steps},\n )\n\n running_steps[step.key] = step\n\n last_check_step_health_time = pendulum.now(\"UTC\")\n\n # Order of events is important here. 
During an iteration, we call handle_event, then get_steps_to_execute,\n            # then is_complete. get_steps_to_execute updates the state of ActiveExecution, and without it\n            # is_complete can return true when we're just between steps.\n            while not active_execution.is_complete:\n\n                if active_execution.check_for_interrupts():\n                    if not plan_context.instance.run_will_resume(plan_context.run_id):\n                        yield DagsterEvent.engine_event(\n                            plan_context,\n                            \"Executor received termination signal, forwarding to steps\",\n                            EngineEventData.interrupted(list(running_steps.keys())),\n                        )\n                        active_execution.mark_interrupted()\n                        for _, step in running_steps.items():\n                            self._log_new_events(\n                                self._step_handler.terminate_step(\n                                    self._get_step_handler_context(\n                                        plan_context, [step], active_execution\n                                    )\n                                ),\n                                plan_context,\n                                running_steps,\n                            )\n\n                    else:\n                        yield DagsterEvent.engine_event(\n                            plan_context,\n                            \"Executor received termination signal, not forwarding to steps because \"\n                            \"run will be resumed\",\n                            EngineEventData(\n                                metadata_entries=[\n                                    MetadataEntry.text(str(running_steps.keys()), \"steps_in_flight\")\n                                ]\n                            ),\n                        )\n                        active_execution.mark_interrupted()\n\n                    return\n\n                for dagster_event in self._pop_events(\n                    plan_context.instance,\n                    plan_context.run_id,\n                ): # type: ignore\n\n                    # STEP_SKIPPED events are only emitted by ActiveExecution, which already handles\n                    # and yields them.\n                    if dagster_event.is_step_skipped:\n                        assert isinstance(dagster_event.step_key, str)\n                        active_execution.verify_complete(plan_context, dagster_event.step_key)\n\n                    else:\n                        yield dagster_event\n                        active_execution.handle_event(dagster_event)\n\n                        if dagster_event.is_step_success or dagster_event.is_step_failure:\n                            assert isinstance(dagster_event.step_key, str)\n                            del running_steps[dagster_event.step_key]\n                            active_execution.verify_complete(plan_context, dagster_event.step_key)\n\n                # process skips from failures or uncovered inputs\n                for event in active_execution.plan_events_iterator(plan_context):\n                    yield event\n\n                curr_time = pendulum.now(\"UTC\")\n                if (\n                    curr_time - last_check_step_health_time\n                ).total_seconds() >= self._check_step_health_interval_seconds:\n                    last_check_step_health_time = curr_time\n                    for _, step in running_steps.items():\n                        self._log_new_events(\n                            self._step_handler.check_step_health(\n                                self._get_step_handler_context(\n                                    plan_context, [step], active_execution\n                                )\n                            ),\n                            plan_context,\n                            running_steps,\n                        )\n\n                for step in active_execution.get_steps_to_execute():\n                    running_steps[step.key] = step\n                    self._log_new_events(\n                        self._step_handler.launch_step(\n                            self._get_step_handler_context(plan_context, [step], active_execution)\n                        ),\n                        plan_context,\n                        running_steps,\n                    )\n\n                time.sleep(self._sleep_seconds)\n","sub_path":"python_modules/dagster/dagster/core/executor/step_delegating/step_delegating_executor.py","file_name":"step_delegating_executor.py","file_ext":"py","file_size_in_byte":9977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523297627","text":"from flask import flash, redirect, render_template, url_for, request\nfrom flask_login import login_required, login_user, logout_user, current_user\nfrom app.auth import auth\nfrom .forms import LoginForm, RegistrationForm, EditProfileForm\nfrom app.models import Employee\nfrom app import db\nfrom datetime import datetime\n\n\n@auth.route('/register', methods=['POST', 'GET'])\ndef register():\n    form = RegistrationForm()\n    if form.validate_on_submit():\n        employee = Employee(email=form.email.data,\n                            username=form.username.data,\n                            
first_name=form.first_name.data,\n                            last_name=form.last_name.data,\n                            password=form.password.data)\n        db.session.add(employee)\n        db.session.commit()\n        flash('You have successfully registered! You may now login', 'success')\n        return redirect(url_for('auth.login'))\n    return render_template('auth/register.html', form=form, title='Register')\n\n\n@auth.route('/login', methods=['POST', 'GET'])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        employee = Employee.query.filter_by(email=form.email.data).first()\n        if employee is not None and employee.verify_password(form.password.data):\n            login_user(employee)\n            if employee.is_admin:\n                return redirect(url_for('home.admin_dashboard'))\n            else:\n                return redirect(url_for('home.dashboard'))\n        else:\n            flash('Invalid email or password', 'error')\n    return render_template('auth/login.html', form=form, title='Login')\n\n\n@auth.route('/user/<username>')\n@login_required\ndef user(username):\n    user = Employee.query.filter_by(username=username).first_or_404()\n    posts = [\n        {'author': user, 'body': 'Test post #1'},\n        {'author': user, 'body': 'Test post #2'}\n    ]\n    return render_template('/auth/user.html', user=user, posts=posts)\n\n\n@auth.before_request\ndef before_request():\n    if current_user.is_authenticated:\n        current_user.last_seen = datetime.utcnow()\n        db.session.commit()  # persist the updated last_seen timestamp (db.session() alone does nothing)\n\n\n@auth.route('/edit_profile', methods=['POST', 'GET'])\n@login_required\ndef edit_profile():\n    form = EditProfileForm()\n    if form.validate_on_submit():\n        current_user.username = form.username.data\n        current_user.about_me = form.about_me.data\n        db.session.commit()\n        flash('Your changes have been saved.', 'success')\n        return redirect(url_for('auth.edit_profile'))\n    elif request.method == 'GET':\n        form.username.data = current_user.username\n        form.about_me.data = current_user.about_me\n    return render_template('auth/edit_profile.html', title='Edit Profile', form=form)\n\n\n\n@auth.route('/logout')\n@login_required\ndef logout():\n    logout_user()\n    flash('You have successfully been logged out.', 'success')\n    return redirect(url_for('auth.login'))\n","sub_path":"app/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288251325","text":"from django.shortcuts import render, get_object_or_404\n\nfrom cadastros.forms import ClienteForm, PessoaJuridicaForm, EnderecoForm, EmailForm, TelefoneForm\nfrom cadastros.models import Fornecedor, PessoaJuridica, Telefone, Pessoa, Email, Endereco\n\n\ndef fornecedor_detalhe_id(request, id):\n    fornecedor = get_object_or_404(Fornecedor, pk=id)\n    telefone = 
Telefone.objects.filter(pessoa_tel=id)\n email = Email.objects.filter(pessoa_email=id)\n fornecedor = Fornecedor.objects.get(id=id)\n endereco = Endereco.objects.get(pessoa_end=id)\n pessoa_juridica = PessoaJuridica.objects.get(pessoa_id=id)\n return render(request, 'cadastro/info_fornecedor_pj.html', {\n 'pessoa_juridica': pessoa_juridica,\n 'fornecedor': fornecedor,\n 'telefones':telefone,\n 'emails':email,\n 'endereco':endereco,\n })\n\ndef lista_fornecedor(request):\n fornecedores = Fornecedor.objects.all()\n return render(request, 'cadastro/lista_fornecedor.html', {'fornecedores': fornecedores}) ","sub_path":"Sistema_PBazar/cadastros/views/detalhe_fornecedor.py","file_name":"detalhe_fornecedor.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180399190","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef fgrosse (fo,p,alpha,xM):\r\n distance = xtodistance(xM,p)\r\n fgross = fo * np.exp(-(alpha * distance ** 2))\r\n return fgross\r\n\r\ndef frückstell (wo,m,xauslenkung):\r\n force = -wo**2*m*xauslenkung\r\n return force\r\n\r\ndef velocitytostrecke(deltat,v):\r\n deltas = deltat*v\r\n return deltas\r\n\r\ndef xtodistance(x,p):\r\n distance = np.sqrt(x**2+p**2)\r\n return distance\r\n\r\nm = 1\r\np = 50\r\nwo = 1\r\nf0 = 100\r\nalpha = 1/1000\r\n\r\nMvelocity = 20\r\nMx = -200\r\n\r\nmx = 0\r\nmvelocity = 0\r\n\r\ntges = 200\r\ndeltat = 10**-2\r\ntvorAusgabe = 100\r\ntAbstand = 1\r\niterations = tges/deltat\r\n\r\nmxarray = np.zeros(int(tvorAusgabe/deltat))\r\nMxarray = np.zeros(int(tvorAusgabe/deltat))\r\nfrmax = 0\r\nfgmax = 0\r\nxmax = 0\r\n\r\nfor i in range(int(iterations)):\r\n Mvelocity = Mvelocity\r\n Mx += velocitytostrecke(deltat,Mvelocity)\r\n\r\n mforce1 = fgrosse(f0,p,alpha,Mx)\r\n mforce2 = frückstell(wo,m,mx)\r\n mforce = mforce1+mforce2\r\n mvelocity += deltat*mforce/m\r\n mx += deltat*mvelocity\r\n\r\n if mforce2 > frmax:\r\n frmax = mforce2\r\n\r\n if mforce1 > fgmax:\r\n fgmax = mforce1\r\n\r\n if (i > (iterations-tvorAusgabe/deltat)):\r\n if mx > xmax:\r\n xmax = mx\r\n if i % 1 == 0:\r\n mxarray[i-(int(iterations-tvorAusgabe/deltat))] = mx\r\n Mxarray[i-(int(iterations-tvorAusgabe/deltat))] = Mx\r\n if i%1000 == 0:\r\n print('mx: ' + str(mx))\r\n print('Mx: ' + str(Mx))\r\n print('mv: ' + str(mvelocity))\r\n print('mforce: ' + str(mforce))\r\n print('rückstellkraft: ' + str(mforce2))\r\n print('fgross: ' + str(mforce1))\r\n print('maximale rückstellkraft: ' + str(frmax))\r\n print('maximale abstoßungskraft: ' + str(fgmax))\r\n\r\n\r\n\r\nbeta = alpha*Mvelocity**2\r\nF0 = f0*np.exp(-alpha*p**2)\r\n\r\namplitude = F0/(m*wo)*(np.pi/beta)**0.5*np.exp(-(wo**2)/(4*beta))\r\n\r\nprint('Amplitude nach Simulation: ' + str(xmax))\r\nprint('Amplitude sollte sein: ' + str(amplitude))\r\n\r\n\r\nplt.plot(mxarray)\r\nplt.show()\r\n\r\n#plt.plot(Mxarray)\r\n#plt.ylim(Mxarray[1],Mxarray[-1])\r\n#plt.show()","sub_path":"PycharmProjects/Physics Code/Theo/Blatt 8 Nr4.py","file_name":"Blatt 8 Nr4.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144413827","text":"\"\"\"\r\n逻辑回归 Logistic Regression\r\n\r\n如果模型在训练集上表现优秀,但在测试集上表现糟糕,模型就会过拟合\r\n正则化是用来防止模型过拟合的过程,常用有L1正则化和L2正则化\r\n\r\nL1正则化会将参数压缩为0,L2正则化只会让参数尽量小,不会取到0\r\n(1)如果数据特征量很大,数据维度很高,倾向于使用L1正则化;\r\n 若选择使用L1正则化,参数solver仅能够使用liblinear\r\n(2)若主要目的只是防止过拟合,选择L2正则化就足够了\r\n\"\"\"\r\nfrom sklearn.linear_model import 
LogisticRegression as LR\r\nfrom sklearn.datasets import load_breast_cancer\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ncancer = load_breast_cancer()\r\nx = cancer.data\r\ny = cancer.target\r\n\r\n# print(x.shape)\r\n\"\"\"\r\n# C的值越小,对逻辑回归的惩罚越大\r\nlr1 = LR(penalty='l1', solver='liblinear', C=0.5, max_iter=1000)\r\nlr2 = LR(penalty='l2', solver='liblinear', C=0.5, max_iter=1000)\r\n\r\nlr1 = lr1.fit(x, y)\r\nprint(lr1.coef_)\r\n# 计算不为0的特征有几个\r\nprint((lr1.coef_ != 0).sum(axis=1))\r\n\r\nlr2 = lr2.fit(x, y)\r\nprint(lr2.coef_)\r\n\"\"\"\r\n\r\n\"\"\"\r\nC的学习曲线\r\n\"\"\"\r\n\r\nl1 = []\r\nl2 = []\r\nl1test = []\r\nl2test = []\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=420)\r\n\r\nfor i in np.linspace(0.05, 1, 19):\r\n # max_iter参数用于梯度下降\r\n lr1 = LR(penalty='l1', solver='liblinear', C=i, max_iter=1000)\r\n lr2 = LR(penalty='l2', solver='liblinear', C=i, max_iter=1000)\r\n\r\n lr1 = lr1.fit(x_train, y_train)\r\n l1.append(accuracy_score(lr1.predict(x_train), y_train))\r\n # accuracy_score(lr1.predict(x_test), y_test) == lr1.score(x_test, y_test)\r\n l1test.append(accuracy_score(lr1.predict(x_test), y_test))\r\n\r\n lr2 = lr2.fit(x_train, y_train)\r\n l2.append(accuracy_score(lr2.predict(x_train), y_train))\r\n l2test.append(accuracy_score(lr2.predict(x_test), y_test))\r\n\r\n graph = [l1, l2, l1test, l2test]\r\n color = ['green', 'black', 'lightgreen', 'grey']\r\n label = ['L1', 'L2', 'L1Test', 'L2Test']\r\n\r\nplt.figure(figsize=(6, 6))\r\nfor i in range(len(graph)):\r\n plt.plot(np.linspace(0.05, 1, 19), graph[i], color=color[i], label=label[i])\r\nplt.legend(loc=4) # 右下角\r\nplt.show()\r\n# 从画出来的图来看,建议使用L2正则化,C的取值取0.8或0.9\r\n","sub_path":"MachineLearning/TsaiTsai/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"513695783","text":"from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode\nfrom armulator.armv6.bits_ops import add\n\n\nclass Cbz(AbstractOpcode):\n def __init__(self, nonzero, n, imm32):\n super(Cbz, self).__init__()\n self.nonzero = nonzero\n self.n = n\n self.imm32 = imm32\n\n def execute(self, processor):\n if self.nonzero != processor.registers.get(self.n).all(0):\n processor.branch_write_pc(add(processor.registers.get_pc(), self.imm32, 32))\n","sub_path":"armulator/armv6/opcodes/abstract_opcodes/cbz.py","file_name":"cbz.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295071374","text":"# -*- coding: utf-8 -*-\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\n\nclass CursorWidget(QtGui.QWidget):\n\n def __init__(self, plot_widget):\n super(CursorWidget, self).__init__()\n \n self.plot_widget = plot_widget\n \n self.cursor = [False, False] \n\n self.position_layout = QtGui.QVBoxLayout()\n self.cursor_1_layout = QtGui.QVBoxLayout()\n self.cursor_2_layout = QtGui.QVBoxLayout()\n self.delta_layout = QtGui.QVBoxLayout() \n self.Delta_layout = QtGui.QHBoxLayout()\n self.cursor_box_layout = QtGui.QHBoxLayout() \n self.global_layout = QtGui.QVBoxLayout()\n self.auto_scale_layout = QtGui.QHBoxLayout()\n \n self.layout = QtGui.QVBoxLayout()\n \n\n self.position_box = QtGui.QGroupBox('Position')\n 
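        # NOTE (editor's comment): 5 appears to be the raw flag value Qt.AlignLeft | Qt.AlignHCenter (1 | 4);
        # passing QtCore.Qt.AlignHCenter alone is the more readable way to center a QGroupBox title.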
self.position_box.setAlignment(5)\n self.cursor_1_box = QtGui.QGroupBox('Cursor 1')\n self.cursor_1_box.setAlignment(5)\n self.cursor_2_box = QtGui.QGroupBox('Cursor 2')\n self.cursor_2_box.setAlignment(5)\n self.delta_box = QtGui.QGroupBox('Delta')\n self.delta_box.setAlignment(5)\n \n # Cursor\n \n self.cursor_button = QtGui.QPushButton('ON') \n self.cursor_button.setStyleSheet('QPushButton {color: green;}')\n self.cursor_button.setCheckable(True)\n self.cursor_X = QtGui.QLabel('X')\n self.cursor_X.setAlignment(QtCore.Qt.AlignCenter) \n self.cursor_1_X = QtGui.QLabel('')\n self.cursor_1_X.setAlignment(QtCore.Qt.AlignCenter) \n self.cursor_2_X = QtGui.QLabel('')\n self.cursor_2_X.setAlignment(QtCore.Qt.AlignCenter) \n self.cursor_Y = QtGui.QLabel('Y')\n self.cursor_Y.setAlignment(QtCore.Qt.AlignCenter) \n self.cursor_1_Y = QtGui.QLabel('')\n self.cursor_1_Y.setAlignment(QtCore.Qt.AlignCenter) \n self.cursor_2_Y = QtGui.QLabel('')\n self.cursor_2_Y.setAlignment(QtCore.Qt.AlignCenter) \n \n self.delta_X = QtGui.QLabel('X')\n self.delta_X.setAlignment(QtCore.Qt.AlignCenter) \n self.delta_Y = QtGui.QLabel('Y')\n self.delta_Y.setAlignment(QtCore.Qt.AlignCenter) \n \n #Cursor\n \n self.vLine_1 = pg.InfiniteLine(angle=90, movable=False)\n self.hLine_1 = pg.InfiniteLine(angle=0, movable=False)\n self.plot_widget.addItem(self.vLine_1, ignoreBounds=True)\n self.plot_widget.addItem(self.hLine_1, ignoreBounds=True)\n self.vLine_1.setVisible(self.cursor[0])\n self.hLine_1.setVisible(self.cursor[0])\n \n self.vLine_2 = pg.InfiniteLine(angle=90, movable=False, pen=(3,4))\n self.hLine_2 = pg.InfiniteLine(angle=0, movable=False, pen=(3,4))\n self.plot_widget.addItem(self.vLine_2, ignoreBounds=True)\n self.plot_widget.addItem(self.hLine_2, ignoreBounds=True)\n self.vLine_2.setVisible(self.cursor[1])\n self.hLine_2.setVisible(self.cursor[1])\n self.view_box = self.plot_widget.getViewBox() \n \n self.layout.addWidget(self.cursor_button)\n \n self.position_layout.addWidget(self.cursor_X)\n self.position_layout.addWidget(self.cursor_Y)\n \n self.cursor_1_layout.addWidget(self.cursor_1_X)\n self.cursor_1_layout.addWidget(self.cursor_1_Y)\n \n self.cursor_2_layout.addWidget(self.cursor_2_X)\n self.cursor_2_layout.addWidget(self.cursor_2_Y)\n \n self.position_box.setLayout(self.position_layout)\n self.cursor_1_box.setLayout(self.cursor_1_layout)\n self.cursor_2_box.setLayout(self.cursor_2_layout)\n \n self.cursor_box_layout.addWidget(self.position_box) \n self.cursor_box_layout.addWidget(self.cursor_1_box)\n self.cursor_box_layout.addWidget(self.cursor_2_box)\n \n self.delta_layout.addWidget(self.delta_X)\n self.delta_layout.addWidget(self.delta_Y)\n self.delta_box.setLayout(self.delta_layout)\n \n self.Delta_layout.addStretch(1)\n self.Delta_layout.addWidget(self.delta_box)\n self.Delta_layout.addStretch(1) \n \n self.delta_box.setVisible(self.cursor[1])\n \n self.layout.addLayout(self.cursor_box_layout) \n self.layout.addLayout(self.Delta_layout)\n \n self.setLayout(self.layout)\n \n # Connections\n self.cursor_button.clicked.connect(self.cursor_on)\n self.plot_widget.scene().sigMouseMoved.connect(self.mouseMoved)\n self.plot_widget.scene().sigMouseClicked.connect(self.mouseClicked)\n \n def auto_scale(self):\n self.plot_widget.enableAutoRange() \n \n def cursor_on(self): \n self.cursor[0] = not self.cursor[0]\n if self.cursor[0]:\n self.cursor_button.setStyleSheet('QPushButton {color: red;}')\n self.cursor_button.setText('OFF')\n self.set_visible()\n else:\n self.cursor[1] = False\n 
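            # Cursor toggled off: restore the green "ON" button, hide both cursor lines, and clear the readouts.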
self.cursor_button.setStyleSheet('QPushButton {color: green;}')\n self.cursor_button.setText('ON')\n self.set_visible()\n self.reset_text()\n self.delta_box.setVisible(False)\n \n def mouseMoved(self, pos):\n if self.plot_widget.sceneBoundingRect().contains(pos):\n self.mousePoint = self.view_box.mapSceneToView(pos)\n self.vLine_1.setPos(self.mousePoint.x())\n self.hLine_1.setPos(self.mousePoint.y())\n if self.cursor[0]:\n if 1e-2 < np.abs(self.mousePoint.x()) < 1e3:\n self.cursor_1_X.setText('{:.2f}'.format(self.mousePoint.x()))\n else:\n self.cursor_1_X.setText('%.2e'%(self.mousePoint.x()))\n if 1e-2 < np.abs(self.mousePoint.y()) < 1e3:\n self.cursor_1_Y.setText('{:.2f}'.format(self.mousePoint.y()))\n else:\n self.cursor_1_Y.setText('%.2e'%(self.mousePoint.y()))\n if self.cursor[1] == True:\n if 1e-2 < np.abs(self.mousePoint.x() - self.cursor_2_x) < 1e3:\n self.delta_X.setText('X ' + '{:.2f}'.format(self.mousePoint.x() - self.cursor_2_x))\n else:\n self.delta_X.setText('X ' + '%.2e'%(self.mousePoint.x() - self.cursor_2_x)) \n if 1e-2 < np.abs(self.mousePoint.y() - self.cursor_2_y) < 1e3:\n self.delta_Y.setText('Y ' + '{:.2f}'.format(self.mousePoint.y() - self.cursor_2_y))\n else:\n self.delta_Y.setText('Y ' + '%.2e'%(self.mousePoint.y() - self.cursor_2_y))\n \n else:\n self.reset_text()\n self.delta_X.setText('X') \n self.delta_Y.setText('Y') \n self.delta_box.setVisible(False)\n \n def mouseClicked(self, evt):\n if self.cursor[0]:\n self.cursor_2_x = self.mousePoint.x()\n self.cursor_2_y = self.mousePoint.y()\n self.vLine_2.setPos(self.mousePoint.x())\n self.hLine_2.setPos(self.mousePoint.y())\n self.cursor[1] = True\n self.delta_box.setVisible(True)\n self.vLine_2.setVisible(self.cursor[1])\n self.hLine_2.setVisible(self.cursor[1])\n if 1e-2 < np.abs(self.mousePoint.x()) < 1e3:\n self.cursor_2_X.setText('{:.2f}'.format(self.mousePoint.x()))\n else:\n self.cursor_2_X.setText('%.2e'%(self.mousePoint.x()))\n if 1e-2 < np.abs(self.mousePoint.y()) < 1e3:\n self.cursor_2_Y.setText('{:.2f}'.format(self.mousePoint.y()))\n else:\n self.cursor_2_Y.setText('%.2e'%(self.mousePoint.y()))\n \n def set_visible(self):\n self.vLine_1.setVisible(self.cursor[0])\n self.hLine_1.setVisible(self.cursor[0])\n self.vLine_2.setVisible(self.cursor[1])\n self.hLine_2.setVisible(self.cursor[1])\n \n def reset_text(self):\n self.cursor_1_X.setText('')\n self.cursor_1_Y.setText('')\n self.cursor_2_X.setText('')\n self.cursor_2_Y.setText('')\n","sub_path":"lase/gui/cursor_widget.py","file_name":"cursor_widget.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"284238059","text":"from django import forms\n\nfrom .models import Categoria, SubCategoria, Marca, UnidadMedida, Producto\n\n\nclass FormBase(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class':'form-control'\n }) \n\nclass CategoriaForm(FormBase):\n\n class Meta:\n model = Categoria\n fields = ['descripcion', 'estado']\n labels = {\n 'descripcion':'Descripción de la categoria',\n 'estado':\"Estado\"\n }\n widget = {\n 'descripcion': forms.TextInput()\n }\n\nclass SubCategoriaForm(FormBase):\n categoria = forms.ModelChoiceField(\n queryset=Categoria.objects.filter(estado=True).order_by('descripcion')\n )\n\n class Meta:\n model = SubCategoria\n fields = ['categoria', 'descripcion', 'estado']\n labels = {\n 'descripcion':'Sub 
Categoria',\n 'estado':\"Estado\"\n }\n widget = {\n 'descripcion': forms.TextInput()\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['categoria'].empty_label = 'Seleccione categoría'\n\nclass MarcaForm(FormBase):\n \n class Meta:\n model = Marca\n fields = ['descripcion', 'estado']\n labels = {\n 'descripcion':'Marca',\n 'estado':\"Estado\"\n }\n widget = {\n 'descripcion': forms.TextInput()\n }\n \nclass UnidadMedidaForm(FormBase):\n \n class Meta:\n model = UnidadMedida\n fields = ['descripcion', 'estado']\n labels = {\n 'descripcion':'Unidad de medida',\n 'estado':\"Estado\"\n }\n widget = {\n 'descripcion': forms.TextInput()\n }\n \nclass ProductoForm(FormBase):\n \n class Meta:\n model = Producto\n fields = [\n 'codigo','codigo_barras','descripcion', 'estado', 'precio',\n 'existencia', 'ultima_compra', 'marca', 'subcategoria',\n 'unidad_medida'\n ]\n exclude = [\n 'um', 'fm', 'uc', 'fc'\n ]\n widget = {\n 'descripcion': forms.TextInput()\n }\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['ultima_compra'].widget.attrs['readonly'] = True\n self.fields['existencia'].widget.attrs['readonly'] = True","sub_path":"inv/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403294867","text":"\nshopping_list = []\n\n'''\n This function will display product from our shopping list\n'''\ndef print_list():\n # print(shopping_list)\n print()\n print(\"SHOPPING LIST:\")\n # print(shopping_list)\n # [0,1,2,3,4] range(5)\n # [\"A\", \"B\", \"C\"]\n\n # Print each product name in new line\n # for product in shopping_list:\n # print(product)\n\n # Print each product name in new line with\n for i in range(len(shopping_list)):\n print(\"%2d %s\" % (i+1, shopping_list[i]))\n\n\ndef add_product(product):\n shopping_list.append(product)\n print(product.upper(), \"ADDED TO SHOPPING LIST!\\n\")\n\n\ndef remove_product():\n print_list()\n to_remove = input(\"Provide product name to remove: \")\n\n if to_remove in shopping_list:\n shopping_list.remove(to_remove)\n else:\n print(\"Ups, that list not contain selected product!\")\n\n\ndef menu():\n print(\"MENU\")\n print(\"1. ADD PRODUCT\")\n print(\"2. SHOW PRODUCT LIST\")\n print(\"3. REMOVE PRODUCT\")\n print(\"0. EXIT\")\n\n # choice = input(\"Please provide action to do: \")\n # while not is_number(choice):\n # choice = input(\"Please provide action to do: \")\n\n # choice = int(choice)\n\n choice = input(\"Please provide action to do: \")\n while not choice in [\"0\", \"1\", \"2\", \"3\"]:\n choice = input(\"Please provide action to do: \")\n\n choice = int(choice)\n\n if choice == 1:\n # add product\n option = input(\"Do you want to add product? Y or N\")\n\n while option.upper() == 'Y':\n product = input(\"Please, provide product to add: \")\n add_product(product)\n option = input(\"Do you want to add another product? 
Y or N\")\n\n    elif choice == 2:\n        print_list()\n        print()\n    elif choice == 3:\n        remove_product()\n    else:\n        exit()\n\n\ndef is_number(string):\n    # https://www.w3schools.com/python/python_try_except.asp\n\n    try:\n        int(string)\n        return True\n    except:\n        return False\n\n\ndef main():\n    while True:\n        menu()\n\n\nmain()\n","sub_path":"shop_list_Mentor.py","file_name":"shop_list_Mentor.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"463794525","text":"import os\n\nparentdir = \"\"\n\ndef ready_parent_dir(parent):\n    global parentdir\n    parentdir = parent\n    if not os.path.exists(parent):\n        os.mkdir(parent)\n        return True\n\n    if os.path.isdir(parent):\n        return True\n\n    else:\n        return False\n\ndef write(content, filename = \"\", isDB=False):\n    global parentdir\n    with open(parentdir+'/'+filename,'w') as f:\n        f.write(content)\n    \n\ndef read(filename, isDB=False):\n    global parentdir\n    with open(parentdir+'/'+filename,'r') as f:\n        text = f.read()\n    return text","sub_path":"crawl_logger.py","file_name":"crawl_logger.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"461101283","text":"import rethinkdb as r\nfrom datetime import datetime, date, time\nfrom pytz import timezone\n\nc = r.connect()\nr.db(\"themis\").table_drop(\"pages\").run(c)\nr.db(\"themis\").table_drop(\"crawledUrls\").run(c)\nr.db(\"themis\").table_create(\"pages\").run(c)\nr.db(\"themis\").table_create(\"crawledUrls\").run(c)\n\ncursor = r.db(\"themis\").table(\"pagesNew2\").run(c)\ntitles = set()\nfor document in cursor:\n    title = document['title']\n    content = document['content']\n    if (content == []):\n        continue\n    author = document['author']\n    if (author == \"\"):\n        continue\n    url = document['uri']\n    date = document['date']\n    if (title in titles):\n        print(\"skipping duplicate title\")\n        continue\n    titles.add(title)\n    date = datetime.strptime(date, \"%Y%m%d\")\n    utc = timezone('UTC')\n    date = utc.localize(date)\n    data = {\n        'title' : title,\n        'content' : content,\n        'author' : author[3:],\n        'url' : url,\n        'date' : date\n    }\n    crawled = {\n        'url' : url,\n        'crawled' : 1\n    }\n    r.db(\"themis\").table(\"pages\").insert(data).run(c)\n    r.db(\"themis\").table(\"crawledUrls\").insert(crawled).run(c)\n","sub_path":"modifyDatabase.py","file_name":"modifyDatabase.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105663024","text":"#!/usr/bin/env python\nimport pika\nimport pickle\nimport pymongo\nimport sys\nimport os\nimport logging\n\nfrom controllers.injector import Injector\n\nclass Queue(Injector):\n    \"\"\"\n    Class for abstracting queue logic\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.rabbitMQHost = os.environ.get(\"RABBITMQ_HOST\")\n        self.dbHost = os.environ.get(\"DB_HOST\")\n        self.queueName = os.environ.get(\"INFERENCE_QUEUE_NAME\")\n        self.heartBeatTimeOut = int(os.environ.get(\"HEART_BEAT_TIMEOUT\"))\n        self.blockedConnectionTimeOut = int(os.environ.get(\"BLOCKED_CONNECTION_TIMEOUT\"))\n\n        self._channel = self._get_channel()\n\n    def _get_channel(self):\n        connection = pika.BlockingConnection(\n            pika.ConnectionParameters(\n                host=self.rabbitMQHost,\n                heartbeat=self.heartBeatTimeOut,\n                blocked_connection_timeout=self.blockedConnectionTimeOut))\n        channel = connection.channel()\n\n        return channel\n\n    def start_consuming_messages(self, callback_function):\n        
logging.error(\"****************************\")\n \n logging.info(\"model server - receiver\")\n self._channel.queue_declare(\n queue=self.queueName,\n durable=True \n )\n \n self._channel.queue_declare(\n queue=f'{self.queueName}-dlq', durable=True)\n\n logging.info(' [*] Waiting for messages. To exit press CTRL+C')\n self._channel.basic_qos(prefetch_count=1)\n self._channel.basic_consume(\n queue=self.queueName,\n auto_ack=True,\n on_message_callback=callback_function)\n self._channel.start_consuming()","sub_path":"ml_app/inference_worker/app/controllers/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186608007","text":"\"\"\"\ngame logic goes here\n\"\"\"\n\nfrom objects import player as plr\nfrom objects import card as cd\nfrom eucher import euc_cards as ecd\nimport random\n\n\ndef play_order(list, number):\n play_order = [list[(number - 1) % 4],\n list[number % 4],\n list[(number + 1) % 4],\n list[(number + 2) % 4]]\n return play_order\n\n\ndef assign_deal_order(): # sets up turn order at the start of each round\n player_list = plr.Player.List\n\n for player in player_list:\n if player.dealer is True:\n return play_order(player_list, player.player_number + 1)\n\n\ndef assign_play_order(): # play order will change after each trick\n player_list = plr.Player.List\n\n for player in player_list:\n if player.took_last_trick is True:\n player.took_last_trick = False # ensure that variable is set to False after play order has been set\n return play_order(player_list, player.player_number)\n\n\n\ndef assign_dealer(count):\n if count < 1:\n dealer = random.choice(plr.Player.List) # assign random person to be dealer at the game start\n dealer.dealer = True\n print('player ' + str(dealer.player_number) + ' has been made dealer')\n\n # debug\n print(dealer.dealer)\n else: # if not the start of the game, the person to the left of the dealer becomes the new dealer\n for player in plr.Player.List:\n if player.dealer is True:\n player.dealer = False\n plr.Player.List[player.player_number % 4].dealer = True\n\n\nclass CallingRound(object):\n\n \"\"\"\n manages player interaction for calling trump\n \"\"\"\n\n calling_round = None\n\n def __init__(self):\n # debug\n print(len(cd.Deck.List))\n self.top_card = cd.Deck.List[0]\n self.play_order = []\n CallingRound.calling_round = self\n\n def loop(self, count): # manages class methods\n assign_dealer(count)\n self.play_order = assign_deal_order()\n done = False\n while not done:\n print(self.top_card.suit)\n print(self.top_card.name)\n done = self.pass_or_call()\n\n @staticmethod\n def make_suit_trump(suit):\n for card in ecd.EucherDeck.ALL_CARDS:\n if card.suit is suit:\n card.trump = True\n ecd.Jack.set_left_bower()\n\n # debug //////////////////////////////////////////////////////\n \"\"\"\n print()\n for player in plr.Player.List:\n for card in player.hand:\n if card.trump is True:\n print(card.name)\n for card in cd.Deck.List:\n if card.trump is True:\n print(card.name)\n print()\n for card in ecd.EucherDeck.ALL_CARDS:\n if card.trump is True:\n print(card.name)\n print()\n for card in ecd.Jack.JACKS:\n print(card.name)\n print()\n for suit in ecd.Jack.red:\n print(suit)\n for suit in ecd.Jack.black:\n print(suit)\n \"\"\"\n # end debug ////////////////////////////////////////////////////\n\n def pass_or_call(self):\n for player in self.play_order:\n done = False\n while not done:\n player_input = 
self.get_player_input(player)\n                if player_input == 'PASS':\n                    done = True\n                elif player_input == 'TRUMP':\n                    print(self.top_card.suit + ' has been made trump')\n                    self.make_suit_trump(self.top_card.suit)\n                    self.pick_up_trump() # dealer must pick up the face-up card and discard a card (can be the one they picked up)\n                    return True\n                else:\n                    print('invalid input')\n\n        return self.pass_or_call_2()\n\n    @staticmethod\n    def get_player_input(player):\n        player_input = input('player ' + str(player.player_number) + ' PASS or TRUMP? :').upper()\n        return player_input\n\n    def pick_up_trump(self):\n        dealers_hand = self.play_order[3].hand\n\n        dealers_hand.append(self.top_card) # adds the top card of the kitty to the dealer's hand\n        cd.Deck.List.remove(self.top_card) # removes top card from the kitty\n\n        print()\n        for card in dealers_hand:\n            print(card.name)\n        print()\n\n        done = False\n        while not done:\n            dealer_input = input('please select a card to discard: ').lower()\n            for card in dealers_hand:\n                if card.name.lower() == dealer_input:\n                    dealers_hand.remove(card) # removes chosen card from dealer's hand\n                    cd.Deck.List.append(card) # places chosen card into the kitty\n                    done = True\n\n    # condition after all players pass for the first time\n\n    def pass_or_call_2(self):\n\n        print()\n        print('second pass or call')\n        print()\n\n        for player in self.play_order:\n            done = False\n            while not done:\n                player_input = self.get_player_input(player)\n                if player_input == 'PASS':\n                    done = True\n                elif player_input == 'TRUMP':\n                    while True:\n                        trump_input = input('please select a suit to call trump: ')\n                        for suit in cd.Card.SUITS:\n\n                            if trump_input == suit and suit != self.top_card.suit:\n                                print(suit + ' has been made trump')\n                                self.make_suit_trump(trump_input) # make selected suit trump\n                                return True\n                            elif trump_input == self.top_card.suit:\n                                print('You cannot call that trump')\n                                break\n\n                else:\n                    print('invalid input')\n\n\n# --------------- PLAY ROUND LOGIC -----------------\n\nclass PlayRound(object):\n    \"\"\"\n    manages player interaction\n    \"\"\"\n\n    play_round = None\n\n    def __init__(self):\n        self.count = 0\n        self.board = []\n        PlayRound.play_round = self\n\n    def loop(self):\n        done = False\n\n        while not done:\n\n            if self.count == 0:\n                play_order = assign_deal_order()\n            else:\n                play_order = assign_play_order()\n\n            # self.reset_players()\n\n            for player in play_order:\n                self.select_card(player)\n\n            # debug ///////////////////////////////////////////\n            print()\n            for card in self.board:\n                print(card.name)\n            # end debug ///////////////////////////////////////////\n\n            self.select_highest_card() # picks highest card and awards the trick to the player who played it\n            self.clean_board()\n\n            self.count += 1\n\n            if self.count > 4:\n                done = True\n                self.award_points()\n                self.clear_trump()\n\n                # debug //////////////////////////////////////////////////\n                for team in plr.Team.List:\n                    print(team.points)\n                # end debug\n\n    def select_card(self, player):\n        print()\n        print('player ' + str(player.player_number) + ' please select a card to play')\n        print()\n        for card in player.hand:\n            print(card.name)\n\n        done = False\n        while not done:\n            print()\n            player_input = input('please select a card to play: ').lower()\n            for card in player.hand:\n                if card.name.lower() == player_input:\n                    done = self.check_suit(card, player) # check if player is following suit\n\n    def play_card(self, card, player): # removes selected card from player's hand and adds it to the board\n\n        self.board.append(card)\n        player.hand.remove(card)\n        player.card_played = card\n\n    def check_suit(self, card, player):\n        suit_to_follow = self.set_suit_to_follow()\n\n        if suit_to_follow is not None: # checks if suit to follow is None\n            print()\n            print('suit to follow is ' + suit_to_follow)\n            if card.suit != suit_to_follow: # checks selected card's suit to see if it matches suit to follow\n                print()\n                print('card is not suit to follow')\n                for card_in_hand in player.hand: # checks player's hand for cards that match suit to follow\n                    print()\n                    print('checking hand for card that follows suit')\n                    if card_in_hand.suit == suit_to_follow: # player must follow suit, return False and select again\n                        print('you have a ' + suit_to_follow + ' in your hand! you must follow suit')\n                        return False\n\n                print('playing off suit card') # shows us that we cannot follow suit, so we can play any card\n\n        self.play_card(card, player)\n        return True\n\n    def set_suit_to_follow(self):\n        if len(self.board) > 0:\n            suit_to_follow = self.board[0].suit\n        else:\n            suit_to_follow = None\n        return suit_to_follow\n\n    def select_highest_card(self):\n        card_values = [] # keeps track of card values\n        suit_to_follow = self.set_suit_to_follow()\n        high_card = None\n        for card in self.board:\n            if card.suit != suit_to_follow:\n                continue\n            else:\n                card_values.append(card.RANKS[card.rank])\n                if card.RANKS[card.rank] == max(card_values):\n                    high_card = card\n\n        self.award_trick(high_card)\n\n    def award_trick(self, high_card):\n        for player in plr.Player.List:\n            if player.card_played == high_card:\n                player.tricks += 1\n                print('player ' + str(player.player_number) + ' took the trick with ' + high_card.name)\n                print('player ' + str(player.player_number) + ' has ' + str(player.tricks) + ' tricks.')\n                player.took_last_trick = True\n                print(str(player.took_last_trick) + ' player ' + str(player.player_number))\n\n    def award_points(self):\n        for team in plr.Team.List:\n            total_tricks = 0\n\n            for player in plr.Player.List:\n                if player.team == team:\n                    total_tricks += player.tricks\n\n            if total_tricks == 5:\n                team.points += 2\n            elif total_tricks > 2:\n                team.points += 1\n\n    def clean_board(self): # empties the board list\n        for card in self.board[::-1]:\n            self.board.remove(card)\n            cd.Deck.List.append(card)\n\n    @staticmethod\n    def reset_players():\n        for player in plr.Player.List: # resets variable\n            player.took_last_trick = False\n\n    @staticmethod\n    def clear_trump():\n        for eucher_card in cd.Deck.List:\n            eucher_card.trump = False\n","sub_path":"eucher/game_logic.py","file_name":"game_logic.py","file_ext":"py","file_size_in_byte":11013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151001860","text":"import time\nfrom uuid import uuid4\n\nALARM_TOPIC = 'bp.ra.v1.alarms'\nALARM_TYPE = 'bp.v1.AlarmEvent'\nALARM_ACTIVE_STATE = 'set'\nALARM_CLEAR_STATE = 'clear'\nALARM_STATE_MAP = {\n    'CREATE': ALARM_ACTIVE_STATE,\n    'UPDATE': ALARM_ACTIVE_STATE,\n    'DELETE': ALARM_CLEAR_STATE\n}\nALARM_SERVICE_AFF_MAP = {\n    'MAJOR': 'SERVICE_AFFECTING',\n    'CRITICAL': 'SERVICE_AFFECTING',\n    'WARNING': 'NON_SERVICE_AFFECTING',\n    'MINOR': 'NON_SERVICE_AFFECTING',\n    'INFO': 'NON_SERVICE_AFFECTING'\n}\nRA_TYPE = 'Firefly'\nPOWER_SUPPLY_FAILURE = '1.3.6.1.4.1.2636.4.1.1'\nFAN_FAILURE = '1.3.6.1.4.1.2636.4.1.2'\nOVER_TEMPERATURE = '1.3.6.1.4.1.2636.4.1.3'\nREDUNDANCY_SWITCH_OVER = '1.3.6.1.4.1.2636.4.1.4'\nFRU_REMOVAL = '1.3.6.1.4.1.2636.4.1.5'\nFRU_INSERTION = '1.3.6.1.4.1.2636.4.1.6'\nFRU_POWER_OFF = '1.3.6.1.4.1.2636.4.1.7'\nFRU_POWER_ON = '1.3.6.1.4.1.2636.4.1.8'\nFRU_FAILED = 
'1.3.6.1.4.1.2636.4.1.9'\nFRU_OFFLINE = '1.3.6.1.4.1.2636.4.1.10'\nFRU_ONLINE = '1.3.6.1.4.1.2636.4.1.11'\nFRU_CHECK = '1.3.6.1.4.1.2636.4.1.12'\nFEB_SWITCH_OVER = '1.3.6.1.4.1.2636.4.1.13'\nHARDDISK_FAILED = '1.3.6.1.4.1.2636.4.1.14'\nHARDDISK_MISSING = '1.3.6.1.4.1.2636.4.1.15'\nBOOT_FROM_BACKUP = '1.3.6.1.4.1.2636.4.1.16'\nFM_LINK_ERROR = '1.3.6.1.4.1.2636.4.1.17'\nFM_CELL_DROP_ERROR = '1.3.6.1.4.1.2636.4.1.18'\nEXT_SRC_LOCK_LOST = '1.3.6.1.4.1.2636.4.1.19'\nPOWER_SUPPLY_OK = '1.3.6.1.4.1.2636.4.2.1'\nFAN_OK = '1.3.6.1.4.1.2636.4.2.2'\nTEMPERATURE_OK = '1.3.6.1.4.1.2636.4.2.3'\nFRU_OK = '1.3.6.1.4.1.2636.4.2.4'\nEXT_SRC_LOCK_ACQUIRED = '1.3.6.1.4.1.2636.4.2.5'\n\nALARM_LIST = {\n    POWER_SUPPLY_FAILURE: ('POWER SUPPLY FAILURE', 'MAJOR', ALARM_ACTIVE_STATE, 'Power supply failure'),\n    POWER_SUPPLY_OK: ('POWER SUPPLY FAILURE', 'MAJOR', ALARM_CLEAR_STATE, 'Power supply failure'),\n    FAN_FAILURE: ('FAN FAILURE', 'CRITICAL', ALARM_ACTIVE_STATE, 'Fan failure'),\n    FAN_OK: ('FAN FAILURE', 'MAJOR', ALARM_CLEAR_STATE, 'Fan failure'),\n    OVER_TEMPERATURE: ('OVER TEMPERATURE', 'CRITICAL', ALARM_ACTIVE_STATE, 'Over temperature'),\n    TEMPERATURE_OK: ('OVER TEMPERATURE', 'MAJOR', ALARM_CLEAR_STATE, 'Over temperature'),\n    FRU_REMOVAL: ('FRU REMOVAL', 'NOTICE', ALARM_ACTIVE_STATE, 'FRU removed'),\n    FRU_INSERTION: ('FRU REMOVAL', 'NOTICE', ALARM_CLEAR_STATE, 'FRU removed'),\n    FRU_POWER_OFF: ('FRU POWER OFF', 'NOTICE', ALARM_ACTIVE_STATE, 'FRU Power off'),\n    FRU_POWER_ON: ('FRU POWER OFF', 'NOTICE', ALARM_CLEAR_STATE, 'FRU Power off'),\n    FRU_FAILED: ('FRU FAILED', 'WARNING', ALARM_ACTIVE_STATE, 'FRU Failed'),\n    FRU_OK: ('FRU FAILED', 'MAJOR', ALARM_CLEAR_STATE, 'FRU Failed'),\n    FRU_OFFLINE: ('FRU OFFLINE', 'NOTICE', ALARM_ACTIVE_STATE, 'FRU Offline'),\n    FRU_ONLINE: ('FRU OFFLINE', 'NOTICE', ALARM_CLEAR_STATE, 'FRU Offline'),\n    EXT_SRC_LOCK_LOST: ('EXT SRC LOCK LOST', 'MAJOR', ALARM_ACTIVE_STATE, 'Ext source lock lost'),\n    EXT_SRC_LOCK_ACQUIRED: ('EXT SRC LOCK LOST', 'MAJOR', ALARM_CLEAR_STATE, 'Ext source lock lost'),\n    REDUNDANCY_SWITCH_OVER: ('REDUNDANCY SWITCH OVER', 'MAJOR', ALARM_ACTIVE_STATE, 'Redundancy switch over'),\n    FRU_CHECK: ('FRU CHECK', 'NOTICE', ALARM_ACTIVE_STATE, 'FRU check'),\n    FEB_SWITCH_OVER: ('FEB SWITCH OVER', 'MAJOR', ALARM_ACTIVE_STATE, 'FRU switch over'),\n    HARDDISK_FAILED: ('HARDDISK FAILED', 'MAJOR', ALARM_ACTIVE_STATE, 'Harddisk failed'),\n    HARDDISK_MISSING: ('HARDDISK MISSING', 'MAJOR', ALARM_ACTIVE_STATE, 'Harddisk missing'),\n    BOOT_FROM_BACKUP: ('BOOT FROM BACKUP', 'MAJOR', ALARM_ACTIVE_STATE, 'Boot from backup'),\n    FM_LINK_ERROR: ('FM LINK ERROR', 'MAJOR', ALARM_ACTIVE_STATE, 'Link error'),\n    FM_CELL_DROP_ERROR: ('FM CELL DROP ERROR', 'MAJOR', ALARM_ACTIVE_STATE, 'Cell drop error')\n}\n\n\ndef convert_time(t):\n    '''ISO time year-month-dayThour-mins-seconds-millisecondsZ '''\n    from datetime import datetime\n\n    if type(t) in [str, unicode]:\n        new_t = datetime.strptime(t, '%Y-%m-%d %H:%M:%S %Z')\n    else:\n        new_t = datetime.fromtimestamp(t / 1e3)\n    new_t = new_t.strftime('%Y-%m-%dT%H:%M:%S.%f%Z')[:-3]\n    new_t += \"Z\"\n    return new_t\n\n\ndef check_alarm_data(data, **kwargs):\n    '''\n    '''\n    if data:\n        alarmInformation = data['alarm-information']\n        if alarmInformation and 'alarm-detail' not in alarmInformation:\n            # There are no active alarms\n            alarmInformation['alarm-detail'] = {}\n\n    return data\n\n\ndef sync_alarm_data(data, **kwargs):\n    '''\n    The alarms are synced to Kafka; to make sure the RA sync works fine,\n    return an empty dict.\n    '''\n    return {}\n\n\ndef convert_alarm(data, 
**kwargs):\n '''\n '''\n alarmOID = data['snmpTrapOID']\n alarmDetails = ALARM_LIST.get(alarmOID, None)\n if alarmDetails:\n resource = '%s-%s/%s/%s' % (data['jnxName'], data['jnxL1Index'],\n data['jnxL2Index'], data['jnxL3Index'])\n alarmData = {\n 'alarm-time': long(data['sysUpTimeInstance']),\n 'alarm-short-description': '%s %s' % (resource, alarmDetails[0]),\n 'alarm-class': alarmDetails[1],\n 'alarm-op': alarmDetails[2],\n 'alarm-description': alarmDetails[3],\n 'alarm-resource': resource\n }\n return process_alarms({'alarms': [alarmData]})\n\n return []\n\n\ndef process_alarms(data, **kwargs):\n ''' Convert the alarms to a format as expected by bpocore kafka exchange '''\n\n alarms = []\n for alarmDict in data.get('alarms', []):\n alarmType = alarmDict.get('alarm-short-description', '')\n resourceName, alarmType = alarmType.split(' ', 1)\n alarmDict['alarm-short-description'] = alarmType.replace(' ', '-').upper()\n alarmDict['alarm-resource'] = 'Firefly:%s' % resourceName\n alarm = convert_alarm_to_rasdk_format(alarmDict)\n alarms.append(alarm)\n\n return alarms\n\n\ndef convert_alarm_to_rasdk_format(alarmData, event_state='CREATE'):\n ''' Creates a alarm dict which can be published to bpocore alarm processor '''\n\n severity = alarmData.get('alarm-class', 'MINOR').upper()\n return {\n 'version': 1,\n 'header': {\n 'roleIds': [],\n 'envelopeId': str(uuid4()),\n 'timestamp': convert_time(time.time())\n },\n 'event': {\n '_type': ALARM_TYPE,\n 'id': str(uuid4()),\n 'resource': alarmData.get('alarm-resource', '%s:%s' % ('firefly', 'chassis')),\n 'op': alarmData.get('alarm-op', ALARM_ACTIVE_STATE),\n 'time': convert_time(alarmData.get('alarm-time', time.time())),\n 'severity': severity,\n 'additionalText': alarmData.get('alarm-description', ''),\n 'conditionSource': 'MANAGER',\n 'nativeConditionType': alarmData.get('alarm-short-description', ''),\n 'nativeConditionTypeQualifier': 'chassis',\n 'server': {\n 'id': \"1\",\n 'name': RA_TYPE\n },\n 'serviceAffecting': ALARM_SERVICE_AFF_MAP.get(severity, 'UNKNOWN'),\n 'additionalAttributes': {\n }\n }\n }\n\n\ndef push_alarms(data, **kwargs):\n toKafka = []\n\n append_tags = kwargs.get('append_tags', False)\n\n if append_tags:\n toKafka.append(build_sync_status_header('start', str(uuid4())))\n\n for alarm in data:\n toKafka.append({\n 'topic': ALARM_TOPIC,\n 'key': alarm.get('nodeId', alarm['header']['envelopeId']),\n 'msg': alarm\n })\n\n if append_tags:\n toKafka.append(build_sync_status_header('complete', str(uuid4())))\n\n return toKafka\n\n\ndef build_sync_status_header(state, domainID=''):\n return {\n 'topic': ALARM_TOPIC,\n 'key': domainID,\n 'msg': {\n 'version': 1,\n 'header': {\n 'roleIds': [],\n 'envelopeId': str(uuid4()),\n 'timestamp': convert_time(time.time())\n },\n 'event': {\n '_type': 'bp.v1.SyncEvent',\n 'id': \"1\",\n 'object_type': 'alarms',\n 'op': state\n }\n }\n }\n","sub_path":"rajuniper/handle_alarms.py","file_name":"handle_alarms.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590046751","text":"arregloNUmeros = [1,2, 3, 4, 5, 6, 7, 8, 9]\n\nfor numero in arregloNUmeros:\n print(numero)\n\nfor indice in range(1,5):\n print (f\"Numero de iteración: {indice}\")\n\nfor indice in range(3):\n print(f\"Numero de iteración: {indice}\")\n\nfor indice in range(7,10):\n print(f\"Numero de iteración: {indice}\")\n\nfor indice in range(10):\n if(indice == 6):\n break # Detener la ejecución de esta iteración, el loop no 
continua\n    if (indice == 4):\n        continue # Detener la ejecución de esta iteración, el loop continua\n    print(f\"Numero de iteración: {indice}\")\n\nnumeroAuxiliar = 10\nnumeroAuxiliarDos = 10\nwhile numeroAuxiliar < 10:\n    print(f\"NUmero: {numeroAuxiliar}\")\n    numeroAuxiliar += 1\n    if numeroAuxiliarDos == 70:\n        print(numeroAuxiliarDos)\n","sub_path":"01-Python/04_loops.py","file_name":"04_loops.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"373583682","text":"# License MIT\n\nimport os\nfrom setuptools import setup # , find_packages\n\nDISTRO_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n\ndef extract_requiremens(file):\n    \"\"\"\n    Extracts requirements from file\n\n    :param file: path to file\n    :return: list[str] -- list of requirements\n    \"\"\"\n\n    with open(file, 'r') as file:\n        return file.read().splitlines()\n\n\nsetup(\n    name='supertool-distro',\n    version='0.1',\n    description='super-super tool',\n    author='Olya Nosova',\n    author_email='olenka-nosova@mail.ru',\n    license='MIT',\n    classifiers=[\n        'Topic :: Education',\n        'Programming Language :: Python :: 3.6',\n    ],\n    packages=['supertool'],\n    install_requires=extract_requiremens(os.path.join(DISTRO_ROOT_PATH, 'requirements', 'base.txt')),\n    tests_require=extract_requiremens(os.path.join(DISTRO_ROOT_PATH, 'requirements', 'test.txt')),\n    test_suite='nose.collector',\n    scripts=[os.path.join('bin', 'similar_files')],\n    bin=[os.path.join('bin', 'similar_files')]\n)\n","sub_path":"hw14/supertool/src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154323693","text":"# -*- encoding: utf-8 -*-\r\n__author__ = 'zuoweiguang'\r\n\r\nimport logging # import logging module\r\nimport logging.config\r\nimport os\r\nimport datetime\r\n\r\n# 使用logging模块:\r\nclass fastmap_logging:\r\n\r\n    def __init__(self, project_id=None):\r\n        self.project_id = project_id\r\n        self.rootdir = os.getcwd()\r\n        # 创建log文件夹\r\n        self.log_folder = self.rootdir + os.sep + datetime.datetime.now().strftime(\"%Y%m%d\")\r\n        # print \"log_folder::\", self.log_folder\r\n        if not os.path.isdir(self.log_folder):\r\n            os.makedirs(self.log_folder)\r\n    #----------------------------------------------------------------------------\r\n    # 获取一个logging对象\r\n    def get_logging(self, logger_name):\r\n        logger = logging.getLogger(logger_name)\r\n        LOG_FILE = self.log_folder + os.sep + logger_name + \"_\" + datetime.datetime.now().strftime(\"%Y%m%d\") + \".log\"\r\n        fileHandler = logging.FileHandler(LOG_FILE)\r\n        formatHandler = logging.Formatter(\"[%(asctime)s] %(filename)s %(funcName)s [lineno:%(lineno)s] [%(levelname)s]: %(message)s\")\r\n        fileHandler.setFormatter(formatHandler)\r\n        logger.addHandler(fileHandler)\r\n        logger.setLevel(logging.DEBUG)\r\n        return logger\r\n    #----------------------------------------------------------------------------\r\n    # 清空日志\r\n    def clear_log(self, keyword):\r\n        if os.path.exists(self.log_folder):\r\n            filelist=os.listdir(self.log_folder)\r\n            for log in filelist:\r\n                if keyword in log:\r\n                    filepath = os.path.join(self.log_folder, log)\r\n                    if os.path.isfile(filepath):\r\n                        os.remove(filepath)\r\n\r\n\r\n\r\n\r\n# 使用及调用方式\r\ndef unin_test():\r\n    # 日志信息\r\n    info_message = \"this is logging test! 
INFO\"\r\n error_message = \"this is logging test! ERROR\"\r\n debug_message = \"this is logging test! DETAIL\"\r\n\r\n # new 一个logging类的时候,传入project_id\r\n fm_logging = fastmap_logging(\"99901\")\r\n info_logger = fm_logging.get_logging(\"info\")\r\n error_logger = fm_logging.get_logging(\"error\")\r\n\r\n # log记录方式如下:\r\n\r\n # 记录自定义信息或控制台信息\r\n info_logger.info(info_message)\r\n # 错误报出、异常报出\r\n error_logger.error(error_message)\r\n\r\n # 清空日志\r\n # fm_logging.clear_log(\"/app/fos/scripts/autoscripts/20150106/99901/detail_log_folder/fm_detail_20150106.log\")\r\n\r\n\r\n# 操作步骤 detail error\r\nif __name__=='__main__':\r\n\r\n unin_test()\r\n","sub_path":"fastmap_reportor/fastmap_logging.py","file_name":"fastmap_logging.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236812817","text":"import re\n\nfrom riot import Suite, Case\nfrom riot.riot import expand_specs, suites_iter\n\n\ndef test_expand():\n specs = list(\n expand_specs(\n [\n (\"django-pylibmc\", [\">=0.6,<0.7\"]),\n (\"django-redis\", [\">=4.5,<4.6\"]),\n (\"pylibmc\", [\"\"]),\n (\"python-memcached\", [\"\"]),\n ]\n )\n )\n\n assert len(specs) == 1\n (spec,) = specs\n assert len(spec) == 4\n\n specs = list(\n expand_specs(\n [\n (\"django-pylibmc\", [\">=0.6,<0.7\"]),\n (\"django-redis\", [\">=4.5,<4.6\"]),\n (\"pylibmc\", [\"\"]),\n (\"python-memcached\", [None, \"\"]),\n ]\n )\n )\n\n assert len(specs) == 2\n for s in specs:\n assert len(s) == 4\n\n\ndef test_suites_iter():\n suites = [\n Suite(\n name=\"tracer\",\n command=\"pytest tests/test_tracer.py\",\n cases=[\n Case(\n pys=[\n 2.7,\n 3.5,\n 3.6,\n 3.7,\n 3.8,\n ],\n pkgs=[(\"msgpack\", [None, \"==0.5.0\", \">=0.5,<0.6\", \">=0.6.0,<1.0\"])],\n ),\n ],\n ),\n ]\n\n instances = list(suites_iter(suites, pattern=re.compile(\".*\")))\n assert len(instances) == (5 * 4)\n\n suites = [\n Suite(\n name=\"django\",\n command=\"pytest tests/contrib/django\",\n cases=[\n Case(\n env=[(\"TEST_DATADOG_DJANGO_MIGRATION\", [None, \"1\"])],\n pys=[2.7, 3.5, 3.6],\n pkgs=[\n (\"django-pylibmc\", [\">=0.6,<0.7\"]),\n (\"django-redis\", [\">=4.5,<4.6\"]),\n (\"pylibmc\", [\"\"]),\n (\"python-memcached\", [\"\"]),\n (\"django\", [\">=1.8,<1.9\", \">=1.11,<1.12\"]),\n ],\n ),\n Case(\n env=[(\"TEST_DATADOG_DJANGO_MIGRATION\", [None, \"1\"])],\n pys=[3.5],\n pkgs=[\n (\"django-pylibmc\", [\">=0.6,<0.7\"]),\n (\"django-redis\", [\">=4.5,<4.6\"]),\n (\"pylibmc\", [\"\"]),\n (\"python-memcached\", [\"\"]),\n (\"django\", [\">=2.0,<2.1\", \">=2.1,<2.2\"]),\n ],\n ),\n Case(\n env=[(\"TEST_DATADOG_DJANGO_MIGRATION\", [None, \"1\"])],\n pys=[3.6, 3.7, 3.8],\n pkgs=[\n (\"django-pylibmc\", [\">=0.6,<0.7\"]),\n (\"django-redis\", [\">=4.5,<4.6\"]),\n (\"pylibmc\", [\"\"]),\n (\"python-memcached\", [\"\"]),\n (\n \"django\",\n [\">=2.0,<2.1\", \">=2.1,<2.2\", \">=2.2,<2.3\", \">=3.0,<3.1\"],\n ),\n ],\n ),\n ],\n ),\n ]\n\n instances = list(suites_iter(suites, pattern=re.compile(\".*\")))\n assert len(instances) == (2 * 3 * 2) + (2 * 1 * 2) + (2 * 3 * 4)\n","sub_path":"tests/test_riot.py","file_name":"test_riot.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25283259","text":"\"\"\"Module to calculate result.\"\"\"\nfrom pycalc.library import Library as lib\nfrom pycalc import exeptions\n\n\ndef calculate(expr):\n \"\"\"Calculate postfix string on stack.\"\"\"\n stack = []\n unary_operators = 
(lib.UNARY_PLUS, lib.UNARY_MINUS)\n for token in expr:\n try:\n if token == lib.FUNC_DELIMITER:\n stack.append(token)\n elif token == lib.FILLER:\n stack.append(token)\n elif token in lib.OPERATORS:\n if token in unary_operators:\n operator = stack.pop()\n stack.append(lib.OPERATORS[token].function(operator))\n continue\n op2, op1 = stack.pop(), stack.pop()\n stack.append(lib.OPERATORS[token].function(op1, op2))\n elif token in lib.user_functions:\n calculate_function(lib.user_functions, token, stack)\n elif token in lib.FUNCTIONS:\n calculate_function(lib.FUNCTIONS, token, stack)\n else:\n stack.append(float(token))\n except IndexError:\n raise exeptions.InvalidStringError(\n 'Not balanced operators and operands.')\n if len(stack) != 1:\n raise exeptions.InvalidStringError(\n 'Not balanced operators and operands.')\n return stack.pop()\n\n\ndef calculate_function(functions, token, stack):\n \"\"\"Calculate token as a function.\"\"\"\n operators = []\n if stack[-1] == lib.FILLER:\n stack.pop()\n stack.append(float(functions[token]()))\n else:\n while len(stack) >= 2 and stack[-2] == lib.FUNC_DELIMITER:\n operators.append(stack.pop())\n stack.pop()\n operators.append(stack.pop())\n operators.reverse()\n stack.append(float(functions[token](*operators)))\n","sub_path":"src/pycalc/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474920407","text":"# original copyright 2008 - 2015 Ruben Decrop\n# modifications copyright 2015 - 2016 Chessdevil Consulting bvba\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport io\nimport csv\nfrom bjk2016 import app\nfrom bjk2016.models.subscription import Subscription\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ntitle = {\n 'en': 'Confirmation subscription',\n 'nl': 'Bevestiging inschrijving',\n 'fr': 'Confirmation inscription',\n 'de': 'Bestätigung Anmeldung',\n}\n\nbody_text = {\n 'en': 'The subscription of {0} {1} is confirmed.\\nThe organisation team',\n 'nl': 'De inschrijving van {0} {1} is bevestigd.\\nHet organisatieteam',\n 'fr': \"l' inscription de {0} {1} est confirmée.\\nL' équipe d' organisation\",\n 'de': 'Die Anmeldung von {0} {1} ist bestätigt.\\nDas Organisationsteam'\n}\n\nbody_html = {\n 'en': 'The subscription of {0} {1} is confirmed.<br>The organisation team',\n 'nl': 'De inschrijving van {0} {1} is bevestigd.<br>Het organisatieteam',\n 'fr': \"L' inscription de {0} {1} est confirmée.<br>L' équipe d' organisation\",\n 'de': 'Die Anmeldung von {0} {1} ist bestätigt.<br>Das Organisationsteam'\n}\n\ndef subscribe_csv():\n \"\"\"\n generate a csv file of the subscriptions\n :return a string of the total csv file\n \"\"\"\n fnames = [\n '_id', 'birthdate', 'category', 'checknat', 'chesstitle', 'emailparent',\n 'emailplayer', 'federation', 'fidenation', 'fiderating', 'firstname',\n 'fullnameparent', 'gender', 'id_club', 'id_fide', 'id_national',\n 'locale', 'mailing1', 'mobileattendant', 'name', 'natrating',\n 'nationality', 'payamount', 'paydate', 'paymessage', 'rating',\n 'remarks'\n ]\n output = io.StringIO()\n csvwr = csv.DictWriter(output, fnames, extrasaction='ignore')\n csvwr.writeheader()\n ss = Subscription.mongo_find({})\n for sub in ss:\n csvwr.writerow(sub)\n return output.getvalue()\n\ndef subscribe_get(cat):\n \"\"\"\n get the subsciptions of a category\n :param cat: the category\n :return: the list of subsciptions\n \"\"\"\n gender = 'M' if 
cat[0] == 'b' else 'F'\n age = '-{0}'.format(cat[1:])\n log.debug('gender: %s, age: %s', gender, age)\n parts = Subscription.mongo_find({'gender': gender, 'category': age})\n log.debug('parts: %s', parts)\n return parts\n\ndef subscribe_player(prt):\n \"\"\"\n subscribe a participant\n :param prt: the participant\n :return:\n \"\"\"\n from reddevil import send_mail\n from bjk2016.i18n import get_locale\n locale = get_locale()\n s = Subscription()\n s.update(prt)\n s.locale = locale\n s.mongo_save()\n receivers = []\n if (s.emailplayer):\n receivers.append(s.emailplayer)\n if (s.emailparent):\n receivers.append(s.emailparent)\n f = s.firstname\n n = s.name\n msg = MIMEMultipart('alternative')\n msg['Subject'] = title[locale]\n msg['From'] = 'ruben@chessdevil.net'\n msg['To'] = ', '.join(receivers)\n receivers.append('luc.cornet@telenet.be')\n part1 = MIMEText(body_text[locale].format(f,n), 'plain')\n part2 = MIMEText(body_html[locale].format(f,n), 'html')\n msg.attach(part1)\n msg.attach(part2)\n send_mail(app, msg, 'ruben@chessdevil.net', receivers)\n return\n","sub_path":"bjk2016/bl/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155741895","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 29 January 2020\n@author: Dennis\n'''\n\nfrom odoo import api, fields, models, _\nfrom datetime import date, timedelta, datetime\nfrom odoo.exceptions import UserError, ValidationError\nfrom dateutil.relativedelta import relativedelta\n\nimport logging\n_logger = logging.getLogger(\"\\n\\n\\t\\t\\tTesting Module 1 2 3\")\n\nclass BIR1601C(models.Model):\n _name = 'bir.1601c'\n _inherit = ['mail.thread', 'mail.activity.mixin', 'resource.mixin', 'document.default.approval']\n _rec_name = 'month_year'\n\n @api.multi\n def _get_total_nontaxable(self):\n for i in self:\n i.total_nontoxable = sum([i.mwe, i.mwe_other, i.thirtheenth_and_other_benefits, i.deminimis, i.statutory_contribution, i.other_non_taxable_amount])\n\n @api.multi\n def _get_total_taxable(self):\n for i in self:\n i.taxable_compensation = i.total_compensation - i.total_nontoxable\n i.net_taxable = (i.total_compensation - i.total_nontoxable) - i.taxable_compensation_non_withheld\n\n @api.multi\n def _get_total_remittances(self):\n for i in self:\n i.tax_withheld_remittance = sum([i.tax_withheld, i.adjustment])\n i.total_remitance_made = sum([i.tax_remitted_previously, i.other_remittance])\n\n @api.multi\n def _compute_tax_dues(self):\n for i in self:\n i.tax_due = i.tax_withheld_remittance - i.total_remitance_made\n i.total_penalties = sum([i.surcharge, i.interest, i.compromise])\n i.total_tax_due = sum([i.tax_due, i.total_penalties])\n\n company_id = fields.Many2one('res.company', string=\"Company\", required=True, default=lambda self: self.env['res.company']._company_default_get('hrms_ph_statutory'), readonly=True, states={'draft': [('readonly', False)]},\n track_visibility=\"always\")\n month_year = fields.Char(string=\"Month-Year\", help=\"MM/YYYY\", required=True, readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n note = fields.Text(string=\"Notes\", track_visibility=\"always\")\n total_compensation = fields.Float(string=\"Total Amount of Compensation\", readonly=True)\n mwe = fields.Float(string=\"Statutory Minimum Wage (MWEs)\", help=\"Statutory Minimum Wage (MWEs)\", readonly=True)\n mwe_other = fields.Float(string=\"Minimum Wage Earner\", help=\"Holiday 
Pay, Overtime Pay, Night Shift, Differential Pay, Hazard Pay (Minimum Wage Earner)\", readonly=True)\n thirtheenth_and_other_benefits = fields.Float(string=\"13th Month Pay and Other Benefits\", readonly=True)\n deminimis = fields.Float(string=\"De Minimis Benefits\", readonly=True)\n statutory_contribution = fields.Float(string=\"Statutory Contribution\", help=\"SSS, GSIS, PHIC, HDMF Mandatory Contribution and Union Dues (employee's share only)\", readonly=True)\n other_non_taxable_amount = fields.Float(string=\"Other Non-Taxable Compensation\", readonly=True)\n other_non_taxable_details = fields.Char(string=\"Other Non-Taxable Compensation Details\", readonly=True)\n total_nontoxable = fields.Float(string=\"Total Non-Taxable Compensation\", compute=\"_get_total_nontaxable\")\n taxable_compensation = fields.Float(string=\"Taxable Compensation\", help=\"Taxable Compensation\", compute=\"_get_total_taxable\")\n taxable_compensation_non_withheld = fields.Float(string=\"Taxable compensation not subject to withholding tax\", help=\"Taxable compensation not subject to withholding tax (for employees, other than MWEs, receiving P250,000 & below for the year)\",\n readonly=True, states={'draft': [('readonly', False)]})\n net_taxable = fields.Float(string=\"Net Taxable Compensation\", compute=\"_get_total_taxable\")\n tax_withheld = fields.Float(string=\"Total Taxes Withheld\", readonly=True)\n adjustment = fields.Float(string=\"Adjustment\", help=\"Add/(Less): Adjustment of Taxes Withheld from Previous Month/s (From Part IV-Schedule 1, Item 4)\", readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n tax_withheld_remittance = fields.Float(string=\"Taxes Withheld for Remittance\", compute=\"_get_total_remittances\")\n tax_remitted_previously = fields.Float(string=\"Tax Remitted In Previously Filed\", help=\"Less: Tax Remitted in Return Previously Filed, if this is an amended return\", readonly=True, states={'draft': [('readonly', False)]})\n other_remittance = fields.Float(string=\"Other Remittances Amount Made\", readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n other_remittance_detail = fields.Char(string=\"Other Remittances Made\", readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n total_remitance_made = fields.Float(string=\"Total Tax Remittances Made\", compute=\"_get_total_remittances\")\n tax_due = fields.Float(string=\"Tax Still Due/(Over-remittance)\", help=\"Tax Still Due/(Over-remittance)\", compute=\"_compute_tax_dues\")\n surcharge = fields.Float(string=\"Surcharge\", readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n interest = fields.Float(string=\"Interest\", readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n compromise = fields.Float(string=\"Compromise\", readonly=True, states={'draft': [('readonly', False)]}, track_visibility=\"always\")\n total_penalties = fields.Float(string=\"Total Penalties\", compute=\"_compute_tax_dues\")\n total_tax_due = fields.Float(string=\"Tax Amount Still Due/(Overremittance)\", help=\"Tax Amount Still Due/(Overremittance)\", compute=\"_compute_tax_dues\")\n\n\n _sql_constraints = [\n ('month_year', 'unique (month_year, company_id)', 'Record already Exists for the given month!'),\n ]\n\n @api.constrains(\"month_year\")\n def _check_month_year_format(self):\n try:\n date = datetime.strptime(self.month_year, \"%m/%Y\")\n except:\n raise ValidationError(_(\"Cutoff Month format 
must be in 'MM/YYYY'\"))\n\n    @api.multi\n    def compute_compensation(self):\n        total_compensation = basic_salary = other_salary = thirtheenth_and_other_benefits = deminimis = contributions = other_nontaxable = withholding = 0\n        other_non_taxable_details = ''\n        slip = self.env['hr.payslip.line'].search([\n            ('slip_id.payslip_period', '=', self.month_year),\n            ('slip_id.company_id', '=', self.company_id.id),\n            ('salary_rule_id.bir_1601c_setting', 'not in', [False]),\n            ('slip_id.state', '=', 'done')\n        ])\n        for i in slip:\n            wage = i.slip_id.contract_id.wage\n            if i.salary_rule_id.bir_1601c_setting == 'total_compensation':\n                total_compensation += i.total\n            elif i.salary_rule_id.bir_1601c_setting == 'basic_salary' and wage <= 20833.00:\n                basic_salary += i.total\n            elif i.salary_rule_id.bir_1601c_setting == 'other_salary' and wage <= 20833.00:\n                other_salary += i.total\n                other_non_taxable_details += i.name + ', '\n            elif i.salary_rule_id.bir_1601c_setting == '13th_and_other':\n                thirtheenth_and_other_benefits += i.total\n            elif i.salary_rule_id.bir_1601c_setting == 'deminimis':\n                deminimis += i.total\n            elif i.salary_rule_id.bir_1601c_setting == 'contributions':\n                contributions += i.total\n            elif i.salary_rule_id.bir_1601c_setting == 'other_nontaxable':\n                other_nontaxable += i.total\n            elif i.salary_rule_id.bir_1601c_setting == 'withholding':\n                withholding += i.total\n        self.write({\n            'total_compensation': total_compensation,\n            'mwe': basic_salary,\n            'mwe_other': other_salary,\n            'thirtheenth_and_other_benefits': thirtheenth_and_other_benefits,\n            'deminimis': deminimis,\n            'statutory_contribution': contributions,\n            'other_non_taxable_amount': other_nontaxable,\n            'other_non_taxable_details': other_non_taxable_details,\n            'tax_withheld': withholding,\n        })\n        return True\n","sub_path":"hrms_ph_statutory/models/bir_1601c.py","file_name":"bir_1601c.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224960932","text":"#!/usr/bin/env python3\n# red = 1, white = 0\n# source: https://www.youtube.com/watch?v=ZbTJ_YCTDLI&ab_channel=NASAJetPropulsionLaboratory\nchute = [\n    \"RRRRRRRRRRRRRRRRRRRRwwwwwwwRwwwwwwwwwwwRwwwwwRwwRwwwwwwwwRwRwwwRRRRRRRRRRRRRRRRR\",\n    \"wwwRRRRRRRRRRRRRRRRRwwwwwwRRwRwwwwwwRwwRwwwwwwwRRRwwwwwwRwwwwwwwwRwRwwwwwwwRRwwR\",\n    \"wwwwwRwRwwwwwwwwRwwwwwwwwwRwwRwwwwwwRRRwwwwwwwwRRRwwwwwRwwRRwwwRRRRRRRRRRRRRRRRR\",\n    \"wwwwRwwwRwwwwwwwRwRRwwwwRRRwRwwwwwwwRRRwwwwRRRwRRwwwwwwwRwRwwwwwwRRRRRwwwwwRwRRR\"\n];\n\nchunk_size=10 # characters are in blocks of 10, with the first 3 gores in each character always white\nring_no=0\nfor ring in chute:\n    ring_no+=1\n    data = []\n    panels=ring.replace('R','1').replace('w','0')\n    for i in range(0, len(panels), chunk_size):\n        character_binary=panels[i:i + chunk_size]\n        character_binary=character_binary[3:] # first 3 (white) panels are character separators, can be ignored\n        chunk_int = int(character_binary, 2) # convert binary to integer\n        chunk_ascii = chr(chunk_int + 64) # convert to an ascii character (65 = A)\n        if chunk_int == 127:\n            # blocks of 10 red panels act as stops, should not be included in message.\n            continue\n        elif ring_no < 4: # inner 3 rings are letters\n            data.append({'ascii': chunk_ascii,\n                         'int': chunk_int,\n                         'binary': character_binary,\n                         })\n        else: # outer ring includes integers and ascii characters\n            if chunk_ascii in ['N', 'E', 'S', 'W']:\n                data.append({'ascii': chunk_ascii,\n                             'int': chunk_int,\n                             'binary': character_binary,\n                             })\n            else:\n                data.append({'ascii': str(chunk_int),\n                             'int': ' ',\n                             'binary': character_binary,\n                             })\n\n    for 
line in ['binary', 'int', 'ascii']:\n for c in data:\n print(f\"{c[line]:^7}\", end=' ')\n print(\"\")\n","sub_path":"decode_parachute.py","file_name":"decode_parachute.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500733238","text":"import praw\nimport memetext, string\n\ndef isMemy(s):\n s.strip()\n allcaps = True\n for c in s:\n if c!=\" \" and c!='\\n':\n allcaps = c in string.ascii_uppercase\n if not allcaps:\n return False\n if s[0]==s[2]==s[4]==\" \" or s[1]==s[3]==s[5]==\" \":\n return True\n return False\ndef extractStr(s):\n s.strip()\n if s.find('\\n')!=-1:\n return s[:s.find('\\n')]\n return s.strip()\n# reddit = praw.Reddit('bot1')\n# \n# subreddit = reddit.subreddit(\"pythonforengineers\")\n# with open(\"ReplyIDs.txt\", \"r\") as f:\n# posts_replied_to = f.read()\n# posts_replied_to = posts_replied_to.split(\"\\n\")\n# posts_replied_to = list(filter(None, posts_replied_to))\n# for submission in subreddit.hot(limit=5):\n# if submission.id not in posts_replied_to:\n# forest = praw.models.comment_forest.CommentForest(submission)\n# try:\n# forest.replace_more(limit=None)\n# commentList = forest.list()\n# except:\n# print(\"No comments found in top 5 posts\")\n# break\n# for comment in commentList:\ncomment=\" S T A I U M S T A T U S \\n S \\n T \\n A \\n D \\n I \\n U \\n M \\n S \\n T \\n A \\n T \\n U \\n S\"\nif isMemy(comment):\n str = extractStr(comment)\n print(memetext.memeify(str))\n # posts_replied_to.append(submission.id)\n\n# with open(\"posts_replied_to.txt\", \"w\") as f:\n# for post_id in posts_replied_to:\n# f.write(post_id + \"\\n\")","sub_path":"eqr/functiontest.py","file_name":"functiontest.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628058326","text":"class Solution(object):\n def isPowerOfFour(self, num):\n \"\"\"\n :type num: int\n :rtype: bool\n \"\"\"\n mask = int('10'*15+'1',2)\n return mask & num == num and num != 0 and num & num -1 == 0\n\nprint(Solution().isPowerOfFour(20))","sub_path":"isPowerOfFour.py","file_name":"isPowerOfFour.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500739574","text":"#!/usr/bin/env python\nfrom kzpy3.vis2 import *\nclear_timer = Timer(1)\n\nimport kzpy3.Cars.robot_car_28Aug2017.nodes.runtime_parameters as rp\n\npotential_image = imread(rp.potential_field_png)\npotential_image = potential_image[:,:,0]\npotential_image = (255*z2o(1.0*potential_image)).astype(np.int)\n#mi(potential_image,'potential_image');spause()\nfrom kzpy3.Grapher_app.Graph_Image_Module import *\nwall_length = 4*107.0/100.0\nhalf_wall_length = wall_length/2.0\nhw = half_wall_length\nx_min = -(6.03/2.0)\nx_max = (6.03/2.0)\ny_min = -(6.03/2.0)\ny_max = 6.03/2.0\nGi = Graph_Image(xmin,x_min,\n\txmax,x_max,\n\tymin,y_min,\n\tymax,y_max,\n\txsize,shape(potential_image)[0],\n\tysize,shape(potential_image)[1])\nfor i in range(3):\n\tGi[img][:,:,i] = potential_image.copy()\n\n\n\nColors = {'Mr_Black':(255,0,0),'Mr_Yellow':(255,255,0),'Mr_Blue':(0,0,255),'Mr_Lt_Blue':(25,150,255),\n\t'Mr_Orange':(255,50,0),'Mr_Purple':(0,255,255)}\n\nk = mci(Gi[img],delay=5,scale=10)\n\ndone = False\nwhile not done:\n\ttry:\n\t\tsomething_happened = False\n\n\t\tfor car in sggo(opjD('*.car.txt')):\n\t\t\tcar_name = fname(car).split('.')[0]\n\t\t\tnew_car = 
car.replace('car','')\n\t\t\tunix('cp '+car+' '+new_car)\n\t\t\tl = txt_file_to_list_of_strings(new_car)\n\t\t\tfor ll in l:\n\t\t\t\texec(ll)\n\t\t\tif len(pose) == 4:\n\n\t\t\t\tif clear_timer.check():\n\n\t\t\t\t\tclear_timer.reset()\n\n\n\t\t\t\theading_floats = np.array(heading_floats)\n\n\t\t\t\tfor xxyy in xy:\n\n\t\t\t\t\tGi[img][int(xxyy[0]),int(xxyy[1]),:] = [0,150,10]\n\n\t\t\t\tcar_color = Colors[car_name]\n\t\t\t\tGi[ptsplot](x,[pose[0]],y,[pose[1]],color,car_color)\n\t\t\t\tGi[ptsplot](x,[pose[0]+pose[2]],y,[pose[1]+pose[3]],color,(0,255,0))\n\t\t\t\tsomething_happened = True\n\n\t\tif something_happened:\n\t\t\tk = mci(Gi[img],delay=5,scale=10)\n\t\t\tif k == ord('q'):\n\t\t\t\tdone = True\n\t\t\t\tbreak\n\t\tif True:#k == ord('r'):\n\t\t\t\tfor i in range(3):\n\t\t\t\t\tGi[img][:,:,i] = potential_image.copy()\n\t\n\texcept (KeyboardInterrupt, SystemExit):\n\t\traise\n\texcept Exception as e:\n\t\tprint(\"********** Exception ***********************\")\n\t\tprint(e.message, e.args)\n\t","sub_path":"scratch/c/_plot_aruco_paramiko2.py","file_name":"_plot_aruco_paramiko2.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526010562","text":"\"\"\"auth_server URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom rest_framework_auth_server.views import logged_in_jwt_token, FacebookLogin\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework.routers import SimpleRouter\nfrom rest_framework_auth_server.views import DetailedUserViewSet\n\nfrom rest_framework_extensions.routers import NestedRouterMixin\nfrom rest_auth_plus.views import SocialAccountViewSet, UserSocialAccountViewSet\n\nclass NestedSimpleRouter(NestedRouterMixin, SimpleRouter):\n pass\n\nrouter = NestedSimpleRouter()\nsocial_account_routes = router.register(r'social-account',\n SocialAccountViewSet,\n base_name='social-account')\n\nuser_router = NestedSimpleRouter()\nuser_routes = user_router.register(r'user',\n DetailedUserViewSet,\n base_name='user')\n\nuser_social_account_routes = user_routes.register(\n r'social-account',\n UserSocialAccountViewSet,\n base_name='user-social-account',\n parents_query_lookups=[UserSocialAccountViewSet.parent_fk]\n)\n\nurlpatterns = [\n url(r'^auth/', include(router.urls)),\n # order here is important, so we take precedence over rest_auth.urls\n url(r'^auth/rest-auth/', include(user_router.urls)),\n url(r'^auth/admin/', admin.site.urls),\n url(r'^auth/rest-auth/', include('rest_auth.urls')),\n url(r'^auth/facebook-login/', FacebookLogin.as_view(), name=\"fb_login\"),\n url(r'^auth/rest-auth/registration/', include('rest_auth.registration.urls')),\n url(r'^auth/jwt/', logged_in_jwt_token),\n\n]\n","sub_path":"tests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423251404","text":"\"\"\"\n@author: David Belais\n\"\"\"\n\nfrom __future__ import division,unicode_literals,print_function,with_statement,absolute_import\n\nimport re\nimport os\nimport csv\nfrom urllib2 import urlopen\nfrom warnings import warn\nfrom sets import Set\nfrom classy import *\nfrom fun import *\nfrom openpyxl import load_workbook\nfrom openpyxl.reader.iter_worksheet import RawCell\n\ndef objectify_worksheet(*args,**keys):\n\treturn objectify_matrix(read_worksheet(*args,**keys))\n\t\ndef objectify_matrix(matrix):\n\tobjects=[]\n\tcolumns=list(c for c in matrix[0])\n\tfor r in matrix[1:]:\n\t\to=Object()\n\t\tfor i in xrange(len(columns)):\n\t\t\to[columns[i]]=r[i] if len(r)>i else None\n\t\tobjects.append(o)\n\treturn objects\n\t\ndef read_matrix(matrix,**keys):\n\tlimits=keys['limits'] if keys.has_key('limits') else [[0,0],[None,None]]\n\tif not limits:\n\t\tlimits=[[0,0],[None,None]]\n\trow_offset=limits[0][1]\n\tcolumn_offset=limits[0][0]\n\trow_limit=limits[1][1]\n\tcolumn_limit=limits[1][0]\n\tif(row_limit):\n\t\tmatrix=list(matrix)[row_offset:row_limit]\n\telif(row_offset):\n\t\tmatrix=list(matrix)[row_offset:]\n\trows=[]\n\tcolumns=[]\n\tfor iter_row in matrix:\n\t\trow=[]\n\t\tif(column_limit):\n\t\t\titer_row=list(c for c in iter_row)[column_offset:column_limit]\n\t\telif(column_offset):\n\t\t\titer_row=list(c for c in iter_row)[column_offset:]\n\t\tfor cell in iter_row:\n\t\t\ttry:#if isinstance(cell,RawCell)\n\t\t\t\tvalue=cell.internal_value\n\t\t\texcept:\n\t\t\t\tvalue=cell\n\t\t\tif isstring(value):\n\t\t\t\tvalue=value.strip()\n\t\t\t\tif value:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif value[-1]=='%' and re.match(r'^[\\d\\.\\%\\,\\$]*$',value):\n\t\t\t\t\t\t\tvalue=float(value.replace('$','').replace('%','') or 0.0)/100.0\n\t\t\t\t\t\telif value[0]=='$' and 
re.match(r'^[\\d\\.\\%\\,\\$]*$',value):# \n\t\t\t\t\t\t\tvalue=float(value.replace('$','').replace('%','') or 0.0)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tlog('%s\\n%s' % (e,value))\n\t\t\tif isinstance(value,float):# and (value % 1)==0:\n\t\t\t\tvalue=intifint(value)\n\t\t\trow.append(value)\n\t\trows.append(row)\n\treturn List(rows,limits=limits,columns=columns)\n\ndef read_worksheet(*args,**keys):\n\tlimits=keys['limits'] if keys.has_key('limits') else [[0,0],[None,None]]\n\tm=re.match(r\"(?:([^\\[]+)?\\[)?([^\\[\\']*.xlsx)\\s*\\]?\\s*([^\\!\\']+)?\\'?\\!?([^\\!]+)?$\",args[0],flags=re.IGNORECASE) if args else None\n\ttry:\n\t\tg=sequence(m.groups())\n\texcept Exception as e:\n\t\traise e\n\tif len(iterstrip(g))>1:\n\t\tif not g[1] and len(args)>1:\n\t\t\tg[1]=args[1]\n\t\tif not g[2] and len(args)>2:\n\t\t\tg[2]=args[1]\n\t\tdirectory,workbook,worksheet,cell_range=g\n\telif args:\n\t\tlog(args)\n\t\tdirectory,workbook=os.path.split(args[0])\n\t\tworksheet=args[1] if len(args)>1 else None\n\t\tcell_range=args[2] if len(args)>2 else None\n\tif cell_range and not keys.has_key('limits'):\n\t\txmin,ymin,xmax,ymax=re.match(r'([A-Za-z]*)(\\d*):([A-Za-z]*)(\\d*)',cell_range).groups()\n\t\tlimits=[\n\t\t\t[\n\t\t\t\tsum('ABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(m)*26**(len(xmin)-xmin.index(m)-1) for m in xmin),\n\t\t\t\tint(ymin)-1\n\t\t\t],[\n\t\t\t\tsum('ABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(m)*26**(len(xmax)-xmax.index(m)-1) for m in xmax),\n\t\t\t\tint(ymax)-1\n\t\t\t]\n\t\t]\n\tpath=os.path.join(os.path.abspath(directory),workbook)\n\twb=load_workbook(\n\t\tfilename=path,\n\t\tuse_iterators=True\n\t)\n\tws=wb.get_sheet_by_name(name=worksheet)\n\tif not ws:\n\t\traise Exception(args)\n\titer_rows=ws.iter_rows()\n\trows=read_matrix(iter_rows,limits=limits)\n\treturn List(rows,name=worksheet,workbook=workbook,directory=directory,limits=rows.limits,path=path,columns=rows.columns)\n\ndef read_csv(path,**keys):\n\tlimits=keys.pop('limits',[[0,0],[None,None]])\n\tf=urlopen(path,**keys)\n\t#for row in csv.reader(f):\n\t#\tprint(row)\n\trows=read_matrix(csv.reader(f),limits=limits)\n\treturn List(rows,directory=os.path.dirname(path),limits=rows.limits,path=path,columns=rows.columns)\n\nif __name__==\"__main__\":\n\timport doctest\n\tdoctest.testmod()","sub_path":"ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575692766","text":"# -*- coding: utf-8 -*-\n\nfrom biosim.animals import Animal, Herbivore, Carnivore\nfrom operator import attrgetter\nimport numpy as np\nimport numba\n\n__author__ = 'Samir Adrik'\n__email__ = 'samir.adrik@gmail.com'\n\n\nclass Square:\n \"\"\"\n Square superclass from which all landscape classes are subclassed.\n Represents a single landscape square, or cell unit on a map.\n\n \"\"\"\n\n parameters = {}\n\n @classmethod\n def set_parameters(cls, new_parameters=None):\n \"\"\"\n Sets user-defined simulation parameters for all squares pertaining to\n the Square superclass.\n\n Parameters\n ----------\n new_parameters: dict\n Dictionary with parameters to be changed, must be a subset of\n default parameters.\n\n \"\"\"\n if not isinstance(new_parameters, dict):\n raise TypeError(\"'param_dict' must be type 'dict'\")\n\n for parameter in new_parameters.keys():\n if parameter not in cls.parameters.keys():\n raise ValueError(\"unknown parameter: '{}'\".format(parameter))\n if 'f_max' in new_parameters.keys():\n if not 0 <= new_parameters['f_max']:\n raise 
ValueError(\"parameter 'f_max' must be non-negative\")\n        cls.parameters.update(new_parameters)\n\n    def __init__(self):\n        \"\"\"\n        Constructor for the Square superclass.\n        \"\"\"\n\n        self.fodder = 0\n        self.animals_in_square = {Herbivore: [], Carnivore: []}\n        self.newcomers = {Herbivore: [], Carnivore: []}\n\n    def restore_square(self):\n        \"\"\"\n        Restores a square to its default state, i.e. empties the square of\n        animals.\n\n        \"\"\"\n        self.animals_in_square = {Herbivore: [], Carnivore: []}\n\n    def allocate_animals(self, animals):\n        \"\"\"\n        Adds a number of animals of a given species to a square.\n\n        Parameters\n        ----------\n        animals: list\n            List of instances of a given species.\n\n        \"\"\"\n        if not isinstance(animals, list):\n            raise TypeError(\"animals must be type 'list'\")\n\n        for animal in animals:\n            if type(animal) in self.animals_in_square:\n                self.animals_in_square[type(animal)].append(animal)\n            else:\n                raise TypeError(\"invalid species\")\n\n    def delete_animal(self, animal):\n        \"\"\"\n        Deletes an animal from the landscape square.\n\n        Parameters\n        ----------\n        animal : class instance\n\n        \"\"\"\n        if not isinstance(animal, Animal):\n            raise TypeError(\n                \"'{}' is not an instance of the Animal class\".format(\n                    type(animal).__name__))\n\n        animals = self.animals_in_square.get(type(animal), [])\n        if animal not in animals:\n            raise ValueError(\"unable to retrieve animal\")\n\n        animals.remove(animal)\n\n    def procreate_animals(self):\n        \"\"\"\n        Procreates new animals in a square.\n\n        \"\"\"\n        for animals in self.animals_in_square.values():\n            num = len(animals)\n            newborns = []\n            for animal in animals:\n                if animal.birth(num):\n                    newborn = type(animal)()\n                    animal.update_weight(newborn.weight)\n                    newborns.append(newborn)\n            animals.extend(newborns)\n\n    def aging_animals(self):\n        \"\"\"\n        Ages all the animals in a square.\n\n        \"\"\"\n        for animals in self.animals_in_square.values():\n            for animal in animals:\n                animal.aging()\n\n    def weightloss_animals(self):\n        \"\"\"\n        All animals in a square lose weight.\n\n        \"\"\"\n        for animals in self.animals_in_square.values():\n            for animal in animals:\n                animal.weightloss()\n\n    def death_animals(self):\n        \"\"\"\n        Determines, for each animal inhabiting the square, whether it dies.\n\n        \"\"\"\n        for species, animals in self.animals_in_square.items():\n            survivors = []\n            for animal in animals:\n                if not animal.death():\n                    survivors.append(animal)\n            self.animals_in_square[species] = survivors\n\n    def _grow_fodder(self):\n        \"\"\"\n        Sets new yearly amount of fodder according to growth rules for given\n        landscape type.\n\n        \"\"\"\n        pass\n\n    def _feed_herbivores(self):\n        \"\"\"\n        Feeds all herbivores inhabiting a square.\n\n        \"\"\"\n        herbivores = sorted(self.animals_in_square[Herbivore],\n                            key=attrgetter('fitness'), reverse=True)\n\n        for herbivore in herbivores:\n            appetite = herbivore.parameters['F']\n            if appetite <= self.fodder:\n                herbivore.eating(appetite)\n                self.fodder -= appetite\n            elif 0 < self.fodder < appetite:\n                herbivore.eating(self.fodder)\n                self.fodder = 0\n\n    def _feed_carnivores(self):\n        \"\"\"\n        Feeds all carnivores inhabiting a square.\n\n        \"\"\"\n        self.animals_in_square[Carnivore].sort(key=attrgetter('fitness'),\n                                               reverse=True)\n        self.animals_in_square[Herbivore].sort(key=attrgetter('fitness'))\n\n        for carnivore in self.animals_in_square[Carnivore]:\n            appetite = carnivore.parameters['F']\n            food_intake = 0\n            survivors = []\n            for idx, herbivore in enumerate(self.animals_in_square[Herbivore]):\n                if food_intake >= appetite:\n                    
survivors.extend(self.animals_in_square[Herbivore][idx:])\n break\n elif carnivore.kill(herbivore.fitness):\n desired_amount = appetite - food_intake\n if herbivore.weight <= desired_amount:\n food_intake += herbivore.weight\n elif herbivore.weight > desired_amount:\n food_intake += desired_amount\n else:\n survivors.append(herbivore)\n carnivore.eating(food_intake)\n self.animals_in_square[Herbivore] = survivors\n\n def feed_animals(self):\n \"\"\"\n Feeds all animals inhabiting a square, i.e. animals of both species.\n\n \"\"\"\n self._grow_fodder()\n self._feed_herbivores()\n self._feed_carnivores()\n\n def migrate_animals(self, neighbours):\n \"\"\"\n Migrates the animals in a square to a nearby square, i.e. a\n neighbouring square, if the animals should \"decide\" to move.\n Neighbouring squares can only be adjacent landscape squares in the\n vertical and horizontal directions. The propensity of moving to\n a neighbouring square is determined by the available fodder for a\n herbivore, and the number of herbivores in the neighbouring square\n for a carnivore. If the neighbouring square is type Ocean or\n Mountain, i.e. inhabitable, the propensity is zero. The neighbouring\n square to which an animal ultimately moves, is selected by\n introducing the cumulative probabilities, and \"choosing\" a random\n number on the unit interval.\n\n For more, see e.g. Donald Knuth, The Art of Computer Programming,\n vol 2, ch. 3.3.-3.4.\n\n Parameters\n ----------\n neighbours : list\n List of square instances.\n\n \"\"\"\n for species, animals in self.animals_in_square.items():\n if animals:\n props = [neighbour.compute_propensity(species) for neighbour in\n neighbours]\n if sum(props) == 0:\n break\n prob = np.array([prop / np.sum(props) for prop in props])\n cum_prob = np.cumsum(prob)\n migrated_animals = []\n for animal in animals:\n if animal.movement():\n n = 0\n rand_num = np.random.random()\n while rand_num >= cum_prob[n]:\n n += 1\n neighbours[n].newcomers[species].append(animal)\n migrated_animals.append(animal)\n self.animals_in_square[species] = [animal for animal in animals\n if animal not in\n migrated_animals]\n\n def add_newcomers(self):\n \"\"\"\n Adds the newly migrated animals to the animals currently occupying\n the square.\n\n \"\"\"\n for species in self.animals_in_square.keys():\n newcomers = self.newcomers[species]\n self.animals_in_square[species].extend(newcomers)\n self.newcomers[species] = []\n\n @staticmethod\n @numba.jit\n def _relative_abundance(num_animals, appetite, relevant_fodder):\n \"\"\"\n Computes the relative abundance of fodder in a square. For a\n Herbivore, the relative abundance of fodder is determined by the\n available fodder and the number of herbivores in the neighbouring\n square. And for a Carnivore, it is determined by the total weight of\n all herbivores occupying the square--which in essence is the\n available fodder for a carnivore--and number of carnivores in the\n neighbouring square.\n\n Parameters\n ----------\n num_animals : int\n Number of animals of the same species\n appetite : int, float\n Appetite of the species\n relevant_fodder : int, float\n Amount of relevant fodder available in cell\n\n Returns\n -------\n out : float\n Relative abundance of fodder in cell\n\n \"\"\"\n return relevant_fodder / ((num_animals + 1) * appetite)\n\n def compute_propensity(self, animal_type):\n \"\"\"\n Computes the propensity of moving from the square 'i' that is currently\n inhabited, to a neighbouring square 'j.' 
The neighbouring squares\n consist of the four adjacent squares in the vertical and horizontal\n directions. The propensity to move to a neighbouring square is\n computed from the relative abundance of the neighbouring squares. The\n propensity to move from square 'i' to square 'j' is zero if the\n destination square 'j' is Mountain or Ocean.\n\n Parameters\n ----------\n animal_type : class name\n\n Returns\n -------\n out : float\n Propensity to move\n\n \"\"\"\n num_animals = len(self.animals_in_square[animal_type]) + len(\n self.newcomers[animal_type])\n\n appetite = animal_type.parameters['F']\n\n relevant_fodder = self.fodder if animal_type == Herbivore else np.sum(\n herb.weight for herb in self.animals_in_square[Herbivore]\n + self.newcomers[Herbivore])\n\n relative_abundance = self._relative_abundance(num_animals, appetite,\n relevant_fodder)\n return np.exp(animal_type.parameters['lambda'] * relative_abundance)\n\n\nclass Jungle(Square):\n \"\"\"\n Jungle subclass. The Jungle landscape type is habitable for animals of\n both species. Due to comparatively high growth rate, the jungle is less\n prone to overgrazing than the savannah. Carnivores can hunt for\n herbivores in the jungle.\n\n \"\"\"\n parameters = {'f_max': 800.0}\n\n def __init__(self):\n super().__init__()\n self.fodder = self.parameters['f_max']\n\n def _grow_fodder(self):\n \"\"\"\n Sets new yearly amount of fodder according to growth rules for the\n Jungle landscape type.\n\n \"\"\"\n self.fodder = self.parameters['f_max']\n\n\nclass Savannah(Square):\n \"\"\"\n Savannah subclass. The Savannah landscape type is habitable for animals\n of both species. Compared to the Jungle landscape type, it has a lower\n growth rate. Carnivores can hunt for herbivores in the savannah.\n\n \"\"\"\n parameters = {'f_max': 300.0, 'alpha': 0.3}\n\n def __init__(self):\n \"\"\"\n Constructor for the Savannah subclass.\n\n \"\"\"\n\n super().__init__()\n self.fodder = self.parameters['f_max']\n\n def _grow_fodder(self):\n \"\"\"\n Sets new yearly amount of fodder according to growth rules for the\n Savannah landscape type.\n\n \"\"\"\n self.fodder += self.parameters['alpha'] * (\n self.parameters['f_max'] - self.fodder)\n\n\nclass Desert(Square):\n \"\"\"\n Desert subclass. The Desert landscape type is habitable for animals of\n both species. Since the desert doesn't have any vegetation, i.e. no\n fodder growth, herbivores won't be able to find food in the desert.\n Carnivores can, however, prey on herbivores in the desert.\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n\nclass Mountain(Square):\n \"\"\"\n Mountain subclass. The Mountain landscape type is impassable for animals\n of both species, and is completely inactive in the simulation.\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def compute_propensity(self, animal_type):\n \"\"\"\n Computes the propensity to move from an arbitrary source square to a\n Mountain square. Since a Mountain square is impassable for both species\n by default, it will always return zero, regardless of species type.\n\n Parameters\n ----------\n animal_type : class name\n\n Returns\n -------\n out : float\n Propensity to move.\n\n \"\"\"\n if animal_type:\n return 0.0\n\n\nclass Ocean(Square):\n \"\"\"\n Ocean subclass. 
The Ocean landscape type is impassable for animals of\n both species, and is completely inactive in the simulation.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor for the Ocean subclass.\n \"\"\"\n super().__init__()\n\n def compute_propensity(self, animal_type):\n \"\"\"\n Computes the propensity to move from an arbitrary source square to an\n Ocean square. Since an Ocean square is impassable for both species\n by default, it will always return zero, regardless of species type.\n\n Parameters\n ----------\n animal_type : class name\n\n Returns\n -------\n out : float\n Propensity to move.\n\n \"\"\"\n if animal_type:\n return 0.0\n","sub_path":"biosim/landscape.py","file_name":"landscape.py","file_ext":"py","file_size_in_byte":14503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262320945","text":"\"\"\"\nTry running sequential scan on a local file.\n\n\"\"\"\n\nfrom dapbench.jython.grinder import DapTestRunner\n\nlocal_file = '/home/spascoe/git/dapbench/ta_20101129/ta_6hrPlev_HadGEM2-ES_piControl_r1i1p1_197812010600-197901010000.nc'\n\n\n# Make 120 requests to variable ta\nTestRunner = DapTestRunner.configure('Local NetCDF test',\n local_file, 'ta',\n {'time': 120})\n \n","sub_path":"grinder/local_test.py","file_name":"local_test.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57891371","text":"import psv\nimport unittest\n\nfrom hypothesis.strategies import text, integers, lists, floats\nfrom hypothesis import given, settings\nimport string\nfrom random import randint\n\nimport os\nfilenames = [\"tests/dataset-folder/\", \"tests/dataset-only-one/\"]\nfor filename in filenames:\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\ndel os\n\n\nclass psv_load_tests(unittest.TestCase):\n\n\n def __init__(self, *args, **kwargs):\n super(psv_load_tests, self).__init__(*args, **kwargs)\n self.is_populated = False\n\n @given(lists(text(min_size=5, max_size=20, alphabet=string.ascii_letters), max_size=20, min_size=3))\n @given(integers(5,500))\n @settings(max_examples=1)\n def generate_data_str(self, columns, l):\n columns = tuple(set(columns))\n def _gen():\n for x in range(l):\n store = {}\n for column in columns:\n #Determine the kind of data to be used\n number = randint(1,3)\n if number == 1:\n store[column] = text(min_size=3, max_size=30, alphabet=string.ascii_letters).example()\n elif number == 2:\n store[column] = integers(1, 10000).example()\n elif number == 3:\n store[column] = floats().example()\n yield store\n self.csvloads_dict_tuple = tuple(_gen())\n self.csvloads_dict_columns = columns\n import io\n import csv\n with io.StringIO() as csvfile:\n fieldnames = self.csvloads_dict_columns\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n writer.writeheader()\n for item in self.csvloads_dict_tuple:\n writer.writerow(item)\n self.csvloads_str = csvfile.getvalue()\n\n def generate_data_str_nonrandom(self, columns, l):\n columns = tuple(set(columns))\n def _gen():\n for x in range(l):\n store = {}\n for column in columns:\n number = randint(1,3)\n if number == 1:\n store[column] = text(min_size=3, max_size=30, alphabet=string.ascii_letters).example()\n elif number == 2:\n store[column] = integers(1, 10000).example()\n elif number == 3:\n 
store[column] = floats(1, 10000).example()\n yield store\n self.csvloads_dict_tuple = tuple(_gen())\n self.csvloads_dict_columns = columns\n import io\n import csv\n with io.StringIO() as csvfile:\n fieldnames = self.csvloads_dict_columns\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n writer.writeheader()\n for item in self.csvloads_dict_tuple:\n writer.writerow(item)\n self.csvloads_str = csvfile.getvalue()\n\n def populate_folders(self):\n if self.is_populated:\n return None\n else:\n self.is_populated = True\n\n import csv\n columns = lists(text(min_size=5, max_size=20, alphabet=string.ascii_letters), max_size=20, min_size=2).example()\n for x in range(1,6):\n self.generate_data_str_nonrandom(columns, randint(1,50))\n with open(\"tests/dataset-folder/psv-tests-\"+str(x)+\".csv\", 'w+', encoding=\"UTF-8\", newline='') as csvfile:\n fieldnames = columns\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n writer.writeheader()\n for item in self.csvloads_dict_tuple:\n writer.writerow(item)\n self.generate_data_str()\n\n with open(\"tests/dataset-only-one/test.csv\", 'w+', encoding=\"UTF-8\", newline='') as csvfile:\n fieldnames = self.csvloads_dict_columns\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n writer.writeheader()\n for item in self.csvloads_dict_tuple:\n writer.writerow(item)\n\n\n def test_loads_str(self):\n self.generate_data_str()\n try:\n api = psv.loads(self.csvloads_str)\n except Exception as err:\n self.fail(str(err))\n self.assertTrue(bool(api.__columns__))\n\n def test_loads_dict(self):\n self.generate_data_str()\n try:\n api = psv.loads(self.csvloads_dict_tuple)\n except Exception as err:\n self.fail(str(err))\n self.assertFalse(bool(api.__columns__))\n\n def test_loaddir(self):\n self.populate_folders()\n try:\n api = psv.loaddir(\"tests/dataset-folder/\")\n except Exception as err:\n self.fail(str(err))\n\n def test_load(self):\n self.populate_folders()\n try:\n api = psv.load(\"tests/dataset-only-one/test.csv\")\n api2 = psv.load(open(\"tests/dataset-only-one/test.csv\", \"r\", encoding=\"UTF-8\"))\n except Exception as err:\n self.fail(str(err))\n\n def test_output_methods(self):\n self.generate_data_str()\n api = psv.loads(self.csvloads_str)\n try:\n api.outputs(quote_all=False)\n api.outputs(quote_all=True)\n api.output(\"TEST-OUTPUT-1.csv\", quote_all=False)\n api.output(\"TEST-OUTPUT-1.csv\", quote_all=True)\n except Exception as err:\n self.fail(str(err))\n\n try:\n api.outputs(columns={\"NOT SUPPORTED TYPE\",})\n api.output(\"NAME.csv\", columns={\"NOT SUPPORTED TYPE\",})\n self.fail(\"Output Method failed to catch unsupported type\")\n except ValueError:\n #TEST Passed\n pass\n\n\n @given(lists(text(min_size=5, max_size=20, alphabet=string.ascii_letters), max_size=20, min_size=3))\n @settings(max_examples=1)\n def test_api_new(self, columns):\n try:\n psv.new()\n psv.new(columns=columns)\n except Exception as err:\n self.fail(str(err))\n","sub_path":"tests/test_loading.py","file_name":"test_loading.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323203429","text":"from contextlib import contextmanager\n\nfrom ..exceptions import DependencyCycleError\n\n\nclass DependencyStack:\n \"\"\"\n Stores the stack of dependency instantiation to detect and prevent cycles\n by raising DependencyCycleError.\n\n Used in the DependencyContainer.\n\n This class is not 
thread-safe by itself.\n \"\"\"\n\n def __init__(self):\n self._stack = list()\n self._seen = set()\n\n @contextmanager\n def instantiating(self, dependency):\n \"\"\"\n Context Manager which has to be used when instantiating the\n dependency to keep track of the dependency path.\n\n When a cycle is detected, a DependencyCycleError is raised.\n \"\"\"\n if dependency in self._seen:\n raise DependencyCycleError(self._stack + [dependency])\n\n self._stack.append(dependency)\n self._seen.add(dependency)\n try:\n yield\n finally:\n self._seen.remove(self._stack.pop())\n","sub_path":"src/antidote/_internal/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348983999","text":"#REFERENCED MEDIAPIPE https://google.github.io/mediapipe/solutions/pose.html pose solutions\nimport cv2\nimport argparse\nfrom stuff import *\nimport mediapipe as mp\nfrom bpa import BPA\nfrom diff_exercises import EXTYPE\n\n#add arguements for cam or from another vd\nap = argparse.ArgumentParser()\nap.add_argument(\"-t\", \"--extype\", type=str, required=True)\nap.add_argument(\"-v\",\"--video\", type=str, required=False)\nargs = vars(ap.parse_args())\n\n#if there is no other vid source do cam\nif args[\"video\"] is not None:\n cp = cv2.VideoCapture(args[\"video\"])\nelse:\n cp = cv2.VideoCapture(0) \n\ncp.set(3, 800) \ncp.set(4, 480) \n\n#get pose solutions from mediapipe api\nmpp = mp.solutions.pose\n\nwith mpp.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n #add a counter for the number of squats/push-ups etc.\n mpd = mp.solutions.drawing_utils\n cnt = 0 \n stat= True \n #when cam is opened \n while cp.isOpened():\n ret, img = cp.read()\n #resize it\n img = cv2.resize(img, (800, 480), interpolation=cv2.INTER_AREA)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img.flags.writeable = False\n\n res = pose.process(img)\n img.flags.writeable = True\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n \n #try getting an exercise position otherwise pass\n try:\n lm = res.pose_landmarks.landmark\n cnt, stat = EXTYPE(lm).excalc(\n args[\"extype\"], cnt, stat)\n except:\n pass\n\n tble(args[\"extype\"], cnt, stat)\n #draw the landmarks\n mpd.draw_landmarks(\n img,\n res.pose_landmarks,\n mpp.POSE_CONNECTIONS,\n mpd.DrawingSpec(color=(255, 255, 255),\n thickness=4,\n circle_radius=3),\n mpd.DrawingSpec(color=(174, 180, 45),\n thickness=4,\n circle_radius=2),\n )\n #show the vid and lines\n cv2.imshow('Video', img)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n #quit release\n cp.release()\n cv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"466741743","text":"def get_formatted_name(first_name, last_name):\n \"\"\"返回整洁的姓名\"\"\"\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()\n \n# 这是一个无限循环\nwhile True:\n print(\"\\nPlease tell me your name:\")\n print(\"(Enter 'quit' at any time to quit)\")\n f_name = input(\"First name: \")\n if f_name == 'quit':\n break\n l_name = input(\"Last name: \")\n if l_name == 'quit':\n break\n \n format_name = get_formatted_name(f_name, l_name)\n print(f\"\\nHello, 
{format_name}\")","sub_path":"Chapter8/Section3_Return/greeter.py","file_name":"greeter.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"218907544","text":"import oauth2 as oauth\nimport cgi\nimport simplejson as json\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\n\nfrom main.models import UserProfile\nfrom main.models import Pool\n\nconsumer = oauth.Consumer(settings.LINKEDIN_TOKEN, settings.LINKEDIN_SECRET)\n\nrequest_token_url = 'https://api.linkedin.com/uas/oauth/requestToken'\naccess_token_url = 'https://api.linkedin.com/uas/oauth/accessToken'\nauthenticate_url = 'https://www.linkedin.com/uas/oauth/authenticate'\n\n\ndef oauth_login(request):\n if request.META['SERVER_PORT'] == 443:\n current_server = \"https://\" + request.META['HTTP_HOST']\n else:\n current_server = \"http://\" + request.META['HTTP_HOST']\n oauth_callback = current_server + reverse('oauth_authenticated')\n client = oauth.Client(consumer)\n resp, content = client.request(\"%s?oauth_callback=%s\" %\n (request_token_url, oauth_callback), \"GET\")\n if resp['status'] != '200':\n raise Exception(\"Invalid response from LinkedIn.\")\n request.session['request_token'] = dict(cgi.parse_qsl(content))\n url = \"%s?oauth_token=%s\" \\\n % (authenticate_url, request.session['request_token']['oauth_token'])\n return HttpResponseRedirect(url)\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n@login_required\ndef oauth_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('home'))\n\n\n@login_required\ndef pools(request, all=True):\n if not all:\n raise NotImplementedError('This is something we should think about.')\n user_profile = request.user.userprofile_set.get()\n pools = user_profile.pool_set.all()\n return render(request, 'my_pools.html', locals())\n\n\ndef oauth_authenticated(request):\n token = oauth.Token(\n request.session['request_token']['oauth_token'],\n request.session['request_token']['oauth_token_secret']\n )\n if 'oauth_verifier' in request.GET:\n token.set_verifier(request.GET['oauth_verifier'])\n client = oauth.Client(consumer, token)\n resp, content = client.request(access_token_url, \"GET\")\n if resp['status'] != '200':\n raise Exception(\"Invalid response from LinkedIn.\")\n access_token = dict(cgi.parse_qsl(content))\n headers = {'x-li-format': 'json'}\n fields = [\n 'id',\n 'first-name',\n 'last-name',\n 'email-address',\n ]\n url = \"http://api.linkedin.com/v1/people/~:(%s)\" % ','.join(fields)\n token = oauth.Token(\n access_token['oauth_token'],\n access_token['oauth_token_secret']\n )\n client = oauth.Client(consumer, token)\n resp, content = client.request(url, \"GET\", headers=headers)\n profile = json.loads(content)\n # Step 3: lookup the user or create them if they don't exist.\n firstname = profile['firstName']\n lastname = profile['lastName']\n identifier = profile['id']\n email = profile['emailAddress']\n try:\n user = User.objects.get(username=identifier)\n except User.DoesNotExist:\n user = User.objects.create_user(\n identifier,\n email,\n access_token['oauth_token_secret']\n )\n user.first_name = firstname\n user.last_name = lastname\n user.save()\n # Save our permanent token 
and secret for later.\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.oauth_token = access_token['oauth_token']\n userprofile.oauth_secret = access_token['oauth_token_secret']\n userprofile.save()\n # Authenticate the user and log them in using Django's pre-built\n # functions for these things.\n user = authenticate(\n username=identifier,\n password=access_token['oauth_token_secret']\n )\n login(request, user)\n return HttpResponseRedirect(reverse('home'))\n","sub_path":"linkediff/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492764906","text":"# case 1\nimport sys\nfrom itertools import product\n\ncount = 0\nbinary = ''\n\nlength = int(sys.stdin.readline())\n\nfor i in list(product(['0','1'],repeat=length)):\n tmp = ''.join(i)\n # print(tmp, tmp.count('0'))\n if tmp.count('0') % 2 == 0 and '00' in tmp or '0' not in tmp:\n count+=1\n print(tmp)\n\nprint(count%15746)\n\n# case 2\nimport sys\n\ntmp = [0] * 1000001\n\nlength = int(sys.stdin.readline())\ntmp[1], tmp[2] = 1, 2\nfor i in range(3, length+1):\n tmp[i] = (tmp[i-1] + tmp[i-2])%15746\n\nprint(tmp[length])","sub_path":"python/1904.py","file_name":"1904.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122055986","text":"import numpy as np\nfrom scipy.sparse import csr_matrix, find\nfrom scipy.spatial import cKDTree\nfrom tqdm import tqdm_notebook as tqdm\n\nfrom datasets.graph import draw_sample\nimport torch\nimport torch_geometric\nimport os\nimport os.path as osp\n\nimport glob\nraw_dir= '/home/sapta/hgcalNtuple_Aug24/clusters/'\nfnamelist = [filepath for filepath in glob.glob(raw_dir+'data_*.pt')]\ndata_list = []\nfor i in tqdm(fnamelist):\n data_list.append(torch.load(i))\n \ntotalev = len(data_list)\nprint('data_list[0].y = ', data_list[0].y)\nprint('total samples:',totalev)\n#print('data_list.y = ', data_list.y)\n\nimport torch_geometric\nntrainbatch = 2 #10 #was set to 50\nntestbatch = 1\ntrainloader = torch_geometric.data.DataLoader(data_list[:totalev-4100], batch_size=ntrainbatch)\ntestloader = torch_geometric.data.DataLoader(data_list[totalev-4100:totalev], batch_size=ntestbatch)\n\nimport os.path as osp\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch_geometric.transforms as T\nfrom torch_geometric.data import DataLoader\nfrom tqdm import tqdm_notebook as tqdm\n\n\nfrom models.DynamicReductionNetwork import DynamicReductionNetwork\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.drn = DynamicReductionNetwork()\n\n def forward(self, data):\n logits = self.drn(data)\n return F.softplus(logits)\n\ndevice = torch.device('cuda')#('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\noptimizer = torch.optim.AdamW(model.parameters(), lr=0.001)\n\n\n\ndef resoloss(output,truth):\n batch_size = output.size()[0]\n print ('output.size()[0] = ', output.size()[0])\n mse = F.mse_loss(output, truth, reduction='mean')\n res = torch.sum((output-truth)**2/truth)/batch_size\n #return (mse + 0.2*res)\n return (res)\n\n\n\n\nmodel.train()\ndef train(epoch):\n model.train()\n loss = []\n print ('len(tqdm(trainloader)) = ' , len(tqdm(trainloader)))\n print (tqdm(trainloader))\n for data in tqdm(trainloader):\n print('data.y = ', data.y)\n data = data.to(device)\n print (data)\n 
optimizer.zero_grad()\n result = model(data)\n print ('result = ', result)\n lossc = resoloss(result, data.y)\n loss.append(lossc.item())\n lossc.backward()\n optimizer.step()\n \n print('train loss:',np.mean(np.array(loss)))\n\n\nfrom scipy.stats import norm\nimport matplotlib.mlab as mlab\nimport scipy.stats as scs\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\ndef gaussian(x, mean,a, sigma):\n return a * np.exp(-((x - mean)**2 / (2 * sigma**2)))\n\ndef evaluate(epoch):\n \"\"\"\"Evaluate the model\"\"\"\n model.zero_grad()\n torch.cuda.empty_cache()\n model.eval()\n loss = []\n frac = []\n for data in tqdm(testloader):\n data = data.to(device)\n result = model(data)\n lossc = resoloss(result, data.y)\n print ('result.item() = ', result.item())\n print ('data.y.item() = ', data.y.item())\n frac.append((result.item() - data.y.item())/data.y.item())\n loss.append(lossc.item())\n\n\n print('test loss:',np.mean(np.array(loss)))\n fracarr = np.array(frac)\n\n bin_heights, bin_borders, _ = plt.hist(fracarr, bins=100, label='histogram')\n bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2\n\n try:\n popt, _ = curve_fit(gaussian, bin_centers, bin_heights, p0=[0., 100., 1.],bounds = ([-np.inf,0,0],[np.inf,np.inf,np.inf]))\n x_interval_for_fit = np.linspace(bin_borders[0], bin_borders[-1], 100)\n plt.plot(x_interval_for_fit, gaussian(x_interval_for_fit, *popt), label='fit')\n plt.legend()\n plt.xlabel('pred - true / true')\n plt.ylabel('counts')\n plt.title(r'$\\mathrm{pred - true / true:}\\ \\mu=%.3f,\\ \\sigma=%.3f$' %(popt[0], popt[2]))\n plt.grid(True)\n plt.show()\n plt.savefig('test.png')\n\n except RuntimeError:\n print(\"Error - curve_fit failed\")\n plt.xlabel('pred - true / true')\n plt.ylabel('counts')\n plt.title('pred - true / true fit failed')\n plt.grid(True)\n plt.show()\n\n print ('np.mean(np.array(loss)) = ', np.mean(np.array(loss)))\n return np.mean(np.array(loss))\n\ncheckpoint_dir = '/home/sapta/hgcalNtuple_Aug24/checkpoint'\nos.makedirs(checkpoint_dir, exist_ok=True)\nbest_loss = 99999999\nfor epoch in range(1, 2): #10\n print ('epoch:',epoch)\n train(epoch)\n loss_epoch = evaluate(epoch)\n checkpoint_file = 'model_epoch_%03i.pth.tar' % ( epoch )\n torch.save(dict(model=model.state_dict()),\n os.path.join(checkpoint_dir,checkpoint_file ))\n if loss_epoch < best_loss:\n best_loss = loss_epoch\n print('new best test loss:',best_loss)\n torch.save(dict(model=model.state_dict()),\n os.path.join(checkpoint_dir,'model_checkpoint_best.pth.tar' ))\n","sub_path":"UpdateWithD49/ML/NotebooksForML/testFour.py","file_name":"testFour.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624024690","text":"'''\nA simple class to perform a DHC release & renew and return the renewal time\n'''\nfrom __future__ import print_function\nimport time\nimport subprocess\nfrom modules.wirelessadapter import *\n\nclass DhcpTester(object):\n '''\n A class to perform a DHC release & renew and return the renewal time\n '''\n\n def __init__(self, file_logger, debug=False, platform=\"rpi\"):\n\n self.platform = platform\n self.debug = debug\n self.file_logger = file_logger\n\n self.interface = ''\n self.duration = ''\n self.debug = debug\n self.platform = platform\n \n def bounce_interface(self, interface, file_logger, debug): \n '''\n Log an error before bouncing the wlan interface\n '''\n import sys\n\n adapter = WirelessAdapter(interface, file_logger, 
self.platform, debug) \n self.file_logger.error(\"Bouncing WLAN interface\")\n adapter.bounce_wlan_interface()\n self.file_logger.error(\"Interface bounced: {}\".format(interface)) \n\n # exit as something bad must have happened...\n sys.exit()\n\n def dhcp_renewal(self, interface):\n '''\n This function will release the current DHCP address and request a renewal.\n The renewal duration is timed and the result (in mS) returned\n\n Usage:\n tester_obj = DhcpTester(logger, debug=False)\n tester_obj.dhcp_renewal(\"wlan0\")\n \n If the renewal fails, the wlan interface will be bounced and the whole script will exit\n '''\n\n self.interface = interface\n\n if self.debug:\n print(\"Releasing dhcp address...\")\n\n self.file_logger.info(\"Releasing dhcp address...\")\n try:\n release_output = subprocess.check_output(\"sudo /sbin/dhclient -r -v {} 2>&1\".format(self.interface), shell=True).decode()\n # TODO: pattern search of: \"DHCPRELEASE of 192.168.1.89 on wlan0\"\n self.file_logger.info(\"Address released.\")\n if self.debug:\n print(\"Address released.\")\n except Exception as ex:\n self.file_logger.error(\"Issue releasing IP address: {}\".format(ex))\n if self.debug:\n print(\"Issue releasing IP address: {}\".format(ex))\n # If release fails, bounce interface to recover - script will exit\n self.bounce_interface(self.interface, self.file_logger, self.debug)\n\n start = time.time()\n\n if self.debug:\n print(\"Renewing dhcp address...\")\n\n self.file_logger.info(\"Renewing dhcp address...\")\n try:\n subprocess.check_output(\"sudo /sbin/dhclient -v {} 2>&1\".format(self.interface), shell=True).decode()\n # TODO: pattern search for \"bound to 192.168.1.89\"\n self.file_logger.info(\"Address renewed.\")\n if self.debug:\n print(\"Address renewed.\")\n except Exception as ex:\n self.file_logger.error(\"Issue renewing IP address: {}\".format(ex))\n if self.debug:\n print(\"Issue renewing IP address: {}\".format(ex))\n # If renewal fails, bounce interface to recover - script will exit\n self.bounce_interface(self.interface, self.file_logger, self.debug)\n\n end = time.time()\n self.duration = int(round((end - start) * 1000))\n\n self.file_logger.info(\"Renewal time: {}mS\".format(self.duration))\n if self.debug:\n print(\"Renewal time: {}mS\".format(self.duration))\n\n return self.duration\n\n\n def get_duration(self):\n ''' Get DHCP renewal duration '''\n return self.duration\n\n","sub_path":"modules/dhcptester.py","file_name":"dhcptester.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54385932","text":"_author_='dixas'\n\nfrom django.conf.urls import patterns, url\nfrom views import *\n\nurlpatterns = patterns('tiendavideojuegos.Servicios.seguridad.views',\n url(r'^$', 'vista_inicio', name=\"index\"),\n url(r'^nuevovideojuego', 'vista_nuevovideojuego', name=\"nuevovideojuego\"),\n url(r'^inicio/', inicio),\n url(r'^registra/', registra),\n url(r'^cliente/', cliente),\n url(r'^reservas/', reservas),\n url(r'^ventas/', ventas),\n url(r'^ingresos/', ingresos),\n url(r'^egresos/', egresos),\n url(r'^catalogo/', catalogo),\n url(r'^compras/', compras)\n\n)\n","sub_path":"Codigo/Django/Tienda/tiendavideojuegos/tiendavideojuegos/Servicios/seguridad/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497203783","text":"\"\"\"Have I been pwned module\"\"\"\nfrom cgi import 
escape\nfrom telegram.ext import Updater, CommandHandler\nfrom telegram import Bot, Update\nimport requests\nfrom emoji import emojize\nimport constants\nheaders = {\n 'User-Agent': 'Octeon: Have I been Pwned module'\n}\n\n\ndef preload(*_):\n return\n\n\ndef pwned(_: Bot, update: Update, user, args):\n account = \" \".join(args)\n r = requests.get(\n \"https://haveibeenpwned.com/api/v2/breachedaccount/%s\" % account)\n if r.status_code == 404:\n return emojize(\":white_check_mark:Got cool news for you! You are NOT pwned!\", use_aliases=True), constants.TEXT\n else:\n pwns = r.json()\n message = emojize(\":warning:<b>Oh No!</b> You have been <b>pwned</b>:\\n<b>Leaked data:</b><i>\")\n pwnedthings = {}\n pwnedsites = {}\n for pwn in pwns:\n pwnedsites.update({pwn[\"Title\"]: pwn[\"Title\"]})\n for data in pwn[\"DataClasses\"]:\n pwnedthings.update({data: data})\n message += escape(\", \".join(list(pwnedthings)))\n message += \"</i>\\n<b>From sites:</b><i>\\n\" + \\\n escape(\"\\n\".join(list(pwnedsites))) + \"</i>\"\n return message, constants.HTMLTXT\n\nCOMMANDS = [\n {\n \"command\":\"/pwned\",\n \"function\":pwned,\n \"description\":\"Have you been hacked?\",\n \"inline_support\": True\n }\n]","sub_path":"plugins/pwned.py","file_name":"pwned.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"377838339","text":"from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\nimport re\n\nweights_file_directory = \"weights.txt\"\ndict_for_cbwd = {'NW': 1, \"NE\": 2, \"SE\": 3, \"cv\": 4}\n\n\ndef hello_world(request):\n return Response('Hello World!')\n\n\ndef convert_to_float_otherwise_none_or_string(input):\n try:\n if input is not None:\n return float(input)\n else:\n return 0.0\n except ValueError:\n if input is None:\n return 0.0\n if len(input) == 0:\n return None\n else:\n return str(input)\n\n\ndef predict_pm(request):\n inputs = \"2656.0,2014.0,1.0,3.0,13.0,29.0,-17.0,9.0,1022.0,NW,22.35,0.0,0.0\"\n print(request.method)\n if str(request.method) == 'POST':\n print(request.body.decode('ascii'))\n inputs = request.body.decode('ascii')\n inputs = re.split(\",\", inputs)\n for i in range(5, len(inputs)):\n inputs[i] = convert_to_float_otherwise_none_or_string(inputs[i])\n\n f = open(weights_file_directory, \"r\", encoding=\"iso8859_2\")\n line = f.readline()\n weights = re.split(',', line)\n for i in range(len(weights)):\n weights[i] = convert_to_float_otherwise_none_or_string(weights[i])\n f.close()\n\n record_anchor = 6\n predicted_pm = float(weights[record_anchor]) * float(inputs[record_anchor]) + \\\n float(weights[record_anchor + 1]) * float(inputs[record_anchor + 1]) + \\\n float(weights[record_anchor + 2]) * float(inputs[record_anchor + 2]-1000) + \\\n float(weights[record_anchor + 3]) * float(dict_for_cbwd.get(inputs[record_anchor + 3])) + \\\n float(weights[record_anchor + 4]) * float(inputs[record_anchor + 4]) + \\\n float(weights[record_anchor + 5]) * float(inputs[record_anchor + 5]) + \\\n float(weights[record_anchor + 6]) * float(inputs[record_anchor + 6])\n return Response(str(predicted_pm))\n\n\nif __name__ == '__main__':\n with Configurator() as config:\n config.add_route('hello', '/hello')\n config.add_route('predict', '/predict')\n config.add_view(hello_world, route_name='hello')\n config.add_view(predict_pm, route_name='predict')\n app = config.make_wsgi_app()\n server = make_server('localhost', 9090, app)\n 
server.serve_forever()\n","sub_path":"web/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577974800","text":"import math\n\n\ndef edistance(x1, x2):\n r = 0.0\n for i in range(len(x1) - 1):\n r += (x1[i] - x2[i]) ** 2\n return math.sqrt(r)\n\n\ndef get_neighbors(train, test_row, k):\n r = list()\n\n for train_row in train:\n d = edistance(train_row, test_row)\n r.append((train_row, d))\n\n r.sort(key=lambda x: x[1])\n\n n = list()\n\n for i in range(k):\n n.append(r[i][0])\n\n return n\n\n\ndef predict(train, test_row, k):\n\n n = get_neighbors(train, test_row, k)\n\n o = [i[-1] for i in n]\n\n m = max(o, key=o.count)\n\n return m\n","sub_path":"k_Nearest_Neighbors/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501403644","text":"# -*- coding: utf-8 -*-\n\n# Import the class\nimport kmapper as km\nfrom sklearn import cluster, preprocessing, manifold, decomposition\nimport processing.fifa as fifa\nfrom matplotlib import pyplot\n\n# 1 - get data\n# Some sample data\nfrom sklearn import datasets\n#data, labels = datasets.make_circles(n_samples=5000, noise=0.03, factor=0.1)\nsamples = 3000\ncubes = 20\nover_lap = 0.15\ndata = fifa.get_processed_data(sample=samples)\n\n# Initialize\nmapper = km.KeplerMapper(verbose=2)\n\n# distance_matrix\n# filtering with distance_matrix\n# Fit to and transform the data -> 여기서 normalize도 함.\nprojected_data = mapper.fit_transform(\n data,\n projection=None,\n scaler=preprocessing.Normalizer(),\n #scaler=preprocessing.MinMaxScaler(),\n ) # X-Y axis\n\n\n# pca = decomposition.PCA(n_components=2)\n# pca_data = pca.fit_transform(projected_data)\n#\n# pyplot.scatter(pca_data[:,0],pca_data[:,1],s=1)\n# pyplot.show()\n\n#print(pca_data.sum())\n\n# projected_data에 square distance matrix 가 나올 수 있음\n# 지금은 그냥 scaling만 한거...\n# map에서 square dist matrix가 드가면 뭘 따로 처리하나...?\n\n# Create dictionary called 'graph' with nodes, edges and meta-information\ngraph = mapper.map(projected_data[:,1],\n projected_data,\n cover=km.Cover(n_cubes=cubes, perc_overlap=over_lap),\n clusterer=cluster.KMeans(),\n precomputed=False)\n\n# Visualize it\nmapper.visualize(graph, path_html=\"make_circles_keplermapper_output.html\",\n title=\"make_circles(n_samples={0}, cube={1}, overlap={2})\".format(samples, cubes, over_lap))\n\n","sub_path":"mapper_test2.py","file_name":"mapper_test2.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326766566","text":"#-------------------------------------------------------------------------------\n# Name: GuiArticleDetails\n# Purpose: Article details window for the citation mapper program\n#\n# Author: Henrik Skov Midtiby\n#\n# Created: 2011-02-25\n# Copyright: (c) Henrik Skov Midtiby 2011\n# Licence: LGPL\n#-------------------------------------------------------------------------------\n#!/usr/bin/env python\n#\n# Copyright 2011 Henrik Skov Midtiby\n#\n# This program is free software: you can redistribute it and/or modify it\n# under the terms of the GNU Lesser General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY 
WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport re\nimport gtk\nimport pprint\nimport StringIO\nimport string\nimport webbrowser\nimport DoiLookup\nimport ArticleWithReferences\n\n\ndef open_url(widget, url):\n webbrowser.open(url)\n\n\n# noinspection PyAttributeOutsideInit\nclass GuiArticleDetails:\n def __init__(self):\n self.doi = None\n self.node_scrolled_window = None\n self.node_information_window = gtk.Window()\n self.node_information_window.set_title(\"Article details\")\n self.node_information_window.set_size_request(500, 300)\n self.vbox = gtk.VBox(False, 0)\n self.add_link_button()\n self.add_text_area()\n self.generate_node_scrolled_window()\n self.node_scrolled_window.show_all()\n self.add_request_doi_information_button()\n self.vbox.pack_start(self.node_scrolled_window, True, True, 0)\n self.node_information_window.add(self.vbox)\n self.node_information_window.show_all()\n gtk.link_button_set_uri_hook(open_url)\n\n def add_link_button(self):\n self.link_label = gtk.LinkButton(\"http://www.sdu.dk\",\n label=\"Locate article on Web of Science\")\n self.vbox.pack_start(self.link_label, False, False, 0)\n\n def add_text_area(self):\n self.text = gtk.TextView()\n self.text.set_wrap_mode(gtk.WRAP_WORD)\n\n def add_request_doi_information_button(self):\n self.requestDOIInformation = gtk.Button(\n \"Fetch more information based on DOI\")\n self.requestDOIInformation.show()\n self.vbox.pack_start(self.requestDOIInformation, False, False, 5)\n self.requestDOIInformation.connect(\n \"clicked\", self.request_doi_information_callback, None)\n\n def request_doi_information_callback(self, p1, p2):\n text = DoiLookup.get_doi_information(self.doi)\n end_iterator = self.text.get_buffer().get_end_iter()\n self.text.get_buffer().insert(end_iterator, '\\nDOI Information: \\n')\n\n for k, v in text.items():\n end_iterator = self.text.get_buffer().get_end_iter()\n self.text.get_buffer().insert(end_iterator, \"%-*s: %s\\n\" % (15, k, v))\n\n self.requestDOIInformation.hide()\n\n def generate_node_scrolled_window(self):\n self.node_scrolled_window = gtk.ScrolledWindow()\n self.node_scrolled_window.set_shadow_type(gtk.SHADOW_ETCHED_IN)\n self.node_scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n self.node_scrolled_window.add(self.text)\n\n def update_article_information(self, url,\n citationmapbuild=None,\n article=None):\n print(\"update_article_information url = '%s'\" % url)\n try:\n article = citationmapbuild.articles[url]\n except:\n print(\"Lookup failed\")\n\n self.update_buttons(url)\n\n if isinstance(article, ArticleWithReferences.ArticleWithReferences):\n # TODO: Consider to make this an asynchronous call, as it feels like the GUI\n # TODO: does not respond when opening detailed information about a paper.\n # TODO: The delay is on the order of five seconds.\n article = self.use_doi_information(article)\n self.text.get_buffer().insert_at_cursor('%s\\n' % url)\n self.text.get_buffer().insert_at_cursor('%d %s\\n' % (int(article.year), article.firstAuthor))\n article.print_information()\n self.doi = article.doi\n self.text.get_buffer().insert_at_cursor('%s\\n\\n' % article.title)\n self.text.get_buffer().insert_at_cursor('Source: %s\\n\\n' % article.origin)\n 
self.text.get_buffer().insert_at_cursor('%s\\n\\n' % article.abstract)\n self.text.get_buffer().insert_at_cursor('ncites: %d\\n' % article.ncites)\n self.text.get_buffer().insert_at_cursor('%s\\n' % article.references)\n\n self.insert_graph_information(article, citationmapbuild.graph)\n\n self.list_citation_of_current_article(url, citationmapbuild.graph)\n self.list_references_of_current_article(url, citationmapbuild.graph)\n\n full_info_as_text = self.get_all_information_as_text(article)\n self.text.get_buffer().insert_at_cursor(\n '\\nAll available information:\\n%s' % full_info_as_text)\n return\n else:\n print(\"Not an article\")\n\n def use_doi_information(self, article):\n try:\n doi_information = DoiLookup.get_doi_information(article.doi)\n article.title = doi_information['title']\n article.journal = doi_information['container-title']\n except:\n pass\n return article\n\n def get_all_information_as_text(self, article):\n all_knowledge_about_article = StringIO.StringIO()\n pp = pprint.PrettyPrinter(stream=all_knowledge_about_article)\n pp.pprint(article)\n full_info_as_text = all_knowledge_about_article.getvalue()\n return full_info_as_text\n\n def update_buttons(self, url):\n pattern = re.compile(\".*DOI (.*)\")\n res = pattern.match(url)\n if (res):\n print(res.group(1))\n self.update_doi_information(res.group(1))\n else:\n self.link_label.set_uri(\"http://google.com/#q=%s\" % url)\n self.link_label.set_label(\"Google this article\")\n self.requestDOIInformation.hide()\n print(\"Not found\")\n\n def insert_graph_information(self, article, graph):\n n_references_in_graph = graph.in_degree(article.id)\n n_citations_in_graph = graph.out_degree(article.id)\n self.text.get_buffer().insert_at_cursor(\n 'Number of references in graph: %s\\n' % n_references_in_graph)\n self.text.get_buffer().insert_at_cursor(\n 'Number of citations in graph: %s\\n' % n_citations_in_graph)\n\n def list_citation_of_current_article(self, url, graph):\n list_of_edges = graph.out_edges(url)\n self.text.get_buffer().insert_at_cursor(\"\\nCited by\\n\")\n for edge in list_of_edges:\n self.text.get_buffer().insert_at_cursor(\" * %s\\n\" % edge[1])\n\n def list_references_of_current_article(self, url, graph):\n list_of_edges = graph.in_edges(url)\n self.text.get_buffer().insert_at_cursor(\"\\nReferences\\n\")\n for edge in list_of_edges:\n self.text.get_buffer().insert_at_cursor(\" * %s\\n\" % edge[0])\n\n def update_doi_information(self, doi):\n print(\"Updating doi information: %s\" % doi)\n self.link_label.set_uri(\"http://dx.doi.org/%s\" % doi)\n self.link_label.set_label(\"Open full text\")\n self.doi = doi\n\n\ndef main():\n GuiArticleDetails()\n gtk.main()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/GuiArticleDetails.py","file_name":"GuiArticleDetails.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59203861","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\n\n\nfrom wagent import WinduAgent\nimport os\n\nimport time\n\n\nclass TestAll(unittest.TestCase):\n\n GROUP1 = os.environ.get('WAGENT_GROUP1')\n GROUP_404 = os.environ.get('WAGENT_GROUP_404')\n\n AGENT4 = os.environ.get('WAGENT_AGENT4')\n AGENT3 = os.environ.get('WAGENT_AGENT3')\n\n AGENT4_PW = os.environ.get('WAGENT_AGENT4_PW')\n\n EYEPHONE6 = os.environ.get('WAGENT_EYEPHONE6')\n\n PHOTO_401 = os.environ.get('WAGENT_PHONE_401')\n PHONE_404 = os.environ.get('WAGENT_PHONE_404')\n\n WAGENT_DEBUG = os.environ.get('WAGENT_DEBUG')\n\n def setUp(self):\n time.sleep(1.7)\n debug = False\n if self.WAGENT_DEBUG is not None:\n debug = True\n self.agent = WinduAgent.WinduAgent(self.AGENT4, 'Agent 4', self.AGENT4_PW, debug=debug, autoReply= WinduAgent.WinduAgent.AUTOREPLY_DELIVERED)\n\n def hasEvent(self, events, eventName, eventID):\n for e in events:\n if e['name'] == eventName and e['data']['id'] == eventID:\n return True\n return False\n\n\n # def testCreateMessageId(self):\n # msgid1 = self.agent.createMessageId()\n # ret = self.agent.sendMessage(self.EYEPHONE6, u'Message from test \\o/ ? ? ' + msgid 1 , msgid1)\n # self.assertEqual (ret['code'] ,\"200\")\n # self.assertEqual (ret['id'] , msgid1)\n\n\n def testSendMessage (self):\n ret = self.agent.sendMessage(self.EYEPHONE6, u'xFluke - 😍 ')\n self.assertEqual(ret ['code'], '200')\n\n def testSendMessageImage (self):\n ret = self.agent.sendMessageImage(self.EYEPHONE6,'https://upload.wikimedia.org/wikipedia/commons/d/db/Patern_test.jpg', ':D')\n self.assertEqual(ret ['code'], '200')\n\n def testSendLocation (self):\n ret = self.agent.sendMessageLocation(self.EYEPHONE6, '48.8571', '2.2943', 'Here :)')\n self.assertEqual(ret ['code'], '200')\n\n def testSendGetGroups(self):\n ret = self.agent.sendGetGroups()\n self.assertEqual(ret['code'], \"200\")\n\n # def testSendGetClientConfig(self):\n # ret = self.agent.sendGetClientConfig()\n # self.assertEqual(ret['code'], \"200\")\n def testConnectedStatus(self):\n self.agent.setConnectedStatus('online')\n ret = self.agent.getConnectedStatus()\n self.assertEqual(ret['code'], \"200\")\n self.assertEqual(ret['connected_status'], 'online')\n self.agent.setConnectedStatus('offline')\n ret = self.agent.getConnectedStatus()\n self.assertEqual(ret['code'], \"200\")\n self.assertEqual(ret['connected_status'], 'offline')\n\n ret = self.agent.isLoggedIn()\n self.assertTrue(ret)\n\n\n\n def testSendGetPrivacySettings(self):\n ret = self.agent.sendGetPrivacySettings()\n self.assertEqual(ret['code'], \"200\")\n # #\n def testSendSetPrivacySettings(self):\n ret = self.agent.sendSetPrivacySettings(\"last\", \"contacts\")\n self.assertEqual(ret['code'], \"200\")\n self.assertEqual(ret ['values']['last'], \"contacts\")\n\n def testSendGetProfilePicture (self):\n ret = self.agent.sendGetProfilePicture(self.EYEPHONE6)\n self.assertTrue(ret['filename'])\n self.assertEqual(ret['code'], \"200\")\n\n def testSendGetProfilePicture401 (self):\n ret = self.agent.sendGetProfilePicture(self.PHOTO_401)\n self.assertEqual(ret['code'], \"401\")\n\n def testSendGetProfilePicture404 (self):\n ret = self.agent.sendGetProfilePicture(self.PHONE_404)\n self.assertEqual(ret['code'], \"404\")\n\n def testSendGetProfilePicturePreview (self):\n ret = self.agent.sendGetProfilePicturePreview(self.EYEPHONE6)\n self.assertTrue(ret['filename'])\n self.assertEqual(ret['code'], \"200\")\n\n def testSendGetGroupPicturePreview (self):\n ret = 
self.agent.sendGetProfilePicturePreview(self.GROUP1)\n self.assertTrue(ret['filename'])\n self.assertEqual(ret['code'], \"200\")\n\n def testSendGetProfilePicturePreview404 (self):\n ret = self.agent.sendGetProfilePicturePreview(self.PHONE_404)\n self.assertEqual(ret['code'], \"404\")\n\n def testSendGetServerProperties (self):\n ret = self.agent.sendGetServerProperties()\n self.assertEqual(ret['code'], \"200\")\n\n\n # def testSendRemoveAccount (self):\n # ret = self.agent.sendRemoveAccount()\n # self.assertEqual(ret['code'], \"200\")\n\n\n def testSendGetConnectedStatuses (self):\n self.agent.setConnectedStatus('online')\n ret = self.agent.sendGetPresences([self.EYEPHONE6, self.AGENT3])\n values = ret['connected_status']\n self.assertEqual(2, len(values))\n self.assertEqual(ret['code'], \"200\")\n\n def testSendGetStatusesMessages (self):\n ret = self.agent.sendGetStatuses([self.EYEPHONE6, self.AGENT3])\n values = ret['statuses_messages']\n self.assertEqual(2, len(values))\n self.assertEqual(ret['code'], \"200\")\n\n # # def testCreateGroups (self):\n # # result = self.agent.sendGroupsChatCreate('Group4', ['xxx', 'xxx', 'xxx', 'xxx'])\n # # print (result)\n # # groupId = result ['groupid']\n # # self.assertIsNotNone(groupId)\n # # self.assertEqual(result['code'], \"200\")\n #\n # def testSetGroupSubject(self):\n # t = str(time.time())\n # result = self.agent.sendSetGroupSubject(self.GROUP1, u'Group1 🚀' + t)\n # self.assertEqual(result['code'], \"200\")\n #\n # def testSetGroupSubject404(self):\n # t = str(time.time())\n # result = self.agent.sendSetGroupSubject(self.GROUP_404, u'Homer Simpson ?')\n # self.assertEqual(result['code'], \"404\")\n\n # # def testPromoteDemoteParticipant (self):\n # # result = self.agent.sendDemoteParticipant(self.GROUP3, self.EYEPHONE6)\n # # self.assertEqual(result['code'], '200')\n # # time.sleep(0.3)\n # #\n # # result = self.agent.sendPromoteParticipant(self.GROUP3, self.EYEPHONE6)\n # # self.assertEqual(result['code'], '200')\n\n # # def testChangeParticipant (self):\n # # result = self.agent.sendGroupsParticipantAdd(self.GROUP1, self.EYEPHONE4)\n # # self.assertEqual(result['code'], '200')\n # # time.sleep(0.3)\n # # result = self.agent.sendGroupsParticipantRemove(self.GROUP1, self.EYEPHONE4)\n # # self.assertEqual(result['code'], '200')\n\n def testChangeStatus (self):\n result = self.agent.sendActiveStatus()\n time.sleep(1.2)\n self.assertEqual(result['code'], '200')\n result = self.agent.sendOfflineStatus()\n self.assertEqual(result['code'], '200')\n\n\n # def testSendMessageDelivered (self):\n # msgid1 = str(time.time()).replace('.','-')\n # ret = self.agent.sendMessage(self.AGENT2, u'Message from test \\o/ 👌' + msgid1 , msgid1)\n # self.agent2.peekEventsForce()\n # self.agent2.sendMessageDelivered(self.AGENT1, msgid1)\n # events = self.agent.peekEventsForce()\n # self.assertTrue(self.hasEvent(events, 'ongetreceipt', msgid1))\n\n # def testSendGetPrivacyBlockedList404 (self):\n # result = self.agent.sendGetPrivacyBlockedList()\n # self.assertEqual(result['code'], '404')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81368407","text":"# -*- coding: utf-8 -*-\n# -------------------------------------------------------------------------------------------------\n\"\"\"\nMastermind Problem\n-------------------\nContent\n\n ▶ class 
MastermindProblem(ProblemTemplate)\n\n ▶ dv_mastermind_template = {}\n \n ▶ mastermind_encoding1 = {} << Elements cannot be repeated\n \n ▶ mastermind_encoding2 = {} << Elements can be repeated\n \n ▶ def mastermind_get_neighbors\n\n─────────────────────────────────────────────────────────────────────────\n\nCIFO - Computation Intelligence for Optimization\n\nAuthor: Fernando A J Peres - fperes@novaims.unl.pt - (2019) version L4.0\n\n\"\"\"\n# -------------------------------------------------------------------------------------------------\n\n# import\nfrom copy import deepcopy\nfrom random import choice\n\nfrom dssg_challenge.ga.problem.problem_template import ProblemTemplate, ProblemObjective\nfrom dssg_challenge.ga.problem.solution import LinearSolution, Encoding\n\n# /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n# C O D E\n# /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n\n# -------------------------------------------------------------------------------------------------\n# Auxiliary variables - Encoding and Default Decision Variables \n# This DVs template can also be used in tests), but in the DVs are problem-specific, \n# so each problem instance will provide different values for the DVs.\n#\n# DVs - Decision Variables\n#--------------------------------------------------------------------------------------------------\n\n# template (example) decision variables for mastermind\ndv_mastermind_template = {\n \"Colors\" : ['Blue', 'Red', 'Yellow', 'Green', 'White', 'Magenta'],\n \"Colors-Id\" : [1, 2, 3, 4, 5, 6],\n \"Colors-Code\" : [\"da_blue\", \"da_red\", \"da_yellow\", \"da_white\", \"da_magenta\" ] # Dark_<color> Sty colors\n}\n\n# encoding 1: The elements cannot be repeated\nmastermind_encoding1 = {\n \"Size\" : 4,\n \"Is ordered\" : True,\n \"Can repeat\" : False,\n \"Data\" : [], # in the constructor must get the data from the Dvs (as it is DV dependent)\n \"Data Type\" : \"Choices\"\n} \n\n# encoding 2: The element can be repeated\nmastermind_encoding2 = {\n \"Size\" : 4,\n \"Is ordered\" : True,\n \"Can repeat\" : True,\n \"Data\" : [], # in the constructor must get the data from the Dvs (as it is DV dependent)\n \"Data Type\" : \"Choices\" # MinMax\n} \n\n# -------------------------------------------------------------------------------------------------\n# Class: Mastermind Problem Template\n# -------------------------------------------------------------------------------------------------\nclass MastermindProblem(ProblemTemplate):\n \"\"\"\n Expected Decision Variables, so deliver:\n decision_variables = {\n \"Color-Label\" : [...] << List of String\n \"Color-Id\" : [...] << List of Numbers \n \"Colors-Code\" : [...] 
<< List of Strings (Sty background colors)\n }\n \n if decision_variable IS EMPTY it will be used default_decision_variables defined internally for tests\n\n @ constraints = {} this problem does not have constraints\n @ encoding_rule = {} there two types of encoding defined for this problem: mastermind_encoding1 and mastermind_encoding2mastermind_encoding1 is the default if it was not defined an encoding it will be used\n \n \"\"\"\n # Constructor\n #----------------------------------------------------------------------------------------------\n def __init__(self, decision_variables = [], constraints={}, encoding_rule = mastermind_encoding1):\n \"\"\"\n Mastermind Problem Template CONSTRUCTOR\n\n Parameters:\n\n @ decision_variables\n Expected Decision Variables, so deliver:\n decision_variables = {\n \"Color-Label\" : [...] << String\n \"Color-Id\" : [...] << Number \n \"Color-Code\" : [...] << String (Sty color)\n }\n\n if decision_variable IS EMPTY it will be used default_decision_variables defined internally for tests\n\n @ constraints = {} this problem does not have constraints\n\n @ encoding_rule = {} there two types of encoding defined for this problem: mastermind_encoding1 and mastermind_encoding2\n mastermind_encoding1 is the default if it was not defined an encoding it will be used\n \n \"\"\"\n # Call the Parent-class constructor to store these values and to execute any other logic to be implemented by the constructor of the super-class\n super().__init__(\n decision_variables, \n constraints, \n encoding_rule)\n\n self._name = \"Mastermind Problem\"\n\n if not decision_variables: \n # DEFAULT DECISION VARIABLE, if the problem instance does not define a specific decision variables\n # It will be used the default_decision_variables defined in this file\n self._decision_variables = dv_mastermind_template\n \n # Mastermind Problem will use \"Color-Id\" to create the encoding\n if \"Colors-Id\" in self._decision_variables:\n self._encoding_rule[\"Data\"] = self._decision_variables[\"Colors-Id\"]\n self._encoding.encoding_data = self._decision_variables[\"Colors-Id\"]\n\n # Problem Objective\n self._objective_function_list = [ self._objective_function1 ]\n self._objective_list = [ ProblemObjective.Maximization ] \n\n\n # Build Solution Function - build_solution()\n #----------------------------------------------------------------------------------------------\n def build_solution(self):\n \"\"\"\n Builds a mastermind guess based on the configured encoding\n \"\"\"\n # empty linear solution\n solution_representation = []\n size = self._encoding.size\n data = self._encoding.encoding_data\n\n # if elements can be repeated\n if self._encoding.can_repeat_elements:\n for _ in range(0, size):\n solution_representation.append( choice( data ) )\n\n solution = LinearSolution(representation = solution_representation, encoding_rule = self._encoding_rule)\n return solution\n # if elements cannot be repeated\n else: \n encoding_data_temp = deepcopy( data )\n\n for _ in range(0, size):\n element = choice( encoding_data_temp )\n solution_representation.append( element )\n encoding_data_temp.remove( element ) \n\n solution = LinearSolution(representation = solution_representation, encoding_rule = self._encoding_rule)\n return solution\n\n # Solution Admissibility Function - is_admissible()\n #----------------------------------------------------------------------------------------------\n def is_admissible( self, solution ): #<< use this signature in the sub classes, the meta-heuristic\n \"\"\"\n The 
\n    # Objective Function 1\n    #----------------------------------------------------------------------------------------------\n    # It should be seen as an abstract method \n    # Objective Function Signature: objective_function(self, solution, decision_variables, feedback = None)\n    def _objective_function1(self, solution, decision_variables, feedback = None): #<< use this signature in the sub classes, the meta-heuristics\n        \"\"\"\n        Objective Function of the Mastermind Problem;\n        it calculates the fitness of a guess (candidate solution)\n        \"\"\"\n        fitness = 0\n\n        if feedback :\n            \n            computed_solution_position = [False] * len(solution.representation)\n            computed_feedback_position = [False] * len(solution.representation)\n\n            for i in range( 0, len(solution.representation) ):\n                if ( solution.representation[i] == feedback.representation[i] ):\n                    fitness += 10\n                    computed_solution_position[i] = True\n                    computed_feedback_position[i] = True\n\n            for i in range( 0, len(feedback.representation) ):\n                if not computed_solution_position[i]:\n                    for j in range( 0, len(solution.representation) ):\n                        if not computed_feedback_position[j] and solution.representation[i] == feedback.representation[j]: \n                            fitness += 3 \n                            computed_solution_position[i] = True\n                            computed_feedback_position[j] = True\n                            break \n            \n            return fitness\n        else: return 0 \n\n# -------------------------------------------------------------------------------------------------\n# Neighborhood Function [get_neighbors()]\n# -------------------------------------------------------------------------------------------------\n\ndef mastermind_get_neighbors( solution, problem, neighborhood_size = 0 ):\n    \"\"\"\n    Generates the neighbors of a solution in accordance with the neighborhood structure defined in this function:\n    solution  : 1, 3, 4, 3\n    neighbors : *, 3, 4, 3 | 1, *, 4, 3 | 1, 3, *, 3 | 1, 3, 4, *\n    \"\"\"\n\n    neighbors = []\n\n    # if elements can be repeated\n    if solution.encoding.can_repeat_elements:\n        for i in range(0, len(solution.representation)):\n            next_choices = deepcopy( solution.encoding.encoding_data )\n            neighbor = deepcopy( solution )\n            \n            next_choices.remove( solution.representation[i] ) # remove the element at the current position only, to avoid repeating it in this position\n            \n            neighbor.representation[i] = choice( next_choices ) # for DataType = Choice\n\n            neighbors.append( neighbor )\n        \n        return neighbors\n    # if elements cannot be repeated\n    else: \n        for i in range(0, len(solution.representation)):\n            next_choices = deepcopy( solution.encoding.encoding_data )\n            neighbor = deepcopy( solution )\n            \n            current_element = solution.representation[i]\n            next_choices.remove( current_element )\n            new_element = choice( next_choices ) # for DataType = Choice\n            \n            z = -1\n            for j in range(0, len(neighbor.representation)):\n                if neighbor.representation[j] == new_element: \n                    z = j\n                    break\n\n            # if the new element is not in any other position, accept it\n            if z == -1 : neighbor.representation[i] = new_element\n            # else, the new element is already in another position, so swap the two\n            else: \n                neighbor.representation[i] = neighbor.representation[z]\n                neighbor.representation[z] = current_element\n\n            neighbors.append( neighbor )\n        \n        return neighbors\n\n
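# -------------------------------------------------------------------------------------------------\n# Illustrative sketch (added): one way the pieces above might be exercised, assuming an instance\n# of the problem class defined in this module is available as 'problem' in the framework:\n#\n#   guess = problem.build_solution()                      # random initial guess\n#   neighbors = mastermind_get_neighbors(guess, problem)  # all one-position variations\n#   # each neighbor could then be scored against a secret code wrapped in a LinearSolution,\n#   # e.g. problem._objective_function1(neighbor, None, feedback=secret)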
\n","sub_path":"dssg_challenge/ga/toy_problem/mastermind_problem.py","file_name":"mastermind_problem.py","file_ext":"py","file_size_in_byte":11232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302372835","text":"import Requester\nimport Animator\nimport math\n\nnumberOfPixels = 30\n\naapl = Requester.Requester(\"20181226\", \"cei\")\ncoloursArray = aapl.getMarketAverageColours()\n\ntry:\n strip = Animator.Animation(numberOfPixels, delay_time=0.08)\n\n strip.animateFromArray(coloursArray)\n\n strip.stop()\n\nexcept (KeyboardInterrupt, Exception) as ex:\n print(ex)\n strip.stop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596143395","text":"import json\nfrom os import path\n\nfrom invenio_workflows import Workflow\nfrom workflow.engine_db import WorkflowStatus\n\nfrom scoap3.modules.records.util import create_from_json\nfrom tests.responses import get_response_dir\n\n\ndef run_workflow(input_json_filename):\n \"\"\"Use input_json_filename to load hepcrawl response and to run article_upload workflow.\"\"\"\n\n file_path = path.join(get_response_dir(), 'hepcrawl', input_json_filename)\n with open(file_path, 'rt') as f:\n json_data = json.loads(f.read())\n\n workflow_id = create_from_json({'records': [json_data]}, apply_async=False)[0]\n return Workflow.query.get(workflow_id)\n\n\ndef test_hindawi():\n workflows_count = Workflow.query.count()\n workflow = run_workflow('hindawi.json')\n assert workflow.status == WorkflowStatus.COMPLETED\n assert Workflow.query.count() - workflows_count == 1\n\n\ndef test_aps():\n workflows_count = Workflow.query.count()\n workflow = run_workflow('aps.json')\n assert workflow.status == WorkflowStatus.COMPLETED\n assert Workflow.query.count() - workflows_count == 1\n\n\ndef test_elsevier():\n workflows_count = Workflow.query.count()\n workflow = run_workflow('elsevier/elsevier.json')\n assert workflow.status == WorkflowStatus.COMPLETED\n assert Workflow.query.count() - workflows_count == 1\n\n\ndef test_springer():\n workflows_count = Workflow.query.count()\n workflow = run_workflow('springer/springer.json')\n assert workflow.status == WorkflowStatus.COMPLETED\n assert Workflow.query.count() - workflows_count == 1\n\n\ndef test_oup():\n workflows_count = Workflow.query.count()\n workflow = run_workflow('oup/oup.json')\n assert workflow.status == WorkflowStatus.COMPLETED\n assert Workflow.query.count() - workflows_count == 1\n","sub_path":"tests/integration/test_article_upload.py","file_name":"test_article_upload.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"145045459","text":"import pyvisgraph as vg\r\n#from visgraph.vis_graph import VisGraph as vg\r\nimport pygame\r\n\r\npygame.init()\r\n\r\ndisplay_width = 1280\r\ndisplay_height = 720\r\n\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (237, 41, 57)\r\ngray = (169, 169, 169)\r\ngreen = (0, 128, 0)\r\n\r\nLEFT = 1\r\nRIGHT = 3\r\n\r\ngameDisplay = pygame.display.set_mode((display_width, display_height))\r\npygame.display.set_caption('2.6 Milos Milunovic RN 17-2016')\r\nclock = pygame.time.Clock()\r\n\r\ndef draw_polygon(polygon, color, size, complete=True):\r\n if complete:\r\n polygon.append(polygon[0])\r\n p1 = polygon[0]\r\n for p2 in polygon[1:]:\r\n pygame.draw.line(gameDisplay, color, (p1.x, 
p1.y), (p2.x, p2.y), size)\r\n p1 = p2\r\n\r\ndef draw_visible_vertices(edges, color, size):\r\n for edge in edges:\r\n pygame.draw.line(gameDisplay, color, (edge.p1.x, edge.p1.y), (edge.p2.x, edge.p2.y), size)\r\n\r\ndef draw_text(mode_txt, color, size, x, y):\r\n font = pygame.font.SysFont(None, size)\r\n text = font.render(mode_txt, True, color)\r\n gameDisplay.blit(text, (x, y))\r\n\r\ndef help_screen():\r\n rectw = 550\r\n recth = 500\r\n rectwi = rectw-10\r\n recthi = recth-10\r\n startx = display_width*0.5-rectw/2\r\n starty = display_height*0.5-recth/2\r\n startxi = display_width*0.5-rectwi/2\r\n startyi = display_height*0.5-recthi/2\r\n\r\n helping = True\r\n while helping:\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_q:\r\n pygame.quit()\r\n quit()\r\n elif event.key == pygame.K_h:\r\n helping = False\r\n\r\n pygame.draw.rect(gameDisplay, black, (startx, starty, rectw, recth))\r\n pygame.draw.rect(gameDisplay, white, (startxi, startyi, rectwi, recthi))\r\n\r\n draw_text(\"-- 2.6 Shortest path among polygons --\", black, 30, startxi+90, startyi+10)\r\n draw_text(\"Q - QUIT\", black, 25, startxi+10, startyi+100)\r\n draw_text(\"H - Toggle Help\", black, 25, startxi+10, startyi+150)\r\n draw_text(\"D - Draw polygons\", black, 25, startxi+10, startyi+200)\r\n draw_text(\"C - Clear polygons\", black, 25, startxi+10, startyi+250)\r\n draw_text(\"S - Mark start and finish\", black, 25, startxi+10, startyi+300)\r\n pygame.display.update()\r\n clock.tick(10)\r\n\r\nclass Simulator():\r\n\r\n def __init__(self):\r\n self.polygons = []\r\n self.work_polygon = []\r\n self.mouse_point = None\r\n self.start_point = None\r\n self.end_point = None\r\n self.shortest_path = []\r\n\r\n self.g = vg.VisGraph()\r\n self.built = False\r\n self.mode_draw = True\r\n self.mode_path = False\r\n\r\n def toggle_draw_mode(self):\r\n self.mode_draw = not self.mode_draw\r\n self._clear_shortest_path()\r\n self.mode_path = False\r\n\r\n def close_polygon(self):\r\n if len(self.work_polygon) > 1:\r\n self.polygons.append(self.work_polygon)\r\n self.work_polygon = []\r\n self.g.build(self.polygons, status=False)\r\n self.built = True\r\n\r\n def toggle_shortest_path_mode(self):\r\n if self.mode_path:\r\n self._clear_shortest_path()\r\n self.mode_path = not self.mode_path\r\n self.mode_draw = False\r\n\r\n def clear_all(self):\r\n self.__init__()\r\n\r\n def _clear_shortest_path(self):\r\n self.shortest_path = []\r\n self.start_point = []\r\n self.end_point = [] \r\n\r\ndef game_loop():\r\n sim = Simulator()\r\n gameExit = False\r\n\r\n while not gameExit:\r\n\r\n for event in pygame.event.get():\r\n pos = pygame.mouse.get_pos()\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_q:\r\n pygame.quit()\r\n quit()\r\n elif event.key == pygame.K_h:\r\n help_screen()\r\n elif event.key == pygame.K_d:\r\n sim.toggle_draw_mode()\r\n elif event.key == pygame.K_s:\r\n sim.toggle_shortest_path_mode()\r\n\r\n if sim.mode_draw:\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_c:\r\n sim.clear_all()\r\n elif event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == LEFT:\r\n sim.work_polygon.append(vg.Point(pos[0], pos[1]))\r\n elif event.button == RIGHT:\r\n sim.close_polygon()\r\n\r\n if sim.mode_path and sim.built:\r\n if event.type == pygame.MOUSEBUTTONUP or any(pygame.mouse.get_pressed()):\r\n if pygame.mouse.get_pressed()[LEFT-1] or event.button == LEFT:\r\n sim.start_point = vg.Point(pos[0], pos[1])\r\n elif 
pygame.mouse.get_pressed()[RIGHT-1] or event.button == RIGHT:\r\n                        sim.end_point = vg.Point(pos[0], pos[1])\r\n                    if sim.start_point and sim.end_point:\r\n                        sim.shortest_path = sim.g.shortest_path(sim.start_point, sim.end_point)\r\n            \r\n        gameDisplay.fill(white)\r\n\r\n        if len(sim.work_polygon) > 1:\r\n            draw_polygon(sim.work_polygon, black, 3, complete=False)\r\n\r\n        if len(sim.polygons) > 0:\r\n            for polygon in sim.polygons:\r\n                draw_polygon(polygon, black, 3)\r\n        if len(sim.shortest_path) > 1:\r\n            draw_polygon(sim.shortest_path, red, 3, complete=False)\r\n\r\n        if sim.mode_draw:\r\n            draw_text(\"-- Drawing --\", black, 25, 5, 5)\r\n        elif sim.mode_path:\r\n            draw_text(\"-- Path --\", black, 25, 5, 5)\r\n        else:\r\n            draw_text(\"-- Help --\", black, 25, 5, 5)\r\n\r\n        pygame.display.update()\r\n        clock.tick(20)\r\n\r\nif __name__ == \"__main__\":\r\n    gameDisplay.fill(white)\r\n    help_screen()\r\n    game_loop()\r\n    pygame.quit()\r\n    quit()\r\n","sub_path":"2.6.py","file_name":"2.6.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444124166","text":"#coding:utf-8\n\n\"\"\"\nDatabase connection management\n\"\"\"\n\n__author__ = \"liangxiaokai@21cn.com\"\n__version__ = \"1.0\"\n__date__ = \"2011/04/14\"\n__copyright__ = \"Copyright (c) 2011\"\n__license__ = \"Python\"\n\nfrom connect import *\n\nfrom sqlalchemy import Table,Column,func\nfrom sqlalchemy.types import *\nfrom sqlalchemy.orm import Mapper\n\ntab_charge_record = Table(\"charge_record\", metadata,\n        Column(\"id\",Integer, primary_key=True),\n        Column(\"uid\",Integer),\n        Column(\"is_first\",SmallInteger,default=0), # 0 means not a first-time charge\n        Column(\"charge_item_id\",Integer),\n        Column(\"money\",Integer,default=0),\n        Column(\"diamond\",Integer,default=0),\n        Column(\"pay_mode\",Integer), # 0: WeChat, 1: Alipay, 2: online banking, 3: SMS\n        Column(\"create_time\", DateTime),\n        )\n    \n\n    \nclass TChargeRecord(TableObject):\n    def __init__(self):\n        TableObject.__init__(self)\n    \nmapper_charge_record = Mapper(TChargeRecord,tab_charge_record)\n\nif __name__==\"__main__\":\n    pass","sub_path":"code/db/charge_record.py","file_name":"charge_record.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"364597370","text":"import MyTwitter, sys\n\nclass LikeChecker():\n\n    def __init__(self, list_name = None):\n        self.twitter, self.user_id = MyTwitter.login()\n        self.list_id = MyTwitter.get_list_id(list_name) if list_name else ''\n        self.friends = MyTwitter.get_list_members(self.twitter, self.list_id)\n        self.friends = [friend['id_str'] for friend in self.friends] + [self.user_id]\n        self.follows = [user for user in MyTwitter.get_friends(self.twitter, self.user_id) if user['id_str'] not in self.friends]\n        self.users = []\n        self.get_date = lambda date: str(MyTwitter.get_date(date))\n\n    def check_favorite(self, user, target):\n        tweets = MyTwitter.get_like_tweets(self.twitter, user['id_str'], 1000, target, loop = True)\n        if tweets == []: return None\n        tweet = tweets[-1]\n        if tweet['user']['id_str'] == target:\n            date = self.get_date(tweet['created_at'])\n            return date + '\\n\\n' + tweet['text']\n        date = \"{0} ({1})\".format(self.get_date(tweet['created_at']), len(tweets))\n        return date + '\\n\\n' + \"Not Found\"\n\n    def setup(self, count = 2000):\n        self.users = []\n        tweets = MyTwitter.get_tweets(self.twitter, self.user_id, count)\n        for i, tweet in enumerate(tweets):\n            
sys.stdout.write(\"\\r{0}%\".format(100*i//(len(tweets)-1)))\n sys.stdout.flush()\n date = self.get_date(tweet['created_at'])\n text = date + '\\n\\n' + tweet['text']\n fav_user_ids = MyTwitter.get_like_user_ids(tweet['id_str'], self.friends)\n user_ids = [user['id_str'] for user in self.users]\n self.users.extend([{'id_str': user_id, 'text': text} for user_id in fav_user_ids if user_id not in user_ids])\n follows = [friend['id_str'] for friend in self.follows]\n self.users = [user for user in self.users if user['id_str'] in follows]\n\n def show_fav_user(self):\n users = MyTwitter.get_users(self.twitter, user_ids = [user['id_str'] for user in self.users])\n for i, user in enumerate(self.users):\n message = \"{0}: {1}\\n\".format(i+1, users[i]['name'])\n message += \"https://twitter.com/{0}\\n\\n\".format(users[i]['screen_name'])\n message += user['text']\n print('=' * 50 + '\\n')\n print(message + '\\n')\n\n def show_not_fav_user(self):\n print(\"↓ Not Favorite User ↓\" + '\\n')\n for friend in self.follows:\n if friend['id_str'] not in [user['id_str'] for user in self.users] and friend['protected'] == False:\n print(friend['name'])\n print(\"https://twitter.com/\" + friend['screen_name'] + '\\n')\n\n def show_protected_user(self):\n user_ids = [user['id_str'] for user in self.users]\n for i, friend in enumerate(self.follows):\n if friend['id_str'] not in user_ids and friend['protected'] == True:\n message = friend['name'] + '\\n'\n message += \"https://twitter.com/{0}\".format(friend['screen_name'])\n print(message + '\\n')\n response = self.check_favorite(friend, self.user_id)\n if response is not None: print(response + '\\n')\n print('=' * 50 + '\\n')\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n LikeChecker = LikeChecker(sys.argv[1])\n elif len(sys.argv) == 1:\n LikeChecker = LikeChecker()\n else:\n print(\"Usage: python3 {0} (EXCLUDED_LIST_NAME)\".format(sys.argv[0]))\n sys.exit()\n LikeChecker.setup()\n input('\\n')\n while True:\n try:\n LikeChecker.show_fav_user()\n break\n except:\n pass\n input('=' * 50 + '\\n')\n LikeChecker.show_not_fav_user()\n input('=' * 50 + '\\n')\n LikeChecker.show_protected_user()\n","sub_path":"BasicLikeCheck.py","file_name":"BasicLikeCheck.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"285636891","text":"from flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\n\ndef getBookList():\n return [\n {\n 'title': 'Steve Jobs',\n 'author': 'Walter Isaacson',\n 'currentPage': 31,\n 'totalPages': 571\n }\n ]\n\n@app.route(\"/\")\ndef dashboard():\n bookList = getBookList()\n return render_template('dashboard.html', books=getBookList())\n\n@app.route(\"/contribute\")\ndef contribute():\n return render_template('contribute.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"bookworm.py","file_name":"bookworm.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215007139","text":"import math\r\nimport random\r\nimport numpy as np\r\nfrom entities.parentselection.ParentSelection import ParentSelection\r\n\r\n\r\nclass SUS(ParentSelection):\r\n\r\n def getCandidates(self, citizens):\r\n\r\n candidates = []\r\n fitnessSum = 0\r\n\r\n # calculate sum of fitness of all genes.\r\n for i in range(len(citizens)):\r\n fitnessSum += math.sqrt(citizens[i].getFitness()) # scaling with sqrt\r\n\r\n # calculate a 
proportional fitness for each gene.\r\n        fitnessRate = []\r\n        for i in range(len(citizens)):\r\n            fitnessRate.append(math.sqrt(citizens[i].getFitness()) / fitnessSum) # scaling with sqrt\r\n\r\n        # at index i we sum up the fitness rates up to index i.\r\n        # the rate list is flipped before the cumulative sum because the lower the\r\n        # fitness, the better the solution; note that slot i of the flipped\r\n        # cumulative array therefore corresponds to the mirrored citizen index.\r\n        cumFitnessRate = list(np.cumsum(np.flip(fitnessRate)))\r\n\r\n        # one spin places all equally spaced pointers\r\n        r = random.random()\r\n        for _ in range(len(cumFitnessRate)):\r\n\r\n            i = 0\r\n            found = False\r\n            # find the first slot whose cumulative rate exceeds r and append that citizen to the candidates list.\r\n            while i < len(cumFitnessRate) and not found:\r\n                if r < cumFitnessRate[i]:\r\n                    candidates.append(citizens[i])\r\n                    found = True\r\n                i += 1\r\n\r\n            # step the pointer and wrap r back into [0, 1)\r\n            r += (1 / len(citizens))\r\n            if r >= 1:\r\n                r = r - 1\r\n\r\n        return np.array(candidates)\r\n","sub_path":"entities/parentselection/SUS.py","file_name":"SUS.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"578755732","text":"import psycopg2\n\ndef importReactionData(sqlcursor):\n\t\n\tfilePath = '../sde/fsd/reactionProducts.csv'\n\t\n\twith open(filePath, 'r') as f:\n\t\tfileData = f.readlines()\n\t\n\tfor line in fileData:\n\t\tline = line.split(',')\n\t\tbp_typeID = line[0]\n\t\tactivity = line[1]\n\t\tproduct_typeID = line[2]\n\t\tproduct_qty = line[3]\n\t\tprint(str(bp_typeID) + \" \" + activity + \" \" + str(product_typeID) + \" \" + str(product_qty))\n\t\tsqlcursor.execute(\"INSERT INTO reaction_products (bp_typeid, activity, prod_typeid, qty) VALUES (%s, %s, %s, %s);\", (bp_typeID, activity, product_typeID, product_qty))\n\t\n\tfilePath = '../sde/fsd/reactionMaterials.csv'\n\t\n\twith open(filePath, 'r') as f:\n\t\tfileData = f.readlines()\n\t\t\n\tfor line in fileData:\n\t\tline = line.split(',')\n\t\tbp_typeID = line[0]\n\t\tactivity = line[1]\n\t\tmat_typeID = line[2]\n\t\tmat_qty = line[3]\n\t\tprint(str(bp_typeID) + \" \" + activity + \" \" + str(mat_typeID) + \" \" + str(mat_qty))\n\t\tsqlcursor.execute(\"INSERT INTO reaction_materials (bp_typeid, activity, mat_typeid, qty) VALUES (%s, %s, %s, %s);\", (bp_typeID, activity, mat_typeID, mat_qty))\n\ndef configureDatabase(sqlcursor):\n    sqlcursor.execute(\"DROP TABLE IF EXISTS reaction_materials;\")\n\n    sqlcursor.execute(\"CREATE TABLE reaction_materials (bp_typeid int, activity text, mat_typeid int, qty int);\")\n\n    sqlcursor.execute(\"DROP TABLE IF EXISTS reaction_products;\")\n\n    sqlcursor.execute(\"CREATE TABLE reaction_products (bp_typeid int, activity text, prod_typeid int, qty int, mat_eff real);\")\n\ndef main():\n    pgsqlconnection = psycopg2.connect(\"dbname=dce user=dc password=bi12cyc17le host=172.31.27.71 port=5432\")\n\n    sqlcursor = pgsqlconnection.cursor()\n\n    configureDatabase(sqlcursor)\n    \n    importReactionData(sqlcursor)\n\n    pgsqlconnection.commit()\n\n    pgsqlconnection.close()\n\t\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"current_project/importReactions.py","file_name":"importReactions.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70257215","text":"from allennlp.data import DataLoader\nfrom allennlp.models import Model\nfrom allennlp.training.trainer import GradientDescentTrainer, Trainer\nfrom allennlp.training.optimizers import AdamOptimizer\nfrom allennlp.training.metrics import CategoricalAccuracy\nfrom allennlp.predictors import TextClassifierPredictor\n\nimport numpy as np\nimport pandas as pd\nimport tempfile  # used by run_training_loop below\n
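\n# Note: run_training_loop below also relies on helper builders (build_dataset_reader,\n# read_data, build_vocab, build_model, build_data_loaders) that are assumed to be\n# defined elsewhere in this package; they are not imported in this module.\n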
\n\ndef build_classifier_trainer(\n    model: Model,\n    serialization_dir: str,\n    train_loader: DataLoader,\n    dev_loader: DataLoader,\n    num_epochs: int = 1,\n    cuda_device: int = -1,\n    learning_rate: float = 0.000025,\n    world_size: int = 1,\n    distributed: bool = False\n) -> Trainer:\n    parameters = [\n        [n, p]\n        for n, p in model.named_parameters() if p.requires_grad\n    ]\n    optimizer = AdamOptimizer(parameters, lr=learning_rate)\n    trainer = GradientDescentTrainer(\n        model=model,\n        serialization_dir=serialization_dir,\n        data_loader=train_loader,\n        validation_data_loader=dev_loader,\n        num_epochs=num_epochs,\n        optimizer=optimizer,\n        cuda_device=cuda_device,\n        world_size=world_size,\n        distributed=distributed,\n        validation_metric='+accuracy'\n    )\n    return trainer\n\ndef run_training_loop():\n    dataset_reader = build_dataset_reader()\n\n    # These are a subclass of pytorch Datasets, with some allennlp-specific\n    # functionality added.\n    train_data, dev_data = read_data(dataset_reader)\n\n    vocab = build_vocab(train_data + dev_data)\n    model = build_model(vocab)\n\n    # This is the allennlp-specific functionality in the Dataset object;\n    # we need to be able to convert strings in the data to integers, and this\n    # is how we do it.\n    train_data.index_with(vocab)\n    dev_data.index_with(vocab)\n\n    # These are again a subclass of pytorch DataLoaders, with an\n    # allennlp-specific collate function, that runs our indexing and\n    # batching code.\n    train_loader, dev_loader = build_data_loaders(train_data, dev_data)\n\n    # You obviously won't want to create a temporary file for your training\n    # results, but for execution in binder for this course, we need to do this.\n    with tempfile.TemporaryDirectory() as serialization_dir:\n        trainer = build_classifier_trainer(\n            model,\n            serialization_dir,\n            train_loader,\n            dev_loader\n        )\n        print(\"Starting training\")\n        trainer.train()\n        print(\"Finished training\")\n    return trainer
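\n\n# Illustrative sketch (added): how build_classifier_trainer might be invoked once loaders\n# and a model exist; the names used here are placeholders, not part of this module.\n#\n#   trainer = build_classifier_trainer(\n#       model, \"/tmp/clf\", train_loader, dev_loader,\n#       num_epochs=5, cuda_device=0)\n#   metrics = trainer.train()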
and FastApi.\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n url=\"https://github.com/authorizon/fastapi_websocket_rpc\",\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\"\n ],\n python_requires='>=3.7',\n install_requires=get_requirements(),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522481496","text":"import numpy as np;\nEQUATION = '../data/equation.txt'\n\n\ndef lite_to_dict(line):\n print(line)\n left, right = map(lambda x: x.strip(), line.split('='))\n return {\n 'left': left,\n 'right': int(right)\n }\n\n\ndef map_to_array_left_dict(item):\n left_arr = item['left'].split()\n\n dict_exp = {}\n res_array = []\n for value in left_arr:\n if value.isdigit():\n dict_exp['coef'] = int(value)\n elif value == '+':\n if 'coef' not in dict_exp:\n dict_exp['coef'] = 1\n elif value == '-':\n if 'coef' not in dict_exp:\n dict_exp['coef'] = 1\n dict_exp['coef'] *= -1\n else:\n if 'coef' not in dict_exp:\n dict_exp['coef'] = 1\n dict_exp['var'] = value\n res_array.append(dict_exp)\n dict_exp = {}\n return res_array\n\n\ndef map_to_array_right(item):\n return item['right']\n\n\ndef sort_place(items):\n items.sort(key=lambda x: x['var'])\n return items\n\n\ndef main():\n file = open(EQUATION, \"r\")\n lines = file.readlines()\n\n array_of_dict = list(map(lite_to_dict, lines))\n\n a_dict = list(map(map_to_array_left_dict, array_of_dict))\n exp_array = [item for sublist in a_dict for item in sublist]\n a_sorted = list(map(sort_place, a_dict))\n\n a = list(map(lambda x: list(map(lambda y: y['coef'], x)), a_sorted))\n b = list(map(map_to_array_right, array_of_dict))\n\n res = np.linalg.solve(a, b)\n variables = sorted(list(set(map(lambda x: x['var'], exp_array))))\n\n string = ''\n for key, value in enumerate(variables):\n string = string + value + ': ' + str(res[key]) + ', '\n\n print(string)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/equation-solver.py","file_name":"equation-solver.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"282215851","text":"from sqlalchemy import create_engine, text\nfrom clickhouse_driver import Client\n\nimport pandas as pd\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nfrom ETL_context_folder.Logger import Logger\nimport argparse\n\n\nclass PROPERTY_INFO_CONTEXT:\n def __init__(self, maria_id, maria_password, local_clickhouse_id,\n local_clickhouse_password,\n local_clickhouse_db_name):\n\n self.Maria_id = maria_id\n self.Maria_password = maria_password\n\n self.Clickhouse_id = local_clickhouse_id\n self.Clickhouse_password = local_clickhouse_password\n self.Clickhouse_DB = local_clickhouse_db_name\n\n local_tz = tzlocal()\n now_time = datetime.now(tz=local_tz).strftime(\"%Y%m%d%H%H\")\n print(now_time)\n self.inner_logger = Logger(now_time, \"shop_data_context_\" + now_time + '.json')\n self.connect_db()\n\n def connect_db(self):\n self.MariaDB_Engine = create_engine('mysql+pymysql://{0}:{1}@192.168.100.108:3306/dreamsearch'\n .format(self.Maria_id, self.Maria_password))\n self.MariaDB_Engine_Conn = 
self.MariaDB_Engine.connect()\n\n self.MariaDB_Shop_Engine = create_engine('mysql+pymysql://{0}:{1}@192.168.100.106:3306/dreamsearch'\n .format(self.Maria_id, self.Maria_password))\n self.MariaDB_Shop_Engine_Conn = self.MariaDB_Shop_Engine.connect()\n\n self.Local_Click_House_Engine = create_engine(\n 'clickhouse://{0}:{1}@localhost/{2}'.format(self.Clickhouse_id, self.Clickhouse_password,\n self.Clickhouse_DB))\n self.Local_Click_House_Conn = self.Local_Click_House_Engine.connect()\n\n return True\n\n def extract_product_cate_info(self):\n self.connect_db()\n product_cate_info_sql = \"\"\"\n SELECT \n apci.ADVER_ID,\n apci.PRODUCT_CODE as PCODE,\n apci.ADVER_CATE_NO as PRODUCT_CATE_NO,\n apsc.FIRST_CATE,\n apsc.SECOND_CATE,\n apsc.THIRD_CATE\n FROM dreamsearch.ADVER_PRDT_CATE_INFO as apci\n join\n (select * \n from \n dreamsearch.ADVER_PRDT_STANDARD_CATE) as apsc\n on apci.ADVER_CATE_NO = apsc.no;\n \"\"\"\n text_sql = text(product_cate_info_sql)\n self.product_cate_info_df = pd.read_sql(text_sql, self.MariaDB_Shop_Engine_Conn)\n return True\n\n def return_adver_id_list(self):\n self.connect_db()\n adver_id_list_sql = \"\"\"\n SELECT \n distinct ADVER_ID\n FROM \n dreamsearch.ADVER_PRDT_CATE_INFO;\n \"\"\"\n sql_text = text(adver_id_list_sql)\n self.ADVER_ID_LIST = pd.read_sql(sql_text, self.MariaDB_Shop_Engine_Conn)['ADVER_ID']\n return True\n\n def create_shop_property_table(self, table_name):\n client = Client(host='localhost')\n DDL_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS TEST.{0}\n (\n STATS_DTTM String,\n ADVER_ID Nullable(String),\n PCODE Nullable(String),\n PRODUCT_CATE_NO Nullable(String),\n FIRST_CATE Nullable(String),\n SECOND_CATE Nullable(String),\n THIRD_CATE Nullable(String),\n PNM Nullable(String), \n PRICE Nullable(UInt16)\n ) ENGINE = MergeTree\n PARTITION BY STATS_DTTM\n ORDER BY STATS_DTTM\n SETTINGS index_granularity=8192\n \"\"\".format(table_name)\n result = client.execute(DDL_sql)\n return result\n\n def update_shop_property_table(self, table_name):\n self.connect_db()\n self.extract_product_cate_info()\n self.return_adver_id_list()\n product_property_df_list = []\n size = self.ADVER_ID_LIST.shape[0]\n data_count = 0\n now_time = datetime.now(tz=tzlocal()).strftime(\"%Y%m%d%H%H\")\n print(\"Start_time :\", now_time)\n for i, ADVER_ID in enumerate(self.ADVER_ID_LIST):\n price_info_sql = \"\"\"\n SELECT \n USERID as ADVER_ID,\n PCODE,PNM,\n PRICE\n FROM dreamsearch.SHOP_DATA\n WHERE USERID = '{0}';\n \"\"\".format(ADVER_ID)\n sql_text = text(price_info_sql)\n try:\n product_price_info_df = pd.read_sql(sql_text, self.MariaDB_Shop_Engine_Conn)\n merged_df = pd.merge(self.product_cate_info_df, product_price_info_df, on=['ADVER_ID', 'PCODE'])\n product_property_df_list.append(merged_df)\n except:\n self.connect_db()\n product_price_info_df = pd.read_sql(sql_text, self.MariaDB_Shop_Engine_Conn)\n merged_df = pd.merge(self.product_cate_info_df, product_price_info_df, on=['ADVER_ID', 'PCODE'])\n product_property_df_list.append(merged_df)\n if i % 10 == 0:\n print(\"{0}/{1} : \".format(i, size), ADVER_ID)\n now_time = datetime.now(tz=tzlocal()).strftime(\"%Y%m%d%H%H\")\n final_df = pd.concat(product_property_df_list)\n final_df['STATS_DTTM'] = datetime.now().strftime('%Y%m%d%H')\n print(\"End time : \", now_time)\n final_df.to_sql(table_name, con=self.Local_Click_House_Engine, index=False, if_exists='append')\n return True\n\n def create_adver_property_table(self, table_name):\n client = Client(host='localhost')\n DDL_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS TEST.{0}\n 
(\n STATS_DTTM String,\n ADVER_ID Nullable(String),\n CTGR_SEQ_3 Nullable(String),\n CTGR_NM_3 Nullable(String),\n CTGR_SEQ_2 Nullable(String),\n CTGR_NM_2 Nullable(String),\n CTGR_SEQ_1 Nullable(String),\n CTGR_NM_1 Nullable(String)\n ) ENGINE = MergeTree\n PARTITION BY STATS_DTTM\n ORDER BY STATS_DTTM\n SETTINGS index_granularity=8192\n \"\"\".format(table_name)\n result = client.execute(DDL_sql)\n return result\n\n def update_adver_property_table(self, table_name):\n self.connect_db()\n # try:\n Adver_Cate_Df_sql = \"\"\"\n select\n MCUI.USER_ID as ADVER_ID, \n ctgr_info.CTGR_SEQ_3,\n ctgr_info.CTGR_NM_3,\n ctgr_info.CTGR_SEQ_2,\n ctgr_info.CTGR_NM_2,\n ctgr_info.CTGR_SEQ_1,\n ctgr_info.CTGR_NM_1\n from dreamsearch.MOB_CTGR_USER_INFO as MCUI\n join\n (\n SELECT \n third_depth.CTGR_SEQ as CTGR_SEQ_KEY, third_depth.CTGR_SEQ_NEW as CTGR_SEQ_3, third_depth.CTGR_NM as CTGR_NM_3,\n second_depth.CTGR_SEQ_NEW as CTGR_SEQ_2, second_depth.CTGR_NM as CTGR_NM_2,\n first_depth.CTGR_SEQ_NEW as CTGR_SEQ_1, first_depth.CTGR_NM as CTGR_NM_1\n from dreamsearch.MOB_CTGR_INFO third_depth\n join dreamsearch.MOB_CTGR_INFO second_depth\n join dreamsearch.MOB_CTGR_INFO first_depth\n on 1=1 \n AND third_depth.CTGR_DEPT = 3\n AND second_depth.CTGR_DEPT = 2\n AND first_depth.CTGR_DEPT = 1\n AND second_depth.USER_TP_CODE = '01'\n AND second_depth.USER_TP_CODE = first_depth.USER_TP_CODE\n AND third_depth.USER_TP_CODE = second_depth.USER_TP_CODE\n AND second_depth.HIRNK_CTGR_SEQ = first_depth.CTGR_SEQ_NEW\n AND third_depth.HIRNK_CTGR_SEQ = second_depth.CTGR_SEQ_NEW) as ctgr_info\n on MCUI.CTGR_SEQ = ctgr_info.CTGR_SEQ_KEY;\n \"\"\"\n Adver_Cate_Df = pd.read_sql(Adver_Cate_Df_sql, self.MariaDB_Engine_Conn)\n Adver_Cate_Df = Adver_Cate_Df.drop_duplicates(subset='ADVER_ID')\n Adver_Cate_Df['STATS_DTTM'] = datetime.now().strftime('%Y%m%d%H')\n Adver_Cate_Df.to_sql(table_name, con=self.Local_Click_House_Engine, index=False, if_exists='append')\n return True\n # except:\n # print(\"Extract_Adver_Cate_Info error happend\")\n # return False\n\n def create_media_property_table(self, table_name):\n client = Client(host='localhost')\n DDL_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS TEST.{0}\n (\n STATS_DTTM String,\n MEDIA_SCRIPT_NO Nullable(String),\n MEDIASITE_NO Nullable(String),\n MEDIA_ID Nullable(String),\n SCRIPT_TP_CODE Nullable(String),\n MEDIA_SIZE_CODE Nullable(String),\n ENDING_TYPE Nullable(String),\n M_BACON_YN Nullable(String),\n ADVRTS_STLE_TP_CODE Nullable(String),\n MEDIA_CATE_INFO Nullable(String),\n MEDIA_CATE_NAME Nullable(String)\n ) ENGINE = MergeTree\n PARTITION BY STATS_DTTM\n ORDER BY STATS_DTTM\n SETTINGS index_granularity=8192\n \"\"\".format(table_name)\n result = client.execute(DDL_sql)\n return result\n\n def update_media_property_table(self, table_name):\n self.connect_db()\n # try :\n PAR_PROPERTY_INFO_sql = \"\"\"\n select \n ms.no as MEDIA_SCRIPT_NO,\n MEDIASITE_NO,\n ms.userid as MEDIA_ID,\n mpi.SCRIPT_TP_CODE,\n mpi.MEDIA_SIZE_CODE,\n product_type as \"ENDING_TYPE\",\n m_bacon_yn as \"M_BACON_YN\",\n ADVRTS_STLE_TP_CODE as \"ADVRTS_STLE_TP_CODE\",\n media_cate_info.scate as \"MEDIA_CATE_INFO\",\n media_cate_info.ctgr_nm as \"MEDIA_CATE_NAME\"\n from dreamsearch.media_script as ms\n join\n (\n SELECT no, userid, scate, ctgr_nm\n FROM dreamsearch.media_site as ms\n join\n (SELECT mpci.CTGR_SEQ, CTGR_SORT_NO, mci.CTGR_NM\n FROM dreamsearch.MEDIA_PAR_CTGR_INFO as mpci\n join dreamsearch.MOB_CTGR_INFO as mci\n on mpci.CTGR_SEQ = mci.CTGR_SEQ_NEW) as media_ctgr_info\n on ms.scate = 
media_ctgr_info.CTGR_SORT_NO) as media_cate_info\n            join\n            (select PAR_SEQ, ADVRTS_PRDT_CODE,SCRIPT_TP_CODE, MEDIA_SIZE_CODE \n            from dreamsearch.MEDIA_PAR_INFO\n            where PAR_EVLT_TP_CODE ='04') as mpi\n            on ms.mediasite_no = media_cate_info.no\n            and media_cate_info.scate = {0}\n            and mpi.par_seq = ms.no;\n        \"\"\"\n        result_list = []\n        for i in range(1, 18):\n            result = pd.read_sql(PAR_PROPERTY_INFO_sql.format(i), self.MariaDB_Engine_Conn)\n            result_list.append(result)\n        Media_Info_Df = pd.concat(result_list)\n        Media_Info_Df['MEDIA_SCRIPT_NO'] = Media_Info_Df['MEDIA_SCRIPT_NO'].astype('str')\n        Media_Info_Df['STATS_DTTM'] = datetime.now().strftime('%Y%m%d%H')\n        Media_Info_Df.to_sql(table_name, con=self.Local_Click_House_Engine, index=False, if_exists='append')\n        return True\n        # except :\n        #     return False\n\n    ##### so many data.....\n    def create_kwrd_property_table(self, table_name):\n        client = Client(host='localhost')\n        DDL_sql = \"\"\"\n        CREATE TABLE IF NOT EXISTS TEST.{0}\n        (\n            STATS_DTTM String,\n            KWRD_SEQ Nullable(String),\n            KWRD_NM Nullable(String)\n        ) ENGINE = MergeTree\n        PARTITION BY STATS_DTTM\n        ORDER BY STATS_DTTM\n        SETTINGS index_granularity=8192\n        \"\"\".format(table_name)\n        result = client.execute(DDL_sql)\n        return result\n\n    def update_kwrd_property_table(self, table_name):\n        self.connect_db()\n        kwrd_sql = \"\"\"\n        select\n            CODE_TP_ID,\n            CODE_ID,\n            CODE_VAL,\n            USE_YN,\n            CODE_DESC\n        FROM\n            dreamsearch.MOBON_COM_CODE\n        \"\"\"\n        kwrd_sql = text(kwrd_sql)\n        kwrd_df = pd.read_sql(kwrd_sql, self.MariaDB_Engine_Conn)\n        kwrd_df['STATS_DTTM'] = datetime.now().strftime('%Y%m%d%H')\n        kwrd_df.to_sql('MOBON_COM_CODE', con=self.Local_Click_House_Engine, index=False, if_exists='append')\n        return True\n\n    ##### so skip!\n\n    def create_mobon_com_code_table(self):\n        client = Client(host='localhost')\n        DDL_sql = \"\"\"\n        CREATE TABLE IF NOT EXISTS TEST.MOBON_COM_CODE\n        (\n            STATS_DTTM String,\n            CODE_TP_ID Nullable(String),\n            CODE_ID Nullable(String),\n            CODE_VAL Nullable(String),\n            USE_YN Nullable(String),\n            CODE_DESC Nullable(String)\n        ) ENGINE = MergeTree\n        PARTITION BY STATS_DTTM\n        ORDER BY STATS_DTTM\n        SETTINGS index_granularity=8192\n        \"\"\"\n        result = client.execute(DDL_sql)\n        return result\n\n    def update_mobon_com_code_table(self):\n        self.connect_db()\n        mobon_com_code_sql = \"\"\"\n        select\n            CODE_TP_ID,\n            CODE_ID,\n            CODE_VAL,\n            USE_YN,\n            CODE_DESC\n        FROM\n            dreamsearch.MOBON_COM_CODE\n        \"\"\"\n        mobon_com_code_sql = text(mobon_com_code_sql)\n        mobon_com_code_df = pd.read_sql(mobon_com_code_sql, self.MariaDB_Engine_Conn)\n        mobon_com_code_df['STATS_DTTM'] = datetime.now().strftime('%Y%m%d%H')\n        mobon_com_code_df.to_sql('MOBON_COM_CODE', con=self.Local_Click_House_Engine, index=False, if_exists='append')\n        return True\n\n    def create_mob_camp_media_hh_stats(self):\n        client = Client(host='localhost')\n        DDL_sql = \"\"\"\n        CREATE TABLE IF NOT EXISTS TEST.MOB_CAMP_MEDIA_HH_STATS\n        (\n            STATS_DTTM String,\n            STATS_HH String,\n            PLTFOM_TP_CODE String,\n            ADVRTS_PRDT_CODE String,\n            ADVRTS_TP_CODE String,\n            SITE_CODE String,\n            MEDIA_SCRIPT_NO UInt16,\n            ITL_TP_CODE String,\n            ADVER_ID String, \n            TOT_EPRS_CNT UInt16,\n            PAR_EPRS_CNT UInt16,\n            CLICK_CNT UInt16\n        ) ENGINE = MergeTree\n        PARTITION BY ( STATS_DTTM, STATS_HH, ADVRTS_TP_CODE ) \n        ORDER BY STATS_DTTM\n        SETTINGS index_granularity=8192\n        \"\"\"\n        result = client.execute(DDL_sql)\n        return result\n\n    def update_mob_camp_media_hh_stats(self, initial_dttm, last_dttm, ADVRTS_TP_CODE='01'):\n        self.connect_db()\n        dt_index = pd.date_range(start=str(initial_dttm), 
end=str(last_dttm))\n stats_dttm_list = dt_index.strftime(\"%Y%m%d\").tolist()\n stats_hh_list = ['0{0}'.format(i) if i < 10 else str(i) for i in range(0, 24)]\n\n for stats_dttm in stats_dttm_list:\n for stats_hh in stats_hh_list:\n sql = \"\"\"\n SELECT\n STATS_DTTM,\n STATS_HH,\n PLTFOM_TP_CODE,\n ADVRTS_PRDT_CODE,\n ADVRTS_TP_CODE,\n SITE_CODE,\n MEDIA_SCRIPT_NO,\n ITL_TP_CODE,\n ADVER_ID,\n TOT_EPRS_CNT,\n PAR_EPRS_CNT,\n CLICK_CNT\n FROM BILLING.MOB_CAMP_MEDIA_HH_STATS\n WHERE STATS_DTTM = {0}\n and STATS_HH = '{1}'\n and ADVRTS_PRDT_CODE = '01'\n and ADVRTS_TP_CODE = '{2}'\n and ITL_TP_CODE = '01'\n and CLICK_CNT > 0;\n \"\"\".format(stats_dttm, stats_hh, ADVRTS_TP_CODE)\n sql = text(sql)\n stats_df = pd.read_sql(sql, self.MariaDB_Engine_Conn)\n stats_df.to_sql('MOB_CAMP_MEDIA_HH_STATS', con=self.Local_Click_House_Engine, index=False,\n if_exists='append')\n print(stats_dttm, stats_hh, 'completed')\n return True\n\n def delete_old_data(self, table_name):\n self.connect_db()\n dttm_list_sql = \"\"\"\n SELECT\n distinct STATS_DTTM\n FROM\n TEST.{0}\n \"\"\".format(table_name)\n dttm_list_sql = text(dttm_list_sql)\n dttm_list = list(pd.read_sql(dttm_list_sql, self.Local_Click_House_Engine)['STATS_DTTM'])\n int_dttm_list = [int(i) for i in dttm_list]\n max_dttm = max(int_dttm_list)\n if int_dttm_list.count(max_dttm) > 1:\n print('delete_old_data passed')\n return False\n else:\n client = Client(host='localhost')\n DDL_sql = \"\"\"\n ALTER TABLE TEST.{0} DELETE WHERE STATS_DTTM <> '{1}'\n \"\"\".format(table_name, max_dttm)\n result = client.execute(DDL_sql)\n print(\"delete old data\")\n return True\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--create_table\", help=\"--add_table ~~ \", default=None)\n args = parser.parse_args()\n\n # for develop\n # maria_id = \"dyyang\"\n # maria_password = \"dyyang123!\"\n\n # for service\n logger_name = input(\"logger name is : \")\n logger_file = input(\"logger file name is : \")\n maria_id = \"analysis\"\n maria_password = \"analysis@2020\"\n click_house_id = \"click_house_test1\"\n click_house_password = \"0000\"\n click_house_DB = \"TEST\"\n\n Property_Info_Context = PROPERTY_INFO_CONTEXT(maria_id, maria_password,\n click_house_id, click_house_password, click_house_DB)\n\n logger = Logger(logger_name, logger_file)\n Shop_Property_Return = Property_Info_Context.create_shop_property_table('SHOP_PROPERTY_INFO')\n logger.log(\"Shop_Property_Return\", Shop_Property_Return)\n Shop_property_update_return = Property_Info_Context.update_shop_property_table('SHOP_PROPERTY_INFO')\n logger.log(\"Shop_Property_Return\", Shop_property_update_return)\n Delete_Old_data_return = Property_Info_Context.delete_old_data('SHOP_PROPERTY_INFO')\n logger.log('delete old data', Delete_Old_data_return)\n\n Adver_Property_Return = Property_Info_Context.create_adver_property_table('ADVER_PROPERTY_INFO')\n logger.log(\"Adver_Property_Return\", Adver_Property_Return)\n Adver_Property_update_return = Property_Info_Context.update_adver_property_table('ADVER_PROPERTY_INFO')\n logger.log(\"Adver_Property_Return\", Adver_Property_update_return)\n Delete_Old_data_return = Property_Info_Context.delete_old_data('ADVER_PROPERTY_INFO')\n logger.log('delete ADVER_PROPERTY_INFO return', Delete_Old_data_return)\n\n Media_Property_Return = Property_Info_Context.create_media_property_table('MEDIA_PROPERTY_INFO')\n logger.log(\"Media_Property_Return\", Media_Property_Return)\n Media_Property_update_Return = 
Property_Info_Context.update_media_property_table('MEDIA_PROPERTY_INFO')\n logger.log(\"Media_Property_Return\", Media_Property_update_Return)\n Delete_Old_data_return = Property_Info_Context.delete_old_data('MEDIA_PROPERTY_INFO')\n logger.log('delete MEDIA_PROPERTY_INFO data', Delete_Old_data_return)\n\n Mobon_Com_Code_return = Property_Info_Context.create_mobon_com_code_table()\n logger.log(\"create mobon com code table\", \"success\")\n Update_Mobon_Com_Code_Return = Property_Info_Context.update_mobon_com_code_table()\n logger.log(\"update mobon com code table\", \"success\")\n Delete_old_data_return = Property_Info_Context.delete_old_data('MOBON_COM_CODE')\n logger.log(\"detele mobon com code old data\", \"success\")\n\n initial_dttm = input(\"MOB_CAMP_MEDIA_HH_STATS initial dttm ( ex) 20201010 ) :\")\n last_dttm = input(\"MOB_CAMP_MEDIA_HH_STATS last dttm ( ex) 20201020 ) : \")\n Create_Camp_Media_STATS_Return = Property_Info_Context.create_mob_camp_media_hh_stats()\n logger.log(\"Create_Camp_Media_Return\", Create_Camp_Media_STATS_Return)\n Update_Camp_Media_STATS_Return = Property_Info_Context.update_mob_camp_media_hh_stats(initial_dttm, last_dttm)\n logger.log(\"Update_Camp_Media_Return\", Update_Camp_Media_STATS_Return)\n","sub_path":"FIRST_ETL_folder/PROPERTY_INFO_CONTEXT.py","file_name":"PROPERTY_INFO_CONTEXT.py","file_ext":"py","file_size_in_byte":20022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610449210","text":"import requests\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\n# from tkinter import messagebox\nimport datetime\n# for input key verify\nimport re\n# import sqlite3\n\n\n# # database to store inputs, no reason for this except to see if can do it\n# conn = sqlite3.connect('money.db')\n# # create cursos\n# c = conn.cursor\n\n# # create table\n# # THE FLIPPING execute doesnt work???? 
whyyyyy!!!!!\n# c.execute(\"\"\"CREATE TABLE spy (\n# {from_currency_variable} text,\n# {amount} interger,\n# {to_currency_variable} integer\n# )\"\"\")\n\n# # databse commit changes\n# conn.commit()\n# # databse close connection\n# conn.close()\n\n\n\n# constructor of class\nclass CurrencyConverter():\n def __init__(self,url):\n # requests.get() loads the page then .json() will convert \n self.data = requests.get(url).json()\n # store it in a data variable\n self.currencies = self.data['conversion_rates']\n \n\n def convert(self, from_currency, to_currency, amount): \n initial_amount = amount \n if from_currency != 'ILS' : \n amount = amount / self.currencies[from_currency] \n \n # limiting to 2 decimal places \n amount = round(amount * self.currencies[to_currency], 2) \n return amount\n\n# UI class for currency converter\nclass App:\n \n # Frame created\n def __init__(self, converter):\n self.root = tk.Tk()\n # Date time for tab title\n time = datetime.datetime.now()\n time = time.strftime(\"Current Date: %d-%m-%Y Current time: %H:%M\")\n self.root.title('Real Time Currency Conversion ' + time)\n self.currency_converter = converter\n # Background color which only affect tabs, must have magenta in code!\n self.root.configure(background = 'dark magenta')\n self.root.geometry(\"700x400\")\n \n # Create Tabs\n self.tabControl = ttk.Notebook(self.root)\n self.tabControl.pack(pady=1)\n # ------NEED HELP CREATING FRAMES PROPERLY\n # --------------------------------------------\n # Create Two Frames - not doing this right or not linking them right\n self.currency_frame = Frame(self.tabControl, width=700, height=400)\n self.calculator_frame = Frame(self.tabControl, width=700, height=400)\n self.bio_frame = Frame(self.tabControl, width=700, height=400)\n\n self.currency_frame.pack(fill=\"both\", expand=10)\n self.calculator_frame.pack(fill=\"both\", expand=1)\n self.bio_frame.pack(fill=\"both\", expand=1)\n # --------------------------------------------\n\n # NEED HELP CREATING TABS! 
TO THE WIDGET!!#\n # --------------------------------------------\n \n # Add Tabs\n self.tabControl.add(self.currency_frame, text=\"Currencies\")\n self.tabControl.add(self.calculator_frame, text=\"Calculator\")\n self.tabControl.add(self.bio_frame, text=\"Bio_Frame\")\n # --------------------------------------------\n # self.tab1 = ttk.Frame(self.tabControl)\n # self.tabControl.add(self.tab1, text = \"Converter\")\n \n # self.tab2 = ttk.Frame(self.tabControl)\n # self.tabControl.add(self.tab2, text = \"Calculator\")\n \n # self.tabControl.pack(expand=1, fill=\"both\")\n\n # # Create Two Frames\n # currency_frame = Frame(my_notebook, width=700, height=400)\n # calculator_frame = Frame(my_notebook, width=700, height=400)\n\n # currency_frame.pack(fill=\"both\", expand=1)\n # calculator_frame.pack(fill=\"both\", expand=1)\n\n # --------------------------------------------\n # \n \n # background image + size option1 works\n # IMAGE_PATH = 'images/buddha1.jpg'\n # WIDTH, HEIGHT = 500, 200\n # img = ImageTk.PhotoImage(Image.open(IMAGE_PATH).resize((WIDTH, HEIGHT), Image.ANTIALIAS))\n # lbl = tk.Label(self, image=img)\n # lbl.img = img # for when used inside a method/function.\n # lbl.place(relx=0.5, rely=0.5, anchor='center') # Place image in center of parent.\n \n\n # responsive background image\n self.image = Image.open(\"images/bg2.jpg\")\n self.img_copy= self.image.copy()\n self.background_image = ImageTk.PhotoImage(self.image)\n self.background = Label(self.currency_frame, image=self.background_image)\n self.background.pack(fill=BOTH, expand=YES)\n # no need for this when using frames, frame rezies image\n # self.background.bind('<Configure>', self._resize_image)\n \n\n # Label\n self.intro_label = Label(self.currency_frame, text = 'Developer\\'s Institute Hackacthon 2 \\nCurrency converter', relief = tk.RAISED, borderwidth = 4)\n self.intro_label.config(font = ('Verdana',14,'bold'))\n self.root.iconbitmap('images/icon.ico')\n self.date_label = Label(self.currency_frame, text = f\"1 New Israeli Shekel = {self.currency_converter.convert('ILS','USD',1)} USD \\n Last Update : {self.currency_converter.data['time_last_update_utc']}\", relief = tk.GROOVE, font = ('Verdana',12,'bold'), borderwidth = 5)\n\n self.intro_label.place(relx = .5 , rely = .25, anchor=CENTER)\n self.date_label.place(relx = .5 , rely = .47, anchor=CENTER)\n\n # Entry box\n valid = (self.root.register(self.restrictNumberOnly), '%d', '%P')\n self.amount_field = Entry(self.currency_frame,bd = 3, relief = tk.RIDGE, justify = tk.CENTER,validate='key', validatecommand=valid)\n self.converted_amount_field_label = Label(self.currency_frame, text = '', fg = 'black', bg = 'white', relief = tk.RIDGE, justify = tk.CENTER, width = 17, borderwidth = 3)\n\n # dropdown\n self.from_currency_variable = StringVar(self.currency_frame)\n self.from_currency_variable.set(\"ILS\") # default value\n self.to_currency_variable = StringVar(self.currency_frame)\n self.to_currency_variable.set(\"USD\") # default value\n\n font = (\"Courier\", 12, \"bold\")\n self.root.option_add('*TCombobox*Listbox.font', font)\n # here it selects the currency keys from api/json see if can get \n # a better api for free that displays currency name instead of acronym\n self.from_currency_dropdown = ttk.Combobox(self.currency_frame, textvariable=self.from_currency_variable,values=list(self.currency_converter.currencies.keys()), font = font, state = 'readonly', width = 12, justify = tk.CENTER)\n self.to_currency_dropdown = ttk.Combobox(self.currency_frame, 
textvariable=self.to_currency_variable,values=list(self.currency_converter.currencies.keys()), font = font, state = 'readonly', width = 12, justify = tk.CENTER)\n\n # placing of 4 lateral field boxes\n\n self.from_currency_dropdown.place(relx = .06 , rely = .65, anchor=W)\n self.amount_field.place(relx = .067 , rely = .75, anchor=W, height =35 )\n self.to_currency_dropdown.place(relx = .93 , rely = .65, anchor=E)\n self.converted_amount_field_label.place(relx = .923 , rely = .75, anchor=E, height =35)\n\n # Convert button\n self.convert_button = Button(self.currency_frame, text = \"Convert\", fg = \"black\", command = self.perform) \n self.convert_button.config(font=('Courier', 16, 'bold'))\n self.convert_button.place(relx = .5 , rely = .65, anchor=CENTER)\n\n # method for image to be responsive \n # # no need for this when using frames, frame rezies image\n \n # def _resize_image(self,event):\n\n # new_width = event.width\n # new_height = event.height\n # self.image = self.img_copy.resize((new_width, new_height))\n # self.background_image = ImageTk.PhotoImage(self.image)\n # self.background.configure(image = self.background_image)\n \n# Method that will take the user input and convert the amount into the desired currency\n# and display it on the converted_amount entry box. Part of App class\n def perform(self):\n amount = float(self.amount_field.get())\n from_curr = self.from_currency_variable.get()\n to_curr = self.to_currency_variable.get()\n\n converted_amount = self.currency_converter.convert(from_curr,to_curr,amount)\n converted_amount = round(converted_amount, 2)\n\n self.converted_amount_field_label.config(text = str(converted_amount))\n\n # not working try again!\n # def restrictNumberOnly(self, action):\n # vcmd = (self(self.callback))\n # w = Entry(self.string, validate='all', validatecommand=(vcmd, '%P')) \n # w.pack()\n # if str.isdigit(action) or action == \"\":\n # return True\n # else:\n # return False\n\n\n# user can enter only a number/float in Amount Field working\n def restrictNumberOnly(self, action, string):\n regex = re.compile(r\"[0-9,]*?(\\.)?[0-9,]*$\")\n result = regex.match(string)\n return string==\"\" or (string.count('.')<=1 and result is not None)\n\n# creates the main function:\n# 1. Creates the Converter 2. 
Creates the UI for Converter\nif __name__ == '__main__':\n url = 'https://v6.exchangerate-api.com/v6/21ead1dbb8d52ca2cdfe3088/latest/ILS'\n converter = CurrencyConverter(url)\n\n App(converter)\n mainloop()","sub_path":"Week6/hackathon2/currency-converter-project.py","file_name":"currency-converter-project.py","file_ext":"py","file_size_in_byte":9207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152641039","text":"from configobj import ConfigObj\n\nimport os\nimport sys\nimport re\n\nbasedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))\ntmpConf = ConfigObj(os.path.join(basedir, 'config', 'webpanda.cfg'))\n\n# expand all values\ntmpSelf = sys.modules[ __name__ ]\n\nfor tmpKey,tmpVal in tmpConf.iteritems():\n # convert string to bool/int\n if tmpVal == 'True':\n tmpVal = True\n elif tmpVal == 'False':\n tmpVal = False\n elif re.match('^\\d+$',tmpVal):\n tmpVal = int(tmpVal)\n # update dict\n tmpSelf.__dict__[tmpKey] = tmpVal\n\n# set hostname\nif not tmpSelf.__dict__.has_key('basedir'):\n tmpSelf.__dict__['basedir'] = basedir\n","sub_path":"webpanda/common/client_config.py","file_name":"client_config.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124391057","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-armv7l/egg/tests/util/mockedglobals.py\n# Compiled at: 2015-11-06 23:45:35\nimport logging, mock\nfrom salve.log import gen_handler\nfrom salve.context import ExecutionContext\nfrom .mockedio import MockedIO\nfrom .context import clear_exec_context\n\nclass MockedGlobals(MockedIO):\n\n def __init__(self):\n MockedIO.__init__(self)\n self.logger = logging.getLogger(__name__)\n self.logger.propagate = False\n self.logger_patch = mock.patch('salve.logger', self.logger)\n self.action_logger_patches = [ mock.patch('salve.action.%s.logger' % loc, self.logger) for loc in [\n 'backup.file', 'backup.directory',\n 'copy.file', 'create.file',\n 'copy.directory', 'create.directory',\n 'modify.chmod', 'modify.chown',\n 'modify.file_chmod', 'modify.file_chown',\n 'modify.dir_chmod', 'modify.dir_chown']\n ]\n\n def setUp(self):\n self.logger.setLevel(logging.DEBUG)\n ExecutionContext().transition(ExecutionContext.phases.STARTUP, quiet=True)\n MockedIO.setUp(self)\n clear_exec_context()\n self.logger_patch.start()\n self.logger.addHandler(gen_handler(stream=self.stderr))\n for p in self.action_logger_patches:\n p.start()\n\n def tearDown(self):\n MockedIO.tearDown(self)\n self.logger_patch.stop()\n self.logger.handlers = []\n for p in self.action_logger_patches:\n p.stop()","sub_path":"pycfiles/salve-2.4.2-py2.7/mockedglobals.py","file_name":"mockedglobals.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348526546","text":"import copy\nfrom Queue import PriorityQueue\n\nfrom helpers import *\nfrom node import Node\nfrom problem import Problem\n\n\n#Explored and frontier queue intialization\nexplored = PriorityQueue()\nfrontier = PriorityQueue()\n\n#A* graph search algorithm \ndef a_star_search(problem):\n #Intialize root node\n node = Node(problem.initial_state, None, \"none\", 0, 0, 0)\n node.cost_to_go = cost_to_go(node.state)\n \n #Add root to frontier\n frontier.put((node.cost_to_go 
+ node.path_cost ,node))\n\n    #Perform search\n    while(not frontier.empty()):\n        node = frontier.get()[1]\n        #Check if goal reached\n        if problem.goal_test(node.state):\n            return solution(node, \"\")\n\n        explored.put((node.cost_to_go + node.path_cost ,node))\n\n        #Generate child nodes\n        for action in problem.actions:\n            child = child_node(problem, node, action)\n            #Add unexplored nodes to frontier if not already there\n            if((not search_for_node(child,explored) and not search_for_node(child,frontier)) and (child.state != None)):\n                child.cost_to_go = cost_to_go(child.state)\n                frontier.put((child.cost_to_go + child.path_cost, child))\n            #Check if nodes in frontier need to be replaced\n            else:\n                search_node_in_frontier(child)\n\n#Calculate cost to go for node state\ndef cost_to_go(state):\n    add_to_cost = 0\n    # get array [1,2,3,4,5,6,7,8]\n    new = goal_state[1:]\n    #Calculate manhattan distance for each number\n    for each_num in new:\n        add_to_cost += mhd(state.index(each_num),goal_state.index(each_num))\n    return add_to_cost\n\n#Calculate manhattan distance\ndef mhd(n,m):\n    corrdinates = {0:(0,0), 1:(1,0), 2:(2,0), 3:(0,1), 4:(1,1), 5:(2,1), 6:(0,2), 7:(1,2), 8:(2,2)}\n    x1,y1 = corrdinates[n]\n    x2,y2 = corrdinates[m]\n    return (abs(x1-x2) + abs(y1-y2))\n\n#Check if node is in queue\ndef search_for_node(childnode, priorty_queue):\n    for node_tuple in list(priorty_queue.queue):\n        node = node_tuple[1]\n        if childnode.state == node.state and childnode.parent_node == node.parent_node and childnode.action==node.action and childnode.path_cost==node.path_cost and childnode.depth==node.depth and childnode.cost_to_go==node.cost_to_go:\n            return True\n    # only report a miss after every node in the queue has been checked\n    return False\n\n#Replaces higher path cost node in queue with child node\ndef search_node_in_frontier(childnode):\n    for node_tuple in list(frontier.queue):\n        node=node_tuple[1]\n        #Replace node\n        if(childnode.state == node.state and node.path_cost > childnode.path_cost ):\n            node.state = copy.copy(childnode.state)\n            node.parent_node = copy.copy(childnode.parent_node)\n            node.action = copy.copy(childnode.action)\n            node.path_cost = copy.copy(childnode.path_cost)\n            node.depth = copy.copy(childnode.depth)\n            node.cost_to_go = copy.copy(childnode.cost_to_go)\n\n\n\n\n#Runs search on problem\ndef main():\n    problem = Problem(goal_state, initial_state)\n    a_star_search(problem)\n\nmain()\n","sub_path":"a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430934161","text":"# Import pygame library\r\nimport pygame\r\npygame.init() \r\n# Create a game screen and set its title\r\nscreen = pygame.display.set_mode((600, 600))\r\npygame.display.set_caption(\"Breakout Game\")\r\n# Define the RGB color combinations of rectangle objects\r\nWHITE = (255,255,255)\r\nDARKBLUE = (36,90,190)\r\nLIGHTBLUE = (0,176,240)\r\nRED = (255,0,0)\r\nORANGE = (255,150,0)\r\nYELLOW = (255,255,0)\r\n# Create a paddle and a ball rectangle objects\r\npaddle = pygame.Rect(300,500,60,10)\r\nball = pygame.Rect(200,250,10,10)\r\n# Define variables to track ball and paddle movement\r\nballx = -1\r\nbally = -1\r\npaddlex = 2\r\n# Create red, orange and yellow bricks using list comprehension\r\nbricksR=[pygame.Rect(10 + i* 100,60,80,30) for i in range(7)]\r\nbricksO=[pygame.Rect(10 + i* 100,100,80,30) for i in range(7)]\r\nbricksY=[pygame.Rect(10 + i* 100,140,80,30) for i in range(7)]\r\n\r\n# Create a variable to store player score and name it as \"score\"\r\nscore = 0\r\n\r\n# Game loop\r\n
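# Each pass of the loop below: (1) drain the event queue (quit and paddle keys),\r\n# (2) clear and redraw the scene, (3) step the ball and resolve wall, paddle and\r\n# brick collisions while updating the score, then (4) flip the display and wait.\r\n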
loop\r\ncarryOn = True\r\nwhile carryOn:\r\n    for event in pygame.event.get(): # User did something\r\n        if event.type == pygame.QUIT: # If user clicked close\r\n            carryOn = False # Flag that we are done so we exit this loop \r\n        # Check for user input to move the paddle\r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_RIGHT:\r\n                if paddle.x < 540: \r\n                    paddle.x += 2\r\n            if event.key == pygame.K_LEFT:\r\n                if paddle.x > 0:\r\n                    paddle.x -= 2\r\n    screen.fill(DARKBLUE)\r\n    \r\n    pygame.draw.line(screen, WHITE, [0, 38], [600, 38], 2)\r\n    pygame.draw.rect(screen,LIGHTBLUE,paddle)\r\n    \r\n    # Display the current score\r\n    font = pygame.font.Font(None, 34)\r\n    text = font.render(\"Score: \" + str(score), 1, WHITE)\r\n    screen.blit(text, (20,10))\r\n    \r\n    # Update x and y position of the ball on screen \r\n    ball.x = ball.x + ballx\r\n    ball.y = ball.y + bally\r\n    # Limiting ball movement on screen along x-axis\r\n    if ball.x >= 590:\r\n        ballx = -ballx\r\n    if ball.x <= 10:\r\n        ballx = -ballx\r\n    # Limiting ball movement on screen along y-axis\r\n    if ball.y >= 590:\r\n        bally = -bally\r\n    if ball.y <= 10:\r\n        bally = -bally\r\n    pygame.draw.rect(screen,WHITE ,ball)\r\n    # Check for paddle and ball collision and change the ball direction if they collided\r\n    if paddle.collidepoint(ball.x, ball.y):\r\n        bally = -bally\r\n    # Draw red bricks on screen\r\n    for i in bricksR:\r\n        pygame.draw.rect(screen,RED,i)\r\n        \r\n    # Draw orange bricks on screen\r\n    for i in bricksO:\r\n        pygame.draw.rect(screen,ORANGE,i)\r\n        \r\n    # Draw yellow bricks on screen\r\n    for i in bricksY:\r\n        pygame.draw.rect(screen,YELLOW,i)\r\n    \r\n    # Code for red brick and ball collision here\r\n    for i in bricksR:\r\n        # Remove red brick using \"collidepoint()\" function\r\n        if i.collidepoint(ball.x,ball.y):\r\n            bricksR.remove(i)\r\n            # Reverse ball direction upon collision\r\n            ballx = -ballx\r\n            bally = -bally\r\n            # Increase player score by 3\r\n            score += 3\r\n    \r\n    # Code for orange brick and ball collision here\r\n    for i in bricksO:\r\n        # Remove orange brick using \"collidepoint()\" function\r\n        if i.collidepoint(ball.x,ball.y):\r\n            bricksO.remove(i)\r\n            # Reverse ball direction upon collision\r\n            ballx = -ballx\r\n            bally = -bally\r\n            # Increase player score by 3\r\n            score += 3\r\n    \r\n    # Code for yellow brick and ball collision here\r\n    for i in bricksY:\r\n        # Remove yellow brick using \"collidepoint()\" function\r\n        if i.collidepoint(ball.x,ball.y):\r\n            bricksY.remove(i)\r\n            # Reverse ball direction upon collision\r\n            ballx = -ballx\r\n            bally = -bally\r\n            # Increase player score by 3\r\n            score += 3\r\n            \r\n    pygame.time.wait(8)\r\n    pygame.display.flip() \r\npygame.quit()","sub_path":"C7_PP_SA1_Student2_Solution.py","file_name":"C7_PP_SA1_Student2_Solution.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"102971810","text":"from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import HiveContext\nfrom pyspark.sql.types import StringType, ArrayType\nfrom pyspark.sql.functions import udf, col\nimport random\nimport string\nimport math\nimport pandas as pd\nimport numpy as np\n\nclass UsedFunctions:\n\n    def randomString(self,length):\n        letters = string.ascii_letters\n        result_str = ''.join(random.choice(letters) for i in range(length))\n        return result_str\n\n    def clustered(self,id,numRows):\n        return math.floor((id - 1) / numRows)\n\n    def scattered(self,id,numRows):\n        return abs((id - 1) % numRows)\n\n    def 
randomised(self,seed,numRows):\n random.seed(seed)\n return abs(random.randint(0, numRows) % numRows)\n\n def padString(self,id,chars,length):\n n = int(math.log10(id) + 1)\n result_str = ''.join(random.choice(chars) for i in range(length-n)) + str(id)\n return result_str\n\n def padSingleChar(self,chars,length):\n result_str = ''.join(chars for i in range(length))\n return result_str\n\n def println(self,lst):\n for ll in lst:\n print(ll[0])\n\nusedFunctions = UsedFunctions()\n\nsc = SparkContext.getOrCreate()\nsqlContext = SQLContext(sc)\nHiveContext = HiveContext(sc)\n\nnumRows = 5 ## do in increment of 50K rows otherwise you blow up driver memory!\n#\n## Check if table exist otherwise create it\n#\nmm = np.empty(shape=(0,7))\nstart = 1\nend = start + numRows - 1\nprint (\"starting at ID = \",start, \",ending on = \",end)\nRange = range(start, end+1)\nfor i in Range:\n r0 = str(i)\n r1 = str(usedFunctions.clustered(i,numRows)) \n r2 = str(usedFunctions.scattered(i,numRows))\n r3 = str(usedFunctions.randomised(i,numRows))\n r4 = usedFunctions.randomString(50)\n r5 = usedFunctions.padString(i, \" \", 50)\n r6 = usedFunctions.padSingleChar(\"x\", 40)\n mm = np.append(mm,np.array([[r0,r1,r2,r3,r4,r5,r6]]),axis=0)\n ##print(r0,r1,r2,r3,r4,r5,r6)\nprint(\"\\nAll rows \")\nprint(mm)\n#df = sc.parallelize(mm).toDF\n#df.show()\n","sub_path":"jjj.py","file_name":"jjj.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390036516","text":"import torch\nfrom torch.quantization import DeQuantStub, QuantStub\n\nfrom proj2_code.my_alexnet import MyAlexNet\n\n\nclass MyAlexNetQuantized(MyAlexNet):\n def __init__(self):\n '''\n Init function to define the layers and loss function.\n '''\n super().__init__()\n\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n '''\n Perform the forward pass with the net.\n\n Hints:\n 1. 
Use the self.quant() and self.dequant() layer on input/output.\n\n Args:\n - x: the input image [Dim: (N,C,H,W)]\n Returns:\n - y: the output (raw scores) of the net [Dim: (N,15)]\n '''\n model_output = None\n\n x = x.repeat(1, 3, 1, 1) # as AlexNet accepts color images\n ############################################################################\n # Student code begin\n ############################################################################\n\n raise NotImplementedError(\n 'forward() for MyAlexNetQuantized is not implemented')\n\n ############################################################################\n # Student code end\n ############################################################################\n\n return model_output\n","sub_path":"proj2/my_alexnet_quantized.py","file_name":"my_alexnet_quantized.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163817950","text":"import sys\n\nBIG_NUM = sys.maxsize\n\n\ndef recursiveMatrixChain(p, i, j):\n if i == j:\n return 0\n n = len(p) - 1\n m = [[BIG_NUM for x in range(n)] for x in range(n)]\n for k in range(i, j):\n cost = recursiveMatrixChain(p, i, k) + recursiveMatrixChain(p, k + 1, j) + p[i - 1] * p[k] * p[j]\n if cost < m[i][j]:\n m[i][j] = cost\n return m[i][j]\n\n\nclrs_test = [30, 35, 15, 5, 10, 20, 25] # this is the example from page 214\nprint(recursiveMatrixChain(clrs_test, 0, 5))\n","sub_path":"算法导论/15DynamicProgramming/矩阵链的最优括号化-rec.py","file_name":"矩阵链的最优括号化-rec.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516928468","text":"import collections\nimport json\nimport queue\nimport threading\n\nfrom anacreonlib import Anacreon\nimport creds\nfrom fraktur_b.utils import find_thing, get_valid_improvement_list\n\n\nif __name__ == '__main__':\n ConstructionOrder = collections.namedtuple(\"ConstructionOrder\", [\"planet_id\", \"planet_name\", \"improvement_id\", \"improvement_name\"])\n\n api = Anacreon(creds.USERNAME, creds.PASSWORD)\n api.gameID = creds.ERA_4_ALPHA\n api.sovID = creds.CURRENT_SOV\n\n api.get_game_info()\n api.get_objects()\n\n construction_orders = queue.Queue()\n limiting_semaphore = threading.BoundedSemaphore(6)\n\n def fill_construction_orders():\n while not construction_orders.empty(): # If there are more to be added or it is not empty\n limiting_semaphore.acquire()\n\n try:\n construction_order = construction_orders.get()\n print(\"Building a\", construction_order.improvement_name, \"on planet\", construction_order.planet_name, \"(planet ID\", construction_order.planet_id, \")\")\n api.build_improvement(construction_order.planet_id, construction_order.improvement_id)\n finally:\n limiting_semaphore.release()\n\n print(\"Beginning to iterate through planets\")\n for planet_id, planet in api.objects_dict.items():\n print(\"Grabbed new planet\")\n if planet[\"sovereignID\"] == api.sovID:\n if planet[\"class\"] == \"world\":\n valid_improvements = get_valid_improvement_list(planet, api.scenario_info)\n for trait_id in valid_improvements:\n trait_dict = api.scenario_info[trait_id]\n try:\n if trait_dict[\"role\"] == \"lifeSupport\":\n planet_name = planet[\"name\"]\n structure_name = trait_dict[\"nameDesc\"]\n construction_orders.put(ConstructionOrder(planet_id, planet_name, trait_id, structure_name))\n print(\"Added an order for\", planet_name, \"to get a\", structure_name)\n except KeyError:\n pass\n\n 
print(\"Done iterating\")\n\n\n print(\"Making threads\")\n for _ in range(6):\n threading.Thread(target=fill_construction_orders).start()\n\n print(\"Created threads\")\n\n","sub_path":"fraktur_b/maintenance/build_habitat_structures.py","file_name":"build_habitat_structures.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245905844","text":"class Solution(object):\n def findDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n nums.sort()\n ans = []\n if not nums:\n return ans\n pre_num = nums[0]\n for now_num in nums[1:]:\n if pre_num == now_num:\n ans.append(now_num)\n else:\n pre_num = now_num\n return ans\n","sub_path":"code/Find_All_Duplicates_in_an_Array.py","file_name":"Find_All_Duplicates_in_an_Array.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"395290492","text":"import pandas as pd\r\n\r\ndef extract(covid_url,john_hopkins_data):\r\n\r\n print (\"************************************************************************************************\")\r\n url = covid_url\r\n if url =='':\r\n url = \"https://raw.githubusercontent.com/nytimes/covid-19-data/master/us.csv\"\r\n df = pd.read_csv(url,error_bad_lines=False)\r\n\r\n ########################################\r\n # TRANSFORMATION STARTS HERE\r\n ########################################\r\n # changing the type of the column 'date' as DATE type\r\n df2 = df.astype({'date':'datetime64[ns]','cases':'int64','deaths':'int64'})\r\n #print(type(df2))\r\n #Retriving recent rows by filtering rows from last run date\r\n #greater than last run date\r\n #Depending on the type of the load ,rows should be filtered\r\n last_run_date = '2020-09-18'\r\n #df3 = df2[(df['date'] > last_run_date)]\r\n #print('*******************************************')\r\n\r\n #get John Hopins Data and combine it with current dates\r\n #print (\"************************************************************************************************\")\r\n john_hopkins_dataset_url = john_hopkins_data\r\n if john_hopkins_dataset_url == '':\r\n john_hopkins_dataset_url = \"https://raw.githubusercontent.com/datasets/covid-19/master/data/time-series-19-covid-combined.csv?opt_id=oeu1597116776410r0.9370121973993177\"\r\n\r\n john_hopkins_data = pd.read_csv(john_hopkins_dataset_url,error_bad_lines=False)\r\n country_list = ['US','us']\r\n john_hopkins_data = john_hopkins_data[john_hopkins_data['Country/Region'].isin(country_list) ]\r\n #print(john_hopkins_data)\r\n #selecting Date and recovered column\r\n john_hopkins_data_recovered = john_hopkins_data[['Date','Recovered']]\r\n\r\n #print(john_hopkins_data_recovered)\r\n john_hopkins_data_recovered = john_hopkins_data_recovered.astype({'Date':'datetime64[ns]','Recovered':'int64'})\r\n #print(john_hopkins_data_recovered)\r\n\r\n #print('******************************************')\r\n #Merging two datasets based on date\r\n final_df = df2.join(john_hopkins_data_recovered.set_index('Date'), on ='date',how='left').fillna(0)\r\n #filtering records based on last run date\r\n #final_df = final_df[final_df['date'] >last_run_date]\r\n #print(final_df)\r\n return final_df\r\n","sub_path":"Extract_script.py","file_name":"Extract_script.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"128810270","text":"from 
src.image_utils import ImageUtils\nimport numpy as np\nimport cv2  # needed below for cv2.imwrite\n\nclass FilterBase:\n    @staticmethod\n    def filter(window_func,img,n,m,filename=None):\n        new_img = np.ndarray(img.shape)\n        if len(img.shape) == 3:\n            new_img = FilterBase.filter_color(window_func,img,n,m)\n        elif len(img.shape) == 2:\n            new_img = FilterBase.filter_flat(window_func,img,n,m)\n\n        if filename is not None:\n            cv2.imwrite(\"images/filtered/\"+filename+\"_amf.jpg\",new_img)\n        return new_img\n\n    @staticmethod\n    def filter_flat(window_func,img,n,m):\n        N = img.shape[0]\n        M = img.shape[1]\n        b_img = ImageUtils.pad_to_size(img,(N+n,M+m))\n        new_img = np.ndarray(b_img.shape)\n        for i in range(n//2,N+n//2):\n            for j in range(m//2,M+m//2):\n                new_img[i,j] = window_func(b_img,i,j,n,m)\n\n        return new_img[n//2:N+n//2,m//2:M+m//2]\n\n    @staticmethod\n    def filter_color(window_func,img,n,m):\n        new_img = np.ndarray(img.shape)\n        new_img[:,:,0] = FilterBase.filter_flat(window_func,img[:,:,0],n,m)\n        new_img[:,:,1] = FilterBase.filter_flat(window_func,img[:,:,1],n,m)\n        new_img[:,:,2] = FilterBase.filter_flat(window_func,img[:,:,2],n,m)\n        return new_img\n    \n    \n\n","sub_path":"src/filter_base.py","file_name":"filter_base.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"389743046","text":"import logging\nimport os\nimport time\nimport re\nfrom urllib.request import urlretrieve\n\nimport pytricia\n\nfrom persin.config import RKN_PATH, RKN_DUMP_URL\nfrom persin.config import RKN_UPDATE_INTERVAL, PROXY_IPS\n\nDUMP_PATH = os.path.join(RKN_PATH, \"dump.csv\")\n\n\ndef retrieve_blocklist():\n    os.makedirs(RKN_PATH, exist_ok=True)\n    urlretrieve(RKN_DUMP_URL, DUMP_PATH)\n\n\ndef is_blocklist_outdated():\n    try:\n        return time.time() - os.path.getmtime(DUMP_PATH) >= RKN_UPDATE_INTERVAL\n    except OSError:\n        return True\n\n\ndef build_blocklist():\n    if is_blocklist_outdated():\n        logging.info(\"RKN block list outdated, downloading it\")\n        retrieve_blocklist()\n    url_re = re.compile(b'^https://([^/]+)')\n    logging.info(\"Loading RKN block list\")\n    pyt = pytricia.PyTricia()\n    domains = []\n    with open(DUMP_PATH, \"rb\") as f:\n        for line in f:\n            parts = line.split(b\";\")\n            if len(parts) <= 1:\n                continue\n            for ip in parts[0].split(b\" | \"):\n                if b\":\" in ip or not ip:\n                    continue\n                try:\n                    pyt[ip.decode()] = 1\n                except ValueError:\n                    continue\n            if parts[2]:\n                for url in parts[2].split(b\" | \"):\n                    m = url_re.match(url)\n                    if m:\n                        domains.append(m[1])\n    for ip in PROXY_IPS:\n        pyt[ip] = 1\n    logging.info(f\"RKN block list loaded: {len(pyt)} addresses, {len(domains)} domains.\")\n    return pyt, domains\n","sub_path":"persin/rkn.py","file_name":"rkn.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"518929112","text":"from django.conf import settings\nfrom elasticsearch_dsl import Search, Q\nfrom elasticsearch_dsl.query import MultiMatch, MatchPhrase\nfrom .configs import ES\n\nclient = settings.ES_CLIENT\n\n\ndef search_news(query, fields, match_phrase=False, time_filter=''):\n    if match_phrase:\n        q = MultiMatch(query=query, fields=fields, type=\"phrase\")\n    else:\n        if fields == ['summary', 'title', 'content']:\n            q = MultiMatch(query=query, fields=['title^3', 'title.std', 'content^2', 'content.std'], type=\"most_fields\")\n        elif fields == 'title':\n            q = MultiMatch(query=query, fields=['title', 'title.std'], type=\"most_fields\")\n        else:\n            q = MultiMatch(query=query, fields=['content', 
'content.std'], type=\"most_fields\")\n\n if time_filter:\n return Search(using=client, index=\"news\").filter('range', time={'gte': time_filter}).highlight('title',\n 'content').query(q)\n\n return Search(using=client, index=\"news\").highlight('title', 'content').query(q)\n","sub_path":"search_api/api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222698261","text":"from random import choice, random\nfrom bokeh.io import show, output_file\nfrom bokeh.plotting import figure\nfrom bokeh.models import (GraphRenderer, StaticLayoutProvider, Circle, LabelSet,\n ColumnDataSource)\nfrom bokeh.palettes import Spectral8\nfrom graph1 import Graph \nimport math\n\n# class BokehGraph:\n# \"\"\"Class that takes a graph and exposes drawing methods.\"\"\"\n# def __init__(self, graph, title='Graph', width=10, height=10, show_axis=False, show_grid=False, circle_size=35):\n# if not graph.vertices:\n# raise Exception('Graph should contain vertices!')\n# self.graph = Graph()\n\n# self.width = width\n# self.height = height\n# self.pos = {} # dict to map vertices to x, y positons\n# self.plot = figure(title=title, x_range=(0, width), y_range=(0, height))\n# self.plot.axis.visible = show_axis\n# self.plot.grid.visible = show_grid\n# self._setup_graph_renderer(circle_size)\n\n# def _setup_graph_renderer(self, circle):\n# graph_renderer = GraphRenderer()\n\n# graph_renderer.node_renderer.data_source.add(\n# list(self.graph.vertices.keys()), 'index')\n# graph_renderer.renderer.data_source.add(\n# self.get_random_colors(), 'color')\n# graph_renderer.node_renderer.glyph = Circle(size=circle_size, fill_color='color')\n# graph_renderer.edge_renderer.data_source.data = self._get_edge_indexes()\n# self.randomize()\n# graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=self.pos)\n# self.plot.renderers.append(graph_renderer)\n\n# def _get_random_colors(self):\n# colors = []\n# for _ in range(len(self.graph_vertices)):\n# color='#'+''.join([choice('0123456789ABCDEF') for j in range(6)])\n# colors.append(color)\n# return colors\n\n# def _get_edge_indexes(self):\n# start_indices = []\n# end_endices = []\n# checked = set()\n\n# for vertex, edges in self.graph.vertices.items():\n# if vertex not in checked:\n# for destination in edges:\n# start_indices.append(vertex)\n# end_indices.append(destination)\n# checked.add(vertex)\n\n# return dict(start=start_indices, end=end_endices)\n\n# def show(self, output_path='./graph.html'):\n# output_file(output_path)\n# show(self.plot)\n\n# def randomize(self):\n# for vertex in self.graph.verticex:\n# self.pos[vertex] = (1 + random() + (self.width -2), 1 + random() * (self.height - 2)) \n\n\n# from draw import BokehGraph \n# graph = Graph()\n# graph.add_vertex('A') \n# graph.add_vertex('B') \n# graph.add_edge('A', 'B') \n# graph \n# graph.vertices\n# bg = BokehGraph(graph)\n# bg.pos\n# bg.show()\n\n\n\n# solution from a student\nclass BokehGraph:\n \"\"\"Class that takes a graph and exposes drawing methods.\"\"\"\n\n def __init__(self, graph):\n self.graph = graph\n\n def show(self):\n node_indices = list(self.graph.vertices.keys())\n x_value = [x for (x, y) in self.graph.vertices.items()]\n y_value = [list(y) for (x, y) in self.graph.vertices.items()]\n\n plot = figure(title=\"Graph Layout\", x_range=(0, 4),\n y_range=(0, 4), tools=\"\", toolbar_location=None)\n\n graph = GraphRenderer()\n\n graph.node_renderer.data_source.add(x_value, 
'index')\n graph.node_renderer.data_source.add(Spectral8, 'color')\n graph.node_renderer.glyph = Circle(radius=0.1, fill_color='color')\n\n print(\"\\n x value: \", x_value)\n print(\"\\n y value: \", y_value)\n\n graph.edge_renderer.data_source.data = dict(\n start=x_value, end=y_value)\n\n circ = [int(i) for i in x_value]\n x = [i for i in circ]\n y = [math.sin(i) for i in circ] # sinus shape\n\n graph_layout = dict(zip(x_value, zip(x, y)))\n graph.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)\n\n plot.renderers.append(graph)\n\n output_file('graph.html')\n show(plot)\n\ngraph = Graph() # Instantiate your graph\ngraph.add_vertex('0')\ngraph.add_vertex('1')\ngraph.add_vertex('2')\ngraph.add_vertex('3')\ngraph.add_edge('0', '1')\ngraph.add_edge('0', '3')\ngraph.add_edge('1', '2')\nbg = BokehGraph(graph)\nbg.show()\n\ndft(graph.vertices, '0', [])\nbft(graph.vertices, '0')\nprint(graph.vertices)","sub_path":"projects/graph/src/draw1.py","file_name":"draw1.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71398426","text":"import copy\nimport random\n\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom network import *\nfrom ou_noise import *\n\nclass DDPG():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n\n def __init__(self, device, state_size, action_size, random_seed, hidden_in_dim, hidden_out_dim, activation, \n tau, lr_actor, lr_critic, weight_decay, epsilon, epsilon_decay):\n \n \"\"\"Initialize an Agent object.\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n random_seed (int): random seed\n \"\"\"\n super(DDPG, self).__init__()\n \n self.state_size = state_size\n self.action_size = action_size\n \n self.device = device\n self.tau = tau\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n\n # Actor Network (w/ Target Network)\n self.actor_local = Network(self.state_size, self.action_size, hidden_in_dim, hidden_out_dim, activation=activation, is_actor=True).to(self.device)\n self.actor_target = Network(self.state_size, self.action_size, hidden_in_dim, hidden_out_dim, activation=activation, is_actor=True).to(self.device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_actor)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Network(self.state_size*2, self.action_size*2, hidden_in_dim, hidden_out_dim, activation=activation, is_actor=False).to(self.device)\n self.critic_target = Network(self.state_size*2, self.action_size*2, hidden_in_dim, hidden_out_dim, activation=activation, is_actor=False).to(self.device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_critic, weight_decay=weight_decay)\n\n # Same initialization\n self.__copy__(self.actor_local, self.actor_target)\n self.__copy__(self.critic_local, self.critic_target)\n\n # Noise process\n self.noise = OUNoise(action_size, seed=random_seed)\n \n def act(self, state, noise_scale=0.0):\n \"\"\"Returns actions for given state as per current policy.\"\"\"\n\n if isinstance(state, np.ndarray):\n state = torch.from_numpy(state).float()\n \n self.actor_local.eval()\n with torch.no_grad():\n action = self.actor_local(state.to(self.device))\n self.actor_local.train()\n return action + noise_scale*self.noise.noise()\n\n def target_act(self, state, noise_scale=0.0):\n \"\"\"Returns actions for given state as per current policy.\"\"\"\n\n if 
isinstance(state, np.ndarray):\n            state = torch.from_numpy(state).float()\n        return self.actor_target(state.to(self.device)) + noise_scale*self.noise.noise()\n\n    def reset(self):\n        self.noise.reset()\n\n    def update_exploration_strategy(self):\n        \"\"\"Update policy and value parameters using given batch of experience tuples.\n        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n        where:\n            actor_target(state) -> action\n            critic_target(state, action) -> Q-value\n        Params\n        ======\n            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n            gamma (float): discount factor\n        \"\"\"\n        # ---------------------------- update noise ---------------------------- #\n        self.epsilon -= self.epsilon_decay\n        self.noise.reset()\n\n    def soft_update(self, local_model, target_model):\n        \"\"\"Soft update model parameters.\n        θ_target = τ*θ_local + (1 - τ)*θ_target\n        Params\n        ======\n            local_model: PyTorch model (weights will be copied from)\n            target_model: PyTorch model (weights will be copied to)\n            tau (float): interpolation parameter\n        \"\"\"\n        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n            target_param.data.copy_(self.tau*local_param.data + (1.-self.tau)*target_param.data)\n\n    def __copy__(self, source, target):\n        for src_param, target_param in zip(source.parameters(), target.parameters()):\n            target_param.data.copy_(src_param.data)\n\n","sub_path":"multi-agent-tennis/maddpg/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"366540103","text":"import re\nfrom os.path import dirname, join\n\nfrom setuptools import setup, find_packages\n\n\nwith open(join(dirname(__file__), 'markdownserver', '__init__.py')) as f:\n    version = re.match(r'.*__version__ = \\'(.*)\\'', f.read(), re.S).group(1)\n\n\ndependencies = [\n    'easycli',\n    'yhttp >= 2.8, < 3',\n    'markdown2',\n]\n\n\nsetup(\n    name='markdown-httpserver',\n    version=version,\n    url='https://github.com/babakhani/markdown-httpserver',\n    author='Reza Babakhani',\n    author_email='babakhani.reza@gmail.com',\n    description='markdown http server',\n    packages=find_packages(exclude=['tests']),\n    install_requires=dependencies,\n    license='MIT',\n    entry_points={\n        'console_scripts': [\n            'ms = markdownserver:app.climain',\n        ]\n    }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"239041786","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\n\nimport datetime\nimport time\nimport pytz\nfrom datetime import date, datetime, timedelta\nfrom odoo.exceptions import UserError, ValidationError\n\nclass reportHrIncidentsWizard(models.TransientModel):\n    _name = 'report.hr_incidents.hr_leave_incidents_report'\n    \n    @api.multi\n    def _get_report_values(self, docids, data=None):\n        print ('data22')\n        return {\n            'doc_ids': '',\n            'doc_model': 'hr.leave',\n            'docs': '',\n            'data': data,\n        }\n\n    \nclass reportHrInhabilityAbsenteeism(models.TransientModel):\n    _name = 'report.hr_incidents.inhability_absenteeism_report_view'\n    \n    @api.multi\n    def _get_report_values(self, docids, data=None):\n        print ('data')\n        return {\n            'doc_ids': '',\n            'doc_model': 'hr.leave',\n            'docs': '',\n            'data': data,\n        }\n    \n    
\n","sub_path":"hr_incidents/report/hr_incidents.py","file_name":"hr_incidents.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483131923","text":"from collections import deque\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\n\ndef level_order_traversal(root):\n queue = deque([root])\n res = []\n while queue:\n cur_level = []\n n = len(queue)\n for i in range(n):\n p = queue.popleft()\n cur_level.append(p.val)\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n res.append(cur_level)\n return res\n\n\ndef reverse_level_order_traversal(root):\n res = []\n queue = deque([root])\n while queue:\n cur_level = []\n n = len(queue)\n for i in range(n):\n cur_level.append(queue.popleft())\n\n res.insert(0, cur_level)\n return res\n\n\ndef zigzag_level_order_traversal(root):\n queue = deque([root])\n res = []\n ltr = False\n while queue:\n n = len(queue)\n cur_level = []\n for _ in range(n):\n p = queue.popleft()\n if ltr:\n cur_level.append(p)\n else:\n cur_level.insert(0, p)\n ltr = not ltr\n res.append(cur_level)\n return res\n\n\ndef zigzag_traversal(root):\n stack1, stack2 = deque([root]), deque([])\n res = []\n ltr = True\n\n while stack1:\n for _ in range(len(stack1)):\n p = stack1.pop()\n print(p.val)\n if ltr:\n if p.left:\n stack2.append(p.left)\n if p.right:\n stack2.append(p.right)\n else:\n if p.right:\n stack2.append(p.right)\n if p.left:\n stack2.append(p.left)\n ltr = not ltr\n stack1, stack2 = deque(), stack1\n\n\ndef level_averages(root):\n\n queue = deque([root])\n res = []\n while queue:\n n = len(queue)\n r_sum = 0\n for _ in range(n):\n p = queue.popleft()\n r_sum += p.val\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n res.append(r_sum//n)\n return res\n\n\ndef minimum_depth(root):\n queue = deque([root])\n depth = 0\n while queue:\n\n n = len(queue)\n for _ in range(n):\n p = queue.popleft()\n if not p.left and not p.right:\n return depth\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n depth += 1\n return depth\n\n\ndef connect_level_order_siblings(root):\n\n queue = deque([root])\n while queue:\n prev = None\n n = len(queue)\n for _ in range(n):\n p = queue.popleft()\n if prev:\n prev.next = p\n p = prev\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n return root\n\n\ndef connect_all_level_order_siblings(root):\n queue = deque([root])\n prev, cur = None, None\n\n while queue:\n cur = queue.popleft()\n\n if prev:\n prev.next = cur\n prev = cur\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n return\n\n\ndef find_level_order_successor(root, target):\n queue = deque([root])\n while queue:\n\n n = len(queue)\n\n for _ in range(n):\n p = queue.popleft()\n\n if p.val == target:\n break\n\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n\n return queue[0] if queue else None\n\n\ndef right_view_of_binary_tree(root):\n\n queue = deque([root])\n res = []\n\n while queue:\n n = len(queue)\n\n for i in range(n):\n p = queue.popleft()\n\n if i == n-1:\n res.append(p.val)\n if p.left:\n queue.append(p.left)\n if p.right:\n queue.append(p.right)\n return 
res\n","sub_path":"revise-daily/google-redo/educative/breadth_first.py","file_name":"breadth_first.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"268232552","text":"class Solution(object):\n def maximumGap(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if nums is None or len(nums) <= 1:\n return 0\n \n nums = sorted(nums)\n gap = nums[1] - nums[0]\n \n \n for i in range(1, len(nums)):\n if nums[i] - nums[i - 1] > gap:\n gap = nums[i] - nums[i - 1] \n \n return gap\n \ninstance = Solution()\nnums = [100, 3, 2, 1]\nprint(instance.maximumGap(nums))\n","sub_path":"src/MaximumGap.py","file_name":"MaximumGap.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561006336","text":"#!/usr/bin/env python\n\n\"\"\"\nStart script for agent.\n\"\"\"\n\n__copyright__ = '(c) Webyog, Inc'\n__author__ = 'Vishal P.R'\n__email__ = 'hello@sealion.com'\n\nimport sys\nimport os\nimport getopt\nimport signal\n\n#add module lookup paths to sys.path so that import can find them\n#we are inserting at the begining of sys.path so that we can be sure that we are importing the right module\nexe_path = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 1)[0]\nsys.path.insert(0, exe_path + '/lib/socketio_client') \nsys.path.insert(0, exe_path + '/lib/websocket_client')\nsys.path.insert(0, exe_path + '/opt/default/python/')\nsys.path.insert(0, exe_path + '/lib')\nsys.path.insert(0, exe_path + '/src')\n\nimport version_info\nimport exit_status\n\ntry:\n #try parsing the options\n options, args = getopt.getopt(sys.argv[1:], '', ['insecure', 'version', 'debug']) \n options = [option[0] for option in options]\nexcept Exception:\n options, args = [], sys.argv[1:] #reset the options so that the service module takes care \n \n#whether to disable SSL verification; refer lib/request/__init__.py\nif '--insecure' in options:\n __insecure_ssl__ = True\n \nsignal.signal(signal.SIGINT, lambda x, y: sys.exit(exit_status.AGENT_ERR_INTERRUPTED)) #setup signal handling for SIGINT\n \nif '--version' in options: #print version and exit\n version_info.print_version()\n sys.exit() \nelif '--debug' in options and len(args) == 1 and args[0] == 'start':\n #do not detach the process from the controlling terminal; so use main module\n import main as main_module\nelse:\n import service as main_module #normal startup\n \nmain_module.run(*(tuple(args))) #run the selected module with the arguments\n","sub_path":"code/bin/sealion.py","file_name":"sealion.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"13610080","text":"\"\"\"\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom AmazonScraper import AmazonScraper\nfrom BestBuyScraper import BestBuyScraper\nfrom WalmartScraper import WalmartScraper\n\n\nclass Scraper:\n \"\"\"\n Scraper chooses 
which scraper to run.\n 'ChooseScraper' chooses the scraper.\n @author Arcane94\n \"\"\"\n\n def ChooseScraper(self, url):\n \"\"\"\n Chooses the scraper based on product url\n @param url: URL of the product\n @return: the stock status information\n \"\"\"\n\n if \"amazon\" in url:\n amazonscraper = AmazonScraper(url)\n stock_info, cost = amazonscraper.job()\n return stock_info, cost\n\n if \"bestbuy\" in url:\n bestbuyscraper = BestBuyScraper(url)\n stock_info, cost = bestbuyscraper.job()\n return stock_info, cost\n\n if \"walmart\" in url:\n walmartscraper = WalmartScraper(url)\n stock_info, cost = walmartscraper.job()\n return stock_info, cost\n","sub_path":"code/Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"342255313","text":"#!/usr/bin/python\n\nimport sys\nfrom subprocess import Popen, PIPE, os\nimport csv\n\nnTrials = 10\nsum = 0\naverages = []\ntimes = []\nfast = 0\nslow = 0\nfastThread = 0\nslowThread = 0\n\n#check for correct arg length\nif (len(sys.argv) != 2): # not 2 args\n\tprint ('usage: hello.py <k>')\nelse: # have 2 args\n\tfor x in range(1,9):\n\t\tsum = 0 #reset sum for next run\n\t\t# set number of threads (1 - 8) to x\n\t\tos.environ['OMP_NUM_THREADS'] = str(x)\n\t\t\n\t\tprint('Finding runtime average for ' + str(x) + ' threads...')\n\t\tfor y in range(1, nTrials+1): # loop nTrials times\n\t\t\n\t\t\t# call harmonic with that number of threads for a num of trials\n\t\t\tprocess = Popen(['./harmonic', sys.argv[1]], stdout=PIPE, stderr=PIPE)\n\t\t\tprocess.wait()\n\t\t\tz = float(process.stdout.read())\n\t\t\tsum = sum + z\n\t\t# calculate average of nTrials using sum/nTrials\n\t\taverage = sum/nTrials\n\t\tprint('Average for ' + str(x) + ' threads with '+ str(y) + ' trials is ' + str(average))\n\n\t\t# store averages in array\n\t\taverages.append(average)\n\t\t# store corresponding thread count\n\t\ttimes.append(x)\n\t\n\t# do some calculations for fun\n\tfast = min(averages)\n\tfastIndex = averages.index(fast)\n\tslow = max(averages)\n\tslowIndex = averages.index(slow)\n\tfastThread = times[fastIndex]\n\tslowThread = times[slowIndex]\n\tpercentSpeedup = fast/slow\n\tprint('Speedup percentage is ' + str(int(percentSpeedup*100)) + '%!')\n\tprint('Fastest thread average is ' + str(fast) + ' with ' + str(fastThread) + ' threads')\n\tprint('Slowest thread average is ' + str(slow) + ' with ' + str(slowThread) + ' threads')\n\t# output average to a text file (or better yet, csv)\n\tresultfile = open('results.csv', 'wb')\n\twr = csv.writer(resultfile, quoting=csv.QUOTE_ALL)\n\twr.writerow(times)\n\twr.writerow(averages)\n","sub_path":"harmonic_sum/harmonic.py","file_name":"harmonic.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14475796","text":"import requests\n\nurl = \"http://api.arbetsformedlingen.se/af/v0/platsannonser/soklista/yrkesgrupper\"\n\nparameters = {\"yrkesomradeid\":4}\n\nheaders = {\"Accept-Language\":\"application/json\" }\n\nresponse = requests.get(url, headers=headers, params=parameters)\n\n\n\n\nprint( response.status_code, response.content)\n","sub_path":"platsbanken-api-call-example/call_platsbanken.py","file_name":"call_platsbanken.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219160337","text":"\"\"\"\r\nMachine 
Learning plots\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\ndef plot_hist(df, x_col, x_labels):\r\n \"\"\" To plot histogram for frequency counts \"\"\"\r\n \r\n print(\"Plotting histogram for %s...\" %x_col)\r\n print(\"Total counts:\", len(df))\r\n uniq_labels = sorted(df.unique())\r\n x_labels = x_labels\r\n plt.figure(figsize=(6, 6))\r\n plt.style.use('seaborn-whitegrid')\r\n plt.rcParams.update({'font.size': 15})\r\n x_pos = np.arange(len(df)) #the label locations\r\n ax = sns.countplot(df)\r\n ax.set_xticklabels(x_labels, rotation=40, ha='right')\r\n plt.tight_layout()\r\n for v in range(len(uniq_labels)):\r\n plt.text(v, (df == v).sum(), str((df == v).sum()))\r\n plt.show()\r\n \r\ndef plot_hbar_by_sum(df, x_axis, y_axis, y_col):\r\n \"\"\" To plot bar chart for data aggregated by sum \"\"\"\r\n \r\n print(\"Plotting horizontal bar chart aggregated by sum...\")\r\n \r\n plt.rcParams.update({'font.size': 25})\r\n plt.style.use('seaborn-whitegrid')\r\n fig, ax = plt.subplots(figsize=(40, 60))\r\n\r\n y_pos = np.arange(len(y_axis)) #the label locations\r\n y_labels = y_axis\r\n width = 0.08 #the width of the bars\r\n cluster = -(len(y_axis)/2)\r\n bar_colors = ['red', 'green', 'blue', 'gold', 'white', 'black', 'orange', 'magenta', 'grey', 'yellow', 'purple', 'cyan']\r\n for x in x_axis:\r\n idx = x_axis.index(x)\r\n x_data = df.groupby(y_col)[x].sum()\r\n ax.barh(y_pos-cluster*width, x_data, width, label=x, color=bar_colors[idx], edgecolor='navy', align='center')\r\n cluster += 1\r\n \r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(y_labels)\r\n ax.invert_yaxis() # labels read top-to-bottom\r\n ax.set_xlabel('Counts')\r\n ax.set_title('Counts of all binary features of all %ss' %y_col)\r\n \r\n plt.legend(loc='upper right')\r\n plt.show()\r\n \r\ndef plot_bar_groupby_pivot(df, feature, target):\r\n \"\"\" To plot bar chart for grouped data with pivot \"\"\"\r\n \r\n print(\"Plotting horizontal bar chart of %s group by %s ...\" %(feature, target))\r\n \r\n uniq_feature = df[feature].unique()\r\n\r\n df1 = df.groupby([target,feature]).size().reset_index(name='count')\r\n #print(df1)\r\n df2 = df1.pivot(index=target, columns=feature, values='count')\r\n #print(df2)\r\n\r\n y_pos = np.arange(len(df2.index))\r\n width = 0.12\r\n cluster = -(len(df2.index)/2)\r\n plt.rcParams.update({'font.size': 15})\r\n plt.style.use('seaborn-whitegrid')\r\n fig, ax = plt.subplots(figsize=(12, 12))\r\n for f in uniq_feature:\r\n ax.barh(y_pos-cluster*width, df2[f], width, label=f, color=f, edgecolor='navy', align='center')\r\n cluster += 1\r\n\r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(df2.index)\r\n ax.invert_yaxis() # labels read top-to-bottom\r\n ax.set_xlabel(feature)\r\n ax.set_title('%s by %s' %(feature, target))\r\n \r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\ndef plot_bar_singlexy(df, feature, target, x_labels):\r\n \"\"\" To plot bar chart for single x against y \"\"\"\r\n \r\n print(\"Plotting bar chart of %s by %s ...\" %(feature, target))\r\n \r\n uniq_feature = df[feature].unique()\r\n\r\n df1 = df.groupby([target,feature]).size().reset_index(name='count')\r\n #print(df1)\r\n df2 = df1.pivot(index=target, columns=feature, values='count')\r\n #print(df2)\r\n \r\n uf_len = len(uniq_feature)\r\n index_len = len(df2.index)\r\n x_pos = np.arange(index_len) #the label locations\r\n x_labels = x_labels\r\n #x_labels = decode_religion(df2.index.to_list()) #produce religion as labels on x-axis\r\n #print(\"Type of 
x_labels:\", df2.index.to_list())\r\n \r\n \r\n fig_w = max(len(uniq_feature) * 2, 5)\r\n fig_h = max(len(uniq_feature) * 1, 3)\r\n font_s = max(len(uniq_feature) * 2, 10)\r\n width = min((fig_w/uf_len)+0.10, 0.15) #the width of the bars\r\n cluster = -(index_len/2)\r\n \r\n plt.figure(figsize=(fig_w, fig_h))\r\n plt.style.use('seaborn-whitegrid')\r\n plt.rcParams.update({'font.size': font_s})\r\n\r\n for f in uniq_feature:\r\n plt.bar(x_pos-cluster*width, df2[f], width, label=f)\r\n cluster += 1\r\n \r\n plt.ylabel('Number of countries')\r\n plt.title('%s counts by %s' %(feature, target))\r\n plt.xlabel(target)\r\n plt.xticks(x_pos, x_labels, rotation=45)\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\ndef plot_cfm(df, mod_name, test_acc, ax=None):\r\n \"\"\" To plot confusion matrix \"\"\"\r\n \r\n ax = ax\r\n ax.set_title(\"%s model \\nAccuracy:%.3f\" %(mod_name, test_acc))\r\n sns.heatmap(df, annot=True, fmt='d', ax=ax)\r\n ax.set_ylim(ax.get_ylim()[0]+0.5, 0) #fix seaborn heatmaps problem in matplotlib 3.1.1\r\n ax.set_ylabel(\"Actual target\")\r\n ax.set_xlabel(\"Predicted target\")\r\n\r\ndef plot_cv(cv_acc_scores, ax):\r\n \"\"\" To plot cross validation \"\"\"\r\n \r\n #print(\"Cross validation accuracy scores:\", cv_acc_scores) \r\n print(\"Cross validation test score average: %.3f\" %(cv_acc_scores['test_score'].mean()))\r\n print(\"Cross validation train score average:%.3f\" %(cv_acc_scores['train_score'].mean()))\r\n ax = ax\r\n y_pos = np.arange(len(cv_acc_scores['train_score']))\r\n #print(\"y_pos:\", y_pos)\r\n ax.plot(y_pos, cv_acc_scores['train_score'], label='Training accuracy')\r\n ax.plot(y_pos, cv_acc_scores['test_score'], label='Validation accuracy')\r\n ax.set_xlabel('cv iterations')\r\n ax.set_ylabel('accuracy')\r\n plt.legend(loc='bottom right')\r\n\r\ndef plot_line_w_dict(dict_data, dict_keys, x_axis, x_label, y_label, g_title):\r\n \"\"\" To plot a line chart with dictionary data \"\"\"\r\n #print(dict_data[dict_key])\r\n plt.figure(figsize=(20, 10))\r\n plt.style.use('seaborn-whitegrid')\r\n plt.rcParams.update({'font.size': 22})\r\n plt.xlabel(x_label)\r\n plt.title(g_title)\r\n best_test_acc_score = 0\r\n best_mean_acc_score = 0\r\n for k in dict_keys:\r\n max_test_acc_score = max(dict_data[k])\r\n mean_test_acc_score = np.mean(dict_data[k])\r\n if max_test_acc_score > best_test_acc_score and mean_test_acc_score > best_mean_acc_score:\r\n best_test_acc_score = max_test_acc_score\r\n best_mean_acc_score = mean_test_acc_score\r\n best_model = k\r\n print(\"The highest test scores for model %s is %.3f, mean is %.3f\" %(k, max_test_acc_score, mean_test_acc_score))\r\n plt.plot(x_axis.astype(str), dict_data[k], label=k)\r\n print(\"The best test accuracy score is: %.3f\" %best_test_acc_score)\r\n print(\"The best test mean accuracy score is: %.3f\" %best_mean_acc_score)\r\n print(\"The best model is:\", best_model)\r\n plt.legend(loc='bottom left')\r\n plt.show()\r\n\r\ndef plot_corr(corr_matrix, ylabel):\r\n \"\"\" To plot a correlation chart \"\"\"\r\n\r\n mask = np.triu(np.ones_like(corr_matrix, dtype=np.bool)) #generate a mask for the upper triangle\r\n plt.figure(figsize=(20,20))\r\n plt.rcParams.update({'font.size': 15})\r\n ax = sns.heatmap(corr_matrix, mask=mask, square=True, annot=True)\r\n #print(\"ylim:\", ax.get_ylim())\r\n ax.set_ylim(ax.get_ylim()[0]+0.5, 0) #fix seaborn heatmaps problem in matplotlib 3.1.1\r\n plt.title('Correlation')\r\n plt.ylabel(ylabel)\r\n plt.show()\r\n\r\ndef plot_multiple_lines(dict_data, dict_keys, x_axis, x_label, 
y_label, g_title):\r\n #plot line chart for the test scores\r\n x_axis = np.arange(1, splits_count+1)\r\n plt.figure(figsize=(20, 10))\r\n plt.style.use('seaborn-whitegrid')\r\n plt.rcParams.update({'font.size': 22})\r\n plt.xlabel('n_splits')\r\n plt.title('Test scores with StratifiedShuffleSplit()')\r\n for model in clf_models:\r\n mod_name = get_model_name(model)\r\n print(\"The highest CV scores for model %s is %.3f, mean is %.3f\" %(mod_name, max(dict_cv_acc[mod_name]), np.mean(dict_cv_acc[mod_name])))\r\n print(\"The highest test scores for model %s is %.3f, mean is %.3f\" %(mod_name, max(dict_test_acc[mod_name]), np.mean(dict_test_acc[mod_name])))\r\n plt.plot(x_axis.astype(str), dict_test_acc[mod_name], label=mod_name)\r\n plt.legend(loc='bottom left')\r\n plt.show()","sub_path":"ml_plot.py","file_name":"ml_plot.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470685603","text":"def greet_user(user_name, age=1):\n print(\"hello\", user_name)\n print(\"I am \", age, \" old\")\n\n\ngreet_user('jail', 10)\n\ngreet_user(age=1232, user_name='tree')\n\ngreet_user('hi', age=12)\n\ngreet_user('xr')\n\n\ndef make_tshit(size='L', word='I Love Python'):\n print('You T Shirt is ', size, ' The word is ', word)\n\n\nmake_tshit()\nmake_tshit(word='hello world')\nmake_tshit(size='S', word='hello world 999')\n\n\ndef get_name(last_name, first_name=''):\n if first_name:\n return first_name + ' ' + last_name\n else:\n return last_name\n\n\nprint(get_name('hello', 'world'))\nprint(get_name(last_name='world'))\n\n\ndef build_person(first_name, last_name, age=''):\n person = {'first_name': first_name, 'last_name': last_name, 'age': age}\n return person\n\n\nprint(build_person('hello', 'world'))\n\n\ndef print_person_list(names):\n names.sort()\n for name in names:\n print(name)\n\n\nnames = ['A', 'e', 'c']\nprint(names)\nprint_person_list(names[:])\nprint(names)\n\n\ndef cookie(age, **info):\n print(age)\n for k, v in info.items():\n print(k)\n print(v)\n\n\na=cookie(9, a='1', b='2')\n\n","sub_path":"ch08/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25268409","text":"# -*- coding:utf-8 -*-\n\nimport datetime\n\nfrom django import http\nfrom django.conf import settings\nfrom django.db.models.query import QuerySet\nfrom django_six import Support_ValuesQuerySet, ValuesQuerySet\nfrom excel_base import (BytesIO, StringIO, as_csv, as_dict_row_merge_xls, as_list_row_merge_xls, as_row_merge_xls,\n as_xls, is_py2)\n\n\n# Min (Max. 
Rows) for Widely Used Excel\n# http://superuser.com/questions/366468/what-is-the-maximum-allowed-rows-in-a-microsoft-excel-xls-or-xlsx\nEXCEL_MAXIMUM_ALLOWED_ROWS = 65536\n# Column Width Limit For ``xlwt``\n# https://github.com/python-excel/xlwt/blob/master/xlwt/Column.py#L22\nEXCEL_MAXIMUM_ALLOWED_COLUMN_WIDTH = 65535\n\n\ndef __init__(self, data, output_name='excel_data', format='%Y%m%d%H%M%S', headers=None, force_csv=False, encoding='utf-8-sig', font=None, sheet_name='Sheet 1', blanks_for_none=True, auto_adjust_width=True, min_cell_width=1000, vert=0x01, horz=0x01, hvert=0x01, hhorz=0x02, merge_type=None, mapping=None, timezone=None):\n    self.data = data\n    self.output_name = output_name\n    self.format = format\n    self.headers = headers\n    self.force_csv = force_csv\n    self.encoding = encoding\n    self.font = font\n    self.sheet_name = sheet_name\n    self.blanks_for_none = blanks_for_none\n    self.auto_adjust_width = auto_adjust_width\n    self.min_cell_width = min_cell_width\n    # VERT_TOP     = 0x00    top-aligned\n    # VERT_CENTER  = 0x01    center-aligned (vertically)\n    # VERT_BOTTOM  = 0x02    bottom-aligned\n    # HORZ_LEFT    = 0x01    left-aligned\n    # HORZ_CENTER  = 0x02    center-aligned (horizontally)\n    # HORZ_RIGHT   = 0x03    right-aligned\n    self.vert = vert\n    self.horz = horz\n    self.hvert = hvert\n    self.hhorz = hhorz\n    self.mapping = mapping\n    self.timezone = timezone or settings.TIME_ZONE\n\n    if merge_type != 'dict_row_merge':\n        if not isinstance(self.data, dict):\n            self.data = {self.sheet_name: {'data': self.data, 'headers': self.headers}}\n\n    # Make sure we've got the right type of data to work with\n    # ``list index out of range`` if data is ``[]``\n    valid_data = True\n    for sheet_name, sheet_info in self.data.items():\n        sheet_data = sheet_info.get('data') or []\n        sheet_headers = sheet_info.get('headers')\n        if Support_ValuesQuerySet and isinstance(sheet_data, ValuesQuerySet):\n            sheet_data = list(sheet_data)\n        elif isinstance(sheet_data, QuerySet):\n            sheet_data = list(sheet_data.values())\n        if not hasattr(sheet_data, '__getitem__'):\n            valid_data = False\n            break\n        if sheet_data:\n            if isinstance(sheet_data[0], dict):\n                if sheet_headers is None:\n                    sheet_headers = list(sheet_data[0].keys())\n                sheet_data = [[row[col] for col in sheet_headers] for row in sheet_data]\n            if not hasattr(sheet_data[0], '__getitem__'):\n                valid_data = False\n                break\n        if sheet_headers and not hasattr(sheet_headers[0], '__getitem__'):\n            valid_data = False\n            break\n        sheet_info['data'] = sheet_data\n        sheet_info['headers'] = sheet_headers\n        self.data[sheet_name] = sheet_info\n    assert valid_data is True, 'ExcelStorage requires a sequence of sequences'\n\n    self.output = StringIO() if is_py2 else BytesIO()\n    if merge_type == 'row_merge':\n        _, content_type, file_ext = (self.as_row_merge_xls, 'application/vnd.ms-excel', 'xls')\n    elif merge_type == 'list_row_merge':\n        _, content_type, file_ext = (self.as_list_row_merge_xls, 'application/vnd.ms-excel', 'xls')\n    elif merge_type == 'dict_row_merge':\n        _, content_type, file_ext = (self.as_dict_row_merge_xls, 'application/vnd.ms-excel', 'xls')\n    else:\n        # Excel has a limit on number of rows; if we have more than that, make a csv\n        use_xls = True if len(self.data) <= self.EXCEL_MAXIMUM_ALLOWED_ROWS and not self.force_csv else False\n        _, content_type, file_ext = (self.as_xls, 'application/vnd.ms-excel', 'xls') if use_xls else (self.as_csv, 'text/csv', 'csv')\n    self.output.seek(0)\n    super(ExcelResponse, self).__init__(self.output, content_type=content_type)\n    file_name_ext = '_{0}'.format(datetime.datetime.now().strftime(self.format)) if self.format else ''\n    
self['Content-Disposition'] = 'attachment;filename=\"%s.%s\"' % ('{0}{1}'.format(self.output_name, file_name_ext).replace('\"', '\\\"'), file_ext)\n\n\nnames = dir(http)\n\n\nclsdict = {\n 'EXCEL_MAXIMUM_ALLOWED_ROWS': EXCEL_MAXIMUM_ALLOWED_ROWS,\n 'EXCEL_MAXIMUM_ALLOWED_COLUMN_WIDTH': EXCEL_MAXIMUM_ALLOWED_COLUMN_WIDTH,\n '__init__': __init__,\n 'as_xls': as_xls,\n 'as_row_merge_xls': as_row_merge_xls,\n 'as_list_row_merge_xls': as_list_row_merge_xls,\n 'as_dict_row_merge_xls': as_dict_row_merge_xls,\n 'as_csv': as_csv,\n}\n\n\nif 'FileResponse' in names:\n ExcelResponse = type('ExcelResponse', (http.FileResponse, ), clsdict)\nelif 'StreamingHttpResponse' in names:\n ExcelResponse = type('StreamingHttpResponse', (http.StreamingHttpResponse, ), clsdict)\nelse:\n ExcelResponse = type('HttpResponse', (http.HttpResponse, ), clsdict)\n","sub_path":"django_excel_response/excel_response.py","file_name":"excel_response.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"38492125","text":"import utils\nimport json\nimport os\nimport numpy as np\n'''\n根据data中的txt生成另一个语音识别项目MASR所需的index文件\n'''\n\n# 生成全数据集的拼音和汉字列表\ndef make_all_file(datatype='train'):\n read_files = []\n read_files.append('thchs_{}.txt'.format(datatype))\n read_files.append('aishell_{}.txt'.format(datatype))\n read_files.append('prime_{}.txt'.format(datatype))\n read_files.append('stcmd_{}.txt'.format(datatype))\n path_lst,han_lst = [],[]\n for file in read_files:\n sub_path = os.path.join(utils.cur_path,'data')\n sub_file = os.path.join(sub_path,file)\n with open(sub_file, 'r', encoding='utf8') as f:\n data = f.readlines()\n for line in data:\n wav, _, han = line.split('\\t')\n path_lst.append(wav)\n han_lst.append(han.strip('\\n'))\n return path_lst,han_lst\n\n\ndef write_file():\n for name in ['train','dev']:\n with open('data/{}.index'.format(name), 'w', encoding='utf8') as f:\n tmp_path,tmp_han = make_all_file(name)\n for i,p in enumerate(tmp_path):\n f.write('{},{}\\n'.format(p,tmp_han[i]))\n\n label = np.load(os.path.join(utils.cur_path,'data','han_vocab_A.npy')).tolist()\n label[0]='_'\n with open('data/labels.json', 'w', encoding='utf-8') as fs:\n json.dump(label, fs)\n\n\nif __name__ == \"__main__\":\n write_file()\n print('Finish!')","sub_path":"build_masr.py","file_name":"build_masr.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483498319","text":"import time\nimport pandas as pd\nimport numpy as np\nimport calendar\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # User input for cities (Chicago, New York City, Washington), using while loop to ensure valid city input, normalized with lower()\n while True:\n city = input(\"\\nFor which city would you like to see data: Chicago, New York City, or Washington? \\n\").lower()\n if city not in ('chicago', 'new york city', 'washington'):\n print(\"Please enter either Chicago, New York City, or Washington. 
Invalid entry.\")\n continue\n else:\n break\n\n # User input for month between January and June, or all, using while loop to ensure valid month input, normalized with lower()\n while True:\n month = input(\"\\nFor which month would you like to see data: January, February, March, April, May, June, or all? \\n\").lower()\n if month not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):\n print(\"Please enter a month between January and June. Invalid entry.\")\n continue\n else:\n break\n\n # User input for day of the week, or all, normalized with lower()\n while True:\n day = input(\"\\nFor which day would you like to see data: \"\n \"Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday, or all?\\n\").lower()\n if day not in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all'):\n print(\"Please enter full name of a day of the week. Invalid entry\")\n continue\n else:\n break\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # load file into dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # month and day of week from Start Time, creating new columns. Return month name, not integer.\n df['month'] = df['Start Time'].dt.strftime('%B')\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month, if applicable\n if month != 'all':\n # use index of months list to get corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by day of week, if applicable\n if day!= 'all':\n # filter by day of week to create new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n\ndef display_data(df):\n \"\"\"Displays five rows of the data in no order\"\"\"\n\n while True:\n data_rows = input(\"Would you like to see a sample of the data? Y or N\\n\").lower()\n if data_rows == 'y':\n print( df.sample(5))\n elif data_rows == 'n':\n break\n else:\n print(\"Please choose Y or N.\\n\")\n\n df.sample(5)\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n frequent_month = df['month'].mode()[0]\n print(\"The most common month for travel is \", frequent_month)\n\n # display the most common day of week\n frequent_day = df['day_of_week'].mode()[0]\n print(\"The most common day of the week is \", frequent_day)\n\n # display the most common start hour. 
Converted to time format for the hour.\n df['hour'] = df['Start Time'].values.astype('<M8[h]')\n df['hour'] = df['hour'].dt.time\n frequent_hour = df['hour'].mode()[0]\n print(\"The most common time of the day for travel is \", frequent_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station. idxmax returns the value in the row for the highest count\n frequent_start = df['Start Station'].value_counts().idxmax()\n print(\"The most commonly used start station is \", frequent_start)\n\n # display most commonly used end station. idxmax returns the value in the row for the highest count\n frequent_end = df['End Station'].value_counts().idxmax()\n print(\"The most commonly used end station is \", frequent_end)\n\n # display most frequent combination of start station and end station trip.\n combo_station = df.groupby(['Start Station', 'End Station']).size().sort_values(ascending=False).idxmax()\n print('The most common combination of start and end station: ', combo_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n travel_time = sum(df['Trip Duration'])\n # divide time in seconds by seconds in a day to get days\n print(\"The total travel time is \", travel_time/86400 , \"days.\")\n\n # display mean travel time\n avg_travel_time = df['Trip Duration'].mean()\n print(\"The average travel time is \", avg_travel_time/60, \"minutes.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Count of user types:\\n', user_types)\n\n\n # Display counts of gender\n try:\n count_gender = df['Gender'].value_counts()\n print('Count of gender types:\\n', count_gender)\n except KeyError:\n print(\"No gender data available for this month.\")\n\n\n # Display earliest, most recent, and most common year of birth\n try:\n oldest_birth_year = df['Birth Year'].min()\n print(\"The oldest birth year is \", oldest_birth_year)\n except KeyError:\n print(\"No birth year data available for this month.\")\n\n try:\n youngest_birth_year = df['Birth Year'].max()\n print(\"The youngest birth year is \", youngest_birth_year)\n except KeyError:\n print(\"No birth year data available for this month.\")\n\n try:\n common_birth_year = df['Birth Year'].value_counts().idxmax()\n print(\"The most common birth year is \", common_birth_year)\n except KeyError:\n print(\"No birth year data available for this month.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n display_data(df)\n\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n        if restart.lower() != 'yes':\n            break\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":7999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559753810","text":"import socket, sys, re\nfrom math import *\nimport base64\n\nserver = \"irc.root-me.org\" # settings\nchannel = \"#root-me_challenge\"\nbotnick = \"yckai\"\n#input('wait1')\nirc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # defines the socket\n\n#input('wait2')\nprint(\"connecting to: \" + server)\n\n#input('wait3')\nirc.connect((server, 6667))\n\n#input('wait4') # connects to the server\nmsg = \"USER \" + botnick + \" \" + botnick + \" \" + botnick + \" :Better than Enigma\\n\"\nirc.send(msg.encode(\"Utf8\")) # user authentication\ntext = irc.recv(4096) # receive the text\nprint(text)\n\n#input('wait5')\nmsg = \"NICK \" + botnick + \"\\n\"\nirc.send(msg.encode(\"Utf8\")) # sets nick\ntext = irc.recv(4096) # receive the text\nprint(text)\n\n#input('wait6')\nmsg = \"JOIN \" + channel + \"\\n\"\nirc.send(msg.encode(\"Utf8\")) # join the chan\ntext = irc.recv(4096) # receive the text\nprint(text)\n\nmsg = \"PRIVMSG candy !ep2 \\n\"\nirc.send(msg.encode(\"Utf8\")) # auth\n\nwhile 1:\n    text = irc.recv(4096) # receive the text\n    print(text)\n    PRIVMSG = re.search(\"PRIVMSG\", str(text))\n    if PRIVMSG is not None:\n        a = str(text).split(\":\")\n        b = str(a[2]).replace(\"\\\\r\\\\n'\", \"\")\n        print(str(b))\n        # the challenge payload arrives base64-encoded; decode it before replying\n        b = base64.b64decode(str(b))\n        b = b.decode(\"utf-8\")\n        print(str(b))\n        msg = \"PRIVMSG candy !ep2 -rep \" + str(b) + \" \\n\"\n        irc.send(msg.encode(\"Utf8\"))","sub_path":"ep4.py","file_name":"ep4.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596321202","text":"import numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport matplotlib.pyplot as plt\n\n\ndef compare_signatures(sig1, sig2):\n    # sig1 is 30-by-96\n    # sig2 is 27-by-96\n    M = cosine_similarity(sig1, sig2)\n    # print(M)\n    plt.imshow(M, cmap='hot', interpolation='nearest')\n    plt.xlabel(\"calculated signatures\")\n    plt.ylabel(\"calculated signatures\")\n    plt.title(\"Cosine Similarity of calculated Signatures\")\n    plt.colorbar()\n    plt.show()\n    # signature_names = np.genfromtxt(\"data/signatures/signatures.txt\", dtype=str, usecols=list(range(3,30)), max_rows=1, delimiter='\\t') # M is 30-by-27 so for each of the 30 signatures in sig1, we get their cosine similarity to each of the 27 signatures in sig2\n    # T = range(M.shape[0])\n    # for i in range(M.shape[1]):\n    #     print(\"For \" + str(signature_names[i]))\n    #     print(\"max cosine similarity is \" + str(np.amax(M[:,i])))\n    #     print(\"corresponding calculated signature is \" + str(np.argmax(M[:,i])))\n    #     plt.plot(T, M[:,i])\n    #     plt.show()\n\ndef compare_calculated_simulated_signatures():\n    calculated_signatures = np.load(\"output/9_29_17/signatures/calculated-signatures-5.npy\")\n    actual_signatures = np.load(\"data/examples/example-signatures.npy\")\n    compare_signatures(calculated_signatures, actual_signatures)\n\ndef compare_calculated_cosmic_signatures():\n    N = 27\n    calculated_signatures = np.load(\"output/10_5_17/signatures/real-data-signatures-\" + str(N) + \".npy\")\n    calculated_categories = np.loadtxt(\"data/whole_exome/ALL_exomes_mutational_catalog_96_subs.txt\", dtype=str, skiprows=1, usecols=list(range(0,1)))\n\n    cosmic_signatures = 
np.loadtxt(\"data/signatures/signatures.txt\", dtype=np.dtype(np.float32), delimiter=\"\\t\", skiprows=1, usecols=list(range(3,30)))\n cosmic_signatures = cosmic_signatures.transpose()\n cosmic_categories = np.loadtxt(\"data/signatures/signatures.txt\", dtype=str, skiprows=1, usecols=list(range(2,3)))\n cosmic_signatures = standardized_signature_category_order(calculated_categories, cosmic_signatures, cosmic_categories)\n # compare_signatures(cosmic_signatures, original_signatures)\n compare_signatures(calculated_signatures, calculated_signatures)\n\n# given the order of categories in one group of signatures and the order of the categories in another group of signatures\n# re-order the categories in the second group so they're the same order\ndef standardized_signature_category_order(categories1, sig2, categories2):\n l = list()\n for category in categories1:\n l.append(sig2[:, categories2 == category])\n return np.concatenate(l, axis=1)\n\n\nif __name__ == '__main__':\n compare_calculated_cosmic_signatures()\n # compare_calculated_simulated_signatures()\n","sub_path":"compare_signatures.py","file_name":"compare_signatures.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220111101","text":"from math import sqrt\nC = [1,1,1,1,1,1,1,1,1,21]\n\ndef standard_deviation(array):\n reference_point = sum(array)/len(array)\n \n distances = []\n for value in array:\n squared_distance = (value -reference_point)**2\n distances.append(squared_distance)\n \n variance = sum(distances)/len(distances)\n \n return sqrt(variance)\n\nstandard_deviation_C = standard_deviation(C)","sub_path":"standardDeviation.py","file_name":"standardDeviation.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73848256","text":"\"\"\"Ground based telemetry reception and saving script\"\"\"\nimport os\nfrom queue import Queue\nfrom threading import Thread\n\nfrom whitevest.lib.atomic_value import AtomicValue\nfrom whitevest.lib.buffer_session_store import BufferSessionStore\nfrom whitevest.lib.configuration import Configuration\nfrom whitevest.lib.const import TESTING_MODE\nfrom whitevest.lib.utils import create_gps_thread\nfrom whitevest.threads.ground_data import (\n replay_telemetry,\n telemetry_log_writing_loop,\n telemetry_reception_loop,\n)\nfrom whitevest.threads.ground_server import (\n telemetry_dashboard_server,\n telemetry_streaming_server,\n)\n\n\ndef main():\n \"\"\"Ground based telemetry reception and saving script\"\"\"\n\n # Load up the system configuration\n configuration = Configuration(\n os.getenv(\"GROUND_CONFIG_FILE\", None),\n Configuration.default_ground_configuration,\n )\n\n # Queue to manage data synchronization between telemetry reception and data logging\n new_data_queue = Queue()\n\n # Manages the data buffers\n buffer_session_store = BufferSessionStore(configuration)\n\n # Holds the most recent GPS data\n gps_value = AtomicValue((0.0, 0.0, 0.0, 0.0))\n\n if not TESTING_MODE:\n gps_thread = create_gps_thread(configuration, gps_value)\n gps_thread.start()\n\n write_thread = Thread(\n target=telemetry_log_writing_loop,\n args=(\n new_data_queue,\n buffer_session_store,\n ),\n daemon=True,\n )\n write_thread.start()\n\n streaming_server_thread = Thread(\n target=telemetry_streaming_server,\n args=(\n configuration,\n buffer_session_store,\n ),\n daemon=True,\n )\n streaming_server_thread.start()\n\n 
dashboard_server_thread = Thread(\n target=telemetry_dashboard_server,\n args=(\n configuration,\n buffer_session_store,\n ),\n daemon=True,\n )\n dashboard_server_thread.start()\n\n if TESTING_MODE:\n replay_telemetry(new_data_queue, os.getenv(\"REPLAY_DATA\"))\n else:\n telemetry_reception_loop(\n configuration,\n new_data_queue,\n gps_value,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"whitevest/bin/ground.py","file_name":"ground.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"507771720","text":"# import os\n\n# os.chdir('/home/adammer/git/TensorFlow-NRE/')\n\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport datetime\nimport os\nimport network_transe as network\nfrom sklearn.metrics import average_precision_score\nfrom sklearn import linear_model, decomposition, datasets, tree\nfrom sklearn.externals import joblib\n\nos.chdir('/opt/work/wikipedia/nre')\n\nHIDDEN_EMBEDDINGS_SIZE = 10;\n\nFLAGS = tf.app.flags.FLAGS\n# change the name to who you want to send\n#tf.app.flags.DEFINE_string('wechat_name', 'Tang-24-0325','the user you want to send info to')\n# tf.app.flags.DEFINE_string('wechat_name', 'filehelper',\n# 'the user you want to send info to')\n\n# global answers\n\n# if you want to try itchat, please set it to True\nitchat_run = False\nif itchat_run:\n import itchat\n\ndef expand_probs(classes, probs, all_classes):\n res = np.zeros((probs.shape[0], all_classes));\n for i in range(0, probs.shape[0]):\n for j in range(0, len(probs[i])):\n res[i, classes[j]] = probs[i, j];\n return res;\n\n\ndef main(_):\n save_path = './model/';\n\n # ATTENTION: change pathname before you load your model\n pathname = \"./model/ATT_GRU_model-\"\n\n wordembedding = np.load('./data/vec.npy')\n\n test_settings = network.Settings()\n test_settings.vocab_size = 2268896\n # test_settings.num_classes = 53\n test_settings.num_classes = 11\n test_settings.big_num = 71\n\n big_num_test = test_settings.big_num\n\n test_emb = np.load('./data/test_emb.npy');\n\n '''\n emb_tree = joblib.load(save_path + 'embclassifier');\n print('predicting from embeddings');\n pred_emb = emb_tree.predict_proba(test_emb);\n pred_emb = expand_probs(emb_tree.classes_, pred_emb, test_settings.num_classes);\n '''\n\n with tf.Graph().as_default():\n\n sess = tf.Session()\n with sess.as_default():\n\n def test_step(word_batch, pos1_batch, pos2_batch, y_batch, emb_batch):\n\n feed_dict = {}\n total_shape = []\n total_num = 0\n total_word = []\n total_pos1 = []\n total_pos2 = []\n\n for i in range(len(word_batch)):\n total_shape.append(total_num)\n total_num += len(word_batch[i])\n for word in word_batch[i]:\n total_word.append(word)\n for pos1 in pos1_batch[i]:\n total_pos1.append(pos1)\n for pos2 in pos2_batch[i]:\n total_pos2.append(pos2)\n\n total_shape.append(total_num)\n total_shape = np.array(total_shape)\n total_word = np.array(total_word)\n total_pos1 = np.array(total_pos1)\n total_pos2 = np.array(total_pos2)\n\n feed_dict[mtest.total_shape] = total_shape\n feed_dict[mtest.input_word] = total_word\n feed_dict[mtest.input_pos1] = total_pos1\n feed_dict[mtest.input_pos2] = total_pos2\n feed_dict[mtest.input_y] = y_batch\n feed_dict[mtest.input_emb] = emb_batch\n\n loss, accuracy, prob = sess.run(\n [mtest.loss, mtest.accuracy, mtest.prob], feed_dict)\n return prob, accuracy\n\n # evaluate p@n\n def eval_pn(test_y, test_word, test_pos1, test_pos2, test_emb, test_settings):\n allprob = []\n acc = []\n 
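# Score the test set in consecutive slices of big_num bags per test_step call\n            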
for i in range(int(len(test_word) / float(test_settings.big_num))):\n prob, accuracy = test_step(test_word[i * test_settings.big_num:(i + 1) * test_settings.big_num], test_pos1[i * test_settings.big_num:(\n i + 1) * test_settings.big_num], test_pos2[i * test_settings.big_num:(i + 1) * test_settings.big_num], test_y[i * test_settings.big_num:(i + 1) * test_settings.big_num], test_emb[i * test_settings.big_num:(i + 1) * test_settings.big_num])\n acc.append(\n np.mean(np.reshape(np.array(accuracy), (test_settings.big_num))))\n prob = np.reshape(\n np.array(prob), (test_settings.big_num, test_settings.num_classes))\n for single_prob in prob:\n allprob.append(single_prob[1:])\n allprob = np.reshape(np.array(allprob), (-1))\n\n eval_y = []\n for i in test_y:\n eval_y.append(i[1:])\n allans = np.reshape(eval_y, (-1))\n order = np.argsort(-allprob)\n\n # print('allans shape: ' + str(allans.shape))\n\n print('P@100:')\n top100 = order[:100]\n correct_num_100 = 0.0\n for i in top100:\n if allans[i] == 1:\n correct_num_100 += 1.0\n print(correct_num_100 / 100)\n\n print('P@200:')\n top200 = order[:200]\n correct_num_200 = 0.0\n for i in top200:\n if allans[i] == 1:\n correct_num_200 += 1.0\n print(correct_num_200 / 200)\n\n print('P@300:')\n top300 = order[:300]\n correct_num_300 = 0.0\n for i in top300:\n if allans[i] == 1:\n correct_num_300 += 1.0\n print(correct_num_300 / 300)\n\n if itchat_run:\n tempstr = 'P@100\\n' + str(correct_num_100 / 100) + '\\n' + 'P@200\\n' + str(\n correct_num_200 / 200) + '\\n' + 'P@300\\n' + str(correct_num_300 / 300)\n itchat.send(tempstr, FLAGS.wechat_name)\n\n with tf.variable_scope(\"model\"):\n mtest = network.GRU(\n is_training=False, word_embeddings=wordembedding, settings=test_settings)\n\n saver = tf.train.Saver()\n\n # ATTENTION: change the list to the iters you want to test !!\n #testlist = range(9025,14000,25)\n # testlist = [10900]\n testlist = [5800]\n for model_iter in testlist:\n\n saver.restore(sess, pathname + str(model_iter))\n print(\"Evaluating P@N for iter \" + str(model_iter))\n\n if itchat_run:\n itchat.send(\"Evaluating P@N for iter \" +\n str(model_iter), FLAGS.wechat_name)\n\n print('Evaluating P@N for one')\n if itchat_run:\n itchat.send('Evaluating P@N for one', FLAGS.wechat_name)\n\n test_y = np.load('./data/pone_test_y.npy')\n test_emb = np.load('./data/pone_test_emb.npy');\n test_word = np.load('./data/pone_test_word.npy')\n test_pos1 = np.load('./data/pone_test_pos1.npy')\n test_pos2 = np.load('./data/pone_test_pos2.npy')\n eval_pn(test_y, test_word, test_pos1, test_pos2, test_emb, test_settings)\n\n print('Evaluating P@N for two')\n if itchat_run:\n itchat.send('Evaluating P@N for two', FLAGS.wechat_name)\n test_y = np.load('./data/ptwo_test_y.npy')\n test_emb = np.load('./data/ptwo_test_emb.npy');\n test_word = np.load('./data/ptwo_test_word.npy')\n test_pos1 = np.load('./data/ptwo_test_pos1.npy')\n test_pos2 = np.load('./data/ptwo_test_pos2.npy')\n eval_pn(test_y, test_word, test_pos1, test_pos2, test_emb, test_settings)\n\n print('Evaluating P@N for all')\n if itchat_run:\n itchat.send('Evaluating P@N for all', FLAGS.wechat_name)\n test_y = np.load('./data/pall_test_y.npy')\n test_emb = np.load('./data/pall_test_emb.npy');\n test_word = np.load('./data/pall_test_word.npy')\n test_pos1 = np.load('./data/pall_test_pos1.npy')\n test_pos2 = np.load('./data/pall_test_pos2.npy')\n eval_pn(test_y, test_word, test_pos1, test_pos2, test_emb, test_settings)\n\n time_str = datetime.datetime.now().isoformat()\n print(time_str)\n 
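# Final pass: score the full test split and save the probabilities for the PR curve\n                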
print('Evaluating all test data and save data for PR curve')\n if itchat_run:\n itchat.send(\n 'Evaluating all test data and save data for PR curve', FLAGS.wechat_name)\n\n test_y = np.load('./data/testall_y.npy')\n test_emb = np.load('./data/test_emb.npy');\n test_word = np.load('./data/testall_word.npy')\n test_pos1 = np.load('./data/testall_pos1.npy')\n test_pos2 = np.load('./data/testall_pos2.npy')\n\n allprob = []\n acc = []\n print('Len test_word: ' + str(len(test_word)));\n for i in range(int(len(test_word) / float(test_settings.big_num))):\n prob, accuracy = test_step(test_word[i * test_settings.big_num:(i + 1) * test_settings.big_num], test_pos1[i * test_settings.big_num:(\n i + 1) * test_settings.big_num], test_pos2[i * test_settings.big_num:(i + 1) * test_settings.big_num], test_y[i * test_settings.big_num:(i + 1) * test_settings.big_num], test_emb[i * test_settings.big_num:(i + 1) * test_settings.big_num])\n acc.append(\n np.mean(np.reshape(np.array(accuracy), (test_settings.big_num))))\n prob = np.reshape(\n np.array(prob), (test_settings.big_num, test_settings.num_classes))\n for single_prob in prob:\n allprob.append(single_prob)\n\n allprobarray = np.array(allprob)\n print('Shape allprobarray: ' + str(allprobarray.shape))\n\n dists = test_emb[:, :HIDDEN_EMBEDDINGS_SIZE];\n subjs = test_emb[:, HIDDEN_EMBEDDINGS_SIZE:HIDDEN_EMBEDDINGS_SIZE*2];\n objs = test_emb[:, HIDDEN_EMBEDDINGS_SIZE*2:];\n\n norms = np.linalg.norm(dists, axis=1, keepdims=True);\n\n norms = (norms / (norms.max(axis = 0) - norms.min(axis=0)) - 0.5 ) * 2;\n objs = (objs / (objs.max(axis = 0) - objs.min(axis=0)) - 0.5 ) * 2;\n\n print('Shape norms: ' + str(norms.shape))\n\n# print('predicting from ensemble');\n# ens_tree = joblib.load(save_path + 'ensclassifier');\n\n# pred_ens = ens_tree.predict_proba(np.hstack((allprobarray, norms, objs)));\n# pred_ens = expand_probs(ens_tree.classes_, pred_ens, test_settings.num_classes);\n\n\n '''\n filt = np.logical_and(pred_ens[:,1]>pred_ens[:,0], norms[:,0]>6.0);\n\n for i in range(0, pred_ens.shape[0]):\n if filt[i]==True:\n pred_ens[i, 0] = 1.0;\n pred_ens[i, 1] = 0.0;\n\n filt = np.logical_and(pred_ens[:,0]>pred_ens[:,1], norms[:,0]<2.0);\n\n for i in range(0, pred_ens.shape[0]):\n if filt[i]==True:\n pred_ens[i, 0] = 0.0;\n pred_ens[i, 1] = 1.0;\n '''\n\n # allprob_with_na = np.reshape(pred_ens, (-1));\n # allprob = np.reshape(pred_ens[:,1:], (-1));\n # print('Shape allprobarray: ' + str(allprobarray.shape))\n allprob = np.reshape(allprobarray[:, 1:], (-1));\n allprob_with_na = np.reshape(allprobarray, (-1));\n print('Shape allprob: ' + str(allprob.shape))\n print('Shape allprob_with_na: ' + str(allprob_with_na.shape))\n order = np.argsort(-allprob)\n\n print('saving all test result...')\n current_step = model_iter\n\n # ATTENTION: change the save path before you save your result\n # !!\n np.save('./out/allprob_iter_' +\n str(current_step) + '.npy', allprob_with_na)\n allans = np.load('./data/allans.npy')\n\n print('Shape allans: ' + str(allans.shape))\n print('Shape allprob: ' + str(allprob.shape))\n\n # caculate the pr curve area\n average_precision = average_precision_score(allans, allprob)\n print('PR curve area:' + str(average_precision))\n\n if itchat_run:\n itchat.send('PR curve area:' +\n str(average_precision), FLAGS.wechat_name)\n\n time_str = datetime.datetime.now().isoformat()\n print(time_str)\n print('P@N for all test data:')\n print('P@100:')\n top100 = order[:100]\n correct_num_100 = 0.0\n for i in top100:\n if allans[i] == 1:\n correct_num_100 += 
1.0\n                print(correct_num_100 / 100)\n\n                print('P@200:')\n                top200 = order[:200]\n                correct_num_200 = 0.0\n                for i in top200:\n                    if allans[i] == 1:\n                        correct_num_200 += 1.0\n                print(correct_num_200 / 200)\n\n                print('P@300:')\n                top300 = order[:300]\n                correct_num_300 = 0.0\n                for i in top300:\n                    if allans[i] == 1:\n                        correct_num_300 += 1.0\n                print(correct_num_300 / 300)\n\n                if itchat_run:\n                    tempstr = 'P@100\\n' + str(correct_num_100 / 100) + '\\n' + 'P@200\\n' + str(\n                        correct_num_200 / 200) + '\\n' + 'P@300\\n' + str(correct_num_300 / 300)\n                    itchat.send(tempstr, FLAGS.wechat_name)\n\n\nif __name__ == \"__main__\":\n    if itchat_run:\n        itchat.auto_login(hotReload=True, enableCmdQR=2)\n    tf.app.run()\n","sub_path":"test_GRU_transe.py","file_name":"test_GRU_transe.py","file_ext":"py","file_size_in_byte":14051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"86096567","text":"import logging\n\nfrom decorator.injectdbsession import inject_db_session\nfrom .entity import Artist\nfrom sqlalchemy.orm import lazyload\n\n\n@inject_db_session()\nclass ArtistRepo:\n    logger = logging.getLogger(__name__)\n\n    def get_artists_by_page(self, index, offset):\n        query = self._session.query(Artist).options(lazyload(\"albums\")) \\\n            .order_by(Artist.name).limit(offset).offset((index - 1) * offset)\n        return query.all()\n\n    def get_artists_list(self):\n        query = self._session.query(Artist).options(\n            lazyload(\"albums\")).order_by(Artist.name)\n        return query.all()\n\n    def get_artists_by_name(self, artist_name):\n        query = self._session.query(Artist).options(lazyload(\"albums\")).filter(\n            Artist.name == artist_name)\n        return query.all()\n\n    def save(self, artist):\n        try:\n            self._session.add(artist)\n            self._session.flush()\n            self._session.commit()\n            return artist\n        except:\n            self._session.rollback()\n            raise\n\n    def delete(self, artist):\n        try:\n            self._session.delete(artist)\n            self._session.commit()\n        except:\n            self._session.rollback()\n            raise","sub_path":"daemon/database/artistrepo.py","file_name":"artistrepo.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591952522","text":"import re\n# import time\n\n\ndef ms2s(s):\n    # wrk latency values are captured without their trailing 's': a trailing 'm'\n    # means milliseconds, a trailing 'u' means microseconds, otherwise seconds;\n    # everything is normalized to milliseconds here\n    if s.endswith('m'):\n        return str(float(s[:-1]))\n    elif s.endswith('u'):\n        return str(float(s[:-1]) / 1000.0)\n    else:\n        return str(float(s) * 1000)\n\n\nclass WrkLog(object):\n\n    BODY_LEN_WITH_ERR = 15\n    BODY_LEN_WITHOUT_ERR = 13\n\n    def __init__(self, arr):\n        # self.time = time.strptime(arr[0], \"%a %b %d %H:%M:%S %Z %Y\\n\")\n        self.parse(arr)\n\n    def parse_body(self, arr):\n        print(arr)\n        self.duration = re.compile(\n            \"Running (.+) test @ (http.+)\").match(arr[0]).group(1)\n        self.host = re.compile(\n            \"Running (.+) test @ (http.+)\").match(arr[0]).group(2)\n        self.threads = re.compile(\n            \" (\\d+) threads and (\\d+) connections\").match(arr[1]).group(1)\n        self.connections = re.compile(\n            \" (\\d+) threads and (\\d+) connections\").match(arr[1]).group(2)\n\n        self.latency_avg = re.compile(\n            \" Latency\\s+(.+?)s\\s+\").match(arr[3]).group(1)\n        self.latency_50 = re.compile(\" 50%\\s+(.+)s\").match(arr[6]).group(1)\n        self.latency_75 = re.compile(\" 75%\\s+(.+)s\").match(arr[7]).group(1)\n        self.latency_90 = re.compile(\" 90%\\s+(.+)s\").match(arr[8]).group(1)\n        self.latency_99 = re.compile(\" 99%\\s+(.+)s\").match(arr[9]).group(1)\n\n        self.latency_avg = ms2s(self.latency_avg)\n        self.latency_50 = ms2s(self.latency_50)\n        self.latency_75 = ms2s(self.latency_75)\n
        self.latency_90 = ms2s(self.latency_90)\n        self.latency_99 = ms2s(self.latency_99)\n\n        self.requests = re.compile(\n            \" (\\d+) requests in .+, (.+) read\").match(arr[10]).group(1)\n        try:\n            self.read = re.compile(\" (\\d+) requests in .+, \" +\n                                    \"(\\d*\\.\\d+|\\d+)MB read\").match(arr[10]).group(2)\n        except AttributeError:\n            self.read = str(float(re.compile(\" (\\d+) requests in .+, \" +\n                                             \"(\\d*\\.\\d+|\\d+)KB read\").match(arr[10]).group(2)) / 1024)\n\n        self.errors = \"0\"\n        self.Socket_read_error = \"0\"\n        if self.body_length > WrkLog.BODY_LEN_WITHOUT_ERR:\n            # todo\n            for err in arr[11:-2]:\n                try:\n                    self.Socket_read_error = re.compile(\n                        \" Socket errors: connect (\\d+), read (\\d+), write (\\d+),\" +\n                        \" timeout (\\d+)\").match(err).group(2)\n                except AttributeError:\n                    self.errors = re.compile(\n                        \" Non-2xx or 3xx responses: (\\d+)\").match(err).group(1)\n        self.req_per_sec = re.compile(\n            \"Requests/sec:\\s+(\\d*\\.\\d+|\\d+)\").match(arr[-2]).group(1)\n        try:\n            self.trans_per_sec = re.compile(\n                \"Transfer/sec:\\s+(.+)KB\").match(arr[-1]).group(1)\n        except AttributeError:\n            self.trans_per_sec = str(1024 * float(re.compile(\n                \"Transfer/sec:\\s+(.+)MB\").match(arr[-1]).group(1)))\n\n    def parse_head(self, arr):\n        self.time = \"\\\"\" + arr[0][:-1] + \"\\\"\"\n\n    def parse_footer(self, arr):\n        pass\n\n    def parse(self, arr):\n        self.length = len(arr)\n\n        for i in range(self.length):\n            if re.compile(\"Running (.+) test @ (http.+)\").match(arr[i]):\n                start = i\n                break\n        for i in range(self.length):\n            if re.compile(\"Transfer/sec:\\s+.+\").match(arr[i]):\n                end = i\n                break\n        self.body_length = end - start + 1\n        if self.body_length not in range(WrkLog.BODY_LEN_WITHOUT_ERR,\n                                         WrkLog.BODY_LEN_WITH_ERR + 1):\n            raise Exception(\"illegal body length: {}\".format(self.body_length))\n        self.parse_head(arr[:start])\n        self.parse_body(arr[start:end + 1])\n        self.parse_footer(arr[end + 1:])\n\n    def __str__(self):\n        return ','.join([self.connections, self.latency_avg, self.latency_50,\n                         self.latency_75, self.latency_90, self.latency_99,\n                         self.requests, self.read, self.req_per_sec,\n                         self.trans_per_sec, self.Socket_read_error,\n                         self.errors, self.time])\n\n\ndef f(fname):\n    with open(fname) as file:\n        arr = []\n        while True:\n            line = file.readline()\n            if not line:\n                break\n            if line.startswith(\"-\" * 10):\n                farr = []\n            elif line.startswith(\"=\" * 10):\n                arr.append(farr)\n            else:\n                farr.append(line)\n    return [WrkLog(a) for a in arr]\n\n\nif __name__ == \"__main__\":\n    import sys\n    fname = sys.argv[1]\n    for log in f(fname):\n        print(str(log))\n","sub_path":"wrk_log_2_csv.py","file_name":"wrk_log_2_csv.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520617167","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('assignments', '0004_auto_20151009_2300'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='AssignmentTestCase',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n                ('case_input', models.TextField(max_length=8096, blank=True)),\n                ('case_output', models.TextField(max_length=8096, blank=True)),\n                ('max_memory_usage', models.PositiveSmallIntegerField(default=0)),\n                ('max_cpu_usage', models.PositiveSmallIntegerField(default=0)),\n                ('flags', models.TextField(max_length=1024, blank=True)),\n                ('assignment', 
models.ForeignKey(to='assignments.CourseAssignment', related_name='testcases')),\n ],\n ),\n ]\n","sub_path":"server/api/assignments/migrations/0005_assignmenttestcase.py","file_name":"0005_assignmenttestcase.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244164597","text":"# -*- coding: utf-8 -*-\n\n# This work was created by participants in the DataONE project, and is\n# jointly copyrighted by participating institutions in DataONE. For\n# more information on DataONE, see our web site at http://dataone.org.\n#\n# Copyright 2009-2016 DataONE\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"View handler middleware\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport StringIO\n\nimport d1_gmn.app.middleware.session_cert\nimport d1_gmn.app.middleware.session_jwt\n\nimport d1_common\nimport d1_common.const\n\nimport django.conf\n\n\nclass ViewHandler(object):\n def process_view(self, request, view_func, view_args, view_kwargs):\n # Log which view is about the be called.\n logging.info(\n u'Calling view: func_name=\"{}\", method=\"{}\", args=\"{}\", kwargs=\"{}\", url=\"{}\"'\n .format(\n view_func.__name__, request.method, view_args, view_kwargs, request.path_info\n )\n )\n # logging.debug(request.headers)\n self.process_session(request)\n\n def process_session(self, request):\n # For simulating an HTTPS connection with client authentication when\n # debugging via regular HTTP, two mechanisms are supported. (1) A full\n # client side certificate can be included and (2) a list of subjects can be\n # included. Both use vendor specific extensions (HTTP headers that start\n # with the string \"VENDOR_\".) In some testing scenarios, it is convenient to\n # submit lists of subjects without having to generate certificates. 
In other\n        # scenarios, it is desirable to simulate an HTTPS interaction as closely as\n        # possible by providing a complete certificate.\n        request.primary_subject_str, request.all_subjects_set = (\n            self.get_active_subject_set(request)\n        )\n\n    def get_active_subject_set(self, request):\n        \"\"\"Get a set containing all subjects for which the current connection has\n        been successfully authenticated\n        \"\"\"\n        # Handle complete certificate in vendor specific extension.\n        if django.conf.settings.DEBUG_GMN:\n            if 'HTTP_VENDOR_INCLUDE_CERTIFICATE' in request.META:\n                request.META['SSL_CLIENT_CERT'] = \\\n                    self.pem_in_http_header_to_pem_in_string(\n                        request.META['HTTP_VENDOR_INCLUDE_CERTIFICATE'])\n\n        # Add subjects from any provided certificate and JWT and store them in\n        # the Django request obj.\n        cert_primary_str, cert_equivalent_set = (\n            d1_gmn.app.middleware.session_cert.get_subjects(request)\n        )\n        jwt_subject_list = (\n            d1_gmn.app.middleware.session_jwt.\n            validate_jwt_and_get_subject_list(request)\n        )\n        primary_subject_str = cert_primary_str\n        all_subjects_set = (\n            cert_equivalent_set | {cert_primary_str} | set(jwt_subject_list)\n        )\n        if len(jwt_subject_list) == 1:\n            jwt_primary_str = jwt_subject_list[0]\n            if jwt_primary_str != cert_primary_str:\n                if cert_primary_str == d1_common.const.SUBJECT_PUBLIC:\n                    primary_subject_str = jwt_primary_str\n                else:\n                    logging.warn(\n                        'Both a certificate and a JWT were provided and the primary '\n                        'subjects differ. Using the certificate for primary subject and '\n                        'the JWT as equivalent.'\n                    )\n\n        logging.info(u'Primary active subject: {}'.format(primary_subject_str))\n        logging.info(\n            u'All active subjects: {}'.format(', '.join(sorted(all_subjects_set)))\n        )\n\n        # Handle list of subjects in vendor specific extension: update the local\n        # set; it is attached to the request by process_session() after return.\n        if django.conf.settings.DEBUG_GMN:\n            # This is added to any subjects obtained from cert and/or JWT.\n            if 'HTTP_VENDOR_INCLUDE_SUBJECTS' in request.META:\n                all_subjects_set.update(\n                    request.META['HTTP_VENDOR_INCLUDE_SUBJECTS'].split('\\t')\n                )\n\n        return primary_subject_str, all_subjects_set\n\n    def pem_in_http_header_to_pem_in_string(self, header_str):\n        header = StringIO.StringIO(header_str)\n        pem = StringIO.StringIO()\n        pem.write('-----BEGIN CERTIFICATE-----\\n')\n        while True:\n            pem_line = header.read(64)\n            if len(pem_line) == 0:\n                break\n            pem.write(pem_line + '\\n')\n        pem.write('-----END CERTIFICATE-----\\n')\n        return pem.getvalue()\n","sub_path":"gmn/src/d1_gmn/app/middleware/view_handler.py","file_name":"view_handler.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514058784","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n    assert orient == 'x' or orient == 'y'\n    assert sobel_kernel % 2 == 1  # kernel size needs to be odd\n    assert thresh[0] < thresh[1]\n\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    if orient == 'x':\n        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)\n    else:\n        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)\n    abs_sobel = np.abs(sobel)\n    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n    grad_binary = (scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])\n\n    return grad_binary.astype(int)\n\n\ndef mag_thresh(img, sobel_kernel=3, thresh=(0, 255)):\n    assert sobel_kernel % 2 == 1  # kernel size needs to be odd\n    assert thresh[0] < thresh[1]\n\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    sobelx = 
cv2.Sobel(gray, cv2.CV_64F, 1, 0)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)\n abs_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n mag_binary = (scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])\n\n return mag_binary.astype(int)\n\n\ndef dir_thresh(img, sobel_kernel=3, thresh=(0, np.pi / 2)):\n assert sobel_kernel % 2 == 1 # kernel size needs to be odd\n assert thresh[0] < thresh[1]\n\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)\n grad_dir = np.arctan2(np.abs(sobely), np.abs(sobelx))\n dir_binary = (grad_dir >= thresh[0]) & (grad_dir <= thresh[1])\n\n return dir_binary.astype(int)\n\n# Load the image\nimage = cv2.imread('signs_vehicles_xygrad.png')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# Choose a Sobel kernel size\nksize = 5 # Choose a larger odd number to smooth gradient measurements\n\n# Apply each of the thresholding functions\ngradx = abs_sobel_thresh(\n image, orient='x', sobel_kernel=ksize, thresh=(30, 100))\ngrady = abs_sobel_thresh(\n image, orient='y', sobel_kernel=ksize, thresh=(30, 100))\nmag_binary = mag_thresh(image, sobel_kernel=ksize, thresh=(30, 100))\ndir_binary = dir_thresh(image, sobel_kernel=ksize, thresh=(0.7, 1.3))\n\n# combined = ((gradx == 1) & (grady == 1)) | (\n# (mag_binary == 1) & (dir_binary == 1))\ncombined = ((gradx == 1) & (grady == 1)) & (\n (mag_binary == 1) & (dir_binary == 1))\n\n# Plot the result\nf, axes = plt.subplots(3, 2, figsize=(6, 6))\nf.tight_layout()\naxes[0, 0].imshow(image)\naxes[0, 0].set_title('Original image')\naxes[0, 1].imshow(gradx, cmap='gray')\naxes[0, 1].set_title('Grad x')\naxes[1, 0].imshow(grady, cmap='gray')\naxes[1, 0].set_title('Grad y')\naxes[1, 1].imshow(mag_binary, cmap='gray')\naxes[1, 1].set_title('Mag')\naxes[2, 0].imshow(dir_binary, cmap='gray')\naxes[2, 0].set_title('Grad dir')\naxes[2, 1].imshow(combined, cmap='gray')\naxes[2, 1].set_title('Combined')\n# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\nplt.show()\n","sub_path":"quiz/combining_thresholds.py","file_name":"combining_thresholds.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362575339","text":"import os\nimport xml.etree.ElementTree as ET\n\nimport h5py\n\nfrom openmc.mixin import EqualityMixin\nfrom openmc.clean_xml import clean_xml_indentation\n\n\nclass DataLibrary(EqualityMixin):\n \"\"\"Collection of cross section data libraries.\n\n Attributes\n ----------\n libraries : list of dict\n List in which each item is a dictionary summarizing cross section data\n from a single file. The dictionary has keys 'path', 'type', and\n 'materials'.\n\n \"\"\"\n\n def __init__(self):\n self.libraries = []\n\n def register_file(self, filename):\n \"\"\"Register a file with the data library.\n\n Parameters\n ----------\n filename : str\n Path to the file to be registered.\n\n \"\"\"\n h5file = h5py.File(filename, 'r')\n\n materials = []\n filetype = 'neutron'\n for name in h5file:\n if name.startswith('c_'):\n filetype = 'thermal'\n materials.append(name)\n\n library = {'path': filename, 'type': filetype, 'materials': materials}\n self.libraries.append(library)\n\n def export_to_xml(self, path='cross_sections.xml'):\n \"\"\"Export cross section data library to an XML file.\n\n Parameters\n ----------\n path : str\n Path to file to write. 
Defaults to 'cross_sections.xml'.\n\n \"\"\"\n root = ET.Element('cross_sections')\n\n # Determine common directory for library paths\n common_dir = os.path.dirname(os.path.commonprefix(\n [lib['path'] for lib in self.libraries]))\n if common_dir == '':\n common_dir = '.'\n\n directory = os.path.relpath(common_dir, os.path.dirname(path))\n if directory != '.':\n dir_element = ET.SubElement(root, \"directory\")\n dir_element.text = directory\n\n for library in self.libraries:\n lib_element = ET.SubElement(root, \"library\")\n lib_element.set('materials', ' '.join(library['materials']))\n lib_element.set('path', os.path.relpath(library['path'], common_dir))\n lib_element.set('type', library['type'])\n\n # Clean the indentation to be user-readable\n clean_xml_indentation(root)\n\n # Write XML file\n tree = ET.ElementTree(root)\n tree.write(path, xml_declaration=True, encoding='utf-8',\n method='xml')\n","sub_path":"openmc/data/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180609553","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 25 18:12:02 2019\n\n@author: Luis Cristóvão\n\"\"\"\n\nimport flask as fl\n\n\napp = fl.Flask(__name__, static_url_path='')\n\nmap = {\n \"my_key\": \"333\"\n}\n\n@app.route('/')\ndef index_route():\n return fl.render_template('index.html')\n\n@app.route('/getKey/<key>')\ndef getKey(key):\n\t\n\tresp = fl.Response(map[key])\n\tresp.headers['Access-Control-Allow-Origin'] = '*'\n\treturn resp\n\n@app.route('/setKey/<key>/<data>')\ndef setKey(key,data):\n\tmap[key]=data\n\tresp = fl.Response(\"Key {0}:{1} inserted!\".format(key,data))\n\tresp.headers['Access-Control-Allow-Origin'] = '*'\n\treturn resp\n\n@app.route('/getKey',methods=[\"POST\"])\ndef getKeyPost():\n\treceived_values=fl.request.data.decode(\"ascii\")\n\tresp = fl.Response(map[received_values])\n\tresp.headers['Access-Control-Allow-Origin'] = '*'\n\treturn resp\n \n@app.route('/setKey/<key>',methods=[\"POST\"])\ndef setKeyPost(key):\n\treceived_values=fl.request.data.decode(\"ascii\")\n\tmap[key]=received_values\n\tresp = fl.Response(\"Key {0}:{1} inserted!\".format(key,received_values))\n\tresp.headers['Access-Control-Allow-Origin'] = '*'\n\treturn resp\n \nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)","sub_path":"GeneralServer/python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"380819012","text":"from VITA_PRINTER import VITA # Controls the device and the camera\nimport imageProcessor as imgProc # Runs the vein finding\nfrom exceptions import OutofBoundsError # Custom-defined exceptions module\nfrom exceptions import InvalidResponseError # Custom-defined exceptions module\nimport sys # Used to cmd line arguments\nfrom time import sleep # used to signal catastrophic failure\n \n# TODO: Make the currently unrecoverable errors that terminate the program recoverable\n# TODO: implement DEMO mode with live viewing\n# TODO: implement inVeins() force sensing\n\n# Finds veins and presents their location.\n# It takes in a string filename and a VITA object device,\n# and returns x and y as distances.\ndef veinImaging(filename, device):\n # Is the sensor connected?\n try:\n height = device.getDistance()\n except IOError:\n closeProgram(device)\n else:\n device.capture(filename)\n # is the image readable?\n try:\n 
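# isValidImg is expected to raise ValueError for unreadable images (handled below)\n            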
imgProc.isValidImg(filename)\n        except ValueError:\n            closeProgram(device)\n        else:\n            imgProc.preprocess(filename)\n\n    x, y = imgProc.getVeins(filename, height)\n\n    return x, y\n\ndef inVeins(force, distanceDown):\n    return None\n\n# Takes in a Vita object and\n# finalizes the state of the program for a safe\n# shutdown and release of all resources.\ndef closeProgram(device):\n    imgProc.closeWindows()\n    device.terminate()\n    raise SystemExit(0)\n\n# Parses the command line options to set a string filename and activate\n# demo mode with the '-D' option, which shows the images. By default, if no\n# options are given, demo mode is off and the images are saved to veins.png\n# in the current directory. Returns the string filename and boolean DEMO\ndef getOptions():\n    DEMO = False\n    filename = 'veins'\n    # guard against running with no command line arguments\n    if len(sys.argv) > 1 and sys.argv[1] == '-D':\n        DEMO = True\n        if len(sys.argv) >= 3:\n            filename = sys.argv[2]\n    return filename, DEMO\n\n\ndef main():\n    deviceConnection = '/dev/ttyUSB0'\n    sensorConnection = '/dev/ttyUSB1'\n    filename, DEMO = getOptions() # need to implement demo mode\n    baud = 250000\n\n    ## setup\n    device = VITA(deviceConnection, baud, sensorConnection)\n\n    try:\n        device.awaitVeins()\n    except:\n        closeProgram(device)\n\n    ## vein finding\n    x, y = veinImaging(filename, device)\n    SPEED = 2500 # mm/min\n    while (x != 0 and y != 0):\n        x, y = veinImaging(filename, device)\n        try:\n            device.move(x, y, 0, SPEED)\n        except:\n            closeProgram(device)\n\n    ## injection\n    R = 0; G = 0; B = 255 # Blue\n    device.statusLED(R,G,B)\n\n    distanceDown = 0 #mm\n    z = -1 # mm\n\n    force = device.getForce()\n    if (force == -1): # check that the force sensor is functioning\n        print(\"Force sensor not connected. Injection Failed\")\n        closeProgram(device)\n    else:\n        while(not inVeins(force, distanceDown)):\n            try:\n                device.move(0,0,z,SPEED)\n            except:\n                device.retractNeedle()\n                closeProgram(device)\n            distanceDown += abs(z)\n\n            force = device.getForce()\n            if (force == -1): # check that the force sensor is functioning\n                print(\"Force sensor not connected. Injection Failed\")\n                device.retractNeedle()\n                closeProgram(device)\n\n        device.waitForButtonPress() # will loop infinitely until button is pressed\n\n    ## retraction\n    device.retractNeedle()\n    closeProgram(device)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Current Iteration/camera_v5_with-CLAHE.py","file_name":"camera_v5_with-CLAHE.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328416048","text":"import functools\nimport time\nimport gc\nimport logging\n\nclass TimeLogger:\n    \"\"\"\n    Log time spent in a function.\n    Can also log an additional custom message for debug / info / warning... 
purposes\n \n Example:\n -------\n\n @TimeLogger(custom_message=\"This a warning to be logged at the end\", custom_message_type=\"warning\")\n def print_something():\n print(\"Hello World\") \n\n \"\"\"\n \n def __init__(self, custom_message:str=None, custom_message_type:str=\"debug\", verbose=False):\n super(TimeLogger, self).__init__()\n self.custom_message = custom_message\n self.custom_message_type = custom_message_type\n self.verbose = verbose\n self.logger = logging.getLogger(\"pegasus\")\n\n def __call__(self, function):\n if hasattr(function, \"_pg_wrapped\") :\n function = function._pg_wrapped\n \n @functools.wraps(function)\n def _do(*args, **kwargs):\n if self.verbose:\n self.logger.info(\n \"Entering: %s\" % function.__code__.co_name\n )\n\n start = time.perf_counter()\n res = function(*args, **kwargs)\n end = time.perf_counter()\n \n self.logger.info(\n \"Time spent on '{}' = {:.2f}s.\".format(\n function.__code__.co_name,\n end - start\n )\n )\n\n if self.custom_message is not None:\n log_fct = getattr(self.logger, self.custom_message_type)\n log_fct(self.custom_message)\n \n return res\n\n _do._pg_wrapped = function\n return _do\n\nclass GCCollect:\n \"\"\"\n Python's garbage collection is not that automatic. This decorator is\n especially useful for freeing memory after functions that call on modules\n that create a lot intermediary objects.\n\n Example:\n -------\n\n @GCCollect()\n def print_something():\n print(\"Hello World\")\n\n \"\"\"\n def __init__(self):\n super(GCCollect, self).__init__()\n \n def __call__(self, function):\n if hasattr(function, \"_pg_wrapped\") :\n function = function._pg_wrapped\n\n @functools.wraps(function)\n def _do(*args, **kwargs):\n res = function(*args, **kwargs)\n gc.collect()\n\n return res\n\n _do._pg_wrapped = function\n return _do\n","sub_path":"pegasus/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445599053","text":"# -*- coding: utf-8 -*-\nimport os\nimport click\nimport logging\nfrom dotenv import find_dotenv, load_dotenv\nimport pandas as pd\nimport pyodbc\nfrom tqdm import tqdm\nimport hashlib\nimport querys\n\n\n@click.command()\n#\n# @click.argument('input_filepath', type=click.Path(exists=True))\n# @click.argument('output_filepath', type=click.Path())\ndef main():\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n global logger\n global project_dir\n logger.info('Getting dataset from datawarehouse')\n\n nombre_archivo = os.path.join(os.environ.get('RAW_DATA_PATH'), os.environ.get('RAW_DATA_FILE'))\n\n data = get_dataset(os.environ.get('LIMITE_AFILIADOS'), os.environ.get('LONGITUD_PERIODOS_LARGOS'))\n data.to_csv(os.path.join(project_dir, nombre_archivo))\n data = hash_ids(data)\n data.to_csv(os.path.join(project_dir, nombre_archivo))\n\n\ndef get_dataset(maxafiliados, minlongitud):\n\n global logger\n\n cnxn = pyodbc.connect(driver=os.environ.get('DW_DRIVER'),\n host=os.environ.get('DW_HOST'),\n database=os.environ.get('DATABASE'))\n cnxn2 = pyodbc.connect(driver=os.environ.get('DW_DRIVER'),\n host=os.environ.get('DW_HOST'),\n database=os.environ.get('DATABASE'))\n\n sql = querys.query_top_afiliados\n\n sql = sql.format(maxafiliados, minlongitud)\n logger.info(\"Executing SQL: \" + sql)\n\n querychunks = pd.read_sql(sql, cnxn, chunksize=100)\n\n sql = querys.query_afiliados\n\n df = 
pd.DataFrame()\n for chunk in tqdm(querychunks):\n cedulas = ','.join(chunk.fkAfiliado.apply(str))\n sqlformated = sql.format(cedulas)\n\n logger.info(\"Executing SQL: \" + sqlformated)\n chunks = pd.read_sql(sqlformated, cnxn2, chunksize=1000)\n\n for chunkaportes in chunks:\n df = pd.concat([df, chunkaportes])\n\n return df\n\n\ndef hash_ids(df):\n df['Hash'] = df.apply(lambda x: hashlib.md5(\n '{}_{}'.format(str.strip(x.TipoIdAfiliado), str(x.IdAfiliado)).encode('utf-8')).hexdigest(), axis=1)\n del df['IdAfiliado']\n del df['TipoIdAfiliado']\n\n return df\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n logger = logging.getLogger(__name__)\n logger.info(project_dir)\n\n main()\n","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637425018","text":"import rospy\nfrom std_msgs.msg import Float64MultiArray\nfrom cv_bridge import CvBridge, CvBridgeError\nimport message_filters\nfrom sensor_msgs.msg import Image\nimport ros_numpy\n\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nimport cv2\n\nfrom utils import detector_utils as detector_utils\nimport tensorflow as tf\nfrom multiprocessing import Queue, Pool\nimport datetime\nimport argparse\n\nfrom scipy import ndimage\nimport numpy as np\nfrom IPython import embed\nimport open3d as o3d\n\ntf.debugging.set_log_device_placement(True)\n\nframe_processed = 0\nscore_thresh = 0.2\n\n\n# Create a worker thread that loads graph and\n# does detection on images in an input queue and puts it on an output queue\nrgb_img = []\ndepth_img = []\n\nfocalLengthX = 624.3427734375\nfocalLengthY = 624.3428344726562\n\ncenterX = 305.03887939453125\ncenterY = 244.86605834960938\n\ncube_size = [200, 200, 200]\n\ndef display_inlier_outlier(cloud, ind):\n inlier_cloud = cloud.select_down_sample(ind)\n outlier_cloud = cloud.select_down_sample(ind, invert=True)\n\n print(\"Showing outliers (red) and inliers (gray): \")\n outlier_cloud.paint_uniform_color([1, 0, 0])\n inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])\n o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])\n\n\ndef callback(rgb_msg, depth_msg):\n global rgb_img, depth_img\n try:\n rgb_img = ros_numpy.numpify(rgb_msg)\n depth_img = ros_numpy.numpify(depth_msg)\n except CvBridgeError as e:\n rospy.logerr(e)\n\n\ndef calculateCoM(dpt):\n \"\"\"\n Calculate the center of mass\n :param dpt: depth image\n :return: (x,y,z) center of mass\n \"\"\"\n\n dc = dpt.copy()\n dc[dc < 0] = 0\n dc[dc > 10000] = 0\n cc = ndimage.measurements.center_of_mass(dc > 0)\n num = np.count_nonzero(dc)\n com = np.array((cc[1]*num, cc[0]*num, dc.sum()), np.float)\n\n if num == 0:\n return np.array((0, 0, 0), np.float)\n else:\n return com/num\n\n\ndef clean_depth_map(depth, com, size, com_type=\"2D\"):\n if com_type == \"2D\":\n com3d = [(com[0] + int(left) - depth.shape[1]/2) * com[2] / focalLengthX,\n (com[1] + int(top) - depth.shape[0]/2) * com[2] / focalLengthY, com[2]]\n else:\n com3d = com\n x_min = com3d[0] - 
size[0] / 2\n x_max = com3d[0] + size[0] / 2\n y_min = com3d[1] - size[1] / 2\n y_max = com3d[1] + size[1] / 2\n z_min = com3d[2] - size[2] / 2\n z_max = com3d[2] + size[2] / 2\n\n points = depth2pc(depth, True, left, top)\n points_tmp = points.copy()\n if len(points):\n hand_points_ind = np.all(\n np.concatenate((points[:, 0].reshape(-1, 1) > x_min, points[:, 0].reshape(-1, 1) < x_max,\n points[:, 1].reshape(-1, 1) > y_min, points[:, 1].reshape(-1, 1) < y_max,\n points[:, 2].reshape(-1, 1) > z_min, points[:, 2].reshape(-1, 1) < z_max), axis=1), axis=1)\n points_tmp = points[hand_points_ind]\n depth = pc2depth(points[hand_points_ind])\n return points_tmp, depth\n\n\ndef jointsImgTo3D(sample):\n \"\"\"\n Normalize sample to metric 3D\n :param sample: joints in (x,y,z) with x,y in image coordinates and z in mm\n :return: normalized joints in mm\n \"\"\"\n ret = np.zeros((sample.shape[0], 3), np.float32)\n for i in range(sample.shape[0]):\n ret[i] = jointImgTo3D(sample[i])\n return ret\n\n\ndef jointImgTo3D(sample):\n \"\"\"\n Normalize sample to metric 3D\n :param sample: joints in (x,y,z) with x,y in image coordinates and z in mm\n :return: normalized joints in mm\n \"\"\"\n ret = np.zeros((3,), np.float32)\n # convert to metric using f\n ret[0] = (sample[0]-centerX)*sample[2]/focalLengthX\n ret[1] = (sample[1]-centerY)*sample[2]/focalLengthY\n ret[2] = sample[2]\n return ret\n\n\ndef depth2pc(depth, after_crop=False, left=0, top=0):\n points = []\n for v in range(depth.shape[0]):\n for u in range(depth.shape[1]):\n Z = int(depth[v, u])\n if Z == 0:\n continue\n v_m = v\n u_m = u\n if after_crop:\n v_m = v + int(top)\n u_m = u + int(left)\n X = int((u_m - centerX) * Z / focalLengthX)\n Y = int((v_m - centerY) * Z / focalLengthY)\n points.append([X, Y, Z])\n points_np = np.array(points)\n return points_np\n\n\ndef pc2depth(pc_local):\n pc = pc_local.copy()\n width = 640\n height = 480\n pc[:, 0] = pc[:, 0] / pc[:, 2].astype(float) * focalLengthX + centerX\n pc[:, 1] = pc[:, 1] / pc[:, 2].astype(float) * focalLengthY + centerY\n uvd = []\n for i in range(pc.shape[0]):\n if 0 < pc[i, 0] < width and 0 < pc[i, 1] < height:\n uvd.append(pc[i, :].astype(int))\n depth = uvd2depth(np.array(uvd), width, height)\n return depth\n\n\ndef depth2uvd(depth):\n depth = depth.squeeze()\n v, u = np.where(depth != 0)\n v = v.reshape(-1, 1)\n u = u.reshape(-1, 1)\n return np.concatenate([u, v, depth[v, u]], axis=1)\n\n\ndef uvd2depth(uvd, width, height):\n depth = np.zeros((height, width, 1), np.uint16)\n depth[uvd[:, 1], uvd[:, 0]] = uvd[:, 2].reshape(-1, 1)\n return depth\n\n\ndef joint3DToImg(sample):\n \"\"\"\n Denormalize sample from metric 3D to image coordinates\n :param sample: joints in (x,y,z) with x,y and z in mm\n :return: joints in (x,y,z) with x,y in image coordinates and z in mm\n \"\"\"\n ret = np.zeros((3,), np.float32)\n # convert to metric using f\n if sample[2] == 0.:\n ret[0] = centerX\n ret[1] = centerY\n return ret\n ret[0] = sample[0]/sample[2]*focalLengthX+centerX\n ret[1] = sample[1]/sample[2]*focalLengthY+centerY\n ret[2] = sample[2]\n return ret\n\n\ndef worker(input_q, depth_q, output_q, cap_params, frame_processed):\n global rgb_img, depth_img\n\n print(\">> loading frozen model for worker\")\n detection_graph, sess = detector_utils.load_inference_graph()\n sess = tf.Session(graph=detection_graph)\n im_width, im_height = (640, 480)\n\n pcd = o3d.geometry.PointCloud()\n pcd_crop = o3d.geometry.PointCloud()\n inlier_cloud = o3d.geometry.PointCloud()\n\n previous_center_point = 
np.array([0, 0, 0])\n while True:\n # print(\"> ===== in worker loop, frame \", frame_processed)\n image_np = input_q.get()\n depth_np = depth_q.get()\n if image_np is not None:\n # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,\n # while scores contains the confidence for each of these boxes.\n # Hint: If len(boxes) > 1 , you may assume you have found at least one hand (within your score threshold)\n #\n boxes, scores = detector_utils.detect_objects(\n image_np, detection_graph, sess)\n # boxes, scores = detector_utils.gpu_detect_objects(image_np,\n # detection_graph, sess)\n ind = np.argmax(scores)\n bbx = boxes[ind]\n (left, right, top, bottom) = (bbx[1] * im_width, bbx[3] * im_width,\n bbx[0] * im_height, bbx[2] * im_height)\n depth_crop = depth_np[int(top):int(bottom), int(left):int(right)]\n # depth_crop[np.where(depth_crop > 900)] = 0\n points_crop = depth2pc(depth_crop, True, int(left), int(top))\n if len(points_crop):\n pcd_crop.points = o3d.utility.Vector3dVector(points_crop)\n cl, ind = pcd_crop.remove_statistical_outlier(nb_neighbors=20,\n std_ratio=1.0)\n pcd.points = o3d.utility.Vector3dVector(points_crop)\n inlier_cloud = pcd_crop.select_down_sample(ind)\n center_point = inlier_cloud.get_center()\n\n # if center_point is far away with the previous center point\n else:\n # if no hand is detected, use previous center point\n center_point = previous_center_point\n\n previous_center_point = center_point\n print(center_point)\n points_q.put(center_point)\n\n # draw bounding boxes\n detector_utils.draw_box_on_image(\n cap_params['num_hands_detect'], cap_params[\"score_thresh\"],\n scores, boxes, cap_params['im_width'], cap_params['im_height'],\n image_np)\n center_img = joint3DToImg(center_point)\n cv2.circle(image_np, (int(center_img[0]), int(center_img[1])),\n 5, (0, 255, 0), -1)\n\n # add image_np annotated with bounding box to queue\n output_q.put(image_np)\n frame_processed += 1\n else:\n output_q.put(image_np)\n sess.close()\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--source', type=int, default=0, help='Device index of the camera.')\n parser.add_argument('--num_hands',type=int,default=1,help='Max number of hands to detect.')\n parser.add_argument('--fps',type=int,default=20,help='Show FPS on detection/display visualization')\n parser.add_argument('--width',type=int,default=640,help='Width of the frames in the video stream.')\n parser.add_argument('--height',type=int,default=480,help='Height of the frames in the video stream.')\n parser.add_argument('--display',type=int,default=0,help='Display the detected images using OpenCV. 
This reduces FPS')\n parser.add_argument('--num-workers',type=int,default=1,help='Number of workers.')\n parser.add_argument('--queue-size',type=int,default=5,help='Size of the queue.')\n args = parser.parse_args()\n\n rospy.init_node('hand_track_arm')\n pub_bbx = rospy.Publisher('hand_bbx', Float64MultiArray, queue_size=1)\n depth_sub = message_filters.Subscriber(\n '/camera/aligned_depth_to_color/image_raw', Image)\n rgb_sub = message_filters.Subscriber('/camera/color/image_raw', Image)\n ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub], 10, 0.1, allow_headerless=True)\n ts.registerCallback(callback)\n rospy.sleep(1)\n\n input_q = Queue(maxsize=args.queue_size)\n depth_q = Queue(maxsize=args.queue_size)\n output_q = Queue(maxsize=args.queue_size)\n points_q = Queue(maxsize=args.queue_size)\n\n cap_params = {}\n frame_processed = 0\n cap_params['im_width'], cap_params['im_height'] = 640, 480\n cap_params['score_thresh'] = score_thresh\n\n # max number of hands we want to detect/track\n cap_params['num_hands_detect'] = args.num_hands\n\n print(cap_params, args)\n\n # spin up workers to paralleize detection.\n pool = Pool(args.num_workers, worker,\n (input_q, depth_q, output_q, cap_params, frame_processed))\n\n start_time = datetime.datetime.now()\n num_frames = 0\n fps = 0\n index = 0\n if args.display > 0:\n cv2.namedWindow('Multi-Threaded Detection', cv2.WINDOW_NORMAL)\n\n while True:\n index += 1\n # embed()\n input_q.put(rgb_img)\n depth_q.put(depth_img)\n\n # worker(input_q, depth_q, output_q, cap_params, frame_processed)\n output_frame = output_q.get()\n center_points = points_q.get()\n if center_points is not None:\n msg = Float64MultiArray()\n msg.data = center_points\n print(msg)\n pub_bbx.publish(msg)\n # output_frame = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)\n elapsed_time = (datetime.datetime.now() - start_time).total_seconds()\n num_frames += 1\n fps = num_frames / elapsed_time\n # print(\"frame \", index, num_frames, elapsed_time, fps)\n\n if output_frame is not None:\n if args.display > 0:\n if args.fps > 0:\n detector_utils.draw_fps_on_image(\"FPS : \" + str(int(fps)),\n output_frame)\n cv2.imshow('Multi-Threaded Detection', output_frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n if num_frames == 400:\n num_frames = 0\n start_time = datetime.datetime.now()\n else:\n print(\"frames processed: \", index, \"elapsed time: \",\n elapsed_time, \"fps: \", str(int(fps)))\n else:\n # print(\"video end\")\n break\n elapsed_time = (datetime.datetime.now() - start_time).total_seconds()\n fps = num_frames / elapsed_time\n print(\"fps\", fps)\n pool.terminate()\n # cv2.destroyAllWindows()\n","sub_path":"realsense_detect_multi_threaded.py","file_name":"realsense_detect_multi_threaded.py","file_ext":"py","file_size_in_byte":12593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57701021","text":"import re,urllib.parse,urllib.request,urllib.error\r\nfrom bs4 import BeautifulSoup as BS\r\n\r\nbaseUrl = 'http://www.bing.com/search?'\r\nword = 'Yapstone+email'\r\nprint(word)\r\nword = word.encode(encoding='utf-8', errors='strict')\r\n#print(word)\r\n\r\ndata = {'q':word}\r\ndata = urllib.parse.urlencode(data)\r\n#print(data)\r\nurl = baseUrl+data\r\nprint(url)\r\n\r\ntry:\r\n html = urllib.request.urlopen(url)\r\nexcept urllib.error.HTTPError as e:\r\n print(e.code)\r\nexcept urllib.error.URLError as e:\r\n print(e.reason)\r\n\r\nsoup = BS(html,\"html.parser\")\r\ncount = 
soup.findAll(class_=\"b_caption\")\r\nprint(count[0].p.get_text())\r\n","sub_path":"openbing.py","file_name":"openbing.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275972554","text":"from __future__ import absolute_import\n\nimport maya.cmds as cmds\n\nfrom rigging.library.utils import controller as rlu_controller, transform as rlu_transform\nfrom rigging.tools import utils as rt_utils\n\n\nclass Build:\n def __init__(self,\n pupil_jnt,\n iris_jnt,\n pupil_prefix,\n iris_prefix,\n eyeball_jnt,\n eye_jnt_grp_offset,\n scale,\n eye_ctrl,\n side,\n suffix_controller,\n ):\n self.iris_connect_grp = cmds.group(em=1, n='irisConnect' + side + '_grp')\n cmds.delete(cmds.parentConstraint(eyeball_jnt, self.iris_connect_grp))\n\n # CREATE CONTROLLER\n pupil_ctrl = rlu_controller.Control(match_obj_first_position=pupil_jnt,\n prefix=pupil_prefix,\n shape=rlu_controller.CIRCLEPLUS, groups_ctrl=[''],\n ctrl_size=scale * 0.08,\n ctrl_color='red', lock_channels=['v'],\n suffix=suffix_controller,\n side=side)\n\n iris_ctrl = rlu_controller.Control(match_obj_first_position=iris_jnt,\n prefix=iris_prefix,\n shape=rlu_controller.CIRCLEPLUS, groups_ctrl=[''],\n ctrl_size=scale * 0.12,\n suffix=suffix_controller,\n ctrl_color='blue', lock_channels=['v'], side=side)\n\n # CREATE GROUP CORESPONDENT THE JOINTS\n pupil_jnt_grp = rlu_transform.create_parent_transform(parent_list=[''], object=pupil_jnt,\n match_position=pupil_jnt,\n prefix=pupil_prefix, suffix='_jnt', side=side)\n\n iris_jnt_grp = rlu_transform.create_parent_transform(parent_list=[''], object=iris_jnt, match_position=iris_jnt,\n prefix=iris_prefix, suffix='_jnt', side=side)\n\n # ASSIGNED THE INSTANCE CLASS\n self.pupil_ctrl = pupil_ctrl.control\n self.pupil_ctrl_grp = pupil_ctrl.parent_control[0]\n self.iris_ctrl = iris_ctrl.control\n self.iris_ctrl_grp = iris_ctrl.parent_control[0]\n\n rt_utils.connect_attr_object(pupil_ctrl.control, pupil_jnt)\n rt_utils.connect_attr_object(iris_ctrl.control, iris_jnt)\n\n cmds.parent(self.pupil_ctrl_grp, self.iris_ctrl)\n cmds.parent(self.iris_connect_grp, eye_ctrl)\n rt_utils.connect_attr_rotate(eye_jnt_grp_offset, self.iris_connect_grp)\n\n cmds.parent(iris_jnt_grp[0], eyeball_jnt)\n cmds.parent(self.iris_ctrl_grp, self.iris_connect_grp)\n","sub_path":"rigging/library/base/face/irisPupil.py","file_name":"irisPupil.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497234754","text":"import datetime\nfrom amaascore.core.amaas_model import AMaaSModel\n\nclass EODBook(AMaaSModel):\n\n def __init__(self, asset_manager_id, book_id, utc_close_time,\n eod_book_status='Active', *args, **kwargs):\n self.asset_manager_id = asset_manager_id\n self.book_id = book_id\n self.utc_close_time = utc_close_time.strftime('%H:%M:%S') if isinstance(utc_close_time, datetime.time) else str(utc_close_time)\n self.eod_book_status = eod_book_status\n super(EODBook, self).__init__(*args, **kwargs)","sub_path":"amaascore/asset_managers/eod_book.py","file_name":"eod_book.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62937924","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
build\\bdist.win32\\egg\\daversy\\db\\oracle\\trigger.py\n# Compiled at: 2016-01-14 15:12:15\nfrom daversy.utils import *\nfrom daversy.db.object import Trigger\n\nclass TriggerBuilder(object):\n    \"\"\"Represents a builder for a trigger.\"\"\"\n    DbClass = Trigger\n    XmlTag = 'trigger'\n    Query = '\\n        SELECT trigger_name, table_name, lower(base_object_type) AS type,\\n               replace(dbms_metadata.get_ddl(\\'TRIGGER\\', trigger_name),\\n                       \\'\"\\' || user || \\'\".\\') AS definition\\n        FROM sys.user_triggers\\n        ORDER BY trigger_name\\n    '\n    PropertyList = odict((\n     'TRIGGER_NAME', Property('name')), (\n     'TYPE', Property('object-type')), (\n     'TABLE_NAME', Property('object-name')), (\n     'DEFINITION', Property('definition', None, lambda x: x.read(), cdata=True)))\n\n    @staticmethod\n    def addToState(state, trigger):\n        trigger.definition = trim_spaces(trigger.definition)\n        state.triggers[trigger.name] = trigger\n\n    @staticmethod\n    def createSQL(trigger):\n        return trigger.definition + '\\n\\n'","sub_path":"pycfiles/daversy-0.3.4-py2.7/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"610055239","text":"\"\"\" Detection Data parser and processing for YOLO.\nParse image and ground truths in a dataset to training targets and package them\ninto (image, labels) tuple for YOLO.\n\"\"\"\nimport tensorflow as tf\nfrom yolo.ops import preprocessing_ops\nfrom yolo.ops import box_ops as box_utils\nfrom official.vision.beta.ops import preprocess_ops\nfrom official.vision.beta.dataloaders import parser, utils\n\n\ndef _coco91_to_80(classif, box, areas, iscrowds):\n  \"\"\"Function used to reduce COCO 91 to COCO 80, or to convert from the 2017 \n  format to the 2014 format\"\"\"\n  # Vector where index i correlates to the class at index[i].\n  x = [\n      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n      23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,\n      44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,\n      63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85,\n      86, 87, 88, 89, 90\n  ]\n  no = tf.expand_dims(tf.convert_to_tensor(x), axis=0)\n  \n  # Reshape the classes in order to build a class mask.\n  ce = tf.expand_dims(classif, axis=-1)\n\n  # One-hot the classifications to match the 80 class format.\n  ind = ce == tf.cast(no, ce.dtype)\n\n  # Select the max values. 
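(the argmax over this boolean\n  # mask gives each box its index in the 80-entry list above; boxes with no\n  # match are dropped by the gather below.)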
\n  co = tf.reshape(tf.math.argmax(tf.cast(ind, tf.float32), axis=-1), [-1])\n  ind = tf.where(tf.reduce_any(ind, axis=-1))\n\n  # Gather the valid instances.\n  classif = tf.gather_nd(co, ind)\n  box = tf.gather_nd(box, ind)\n  areas = tf.gather_nd(areas, ind)\n  iscrowds = tf.gather_nd(iscrowds, ind)\n\n  # Restate the number of viable detections; ideally it should be the same.\n  num_detections = tf.shape(classif)[0]\n  return classif, box, areas, iscrowds, num_detections\n\n\nclass Parser(parser.Parser):\n  \"\"\"Parser to parse an image and its annotations into a dictionary of \n  tensors.\"\"\"\n\n  def __init__(self,\n               output_size,\n               min_level=3,\n               max_level=5,\n               jitter=0.0,\n               jitter_mosaic=0.0,\n               resize=1.0,\n               resize_mosaic=1.0,\n               area_thresh=0.1,\n               max_num_instances=200,\n               aug_rand_angle=0.0,\n               aug_rand_saturation=1.0,\n               aug_rand_brightness=1.0,\n               aug_rand_hue=1.0,\n               random_pad=True,\n               aug_scale_min=1.0,\n               aug_scale_max=1.0,\n               mosaic_min=1.0,\n               mosaic_max=1.0,\n               aug_rand_transalate=0.0,\n               mosaic_translate=0.0,\n               anchor_t=4.0,\n               dynamic_conv=False,\n               stride=None,\n               scale_xy=None,\n               use_scale_xy=False,\n               best_match_only=False,\n               masks=None,\n               anchors=None,\n               letter_box=False,\n               random_flip=True,\n               use_tie_breaker=True,\n               dtype='float32',\n               coco91to80=False,\n               anchor_free_limits=None, \n               seed=None):\n    \"\"\"Initializes parameters for parsing annotations in the dataset.\n    Args:\n      output_size: `Tensor` or `list` for [height, width] of output image. The\n        output_size should be divisible by the largest feature stride 2^max_level.\n      min_level: `int` number of minimum level of the output feature pyramid.\n      max_level: `int` number of maximum level of the output feature pyramid.\n      jitter: `float` for the maximum change in aspect ratio expected in \n        each preprocessing step.\n      jitter_mosaic: `float` for the maximum change in aspect ratio expected in \n        each preprocessing step to be applied to mosaiced images.\n      resize: `float` for the maximum change in image size.\n      resize_mosaic: `float` for the maximum change in image size to be applied \n        to mosaiced images.\n      area_thresh: `float` for the minimum area of a box to allow to pass \n        through for optimization.\n      max_num_instances: `int` for the number of boxes to compute loss on.\n      aug_rand_angle: `float` indicating the maximum angle value for \n        angle. angle will be changed between 0 and value.\n      aug_rand_saturation: `float` indicating the maximum scaling value for \n        saturation. saturation will be scaled between 1/value and value.\n      aug_rand_brightness: `float` indicating the maximum scaling value for \n        brightness. brightness will be scaled between 1/value and value.\n      aug_rand_hue: `float` indicating the maximum scaling value for \n        hue. hue will be scaled between 1 - value and 1 + value.\n      aug_scale_min: `float` indicating the minimum scaling value for image \n        scale jitter. \n      aug_scale_max: `float` indicating the maximum scaling value for image \n        scale jitter.\n      mosaic_min: `float` indicating the minimum scaling value for image \n        scale jitter for mosaiced images. 
\n      mosaic_max: `float` indicating the maximum scaling value for image \n        scale jitter for mosaiced images.\n      random_pad: `bool` indicating whether to use padding to apply random \n        translation; True for darknet yolo, False for scaled yolo.\n      aug_rand_transalate: `float` ranging from 0 to 1 indicating the maximum \n        amount to randomly translate an image.\n      mosaic_translate: `float` ranging from 0 to 1 indicating the maximum \n        amount to randomly translate an image for mosaiced images.\n      anchor_t: `float` indicating the threshold over which an anchor will be \n        considered for prediction; at zero, all the anchors will be used and at\n        1.0 only the best will be used. for anchor thresholds larger than 1.0 \n        we stop using the IOU for anchor comparison and resort directly to \n        comparing the width and height; this is used for the scaled models. \n      dynamic_conv: `bool` for whether to use padding in evaluation on the \n        GPUs.\n      stride: `int` for how much the model scales down the images at the largest\n        level.\n      scale_xy: dictionary of `float` values indicating how far each pixel can see \n        outside of its containment of 1.0. a value of 1.2 indicates there is a \n        20% extended radius around each pixel that this specific pixel can \n        predict values for a center at. the center can range from 0 - value/2 \n        to 1 + value/2; this value is set in the yolo filter, and reused here. \n        there should be one value for scale_xy for each level from min_level to \n        max_level.\n      use_scale_xy: `boolean` indicating whether the scale_xy values should be \n        used or ignored; used if set to True. \n      best_match_only: `boolean` indicating how boxes are selected for \n        optimization.\n      masks: dictionary of lists of `int` values indicating the indexes in the \n        list of anchor boxes to use at each prediction level between min_level \n        and max_level. each level must have a list of indexes. \n      anchors: list of lists of `float` values for each anchor box.\n      letter_box: `boolean` indicating whether, upon start of the datapipeline \n        and regardless of the preprocessing ops that are used, the aspect ratio \n        of the images should be preserved. \n      random_flip: `boolean` indicating whether or not to randomly flip the \n        image horizontally. \n      use_tie_breaker: `boolean` indicating whether to use the anchor threshold \n        value.\n      dtype: `str` indicating the output datatype of the datapipeline, selecting \n        from {\"float32\", \"float16\", \"bfloat16\"}.\n      coco91to80: `bool` for whether to convert coco91 to coco80 to minimize \n        model parameters.\n      seed: `int` the seed for random number generation. 
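Fixing \n        this makes the random flips, jitters, and HSV distortions in the \n        training pipeline repeatable, since those ops are seeded with it. 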
\n \"\"\"\n self._coco91to80 = coco91to80\n\n # Base initialization\n image_w = output_size[1]\n image_h = output_size[0]\n if stride is None:\n self._net_down_scale = 2**max_level\n else:\n self._net_down_scale = stride\n\n # Assert that the width and height is viable\n assert image_w % self._net_down_scale == 0\n assert image_h % self._net_down_scale == 0\n\n # Set the width and height properly\n self._image_w = image_w\n self._image_h = image_h\n\n # Set the anchor boxes and masks for each scale\n self._anchors = anchors\n self._masks = {\n key: tf.convert_to_tensor(value) for key, value in masks.items()\n }\n self._use_tie_breaker = use_tie_breaker\n self._max_num_instances = max_num_instances\n\n # Image scaling params\n self._jitter = 0.0 if jitter is None else jitter\n self._resize = 1.0 if resize is None else resize\n self._aug_scale_min = aug_scale_min\n self._aug_scale_max = aug_scale_max\n self._aug_rand_translate = aug_rand_transalate\n\n # Mosaic scaling params\n self._jitter_mosaic = 0.0 if jitter_mosaic is None else jitter_mosaic\n self._resize_mosaic = 0.0 if resize_mosaic is None else resize_mosaic\n self._mosaic_min = mosaic_min\n self._mosaic_max = mosaic_max\n self._mosaic_translate = mosaic_translate\n\n # Image spatial distortion\n self._random_flip = random_flip\n self._letter_box = letter_box\n self._random_pad = random_pad\n self._aug_rand_angle = aug_rand_angle\n\n # Color space distortion of the image\n self._aug_rand_saturation = aug_rand_saturation\n self._aug_rand_brightness = aug_rand_brightness\n self._aug_rand_hue = aug_rand_hue\n self._best_match_only = best_match_only\n\n # Set the per level values needed for operation\n self._dynamic_conv = dynamic_conv\n self._scale_xy = scale_xy\n self._anchor_t = anchor_t\n self._use_scale_xy = use_scale_xy\n keys = list(self._masks.keys())\n\n self._scale_up = {key: 3 for i, key in enumerate(keys)\n } if self._use_scale_xy else {key: 1 for key in keys}\n self._area_thresh = area_thresh\n self._anchor_free_limits = anchor_free_limits\n\n self._seed = seed\n\n # Set the data type based on input string\n if dtype == 'float16':\n self._dtype = tf.float16\n elif dtype == 'bfloat16':\n self._dtype = tf.bfloat16\n elif dtype == 'float32':\n self._dtype = tf.float32\n else:\n raise Exception(\n 'Unsupported datatype used in parser\\\n only {float16, bfloat16, or float32}.'\n )\n\n def _build_grid(self,\n raw_true,\n width,\n height,\n batch=False,\n use_tie_breaker=False,\n is_training=True):\n '''Private function for building the full scale object and class grid.'''\n mask = self._masks\n inds = {}\n upds = {}\n true_conf = {}\n\n # based on if training or not determine how to scale up the number of\n # boxes that may result for final loss computation\n scale_up = self._scale_up\n\n # for each prediction path generate a properly scaled output prediction map\n for key in self._masks.keys():\n if self._use_scale_xy:\n scale_xy = self._scale_xy[key]\n else:\n scale_xy = 1\n\n # build the actual grid as well and the list of boxes and classes AND\n # their index in the prediction grid\n indexes, updates, true_grid = preprocessing_ops.build_grided_gt_ind(\n raw_true, self._masks[key], width // 2**int(key),\n height // 2**int(key), 0, raw_true['bbox'].dtype, scale_xy,\n scale_up[key], use_tie_breaker)\n\n # set/fix the shape of the indexes\n ishape = indexes.get_shape().as_list()\n ishape[-2] = self._max_num_instances * scale_up[key]\n indexes.set_shape(ishape)\n\n # set/fix the shape of the updates\n ishape = 
updates.get_shape().as_list()\n      ishape[-2] = self._max_num_instances * scale_up[key]\n      updates.set_shape(ishape)\n\n      # add all the values to the final dictionary\n      inds[key] = indexes\n      upds[key] = tf.cast(updates, self._dtype)\n      true_conf[key] = true_grid\n    return mask, inds, upds, true_conf\n\n  def _get_identity_info(self, image):\n    \"\"\"Get an identity image op to pad all info vectors; this is used because \n    graph compilation fails if there are a variable number of info objects in a list.\n    \"\"\"\n    shape_ = tf.shape(image)\n    val = tf.stack([\n        tf.cast(shape_[:2], tf.float32),\n        tf.cast(shape_[:2], tf.float32),\n        tf.ones_like(tf.cast(shape_[:2], tf.float32)),\n        tf.zeros_like(tf.cast(shape_[:2], tf.float32)),\n    ])\n    return val\n\n  def _jitter_scale(self, image, shape, letter_box, jitter, resize, random_pad,\n                    aug_scale_min, aug_scale_max, translate, angle,\n                    perspective):\n    if (aug_scale_min != 1.0 or aug_scale_max != 1.0):\n      crop_only = True\n      # jitter gives you only one info object and resize-and-crop gives you one;\n      # if crop only, then there can be 1 from jitter and 1 from crop\n      reps = 1\n    else:\n      crop_only = False\n      reps = 0\n    infos = []\n    image, info_a, _ = preprocessing_ops.resize_and_jitter_image(\n        image,\n        shape,\n        letter_box=letter_box,\n        jitter=jitter,\n        resize=resize,\n        crop_only=crop_only,\n        random_pad=random_pad,\n        seed=self._seed,\n    )\n    infos.extend(info_a)\n    stale_a = self._get_identity_info(image)\n    for _ in range(reps):\n      infos.append(stale_a)\n    image, _, affine = preprocessing_ops.affine_warp_image(\n        image,\n        shape,\n        scale_min=aug_scale_min,\n        scale_max=aug_scale_max,\n        translate=translate,\n        degrees=angle,\n        perspective=perspective,\n        random_pad=random_pad,\n        seed=self._seed,\n    )\n    return image, infos, affine\n\n  def reorg91to80(self, data):\n    \"\"\"Function used to reduce COCO 91 to COCO 80, or to convert from the 2017 \n    format to the 2014 format\"\"\"\n    if self._coco91to80:\n      (data['groundtruth_classes'], data['groundtruth_boxes'],\n       data['groundtruth_area'], data['groundtruth_is_crowd'],\n       _) = _coco91_to_80(data['groundtruth_classes'], data['groundtruth_boxes'],\n                          data['groundtruth_area'], data['groundtruth_is_crowd'])\n    return data\n\n  def _parse_train_data(self, data):\n    \"\"\"Parses data for training.\"\"\"\n    # Down size coco 91 to coco 80 if the option is selected.\n    data = self.reorg91to80(data)\n\n    # Initialize the shape constants.\n    image = data['image']\n    boxes = data['groundtruth_boxes']\n    classes = data['groundtruth_classes']\n    height, width = preprocessing_ops.get_image_shape(image)\n\n    if self._random_flip:\n      # Randomly flip the image horizontally.\n      image, boxes, _ = preprocess_ops.random_horizontal_flip(\n          image, boxes, seed=self._seed)\n\n    if not data['is_mosaic']:\n      image, infos, affine = self._jitter_scale(\n          image, [self._image_h, self._image_w], self._letter_box, self._jitter,\n          self._resize, self._random_pad, self._aug_scale_min,\n          self._aug_scale_max, self._aug_rand_translate, self._aug_rand_angle,\n          0.0)\n    else:\n      image, infos, affine = self._jitter_scale(\n          image, [self._image_h, self._image_w], self._letter_box,\n          self._jitter_mosaic, self._resize_mosaic, self._random_pad,\n          self._mosaic_min, self._mosaic_max, self._mosaic_translate,\n          self._aug_rand_angle, 0.0)\n\n    # Clip and clean boxes.\n    boxes, inds = preprocessing_ops.apply_infos(\n        boxes,\n        infos,\n        affine=affine,\n        shuffle_boxes=False,\n        area_thresh=self._area_thresh,\n        seed=self._seed)\n    classes = tf.gather(classes, inds)\n    info = infos[-1]\n\n    # Apply scaling to the 
hue, saturation, and brightness of an image.\n    image = tf.cast(image, self._dtype)\n    image = image / 255\n    image = preprocessing_ops.image_rand_hsv(\n        image,\n        self._aug_rand_hue,\n        self._aug_rand_saturation,\n        self._aug_rand_brightness,\n        seed=self._seed, \n        darknet=not self._use_scale_xy)\n\n    # Cast the image to the selected datatype.\n    image = tf.clip_by_value(image, 0.0, 1.0)\n    height, width = self._image_h, self._image_w\n    image, labels = self._build_label(\n        image,\n        boxes,\n        classes,\n        width,\n        height,\n        info,\n        inds,\n        data,\n        is_training=True)\n    return image, labels\n\n  def _parse_eval_data(self, data):\n    # Down size coco 91 to coco 80 if the option is selected.\n    data = self.reorg91to80(data)\n\n    # Get the image shape constants and cast the image to the selected datatype.\n    image = tf.cast(data['image'], self._dtype)\n    boxes = data['groundtruth_boxes']\n    classes = data['groundtruth_classes']\n\n    if not self._dynamic_conv:\n      height, width = self._image_h, self._image_w\n    else:\n      fit = lambda x: tf.cast((tf.math.ceil(\n          (x / self._net_down_scale) + 0.5) * self._net_down_scale), x.dtype)\n      height, width = preprocessing_ops.get_image_shape(image)\n      height, width = fit(height), fit(width)\n\n    image, infos, _ = preprocessing_ops.resize_and_jitter_image(\n        image, [height, width],\n        letter_box=self._letter_box,\n        random_pad=False,\n        shiftx=0.5,\n        shifty=0.5,\n        jitter=0.0,\n        resize=1.0)\n\n    # Clip and clean boxes.\n    image = image / 255\n    boxes, inds = preprocessing_ops.apply_infos(\n        boxes, infos, shuffle_boxes=False, area_thresh=self._area_thresh)\n    classes = tf.gather(classes, inds)\n    info = infos[-1]\n\n    image, labels = self._build_label(\n        image,\n        boxes,\n        classes,\n        width,\n        height,\n        info,\n        inds,\n        data,\n        is_training=False)\n    return image, labels\n\n  def _build_label(self,\n                   image,\n                   boxes_,\n                   classes,\n                   width,\n                   height,\n                   info,\n                   inds,\n                   data,\n                   is_training=True):\n    \"\"\"Label construction for both the train and eval data. 
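\n\n    Boxes, classes, best anchors, IOUs, areas and crowd flags are padded out\n    to max_num_instances, and dense grid targets are then built for each\n    output level.\n    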
\"\"\"\n\n # Set the image shape.\n imshape = image.get_shape().as_list()\n imshape[-1] = 3\n image.set_shape(imshape)\n\n # Get the best anchors.\n boxes = box_utils.yxyx_to_xcycwh(boxes_)\n best_anchors, ious = preprocessing_ops.get_best_anchor(\n boxes,\n self._anchors,\n width=width,\n height=height,\n iou_thresh=self._anchor_t,\n anchor_free_limits = self._anchor_free_limits, \n best_match_only=self._best_match_only)\n\n # Set/fix the boxes shape.\n bshape = boxes.get_shape().as_list()\n boxes = preprocessing_ops.pad_max_instances(boxes, self._max_num_instances, 0)\n bshape[0] = self._max_num_instances\n boxes.set_shape(bshape)\n\n # Set/fix the classes shape.\n cshape = classes.get_shape().as_list()\n classes = preprocessing_ops.pad_max_instances(classes, self._max_num_instances, -1)\n cshape[0] = self._max_num_instances\n classes.set_shape(cshape)\n\n # Set/fix the best anchor shape.\n bashape = best_anchors.get_shape().as_list()\n best_anchors = preprocessing_ops.pad_max_instances(best_anchors, self._max_num_instances, -1)\n bashape[0] = self._max_num_instances\n best_anchors.set_shape(bashape)\n\n # Set/fix the ious shape.\n ishape = ious.get_shape().as_list()\n ious = preprocessing_ops.pad_max_instances(ious, self._max_num_instances, 0)\n ishape[0] = self._max_num_instances\n ious.set_shape(ishape)\n\n # Set/fix the area shape.\n area = data['groundtruth_area']\n area = tf.gather(area, inds)\n ashape = area.get_shape().as_list()\n area = preprocessing_ops.pad_max_instances(area, self._max_num_instances, 0)\n ashape[0] = self._max_num_instances\n area.set_shape(ashape)\n\n # Set/fix the is_crowd shape.\n is_crowd = data['groundtruth_is_crowd']\n is_crowd = tf.gather(is_crowd, inds)\n ishape = is_crowd.get_shape().as_list()\n is_crowd = preprocessing_ops.pad_max_instances(\n tf.cast(is_crowd, tf.int32), self._max_num_instances, 0)\n ishape[0] = self._max_num_instances\n is_crowd.set_shape(ishape)\n\n # Build the dictionary set.\n labels = {\n 'source_id': utils.process_source_id(data['source_id']),\n 'bbox': tf.cast(boxes, self._dtype),\n 'classes': tf.cast(classes, self._dtype),\n 'area': tf.cast(area, self._dtype),\n 'is_crowd': is_crowd,\n 'best_anchors': tf.cast(best_anchors, self._dtype),\n 'best_iou_match': ious,\n 'width': width,\n 'height': height,\n 'info': info,\n 'num_detections': tf.shape(inds)[0]\n }\n\n # Build the grid formatted for loss computation in model output format.\n grid, inds, upds, true_conf = self._build_grid(\n labels,\n width,\n height,\n use_tie_breaker=self._use_tie_breaker,\n is_training=is_training)\n\n # Update the labels dictionary.\n labels['bbox'] = box_utils.xcycwh_to_yxyx(labels['bbox'])\n labels['upds'] = upds\n labels['inds'] = inds\n labels['true_conf'] = true_conf\n\n # Sets up groundtruth data for evaluation.\n groundtruths = {\n 'source_id': data['source_id'],\n 'height': data['height'],\n 'width': data['width'],\n 'num_detections': labels['num_detections'],\n 'image_info': info,\n 'boxes': boxes_,\n 'classes': labels['classes'],\n 'areas': data['groundtruth_area'],\n 'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),\n }\n groundtruths['source_id'] = utils.process_source_id(\n groundtruths['source_id'])\n groundtruths = utils.pad_groundtruths_to_fixed_size(groundtruths,\n self._max_num_instances)\n\n labels['groundtruths'] = groundtruths\n return image, 
labels\n","sub_path":"yolo/dataloaders/yolo_input.py","file_name":"yolo_input.py","file_ext":"py","file_size_in_byte":21695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457814900","text":"\"\"\"\nmain.py\n\"\"\"\nimport sys\nfrom PyQt4 import QtGui\nfrom mainwindow import MainWindow\nfrom dialoginspect import DialogInspect\nfrom imagetagger import ImageTagger\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n image_tagger = ImageTagger()\n # bind the main image tagger object\n main_window = MainWindow(image_tagger)\n dialog_inspect = DialogInspect(image_tagger)\n # register signal-slots for updates across the two windows\n main_window.sig_closing.connect(dialog_inspect.quit)\n main_window.sig_sample_updated.connect(dialog_inspect.update_view)\n dialog_inspect.sig_tag_updated.connect(main_window.update_from_dialog_inspect)\n # place windows\n desk_geo = QtGui.QApplication.desktop().geometry()\n main_geo = main_window.geometry()\n dlg_geo = dialog_inspect.geometry()\n main_window.move((desk_geo.width() - main_geo.width() - dlg_geo.width()) / 2,\n (desk_geo.height() - main_geo.height()) / 2)\n dialog_inspect.move((desk_geo.width() + main_geo.width() - dlg_geo.width()) / 2,\n (desk_geo.height() - dlg_geo.height()) / 2)\n\n main_window.show()\n dialog_inspect.show()\n sys.exit(app.exec_())\n","sub_path":"dtyu/ImageTagger/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647736431","text":"import asyncio\nimport enum\nimport itertools\nimport logging\nimport queue\nimport threading\nfrom typing import Callable, Generic, Optional, Union\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal # type: ignore\n\nfrom aiortc import RTCPeerConnection, RTCSessionDescription\nfrom aiortc.contrib.media import MediaRelay\nfrom aiortc.mediastreams import MediaStreamTrack\n\nfrom .eventloop import get_server_event_loop\nfrom .models import (\n AudioProcessorBase,\n AudioProcessorFactory,\n AudioProcessorT,\n MediaPlayerFactory,\n MediaRecorderFactory,\n VideoProcessorBase,\n VideoProcessorFactory,\n VideoProcessorT,\n VideoTransformerBase,\n)\nfrom .process import (\n AsyncAudioProcessTrack,\n AsyncVideoProcessTrack,\n AudioProcessTrack,\n VideoProcessTrack,\n)\nfrom .receive import AudioReceiver, VideoReceiver\nfrom .relay import get_global_relay\n\n__all__ = [\n \"AudioProcessorBase\",\n \"AudioProcessorFactory\",\n \"VideoTransformerBase\",\n \"VideoProcessorBase\",\n \"MediaPlayerFactory\",\n \"MediaRecorderFactory\",\n \"VideoProcessorFactory\",\n \"WebRtcMode\",\n \"TimeoutError\",\n \"WebRtcWorker\",\n]\n\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nclass WebRtcMode(enum.Enum):\n RECVONLY = enum.auto()\n SENDONLY = enum.auto()\n SENDRECV = enum.auto()\n\n\nclass TimeoutError(Exception):\n pass\n\n\nTrackType = Literal[\"input:video\", \"input:audio\", \"output:video\", \"output:audio\"]\n\n\nasync def _process_offer(\n mode: WebRtcMode,\n pc: RTCPeerConnection,\n offer: RTCSessionDescription,\n relay: MediaRelay,\n source_video_track: Optional[MediaStreamTrack],\n source_audio_track: Optional[MediaStreamTrack],\n in_recorder_factory: Optional[MediaRecorderFactory],\n out_recorder_factory: Optional[MediaRecorderFactory],\n video_processor: Optional[VideoProcessorBase],\n audio_processor: 
Optional[AudioProcessorBase],\n video_receiver: Optional[VideoReceiver],\n audio_receiver: Optional[AudioReceiver],\n async_processing: bool,\n sendback_video: bool,\n sendback_audio: bool,\n callback: Callable[[Union[RTCSessionDescription, Exception]], None],\n on_track_created: Callable[[TrackType, MediaStreamTrack], None],\n):\n try:\n in_recorder = None\n if in_recorder_factory:\n in_recorder = in_recorder_factory()\n\n out_recorder = None\n if out_recorder_factory:\n out_recorder = out_recorder_factory()\n\n @pc.on(\"iceconnectionstatechange\")\n async def on_iceconnectionstatechange():\n logger.info(\"ICE connection state is %s\", pc.iceConnectionState)\n if pc.iceConnectionState == \"failed\":\n await pc.close()\n\n if mode == WebRtcMode.SENDRECV:\n\n @pc.on(\"track\")\n def on_track(input_track):\n logger.info(\"Track %s received\", input_track.kind)\n\n if input_track.kind == \"video\":\n on_track_created(\"input:video\", input_track)\n elif input_track.kind == \"audio\":\n on_track_created(\"input:audio\", input_track)\n\n output_track = None\n\n if input_track.kind == \"audio\":\n if source_audio_track:\n logger.info(\n \"Set %s as an input audio track\", source_audio_track\n )\n output_track = source_audio_track\n elif audio_processor:\n AudioTrack = (\n AsyncAudioProcessTrack\n if async_processing\n else AudioProcessTrack\n )\n logger.info(\n \"Set %s as an input audio track with audio_processor %s\",\n input_track,\n AudioTrack,\n )\n output_track = AudioTrack(\n track=relay.subscribe(input_track),\n processor=audio_processor,\n )\n else:\n output_track = input_track # passthrough\n elif input_track.kind == \"video\":\n if source_video_track:\n logger.info(\n \"Set %s as an input video track\", source_video_track\n )\n output_track = source_video_track\n elif video_processor:\n VideoTrack = (\n AsyncVideoProcessTrack\n if async_processing\n else VideoProcessTrack\n )\n logger.info(\n \"Set %s as an input video track with video_processor %s\",\n input_track,\n VideoTrack,\n )\n output_track = VideoTrack(\n track=relay.subscribe(input_track),\n processor=video_processor,\n )\n else:\n output_track = input_track\n\n if (output_track.kind == \"video\" and sendback_video) or (\n output_track.kind == \"audio\" and sendback_audio\n ):\n logger.info(\n \"Add a track %s of kind %s to %s\",\n output_track,\n output_track.kind,\n pc,\n )\n pc.addTrack(relay.subscribe(output_track))\n else:\n logger.info(\n \"Block a track %s of kind %s\", output_track, output_track.kind\n )\n\n if out_recorder:\n logger.info(\"Track %s is added to out_recorder\", output_track.kind)\n out_recorder.addTrack(relay.subscribe(output_track))\n if in_recorder:\n logger.info(\"Track %s is added to in_recorder\", input_track.kind)\n in_recorder.addTrack(relay.subscribe(input_track))\n\n if output_track.kind == \"video\":\n on_track_created(\"output:video\", output_track)\n elif output_track.kind == \"audio\":\n on_track_created(\"output:audio\", output_track)\n\n @input_track.on(\"ended\")\n async def on_ended():\n logger.info(\"Track %s ended\", input_track.kind)\n if in_recorder:\n await in_recorder.stop()\n if out_recorder:\n await out_recorder.stop()\n\n elif mode == WebRtcMode.SENDONLY:\n\n @pc.on(\"track\")\n def on_track(input_track):\n logger.info(\"Track %s received\", input_track.kind)\n\n if input_track.kind == \"video\":\n on_track_created(\"input:video\", input_track)\n elif input_track.kind == \"audio\":\n on_track_created(\"input:audio\", input_track)\n\n if input_track.kind == \"audio\":\n if 
audio_receiver:\n logger.info(\n \"Add a track %s to receiver %s\", input_track, audio_receiver\n )\n audio_receiver.addTrack(input_track)\n elif input_track.kind == \"video\":\n if video_receiver:\n logger.info(\n \"Add a track %s to receiver %s\", input_track, video_receiver\n )\n video_receiver.addTrack(input_track)\n\n if in_recorder:\n logger.info(\"Track %s is added to in_recorder\", input_track.kind)\n in_recorder.addTrack(input_track)\n\n @input_track.on(\"ended\")\n async def on_ended():\n logger.info(\"Track %s ended\", input_track.kind)\n if video_receiver:\n video_receiver.stop()\n if audio_receiver:\n audio_receiver.stop()\n if in_recorder:\n await in_recorder.stop()\n\n await pc.setRemoteDescription(offer)\n if mode == WebRtcMode.RECVONLY:\n for t in pc.getTransceivers():\n output_track = None\n if t.kind == \"audio\":\n if source_audio_track:\n if audio_processor:\n AudioTrack = (\n AsyncAudioProcessTrack\n if async_processing\n else AudioProcessTrack\n )\n logger.info(\n \"Set %s as an input audio track \"\n \"with audio_processor %s\",\n source_audio_track,\n AudioTrack,\n )\n output_track = AudioTrack(\n track=source_audio_track, processor=audio_processor\n )\n else:\n output_track = source_audio_track # passthrough\n elif t.kind == \"video\":\n if source_video_track:\n if video_processor:\n VideoTrack = (\n AsyncVideoProcessTrack\n if async_processing\n else VideoProcessTrack\n )\n logger.info(\n \"Set %s as an input video track \"\n \"with video_processor %s\",\n source_video_track,\n VideoTrack,\n )\n output_track = VideoTrack(\n track=source_video_track, processor=video_processor\n )\n else:\n output_track = source_video_track # passthrough\n\n if output_track:\n logger.info(\"Add a track %s to %s\", output_track, pc)\n pc.addTrack(relay.subscribe(output_track))\n # NOTE: Recording is not supported in this mode\n # because connecting player to recorder does not work somehow;\n # it generates unplayable movie files.\n\n if output_track.kind == \"video\":\n on_track_created(\"output:video\", output_track)\n elif output_track.kind == \"audio\":\n on_track_created(\"output:audio\", output_track)\n\n if video_receiver and video_receiver.hasTrack():\n video_receiver.start()\n if audio_receiver and audio_receiver.hasTrack():\n audio_receiver.start()\n\n if in_recorder:\n await in_recorder.start()\n if out_recorder:\n await out_recorder.start()\n\n answer = await pc.createAnswer()\n await pc.setLocalDescription(answer)\n\n callback(pc.localDescription)\n except Exception as e:\n logger.debug(\"Error occurred in process_offer\")\n logger.debug(e)\n callback(e)\n\n\n# See https://stackoverflow.com/a/42007659\nwebrtc_thread_id_generator = itertools.count()\n\n\nclass WebRtcWorker(Generic[VideoProcessorT, AudioProcessorT]):\n _webrtc_thread: Union[threading.Thread, None]\n _answer_queue: queue.Queue\n _video_processor: Optional[VideoProcessorT]\n _audio_processor: Optional[AudioProcessorT]\n _video_receiver: Optional[VideoReceiver]\n _audio_receiver: Optional[AudioReceiver]\n _input_video_track: Optional[MediaStreamTrack]\n _input_audio_track: Optional[MediaStreamTrack]\n _output_video_track: Optional[MediaStreamTrack]\n _output_audio_track: Optional[MediaStreamTrack]\n\n @property\n def video_processor(self) -> Optional[VideoProcessorT]:\n return self._video_processor\n\n @property\n def audio_processor(self) -> Optional[AudioProcessorT]:\n return self._audio_processor\n\n @property\n def video_receiver(self) -> Optional[VideoReceiver]:\n return self._video_receiver\n\n 
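# Like video_receiver above, populated only when the mode is SENDONLY.\n    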
@property\n def audio_receiver(self) -> Optional[AudioReceiver]:\n return self._audio_receiver\n\n @property\n def input_video_track(self) -> Optional[MediaStreamTrack]:\n return self._input_video_track\n\n @property\n def input_audio_track(self) -> Optional[MediaStreamTrack]:\n return self._input_audio_track\n\n @property\n def output_video_track(self) -> Optional[MediaStreamTrack]:\n return self._output_video_track\n\n @property\n def output_audio_track(self) -> Optional[MediaStreamTrack]:\n return self._output_audio_track\n\n def __init__(\n self,\n mode: WebRtcMode,\n source_video_track: Optional[MediaStreamTrack] = None,\n source_audio_track: Optional[MediaStreamTrack] = None,\n player_factory: Optional[MediaPlayerFactory] = None,\n in_recorder_factory: Optional[MediaRecorderFactory] = None,\n out_recorder_factory: Optional[MediaRecorderFactory] = None,\n video_processor_factory: Optional[\n VideoProcessorFactory[VideoProcessorT]\n ] = None,\n audio_processor_factory: Optional[\n AudioProcessorFactory[AudioProcessorT]\n ] = None,\n async_processing: bool = True,\n video_receiver_size: int = 4,\n audio_receiver_size: int = 4,\n sendback_video: bool = True,\n sendback_audio: bool = True,\n ) -> None:\n self._webrtc_thread = None\n self.pc = RTCPeerConnection()\n self._answer_queue = queue.Queue()\n\n self.mode = mode\n self.source_video_track = source_video_track\n self.source_audio_track = source_audio_track\n self.player_factory = player_factory\n self.in_recorder_factory = in_recorder_factory\n self.out_recorder_factory = out_recorder_factory\n self.video_processor_factory = video_processor_factory\n self.audio_processor_factory = audio_processor_factory\n self.async_processing = async_processing\n self.video_receiver_size = video_receiver_size\n self.audio_receiver_size = audio_receiver_size\n self.sendback_video = sendback_video\n self.sendback_audio = sendback_audio\n\n self._video_processor = None\n self._audio_processor = None\n self._video_receiver = None\n self._audio_receiver = None\n self._input_video_track = None\n self._input_audio_track = None\n self._output_video_track = None\n self._output_audio_track = None\n\n def _run_webrtc_thread(\n self,\n sdp: str,\n type_: str,\n ):\n try:\n self._webrtc_thread_impl(\n sdp=sdp,\n type_=type_,\n )\n except Exception as e:\n logger.warn(\"An error occurred in the WebRTC worker thread: %s\", e)\n self._answer_queue.put(e) # Send the error object to the main thread\n\n def _webrtc_thread_impl(\n self,\n sdp: str,\n type_: str,\n ):\n logger.debug(\n \"_webrtc_thread_impl starts\",\n )\n\n loop = get_server_event_loop()\n asyncio.set_event_loop(loop)\n\n offer = RTCSessionDescription(sdp, type_)\n\n def callback(localDescription):\n self._answer_queue.put(localDescription)\n\n def on_track_created(track_type: TrackType, track: MediaStreamTrack):\n if track_type == \"input:video\":\n self._input_video_track = track\n elif track_type == \"input:audio\":\n self._input_audio_track = track\n elif track_type == \"output:video\":\n self._output_video_track = track\n elif track_type == \"output:audio\":\n self._output_audio_track = track\n\n video_processor = None\n if self.video_processor_factory:\n video_processor = self.video_processor_factory()\n\n audio_processor = None\n if self.audio_processor_factory:\n audio_processor = self.audio_processor_factory()\n\n video_receiver = None\n audio_receiver = None\n if self.mode == WebRtcMode.SENDONLY:\n video_receiver = VideoReceiver(queue_maxsize=self.video_receiver_size)\n audio_receiver = 
AudioReceiver(queue_maxsize=self.audio_receiver_size)\n\n self._video_processor = video_processor\n self._audio_processor = audio_processor\n self._video_receiver = video_receiver\n self._audio_receiver = audio_receiver\n\n relay = get_global_relay()\n\n source_audio_track = None\n source_video_track = None\n if self.player_factory:\n player = self.player_factory()\n if player.audio:\n source_audio_track = relay.subscribe(player.audio)\n if player.video:\n source_video_track = relay.subscribe(player.video)\n else:\n if self.source_video_track:\n source_video_track = relay.subscribe(self.source_video_track)\n if self.source_audio_track:\n source_audio_track = relay.subscribe(self.source_audio_track)\n\n @self.pc.on(\"iceconnectionstatechange\")\n async def on_iceconnectionstatechange():\n iceConnectionState = self.pc.iceConnectionState\n if iceConnectionState == \"closed\" or iceConnectionState == \"failed\":\n self._unset_processors()\n\n loop.create_task(\n _process_offer(\n self.mode,\n self.pc,\n offer,\n relay=relay,\n source_video_track=source_video_track,\n source_audio_track=source_audio_track,\n in_recorder_factory=self.in_recorder_factory,\n out_recorder_factory=self.out_recorder_factory,\n video_processor=video_processor,\n audio_processor=audio_processor,\n video_receiver=video_receiver,\n audio_receiver=audio_receiver,\n async_processing=self.async_processing,\n sendback_video=self.sendback_video,\n sendback_audio=self.sendback_audio,\n callback=callback,\n on_track_created=on_track_created,\n )\n )\n\n def process_offer(\n self, sdp, type_, timeout: Union[float, None] = 10.0\n ) -> RTCSessionDescription:\n self._webrtc_thread = threading.Thread(\n target=self._run_webrtc_thread,\n kwargs={\n \"sdp\": sdp,\n \"type_\": type_,\n },\n daemon=True,\n name=f\"webrtc_worker_{next(webrtc_thread_id_generator)}\",\n )\n self._webrtc_thread.start()\n\n try:\n result = self._answer_queue.get(block=True, timeout=timeout)\n except queue.Empty:\n self.stop(timeout=1)\n raise TimeoutError(\n \"Processing offer and initializing the worker \"\n f\"has not finished in {timeout} seconds\"\n )\n\n if isinstance(result, Exception):\n raise result\n\n return result\n\n def _unset_processors(self):\n self._video_processor = None\n self._audio_processor = None\n if self._video_receiver:\n self._video_receiver.stop()\n self._video_receiver = None\n if self._audio_receiver:\n self._audio_receiver.stop()\n self._audio_receiver = None\n\n def stop(self, timeout: Union[float, None] = 1.0):\n self._unset_processors()\n if self._webrtc_thread:\n self._webrtc_thread.join(timeout=timeout)\n\n\nasync def _test():\n client = RTCPeerConnection()\n client.createDataChannel(\"test\")\n\n offer = await client.createOffer()\n\n webrtc_worker = WebRtcWorker(mode=WebRtcMode.SENDRECV)\n localDescription = webrtc_worker.process_offer(offer.sdp, offer.type)\n\n print(\"localDescription:\")\n print(localDescription)\n\n webrtc_worker.stop()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n\n asyncio.run(_test())\n","sub_path":"streamlit_webrtc/webrtc.py","file_name":"webrtc.py","file_ext":"py","file_size_in_byte":20644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144876432","text":"import numpy as np\n\nfrom layer import *\nfrom network import *\nfrom utility import *\n\ndef getNetwork(fMin = 50, fMax = 100, capitance = 0.5, resistance = 64, vThreshold = 25, tau = 10, minSupervisedCurrent = -1, maxSupervisedCurrent = 1):\n #IN\n 
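#note: builds a 2-neuron Poisson input layer feeding a 2-neuron supervised LIF layer\n    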
#np.float32 fMin: mean firing rate with input 0, in Hz\n #np.float32 fMax: mean firing rate with input 1, in Hz\n #np.float32 capitance: C_m in μF\n #np.float32 resistance: R_m in kΩ\n #np.float32 vThreshold: threshold voltage V_t in mV\n #np.float32 tau: time constant for spike response\n #np.float32 minSupervisedCurrent: min supervised input current with 0, in μA\n #np.float32 maxSupervisedCurrent: max supervised input current with 1, in μA\n #OUT\n #network.supervised SNN: spiking neuron network\n neuronLayerList = []\n neuronLayerList.append(poissonInput(2, fMin = fMin, fMax = fMax))\n neuronLayerList.append(supervisedLIF(2, capitance = capitance, resistance = resistance, vThreshold = vThreshold))\n # neuronLayerList.append(supervisedLIF(1, vThreshold = vThreshold))\n SNN = supervised(neuronLayerList, minSupervisedCurrent, maxSupervisedCurrent, synapseConfig = {'tau': tau})\n return SNN\n\ndef getDataset(layerSize):\n #OUT\n #np.ndarray dataX, dtype = np.float32: input data\n #list dataY [np.ndarray dataYi, dtype = np.float32]: supervised input for each layer\n dataX = np.empty((4, 2), dtype = np.float32)\n dataX[0] = (1, 1)\n dataX[1] = (1, 0)\n dataX[2] = (0, 1)\n dataX[3] = (0, 0)\n\n dataYList = []\n\n dataYList.append([0 for i in range(4)])\n #supervised idata for layer 1. here I use ((~x0) and x1, x0 and (not x1)). Maybe (x0 nand x1, x0 or x1) is better\n dataYList.append(np.array( ((~dataX[:, 0].astype(np.bool) & dataX[:, 1].astype(np.bool)).astype(np.float32),\n (dataX[:, 0].astype(np.bool) & ~dataX[:, 1].astype(np.bool)).astype(np.float32)), dtype = np.float32).transpose())\n # dataYList.append(np.array( ((~dataX[:, 0].astype(np.bool) | ~dataX[:, 1].astype(np.bool)).astype(np.float32),\n # (dataX[:, 0].astype(np.bool) | dataX[:, 1].astype(np.bool)).astype(np.float32)), dtype = np.float32).transpose())\n dataYList.append((dataX[:, 0].astype(np.bool) ^ dataX[:, 1].astype(np.bool)).astype(np.float32))\n # dataY = [(dataY0[i], dataY1[i], dataY2[i]) for i in range(4)]\n dataY = [[dataYList[layer][i] for layer in range(layerSize)] for i in range(4)]\n return dataX, dataY\n\ndef getAverageRate(spikeRateListList, dataSize):\n #IN\n #list spikeRateListList [[np.ndarray spikeRate, dtype = np.float32]]: [[spikeRate for each layer] for each input data] \n #OUT\n #list averageSpikeRateList, [np.ndarray averageSpikeRate, dtype = np.float32]: average spiking rate in last iteration in Hz\n averageSpikeRateList = []\n for layerIdx in range(len(spikeRateListList[0])):\n layerSize = spikeRateListList[0][layerIdx].size\n tempSpikeRateList = np.empty((dataSize, layerSize), dtype = np.float32)\n for dataIdx in range(dataSize):\n tempSpikeRateList[dataIdx] = spikeRateListList[dataIdx][layerIdx]\n\n tempAverageSpikeRate = np.mean(tempSpikeRateList, axis = 0)\n averageSpikeRateList.append(tempAverageSpikeRate)\n return averageSpikeRateList\n\ndef preTrain(SNN, dataX, dataY, forwardTime = 1000):\n #IN\n #network.supervised SNN: spiking neuron network\n #np.ndarray dataX, dtype = np.float32: input data\n #list dataY [np.ndarray dataYi, dtype = np.float32]: supervised input for each layer\n #int forwardTime: time to forward\n #OUT\n #list spikeRateListList [[np.ndarray spikeRate, dtype = np.float32]]: [[spikeRate for each layer] for each input data] \n dataSize = dataX.shape[0]\n idxList = np.array(range(dataSize), dtype = np.int8)\n\n spikeRateListList = []\n for idx in idxList:\n SNN.bcmPreUpdate(dataX[idx], dataY[idx], forwardTime)\n spikeRateListList.append(SNN.spikeRateList)\n return 
spikeRateListList\n\ndef train(SNN, dataX, dataY, iterNum, forwardTime = 1000, learningRate = 0.1, layerConstrainList = None, trainLayerSet = None):\n    #IN\n    #network.supervised SNN: spiking neuron network\n    #np.ndarray dataX, dtype = np.float32: input data\n    #list dataY [np.ndarray dataYi, dtype = np.float32]: supervised input for each layer\n    #int iterNum: number of training iterations\n    #int forwardTime: time to forward\n    #np.float32 learningRate: step size for changing weights\n    #list layerConstrainList [function layerConstrain]: constraints on the weights of each layer\n    #set trainLayerSet: indices of the layers that need training\n    #OUT\n    #network.supervised SNN: trained spiking neuron network\n    dataSize = dataX.shape[0]\n    idxList = np.array(range(dataSize), dtype = np.int8)\n    spikeRateListList = preTrain(SNN, dataX, dataY, forwardTime)\n\n    for iters in range(iterNum):\n        averageSpikeRateList = getAverageRate(spikeRateListList, dataSize)\n        spikeRateListList = []\n        print('iter %d: ' %iters)\n        np.random.shuffle(idxList)\n        for idx in idxList:\n            print('    %d, %d: ' %(dataX[idx, 0].astype(np.int8), dataX[idx, 1].astype(np.int8)), end = '')\n            #forward\n            spikeRate = SNN.bcmPreUpdate(dataX[idx], dataY[idx], forwardTime)\n            spikeRateListList.append(SNN.spikeRateList)\n            print(spikeRate, end = '')\n            # print(SNN.spikeRateList)\n            #update\n            SNN.bcmUpdate(averageSpikeRateList, learningRate, forwardTime, layerConstrainList, trainLayerSet)\n            print(', ', end = '')\n            #predict (for debug)\n            SNN.reset()\n            # SNN.refresh(refreshTime)\n            spike = SNN.batchedPredict(dataX[idx], forwardTime)\n            print(np.sum(spike, axis = 0).astype(np.float32) / forwardTime * 1000)\n            # SNN._printWeight()\n        test(SNN, dataX, iterNum = 1, forwardTime = forwardTime)\n    return SNN\n\ndef test(SNN, dataX, iterNum, forwardTime = 1000, plot = False, legend = True, fn_save = None):\n    #IN\n    #network.supervised SNN: spiking neuron network\n    #np.ndarray dataX, dtype = np.float32: input data\n    #int iterNum: number of test iterations\n    #int forwardTime: time to forward\n    #bool plot: True: plot spike list; False: no plot\n    #bool legend: True: show legend in spike plots\n    #str fn_save: filename stem for saved plots\n    #OUT\n    #np.ndarray testResult, shape = (iterNum, dataSize, neuronNum): firing rates in Hz\n    if fn_save is None:\n        fn_save = 'noName'\n    dataSize = dataX.shape[0]\n    idxList = np.array(range(dataSize), dtype = np.int8)\n\n    testResult = [[None for i in range(dataSize)] for j in range(iterNum)]\n\n    for iters in range(iterNum):\n        print('iter %d: ' %iters)\n        np.random.shuffle(idxList)\n        for idx in idxList:\n            print('    %d, %d: ' %(dataX[idx, 0].astype(np.int8), dataX[idx, 1].astype(np.int8)), end = '')\n            SNN.reset()\n            spike = SNN.batchedPredict(dataX[idx], forwardTime)\n            rate = np.sum(spike, axis = 0).astype(np.float32) / forwardTime * 1000\n            print(rate)\n            testResult[iters][idx] = rate\n            if plot is True:\n                plotSpikeList(SNN.spikeListList, legend = legend, fn_save = fn_save + '.input%d.iter%d' %(idx, iters))\n    testResult = np.array(testResult)\n    return testResult\n\n\ndef layer1Constrain(weight):\n    #IN\n    #np.ndarray weight, shape = (2, 2), dtype = float32: synapse weights\n    #OUT\n    #np.ndarray weight, shape = (2, 2), dtype = float32: synapse weights with constraints applied\n    tempWeight = np.empty_like(weight)\n    tempWeight[0, 0] = (weight[0, 0] + weight[1, 1]) / 2\n    tempWeight[1, 1] = (weight[0, 0] + weight[1, 1]) / 2\n    tempWeight[0, 1] = (weight[0, 1] + weight[1, 0]) / 2\n    tempWeight[1, 0] = (weight[0, 1] + weight[1, 0]) / 2\n    return tempWeight\n\ndef layer2Constrain(weight):\n    #IN\n    #np.ndarray weight, shape = (2, 1), dtype = float32: synapse weights\n    #OUT\n    #np.ndarray weight, shape = (2, 1), dtype = 
float32: synapse weights with constraints applied\n    tempWeight = np.empty_like(weight)\n    tempWeight[0, 0] = (weight[0, 0] + weight[1, 0]) / 2\n    tempWeight[1, 0] = (weight[0, 0] + weight[1, 0]) / 2\n    return tempWeight\n\n\ndef trainLayer1(fMin, fMax, capitance, resistance, vThreshold, tau, minSupervisedCurrent, maxSupervisedCurrent, forwardTime, learningRate):\n    #IN\n    #np.float32 fMin: mean firing rate with input 0, in Hz\n    #np.float32 fMax: mean firing rate with input 1, in Hz\n    #np.float32 capitance: C_m in μF\n    #np.float32 resistance: R_m in kΩ\n    #np.float32 vThreshold: threshold voltage V_t in mV\n    #np.float32 tau: time constant for spike response\n    #np.float32 minSupervisedCurrent: min supervised input current with 0, in μA\n    #np.float32 maxSupervisedCurrent: max supervised input current with 1, in μA\n    #int forwardTime: time to forward\n    #np.float32 learningRate: step size for changing weights\n    #OUT\n    #network.supervised SNN: 2-layer spiking neuron network\n    SNN = getNetwork(fMin, fMax, capitance, resistance, vThreshold, tau, minSupervisedCurrent, maxSupervisedCurrent)\n    dataX, dataY = getDataset(layerSize = 2)\n    for i in range(4):\n        print(dataX[i], dataY[i])\n    SNN = train(SNN, dataX, dataY, iterNum = 19, forwardTime = forwardTime, learningRate = learningRate, layerConstrainList = [layer1Constrain], trainLayerSet = {0})\n    # SNN._printWeight()\n    return SNN\n\ndef trainLayer2(SNN, capitance, resistance, vThreshold, forwardTime, learningRate):\n    #IN\n    #network.supervised SNN: 2-layer spiking neuron network\n    #np.float32 capitance: C_m in μF\n    #np.float32 resistance: R_m in kΩ\n    #np.float32 vThreshold: threshold voltage V_t in mV\n    #int forwardTime: time to forward\n    #np.float32 learningRate: step size for changing weights\n    #OUT\n    #network.supervised SNN: 3-layer spiking neuron network\n    SNN.extend(supervisedLIF(1, capitance = capitance, resistance = resistance, vThreshold = vThreshold))\n    dataX, dataY = getDataset(layerSize = 3)\n    for i in range(4):\n        print(dataX[i], dataY[i])\n    SNN = train(SNN, dataX, dataY, iterNum = 5, forwardTime = forwardTime, learningRate = learningRate, layerConstrainList = [layer1Constrain, layer2Constrain], trainLayerSet = {1})\n    # SNN._printWeight()\n    return SNN\n\n\n\nif __name__ == '__main__':\n    np.random.seed(6983)\n    color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n\n    fMin = 100\n    fMax = 200\n    capitance = 0.5\n    resistance = 64\n    vThreshold = 12\n    tau = 8\n    minSupervisedCurrent = -4\n    maxSupervisedCurrent = 4\n    forwardTime = 500\n    learningRate = 8e-10\n\n    print('\\ntrain layer 1')\n    SNN = trainLayer1(fMin, fMax, capitance, resistance, vThreshold, tau, minSupervisedCurrent, maxSupervisedCurrent, forwardTime, learningRate)\n    print('\\ntrain layer 2')\n    SNN = trainLayer2(SNN, capitance, resistance, vThreshold, forwardTime, learningRate)\n    dataX, _ = getDataset(layerSize = 3)\n    print('\\ntest')\n    fn_save = 'xorBCM'\n    result = test(SNN, dataX, iterNum = 50, forwardTime = 1000, plot = False, fn_save = fn_save)\n\n    iterNum, dataSize, neuronNum = result.shape\n    xAxis = np.arange(dataSize)\n    xIdx = np.argsort(xAxis)\n    for iters in range(iterNum):\n        for idx in range(neuronNum):\n            point = plt.scatter(xAxis[xIdx], result[iters, xIdx, idx], c = color[idx], marker = '.')\n\n    meanResult = np.mean(result, axis = 0)\n    for idx in range(neuronNum):\n        point = plt.scatter(xAxis[xIdx], meanResult[xIdx, idx], c = color[idx], marker = 'o')\n        point.set_label('neuron ' + str(idx))\n\n    plt.xlabel('inputs')\n    plt.ylabel('firing rate')\n    plt.legend(loc = 0)\n    plt.title('xor')\n    
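# lay out, save, and show the aggregate firing-rate figure\n    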
plt.tight_layout()\n plt.savefig('../docs/plots/' + fn_save + '.iter%d' %iters + '.rate.png')\n plt.show()","sub_path":"code/xorBCM.py","file_name":"xorBCM.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436895547","text":"# coding=utf-8\nimport operator\nimport re\nfrom math import __dict__ as functions_dict\n\n# I don't want this functions and variables\n# noinspection SpellCheckingInspection\nunused_functions = (\n 'atan2', 'ceil', 'copysign', 'fmod', 'frexp', 'fsum', 'gcd', 'hypot', 'isclose', 'isfinite', 'isinf', 'isnan',\n 'ldexp', 'modf', 'pow', 'trunc', 'nan')\nfunctions_dict = functions_dict\nfor function in unused_functions:\n try:\n del functions_dict[function]\n except KeyError:\n continue\nfunctions_dict['inf'] = float('inf')\n\n\nclass Expression(object):\n def __init__(self, express):\n self.__expression = express\n self.__value = None\n self.__proceed()\n\n def __str__(self):\n # noinspection PyStringFormat\n return ('%.30f' % self.__value).rstrip('0').rstrip('.')\n\n def get_express(self):\n return self.__expression\n\n def __proceed(self):\n self.__verify_characters()\n self.__input_multiplying()\n self.__verify_order()\n self.__parse()\n self.__to_poland_inverse_notation()\n self.__evaluate()\n\n def __to_poland_inverse_notation(self):\n operands = ['#']\n output = []\n priorities = {r'^': 4, r'*': 3, r'/': 3, r'-': 2, r'+': 2, r'#': 0}\n\n # Processing expression\n digit_expr = re.compile(r'\\-?\\d+\\.?\\d*')\n for char in self.__expression:\n if digit_expr.match(char): # digits\n output.append(char)\n elif char in functions_dict: # Pi, euler constant and inf\n output.append(char)\n elif char[:-1] in functions_dict: # Functions\n operands.append('(')\n operands.append(char[:-1])\n elif char in priorities: # Math operands\n while priorities.get(operands[-1], 1) >= priorities[char]:\n output.append(operands.pop())\n operands.append(char)\n elif char == '(': # Obvious\n operands.append(char)\n elif char == ')': # Same\n while operands[-1] != '(':\n if operands[-1] == '#':\n raise ValueError\n output.append(operands.pop())\n operands.pop()\n else: # Wrong characters\n raise ValueError\n\n # Gathering final result\n for op in operands[::-1]:\n if op == '(': # Wrong braces in the expression\n raise ValueError\n elif op == '#': # Everything is ok\n break\n else:\n output.append(operands.pop())\n self.__expression = output\n\n def __parse(self):\n # noinspection PyTypeChecker\n self.__expression = re.compile(r'(\\d+)\\s*\\-\\s*(\\d+\\.?\\d*)').sub(r'\\1+(-\\2)', self.__expression)\n regex = re.compile(r'\\-?\\d+\\.?\\d*|\\+|-|\\*|\\^|/|\\(|\\)|\\w+\\(|e|pi|inf')\n\n temp = self.__expression\n self.__expression = regex.findall(self.__expression)\n # noinspection PyUnresolvedReferences\n # self.__expression is str when this method is invoked\n if len(''.join(self.__expression)) != len(temp.replace(' ', '')): # If cannot parse all expression\n raise ValueError\n self.__expression.append(')')\n self.__expression.insert(0, '(')\n\n def __verify_characters(self):\n verify_regex = re.compile(r'[^a-z0-9\\+\\-\\*/\\^\\(\\)\\.\\s ]')\n if verify_regex.search(self.__expression):\n raise ValueError\n\n def __verify_order(self):\n verify_regex = re.compile(\n r'[\\+\\-\\*\\^/]{2,}|' # Two or more operators in a row\n r'\\(\\s*\\)|' # Empty braces\n r'[\\+\\-\\*/]\\s*\\)|' # Operator without a number\n r'\\(\\s*[\\+\\*\\^/]|' # Same here\n r'\\)\\s*[a-z0-9]|'\n r'[a-zA-Z]+\\s*\\d+',\n 
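# a function or constant name directly followed by a number\n            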
re.VERBOSE)\n if verify_regex.search(''.join(self.__expression)):\n raise ValueError\n\n def __input_multiplying(self):\n\n missing_multiply_regex = re.compile(r'(\\d+)\\s*(\\(|[a-z]+)')\n self.__expression = missing_multiply_regex.sub(r'\\1*\\2', self.__expression)\n\n missing_multiply_regex = re.compile(r'\\)\\s*\\(')\n self.__expression = missing_multiply_regex.sub(r')*(', self.__expression)\n\n missing_multiply_regex = re.compile(r'(pi|e|inf)\\(')\n self.__expression = missing_multiply_regex.sub(r'\\1*(', self.__expression)\n\n def __evaluate(self):\n stack = []\n operands = {'+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv,\n '^': operator.pow}\n digit_expr = re.compile(r'\\-?\\d+\\.?\\d*')\n for char in self.__expression:\n if digit_expr.match(char):\n stack.append(float(char))\n elif char == 'pi' or char == 'e' or char == 'inf':\n stack.append(functions_dict[char])\n elif char in operands:\n stack.append(operands[char](stack.pop(-2), stack.pop()))\n elif char in functions_dict:\n stack.append(functions_dict[char](stack.pop()))\n if isinstance(stack[0], complex):\n raise FloatingPointError\n else:\n self.__value = stack[0]\n","sub_path":"calculator/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331447281","text":"import logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import Tieba, Column, Account, Area\n\nlogger = logging.getLogger('tieba')\n\nclass ChoiceFieldNoValidation(forms.ChoiceField):\n def validate(self, value):\n pass\n\n\nclass TiebaForm(forms.Form):\n\n TIEBA_SITE_CHOICES = (\n (tieba['site'], _(\"{}\".format(tieba['site'])))\n for tieba in Tieba.objects.values('site').distinct()\n )\n COLUMN_SITE_CHOICES = (\n (column['id'], _(\"{}\".format(column['column'])))\n for column in Column.objects.values('id', 'column').distinct()\n )\n AREA_SITE_CHOICES = (\n (area['id'], _(\"{}\".format(area['prov_city'])))\n for area in Area.objects.values('id', 'prov_city').distinct()\n )\n ACCOUNT_SITE_CHOICES = (\n (tieba['id'], _(\"{}\".format(tieba['username'])))\n for tieba in Account.objects.values('id', 'username').distinct()\n )\n WECHAT_ACCOUNT_CHOICES = (\n ('190265939', _('190265939')),\n ('2533651272', _('2533651272')),\n ('774988330', _('b774988330')),\n # ('515550681', _('515550681')),\n )\n CATEGORY_CHOICES = (\n ('手表', _('手表')),\n ('包包', _('包包')),\n )\n\n title = forms.CharField(label='帖文标题',\n widget=forms.Textarea(attrs={\n \"maxlength\": \"500\",\"rows\": \"10\",\n \"style\": \"margin: 0px 0px 9px; width: 543px; height: 144px;\"\n }),\n max_length=500,\n error_messages={'required': '帖文标题不能为空.'})\n site = forms.ChoiceField(choices=TIEBA_SITE_CHOICES,\n required=True,\n label='选择贴吧',\n widget=forms.Select(attrs={'class':'select', 'onChange':'getColumnAndAccountOptions(this.value)'}))\n column = forms.ChoiceField(required=True, widget=forms.Select(),\n label=u'选择栏目', choices=COLUMN_SITE_CHOICES)\n area = forms.ChoiceField(required=False, widget=forms.Select(),\n label=u'选择区域', choices=AREA_SITE_CHOICES)\n account = forms.ChoiceField(required=True, widget=forms.Select(),\n label=u'选择账号', choices=ACCOUNT_SITE_CHOICES)\n wechat = forms.ChoiceField(choices=WECHAT_ACCOUNT_CHOICES,\n required=True, widget=forms.Select())\n category = forms.ChoiceField(choices=CATEGORY_CHOICES,\n required=True, widget=forms.Select())\n is_editing = 
forms.BooleanField(initial=False, required=False,\n widget=forms.HiddenInput)\n\n def __init__(self, *args, **kwargs):\n super(TiebaForm, self).__init__(*args, **kwargs)\n for x in [ 'title', 'site', 'column', 'area',\n 'account', 'wechat', 'category']:\n self.fields[x].widget.attrs['class'] = 'required'\n self.fields['site'].choices = [(\"\", \"{flag} 请选择贴吧 {flag}\".format(flag='-'*20))] + list(self.fields['site'].choices)\n self.fields['column'].choices = [(\"\", \"\".format(flag='-'*20))] + list(self.fields['column'].choices)\n self.fields['area'].choices = [(\"\", \"\".format(flag='-'*20))] + list(self.fields['area'].choices)\n self.fields['account'].choices = [(\"\", \"\".format(flag='-'*20))] + list(self.fields['account'].choices)\n\n","sub_path":"src/main/python/dashboard/tieba/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"98757997","text":"import unittest\nfrom typing import Any, Dict, List, Union\n\n\ndef sample_ontology() -> Dict[str, Any]:\n return {\n \"tools\": [{\n \"required\": False,\n \"name\": \"Dog\",\n \"color\": \"#FF0000\",\n \"tool\": \"rectangle\",\n \"classifications\": []\n }],\n \"classifications\": [{\n \"required\":\n True,\n \"instructions\":\n \"This is a question.\",\n \"name\":\n \"this_is_a_question.\",\n \"type\":\n \"radio\",\n \"options\": [{\n \"label\": \"Yes\",\n \"value\": \"yes\"\n }, {\n \"label\": \"No\",\n \"value\": \"no\"\n }]\n }]\n }\n\n\ndef test_create_ontology(client, project) -> None:\n \"\"\" Tests that the ontology that a project was set up with can be grabbed.\"\"\"\n frontend = list(client.get_labeling_frontends())[0]\n project.setup(frontend, sample_ontology())\n normalized_ontology = project.ontology().normalized\n\n def _remove_schema_ids(\n ontology_part: Union[List, Dict[str, Any]]) -> Dict[str, Any]:\n \"\"\" Recursively scrub the normalized ontology of any schema information.\"\"\"\n removals = {'featureSchemaId', 'schemaNodeId'}\n\n if isinstance(ontology_part, list):\n return [_remove_schema_ids(part) for part in ontology_part]\n if isinstance(ontology_part, dict):\n return {\n key: _remove_schema_ids(value)\n for key, value in ontology_part.items()\n if key not in removals\n }\n return ontology_part\n\n removed = _remove_schema_ids(normalized_ontology)\n assert removed == sample_ontology()\n\n ontology = project.ontology()\n\n tools = ontology.tools()\n assert tools\n for tool in tools:\n assert tool.feature_schema_id\n assert tool.schema_node_id\n\n classifications = ontology.classifications()\n assert classifications\n for classification in classifications:\n assert classification.feature_schema_id\n assert classification.schema_node_id\n for option in classification.options:\n assert option.feature_schema_id\n assert option.schema_node_id\n","sub_path":"tests/integration/test_ontology.py","file_name":"test_ontology.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337692353","text":"\" An entry set class for acquiring entries with Gibbs formation energies\"\nfrom typing import List, Optional, Union\n\nfrom monty.json import MontyDecoder, MSONable\nfrom pymatgen.analysis.phase_diagram import PhaseDiagram\nfrom pymatgen.core import Composition\nfrom pymatgen.entries.computed_entries import (\n ComputedEntry,\n ComputedStructureEntry,\n ConstantEnergyAdjustment,\n)\nfrom 
pymatgen.entries.entry_tools import EntrySet\nfrom tqdm.auto import tqdm\n\nfrom rxn_network.entries.gibbs import GibbsComputedEntry\nfrom rxn_network.entries.nist import NISTReferenceEntry\nfrom rxn_network.thermo.utils import expand_pd\n\n\nclass GibbsEntrySet(EntrySet):\n \"\"\"\n An extension of pymatgen's EntrySet to include factory methods for constructing\n GibbsComputedEntry objects from zero-temperature ComputedStructureEntry objects.\n \"\"\"\n\n def __init__(self, entries: List[Union[GibbsComputedEntry, NISTReferenceEntry]]):\n \"\"\"\n The supplied collection of entries will automatically be converted to a set of\n unique entries.\n\n Args:\n entries: A collection of entry objects that will make up the entry set.\n \"\"\"\n super().__init__(entries)\n self.entries_list = list(entries)\n self.build_indices()\n\n def filter_by_stability(\n self, e_above_hull: float, include_polymorphs: Optional[bool] = False\n ) -> \"GibbsEntrySet\":\n \"\"\"\n Filter the entry set by a metastability (energy above hull) cutoff.\n\n Args:\n e_above_hull: Energy above hull, the cutoff describing the allowed\n metastability of the entries as determined via phase diagram\n construction.\n include_polymorphs: optional specification of whether to include\n metastable polymorphs. Defaults to False.\n\n Returns:\n A new GibbsEntrySet where the entries have been filtered by an energy\n cutoff (e_above_hull) via phase diagram construction.\n \"\"\"\n pd_dict = expand_pd(self.entries)\n\n filtered_entries = set()\n all_comps = dict()\n\n for chemsys, pd in pd_dict.items():\n for entry in pd.all_entries:\n if (\n entry in filtered_entries\n or pd.get_e_above_hull(entry) > e_above_hull\n ):\n continue\n\n formula = entry.composition.reduced_formula\n if not include_polymorphs and (formula in all_comps):\n if all_comps[formula].energy_per_atom < entry.energy_per_atom:\n continue\n filtered_entries.remove(all_comps[formula])\n\n all_comps[formula] = entry\n filtered_entries.add(entry)\n\n return self.__class__(filtered_entries)\n\n def build_indices(self):\n for idx, e in enumerate(self.entries_list):\n e.data.update({\"idx\": idx})\n\n def get_min_entry_by_formula(self, formula: str) -> ComputedEntry:\n \"\"\"\n Helper method for acquiring the ground state entry with the specified formula.\n\n Args:\n formula: The chemical formula of the desired entry.\n\n Returns:\n Ground state computed entry object.\n \"\"\"\n comp = Composition(formula).reduced_composition\n possible_entries = filter(\n lambda x: x.composition.reduced_composition == comp, self.entries\n )\n return sorted(possible_entries, key=lambda x: x.energy_per_atom)[0]\n\n def stabilize_entry(self, entry: ComputedEntry, tol: float = 1e-6) -> ComputedEntry:\n \"\"\"\n Helper method for lowering the energy of a single entry such that it is just\n barely stable on the phase diagram.\n\n Args:\n entry: A computed entry object.\n tol: The numerical padding added to the energy correction to guarantee\n that it is determined to be stable during phase diagram construction.\n\n Returns:\n A new ComputedEntry with energy adjustment making it appear to be stable.\n \"\"\"\n chemsys = [str(e) for e in entry.composition.elements]\n entries = self.get_subset_in_chemsys(chemsys)\n pd = PhaseDiagram(entries)\n e_above_hull = pd.get_e_above_hull(entry)\n\n if e_above_hull == 0.0:\n new_entry = entry\n else:\n e_adj = -1 * pd.get_e_above_hull(entry) * entry.composition.num_atoms - tol\n adjustment = ConstantEnergyAdjustment(\n value=e_adj,\n name=\"Stabilization 
Adjustment\",\n description=\"Shifts energy so that \" \"entry is on the convex hull\",\n )\n\n entry_dict = entry.as_dict()\n entry_dict[\"energy_adjustments\"].append(adjustment)\n new_entry = MontyDecoder().process_decoded(entry_dict)\n\n return new_entry\n\n @classmethod\n def from_pd(cls, pd: PhaseDiagram, temperature: float) -> \"GibbsEntrySet\":\n \"\"\"\n Constructor method for building a GibbsEntrySet from an existing phase diagram.\n\n Args:\n pd: Phase Diagram object (pymatgen)\n temperature: Temperature [K] for determining Gibbs Free Energy of\n formation, dGf(T)\n\n Returns:\n A GibbsEntrySet containing a collection of GibbsComputedEntry and\n experimental reference entry objects at the specified temperature.\n\n \"\"\"\n gibbs_entries = []\n for entry in pd.all_entries:\n if entry.composition.is_element and entry not in pd.el_refs.values():\n continue\n composition = entry.composition\n\n if composition.reduced_formula in NISTReferenceEntry.REFERENCES:\n new_entry = NISTReferenceEntry(\n composition=composition, temperature=temperature\n )\n else:\n structure = entry.structure\n formation_energy_per_atom = pd.get_form_energy_per_atom(entry)\n\n new_entry = GibbsComputedEntry.from_structure(\n structure=structure,\n formation_energy_per_atom=formation_energy_per_atom,\n temperature=temperature,\n energy_adjustments=None,\n parameters=entry.parameters,\n data=entry.data,\n entry_id=entry.entry_id,\n )\n\n gibbs_entries.append(new_entry)\n\n return cls(gibbs_entries)\n\n @classmethod\n def from_entries(\n cls, entries: List[ComputedStructureEntry], temperature: float\n ) -> \"GibbsEntrySet\":\n \"\"\"\n Constructor method for initializing GibbsEntrySet from T = 0 K\n ComputedStructureEntry objects, as acquired from a thermochemical\n database e.g. The Materials Project. 
Automatically expands the phase\n        diagram for large chemical systems (10 or more elements) to avoid limitations\n        of Qhull.\n\n        Args:\n            entries: List of ComputedStructureEntry objects, as downloaded from The\n                Materials Project API.\n            temperature: Temperature for estimating Gibbs free energy of formation [K]\n\n        Returns:\n            A GibbsEntrySet containing a collection of GibbsComputedEntry and\n            experimental reference entry objects at the specified temperature.\n        \"\"\"\n        e_set = EntrySet(entries)\n        new_entries = set()\n        if len(e_set.chemsys) <= 9:  # Qhull algorithm struggles beyond 9 dimensions\n            pd = PhaseDiagram(e_set)\n            return cls.from_pd(pd, temperature)\n\n        pd_dict = expand_pd(list(e_set))\n        for chemsys, pd in tqdm(pd_dict.items()):\n            gibbs_set = cls.from_pd(pd, temperature)\n            new_entries.update(gibbs_set)\n\n        return cls(new_entries)\n","sub_path":"y2mn2o7_selectivity/reaction-network/src/rxn_network/entries/entry_set.py","file_name":"entry_set.py","file_ext":"py","file_size_in_byte":7857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"220835251","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author = 'huhuhushan'\n\nimport Image\nimport ImageFont\nimport ImageDraw\n\nim = Image.open(\"1.png\")\nim = im.convert('RGB')\nd = ImageDraw.Draw(im)\nd.setink(256 + 255 + 256 * 256)\nFont = ImageFont.truetype('/usr/share/fonts/truetype/limelight-elementary/Limelight.ttf',40)\nimageL, imageH = im.size\n#for i in range(128):\ni = 20\nd.text((imageL - i, imageH - 120), '1', font = Font)\nim.save('new.jpeg')\n","sub_path":"addnum2pic.py","file_name":"addnum2pic.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"234339890","text":"from datetime import datetime\nimport enchant\nimport twitter\nimport pytz\n\n\nfrom django.conf import settings\n\nutc = pytz.UTC\n\n\nclass TwitterTypo(object):\n    \"\"\"Main class that handles all the twitter tweet optimisation and typo checking\"\"\"\n\n    def __init__(self, battles):\n        self.battles = battles\n\n        self.api = twitter.Api(\n            consumer_key=settings.TWITTER_CONSUMER_KEY,\n            consumer_secret=settings.TWITTER_CONSUMER_SECRET,\n            access_token_key=settings.TWITTER_ACCESS_TOKEN_KEY,\n            access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET\n        )\n\n    def fetch_tweets(self, b):\n        \"\"\"Get the tweets for a single battle using the twitter API\"\"\"\n        hashtag_1 = b.hashtag_1.hashtag\n        hashtag_2 = b.hashtag_2.hashtag\n        since = b._since\n        until = b._until\n        s1 = self.api.GetSearch(\n            term=hashtag_1,\n            since=since,\n            until=until,\n        )\n\n        s2 = self.api.GetSearch(\n            term=hashtag_2,\n            since=since,\n            until=until\n        )\n        print(\"{} tweets found for hashtag 1\".format(len(s1)))\n        print(\"{} tweets found for hashtag 2\".format(len(s2)))\n        return s1, s2\n\n    def filter_by_time(self, tweet_set, start_datetime, end_datetime):\n        \"\"\"Filter out tweets that are not within our battle start and end time\"\"\"\n        t = []\n\n        for tweet in tweet_set:\n            d = tweet.created_at\n            d_parsed = datetime.strptime(d, '%a %b %d %H:%M:%S +0000 %Y')\n            d_localized = utc.localize(d_parsed)\n\n            if start_datetime <= d_localized <= end_datetime:\n                t.append(tweet)\n\n        return t\n\n    def count_typos(self, tweet_set):\n        \"\"\"Function to count the number of typos in a list of tweets\"\"\"\n        d = enchant.Dict(\"en_GB\")\n        tweets = [tweet.text for tweet in tweet_set]\n\n        typo_count = []\n        for t in tweets:\n            t_list = t.split(' ')\n            for word in t_list:\n                if '#' in word:\n                    # skip\n
                    continue\n                elif 'https://' in word:\n                    continue\n                elif not word:\n                    continue\n                else:\n                    typo_count.append(d.check(word))\n        return typo_count.count(False)\n\n    def parse_battles(self):\n        \"\"\"Main function that fetches tweets and calculates typos\"\"\"\n        for b in self.battles:\n            s1, s2 = self.fetch_tweets(b)\n\n            s1_filtered = self.filter_by_time(s1, b.start_time, b.end_time)\n            s2_filtered = self.filter_by_time(s2, b.start_time, b.end_time)\n\n            print(\"number of filtered results is {}\".format(len(s1_filtered)))\n\n            count_s1 = self.count_typos(s1_filtered)\n            count_s2 = self.count_typos(s2_filtered)\n            print(\"{} typos for hashtag_1\".format(count_s1))\n            print(\"{} typos for hashtag_2\".format(count_s2))\n\n            b.hashtag_1_typos = count_s1\n            b.hashtag_2_typos = count_s2\n            b.results_calculated = True\n            b.is_complete = True\n            b.generate_result()\n            b.save()\n","sub_path":"both/battle/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"490351287","text":"import Tkinter as tk\nimport tkFont\nimport ttk\nimport settings\nfrom PIL import Image, ImageTk\nimport pdb\n\nclass config_highlevel:\n\tdef __init__(self): \n\t\tconfig_highlevel = tk.Toplevel()\n\t\tconfig_highlevel.configure(bg = settings.back_color)\n\t\tconfig_highlevel.minsize(width = 900, height = 600)\n\t\tconfig_highlevel.columnconfigure(0, weight = 1)\n\n\t\tself.title_font_size = 8\n\t\tself.subtitle_font_size = 6 \n\n\t\tself.title_font = tkFont.Font(family = 'Consolas', size = self.title_font_size)\n\t\tself.subtitle_font = tkFont.Font(family = 'Consolas', size = self.subtitle_font_size)\n\t\tself.led_control_registers(config_highlevel)\n\n\tdef led_control_registers(self, master):\n\t\tself.led_control_registers = tk.LabelFrame(master, text = \"LED CONTROL REGISTERS\", \n\t\t\tfg= settings.font_color, bg = settings.back_color)\n\t\tself.led_control_registers.grid(row = 0, column = 0, \n\t\t\tsticky = tk.N + tk.S + tk.W + tk.E)\n\n\t\tself.reg0x14\n\t\tself.reg0x22\n\t\tself.reg0x23\n\t\tself.reg0x24\n\t\tself.reg0x25\n\t\tself.reg0x30\n\t\tself.reg0x31\n\t\tself.reg0x34\n\t\tself.reg0x35\n\t\tself.reg0x36\n\n\tdef afe_global_config_registers(self, master):\n\t\tself.afe_global_config_registers = tk.LabelFrame(master, text = \"AFE GLOBAL CONFIG REGISTERS\", \n\t\t\tfg= settings.font_color, bg = settings.back_color)\n\t\tself.afe_global_config_registers.grid(row = 1, column = 0, \n\t\t\tsticky = tk.N + tk.S + tk.W + tk.E)\n\n\t\tself.reg0x37\n\t\tself.reg0x3C\n\t\tself.reg0x54\n\t\tself.reg0x58\n\t\tself.reg0x5A\n\t\t#slot A\n\t\tself.reg0x39\n\t\tself.reg0x42\n\t\tself.reg0x43\n\t\tself.reg0x55\n\t\tself.reg0x5A\n\t\t#slot B\n\t\tself.reg0x3B\n\t\tself.reg0x44\n\t\tself.reg0x45\n\t\tself.reg0x58\n\n\n\tdef system_registers(self, master):\n\n\t\tself.reg0x00\n\t\tself.reg0x01\n\t\tself.reg0x02\n\t\tself.reg0x06\n\t\tself.reg0x08\n\t\tself.reg0x09\n\t\tself.reg0x0A\n\t\tself.reg0x0B\n\t\tself.reg0x0D\n\t\tself.reg0x0F\n\t\tself.reg0x10\n\t\tself.reg0x11\n\t\tself.reg0x38\n\t\tself.reg0x4B\n\t\tself.reg0x4D\n\t\tself.reg0x4E\n\t\tself.reg0x4F\n\t\tself.reg0x50\n\t\tself.reg0x5F\n\n\tdef adc_registers(self, 
master):\n\n\t\tself.reg0x12\n\t\tself.reg0x15\n\t\tself.reg0x18\n\t\tself.reg0x19\n\t\tself.reg0x1A\n\t\tself.reg0x1B\n\t\tself.reg0x1E\n\t\tself.reg0x1F\n\t\tself.reg0x02\n\t\tself.reg0x21","sub_path":"heartbox_source/config_registerlevel.py","file_name":"config_registerlevel.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488723387","text":"import json\nimport requests\n\nCOMMENTS_URL = \"https://vikvok-anldg2io3q-ew.a.run.app/comments/original/{}\"\nUSERS_URL = \"https://vikvok-anldg2io3q-ew.a.run.app/users/{}\"\n\n\ndef original_voice_comments(request):\n request_json = request.get_json(silent=True)\n request_args = request.args\n if request_json and \"voiceId\" in request_json:\n voice_id = request_json[\"voiceId\"]\n elif request_args and \"voiceId\" in request_args:\n voice_id = request_args[\"voiceId\"]\n else:\n return \"voiceId not found!\"\n\n comments_json = requests.get(COMMENTS_URL.format(voice_id)).json()\n for i, comment in enumerate(comments_json):\n user_id = comment[\"userId\"]\n user = requests.get(USERS_URL.format(user_id)).json()\n del comments_json[i][\"userId\"]\n comments_json[i][\"user\"] = user\n\n return json.dumps(comments_json)\n","sub_path":"functions/voice/original/id/comments/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458723361","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"HaNa\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000\n\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.load(\"RecoMET/METProducers.METSignificance_cfi\")\nprocess.load(\"RecoMET/METProducers.METSignificanceParams_cfi\")\n##____________________________________________________________________________||\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load(\"JetMETCorrections.Modules.JetResolutionESProducer_cfi\")\nfrom CondCore.DBCommon.CondDBSetup_cfi import *\n\nprocess.jer = cms.ESSource(\"PoolDBESSource\",\n CondDBSetup,\n toGet = cms.VPSet(\n # Pt Resolution\n cms.PSet(\n record = cms.string('JetResolutionRcd'),\n tag = cms.string('JR_MC_PtResolution_Summer15_25nsV6_AK4PFchs'),\n label = cms.untracked.string('AK4PFchs_pt')\n ),\n\n # Phi Resolution\n cms.PSet(\n record = cms.string('JetResolutionRcd'),\n tag = cms.string('JR_MC_PhiResolution_Summer15_25nsV6_AK4PFchs'),\n label = cms.untracked.string('AK4PFchs_phi')\n ),\n\n # Scale factors\n cms.PSet(\n record = cms.string('JetResolutionScaleFactorRcd'),\n tag = cms.string('JR_DATAMCSF_Summer15_25nsV6_AK4PFchs'),\n label = cms.untracked.string('AK4PFchs')\n ),\n ),\n connect = cms.string('sqlite:Summer15_25nsV6.db')\n )\n\nprocess.es_prefer_jer = cms.ESPrefer('PoolDBESSource', 'jer')\n\n##____________________________________________________________________________||\nprocess.TFileService = cms.Service(\"TFileService\", fileName = cms.string(\"histo.root\") )\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to 
use\n                            fileNames = cms.untracked.vstring()\n)\n\nprocess.load(\"Haamm.HaNaMiniAnalyzer.Hamb_cfi\")\n#process.TTH \n\nimport FWCore.ParameterSet.VarParsing as opts\noptions = opts.VarParsing('analysis')\noptions.register('sync',\n                 0,\n                 opts.VarParsing.multiplicity.singleton,\n                 opts.VarParsing.varType.int,\n                 \"\")\noptions.register('sample',\n                 'TTbar',\n                 opts.VarParsing.multiplicity.singleton,\n                 opts.VarParsing.varType.string,\n                 'Sample to analyze')\noptions.register('job',\n                 0,\n                 opts.VarParsing.multiplicity.singleton,\n                 opts.VarParsing.varType.int,\n                 \"number of the job\")\noptions.register('nFilesPerJob',\n                 1,\n                 opts.VarParsing.multiplicity.singleton,\n                 opts.VarParsing.varType.int,\n                 \"number of files per job\")\noptions.register('output',\n                 \"out\",\n                 opts.VarParsing.multiplicity.singleton,\n                 opts.VarParsing.varType.string,\n                 \"could be root://eoscms//eos/cms/store/user/hbakhshi/out\")\n\noptions.parseArguments()\n\n\ntheSample = None\nimport os\n\nif options.sync == 0 :\n    from Samples76.Samples import MiniAOD76Samples as samples\n\n\n    for sample in samples:\n        if sample.Name == options.sample :\n            theSample = sample\n\n    if theSample is None:\n        raise NameError(\"Sample with name %s wasn't found\" % (options.sample))\n\n    if not theSample.Name == options.sample:\n        raise NameError(\"The correct sample is not found %s != %s\" % (theSample.Name , options.sample) )\nelse:\n    from Haamm.HaNaMiniAnalyzer.Sample import *\n    theSample = Sample( \"Sync\" , \"Sync\" , 100 , False , 0 , \"\" )\n    theSample.Files = ['/store/mc/RunIIFall15MiniAODv2/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12_ext3-v1/00000/0C5BB11A-E2C1-E511-8C02-002590A831B6.root']\n    options.nFilesPerJob = 1\n    options.output = \"out\" \n    options.job = 0\n\nprocess.Hamb.sample = theSample.Name\nprocess.Hamb.LHE.useLHEW = theSample.LHEWeight\nprocess.Hamb.isData = theSample.IsData\n\nif not ( options.job < theSample.MakeJobs( options.nFilesPerJob , options.output ) ):\n    raise NameError(\"Job %d is not in the list of the jobs of sample %s with %d files per run\" % (options.job , options.sample , options.nFilesPerJob ) )\njob = theSample.Jobs[ options.job ]\n\nprocess.source.fileNames.extend( job.Inputs )\nprocess.TFileService.fileName = job.Output\n\nprocess.maxEvents.input = options.maxEvents\n\nif theSample.IsData :\n    import FWCore.PythonUtilities.LumiList as LumiList\n    process.source.lumisToProcess = LumiList.LumiList(filename = (process.Hamb.SetupDir.value() + '/JSON.txt')).getVLuminosityBlockRange()\n    process.GlobalTag.globaltag = '76X_dataRun2_v15'\n    process.p = cms.Path( process.METSignificance + process.Hamb )\n    for v in range(0 , 10 ):\n        process.Hamb.HLT.HLT_To_Or.append( 'HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v%d' % (v) )\n        process.Hamb.HLT.HLT_To_Or.append( 'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v%d' % (v) )\n\nelse :\n    process.GlobalTag.globaltag = '76X_dataRun2_16Dec2015_v0' #76X_mcRun2_asymptotic_RunIIFall15DR76_v1\n    from PhysicsTools.PatAlgos.producersLayer1.jetUpdater_cff import *\n    process.patJetCorrFactorsReapplyJEC = updatedPatJetCorrFactors.clone(\n        src = cms.InputTag(\"slimmedJets\"),\n        levels = ['L1FastJet', \n                  'L2Relative', \n                  'L3Absolute'],\n        payload = 'AK4PFchs' ) # Make sure to choose the appropriate levels and payload here!\n\n    process.patJetsReapplyJEC = updatedPatJets.clone(\n        jetSource = cms.InputTag(\"slimmedJets\"),\n        jetCorrFactorsSource = cms.VInputTag(cms.InputTag(\"patJetCorrFactorsReapplyJEC\"))\n        )\n\n    process.Hamb.Jets.Input = \"patJetsReapplyJEC\"\n
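    # the MET-significance producer must see the same re-corrected jet collection\n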
    process.METSignificance.srcPfJets = \"patJetsReapplyJEC\"\n    process.p = cms.Path( process.patJetCorrFactorsReapplyJEC + process.patJetsReapplyJEC + process.METSignificance + process.Hamb)\n    if options.sync == 0 :\n        for v in range(0 , 10 ):\n            process.Hamb.HLT.HLT_To_Or.append( 'HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v%d' % (v) )\n            process.Hamb.HLT.HLT_To_Or.append( 'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v%d' % (v) )\n\n","sub_path":"test/Hamb_cfg.py","file_name":"Hamb_cfg.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"21165110","text":"from app.api.abstract_facade import JSONAPIAbstractFacade\nfrom app.models import Document\n\n\n# decorator for test purposes\ndef decorator_function_with_arguments(arg1, arg2, arg3):\n    def wrap(f):\n        print(\"Wrapping\", f)\n        def wrapped_f(*args, **kwargs):\n            print(\"Inside wrapped_f()\")\n            res = f(*args, **kwargs)\n            return res\n        return wrapped_f\n    return wrap\n\n\nclass DocumentFacade(JSONAPIAbstractFacade):\n    \"\"\"\n\n    \"\"\"\n    TYPE = \"document\"\n    TYPE_PLURAL = \"documents\"\n\n    @property\n    def id(self):\n        return self.obj.id\n\n    @staticmethod\n    @decorator_function_with_arguments(\"decorated\", \"resource\", \"!\")\n    def get_resource_facade(url_prefix, doc_id, **kwargs):\n        e = Document.query.filter(Document.id == doc_id).first()\n        if e is None:\n            kwargs = {\"status\": 404}\n            errors = [{\"status\": 404, \"title\": \"document %s does not exist\" % doc_id}]\n        else:\n            e = DocumentFacade(url_prefix, e, **kwargs)\n            kwargs = {}\n            errors = []\n        return e, kwargs, errors\n\n    @staticmethod\n    def create_resource(model, obj_id, attributes, related_resources):\n        if \"origin-date-range-label\" in attributes:\n            attributes[\"origin_date\"] = attributes.pop(\"origin-date-range-label\")\n        return JSONAPIAbstractFacade.create_resource(model, obj_id, attributes, related_resources)\n\n    @property\n    def resource(self):\n        resource = {\n            **self.resource_identifier,\n            \"attributes\": {\n                \"id\": self.obj.id,\n                \"title\": self.obj.title,\n                \"subtitle\": self.obj.subtitle,\n                \"origin-date-id\": self.obj.origin_date.id if self.obj.origin_date else None\n            },\n            \"meta\": self.meta,\n            \"links\": {\n                \"self\": self.self_link\n            }\n        }\n\n        if self.obj.origin_date:\n            resource[\"attributes\"][\"origin-date-range-label\"] = self.obj.origin_date.range_label\n\n        if self.with_relationships_links:\n            resource[\"relationships\"] = self.get_exposed_relationships()\n\n        return resource\n\n    def __init__(self, *args, **kwargs):\n        super(DocumentFacade, self).__init__(*args, **kwargs)\n        \"\"\"Make a JSONAPI resource object describing what a document is\n\n        A document is made of:\n        attributes:\n            id:\n            name:\n        relationships:\n            editors\n        Returns\n        -------\n        A dict describing the corresponding JSONAPI resource object\n        \"\"\"\n\n        from app.api.editor.facade import EditorFacade\n        self.relationships = {\n            \"editors\": {\n                \"links\": self._get_links(rel_name=\"editors\"),\n                \"resource_identifier_getter\": self.get_related_resource_identifiers(EditorFacade, \"editors\", to_many=True),\n                \"resource_getter\": self.get_related_resources(EditorFacade, \"editors\", to_many=True),\n            },\n        }\n","sub_path":"app/api/document/facade.py","file_name":"facade.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"48495257","text":"__author__ = 'jeff'\n\n\"\"\"\nThere are N students in a class. Some of them are friends and some are not. Friendship is transitive: if A is known to be a friend of B, and B is a friend of C, then A is also considered a friend of C. A friend circle is the set of all friends.\n\nGiven an N * N matrix M describing the friendships between students in the class: if M[i][j] = 1, students i and j are known to be friends; otherwise the relation is unknown. Output the total number of known friend circles among all students.\n\nExample 1:\n\nInput: \n[[1,1,0],\n [1,1,0],\n [0,0,1]]\nOutput: 2 \nExplanation: students 0 and 1 are known to be friends and form one friend circle.\nStudent 2 forms a friend circle alone, so return 2.\nExample 2:\n\nInput: \n[[1,1,0],\n [1,1,1],\n [0,1,1]]\nOutput: 1\nExplanation: students 0 and 1 are friends and students 1 and 2 are friends, so students 0 and 2 are also friends; all three form one friend circle, so return 1.\nNote:\n\nN is in the range [1, 200].\nFor every student, M[i][i] = 1.\nIf M[i][j] = 1, then M[j][i] = 1.\n\n\"\"\"\n\n\"\"\"\nSolution idea:\nThis is really a traversal problem: the matrix is the adjacency matrix of a graph, and the task is to count how many connected components the graph has.\n\nSolve it with BFS.\n\n\"\"\"\n\nfrom collections import deque\n\n\nclass Solution:\n\n    def findCircleNum(self, M) -> int:\n        \"\"\"\"\"\"\n        q = deque()\n        res = 0\n        nodes = set(range(len(M)))\n        while len(nodes) > 0:\n            root = nodes.pop()\n            q.appendleft(root)\n            while len(q) > 0:\n                node = q.pop()\n                for i in range(len(M)):\n                    if i == node:\n                        continue\n                    if i in nodes and M[node][i] == 1:\n                        q.appendleft(i)\n                        nodes.remove(i)\n            res += 1\n        return res\n\n\nif __name__ == '__main__':\n    m = [\n        [1, 1, 0],\n        [1, 1, 1],\n        [0, 1, 1]\n    ]\n    m = [[1,1,0],\n         [1,1,0],\n         [0,0,1]]\n    r = Solution().findCircleNum(m)\n    print('======%s' % r)\n\n","sub_path":"letcode/字节跳动章节/数组与排序/朋友圈.py","file_name":"朋友圈.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"521239142","text":"import tensorflow as tf\n\n\n# reference link: https://www.tensorflow.org/guide/low_level_intro#layers\n\nx = tf.placeholder(tf.float32, shape=[None, 3]) # define the input\n# model = tf.layers.Dense(units=1) # a Dense model with a single layer of one unit\n# y = model(x) # connect the input to the layer\n\ny = tf.layers.dense(x, units=1) # the shortcut function turns the two lines above into one\n\ninit = tf.global_variables_initializer()\n\nsess = tf.Session()\nsess.run(init)\nprint(sess.run(y, {x:[[1,2,3],[4,5,6]]})) # 'feed_dict=' can be omitted for the inputs\nprint(sess.run(y, feed_dict={x:[[7,8,9],[10,11,12]]}))\n\nsess.close()","sub_path":"tests/test_layers.py","file_name":"test_layers.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"514442351","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 19 10:02:53 2012\n\n@author: anatoliy\n\"\"\"\n\nimport numpy as np\n\nimport unittest\nimport numpy.testing as npt\n\nimport crowdmisc\nimport crowdio\nimport draw\n\nfrom crowdmisc import Properties\n\nclass TestTrackingRegions(unittest.TestCase):\n    def setUp(self):\n        Properties.mode = 'top'\n        Properties.neighborhood_size_coeff = 1.5\n        Properties.dimension_of_motion = 1\n        Properties.min_velocity_in_pixels = 1\n\n    def test_updateFlowTrackingRegion(self):\n        mask = np.zeros((10, 10), dtype = np.uint8)\n        trackingRegions = crowdmisc.constructTrackingRegions(mask)\n        trackingRegions['flowMask'] = mask\n        \n        crowdmisc.updateFlowTrackingRegion([(2,1)], 0.5, trackingRegions)\n        crowdmisc.updateFlowTrackingRegion([(2,3)], 0.5, trackingRegions)\n        self.assertEquals(trackingRegions['flowCorners'], [(4,2), (4, 6)])\n        crowdmisc.updateFlowTrackingRegion([(3,3),(3,1)], 0.5, trackingRegions)\n        self.assertEquals(trackingRegions['flowCorners'], [(4,2), (4, 6), (6,6), (6,2)])\n\n        trueMask = np.zeros((10, 10), np.uint8)\n        trueMask[2:7][:, 4:7] = 255\n        npt.assert_equal(trackingRegions['flowMask'], trueMask)\n        \n        crowdmisc.updateFlowTrackingRegion([(2, 2)], 0.5, trackingRegions)\n        self.assertEqual(trackingRegions['flowCorners'], [(4,4)])\n        self.assertEqual(trackingRegions['displayFlowCorners'], [(2,2)])\n        \n    def test_updateEgomotionTrackingRegion(self):\n        mask = np.zeros((20, 10), dtype = 
np.uint8)\n trackingRegions = crowdmisc.constructTrackingRegions(mask)\n \n crowdmisc.updateEgomotionTrackingRegion([(2,1), (2,3)], 0.5, trackingRegions)\n self.assertEquals(trackingRegions['stableCorners'], [(4,2), (4, 6)])\n crowdmisc.updateEgomotionTrackingRegion([(3,3)], 0.5, trackingRegions)\n crowdmisc.updateEgomotionTrackingRegion([(3,1)], 0.5, trackingRegions)\n self.assertEquals(trackingRegions['stableCorners'], [(4,2), (4, 6), (6,6), (6,2)])\n\n trueMask = np.zeros((20, 10), np.uint8)\n trueMask[2:7][:, 4:7] = 255\n npt.assert_equal(trackingRegions['stableRegionMask'], trueMask)\n \n crowdmisc.updateEgomotionTrackingRegion([(5,1)], 0.5, trackingRegions)\n crowdmisc.updateEgomotionTrackingRegion([(5,3)], 0.5, trackingRegions)\n self.assertEquals(trackingRegions['stableCorners'],\\\n [(4,2), (4, 6), (6,6), (6,2), (10,2), (10, 6)])\n crowdmisc.updateEgomotionTrackingRegion([(6,3)], 0.5, trackingRegions)\n crowdmisc.updateEgomotionTrackingRegion([(6,1)], 0.5, trackingRegions)\n self.assertEquals(trackingRegions['stableCorners'],\n [(4,2), (4, 6), (6,6), (6,2), (10,2), (10, 6), (12,6), (12,2)])\n \n trueMask[2:7][:, 10:13] = 255\n #trueMask[0, 0] = 1\n npt.assert_equal(trackingRegions['stableRegionMask'], trueMask)\n \n def test_warpTrackingRegion(self):\n mask = np.zeros((10, 10), dtype = np.uint8)\n trackingRegions = crowdmisc.constructTrackingRegions(mask)\n crowdmisc.updateEgomotionTrackingRegion([(2,3), (2,1), (3,1), (3,3)],\n 0.5, trackingRegions)\n crowdmisc.updateFlowTrackingRegion([(2,3), (2,1), (3,1), (3,3)],\n 0.5, trackingRegions)\n warp_matrix = np.eye(3)\n warp_matrix[0, 2] = 2\n warp_matrix[1, 2] = -1\n \n out = crowdmisc.warpTrackingRegions(trackingRegions, mask, warp_matrix)\n self.assertListEqual(out['flowCorners'], [(6, 5), (6, 1), (8, 1), (8,5)])\n self.assertListEqual(out['stableCorners'], [(6, 5), (6, 1), (8, 1), (8,5)])\n self.assertAlmostEqual(out['configImageZoom'], 0.5)\n self.assertListEqual(out['displayFlowCorners'], \n [(3, 3), (3, 1), (4, 1), (4,3)])\n self.assertListEqual(out['displayStableCorners'], \n [(3, 3), (3, 1), (4, 1), (4,3)])\n # Check that trackingRegions are left unchanged\n reserveTrackingRegions = crowdmisc.constructTrackingRegions(mask) \n crowdmisc.updateEgomotionTrackingRegion([(2,3), (2,1), (3,1), (3,3)],\n 0.5, reserveTrackingRegions)\n crowdmisc.updateFlowTrackingRegion([(2,3), (2,1), (3,1), (3,3)],\n 0.5, reserveTrackingRegions)\n keys = trackingRegions.keys()\n for key in keys:\n npt.assert_array_equal(np.array(reserveTrackingRegions[key]),\n np.array(trackingRegions[key]))\n \n true_mask = np.zeros((10, 10), dtype=np.uint8)\n true_mask[1:6][:, 6:9] = 255\n npt.assert_equal(out['flowMask'], true_mask)\n npt.assert_equal(out['stableRegionMask'], true_mask)\n \n true_scaling = np.eye(3)\n true_scaling[(0, 1), (0, 1)] = [0.5, 0.25]\n true_shift = np.eye(3)\n true_shift[(0, 1), (2,2)] = [-3, -0.25]\n npt.assert_almost_equal(out['flowWarpMatrix'], np.dot(true_shift, true_scaling))\n self.assertAlmostEqual(out['minVelocity'], 0.25)\n\nclass TestTransformations(unittest.TestCase):\n def testCompute_unit_square_transform(self):\n pts = [(0,2), (0, 0), (2,0), (2,2)]\n mat = np.eye(3)\n mat[0, 0] = mat[1,1] = 0.5\n out = crowdmisc.computeUnitSquareTransform(pts)\n #out = np.array(out)\n npt.assert_almost_equal(mat, out)\n\n def testBackproject_sample_rect(self):\n # create a warp matrix for a 10 x 20 square with an offset of (15, 25)\n warp_matrix = np.array([[0.1, 0, -1.4449],\n [0, 0.05, -1.24440],\n [0,0,1]], np.float32)\n sample_region = 
draw.backproject_sample_rect(warp_matrix, (0.5, 0.5))\n self.assertListEqual(\n sample_region,\n [(17, 40), (17,30), (22, 30), (22, 40)])\n sample_region = draw.backproject_sample_rect(warp_matrix, (0.5, 0.25))\n self.assertListEqual(\n sample_region,\n [(17, 37), (17,32), (22, 32), (22, 37)])\n \n def testCompute_sample_image_coordinates(self):\n\n # Test 1: cropping on the top left\n sample_region = [(25, 25), (15, 15), (5, 25), (15, 34)]\n neighborhood_roi, local_sample_bounds =\\\n draw.compute_sample_image_coordinates((100, 100), sample_region)\n # Expanding the neighborhood, we get:\n # x = 5 - 1.5 * 21 = 5 - 31.5 = -26.5\n # y = 15 - 1.5 * 20 = -15\n # width = 84\n # height = 80\n # cropping to (0, 0), we get width = 84 - 26.5 = 57.5\n # height = 80 - 15 = 65\n self.assertEqual(neighborhood_roi, \n (0, 0, 57.5, 65.0))\n self.assertListEqual(local_sample_bounds,\n [(25, 25), (15, 15), (5, 25), (15, 34)])\nclass TestIO(unittest.TestCase):\n def setUp(self):\n self.imageSequence = crowdio.ImageSequence(\"\", \"\")\n def testGetFileTime(self):\n filename = 'DSC_1234.jpg'\n time = self.imageSequence.getFileTime(filename)\n self.assertEquals(1234, time)\n filename = '0000543220.bmp'\n time = self.imageSequence.getFileTime(filename)\n self.assertEquals(543220, time)\n filename = 'file55432.123.JPG'\n time = self.imageSequence.getFileTime(filename)\n self.assertAlmostEquals(55432.123, time)\n \n def testFindProcessedFrames(self):\n fileList =['ds1.jpg', 'ds2.jpg', 'ds3.jpg', 'ds4.jpg', 'ds5.jpg']\n processedFrames = self.imageSequence.findProcessedFrames(3, fileList)\n self.assertListEqual(processedFrames,\n ['ds1.jpg', 'ds2.jpg', 'ds3.jpg'])\n \n \nclass TestTrackingImage(unittest.TestCase):\n def testGetTrackingFeatures(self):\n image = np.zeros((20, 20), dtype = np.uint8)\n def testGoodFeaturesToTrack(*arglist):\n features = np.array([[10, 12],\n [3, 4],\n [9, 8],\n [18, 14]]).reshape(-1, 1, 2)\n return features\n \n mask = np.zeros_like(image)\n mask[4:15, 8:13] = 1\n trackedFrame = crowdmisc.TrackedFrame(\n image, goodFeaturesToTrack=testGoodFeaturesToTrack)\n features = trackedFrame.getTrackingFeatures(4, mask)\n npt.assert_array_equal(features, \n np.array([[10, 12], [9, 8]]).reshape((2, 1, 2)))\n mask = np.zeros_like(mask)\n features = trackedFrame.getTrackingFeatures(4, mask)\n self.assertFalse(features)\n \n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30925244","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n 脚本名:\nCreated on 2018--\n@author:David Yisun\n@group:data\n\"\"\"\nclass A():\n name = None\n custom_settings = None\n print('good man')\n def __init__(self, name=None, **kwargs):\n if name is not None:\n self.name = name\n print(\"A:{0}\".format(name))\n\nclass B(A):\n name = 'u'\n def __init__(self):\n self.nam = 'yisun'\n","sub_path":"_test/_test_object.py","file_name":"_test_object.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556300580","text":"import pygame\nimport math\nfrom vector import *\nfrom camera import cam\nfrom graph import *\nfrom random import randint\n#import time\n\ndef read_obj(self,file_name,origin):\n\tvertices = []\n\tfaces = []\n\tmesh = graph(origin)\n\twith open (file_name,'rb') as f:\n\t\tfor line in f:\n\t\t\ttemp1 = 
line.decode(\"utf-8\")\n\t\t\tif temp1[0] == \"v\":\n\t\t\t\ttemp2 = temp1.strip(\"\\n\")\n\t\t\t\ttemp3 = temp2.split(' ')\n\t\t\t\t\n\t\t\t\tvertices.append([float(temp3[1]),float(temp3[2]),float(temp3[3])])\n\t\t\tif temp1[0] == \"f\":\n\t\t\t\ttemp2 = temp1.strip(\"\\n\")\n\t\t\t\ttemp3 = temp2.split(' ')\n\t\t\t\tfaces.append([int(temp3[1])-1,int(temp3[2])-1,int(temp3[3])-1])\n\tfor tri in faces:\n\t\tmesh.add_tri([vector(vertices[tri[0]]),vector(vertices[tri[1]]),vector(vertices[tri[2]])],(randint(140,150),randint(170,220),20))\n\treturn mesh\t\n\nclass renderer:\n\tdef __init__(self,xres,yres,fov,near,far):\n\t#model matrix (model to world), view matrix (world to camera cordinates), projection matrix (camera to perspective)\n\t\tcolors = {\"black\":(0,0,0),\"white\":(255,255,255),\"red\":(255,0,0),\"green\":(0,255,0),\"blue\":(0,0,255),\"yellow\":(255,255,0),\"grey\":(211,211,211),\"go\":(0,78,100)}\n\t\tself.running = True\n\t\tself.xres = xres\n\t\tself.yres = yres\n\t\tself.main_camera = cam(vector([0,0,0]),fov,xres,yres,near,far)\n\t\tself.xbox_controler = False\n\t\tself.joystick = None\n\t\tself.object_buffer = {}\n\t\tpygame.init()\n\t\tself.display = pygame.display.set_mode((xres,yres))\n\t\tself.crashed = False\n\t\tself.clock = pygame.time.Clock()\n\t\tpygame.display.set_caption(\"SAIengine\")\n\t\tpygame.mouse.set_visible(False)\n\t\tpygame.event.set_grab(True)\n\t\t\n\t\tcount = pygame.joystick.get_count()\n\t\tfor i in range(count):\n\t\t\tself.joystick = pygame.joystick.Joystick(i)\n\t\t\tif (self.joystick.get_name() == \"Controller (XBOX 360 For Windows)\"):\n\t\t\t\tself.joystick.init()\n\t\t\t\tself.xbox_controler = True\n\t\t\t\tbreak\n\t\tif not (self.xbox_controler):\n\t\t\tself.joystick = None\n\t\t\n\tdef game_pad(self, l_sensitivity, r_sensitivity, t_sensitivity):\n\t\tanalog_right = self.joystick.get_axis(4)\n\t\tanalog_up = self.joystick.get_axis(3)\n\t\tanalog_forward = self.joystick.get_axis(1)\n\t\tanalog_right_t = self.joystick.get_axis(0)\n\t\tanalog_fly = self.joystick.get_axis(2)\n\t\tif analog_right > 0.2:\n\t\t\tself.main_camera.rotate(r_sensitivity*analog_right, self.main_camera.up)\n\t\tif analog_right < -0.2:\n\t\t\tself.main_camera.rotate(r_sensitivity*analog_right, self.main_camera.up)\n\t\tif (analog_up > 0.2) and (self.main_camera.forward*self.main_camera.up<0.85):\n\t\t\tself.main_camera.rotate(r_sensitivity*analog_up, self.main_camera.rightward)\n\t\tif (analog_up < -0.2) and (self.main_camera.forward*self.main_camera.up.scale(-1)<0.85):\n\t\t\tself.main_camera.rotate(r_sensitivity*analog_up, self.main_camera.rightward)\n\t\tif \tanalog_right_t > 0.2:\n\t\t\tself.main_camera.translate(self.main_camera.rightward.scale(l_sensitivity*analog_right_t))\n\t\tif \tanalog_right_t < -0.2:\n\t\t\tself.main_camera.translate(self.main_camera.rightward.scale(l_sensitivity *analog_right_t))\n\t\tif \tanalog_forward > 0.2:\n\t\t\tself.main_camera.translate(cross(self.main_camera.up,self.main_camera.rightward.scale(l_sensitivity*analog_forward)))\n\t\tif \tanalog_forward < -0.2:\n\t\t\tself.main_camera.translate(cross(self.main_camera.up,self.main_camera.rightward.scale(l_sensitivity*analog_forward)))\n\t\tif analog_fly < -0.2:\n\t\t\tself.main_camera.translate(self.main_camera.up.scale(-1*analog_fly*t_sensitivity))\n\t\tif analog_fly > 0.2:\n\t\t\tself.main_camera.translate(self.main_camera.up.scale(-1*analog_fly*t_sensitivity))\n\t\n\tdef add_obj(self,mesh,name):\n\t\tif (name not in self.object_buffer):\n\t\t\tself.object_buffer[name] = mesh\n\n\tdef 
display_triangle(self,tri):\n\t\tv1 = (((self.xres*tri.data[0].data[0]) + self.xres)/2.0,((self.yres*tri.data[0].data[1])-self.yres)/-2.0)\n\t\tv2 = (((self.xres*tri.data[1].data[0]) + self.xres)/2.0,((self.yres*tri.data[1].data[1])-self.yres)/-2.0)\n\t\tv3 = (((self.xres*tri.data[2].data[0]) + self.xres)/2.0,((self.yres*tri.data[2].data[1])-self.yres)/-2.0)\n\t\tpygame.draw.polygon(self.display,tri.color,[v1,v2,v3],0)\n\n\tdef run_frame(self):\n\t\tif not self.crashed:\n\t\t\tself.display.fill((255,255,255))\n\t\t\tif (self.xbox_controler):\n\t\t\t\tself.game_pad(0.1,0.01,0.07)\n\t\t\t\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\t\tself.crashed = True\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tself.crashed = True\n\t\t\t\n\t\t\tfor name, mesh in self.object_buffer.items():\n\t\t\t\tself.main_camera.push(mesh)\n\t\t\tfor tri in self.main_camera.pop():\n\t\t\t\tself.display_triangle(tri)\n\t\t\tpygame.display.update()\n\t\t\tprint(self.clock.get_fps())\n\t\t\tself.clock.tick(60)\n\t\t\treturn True\n\t\t\n\t\telse:\n\t\t\tpygame.quit()\n\t\t\treturn False","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62450012","text":"# -*- coding:UTF-8 -*-\nimport requests\nfrom selenium import webdriver\nimport re\nfrom urllib import parse\nimport time\nfrom selenium.webdriver.chrome.options import Options\n\nheader = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"\n \" AppleWebKit/537.36 (KHTML, like Gecko)\"\n \" Chrome/70.0.3538.110 Safari/537.36\"\n}\n\nrooturl = 'https://www.biqukan.com/1_1094/'\n\n\ndef all_page_url(rooturl):\n response = requests.get(rooturl, headers=header)\n urls = re.findall('<dd><a href =\"(.*)\".*/dd>', response.text)\n reurls = []\n for url in urls:\n reurls.append(parse.urljoin(rooturl, url))\n return reurls\n\n\ndef page_download_parser(url):\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n driver = webdriver.Chrome(\"G:\\python 资源\\Chrome浏览器驱动(webdriver)\\chromedriver.exe\", chrome_options=chrome_options)\n try:\n driver.get(url)\n result_t = driver.find_element_by_tag_name('h1')\n result = driver.find_element_by_xpath(\"//div[contains(@id,'content')]\")\n with open('G:\\python 资源\\python project\\笔趣阁在线小说爬取\\\\'+result_t.text+'.txt','w') as file:\n file.write(result_t.text+'\\n')\n file.write(result.text)\n # print(result.text)\n except:\n print(\"页面下载解析出错\")\n finally:\n driver.quit()\n\n\ndef main():\n urls = []\n urls = all_page_url(rooturl)\n count = 1\n for url in urls[15:-3]:\n count += 1\n try:\n page_download_parser(url)\n time.sleep(0.5)\n print(\"第%d下载成功\" % count)\n\n except:\n print(\"第%d下载出错\" % count)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"笔趣阁在线小说爬取/TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637419062","text":"from django.http import JsonResponse\nfrom threading import Lock\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\n\n\nluns = []\nluns_lock = Lock()\n\ndef save_luns_to_file():\n global luns\n with open('./lun.db', 'w') as db_file:\n json.dump(luns, db_file)\n\ndef load_luns_from_file():\n global luns\n with open('./lun.db', 'r') as db_file:\n luns = json.load(db_file)\n\nwith luns_lock:\n 
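# prime the in-memory lun cache from the JSON file on disk at import time\n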
    load_luns_from_file()\n\n@csrf_exempt\ndef custom404(request):\n    return JsonResponse({ 'status_code': 404, 'error': 'The resource was not found' })\n\n@csrf_exempt\ndef custom400(request):\n    return JsonResponse({ 'status_code': 400, 'error': 'Bad request' })\n\n@csrf_exempt\ndef _create_luns(request):\n    result = []\n    new_luns = request\n    for new_lun in new_luns:\n        if not new_lun or 'name' not in new_lun or 'size' not in new_lun or new_lun['size'] <= 0:\n            return custom400(request)\n\n    with luns_lock:\n        for new_lun in new_luns:\n            item = {\n                'id' : luns[-1]['id'] + 1 if luns else 1,\n                'name' : new_lun['name'],\n                'initiator' : new_lun.get('initiator','default'),\n                'target' : new_lun.get('target','default'),\n                'size' : new_lun['size']\n            }\n\n            result.append(item)\n            luns.append(item)\n        save_luns_to_file()\n    return JsonResponse({ \"status_code\" : 200, \"luns\" : result }, json_dumps_params={ \"indent\" : 4 })\n\n@csrf_exempt\ndef _get_luns(request):\n    return JsonResponse({ \"status_code\" : 200, \"luns\" : luns }, json_dumps_params={ \"indent\" : 4 })\n\n@csrf_exempt\ndef handle_luns(request):\n    if request.method == \"POST\":\n        temp = json.load(request)\n        if not temp:\n            return custom400(request)\n        return _create_luns(temp)\n    else:\n        return _get_luns(request)\n\n@csrf_exempt\ndef _get_lun(lun):\n    return JsonResponse({\"status_code\" : 200, \"lun\" : lun}, json_dumps_params={\"indent\" : 4})\n\n@csrf_exempt\ndef _get_lun_size(lun):\n    return JsonResponse({\"status_code\" : 200, \"lun\" : lun[\"size\"]}, json_dumps_params={\"indent\" : 4})\n\n@csrf_exempt\ndef _delete_lun(lun):\n    luns.remove(lun)\n    save_luns_to_file()\n    return JsonResponse({\"status_code\" : 200}, json_dumps_params={\"indent\" : 4})\n\n@csrf_exempt\ndef _update_lun(lun, request):\n    item = request\n    lun['size'] = item['size']\n    save_luns_to_file()\n    return JsonResponse({\"status_code\" : 200, \"lun\" : lun}, json_dumps_params={\"indent\" : 4})\n\n@csrf_exempt\ndef handle_lun(request, lun_id):\n    with luns_lock:\n        # get lun according to lun_id\n        lun = list(filter(lambda x: x['id'] == int(lun_id), luns))\n        if len(lun) == 0:\n            return custom404(request)\n        lun = lun[0]\n\n        if request.method == \"PUT\":\n            temp = json.load(request)\n            if not temp:\n                return custom400(request)\n            return _update_lun(lun, temp)\n        elif request.method == \"DELETE\":\n            return _delete_lun(lun)\n        else:\n            return _get_lun(lun)\n\n\n\n@csrf_exempt\ndef handle_lun_size(request, lun_id):\n    with luns_lock:\n        # get lun according to lun_id\n        lun = list(filter(lambda x: x['id'] == int(lun_id), luns))\n        if len(lun) == 0:\n            return custom404(request)\n        lun = lun[0]\n\n        if request.method == \"PUT\":\n            temp = json.load(request)\n            if not temp:\n                return custom400(request)\n            return _update_lun(lun, temp)\n        else:\n            return _get_lun_size(lun)\n\n","sub_path":"Desktop/EMC_Test/Test3/lun/lun/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"649025971","text":"from original_larva import OriginalLarva\r\nfrom new_larva import NewLarva\r\n\r\nfrom util import Error\r\n\r\ndef larva_factory(larva_name, location, velocity):\r\n    \"\"\"Very simple form of factory function \r\n    Used to decouple other modules from specific view implementations.\r\n    \"\"\"\r\n    larva = None\r\n    if larva_name == OriginalLarva.__name__:\r\n        larva = OriginalLarva(location, velocity)\r\n    elif larva_name == NewLarva.__name__:\r\n        larva = NewLarva(location, velocity)\r\n    else:\r\n        raise Error('Bad larva name!')\r\n    return 
larva\r\n","sub_path":"larva_factory.py","file_name":"larva_factory.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64315433","text":"# !/usr/bin/python\n\n\"\"\"\nCopyright ©️: 2020 Seniatical / _-*™#7519\nLicense: Apache 2.0\nA permissive license whose main conditions require preservation of copyright and license notices.\nContributors provide an express grant of patent rights.\nLicensed works, modifications, and larger works may be distributed under different terms and without source code.\nFULL LICENSE CAN BE FOUND AT:\n https://www.apache.org/licenses/LICENSE-2.0.html\nAny violation to the license, will result in moderate action\nYou are legally required to mention (original author, license, source and any changes made)\n\"\"\"\n\nimport asyncio\nimport datetime\n\nimport TagScriptEngine\nimport discord\nimport pymongo.cursor\nfrom discord.ext import commands\nimport TagScriptEngine as tagscript\nfrom pydantic import BaseModel\nfrom core.abc import KarenMixin, KarenMetaClass\n\n\nclass Tags(commands.Cog, KarenMixin, metaclass=KarenMetaClass):\n def __init__(self, bot):\n super().__init__()\n\n self.bot = bot\n self.client = bot.client\n \n column = self.client['Bot']\n self.table = column['Tags']\n\n bot.cache.cache['Tags'] = dict()\n\n self.max_tags = 10\n\n for tag in self.table.find():\n guild = bot.get_guild(tag['_id'])\n\n if not guild:\n continue\n\n if bot.cache.cache['Tags'].get(tag['_id']):\n bot.cache.cache['Tags'][tag['_id']].update({tag['name']: self.Tag(\n content=tag['value'], name=tag['name'], guild=guild.id,\n author=getattr(guild.get_member(tag['author']), 'id', 0),\n created_at=datetime.datetime.fromisoformat(tag['created_at']),\n updated_at=datetime.datetime.fromisoformat(tag['updated_at']),\n uses=tag['uses']\n )})\n else:\n self.bot.cache.cache['Tags'][tag['_id']] = dict()\n self.bot.cache.cache['Tags'][tag['_id']][tag['name']] = self.Tag(\n content=tag['value'], name=tag['name'], guild=guild.id,\n author=getattr(guild.get_member(tag['author']), 'id', 0),\n created_at=datetime.datetime.fromisoformat(tag['created_at']),\n updated_at=datetime.datetime.fromisoformat(tag['updated_at']),\n uses=tag['uses']\n )\n\n tagscript_blocks = [\n tagscript.MathBlock(),\n tagscript.RandomBlock(),\n tagscript.RangeBlock(),\n tagscript.AnyBlock(),\n tagscript.IfBlock(),\n tagscript.AllBlock(),\n tagscript.BreakBlock(),\n tagscript.StrfBlock(),\n tagscript.StopBlock(),\n tagscript.AssignmentBlock(),\n tagscript.FiftyFiftyBlock(),\n tagscript.ShortCutRedirectBlock(\"args\"),\n tagscript.LooseVariableGetterBlock(),\n tagscript.SubstringBlock(),\n tagscript.EmbedBlock(),\n tagscript.ReplaceBlock(),\n tagscript.PythonBlock(),\n tagscript.URLEncodeBlock(),\n tagscript.RequireBlock(),\n tagscript.BlacklistBlock(),\n tagscript.CommandBlock(),\n tagscript.OverrideBlock(),\n ]\n\n self.engine = TagScriptEngine.Interpreter(tagscript_blocks)\n\n class Tag(BaseModel):\n r\"\"\" An object which represents a TAG \"\"\"\n content: str\n name: str\n guild: int\n author: int\n created_at: datetime.datetime\n updated_at: datetime.datetime\n uses: int\n\n async def save_tag(self, ctx: commands.Context, name: str, value: str):\n tag_info_as_dict = await self.get_tag_info(ctx, name, value)\n tag = self.Tag(content=value, name=name, guild=ctx.guild.id, author=ctx.author.id,\n created_at=ctx.message.created_at, updated_at=ctx.message.created_at,\n uses=0\n )\n try:\n 
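# store the new tag in the per-guild cache; a KeyError means this guild has no bucket yet\n            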
self.bot.cache.cache['Tags'][ctx.guild.id][tag.name] = tag\n except KeyError:\n self.bot.cache.cache['Tags'][ctx.guild.id] = dict()\n self.bot.cache.cache['Tags'][ctx.guild.id][tag.name] = tag\n\n await self.bot.loop.run_in_executor(\n None, self.table.insert_one, tag_info_as_dict\n )\n\n @staticmethod\n async def get_tag_info(ctx: commands.Context, name: str, value: str) -> dict:\n tag = {'_id': ctx.guild.id, 'author': ctx.author.id,\n 'created_at': ctx.message.created_at.isoformat(),\n 'updated_at': ctx.message.created_at.isoformat(),\n 'name': name, 'value': value, 'uses': 0,\n }\n return tag\n\n @staticmethod\n async def get_seeds(ctx: commands.Context, tag):\n author = {\n 'avatar': ctx.author.avatar,\n 'name': ctx.author.name,\n 'id': ctx.author.id,\n 'display_name': ctx.author.display_name,\n 'mention': ctx.author.mention\n }\n\n seed = {\n \"author\": author,\n \"user\": author,\n \"tag_owner\": tag.author,\n \"tag_name\": tag.name,\n \"tag_uses\": tag.uses\n }\n if ctx.guild:\n guild = {\n 'name': ctx.guild.name,\n 'icon': ctx.guild.icon,\n 'id': ctx.guild.id,\n 'owner': ctx.guild.owner\n }\n seed.update(guild=guild, server=guild)\n return seed\n\n async def tag_exists(self, ctx: commands.Context, name: str):\n tag = await self.bot.loop.run_in_executor(\n None, self.table.find_one, {'_id': ctx.guild.id, 'name': name}\n )\n if tag is not None:\n return True\n return False\n\n async def surpassed_limit(self, ctx: commands.Context):\n tags: pymongo.cursor.Cursor = await self.bot.loop.run_in_executor(\n None, self.table.find, {'_id': ctx.guild.id, 'author': ctx.author.id}\n )\n return tags.count() > self.max_tags\n\n @commands.group(invoke_without_command=True)\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def tag(self, ctx: commands.Context, *, tag: str):\n try:\n tags = self.bot.cache.cache['Tags'][ctx.guild.id]\n except KeyError:\n tags = None\n\n if not tags or not tags.get(tag):\n return await ctx.message.reply(content='This tag doesn\\'t exist', mention_author=False)\n tag = tags[tag]\n seeds = await self.get_seeds(ctx, tag)\n\n content = self.engine.process(tag.content, seed_variables=seeds)\n\n if ctx.message.reference and ctx.message.reference.resolved:\n return await ctx.message.reference.resolved.reply(content=content.body, mention_author=False)\n return await ctx.send(content.body)\n\n @tag.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def delete(self, ctx: commands.Context, *, tag: str):\n global confirmed\n\n tags: dict = self.bot.cache.cache['Tags'].get(ctx.guild.id)\n if not tags or not tags.get(tag):\n return await ctx.message.reply(content='This tag doesn\\'t exist!')\n tag = tags[tag]\n\n if not tag.author == ctx.author.id and not ctx.author.guild_permissions.manage_guild:\n return await ctx.message.reply(content='This tag doesn\\'t belong to you.')\n\n confirmed = False\n\n def check(m):\n global confirmed\n\n if m.author.id == ctx.author.id and m.channel.id == ctx.channel.id and m.content.lower() in ['y', 'yes']:\n confirmed = True\n return True\n elif m.author.id == ctx.author.id and m.channel.id == ctx.channel.id and m.content.lower() in ['n', 'no']:\n return True\n try:\n await ctx.send('Are you sure you would like to delete this tag, response with `(y / n)`')\n message: discord.Message = await self.bot.wait_for('message', check=check, timeout=30.0)\n except asyncio.TimeoutError:\n return await ctx.message.reply(content='You didn\\'t confirm in time!')\n if not confirmed:\n return await message.reply(content='Ok, I will 
if not confirmed:\n            return await message.reply(content='Ok, I will not delete this tag!')\n\n        await self.bot.loop.run_in_executor(\n            None, self.table.delete_one, {'_id': ctx.guild.id, 'name': tag.name}\n        )\n        self.bot.cache.cache['Tags'][ctx.guild.id].pop(tag.name)\n\n        return await message.reply(content='Successfully deleted this tag for you!')\n\n    @tag.command(aliases=['add'])\n    @commands.cooldown(1, 10, commands.BucketType.user)\n    async def create(self, ctx: commands.Context):\n        await ctx.send('What will be the name for this tag, keep it under **50** characters!')\n        try:\n            tag_name: discord.Message = await self.bot.wait_for('message', check=lambda m: m.author.id == ctx.author.id and ctx.channel.id == m.channel.id, timeout=30.0)\n        except asyncio.TimeoutError:\n            return await ctx.message.reply(content='Failed to create tag: you didn\'t provide a name in time!')\n        if len(tag_name.content) > 50:\n            return await tag_name.reply(content='Failed to create tag: your tag\'s name must be under **50** characters')\n\n        if await self.tag_exists(ctx, tag_name.content):\n            return await tag_name.reply(content='This tag already exists in your server!')\n\n        await tag_name.reply(content='Ok then, your tag will be called **%s**, now what will be the tag\'s content?' % tag_name.content)\n\n        try:\n            tag_content: discord.Message = await self.bot.wait_for('message', check=lambda m: m.author.id == ctx.author.id and ctx.channel.id == m.channel.id, timeout=30.0)\n        except asyncio.TimeoutError:\n            return await tag_name.reply(content='Failed to create tag: you didn\'t respond in time!')\n\n        if await self.surpassed_limit(ctx):\n            return await tag_content.reply(content='You have already reached the maximum amount of tags, which is **%s**' % self.max_tags)\n\n        ## No point checking for content length because the bot just sends what they said\n\n        await self.save_tag(ctx, tag_name.content, tag_content.content)\n\n        return await ctx.send('Created a new tag for this server using the name **%s**' % tag_name.content)\n\n\ndef setup(bot):\n    bot.add_cog(Tags(bot))\n\n","sub_path":"Bot/src/tools/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"554487042","text":"from Geometry2D import *\n\n\nclass RaiseEyebrows:\n    def load(self, points):\n        point17 = Point(points[17][0], points[17][1])\n        point18 = Point(points[18][0], points[18][1])\n        point19 = Point(points[19][0], points[19][1])\n        point20 = Point(points[20][0], points[20][1])\n        point21 = Point(points[21][0], points[21][1])\n        left_triangle = Triangle(point17, point19, point21)\n\n        point22 = Point(points[22][0], points[22][1])\n        point23 = Point(points[23][0], points[23][1])\n        point24 = Point(points[24][0], points[24][1])\n        point25 = Point(points[25][0], points[25][1])\n        point26 = Point(points[26][0], points[26][1])\n        right_triangle = Triangle(point22, point24, point26)\n\n        point0 = Point(points[0][0], points[0][1])\n        point16 = Point(points[16][0], points[16][1])\n        self.segment = Segment(point0, point16)\n\n        left_angle = left_triangle.beta_angle * 57.2958  # radians to degrees (180 / pi)\n        right_angle = right_triangle.beta_angle * 57.2958\n\n        condition1 = left_angle < 128 and right_angle < 128\n        condition2 = abs(left_angle - right_angle) < 4\n\n        div1 = self.compare_len(point17, point26)\n        div2 = self.compare_len(point18, point25)\n        div3 = self.compare_len(point19, point24)\n        div4 = self.compare_len(point20, point23)\n        div5 = self.compare_len(point21, point22)\n        div = (div1 + div2 + div3 + div4 + div5) / 5\n        condition3 = div >= 0.85\n\n        # NOTE: condition2 (left/right angle symmetry) is computed above but does\n        # not take part in the final check.\n        return condition1 and condition3\n\n    
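# compare_len projects both points onto the point0-point16 face line and\n    # returns the ratio of the smaller perpendicular distance to the larger one\n    # (1.0 means both landmarks sit equally far from that line).\n    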
def compare_len(self, point1, point2):\n        segment = self.segment\n        perpendicular1 = getPerpendicular(segment, point1)\n        point12 = perpendicular1.getCrossing(segment)\n        segment1 = Segment(point1, point12)\n\n        perpendicular2 = getPerpendicular(segment, point2)\n        point22 = perpendicular2.getCrossing(segment)\n        segment2 = Segment(point2, point22)\n\n        return segment1.length / segment2.length if segment1.length < segment2.length else segment2.length / segment1.length\n","sub_path":"exercises/RaiseEyebrows.py","file_name":"RaiseEyebrows.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"170241380","text":"from colorama import Fore, Back\n\nclass View:\n\n    @staticmethod\n    def show_menu(title, *options):\n        print(f'\\t\\t{Back.MAGENTA} {title} {Back.RESET}')\n        for option in range(len(options)):\n            print(f'\\t{Fore.YELLOW}[{option + 1}] {options[option]}')\n    \n    @staticmethod\n    def show_menu_coptions(title, **options):\n        print(f'\\t\\t{Back.MAGENTA} {title} {Back.RESET}')\n        for key, value in options.items():\n            print(f'\\t{Fore.YELLOW}[{key}] {value}')","sub_path":"HypeSquadChanger/view/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"34113266","text":"import cv2\nimport dlib\nimport time\nimport urllib\nimport os\nimport pathlib\nimport uuid  # used by readPersonDetails below\n\n\nimport cognitive_face as CF\n\nfrom models import *\nfrom global_variables import *\n\n\n\nCF.Key.set(cfKey)\nCF.BaseUrl.set(URL)\n\n\n\n###############################################################################\n# create person group if not exists\n###############################################################################\n# add person\n###############################################################################\n# add faces to person\n###############################################################################\n###############################################################################\n\n\n\n# register a student from the console\n# save the student in the db\n# check if the person group exists\n# read faces\n# add faces to personGroup\n\n\n\n###############################################################################\n###############################################################################\n###############################################################################\n\n# Ok, iterate all person groups; return early when ours already exists,\n# otherwise create the person group\ndef createPersonGroupIfNotExits():\n    personGroups = CF.person_group.lists()\n    for personGroup in personGroups:\n        if personGroupId == personGroup['personGroupId']:\n            return None\n    result = CF.person_group.create(personGroupId)\n    return result\n\n\n# ok, create person id and update in database\ndef createPersonInCfAndUpdateInLocalDb(student):\n    # def create(person_group_id, name, user_data=None):\n    response = CF.person.create(personGroupId, f\"{student.code}\")\n    # update personId\n    Student.update(personGuid = response['personId']).where(Student.id == student.id).execute()\n    Student.update(personId = response['personId']).where(Student.id == student.id).execute()\n    print(response)\n    # return response\n    return Student.get_by_id(student.id)\n\n\n\n# add image faces to person\ndef addImageFacesToPerson(student, folder):\n    for filename in os.listdir(folder):\n        if filename.endswith(\".jpg\"):\n            print(filename)\n\n            # fix the image url\n            # see the URL object\n            # os.path.abspath\n\n            
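# editor's sketch (an assumption, mirroring the commented attempts below): a\n            # file URI for the path built below could be obtained with, e.g.,\n            #   img_url = pathlib.Path(os.path.abspath(fullFileName)).as_uri()\n            # though CF.face.detect is given the plain local path and uploads the file\n            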
fullFileName = os.path.join(folder, filename)\n            # absPath = os.path.abspath(fullFileName)\n            # imgurl = urllib.request.pathname2url(absPath)\n            # imgurl = pathlib.Path(fullFileName).as_uri()\n\n            res = CF.face.detect(fullFileName)\n\n            print(res)\n\n            if len(res) != 1:\n                print(\"No face detected in image\")\n            else:\n                # def add_face(image,\n                #              person_group_id,\n                #              person_id,\n                #              user_data=None,\n                #              target_face=None):\n                res = CF.person.add_face(fullFileName, personGroupId, student.personId)\n                print(res)\n\n            # time.sleep(6)\n\n\n\n###############################################################################\n###############################################################################\n###############################################################################\n\n\n# ok, read person details from the console: full name and code\ndef readPersonDetails():\n    student = Student()\n\n    student.fullName = str(input(\"Enter the person's first and last name:\"))\n    student.code = str(input(\"Enter the person's code or identifier:\"))\n    \n    student.personGuid = str(uuid.uuid4())\n    student.folderGuid = str(uuid.uuid4())\n    student.personId = str(uuid.uuid4())\n\n    return student\n\n\n\n\n\n# ok, save student in database\ndef saveStudentInDb(student):\n    newStudent = Student.create(fullName = student.fullName, \n                                code = student.code, \n                                folderGuid = student.folderGuid,\n                                personGuid = student.personGuid,\n                                personId = student.personId)\n    newStudent.save()\n    return newStudent\n\n\n# ok, capture sample face images of the student from the webcam\ndef creaateSampleFacesStudent(student, folderForSave):\n    sampleNum = 0\n    nSamples = NUMBER_OF_SAMPLES\n\n    cap = cv2.VideoCapture(0)\n    detector = dlib.get_frontal_face_detector()\n\n    while(True):\n        ret, img = cap.read()\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        dets = detector(img, 1)\n        for i, d in enumerate(dets):\n            # TODO: check this part\n            sampleNum += 1\n            fileName = \"person--\" + str(student.folderGuid) + \"--\" + str(sampleNum) + \".jpg\"\n            fullFileName = os.path.join(folderForSave, fileName)\n            # the capture was being taken twice\n            # cv2.imwrite(folderPath + \"/User.\" + Id + \".\" + str(sampleNum) + \".jpg\", img[d.top():d.bottom(), d.left():d.right()]) \n\n            cv2.imwrite(fullFileName, img[d.top():d.bottom(), d.left():d.right()])\n            cv2.rectangle(img, (d.left(), d.top()) ,(d.right(), d.bottom()),(0,255,0) ,2)\n            # show \n            cv2.waitKey(200)\n            cv2.imshow('frame', img)\n            # show \n            cv2.waitKey(1)\n            print(f\"image num {sampleNum}\")\n        if(sampleNum >= nSamples):\n            break\n\n    cap.release()\n    cv2.destroyAllWindows()\n\n\n################################################################################################\n\n# create a folder where the sample images will be stored\ndef createFolderForDataset(folderName):\n    folderDataset = \"datasets\"\n    currentDirectory = os.getcwd()\n    folderPath = os.path.join(currentDirectory, folderDataset, folderName)\n    if not os.path.exists(folderPath):\n        os.makedirs(folderPath)\n    return folderPath\n\n\n\n# OK, check the status; train only when not already trained\n# trains the model\ndef train():\n    # def train(person_group_id):\n    # statusTrained = CF.person_group.get_status(personGroupId)\n    # if statusTrained[\"status\"] != \"succeeded\":\n    #     res = CF.person_group.train(personGroupId)\n    res = CF.person_group.train(personGroupId)\n    print(\"The person group was trained\")\n    return res\n\n\n# delete the person group\ndef eliminarPersonGroup(personGr):\n    CF.person_group.delete(personGr)\n    print(\"person group deleted\")\n\n\n\ndef trainIfNotTrained():\n    # 
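upstream cognitive_face signature, quoted for reference:\n    # 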
def train(person_group_id):\n statusTrained = CF.person_group.get_status(personGroupId)\n if statusTrained[\"status\"] != \"succeeded\":\n res = CF.person_group.train(personGroupId)\n\n\n\n\n\n####################################\n\n\n\n\n\n\n\n\n\n\n# only if main\n\nif __name__ == \"__main__\":\n \n\n student = readPersonDetails()\n student = saveStudentInDb(student)\n\n folder = createFolderForDataset(student.folderGuid)\n creaateSampleFacesStudent(student, folder)\n\n createPersonGroupIfNotExits()\n\n student = createPersonInCfAndUpdateInLocalDb(student)\n addImageFacesToPerson(student, folder)\n ####################################\n\n\n # eliminarPersonGroup(personGroupId)\n\n # train()\n","sub_path":"caso_de_uso_registrar_persona.py","file_name":"caso_de_uso_registrar_persona.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"420362636","text":"from _context import vugrad\n\nimport numpy as np\n\n# for running from the command line\nfrom argparse import ArgumentParser\n\nimport vugrad as vg\n\n# Parse command line arguments\nparser = ArgumentParser()\n\nparser.add_argument('-D', '--dataset',\n dest='data',\n help='Which dataset to use. [synth, mnist]',\n default='synth', type=str)\n\nparser.add_argument('-b', '--batch-size',\n dest='batch_size',\n help='The batch size (how many instances to use for a single forward/backward pass).',\n default=128, type=int)\n\nparser.add_argument('-e', '--epochs',\n dest='epochs',\n help='The number of epochs (complete passes over the complete training data).',\n default=20, type=int)\n\nparser.add_argument('-l', '--learning-rate',\n dest='lr',\n help='The learning rate. That is, a scalar that determines the size of the steps taken by the '\n 'gradient descent algorithm. 0.1 works well for synth, 0.0001 works well for MNIST.',\n default=0.01, type=float)\n\nargs = parser.parse_args()\n\n## Load the data\nif args.data == 'synth':\n (xtrain, ytrain), (xval, yval), num_classes = vg.load_synth()\nelif args.data == 'mnist':\n (xtrain, ytrain), (xval, yval), num_classes = vg.load_mnist(final=False, flatten=True)\nelse:\n raise Exception(f'Dataset {args.data} not recognized.')\n\nprint(f'## loaded data:')\nprint(f' number of instances: {xtrain.shape[0]} in training, {xval.shape[0]} in validation')\nprint(f' training class distribution: {np.bincount(ytrain)}')\nprint(f' val. 
class distribution: {np.bincount(yval)}')\n\nnum_instances, num_features = xtrain.shape\n\n## Create the model.\nmlp = vg.MLP(input_size=num_features, output_size=num_classes)\n\nn, m = xtrain.shape\nb = args.batch_size\n\nprint('\\n## Starting training')\n\ncl = '...'\n\nfor epoch in range(args.epochs):\n\n    print(f'epoch {epoch:03}')\n\n    if epoch % 1 == 0:\n        ## Compute validation accuracy\n\n        o = mlp(vg.TensorNode(xval))\n        oval = o.value\n\n        predictions = np.argmax(oval, axis=1)\n        num_correct = (predictions == yval).sum()\n        acc = num_correct / yval.shape[0]\n\n        o.clear() # gc the computation graph\n\n        print(f'   accuracy: {acc:.4}')\n\n    cl = 0.0 # running sum of the training loss\n\n    # We loop over the data in batches of size `b`\n    for fr in range(0, n, b):\n\n        # The end index of the batch\n        to = min(fr + b, n)\n\n        # Slice out the batch and its corresponding target values\n        batch, targets = xtrain[fr:to, :], ytrain[fr:to]\n\n        # Wrap the inputs in a Node\n        batch = vg.TensorNode(value=batch)\n\n        outputs = mlp(batch)\n        loss = vg.celoss(outputs, targets)\n        # -- The computation graph is now complete. It consists of the mlp, together with the computation of\n        #    the scalar loss.\n        # -- The variable `loss` is the TreeNode at the very top of our computation graph. This means we can call\n        #    it to perform operations on the computation graph, like clearing the gradients, starting the backpropagation\n        #    and clearing the graph.\n\n        cl += loss.value\n        # -- We must be careful here to extract the _raw_ value for the running loss. What would happen if we kept\n        #    a running sum using the TensorNode?\n\n        # Start the backpropagation\n        loss.backward()\n\n        # Apply gradient descent\n        for parm in mlp.parameters():\n            parm.value -= args.lr * parm.grad\n        # -- Note that we are directly manipulating the members of the parm TensorNode. This means that for this\n        #    part, we are not building up a computation graph.\n\n        # -- In PyTorch, the gradient descent is abstracted away into an Optimizer. This allows us to build slightly more\n        #    complex optimizers than plain gradient descent.\n\n        # Finally, we need to reset the gradients to zero ...\n        loss.zero_grad()\n        # ... 
and delete the parts of the computation graph we don't need to remember.\n loss.clear()\n\n print(f' running loss: {cl:.4}')\n","sub_path":"experiments/train_mlp.py","file_name":"train_mlp.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447235868","text":"# Sprite classes for platform game\nimport pygame as pg\nfrom settings import *\nfrom game_manager import *\nfrom player_bullet import *\nfrom life import *\nvec = pg.math.Vector2\n\n\nclass Player(pg.sprite.Sprite):\n def __init__(self, game):\n pg.sprite.Sprite.__init__(self)\n self.game = game\n # sprites load\n self.crt_frame = 0\n self.last_update = 0\n self.load_images()\n self.image = self.standing_frame\n self.rect = self.image.get_rect()\n self.rect.width = 10\n self.lifes = list()\n\n next_life_pos_x = 0\n for life in range(GameManager.number_of_lifes):\n self.lifes.append(Life(LIFE_POS_X + next_life_pos_x, LIFE_POS_Y))\n next_life_pos_x += 40\n self.number_of_lifes = GameManager.number_of_lifes\n self.rect.center = (DISPLAY_WIDTH / 2 - 100, DISPLAY_HEIGHT / 2)\n\n\n self.throws = False\n\n # position, velocity,acceleration and movement of the player\n self.pos = vec(DISPLAY_WIDTH / 2 - 100, DISPLAY_HEIGHT / 2)\n self.vel = vec(0, 0)\n self.acc = vec(0, 0)\n self.jumping = False\n self.walking = False\n self.player_center = self.rect.width / 2\n self.world = game.backgrounds[game.world_number]\n\n def remove_life(self):\n self.number_of_lifes -= 1\n self.lifes[self.number_of_lifes].kill()\n self.lifes.pop(self.number_of_lifes)\n\n if self.number_of_lifes <= 1:\n self.game.playing = False\n GameManager.reset = True\n\n def throw_bullet(self):\n if not self.throws:\n self.bullet = PlayerBullet(self.game, self, self.game.is_boss_fight)\n self.throws = True\n if not GameManager.mute:\n self.game.bullet_snd.play()\n def get_image(self, path, img_name):\n image = pg.transform.scale(pg.image.load(path + img_name).convert(), (PLAYER_WIDTH, PLAYER_HEIGHT))\n image.set_colorkey(BLACK)\n return image\n\n def load_images(self):\n self.standing_frame = self.get_image(IMG_PLAYER_PATH, 'player_0.png')\n self.jump_frame_r = self.get_image(IMG_PLAYER_PATH, 'player_jump.png')\n self.jump_frame_l = pg.transform.flip(self.jump_frame_r, True, False)\n self.walk_frame_r = [self.get_image(IMG_PLAYER_PATH, 'player_' + str(i) + '.png') for i in range(1, 13)]\n\n self.walk_frame_l = []\n for frame in self.walk_frame_r:\n self.walk_frame_l.append(pg.transform.flip(frame, True, False))\n self.walk_len = len(self.walk_frame_r)\n\n def jump(self):\n if self.pos.y == DISPLAY_HEIGHT - BLOCK_HEIGHT and not self.jumping:\n self.jumping = True\n self.vel.y = -PLAYER_JUMP\n if not GameManager.mute:\n self.game.jump_snd.play()\n\n def jump_cut(self):\n if self.jumping:\n if self.vel.y < -PLAYER_SMALL_JUMP:\n self.vel.y = -PLAYER_SMALL_JUMP\n\n\n def update(self):\n self.animate()\n self.acc = vec(0, PLAYER_GRAV)\n keys = pg.key.get_pressed()\n if keys[pg.K_UP]:\n self.jump()\n if keys[pg.K_LEFT]:\n self.acc.x = -PLAYER_ACC\n if keys[pg.K_RIGHT]:\n self.acc.x = PLAYER_ACC\n\n\n # apply friction\n self.acc += self.vel * PLAYER_FRICTION\n\n # equations of motion\n self.vel += 0.5 * self.acc\n #fix so that the player stops\n if abs(self.vel.x) < 0.1:\n self.vel.x = 0\n self.pos += self.vel\n\n\n '''There is a display and a stage area\n the display area is what we see and the stage area is the scrolling background behind \n '''\n if not GameManager.world_number == 
BOSS_WORLD_NUMBER:\n # if the player reaches the end of the stage\n if self.pos.x >STAGE_WIDTH - self.player_center: self.pos.x = STAGE_WIDTH - self.player_center\n # if the player goes to the beginning of the stage\n if self.pos.x < self.player_center: self.pos.x = self.player_center\n\n # if the player has not reached the middle of the display area\n if self.pos.x < START_SCROLL_X:\n self.game.x_progression = self.pos.x\n # if the player has reached the end of the stage and we don't need to scroll anymore\n elif self.pos.x > STAGE_WIDTH - START_SCROLL_X:\n self.game.x_progression = self.pos.x - STAGE_WIDTH + DISPLAY_WIDTH\n # if the player is in the middle, so not at the end or the beginning of the stage\n else:\n self.game.x_progression = START_SCROLL_X\n self.world.stage_pos_x += -self.vel.x\n\n self.rect.midbottom = vec(self.game.x_progression, self.pos.y)\n else:\n if self.pos.x > DISPLAY_WIDTH - self.player_center: self.pos.x = DISPLAY_WIDTH - self.player_center\n if self.pos.x < self.player_center: self.pos.x = self.player_center\n self.rect.midbottom = self.pos\n\n def animate(self):\n now = pg.time.get_ticks()\n if self.vel.x != 0 and not self.jumping:\n self.walking = True\n else:\n self.walking = False\n\n if self.jumping:\n\n if self.vel.x > 0:\n self.image = self.jump_frame_r\n else:\n self.image = self.jump_frame_l\n self.rect = self.image.get_rect()\n\n #Walk animation\n if self.walking:\n if now - self.last_update > 50:\n self.last_update = now\n self.crt_frame = (self.crt_frame + 1) % self.walk_len\n if self.vel.x > 0:\n self.image = self.walk_frame_r[self.crt_frame]\n else:\n self.image = self.walk_frame_l[self.crt_frame]\n self.rect = self.image.get_rect()\n if not self.jumping and not self.walking:\n self.image = self.standing_frame\n self.rect = self.image.get_rect()\n\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77293261","text":"import sys\n\nfrom quadforlss import forecasting as fore\n\nfrom quadforlss import estimator\n\nimport opentext\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n\nimport scipy\n\nimport pickle\n\nimport sympy as sp\n\ndef faa(r, cgg, cgn, cnn, dercgg, dercgn, dercnn):\n A = dercgg/cgg-2*r**2.*dercgn/cgn\n A = A**2.\n B = 2*r**2.*(1.-r**2.)*(dercgn/cgn)**2.\n C = 2*r**2.*dercnn/cnn\n C *= (dercgg/cgg-2*dercgn/cgn)\n D = dercnn/cnn\n D = D**2.\n tot = 1./(2*(1.-r**2.)**2.)\n tot *= (A+B+C+D)\n return tot\n\nif len(sys.argv) == 1:\n print('Choose your directory!')\n sys.exit()\n\ndirec = str(sys.argv[1])\n\nfilename = 'values.txt'\nvalues = opentext.get_values(direc+'/'+filename)\n\n#Specify your data directory with the N curves\ndata_dir = direc+'/data_dir/'\n\n#Specify your output plot directory\noutput = direc+'/pics/'\n\nprint('Parameter Values are, ', values)#, 'bgfid ', bgfid, ' bnfid ', bnfid, ' cfid ',cfid, ' fnlfid ', fnlfid)\n\nwith open(direc+'/data_dir/spectra.pickle', 'rb') as handle:\n dic = pickle.load(handle, encoding='latin1')\n\nP_L = dic['PL']\nPgg = dic['Cgg']\nPnn = dic['Cnn']\nPgn = dic['Cgn']\ndfnlPgg = dic['dfnlCgg']\ndfnlPgn = dic['dfnlCgn']\ndfnlPnn = dic['dfnlCnn']\nN = dic['Ngg'] \nK = dic['K']\n\nnbar = values['ngal']\nfnlfid = values['fnlfid']\n\nshot = 1./dic['ngal']+0.*Pgg\n\n\n#### Define symbols\n\nb, fnl, nbar, Pnl, func = sp.symbols('b fnl nbar Pnl func')\n\nbias = b+fnl*func\nP_total = bias**2.+1/nbar\n\n\nprint(P_total)\n\nprint(sp.diff(P_total, 
b))\n\nderb_P_s = sp.diff(P_total, b)\nderb_P = sp.lambdify([b, fnl, nbar, Pnl, func], derb_P_s, 'numpy')\n\nresult = derb_P(1., 0., 1., Pnn*0.+1., Pgn*0.+1.)\nprint(result)\n'''\n\nfig, ax = plt.subplots( nrows=1, ncols=1 )\n#plt.xlim(0.01, 0.1)\n#plt.ylim(1e1, 1e8)\nplt.xlabel('$K$ $(h Mpc^{-1})$')\nplt.ylabel('$P$ $(h^{-3} Mpc^{3})$')\nax.plot(K, Pgg, label = 'Pgg for fnl='+str(fnlfid))\nax.plot(K, Pnn, label = 'Pnn, n = growth est')\nax.plot(K, Pgn, label = 'Pgn')\nax.legend(loc = 'best', prop = {'size': 6})\nfig.savefig(output+'powers_forecast_fid_fnl'+str(fnlfid)+'.png', dpi = 300)\nplt.close(fig)\n\nfig, ax = plt.subplots( nrows=1, ncols=1 )\n#plt.xlim(0.01, 0.1)\n#plt.ylim(1e1, 1e8)\nplt.xlabel('$K$ $(h Mpc^{-1})$')\nplt.ylabel('$P$ $(h^{-3} Mpc^{3})$')\nax.plot(K, K*0+shot, label = 'Shot Noise')\nax.plot(K, N, label = 'Nnn, n = growth est')\nax.plot(K, P_L, label = 'P Linear')\nax.legend(loc = 'best', prop = {'size': 6})\nfig.savefig(output+'signal_noise_powers_forecast_fid_fnl'+str(fnlfid)+'.png', dpi = 300)\nplt.close(fig)\n\n\nkeyfnl = 'fnl'\n\nel1, el2 = keyfnl, keyfnl\n\n\ndef D(y):\n return 1/(1+y)\nz = 0.\n\n\nforecast = fore.getcompleteFisher(cgg = Pgg, cgn = Pgn, cnn = Pnn, acgg = dfnlPgg, acgn = dfnlPgn, acnn = dfnlPnn)\nforecastgg = fore.getcompleteFisher(cgg = Pgg, cgn = 1.e-4, cnn = 1.e-4, acgg = dfnlPgg, acgn = 0., acnn = 0.) #put only derivatives to zero\n\nr = Pgn/np.sqrt(Pgg*Pnn)\n\nf = faa(r = r, cgg = Pgg, cgn = Pgn, cnn = Pnn, dercgg = dfnlPgg, dercgn = dfnlPgn, dercnn = dfnlPnn)\n\nforecast = f.copy()\n\nf = faa(r = 0, cgg = Pgg, cgn = Pgn, cnn = Pnn, dercgg = dfnlPgg, dercgn = 0., dercnn = 0.)\n\nerrorgg = forecastgg**-0.5\nerror = forecast**-0.5\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize = (10, 10))\n\nax3.set_xlabel('$K$ ($h$Mpc$^{-1}$)')\nax3.set_ylabel('$ \\sigma(f_{nl}) $')\nax3.loglog(K, error, label = 'Sigma(fnl = '+str(fnlfid)+')')\nax3.loglog(K, errorgg, label = 'Galaxy Only Sigma(fnl = '+str(fnlfid)+')')\nax3.legend(loc = 'best', prop = {'size': 6})\n\nax1.set_xlabel('$K$ ($h$Mpc$^{-1}$)')\nax1.set_ylabel('$ r $')\nax1.plot(K, r, label = 'Corr, fnl='+str(fnlfid))\nax1.legend(loc = 'best', prop = {'size': 6})\n\nprint('Integrated Fisher Error')\n\nKminforvol = np.min(K)\nV = (2.*np.pi)**3./Kminforvol**3.\n\nFisherPerMode = forecast\nFisherPerModegg = forecastgg\n\nkmin = np.min(K)\nkmax = np.max(K)\n\nprint('kmin, ', kmin, ' kmax, ', kmax)\n\nKs = np.arange(kmin, kmax/1.5, 0.001)\n\nIntegratedFish = np.array([])\nIntegratedFishgg = np.array([])\nIntegratedFishVol = np.array([])\nIntegratedFishggVol = np.array([])\nfor Kmin in Ks:\n IntFish = fore.getIntregratedFisher(K, FisherPerMode, Kmin, kmax, V)\n IntegratedFish = np.append(IntegratedFish, IntFish**-0.5)\n IntFish = fore.getIntregratedFisher(K, FisherPerModegg, Kmin, kmax, V)\t\n IntegratedFishgg = np.append(IntegratedFishgg, IntFish**-0.5)\n\nh = 0.67\nV = (2*np.pi)**3/kmin**3.#h**3*100*10**9\nV = (np.pi)**3/kmin**3/2.\nprint('Volume, ', ((2*np.pi)**3/(kmin*h*1e3)**3.))\n\nfor Kmin in Ks:\n IntFish = fore.getIntregratedFisher(K, FisherPerMode, Kmin, kmax, V)\n IntegratedFishVol = np.append(IntegratedFishVol, IntFish**-0.5)\n IntFish = fore.getIntregratedFisher(K, FisherPerModegg, Kmin, kmax, V)\n IntegratedFishggVol = np.append(IntegratedFishggVol, IntFish**-0.5)\t\n\ntext = ''#'zerocorr'\nnp.savetxt(data_dir+'singlefisher'+text+'.txt', np.c_[K, error, errorgg])\nnp.savetxt(data_dir+'integratedfisher'+text+'.txt', np.c_[Ks, IntegratedFishVol, 
IntegratedFishggVol])\nnp.savetxt(data_dir+'r.txt', np.c_[K, r])\n\n\n\nax2.set_xlabel('$K_{min}$ ($h$Mpc$^{-1}$)')\nax2.set_ylabel('$ Integrated \\sigma(f_{nl}) $')\nax2.loglog(Ks, IntegratedFishVol, label = 'Integrated $\\sigma(fnl = $'+str(fnlfid)+') for $V = $'+'{:.2E}'.format(V)+'$h^{-3}Mpc^3$')\nax2.loglog(Ks, IntegratedFishggVol, label = 'Galaxies Integrated $\\sigma(fnl = $'+str(fnlfid)+') for $V = $'+'{:.2E}'.format(V)+'$h^{-3}Mpc^3$')\nax2.legend(loc = 'best', prop = {'size': 6})\nax4.set_xlabel('$K_{min}$ ($h$Mpc$^{-1}$)')\nax4.set_ylabel('$Fraction$')\nax4.plot(Ks, Ks*0.+1.)\nax4.plot(Ks, IntegratedFish/IntegratedFishgg, label = 'Integrated $\\sigma_{combined/gonly}(fnl = $'+str(fnlfid)+')')\nax4.legend(loc = 'best', prop = {'size': 6})\nplt.subplots_adjust(hspace = 0.2, wspace = 0.5)\nfig.savefig(output+'plots.png', dpi = 300)\nplt.close(fig)\n'''\n","sub_path":"example/sympyforecast.py","file_name":"sympyforecast.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"380398572","text":"from pyramid.i18n import TranslationStringFactory\nfrom eduid_am.user import User\n\ntranslation_domain = 'eduid-dashboard'\nTranslationString = TranslationStringFactory(translation_domain)\n\n\ndef locale_negotiator(request):\n settings = request.registry.settings\n available_languages = settings['available_languages'].keys()\n cookie_name = settings['lang_cookie_name']\n\n cookie_lang = request.cookies.get(cookie_name, None)\n if cookie_lang and cookie_lang in available_languages:\n return cookie_lang\n\n user = request.session.get('user')\n if user:\n preferredLanguage = user.get_preferred_language()\n if preferredLanguage:\n return preferredLanguage\n\n locale_name = request.accept_language.best_match(available_languages)\n\n if locale_name not in available_languages:\n locale_name = settings.get('default_locale_name', 'sv')\n return locale_name\n","sub_path":"eduiddashboard/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462833589","text":"import concurrent.futures\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport logging\nimport time\n\n# pylint: disable=E0401\n\nimport modules.nested_data\nimport modules.custom_log\n\n\nurl_template = \"https://www.c5game.com/market.html?appid=578080&sort=price.desc&page={0}\"\n\ndef c5game(wrapper_kwargs, scraper):\n\n # upd obj for cache\n upd = {wrapper_kwargs.get('game'): {}}\n\n # Update local CNY rate\n wrapper_kwargs['CNY'] = json.loads(requests.get(\n 'http://{0}:{1}/get'.format(\n wrapper_kwargs.get('storage').conf.cl_host,\n wrapper_kwargs.get('storage').conf.cl_port\n ),\n params={\n 'api_key': wrapper_kwargs.get('storage').conf.cl_api_key,\n 'path': 'meta:currencies'\n }\n ).text).get('CNY')\n\n # Wait for next loop if no CNY\n if not wrapper_kwargs['CNY']:\n logging.info(\"No CNY\")\n return upd\n\n # Pack scraper to wrapper kwargs for using in another threads\n wrapper_kwargs['scraper'] = scraper\n\n # Generate all pages links\n tt = time.time()\n pages_links = _generate_pages(wrapper_kwargs)\n modules.custom_log.info('pages links generated', game=wrapper_kwargs.get('game'), market=wrapper_kwargs.get('market'), tt=tt)\n \n # Pages processing\n with concurrent.futures.ThreadPoolExecutor(15) as tpe:\n args = zip([wrapper_kwargs]*len(pages_links), pages_links)\n for res in tpe.map(_parse_page, args):\n 
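# each worker returns a partial {item_name: fields} dict for one page; the\n            # nested_data.merge helper (assumed here to deep-merge) folds it into the\n            # running result so sale and purchase fields for one item end up combined\n            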
upd[wrapper_kwargs.get('game')] = modules.nested_data.merge(upd[wrapper_kwargs.get('game')], res)\n\n    return upd\n\ndef _generate_pages(wrapper_kwargs):\n\n    pages_links = []\n\n    # Get page\n    resp = wrapper_kwargs['scraper'].get(url_template.format(1), timeout=120)\n    page = BeautifulSoup(resp.content, 'lxml')\n\n    # Extract pages count; fall back to a single page when no paginator is found\n    try:\n        pages_count = int(page.find('li', {'class': 'last'}).find('a').get('href').split('=')[-1])\n    except Exception:\n        pages_count = 1\n    \n    # Generate links\n    for num in range(1, pages_count+1):\n        pages_links.append(url_template.format(num))\n    \n    return pages_links\n\n\ndef _parse_page(args, retries=0):\n    wrapper_kwargs, link = args\n    if retries > 3:\n        return {}\n    try:\n        upd = {}\n\n        # Get page\n        res = wrapper_kwargs['scraper'].get(link, timeout=120)\n        if res.status_code != 200:\n            return _parse_page(args, retries=retries+1)\n        page = BeautifulSoup(res.content, 'lxml')\n\n        # Iterate items on page\n        for item in page.find('div', {'class': 'tab-pane'}).find_all('li'):\n\n            # Extract info\n            name = item.find('p', {'class': 'name'}).find('span').get_text().strip()\n            price = float(item.find('span', {'class': 'price'}).get_text().replace('¥', '').strip())\n            link = 'https://www.c5game.com{0}'.format(\n                item.find('p', {'class': 'name'}).find('a')['href']\n            )\n            direction = item['class'][0].strip()\n\n            # Set fields\n            if name not in upd:\n                upd[name] = {}\n            if direction == 'selling':\n                upd[name]['c5game.com(sale)|price'] = price / wrapper_kwargs.get('CNY')\n                upd[name]['c5game.com(sale)|available'] = True\n                upd[name]['c5game.com(sale)|link'] = link\n            elif direction == 'purchaseing':  # sic: class name as it appears on the site\n                upd[name]['c5game.com(purchase)|price'] = price / wrapper_kwargs.get('CNY')\n                upd[name]['c5game.com(purchase)|available'] = True\n                upd[name]['c5game.com(purchase)|link'] = link\n        \n        return upd\n\n    except Exception as e:\n        logging.exception(e)\n        # propagate the retry's result (a bare recursive call would return None)\n        return _parse_page(args, retries=retries+1)\n","sub_path":"core/src/parsers/pubg/c5game.py","file_name":"c5game.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"542436711","text":"#-*- coding:utf-8 -*-\nimport tensorflow as tf\nfrom tensorflow.contrib import seq2seq\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport numpy as np\nimport os\nimport pickle\n\ndef main():\n    data_dir = './data/cp.txt'\n    text = load_data(data_dir)\n    view_sentence_range = (0, 10)\n\n\n    print('Dataset overview:')\n    print('Number of unique words (lottery draw records): {}'.format(len({word: None for word in text.split()})))\n    scenes = text.split('\\n\\n')\n    sentence_count_scene = [scene.count('\\n') for scene in scenes]\n    print('Number of draws: {}'.format(int(np.average(sentence_count_scene))))\n\n    sentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\n    print('Number of lines: {}'.format(len(sentences)))\n    word_count_sentence = [len(sentence.split()) for sentence in sentences]\n    print('Average words per line: {}'.format(np.ceil(np.average(word_count_sentence))))\n\n    print()\n    print('Draw records {} to {}:'.format(*view_sentence_range))\n    print('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))\n\n    # Preprocess Training, Validation, and Testing Data\n    preprocess_and_save_data(data_dir, create_lookup_tables)\n\n    int_text, vocab_to_int, int_to_vocab = load_preprocess()\n\n    '''\n    num_epochs sets how many epochs to train for.\n    batch_size is the batch size.\n    rnn_size is the size of the RNN (dimension of the hidden units).\n    embed_dim is the dimension of the embedding layer.\n    seq_length is the sequence length, always 1 here.\n    learning_rate is the learning rate.\n    show_every_n_batches is how many batches to wait before printing training stats.\n    '''\n\n    # Number of Epochs\n    
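# editor's note (assumption): the values below are the author's hand-tuned\n    # settings for this corpus; seq_length stays 1 because each draw is fed to\n    # the RNN as a single time step (see the docstring above).\n    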
num_epochs = 25\n    # Batch Size\n    batch_size = 32\n    # RNN Size\n    rnn_size = 1000\n    # Embedding Dimension Size\n    embed_dim = 1000\n    # Sequence Length\n    seq_length = 1\n    # Learning Rate\n    learning_rate = 0.01\n    # Show stats for every n number of batches\n    show_every_n_batches = 10\n\n    save_dir = './save'\n\n    tf.reset_default_graph()\n    train_graph = tf.Graph()\n    with train_graph.as_default():\n        vocab_size = len(int_to_vocab)\n        input_text, targets, lr = get_inputs()\n        input_data_shape = tf.shape(input_text)\n        cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)\n        logits, final_state, embed_matrix = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)\n\n        # Probabilities for generating words\n        probs = tf.nn.softmax(logits, name='probs')\n\n        # Loss function\n        cost = seq2seq.sequence_loss(\n            logits,\n            targets,\n            tf.ones([input_data_shape[0], input_data_shape[1]]))\n        # cost = build_loss(logits, targets, vocab_size)\n\n        # We use the cosine distance:\n        norm = tf.sqrt(tf.reduce_sum(tf.square(embed_matrix), 1, keepdims=True))\n        normalized_embedding = embed_matrix / norm\n\n        probs_embeddings = tf.nn.embedding_lookup(normalized_embedding,\n                                                  tf.squeeze(tf.argmax(probs, 2)))  # np.squeeze(probs.argmax(2))\n        probs_similarity = tf.matmul(probs_embeddings, tf.transpose(normalized_embedding))\n\n        y_embeddings = tf.nn.embedding_lookup(normalized_embedding, tf.squeeze(targets))\n        y_similarity = tf.matmul(y_embeddings, tf.transpose(normalized_embedding))\n\n        # data_moments = tf.reduce_mean(y_similarity, axis=0)\n        # sample_moments = tf.reduce_mean(probs_similarity, axis=0)\n        similar_loss = tf.reduce_mean(tf.abs(y_similarity - probs_similarity))\n        total_loss = cost + similar_loss\n\n        # Optimizer\n        optimizer = tf.train.AdamOptimizer(lr)\n\n        # Gradient Clipping\n        gradients = optimizer.compute_gradients(total_loss)  # cost\n        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if\n                            grad is not None]  # clip_by_norm\n        train_op = optimizer.apply_gradients(capped_gradients)\n\n        # Accuracy\n        correct_pred = tf.equal(tf.argmax(probs, 2),\n                                tf.cast(targets, tf.int64))  # logits <--> probs tf.argmax(targets, 1) <--> targets\n        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\n    batches = get_batches(int_text[:-(batch_size + 1)], batch_size, seq_length)\n    test_batches = get_batches(int_text[-(batch_size + 1):], batch_size, seq_length)\n    top_k = 10\n    topk_acc_list = []\n    topk_acc = 0\n    sim_topk_acc_list = []\n    sim_topk_acc = 0\n\n    range_k = 5\n    floating_median_idx = 0\n    floating_median_acc_range_k = 0\n    floating_median_acc_range_k_list = []\n\n    floating_median_sim_acc_range_k = 0\n    floating_median_sim_acc_range_k_list = []\n\n    losses = {'train': [], 'test': []}\n    accuracies = {'accuracy': [], 'topk': [], 'sim_topk': [], 'floating_median_acc_range_k': [],\n                  'floating_median_sim_acc_range_k': []}\n\n    with tf.Session(graph=train_graph) as sess:\n        sess.run(tf.global_variables_initializer())\n        saver = tf.train.Saver()\n        for epoch_i in range(num_epochs):\n            state = sess.run(initial_state, {input_text: batches[0][0]})\n\n            # Training iterations; record the training loss\n            for batch_i, (x, y) in enumerate(batches):\n                feed = {\n                    input_text: x,\n                    targets: y,\n                    initial_state: state,\n                    lr: learning_rate}\n                train_loss, state, _ = sess.run([total_loss, final_state, train_op], feed)  # cost\n                losses['train'].append(train_loss)\n\n                # Show every <show_every_n_batches> batches\n                if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\n                    print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n                        epoch_i,\n                        batch_i,\n                        len(batches),\n                        train_loss))\n\n            
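# editor's note: prev_state is re-seeded below with a dummy token id\n            # (np.array([[1]])) rather than the first test batch, as the inline\n            # comment to its right hints.\n            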
# Iterate over the test data\n            acc_list = []\n            prev_state = sess.run(initial_state, {input_text: np.array([[1]])})  # test_batches[0][0]\n            for batch_i, (x, y) in enumerate(test_batches):\n                # Get Prediction\n                test_loss, acc, probabilities, prev_state = sess.run(\n                    [total_loss, accuracy, probs, final_state],\n                    {input_text: x,\n                     targets: y,\n                     initial_state: prev_state})  # cost\n\n                # Record the test loss and accuracy\n                acc_list.append(acc)\n                losses['test'].append(test_loss)\n                accuracies['accuracy'].append(acc)\n\n                print('Epoch {:>3} Batch {:>4}/{} test_loss = {:.3f}'.format(\n                    epoch_i,\n                    batch_i,\n                    len(test_batches),\n                    test_loss))\n\n                # Use the embedding matrix and the generated predictions to build the similarity matrix sim\n                valid_embedding = tf.nn.embedding_lookup(normalized_embedding, np.squeeze(probabilities.argmax(2)))\n                similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))\n                sim = similarity.eval()\n\n                # Record the top-K accuracy of the raw predictions and of their nearest embedding neighbours\n                topk_acc = 0\n                sim_topk_acc = 0\n                for ii in range(len(probabilities)):\n\n                    nearest = (-sim[ii, :]).argsort()[0:top_k]\n                    if y[ii] in nearest:\n                        sim_topk_acc += 1\n\n                    if y[ii] in (-probabilities[ii]).argsort()[0][0:top_k]:\n                        topk_acc += 1\n\n                topk_acc = topk_acc / len(y)\n                topk_acc_list.append(topk_acc)\n                accuracies['topk'].append(topk_acc)\n\n                sim_topk_acc = sim_topk_acc / len(y)\n                sim_topk_acc_list.append(sim_topk_acc)\n                accuracies['sim_topk'].append(sim_topk_acc)\n\n                # Compute how far the true value ranks inside the predictions\n                realInSim_distance_list = []\n                realInPredict_distance_list = []\n                for ii in range(len(probabilities)):\n                    sim_nearest = (-sim[ii, :]).argsort()\n                    idx = list(sim_nearest).index(y[ii])\n                    realInSim_distance_list.append(idx)\n\n                    nearest = (-probabilities[ii]).argsort()[0]\n                    idx = list(nearest).index(y[ii])\n                    realInPredict_distance_list.append(idx)\n\n                print('Rank of the true value within the predictions:')\n                print('max distance : {}'.format(max(realInPredict_distance_list)))\n                print('min distance : {}'.format(min(realInPredict_distance_list)))\n                print('mean distance : {}'.format(np.mean(realInPredict_distance_list)))\n                print('median distance : {}'.format(np.median(realInPredict_distance_list)))\n                print('distance std : {}'.format(np.std(realInPredict_distance_list)))\n\n                print('Rank of the true value within the prediction-similarity vectors:')\n                print('max distance : {}'.format(max(realInSim_distance_list)))\n                print('min distance : {}'.format(min(realInSim_distance_list)))\n                print('mean distance : {}'.format(np.mean(realInSim_distance_list)))\n                print('median distance : {}'.format(np.median(realInSim_distance_list)))\n                print('distance std : {}'.format(np.std(realInSim_distance_list)))\n                # sns.distplot(realInPredict_distance_list, rug=True) #, hist=False\n                # plt.hist(np.log(realInPredict_distance_list), bins=50, color='steelblue', normed=True )\n\n                # Compute the accuracy within a window of radius range_k centred on the median distance\n                floating_median_sim_idx = int(np.median(realInSim_distance_list))\n                floating_median_sim_acc_range_k = 0\n\n                floating_median_idx = int(np.median(realInPredict_distance_list))\n                floating_median_acc_range_k = 0\n                for ii in range(len(probabilities)):\n                    nearest_floating_median = (-probabilities[ii]).argsort()[0][\n                                              floating_median_idx - range_k:floating_median_idx + range_k]\n                    if y[ii] in nearest_floating_median:\n                        floating_median_acc_range_k += 1\n\n                    nearest_floating_median_sim = (-sim[ii, :]).argsort()[\n                                                  floating_median_sim_idx - range_k:floating_median_sim_idx + range_k]\n                    if y[ii] in nearest_floating_median_sim:\n                        floating_median_sim_acc_range_k += 1\n\n                floating_median_acc_range_k = floating_median_acc_range_k / len(y)\n                floating_median_acc_range_k_list.append(floating_median_acc_range_k)\n                
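# keep both range-k accuracy curves so they can be plotted after training\n                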
accuracies['floating_median_acc_range_k'].append(floating_median_acc_range_k)\n\n floating_median_sim_acc_range_k = floating_median_sim_acc_range_k / len(y)\n floating_median_sim_acc_range_k_list.append(floating_median_sim_acc_range_k)\n accuracies['floating_median_sim_acc_range_k'].append(floating_median_sim_acc_range_k)\n\n print('Epoch {:>3} floating median sim range k accuracy {} '.format(epoch_i, np.mean(\n floating_median_sim_acc_range_k_list))) #:.3f\n print('Epoch {:>3} floating median range k accuracy {} '.format(epoch_i, np.mean(\n floating_median_acc_range_k_list))) #:.3f\n print('Epoch {:>3} similar top k accuracy {} '.format(epoch_i, np.mean(sim_topk_acc_list))) #:.3f\n print('Epoch {:>3} top k accuracy {} '.format(epoch_i, np.mean(topk_acc_list))) #:.3f\n print('Epoch {:>3} accuracy {} '.format(epoch_i, np.mean(acc_list))) #:.3f\n\n # Save Model\n saver.save(sess, save_dir) # , global_step=epoch_i\n print('Model Trained and Saved')\n embed_mat = sess.run(normalized_embedding)\n\n\n sns.distplot(realInSim_distance_list, rug=True)\n sns.distplot(realInPredict_distance_list, rug=True)\n\n plt.plot(losses['train'], label='Training loss')\n plt.legend()\n _ = plt.ylim()\n\n plt.plot(losses['test'], label='Test loss')\n plt.legend()\n _ = plt.ylim()\n\n plt.plot(accuracies['accuracy'], label='Accuracy')\n plt.plot(accuracies['topk'], label='Top K')\n plt.plot(accuracies['sim_topk'], label='Similar Top K')\n plt.plot(accuracies['floating_median_acc_range_k'], label='Floating Median Range K Acc')\n plt.plot(accuracies['floating_median_sim_acc_range_k'], label='Floating Median Sim Range K Acc')\n plt.legend()\n _ = plt.ylim()\n\n for batch_i, (x, y) in enumerate(test_batches):\n plt.plot(y, label='Targets')\n plt.plot(np.squeeze(probabilities.argmax(2)), label='Prediction')\n plt.legend()\n _ = plt.ylim()\n\n # Save parameters for checkpoint\n save_params((seq_length, save_dir))\n\n _, vocab_to_int, int_to_vocab = load_preprocess()\n seq_length, load_dir = load_params()\n\n with train_graph.as_default():\n saver = tf.train.Saver()\n\n with tf.Session(graph=train_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n embed_mat = sess.run(embed_matrix)\n\n viz_words = 1000\n tsne = TSNE()\n with train_graph.as_default():\n embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])\n\n fig, ax = plt.subplots(figsize=(24, 24))\n for idx in range(viz_words):\n plt.scatter(*embed_tsne[idx, :], color='steelblue')\n plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)\n\n gen_length = 17\n prime_word = '202'\n\n loaded_graph = tf.Graph() # loaded_graph\n with tf.Session(graph=train_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n input_text, initial_state, final_state, probs = get_tensors(train_graph) # loaded_graph\n\n # Sentences generation setup\n gen_sentences = [prime_word]\n prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\n\n # Generate sentences\n for n in range(gen_length):\n # Dynamic Input\n dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n dyn_seq_length = len(dyn_input[0])\n\n # Get Prediction\n probabilities, prev_state = sess.run(\n [probs, final_state],\n {input_text: dyn_input, initial_state: prev_state})\n\n valid_embedding = 
tf.nn.embedding_lookup(normalized_embedding, probabilities.argmax())\n valid_embedding = tf.reshape(valid_embedding, (1, len(int_to_vocab)))\n similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))\n sim = similarity.eval()\n\n pred_word = pick_word(probabilities[dyn_seq_length - 1], sim, int_to_vocab, 5, 'median')\n\n gen_sentences.append(pred_word)\n\n cp_script = ' '.join(gen_sentences)\n cp_script = cp_script.replace('\\n ', '\\n')\n cp_script = cp_script.replace('( ', '(')\n\n print(cp_script)\n\n int_sentences = [int(words) for words in gen_sentences]\n int_sentences = int_sentences[1:]\n\n val_data = [[103], [883], [939], [36], [435], [173], [572], [828], [509], [723], [145], [621], [535], [385],\n [98], [321], [427]]\n\n plt.plot(int_sentences, label='History')\n plt.plot(val_data, label='val_data')\n plt.legend()\n _ = plt.ylim()\n\n\n\n# def build_loss(logits, targets, num_classes):\n# ''' Calculate the loss from the logits and the targets.\n\n# Arguments\n# ---------\n# logits: Logits from final fully connected layer\n# targets: Targets for supervised learning\n# num_classes: Number of classes in targets\n\n# '''\n# y_one_hot = tf.one_hot(tf.squeeze(targets), num_classes)\n# y_reshaped = tf.reshape(y_one_hot, (batch_size, num_classes))\n\n# # Softmax cross entropy loss\n# loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)\n# loss = tf.reduce_mean(loss)\n# return loss\n\ndef get_tensors(loaded_graph):\n \"\"\"\n Get input, initial state, final state, and probabilities tensor from <loaded_graph>\n :param loaded_graph: TensorFlow graph loaded from file\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\n \"\"\"\n inputs = loaded_graph.get_tensor_by_name(\"input:0\")\n initial_state = loaded_graph.get_tensor_by_name(\"initial_state:0\")\n final_state = loaded_graph.get_tensor_by_name(\"final_state:0\")\n probs = loaded_graph.get_tensor_by_name(\"probs:0\")\n return inputs, initial_state, final_state, probs\n\ndef pick_word(probabilities, sim, int_to_vocab, top_n = 5, pred_mode = 'sim'):\n \"\"\"\n Pick the next word in the generated text\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\n :param use_max: use max probabilities number\n :param top_n: Top number\n :return: String of the predicted word\n \"\"\"\n vocab_size = len(int_to_vocab)\n\n if pred_mode == 'sim':\n p = np.squeeze(sim)\n p[np.argsort(p)[:-top_n]] = 0\n p = p / np.sum(p)\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return int_to_vocab[c]\n elif pred_mode == 'median':\n p = np.squeeze(sim)\n p[np.argsort(p)[:floating_median_sim_idx - top_n]] = 0\n p[np.argsort(p)[floating_median_sim_idx + top_n:]] = 0\n p = np.abs(p) / np.sum(np.abs(p))\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return int_to_vocab[c]\n elif pred_mode == 'topk':\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-top_n]] = 0\n p = p / np.sum(p)\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return int_to_vocab[c]\n elif pred_mode == 'max':\n return int_to_vocab[probabilities.argmax()]\n\ndef get_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, and learning rate.\n :return: Tuple (input, targets, learning rate)\n \"\"\"\n inputs = tf.placeholder(tf.int32, [None, None], name=\"input\")\n targets = tf.placeholder(tf.int32, [None, None], name=\"targets\")\n LearningRate = tf.placeholder(tf.float32)\n return inputs, targets, LearningRate\n\ndef 
get_init_cell(batch_size, rnn_size):\n \"\"\"\n Create an RNN Cell and initialize it.\n :param batch_size: Size of batches\n :param rnn_size: Size of RNNs\n :return: Tuple (cell, initialize state)\n \"\"\"\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)#num_units=embed_dim\n cell = tf.contrib.rnn.MultiRNNCell([lstm_cell] * 2)\n InitialState = cell.zero_state(batch_size, tf.float32)\n InitialState = tf.identity(InitialState, name=\"initial_state\")\n return cell, InitialState\n\ndef get_embed(input_data, vocab_size, embed_dim):\n \"\"\"\n Create embedding for <input_data>.\n :param input_data: TF placeholder for text input.\n :param vocab_size: Number of words in vocabulary.\n :param embed_dim: Number of embedding dimensions\n :return: Tuple (Embedded input, embed_matrix)\n \"\"\"\n embed_matrix = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1, 1))\n embed_layer = tf.nn.embedding_lookup(embed_matrix, input_data)\n return embed_layer, embed_matrix\n\ndef build_rnn(cell, inputs):\n \"\"\"\n Create a RNN using a RNN Cell\n :param cell: RNN Cell\n :param inputs: Input text data\n :return: Tuple (Outputs, Final State)\n \"\"\"\n Outputs, final_State = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n final_State = tf.identity(final_State, name=\"final_state\")\n return Outputs, final_State\n\ndef build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):\n \"\"\"\n Build part of the neural network\n :param cell: RNN cell\n :param rnn_size: Size of rnns\n :param input_data: Input data\n :param vocab_size: Vocabulary size\n :param embed_dim: Number of embedding dimensions\n :return: Tuple (Logits, FinalState, embed_matrix)\n \"\"\"\n embed_layer, embed_matrix = get_embed(input_data, vocab_size, embed_dim)\n Outputs, final_State = build_rnn(cell, embed_layer)\n logits = tf.layers.dense(Outputs, vocab_size)\n return logits, final_State, embed_matrix\n\ndef get_batches(int_text, batch_size, seq_length):\n \"\"\"\n Return batches of input and target\n :param int_text: Text with the words replaced by their ids\n :param batch_size: The size of batch\n :param seq_length: The length of sequence\n :return: Batches as a Numpy array\n \"\"\"\n batchCnt = len(int_text) // (batch_size * seq_length)\n int_text_inputs = int_text[:batchCnt * (batch_size * seq_length)]\n int_text_targets = int_text[1:batchCnt * (batch_size * seq_length)+1]\n\n result_list = []\n x = np.array(int_text_inputs).reshape(1, batch_size, -1)\n y = np.array(int_text_targets).reshape(1, batch_size, -1)\n\n x_new = np.dsplit(x, batchCnt)\n y_new = np.dsplit(y, batchCnt)\n\n for ii in range(batchCnt):\n x_list = []\n x_list.append(x_new[ii][0])\n x_list.append(y_new[ii][0])\n result_list.append(x_list)\n\n return np.array(result_list)\n\n\n# get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)\n\ndef create_lookup_tables():\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text of tv scripts split into words\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n \"\"\"\n vocab_to_int = {str(ii).zfill(3) : ii for ii in range(1000)}\n int_to_vocab = {ii : str(ii).zfill(3) for ii in range(1000)}\n return vocab_to_int, int_to_vocab\n\ndef load_data(path):\n \"\"\"\n Load Dataset from File\n \"\"\"\n input_file = os.path.join(path)\n with open(input_file, \"r\") as f:\n data = f.read()\n\n return data\n\n\ndef preprocess_and_save_data(dataset_path, create_lookup_tables):\n \"\"\"\n Preprocess Text Data\n \"\"\"\n text = load_data(dataset_path)\n\n text = text.lower()\n # text = 
text.split()\n\n    words = [word for word in text.split()]\n\n    reverse_words = [text.split()[idx] for idx in (range(len(words) - 1, 0, -1))]\n    vocab_to_int, int_to_vocab = create_lookup_tables() # text\n    # int_text = [vocab_to_int[word] for word in text]\n    int_text = [vocab_to_int[word] for word in reverse_words]\n    pickle.dump((int_text, vocab_to_int, int_to_vocab), open('preprocess.p', 'wb'))\n\n\ndef load_preprocess():\n    \"\"\"\n    Load the Preprocessed Training data and return them in batches of <batch_size> or less\n    \"\"\"\n    return pickle.load(open('preprocess.p', mode='rb'))\n\n\ndef save_params(params):\n    \"\"\"\n    Save parameters to file\n    \"\"\"\n    pickle.dump(params, open('params.p', 'wb'))\n\n\ndef load_params():\n    \"\"\"\n    Load parameters from file\n    \"\"\"\n    return pickle.load(open('params.p', mode='rb'))\n\nif __name__ == \"__main__\":\n    main()","sub_path":"utils/predict/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":23297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"253669419","text":"from flask import Flask, request\nfrom main import publish_message\n\napp = Flask(__name__)\n\n#http://127.0.0.1:5000/send?email=mehta.pritesh@gmail.com&message=hello world!\n\n@app.route('/')\ndef hello():\n\treturn \"Sample web app to send email\"\n\n@app.route('/send', methods=['GET', 'POST'])  # GET allowed so the sample URL above works; params come from the query string\ndef send_email():\n\t#req_data = request.get_json()\n\tmsg = request.args.get('message')\n\temail = request.args.get('email')\n\ttry:\n\t\tpublish_message(msg, email)\n\t\treturn {\"status\": 200}\n\texcept Exception as e:\n\t\treturn {\"status\": 500, \"error\": str(e)}  # str() keeps the payload JSON-serialisable\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', debug=True, port=80)","sub_path":"producer_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"343148503","text":"import collections\nimport functools\nimport types\n\n\ndef wrap_container(container_class, log):\n    '''\n    Returns a subclass of `container_class` that will call log for any setitem.\n\n    It will also change the name of subcontainers if it is a defaultdict, so\n    that they contain the name of the parent object for logging.\n    '''\n    if container_class == collections.deque:\n        return wrap_deque(log)\n\n    def __setitem__(self, k, v):\n        log('{}[{}]={}'.format(self.__name__, k, v))\n        super(LogContainer, self).__setitem__(k, v)\n\n    def __getitem__(self, k):\n        value = super(LogContainer, self).__getitem__(k)\n        if isinstance(self, collections.defaultdict):\n            value.__name__ = '{}[{}]'.format(self.__name__, str(k))\n        return value\n\n    LogContainer = type(\n        'Log{}'.format(container_class.__name__),\n        (container_class,),\n        {'__setitem__': __setitem__, '__getitem__': __getitem__}\n    )\n    return LogContainer\n\n\ndef wrap_deque(log):\n    def appendleft(self, v):\n        log('{}.appendleft({})'.format(self.__name__, v))\n        super(LogDequeue, self).appendleft(v)\n\n    def pop(self):\n        # deque.pop() takes no positional argument (unlike list.pop)\n        log('{}.pop()'.format(self.__name__))\n        return super(LogDequeue, self).pop()\n\n    LogDequeue = type(\n        'LogDequeue',\n        (collections.deque,),\n        {'appendleft': appendleft, 'pop': pop}\n    )\n    return LogDequeue\n\n\nclass DebugMetaClass(type):\n    '''\n    Logs the arguments and return values for every method call.\n\n    Also logs when dictionary and list attributes are changed.\n    '''\n\n    level = 0\n\n    def __init__(cls, name, bases, namespace, log_function):\n        type.__init__(cls, name, bases, namespace)\n\n    def __new__(cls, name, bases, 
namespace, log_function):\n cls.base_log_function = log_function\n\n for name, value in namespace.items():\n log_container = functools.partial(wrap_container, log=cls.log)\n if isinstance(value, types.FunctionType):\n new_value = cls.make_log(value)\n elif isinstance(value, collections.defaultdict):\n logged_factory = log_container(value.default_factory)\n new_value = log_container(collections.defaultdict)(logged_factory)\n elif isinstance(value, dict):\n new_value = log_container(dict)(**value)\n elif isinstance(value, list):\n new_value = log_container(list)(*value)\n else:\n continue\n new_value.__name__ = name\n namespace[name] = new_value\n return type.__new__(cls, name, bases, namespace)\n\n @classmethod\n def log(cls, message):\n cls.base_log_function(' ' * cls.level + message)\n\n @classmethod\n def make_log(cls, func):\n @functools.wraps(func)\n def log_wrapper(*args, **kwargs):\n cls.log_method_call(func, *args, **kwargs)\n cls.level += 1\n result = func(*args, **kwargs)\n cls.level -= 1\n cls.log_method_return(result)\n return result\n return log_wrapper\n\n @classmethod\n def log_method_call(cls, func, *args, **kwargs):\n arguments = ', '.join(\n list(map(str, args[1:])) + ['{}={}'.format(str(k), str(v)) for k, v in kwargs.items()]\n )\n cls.log('{}({})'.format(func.__name__, arguments))\n\n @classmethod\n def log_method_return(cls, result):\n cls.log('->{}'.format(str(result)))\n","sub_path":"debug_logging.py","file_name":"debug_logging.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536322861","text":"from flask import Blueprint\nfrom flask import request\nfrom flask import jsonify\nfrom be.model import user\nfrom be.model import store\nfrom be.model import order\n\nbp_order = Blueprint(\"order\", __name__, url_prefix=\"/order\")\n\n@bp_order.route(\"/createOrder\", methods=[\"POST\"])\ndef createOrder():\n orderId: str = request.json.get(\"orderId\", \"\")\n sellerName: str = request.json.get(\"sellerName\", \"\")\n buyerName : str = request.json.get(\"buyerName\", \"\")\n orderStatus : str = request.json.get(\"orderStatus\",\"\")\n cartlist : list = request.json.get(\"cartlist\",[])\n addr : str = request.json.get(\"addr\",\"\")\n o = order.Order\n ok= o.createOrder(orderId = orderId, sellerName = sellerName, buyerName = buyerName, orderStatus = orderStatus, cartlist = cartlist, addr = addr)\n if ok:\n return jsonify({\"message\": \"ok\"}), 200\n else:\n return jsonify({\"message\": \"Inquiry failed, no order\"}), 501\n\n@bp_order.route(\"/getOrder\", methods=[\"POST\"])\ndef getOrder():\n username: str = request.json.get(\"username\", \"\")\n token: str = request.headers.get(\"token\", \"\")\n o = order.Order\n ok, orderlist = o.getOrder(username=username, token=token)\n if ok:\n return jsonify({\"message\": \"ok\", \"orderlist\": orderlist}), 200\n else:\n return jsonify({\"message\": \"Inquiry failed, no order\"}), 501\n\n@bp_order.route(\"/cancelOrder\", methods=[\"POST\"])\ndef cancelOrder():\n orderId: str = request.json.get(\"orderId\", \"\")\n o = order.Order\n ok = o.cancelOrder(orderId=orderId)\n if ok:\n return jsonify({\"message\": \"ok\"}), 200\n else:\n return jsonify({\"message\": \"cancel failed, token error\"}), 401","sub_path":"be/view/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"98569312","text":"import numpy as np \nfrom 
def random_forest_fit(X, y, n_trees=100):\n\tforest = []\n\tfor _ in range(n_trees):\n\t\t# pick a random subset of 2-3 of the 4 iris features for each tree\n\t\tfeatures = np.random.choice(4, np.random.randint(2, 4), replace=False)\n\t\ttree = decision_tree_fit(*bagging(X[:,features], y))\n\t\ttree['features_used'] = features\n\t\tforest.append(tree)\n\t\n\treturn forest\n\ndef random_forest_predict(sample, forest):\n\t# majority vote over the per-tree predictions\n\tensemble = [decision_tree_classify(sample[tree['features_used']], tree) for tree in forest]\n\tu, c = np.unique(ensemble, return_counts=True)\n\treturn u[np.argmax(c)]\n\n\ndef main():\n\tiris = datasets.load_iris()\n\n\tX = iris.data\n\ty = iris.target\n\n\tn_samples = X.shape[0]\n\tn_trees = 100\n\n\tforest = random_forest_fit(X, y, n_trees)\n\tpredictions = [random_forest_predict(sample, forest) for sample in X]\n\t# print(f'Samples classified correctly: {sum(predictions==y)}')\n\n\tn_trees_range = [j for j in range(1, n_trees)]\n\taccuracy = []\n\tfor k in n_trees_range:\n\t\tpredictions = [random_forest_predict(sample, forest[:k]) for sample in X]\n\t\taccuracy.append(sum(predictions==y) / n_samples)\n\n\n\tplt.plot(range(1,n_trees), accuracy)\n\tplt.xlabel('k trees')\n\tplt.ylabel('accuracy')\n\tplt.xlim([1, n_trees])\n\tplt.title('Accuracy score for different number of trees')\n\tplt.show()\n\n\nif __name__=='__main__':\n\tmain()","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36255563","text":"from zipfile import ZipFile\nimport os\n\n\ndef unzip(zipfile, path, encoding='gbk'):\n\twith ZipFile(zipfile, 'r') as myzip:\n\t\tfor name in myzip.namelist():\n\t\t\t# zip member names are stored as cp437; re-decode with the real encoding\n\t\t\ttry:\n\t\t\t\tfilename = name.encode('cp437').decode(encoding)\n\t\t\texcept UnicodeEncodeError:\n\t\t\t\tfilename = name\n\t\t\tpathname = os.path.join(path, os.path.dirname(filename))\n\t\t\tif not os.path.exists(pathname) and pathname != \"\":\n\t\t\t\tos.makedirs(pathname)\n\t\t\tdata = myzip.read(name)\n\t\t\tfile = os.path.join(path, filename)\n\t\t\tif not os.path.exists(file):\n\t\t\t\twith open(file, 'wb') as f:\n\t\t\t\t\tf.write(data)\n\n\ndef zip_dir(dirname, zippath):\n\twith ZipFile(zippath, 'w') as myzip:\n\t\tif os.path.isfile(dirname):\n\t\t\tfiles = dirname\n\t\t\tmyzip.write(files)\n\t\t\treturn\n\t\telse:\n\t\t\tfiles = [os.path.join(root, name) for root, dirs, files in os.walk(dirname) for name in files]\n\t\t\tfor file in files:\n\t\t\t\tmyzip.write(file)\n","sub_path":"compress/zipit.py","file_name":"zipit.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23563160","text":"'''\r\nA frog has fallen into a 20-metre well. Each day it climbs 3 metres and each\r\nnight it slides back 2 metres. On which day does it get out? Solve by program.\r\n'''\r\n\r\nheigh = 20   # well height\r\nheigh1 = 0   # position at the start of each day\r\nday = 0\r\n\r\nwhile True:\r\n    day = day + 1\r\n    heigh2 = heigh1 + 3      # position after the day's climb\r\n    heigh1 = heigh1 + 3 - 2  # net gain of 1 metre per full day\r\n\r\n    if heigh2 >= heigh:\r\n        print(\"The frog gets out on day\", day)\r\n        break","sub_path":"day03/class/任务8.py","file_name":"任务8.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256793979","text":"import random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom 
torch.autograd import Variable\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport numpy as np\nfrom model.utils.config import cfg\nfrom model.rpn.rpn import _RPN\nfrom model.roi_pooling.modules.roi_pool import _RoIPooling\nfrom model.roi_crop.modules.roi_crop import _RoICrop\nfrom model.roi_align.modules.roi_align import RoIAlignAvg\nfrom model.faster_rcnn.cgcn_models import CGCN\nfrom model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer\nimport time\nimport pickle\nimport pdb\nfrom model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta\n\nclass _fasterRCNN(nn.Module):\n \"\"\" faster RCNN \"\"\"\n def __init__(self, classes, class_agnostic):\n super(_fasterRCNN, self).__init__()\n self.classes = classes\n self.n_classes = len(classes)\n self.class_agnostic = class_agnostic\n # loss\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n\n # define rpn\n self.RCNN_rpn = _RPN(self.dout_base_model)\n self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)\n self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n self.RCNN_roi_align = RoIAlignAvg(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n\n self.grid_size = cfg.POOLING_SIZE * 2 if cfg.CROP_RESIZE_WITH_MAX_POOL else cfg.POOLING_SIZE\n self.RCNN_roi_crop = _RoICrop()\n\n \n if cfg.GCN.RE_CLASS:\n self.Class_GCN = CGCN(cfg.GCN.N_FEAT, cfg.GCN.N_HID, cfg.GCN.DROPOUT, self.n_classes, t = 0.05, adj_file = cfg.GCN.ADJ_FILE)\n\n # def forward(self, im_data, im_info, gt_boxes, num_boxes, gt_classes):\n # def forward(self, im_data, im_info, gt_boxes, num_boxes, iteration, mlgcn_threshold):\n # def forward(self, im_data, im_info, gt_boxes, num_boxes, mlgcn_threshold):\n def forward(self, im_data, im_info, gt_boxes, num_boxes):\n batch_size = im_data.size(0)\n\n im_info = im_info.data\n gt_boxes = gt_boxes.data\n num_boxes = num_boxes.data\n\n # feed image data to base model to obtain base feature map\n base_feat = self.RCNN_base(im_data)\n\n # feed base feature map to RPN to obtain rois\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\n\n # if it is training phase, then use ground truth bboxes for refining\n if self.training:\n roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)\n rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data\n\n rois_label = Variable(rois_label.view(-1).long())\n rois_target = Variable(rois_target.view(-1, rois_target.size(2)))\n rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))\n rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))\n else:\n rois_label = None\n rois_target = None\n rois_inside_ws = None\n rois_outside_ws = None\n rpn_loss_cls = 0\n rpn_loss_bbox = 0\n\n rois = Variable(rois)\n # do roi pooling based on predicted rois\n\n if cfg.POOLING_MODE == 'crop':\n # pdb.set_trace()\n # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))\n grid_xy = _affine_grid_gen(rois.view(-1, 5), base_feat.size()[2:], self.grid_size)\n grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()\n pooled_feat = self.RCNN_roi_crop(base_feat, Variable(grid_yx).detach())\n if cfg.CROP_RESIZE_WITH_MAX_POOL:\n pooled_feat = F.max_pool2d(pooled_feat, 2, 2)\n elif cfg.POOLING_MODE == 'align':\n pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))\n elif cfg.POOLING_MODE == 'pool':\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))\n\n # 
feed pooled features to top model\n pooled_feat = self._head_to_tail(pooled_feat)\n\n # compute bbox offset\n bbox_pred = self.RCNN_bbox_pred(pooled_feat)\n if self.training and not self.class_agnostic:\n # select the corresponding columns according to roi labels\n bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)\n bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))\n bbox_pred = bbox_pred_select.squeeze(1)\n\n # compute object classification probability\n cls_score = self.RCNN_cls_score(pooled_feat)\n\n # cls_re_score, regular_term = self.Class_GCN(cls_score[:,1:])\n # new_cls_score = torch.cat((cls_score[:,0].view(-1,1),cls_re_score),dim = -1)\n # cls_prob = F.softmax(new_cls_score, 1) \n\n cls_prob = F.softmax(cls_score, 1) \n \n # # cls_prob_old = F.softmax(cls_score, dim=1)\n # if cfg.GCN.RE_CLASS:\n # cls_reclass_score, regular_term = self.Class_GCN(cls_prob_old)\n \n # max min\n # cls_max = torch.max(cls_score, dim = 1)[0].view(-1, 1)\n # cls_min = torch.min(cls_score, dim = 1)[0].view(-1, 1)\n # cls_prob = (cls_score - cls_min)/(cls_max - cls_min)\n \n # # div sum \n # cls_reclass_sum = torch.sum(cls_reclass_score, dim = 1 ).view(-1, 1)\n # cls_reclass_prob = torch.div(cls_reclass_score, cls_reclass_sum)\n \n # ## ml-gcn\n # cls_feature = pickle.load(open('data/VOCdevkit2007/VOC2007/feature.pkl', 'rb'))\n # cls_feature = torch.from_numpy(cls_feature).cuda()\n # cls_gcn_scores = torch.mm(pooled_feat, cls_feature)\n # # cls_gcn_prob = F.softmax(cls_gcn_scores, 1)\n # cls_gcn_max = torch.max(cls_gcn_scores, dim = 1)[0].view(-1, 1)\n # cls_gcn_min = torch.min(cls_gcn_scores, dim = 1)[0].view(-1, 1)\n # cls_gcn_prob = (cls_gcn_scores - cls_gcn_min)/(cls_gcn_max - cls_gcn_min)\n \n \n # # ml-gcn\n # _, max_index = torch.max(cls_prob, dim = 1)\n # col_mask = torch.ne(max_index, 0)\n # mask = col_mask.view(-1, 1).repeat(1, 21)\n # cls_prob = torch.where(mask, cls_prob, cls_gcn_prob)\n\n # ## reclass select bg\n # _, max_index = torch.max(cls_reclass_score, dim = 1)\n # col_mask = torch.ne(max_index, 0)\n # mask = col_mask.view(-1, 1).repeat(1, 21)\n # cls_score = torch.where(mask, cls_reclass_score, cls_prob_old)\n # cls_prob = torch.where(mask, cls_reclass_prob, cls_prob_old)\n\n\n # # gt assign\n # gt_assign = torch.zeros(self.n_classes).cuda().float()\n # gt_assign[gt_classes] = 1\n # gt_reclass_prob = torch.mul(cls_prob_old, gt_assign)\n # # gt_reclass_socre = torch.sum(gt_reclass_socre, dim = 1 ).view(-1, 1)\n # # gt_reclass_prob = torch.div(gt_reclass_socre, gt_reclass_sum)\n \n # # ml-gcn assign\n # mlgcn_assign = pickle.load(open(\"/home/junjie/Framework/ML_GCN/data/{}.pkl\".format(iteration), 'rb'))\n # mlgcn_assign = F.softmax(torch.from_numpy(mlgcn_assign), dim = 1)\n # mlgcn_assign = torch.cat((torch.tensor(0.0).view(1, -1), mlgcn_assign), dim = -1).float().cuda()\n # mlgcn_assign[ mlgcn_assign >= mlgcn_threshold] = 1\n # mlgcn_assign[ mlgcn_assign < mlgcn_threshold] = 0\n # gt_reclass_prob = torch.mul(cls_reclass_prob, mlgcn_assign)\n # # gt_reclass_socre = torch.sum(gt_reclass_socre, dim = 1 ).view(-1, 1)\n # # gt_reclass_prob = torch.div(gt_reclass_socre, gt_reclass_sum)\n # max_index = torch.argmax(cls_prob_old, dim = 1)\n # col_mask = torch.ne(max_index, 0)\n # mask = col_mask.view(-1, 1).repeat(1, 21)\n # cls_prob = torch.where(mask, gt_reclass_prob, cls_prob_old)\n\n # ml-gcn-before head\n # cls_feature = pickle.load(open('data/VOCdevkit2007/VOC2007/feature.pkl', 
'rb'))\n # cls_feature = torch.from_numpy(cls_feature).cuda()\n # cls_pool = nn.AdaptiveMaxPool2d((2, 1))\n # base_feat = cls_pool(base_feat).view(base_feat.size(0), -1)\n # cls_gcn_scores = torch.mm(base_feat, cls_feature)\n # cls_gcn_prob = F.softmax(cls_gcn_scores, 1)\n\n # # mlgcn_threshold = 0.05\n # cls_gcn_prob[ cls_gcn_prob >= mlgcn_threshold] = 1\n # cls_gcn_prob[ cls_gcn_prob < mlgcn_threshold] = 0\n # cls_gcn_prob = torch.cat((torch.ones(300, 1).float().cuda(),cls_gcn_prob),dim = -1)\n # gcn_reclass_prob = torch.mul(cls_gcn_prob, cls_prob_old)\n # max_index = torch.argmax(cls_prob_old, dim = 1)\n # col_mask = torch.ne(max_index, 0)\n # mask = col_mask.view(-1, 1).repeat(1, 21)\n # cls_prob = torch.where(mask, gcn_reclass_prob, cls_prob_old)\n\n\n # cls_gcn_max = torch.max(cls_gcn_scores, dim = 1)[0].view(-1, 1)\n # cls_gcn_min = torch.min(cls_gcn_scores, dim = 1)[0].view(-1, 1)\n # cls_gcn_prob = (cls_gcn_scores - cls_gcn_min)/(cls_gcn_max - cls_gcn_min)\n # cls_gcn_prob = torch.cat((torch.zeros(300, 1).float().cuda(), cls_gcn_prob), dim = 1)\n \n \n RCNN_loss_cls = 0\n RCNN_loss_bbox = 0\n RCNN_loss_regular = 0\n\n if self.training:\n # classification loss\n if cfg.GCN.RE_CLASS:\n RCNN_loss_regular = regular_term\n \n RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)\n \n # bounding box regression L1 loss\n RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)\n\n rpn_loss_cls = torch.unsqueeze(rpn_loss_cls, 0)\n rpn_loss_bbox = torch.unsqueeze(rpn_loss_bbox, 0)\n RCNN_loss_cls = torch.unsqueeze(RCNN_loss_cls, 0)\n RCNN_loss_bbox = torch.unsqueeze(RCNN_loss_bbox, 0)\n\n cls_prob = cls_prob.view(batch_size, rois.size(1), -1)\n bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)\n\n return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, RCNN_loss_regular, rois_label\n\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n\n normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n def create_architecture(self):\n self._init_modules()\n self._init_weights()\n","sub_path":"lib/model/faster_rcnn/faster_rcnn_reclass.py","file_name":"faster_rcnn_reclass.py","file_ext":"py","file_size_in_byte":11124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404146554","text":"#Uses python3\nimport sys\n\n# Return the trie built from patterns\n# in the form of a dictionary of dictionaries,\n# e.g. 
{0:{'A':1,'T':2},1:{'C':3}}\n# where the key of the external dictionary is\n# the node ID (integer), and the internal dictionary\n# contains all the trie edges outgoing from the corresponding\n# node, and the keys are the letters on those edges, and the\n# values are the node IDs to which these edges lead.\ndef build_trie(patterns):\n    tree = dict()\n    # create the root node of the trie\n    tree[0]={}\n    index=0\n\n    for pattern in patterns:\n        # traverse the trie from the root node\n        current=tree[0]\n        for letter in pattern:\n            # check whether the letter already exists;\n            # if so, keep traversing the trie\n            if letter in current.keys():\n                current=tree[current[letter]]\n\n            # else, create a new node, add it to the trie and\n            # change the reference from the previous node to it\n            else:\n                index += 1\n                current[letter]=index\n                tree[index]={}\n                current=tree[index]\n\n    return tree\n\n\nif __name__ == '__main__':\n    patterns = sys.stdin.read().split()[1:]\n    tree = build_trie(patterns)\n    for node in tree:\n        for c in tree[node]:\n            print(\"{}->{}:{}\".format(node, tree[node][c], c))\n","sub_path":"Course4 Algorithms on Strings/Week1/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39845093","text":"class Perceptron:\n    def __init__(self, w1, w2, theta):\n        self.w1 = w1\n        self.w2 = w2\n        self.theta = theta\n\n    def forward(self, x1, x2):\n        y = self.w1 * x1 + self.w2 * x2\n        if y >= self.theta:\n            return 1\n        else:\n            return 0\n\nx1_list = [1, 1, 0, 0]\nx2_list = [1, 0, 1, 0]\nandg = Perceptron(0.5, 0.5, 0.7)\nnandg = Perceptron(-0.5, -0.5, -0.7)\norg = Perceptron(0.5, 0.5, 0.5)\nfor i,j in zip(x1_list, x2_list):\n    print(\"AND({0}, {1}) = {2}\\t\".format(i, j, andg.forward(i,j)), end=\"\")\n    print(\"NAND({0}, {1}) = {2}\\t\".format(i,j,nandg.forward(i,j)), end=\"\")\n    print(\"OR({0}, {1}) = {2}\\t\".format(i,j,org.forward(i,j)))\n","sub_path":"p2_ニューラルネットワーク実装/myanswer/a1-2.py","file_name":"a1-2.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572674746","text":"from rest_framework.response import Response\n\n\ndef success_response(message, data, **kwargs):\n    \"\"\"\n    return a formatted response for a successful request\n    \"\"\"\n    return Response({\n        \"status\": \"success\",\n        \"message\": message,\n        \"data\": data,\n    }, kwargs.get('status_code'))\n\n\ndef error_response(message, **kwargs):\n    return Response({\n        \"status\": \"error\",\n        \"message\": message,\n    }, kwargs.get('status_code'))\n","sub_path":"shoppingList/helpers/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494377817","text":"# This form needs no explicit close and also pins the encoding\nwith open(\"info.txt\",encoding='utf-8') as file_object:\n    contents=file_object.read()\n    print(contents)\n# Approach two: open and close by hand\nfstr=open(\"info.txt\",encoding='utf-8')\nprint(fstr.read().rstrip())\nfstr.close()\nprint(fstr.closed)\n# Print the file line by line\ntry:\n    with open(\"info1.txt\",encoding='utf-8') as file_object:\n        for line in file_object:\n            print(line.rstrip())\nexcept Exception as ex:\n    print(ex)\nelse:\n    print(\"Executed successfully!\")\n# Append the list as JSON with json.dump (mode='a' appends on every run)\nimport json\nnumbers=[1,2,3,4,5]\nwith open(\"info.txt\",encoding='utf-8',mode='a') as file_object:\n    
json.dump(numbers,file_object)\n","sub_path":"文件.py","file_name":"文件.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196507924","text":"import os\nimport sys\nimport subprocess\nfrom optparse import make_option,OptionParser\nfrom vhd_tb.management.base import BaseCommand\nfrom vhd_tb.bsim import *\n\nclass Command(BaseCommand):\n help = \"Show Wave Form in gtkwave\"\n args = \"Project\"\n option_list = (\n make_option(\"-w\",\"--work-dir\",action='store', dest='work_dir',default=\"\"),\n )\n\n def __init__(self):\n pass\n\n def run_from_argv(self,argv):\n parser = self.create_parser(argv[0],argv[1])\n options, args = parser.parse_args(argv[2:])\n if args != []:\n return self.execute(args,options)\n else:\n self.print_help(argv[0],argv[1])\n sys.exit(1)\n\n def execute(self,args, options):\n try:\n wdir = options.work_dir\n except:\n wdir = \"\"\n try:\n arg = options.tb_name\n except:\n arg = args[0]\n gtkwave(arg,wdir)\n","sub_path":"vhd_tb/management/commands/gtkwave.py","file_name":"gtkwave.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423280369","text":"import unittest\nfrom unittest.mock import Mock, MagicMock, call\n\nfrom tornado.testing import AsyncTestCase, gen_test\nfrom tornado.concurrent import Future\n\nfrom httpsrvvcr import recorder\n\n\ndef future_mock(value):\n fut = Future()\n fut.set_result(value)\n return Mock(return_value=fut)\n\n\ndef response_mock():\n response = Mock()\n response.code = 200\n response.body = b'Response body'\n response.headers = {'Content-Type': 'text/plain', 'Content-Length': 100}\n return response\n\n\ndef client_mock(response):\n client = Mock()\n client.fetch = future_mock(response)\n return client\n\n\ndef create_handler(request, client, target, writer):\n handler = recorder.ProxyHandler(\n MagicMock(), request, httpclient=client, target=target, writer=writer)\n handler.set_status = Mock()\n handler.finish = Mock()\n handler.write = Mock()\n handler.set_header = Mock()\n return handler\n\n\ndef request_mock():\n request = Mock()\n request.method = 'GET'\n request.headers = {'Accept': 'appliaction/json'}\n request.body = None\n request.uri = '/'\n return request\n\n\nclass ProxyHandlerTest(AsyncTestCase):\n def setUp(self):\n super().setUp()\n self.response = response_mock()\n self.client = client_mock(self.response)\n self.target = 'http://nowhere.com'\n self.request = request_mock()\n self.writer = Mock()\n self.handler = create_handler(self.request, self.client, self.target, self.writer)\n\n @gen_test\n def test_should_fetch_target_url(self):\n yield self.handler.prepare()\n self.client.fetch.assert_called_with(\n self.target + self.request.uri,\n method=self.request.method,\n headers=self.request.headers,\n allow_nonstandard_methods=True,\n body=self.request.body)\n\n @gen_test\n def test_should_respond_with_target_code(self):\n yield self.handler.prepare()\n self.handler.set_status.assert_called_with(self.response.code)\n\n @gen_test\n def test_should_respond_with_target_body(self):\n yield self.handler.prepare()\n self.handler.write.assert_called_with(self.response.body)\n\n @gen_test\n def test_should_respond_without_body(self):\n self.response.body = b''\n yield self.handler.prepare()\n self.assertFalse(self.handler.write.called);\n\n @gen_test\n def test_should_respond_with_target_headers(self):\n yield self.handler.prepare()\n 
self.handler.set_header.assert_has_calls([\n call('Content-Length', 100),\n call('Content-Type', 'text/plain')], any_order=True)\n\n @gen_test\n def test_should_finish_request(self):\n yield self.handler.prepare()\n self.handler.finish.assert_called_with()\n\n @gen_test\n def test_should_record_response(self):\n yield self.handler.prepare()\n self.writer.write.assert_called_with(self.request, self.response)\n\n @gen_test\n def test_should_set_cors_header(self):\n yield self.handler.prepare()\n self.handler.set_header.assert_called_with(\n 'Access-Control-Allow-Origin', '*')\n\n @gen_test\n def test_should_rewrite_host_header_in_request(self):\n self.request.headers = { 'Host': '127.0.0.1:3000' }\n yield self.handler.prepare()\n self.client.fetch.assert_called_with(\n self.target + self.request.uri,\n method=self.request.method,\n headers={'Host': 'nowhere.com'},\n allow_nonstandard_methods=True,\n body=self.request.body)\n\n @gen_test\n def test_should_exclude_headers_in_response(self):\n self.response.headers['Transfer-Encoding'] = 'chunked'\n yield self.handler.prepare()\n self.assertNotIn((('Transfer-Encoding', 'chunked'),), self.handler.set_header.call_args_list)\n\n\nclass YamlWriterTest(unittest.TestCase):\n def setUp(self):\n self.wrapped_writer = Mock()\n self.dumped = 'dumped yaml here'\n self.yaml = Mock()\n self.yaml.dump = Mock(return_value=self.dumped)\n self.ywriter = recorder.YamlWriter(self.wrapped_writer, self.yaml)\n self.data = {'something': 'here'}\n\n def test_should_dump_yaml(self):\n self.ywriter.write(self.data)\n self.yaml.dump.assert_called_with(\n self.data, default_flow_style=False, allow_unicode=True)\n\n def test_should_write_to_wrapped_writer(self):\n self.ywriter.write(self.data)\n self.wrapped_writer.write.assert_called_with(self.dumped)\n\n\nclass VcrWriterTest(unittest.TestCase):\n def setUp(self):\n self.request = request_mock()\n self.response = response_mock()\n self.wrapped_writer = Mock()\n self.parsed_json = {'json': True}\n self.json = Mock()\n self.json.loads = Mock(return_value=self.parsed_json)\n self.writer = recorder.VcrWriter(self.wrapped_writer, self.json)\n\n def test_should_write_request_and_response(self):\n self.writer.write(self.request, self.response)\n self.wrapped_writer.write.assert_called_with([{\n 'request': {\n 'path': self.request.uri,\n 'method': self.request.method,\n 'headers': self.request.headers,\n 'text': self.request.body,\n 'json': None,\n },\n 'response': {\n 'code': self.response.code,\n 'headers': self.response.headers,\n 'text': self.response.body.decode('utf8'),\n 'json': None,\n },\n }])\n\n def test_should_parse_request_json(self):\n self.request.headers['Content-Type'] = 'application/json'\n self.request.body = b'request json'\n self.writer.write(self.request, self.response)\n self.json.loads.assert_called_with(self.request.body.decode('utf8'))\n\n def test_should_parse_response_json(self):\n self.response.headers['Content-Type'] = 'application/json'\n self.response.body = b'response json'\n self.writer.write(self.request, self.response)\n self.json.loads.assert_called_with(self.response.body.decode('utf8'))\n\n def test_should_write_response_json(self):\n self.response.headers['Content-Type'] = 'application/json'\n self.response.body = b'response json'\n self.writer.write(self.request, self.response)\n self.wrapped_writer.write.assert_called_with([{\n 'request': {\n 'path': self.request.uri,\n 'method': self.request.method,\n 'headers': self.request.headers,\n 'text': self.request.body,\n 'json': None,\n },\n 
'response': {\n 'code': self.response.code,\n 'headers': self.response.headers,\n 'text': None,\n 'json': self.parsed_json,\n },\n }])\n\n def test_should_write_request_json(self):\n self.request.headers['Content-Type'] = 'application/json'\n self.request.body = b'response json'\n self.writer.write(self.request, self.response)\n self.wrapped_writer.write.assert_called_with([{\n 'request': {\n 'path': self.request.uri,\n 'method': self.request.method,\n 'headers': self.request.headers,\n 'json': self.parsed_json,\n 'text': None,\n },\n 'response': {\n 'code': self.response.code,\n 'headers': self.response.headers,\n 'text': self.response.body.decode('utf8'),\n 'json': None,\n },\n }])\n\n def test_should_write_no_headers(self):\n writer = recorder.VcrWriter(self.wrapped_writer, self.json, True)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.body = b'response json'\n writer.write(self.request, self.response)\n self.wrapped_writer.write.assert_called_with([{\n 'request': {\n 'path': self.request.uri,\n 'method': self.request.method,\n 'headers': None,\n 'text': self.request.body,\n 'json': None,\n },\n 'response': {\n 'code': self.response.code,\n 'headers': None,\n 'text': None,\n 'json': self.parsed_json,\n },\n }])\n\n def test_shoud_skip_target_methods(self):\n writer = recorder.VcrWriter(self.wrapped_writer, self.json, skip_methods=['POST'])\n self.request.method = 'POST'\n writer.write(self.request, self.response)\n self.assertFalse(self.wrapped_writer.write.called)\n\n\n\n\n\n","sub_path":"tests/recorder_test.py","file_name":"recorder_test.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333319167","text":"import cgi\nimport pickle\n\nimport webapp2\nfrom all_std_libs import *\nimport numpy\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n buf = 'this is a server which hosts calculation resource for python.'\n buf += '<br>'\n buf += '<a href=\"https://github.com/y-abe/calcserver\">'\n buf += 'https://github.com/y-abe/calcserver'\n buf += '</a>'\n self.response.write(buf)\n\n def post(self):\n func = cgi.escape(self.request.get('func'))\n func = pickle.loads(func)\n args0 = cgi.escape(self.request.get('args0'))\n args0 = pickle.loads(args0)\n args1 = cgi.escape(self.request.get('args1'))\n args1 = pickle.loads(args1)\n \n result = func(*args0, **args1)\n result = pickle.dumps(result)\n \n self.response.out.write(result)\n\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71642345","text":"\"\"\"\npandas 导入导出\n\"\"\"\n\nimport pandas as pd\n\n\"\"\"\n可以读取哪些类型的文件:\n read_csv\n read_excel\n read_hdf\n read_sql\n read_json\n read_msgpack\n read_html\n read_gbq\n read_stata\n read_sas\n read_clipboard\n read_pickle\n\"\"\"\n\ndata = pd.read_excel('data.xls')\n\nprint(data)\n\n# DataFrame 导出\ndata.to_csv('out.csv')\n\n","sub_path":"pandas/ariescc5.py","file_name":"ariescc5.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336455832","text":"#!/usr/bin/python\n\n__author__ = 'Rex'\n\nimport os\n\n\ndef start():\n SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n os.chdir(SCRIPT_DIR)\n while True:\n if os.system('python manage.py 
processblock') == 2:\n break\n\n\nif __name__ == \"__main__\":\n start()\n","sub_path":"DjangoWebServer/process_block.py","file_name":"process_block.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486249535","text":"fsug = open('outputs/wx_suggestions2.txt','r')\nout = open('outputs/final/suggestions2.txt','w')\nlines = fsug.readlines()\n\nfor line in lines:\n\ttemp = line.split()\n\tif temp[1] == ':':\n\t\tout.write(temp[0] + ' Belongs to synset: ' + temp[2] + '\\n')\n\telif temp[1] == ';':\n\t\tout.write(temp[0] + ' Should belong to synset: ' + temp[2] + '\\n')\nout.close()\n","sub_path":"Project/src/suggest_word_to_synset.py","file_name":"suggest_word_to_synset.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"568746268","text":"#! /usr/bin/env python\n# -*- coding: latin-1 -*-\n\nfrom constants import *\nimport types\nimport actions\nimport conditions\nimport effects\n\ndefault_agents = [types.TypedObject(\"?agt%d\" %i, \"agent\") for i in (0,1)]\n\nclass Predicate(object):\n def __init__(self, name, arguments, value, believers=None, initially=False):\n self.name = name\n self.believers = believers\n self.arguments = arguments\n self.initially = initially\n if value is None:\n value = [types.TypedObject(\"?_val\", \"boolean\")]\n self.value = value\n # various string representations of parts of the predicate\n self.args_decl_str = \" \".join(\"%s - %s\" % (d.name, d.type) for d in self.arguments)\n self.args_str = \" \".join(d.name for d in self.arguments)\n self.value_type_str = \" \".join(types.pddl_type_str(d.type) for d in self.value)\n #print \"vts:\", self.value_type_str\n\n @staticmethod\n def parse(alist):\n name = alist[0]\n if name == K_PREFIX or name == DIRECT_K_PREFIX:\n return ModalPredicate.parse(alist)\n if \":\" not in alist:\n alist.extend(\": ?_val - boolean\".split())\n separator_position = alist.index(\":\")\n arguments = types.parse_typed_list(alist[1:separator_position], only_variables=True)\n value = types.parse_typed_list(alist[separator_position+1:], only_variables=True)\n result = Predicate(name, arguments, value)\n return result\n\n @staticmethod\n def svar_in_domain_predicate_name(svar_name):\n return \"%s%s%s\" % (IN_DOMAIN_KW, PREFIX_SEP, svar_name)\n\n def svar_in_domain_predicate(self):\n name = Predicate.svar_in_domain_predicate_name(self.name)\n return Predicate(name, self.arguments, value=self.value, believers=self.believers)\n\n @staticmethod\n def svar_domain_predicate_name(svar_name):\n return \"%s%s%s\" % (DOMAIN_KW, PREFIX_SEP, svar_name)\n\n def svar_domain_predicate(self):\n name = Predicate.svar_domain_predicate_name(self.name)\n return Predicate(name, self.arguments, value=self.value, believers=self.believers)\n\n def arity(self):\n return len(self.arguments)\n\n def is_boolean(self):\n if self.value[0].type == \"boolean\":\n return True\n if bool(self.believers):\n return True\n# if self.name.startswith(IN_DOMAIN_KW):\n# return True\n \n def is_multi_valued(self):\n return not self.is_boolean()\n \n def initially_pred(self):\n return Predicate(self.name, self.arguments, value=self.value, believers=self.believers,initially=True) \n\n def knowledge_pred(self, believers):\n if self.believers:\n # no beliefs about beliefs - for the time being\n return None\n return Predicate(self.name, self.arguments, value=None, believers=believers)\n \n def 
__str__(self):\n if self.is_boolean():\n s = \"%s(%s)\" % (self.name, \", \".join(map(str, self.arguments)))\n else:\n s = \"%s(%s : %s)\" % (self.name, \", \".join(map(str, self.arguments)), \", \".join(map(str, self.value)))\n if self.believers:\n if len(self.believers) == 1:\n raise Exception(\"something's wrong with\", self.predicate, self.believers)\n else:\n if self.believers[0].name == self.believers[1].name:\n s = \"k(%s %s)\" % (\" \".join(map(str, self.believers[0])), s)\n else:\n s = \"mb(%s %s)\" % (\" \".join(map(str, self.believers)), s) \n return s\n\n def pddl_str_name(self, k_axiom=False):\n name = self.name\n if self.believers:\n if k_axiom:\n name = \"kval__%s\" % name\n else:\n name = \"kvald__%s\" % name\n if self.initially:\n name = INITIALLY_PREFIX + PREFIX_SEP + name\n return name\n\n def pddl_str(self, k_axiom=False):\n name = self.pddl_str_name(k_axiom)\n args = self.arguments\n if self.believers:\n args = self.believers + args\n# return \"%s %s\" % (name, \" \".join(map(types.pddl_str, args)))\n return \"%s %s %s\" % (name, \" \".join(map(types.pddl_str, args)), \" \".join(map(types.pddl_str, self.value)))\n \n\n\nclass ModalPredicate(Predicate):\n def __init__(self, modal_op, arguments, predicate):\n Predicate.__init__(self, modal_op, arguments, None)\n self.predicate = predicate\n\n @staticmethod\n def parse(alist):\n predicate = Predicate.parse(alist[-1])\n name = alist[0]\n args = alist[1:-1]\n arguments = types.parse_typed_list(args, only_variables=True)\n return ModalPredicate(name, arguments, predicate)\n\n def is_boolean(self):\n # this is not necessarily true forever, but right now, we\n # do only allow positive occurences\n return True\n\n def pddl_str(self):\n name = self.name\n args = self.arguments\n name = \"%s__%s\" % (name, self.predicate.name)\n args = args + self.predicate.arguments + self.predicate.value\n return \"%s %s\" % (name, \" \".join(map(types.pddl_str, args)))\n \n \n\n","sub_path":"subarchitectures/planner.sa/branches/stable-1/src/python/standalone/mapl/predicates.py","file_name":"predicates.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491472581","text":"from lxml import etree\nimport requests\n\nfrom dataverse import Dataverse\nfrom exceptions import UnauthorizedError, ConnectionError\nfrom utils import get_elements\n\n\nclass Connection(object):\n\n def __init__(self, host, token):\n # Connection Properties\n self.token = token\n self.host = host\n self.sd_uri = \"https://{host}/dvn/api/data-deposit/v1.1/swordv2/service-document\".format(host=self.host)\n self.service_document = None\n self.connected = False\n \n self.connect()\n\n @property\n def auth(self):\n return self.token, None\n\n def connect(self):\n resp = requests.get(self.sd_uri, auth=self.auth)\n\n if resp.status_code == 403:\n raise UnauthorizedError('The credentials provided are invalid.')\n elif resp.status_code != 200:\n raise ConnectionError('Could not connect to the Dataverse')\n\n self.service_document = etree.XML(resp.content)\n self.connected = True\n \n def get_dataverses(self, refresh=False):\n if refresh:\n self.connect()\n\n collections = get_elements(\n self.service_document[0],\n tag=\"collection\",\n )\n \n return [Dataverse(self, col) for col in collections]\n\n def get_dataverse(self, alias, refresh=False):\n return next((dataverse for dataverse in self.get_dataverses(refresh)\n if dataverse.alias == alias), 
None)\n","sub_path":"dataverse/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"547384663","text":"#!/usr/bin/env python3\n\"\"\"\nCreated on Sat Aug 12 20:26:40 2017\n\n@author: caldas\n\"\"\"\n# from selenium import webdriver\n# driver = webdriver.Firefox(executable_path = '/usr/local/bin/geckodriver')\n#\n# url = \"https://www.youraddress.com\"\n#\n# driver.execute_script(\"window.open(url,\"_self\");\") <--- JAVASCRIPT!\n# (\"window.open('');\")\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport datetime as dt\nimport pandas as pd\n\n# Creating dataframe to save data.\ncolumns = [\"date\", \"coin\", \"value\", \"actual_value\", \"running_balance\", \"status\", \"wallet_hash\", \"paid\"]\ncol_names_payout = ['total', 'paid', 'balance', 'last payment', 'last payout']\n\ndf = pd.DataFrame(columns=columns)\n\nurl = \"https://www.genesis-mining.com/transactions/index/page:\"\ndriver = webdriver.Firefox(executable_path='/usr/local/bin/geckodriver')\ndriver.get(\"https://www.genesis-mining.com/en\")\n\n\ndef append_data(soup, df):\n transactions = soup.find(\"div\", {\"id\": \"my-transactions\"})\n table = transactions.find('table', attrs={'class': 'dash'})\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n for row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n\n actual_value = 'N/A'\n running_balance = 'N/A'\n try:\n balance_status = row.find('td', attrs={'data-label': 'Status'}).find('span', attrs={'class': 'icon-box'})[\n 'data-title']\n start_find_str = cols[1] + ' mining, '\n start_pos = balance_status.find(start_find_str) + len(start_find_str)\n end_pos = balance_status.find(' added to balance.')\n actual_value = balance_status[start_pos: end_pos]\n\n start_find_str = 'Total Balance: '\n start_pos = balance_status.find(start_find_str) + len(start_find_str)\n end_pos = balance_status.find(' ' + cols[1] + '. 
')\n running_balance = balance_status[start_pos: end_pos]\n except:\n print(\"Status is not defined\")\n\n df = df.append({\n 'date': cols[0],\n 'coin': cols[1],\n 'value': cols[2][:10],\n 'actual_value': actual_value,\n 'running_balance': running_balance,\n 'status': cols[3],\n 'wallet_hash': cols[4],\n 'paid': cols[3]\n }, ignore_index=True)\n return df\n\n\ndef wait_sometime(time):\n WebDriverWait(driver, time)\n\n\ntry:\n element = WebDriverWait(driver, 100).until(\n EC.presence_of_element_located((By.ID, \"current-mining\"))\n )\n\nfinally:\n # Get number of pages to loop through\n print(\"Parsing page %s.\" % 1)\n driver.get(url + str(1))\n source = driver.page_source.encode('utf-8')\n soup = BeautifulSoup(source, 'html.parser')\n\n # pages = str(soup.findAll(\"p\", { \"class\" : \"pager-info\" }))\n pages = str(soup.findAll(\"p\", {\"class\": \"pagination-info\"}))\n start = pages.find(\"Page 1 of \")\n end = pages.find(\", showing\")\n npages = int(pages[start + 10: end])\n df = append_data(soup, df)\n\n # wait some times\n wait_sometime(100)\n\n for current in range(2, (npages + 1)):\n print(\"Parsing page %s out of %s.\" % (current, npages))\n driver.get(url + str(current))\n\n source = driver.page_source.encode('utf-8')\n soup = BeautifulSoup(source, 'html.parser')\n df = append_data(soup, df)\n wait_sometime(100)\n\ndriver.quit()\n\n### Working on Dataframe\n\ndf['date'] = pd.to_datetime(df['date'], format='%d.%m.%Y')\n# df['date'] = df['date'].apply(lambda x: dt.datetime.strftime(x, '%m-%d-%Y'))\n\ndf.paid = df.paid.apply(lambda x: True if x == \"Sent to wallet\" else False)\n\ndf = df.sort_values(by=['date'])\n\n# Create a new dataframe\nmissing_dates = pd.DataFrame(columns=columns)\n\nrow_iterator = df.iterrows()\n_, last = next(row_iterator) # take first item from row_iterator\nfor i, row in row_iterator:\n days_diff = (row['date'].date() - last['date'].date()).days\n if days_diff == 1:\n last = row\n continue\n\n missing_start_date = last['date']\n for _ in range(1, days_diff):\n missing_start_date = (missing_start_date + dt.timedelta(days=1))\n print(missing_start_date)\n missing_dates = missing_dates.append({\n 'date': missing_start_date,\n 'coin': row['coin'],\n 'value': 0.0,\n 'actual_value': 0.0,\n 'running_balance': 0.0,\n 'status': 'Missing',\n 'wallet_hash': 'n/a'}, ignore_index=True)\n\n print('-------========')\n last = row\n\nresult = pd.concat([df, missing_dates])\nresult = result.sort_values(by=['date']).reset_index(drop=True)\n\n# Get all coins\nmy_coins = df.coin.unique()\n\nprint(\"Coins in your account:\")\nprint(my_coins)\n\n# Create a new dataframe\npayouts = pd.DataFrame(index=my_coins, columns=col_names_payout)\n\n## add values to the payout dataframe\nfor row in payouts.iterrows():\n coin = row[0]\n my_df = df.loc[df.coin == coin]\n\n payouts.at[coin, 'total'] = my_df.value.sum()\n\n paid = df.value.loc[(df.coin == coin) & (df.paid == True)].sum()\n\n payouts.at[coin, 'value'] = my_df.value.loc[my_df.paid == True].sum()\n\n payouts.at[coin, 'last payment'] = my_df.date.max()\n\n payouts.at[coin, 'last payout'] = my_df.loc[my_df.paid == True].date.max()\n\npayouts.balance = payouts.total - payouts.paid\n\n## save df\ndf.to_csv(\"source.csv\")\n\n# print(missing_dates)\nmissing_dates.to_csv(\"missing_dates.csv\")\n\n# print(result (Source + Missing))\nresult.to_csv(\"result.csv\")\n\n# print 
(payouts.csv)\npayouts.to_csv(\"payouts.csv\")\n\nprint(payouts)\n\n\n\n\n","sub_path":"auto-genesis.py","file_name":"auto-genesis.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85283242","text":"def findMode(numbers):\n    counts = {}  # renamed from `dict` to avoid shadowing the builtin\n\n    for n in numbers:\n        if n not in counts:\n            counts[n] = 1\n        else:\n            counts[n] = counts[n] + 1\n    return max(counts, key=lambda k: counts[k])\n\n\nprint(findMode([1, 2, 2, 3, 4, 5, 6, 6, 6]))\n","sub_path":"mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52952412","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\n\r\ndef minmax_Norm(dataSet):\r\n    minVals = dataSet.min(0)  # column-wise minimum\r\n    maxVals = dataSet.max(0)  # column-wise maximum\r\n    ranges = maxVals - minVals\r\n    m = dataSet.shape[0]\r\n    normDataSet = dataSet - np.tile(minVals, (m, 1))\r\n    normDataSet = normDataSet/np.tile(ranges, (m, 1))\r\n    return normDataSet\r\n\r\ndef ZScore_Norm(dataSet):\r\n    # standardize each column to zero mean and unit variance\r\n    mu = dataSet.mean(0)\r\n    sigma = dataSet.std(0)\r\n    return (dataSet - mu) / sigma\r\n\r\ndef sigmoid_Norm(dataSet,useStatus):\r\n    if useStatus:\r\n        # vectorized so it also works on arrays, not only scalars\r\n        return 1.0 / (1 + np.exp(-dataSet))\r\n    else:\r\n        return dataSet\r\n\r\nif __name__ == '__main__':\r\n    # Load the data\r\n    df1 = pd.read_csv('C:\\\\Users\\\\Tianh\\\\Desktop\\\\DMLab\\\\data\\\\wineequality\\\\winequality-white.csv', delimiter=';')\r\n    print(df1.head())\r\n\r\n    # Preprocessing: split the labels from the features\r\n    true_label = df1['quality']\r\n    true_label = np.array(true_label)\r\n    fea = df1.drop('quality', axis=1)\r\n    fea = np.array(fea)\r\n    res = np.unique(fea)\r\n    print(res)\r\n\r\n    # NOTE: the normalized features are computed, but the raw features are what gets clustered below\r\n    norm_fea = minmax_Norm(fea)\r\n\r\n    # K-means clustering\r\n    cluster_num = 6\r\n    kmeans = KMeans(n_clusters=cluster_num, random_state=0).fit(fea)\r\n    predict_label = kmeans.labels_\r\n    print(kmeans.cluster_centers_)\r\n\r\n    # Cluster evaluation\r\n    # Mutual Information based scores\r\n    print(metrics.adjusted_mutual_info_score(true_label, predict_label))\r\n\r\n    # Homogeneity: each cluster contains only members of a single class\r\n    print(metrics.homogeneity_score(true_label, predict_label))\r\n\r\n    # Completeness: all members of a class are assigned to the same cluster\r\n    print(metrics.completeness_score(true_label, predict_label))\r\n\r\n    # V-measure: the harmonic mean of the two\r\n    print(metrics.v_measure_score(true_label, predict_label))\r\n\r\n    # Fowlkes-Mallows scores\r\n    # The Fowlkes-Mallows score FMI is defined as the geometric mean\r\n    # of the pairwise precision and recall:\r\n    print(metrics.fowlkes_mallows_score(true_label, predict_label))\r\n\r\n    # Silhouette Coefficient\r\n    print(metrics.silhouette_score(fea, predict_label, metric='euclidean'))\r\n\r\n    # Calinski-Harabasz index: the larger the score, the better the clustering\r\n    # (small within-cluster covariance, large between-cluster covariance);\r\n    # renamed calinski_harabasz_score in newer scikit-learn\r\n    print(metrics.calinski_harabaz_score(fea, predict_label))\r\n    \r\n","sub_path":"K-means+K-mediod/data_experiment_WINE_white_Data.py","file_name":"data_experiment_WINE_white_Data.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614431207","text":"from dotty_dict import dotty\nfrom app.domain.event import Event\nfrom app.domain.flow import Flow\nfrom app.domain.profile import Profile\nfrom app.domain.session import Session\n\n\nclass DotAccessor:\n\n    def __init__(self, profile: Profile, session: Session, 
payload: dict, event: Event, flow: Flow):\n self.flow = dotty(flow.dict())\n self.event = dotty(event.dict())\n self.payload = dotty(payload)\n self.session = dotty(session.dict())\n self.profile = dotty(profile.dict())\n\n def __delitem__(self, key):\n if key.startswith('profile@'):\n key = key[len('profile@'):]\n del self.profile[key]\n elif key.startswith('session@'):\n raise KeyError(\"Could not set session, session is read only\")\n elif key.startswith('flow@'):\n raise KeyError(\"Could not set flow, flow is read only\")\n elif key.startswith('payload@'):\n key = key[len('payload@'):]\n del self.payload[key]\n elif key.startswith('event@'):\n raise KeyError(\"Could not delete event, event is read only\")\n else:\n raise ValueError(\n \"Invalid dot notation. Accessor not available. \" +\n \"Please start dotted path with one of the accessors: [profile@, session@, payload@, event@] \")\n\n def __setitem__(self, key, value):\n if key.startswith('profile@'):\n key = key[len('profile@'):]\n self.profile[key] = self.__getitem__(value) if not isinstance(value, dict) else value\n elif key.startswith('session@'):\n raise KeyError(\"Could not set session, session is read only\")\n elif key.startswith('flow@'):\n raise KeyError(\"Could not set flow, flow is read only\")\n elif key.startswith('payload@'):\n key = key[len('payload@'):]\n self.payload[key] = self.__getitem__(value) if not isinstance(value, dict) else value\n elif key.startswith('event@'):\n raise KeyError(\"Could not set event, event is read only\")\n else:\n raise ValueError(\n \"Invalid dot notation. Accessor not available. \" +\n \"Please start dotted path with one of the accessors: [profile@, session@, payload@, event@] \")\n\n def __getitem__(self, dot_notation):\n if isinstance(dot_notation, str):\n if dot_notation.startswith('flow@'):\n value = dot_notation[len('flow@'):]\n try:\n return self.flow[value]\n except KeyError:\n raise KeyError(\"Invalid dot notation. Could not find value for `{}` in flow.\".format(value))\n elif dot_notation.startswith('profile@'):\n value = dot_notation[len('profile@'):]\n try:\n return self.profile[value]\n except KeyError:\n raise KeyError(\"Invalid dot notation. Could not find value for `{}` in profile.\".format(value))\n elif dot_notation.startswith('session@'):\n value = dot_notation[len('session@'):]\n try:\n return self.session[value]\n except KeyError:\n raise KeyError(\"Invalid dot notation. Could not find value for `{}` in session.\".format(value))\n elif dot_notation.startswith('payload@'):\n value = dot_notation[len('payload@'):]\n try:\n return self.payload[value]\n except KeyError:\n raise KeyError(\"Invalid dot notation. Could not find value for `{}` in payload.\".format(value))\n elif dot_notation.startswith('event@'):\n value = dot_notation[len('event@'):]\n try:\n return self.event[value]\n except KeyError:\n raise KeyError(\"Invalid dot notation. 
Could not find value for `{}` in event.\".format(value))\n return dot_notation\n\n def __contains__(self, item):\n try:\n self.__getitem__(item)\n return True\n except KeyError:\n return False\n\n @staticmethod\n def get(dot_notation, payload, prefix):\n value = dot_notation[len(prefix + '@'):]\n try:\n return payload[value]\n except KeyError:\n raise KeyError(\"Could not find value for `{}` in {}\".format(value, prefix))\n\n @staticmethod\n def set(key, value, payload, prefix):\n key = key[len(prefix + '@'):]\n payload[key] = value\n","sub_path":"app/process_engine/dot_accessor.py","file_name":"dot_accessor.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"574338013","text":"from rest_framework.test import APITestCase\n\nfrom orders.models import Order, Detail, Customer\nfrom datetime import datetime\n\nfrom products.models import Category, Product\n\n\nclass ApiBasesObjectsTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.category_1 = Category.objects.create(name='Category 1')\n cls.category_2 = Category.objects.create(name='Category 2')\n\n cls.product_1 = Product.objects.create(\n name='product 1',\n category=cls.category_1,\n description='description of product 1',\n price=100,\n photo='photo url 1'\n )\n cls.product_2 = Product.objects.create(\n name='product 2',\n category=cls.category_1,\n description='description of product 2',\n price=200,\n photo='photo url 2'\n )\n cls.product_3 = Product.objects.create(\n name='product 3',\n category=cls.category_2,\n description='description of product 3',\n price=300,\n photo='photo url 3'\n )\n cls.customer = Customer.objects.create(\n name='John',\n country='Uzb',\n address='some address',\n phone='+998999999999'\n )\n cls.order = Order.objects.create(\n customer=cls.customer,\n date=datetime.now()\n )\n cls.order_1 = Order.objects.create(\n customer=cls.customer,\n date=datetime.now()\n )","sub_path":"payments/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177089592","text":"import os\n\nfrom a2wsgi import ASGIMiddleware\nfrom starlette.applications import Starlette\nfrom starlette.responses import JSONResponse\nfrom starlette.routing import Route\n\nfrom rigor import Suite, Config, execute\n\n\nasync def home(request):\n return JSONResponse({\"a\": 123})\n\n\nasgi = Starlette(debug=True, routes=[Route(\"/home\", home)])\nwsgi = ASGIMiddleware(asgi)\n\nROOT_DIR = os.path.join(os.path.dirname(__file__), \"wsgi\")\npaths = [ROOT_DIR]\n\n\ndef test_wsgi():\n config = Config.load(paths)\n suite = Suite.create(paths, config, app=wsgi, concurrency=0, retries=0)\n result = execute(suite)\n assert result.success\n\n\ndef test_asgi():\n config = Config.load(paths)\n suite = Suite.create(paths, config, app=asgi, concurrency=1, retries=0)\n result = execute(suite)\n assert result.success\n","sub_path":"tests/test_wsgi.py","file_name":"test_wsgi.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"11945056","text":"from django.contrib.gis.geos.polygon import Polygon\nfrom django.db.models import Q\nfrom collections import defaultdict\nfrom functools import reduce\nimport re\n\nfrom .models import Attribute\nfrom parcel.models import LotSize, LotQuantiles\n\n\ndef get_lot_size_groups():\n quantiles = 
LotQuantiles.objects.all()[0]\n    return {\n        \"small\": {\"lot_size__lte\": quantiles.small_lot},\n        \"medium\": {\"lot_size__lte\": quantiles.medium_lot,\n                   \"lot_size__gt\": quantiles.small_lot},\n        \"large\": {\"lot_size__gt\": quantiles.medium_lot}\n    }\n\nLOT_SIZES = None\n\ndef get_lot_sizes():\n    global LOT_SIZES\n    if not LOT_SIZES:\n        LOT_SIZES = get_lot_size_groups()\n\n    return LOT_SIZES\n\n\ndef make_size_query(param):\n    # named size groups (\"small\"/\"medium\"/\"large\") take precedence\n    size_query = get_lot_sizes().get(param.lower())\n    if size_query:\n        return size_query\n\n    # otherwise accept a comparison such as <, <=, > or >= of a number\n    m = re.match(r\"([<>])(=?)(\\d+(\\.\\d+)?)\", param)\n    if m:\n        size_op = \"lot_size__{op}{eq}\".format(\n            op=\"lt\" if m.group(1) == \"<\" else \"gt\",\n            eq=\"e\" if m.group(2) else \"\")\n        return {size_op: float(m.group(3))}\n\n\n
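# Example (added; the values are hypothetical): make_size_query(\"<=5000\")\n# yields {\"lot_size__lte\": 5000.0}, while make_size_query(\"small\") returns the\n# cached quantile filter from get_lot_sizes().\n\n\n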
def run_attributes_query(d):\n    \"\"\"Find the proposals whose attributes match the ``attr.*`` query parameters.\n\n    :param d: A dictionary-like object, typically something like\n    request.GET.\n\n    :returns: A list of the proposal IDs that match every attribute\n    subquery, or None when no ``attr.*`` parameters are present.\n    \"\"\"\n    subqueries = []\n\n    for k, val in d.items():\n        if not k.startswith(\"attr.\"):\n            continue\n\n        subqueries.append(Q(handle=k[5:], text_value__contains=val))\n\n    if subqueries:\n        query = reduce(Q.__or__, subqueries, Q())\n        attrs = Attribute.objects.filter(query)\\\n                                 .values(\"proposal_id\", \"handle\",\n                                         \"text_value\")\n        attr_maps = defaultdict(int)\n        for attr in attrs:\n            attr_maps[attr[\"proposal_id\"]] += 1\n\n        return [pid for pid, c in attr_maps.items() if c == len(subqueries)]\n\n\nquery_params = {\n    \"case\": \"case_number\",\n    \"address\": \"address\",\n    \"source\": \"source\"\n}\n\n\ndef build_proposal_query_dict(d):\n    subqueries = {}\n    ids = run_attributes_query(d) or []\n\n    if \"id\" in d:\n        ids = re.split(r\"\\s*,\\s*\", d[\"id\"])\n\n    if \"text\" in d:\n        subqueries[\"address__icontains\"] = d[\"text\"]\n\n    if d.get(\"region\"):\n        regions = re.split(r\"\\s*;\\s*\", d[\"region\"])\n        subqueries[\"region_name__in\"] = regions\n\n    if ids:\n        subqueries[\"pk__in\"] = ids\n\n    if \"projects\" in d:\n        if d[\"projects\"] == \"null\":\n            subqueries[\"project__isnull\"] = True\n        elif d[\"projects\"] == \"all\":\n            subqueries[\"project__isnull\"] = False\n\n    if \"lotsize\" in d:\n        parcel_query = make_size_query(d[\"lotsize\"])\n        if parcel_query:\n            parcel_ids = LotSize.objects.filter(**parcel_query).values(\"parcel_id\")\n            subqueries[\"parcel_id__in\"] = parcel_ids\n\n    bounds = d.get(\"box\")\n    if bounds:\n        coords = [float(coord) for coord in bounds.split(\",\")]\n        # Coordinates are submitted to the server as\n        # latMin,longMin,latMax,longMax, but from_bbox wants its arguments in a\n        # different order:\n        bbox = Polygon.from_bbox((coords[1], coords[0], coords[3], coords[2]))\n        subqueries[\"location__within\"] = bbox\n\n    # If status is anything other than 'active' or 'closed', find all\n    # proposals.\n    status = d.get(\"status\", \"active\").lower()\n    if status == \"closed\":\n        subqueries[\"complete\"] = True\n    elif status == \"active\":\n        subqueries[\"complete\"] = False\n\n    event = d.get(\"event\")\n    if event:\n        try:\n            subqueries[\"event\"] = int(event)\n        except ValueError:\n            pass\n\n    for k in d:\n        if k in query_params:\n            subqueries[query_params[k]] = d[k]\n\n    return subqueries\n\n\ndef build_proposal_query(d):\n    subqueries = build_proposal_query_dict(d)\n    return Q(**subqueries)\n","sub_path":"server/proposal/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180008003","text":"\ndef minmax(seq):\n\n    \"\"\"\n    Takes a sequence of one or more numbers and returns the smallest and largest numbers\n    in the form of a tuple of length two.\n    \"\"\"\n    smallest = seq[0]\n    largest = seq[0]\n    for n in seq:\n        if n > largest:\n            largest = n\n        elif n < smallest:\n            smallest = n\n        else:\n            continue\n    return smallest, largest\n\n\n\nprint(minmax([1,-3,34,12,100]), '-- should return (-3, 100)')\nprint(minmax([1,0,34,12,101]), '-- should return (0, 101)')\nprint(minmax([1,34,12,40]), '-- should return (1, 40)')\nprint(minmax([2, 1,3,34,12,9]), '-- should return (1, 34)')","sub_path":"python-fundamentals/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146869194","text":"import sqlite3\nimport datetime\n\n\ndef get_chrome(con: sqlite3.Connection) -> list:\n    \"\"\"\n    Reads the Chrome history database and processes the data\n    :param con: database connection\n    :return: list of visits\n    \"\"\"\n    c = con.cursor()\n\n    c.execute(\"select id, url, title from urls\")\n    url_id_tuples = c.fetchall()\n    url_id = dict()\n    for url in url_id_tuples:\n        url_id[url[0]] = (url[1], url[2])\n\n    c.execute(\"select url, visit_time, visit_duration from visits\")\n    results_with_url_as_id = c.fetchall()\n\n    results = []\n    for result in results_with_url_as_id:\n        url = url_id[result[0]]\n        # Chrome stores WebKit timestamps: microseconds since 1601-01-01\n        date = datetime.datetime.fromtimestamp((result[1]) / 1000000 - 11644473600).__str__().split()\n        results.append((url[0], url[1], date[0], date[1], result[2]))\n\n    c.close()\n    return results\n\n\ndef get_chrome_os(user: str, os: str) -> list:\n    \"\"\"\n    Reads Chrome history for the given OS\n    Returns list of tuples. Each tuple has structure:\n    (url: str, title: str, date_of_last_visit: str(\"yyyy-mm-dd\"),\n    time_of_last_visit: str(\"hh:mm:ss.ms\"), time_of_visit: int)\n    :param user: username of computer\n    :param os: OS of computer. 
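Determines which platform-specific History path is read. 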
Can be \"Windows\", \"Linux\" or \"MacOS\"\n :return: list of visits\n \"\"\"\n if os == \"Linux\":\n con = sqlite3.connect(f'/home/{user}/.config/google-chrome/Default/History')\n # elif os == \"Windows\":\n # con = sqlite3.connect(f'C:\\Users\\{user}\\AppData\\Local\\Google\\Chrome\\User Data\\Default')\n elif os == \"MacOS\":\n con = sqlite3.connect(f'/Users/{user}/Library/Application Support/Google/Chrome/Default/History')\n else:\n raise ValueError(\"Incorrect OS\")\n return get_chrome(con)\n\n\ndef write_data_to_file(history: list, filename: str) -> None:\n \"\"\"\n Writes data to file\n :param history: list of visits of browser\n :param filename: name of file to write\n :return:\n \"\"\"\n with open(filename, \"w\") as file:\n for element in history:\n file.write(str(element) + \"\\n\")","sub_path":"lab7/get_browser_history.py","file_name":"get_browser_history.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359107270","text":"\"\"\"\nHealth check for Horizon.\n\nHealthy: Corresponding stellar-core is 'Synced'\nUnhealthy: Corresponding stellar-core is not 'Synced'\n\"\"\"\nimport json\nimport time\nimport os\n\nimport requests\nfrom flask import Flask\nfrom flask_cors import CORS\n\n\nAPP = Flask(__name__)\nCORS(APP)\nSTART_TIMESTAMP = time.time()\n\n# Load configuration from env variables\nCORE_INFO_URL = os.environ['CORE_INFO_URL']\nHORIZON_INFO_URL = os.environ['HORIZON_INFO_URL']\nBUILD_VERSION = os.environ['BUILD_VERSION']\nREQUEST_TIMEOUT = float(os.environ['REQUEST_TIMEOUT'])\nMAX_HEALTHY_DIFF = 10\n\n\ndef make_reply(msg, code):\n \"\"\"Create a JSON reply for /status.\"\"\"\n reply = {\n 'status': 'Healthy' if code == 200 else 'Unhealthy',\n 'description': msg,\n 'start_timestamp': START_TIMESTAMP,\n 'build': BUILD_VERSION\n }\n\n return json.dumps(reply), code\n\n\n@APP.route(\"/status\")\ndef status():\n \"\"\"Check if the stellar core is synced.\"\"\"\n try:\n response = requests.get(CORE_INFO_URL, timeout=REQUEST_TIMEOUT)\n response.raise_for_status()\n\n is_core_healthy = (response.json()['info']['state'] == 'Synced!')\n core_status_str = 'synced' if is_core_healthy else 'not synced'\n\n response = requests.get(HORIZON_INFO_URL, timeout=REQUEST_TIMEOUT)\n response.raise_for_status()\n\n core_latest_ledger = int(response.json()['core_latest_ledger'])\n history_latest_ledger = int(response.json()['history_latest_ledger'])\n is_horizon_healthy = (core_latest_ledger - history_latest_ledger) < MAX_HEALTHY_DIFF\n horizon_status_str = 'synced' if is_horizon_healthy else 'not synced'\n\n msg = 'Core, Horizon status is: ({}, {})'.format(core_status_str, horizon_status_str)\n\n if is_core_healthy and is_horizon_healthy:\n return make_reply(msg, 200)\n return make_reply(msg, 503)\n except Exception as e:\n return make_reply('Could not perform health check: {}'.format(str(e)), 503)\n","sub_path":"apps/horizon-health-check/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474696223","text":"\"\"\"A - Round Up the Mean\nhttps://atcoder.jp/contests/abc082/tasks/abc082_a\na b\n\n>>> main(1, 3)\n2\n>>> main(7, 4)\n6\n>>> main(5, 5)\n5\n\n\"\"\"\n\n\ndef main(a: int, b: int):\n print((a + b + 1) // 2)\n\n\nif __name__ == \"__main__\":\n a, b = map(int, input().split(\" \"))\n\n main(a, 
b)\n","sub_path":"abc/abc082/abc082_a.py","file_name":"abc082_a.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527844220","text":"#!/usr/bin/env python\r\n\r\nimport numpy as np\r\nimport sklearn\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nimport pickle\r\n\r\nfrom sensor_stick.srv import GetNormals\r\nfrom sensor_stick.features import compute_color_histograms\r\nfrom sensor_stick.features import compute_normal_histograms\r\nfrom visualization_msgs.msg import Marker\r\n\r\nfrom sensor_stick.marker_tools import *\r\nfrom sensor_stick.msg import DetectedObjectsArray\r\nfrom sensor_stick.msg import DetectedObject\r\nfrom sensor_stick.pcl_helper import *\r\n\r\ndef get_normals(cloud):\r\n get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)\r\n return get_normals_prox(cloud).cluster\r\n\r\n\r\n# Create Publishers\r\ndetected_objects_pub = rospy.Publisher('/detected_objects', PointCloud2, queue_size=10)\r\nobject_markers_pub = rospy.Publisher('/object_markers', PointCloud2, queue_size=10)\r\n\r\n# Callback function for your Point Cloud Subscriber\r\ndef pcl_callback(pcl_msg):\r\n rospy.loginfo(\"Received new cloud data!\\n\")\r\n\r\n # Convert ROS msg to PCL data\r\n #---------------------------------------\r\n cloud = ros_to_pcl(pcl_msg)\r\n\r\n\r\n # Voxel Grid Downsampling\r\n #---------------------------------------\r\n vox = cloud.make_voxel_grid_filter()\r\n # voxel size\r\n LEAF_SIZE = 0.01 # cm seems to be a good choice to not miss features\r\n\r\n # Set the voxel size in the filter object\r\n vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)\r\n\r\n # Call the filter function to obtain the resultant downsampled point cloud\r\n cloud_filtered = vox.filter()\r\n\r\n\r\n # PassThrough Filter\r\n #---------------------------------------\r\n # create filter object\r\n passthrough = cloud_filtered.make_passthrough_filter()\r\n\r\n # Assign axis and range to the passthrough filter object.\r\n filter_axis = 'z'\r\n passthrough.set_filter_field_name(filter_axis)\r\n axis_min = 0.6\r\n axis_max = 1.15\r\n passthrough.set_filter_limits(axis_min, axis_max)\r\n\r\n # Apply filter to the voxel grid, resultant point cloud. 
\r\n cloud_filtered = passthrough.filter()\r\n\r\n\r\n # RANSAC Plane Segmentation\r\n #---------------------------------------\r\n # create segmentation object\r\n seg = cloud_filtered.make_segmenter()\r\n\r\n # Set the model you wish to fit \r\n seg.set_model_type(pcl.SACMODEL_PLANE)\r\n seg.set_method_type(pcl.SAC_RANSAC)\r\n\r\n # Max distance for a point to be considered fitting the model Experiment with different values for max_distance for segmenting the table\r\n max_distance = 0.005\r\n seg.set_distance_threshold(max_distance)\r\n\r\n # Call the segment function to obtain set of inlier indices and model coefficients\r\n inliers, coefficients = seg.segment()\r\n\r\n\r\n # Extract table and objects\r\n #---------------------------------------\r\n pcl_table = cloud_filtered.extract(inliers, negative=False)\r\n pcl_objects = cloud_filtered.extract(inliers, negative=True)\r\n\r\n\r\n # Euclidean Clustering\r\n #---------------------------------------\r\n white_cloud = XYZRGB_to_XYZ(pcl_objects)\r\n tree = white_cloud.make_kdtree()\r\n\r\n # Create a cluster extraction object\r\n ec = white_cloud.make_EuclideanClusterExtraction()\r\n\r\n # Set tolerances for distance threshold as well as minimum and maximum cluster size (in points)\r\n ec.set_ClusterTolerance(0.005)\r\n ec.set_MinClusterSize(10)\r\n ec.set_MaxClusterSize(250)\r\n\r\n # Search the k-d tree for clusters\r\n ec.set_SearchMethod(tree)\r\n # Extract indices for each of the discovered clusters\r\n cluster_indices = ec.Extract()\r\n\r\n # Create Cluster-Mask Point Cloud to visualize each cluster separately\r\n #---------------------------------------\r\n # Assign a color corresponding to each segmented object in scene\r\n cluster_color = get_color_list(len(cluster_indices))\r\n\r\n color_cluster_point_list = []\r\n\r\n for j, indices in enumerate(cluster_indices):\r\n for i, indice in enumerate(indices):\r\n color_cluster_point_list.append([white_cloud[indice][0],\r\n white_cloud[indice][1],\r\n white_cloud[indice][2],\r\n rgb_to_float(cluster_color[j])])\r\n\r\n #Create new cloud containing all clusters, each with unique color\r\n cluster_cloud = pcl.PointCloud_PointXYZRGB()\r\n cluster_cloud.from_list(color_cluster_point_list)\r\n \r\n\r\n # Convert PCL data to ROS messages\r\n #---------------------------------------\r\n pcl_table_ros = pcl_to_ros(pcl_table)\r\n pcl_objects_ros = pcl_to_ros(pcl_objects)\r\n ros_cluster_ros = pcl_to_ros(cluster_cloud)\r\n\r\n\r\n # Publish ROS messages\r\n #---------------------------------------\r\n pcl_objects_pub.publish(pcl_table_ros)\r\n pcl_table_pub.publish(pcl_objects_ros)\r\n pcl_cluster_pub.publish(ros_cluster_ros)\r\n\r\n\r\n # Classify the clusters!\r\n #---------------------------------------\r\n detected_objects_labels = []\r\n detected_objects = []\r\n\r\n\r\n for index, pts_list in enumerate(cluster_indices):\r\n # Grab the points for the cluster from the extracted outliers (cloud_objects)\r\n pcl_cluster = cloud_objects.extract(pts_list)\r\n\r\n # convert the cluster from pcl to ROS using helper function\r\n pcl_cluster_ros = pcl_to_ros(pcl_cluster)\r\n\r\n # Extract histogram features\r\n chists = compute_color_histograms(sample_cloud, using_hsv=False)\r\n normals = get_normals(sample_cloud)\r\n nhists = compute_normal_histograms(normals)\r\n feature = np.concatenate((chists, nhists))\r\n labeled_features.append([feature, model_name])\r\n\r\n # Make the prediction, retrieve the label for the result and add it to detected_objects_labels list\r\n prediction = 
clf.predict(scaler.transform(feature.reshape(1,-1)))\r\n label = encoder.inverse_transform(prediction)[0]\r\n detected_objects_labels.append(label)\r\n\r\n # Publish a label into RViz\r\n label_pos = list(white_cloud[pts_list[0]])\r\n label_pos[2] += .4\r\n object_markers_pub.publish(make_label(label,label_pos, index))\r\n\r\n # Add the detected object to the list of detected objects.\r\n do = DetectedObject()\r\n do.label = label\r\n do.cloud = ros_cluster\r\n detected_objects.append(do)\r\n\r\n rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))\r\n\r\n # Publish the list of detected objects. This is the output you'll need to complete the upcoming project!\r\n detected_objects_pub.publish(detected_objects)\r\n\r\n\r\ndef object_recon():\r\n # ROS node initialization\r\n rospy.init_node('object_recon')\r\n\r\n # Create Subscribers\r\n subs = rospy.Subscriber('/sensor_stick/point_cloud', pc2.PointCloud2, pcl_callback, queue_size=1)\r\n\r\n # Initialize color_list\r\n get_color_list.color_list = []\r\n\r\n # Load Model From disk\r\n model = pickle.load(open('model.sav', 'rb'))\r\n clf = model['classifier']\r\n encoder = LabelEncoder()\r\n encoder.classes_ = model['classes']\r\n scaler = model['scaler']\r\n\r\n # Spin while node is not shutdown\r\n while not rospy.is_shutdown():\r\n rospy.spin()\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n object_recon()\r\n except rospy.ROSInterruptException:\r\n pass\r\n\r\n\r\n","sub_path":"prj_3d_perceptio/object_recon.py","file_name":"object_recon.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138215395","text":"# Создать программно файл в текстовом формате, записать в него построчно данные, вводимые пользователем.\n# Об окончании ввода данных свидетельствует пустая строка.\n\n\ndef user_inp():\n while True:\n user_input = input(\"Введите данные для записи в файл: \")\n if user_input == \"\":\n break\n with open(\"hw_01.txt\", \"a\", encoding=\"u8\") as file_obj:\n file_obj.writelines(user_input + '\\n')\n print(\"Данные записаны!\")\n\n\nuser_inp()\n","sub_path":"lesson05/lesson05hw01.py","file_name":"lesson05hw01.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211731365","text":"#\n# @lc app=leetcode.cn id=1260 lang=python3\n#\n# [1260] 二维网格迁移\n#\n# https://leetcode-cn.com/problems/shift-2d-grid/description/\n#\n# algorithms\n# Easy (57.29%)\n# Likes: 24\n# Dislikes: 0\n# Total Accepted: 5.6K\n# Total Submissions: 9.4K\n# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]\\n1'\n#\n# 给你一个 m 行 n 列的二维网格 grid 和一个整数 k。你需要将 grid 迁移 k 次。\n# \n# 每次「迁移」操作将会引发下述活动:\n# \n# \n# 位于 grid[i][j] 的元素将会移动到 grid[i][j + 1]。\n# 位于 grid[i][n - 1] 的元素将会移动到 grid[i + 1][0]。\n# 位于 grid[m - 1][n - 1] 的元素将会移动到 grid[0][0]。\n# \n# \n# 请你返回 k 次迁移操作后最终得到的 二维网格。\n# \n# \n# \n# 示例 1:\n# \n# \n# \n# 输入:grid = [[1,2,3],[4,5,6],[7,8,9]], k = 1\n# 输出:[[9,1,2],[3,4,5],[6,7,8]]\n# \n# \n# 示例 2:\n# \n# \n# \n# 输入:grid = [[3,8,1,9],[19,7,2,5],[4,6,11,10],[12,0,21,13]], k = 4\n# 输出:[[12,0,21,13],[3,8,1,9],[19,7,2,5],[4,6,11,10]]\n# \n# \n# 示例 3:\n# \n# 输入:grid = [[1,2,3],[4,5,6],[7,8,9]], k = 9\n# 输出:[[1,2,3],[4,5,6],[7,8,9]]\n# \n# \n# \n# \n# 提示:\n# \n# \n# 1 <= grid.length <= 50\n# 1 <= grid[i].length <= 50\n# -1000 <= grid[i][j] <= 1000\n# 0 <= k <= 100\n# \n# \n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def 
shiftGrid(self, grid: List[List[int]], k: int) -> List[List[int]]:\n        g = [[0] * len(grid[0]) for _ in range(len(grid))]\n        for r in range(len(grid)):\n            for c in range(len(grid[0])):\n                g[(r + (c + k) // len(grid[0])) % len(grid)][(c + k) % len(grid[0])] = grid[r][c]\n        return g\n\n# @lc code=end\n","sub_path":"easy/1260.二维网格迁移.py","file_name":"1260.二维网格迁移.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"280881720","text":"import os, json, boto3\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory, render_template\nfrom werkzeug.utils import secure_filename\nfrom helpers import *\n\nUPLOAD_FOLDER = \"/tmp/\"\nALLOWED_EXTENSIONS = set([\"txt\", \"csv\"])\n\napp = Flask(__name__)\napp.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\napp.secret_key = \"insights\"\napp.config.from_object(\"config\")\n#app.config.from_object(\"insights.config\")\n\n\ndef allowed_filename(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\n@app.route('/upload', methods=[\"GET\", \"POST\"])\ndef upload_file():\n    if request.method == \"POST\":\n        print(type(request.files))\n        if 'file' not in request.files:\n            flash('no file part')\n            print(\"NO FILE PART\")\n            return redirect(request.url)\n        file = request.files['file']\n\n        if file.filename == '':\n            print(\"FILE.FILENAME\")\n            return redirect(request.url)\n\n        if file and allowed_filename(file.filename):\n            print(\"type of file\", type(file), type(file.filename))\n            '''\n            rows = file.readlines()\n            rows = [line.decode('utf-8').strip() for line in rows]\n            rows = [int(val) for val in rows if val != '']\n            total = 0\n            average = None\n\n            if len(rows) != 0:\n                for val in rows:\n                    total += val\n                average = total / len(rows)\n            '''\n            file.filename = secure_filename(file.filename)\n\n            output = upload_file_to_s3(file, app.config['S3_BUCKET'])\n            print(\"OUTPUT: \", output)\n            print(app.config[\"S3_LOCATION\"], file.filename)\n            download_status = download_from_s3(app.config['S3_BUCKET'], file.filename)\n\n            if download_status is True:\n                print(\"Downloaded from S3 successfully\")\n            else:\n                print(\"Unsuccessful\")\n            #return render_template('upload_success.html', average=average)\n\n    return render_template('upload.html')\n\n\n@app.route(\"/\")\ndef home():\n    return \"It's working\"\n\n\nif __name__ == '__main__':\n    '''\n    app.debug = True\n    port = int(os.environ.get('PORT', 5001))\n    app.run(host='0.0.0.0', port=port)\n    '''\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"385664374","text":"import random\r\nimport time\r\n\r\nplay_again = 'yes'\r\n\r\nwhile play_again == 'yes':\r\n\r\n    print('You are in a land full of dragons.\\nIn front of you, you see two caves.\\nIn one cave, the dragon is '\r\n          'friendly and will share his treasure with you.'\r\n          '\\nThe other dragon is greedy and hungry, and will eat you on sight')\r\n\r\n    comp_choice = random.randint(1, 2)\r\n\r\n    print('Which cave will you go into? (1 or 2)')\r\n    user_choice = int(input())\r\n\r\n    while user_choice not in [1, 2]:\r\n        print('Which cave will you go into? 
(1 or 2)')\r\n user_choice = int(input())\r\n\r\n print('You approach the cave...')\r\n time.sleep(2)\r\n print('It is dark and spooky...')\r\n time.sleep(2)\r\n print('A large dragon jumps out in front of you!')\r\n time.sleep(2)\r\n print('He opens his jaws and...')\r\n time.sleep(2)\r\n\r\n if user_choice == comp_choice:\r\n print('Shares his treasures with you')\r\n else:\r\n print('Gobbles you down its throat.\\nSorry, that is just life')\r\n\r\n time.sleep(2)\r\n print()\r\n\r\n print('Do you want to play again? (yes or no)')\r\n play_again = input().lower()\r\n if play_again == 'no':\r\n print('Dragon will still eat you las las')\r\n\r\n time.sleep(3)\r\n","sub_path":"Coven_labs/Task 5___DragonCave.py","file_name":"Task 5___DragonCave.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647746385","text":"# Copyright 2020- Robot Framework Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom subprocess import DEVNULL, PIPE, STDOUT, CalledProcessError, Popen\n\nINSTALLATION_DIR = Path(__file__).parent / \"wrapper\"\n# This is required because weirdly windows doesn't have `npm` in PATH without shell=True.\n# But shell=True breaks our linux CI\nSHELL = True if platform.platform().startswith(\"Windows\") else False\n\n\ndef rfbrowser_init(skip_browser_install: bool):\n print(\"Installing node dependencies...\")\n if not (INSTALLATION_DIR / \"package.json\").is_file():\n print(\n f\"Installation directory `{INSTALLATION_DIR}` does not contain the required package.json \"\n + \"\\nPrinting contents:\"\n )\n for root, _dirs, files in os.walk(INSTALLATION_DIR):\n level = root.replace(INSTALLATION_DIR.__str__(), \"\").count(os.sep)\n indent = \" \" * 4 * (level)\n print(\"{}{}/\".format(indent, os.path.basename(root)))\n subindent = \" \" * 4 * (level + 1)\n for f in files:\n print(\"{}{}\".format(subindent, f))\n raise RuntimeError(\"Could not find robotframework-browser's package.json\")\n if not os.access(INSTALLATION_DIR, os.W_OK):\n sys.tracebacklimit = 0\n raise RuntimeError(\n f\"`rfbrowser init` needs write permissions to {INSTALLATION_DIR}\"\n )\n\n print(f\"Installing rfbrowser node dependencies at {INSTALLATION_DIR}\")\n\n try:\n subprocess.run([\"npm\", \"-v\"], stdout=DEVNULL, check=True, shell=SHELL)\n except (CalledProcessError, FileNotFoundError, PermissionError) as exception:\n print(\n \"Couldn't execute npm. 
Please ensure you have node.js and npm installed and in PATH.\"\n \"See https://nodejs.org/ for documentation\"\n )\n sys.exit(exception)\n\n if skip_browser_install:\n os.environ[\"PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD\"] = \"1\"\n else:\n if not os.environ.get(\"PLAYWRIGHT_BROWSERS_PATH\"):\n os.environ[\"PLAYWRIGHT_BROWSERS_PATH\"] = \"0\"\n\n process = Popen(\n \"npm install --production\",\n shell=True,\n cwd=INSTALLATION_DIR,\n stdout=PIPE,\n stderr=STDOUT,\n )\n\n while process.poll() is None:\n if process.stdout:\n output = process.stdout.readline()\n print(output.decode(\"utf-8\"))\n\n if process.returncode != 0:\n raise RuntimeError(\n \"Problem installing node dependencies.\"\n + f\"Node process returned with exit status {process.returncode}\"\n )\n\n print(\"rfbrowser init completed\")\n\n\ndef show_trace(file: str):\n print(f\"Opening file: {file}\")\n playwright = INSTALLATION_DIR / \"node_modules\" / \"playwright\"\n local_browsers = playwright / \".local-browsers\"\n env = os.environ.copy()\n env[\"PLAYWRIGHT_BROWSERS_PATH\"] = str(local_browsers)\n trace_arguments = [\n \"npx\",\n \"playwright\",\n \"show-trace\",\n file,\n ]\n subprocess.run(trace_arguments, env=env, shell=SHELL)\n\n\n# Based on: https://stackoverflow.com/questions/3853722/how-to-insert-newlines-on-argparse-help-text\nclass SmartFormatter(argparse.HelpFormatter):\n def _split_lines(self, text, width):\n if text.startswith(\"Possible commands are:\"):\n parts = []\n for part in text.splitlines():\n part = argparse.HelpFormatter._split_lines(self, part, width)\n parts.extend(part if part else \"\\n\")\n return parts\n return argparse.HelpFormatter._split_lines(self, text, width)\n\n\ndef run():\n parser = argparse.ArgumentParser(\n description=\"Robot Framework Browser library command line tool.\",\n formatter_class=SmartFormatter,\n )\n parser.add_argument(\n \"command\",\n help=(\n \"Possible commands are:\\ninit\\nshow-trace\\n\\ninit command will install the required node dependencies. \"\n \"init command is needed when library is installed or updated.\\n\\nshow-trace command will start the \"\n \"Playwright trace viewer tool.\\n\\nSee the each command argument group for more details what (optional) \"\n \"arguments that command supports.\"\n ),\n )\n install = parser.add_argument_group(\"init options\")\n install.add_argument(\n \"--skip-browsers\",\n help=\"If defined skips the Playwright browser installation. Argument is optional\",\n default=False,\n action=\"store_true\",\n )\n trace = parser.add_argument_group(\"show-trace options\")\n trace.add_argument(\n \"--file\",\n \"-F\",\n help=(\n \"Full path to trace zip file. See New Context keyword for more details how to \"\n \"create trace file. 
Argument is mandatory.\"\n        ),\n        default=False,\n    )\n    args = parser.parse_args()\n    if args.command == \"init\":\n        rfbrowser_init(args.skip_browsers)\n    elif args.command == \"show-trace\":\n        if not args.file:\n            raise Exception(\"show-trace needs also --file argument\")\n        show_trace(args.file)\n    else:\n        raise Exception(\n            f\"Command should be init or show-trace, but it was {args.command}\"\n        )\n\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"Browser/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"315475164","text":"#!/usr/bin/env python3\n\n# external modules\n\n# my modules\nimport Tree\nimport DCC\n\n\n# Login to DCC\ns = DCC.login(Site = 'Production')\n\n# This utility creates an html report of the content below the specified collection\n\n# froot = 'B. Enclosure Pre-Preliminary Design and Requirements Phase'\n# coll = 'Collection-2219'\n\n# froot = 'C. Enclosure Preliminary Design Phase'\n# coll = 'Collection-2219'\n\ncoll = 'Collection-10598'\nfroot = coll\n\ntr = Tree.return_tree(s, coll, froot)\nTree.html_tree(s,tr,froot)","sub_path":"Make_html_tree.py","file_name":"Make_html_tree.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"475382297","text":"#9/13/2018\n#Description: For homework we were asked to make three different programs with given instructions. The instructions for this one were\n#to make a program that took 2 values for Fahrenheit and Windspeed as t and v respectively and calculated the wind chill. We were also\n#told that the formula was only valid if it was less than or equal to the absolute value of 50 and the wind speed was only valid if it\n#was more than or equal to 3mph and less than or equal to 120mph. The part that I struggled with most was probably kicking people out of\n#the program/telling them that their arguments were not valid. That was because my code was unorganized. 
To solve this problem I opened\n#up a fresh untitled area and went to work copying and pasting and then eventually simplifying and fixing my errors.\n\n\nimport sys\nif len(sys.argv)==3:\n\tt = float(sys.argv[1])\n\tv = float(sys.argv[2])\n\tif t<=50 and v>=3 and v<=120:\n\t\tw = 35.74+(.6215*t)+((.4275*t)-35.75)*(v**.16)\n\t\tprint(\"The wind chill is:\",str(w))\n\tif t>50:\n\t\tprint(\"Please enter a value for Fahrenheit that is less than or equal to 50\")\n\tif v<3:\n\t\tprint(\"Please enter a value for wind speed in miles that is more than or equal to 3\")\n\tif v>120:\n\t\tprint(\"Please enter a value for wind speed in miles that is less than or equal to 120\")\nif len(sys.argv) !=3:\n\tprint(\"Please only enter the 2 specified arguments in the form: <program> <Fahrenheit> <Wind Speed>\")","sub_path":"Submitted homework/3Homeworks/WindTempCalc.py","file_name":"WindTempCalc.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"604581312","text":"from .event_server import EventServer\nfrom .event_client import EventClient\nfrom .rpc_server import RpcServer\nfrom .rpc_client import RpcClient\n\n\nclass Synapse(EventServer, EventClient, RpcServer, RpcClient):\n    def __init__(self, app_name='', app_id='', sys_name='', mq_host='', mq_port='', mq_user='',\n                 mq_pass='', debug=False,\n                 disable_rpc_client=False, disable_event_client=False, event_callback_map={},\n                 rpc_callback_map={}):\n        self.app_name = app_name\n        self.sys_name = sys_name\n        self.mq_host = mq_host\n        self.mq_port = mq_port\n        self.mq_user = mq_user\n        self.mq_pass = mq_pass\n        self.app_id = app_id\n        self.debug = debug\n        self.disable_rpc_client = disable_rpc_client\n        self.disable_event_client = disable_event_client\n        self.event_callback_map = event_callback_map\n        self.rpc_callback_map = rpc_callback_map\n        self.is_server = False\n\n    def __del__(self):\n        self.conn.release()\n\n    def serve(self):\n        self.serve_handler()\n        while self.is_server:\n            try:\n                self.conn.drain_events()\n            except:\n                self.log(\"[Synapse Error] System Connection Lost, Reconnect... 
\")\n self.conn.release()\n self.serve_handler()\n\n def serve_handler(self):\n if self.app_name == \"\" or self.sys_name == \"\":\n self.log(\"[Synapse Error] Must Set app_name and sys_name , system exit .\")\n exit(1)\n else:\n self.log(\"[Synapse Info] System Name: %s\" % self.sys_name)\n self.log(\"[Synapse Info] System App Name: %s\" % self.app_name)\n self.log(\"[Synapse Info] App MaxProcessNum: %d\" % self.proccess_num)\n if self.debug:\n self.log(\"[Synapse Warn] System Run Mode: Debug\")\n else:\n self.log(\"[Synapse Info] System Run Mode: Production\")\n if self.app_id == \"\":\n self.app_id = self.random_str()\n self.log(\"[Synapse Info] System App Id: %s\" % self.app_id)\n self.create_connection()\n self.check_exchange()\n if self.event_callback_map == {}:\n self.log(\"[Synapse Warn] Event Server Handler Disabled: event_callback_map not set\")\n else:\n self.is_server = True\n self.event_server_serve()\n for k in self.event_callback_map:\n self.log(\"[Synapse Info] *ENT: %s -> %s\" % (k, self.event_callback_map[k].__name__))\n if self.rpc_callback_map == {}:\n self.log(\"[Synapse Warn] Rpc Handler Server Disabled: rpc_callback_map not set\")\n else:\n self.is_server = True\n self.rpc_server_serve()\n for k in self.rpc_callback_map:\n self.log(\"[Synapse Info] *RPC: %s -> %s\" % (k, self.rpc_callback_map[k].__name__))\n if self.disable_event_client:\n self.log(\"[Synapse Warn] Event Sender Disabled: disable_event_client set True\")\n else:\n self.log(\"[Synapse Info] Event Sender Ready\")\n if self.disable_rpc_client:\n self.log(\"[Synapse Warn] Rpc Sender Disabled: disable_rpc_client set True\")\n else:\n self.is_server = True\n self.rpc_client_serve()\n if self.is_server:\n self.conn.ensure_connection(self.reconnect)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236426193","text":"\"\"\"Tests for UVFITS object.\"\"\"\nimport numpy as np\nimport copy\nimport os\nimport nose.tools as nt\nfrom pyuvdata import UVData\nimport pyuvdata.utils as uvutils\nimport pyuvdata.tests as uvtest\nfrom pyuvdata.data import DATA_PATH\n\n\ndef test_ReadNRAO():\n \"\"\"Test reading in a CASA tutorial uvfits file.\"\"\"\n UV = UVData()\n testfile = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')\n expected_extra_keywords = ['OBSERVER', 'SORTORD', 'SPECSYS',\n 'RESTFREQ', 'ORIGIN']\n uvtest.checkWarnings(UV.read_uvfits, [testfile], message='Telescope EVLA is not')\n nt.assert_equal(expected_extra_keywords.sort(),\n UV.extra_keywords.keys().sort())\n del(UV)\n\n\ndef test_noSPW():\n \"\"\"Test reading in a PAPER uvfits file with no spw axis.\"\"\"\n UV = UVData()\n testfile_no_spw = os.path.join(DATA_PATH, 'zen.2456865.60537.xy.uvcRREAAM.uvfits')\n uvtest.checkWarnings(UV.read_uvfits, [testfile_no_spw], known_warning='paper_uvfits')\n del(UV)\n\n\n# this test commented out because the file is too large to include in the repo\n# def test_readRTS():\n# \"\"\"Test reading in an RTS UVFITS file.\"\"\"\n# UV = UVData()\n# testfile = os.path.join(DATA_PATH, 'pumav2_SelfCal300_Peel300_01.uvfits')\n# test = UV.read_uvfits(testfile)\n# nt.assert_true(test)\n\ndef test_breakReadUVFits():\n \"\"\"Test errors on reading in a uvfits file with subarrays and other problems.\"\"\"\n UV = UVData()\n multi_subarray_file = os.path.join(DATA_PATH, 'multi_subarray.uvfits')\n nt.assert_raises(ValueError, UV.read_uvfits, multi_subarray_file)\n\n 
del(UV)\n\n\ndef test_spwnotsupported():\n \"\"\"Test errors on reading in a uvfits file with multiple spws.\"\"\"\n UV = UVData()\n testfile = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1scan.uvfits')\n nt.assert_raises(ValueError, UV.read_uvfits, testfile)\n del(UV)\n\n\ndef test_readwriteread():\n \"\"\"\n CASA tutorial uvfits loopback test.\n\n Read in uvfits file, write out new uvfits file, read back in and check for\n object equality.\n \"\"\"\n uv_in = UVData()\n uv_out = UVData()\n testfile = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')\n write_file = os.path.join(DATA_PATH, 'test/outtest_casa.uvfits')\n uvtest.checkWarnings(uv_in.read_uvfits, [testfile], message='Telescope EVLA is not')\n uv_in.write_uvfits(write_file)\n uvtest.checkWarnings(uv_out.read_uvfits, [write_file], message='Telescope EVLA is not')\n nt.assert_equal(uv_in, uv_out)\n\n # check that if x_orientation is set, it's read back out properly\n uv_in.x_orientation = 'east'\n uv_in.write_uvfits(write_file)\n uvtest.checkWarnings(uv_out.read_uvfits, [write_file], message='Telescope EVLA is not')\n nt.assert_equal(uv_in, uv_out)\n\n # check that if antenna_diameters is set, it's read back out properly\n uvtest.checkWarnings(uv_in.read_uvfits, [testfile], message='Telescope EVLA is not')\n uv_in.antenna_diameters = np.zeros((uv_in.Nants_telescope,), dtype=np.float) + 14.0\n uv_in.write_uvfits(write_file)\n uvtest.checkWarnings(uv_out.read_uvfits, [write_file], message='Telescope EVLA is not')\n nt.assert_equal(uv_in, uv_out)\n\n # check error if timesys is 'IAT'\n testfile = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')\n uvtest.checkWarnings(uv_in.read_uvfits, [testfile], message='Telescope EVLA is not')\n uv_in.timesys = 'IAT'\n nt.assert_raises(ValueError, uv_in.write_uvfits, write_file)\n\n del(uv_in)\n del(uv_out)\n\n\ndef test_extra_keywords():\n uv_in = UVData()\n uv_out = UVData()\n uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')\n testfile = os.path.join(DATA_PATH, 'test/outtest_casa.uvfits')\n uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')\n\n # check for warnings & errors with extra_keywords that are dicts, lists or arrays\n uv_in.extra_keywords['testdict'] = {'testkey': 23}\n uvtest.checkWarnings(uv_in.check, message=['testdict in extra_keywords is a '\n 'list, array or dict'])\n nt.assert_raises(TypeError, uv_in.write_uvfits, testfile, run_check=False)\n uv_in.extra_keywords.pop('testdict')\n\n uv_in.extra_keywords['testlist'] = [12, 14, 90]\n uvtest.checkWarnings(uv_in.check, message=['testlist in extra_keywords is a '\n 'list, array or dict'])\n nt.assert_raises(TypeError, uv_in.write_uvfits, testfile, run_check=False)\n uv_in.extra_keywords.pop('testlist')\n\n uv_in.extra_keywords['testarr'] = np.array([12, 14, 90])\n uvtest.checkWarnings(uv_in.check, message=['testarr in extra_keywords is a '\n 'list, array or dict'])\n nt.assert_raises(TypeError, uv_in.write_uvfits, testfile, run_check=False)\n uv_in.extra_keywords.pop('testarr')\n\n # check for warnings with extra_keywords keys that are too long\n uv_in.extra_keywords['test_long_key'] = True\n uvtest.checkWarnings(uv_in.check, message=['key test_long_key in extra_keywords '\n 'is longer than 8 characters'])\n uvtest.checkWarnings(uv_in.write_uvfits, [testfile], {'run_check': False},\n message=['key test_long_key in extra_keywords is longer than 8 characters'])\n uv_in.extra_keywords.pop('test_long_key')\n\n # check handling of 
boolean keywords\n uv_in.extra_keywords['bool'] = True\n uv_in.extra_keywords['bool2'] = False\n uv_in.write_uvfits(testfile)\n uvtest.checkWarnings(uv_out.read_uvfits, [testfile], message='Telescope EVLA is not')\n\n nt.assert_equal(uv_in, uv_out)\n uv_in.extra_keywords.pop('bool')\n uv_in.extra_keywords.pop('bool2')\n\n # check handling of int-like keywords\n uv_in.extra_keywords['int1'] = np.int(5)\n uv_in.extra_keywords['int2'] = 7\n uv_in.write_uvfits(testfile)\n uvtest.checkWarnings(uv_out.read_uvfits, [testfile], message='Telescope EVLA is not')\n\n nt.assert_equal(uv_in, uv_out)\n uv_in.extra_keywords.pop('int1')\n uv_in.extra_keywords.pop('int2')\n\n # check handling of float-like keywords\n uv_in.extra_keywords['float1'] = np.int64(5.3)\n uv_in.extra_keywords['float2'] = 6.9\n uv_in.write_uvfits(testfile)\n uvtest.checkWarnings(uv_out.read_uvfits, [testfile], message='Telescope EVLA is not')\n\n nt.assert_equal(uv_in, uv_out)\n uv_in.extra_keywords.pop('float1')\n uv_in.extra_keywords.pop('float2')\n\n # check handling of complex-like keywords\n uv_in.extra_keywords['complex1'] = np.complex64(5.3 + 1.2j)\n uv_in.extra_keywords['complex2'] = 6.9 + 4.6j\n uv_in.write_uvfits(testfile)\n uvtest.checkWarnings(uv_out.read_uvfits, [testfile], message='Telescope EVLA is not')\n\n nt.assert_equal(uv_in, uv_out)\n uv_in.extra_keywords.pop('complex1')\n uv_in.extra_keywords.pop('complex2')\n\n # check handling of comment keywords\n uv_in.extra_keywords['comment'] = ('this is a very long comment that will '\n 'be broken into several lines\\nif '\n 'everything works properly.')\n uv_in.write_uvfits(testfile)\n uvtest.checkWarnings(uv_out.read_uvfits, [testfile], message='Telescope EVLA is not')\n\n nt.assert_equal(uv_in, uv_out)\n\n\ndef test_ReadUVFitsWriteMiriad():\n \"\"\"\n read uvfits, write miriad test.\n Read in uvfits file, write out as miriad, read back in and check for\n object equality.\n \"\"\"\n uvfits_uv = UVData()\n miriad_uv = UVData()\n uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')\n testfile = os.path.join(DATA_PATH, 'test/outtest_miriad')\n uvtest.checkWarnings(uvfits_uv.read_uvfits, [uvfits_file], message='Telescope EVLA is not')\n uvfits_uv.write_miriad(testfile, clobber=True)\n uvtest.checkWarnings(miriad_uv.read_miriad, [testfile], message='Telescope EVLA is not')\n\n nt.assert_equal(miriad_uv, uvfits_uv)\n\n # check that setting the phase_type keyword also works\n uvtest.checkWarnings(miriad_uv.read_miriad, [testfile], {'phase_type': 'phased'},\n message='Telescope EVLA is not')\n\n # check that setting the phase_type to drift raises an error\n nt.assert_raises(ValueError, miriad_uv.read_miriad, testfile, phase_type='drift'),\n\n # check that setting it works after selecting a single time\n uvfits_uv.select(times=uvfits_uv.time_array[0])\n uvfits_uv.write_miriad(testfile, clobber=True)\n uvtest.checkWarnings(miriad_uv.read_miriad, [testfile],\n message='Telescope EVLA is not')\n\n nt.assert_equal(miriad_uv, uvfits_uv)\n\n del(uvfits_uv)\n del(miriad_uv)\n\n\ndef test_multi_files():\n \"\"\"\n Reading multiple files at once.\n \"\"\"\n uv_full = UVData()\n uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')\n testfile1 = os.path.join(DATA_PATH, 'test/uv1.uvfits')\n testfile2 = os.path.join(DATA_PATH, 'test/uv2.uvfits')\n uvtest.checkWarnings(uv_full.read_uvfits, [uvfits_file], message='Telescope EVLA is not')\n uv1 = copy.deepcopy(uv_full)\n uv2 = copy.deepcopy(uv_full)\n uv1.select(freq_chans=np.arange(0, 
32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1.write_uvfits(testfile1)\n uv2.write_uvfits(testfile2)\n uvtest.checkWarnings(uv1.read_uvfits, [[testfile1, testfile2]], nwarnings=2,\n message=['Telescope EVLA is not'])\n # Check history is correct, before replacing and doing a full object check\n nt.assert_true(uvutils.check_histories(uv_full.history + ' Downselected to '\n 'specific frequencies using pyuvdata. '\n 'Combined data along frequency axis '\n 'using pyuvdata.', uv1.history))\n\n uv1.history = uv_full.history\n nt.assert_equal(uv1, uv_full)\n\n\ndef test_readMSWriteUVFits_CASAHistory():\n \"\"\"\n read in .ms file.\n Write to a uvfits file, read back in and check for casa_history parameter\n \"\"\"\n ms_uv = UVData()\n uvfits_uv = UVData()\n ms_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.ms')\n testfile = os.path.join(DATA_PATH, 'test/outtest_uvfits')\n ms_uv.read_ms(ms_file)\n ms_uv.write_uvfits(testfile, spoof_nonessential=True)\n uvtest.checkWarnings(uvfits_uv.read_uvfits, [testfile],\n message='Telescope EVLA is not')\n nt.assert_equal(ms_uv, uvfits_uv)\n del(uvfits_uv)\n del(ms_uv)\n","sub_path":"pyuvdata/tests/test_uvfits.py","file_name":"test_uvfits.py","file_ext":"py","file_size_in_byte":10582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141435309","text":"import time\nfrom Base.base import Base\nfrom selenium.webdriver.support.ui import WebDriverWait # 用于处理元素等待\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom Check.PageCheck import PageAssert\nfrom Common.TestAsserts import Assertion\nfrom Base import ReadConfig\nfrom Base.Mylog import LogManager\nfrom Data.DataMgnt.TestResult import TestResultOper as TR\nfrom Common.function import retStackFunc\n\nrc = ReadConfig.ReadConfig(\"ngboss_config.ini\")\nlogger = LogManager('RuleCheck').get_logger_and_add_handlers(1,is_add_stream_handler=True, log_path=ReadConfig.log_path, log_filename=time.strftime(\"%Y-%m-%d\")+'.log' )\n\n#================处理页面元素公共类,包含主套餐、服务、优惠、平台服务等======================#\n\nclass RuleCheckBefore(Base):\n '''公共规则检查'''\n def checkRule(self,scene='ruleCheck'):\n '''页面执行规则判断'''\n # stackInfo = retStackFunc()\n # # # funcCode = stackInfo['func']\n # print('=====被调用的函数:')\n # logger.info(stackInfo)\n '''处理Wade弹出的各种提示窗口(Error、Success、Warn、Help、Tips)'''\n loc_WadeMessage = (By.XPATH,'//div[starts-with(@id,\"wade_messagebox\") and not(contains(@style,\"display: none\"))]')\n try:\n ele_wadeMsg = self.find(loc_WadeMessage)\n logger.info('找到WadeMsg弹出框:{}'.format(str(ele_wadeMsg)))\n classname = self.get(loc_WadeMessage,Type='attribute',name='class') #取出WadeMsg的class属性值,判断是什么类型弹出\n logger.info('wadeMsg的类型:{}'.format(classname))\n time.sleep(2)\n WadeMsg = ele_wadeMsg.find_element_by_xpath('./div/div[2]/div[1]/div[2]').text\n logger.info('WadeMessageBox返回的信息:{}'.format(WadeMsg))\n '''根据classname类型按钮处理'''\n if 'c_msg-error' in classname:\n print('弹出WadeMsg的是错误提示')\n logger.info(\"业务校验失败:{}\".format(WadeMsg))\n print('业务校验信息:{}'.format(WadeMsg))\n step_str = \"业务校验\"\n self.screen_step(step_str) # 这个保存在测试记录文档中\n self.screenshot_SaveAsDoc(step_str) # 截图单独保存到doc\n time.sleep(3)\n WadeMsg = '业务校验失败' + WadeMsg\n elif 'c_msg-success' in classname:\n print('弹出WadeMsg的是成功提示')\n ele_suc = ele_wadeMsg.find_element_by_xpath('./div/div[2]/div[2]/button')\n self.click_on_element(ele_suc)\n self.sendEnter()\n time.sleep(2)\n WadeMsg = '弹出校验成功信息:' + WadeMsg\n elif 'c_msg-warn' in 
classname:\n print('弹出WadeMsg的是告警提示')\n step_str = \"业务受理提示信息\"\n self.screenshot_SaveAsDoc(step_str)\n ele_wadeMsg.find_element_by_xpath('./div/div[2]/div[2]/button').click() # 关闭提示窗口\n self.sendEnter()\n time.sleep(2)\n WadeMsg = '警告信息:' + WadeMsg\n elif 'c_msg-however' in classname:\n print('弹出WadeMsg的是however')\n step_str = \"业务受理提示信息\"\n self.screenshot_SaveAsDoc(step_str)\n ele_wadeMsg.find_element_by_xpath('./div/div[2]/div[2]/button').click() # 关闭提示窗口\n self.sendEnter()\n time.sleep(2)\n WadeMsg = '业务校验:' + WadeMsg\n logger.info(WadeMsg)\n elif 'c_msg-help' in classname:\n print('弹出WadeMsg的是帮助提示')\n ele_help = ele_wadeMsg.find_element_by_xpath('./div/div[2]/div[2]/button[1]')\n self.click_on_element(ele_help)\n self.sendEnter()\n time.sleep(3)\n elif 'c_msg c_msg-h c_msg-phone-v c_msg-full' == classname:\n print('弹出WadeMsg的是普通提示')\n step_str = \"业务受理提示信息\"\n logger.info('业务受理提示信息:{}'.format(WadeMsg))\n self.screenshot_SaveAsDoc(step_str)\n ele_wadeMsg.find_element_by_xpath('./div/div[2]/div[2]/button[1]').click() # 关闭提示窗口\n time.sleep(2)\n WadeMsg = '出现提示信息:' + WadeMsg\n self.sendEnter()\n except:\n WadeMsg = '没有弹出WadeMessage提示,校验通过'\n logger.info('======WadeMessageBox页面返回============={}'.format(WadeMsg))\n TR().updateRuleCheckInfo(msg=WadeMsg,sceneCode=scene)\n Assertion().assertNotIn('校验失败',WadeMsg,msg='[规则校验通过]')\n Assertion().assertNotIn('校验不通过',WadeMsg,msg='[规则校验通过]')\n return WadeMsg\n\n def CheckRuleBefore(self):\n '''个人业务前置业务规则判断'''\n # self.screen_step('CheckRuleBefore业务规则判断')\n rulemsg = ''\n # rulemsg = PageAssert(self.driver).assert_WadePage()\n rulemsg = self.checkRule()\n logger.info('Wade页面返回的业务规则校验信息:'.format(rulemsg))\n return rulemsg\n\n","sub_path":"Check/RuleCheck.py","file_name":"RuleCheck.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494860484","text":"\"\"\"\n一个repo的代码是谁写的\n利用gitblame命令\n\"\"\"\nimport os\nfrom pprint import pprint\nimport subprocess as sp\nfrom collections import Counter, defaultdict\nfrom os.path import *\nfrom typing import Callable\nimport re\n\n\ndef walk(path, should_enter: Callable):\n if isfile(path):\n raise Exception(f\"{path} is a file\")\n folders = []\n files = []\n for son in os.listdir(path):\n if isfile(join(path, son)):\n files.append(son)\n else:\n folders.append(son)\n if should_enter(join(path, son)):\n for i in walk(join(path, son), should_enter):\n yield i\n yield path, folders, files\n\n\ndef should_enter(folder):\n if isfile(folder):\n return False\n if basename(folder).startswith('.'):\n return False\n if basename(folder) == 'node_modules':\n return False\n return True\n\n\na = defaultdict(lambda: 0)\nfor parent, folders, files in walk('.', should_enter):\n for f in files:\n if f.endswith('.py') or f.endswith('.go') or f.endswith('.js'):\n filepath = join(parent, f)\n cmd = f\"git blame {filepath}\"\n try:\n resp = sp.check_output(cmd, shell=True)\n resp = str(resp, encoding='utf8')\n except Exception as ex:\n print(ex, f'run command {cmd} error')\n continue\n lines = resp.splitlines()\n for line in lines:\n fields = re.search('\\((.+?)\\)', line).group(1).split()\n name = fields[0]\n a[name] += 1\npprint(a)\n","sub_path":"whowrite.py","file_name":"whowrite.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603357139","text":"# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom composer.core import Logger, State\nfrom composer.core.callback import Callback\n\n\nclass GradMonitor(Callback):\n \"\"\"Logs the L2 norm.\n \n This callback logs the L2 norm of all the parameters in the model,\n and optionally, the L2 norm of the parameters by each layer.\n\n It logs the L2 norm on each batch under the ``grad_l2_norm/step`` key.\n If ``log_layer_grad_norms`` is True (default False), then\n layer-wise L2 norms are logged under ``layer_grad_l2_norm/LAYER_NAME``.\n\n Args:\n log_layer_grad_norms (bool, optional):\n Whether to log the L2 normalization of each layer.\n Defaults to False.\n \"\"\"\n\n def __init__(self, log_layer_grad_norms: bool = False):\n super().__init__()\n self.log_layer_grad_norms = log_layer_grad_norms\n\n def after_train_batch(self, state: State, logger: Logger):\n \"\"\"Compute the gradient L2 norm after the reduction of the\n backwards pass across GPUs. This function iterates over the\n parameters of the model and hence may cause a reduction in\n throughput while training large models. In order to ensure\n correctness, this function should be called after gradient\n unscaling in cases where gradients are scaled.\n\n Args:\n state (State): The :class:`~composer.core.State` object\n used during training.\n logger (Logger):\n The :class:`~composer.core.logging.logger.Logger` object.\n \"\"\"\n norm = None\n layer_norms = {}\n for name, p in state.model.named_parameters():\n if p.grad is not None and p.requires_grad:\n param_grad_norm = p.grad.detach().data.norm(2).item() # type: ignore\n if self.log_layer_grad_norms:\n layer_norms[f'layer_grad_l2_norm/{name}'] = param_grad_norm\n\n param_grad_norm = param_grad_norm**2\n norm = param_grad_norm if not norm else norm + param_grad_norm\n\n norm = norm**0.5\n logger.metric_batch({'grad_l2_norm/step': norm})\n if self.log_layer_grad_norms:\n logger.metric_batch(layer_norms)\n","sub_path":"composer/callbacks/grad_monitor.py","file_name":"grad_monitor.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219350256","text":"import re\nimport string\n\ndef extractVariables(input, pattern):\n regex = re.sub(r'{\\$(.+?);}', r'(?P<_\\1>.+)', pattern)\n values = list(re.search(regex, input).groups())\n keys = re.findall(r'{(.+?)}', pattern)\n pairs = dict(zip(keys, values))\n return pairs\n\ndef extractArgList(data):\n pattern='hello, my name is {$name;} and I am a {$age;} year old {$what;}'\n pairs = extractVariables(data, pattern)\n #print pairs\n\n argList=[]\n for k,v in pairs.iteritems():\n pos = len(argList)\n name=str(k.split('$')[1].split(';')[0])\n value=v\n argList.append((pos,str(name),value))\n\n return argList\n\n#p = 'hello, my name is {$name;} and I am a {$age;} year old {$what;}'\n#t = 'hello, my name is Rubinder and I am a 27 year old [1,2,3,4,5]'\n\n#extractArgList(t, p)","sub_path":"ParserDemo.py","file_name":"ParserDemo.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379377963","text":"from django.contrib import admin\n\nfrom django.conf import settings\n\nfrom django import forms\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import (Project,\n MarketingClaimProject,\n SocialMediaProject,\n ContactProject,\n Constant,\n Page,\n # ImagenExtra,\n Redirection,\n Translation\n )\n\n# Register your models here.\n\nclass 
ProjectAdmin(admin.ModelAdmin):\n\t\"\"\"docstring for ProjectAdmin\"\"\"\n\t\n\tlist_display = (\n\t\t'code', 'id', 'name', 'active_languages', 'domain', 'domain_ssl', 'extra_robots'\n\t)\n\n\tsearch_fields = ['name', 'code']\n\tfieldsets = [\n [\n _('General'),\n {\n 'fields': (\n 'code', 'name', 'active_languages', \n 'domain_ssl', 'extra_robots',)\n }\n ],\n [\n \t_('Domains'),\n \t{\n \t'fields': []\n \t}\n ],\n ]\n\n\tfor lang in settings.LANGUAGES:\n\t\tfieldsets[1][1]['fields'].append('domain_%s' % lang[0])\n\n\nclass RedirectionAdmin(admin.ModelAdmin):\n list_display = ('project', 'old_url', 'new_url',)\n search_fields = ('old_url',)\n list_filter = ('project',)\n\nclass PageAdminForm(forms.ModelForm):\n \"\"\"docstring for PageAdminForm\"\"\"\n def __init__(self, *args, **kwargs):\n super(PageAdminForm, self).__init__(*args, **kwargs)\n project = self.fields[\"project\"]._queryset[0]\n fields = [\"url\",]\n # for field in fields:\n # for lang in project.languages:\n # setattr(\n # self,\n # \"clean_%s_%s\" % (field, lang),\n # partial(self._clean, field, lang))\n\nclass PageAdmin(admin.ModelAdmin):\n \"\"\"docstring for PageAdmin\"\"\"\n\n form = PageAdminForm\n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"parent\":\n code_project = request.META.get(\"CMS_PROJECT_CODE\")\n kwargs['queryset'] = Page.objects.filter(project__code=code_project).order_by('code')\n return super(PageAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n \n list_display = (\n 'code', 'id', 'project', 'name', 'parent', 'is_home', 'ssl',\n 'url', 'view', 'is_active', 'is_menu', 'in_order',\n # 'nivel',\n )\n\n list_filter = ('project', 'code', 'is_active',)\n\n list_editable = ['parent', 'is_active', 'in_order', ]\n\n fieldsets = [\n [_('General'), {\n 'fields': (\n 'project', 'code', 'parent', 'is_active', 'is_menu', 'is_home', 'ssl',\n )\n }],\n [_('View'), {\n 'fields': (\n 'view', 'context',\n )\n }],\n ]\n\n search_fields = ['code', ]\n lang_search_fields = ['name', 'url', \"content\", ]\n \n for lang_code, lang_name in settings.LANGUAGES:\n lang_fields = []\n # lang_search_fields = []\n for field in Page._meta.translatable_fields:\n lang_fields.append('%s_%s' % (field, lang_code))\n # lang_search_fields.append('%s_%s' % (field, lang_code))\n\n fieldsets.append((_('Content %s' % lang_name), {'fields': lang_fields }))\n # search_fields.extend(lang_search_fields)\n\n # for lang_code, lang_name in settings.LANGUAGES:\n \n # fieldsets.append((_('Content %s') % lang_name, {\n # 'fields': [\n # '%s_%s' % (field, lang_name) for field in Page._meta.translatable_fields\n # ]\n # }))\n\n # search_fields.extend([\n # \"%s_%s\" % (field, lang_name) for field in lang_search_fields\n # ])\n\n class Media:\n js = [\n '/static/grappelli/tinymce/jscripts/tiny_mce/tiny_mce.js',\n '/static/tinymce_setup.js',\n ]\n\n\n \nclass TranslationAdmin(admin.ModelAdmin):\n list_display = ('project', 'page', 'identifier', 'is_exist',)\n list_filter = ('project', 'page__code', 'is_exist',)\n search_fields = ['identifier', ]\n lang_search_fields = ('value',)\n list_display_links = ('identifier',)\n fieldsets = [\n [_('Content'), {\n 'fields': []\n }]\n ]\n\n for code, name in settings.LANGUAGES:\n fieldsets[0][1]['fields'].append('value_%s' % code)\n # search_fields.extend([\n # \"%s_%s\" % (field, code) for field in lang_search_fields\n # ])\n\n\n\n\nadmin.site.register(Project, ProjectAdmin)\nadmin.site.register(Page, PageAdmin)\nadmin.site.register(Redirection, 
RedirectionAdmin)\nadmin.site.register(Translation, TranslationAdmin)","sub_path":"libs/cms_engine/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"622452779","text":"import config\nimport pickle\nfrom tqdm import tqdm\nimport pandas as pd\nfrom dataset import MyDataset\nfrom word_sequence import WordSequence\n\nif __name__ == '__main__':\n    print(\"start\")\n    ws = WordSequence()\n    train_data = MyDataset(train=True)\n\n    for idx in range(len(train_data)):\n        if idx % 10000 == 0:\n            print(idx)\n        title, text, label = train_data[idx]\n        ws.fit(title)\n        ws.fit(text)\n\n    print(\"Building vocabulary...\")\n    ws.build_vocab(max_num_words=config.MAX_NUM_WORDS)\n    print(len(ws))\n    pickle.dump(ws, open(\"ws.pkl\", \"wb\"))\n\n# Why does using tqdm fetch extra elements?\n# for lines, labels in tqdm(train_data, total=len(train_data)):\n#     # When you supply total as an argument, tqdm gets an estimate of how many iterations the code should run,\n#     # so it can show progress predictions (even if the iterable you provide has no length).\n#     
ws.fit(lines)\n\n","sub_path":"10.real-and-fake-news/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219984627","text":"\nimport struct\nfrom collections import OrderedDict\n\n\nCThostFtdcDepthMarketDataField = [ # sizeof=408\n ('TradingDay', '9s', '交易日'),\n ('InstrumentID', '31s', '合约代码'),\n ('ExchangeID', '9s', '交易所代码'),\n ('ExchangeInstID', '31s', '合约在交易所的代码'),\n ('LastPrice', 'd', '最新价'),\n ('PreSettlementPrice', 'd', '上次结算价'),\n ('PreClosePrice', 'd', '昨收盘'),\n ('PreOpenInterest', 'd', '昨持仓量'),\n ('OpenPrice', 'd', '今开盘'),\n ('HighestPrice', 'd', '最高价'),\n ('LowestPrice', 'd', '最低价'),\n ('Volume', 'i', '数量'),\n ('Turnover', 'd', '成交金额'),\n ('OpenInterest', 'd', '持仓量'),\n ('ClosePrice', 'd', '今收盘'),\n ('SettlementPrice', 'd', '本次结算价'),\n ('UpperLimitPrice', 'd', '涨停板价'),\n ('LowerLimitPrice', 'd', '跌停板价'),\n ('PreDelta', 'd', '昨虚实度'),\n ('CurrDelta', 'd', '今虚实度'),\n ('UpdateTime', '9s', '最后修改时间'),\n ('UpdateMillisec', 'i', '最后修改毫秒'),\n ('BidPrice1', 'd', '申买价一'),\n ('BidVolume1', 'i', '申买量一'),\n ('AskPrice1', 'd', '申卖价一'),\n ('AskVolume1', 'i', '申卖量一'),\n ('BidPrice2', 'd', '申买价二'),\n ('BidVolume2', 'i', '申买量二'),\n ('AskPrice2', 'd', '申卖价二'),\n ('AskVolume2', 'i', '申卖量二'),\n ('BidPrice3', 'd', '申买价三'),\n ('BidVolume3', 'i', '申买量三'),\n ('AskPrice3', 'd', '申卖价三'),\n ('AskVolume3', 'i', '申卖量三'),\n ('BidPrice4', 'd', '申买价四'),\n ('BidVolume4', 'i', '申买量四'),\n ('AskPrice4', 'd', '申卖价四'),\n ('AskVolume4', 'i', '申卖量四'),\n ('BidPrice5', 'd', '申买价五'),\n ('BidVolume5', 'i', '申买量五'),\n ('AskPrice5', 'd', '申卖价五'),\n ('AskVolume5', 'i', '申卖量五'),\n ('AveragePrice', 'd', '当日均价'),\n ('ActionDay', '9s', '业务日期')]\n\n\nclass CbtlMD(object):\n def __init__(self, fn):\n try:\n self.f = open(fn, \"rb\")\n self.eof = False\n except FileNotFoundError:\n self.f = None\n self.eof = True\n raise\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.eof:\n raise StopIteration()\n\n buf = self.f.read(4)\n if not buf:\n self.eof = True\n raise StopIteration()\n\n if len(buf) < 4:\n print(\"truncated file\")\n self.eof = True\n raise StopIteration()\n\n size, = struct.unpack(\"I\", buf)\n\n # print(\"size:\", size)\n buf = self.f.read(size)\n if len(buf) < size:\n print(\"truncated file\")\n self.eof = True\n raise StopIteration()\n\n ts, tsu = struct.unpack(\"QQ\", buf[:16]) # 16字节的timeval\n\n if buf[16:19] != b'MD\\x00':\n print(\"bad buf\")\n self.eof = True\n raise StopIteration()\n return ts, tsu, buf\n\n\ndef MdUnpack1(buf):\n t = struct.unpack(\"@9x8s63xd120x8s4xididi\", buf[19:19 + 252]) # 取最后一个字段off+len\n op, latest, ts, tsm, BP1, BA1, SP1, SA1 = t\n print(\"+++\", op, latest, ts, tsm, BP1, BA1, SP1, SA1)\n\n\ndef ctp_unpack(bufr:bytes, off=0):\n \"\"\"\n :param struct_name: eg CThostFtdcInstrumentField\n :type struct_name: str\n :param bufr\n :type bytes\n\n \"\"\"\n\n # struct_comment, struct_fields, struct_size = ctp_types.get(struct_name, (None,)*3)\n struct_fields = CThostFtdcDepthMarketDataField\n\n packfmt = \"@\"\n for field_name, fieldfmt, comment in struct_fields:\n packfmt += fieldfmt\n # print(packfmt)\n packsz = struct.calcsize(packfmt) # struct_size may bigger than packsz because of align\n\n # print(struct_name, \"#\", struct_comment)\n a = struct.unpack(packfmt, bufr[off:off+packsz])\n d = OrderedDict()\n for i, v in enumerate(a):\n if struct_fields[i][1].endswith(\"s\"):\n z = v.find(b\"\\x00\")\n if z >= 0: v = v[:z]\n v = v.decode(\"gbk\", 
errors=\"ignore\")\n # print(\" \", struct_fields[i][0], repr(v), \"#\", struct_fields[i][2])\n d[struct_fields[i][0]] = (v, struct_fields[i][2])\n return d\n\n","sub_path":"src/archive_frmd/ut_btl_md.py","file_name":"ut_btl_md.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"5244810","text":"# 여행사 : 가나 여행사\n# 홍 길동님이 내일 프랑스 여행을 갑니다.\n# 박 영희님이 모레 이탈리아 여행을 갑니다.\nclass Travel:\n com = '가나'\n def __init__(self,name,firstname):\n self.name = name\n self.firstname = firstname\n\n def travel(self,day,country):\n msg = self.firstname+self.name+'이 '+day+' '+country+' 여행을 갑니다.'\n return msg\n\n\nclass Account:\n bank = 'KB'\n\n def __init__(self, name):\n self.name = name\n self.accno = ''\n\n def accInfo(self, accno):\n self.accno = accno\n\n def showInfo(self):\n return self.accno\n\nprint(f'{Travel.com} 여행사')\nhong = Travel('길동','홍')\nprint(hong.travel('내일','프랑스'))\npark = Travel('영희','박')\nprint(park.travel('모레','이탈리아'))\n\nsoo = Account('철수')\nhee = Account('영희')\nsoo.accInfo('143-09-45677')\nhee.accInfo('154-09-43059')\nprint(f'{soo.name} {Account.bank} {soo.showInfo()}')\nprint(f'{hee.name} {Account.bank} {hee.showInfo()}')","sub_path":"classes/calcTest/calcTest08.py","file_name":"calcTest08.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239479687","text":"from PymoNNto.Exploration.Network_UI.TabBase import *\n\nclass hist_tab(TabBase):\n\n def __init__(self, weight_attr='W', title='Weight Dist.', timesteps=1000, mask_param='Input_Mask', mask_color_add=(-100, -100, -100)):#mask_param=None #\n super().__init__(title)\n self.weight_attr = weight_attr\n self.timesteps = timesteps\n\n def add_recorder_variables(self, neuron_group, Network_UI):\n return\n\n def initialize(self, Network_UI):\n self.additionaltab = Network_UI.add_tab(title=self.title)\n\n self.weight_hist_plots = {}\n self.net_weight_hist_plots = {}\n\n for i,transmitter in enumerate(Network_UI.transmitters):\n if i>0:\n Network_UI.tab.add_row()\n\n self.weight_hist_plots[transmitter] = Network_UI.tab.add_plot(title=transmitter + ' selected weight hist', x_label=transmitter + ' synapse size', y_label='Frequency')\n self.net_weight_hist_plots[transmitter] = Network_UI.tab.add_plot(title=transmitter + ' network weight hist', x_label=transmitter + ' synapse size', y_label='Frequency')\n\n Network_UI.tab.add_row()\n #self.min_hist_slider = QSlider(1) # QtCore.Horizontal\n #self.min_hist_slider.setMinimum(-1)\n #self.min_hist_slider.setMaximum(10)\n #self.min_hist_slider.setSliderPosition(0)\n #self.min_hist_slider.mouseReleaseEvent = Network_UI.static_update_func\n #self.min_hist_slider.setToolTip('slide to cut away smallest weights')\n #Network_UI.tab.add_widget(self.min_hist_slider) # , stretch=0.1\n\n Network_UI.tab.add_widget(QLabel('min: '))\n self.qsb_min = QDoubleSpinBox()\n self.qsb_min.setDecimals(5)\n self.qsb_min.setValue(0.0)\n self.qsb_min.setSingleStep(0.00001)\n Network_UI.tab.add_widget(self.qsb_min)\n\n Network_UI.tab.add_widget(QLabel('max: '))\n self.qsb_max = QDoubleSpinBox()\n self.qsb_max.setDecimals(5)\n self.qsb_max.setValue(1.0)\n self.qsb_max.setSingleStep(0.00001)\n Network_UI.tab.add_widget(self.qsb_max)\n\n #Network_UI.tab.add_row()\n Network_UI.tab.add_widget(QLabel('bins: '))\n self.bin_slider = QSlider(1) # QtCore.Horizontal\n self.bin_slider.setMinimum(1)\n self.bin_slider.setMaximum(100)\n 
self.bin_slider.setSliderPosition(50)\n self.bin_slider.mouseReleaseEvent = Network_UI.static_update_func\n self.bin_slider.setToolTip('slide to change bin count')\n Network_UI.tab.add_widget(self.bin_slider) # , stretch=0.1\n\n def update_Synapse_Historgrams(self, Network_UI, group, net_color_input):\n #msl = self.min_hist_slider.sliderPosition() * 0.001\n\n min_weight = self.qsb_min.value()\n max_weight = self.qsb_max.value()\n\n bins = self.bin_slider.sliderPosition()\n\n for transmitter in Network_UI.transmitters:\n\n self.net_weight_hist_plots[transmitter].clear()\n self.weight_hist_plots[transmitter].clear()\n\n glu_syns = group.afferent_synapses[transmitter]\n if len(glu_syns) > 0:\n\n GLU_syn_list = get_combined_syn_mats(glu_syns, None, self.weight_attr)\n GLU_syn_list_en = get_combined_syn_mats(glu_syns, None, \"enabled\")\n if len(GLU_syn_list) > 0:\n GLU_syn = GLU_syn_list[list(GLU_syn_list.keys())[0]]\n en_mask = GLU_syn_list_en[list(GLU_syn_list_en.keys())[0]].astype(bool)*(GLU_syn > min_weight)*(GLU_syn < max_weight)#GLU_syn > msl\n\n self.net_weight_hist_plots[transmitter].clear()\n y, x = np.histogram(GLU_syn[en_mask], bins=bins)#[GLU_syn[not_input_mask] > msl]\n curve = pg.PlotCurveItem(x, y, stepMode=True, fillLevel=0, brush=group.color)\n self.net_weight_hist_plots[transmitter].addItem(curve)\n\n self.weight_hist_plots[transmitter].clear()\n y, x = np.histogram(GLU_syn[Network_UI.selected_neuron_mask()][en_mask[Network_UI.selected_neuron_mask()]], bins=bins)#[selected_neuron_GLU_syn > msl]\n curve = pg.PlotCurveItem(x, y, stepMode=True, fillLevel=0, brush=Network_UI.neuron_select_color)\n self.weight_hist_plots[transmitter].addItem(curve)\n\n\n def update(self, Network_UI):\n if self.additionaltab.isVisible():\n\n group = Network_UI.selected_neuron_group()\n n=group#for eval comand\n\n mca = (0,0,0)\n\n net_color_input = np.clip([group.color[0] + mca[0], group.color[1] + mca[1], group.color[2] + mca[2], 255], 0, 255)\n\n self.update_Synapse_Historgrams(Network_UI, group, net_color_input)\n","sub_path":"Exploration/Network_UI/Basic_Tabs/hist_tab.py","file_name":"hist_tab.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358689494","text":"\"\"\"Testing basic ShipEngineClient functionality.\"\"\"\nimport pytest\nimport responses\n\nfrom shipengine_sdk import ShipEngine\nfrom shipengine_sdk.errors import ClientSystemError\nfrom shipengine_sdk.models import ErrorCode, ErrorSource, ErrorType\nfrom shipengine_sdk.models.address import Address\nfrom shipengine_sdk.models.enums import Endpoints\n\n\ndef validate_address(address):\n shipengine = ShipEngine(\n dict(\n api_key=\"baz\",\n base_uri=Endpoints.TEST_RPC_URL.value,\n page_size=50,\n retries=2,\n timeout=10,\n )\n )\n return shipengine.validate_address(address)\n\n\ndef valid_residential_address() -> Address:\n return Address(\n street=[\"4 Jersey St\"],\n city_locality=\"Boston\",\n state_province=\"MA\",\n postal_code=\"02215\",\n country_code=\"US\",\n )\n\n\ndef get_500_server_error() -> Address:\n return Address(\n street=[\"500 Server Error\"],\n city_locality=\"Boston\",\n state_province=\"MA\",\n postal_code=\"02215\",\n country_code=\"US\",\n )\n\n\nclass TestShipEngineClient:\n @responses.activate\n def test_500_server_response(self):\n responses.add(\n responses.POST,\n Endpoints.TEST_RPC_URL.value,\n json={\n \"jsonrpc\": \"2.0\",\n \"id\": \"req_DezVNUvRkAP819f3JeqiuS\",\n \"error\": {\n \"code\": \"-32603\",\n 
\"message\": \"Unable to connect to the database\",\n \"data\": {\"source\": \"shipengine\", \"type\": \"system\", \"code\": \"unspecified\"},\n },\n },\n status=500,\n )\n try:\n validate_address(get_500_server_error())\n except ClientSystemError as e:\n assert e.message == \"Unable to connect to the database\"\n assert e.request_id is not None\n assert e.source == ErrorSource.SHIPENGINE.value\n assert e.error_type == ErrorType.SYSTEM.value\n assert e.error_code == ErrorCode.UNSPECIFIED.value\n with pytest.raises(ClientSystemError):\n validate_address(get_500_server_error())\n\n @responses.activate\n def test_404_server_response(self):\n responses.add(\n responses.POST,\n Endpoints.TEST_RPC_URL.value,\n json={\n \"jsonrpc\": \"2.0\",\n \"id\": \"req_DezVNUvRkAP819f3JeqiuS\",\n \"error\": {\n \"code\": \"-32603\",\n \"message\": \"Content not found.\",\n \"data\": {\"source\": \"shipengine\", \"type\": \"system\", \"code\": \"not_found\"},\n },\n },\n status=404,\n )\n try:\n validate_address(valid_residential_address())\n except ClientSystemError as e:\n assert e.message == \"Content not found.\"\n assert e.request_id is not None\n assert e.source == ErrorSource.SHIPENGINE.value\n assert e.error_type == ErrorType.SYSTEM.value\n assert e.error_code == ErrorCode.NOT_FOUND.value\n with pytest.raises(ClientSystemError):\n validate_address(valid_residential_address())\n","sub_path":"tests/http_client/test_http_client.py","file_name":"test_http_client.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288303681","text":"from django.conf.urls import include, url\r\nfrom authors import views\r\nfrom authors.views import ListAuthors, DetailAuthor, CreateAuthor, UpdateAuthor, DeleteAuthor\r\n\r\n\r\n\r\n\r\nurlpatterns = [\r\n url(r'^$', ListAuthors.as_view(), name='authors_list'),\r\n url(r'^(?P<pk>(\\d))$', views.DetailAuthor, name='author_item'),\r\n url(r'^add$', CreateAuthor.as_view(), name='author_add'),\r\n url(r'update/(?P<pk>(\\d))', UpdateAuthor.as_view(), name='author_update'),\r\n url(r'del/(?P<pk>(\\d))$', DeleteAuthor.as_view(), name=\"author_delete\"),\r\n url(r'^(?P<pk>(\\d))/addlike/$', views.add_like, name='author_like'),\r\n]","sub_path":"authors/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489398748","text":"import os\nimport discord\nimport logging\n\n\n# load .envfile if in development\ndevel = os.getenv(\"DEVEL\")\nif devel:\n from dotenv import load_dotenv\n load_dotenv(dotenv_path=\".env.local\")\n\nclient = discord.Client()\nmy_secret = os.getenv(\"TOKEN\")\n\n\nlogger = logging.getLogger(\"discord\")\nlogger.setLevel(logging.INFO)\nhandler = logging.FileHandler(filename=\"discord.log\", encoding=\"utf-8\", mode=\"w\")\nhandler.setFormatter(\n logging.Formatter(\"%(asctime)s:%(levelname)s:%(name)s: %(message)s\")\n)\nlogger.addHandler(handler)\n\n\n@client.event\nasync def on_ready():\n print(\"We have logged in as {0.user}\".format(client))\n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if message.content.startswith(\"$hello\"):\n await message.channel.send(\n \"Hello! 
This bot is still under construction, please come back later\"\n )\n\n\nclient.run(my_secret)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494440374","text":"import os\n\nimport numpy as np\nimport pytest\n\nimport pyvista\nfrom pyvista import QtInteractor, MainWindow\nfrom pyvista.plotting import system_supports_plotting\n\n\nNO_PLOTTING = not system_supports_plotting()\n\n\ntry:\n from PyQt5.Qt import (QMainWindow, QFrame, QVBoxLayout, QAction)\n has_pyqt5 = True\nexcept:\n has_pyqt5 = False\n class QMainWindow(object):\n pass\n\n\nclass TstWindow(MainWindow):\n def __init__(self, parent=None, show=True):\n MainWindow.__init__(self, parent)\n\n self.frame = QFrame()\n vlayout = QVBoxLayout()\n self.vtk_widget = QtInteractor(self.frame)\n vlayout.addWidget(self.vtk_widget.interactor)\n\n self.frame.setLayout(vlayout)\n self.setCentralWidget(self.frame)\n\n mainMenu = self.menuBar()\n\n fileMenu = mainMenu.addMenu('File')\n self.exit_action = QAction('Exit', self)\n self.exit_action.setShortcut('Ctrl+Q')\n self.exit_action.triggered.connect(self.close)\n fileMenu.addAction(self.exit_action)\n\n meshMenu = mainMenu.addMenu('Mesh')\n self.add_sphere_action = QAction('Add Sphere', self)\n self.exit_action.setShortcut('Ctrl+A')\n self.add_sphere_action.triggered.connect(self.add_sphere)\n meshMenu.addAction(self.add_sphere_action)\n\n self.signal_close.connect(self.vtk_widget.interactor.close)\n\n if show:\n self.show()\n\n def add_sphere(self):\n sphere = pyvista.Sphere()\n self.vtk_widget.add_mesh(sphere)\n self.vtk_widget.reset_camera()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_qt_interactor(qtbot):\n window = TstWindow(show=False)\n qtbot.addWidget(window)\n window.add_sphere()\n assert np.any(window.vtk_widget.mesh.points)\n window.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotting_axes_scale(qtbot):\n sphere = pyvista.Sphere()\n plotter = pyvista.BackgroundPlotter(off_screen=False, title='Testing Window')\n plotter.add_mesh(sphere)\n assert np.any(plotter.mesh.points)\n\n dlg = plotter.scale_axes_dialog(show=False)\n\n value = 2.0\n dlg.x_slider_group.value = value\n assert plotter.scale[0] == value\n\n dlg.x_slider_group.spinbox.setValue(-1)\n assert dlg.x_slider_group.value == 0\n dlg.x_slider_group.spinbox.setValue(1000.0)\n assert dlg.x_slider_group.value < 100\n\n plotter._last_update_time = 0.0\n plotter.update()\n plotter.update_app_icon()\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotting_camera(qtbot):\n plotter = pyvista.BackgroundPlotter(off_screen=False, title='Testing Window')\n plotter.add_mesh(pyvista.Sphere())\n\n cpos = [(0.0, 0.0, 1.0), (0.0, 0.0, 0.0), (0.0, 1.0, 0.0)]\n plotter.camera_position = cpos\n plotter.save_camera_position()\n plotter.camera_position = [(0.0, 0.0, 3.0), (0.0, 0.0, 0.0), (0.0, 1.0, 0.0)]\n\n # load existing position\n # NOTE: 2 because first two (0 and 1) buttons save and clear positions\n plotter.saved_cameras_tool_bar.actions()[2].trigger()\n assert plotter.camera_position == cpos\n\n 
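# the saved position has been restored correctly; clearing should now leave only the built-in save/clear actions\n    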
plotter.clear_camera_positions()\n # 2 because the first two buttons are save and clear\n assert len(plotter.saved_cameras_tool_bar.actions()) == 2\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotter_export_files(qtbot, tmpdir):\n plotter = pyvista.BackgroundPlotter(off_screen=False, title='Testing Window')\n plotter.add_mesh(pyvista.Sphere())\n\n filename = str(tmpdir.mkdir(\"tmpdir\").join('tmp.png'))\n plotter.update()\n dlg = plotter._qt_screenshot(show=False)\n dlg.selectFile(filename)\n dlg.accept()\n plotter.close()\n\n assert os.path.isfile(filename)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotter_export_files_again(qtbot, tmpdir):\n plotter = pyvista.BackgroundPlotter(off_screen=False, show=False, title='Testing Window')\n plotter.add_mesh(pyvista.Sphere())\n\n filename = str(tmpdir.mkdir(\"tmpdir\").join('tmp.png'))\n # plotter.update()\n dlg = plotter._qt_screenshot(show=False)\n dlg.selectFile(filename)\n dlg.accept()\n plotter.close()\n\n assert os.path.isfile(filename)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotter_export_vtkjs(qtbot, tmpdir):\n plotter = pyvista.BackgroundPlotter(off_screen=False, title='Testing Window')\n plotter.add_mesh(pyvista.Sphere())\n\n filename = str(tmpdir.mkdir(\"tmpdir\").join('tmp'))\n dlg = plotter._qt_export_vtkjs(show=False)\n dlg.selectFile(filename)\n dlg.accept()\n plotter.close()\n\n assert os.path.isfile(filename + '.vtkjs')\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotting_orbit(qtbot):\n plotter = pyvista.BackgroundPlotter(off_screen=False, title='Testing Window')\n plotter.add_mesh(pyvista.Sphere())\n # perform the orbit:\n plotter.orbit_on_path(bkg=False, step=0.0)\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(not has_pyqt5, reason=\"requires pyqt5\")\ndef test_background_plotting_add_callback(qtbot):\n plotter = pyvista.BackgroundPlotter(off_screen=False, title='Testing Window')\n sphere = pyvista.Sphere()\n plotter.add_mesh(sphere)\n\n def mycallback():\n sphere.points *= 0.5\n plotter.add_callback(mycallback, interval=1000, count=3)\n plotter.close()\n","sub_path":"tests/test_qt_plotting.py","file_name":"test_qt_plotting.py","file_ext":"py","file_size_in_byte":6084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96715196","text":"import numpy as np\n\nfrom scipy.optimize import fmin\n\ndef heston_obj(s0, dt, params):\n kappa = params[\"kappa\"]\n theta = params[\"theta\"]\n lda = params[\"lda\"]\n rho = params[\"rho\"]\n v0 = params[\"v0\"]\n\n\ndef generate_kalman_example(params, N=1000):\n \"\"\"Generate example for KF\"\"\"\n np.random.seed(254)\n F = params[0]\n H = params[1]\n Q = params[2]\n R = params[3]\n x0 = params[4]\n v0 = params[5]\n\n x = np.zeros(N+1)\n y = np.zeros(N+1)\n \n # step once\n x[0] = x0\n x[1] = F * x0 + np.random.normal(0, np.sqrt(Q))\n y[1] = H * x[1] + v0\n \n for i in range(2,N+1):\n x[i] = F * x[i-1] + np.random.normal(0, np.sqrt(Q))\n y[i] 
= H * x[i] + np.random.normal(0, np.sqrt(R))\n return x, y\n\ndef kalman_obj(y, # int observations \n params # list params\n ):\n F = params[0]\n H = params[1]\n Q = params[2]\n R = params[3]\n x0 = params[4]\n v0 = params[5]\n\n # init values\n obj = 0\n x_update = x0\n P = v0\n N = len(y) - 1\n for i in range(1, len(y)):\n # prediction step and objective\n x_pred = F * x_update\n P_next = F*P*F + Q\n S = H*P_next*H + R\n\n delta = y[i] - H*x_pred\n obj += delta * (1/S) * delta + np.log(np.abs(S))\n\n # measurement update\n K = P_next * H * (1/S) # kalman gain\n x_update = x_pred + K * delta\n P = (1 - K*H)*P_next*(1 - K*H) + K*R*K\n return obj/N\n\ndef obj(f, y):\n \"\"\"Wrapper for fmin to allow passing target y to obj. function\"\"\"\n def fix_y(params):\n return f(y, params)\n return fix_y\n\ndef kalman_path(y, params, N=1000, return_filter=False):\n F = params[0]\n H = params[1]\n Q = params[2]\n R = params[3]\n x0 = params[4]\n v0 = params[5]\n\n # init values\n x_pred = np.zeros(N+1)\n x_update = np.zeros(N+1)\n x_update[0] = x0\n P = v0\n for i in range(1, len(x_pred)):\n # prediction step and objective\n x_pred[i] = F * x_update[i-1]\n P_next = F*P*F + Q\n S = H*P_next*H + R\n delta = y[i] - H*x_pred[i]\n \n # measurement update\n K = P_next * H * (1/S) # kalman gain\n x_update[i] = x_pred[i] + K * delta\n P = (1 - K*H)*P_next*(1 - K*H) + K*R*K\n return (x_pred, x_update) if return_filter else x_pred\n","sub_path":"estimation.py","file_name":"estimation.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"639266298","text":"from HTMLTestRunner import HTMLTestRunner\nimport HTMLTestReportCN\nimport unittest\nimport os,time\n\nclass Train(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print('begin')\n def test_TrainA(self):\n print('执行测试1')\n\n def test_TrainB(self):\n print('执行测试2')\n\n def test_TrainC(self):\n print('执行测试3')\n @classmethod\n def tearDownClass(cls):\n print('ok')\n\ndef run():\n report = open(r'D:\\Desktop\\a\\report-{}.html'.format(time.strftime('%y%m%d-%H%M%S', time.localtime())), 'wb')\n suit = unittest.TestSuite()\n suit.addTests(map(Train,['test_TrainA','test_TrainB','test_TrainC']))\n runner = HTMLTestReportCN.HTMLTestRunner(stream=report,\n title='测试标题',\n description='测试概况',\n tester='chy')\n runner.run(suit)\n report.close()\n\n# 写一个定时执行的方法\ndef timing(str):\n t = 1\n while t<2:\n tim = time.strftime('%H%M',time.localtime())\n if tim == str:\n print('开始运行脚本')\n run()\n print('脚本运行完成')\n break\n else:\n time.sleep(30)\n\n\n\ntiming('1540')","sub_path":"180326a.py","file_name":"180326a.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645223242","text":"#!/usr/bin/env python\n\nimport socket\nimport hashlib\nimport ast\nfrom threading import Thread\nimport time\n#import Blockchain\nfrom lib.Blockchain import Blockchain\n\n\ntry:\n from configparser import ConfigParser\nexcept ImportError:\n from ConfigParser import ConfigParser\n\nclass Node:\n\n #--------GLOBAL VARIABLES----------------\n \n TCP_PORT = 5003\n BUFFER_SIZE = 1024 # size of the receiveng buffer -- we can adapt it to the lenght\n # of our messages witch will speed up the transition\n Password = b'Dricot' # users password\n sec = 0 # counter but I think it's gonna be useless\n data = ''\n\n #-------------METHODS-------------------\n\n \n \n\n \n def description(self):\n return '{} IP is 
{} and it is connected to the server at {}. Its neighbours are {} and {}' . format(self.username,self.ip_address,self.server_address,self.nextIP1,self.nextIP2) \n \n \n def runNodesListener(self):\n \"\"\"\n This function runs as a thread. It is responsible for listening to the neighboring nodes\n \"\"\"\n \n socketNodes = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n socketNodes.bind((self.ip_address, 5003))\n\n while True:\n socketNodes.listen(5)\n try :\n conn, addr1 = socketNodes.accept()\n data = conn.recv(self.BUFFER_SIZE)\n if data:\n decriptedData = ast.literal_eval(data.decode('utf-8'))\n addr = decriptedData[0]\n try:\n \"\"\"\n We want to know what kind of message we received\n Here we consider it is a new block\n \"\"\"\n \n receivedBlock = decriptedData[1]['Block']\n if self.blockchain.chain == []:\n self.arrivingBlock(decriptedData[1], addr, receivedBlock) \n\n else:\n if receivedBlock['previous_hash'] == self.blockchain.last_block['hash']:\n self.arrivingBlock(decriptedData[1], addr, receivedBlock)\n else:\n self.message = self.setMessage((self.ip_address,{'Confirmation':'block rejected'}))\n nodesMessage = Thread(target = self.runNodesMessage) #Problem. We kill the last thread even if it didn't accomplished the task\n nodesMessage.setDaemon(True)\n nodesMessage.start()\n \n \n except KeyError:\n try:\n \"\"\"\n The message is not a new block but a response to a received block\n If the block is rejected we drop everything and broadcast a message of rejection\n If it is accepted we check if it is accepted by every neighbour if yes we ad it to the chain\n and broadcast the info\n \"\"\"\n if self.blockchain.waiting_blocks != []:\n receivedConfirmation = decriptedData[1]['Confirmation']\n \n if receivedConfirmation == 'block rejected':\n self.blockchain.waiting_blocks.clear()\n self.contactedIP.clear()\n self.message = self.setMessage((self.ip_address,decriptedData[1]))\n nodesMessage = Thread(target = self.runNodesMessage) #Problem. We kill the last thread even if it didn't accomplished the task\n nodesMessage.setDaemon(True)\n nodesMessage.start()\n elif receivedConfirmation == 'All my neighbours ok':\n if addr in self.neighboursOk:\n pass\n else:\n self.neighboursOk.append(addr)\n if self.verifyConfirmed(self.neighboursOk):\n if self.blockchain.waiting_blocks != []:\n self.blockchain.chain.append(self.blockchain.waiting_blocks[0])\n print(self.blockchain.chain)\n self.blockchain.waiting_blocks.clear()\n self.neighboursOk.clear()\n self.confirmed.clear() \n else:\n continue\n except KeyError:\n continue\n else:\n continue\n except socket.timeout:\n pass\n\n \n def runNodesMessage(self):\n \"\"\"\n Function sending infomrmations to other nodes\n \"\"\"\n while True:\n for neighbour in self.nextIP:\n socketNodes = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n while True:\n try:\n socketNodes.connect((neighbour, 5003))\n socketNodes.send(self.message)\n break\n except TimeoutError:\n pass\n except ConnectionRefusedError:\n pass\n socketNodes.close()\n break\n\n \n\n def setMessage(self,block):\n message = str(block).encode('utf-8')\n return message\n\n \n def arrivingBlock(self,data, addr, receivedBlock):\n \"\"\"\n Looks if the received block is in the waiting list. If yes we\n check if the address is already recorded. 
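check whether the sender's address is already recorded among the confirmations.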
If no it is added to the waiting list\n and broadcasted.\n \"\"\"\n \n if self.blockchain.waiting_blocks == []:\n self.confirmed.clear()\n self.neighboursOk.clear()\n self.confirmed.append(addr)\n self.blockchain.putting_block(receivedBlock)\n\n self.message = self.setMessage((self.ip_address,data))\n nodesMessage = Thread(target = self.runNodesMessage) \n nodesMessage.setDaemon(True)\n nodesMessage.start()\n nodesMessage.join()\n \n if self.verifyConfirmed(self.confirmed):\n \n self.message = self.setMessage((self.ip_address,{'Confirmation': 'All my neighbours ok'}))\n nodesMessage = Thread(target = self.runNodesMessage) \n nodesMessage.setDaemon(True)\n nodesMessage.start()\n nodesMessage.join()\n self.confirmed.clear()\n\n else:\n if receivedBlock in self.blockchain.waiting_blocks:\n if addr not in self.confirmed:\n self.confirmed.append(addr)\n if self.verifyConfirmed(self.confirmed):\n self.message = self.setMessage((self.ip_address,{'Confirmation': 'All my neighbours ok'}))\n nodesMessage = Thread(target = self.runNodesMessage) \n nodesMessage.setDaemon(True)\n nodesMessage.start()\n nodesMessage.join()\n self.confirmed.clear()\n else:\n self.blockchain.putting_block(receivedBlock)\n self.blockchain.waiting_blocks = [self.blockchain.compare_blocks()]\n if self.blockchain.waiting_blocks[0] == receivedBlock:\n self.confirmed.clear()\n self.confirmed.append(addr)\n self.message = self.setMessage((self.ip_address,{'Block': self.blockchain.waiting_blocks[0]}))\n nodesMessage = Thread(target = self.runNodesMessage) \n nodesMessage.setDaemon(True)\n nodesMessage.start()\n nodesMessage.join()\n \n \n\n def verifyIfAccepted(self):\n verified = True\n for contact in self.nextIP:\n if 'block accepted' == contactedIP[contact]:\n verified = True\n else:\n verified = False\n break\n return verified\n\n \n \n def verifyConfirmed(self,listOfPeople):\n verified = True\n for addr in self.nextIP:\n \n if addr in listOfPeople:\n verified = True\n else:\n verified = False\n break\n return verified\n \n\n def sendMessage(self, message):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n while True:\n try:\n s.connect((self.server_address, self.TCP_PORT))\n s.send(message)\n s.close()\n break\n except TimeoutError:\n pass\n except ConnectionRefusedError:\n pass\n\n \n\n\n def __init__(self):\n \"\"\"\n Constructor of the node\n \"\"\"\n \n config=ConfigParser()\n config.read('../config/host.ini')\n self.ip_address=config.get('node','ip_address')\n self.username=config.get('node','username')\n self.server_address=config.get('registration','ip_address')\n self.password=config.get('registration','Password')\n items = config.items('neigbours')\n self.nextIP = [] # list of the neighbours' IP addresses\n i = 0\n for neighbour in items:\n self.nextIP.append(neighbour[1])\n i+=1\n self.message = b''\n self.blockchain = Blockchain()\n self.contactedIP = {}\n self.confirmed = []\n self.neighboursOk = []\n\n\ndef main():\n\n node = Node() \n MESSAGE = str({'Username':node.username,'Password':node.password}).encode('utf-8')\n node.sendMessage(MESSAGE)\n \n nodeListener = Thread(target = node.runNodesListener) \n \n \n nodeListener.start()\n \n \n \n \nif __name__ == '__main__': main()\n\n\n\n\n \n","sub_path":"node4/src/node_test.py","file_name":"node_test.py","file_ext":"py","file_size_in_byte":10529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207744295","text":"def pad_sequence(sequence):\n sequence = iter(sequence)\n return sequence\n\n\ndef 
ngrams(sequence, n):\n sequence = pad_sequence(sequence)\n\n history = []\n while n > 1:\n history.append(next(sequence))\n n -= 1\n for item in sequence:\n history.append(item)\n yield tuple(history)\n del history[0]\n\n\n\nclass QCustomTableWidgetItem (QTableWidgetItem):\n def __init__ (self, value):\n super(QCustomTableWidgetItem, self).__init__(QtCore.QString('%s' % value))\n\n def __lt__ (self, other):\n if (isinstance(other, QCustomTableWidgetItem)):\n selfDataValue = float(self.data(QtCore.Qt.EditRole).toString())\n otherDataValue = float(other.data(QtCore.Qt.EditRole).toString())\n return selfDataValue < otherDataValue\n else:\n return QTableWidgetItem.__lt__(self, other)","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336950409","text":"import matplotlib.pyplot as plt \nimport pandas as pd \nimport numpy as np\nimport math\nimport csv\n\n# Считываем данные с файла web_traffic.tsv.\n\ndf = pd.read_csv('web_traffic.tsv', sep='\\t', header=None)\n\nX, Y = df[0], df[1]\n\nx = list(X)\ny = list(Y)\n\n# Фильтрация точек\n\nfor i in range(len(y)):\n if math.isnan(y[i]):\n y[i] = 0\n else:\n y[i] = y[i]\n \n# Визуализация точек\n\nplt.plot(x, y, 'k*')\n\n# Создание массивов\n\nnumpy_x = np.array(x)\nnumpy_y = np.array(y)\n\n# Подбор коэффициентов с помощью метода polyfit\n\nth0, th1 = np.polyfit(numpy_x, numpy_y, 1)\nth2, th3, th4 = np.polyfit(numpy_x, numpy_y, 2)\nth5, th6, th7, th8 = np.polyfit(numpy_x, numpy_y, 3)\nth9, th10, th11, th12, th13 = np.polyfit(numpy_x, numpy_y, 4)\nth14, th15, th16, th17, th18, th19 = np.polyfit(numpy_x, numpy_y, 5)\n\nf1 = lambda x: th0*x + th1\nf2 = lambda x: th2*x**2 + th3*x + th4\nf3 = lambda x: th5*x**3 + th6*x**2 + th7*x + th8\nf4 = lambda x: th9*x**4 + th10*x**3 + th11*x**2 + th12*x + th13\nf5 = lambda x: th14*x**5 + th15*x**4 + th16*x**3 + th17*x**2 + th18*x + th19\n\n# Вычисление среднеквадратичной ошибки \n\ndef sq_error(X,Y,f_x=None): \n squared_error = []; \n for i in range(len(X)): \n squared_error.append((f_x(X[i])-Y[i])**2) \n return sum(squared_error)\n\nprint(f\"Ср. кв. ошибка при полиноме = 1 составляет : {sq_error(x, y, f1)}\")\nprint(f\"Ср. кв. ошибка при полиноме = 2 составляет : {sq_error(x, y, f2)}\")\nprint(f\"Ср. кв. ошибка при полиноме = 3 составляет : {sq_error(x, y, f3)}\")\nprint(f\"Ср. кв. ошибка при полиноме = 4 составляет : {sq_error(x, y, f4)}\")\nprint(f\"Ср. кв. 
ошибка при полиноме = 5 составляет : {sq_error(x, y, f5)}\")\n\n# Отображение функций при разных значениях полиномы\n\nx1 = list(range(744, 751))\n\nfunc1 = np.poly1d(np.polyfit(numpy_x, numpy_y, 1))\nplt.plot(x1, func1(x1))\n\nfunc2 = np.poly1d(np.polyfit(numpy_x, numpy_y, 2))\nplt.plot(x1, func2(x1))\n\nfunc3 = np.poly1d(np.polyfit(numpy_x, numpy_y, 3))\nplt.plot(x1, func3(x1))\n\nfunc4 = np.poly1d(np.polyfit(numpy_x, numpy_y, 4))\nplt.plot(x1, func4(x1))\n\nfunc5 = np.poly1d(np.polyfit(numpy_x, numpy_y, 5))\nplt.plot(x1, func5(x1))\n\nplt.show()\n","sub_path":"2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585110425","text":"l,r=map(int,input(\"enter l,r\").split(\" \"))\nif(l>r):\n    a=l\nelse:\n    a=r\nfor i in range(a,100000):\n    if(i%l==0 and i%r==0):\n        print(i)\n        break\n    \n","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149611962","text":"from django import template\nfrom django.utils.translation import get_language\n\nregister = template.Library()\n\ndef get_relations(source):\n    return [p for p in dir(source) if p.endswith('_related')]\n\ndef get_relation_attributes(relation):\n\n    return {\n        'lang': get_language(),\n        'object_ref_model_name': relation.object_ref._meta.model_name,\n        'object_ref_object_name': relation.object_ref._meta.object_name,\n        'relation_object_name': relation._meta.object_name,\n        'relation_label': relation._meta.label,\n        'object_ref_id': relation.object_ref_id,\n        'property': relation._meta.verbose_name.title(),\n        'value': relation.value,\n        'confidence': relation.confidence,\n        'sources': relation.sources.all(),\n    }\n\n\n@register.filter\ndef render_from_source(source, attribute):\n    html = ''\n    \n    props = getattr(source, attribute).all()\n\n    if props:\n        for prop in props:\n            \n            attributes = get_relation_attributes(prop)\n            attributes['source_id'] = source.id\n\n            html += ''' \n            <tr>\n                <td><a href=\"/{lang}/{object_ref_model_name}/edit/{object_ref_id}/?source_id={source_id}\">{object_ref_object_name}</a></td>\n                <td>{property}</td>\n                <td>{value}</td>\n                <td>{confidence}</td>\n            </tr>\n            '''.format(**attributes)\n\n    return html\n","sub_path":"sfm_pc/templatetags/render_from_source.py","file_name":"render_from_source.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426750961","text":"import numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor\n\nfrom lightwood.constants.lightwood import COLUMN_DATA_TYPES\n\n\nclass BoostMixer():\n\n    def __init__(self, quantiles=None):\n        self.targets = None\n        self.quantiles = quantiles\n\n    def fit(self, ds=None, callback=None):\n        output_features = ds.configuration['output_features']\n\n        self.targets = {}\n        for output_feature in output_features:\n            self.targets[output_feature['name']] = {\n                'type': output_feature['type']\n            }\n            if 'weights' in output_feature:\n                self.targets[output_feature['name']]['weights'] = output_feature['weights']\n            else:\n                self.targets[output_feature['name']]['weights'] = None\n\n        X = []\n\n        for row in ds:\n            X.append(np.array(row[0]))\n\n        X = np.array(X)\n        for target_col_name in self.targets:\n            Y = ds.get_column_original_data(target_col_name)\n\n            if self.targets[target_col_name]['type'] == 
COLUMN_DATA_TYPES.CATEGORICAL:\n                weight_map = self.targets[target_col_name]['weights']\n                if weight_map is None:\n                    sample_weight = [1 for _ in Y]\n                else:\n                    sample_weight = []\n                    for val in Y:\n                        sample_weight.append(weight_map[val])\n\n                self.targets[target_col_name]['model'] = GradientBoostingClassifier(n_estimators=600)\n                self.targets[target_col_name]['model'].fit(X,Y,sample_weight=sample_weight)\n\n            elif self.targets[target_col_name]['type'] == COLUMN_DATA_TYPES.NUMERIC:\n                self.targets[target_col_name]['model'] = GradientBoostingRegressor(n_estimators=600)\n                self.targets[target_col_name]['model'].fit(X,Y)\n                if self.quantiles is not None:\n                    self.targets[target_col_name]['quantile_models'] = {}\n                    for i, quantile in enumerate(self.quantiles):\n                        self.targets[target_col_name]['quantile_models'][i] = GradientBoostingRegressor(n_estimators=600, loss='quantile',alpha=quantile)\n                        self.targets[target_col_name]['quantile_models'][i].fit(X,Y)\n\n            else:\n                self.targets[target_col_name]['model'] = None\n\n\n    def predict(self, when_data_source, targets=None):\n        X = []\n        for row in when_data_source:\n            X.append(np.array(row[0]))\n\n        predictions = {}\n        if targets is None:\n            targets = self.targets\n        for target_col_name in self.targets:\n\n            if self.targets[target_col_name]['model'] is None:\n                predictions[target_col_name] = None\n            else:\n                predictions[target_col_name] = {}\n                predictions[target_col_name]['predictions'] = [x for x in self.targets[target_col_name]['model'].predict(X)]\n\n                try:\n                    predictions[target_col_name]['selfaware_confidences'] = [max(x) for x in self.targets[target_col_name]['model'].predict_proba(X)]\n                except Exception as e:\n                    pass\n\n                if 'quantile_models' in self.targets[target_col_name]:\n                    lower_quantiles = self.targets[target_col_name]['quantile_models'][0].predict(X)\n                    upper_quantiles = self.targets[target_col_name]['quantile_models'][1].predict(X)\n\n                    predictions[target_col_name]['confidence_range'] = [[lower_quantiles[i],upper_quantiles[i]] for i in range(len(lower_quantiles))]\n                    predictions[target_col_name]['quantile_confidences'] = [self.quantiles[1] - self.quantiles[0] for i in range(len(lower_quantiles))]\n\n\n\n\n        return predictions\n","sub_path":"lightwood/mixers/boost/boost.py","file_name":"boost.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370050811","text":"from typing import Dict, Any\nimport json\nimport copy\nimport logging\n\nimport networkx as nx\n\n\ndef graph_factory(graph: Dict[str, Any]) -> nx.Graph:\n    \"\"\"\n    Parameters\n    ----------\n    graph : dict\n\n    \"\"\"\n\n    edges = graph.get(\"edges\", None)\n    nodes = graph.get(\"nodes\", None)\n\n    # if not edges:\n    #     raise ValueError(\"edges are required\")\n\n    # need to be as permissive as possible. if user does\n    # not\n    is_multigraph = graph.get(\"multigraph\", True)\n    is_directed = graph.get(\"directed\", False)\n\n    # multi graphs are not valid graphs for `nereid`. These are caught\n    # by the src.network.validate module. we need to create them as\n    # 
we need to create them as\n # multi graphs so we can identify which edges are duplicated.\n if is_multigraph:\n if is_directed:\n g = nx.MultiDiGraph()\n\n else:\n g = nx.MultiGraph()\n # this is the most tolerant type of graph\n\n elif is_directed:\n g = nx.DiGraph()\n\n else:\n g = nx.Graph() # for testing purposes\n\n if edges:\n\n if g.is_multigraph():\n g = nx.from_edgelist(\n [\n (\n d.get(\"source\"),\n d.get(\"target\"),\n d.get(\"key\", None),\n d.get(\"metadata\", {}),\n )\n for d in edges\n ],\n create_using=g,\n )\n else:\n g = nx.from_edgelist(\n [\n (d.get(\"source\"), d.get(\"target\"), d.get(\"metadata\", {}))\n for d in edges\n ],\n create_using=g,\n )\n\n if nodes:\n g.add_nodes_from([(n.get(\"id\"), n.get(\"metadata\", {})) for n in nodes])\n\n return g\n\n\ndef thin_graph_dict(graph_dict: Dict[str, Any]) -> Dict[str, Any]:\n result = copy.deepcopy(graph_dict)\n\n nodes = result.get(\"nodes\", None)\n if nodes is not None:\n for dct in nodes:\n dct[\"metadata\"] = {}\n\n edges = result.get(\"edges\", [{}])\n for dct in edges:\n meta = dct.get(\"metadata\", {})\n\n if \"key\" in meta and meta.get(\"key\", None) is not None:\n key = meta[\"key\"]\n dct[\"metadata\"] = {\"key\": key}\n else:\n dct[\"metadata\"] = {}\n\n return result\n\n\ndef nxGraph_to_dict(g: nx.Graph) -> Dict[str, Any]:\n result: Dict[str, Any] = nx.node_link_data(g, {\"link\": \"edges\"})\n for dct in result[\"nodes\"]:\n id_ = dct.pop(\"id\")\n dct[\"metadata\"] = copy.deepcopy(dct)\n dct[\"id\"] = id_\n\n for dct in result[\"edges\"]:\n source = dct.pop(\"source\")\n target = dct.pop(\"target\")\n dct[\"metadata\"] = copy.deepcopy(dct)\n dct.pop(\"key\", 0)\n dct[\"source\"] = source\n dct[\"target\"] = target\n\n return result\n\n\ndef clean_graph_dict(g):\n return nxGraph_to_dict(nx.relabel_nodes(g, lambda x: str(x)))\n","sub_path":"nereid/nereid/src/network/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426465832","text":"# -*- coding: utf8 -*-\n\nfrom time import time\n\n__author__ = 'sergey'\n\nclass CacheTTLseconds(object):\n \"\"\"\n Simple cache storage\n \n {\n key (int | str) : [\n timestamp (float), - then added, updated, set to 0 if expired\n values (int | str) - some data\n ], ...\n }\n \n \"\"\"\n\n OFFSET_TIME = 0\n OFFSET_VALUE = 1\n\n _max_ttl = 300\n\n _storage = None\n\n def __init__(self):\n self._storage = {}\n pass\n\n def __len__(self):\n return len(self._storage)\n\n def set_max_ttl(self, seconds):\n self._max_ttl = seconds\n return self\n\n def set(self, key, value):\n self._storage[ key ] = [time(), value]\n return self\n\n def get(self, key, default=None):\n # not setted\n now = time()\n\n item = self._storage.get(key, [0, default])\n val = item[self.OFFSET_VALUE]\n t = item[self.OFFSET_TIME]\n\n if now - t > self._max_ttl:\n return val\n\n # update time only if value was set\n if key in self._storage:\n self._storage[ key ][self.OFFSET_TIME] = now\n\n return val\n\n def unset(self, key):\n if key in self._storage:\n del self._storage[ key ]\n return self\n\n def clear(self):\n now = time()\n count = 0\n for key, item in tuple(self._storage.items()):\n if now - item[self.OFFSET_TIME] > self._max_ttl:\n del self._storage[key]\n count += 1\n return 
count\n","sub_path":"dedupsqlfs/lib/cache/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430519144","text":"\"\"\"create tags table\n\nRevision ID: c9976e355f3f\nRevises: c87c01d4b94a\nCreate Date: 2020-12-29 10:16:35.236983\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c9976e355f3f'\ndown_revision = 'c87c01d4b94a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('tags',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('tag_name', sa.String(length=50), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('tags')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/20201229_101635_create_tags_table.py","file_name":"20201229_101635_create_tags_table.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637571993","text":"# coding: utf-8\nfrom __future__ import unicode_literals\nfrom utils import CanadianScraper, CanadianPerson as Person\n\nCOUNCIL_PAGE = 'http://www.ontla.on.ca/web/members/member_addresses.do'\n\n\nclass OntarioPersonScraper(CanadianScraper):\n\n def scrape(self):\n page = self.lxmlize(COUNCIL_PAGE)\n for block in page.xpath('//div[@class=\"addressblock\"]'):\n name_elem = block.xpath('.//a[@class=\"mpp\"]')[0]\n name = ' '.join(name_elem.text.split())\n\n riding = block.xpath('.//div[@class=\"riding\"]//text()')[0].strip().replace('--', '\\u2014')\n district = riding.replace('Chatham—Kent', 'Chatham-Kent') # m-dash to hyphen\n mpp_url = name_elem.attrib['href']\n\n mpp_page = self.lxmlize(mpp_url)\n\n image = mpp_page.xpath('//img[@class=\"mppimg\"]/@src')\n party = mpp_page.xpath('//div[@class=\"mppinfoblock\"]/p[last()]/text()')[0].strip()\n\n p = Person(primary_org='legislature', name=name, district=district, role='MPP', party=party)\n if image:\n p.image = image[0]\n p.add_source(COUNCIL_PAGE)\n p.add_source(mpp_url)\n\n email = block.xpath('.//div[@class=\"email\"]')\n if email:\n p.add_contact('email', self.get_email(email[0]))\n\n phone = block.xpath('.//div[@class=\"phone\"]//text()')\n if phone:\n p.add_contact('voice', phone[0], 'legislature')\n\n yield p\n","sub_path":"ca_on/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302491729","text":"\"\"\"\nsandman\n-------\n\n**sandman** \"makes things REST\". Have an existing database you'd like to expose via\na REST API? Normally, you'd have to write a ton of boilerplate code for\nthe ORM you're using. \n\nWe're programmers. We don't write boilerplate.\n\nSimple Setup\n````````````\n\n.. code:: python\n\n from sandman import app, db\n\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///chinook'\n\n from sandman.model import register, Model\n\n class Artist(Model):\n __tablename__ = 'Artist'\n\n class Album(Model):\n __tablename__ = 'Album'\n\n class Playlist(Model):\n __tablename__ = 'Playlist'\n\n register((Artist, Album, Playlist))\n\n app.run()\n\nLet's start our new API server and make a request.\n\n.. 
code:: bash\n\n $ python runserver.py &\n * Running on http://127.0.0.1:5000/\n\n $ curl GET http://localhost:5000/artists\n\nHere is the JSON returned:\n\n.. code:: json\n\n {\n \"ArtistId\": 273,\n \"Name\": \"C. Monteverdi, Nigel Rogers - Chiaroscuro; London Baroque; London Cornett & Sackbu\",\n \"links\": [\n {\n \"rel\": \"self\",\n \"uri\": \"/artists/ArtistId\"\n }\n ]\n },\n {\n \"ArtistId\": 274,\n \"Name\": \"Nash Ensemble\",\n \"links\": [\n {\n \"rel\": \"self\",\n \"uri\": \"/artists/ArtistId\"\n }\n ]\n },\n {\n \"ArtistId\": 275,\n \"Name\": \"Philip Glass Ensemble\",\n \"links\": [\n {\n \"rel\": \"self\",\n \"uri\": \"/artists/ArtistId\"\n }\n ]\n }\n ]\n\nBatteries Included\n``````````````````\n\nWith **sandman**, (almost) zero boilerplate code is required. Your existing database\nstructure and schema is introspected and your database tables magically get a\nRESTful API. For each table, Sandman creates:\n\n- proper endpoints \n \n- support for a configurable set of HTTP verbs \n \n - GET\n\n - POST\n\n - PATCH\n\n - DELETE\n\n- responses with appropriate ``rel`` links automatically\n\n- essentially a HATEOAS-based service sitting in front of your database\n\n*Warning: Sandman is still very much a work in progress.* It is not suitable for\nuse **anywhere.** Don't use it for anything important. It's also often changing \nin backwards incompatible ways.\n\nLinks\n`````\n\n* `website <http://www.github.com/jeffknupp/sandman/>`_\n* `documentation <http://pythonhosted.org/sandman/>`_\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport sys\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\nsetup(\n name='sandman',\n version='0.2.3.4',\n url='http://github.com/jeffknupp/sandman/',\n license='Apache Software License',\n author='Jeff Knupp',\n tests_require=['pytest'],\n install_requires=['Flask>=0.10.1',\n 'Flask-SQLAlchemy>=1.0',\n 'SQLAlchemy==0.8.2',\n ],\n cmdclass={'test': PyTest},\n author_email='jeff@jeffknupp.com',\n description='Automated REST APIs for existing database-driven systems',\n long_description=__doc__,\n packages=['sandman', 'sandman.test'],\n include_package_data=True,\n platforms='any',\n test_suite='sandman.test.test_sandman',\n classifiers = [\n 'Programming Language :: Python',\n 'Development Status :: 4 - Beta',\n 'Natural Language :: English',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150381813","text":"'''\nCreated on 7 May 2014\n\n@author: Tom\n'''\nimport unittest\n\nimport EVSelect\n\nclass Test(unittest.TestCase):\n\n def testgetlist(self):\n namefilename = 'testnamefile.txt'\n countryfilename = 'testcountryfile.txt'\n names, countries = EVSelect.getlists(namefilename, countryfilename)\n \n self.assertEqual(len(names), 6, 'There should be 6 names in the 
testnames file')\n self.assertEqual(len(countries), 15, 'There should be 15 names in the testcountries file')\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testGetList']\n unittest.main()","sub_path":"src/EVSelector/TestSelector.py","file_name":"TestSelector.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"563634774","text":"__author__='siyi'\n\n# -*- coding:utf-8 -*-\n\nfrom urllib.error import URLError\nimport urllib.request\nimport pymysql\nimport time\nimport re\n\nclass Iask():\n\n def __init__(self):\n self.enable=True\n self.base_url='http://iask.sina.com.cn'\n self.q_num=1\n self.a_num=1\n self.page=1\n self.conn=pymysql.connect(user='root',password='siyi01',database='iask',charset='utf8') #没有charset:'latin-1' codec can't encode characters in position 33-40: ordinal not in range(256)\n self.cursor=self.conn.cursor()\n\n #def creTable(self):\n #self.cursor.execute('create table question(ID_q smallint not null auto_increment,question blob,name_q varchar(255),time_q datetime,primary key (ID_q))')\n #self.cursor.execute('create table answer(ID_a smallint not null auto_increment,ID_q smallint not null,answer blob,name_a varchar(255),flag_g char(1),time_a datetime,primary key(ID_a),foreign key(ID_q) references question(ID_q))')\n #self.cursor.execute('create table urllist(ID_u smallint not null auto_increment,url varchar(255),primary key(ID_u))')\n\n def getUrl(self,start=1,end=5,content=None):\n #f=open('/home/siyi/study/python/iask.txt','w') #改用数据库\n num=1\n for i in range(end):\n url=self.pageUrl(content)\n req=urllib.request.Request(url)\n try:\n response=urllib.request.urlopen(req)\n content=response.read().decode('utf-8')\n pattern=re.compile(r'<div class=\"question-title.*?<a href=\"(.*?)\" target',re.S)\n items=re.findall(pattern,content)\n for item in items:\n item=self.base_url+item\n data_u=(num,item)\n print(data_u)\n self.insertData('urllist',data_u)\n num+=1\n #stop=input() #控制过程\n self.conn.commit()\n except URLError as e:\n print('页面获取失败\\n')\n if hasattr(e,'code'):\n print('错误代码:',e.code)\n if hasattr(e,'reason'):\n print('错误原因:',e.reason)\n #f.close()\n\n def pageUrl(self,content=None):\n if not content:\n url=self.base_url+'/c/187.html'\n else:\n pattern=re.compile('<a href=\"(.*?)\" style=\"width: 65px\">下一页</a>')\n npage=re.search(pattern,content)\n url=self.base_url+npage.group(1)\n return url\n\n def getContent(self,url):\n req=urllib.request.Request(url)\n response=urllib.request.urlopen(req)\n content=response.read().decode('utf-8')\n return content\n\n def getQuestion(self,q_num,content):\n pattern=re.compile(r'<div class=\"question_text\">.*?\">(.*?)</pre>.*?<div class=\"ask_autho clearfix\">.*?\">(.*?)</span>.*?<span class=\"ask-time mr10\">(.*?)</span>',re.S)\n item=re.findall(pattern,content)\n try:\n item=item[0]\n text=self.getText(item[0])\n name_q=self.getQname(item[1])\n time=self.getTime(item[2])\n question=(q_num,text,name_q,time)\n except:\n question=(q_num,'此问题不是有效问题!','','1970-01-01 00:00:00')\n return question\n\n def getGanswer(self,q_num,content):\n pattern=re.compile(r'div class=\"good_answer.*?\">.*?\">.*?\">(.*?)</a>.*?<span class=\"time mr10\"> \\| (.*?)</span>.*?answer_text\">.*?\">(.*?)</pre>',re.S)\n item=re.findall(pattern,content)\n if item:\n item=item[0]\n time=self.getTime(item[1])\n text=self.getText(item[2])\n ganswer=(self.a_num,q_num,text,item[0],'1',time)\n else:\n return\n return ganswer\n\n def 
getUanswer(self,q_num,content):\n uanswers=[]\n if 'recommend-info cf' in content:\n pattern=re.compile(r'<li class=\"clearfix\">.*?answer_txt.*?\">.*?\">(.*?)</pre>.*?class=\"recommend-info cf\">.*?\">.*?\">.*?>(.*?</span>.*?\">.*?)</span>.*?class=\"time fl\">(.*?)</span>',re.S)\n else:\n pattern=re.compile(r'<li class=\"clearfix\">.*?answer_txt.*?\">.*?\">(.*?)</pre>.*?author_name\">(.*?)</a>.*?answer_t\">(.*?)</span>',re.S)\n items=re.findall(pattern,content)\n if items:\n for item in items:\n time=self.getTime(item[2])\n text=self.getText(item[0])\n expert=self.getExperit(item[1])\n ganswer=(self.a_num,q_num,text,expert,'0',time)\n uanswers.append(ganswer)\n self.a_num+=1\n else:\n return\n return uanswers\n\n def insertData(self,table,data):\n cursor=self.conn.cursor()\n cursor.execute('insert into %s values %s'%(table,data))\n\n def getTime(self,time0):\n if '分钟前'in time0:\n time1=re.sub('[^\\d+]','',time0)\n time2=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()-int(time1)*60))\n elif '小时前' in time0:\n time1=re.sub('[^\\d+]','',time0)\n time2=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()-int(time1)*3600))\n else:\n time2=time.strftime('%Y-%m-%d %H:%M:%S',time.strptime(time0,'%y-%m-%d'))\n return time2\n\n def getText(self,text):\n text1=re.sub('[\\t ]+','',text)\n text2=re.sub('[<br/>]+','\\n',text1)\n text3=re.sub('[\\r ]+','\\n',text2)\n return text3.strip()\n\n def getQname(self,name):\n if '匿名' in name:\n name_q=name #匿名提问者\n else:\n pattern=re.compile('>(.*?)</a>')\n item=re.search(pattern,name)\n name_q=item.group(1)\n return name_q\n\n def getExperit(self,expert):\n expert1=re.sub('[\\r\\t\\n ]*','',expert)\n expert2=re.sub('<.*?>','-',expert1)\n return expert2\n\n def main(self):\n #self.getUrl() #获取问题链接\n #self.creTable() #创建表\n while self.enable==True:\n try:\n q=open('/home/siyi/study/python/q_num.txt','r')\n a=open('/home/siyi/study/python/a_num.txt','r')\n self.q_num=int(q.readline())\n self.a_num=int(a.readline())\n q.close()\n a.close()\n except:\n self.q_num=1\n self.a_num=1\n result=self.cursor.execute('select url from urllist where ID_u=%s'%self.q_num) #查询问题链接\n if result==1:\n url=self.cursor.fetchone()[0]\n else:\n self.enable=False\n return\n content=self.getContent(url) #获取问题页页面内容\n data_q=self.getQuestion(self.q_num,content)#匹配问题\n #print(data_q)\n self.insertData('question',data_q) #插入问题至数据库\n data_ga=self.getGanswer(self.q_num,content) #匹配好评回答\n if data_ga:\n #print(data_ga)\n self.insertData('answer',data_ga) #插入好评回答至数据库\n self.a_num+=1\n data_ua=self.getUanswer(self.q_num,content) #匹配普通回答\n if data_ua:\n for item in data_ua:\n #print(item)\n self.insertData('answer',item) #插入普通回答至数据库\n #self.a_num+=1 #这里无效,加在函数里\n self.q_num+=1\n self.conn.commit()\n q=open('/home/siyi/study/python/q_num.txt','w')\n a=open('/home/siyi/study/python/a_num.txt','w')\n q.write(str(self.q_num))\n a.write(str(self.a_num))\n q.close()\n a.close()\n #contrl=input()\n #if contrl=='q':\n #self.enable=False\n #return\n self.cursor.close()\n self.conn.close()\n\nif __name__=='__main__':\n iask=Iask()\n iask.main()\n","sub_path":"spider_/iask.py","file_name":"iask.py","file_ext":"py","file_size_in_byte":8000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99258719","text":"import random\n\n\nclass AnimalNoises:\n def __init__(self, home_language='english'):\n self.home_language = home_language\n self.languages = {}\n\n def add_sound(self, sound, language=''):\n if language == '':\n language = 
self.home_language\n language = language.lower()\n if language not in self.languages:\n self.languages[language] = []\n\n self.languages[language].append(sound)\n\n def random_noise(self):\n language = random.choice(list(self.languages.keys()))\n sounds = self.languages.get(language)\n sound = random.choice(sounds)\n return sound\n\n\ndog_noises = AnimalNoises()\ndog_noises.languages = {\n 'English': ['woof', 'ruff', 'arf', 'bow wow', 'yap', 'yip'],\n 'German': ['wuff', 'wau'],\n 'Turkish': ['hev', 'hav'],\n 'Spanish': ['guau-guau', 'gua', 'jau'],\n 'Afrikaans': ['blaf', 'woef', 'keff'],\n 'Russian': ['gav', 'гав-гав', 'tyav', 'тяв-тяв'],\n 'Korean': ['meong', '멍'],\n 'Japanese': ['wan-wan', 'ワン', 'kyan-kyan', 'キャンキャン'],\n 'Italian': ['bau'],\n 'Hindi': ['bow'],\n 'Icelandic': ['voff'],\n 'Dutch': ['blaf', 'kef', 'waf', 'woef'],\n 'Danish': ['vov', 'vuf'],\n 'Mandarin': ['wang', '汪'],\n 'Albanian': ['ham'],\n 'French': ['waouh', 'ouah', 'ouaf', 'vaf', 'wouf', 'wouaf', 'jappe']\n}\n\nif __name__ == '__main__':\n print(dog_noises.random_noise())\n","sub_path":"Chapter 09 - Classes/animal_noises.py","file_name":"animal_noises.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615631855","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/4/30 1:07 PM\n# @Author : Yinghao Qin\n# @Email : y.qin@hss18.qmul.ac.uk\n# @File : modified_model.py\n# @Software: PyCharm\n# Reference:\n# https://github.com/WZMIAOMIAO/deep-learning-for-image-processing/tree/master/pytorch_learning\n# https://github.com/39239580/googlenet-pytorch/blob/master/Inception_v1_mnist.py\n# https://www.kaggle.com/grfiv4/plot-a-confusion-matrix\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass GoogLeNet_MNIST(nn.Module):\n def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):\n super(GoogLeNet_MNIST, self).__init__()\n\n self.conv1 = BasicConv2d(1, 8, kernel_size=5, padding=2)\n self.conv2 = BasicConv2d(8, 32, kernel_size=3, padding=1)\n\n self.inception3a = Inception(32, 16, 24, 32, 4, 8, 8)\n self.inception3b = Inception(64, 32, 32, 48, 8, 24, 16)\n self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception4a = Inception(120, 48, 24, 52, 4, 12, 16)\n self.inception4b = Inception(128, 40, 28, 56, 6, 16, 16)\n self.inception4c = Inception(128, 32, 32, 64, 12, 16, 16)\n self.inception4d = Inception(128, 28, 36, 72, 8, 16, 16)\n self.inception4e = Inception(132, 64, 40, 80, 8, 32, 32)\n self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception5a = Inception(208, 64, 40, 80, 8, 32, 32)\n self.inception5b = Inception(208, 96, 48, 96, 12, 32, 32)\n\n self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)\n # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.dropout = nn.Dropout(0.4)\n self.fc = nn.Linear(256, num_classes)\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n # N x 1 x 28 x 28\n x = self.conv1(x)\n # N x 8 x 28 x 28\n x = self.conv2(x)\n\n # N x 32 x 28 x 28\n x = self.inception3a(x)\n # N x 64 x 28 x 28\n x = self.inception3b(x)\n # N x 120 x 28 x 28\n x = self.maxpool1(x)\n # N x 120 x 14 x 14\n\n x = self.inception4a(x)\n # N x 128 x 14 x 14\n x = self.inception4b(x)\n # N x 128 x 14 x 14\n x = self.inception4c(x)\n # N x 128 x 14 x 14\n x = self.inception4d(x)\n # N x 132 x 14 x 14\n x = self.inception4e(x)\n # N x 208 x 14 x 14\n x = self.maxpool2(x)\n\n # N x 208 x 7 x 7\n x = self.inception5a(x)\n # N x 208 x 7 x 
7\n x = self.inception5b(x)\n # N x 256 x 7 x 7\n\n x = self.avgpool(x)\n # N x 256 x 1 x 1\n x = torch.flatten(x, 1)\n # N x 256\n x = self.dropout(x)\n x = self.fc(x)\n # N x 10 (num_classes)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\nclass Inception(nn.Module):\n def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):\n super(Inception, self).__init__()\n\n self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)\n\n self.branch2 = nn.Sequential(\n BasicConv2d(in_channels, ch3x3red, kernel_size=1),\n BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1) # ensure the output size equals the input size\n )\n\n self.branch3 = nn.Sequential(\n BasicConv2d(in_channels, ch5x5red, kernel_size=1),\n BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2) # ensure the output size equals the input size\n )\n\n self.branch4 = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1),\n BasicConv2d(in_channels, pool_proj, kernel_size=1)\n )\n\n def forward(self, x):\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n outputs = [branch1, branch2, branch3, branch4]\n return torch.cat(outputs, 1) # concatenate the branches in channel dimension | [batch, channel, high, width]\n\n\nclass InceptionAux(nn.Module):\n def __init__(self, in_channels, num_classes):\n super(InceptionAux, self).__init__()\n self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3)\n self.conv = BasicConv2d(in_channels, 128, kernel_size=1) # output[batch, 128, 4, 4]\n\n self.fc1 = nn.Linear(2048, 1024) # 2048 = 128 * 4 * 4\n self.fc2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14\n x = self.averagePool(x)\n # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4\n x = self.conv(x)\n # N x 128 x 4 x 4\n x = torch.flatten(x, 1)\n x = F.dropout(x, 0.5, training=self.training) # In the original paper, the dropout is 70%, but we set it as 50%\n # N x 2048\n x = F.relu(self.fc1(x), inplace=True)\n x = F.dropout(x, 0.5, training=self.training)\n # N x 1024\n x = self.fc2(x)\n # N x num_classes\n return x\n\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x","sub_path":"code/GoogLeNet/modified_model.py","file_name":"modified_model.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"451074622","text":"'''Code by Kuntal Kokate\nApril 24th,2020\nReleased under GNU GPL\n'''\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n#if using termux\nimport subprocess\nimport shlex\n#end if\n\n\nomega= np.linspace(-100, 100,20000)\nzeros=[]#a this list contains zeros of G(s)\npoles=[0, -6, -9]#a this list contains poles of G(s)\nH_s=signal.ZerosPolesGain(zeros,poles,1) # feed zeros and poles as inputs to this function to get\nw, 
H=signal.freqresp(H_s,w=omega)#feed H_s and omega values as inputs to this funtion it will return frequency response H of H_s and w the range of omega values of H\nGAIN=1#enter the GAIN function which could be either constant or variable\nH=H*GAIN#we multiply GAIN function with H(jw) to get our desired final transfer functions frequency response\n#finally H is the frequency response of our desired Transfer Function\n\n#Plotting the Nyquist plot\nplt.plot(H.real, H.imag)\nplt.plot(-0.00123, 0, 'ro')\nplt.text(-0.00123, -0.005, \"(-1/810, 0)\")\nplt.ylabel(\"${Im}\\{G(j\\omega)\\}$\")\nplt.title(\"NYQUIST PLOT\")\nplt.xlabel(\"${Re}\\{G(j\\omega)\\}$\")\nplt.grid()\nplt.xlim(-0.0051, 0.0051)\nplt.ylim(-0.02, 0.02)\n\n\n# plot the zoomed portion\nX_detail = H.real[4000:9500]\nY_detail = H.imag[4000:9500]\nsub_axes = plt.axes([.6, .6, .25, .25])\nsub_axes.grid()\nsub_axes.plot(X_detail, Y_detail,c='k')\nsub_axes.plot(-0.00123, 0, 'ro')\nsub_axes.text(-0.00123, -0.0005, \"(-1/810, 0)\")\nsub_axes.plot(H.real[-9500:-4000], H.imag[-9500:-4000], c='k')\nsub_axes.set_xticks([])\nsub_axes.set_yticks([])\n\n\n#if using termux\nplt.savefig('./figs/ee18btech11028_1.pdf')\nplt.savefig('./figs/ee18btech11028_1.eps')\nsubprocess.run(shlex.split(\"termux-open ./figs/ee18btech11028_1.pdf\"))\n#else\n\nplt.show()\n","sub_path":"ketan/codes/ee18btech11028_1.py","file_name":"ee18btech11028_1.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521770177","text":"# -*- coding: utf-8 -*-\n\nfrom partie import PartiePipopipette\nfrom planche import Planche\nfrom boite import Boite\nfrom ligne import Ligne\n\nfrom collections import Counter\nimport traceback\nimport colorama\n\nCRED = '\\033[91m'\nCGREEN = '\\33[92m'\nCEND = '\\033[0m'\n\n\nclass Testeur:\n def __init__(self, *groupes):\n self.groupes = groupes\n\n def test(self):\n for groupe in self.groupes:\n groupe.executer_tous_tests()\n\n print(\"\\n***BILAN FINAL***\\n\")\n\n for groupe in self.groupes:\n groupe.bilan_final()\n\n\nclass GroupeTest:\n def __init__(self, nom, tests, fonction_a_tester):\n self.nom = nom\n self.tests = tests\n self.fonction_a_tester = fonction_a_tester\n self.tests_echoues = []\n\n def executer_tous_tests(self):\n print(\"\\n***DÉBUT TESTS {}***\\n\".format(self.nom.upper()))\n\n for test in self.tests:\n succes, msg = test.executer(self.fonction_a_tester)\n\n if not succes:\n self.tests_echoues.append(test.nom)\n print(CRED, end='')\n print()\n print(msg)\n print(CEND, end='')\n\n self._bilan_execution()\n\n def _bilan_execution(self):\n print()\n if len(self.tests_echoues) == 0:\n print(CGREEN, end='')\n print(\"Tous les {} tests du groupe {!r} ont été complétés avec succès.\".format(len(self.tests), self.nom))\n print(CEND, end='')\n else:\n print(\n \"Sur les {} tests du groupe {!r}, {} ont échoué. 
Il s'agit des tests nommés {}\" \\\n \"Voir la sortie en console plus haut pour plus de détails.\" \\\n .format(len(self.tests), self.nom, len(self.tests_echoues), \"\".join((\"\\n\\n\", CRED, *[test + \"\\n\" for test in self.tests_echoues], CEND, \"\\n\")))\n )\n\n def bilan_final(self):\n if len(self.tests_echoues) == 0:\n print(CGREEN, end='')\n print(\"Tous les {} tests du groupe {!r} ont été complétés avec succès.\".format(len(self.tests), self.nom))\n print(CEND, end='')\n else:\n print(CRED, end='')\n print(\"{} des {} tests du groupe {!r} ont échoué.\".format(len(self.tests_echoues), len(self.tests),\n self.nom))\n print(CEND, end='')\n\n\ndef _formatter_arguments(**kwargs):\n n_arguments = len(kwargs)\n\n if n_arguments == 0:\n return \" d'appeler \"\n elif n_arguments == 1:\n return \" de fournir {!r}={!r} à \".format(*[item for paire in kwargs.items() for item in paire])\n else:\n pre = \" de fournir les arguments \" + \"{!r}={!r},\" * (n_arguments - 2)\n return (pre + \"{!r}={!r} et {!r}={!r} à \").format(*[item for paire in kwargs.items() for item in paire])\n\n\ndef _formatter_attributs(**attributs):\n n_attributs = len(attributs)\n\n def format_attribut(nom_attribut, valeur):\n return \"self.{}={}\".format(nom_attribut, valeur)\n\n if n_attributs == 1:\n return \" {}\\n\".format(*[format_attribut(nom, valeur) for nom, valeur in attributs.items()])\n else:\n pre = \" {}\\n\\n\" * (n_attributs - 2)\n return (pre + \" {}\\n\\n et {} \").format(*[format_attribut(nom, valeur) for nom, valeur in attributs.items()])\n\n\ndef _message_erreur_sortie_defaut(nom_test, nom_fonction_a_tester, kwargs, sortie_attendue, sortie_actuelle,\n precisions):\n arguments = _formatter_arguments(**kwargs)\n\n premiere_ligne = \"ÉCHEC DU TEST {} !\\n\".format(nom_test.upper())\n corps = \"En essayant \" + arguments + \"votre fonction {}(), la sortie attendue était {!r} mais a \" \\\n \"plutôt été {!r}. 
Pensez à utiliser le débogueur ou encore à faire imprimer des valeurs intermédiaires dans \" \\\n \"votre fonction pour {}() pour cerner la source du problème !\".format(\n nom_fonction_a_tester, sortie_attendue, sortie_actuelle, nom_fonction_a_tester)\n\n if precisions != '':\n corps += '\\n\\nLes précisions suivantes vous sont données pour vous aider à cerner le problème :\\n' + precisions\n\n return premiere_ligne + corps\n\n\ndef _message_erreur_attributs_defaut(nom_test, nom_fonction_a_tester, kwargs, attributs_attendus, attributs_actuels,\n precisions):\n arguments = _formatter_arguments(**kwargs)\n attributs_attendus_formattes = _formatter_attributs(**attributs_attendus)\n attributs_actuels_formattes = _formatter_attributs(**attributs_actuels)\n\n premiere_ligne = \"ÉCHEC DU TEST {} !\\n\".format(nom_test.upper())\n corps = \"En essayant \" + arguments + \"votre fonction {}(), les attributs attendus étaient\\n\\n{}\\n\\nmais ont \" \\\n \"plutôt été\\n\\n{}\\n\\nPensez à utiliser le débogueur ou encore à faire imprimer des valeurs intermédiaires dans \" \\\n \"votre fonction pour {}() pour cerner la source du problème !\".format(\n nom_fonction_a_tester, attributs_attendus_formattes, attributs_actuels_formattes, nom_fonction_a_tester)\n\n if precisions != '':\n corps += '\\n\\nLes précisions suivantes vous sont données pour vous aider à cerner le problème :\\n' + precisions\n\n return premiere_ligne + corps\n\n\ndef _message_erreur_exception_defaut(nom_test, fonction_a_tester, kwargs):\n arguments = _formatter_arguments(**kwargs)\n\n premiere_ligne = \"ÉCHEC DU TEST {} !\\n\".format(nom_test.upper())\n corps = \"En essayant \" + arguments + \"votre fonction {}(), une erreur a été retournée. Cette erreur est \" \\\n \"la suivante : \\n\\n{}\".format(fonction_a_tester, traceback.format_exc())\n\n return premiere_ligne + corps\n\n\ndef formattage_erreur_cles_dict(paires_manquantes, paires_en_trop):\n msg = 'Les clés de dictionaire attendues ne correspondaient pas à celle reçues.\\n'\n\n if len(paires_manquantes) > 0:\n if len(paires_manquantes) == 1:\n msg += \"Une clé attendue était manquante:\\n\\n{}\\n\".format(paires_manquantes[0])\n else:\n msg += \"Plusieurs clés attendues étaient manquantes:\\n\\n\"\n msg += (\"{}\\n\" * len(paires_manquantes)).format(*[paire for paire in paires_manquantes])\n\n if len(paires_en_trop) > 0:\n if len(paires_en_trop) == 1:\n msg += \"Une clé reçue était en trop:\\n\\n{}\\n\".format(paires_en_trop[0])\n else:\n msg += \"Plusieurs clés reçues étaient en trop:\\n\\n\"\n msg += (\"{}\\n\" * len(paires_en_trop)).format(*[paire for paire in paires_en_trop])\n\n return msg\n\n\ndef formattage_erreur_valeurs_dict(valeurs_non_identiques):\n msg = 'Les valeurs de dictionaire attendues ne correspondaient pas à celle reçues.\\n'\n\n if len(valeurs_non_identiques) == 1:\n msg += \"Une valeur à la clé {} était différente: on attendait {} mais on a reçu {}\\n\".format(*(\n valeurs_non_identiques[0]))\n else:\n msg += \"Plusieurs clés étaient différentes:\\n\\n\"\n for t in valeurs_non_identiques:\n msg += \"À la clé {} on attendait {} mais on a reçu {}\\n\".format(*t)\n\n return msg\n\n\ndef formattage_erreur_items_liste(items_absents, items_en_trop):\n msg = 'Les items de liste attendus ne correspondaient pas à ceux reçus.\\n'\n\n if len(items_absents) > 0:\n msg += \"Les items suivants étaient attendus mais n'ont pas été reçus:\\n\\n\"\n for item, nb_manquant in items_absents.items():\n for _ in range(nb_manquant):\n msg += '{}\\n'.format(item)\n\n if 
len(items_en_trop) > 0:\n msg += \"Les items suivants ont été reçus mais n'étaient pas attendus:\\n\\n\"\n for item, nb_en_trop in items_en_trop.items():\n for _ in range(nb_en_trop):\n msg += '{}\\n'.format(item)\n\n return msg\n\n\ndef _operateur_egalite_defaut(sortie_attendue, sortie_recue):\n type_attendu, type_recu = type(sortie_attendue), type(sortie_recue)\n\n if type_attendu is type_recu:\n if isinstance(sortie_attendue, dict):\n cles_manquantes = set(sortie_attendue.keys()) - set(sortie_recue.keys())\n cles_en_trop = set(sortie_recue.keys()) - set(sortie_attendue.keys())\n\n if len(cles_manquantes | cles_en_trop) > 0:\n return formattage_erreur_cles_dict([(cle, sortie_attendue[cle]) for cle in cles_manquantes],\n [(cle, sortie_recue[cle]) for cle in cles_en_trop])\n\n valeurs_non_identiques = [(cle, sortie_attendue[cle], sortie_recue[cle]) for cle in sortie_attendue\n if _operateur_egalite_defaut(sortie_attendue[cle], sortie_recue[cle]) is not None]\n\n if len(valeurs_non_identiques) == 0:\n return None\n else:\n return formattage_erreur_valeurs_dict(valeurs_non_identiques)\n elif isinstance(sortie_attendue, list) or isinstance(sortie_attendue, tuple):\n items_absents = Counter(sortie_attendue)\n items_absents.subtract(Counter(sortie_recue))\n items_absents = {k: v for k, v in items_absents.items() if v > 0}\n\n items_en_trop = Counter(sortie_recue)\n items_en_trop.subtract(Counter(sortie_attendue))\n items_en_trop = {k: v for k, v in items_en_trop.items() if v > 0}\n\n if sum(items_absents.values()) + sum(items_en_trop.values()) > 0:\n return formattage_erreur_items_liste(items_absents, items_en_trop)\n else:\n return None\n elif isinstance(sortie_attendue, Boite) or isinstance(sortie_attendue, Ligne):\n if _operateur_egalite_defaut(vars(sortie_attendue), vars(sortie_recue)) is None:\n return None\n else:\n return ''\n else:\n if sortie_attendue == sortie_recue:\n return None\n else:\n return ''\n else:\n return 'La sortie attendue était de type {} alors que la sortie reçue a plutôt été de type {}'.format(\n type_attendu, type_recu)\n\n\ndef operateur_egalite_un_item_tuple(index_a_tester):\n return lambda sortie_attendue, sortie_recue: _operateur_egalite_defaut(sortie_attendue, sortie_recue[index_a_tester]\n )\n\n\nclass TestSortieObjet:\n def __init__(self,\n objet,\n nom,\n kwargs,\n sortie_attendue,\n msg_erreur_sortie=_message_erreur_sortie_defaut,\n msg_erreur_exception=_message_erreur_exception_defaut,\n operateur_egalite=_operateur_egalite_defaut):\n self.objet = objet\n self.nom = nom\n self.kwargs = kwargs\n self.sortie_attendue = sortie_attendue\n self.msg_erreur_sortie = msg_erreur_sortie\n self.msg_erreur_exception = msg_erreur_exception\n self.operateur_egalite = operateur_egalite\n\n def executer(self, nom_fonction_a_tester):\n try:\n self.objet = self.objet()\n sortie_actuelle = getattr(self.objet, nom_fonction_a_tester)(**self.kwargs)\n\n msg_erreur = self.operateur_egalite(self.sortie_attendue, sortie_actuelle)\n if msg_erreur is None:\n return True, None\n else:\n return False, self.msg_erreur_sortie(self.nom, nom_fonction_a_tester, self.kwargs, self.sortie_attendue,\n sortie_actuelle, msg_erreur)\n\n except:\n return False, self.msg_erreur_exception(self.nom, nom_fonction_a_tester, self.kwargs)\n\n\nclass TestAttributsObjet:\n def __init__(self,\n objet,\n nom,\n kwargs,\n attributs_attendus,\n msg_erreur_attribut=_message_erreur_attributs_defaut,\n msg_erreur_exception=_message_erreur_exception_defaut,\n operateur_egalite=_operateur_egalite_defaut):\n 
self.objet = objet\n self.nom = nom\n self.kwargs = kwargs\n self.attributs_attendus = attributs_attendus\n self.msg_erreur_attribut = msg_erreur_attribut\n self.msg_erreur_exception = msg_erreur_exception\n self.operateur_egalite = operateur_egalite\n\n def executer(self, nom_fonction_a_tester):\n try:\n self.objet = self.objet()\n getattr(self.objet, nom_fonction_a_tester)(**self.kwargs)\n\n attributs_actuels = {nom: getattr(self.objet, nom) for nom in self.attributs_attendus}\n\n msg_erreurs = [\n self.operateur_egalite(attendu, actuel)\n for attendu, actuel in zip(self.attributs_attendus.values(), attributs_actuels.values())\n ]\n\n if all([msg is None for msg in msg_erreurs]):\n return True, None\n else:\n return False, self.msg_erreur_attribut(self.nom, nom_fonction_a_tester, self.kwargs,\n self.attributs_attendus, attributs_actuels,\n '\\n\\n'.join([msg for msg in msg_erreurs if msg is not None]))\n\n except:\n return False, self.msg_erreur_exception(self.nom, nom_fonction_a_tester, self.kwargs)\n\n\ndef planche_vide():\n return Planche()\n\n\ndef planche_presque_vide():\n planche = Planche()\n\n planche.lignes[(0, 0, 'H')].jouee = True\n\n return planche\n\n\ndef planche_pleine():\n planche = Planche()\n\n for ligne in planche.lignes.values():\n ligne.jouee = True\n\n for index, boite in planche.boites.items():\n num = index[0] + index[1]\n\n boite.assigner_couleur(\"rouge\" if num % 2 == 0 else \"bleu\")\n\n return planche\n\n\ndef planche_presque_pleine():\n planche = planche_pleine()\n\n planche.lignes[(0, 0, 'H')].jouee = False\n planche.boites[(0, 0)].pleine = False\n planche.boites[(0, 0)].couleur = ''\n\n return planche\n\n\ndef obj_to_func(obj):\n return lambda: obj\n\n\ndef groupe_init_boites():\n tests = []\n\n boites_attendues = {\n (0, 0): Boite(),\n (1, 0): Boite(),\n (2, 0): Boite(),\n (0, 1): Boite(),\n (1, 1): Boite(),\n (2, 1): Boite(),\n (0, 2): Boite(),\n (1, 2): Boite(),\n (2, 2): Boite()\n }\n\n tests.append(TestAttributsObjet(planche_vide, 'test_initilisation_boites', {}, {'boites': boites_attendues}))\n tests.append(TestAttributsObjet(planche_pleine, 'test_reinitilisation_boites', {}, {'boites': boites_attendues}))\n\n return GroupeTest('Planche.init_boites', tests, 'initialiser_boites')\n\n\ndef groupe_init_lignes():\n tests = []\n\n lignes_attendues = {\n (0, 3, 'V'): Ligne(),\n (1, 3, 'V'): Ligne(),\n (2, 3, 'V'): Ligne(),\n (3, 0, 'H'): Ligne(),\n (3, 1, 'H'): Ligne(),\n (3, 2, 'H'): Ligne(),\n (0, 0, 'H'): Ligne(),\n (0, 0, 'V'): Ligne(),\n (1, 0, 'H'): Ligne(),\n (1, 0, 'V'): Ligne(),\n (2, 0, 'H'): Ligne(),\n (2, 0, 'V'): Ligne(),\n (0, 1, 'H'): Ligne(),\n (0, 1, 'V'): Ligne(),\n (1, 1, 'H'): Ligne(),\n (1, 1, 'V'): Ligne(),\n (2, 1, 'H'): Ligne(),\n (2, 1, 'V'): Ligne(),\n (0, 2, 'H'): Ligne(),\n (0, 2, 'V'): Ligne(),\n (1, 2, 'H'): Ligne(),\n (1, 2, 'V'): Ligne(),\n (2, 2, 'H'): Ligne(),\n (2, 2, 'V'): Ligne()\n }\n\n ma_planche_pleine = planche_vide()\n for ligne in ma_planche_pleine.lignes.values():\n ligne.jouee = True\n\n tests.append(TestAttributsObjet(planche_vide, 'test_initilisation_lignes', {}, {'lignes': lignes_attendues}))\n tests.append(\n TestAttributsObjet(obj_to_func(ma_planche_pleine), 'test_reinitilisation_lignes', {},\n {'lignes': lignes_attendues}))\n\n return GroupeTest('Planche.init_lignes', tests, 'initialiser_lignes')\n\n\ndef tests_etape_1():\n print(\"*******TESTS ÉTAPE 1*******\\n\")\n testeur = Testeur(groupe_init_boites(), groupe_init_lignes())\n testeur.test()\n\n\ndef groupe_coup_dans_les_limites():\n tests = []\n\n 
tests.append(TestSortieObjet(planche_vide, 'test_horizontale_en_jeu', {'index_ligne': (1, 0, 'H')}, True))\n tests.append(TestSortieObjet(planche_vide, 'test_verticale_en_jeu', {'index_ligne': (1, 2, 'V')}, True))\n tests.append(TestSortieObjet(planche_vide, 'test_colonne_negative', {'index_ligne': (1, -1, 'H')}, False))\n tests.append(TestSortieObjet(planche_vide, 'test_colonne_trop_elevee', {'index_ligne': (1, 4, 'H')}, False))\n tests.append(TestSortieObjet(planche_vide, 'test_ligne_negative', {'index_ligne': (-1, 1, 'H')}, False))\n tests.append(TestSortieObjet(planche_vide, 'test_ligne_trop_elevee', {'index_ligne': (4, 0, 'H')}, False))\n tests.append(TestSortieObjet(planche_vide, 'test_orientation_invalide', {'index_ligne': (1, 0, 'Invalide')}, False))\n tests.append(TestSortieObjet(planche_vide, 'test_ligne_horizontale_bas', {'index_ligne': (3, 0, 'H')}, True))\n tests.append(TestSortieObjet(planche_vide, 'test_verticale_droite', {'index_ligne': (1, 3, 'V')}, True))\n\n return GroupeTest('Planche.coup_dans_les_limites', tests, 'coup_dans_les_limites')\n\n\ndef groupe_est_pleine():\n tests = []\n\n tests.append(TestSortieObjet(planche_vide, 'test_planche_vide', {}, False))\n tests.append(TestSortieObjet(planche_pleine, 'test_planche_pleine', {}, True))\n\n return GroupeTest('Planche.est_pleine', tests, 'est_pleine')\n\n\ndef groupe_jouer_coup():\n tests = []\n\n ligne_jouee = Ligne()\n ligne_jouee.jouee = True\n lignes_attendues = {\n (0, 3, 'V'): Ligne(),\n (1, 3, 'V'): Ligne(),\n (2, 3, 'V'): Ligne(),\n (3, 0, 'H'): Ligne(),\n (3, 1, 'H'): Ligne(),\n (3, 2, 'H'): Ligne(),\n (0, 0, 'H'): Ligne(),\n (0, 0, 'V'): Ligne(),\n (1, 0, 'H'): Ligne(),\n (1, 0, 'V'): Ligne(),\n (2, 0, 'H'): Ligne(),\n (2, 0, 'V'): Ligne(),\n (0, 1, 'H'): Ligne(),\n (0, 1, 'V'): Ligne(),\n (1, 1, 'H'): Ligne(),\n (1, 1, 'V'): Ligne(),\n (2, 1, 'H'): Ligne(),\n (2, 1, 'V'): Ligne(),\n (0, 2, 'H'): Ligne(),\n (0, 2, 'V'): ligne_jouee,\n (1, 2, 'H'): Ligne(),\n (1, 2, 'V'): Ligne(),\n (2, 2, 'H'): Ligne(),\n (2, 2, 'V'): Ligne()\n }\n\n tests.append(\n TestAttributsObjet(planche_vide, 'test_jouer_coup_rouge', {\n 'index_ligne': (0, 2, 'V'),\n 'couleur': 'rouge'\n }, {\n 'lignes': lignes_attendues,\n 'couleur_dernier_coup': 'rouge',\n 'position_dernier_coup': (0, 2, 'V')\n }))\n tests.append(\n TestAttributsObjet(planche_vide, 'test_jouer_coup_bleu', {\n 'index_ligne': (0, 2, 'V'),\n 'couleur': 'bleu'\n }, {\n 'lignes': lignes_attendues,\n 'couleur_dernier_coup': 'bleu',\n 'position_dernier_coup': (0, 2, 'V')\n }))\n\n return GroupeTest('Planche.jouer_coup', tests, 'jouer_coup')\n\n\ndef groupe_valider_coup():\n tests = []\n\n tests.append(\n TestSortieObjet(planche_vide,\n 'test_coup_valide', {'index_ligne': (0, 0, 'H')},\n True,\n operateur_egalite=operateur_egalite_un_item_tuple(index_a_tester=0)))\n tests.append(\n TestSortieObjet(planche_vide,\n 'test_orientation_invalide', {'index_ligne': (0, 0, 'Invalide')},\n False,\n operateur_egalite=operateur_egalite_un_item_tuple(index_a_tester=0)))\n tests.append(\n TestSortieObjet(planche_vide,\n 'test_coup_hors_limites', {'index_ligne': (-1, -1, 'H')},\n False,\n operateur_egalite=operateur_egalite_un_item_tuple(index_a_tester=0)))\n tests.append(\n TestSortieObjet(planche_pleine,\n 'test_ligne_deja_jouee', {'index_ligne': (0, 0, 'H')},\n False,\n operateur_egalite=operateur_egalite_un_item_tuple(index_a_tester=0)))\n\n return GroupeTest('Planche.valider_coup', tests, 'valider_coup')\n\n\ndef groupe_obtenir_coups_possible():\n tests = []\n\n toutes_lignes = [(0, 3, 
'V'), (1, 3, 'V'), (2, 3, 'V'), (3, 0, 'H'), (3, 1, 'H'), (3, 2, 'H'), (0, 0, 'H'),\n (0, 0, 'V'), (1, 0, 'H'), (1, 0, 'V'), (2, 0, 'H'), (2, 0, 'V'), (0, 1, 'H'), (0, 1, 'V'),\n (1, 1, 'H'), (1, 1, 'V'), (2, 1, 'H'), (2, 1, 'V'), (0, 2, 'H'), (0, 2, 'V'), (1, 2, 'H'),\n (1, 2, 'V'), (2, 2, 'H'), (2, 2, 'V')]\n presque_toutes_lignes = toutes_lignes.copy()\n presque_toutes_lignes.remove((0, 0, 'H'))\n aucune_ligne = []\n une_ligne = [(0, 0, 'H')]\n\n tests.append(TestSortieObjet(planche_vide, 'test_obtenir_coups_planche_vide', {}, toutes_lignes))\n tests.append(\n TestSortieObjet(planche_presque_vide, 'test_obtenir_coups_planche_presque_vide', {}, presque_toutes_lignes))\n tests.append(TestSortieObjet(planche_pleine, 'test_obtenir_coups_planche_pleine', {}, aucune_ligne))\n tests.append(TestSortieObjet(planche_presque_pleine, 'test_obtenir_coups_planche_presque_pleine', {}, une_ligne))\n\n return GroupeTest('Planche.obtenir_coups_possibles', tests, 'obtenir_coups_possibles')\n\n\ndef tests_etape_2():\n print(\"*******TESTS ÉTAPE 2*******\\n\")\n testeur = Testeur(groupe_coup_dans_les_limites(), groupe_est_pleine(), groupe_jouer_coup(), groupe_valider_coup(),\n groupe_obtenir_coups_possible())\n testeur.test()\n\n\ndef groupe_obtenir_idx_boites_a_valider():\n tests = []\n\n p_h = planche_vide()\n p_h.position_dernier_coup = (1, 1, 'H')\n\n tests.append(\n TestSortieObjet(obj_to_func(p_h), 'test_obtenir_idx_boites_a_valider_ligne_horizontale_milieu', {}, [(0, 1),\n (1, 1)]))\n\n p_v = planche_vide()\n p_v.position_dernier_coup = (1, 1, 'V')\n\n tests.append(\n TestSortieObjet(obj_to_func(p_v), 'test_obtenir_idx_boites_a_valider_ligne_verticale_milieu', {}, [(1, 0),\n (1, 1)]))\n\n p_h_haut = planche_vide()\n p_h_haut.position_dernier_coup = (0, 1, 'H')\n\n tests.append(\n TestSortieObjet(obj_to_func(p_h_haut), 'test_obtenir_idx_boites_a_valider_ligne_horizontale_haut', {},\n [(0, 1)]))\n\n p_h_bas = planche_vide()\n p_h_bas.position_dernier_coup = (3, 1, 'H')\n\n tests.append(\n TestSortieObjet(obj_to_func(p_h_bas), 'test_obtenir_idx_boites_a_valider_ligne_horizontale_bas', {}, [(2, 1)]))\n\n p_v_gauche = planche_vide()\n p_v_gauche.position_dernier_coup = (1, 0, 'V')\n\n tests.append(\n TestSortieObjet(obj_to_func(p_v_gauche), 'test_obtenir_idx_boites_a_valider_ligne_verticale_gauche', {},\n [(1, 0)]))\n\n p_v_droite = planche_vide()\n p_v_droite.position_dernier_coup = (1, 3, 'V')\n\n tests.append(\n TestSortieObjet(obj_to_func(p_v_droite), 'test_obtenir_idx_boites_a_valider_ligne_verticale_droite', {},\n [(1, 2)]))\n\n return GroupeTest('Planche.obtenir_idx_boites_a_valider', tests, 'obtenir_idx_boites_a_valider')\n\n\ndef groupe_compter_lignes_jouees_boite():\n tests = []\n\n planche = planche_vide()\n\n tests.append(\n TestSortieObjet(obj_to_func(planche), 'test_compter_lignes_jouees_boite_sans_ligne_jouee',\n {'idx_boite': (0, 0)}, 0))\n\n planche = planche_vide()\n planche.lignes[(0, 0, 'H')].jouee = True\n tests.append(\n TestSortieObjet(obj_to_func(planche), 'test_compter_lignes_jouees_boite_ligne_haut_jouee',\n {'idx_boite': (0, 0)}, 1))\n\n planche = planche_vide()\n planche.lignes[(2, 1, 'H')].jouee = True\n tests.append(\n TestSortieObjet(obj_to_func(planche), 'test_compter_lignes_jouees_boite_ligne_bas_jouee', {'idx_boite': (1, 1)},\n 1))\n\n planche = planche_vide()\n planche.lignes[(1, 2, 'V')].jouee = True\n tests.append(\n TestSortieObjet(obj_to_func(planche), 'test_compter_lignes_jouees_boite_ligne_gauche_jouee',\n {'idx_boite': (1, 2)}, 1))\n\n planche = planche_vide()\n 
planche.lignes[(2, 2, 'V')].jouee = True\n tests.append(\n TestSortieObjet(obj_to_func(planche), 'test_compter_lignes_jouees_boite_ligne_droite_jouee',\n {'idx_boite': (2, 1)}, 1))\n\n planche = planche_pleine()\n tests.append(\n TestSortieObjet(obj_to_func(planche), 'test_compter_lignes_jouees_boite_toutes_lignes_jouees',\n {'idx_boite': (1, 1)}, 4))\n\n return GroupeTest('Planche.compter_lignes_jouees_boite', tests, 'compter_lignes_jouees_boite')\n\n\ndef groupe_bilan_boites():\n tests = []\n\n tests.append(TestSortieObjet(planche_pleine, 'test_bilan_boites_5_rouges_4_bleus', {}, (4, 5)))\n\n planche_rouge = planche_pleine()\n\n for boite in planche_rouge.boites.values():\n boite.couleur = 'rouge'\n\n tests.append(TestSortieObjet(obj_to_func(planche_rouge), 'test_bilan_boites_planche_pleine_rouge', {}, (0, 9)))\n\n return GroupeTest('Planche.bilan_boites', tests, 'bilan_boites')\n\n\ndef tests_etape_3():\n print(\"*******TESTS ÉTAPE 3*******\\n\")\n testeur = Testeur(groupe_obtenir_idx_boites_a_valider(), groupe_compter_lignes_jouees_boite(),\n groupe_bilan_boites())\n testeur.test()\n\n\ndef tests_complets():\n print(\"*******TESTS COMPLETS*******\\n\")\n testeur = Testeur(groupe_init_boites(), groupe_init_lignes(), groupe_coup_dans_les_limites(), groupe_est_pleine(),\n groupe_jouer_coup(), groupe_valider_coup(), groupe_obtenir_coups_possible(),\n groupe_obtenir_idx_boites_a_valider(), groupe_compter_lignes_jouees_boite(),\n groupe_bilan_boites())\n testeur.test()\n\n\nif __name__ == \"__main__\":\n colorama.init()\n # Commentez et décommentez les tests correspondant à l'étape à laquelle vous êtes rendu\n # pour conserver une sortie claire.\n # tests_etape_1()\n # tests_etape_2()\n # tests_etape_3()\n tests_complets()","sub_path":"pipopipette/tests_planche.py","file_name":"tests_planche.py","file_ext":"py","file_size_in_byte":26312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87383274","text":"import sys\n\nfrom PySide6.QtGui import Qt\nfrom PySide6.QtWidgets import QWidget, QLCDNumber, QSlider, QVBoxLayout, QApplication\n\n\nclass Ex(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n lcd = QLCDNumber(self)\n sld = QSlider(Qt.Horizontal, self)\n\n vbox = QVBoxLayout()\n vbox.addWidget(lcd)\n vbox.addWidget(sld)\n\n self.setLayout(vbox)\n sld.valueChanged.connect(lcd.display)\n\n self.setGeometry(300, 300, 250, 250)\n self.setWindowTitle(\"Signal & slot\")\n self.show()\n\n\ndef main():\n app = QApplication(sys.argv)\n ex = Ex()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/pyside_demo/zc15.py","file_name":"zc15.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68425016","text":"'''\nGiven a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.\n\nEach number in C may only be used once in the combination.\n\nNote:\nAll numbers (including target) will be positive integers.\nThe solution set must not contain duplicate combinations.\nFor example, given candidate set [10, 1, 2, 7, 6, 1, 5] and target 8,\nA solution set is:\n[\n [1, 7],\n [1, 2, 5],\n [2, 6],\n [1, 1, 6]\n]\n'''\nclass Solution(object):\n def combinationSum2(self, arr, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n self.result = []\n 
self.helper(sorted(arr), target, [], 0)\n return self.result\n\n def helper(self, arr, target, singleResult, index):\n if target < 0:\n return\n if target == 0:\n self.result.append(singleResult[:])\n return\n for i in range(index, len(arr)):\n if i > index and arr[i] == arr[i - 1]: continue\n singleResult.append(arr[i])\n self.helper(arr, target - arr[i], singleResult, i + 1)\n singleResult.pop()\n","sub_path":"Python/leetcode/40-CombinationSumII.py","file_name":"40-CombinationSumII.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501022809","text":"book = input().split()\npages = input().split()\nnum = int(book[1])\nnotebooks = 1\nfor page in pages:\n if int(page) > num:\n notebooks += 1\n num = 100 - int(page)\n else:\n num -= int(page)\n\nprint(notebooks)\n","sub_path":"Week9t3/Problem A - Charles.py","file_name":"Problem A - Charles.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"574405089","text":"\"\"\" Calculate features given protein FASTA sequence\n\"\"\"\n\nimport numpy as np\n\nfasta_alph = 'ARNDCEQGHILKMFPSTWYV'\n\ndef parseFasta(fasta_input):\n \"\"\" parse FASTA-formatted input\n \"\"\"\n\n # initialize sequence id and string to an empty string\n seq_id = ''\n seq_str = ''\n\n # iterate over each line\n for line in fasta_input.split(\"\\n\"):\n\n # do not read if empty line\n if not line:\n continue\n\n if(seq_id == '' and seq_str == ''):\n if(line[0] == \">\"):\n seq_id = line.split()[0][1:]\n if(seq_id == ''):\n raise Exception('FASTA file error: Empty id encountered.')\n elif(line[0] == '#'):\n pass\n elif(line.strip()):\n # non-empty line...\n # print(line.strip())\n raise Exception('Error in FASTA file.')\n else:\n if((line.strip() == '' or line[0] == '>') or line[0] == '#'):\n yield (seq_id, seq_str)\n seq_str = ''\n if(line[0] == '>'):\n seq_id = line.split()[0][1:]\n else:\n seq_id = ''\n else:\n seq_str += line.strip()\n\n # return the last sequence (not if the file was empty)\n if not(seq_id == ''):\n yield (seq_id, seq_str)\n\ndef letter_count(seq, alph):\n\n return np.array([seq.count(l) for l in alph], dtype=int)\n\ndef letter_composition(seq, alph):\n\n return letter_count(seq, alph) / float(len(seq))\n\ndef extractFeatures(fasta_input):\n \"\"\" Main function for extracting features\n\n Features currently implemented:\n - amino acid composition\n\n ARGS\n fasta_input: string of FASTA-formatted sequences\n\n RETURNS\n IDs (list of str): protein names\n feats (2D numpy array): real-valued protein features\n \"\"\"\n\n # parse sequences\n seqtuples = list(parseFasta(fasta_input))\n\n # calculate matrix of feature values\n nseq = len(seqtuples)\n feats = [] \n IDs = []\n for i, (seqID, seq) in enumerate(seqtuples):\n \n IDs.append(seqID)\n feats.append(letter_composition(seq, fasta_alph))\n\n return IDs, np.array(feats) \n\n\n\n","sub_path":"src/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231218453","text":"import os\nimport shutil\nimport time\nfrom pathlib import Path\n\nimport pytest\n\nfrom astrality.filewatcher import DirectoryWatcher\n\n\n@pytest.yield_fixture\ndef test_files():\n \"\"\"Return paths related to two test files and cleanup afterwards.\"\"\"\n watched_directory = Path('/tmp/astrality')\n test_file1 = 
watched_directory / 'tmp_test_file1'\n\n recursive_dir = watched_directory / 'test_folder'\n test_file2 = recursive_dir / 'tmp_test_file2'\n\n yield watched_directory, recursive_dir, test_file1, test_file2\n\n # Cleanup files\n if test_file1.is_file():\n os.remove(test_file1)\n if test_file2.is_file():\n os.remove(test_file2)\n if recursive_dir.is_dir():\n shutil.rmtree(recursive_dir)\n\n\n@pytest.yield_fixture\ndef watch_dir():\n \"\"\"Instanciate a directory watcher and stop it after its use.\"\"\"\n class EventSaver:\n \"\"\"Mock class for testing callback function.\"\"\"\n\n def __init__(self):\n self.called = 0\n\n def save_argument(self, path: Path) -> None:\n self.called += 1\n self.argument = path\n\n event_saver = EventSaver()\n\n # Watch a temporary directory\n watched_directory = Path('/tmp/astrality')\n dir_watcher = DirectoryWatcher(\n directory=watched_directory,\n on_modified=event_saver.save_argument,\n )\n\n yield dir_watcher, event_saver\n\n dir_watcher.stop()\n\n\n@pytest.mark.slow\ndef test_filesystem_watcher(test_files, watch_dir):\n \"\"\"\n Test correct callback invocation on directory watching.\n\n Sometimes the on_modified function is called several times by watchdog,\n for a unknown reason. It might be other tests which interfer. We therefore\n check if the lower bound of calls is satisfied, but do not test the exact\n number of calls to on_modified.\n \"\"\"\n watched_directory, recursive_dir, test_file1, test_file2 = test_files\n dir_watcher, event_saver = watch_dir\n\n # Start watching the directory\n dir_watcher.start()\n\n # Nothing has been modified yet\n assert not hasattr(event_saver, 'argument')\n assert event_saver.called == 0\n\n # Create an empty file\n test_file1.touch()\n\n # New files are not considered \"modified\"\n time.sleep(0.7)\n assert not hasattr(event_saver, 'argument')\n assert event_saver.called == 0\n\n # But when we write to it, it is considered \"modified\"\n with open(test_file1, 'w') as file:\n file.write('test_content')\n\n time.sleep(0.7)\n assert event_saver.argument == test_file1\n assert event_saver.called >= 1\n\n # Create a directory in the watched directory\n recursive_dir.mkdir(parents=True)\n\n # Subdirectories are not of interest\n time.sleep(0.7)\n assert event_saver.argument == test_file1\n assert event_saver.called >= 1\n\n # Create a file in the subdirectory\n test_file2.write_text('test')\n\n # Both the touch event and the write event are considered of interest\n time.sleep(0.7)\n assert event_saver.argument == test_file2\n assert event_saver.called >= 2\n","sub_path":"astrality/tests/test_filewatcher.py","file_name":"test_filewatcher.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631093665","text":"#-------------------------------------------------------------------------------\n# Name: SerialDemux Example\n# Purpose:\n#\n# Author: Derryn Harvie\n#\n# Created: 05/05/2014\n# Copyright: No\n# Licence: Public Domain\n#-------------------------------------------------------------------------------\nimport serial\nfrom demux import SerialDemux\n\ndef main():\n s_port = serial.Serial(9, 115200)\n multiplexer = SerialDemux()\n multiplexer.setHandler(dataHandler, 31)\n\n try:\n while(True):\n serialByte = s_port.read()\n multiplexer.processByte(ord(serialByte))\n except KeyboardInterrupt:\n s_port.close()\n\ndef dataHandler(value, address):\n print('A different handler for address: ' + str(address))\n print('Received :' + 
str(value) + ' hex: ' + hex(value))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/demux/test_demux.py","file_name":"test_demux.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323048945","text":"# -*- coding: utf-8\n\n# pyKol - Gestion de colles en CPGE\n# Copyright (c) 2018 Florian Hatat\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"Vues de manipulation des semaines de colles.\"\"\"\n\nfrom datetime import timedelta\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import transaction, models\nfrom django.core.exceptions import PermissionDenied\n\nfrom pykol.models.base import Classe\nfrom pykol.models.colles import Semaine, CollesReglages\nfrom pykol.forms.colloscope import SemaineFormSet, \\\n\t\tSemaineNumeroGenerateurForm\n\n@login_required\ndef semaines(request, slug):\n\t\"\"\"\n\tVue qui permet de créer la liste des semaines de colle pour une\n\tclasse donnée.\n\n\tAu lieu de saisir toutes les semaines une à une, cette vue propose\n\tla liste de l'ensemble des semaines (démarrant les lundis) de\n\tl'année scolaire en cours. Le professeur en charge du colloscope\n\tpeut alors sélectionner celles qui figureront effectivement dans le\n\tcolloscope de la classe. 
Il peut numéroter les semaines manuellement\n\tou bien fournir un format pour générer automatiquement les numéros.\n\t\"\"\"\n\tclasse = get_object_or_404(Classe, slug=slug)\n\tif not request.user.has_perm('pykol.change_colloscope', classe):\n\t\traise PermissionDenied\n\n\ttry:\n\t\tcolles_reglages = CollesReglages.objects.get(classe=classe)\n\texcept CollesReglages.DoesNotExist:\n\t\tcolles_reglages = CollesReglages(classe=classe)\n\t\tcolles_reglages.save()\n\n\tformset_prefix = \"semaines\"\n\n\tif request.method == 'POST':\n\t\tformset = SemaineFormSet(request.POST, prefix=formset_prefix,\n\t\t\t\t\tform_kwargs={'classe': classe})\n\t\tgenform = SemaineNumeroGenerateurForm(request.POST,\n\t\t\t\tinstance=colles_reglages, prefix=\"gen\")\n\n\n\t\tgenform.save()\n\n\t\tif genform.is_valid() and genform.cleaned_data['numeros_auto']:\n\t\t\tformset.full_clean()\n\t\t\tnew_formset_data = {}\n\n\t\t\tid_colle = 0\n\n\t\t\tfor id_semaine, form in enumerate(formset.forms):\n\t\t\t\tfor field in ('debut', 'fin', 'est_colle'):\n\t\t\t\t\tnew_formset_data['{prefix}-{id_semaine}-{field}'.format(\n\t\t\t\t\t\tprefix=formset_prefix,\n\t\t\t\t\t\tid_semaine=id_semaine,\n\t\t\t\t\t\tfield=field)] = form.cleaned_data.get(field)\n\n\t\t\t\tif form.cleaned_data.get('semaine'):\n\t\t\t\t\tnew_formset_data['{prefix}-{id_semaine}-{field}'.format(\n\t\t\t\t\t\tprefix=formset_prefix,\n\t\t\t\t\t\tid_semaine=id_semaine,\n\t\t\t\t\t\tfield='semaine')] = form.cleaned_data.get('semaine').pk\n\n\t\t\t\tif form.cleaned_data['est_colle']:\n\t\t\t\t\tnew_formset_data['{prefix}-{id_semaine}-{field}'.format(\n\t\t\t\t\t\tprefix=formset_prefix,\n\t\t\t\t\t\tid_semaine=id_semaine,\n\t\t\t\t\t\tfield='numero')] = \\\n\t\t\t\t\t\t\tgenform.cleaned_data['numeros_format'].format(\n\t\t\t\t\t\t\tnumero=id_colle + 1,\n\t\t\t\t\t\t\tquinzaine=id_colle // 2 + 1,\n\t\t\t\t\t\t\tparite=(id_colle + 1) % 2,\n\t\t\t\t\t\t\tparite_alpha='AB'[id_colle % 2])\n\n\t\t\t\t\tid_colle += 1\n\n\t\t\tfor field in ('TOTAL_FORMS', 'INITIAL_FORMS',\n\t\t\t\t\t'MAX_NUM_FORMS',):\n\t\t\t\tfield_name = '{prefix}-{field}'.format(prefix=formset_prefix,\n\t\t\t\t\t\tfield=field)\n\t\t\t\tnew_formset_data[field_name] = formset.data[field_name]\n\n\t\t\t# On ne remplace le formset que si le générateur a réussi à\n\t\t\t# compléter les semaines manquantes\n\t\t\tformset = SemaineFormSet(new_formset_data,\n\t\t\t\t\tprefix=formset_prefix,\n\t\t\t\t\tform_kwargs={'classe': classe})\n\n\t\tif formset.is_valid():\n\t\t\twith transaction.atomic():\n\t\t\t\tfor id_semaine, data in enumerate(formset.cleaned_data):\n\t\t\t\t\tif data['semaine']:\n\t\t\t\t\t\tif data['est_colle']:\n\t\t\t\t\t\t\tsemaine = data['semaine']\n\t\t\t\t\t\t\tsemaine.debut = data['debut']\n\t\t\t\t\t\t\tsemaine.fin = data['fin']\n\t\t\t\t\t\t\tsemaine.numero = data['numero']\n\t\t\t\t\t\t\tsemaine.save()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata['semaine'].delete()\n\n\t\t\t\t\telif data['est_colle']:\n\t\t\t\t\t\tSemaine(debut=data['debut'],\n\t\t\t\t\t\t\t\tfin=data['fin'],\n\t\t\t\t\t\t\t\tnumero=data['numero'],\n\t\t\t\t\t\t\t\tclasse=classe).save()\n\n\t\t\treturn redirect('colloscope_semaines', classe.slug)\n\n\telse:\n\t\t# Calcul des semaines de toute l'année\n\t\ttoutes_semaines = []\n\n\t\t# Générateur pour itérer tous les lundis de l'année\n\t\tdef lundirange():\n\t\t\tannee = classe.annee\n\t\t\tlundi = annee.debut - timedelta(days=annee.debut.weekday())\n\t\t\twhile lundi < annee.fin:\n\t\t\t\tif not annee.est_vacances(lundi):\n\t\t\t\t\tyield lundi\n\t\t\t\tlundi += 
timedelta(days=7)\n\n\t\t# On récupère déjà la liste des semaines existantes\n\t\ttoutes_semaines = list(Semaine.objects.filter(classe=classe).annotate(\n\t\t\tsemaine=models.F('pk'), est_colle=models.Value(True,\n\t\t\t\toutput_field=models.BooleanField()),\n\t\t\t).values('debut', 'fin', 'est_colle', 'numero', 'semaine'))\n\n\t\t# On prévoit des semaines vides si l'utilisateur souhaite les\n\t\t# cocher, pour les lundis qui manquent.\n\t\tlundis_existants = set([s['debut'] for s in toutes_semaines])\n\n\t\tfor lundi in lundirange():\n\t\t\tif lundi in lundis_existants:\n\t\t\t\tcontinue\n\t\t\ttoutes_semaines.append({\n\t\t\t\t'debut': lundi,\n\t\t\t\t'fin': lundi + timedelta(days=6),\n\t\t\t\t'est_colle': False,\n\t\t\t\t'numero': None,\n\t\t\t})\n\n\t\ttoutes_semaines.sort(key=lambda s: s['debut'])\n\n\t\tformset = SemaineFormSet(initial=toutes_semaines,\n\t\t\t\tprefix=formset_prefix,\n\t\t\t\tform_kwargs={'classe': classe})\n\t\tgenform = SemaineNumeroGenerateurForm(\n\t\t\t\tinstance=colles_reglages,\n\t\t\t\tprefix=\"gen\")\n\n\treturn render(request, 'pykol/colloscope/semaines.html',\n\t\t\tcontext={\n\t\t\t\t'classe': classe,\n\t\t\t\t'formset': formset,\n\t\t\t\t'genform': genform,\n\t\t\t\t})\n\n","sub_path":"views/colloscope/semaines.py","file_name":"semaines.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278882570","text":"DEBUG = True\nREVERSE_TASK = False\n\nimport copy\n\ndef print_matr(matr, system_of_nulls = None, quoted_zeros = None):\n if system_of_nulls == None:\n system_of_nulls = []\n for i in range(0, len(matr)):\n for j in range(0, len(matr)):\n if matr[i][j] == 0 and ((i, j) in system_of_nulls):\n print('*', end=' ')\n elif (quoted_zeros is not None) and matr[i][j] == 0 and ((i, j) in quoted_zeros):\n print('\\'', end=' ')\n else:\n print(matr[i][j], end=' ')\n print()\n print()\n\ndef debug_print(outp, system_of_nulls = None, quoted_zeros = None):\n if DEBUG:\n if isinstance(outp, str):\n print(outp)\n else:\n print_matr(outp, system_of_nulls, quoted_zeros)\n\ndef matrix_preparation(matr):\n for j in range(0, len(matr)):\n if not REVERSE_TASK:\n min_elem = matr[0][j]\n else:\n max_elem = matr[0][j]\n for i in range(0, len(matr)):\n if not REVERSE_TASK:\n min_elem = matr[i][j] if matr[i][j] < min_elem else min_elem\n else:\n max_elem = matr[i][j] if matr[i][j] > max_elem else max_elem\n for i in range(0, len(matr)):\n if not REVERSE_TASK:\n matr[i][j] -= min_elem\n else:\n matr[i][j] = max_elem - matr[i][j]\n for i in range(0, len(matr)):\n min_elem = min(matr[i])\n matr[i] = list(map(lambda x: x - min_elem, matr[i]))\n debug_print(matr)\n\ndef check_null(system_of_nulls, a, b):\n for i in range(0, len(system_of_nulls)):\n if system_of_nulls[i][0] == a:\n return False\n if system_of_nulls[i][1] == b:\n return False\n return True\n\ndef find_independent_nulls(matr):\n system_of_nulls = []\n for i in range(0, len(matr)):\n for j in range(0, len(matr)):\n if (matr[i][j] == 0) and check_null(system_of_nulls, i, j):\n system_of_nulls.append((i, j))\n return system_of_nulls\n\ndef is_any_unmarked_nulls(matr, marked_columns, marked_rows):\n umarked_columns = set(range(0, len(matr))) - set(marked_columns)\n umarked_rows = set(range(0, len(matr))) - set(marked_rows)\n for i in umarked_rows:\n for j in umarked_columns:\n if matr[i][j] == 0:\n return True\n return False\n\ndef create_unmarked_nulls(matr, marked_columns, marked_rows):\n umarked_columns = set(range(0, 
len(matr))) - set(marked_columns)\n umarked_rows = set(range(0, len(matr))) - set(marked_rows)\n min_elem = min([matr[i][j] for i in umarked_rows for j in umarked_columns])\n for i in umarked_rows:\n for j in range(0, len(matr)):\n matr[i][j] -= min_elem\n for i in range(0, len(matr)):\n for j in marked_columns:\n matr[i][j] += min_elem\n return False\n\ndef row_contains_marked_and_unmarked_nulls(matr, marked_columns, marked_rows, system_of_nulls):\n umarked_columns = set(range(0, len(matr))) - set(marked_columns)\n umarked_rows = set(range(0, len(matr))) - set(marked_rows)\n for i in umarked_rows:\n for j in umarked_columns:\n if matr[i][j] == 0:\n ret = False\n old_z = None\n for z in system_of_nulls:\n if z[0] == i:\n ret = True\n old_z = z\n break\n return ret, (i, j), old_z\n\ndef select_and_take(zeros, coord_type, coord, l_line, first_iteration):\n if not first_iteration:\n try:\n found = [z for z in zeros if (z[coord_type] == coord) and z not in l_line][0]\n except IndexError:\n return False\n else:\n found = zeros[len(zeros) - 1]\n l_line.append(found)\n return True\n\n\ndef build_l_line(stared_zeros, quoted_zeros):\n l_line = []\n first_iteration = True\n while True:\n coord = l_line[len(l_line) - 1][0] if len(l_line) > 0 else 0\n result = select_and_take(quoted_zeros, 0, coord,l_line, first_iteration)\n if not result:\n break\n first_iteration = False\n coord = l_line[len(l_line) - 1][1]\n select_and_take(stared_zeros, 1, coord,l_line, first_iteration)\n return l_line\n\ndef calc_func(original_matr, solution):\n result = 0\n for i in range(0, len(solution)):\n for j in range(0, len(solution)):\n result += solution[i][j] * original_matr[i][j]\n return result\n\nmatr = [[6, 10, 4, 5, 8],\n [8, 10, 7, 9, 11],\n [4, 8, 9, 10, 6],\n [5, 9, 6, 11, 10],\n [6, 11, 6, 3, 9]]\n\nmatr = [[1, 1, 1, 1, 1],\n [1, 10, 4, 3, 6],\n [1, 6, 9, 5, 2],\n [1, 8, 5, 2, 4],\n [1, 4, 2, 9, 3]]\n\n\noriginal_matr = copy.deepcopy(matr)\nprint_matr(matr)\nmatrix_preparation(matr)\nsystem_of_nulls = find_independent_nulls(matr)\niteration_num = 0\nwhile len(system_of_nulls) < len(matr):\n iteration_num = iteration_num + 1\n print(\"iteration %d\" % (iteration_num))\n debug_print(matr, system_of_nulls)\n marked_columns = [system_of_nulls[i][1] for i in range(0, len(system_of_nulls))]\n marked_rows = []\n quoted_zeros = []\n cont = True\n while cont:\n if not is_any_unmarked_nulls(matr, marked_columns, marked_rows):\n create_unmarked_nulls(matr, marked_columns, marked_rows)\n (cont, new_z, old_z) = row_contains_marked_and_unmarked_nulls(matr, marked_columns, marked_rows, system_of_nulls)\n if cont:\n marked_columns.remove(old_z[1])\n marked_rows.append(new_z[0])\n quoted_zeros.append(new_z)\n l_line = build_l_line(system_of_nulls, quoted_zeros)\n debug_print(matr, system_of_nulls, quoted_zeros)\n for i in range(0, len(l_line)):\n if i % 2 == 1:\n system_of_nulls.remove(l_line[i])\n else:\n system_of_nulls.append(l_line[i])\ndebug_print(matr, system_of_nulls)\nresult = [[1 if (i, j) in system_of_nulls else 0 for j in range(0, len(matr))] for i in range(0, len(matr))]\nprint_matr(result)\nprint('Solution is: %d' % (calc_func(original_matr, result)))","sub_path":"vengrian.py","file_name":"vengrian.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309718480","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 6 19:22:32 2017\n@author: iv\n\"\"\"\n###############################\n## 
SELECT PLATFORM: ##########\n#############################\n###-- WINDOWS OR LINUX --###\n###########################\nimport sys\nsys.stdout.flush()\nif sys.platform == 'win32':\n path = 'C:\\\\Users\\\\ivan.ortiz\\\\Documents\\\\MisProgramas_Iv_PYTHON\\\\CRIPTOMONEDAS\\\\'\n print ('\\n#### Windows System ####')\n system = sys.platform\nelse:\n path = '/home/iv/Desktop/MasterBIGDATA/CRIPTOMONEDAS/'\n print ('\\n#### Linux System ####')\n system = sys.platform\n\nprint('\\n' + sys.platform + ' System\\n')\nprint ('#####################################')\nprint ('#####################################')\nprint ('\\n### Importing Libraries... ###')\n\nimport time\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport csv\nimport json\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport lxml\nimport urllib\nimport statsmodels\nimport requests as rq\nimport scipy\nimport tables\nimport hmac, hashlib, base64\nfrom requests.auth import AuthBase\nimport datetime as dt\nimport timeit\nfrom scipy import stats\nimport tqdm\n#import math\n#import pylab as pl\n#import seaborn as sns\n#import pylab\n#from pandas.tools.plotting import scatter_matrix\n#import sklearn\n#import nltk\n#from pandas_datareader import wb, DataReader\n#import wget\n\nhora_ejecucion = 12 # time to stop and restart en utc --- +1 invierno +2 verano\nminuto_ejecucion = 55\nhora_inicio = datetime.datetime.utcnow()\ncrypto = 'BTC-EUR'\n###########################################################################################\n## Create custom authentication for Exchange #############################################\n#########################################################################################\n#######################################################################\n## CUENTA REAL - ACTIVAR PARA HACER TRANSFER REALES ##################\n#####################################################################\nprint('\\n### Authenticating... 
###')\nclass CoinbaseExchangeAuth(AuthBase):\n def __init__(self, api_key, secret_key, passphrase):\n self.api_key = api_key\n self.secret_key = secret_key\n self.passphrase = passphrase\n\n def __call__(self, request):\n timestamp = str(time.time())\n message = timestamp + request.method + request.path_url + (request.body or '')\n hmac_key = base64.b64decode(self.secret_key)\n signature = hmac.new(hmac_key, message, hashlib.sha256)\n# signature = hmac.new(hmac_key, message.encode(), hashlib.sha256)\n signature_b64 = signature.digest().encode('base64').rstrip('\\n')\n# signature_b64 = base64.b64encode(signature.digest()).decode()\n\n request.headers.update({\n 'CB-ACCESS-SIGN': signature_b64,\n 'CB-ACCESS-TIMESTAMP': timestamp,\n 'CB-ACCESS-KEY': self.api_key,\n 'CB-ACCESS-PASSPHRASE': self.passphrase,\n 'Content-Type': 'application/json'\n })\n return request\napi_url = 'https://api.pro.coinbase.com/' ## la real\nkiko = '' #sys.argv[1] # text\nsandra = '' #sys.argv[2] # text\npablo = '' #sys.argv[3] # text\nauth = CoinbaseExchangeAuth(kiko, sandra, pablo)\n#######################################################################\n#######################################################################\n\n#######################################################################\n## FIN CUENTA REAL ###################################################\n#####################################################################\n\n######################################################################\n## GET ACCOUNTS #####################################################\n####################################################################\naccount = rq.get(api_url + 'accounts', auth=auth)\nprint (account.json())\naccount1 = account.json()\n# Disp_iniciales\ndisp_ini = {}\nfor item in account1:\n disp_ini.update({item['currency']:item['available']})\n######################################################################\n### FUNCIONES #######################################################\n####################################################################\nprint ('\\n### Defining functions... 
###')\n\ndef sma(n,datos):\n if (len(datos) > n):\n media = sum(datos[-n:])/n\n return media\n else:\n return datos[0]\n\ndef ema(n,datos,alpha,media_ant):\n if len(datos) > n:\n expmedia = datos[-1]*alpha+(1-alpha)*media_ant[-1]\n return expmedia\n else:\n return datos[0]\n\ndef lag(n, df):\n for i in range(n):\n df['lag_%s' %(i+1)] = df['ltc_eur'].shift(i) - df['ltc_eur'].shift(i+1)\n\ndef percent(p_ini,p_fin):\n percen = (p_fin-p_ini)/abs(p_ini)\n return percen\n\ndef rsi(n,df1):\n u = []\n d = []\n for i in range(1,len(df1)):\n if df1[i]>=0:\n u.append(df1[i])\n else:\n d.append(df1[i])\n sumapos = sum(u)\n sumneg = sum(d)\n if (sumneg != 0):\n rs = abs(sumapos/sumneg)\n rsi_index = 100 - (100/(1+rs))\n else:\n rsi_index = 100\n return rsi_index\n\ndef compare_dates(df,fecha_inicio,fecha_final):\n # True for each timestamp that falls inside [fecha_inicio, fecha_final]\n valor = []\n for item in df:\n valor.append((time.strptime(item,'%Y-%m-%dT%H:%M:%S.%fZ') >= fecha_inicio) and (time.strptime(item,'%Y-%m-%dT%H:%M:%S.%fZ') <= fecha_final))\n return valor\n\ndef valor_op(side,size,price,fee):\n if side == 'buy':\n signo = -1\n elif side == 'sell':\n signo = 1\n fee = -float(fee)\n valor = signo*(float(size)*float(price)+float(fee))\n return valor\n\ndef assign_serial(id_number, serial_dicc):\n if id_number in serial_dicc.keys():\n valor = serial_dicc[id_number]\n else:\n valor = 0\n return valor\n###############################################################################\n##############################################################################\n\n######################################################################\n##### INICIO tramo para datos anteriores ###########################\n###################################################################\nvect_hist = {}\ncifra_origen = 1000000\nfinal1 = 0\ncomp = False\ncont = 0\npag_historic = 3000 #1000\nprint ('### Gathering Data... 
')\nb = pd.DataFrame()\nfor i in tqdm.tqdm([10000000,1000000,100000,10000,1000,100]):\n while comp == False:\n r = rq.get(api_url + 'products/'+ crypto +'/trades?after=%s' %(cifra_origen+cont*i), auth = auth) # va de 100 en 100\n try:\n origen1 = [x['trade_id'] for x in r.json()]\n except:\n continue\n final = origen1[0]\n comp = (final == final1)\n coincide = cont-1\n final1 = final\n cont += 1\n\n cifra_origen=cifra_origen + (coincide-1)*i\n cont=0\n comp = False\n\nif system == 'linux2':\n for i in tqdm.trange (pag_historic): # 200 SON UNOS 12 DIAS APROX\n r = rq.get(api_url + 'products/' + crypto + '/trades?after=%s' %(cifra_origen+coincide*100-i*100), auth = auth)\n try:\n a = [x for x in r.json()]\n a2 = pd.DataFrame(a)\n b = b.append(a2)\n except:\n continue\n\nhist_df = b.sort_values('trade_id', ascending = True)\nhist_df.to_csv('btc_tot.csv', sep = ',')\n\n\n\n\n","sub_path":"Lechuga_DEF_v1.0_HISTORICO_COMPLETO.py","file_name":"Lechuga_DEF_v1.0_HISTORICO_COMPLETO.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564141184","text":"\nfrom classy import Class\nfrom matplotlib.colors import LogNorm\nfrom scipy.interpolate import interp1d\nfrom bcoeff import bcoeff\nimport matplotlib.pyplot as plt\nimport scipy.constants as const\nimport math\nimport os\nimport numpy as np\nimport warnings\nimport csv\nimport sys\n\n\ndef lscoeff(self, data, mv, Massbins):\n\n\t####################################################################\n #### Store the redshifts where bcc fit and bcc Ls are available in arrays\n\tred2 = [0.0,0.5,1.0,2.0]\n\tl2= len(red2)\n\t\n\t####################################################################\n\t#### Store the mass bins available in an array\n\tmbins = ['M1', 'M2', 'M3', 'M4']\n\n\t\n\t####################################################################\n\t#### get the coefficients from the dat files in the data directory \n\t#### get the large scale amplitude of bcc at different z and for different neutrino masses\n\tself.data_directory = data.path['root']\n\n\t####################################################################\n\t#### get the rescaling coefficients according to neutrino mass\n\t#### if you want to change the large sclae bias from Tinker to Crocce + ST \n\t#### change the line 'LS_z='+str(i)+'_.txt' in 'LS2_z='+str(i)+'_.txt'\n\tbcc_LS000 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/0.0eV/large_scale/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = mbins.index(j)\n\t\t\tbcc_LS000[ind,count] = f[ind2]\n\t#------------------------------\n\tbcc_LS003 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/other neutrinos masses/0.03/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = mbins.index(j)\n\t\t\tbcc_LS003[ind,count] = f[ind2]\n\t#------------------------------\n\tbcc_LS006 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/other neutrinos masses/0.06/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = 
mbins.index(j)\n\t\t\tbcc_LS006[ind,count] = f[ind2]\n\t#------------------------------\n\tbcc_LS010 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/other neutrinos masses/0.10/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = mbins.index(j)\n\t\t\tbcc_LS010[ind,count] = f[ind2]\n\t#------------------------------\n\tbcc_LS013 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/other neutrinos masses/0.13/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = mbins.index(j)\n\t\t\tbcc_LS013[ind,count] = f[ind2]\n\t#------------------------------\n\tbcc_LS015 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/0.15eV/large_scale/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = mbins.index(j)\n\t\t\tbcc_LS015[ind,count] = f[ind2]\n\t#------------------------------\n\tbcc_LS030 = np.zeros((l2,len(Massbins)))\n\tfor i in red2:\n\t\tdat_file_path = os.path.join(self.data_directory, 'BE_HaPPy/coefficients/other neutrinos masses/0.30/'\\\n\t\t'LS_z='+str(i)+'_.txt')\n\t\tf = np.loadtxt(dat_file_path)\n\t\tind = red2.index(i)\n\t\tfor count,j in enumerate(Massbins):\n\t\t\tind2 = mbins.index(j)\n\t\t\tbcc_LS030[ind,count] = f[ind2]\n\n\t###############################################################\n\t# return the pair (massless bcc_LS, rescaled bcc_LS) matching the requested neutrino mass\n\tif mv == 0.0:\n\t\treturn bcc_LS000, bcc_LS000\n\telif mv == 0.03:\n\t\treturn bcc_LS000, bcc_LS003\n\telif mv == 0.06:\n\t\treturn bcc_LS000, bcc_LS006\n\telif mv == 0.10:\n\t\treturn bcc_LS000, bcc_LS010\n\telif mv == 0.13:\n\t\treturn bcc_LS000, bcc_LS013\n\telif mv == 0.15:\n\t\treturn bcc_LS000, bcc_LS015\n\telif mv == 0.30:\n\t\treturn bcc_LS000, bcc_LS030\n","sub_path":"ls_coeff.py","file_name":"ls_coeff.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148163480","text":"# -*- coding: utf-8 -*-\nimport sys\nimport re\nimport time\nimport sched\nimport traceback\nimport threading\nimport datetime\nfrom PySide import QtCore\nfrom PySide.QtGui import QApplication,QLabel,QKeyEvent,QColor\n\nhtml = \"\"\"\n<html>\n <head>\n <style type=\"text/css\">\n h2 {\n color: rgb(252,250,242);\n margin-left: 20px; \n }\n QLabel{\n background-color: rgb(11,16,19);\n\n }\n </style>\n </head>\n <body>\n <div>\n <h2>text</h2> \n </div>\n </body>\n</html>\n\"\"\"\nscheduler = sched.scheduler(time.time,time.sleep)\n\nclass Qtsignal(QtCore.QObject):\n updatesignal = QtCore.Signal()\n\nclass MyLabel(QLabel):\n\n def __init__(self):\n labelstyle = re.findall('QLabel.*?}',html,re.S)[0]\n QLabel.__init__(self)\n self.setStyleSheet(labelstyle)\n self.initui() \n\n def initui(self):\n width,height = 340,50\n self.setGeometry(1366-width,768-height-40,width,height)\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint|QtCore.Qt.Tool)\n qt.updatesignal.connect(self.signalaction)# must pass the slot itself, without parentheses; reason unknown\n\n def signalaction(self): \n nowtime = datetime.datetime.fromtimestamp(time.time())\n text = 
u'{0:%Y}年{0:%m}月{0:%d}日{0:%H}时{0:%M}分{0:%S}秒'.format(nowtime)\n        self.setText(text)\n\n\n    def mouseDoubleClickEvent(self,event):\n        scheduler.cancel(scheduler.queue[0])\n        self.close()\n        sys.exit()\n\n    def keyPressEvent(self,event):\n        if event.key() == QtCore.Qt.Key_Escape:\n            scheduler.cancel(scheduler.queue[0])\n            self.close()\n            sys.exit()\n\n    def setText(self,text):\n        text = html.replace('text',text)\n        QLabel.setText(self,text)\n\n    def refunc(self):\n        qt.updatesignal.emit()\n        scheduler.enter(1, 0, self.refunc, ())\n        # print time.time()\n\n    def beginsched(self):\n        scheduler.enter(0, 0, self.refunc, ())\n        scheduler.run()\n\n\n\nif __name__ == '__main__':\n    qt = Qtsignal()\n    app = QApplication(sys.argv)\n    label = MyLabel()\n    t = threading.Thread(target=label.beginsched)\n    t.start()\n    label.show()\n    app.exec_()\n\n","sub_path":"GUI/myclock/myclock.py","file_name":"myclock.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"160874379","text":"import bottle\nimport os\nfrom beaker.middleware import SessionMiddleware\n\ndef create_app(conf_func):\n    application = bottle.default_app()\n    conf_func(application)\n\n    bottle.TEMPLATE_PATH = [ os.path.join(application.config['su1000.root_dir'], 'templates') ]\n\n    session_opts = {\n        'session.type': 'file',\n        'session.cookie_expires': 300,\n        'session.data_dir': application.config['su1000.sessions_dir'],\n        'session.auto': True\n    }\n\n    bottle.SimpleTemplate.defaults['vk_client_id'] = application.config['su1000.vk_client_id']\n    bottle.SimpleTemplate.defaults['urlparts'] = lambda: bottle.request.urlparts\n\n    application = SessionMiddleware(application, session_opts)\n    return application\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"309740887","text":"'''\nCreated on 5 Jan 2010\n\n@author: metson\n'''\n\nfrom WMCore.WebTools.RESTModel import RESTModel\nfrom SiteDB.REST import Validate\n\nclass Put(RESTModel):\n    '''\n    Put: Put data related to the sites known to SiteDB\n    \n    '''\n\n    def __init__(self, config):\n        '''\n        Initialise the RESTModel and add some methods to it.\n        '''\n        RESTModel.__init__(self, config)\n        \n        del self.methods['POST']\n        del self.methods['GET']\n        \n        self.methods['PUT'] = {'list': {'args':['cms_name', 'site_name', \n                                                'country', 'usage', 'url', \n                                                'logourl'],\n                                        'call': self.add_site,\n                                        'version': 1},\n                               'links': {'args':['name', 'url'],\n                                         'call': self.add_link,\n                                         'version': 1}}\n\n    def add_link(self, *args, **kwargs):\n        \"\"\"\n        Add a link to the site\n        Args: name, url\n        \"\"\"\n        input = self.sanitise_input(args, kwargs, 'add_link')\n        try:\n            sql = \"\"\"insert into sitelinks (SITEID, URL) values (\n                (select id from siteinfo where cms_name = :name), :url) \n                \"\"\"\n            binds = {'name': input['name'], 'url': input['url']}\n            self.dbi.processData(sql, binds)\n            return True\n        except Exception as e:\n            self.exception(\"Could not add link for input: %s\" % input)\n            data = {\"exception\": e, \n                    \"message\": \"Could not add link\",\n                    \"exceptiontype\": str(type(e)).split(\"'\")[1],\n                    'binds': binds}\n            return data\n    \n    def add_site(self, *args, **kwargs):\n        \"\"\"\n        Add a site to SiteDB\n        Args: cms_name, site_name, country, usage, url, logourl\n        \"\"\"\n        input = self.sanitise_input(args, kwargs, 'add_site')\n
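        # note: the :name-style tokens in the SQL below are bind variables,\n        # filled from the binds dict rather than interpolated into the statement\n        try:\n            sql = \"\"\"insert into siteinfo\n                    (CMS_NAME, SITE_NAME, COUNTRY, USAGE, URL, LOGOURL) values (\n                    :cms_name, :site_name, :country, :usage, :url, :logourl) \n            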
\"\"\"\n binds = {'cms_name': input['cms_name'],\n 'site_name': input['site_name'], \n 'url': input['url'],\n 'country': input['country'], \n 'usage': input['usage'], \n 'url': input['url'], \n 'logoirl': input['logourl']}\n self.dbi.processData(sql, binds)\n binds = {'cms_name': input['cms_name']}\n result = self.dbi.processData('''select * from siteinfo \n where cms_name = :cms_name''', binds)\n return self.formatDict(result)\n except:\n self.exception(\"Could not add link for input:\" % input)\n data = {\"exception\": e, \n \"message\": \"Could not add link\",\n \"execeptiontype\": str(type(e)).split(\"'\")[1],\n 'binds': binds}\n return data","sub_path":"Applications/SiteDB/src/python/SiteDB/REST/Sites/Put.py","file_name":"Put.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456418328","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32, Header\nfrom geometry_msgs.msg import PoseStamped, Pose, Quaternion\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nimport math\nfrom cv_bridge import CvBridge\nimport matplotlib.pyplot as plt\n\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport yaml\n\n\n#############################################\n# tl_detector.py\n# First cut: Udacity\n# rtarkunde: Modified as per the walkthrough\n#############################################\n\nOBSERV_DIST = 100\nSTATE_COUNT_THRESHOLD = 1\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n #sub6 = rospy.Subscriber('/image_raw', Image, self.image_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = []\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n self.light_positions = self.config['stop_line_positions']\n self.light_classifier = TLClassifier()\n self.camera_image = None\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.bridge = CvBridge()\n self.state_count = 0\n self.image_count = 0\n self.image_period = 6 # processes every sixth image for latency.\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n Args:\n msg (Image): image from car-mounted camera\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n self.image_count = self.image_count + 1\n # Process images only once in image period to counter latency\n if self.image_count % self.image_period == 0:\n light_wp, state = self.process_traffic_lights()\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == 
TrafficLight.RED \\\n or state == TrafficLight.YELLOW else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n\n self.state_count = self.state_count + 1\n\n def get_closest_waypoint(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n Returns:\n int: index of the closest waypoint in self.waypoints\n \"\"\"\n waypoint = 0\n waypoints_list = self.waypoints.waypoints\n dist = float('inf')\n for i in range(len(waypoints_list)):\n new_dist = self.calc_distance_points_3D(pose.position,\n waypoints_list[i].pose.pose.position)\n if dist > new_dist:\n dist = new_dist\n waypoint = i\n return waypoint\n\n\n\n def get_light_state(self):\n \"\"\" Get current color of the traffic light \"\"\"\n if self.camera_image is None:\n return False\n else:\n self.camera_image.encoding = \"rgb8\"\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\")\n #Perform classification here\n state = self.light_classifier.classify(cv_image)\n # Use last state if classifier is not sure\n if state == TrafficLight.UNKNOWN and self.last_state:\n state = self.last_state\n return state\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n\n light = None\n\n if hasattr(self, 'waypoints') and hasattr(self, 'pose'):\n\n min_dist = float('inf')\n\n car_wp = self.get_closest_waypoint(self.pose.pose)\n\n light_positions = self.light_positions\n\n i = 0\n\n for light_pos in light_positions:\n\n light_wp, tl_candid, tl_dist = self.calclulate_distance_to_traffic_light(car_wp, light_pos)\n\n if (tl_dist < min_dist) \\\n and (car_wp % len(self.waypoints.waypoints)) < (light_wp % len(self.waypoints.waypoints)) \\\n and (tl_dist < OBSERV_DIST) :\n closest_light_wp = light_wp\n min_dist = tl_dist\n light = tl_candid\n\n i += 1\n\n state = TrafficLight.UNKNOWN\n light_wp = -1\n\n if light:\n state = self.get_light_state()\n light_wp = closest_light_wp\n\n else:\n light_wp = -1\n state = TrafficLight.RED\n\n return light_wp, state\n\n\n def calclulate_distance_to_traffic_light(self, car_waypoint, light_position):\n tl_candid = self.create_tl(0.0, TrafficLight.UNKNOWN, light_position[0], light_position[1], 0.0)\n light_waypoint = self.get_closest_waypoint(tl_candid.pose.pose)\n tl_dist = self.calc_distance_coords_2D(self.waypoints.waypoints[car_waypoint].pose.pose.position.x,\n self.waypoints.waypoints[car_waypoint].pose.pose.position.y,\n self.waypoints.waypoints[light_waypoint].pose.pose.position.x,\n self.waypoints.waypoints[light_waypoint].pose.pose.position.y)\n return light_waypoint, tl_candid, tl_dist\n\n\n def calc_distance_coords_2D(self, x1, y1, x2, y2):\n return math.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n def calc_distance_points_3D(self, a, b):\n return math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n\n def create_tl(self, yaw, state, x, y, z):\n traffic_light = TrafficLight()\n\n traffic_light.header = Header()\n traffic_light.pose.header = Header()\n traffic_light.pose = PoseStamped()\n\n\n traffic_light.state = state\n\n traffic_light.pose.pose.position.x = x\n traffic_light.pose.pose.position.y = y\n 
traffic_light.pose.pose.position.z = z\n\n\n traffic_light.pose.header.stamp = rospy.Time.now()\n traffic_light.pose.header.frame_id = 'world'\n\n traffic_light.header.stamp = rospy.Time.now()\n traffic_light.header.frame_id = 'world'\n\n q = tf.transformations.quaternion_from_euler(0.0, 0.0, math.pi * yaw / 180.0)\n traffic_light.pose.pose.orientation = Quaternion(*q)\n\n return traffic_light\n\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"604770306","text":"import numpy as np\n\ndef compute_power_levels(serial_number):\n rack_id = np.tile(np.arange(1, 301), (300,1)) + 10\n\n grid = rack_id * np.array([np.arange(1, 301)]).T\n grid += serial_number\n grid *= rack_id\n\n lowest_hundred = (grid // 1000) * 10\n grid //= 100\n grid -= lowest_hundred\n grid -= 5\n\n return grid\n\ndef get_power_level(x, y, grid):\n return grid[y-1][x-1]\n\ndef test_power_levels(serial_number, expected, pos):\n grid = compute_power_levels(serial_number)\n assert expected == get_power_level(*pos, grid)\n\ndef sum_along_axis(matrix, indices, dims):\n sums = np.empty(dims)\n for i in range(dims[0]):\n c = np.r_[0, matrix[i].cumsum()][indices]\n sums[i] = c[:, 1] - c[:, 0]\n return sums\n\n\n# Testing power level computations\ntest_data = [\n (8, 4, (3, 5)),\n (57, -5, (122, 79)),\n (39, 0, (217, 196)),\n (71, 4, (101, 153))\n]\n\nfor test in test_data:\n test_power_levels(*test)\n\nserial_number = 8\n\ndef get_max_indices(matrix):\n x, y = np.unravel_index(np.argmax(matrix, axis=None), matrix.shape)\n return x+1, y+1\n\ndef find_best_fuel_cells(serial_number):\n grid_size = 300\n\n grid = compute_power_levels(serial_number)\n\n max_sum = 0\n\n indices = np.arange(grid_size + 1)\n for j in range(1, 300):\n sum_size = grid_size - j + 1\n idcs = np.array([indices[:-j], indices[j:]]).T\n\n hor_sums = sum_along_axis(grid, idcs, (grid_size, sum_size))\n total_sums = sum_along_axis(hor_sums.T, idcs, (sum_size, sum_size))\n\n if total_sums.max() > max_sum:\n max_sum = total_sums.max()\n result = *get_max_indices(total_sums), j\n\n return result\n\nserial_number = 5153\nprint(find_best_fuel_cells(serial_number))\n","sub_path":"11/chronalcharge.py","file_name":"chronalcharge.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"312541461","text":"from google.cloud import vision # Google Cloud Vision API client library\nfrom google.cloud.vision import types\n\ndef faces(path):\n client = vision.ImageAnnotatorClient() # instantiate a client for Vison API service\n with open(path, 'rb') as f: # open/close a file\n content = f.read() # read file into memory\n image = types.Image(content=content)\n response = client.face_detection(image=image) # POST request JSON\n faces = response.face_annotations # convert response JSON into Python object\n return faces\n\nif __name__ == '__main__':\n PATH = 'photo/1.jpg' # set relative PATH of photo file\n print(len(faces(PATH)), 'face(s) found!')","sub_path":"Pi/lib/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565954865","text":"#\n# Copyright (c) 2023 
Airbyte, Inc., all rights reserved.\n#\n\nimport json\nimport logging\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Dict, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union\n\nfrom airbyte_cdk.models import (\n AirbyteCatalog,\n AirbyteConnectionStatus,\n AirbyteLogMessage,\n AirbyteMessage,\n AirbyteStateMessage,\n ConfiguredAirbyteCatalog,\n ConfiguredAirbyteStream,\n Level,\n Status,\n SyncMode,\n)\nfrom airbyte_cdk.models import Type as MessageType\nfrom airbyte_cdk.sources.connector_state_manager import ConnectorStateManager\nfrom airbyte_cdk.sources.source import Source\nfrom airbyte_cdk.sources.streams import Stream\nfrom airbyte_cdk.sources.streams.core import StreamData\nfrom airbyte_cdk.sources.streams.http.http import HttpStream\nfrom airbyte_cdk.sources.utils.record_helper import stream_data_to_airbyte_message\nfrom airbyte_cdk.sources.utils.schema_helpers import InternalConfig, split_config\nfrom airbyte_cdk.utils.event_timing import create_timer\nfrom airbyte_cdk.utils.traced_exception import AirbyteTracedException\n\n\nclass AbstractSource(Source, ABC):\n \"\"\"\n Abstract base class for an Airbyte Source. Consumers should implement any abstract methods\n in this class to create an Airbyte Specification compliant Source.\n \"\"\"\n\n SLICE_LOG_PREFIX = \"slice:\"\n\n @abstractmethod\n def check_connection(self, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:\n \"\"\"\n :param logger: source logger\n :param config: The user-provided configuration as specified by the source's spec.\n This usually contains information required to check connection e.g. tokens, secrets and keys etc.\n :return: A tuple of (boolean, error). If boolean is true, then the connection check is successful\n and we can connect to the underlying data source using the provided configuration.\n Otherwise, the input config cannot be used to connect to the underlying data source,\n and the \"error\" object should describe what went wrong.\n The error object will be cast to string to display the problem to the user.\n \"\"\"\n\n @abstractmethod\n def streams(self, config: Mapping[str, Any]) -> List[Stream]:\n \"\"\"\n :param config: The user-provided configuration as specified by the source's spec.\n Any stream construction related operation should happen here.\n :return: A list of the streams in this source connector.\n \"\"\"\n\n # Stream name to instance map for applying output object transformation\n _stream_to_instance_map: Dict[str, Stream] = {}\n\n @property\n def name(self) -> str:\n \"\"\"Source name\"\"\"\n return self.__class__.__name__\n\n def discover(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteCatalog:\n \"\"\"Implements the Discover operation from the Airbyte Specification.\n See https://docs.airbyte.com/understanding-airbyte/airbyte-protocol/#discover.\n \"\"\"\n streams = [stream.as_airbyte_stream() for stream in self.streams(config=config)]\n return AirbyteCatalog(streams=streams)\n\n def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:\n \"\"\"Implements the Check Connection operation from the Airbyte Specification.\n See https://docs.airbyte.com/understanding-airbyte/airbyte-protocol/#check.\n \"\"\"\n try:\n check_succeeded, error = self.check_connection(logger, config)\n if not check_succeeded:\n return AirbyteConnectionStatus(status=Status.FAILED, message=repr(error))\n except Exception as e:\n return AirbyteConnectionStatus(status=Status.FAILED, 
message=repr(e))\n\n return AirbyteConnectionStatus(status=Status.SUCCEEDED)\n\n def read(\n self,\n logger: logging.Logger,\n config: Mapping[str, Any],\n catalog: ConfiguredAirbyteCatalog,\n state: Union[List[AirbyteStateMessage], MutableMapping[str, Any]] = None,\n ) -> Iterator[AirbyteMessage]:\n \"\"\"Implements the Read operation from the Airbyte Specification. See https://docs.airbyte.com/understanding-airbyte/airbyte-protocol/.\"\"\"\n logger.info(f\"Starting syncing {self.name}\")\n config, internal_config = split_config(config)\n # TODO assert all streams exist in the connector\n # get the streams once in case the connector needs to make any queries to generate them\n stream_instances = {s.name: s for s in self.streams(config)}\n state_manager = ConnectorStateManager(stream_instance_map=stream_instances, state=state)\n self._stream_to_instance_map = stream_instances\n with create_timer(self.name) as timer:\n for configured_stream in catalog.streams:\n stream_instance = stream_instances.get(configured_stream.stream.name)\n if not stream_instance:\n raise KeyError(\n f\"The requested stream {configured_stream.stream.name} was not found in the source.\"\n f\" Available streams: {stream_instances.keys()}\"\n )\n stream_is_available, error = stream_instance.check_availability(logger, self)\n if not stream_is_available:\n logger.warning(f\"Skipped syncing stream '{stream_instance.name}' because it was unavailable. Error: {error}\")\n continue\n try:\n timer.start_event(f\"Syncing stream {configured_stream.stream.name}\")\n yield from self._read_stream(\n logger=logger,\n stream_instance=stream_instance,\n configured_stream=configured_stream,\n state_manager=state_manager,\n internal_config=internal_config,\n )\n except AirbyteTracedException as e:\n raise e\n except Exception as e:\n logger.exception(f\"Encountered an exception while reading stream {configured_stream.stream.name}\")\n display_message = stream_instance.get_error_display_message(e)\n if display_message:\n raise AirbyteTracedException.from_exception(e, message=display_message) from e\n raise e\n finally:\n timer.finish_event()\n logger.info(f\"Finished syncing {configured_stream.stream.name}\")\n logger.info(timer.report())\n\n logger.info(f\"Finished syncing {self.name}\")\n\n @property\n def per_stream_state_enabled(self) -> bool:\n return True\n\n def _read_stream(\n self,\n logger: logging.Logger,\n stream_instance: Stream,\n configured_stream: ConfiguredAirbyteStream,\n state_manager: ConnectorStateManager,\n internal_config: InternalConfig,\n ) -> Iterator[AirbyteMessage]:\n self._apply_log_level_to_stream_logger(logger, stream_instance)\n if internal_config.page_size and isinstance(stream_instance, HttpStream):\n logger.info(f\"Setting page size for {stream_instance.name} to {internal_config.page_size}\")\n stream_instance.page_size = internal_config.page_size\n logger.debug(\n f\"Syncing configured stream: {configured_stream.stream.name}\",\n extra={\n \"sync_mode\": configured_stream.sync_mode,\n \"primary_key\": configured_stream.primary_key,\n \"cursor_field\": configured_stream.cursor_field,\n },\n )\n logger.debug(\n f\"Syncing stream instance: {stream_instance.name}\",\n extra={\n \"primary_key\": stream_instance.primary_key,\n \"cursor_field\": stream_instance.cursor_field,\n },\n )\n\n use_incremental = configured_stream.sync_mode == SyncMode.incremental and stream_instance.supports_incremental\n if use_incremental:\n record_iterator = self._read_incremental(\n logger,\n stream_instance,\n 
configured_stream,\n                state_manager,\n                internal_config,\n            )\n        else:\n            record_iterator = self._read_full_refresh(logger, stream_instance, configured_stream, internal_config)\n\n        record_counter = 0\n        stream_name = configured_stream.stream.name\n        logger.info(f\"Syncing stream: {stream_name} \")\n        for record in record_iterator:\n            if record.type == MessageType.RECORD:\n                record_counter += 1\n            yield record\n\n        logger.info(f\"Read {record_counter} records from {stream_name} stream\")\n\n    @staticmethod\n    def _limit_reached(internal_config: InternalConfig, records_counter: int) -> bool:\n        \"\"\"\n        Check if record count reached limit set by internal config.\n        :param internal_config - internal CDK configuration separated from user defined config\n        :param records_counter - number of records already read\n        :return True if limit reached, False otherwise\n        \"\"\"\n        if internal_config.limit:\n            if records_counter >= internal_config.limit:\n                return True\n        return False\n\n    def _read_incremental(\n        self,\n        logger: logging.Logger,\n        stream_instance: Stream,\n        configured_stream: ConfiguredAirbyteStream,\n        state_manager: ConnectorStateManager,\n        internal_config: InternalConfig,\n    ) -> Iterator[AirbyteMessage]:\n        \"\"\"Read stream using incremental algorithm\n\n        :param logger:\n        :param stream_instance:\n        :param configured_stream:\n        :param state_manager:\n        :param internal_config:\n        :return:\n        \"\"\"\n        stream_name = configured_stream.stream.name\n        stream_state = state_manager.get_stream_state(stream_name, stream_instance.namespace)\n\n        if stream_state and \"state\" in dir(stream_instance):\n            stream_instance.state = stream_state\n            logger.info(f\"Setting state of {stream_name} stream to {stream_state}\")\n\n        slices = stream_instance.stream_slices(\n            cursor_field=configured_stream.cursor_field,\n            sync_mode=SyncMode.incremental,\n            stream_state=stream_state,\n        )\n        logger.debug(f\"Processing stream slices for {stream_name} (sync_mode: incremental)\", extra={\"stream_slices\": slices})\n\n        total_records_counter = 0\n        has_slices = False\n        for _slice in slices:\n            has_slices = True\n            if logger.isEnabledFor(logging.DEBUG):\n                yield AirbyteMessage(\n                    type=MessageType.LOG,\n                    log=AirbyteLogMessage(level=Level.INFO, message=f\"{self.SLICE_LOG_PREFIX}{json.dumps(_slice, default=str)}\"),\n                )\n            records = stream_instance.read_records(\n                sync_mode=SyncMode.incremental,\n                stream_slice=_slice,\n                stream_state=stream_state,\n                cursor_field=configured_stream.cursor_field or None,\n            )\n            record_counter = 0\n            for message_counter, record_data_or_message in enumerate(records, start=1):\n                message = self._get_message(record_data_or_message, stream_instance)\n                yield message\n                if message.type == MessageType.RECORD:\n                    record = message.record\n                    stream_state = stream_instance.get_updated_state(stream_state, record.data)\n                    checkpoint_interval = stream_instance.state_checkpoint_interval\n                    record_counter += 1\n                    if checkpoint_interval and record_counter % checkpoint_interval == 0:\n                        yield self._checkpoint_state(stream_instance, stream_state, state_manager)\n\n                    total_records_counter += 1\n                    # This functionality should ideally live outside of this method\n                    # but since state is managed inside this method, we keep track\n                    # of it here.\n                    if self._limit_reached(internal_config, total_records_counter):\n                        # Break from slice loop to save state and exit from _read_incremental function.\n                        break\n\n            yield self._checkpoint_state(stream_instance, stream_state, state_manager)\n            if self._limit_reached(internal_config, total_records_counter):\n                return\n\n        if not has_slices:\n            # 
Safety net to ensure we always emit at least one state message even if there are no slices\n checkpoint = self._checkpoint_state(stream_instance, stream_state, state_manager)\n yield checkpoint\n\n def _read_full_refresh(\n self,\n logger: logging.Logger,\n stream_instance: Stream,\n configured_stream: ConfiguredAirbyteStream,\n internal_config: InternalConfig,\n ) -> Iterator[AirbyteMessage]:\n slices = stream_instance.stream_slices(sync_mode=SyncMode.full_refresh, cursor_field=configured_stream.cursor_field)\n logger.debug(\n f\"Processing stream slices for {configured_stream.stream.name} (sync_mode: full_refresh)\", extra={\"stream_slices\": slices}\n )\n total_records_counter = 0\n for _slice in slices:\n if logger.isEnabledFor(logging.DEBUG):\n yield AirbyteMessage(\n type=MessageType.LOG,\n log=AirbyteLogMessage(level=Level.INFO, message=f\"{self.SLICE_LOG_PREFIX}{json.dumps(_slice, default=str)}\"),\n )\n record_data_or_messages = stream_instance.read_records(\n stream_slice=_slice,\n sync_mode=SyncMode.full_refresh,\n cursor_field=configured_stream.cursor_field,\n )\n for record_data_or_message in record_data_or_messages:\n message = self._get_message(record_data_or_message, stream_instance)\n yield message\n if message.type == MessageType.RECORD:\n total_records_counter += 1\n if self._limit_reached(internal_config, total_records_counter):\n return\n\n def _checkpoint_state(self, stream: Stream, stream_state, state_manager: ConnectorStateManager):\n # First attempt to retrieve the current state using the stream's state property. We receive an AttributeError if the state\n # property is not implemented by the stream instance and as a fallback, use the stream_state retrieved from the stream\n # instance's deprecated get_updated_state() method.\n try:\n state_manager.update_state_for_stream(stream.name, stream.namespace, stream.state)\n\n except AttributeError:\n state_manager.update_state_for_stream(stream.name, stream.namespace, stream_state)\n return state_manager.create_state_message(stream.name, stream.namespace, send_per_stream_state=self.per_stream_state_enabled)\n\n @staticmethod\n def _apply_log_level_to_stream_logger(logger: logging.Logger, stream_instance: Stream):\n \"\"\"\n Necessary because we use different loggers at the source and stream levels. We must\n apply the source's log level to each stream's logger.\n \"\"\"\n if hasattr(logger, \"level\"):\n stream_instance.logger.setLevel(logger.level)\n\n def _get_message(self, record_data_or_message: Union[StreamData, AirbyteMessage], stream: Stream):\n \"\"\"\n Converts the input to an AirbyteMessage if it is a StreamData. 
Returns the input as is if it is already an AirbyteMessage\n \"\"\"\n if isinstance(record_data_or_message, AirbyteMessage):\n return record_data_or_message\n else:\n return stream_data_to_airbyte_message(stream.name, record_data_or_message, stream.transformer, stream.get_json_schema())\n","sub_path":"dts/airbyte/airbyte-cdk/python/airbyte_cdk/sources/abstract_source.py","file_name":"abstract_source.py","file_ext":"py","file_size_in_byte":16034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"165265534","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Sonrisatrol - Django 1.4 - Python 2.7.3\r\n# Desarrollado por José Cols - josecolsg@gmail.com - @josecols\r\n\r\nfrom django.contrib import admin\r\nfrom models import Seccion, Etiqueta, Medalla, Rango, Publicacion, \\\r\n PerfilUsuario\r\nfrom forms import SeccionForm\r\n\r\n\r\nclass PublicacionAdmin(admin.ModelAdmin):\r\n\r\n def formfield_for_foreignkey(\r\n self,\r\n db_field,\r\n request,\r\n **kwargs\r\n ):\r\n\r\n if db_field.name == 'autor':\r\n kwargs['initial'] = request.user.id\r\n return db_field.formfield(**kwargs)\r\n return super(PublicacionAdmin,\r\n self).formfield_for_foreignkey(db_field, request,\r\n **kwargs)\r\n\r\n list_display = ('titulo', 'favoritos', 'aprobado')\r\n search_fields = ('titulo', 'etiquetas')\r\n list_filter = ('aprobado', )\r\n\r\n\r\nclass SeccionAdmin(admin.ModelAdmin):\r\n\r\n form = SeccionForm\r\n\r\n class Media:\r\n\r\n css = {'all': ('/static/js/ghost/ghostdown.css', )}\r\n js = ('/static/js/jquery-1.10.2.min.js',\r\n '/static/js/ghost/ghostdown.js')\r\n\r\n\r\nadmin.site.register(Seccion, SeccionAdmin)\r\nadmin.site.register(Etiqueta)\r\nadmin.site.register(Medalla)\r\nadmin.site.register(Rango)\r\nadmin.site.register(Publicacion, PublicacionAdmin)\r\nadmin.site.register(PerfilUsuario)\r\n\r\n","sub_path":"main/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"312524568","text":"from math import log\nfrom idacyber import ColorFilter\nimport ida_kernwin\n\n# http://www.color-hex.com/color-palette/54234\n\n# taken from http://blog.dkbza.org/2007/05/scanning-data-for-entropy-anomalies.html\ndef H(data):\n if not data:\n return 0\n entropy = 0\n for x in range(256):\n p_x = float(data.count(chr(x)))/len(data)\n if p_x > 0:\n entropy += - p_x*log(p_x, 2)\n return entropy\n\nclass Histogram(ColorFilter):\n name = \"Histogram\"\n width = 256\n lock_width = True\n zoom = 2\n link_pixel = False\n support_selection = True\n highlight_cursor = False\n disable_data = True\n show_address_range = False\n\n def __init__(self):\n self.entropy = 0.0\n self.max_count = 0\n self.hist = []\n self.bufsize = 0\n\n def on_get_annotations(self, address, size, mouse_offs):\n cursor_x = mouse_offs % Histogram.width\n annotations = None\n if self.bufsize and cursor_x in xrange(len(self.hist)):\n count = self.hist[cursor_x]\n annotations = [(None, None, '>> Histogram <<', 0xf2f0f0),\n (None, None, '', None),\n (None, None, 'Start: 0x%X' % address, 0xf2f0f0),\n (None, None, 'End: 0x%X' % (address+self.bufsize), 0xf2f0f0),\n (None, None, 'Size: 0x%X' % (self.bufsize), 0xf2f0f0),\n (None, None, '', None),\n (None, None, 'Entropy: %f' % float(self.entropy), 0xf2f0f0),\n (None, None, 'Byte: 0x%02X x %d (%.2f%%)' % (cursor_x, count, (count/float(self.bufsize))*100.0), 0xf2f0f0)]\n return annotations\n\n def on_process_buffer(self, 
buffers, addr, size, mouse_offs):\n colors = [(True, 0x193d5a)] * size\n self.hist = [0] * 256\n width = Histogram.width\n\n height = int(round(size / width))\n e = ''\n self.bufsize = 0\n for mapped, buf in buffers:\n if mapped:\n self.bufsize += len(buf)\n for c in buf:\n e += c\n self.hist[ord(c)] += 1\n self.entropy = H(e)\n self.max_count = max(self.hist)\n cursor_x = mouse_offs % width\n\n if self.max_count and height:\n bars = []\n for i in xrange(len(self.hist)):\n count = self.hist[i]\n bars.append(int(round((count/float(self.max_count))*height)))\n\n for i in xrange(len(bars)):\n dst_y = bars[i]\n for y in xrange(dst_y):\n colors[height*width - width+i - y*width] = (True, 0xf2f0f0 if i == cursor_x else [0xffad00,0xc10000][i%2])\n\n return colors\n\n def on_get_tooltip(self, addr, size, mouse_offs):\n i = mouse_offs % Histogram.width\n tooltip = 'This space for rent'\n if self.bufsize:\n tooltip = '0x%02X x %d (%.2f%%)' % (i, self.hist[i], (self.hist[i]/float(self.bufsize))*100.0)\n\n return tooltip\n\ndef FILTER_INIT(pw):\n return Histogram()\n \ndef FILTER_EXIT():\n return","sub_path":"cyber/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142621248","text":"import pytest\nimport json\nfrom prosys.model.project import Project\nfrom prosys.rest.app import create_app\nfrom prosys.adapters.to_serializable import to_serializable\n\n\n@pytest.fixture\ndef db(mock_db_simple_projects):\n return mock_db_simple_projects\n\n\n@pytest.fixture\ndef client(db):\n app = create_app('TestConfig', db)\n return app.test_client()\n\n\ndef get_projects_from_repo(app):\n res = app.get('/api/projects')\n return json.loads(res.data.decode('utf-8'))['value']\n\n\ndef test_get_initial_projects(client, db):\n res = client.get('/api/projects')\n data = json.loads(res.data.decode('utf-8'))\n set([project['name'] for project in data['value']]) == set(\n [project.name for project in db.get_projects()])\n\n\ndef test_get_projects_filters(client):\n res = client.get('/api/projects',\n data=json.dumps({'name': 'TestProject3'}),\n content_type='application/json')\n data = json.loads(res.data.decode('utf-8'))['value']\n assert len(data) == 1\n assert data[0]['name'] == 'TestProject3'\n\n\ndef test_delete_project(client):\n res = client.get('/api/projects')\n projects = json.loads(res.data.decode('utf-8'))['value']\n prj = projects[0]\n assert len(projects) == 4\n assert prj in projects\n\n res = client.post('/api/project/del/' + str(prj['id']))\n data = json.loads(res.data.decode('utf-8'))\n assert data['message'] == 'project deleted'\n res = client.get('/api/projects')\n projects = json.loads(res.data.decode('utf-8'))['value']\n assert len(projects) == 3\n assert prj not in projects\n\n res = client.post('/api/project/del/-1')\n data = json.loads(res.data.decode('utf-8'))\n assert data['message'] == 'project id not found'\n\n\ndef test_save_project(client):\n # test creating\n assert len(get_projects_from_repo(client)) == 4\n project = Project('New project', 5)\n res = client.post('/api/project/save/',\n data=json.dumps(project, default=to_serializable),\n content_type='application/json')\n data = json.loads(res.data.decode('utf-8'))\n assert data['message'] == 'project saved'\n project_dict = data['value']\n assert project_dict['name'] == 'New project'\n assert project_dict['prio'] == 5\n assert project_dict['id'] is not None\n assert len(get_projects_from_repo(client)) == 
5\n    # test editing\n    res = client.post('/api/project/save/' + project_dict['id'],\n                      data=json.dumps({\n                          'name': 'Edited project'\n                      }), content_type='application/json')\n    data = json.loads(res.data.decode('utf-8'))\n    assert data['message'] == 'project saved'\n    project_dict = data['value']\n    assert project_dict['name'] == 'Edited project'\n    assert len(get_projects_from_repo(client)) == 5\n    # test invalid arguments\n    res = client.post('/api/project/save/' + project_dict['id'],\n                      data=json.dumps({\n                          'name': 'Edited project',\n                          'prio': '3'\n                      }), content_type='application/json')\n    data = json.loads(res.data.decode('utf-8'))\n    assert data['message'] == 'Key \\'prio\\' error: wrong type'\n    # test missing arguments\n    res = client.post('/api/project/save/',\n                      data=json.dumps({\n                          'prio': 3\n                      }), content_type='application/json')\n    data = json.loads(res.data.decode('utf-8'))\n    assert data['message'] == 'name is missing'\n","sub_path":"prosys.server/tests/rest/api/test_projects.py","file_name":"test_projects.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"308468152","text":"\"\"\"\nSee Appendix F for resources used\nNamed this way because the original plan was to implement a full algorithmic\ntrading strategy, but that would have required building a backtesting platform.\nInstead it was moved to Technical Analysis Indicators.\nThree Moving Averages, RSI index\n@author: William Dagnall\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nplt.style.use('fivethirtyeight')\n\n\n\n#Plots exponential moving averages of stocks\ndef buy_sell(ticker):\n    data = pd.read_csv(f'stock_dfs/{ticker}.csv')\n    data = data.set_index(pd.DatetimeIndex(data['Date'].values))\n    short_exponential_moving_averages = data.Close.ewm(span=10, adjust = False).mean()\n    print(data)\n    #calculate medium exponential moving average\n    middle_exponential_moving_averages = data.Close.ewm(span=50, adjust = False).mean()\n    \n    #Long slow moving averages\n    long_moving_exponential_averages = data.Close.ewm(span=200, adjust = False).mean()\n    data['Short'] = short_exponential_moving_averages\n    data['Middle'] = middle_exponential_moving_averages\n    data['Long'] = long_moving_exponential_averages\n    \n    buy = []\n    sell = []\n    \n    flag_long = False\n    flag_short = False\n    #Sort the data to find where it is best to buy and sell\n    for i in range(0 , len(data)):\n        if data['Middle'][i] < data['Long'][i] and data['Short'][i] < data['Middle'][i] and flag_long == False and flag_short == False:\n            buy.append(data['Adj Close'][i])\n            sell.append(np.nan)\n            flag_short = True\n        #Used as signals to see where the EMAs move across the Adj Close Price Line\n        elif flag_short == True and data['Short'][i] > data['Middle'][i]:\n            sell.append(data['Adj Close'][i])\n            buy.append(np.nan)\n            flag_short = False\n        \n        elif data['Middle'][i] > data['Long'][i] and data['Short'][i] > data['Middle'][i] and flag_long == False and flag_short == False:\n            buy.append(data['Adj Close'][i])\n            sell.append(np.nan)\n            flag_long = True\n        \n        elif flag_long == True and data['Short'][i] < data['Middle'][i]:\n            sell.append(data['Adj Close'][i])\n            buy.append(np.nan)\n            flag_long = False\n        else:\n            buy.append(np.nan)\n            sell.append(np.nan)\n    \n\n    data['Buy'] = buy\n    data['Sell'] = sell\n\n    \n    plt.figure(figsize=(13, 5))\n    plt.title(f'{ticker} Adj Close Values', fontsize=16)\n    plt.plot(data['Adj Close'], label=\"Adj Close Price\",color = 'red', alpha=0.35)\n    \n
    plt.scatter(data.index, data['Buy'], color = 'green', marker='^', alpha = 0.75)\n    plt.scatter(data.index, data['Sell'], color = 'blue', marker = 'v', alpha = 0.75)\n    \n    plt.plot(short_exponential_moving_averages, label= \"Short EMA\", color = 'yellow', alpha=0.35)\n    plt.plot(middle_exponential_moving_averages, label = \"Middle EMA\",color = 'green', alpha=0.35)\n    plt.plot(long_moving_exponential_averages, label = \"Long EMA\", color = 'blue', alpha=0.35)\n    plt.xlabel(\"Date\", fontsize=12)\n    plt.ylabel(\"Adj Close Price\", fontsize=12)\n    plt.legend()\n    plt.show()\n\n\n#buy_sell('NFLX')\n\n\n\ndef relative_strength_index(ticker):\n    \n    data = pd.read_csv(f'stock_dfs/{ticker}.csv')\n    data = data.set_index(pd.DatetimeIndex(data['Date'].values))\n\n    \n    #Use relative strength index RSI, tells whether stock is overbought or not\n    change_in_price = data['Adj Close'].diff(1)\n    change_in_price = change_in_price.dropna()\n\n    change_upwards = change_in_price.copy()\n    change_downwards = change_in_price.copy()\n    \n    #Positive Values\n    change_upwards[change_upwards < 0] = 0\n    #Negative Values\n    change_downwards[change_downwards > 0] = 0\n    \n    #Timeframe\n    days = 14\n    #Average gain and loss\n    avg_gain = change_upwards.rolling(window=days).mean()\n    avg_loss = abs(change_downwards.rolling(window=days).mean())\n    \n    #RSI\n    relative_strength = avg_gain / avg_loss\n    relative_strength_index = 100.0 - (100.0 / (1.0 + relative_strength))\n    \n\n    \n    #Plot RSI and various weightings\n    plt.figure(figsize=(13, 5))\n    plt.title(f'{ticker} Relative Strength Index', fontsize=16)\n    plt.plot(relative_strength_index, label = 'Relative Strength Index')\n    plt.xlabel(\"Date\", fontsize=12)\n    plt.ylabel(\"RSI\", fontsize=12)\n\n    plt.axhline(10, linestyle='--', alpha = 0.35, color = 'orange')\n    plt.axhline(20, linestyle='--', alpha = 0.35, color = 'red')\n    plt.axhline(30, linestyle='--', alpha = 0.35, color = 'green')\n    plt.axhline(70, linestyle='--', alpha = 0.35, color = 'green')\n    plt.axhline(80, linestyle='--', alpha = 0.35, color = 'red')\n    plt.axhline(90, linestyle='--', alpha = 0.35, color = 'orange')\n    plt.legend()\n    plt.show()\n    \n    #RSI over 70 indicates security is overbought, under 30 oversold\n    new_dataframe = pd.DataFrame()\n    new_dataframe['Adj Close'] = data['Adj Close']\n    new_dataframe['RSI'] = relative_strength_index\n\n    \n#relative_strength_index('AAPL')\n\n","sub_path":"moving_averages_trading.py","file_name":"moving_averages_trading.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"604540537","text":"class BinaryTree:\r\n    class Node:\r\n        def __init__(self, data, left=None, right=None):\r\n            self.data = data\r\n            self.left_node = left\r\n            self.right_node = right\r\n\r\n    def __init__(self, root=None):\r\n        self.root = root\r\n\r\n    def is_leaf_node(self, node):\r\n        return node is None\r\n\r\n    def preorder(self, node):\r\n        if not self.is_leaf_node(node):\r\n            print(node.data, end=\"\")\r\n            self.preorder(node.left_node)\r\n            self.preorder(node.right_node)\r\n\r\n    def inorder(self, node):\r\n        if not self.is_leaf_node(node):\r\n            self.inorder(node.left_node)\r\n            print(node.data, end=\"\")\r\n            self.inorder(node.right_node)\r\n\r\n    def postorder(self, node):\r\n        if not self.is_leaf_node(node):\r\n            self.postorder(node.left_node)\r\n            self.postorder(node.right_node)\r\n            print(node.data, end=\"\")\r\n\r\nn = int(input())\r\ntemp_dict = {}\r\n\r\ntree = BinaryTree()\r\n\r\n
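# temp_dict maps each child label to its parent Node and to which side it\r\n# belongs, so the child can be attached when it later appears as a root label\r\nfor _ in range(n):\r\n    root, left, right = 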
input().split()\r\n    new_root_node = tree.Node(root)\r\n\r\n    temp_dict[left] = {}\r\n    temp_dict[left]['parent_node'] = new_root_node\r\n    temp_dict[left]['order'] = 'left'\r\n\r\n    temp_dict[right] = {}\r\n    temp_dict[right]['parent_node'] = new_root_node\r\n    temp_dict[right]['order'] = 'right'\r\n\r\n    if root == 'A':\r\n        tree.root = new_root_node\r\n    else:\r\n        if temp_dict[root]['order'] == 'left':\r\n            temp_dict[root]['parent_node'].left_node = new_root_node\r\n        else:\r\n            temp_dict[root]['parent_node'].right_node = new_root_node\r\n    \r\ntree.preorder(tree.root)\r\nprint()\r\ntree.inorder(tree.root)\r\nprint()\r\ntree.postorder(tree.root)\r\nprint()","sub_path":"tree_traversal_with_class.py","file_name":"tree_traversal_with_class.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"574912084","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 18 17:07:26 2018\n\n@author: Thorin\n\"\"\"\nfrom functools import wraps\nfrom textwrap import fill\nfrom logging import debug, info, error, warning\n\n\ndef pretty_output(*args, separator=\" \",\n                  width=80, # fill.__defaults__[0],\n                  fill=fill, map=map):\n    separator_line = separator*width\n    return fill(\"\\n\".join((\"\\n\", separator_line,\n                           *map(str, args),\n                           separator_line, \"\\n\")),\n                width=width,\n                drop_whitespace=False)\n\n\ndef prettier(func):\n    @wraps(func)\n    def prettier_logger(*args, **kwargs):\n        func(pretty_output(*args, **kwargs))\n    return prettier_logger\n\n\ndef enclose_with_quotes(t):\n    return t.join((\"'\",)*2)\n\n\npretty_debug, pretty_info, pretty_error, pretty_warning = map(prettier,\n                                                              (debug,\n                                                               info,\n                                                               error,\n                                                               warning))\n\n\ndef pretty_print(*args, **kwargs):\n    print(pretty_output(*args, **kwargs))\n","sub_path":"build/exe.win-amd64-3.6/tools/text_and_output.py","file_name":"text_and_output.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"381623990","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May  1 23:25:03 2018\r\n\r\n@author: Rahul Vedanta\r\n\"\"\"\r\n\r\n#First we’ll read some test data.\r\nwith open('C:\\\\Users\\\\Ritesh\\\\Desktop\\\\txt\\\\tweet_extracted.txt') as f:\r\n    testData = f.read().splitlines()\r\nf.close()\r\n\r\n#read the classifier saved in a file\r\nimport pickle\r\nclassifierFile = open('C:\\\\Users\\\\Ritesh\\\\Desktop\\\\txt\\\\myClassifier_2.pickle', 'rb')\r\nclassifier = pickle.load(classifierFile)\r\nclassifierFile.close()\r\n\r\n#variable declarations\r\nhappyTweets=0\r\nsadTweets=0\r\nangryTweets=0\r\nneutralTweets=0\r\ntotalTweets=len(testData)\r\nfor tweet in testData:\r\n    #Call the classify(text) method to use the classifier.\r\n    sentiment=classifier.classify(tweet)\r\n    if(sentiment=='hap'):\r\n        happyTweets=happyTweets+1\r\n    elif(sentiment=='sad'):\r\n        sadTweets=sadTweets+1\r\n    elif(sentiment=='ang'):\r\n        angryTweets=angryTweets+1\r\n    elif(sentiment=='neu'):\r\n        neutralTweets=neutralTweets+1\r\n    #print(tweet,sentiment,sep='\\t')\r\n\r\n#printing the percentage\r\nprint('\\n\\n')\r\nprint('Happy Tweets %: {}%'.format(happyTweets/totalTweets*100))\r\nprint('Sad Tweets %: {}%'.format(sadTweets/totalTweets*100))\r\nprint('Angry Tweets %: {}%'.format(angryTweets/totalTweets*100))\r\nprint('Neutral Tweets %: {}%'.format(neutralTweets/totalTweets*100))","sub_path":"Twitter Sentiment 
Analysis/testingClassifier_2.py","file_name":"testingClassifier_2.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528527211","text":"from __future__ import division\nfrom sklearn import metrics\nimport utilities as utils\nimport numpy as np\nnp.random.seed(123) # for reproducibility\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import ImageDataGenerator\nimport keras\n\n# floyd run --gpu --data harsha/datasets/mnist-fashion/1:data \"python lenet.py\"\n\nimages_path = \"data/train-images-idx3-ubyte.gz\"\nlabels_path = \"data/train-labels-idx1-ubyte.gz\"\nX_train, y_train = utils.load_data(images_path, labels_path)\n\nimages_path = \"data/t10k-images-idx3-ubyte.gz\"\nlabels_path = \"data/t10k-labels-idx1-ubyte.gz\"\nX_test, y_test = utils.load_data(images_path, labels_path)\n\n# floyd run --data harsha/datasets/mnist-fashion/1:data --mode jupyter\n\n# define data preparation\ndatagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)\n# fit parameters from data\n\n\n# utils.display_img(X_train, y_train, 1)\n# utils.display_multiple_img(X_train, 0, 20)\n\nsplit_size = int(X_train.shape[0]*0.7)\n\n# X_train, X_val = X_train[:split_size], X_train[split_size:]\n# y_train, y_val = y_train[:split_size], y_train[split_size:]\n\nprint(\"Training set size \" + str(X_train.shape[0]))\n# print(\"Validation set size \" + str(X_val.shape[0]))\nprint(\"Test set size \" + str(X_test.shape[0]))\n\n#\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\ndatagen.fit(X_train)\n\n# Convert 1-dimensional class arrays to 10-dimensional class matrices\nY_train = np_utils.to_categorical(y_train, 10)\nY_test = np_utils.to_categorical(y_test, 10)\n\nprint(X_train.shape)\nprint(Y_train.shape)\n\nmodel = Sequential()\nmodel.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(28, 28, 1)))\nprint(model.output_shape)\n\nmodel.add(Convolution2D(32, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nbatch_size = 100\noutput_folder = 'output/new/'\ntbCallBack = keras.callbacks.TensorBoard(log_dir=output_folder, histogram_freq=1,\n write_graph=True, write_images=True)\n\n# model.fit(X_train, Y_train,\n# batch_size=batch_size, epochs=20, validation_split=0.2, verbose=1, callbacks=[tbCallBack])\n\n# model.fit(X_train, Y_train,\n# batch_size=batch_size, nb_epoch=2, verbose=1, callbacks=[tbCallBack])\n\n# model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),\n# steps_per_epoch=X_train.shape[0] // batch_size,\n# epochs=2,\n# validation_data=(X_test, y_test))\nprint(X_train.shape[0])\ndata = datagen.flow(X_train, Y_train, batch_size=batch_size)\n\ncount = 0\nfor dat in data:\n count = count+1\n\nprint(count)\n\n# print(sum([1 for dat in data]))\n# model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),\n# samples_per_epoch=80000,\n# 
nb_epoch=2)\n#\n#\n# score = model.evaluate(X_test, Y_test, verbose=0)\n#\n# model.save(output_folder + 'lenet-noreg-epoch250-b10000.h5')\n#\n# print(model.summary())\n# print(score)\n# print(\"Accuracy: %.2f%%\" % (score[1]*100))\n# print(\"done\")\n#\n","sub_path":"lenet-augment.py","file_name":"lenet-augment.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"141421339","text":"import requests\r\nfrom operator import itemgetter\r\n\r\nurl = 'https://hacker-news.firebaseio.com/v0/topstories.json'\r\nr = requests.get(url)\r\nprint(\"Status Code:\", r.status_code)\r\n\r\njson_r = r.json()\r\nlistt = []\r\nfor dictt in json_r[:30]:\r\n\turl_2 = ('https://hacker-news.firebaseio.com/v0/item/' + str(dictt) + '.json')\r\n\tr_2 = requests.get(url_2)\r\n\tprint(\"Status Code 0:\", r_2.status_code)\r\n\tjson_r_2 = r_2.json()\r\n\t\r\n\tdictt_2 = {'title' : json_r_2['title'], 'link' : 'https://news.ycombinator.com/item?id=' + str(dictt), 'comments' : json_r_2.get('descendants', 0)}\r\n\t\r\n\tlistt.append(dictt_2)\r\n\t\r\nlistt = sorted(listt, key=itemgetter('comments'), reverse=True)\r\n\r\nfor dictt_2 in listt:\r\n\ttry:\r\n\t\tprint(\"\\nTitle:\", dictt_2['title'])\r\n\t\tprint(\"Link:\", dictt_2['link'])\r\n\t\tprint(\"Comments:\",dictt_2['comments'])\r\n\texcept UnicodeEncodeError:\r\n\t\tpass\r\n\telse:\r\n\t\tpass\r\n","sub_path":"API/API_hacker_news.py","file_name":"API_hacker_news.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"579346128","text":"#! /usr/bin/env python\n\nimport os\nimport sys\nimport subprocess\nimport time\nimport threading\nfrom uiautomator import Device\n\ndef dialog():\n    if d(text=\"Make Phone your default Phone app?\").exists:\n        d(text=\"SET DEFAULT\").click.wait()\n    if d(text=\"Use Messaging instead of LightOS as your SMS app?\").exists:\n        d(text=\"YES\").click.wait()\n    if d(text=\"Use LightOS as Home\").exists:\n        d.click(240, 500)\n    if d(text=\"Use Launcher3 as Home\").exists:\n        d(text=\"ALWAYS\").click.wait()\n\ndef select_wifi():\n    if d(text=\"arimaguest\").exists:\n        d(text=\"arimaguest\").click.wait()\n        d(text=\"Forget Network\").click.wait()\n    else:\n        d.click(419, 300)\n        i=360\n        while i <= 600:\n            d.click(419, i)\n            if d(text=\"arimaguest\").exists:\n                d(text=\"arimaguest\").click.wait()\n                return\n            i+=30\n\ndef screen_on():\n    d.screen.on()\n\nserial = sys.argv[1]\nd = Device(serial)\n\n# set up uiautomator\nt = threading.Thread(target = screen_on)\nt.start()\ntime.sleep(60)\nsubprocess.call(\"adb -s \" + str(serial) + \" shell input keyevent 26\",shell=True)\ntime.sleep(2)\nsubprocess.call(\"adb -s \" + str(serial) + \" shell input tap 259 413\",shell=True)\nt.join()\n\nd(text=\"CONTINUE\").wait.exists(timeout=30)\nd(text=\"CONTINUE\").click.wait()\nd(text=\"CONNECT TO WIFI\").click.wait()\nwhile d(text=\"Connect To Wifi\").exists:\n    select_wifi()\n    time.sleep(20)\n\nwhile not d(text=\"INSTALL\").exists:\n    d.screen.on()\n    time.sleep(3)\n\nd(text=\"INSTALL\").click.wait()\nif d(text=\"REBOOT LIGHTOS\").exists:\n    d(text=\"REBOOT LIGHTOS\").click.wait()\ntime.sleep(60)\n\n# change light os to android os\nd.screen.on()\nsubprocess.call(\"adb -s \" + str(serial) + \" shell input keyevent 24 25 24 25 25 KEYCODE_STOP_RECORD\",shell=True)\ndialog()\ndialog()\ndialog()\ndialog()\n\n
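# dialog() is called several times, presumably because the prompts appear one\n# after another and each pass can only dismiss what is currently on screen\n# turn off WiFi\nsubprocess.call(\"adb -s \" + str(serial) + \" shell svc wifi 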
disable\",shell=True)\n","sub_path":"autotest/tradefed/config/4101_monkey_config.py","file_name":"4101_monkey_config.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432043295","text":"from login.forms_group import platform_forms\nfrom login.templates.platform.add_Partner.Add_CopyrightPartner import add_CopyrightPartner\nfrom login.templates.utils.confutils import login_control, init_configs\nfrom django.shortcuts import redirect, render\n\n\ndef add_copyright_partner(request):\n \"\"\"\n 添加版权合作方\n :param request:\n :return:\n \"\"\"\n if request.session.is_empty() and login_control():\n return redirect('/login/')\n if request.method:\n copyright_form = platform_forms.CopyrightForm(request.POST)\n # print('copyright_form0000000000000000000000000000000000000',copyright_form)\n # print('copyright_form1111111111111111111111111111111111111111',type(copyright_form))\n if copyright_form.is_valid():\n host_name = copyright_form.cleaned_data.get('host')\n print('hostname2222222222222222222222222',host_name)\n copyright_type = copyright_form.cleaned_data.get('copyright_type')\n partnerType = copyright_form.cleaned_data.get('partnerType')\n print('数据类型:', type(copyright_type))\n init_configs(host_name)\n copyright_list = []\n for copyright in copyright_type:\n result = add_CopyrightPartner(int(copyright), int(partnerType))\n copyright_list.append(result)\n if copyright_list:\n list_key = [i[0] for i in copyright_list]\n list_value = [i[1] for i in copyright_list]\n print(list_key, list_value)\n json_res = dict(zip(list_key, list_value))\n message = \"版权合作方添加成功,列表为:%s\" % (str(json_res))\n return render(request, 'login/platform/copyright_partner.html', locals())\n return render(request, 'login/platform/copyright_partner.html', locals())\n","sub_path":"login/viewas/platform_views/copyright_views.py","file_name":"copyright_views.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104034937","text":"#!/usr/bin/env python\n\n# Copyright 2016 Preferred Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport threading\n\nimport rospy\nimport actionlib\nfrom actionlib_msgs.msg import GoalStatus\n\nfrom apc2016.msg import CalibrationAction, CalibrationGoal\nfrom apc2016.srv import Success, CalibrationUpdate\n\nimport util\n\n\n(logdebug, loginfo, logwarn, logerr) = util.get_loggers(\"PickProcessStrategy\")\n\n\nclass PickProcessStrategy(object):\n def __init__(self, pos_info):\n self.pos_info = pos_info\n self.start = time.time()\n cd = threading.Thread(target=self.countdown)\n cd.daemon = True\n cd.start()\n\n def countdown(self):\n while True:\n time.sleep(10)\n elapsed = util.get_elapsed_time(self.start)\n loginfo(\"elapsed time: %s\" % elapsed)\n\n def calibrate(self):\n # forget the calibration data\n loginfo(\"wait for forget_calibration service\")\n 
rospy.wait_for_service('forget_calibration')\n        f = rospy.ServiceProxy('forget_calibration', Success)\n        try:\n            success = f()\n        except Exception as e:\n            logwarn(\"forget_calibration failed with an error: %s\" % e)\n            return False\n        if not success:\n            logwarn(\"forget_calibration did not succeed\")\n            return False\n        loginfo(\"forget_calibration was successful\")\n\n        # calibrate the shelf\n        loginfo(\"wait for calibration service\")\n        client = actionlib.SimpleActionClient('calibration',\n                                              CalibrationAction)\n        client.wait_for_server()\n        loginfo(\"calibrating shelf position\")\n        client.send_goal(CalibrationGoal())\n        client.wait_for_result(rospy.Duration.from_sec(120.0))\n        state = client.get_state()\n        if state != GoalStatus.SUCCEEDED:\n            logwarn(\"calibration did not finish in time\")\n            return False\n        result = client.get_result()\n        if not result.success:\n            logwarn(\"calibration did not succeed\")\n            return False\n        ul, ur = result.upper_left, result.upper_right\n        loginfo(\"calibration result: %s, %s\" % (ul, ur))\n        if ul.y == -1:\n            return False\n\n        # now update the calibration with the points we just obtained\n        loginfo(\"wait for update_calibration service\")\n        rospy.wait_for_service('update_calibration')\n        cal = rospy.ServiceProxy('update_calibration', CalibrationUpdate)\n        try:\n            loginfo(\"send calibration data\")\n            success = cal(ul, ur)\n        except Exception as e:\n            logwarn(\"update_calibration failed with an error: %s\" % e)\n            return False\n        if not success:\n            logwarn(\"update_calibration did not succeed\")\n            return False\n        loginfo(\"update_calibration was successful\")\n        return True\n\n    def execute(self, items):\n        \"\"\"Sort the items passed in as a parameter by some\n        metric and move them into the tote.\"\"\"\n        raise NotImplementedError(\"override this\")\n","sub_path":"script/pick_process/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"13045696","text":"#! /
/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\"\"\"Module creating the favorite products database table based on the Django ORM\"\"\"\n\nfrom django.db import models\nfrom products.models import Product\nfrom django.conf import settings\n\n\nclass Favorite(models.Model):\n \"\"\"Class representing favorite products table fields\"\"\"\n\n class Meta:\n db_table = 'Favorite'\n\n substituted = models.ForeignKey(\n Product,\n on_delete=models.CASCADE)\n\n substitute = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n related_name='+')\n\n user = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='favorites')\n","sub_path":"favorites/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336150295","text":"import textfsm,time,re\ndef check_cpu(ssh,nme,version,fhand,devicedict):\n \n\n    version = devicedict.gennodedict['version']['soft_ver']\n fhand.write(\"<!doctype html><html><head> <title>\"+nme+\"\")\n fhand.write(\"Version \"+version+\"\\n\\n\")\n if version != 'cisco_nxos':\t \n boo=True\n while boo:\n try:\n ret=ssh.send_command(\"sh proc cpu | ex 0.0\",use_textfsm=True)\n print(ret)\n boo=False\n except Exception as e: \n print(\"9 Exception Raised , Trying again\",e)\n boo=True\n #ssh=ConnectHandler(device_type=version,host=devicedict.sship,username=\"rit\",password=\"CMSnoc$1234\")\n continue\n if not(isinstance(ret,list)):\n boo=True\n print(\"9 return from sh proc cpu not proper, trying again\",nme,ssh.device_type)\n else:\n boo=False\t \n fhand.write(\"Show proc cpu | ex 0.0\\n\")\n fhand.write(str(ret))\n fhand.write(\"\\n\\n\")\n ct1=0\n for line in ret:\n\n if ct1==0:\n cpu={}\n if 'cpu_5_sec' in line.keys():\n cpu['cpu_5_sec']=line['cpu_5_sec']\n if 'cpu_1_min' in line.keys():\n cpu['cpu_1_min']=line['cpu_1_min']\n if 'cpu_5_min' in line.keys():\n cpu['cpu_5_min']=line['cpu_5_min']\n devicedict.gennodedict['CPU']=cpu \n \n combine={}\n if 'process' in line.keys():\n combine['process']=line['process']\n if 'proc_5_sec' in line.keys():\n combine['proc_5_sec']=line['proc_5_sec']\n if 'proc_1_min' in line.keys():\n combine['proc_1_min']=line['proc_1_min']\n if 'proc_5_min' in line.keys():\n combine['proc_5_min']=line['proc_5_min']\n devicedict.gennodedict[line['pid']]=combine \n ct1+=1\n #NXOS SH PROC CPU\n if version == 'cisco_nxos':\t \n boo=True\n while boo:\n try:\n ret=ssh.send_command(\"sh proc cpu | ex 0.0\",use_textfsm=True)\n print(ret)\n boo=False\n except Exception as e: \n print(\"9 Exception Raised , Trying again\",e)\n boo=True\n #ssh=ConnectHandler(device_type=version,host=devicedict.sship,username=\"rit\",password=\"CMSnoc$1234\")\n continue\n if not(isinstance(ret,list)):\n boo=True\n print(\"9 return from sh proc cpu not proper, trying again\",nme,ssh.device_type)\n else:\n boo=False\t \n \n print(\"NEXUS CPU: \", ret)\n fhand.write(\"Show proc cpu | ex 0.0 \\n\")\n fhand.write(str(ret))\n fhand.write(\"\\n\\n\")\n ker_flag=0\n for line in ret:\n\n cpu={}\n if line['kernel']!='':\n cpu['user']=line['user']\n cpu['kernel']=line['kernel']\n cpu['idle']=line['idle']\n devicedict.gennodedict['CPU']=cpu \n ker_flag=1 \n else:\n combine={}\n if 'process' in line.keys():\n combine['process']=line['process']\n if 'proc_1_sec' in line.keys():\n combine['proc_1_sec']=line['proc_1_sec']\n devicedict.gennodedict[line['pid']]=combine \n if ker_flag==0:\n ret=ssh.send_command(\"show proc cpu | inc kernel\") \n ret=ret.split()\n 
fhand.write(\"Show proc cpu | inc kernel\\n\")\n fhand.write(str(ret))\n print(ret)\n fhand.write(\"\\n\\n\")\n cpu={}\n cpu['kernel']=ret[ret.index('kernel,')-1][:-1]\n cpu['user']=ret[ret.index('user,')-1][:-1]\n cpu['idle']=ret[ret.index('idle')-1][:-1]\n devicedict.gennodedict['CPU']=cpu\n\n return devicedict","sub_path":"cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620880492","text":"\nimport gym\nfrom gym.wrappers import TimeLimit\nimport numpy as np\n\n\nclass DistanceReward(gym.Wrapper):\n\n def __init__(self, env):\n super(DistanceReward, self).__init__(env)\n\n def step(self, action):\n assert self.env.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n\n if not self.env.is_valid_action(action):\n action = self.env.prev_act\n self.env.prev_act = action\n\n prev_snake_head = self.env.snake.head\n snake_tail = self.env.snake.step(action)\n\n reward = 0.\n done = False\n \n #snake ate food\n if self.env.snake.head == self.env.food:\n reward += 1.\n self.env.snake.snake.append(snake_tail)\n empty_cells = self.env.get_empty_cells()\n self.env.food = empty_cells[self.env.np_random.choice(len(empty_cells))]\n \n #snake collided wall\n elif self.env.is_collided_wall(self.env.snake.head):\n reward -= 1.\n done = True\n \n #snake bite itself \n elif self.env.snake.head in self.env.snake.body:\n reward -= 1.\n done = True\n\n else:\n snake_len = len(self.env.snake.snake)\n prev_distance = self.distance_to_food(prev_snake_head)\n curr_distance = self.distance_to_food(self.env.snake.head)\n if prev_distance < curr_distance:\n reward -= 0.1\n if prev_distance > curr_distance:\n reward += 0.1\n \n reward = np.clip(reward, -1., 1.)\n\n return self.env.get_image(), reward, done, {}\n\n def distance_to_food(self, head):\n head_x, head_y = head\n food_x, food_y = self.env.food\n return np.sqrt((head_x - food_x)**2 + (head_y - food_y)**2)\n","sub_path":"dopamine/common/reward_wrapper.py","file_name":"reward_wrapper.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1272134","text":"from __future__ import print_function\nimport gzip\nimport os\nimport pickle\nimport sys\nimport subprocess\n\nPYFILE_VERSION = 1\n\nclass VersionError(Exception):\n pass\n\n\ndef loadData(filename):\n try:\n with gzip.open(filename) as f:\n x = f.read()\n except OSError as e:\n e.args = [filename + \" is not a valid .mod file\"]\n raise\n return parseInput(x, filename)\n\ndef hashFile(filename):\n p = subprocess.check_output([\"md5sum\", filename])\n return p.decode().split()[0]\n\ndef parseInput(x, filename):\n res = {}\n \n x = x.decode()\n header = x.split('\\n')[0]\n x = x.replace('\\n', ' ')\n \n if 'GFORTRAN' not in header:\n raise ValueError('Not a gfortran mod file')\n \n res['version'] = int(header.split(\"'\")[1])\n res['orig_file'] = header.split()[-1]\n res['filename'] = filename\n res['checksum'] = hashFile(filename)\n \n if res['version'] == 14:\n from . 
import parseMod14 as p\n else:\n raise VersionError(\"Only supports mod version 14\")\n \n pm = p.parseMod(x,filename,res,PYFILE_VERSION)\n \n return pm\n\t\ndef run(filename,output=None,save=True,unpack=True):\n x = loadData(filename)\n x.processData()\n if save:\n if output is None:\n output=fpyname(filename)\n \n x.save(output)\n \n if unpack:\n return x.getUnpackedData()\n else:\n return x\n\ndef fpyname(filename):\n return filename.split('.')[0] + '.fpy'\n\n#################################\n\nif __name__ == \"__main__\":\n if len(sys.argv[1:]) > 0:\n files = sys.argv[1:]\n \n for filename in files:\n run(filename,save=True,unpack=False)\n","sub_path":"gfort2py/parseMod/parseMod.py","file_name":"parseMod.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70764203","text":"import pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom ranking import *\nfrom functions import *\n\n###############################################################################\n# Read datasets #\n###############################################################################\ndata = pd.read_table('./data/u.data', names=['UserId', 'ItemId', 'Rating', 'Timestamp'], index_col=[0,1])\nitem = pd.read_table('./data/u.item', sep='|', names=['ItemId', 'Title', 'ReleaseDt', 'VideoReleaseDt', 'Url', \\\n 'Unknown', 'Action', 'Adventure', 'Animation', 'Children',\\\n 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Noir',\\\n 'Horror', 'Musical','Mystery', 'Romance', 'Sci-Fi', 'Thriller',\\\n 'War', 'Western'], index_col=0)\n\n###############################################################################\n# Create Alternate Rating Values #\n###############################################################################\n\nvals = pd.DataFrame()\n# Thumbs Up/Down ranking; unfortunately ratings of 3 must be ignored for this type of rating\nvals['Fresh'] = data['Rating'].apply(lambda x: x > 3)\nvals['Rotten'] = data['Rating'].apply(lambda x: x < 3)\n\n# Create buckets for ratings 1-5\n# This will be easier to handle for some ranking systems\nfor i in xrange(1,6):\n vals[str(i)] = data['Rating'].apply(lambda x: x == i)\nvals = vals.groupby(level=1).sum()\n\n###############################################################################\n# Transform item data #\n###############################################################################\n# call genre function on item\nitem = genre(item)\nitem['temp'] = item['Title'].loc[item['Title']!= 'unknown'].apply(titleParse)\nitem[['Title', 'Year']] = item['temp'].apply(pd.Series)\nitem.drop('temp', axis = 1, inplace=True)\n\nscore = pd.merge(item, data.groupby(level=1).agg({'Rating':{'Average':'mean', 'Total':'count'}}), left_index=True, right_index=True) #.sort_values(by = 'Rating', ascending=False)\nscore = pd.merge(score, vals, left_index=True, right_index=True)\nscore = score.rename(index=str, columns={(u'Rating', u'Average'):'Average',(u'Rating', u'Total'):'Ratings',(u'Fresh', u'Fresh'):'Fresh',(u'Rotten', u'Rotten'):'Rotten'})\nscore[['Title', 'Average', 'Ratings', 'Fresh', 'Rotten']].sort_values(by=['Average', 'Ratings'], ascending=False).head()\n\ngenre_average = pd.merge(item, data, left_index=True, right_index=True).groupby('Genre').mean()['Rating']\nscore = pd.merge(score, pd.DataFrame(genre_average), left_on='Genre', right_index=True)\n \nscore['Confidence'] = score[['Fresh', 'Rotten']].apply(confidence, axis = 1) \nscore['CredInt'] = 
score[['Average', 'Rating', 'Ratings']].apply(bayes, axis=1)\nscore['Polarizing'] = score[['Fresh', 'Rotten', 'Ratings']].apply(polarizing, axis=1)\nscore['Polarizing2'] = score[['1', '2', '3', '4', '5']].apply(polarizing2, axis=1) \n\nscore.to_csv('./data/film_ratings.csv')\n\naverage = plt.figure(1)\nconf = plt.figure(2)\ncred = plt.figure(3)\npol1 = plt.figure(4)\npol2 = plt.figure(5)\n\naverage = sns.distplot(score['Average'], label='Average Rating')\nplt.savefig('./fig/pure_average_distribution.png')\nplt.clf()\nconf = sns.distplot(score['Confidence'], label='Wilson Score')\nplt.savefig('./fig/confidence_distribution.png')\nplt.clf()\ncred = sns.distplot(score['CredInt'], label='Bayesian Credibility Interval')\nplt.savefig('./fig/credibility_distribution.png')\nplt.clf()\npol1 = sns.distplot(score['Polarizing'], label='Polarizing1')\nplt.savefig('./fig/polarizing1_distribution.png')\nplt.clf()\npol2 = sns.distplot(score['Polarizing2'], label='Polarizing2')\nplt.savefig('./fig/polarizing2_distribution.png')\nplt.clf()\n\n","sub_path":"ranking-comparison.py","file_name":"ranking-comparison.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629354559","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport glob\nfrom datetime import date\nfrom xlrd import open_workbook,xldate_as_tuple\nfrom xlwt import Workbook\n\ninput_folder = sys.argv[1]\noutput_file = sys.argv[2]\n\noutput_workbook = Workbook()\noutput_worksheet = output_workbook.add_sheet('add_sheet')\n\nfirst_sheet = True\ndata = []\nfor input_file in glob.glob(os.path.join(input_folder,'*.xlsx')):\n print(os.path.basename(input_file))\n with open_workbook(input_file) as workbook:\n for worksheet in workbook.sheets():\n if first_sheet:\n header = worksheet.row_values(0)\n data.append(header)\n first_sheet = False\n for row_index in range(1,worksheet.nrows):\n row_list = []\n for column_index in range(worksheet.ncols):\n cell_value = worksheet.cell_value(row_index, column_index)\n cell_type = worksheet.cell_type(row_index,column_index)\n if cell_type == 3:\n date_cell = xldate_as_tuple(cell_value,workbook.datemode)\n date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')\n row_list.append(date_cell)\n else:\n row_list.append(cell_value)\n if row_list:\n data.append(row_list)\nfor index_list,output_list in enumerate(data):\n for element_list, element in enumerate(output_list):\n output_worksheet.write(index_list, element_list, element)\n\noutput_workbook.save(output_file)","sub_path":"PrepareExam/middle_test/test03/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39631131","text":"import os, sys, ctypes\nimport win32com.shell.shell as shell\n\n#collect eventlog file names and full paths\nobjpaths = [\"C:\\\\Windows\\\\System32\\\\Winevt\\\\logs\"]\n\ndef uac_require():\n\tasadmin = 'Administrator'\n\ttry:\n\t\tif sys.argv[-1] != asadmin:\n\t\t\tscript = os.path.abspath(sys.argv[0])\n\t\t\tparams = ''.join([script]+sys.argv[1:]+[asadmin])\n\t\t\tshell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)\n\t\treturn True\n\texcept Exception:\n\t\tprint(\"uac Failed\")\n\t\treturn False\n\ndef getDirList(objpath):\n\tpathisexist = os.path.exists(objpath)\n\treturn pathisexist\n\ndef getFullPath(objpath):\n\tc = 0\n\tfor (path, dirs, files) in os.walk(objpath):\n\t\tfor f in files:\n\t\t\tif 
os.path.splitext(f)[-1] == '.evtx' : \n\t\t\t\tprint(path+\"\\\\\"+f)\n\t\t\t\tprint(f)\n\t\t\t\tc = c+1\n\t\t\telse : \n\t\t\t\tpass\n\tprint(\"\\n\")\n\tprint(\"%d files are found\" %c)\n\n\tif c==0:\n\t\tprint(\"Empty Folder\\nNo File Exists\\n\")\n\t\treturn False\n\treturn True\n\nif __name__ == '__main__':\n\n\ttry:\n\t\tif ctypes.windll.shell32.IsUserAnAdmin():\n\n\t\t\tfor i in objpaths:\n\t\t\t\tif getDirList(i):\n\t\t\t\t\tprint(i+\" folder exists\\n\")\n\t\t\t\t\tgetFullPath(i)\n\t\t\t\telse:\n\t\t\t\t\tprint(i+\" failed\\n\")\n\t\telse:\n\t\t\tprint(\"\\nPlease Execute as Administrator\")\t\t\t\n\n\texcept Exception as e:\n\t\tprint(e)\n\t\tprint(\"\\n Please Execute as Administrator\")\n\n","sub_path":"eventlog_parser01.py","file_name":"eventlog_parser01.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119980086","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis short script uses curl requests to search the last 100 builds of\na jenkins job to find recurring errors, written in Python3.\nIt results in printing a list of links to builds that match the search.\nAs the requests package is not included within kv, you will need to either\ndownload this package yourself or reference the one included inside\ncouchbase-cli.\nThis is currently limited to searching for log patterns contained within\none line of the logs, as the search checks line-by-line.\n\"\"\"\n\nimport argparse\nimport requests\nimport sys\nimport time\n\n# Create argparser so the user can specify which job to search\nargParser = argparse.ArgumentParser()\nargParser.add_argument('--job', '-j', type=str,\n help='The cv job to query. '\n \"Common jobs are: 'kv_engine-ASan-UBSan-master', \"\n \"'kv_engine-clang_analyzer-master', \"\n \"'kv_engine-linux-master', \"\n \"'kv_engine-threadsanitizer-master', \"\n \"'kv_engine-windows-master', \"\n \"'kv_engine-clang_format', \"\n \"'kv-engine-cv-perf'\", required=True)\nargParser.add_argument('--search', '-s', type=str,\n help='The string to search the logs for', required=True)\nargParser.add_argument('--build-no', '-b', type=int,\n help='The build number of cv job to check backwards from. '\n '0 (default) fetches latest build number', default=0)\nargParser.add_argument('--no-of-builds', '-n', type=int,\n help='The number of builds to check back', default=100)\nargParser.add_argument('--format', '-f', default=\"plain\", type=str,\n help=\"Select the format to print results. 
\"\n \"Available formats are: \"\n \"plain (default), log-line, jira\")\nargParser.add_argument('--url-prefix', '-u', type=str, default='cv',\n help='Determine the endpoint of logs to check, ' \\\n 'http://<url-prefix>.jenkins.couchbase.com')\n\nargs = argParser.parse_args()\njob = 'job/' + args.job + '/'\n\nserverURL = 'http://' + str(args.url_prefix) + '.jenkins.couchbase.com/'\n\navailableFormats = [\"plain\", \"log-line\", \"jira\"]\noutputFormat = args.format.lower()\nassert outputFormat in availableFormats, \"%r format is not supported\" % outputFormat\n\nconsoleText = '/consoleText/'\nresultURLs = {}\nfailedBuildNums = []\n\nif args.build_no == 0:\n # need to fetch the latest build number\n r = requests.get(serverURL + job + 'lastBuild/api/json')\n j = r.json()\n args.build_no = j['number']\n\nprint(\"Searching for:\", ('\"' + args.search + '\"'), \"in console logs of job:\",\n args.job, \"between build\", args.build_no - (args.no_of_builds - 1),\n \"and\", args.build_no, file=sys.stderr)\n\nstart_time = time.time()\nfor i in range(0, args.no_of_builds):\n lines = []\n print('\\r >>> Current progress: {} '.format(str(i)), end='',\n flush=True, file=sys.stderr)\n r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)\n if r.status_code != 200:\n failedBuildNums.append(args.build_no-i)\n for line in r.text.split('\\n'):\n result = line.find(args.search)\n if result != -1:\n lines.append(line)\n if lines:\n resultURLs[serverURL + job + str(args.build_no-i) + '/console/'] = lines\n\nprint('\\r Completed search in', (time.time() - start_time), 's', file=sys.stderr)\nif failedBuildNums:\n print(\"Failed log request on build(s) no:\", failedBuildNums, file=sys.stderr)\n\n# Ensure above prints actually print before results (and not mangled inside results)\nsys.stderr.flush()\n\nif not resultURLs:\n # Empty results, did not find any matches\n print(\"No matches found\")\nelif outputFormat == 'jira':\n # Print in a JIRA format\n print(\"{panel:title=Search for\", ('\"' + args.search + '\"'),\n \"in console logs of job\", args.job, \"between build no\",\n args.build_no - (args.no_of_builds - 1), \"and\", args.build_no, '}')\n for url in resultURLs:\n print('[', url, ']', sep=\"\")\n print('{noformat}')\n for line in resultURLs[url]:\n print(line)\n print('{noformat}')\n print(\"{panel}\")\nelif outputFormat == \"log-line\":\n # Print findings with log line attached\n for url in resultURLs:\n print(url, end=\" : \")\n for line in resultURLs[url]:\n print(line, end=\", \")\n print('\\n', end=\"\")\nelse: # outputFormat == \"plain\"\n # Print findings normally\n for url in resultURLs:\n print(url)\n","sub_path":"scripts/jenkins_console_log_search.py","file_name":"jenkins_console_log_search.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549348102","text":"from django.shortcuts import render\nfrom studentapp.models import student_leave\nfrom academic_app.views import Marksubmit,facloginsubmit\nfrom academic_app.models import marksubmit,attendances,studentregistration\n\n\n# Create your views here.\ndef stulogin(request):\n return render(request,'login.html')\ndef studentleave(request):\n queryset = student_leave.objects.all()\n return render(request,'student-leave-management.php',{'authors':queryset})\ndef stuleaveapp(request):\n if request.method=='POST':\n leave=request.POST.get('leave')\n name=request.POST.get('name')\n leave_on=request.POST.get('leave_on')\n 
leave_upto=request.POST.get('leave_upto')\n reason=request.POST.get('reason')\n message=request.POST.get('message')\n a=student_leave(leave=leave,name=name,leave_on=leave_on,leave_upto=leave_upto,reason=reason,message=message)\n a.save()\n queryset = student_leave.objects.all()\n return render(request,'student-leave-management.php',{'authors':queryset})\n\ndef student_assessment_php(request):\n query = marksubmit.objects.all().filter(student_name=request.session['usr'])\n return render(request,'student-assessment.php',{'authors':query})\ndef student_attendence_php(request):\n y = attendances.objects.all().filter(student_name=request.session['usr'])\n return render(request,'student-attendence.php',{'authors':y})\ndef student_profile_php(request):\n queryset=studentregistration.objects.all().filter(name=request.session['usr'])\n return render(request,'student-profile.php',{'key':queryset}) \ndef student_edit_php(request):\n queryset=studentregistration.objects.all().filter(name=request.session['usr'])\n return render(request,'student-edit.php',{'key':queryset}) \ndef editprofile(request):\n if request.method=='POST': \n admission_no=request.POST.get('admission_no')\n admission_date=request.POST.get('admission_date')\n name=request.POST.get('name')\n dob=request.POST.get('dob')\n gender=request.POST.get('gender')\n mobile=request.POST.get('mobile')\n guardian=request.POST.get('guardian')\n studentregistration.objects.filter(name=request.session['usr']).update(admission_no=admission_no,admission_date=admission_date,name=name,dob=dob,gender=gender,guardian=guardian,mobile=mobile)\n queryset=studentregistration.objects.all().filter(name=request.session['usr'])\n return render(request,'student-profile.php',{'key':queryset}) ","sub_path":"studentapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188291824","text":"# Assignment 6: Image binarization\n# Binarize the image.\n\n# --------------------------------------------\n# Imports\n# --------------------------------------------\n\n# Load OpenCV\nimport cv2\n# Load Numpy\nimport numpy as np\n# Load MatPlotLib\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n# --------------------------------------------\n# Functions\n# --------------------------------------------\n\n# Display an OpenCV grayscale image with Matplotlib\ndef image_show(orig_img):\n # Resolution\n resolution = 72\n # Create the figure\n fig = plt.figure(figsize=(orig_img.shape[1] / resolution, orig_img.shape[0] / resolution), dpi=resolution)\n # Add an axis to the figure\n ax = plt.subplot(111)\n # Show the image on the axis\n im = ax.imshow(orig_img, cmap=\"gray\")\n # Allow extra plots to be attached to the axis\n divider = make_axes_locatable(ax)\n # Create an area 2% of the axis size on its right, with a 0.1 inch gap\n cax = divider.append_axes(\"right\", size=\"2%\", pad=0.1)\n # Draw the colorbar in the created area\n fig.colorbar(im, cax=cax, orientation='vertical')\n plt.show()\n\n\n# Clamp the value to 0 <= v <= 255\ndef minmax(v):\n v = min(255, v)\n v = max(0, v)\n return v\n\n\n# Dithering with the Floyd–Steinberg algorithm\ndef dithering_gray(inMat):\n\n # Image size\n h = inMat.shape[0]\n w = inMat.shape[1]\n\n # Scan the image\n for y in range(0, h - 1):\n for x in range(1, w - 1):\n\n # Binarize the current pixel\n old_p = inMat[y, x]\n new_p = np.round(old_p / 255.0) * 255\n inMat[y, x] = new_p\n\n # Compute the quantization error\n quant_error_p = old_p - new_p\n\n # Distribute the error to the neighboring pixels\n inMat[y, x + 1] = minmax(inMat[y, x + 1] + quant_error_p * 7 / 16.0)\n inMat[y + 1, x - 1] = minmax(inMat[y + 1, x - 1] + quant_error_p * 3 / 16.0)\n inMat[y + 1, x] = 
minmax(inMat[y + 1, x] + quant_error_p * 5 / 16.0)\n inMat[y + 1, x + 1] = minmax(inMat[y + 1, x + 1] + quant_error_p * 1 / 16.0)\n\n    return inMat\n\n\n# --------------------------------------------\n# Processing\n# --------------------------------------------\n\n# Load the image from a file\nsrc_img = cv2.imread('resource/original.png')\ngray_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)\n\n# Show the grayscale image and wait\nimage_show(gray_img)\ninput(\"Displaying gray image. Hit Enter.\\n\")\n\n# Plain binarization\n# Copy the image\npost_img = gray_img.copy()\n# Two-level quantization with threshold 128\npost_img = (post_img > 128) * 255\n# Show the image and wait\nimage_show(post_img)\ninput(\"Displaying 2 color image. Hit Enter.\\n\")\n\n# Dithering\n# Copy the image\npost_img = gray_img.copy()\n# Dithering step\npost_img = dithering_gray(post_img)\n# Show the image and wait\nimage_show(post_img)\ninput(\"Displaying dithered image. Hit Enter.\\n\")\n\n\n# End of program\nquit()\n","sub_path":"kadai_06/kadai_06.py","file_name":"kadai_06.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525292488","text":"import pandas as pd\nimport numpy as np\nfrom objects_API.BacteriumJ import BacteriumJson\nfrom objects_API.BacteriophageJ import BacteriophageJson\nfrom objects_API.StrainJ import StrainJson\nfrom objects_API.SpecieJ import SpecieJson\nfrom objects_API.CoupleJ import CoupleJson\n\nfrom objects_new.Couples_new import Couple\nfrom objects_new.Organisms_new import Organism\nfrom objects_new.Strains_new import Strain\nfrom objects_new.Species_new import Specie\n\nfrom configuration.configuration_api import ConfigurationAPI\nfrom rest_client.AuthenticationRest import AuthenticationAPI\n\n\ndef readCSVToDataFrame(path_file):\n dataframe_interactions = pd.read_csv(filepath_or_buffer = path_file, sep=',')\n return dataframe_interactions\n\ndef getCoupleFromInteractionIdOldDB(interaction_id):\n couple_obj = Couple.get_couples_by_list_id([interaction_id])[0]\n return couple_obj\n\ndef getPhageDesignationById(id_phage):\n organism_obj = Organism.get_organism_by_id(id_phage)\n id_strain = organism_obj.fk_strain\n strain_obj = Strain.get_strain_by_id(id_strain)\n strain_designation = strain_obj.designation\n return strain_designation\n\n\ndef getBacteriumStrainSpecieDesignationById(id_bacterium):\n organism_obj = Organism.get_organism_by_id(id_bacterium)\n id_strain = organism_obj.fk_strain\n strain_obj = Strain.get_strain_by_id(id_strain)\n\n\n id_specie = strain_obj.fk_specie\n\n\n specie_obj = Specie.get_specie_by_id(id_specie)\n specie_designation = specie_obj.designation\n\n strain_designation = strain_obj.designation\n\n taxonomy_bacterium = 'Specie: ' + specie_designation + ' Strain: ' + strain_designation\n\n return taxonomy_bacterium\n\n\ndef getBacteriophageDesignationNewDBById(id_new_phage:int):\n bacteriophage_obj = BacteriophageJson.getByID(id_new_phage)\n phage_designation = bacteriophage_obj.designation\n\n return phage_designation\n\n\ndef getBacteriumDesignationNewByID(id_new_bacterium:int):\n bacterium_obj = BacteriumJson.getByID(id_new_bacterium)\n strain_id = bacterium_obj.strain\n strain_obj = StrainJson.getByID(strain_id)\n strain_designation = strain_obj.designation\n\n specie_id = strain_obj.specie\n specie_obj = SpecieJson.getByID(specie_id)\n specie_designation = specie_obj.designation\n\n taxonomy_bacterium = 'Specie: ' + specie_designation + ' Strain: ' + strain_designation\n\n return taxonomy_bacterium\n\ndef addInteractionsNewDB(interaction_type:bool, bacterium_id:int, phage_id:int, level_id:int, 
lysis_id:int, persone_responsible:int, source_data_id:int, validity_id:bool):\n\n\n couple_obj = CoupleJson(interaction_type = interaction_type, bacteriophage = phage_id, bacterium = bacterium_id, level = level_id, lysis = lysis_id, person_responsible = persone_responsible, source_data = source_data_id, validity = validity_id)\n\n\n couple_obj_json = couple_obj.setCouple()\n return couple_obj_json\n\n\ndef writeIdsInserted(id_interaction_old, id_interaction_new):\n str_write = str(id_interaction_old) + ',' + str(id_interaction_new)\n with open(\"ids_couples_inserted.txt\", \"a\") as myfile:\n myfile.write(str_write)\n\npath_file_name = 'data_ids_interaction_new_old_db.csv'\n\ndataframe_data = readCSVToDataFrame(path_file_name)\n\n\nconf_obj = ConfigurationAPI()\nconf_obj.load_data_from_ini()\nAuthenticationAPI().createAutenthicationToken()\n\nfor index, row in dataframe_data.iterrows():\n #Old couple treatment\n interaction_id = row['interaction_id_old_db']\n couple_obj = getCoupleFromInteractionIdOldDB(interaction_id)\n id_phage = couple_obj.fk_phage\n id_bacterium = couple_obj.fk_bacteria\n\n phage_designation = getPhageDesignationById(id_phage)\n bacterium_designation = getBacteriumStrainSpecieDesignationById(id_bacterium)\n\n #New couple treatment\n id_new_phage = row['id_phage']\n id_new_bacterium = row['id_bact']\n\n phage_designation_new = getBacteriophageDesignationNewDBById(id_new_phage)\n bacterium_designation_new = getBacteriumDesignationNewByID(id_new_bacterium)\n\n type_interaction = couple_obj.interact_pn\n source_data_id = couple_obj.fk_source_data\n person_resposible_id = 3 #Xavier\n level_interact_id = couple_obj.fk_level_interact\n lysis_inter_id = couple_obj.fk_lysis_inter\n validity_id = 4\n #Couple object insertion\n print('------------------------')\n print('Interaction information')\n print('Old phage: {0}'.format(phage_designation))\n print('New phage: {0}'.format(phage_designation_new))\n print('Old bacterium: {0}'.format(bacterium_designation))\n print('New bacterium: {0}'.format(bacterium_designation_new))\n\n if phage_designation == phage_designation_new and bacterium_designation == bacterium_designation_new:\n couple_obj = addInteractionsNewDB(interaction_type = type_interaction, bacterium_id = id_new_bacterium, phage_id = id_new_phage, level_id = level_interact_id, lysis_id = lysis_inter_id, persone_responsible = person_resposible_id, source_data_id = source_data_id, validity_id = validity_id)\n print(couple_obj)\n id_new_couple = couple_obj.id\n id_old_obj = interaction_id\n writeIdsInserted(id_old_obj, id_new_couple)\n else:\n input_value = input(\"Insert (1 = Yes; other = No) \")\n if input_value == '1':\n couple_obj = addInteractionsNewDB(interaction_type = type_interaction, bacterium_id = id_new_bacterium, phage_id = id_new_phage, level_id = level_interact_id, lysis_id = lysis_inter_id, persone_responsible = person_resposible_id, source_data_id = source_data_id, validity_id = validity_id)\n print(couple_obj)\n id_new_couple = couple_obj.id\n id_old_obj = interaction_id\n writeIdsInserted(id_old_obj, id_new_couple)\n\n\n\n \nprint(dataframe_data)","sub_path":"Importation_insertion_data/Migration_organism_interactions_verification.py","file_name":"Migration_organism_interactions_verification.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362007006","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\nimport os\r\nimport sys\r\nimport tkinter as tk # Import 
the Tkinter library\r\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\r\nfrom PIL import Image, ImageTk, ImageDraw\r\nfrom time import sleep\r\nimport numpy as np\r\nimport cv2 as cv\r\n\r\nDEF_WIDTH = 1080\r\nDEF_HEIGHT = 720\r\nIMAGE_HEIGHT = 720\r\nFRAME_LEFT_WIDTH = 360\r\n# Discard selections that are too small, to guard against accidental clicks\r\nMINI_RECT_AREA = 20\r\n\r\n\r\nclass RawImageEditor:\r\n def __init__(self, win, img, rects):\r\n # The variables X and Y record where the left mouse button was pressed\r\n self.X = tk.IntVar(value=0)\r\n self.Y = tk.IntVar(value=0)\r\n self.sel = False\r\n self.lastDraw = None\r\n self.lastDraws = []\r\n self.imageScale = 1.0 # image scaling ratio\r\n self.dispWidth = DEF_WIDTH # maximum height and width of the image display area\r\n self.dispHeight = DEF_HEIGHT\r\n self.rawImage = img\r\n self.calcImageScale(self.rawImage) # update self.imageScale\r\n self.dispWidth = int(self.imageScale * self.rawImage.width) # image size after scaling by the display ratio\r\n self.dispHeight = int(self.imageScale * self.rawImage.height)\r\n # scale the image\r\n self.dispImage = self.rawImage.resize((self.dispWidth, self.dispHeight))\r\n # selected regions\r\n self.selPositions = []\r\n for r in rects:\r\n self.selPositions.append(\r\n (r[0] * self.imageScale, r[1] * self.imageScale, r[2] * self.imageScale, r[3] * self.imageScale))\r\n print('self.selPositions___111 is:')\r\n print(self.selPositions)\r\n\r\n # create the top-level widget container\r\n self.top = tk.Toplevel(win, width=self.dispWidth, height=self.dispHeight)\r\n # hide the maximize and minimize buttons\r\n # self.top.overrideredirect(True)\r\n # Make topLevelWindow remain on top until destroyed, or attribute changes.\r\n self.top.attributes('-topmost', 'true')\r\n self.canvas = tk.Canvas(self.top, bg='white', width=self.dispWidth, height=self.dispHeight)\r\n self.tkImage = ImageTk.PhotoImage(self.dispImage)\r\n self.canvas.create_image(self.dispWidth // 2, self.dispHeight // 2, image=self.tkImage) # the first two arguments are the coordinates\r\n for r in self.selPositions:\r\n draw = self.canvas.create_rectangle(r[0], r[1], r[2], r[3], outline='green')\r\n self.lastDraws.append(draw)\r\n print(draw)\r\n print('self.selPositions___222 is:')\r\n print(self.selPositions)\r\n print(self.lastDraws)\r\n # self.canvas.bind('<Button-1>', self.onLeftButtonDown)\r\n\r\n # position where the left mouse button was pressed\r\n def onLeftButtonDown(event):\r\n self.X.set(event.x)\r\n self.Y.set(event.y)\r\n # start the selection\r\n self.sel = True\r\n # redraw the regions that were already selected\r\n for draw in self.lastDraws:\r\n self.canvas.delete(draw)\r\n self.lastDraws = []\r\n for r in self.selPositions:\r\n draw = self.canvas.create_rectangle(r[0], r[1], r[2], r[3], outline='green')\r\n self.lastDraws.append(draw)\r\n print('Mouse button pressed')\r\n\r\n self.canvas.bind('<Button-1>', onLeftButtonDown)\r\n\r\n # left mouse button dragged: show the selected region\r\n def onLeftButtonMove(event):\r\n if not self.sel:\r\n return\r\n try:\r\n # delete the rectangle that was just drawn, otherwise the moving mouse leaves a dark mass of rectangles\r\n self.canvas.delete(self.lastDraw)\r\n except Exception as e:\r\n pass\r\n self.lastDraw = self.canvas.create_rectangle(self.X.get(), self.Y.get(), event.x, event.y, outline='green')\r\n\r\n self.canvas.bind('<B1-Motion>', onLeftButtonMove)\r\n\r\n # get the position where the left button was released and save the selected region\r\n def onLeftButtonUp(event):\r\n self.sel = False\r\n sleep(0.1)\r\n # also handle selections pressed at the bottom-right and released at the top-left\r\n left, right = sorted([self.X.get(), event.x])\r\n top, bottom = sorted([self.Y.get(), event.y])\r\n if (right - left) * (bottom - top) > MINI_RECT_AREA:\r\n self.selPositions.append((left, top, right, bottom))\r\n print(self.selPositions)\r\n print('Left button released')\r\n # self.top.destroy() # close the topLevel when the left button is released\r\n\r\n # right mouse button pressed (not implemented, event not bound?)\r\n def onRightButtonDown(event):\r\n self.sel = False\r\n self.top.destroy()\r\n\r\n self.canvas.bind('<Button-3>', onRightButtonDown)\r\n self.canvas.bind('<ButtonRelease-1>', onLeftButtonUp)\r\n 
self.canvas.pack(fill=tk.BOTH, expand=tk.YES)\r\n\r\n # compute the image scaling ratio\r\n def calcImageScale(self, image):\r\n w = image.width\r\n h = image.height\r\n self.imageScale = 1.0\r\n # compute the smallest scaling ratio, preserving the original aspect ratio\r\n if w > self.dispWidth and h > self.dispHeight:\r\n ws = self.dispWidth * 1.0 / w\r\n hs = self.dispHeight * 1.0 / h\r\n if ws < hs:\r\n self.imageScale = ws\r\n else:\r\n self.imageScale = hs\r\n elif w > self.dispWidth and h < self.dispHeight:\r\n self.imageScale = self.dispWidth * 1.0 / w\r\n elif w < self.dispWidth and h > self.dispHeight:\r\n self.imageScale = self.dispHeight * 1.0 / h\r\n\r\n # delayed display\r\n def waitForWindow(self, win):\r\n win.wait_window(self.top)\r\n\r\n # convert back to original pixel positions\r\n def selectedPositions(self):\r\n realPos = []\r\n for r in self.selPositions:\r\n realPos.append(\r\n (r[0] / self.imageScale, r[1] / self.imageScale, r[2] / self.imageScale, r[3] / self.imageScale))\r\n print('=============')\r\n print(self.selPositions)\r\n print('Real coordinates are:')\r\n print(realPos)\r\n return realPos\r\n\r\n\r\nclass MainWin(tk.Tk):\r\n def __init__(self):\r\n if sys.version_info >= (3, 0):\r\n super().__init__()\r\n else:\r\n tk.Tk.__init__(self)\r\n self.title('Image Processing Tool')\r\n self.geometry('{}x{}'.format(DEF_WIDTH, DEF_HEIGHT))\r\n self.rawImagePath = '' # image path\r\n self.rawImage = None # self.rawImage: the original image, unscaled\r\n self.transRawImage = None # self.transRawImage: the original image after conversion, unscaled\r\n self.dispImage = None # self.dispImage: the displayed image, possibly scaled\r\n self.imageScale = 1.0 # image scaling ratio used for display; selections must later be scaled back to original coordinates\r\n self.leftFrameWidth = FRAME_LEFT_WIDTH\r\n self.frameDispHeight = DEF_HEIGHT # height of the whole window\r\n self.labelTextHeight = 20 # height of the text labels\r\n self.btnHeight = 40 # height of the buttons\r\n self.imageDispWidth = IMAGE_HEIGHT # maximum height and width of the image display area\r\n self.imageDispHeight = self.frameDispHeight / 2 - self.labelTextHeight * 2\r\n # selected regions\r\n self.liRect = []\r\n self.rawImageEditor = None\r\n self.setupUI()\r\n\r\n # scale the image\r\n def scaleDisplayImage(self, image):\r\n w = image.width\r\n h = image.height\r\n self.imageScale = 1.0\r\n # compute the smallest scaling ratio, preserving the original aspect ratio\r\n if w > self.imageDispWidth and h > self.imageDispHeight:\r\n ws = self.imageDispWidth * 1.0 / w\r\n hs = self.imageDispHeight * 1.0 / h\r\n if ws < hs:\r\n self.imageScale = ws\r\n else:\r\n self.imageScale = hs\r\n elif w > self.imageDispWidth and h < self.imageDispHeight:\r\n self.imageScale = self.imageDispWidth * 1.0 / w\r\n elif w < self.imageDispWidth and h > self.imageDispHeight:\r\n self.imageScale = self.imageDispHeight * 1.0 / h\r\n # scale the image\r\n return image.resize((int(self.imageScale * w), int(self.imageScale * h)))\r\n\r\n # choose an image file\r\n def selectImageFile(self):\r\n path = tk.StringVar()\r\n file_entry = tk.Entry(self, state='readonly', text=path)\r\n path_ = askopenfilename()\r\n path.set(path_)\r\n return file_entry.get()\r\n\r\n # used when opening an image; passes the image to the display function\r\n def openAndDisplayImage(self):\r\n self.rawImagePath = self.selectImageFile()\r\n print(self.rawImagePath)\r\n if '' != self.rawImagePath:\r\n self.rawImage = Image.open(self.rawImagePath)\r\n self.rawImage = self.rawImage.convert('RGBA')\r\n self.drawRawImageDisp()\r\n\r\n # draw the ListBox showing the selected rectangle coordinates\r\n def drawListBox(self):\r\n self.l_box.delete(0, tk.END)\r\n for item in self.liRect:\r\n r = '{},{},{},{}'.format(round(item[0], 1), round(item[1], 1), round(item[2], 1), round(item[3], 1))\r\n self.l_box.insert(0, r) # show the value\r\n\r\n # show the image with the rectangles drawn on it\r\n def drawRawImageDisp(self, selItems=[]):\r\n self.dispImage = self.scaleDisplayImage(self.rawImage) # the scaled image\r\n self.dispImage = self.dispImage.convert('RGB')\r\n 
draw = ImageDraw.Draw(self.dispImage)\r\n for i in range(len(self.liRect)):\r\n r = self.liRect[i]\r\n if i in selItems:\r\n draw.rectangle(\r\n (r[0] * self.imageScale, r[1] * self.imageScale, r[2] * self.imageScale, r[3] * self.imageScale),\r\n outline=\"red\")\r\n else:\r\n draw.rectangle(\r\n (r[0] * self.imageScale, r[1] * self.imageScale, r[2] * self.imageScale, r[3] * self.imageScale),\r\n outline=\"green\")\r\n img = ImageTk.PhotoImage(self.dispImage)\r\n self.image_l_raw.config(image=img)\r\n self.image_l_raw.image = img\r\n\r\n # delete the rectangles corresponding to the items selected in the ListBox\r\n def deleteSelectedItemFromListBox(self):\r\n # print(self.l_box.get(self.l_box.curselection()))\r\n idx = self.l_box.curselection()\r\n if len(idx) > 0:\r\n kp = []\r\n for v in range(len(self.liRect)):\r\n if v not in idx:\r\n kp.append(self.liRect[v])\r\n self.liRect = kp\r\n self.drawListBox()\r\n self.drawRawImageDisp()\r\n # used when opening an image; gets the path\r\n\r\n # image clicked\r\n def rawImageLabelClicked(self, event):\r\n if None != self.rawImage:\r\n if None == self.rawImageEditor:\r\n self.rawImageEditor = RawImageEditor(self, self.rawImage, self.liRect)\r\n self.rawImageEditor.waitForWindow(self.image_l_raw) # delayed display\r\n self.liRect = self.rawImageEditor.selectedPositions() # get the rectangle coordinates in original-image terms\r\n self.rawImageEditor = None\r\n self.drawListBox()\r\n self.drawRawImageDisp()\r\n\r\n # rectangle (coordinates) selected in the ListBox widget\r\n def onRectListboxSelect(self, event):\r\n idx = self.l_box.curselection()\r\n if len(idx) > 0:\r\n self.drawRawImageDisp(idx)\r\n\r\n # show the converted image\r\n def drawTransImageDisp(self):\r\n transImage = self.scaleDisplayImage(self.transRawImage)\r\n transImage = transImage.convert('L')\r\n img = ImageTk.PhotoImage(transImage)\r\n self.image_l_trans.config(image=img)\r\n self.image_l_trans.image = img\r\n\r\n # convert the image\r\n def doTransRawImage(self):\r\n self.transRawImage = Image.new('L', (self.rawImage.width, self.rawImage.height))\r\n for r in self.liRect:\r\n im = self.rawImage.crop(r)\r\n cv_im = cv.cvtColor(np.asarray(im), cv.COLOR_RGB2BGR)\r\n hsv = cv.cvtColor(cv_im, cv.COLOR_BGR2HSV)\r\n _, _, v = cv.split(hsv)\r\n avg = np.average(v.flatten())\r\n pixels = im.load()\r\n for j in range(im.height):\r\n for i in range(im.width):\r\n hv = v[j, i]\r\n if hv < avg * 1.2:\r\n # im.putpixel((i, j), 0) # slow\r\n pixels[i, j] = 0\r\n '''else :\r\n im.putpixel((i, j), (255, 255, 255, 255))'''\r\n self.transRawImage.paste(im, (int(r[0]), int(r[1])), mask=None)\r\n self.drawTransImageDisp()\r\n\r\n def onTransRawImageBtnClicked(self):\r\n if None != self.rawImage:\r\n self.doTransRawImage()\r\n\r\n def onSaveTransRawImageBtnClicked(self):\r\n if None != self.transRawImage:\r\n ext = os.path.splitext(self.rawImagePath)[-1]\r\n (path, name) = os.path.split(self.rawImagePath)\r\n filename = asksaveasfilename(title='Save Image', initialfile=name,\r\n filetypes=((\"jpeg files\", \"*{}\".format(ext)), (\"all files\", \"*.*\")))\r\n if '' != filename:\r\n self.transRawImage.save(filename)\r\n\r\n def setupUI(self):\r\n # left-hand menu column\r\n left_f = tk.Frame(self, height=self.frameDispHeight, width=self.leftFrameWidth)\r\n left_f.pack(side=tk.LEFT)\r\n # names and positions of the function buttons\r\n btnOpen = tk.Button(left_f, text='Open Image', command=self.openAndDisplayImage)\r\n btnOpen.place(y=25, x=30, width=300, height=self.btnHeight)\r\n btnTrans = tk.Button(left_f, text='Process Image', command=self.onTransRawImageBtnClicked)\r\n btnTrans.place(y=85, x=30, width=300, height=self.btnHeight)\r\n l_selRect = tk.Label(left_f, text='Mouse-selected regions')\r\n l_selRect.place(x=0, y=165, width=self.leftFrameWidth, 
height=self.labelTextHeight)\r\n '''List'''\r\n self.l_box = tk.Listbox(left_f) # create two list components\r\n self.l_box.place(x=0, y=165 + self.labelTextHeight, width=self.leftFrameWidth, height=270)\r\n self.l_box.bind('<<ListboxSelect>>', self.onRectListboxSelect)\r\n self.drawListBox()\r\n # delete the selected item\r\n btnDel = tk.Button(left_f, text='Delete Selected', command=self.deleteSelectedItemFromListBox)\r\n btnDel.place(y=460, x=30, width=300, height=self.btnHeight)\r\n btnSave = tk.Button(left_f, text='Save Result', command=self.onSaveTransRawImageBtnClicked)\r\n btnSave.place(y=550, x=30, width=300, height=self.btnHeight)\r\n # right-hand image display column\r\n right_f = tk.Frame(self, height=self.frameDispHeight, width=self.imageDispWidth)\r\n right_f.pack(side=tk.RIGHT)\r\n l_rawT = tk.Label(right_f, text='Original image')\r\n l_rawT.place(x=0, y=0, width=self.imageDispWidth, height=self.labelTextHeight)\r\n self.image_l_raw = tk.Label(right_f, relief='ridge')\r\n self.image_l_raw.place(x=0, y=self.labelTextHeight, width=self.imageDispWidth, height=self.imageDispHeight)\r\n self.image_l_raw.bind(\"<Button-1>\", self.rawImageLabelClicked)\r\n l_transT = tk.Label(right_f, text='Processed image')\r\n l_transT.place(x=0, y=self.labelTextHeight + self.imageDispHeight, width=self.imageDispWidth,\r\n height=self.labelTextHeight)\r\n self.image_l_trans = tk.Label(right_f, relief='ridge')\r\n self.image_l_trans.place(x=0, y=self.labelTextHeight + self.imageDispHeight + self.labelTextHeight,\r\n width=self.imageDispWidth, height=self.imageDispHeight)\r\n\r\n\r\nif __name__ == '__main__':\r\n win = MainWin()\r\n # enter the message loop\r\n win.mainloop()","sub_path":"shiyan/dazuoye/example_2.py","file_name":"example_2.py","file_ext":"py","file_size_in_byte":15664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633639285","text":"#Imports various systems that will be in use here\nimport random\nimport sys\n\n#Setting up variables\nrounds = 0\nround_count = 0\nplayer_score = 0\npc_score = 0\n\n#Script asks player how many rounds he wants to play\ndef roundsetup():\n global rounds\n rounds = input(\"How many rounds do you wish to play?\\n\")\n select()\n\n#This function asks the player for his move by taking numerical input\n#1 = rock, 2 = paper, 3 = scissors\ndef select():\n selection = input(\"What's your choice? 
1 = rock, 2 = paper, 3 = scissors\\n\")\n if selection in (\"1\", \"2\", \"3\"):\n pc_select(selection)\n else:\n # If player types in invalid input the function asks him to choose once again\n select()\n#Get random value and compare it to the player's selection\ndef pc_select(select):\n pc_select = (random.randint(1,3))\n choose_winner(select, pc_select)\n return pc_select\n\n#Compare PC and player's selection and find out who won\n#Rock, paper, scissors are in ascending order, whoever has superior / greater number wins\ndef choose_winner(select, pc_select):\n if int(select) == int(pc_select):\n #If both PC and player made the same selection the round ends in a tie\n print(\"It's a tie!\")\n round_end(\"tie\")\n elif int(select) > int(pc_select):\n #Player wins\n print(\"Player wins!\")\n round_end(\"player\")\n elif int(pc_select) > int(select):\n #PC wins\n print(\"PC wins!\")\n round_end(\"pc\")\n else:\n #This one will never trigger generally\n print(\"Somebody won and somebody lost idk\")\n\n#Whoever won gets the point to their score\ndef round_end(winner):\n global rounds\n global round_count\n round_count += 1\n if winner == \"player\":\n #Increase player score\n global player_score\n player_score += 1\n elif winner == \"pc\":\n #Increase PC score\n global pc_score\n pc_score += 1\n else:\n #If it's a tie, nobody gains a point\n select()\n if int(round_count) < int(rounds):\n #If the game is not over yet, ask the player for his choice once again\n select()\n else:\n #Once game ends, print scores and ask player if he wants to play again\n print(\"\\n[Player score] = \" + str(player_score) + \"\\n[PC Score] = \" + str(pc_score))\n print(\"Thanks for playing!\")\n ask_rematch()\n\n#Ask player if he wants to play again\ndef ask_rematch():\n print(\"Would you like to play again? [Y/N]\")\n decision = input()\n if decision in (\"y\", \"Y\"):\n #Restart the game\n restart()\n elif decision in (\"n\", \"N\"):\n #If player doesn't want to play again, close the game\n end()\n else:\n ask_rematch()\n\n#Set variables to default and begin the game anew\ndef restart():\n global rounds\n rounds = 0\n global round_count\n round_count = 0\n global player_score\n player_score = 0\n global pc_score\n pc_score = 0\n roundsetup()\n\n#Close the game\ndef end():\n sys.exit(0)\n \n \n\n# THIS IS WHERE THE SCRIPT ACTUALLY TRIGGERS\n \nprint(\"=== Welcome to Rock, Paper, Scissors! 
===\")\nroundsetup()\n","sub_path":"rock_paper_scissors_2021.py","file_name":"rock_paper_scissors_2021.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27998270","text":"import argparse\n\nimport os\nimport cv2\nimport json\nimport shutil\nimport numpy as np\nimport rasterio as rio\n\nimport wwtool\nfrom wwtool.datasets import Convert2COCO\n\nclass SHP2COCO(Convert2COCO):\n def __generate_coco_annotation__(self, annotpath, imgpath):\n \"\"\"\n docstring here\n :param self: \n :param annotpath: the path of each annotation\n :param return: dict() \n \"\"\"\n objects = self.__shp_parse__(annotpath, imgpath)\n \n coco_annotations = []\n\n for object_struct in objects:\n bbox = object_struct['bbox']\n segmentation = object_struct['segmentation']\n label = object_struct['label']\n\n width = bbox[2]\n height = bbox[3]\n area = height * width\n\n if area <= self.small_object_area and self.groundtruth:\n self.small_object_idx += 1\n continue\n\n coco_annotation = {}\n coco_annotation['bbox'] = bbox\n coco_annotation['segmentation'] = [segmentation]\n coco_annotation['category_id'] = label\n coco_annotation['area'] = np.float(area)\n\n coco_annotations.append(coco_annotation)\n \n return coco_annotations\n \n def __shp_parse__(self, label_file, image_file):\n \"\"\"\n (xmin, ymin, xmax, ymax)\n \"\"\"\n img_fn = os.path.splitext(os.path.basename(image_file))[0]\n\n if 'train' in imageset:\n geo_info_file = os.path.join(geopath, img_fn + '.png')\n geo_info = rio.open(geo_info_file)\n coord_flag = '4326'\n else:\n geo_info = rio.open(image_file)\n coord_flag = 'pixel'\n\n objects = []\n masks = shp_parser(label_file, geo_info, coord=coord_flag)\n total_object_num = len(masks)\n for mask in masks:\n object_struct = {}\n\n xmin, ymin, xmax, ymax = wwtool.pointobb2bbox(mask['segmentation'])\n bbox_w = xmax - xmin\n bbox_h = ymax - ymin\n\n object_struct['bbox'] = [xmin, ymin, bbox_w, bbox_h]\n object_struct['segmentation'] = mask['segmentation']\n object_struct['label'] = 1\n \n objects.append(object_struct)\n \n if total_object_num > self.max_object_num_per_image:\n self.max_object_num_per_image = total_object_num\n\n geo_info.close()\n return objects\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument(\n '--imagesets',\n type=str,\n nargs='+',\n choices=['trainval', 'test'])\n parser.add_argument(\n '--release_version', default='v1', type=str)\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # basic dataset information\n info = {\"year\" : 2019,\n \"version\" : \"1.0\",\n \"description\" : \"SHP-COCO\",\n \"contributor\" : \"Jinwang Wang\",\n \"date_created\" : \"2020\"\n }\n \n licenses = [{\"id\": 1,\n \"name\": \"Attribution-NonCommercial\",\n \"url\": \"http://creativecommons.org/licenses/by-nc-sa/2.0/\"\n }]\n\n # dataset's information\n image_format='.jpg'\n anno_format='.shp'\n\n shp_class = [{'supercategory': 'none', 'id': 1, 'name': 'footprint', }]\n\n core_dataset_name = 'buildchange'\n imagesets = ['train_shanghai']\n release_version = 'v0'\n keypoint = True\n\n shp_parser = wwtool.ShpParse()\n\n anno_name = [core_dataset_name, release_version]\n if keypoint:\n for idx in range(len(shp_class)):\n shp_class[idx][\"keypoints\"] = ['top', 'right', 'bottom', 'left']\n shp_class[idx][\"skeleton\"] = [[1,2], [2,3], [3,4], [4,1]]\n anno_name.append('keypoint')\n \n for imageset in 
imagesets:\n imgpath = './data/{}/{}/{}/images'.format(core_dataset_name, release_version, imageset)\n annopath = './data/{}/{}/{}/shp_4326'.format(core_dataset_name, release_version, imageset)\n geopath = './data/{}/{}/{}/geo_info'.format(core_dataset_name, release_version, imageset)\n save_path = './data/{}/{}/coco/annotations'.format(core_dataset_name, release_version)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if 'val' in imageset:\n sub_anno_fold = True\n else:\n sub_anno_fold = False\n\n shp = SHP2COCO(imgpath=imgpath,\n annopath=annopath,\n image_format=image_format,\n anno_format=anno_format,\n data_categories=shp_class,\n data_info=info,\n data_licenses=licenses,\n data_type=\"instances\",\n groundtruth=True,\n small_object_area=0,\n sub_anno_fold=sub_anno_fold)\n\n images, annotations = shp.get_image_annotation_pairs()\n\n json_data = {\"info\" : shp.info,\n \"images\" : images,\n \"licenses\" : shp.licenses,\n \"type\" : shp.type,\n \"annotations\" : annotations,\n \"categories\" : shp.categories}\n\n anno_name.insert(1, imageset)\n with open(os.path.join(save_path, \"_\".join(anno_name) + \".json\"), \"w\") as jsonfile:\n json.dump(json_data, jsonfile, sort_keys=True, indent=4)\n","sub_path":"tools/datasets/buildchange/shp2coco.py","file_name":"shp2coco.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307590831","text":"import argparse\nimport logging\nimport os\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication\n\nfrom planner import constants\nfrom planner.gui.mainwindow import MainWindow\n\nfile_name = os.path.basename(__file__).replace('.py', '')\n\n# configure logging\nlog = logging.getLogger(file_name)\nlog.setLevel(logging.ERROR)\n\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.ERROR)\n\nformatter = logging.Formatter(\n u'[%(name)s:%(levelname)s]: %(message)s')\nch.setFormatter(formatter)\n\nlog.addHandler(ch)\n\n\ndef get_settings(options):\n # initialize settings dictionary\n settings = dict()\n\n # process debug\n settings['debug'] = True if options.debug else False\n if options.debug:\n log.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n log.debug('Args: %s', options)\n\n # return settings\n return settings\n\n\ndef get_options(arguments):\n parser = argparse.ArgumentParser(prog=__doc__,\n description='')\n # process command line args\n\n parser.add_argument('-d',\n '--debug',\n help='Display debug messages.',\n action=\"store_true\")\n\n return get_settings(parser.parse_args(arguments))\n\n\ndef run():\n # get command line arguments\n arguments = sys.argv[1:]\n\n # get settings from arguments\n settings = get_options(arguments)\n\n # create Qt application\n app = QApplication(sys.argv)\n\n # initialize app information\n app.setOrganizationName(constants.APP_ORG)\n app.setApplicationName(constants.APP_NAME)\n app.setApplicationVersion(constants.APP_VERSION)\n\n # create main window\n main_window = MainWindow()\n\n # start app\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"planner/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347691973","text":"import sys\nimport tarfile\nimport os\nimport platform\n\ntarball = os.path.abspath(sys.argv[1])\ndirname = os.path.abspath(sys.argv[2])\nif platform.system() == 'OS/390':\n import tempfile \n tempfile = 
tempfile.NamedTemporaryFile()\n cmdtype = (\"cd %s\"\n \" && (iconv -f IBM-1047 -t ISO8859-1 %s | gunzip -c > %s)\"\n \" && pax -ofrom=ISO8859-1,to=IBM-1047 -rf %s\")\n cmd = cmdtype % (dirname, tarball, tempfile.name, tempfile.name)\n os.system(cmd)\nelse:\n tfile = tarfile.open(tarball,'r:gz');\n tfile.extractall(dirname)\nsys.exit(0)\n\n","sub_path":"deps/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"487597342","text":"import argparse\nimport os\nimport sys\nimport re\n\n\nparser = argparse.ArgumentParser(description='Rename files.')\nparser.add_argument('--prefix',\n type=str,\n help='Prefix which will be used to create file name. Default is None.',\n default='')\nparser.add_argument('--suffix',\n type=str,\n help='Suffix which will be used to create file name. Default is None.',\n default='')\nparser.add_argument('--digits',\n type=int,\n help='Number of digits which will be used to distinguish files. Default is 4.',\n default=4)\nparser.add_argument('--path',\n type=str,\n help='Path to work directory.',\n required=True)\nparser.add_argument('--extension',\n type=str,\n help='All files with provided file extension will be renamed. Default is *.',\n default='*')\n\narguments = parser.parse_args()\n\n\ndef msg(_string, _msg_type='INFO'):\n \"\"\"\n Prints message with specified prefix.\n\n :param _string: Message to print\n :param _msg_type: Message qualifier\n :return: None\n \"\"\"\n print('[{}] {}'.format(_msg_type, _string))\n\n\ndef check_content(_path, _digi):\n \"\"\"\n Gets directory content and validates it.\n\n :param _path: Directory path\n :param _digi: Width of unique part of the file name\n :return: Listed files\n \"\"\"\n _content = (os.listdir(_path))\n if len(_content) == 0:\n msg('Directory is empty. 
Nothing to do.')\n        sys.exit(0)\n    if len(_content) > (int('9' * _digi)):\n        msg('Directory contains more elements of specified type than '\n            'can be renamed with provided argument --digits', 'ERROR')\n        sys.exit(1)\n    return _content\n\n# Path: /home/python/Pictures/wallpapers\n\n\ndef main(args):\n    \"\"\"\n    Gets directory content and renames it accordingly.\n\n    :param args: Arguments which have been provided to script\n    :return: None\n    \"\"\"\n    msg('path: {}'.format(args.path))\n    msg('prefix: {}'.format(args.prefix))\n    msg('digits: {}'.format(args.digits))\n    msg('suffix: {}'.format(args.suffix))\n    msg('extension: {}'.format(args.extension.lower()))\n\n    def rename(_file, _counter):\n        \"\"\"\n        Renames file.\n\n        :param _file: File to rename\n        :param _counter: Unique file number\n        :return: None\n        \"\"\"\n        _ext = _file.lower().split('.')[-1]\n        os.rename(os.path.join(args.path, _file),\n                  os.path.join(args.path, '{}{:0{width}}{}.{}'.format(args.prefix, _counter,\n                                                                      args.suffix, _ext, width=args.digits)))\n\n    def match_file(_file):\n        \"\"\"\n        Checks if file should be renamed.\n\n        :param _file: File to check\n        :return: Corresponding match object\n        \"\"\"\n        if args.extension == '*':\n            _pattern = r'\\..*$'\n        else:\n            _pattern = r'\\.{}$'.format(args.extension.lower())\n        return re.search(_pattern, _file)\n\n    def iterate_through_dir():\n        \"\"\"\n        Iterates through directory content.\n\n        :return: None\n        \"\"\"\n        _directory_content = check_content(args.path, args.digits)\n        _directory_content = sorted(_directory_content)\n        _counter = 0\n        for _file in _directory_content:\n            if os.path.isfile(os.path.join(args.path, _file)) and match_file(_file):\n                rename(_file, _counter)\n                _counter += 1\n\n    if not os.path.isdir(args.path):\n        msg('Directory does not exist: {}'.format(args.path), 'ERROR')\n        sys.exit(1)\n\n    if args.extension != '*':\n        iterate_through_dir()\n    else:\n        iterate_through_dir()\n\n\nif __name__ == \"__main__\":\n    main(arguments)\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144156835","text":"import sys\nimport os\nimport json\nimport calendar\nimport time\n\nimport datetime\nimport dateutil.relativedelta\n\nfrom github import Github\nfrom github import RateLimitExceededException\nimport pandas as pd\nimport argparse\nimport pprint\n\nfrom github_connection.github import github_connection\nfrom rate_limiter.rate_limiter import rate_limiter\nfrom output.ouput import outputer\nfrom dataframe.csv import dataframe_to_csv\nfrom dataframe.markdown import dataframe_to_markdown\n\nfrom mergers.convertors import for_output\nfrom mergers.pull_requests import pull_requests\n\npp = pprint.PrettyPrinter(indent=4)\nspacer = ' '\n\n\n\n# main class\nclass merge_report(github_connection):\n dataframe_handler = None\n start_date = None\n end_date = None\n default_date_structure = {}\n\n def __init__(self, organization_slug, team_slug, dataframe_handler, start_date, end_date):\n super().__init__( organization_slug=organization_slug, team_slug=team_slug )\n self.dataframe_handler = dataframe_handler\n self.start_date = start_date\n self.end_date = end_date\n\n return\n\n # main function body\n def generate(self):\n # conversion and save\n out = outputer(for_output, self.dataframe_handler.save)\n # get the team repos\n print('---------------------------')\n print('Getting team {} repos'.format(self.team_slug))\n print('---------------------------')\n get_and_set_repos = 
self.get_and_set_team_repos\n        api_runner = rate_limiter(self.g)\n        api_runner.run(get_and_set_repos)\n        # totals & counters\n        total = self.team_repos.totalCount\n        x = 1\n        # loop over each repo and find details\n        for repo in self.team_repos:\n            print('[{}/{}] {}'.format(x, total, repo.name))\n            # create the pr class\n            pr = pull_requests(repo, self.start_date, self.end_date)\n            # run via the limiter, with grouping running after\n            api_runner.run(pr.get, out.output)\n            api_runner.run(pr.group, out.output)\n            # add the results\n            out.append(repo.name, pr.structure)\n            print('---------------------------')\n            x += 1\n        #\n        out.output()\n        return\n\n\ndef main():\n    # date handling\n    now = datetime.datetime.utcnow()\n    start = now - dateutil.relativedelta.relativedelta(months=6)\n    start = start.replace(day=1, hour=0, minute=0, second=0)\n\n    parser = argparse.ArgumentParser(description='Generate a report of merges to the default branch - grouped by month - by repo.')\n    # github org & team\n    parser.add_argument(\"--organization\",\n                        default=\"ministryofjustice\",\n                        help=\"Set the organisation to query against\" )\n\n    parser.add_argument(\"--team\",\n                        default=\"opg\",\n                        help=\"Set the team to fetch repositories from\" )\n    # determine if we output as csv or markdown from the dataframe\n    parser.add_argument(\"--type\",\n                        default=\"csv\",\n                        choices=[\"csv\", \"md\"],\n                        help=\"Output as either a csv or a markdown file (default to csv)\"\n                        )\n    # pick the location to save the report\n    parser.add_argument(\"--filename\",\n                        default=\"merge_counts\",\n                        help=\"Name of the file to save results to (excluding extension)\" )\n\n    # start & end date of the report\n    parser.add_argument(\"--start\",\n                        type=datetime.date.fromisoformat,\n                        default=start,\n                        help=\"Set the start date for this report (default: {})\".format(start.strftime(\"%Y-%m-%d\")) )\n    parser.add_argument(\"--end\",\n                        type=datetime.date.fromisoformat,\n                        default=now,\n                        help=\"Set the end date for this report (default: {})\".format(now.strftime(\"%Y-%m-%d\")) )\n\n\n\n    args = parser.parse_args()\n    filename = args.filename\n    # convert to a datetime for consistency\n    start_date = datetime.datetime(args.start.year, args.start.month, args.start.day)\n    end_date = args.end\n    # create the dataframe handler\n    if args.type == \"csv\":\n        df = dataframe_to_csv(filename)\n    else:\n        df = dataframe_to_markdown(filename)\n\n    report = merge_report(args.organization, args.team, df, start_date, end_date)\n    report.generate()\n    return\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"merge_report.py","file_name":"merge_report.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95859292","text":"# TO-DO: Complete the selection_sort() function below\ndef selection_sort(arr):\n    # loop over all n elements\n    for i in range(len(arr)):\n        smallest_index = i\n\n        for j in range(i + 1, len(arr)):\n            if arr[smallest_index] > arr[j]:\n                smallest_index = j\n        # swap the current element with the smallest remaining one\n        arr[i], arr[smallest_index] = arr[smallest_index], arr[i]\n    return arr\n\n\n# TO-DO: implement the Bubble Sort function below\ndef bubble_sort(arr):\n    swap = False\n    for index in range(0, len(arr) - 1):\n        if arr[index] > arr[index + 1]:\n            print(\"Greater value found\")\n            print(f\"{arr[index]} is greater than {arr[index + 1]}\")\n            print(\"Swapping...\")\n            swap = True\n            arr[index], arr[index + 1] = arr[index + 1], 
arr[index]\n            print(\"Finished swapping....\")\n    if swap:\n        print(\"One swap occurred, checking again\")\n        bubble_sort(arr)\n    return arr\n\n# arr = [5, 1, 4, 0]\n# print(bubble_sort(arr))\n\nprint(selection_sort([4, 1, 3, 10]))\n\n\n'''\nSTRETCH: implement the Count Sort function below\n\nCounting sort is a sorting algorithm that works on a set of data where\nwe specifically know the maximum value that can exist in that set of\ndata. The idea behind this algorithm then is that we can create \"buckets\"\nfrom 0 up to the max value. This is most easily done by initializing an\narray of 0s whose length is the max value + 1 (why do we need this \"+ 1\"?).\n\nEach buckets[i] then is responsible for keeping track of how many times\nwe've seen `i` in the input set of data as we iterate through it.\nOnce we know exactly how many times each piece of data in the input set\nshowed up, we can construct a sorted set of the input data from the\nbuckets.\n\nWhat is the time and space complexity of the counting sort algorithm?\n'''\ndef counting_sort(arr, maximum=None):\n    # Determine the largest value if it was not supplied\n    if maximum is None:\n        maximum = max(arr) if arr else 0\n    # One bucket per possible value; \"+ 1\" so that index `maximum` exists\n    buckets = [0] * (maximum + 1)\n    for value in arr:\n        buckets[value] += 1\n    # Rebuild the sorted data from the bucket counts\n    return [value for value, count in enumerate(buckets) for _ in range(count)]\n","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403321937","text":"# Trackrr.py (sync.py)\n# Trackrr's API Client\n\n# Imports\n\nimport typing\n\nimport httpx\nimport json\n\nfrom trackrr.errors import (\n    ParsingError,\n    InvalidAPIKey,\n    InvalidParams,\n    APIError,\n    APIReturnedError\n)\n\nfrom trackrr.__version__ import __version__ as trackrr_version\n\nclass Trackrr:\n    \"\"\" Represents a client used for requests \n    \n    Args:\n        api_key (str): Required key to interact with the Trackrr API\n        version (str, optional): The API version. Default value is \"v1\"\n\n    Raises:\n        InvalidAPIKey (Exception): Raised when an API key is invalid/missing \n    \"\"\"\n    def __init__(self, api_key: str=None, version: str=\"v1\"):\n        if api_key is None:\n            raise InvalidAPIKey(\"An API key was not given/is required\")\n\n        self.session = None\n        self.headers = { \"X-Trackrr-APIKey\": api_key, \"X-Trackrr-Identifier\": f\"SyncPython - {trackrr_version}\" }\n        self.base_url = f\"https://api.trackrr.cc/{version}\"\n\n    def create_session(self):\n        \"\"\" Creates an httpx session \"\"\"\n        # The session is created lazily by send_request on first use\n        self.session = httpx.Client()\n\n    def send_request(self, url: str, params: dict) -> dict:\n        \"\"\" Sends a request to the Trackrr API \n        This function will handle errors for the API\n\n        You do not need to call this function directly\n\n        Args:\n            url (str): The request URL\n            params (dict): Parameters given to the request\n\n        Raises:\n            ParsingError (Exception): Raised when a request can't be parsed\n\n        Returns:\n            dict: The contents of the request\n        \"\"\"\n\n        url = self.base_url + url\n\n        if self.session is None:\n            # Create a session if one doesn't exist\n            self.create_session()\n\n        try:\n            send_request = self.session.get(url, params=params, headers=self.headers)\n            if send_request.status_code == 502:\n                raise APIError(\"Request returned a 502. 
This may mean the service is unavailable.\")\n \n if send_request.status_code == 200:\n parse_request_to_json = send_request.json()\n return parse_request_to_json\n except json.JSONDecodeError:\n raise ParsingError(f\"Issue with parsing the request from the API.\")\n \n if send_request.status_code == 500:\n if send_request.headers.get(\"Content-Type\") == \"application/json\":\n # This means that the error was probably generated by the API not the webserver or DNS.\n raise APIReturnedError(\"Server returned a 500.\", request=send_request)\n else:\n raise APIError(\"Server returned a 500 with improper JSON output.\")\n \n if send_request.status_code == 422:\n try:\n parse_request_to_json = send_request.json()\n \n _return_message = []\n\n for validation_error in parse_request_to_json['detail']:\n selected_line_of_code_data = validation_error['loc']\n selected_loc_type = selected_line_of_code_data[0]\n selected_loc_name = selected_line_of_code_data[1]\n\n selected_line_of_code_message = validation_error['msg']\n selected_line_of_code_type = validation_error['type']\n\n _return_message.append(\n f\"{selected_loc_type} {selected_loc_name}: {selected_line_of_code_message} ({selected_line_of_code_type})\"\n )\n\n if _return_message == []:\n raise InvalidParams(\"Service returned a validation error but there was no validation data found. This is probably a problem with the service.\")\n\n if len(_return_message) > 1:\n raise InvalidParams(\"API Returned Error: \" + \", \".join(_return_message))\n \n raise InvalidParams(\"API Returned Error: \" + _return_message[0])\n\n\n except json.JSONDecodeError:\n raise InvalidParams(\"Service returned a validation error and another exception occurred when parsing the message.\")\n\n\n\n\n def search_isrc_code(self, isrc_code: str, filter_services: typing.Union[str, typing.List]=None, flags: str = None):\n current_params = {}\n\n if flags:\n current_params['flag'] = flags\n\n if filter_services:\n if isinstance(filter_services, str):\n raise ValueError(\"You cannot have only one service in filter services. If using multiple services, Please supply: \", \"list\")\n else:\n filtered_services = \",\".join(filter_services)\n current_params['filteredServices'] = filtered_services\n\n current_params['isrcCode2'] = isrc_code\n\n response = self.send_request(\"/search/song/isrc\", params=current_params)\n \n song_results = [Song(response['services'][x]) for x in response['meta']['servicesReturned']]\n services_returned = response['meta']['servicesReturned']\n return TrackrrResult(\n list_of_songs=song_results,\n services_returned=services_returned\n )\n\n\n def search_song_by_url(self, url: str, filter_services: typing.Union[str, typing.List]=None, flags: str = None):\n current_params = {}\n\n if flags:\n current_params['flag'] = flags\n\n if filter_services:\n if isinstance(filter_services, str):\n raise ValueError(\"You cannot have only one service in filter services. 
If using multiple services, Please supply: \", \"list\")\n else:\n filtered_services = \",\".join(filter_services)\n current_params['filteredServices'] = filtered_services\n\n current_params['url'] = url\n\n response = self.send_request(\"/search/song/link\", params=current_params)\n \n song_results = [Song(response['services'][x]) for x in response['meta']['servicesReturned']]\n services_returned = response['meta']['servicesReturned']\n return TrackrrResult(\n list_of_songs=song_results,\n services_returned=services_returned\n )\n\n\n def search_song(self, search_query: str, filter_services: typing.Union[str, typing.List]=None, flags: str = None):\n current_params = {}\n\n if flags:\n current_params['flag'] = flags\n\n if filter_services:\n if isinstance(filter_services, str):\n raise ValueError(\"You cannot have only one service in filter services. If using multiple services, Please supply: \", \"list\")\n else:\n filtered_services = \",\".join(filter_services)\n current_params['filteredServices'] = filtered_services\n\n current_params['query'] = search_query\n\n response = self.send_request(\"/search/song\", params=current_params)\n \n song_results = [Song(response['services'][x]) for x in response['meta']['servicesReturned']]\n services_returned = response['meta']['servicesReturned']\n return TrackrrResult(\n list_of_songs=song_results,\n services_returned=services_returned\n )\n\n","sub_path":"trackrr/clients/sync_client.py","file_name":"sync_client.py","file_ext":"py","file_size_in_byte":7220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"480362184","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n#imports\nimport re, sys\n\n\n#function to test for occurrence of Feature 2\ndef feature_1(tagstr) :\n\n flag = False\n tagset = tagstr.split(\"$^$\")\n\n if len(tagset) > 0 :\n for i in tagset :\n if len(i.split()) > 2 :\n flag = True\n break\n return flag\n \n\n#function to extract posts from file and search them\ndef search_file(data_list) :\n\n #testing for occurrences of feature 1\n count_f1 = 0\n for data in data_list :\n if(feature_1(data)) :\n count_f1 += 1\n\n #returning total number of posts\n #and number of posts matching \n print(\"Number of posts: \"+ str(len(data_list)-1))\n print(\"Occurrences: \"+ str(count_f1))\n print(\"Percentage: \", str(float(count_f1*100.0/float(len(data_list)-1))))\n return len(data_list)-1, count_f1\n\n\n#function to extract posts from file\ndef extract_posts(scrape_file):\n L = []\n f = open(scrape_file)\n data = f.read().split(\"@#@\")\n f.close()\n return data\n\n \n#driver\nif __name__ == '__main__' :\n\n tot_posts = 0\n count_occ = 0\n\n for i in range(1, 11) :\n f = \"tags\" + str(i) + \".txt\"\n print(\"\\nFor data in \"+f+\" : \")\n data = extract_posts(f)\n posts, occ = search_file(data)\n tot_posts += int(posts)\n count_occ += int(occ)\n\n print(\"\\nTotal compiled data shows: \")\n print(\"Total number of posts: \"+str(tot_posts))\n print(\"Total occurrences: \"+str(count_occ))\n print(\"Total percentage: \"+str(float(count_occ*100.0/float(tot_posts))))\n\n","sub_path":"tumbtest_for_feature_2.py","file_name":"tumbtest_for_feature_2.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150018748","text":"def create_board(n):\n board = []\n for row in range(n):\n board.append([])\n for column in range(n):\n board[row].append('0')\n return board\n\n\ndef print_board(board):\n for row 
in board:\n        # print space in between numbers\n        print(\" \".join(row))\n\n\n# recursively call put_queen; shift the queen's position if queen_present returns False\ndef put_queen(board, queen, size):\n    if queen == size:\n        return True\n    for place in range(size):\n        if queen_present(board, place, queen):\n            board[place][queen] = '1'\n            # shift queen position\n            if put_queen(board, queen+1, size):\n                return True\n            board[place][queen] = '0'\n    return False\n\n\n# based on backtracking algorithm, check if\n# [row][column] is valid spot for queen\ndef queen_present(board, row, column):\n    # check if queen present\n    for i in range(column):\n        if board[row][i] == '1':\n            return False\n    # check diagonal\n    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):\n        if board[i][j] == '1':\n            return False\n    # check diagonal\n    for i, j in zip(range(row, len(board), 1), range(column, -1, -1)):\n        if board[i][j] == '1':\n            return False\n    # current position is a valid spot for queen\n    return True\n\n\ndef test_board(n):\n    board = create_board(n)\n    if put_queen(board, 0, n):\n        print(\"N = \" + str(n))\n        print_board(board)\n        print(\"\\n\")\n\n\nfor i in range(3, 11):\n    test_board(i)","sub_path":"CS299/Project3/NQueens.py","file_name":"NQueens.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630970515","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/1 22:59\n# @Blog : http://www.cnblogs.com/uncleyong\n# @Gitee : https://gitee.com/uncleyong\n# @QQ group : 652122175\n# @WeChat official account : 全栈测试笔记\n\n\nfrom conf.settings import PATTERN\nfrom lib.global_variables import gv\n\nimport re\ndef parameter_substitution(strParam):\n    '''Replace dependent parameter placeholders with their actual values'''\n    keys = re.findall(PATTERN, strParam)\n    for key in keys:\n        value = gv.getVar(key)\n        strParam = strParam.replace('${' + key + '}', str(value))  # replace returns the new, substituted string\n    print('The string after parameter substitution is:', strParam)\n    return strParam\nif __name__ == '__main__':\n    strParam = parameter_substitution(\"http://127.0.0.1:9999/login2\")\n    print(strParam)\n","sub_path":"unittest_demo3/lib/parameter_substitution.py","file_name":"parameter_substitution.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246534170","text":"from __future__ import division\nimport nltk, re, pprint, time\nfrom nltk.stem.lancaster import LancasterStemmer\nf1 = open('sample1.txt')\nf2 = open('sample2.txt')\nraw1 = f1.read()\nraw2 = f2.read()\n\nlist_raw1 = raw1.split('.')\nlist_raw2 = raw2.split('.')\nprint(list_raw1)\nlist_tok1 = []\nlist_tok2 = []\n\nfor i in range(len(list_raw1)):\n    list_tok1.append(nltk.word_tokenize(list_raw1[i].lower()))\n\nfor i in range(len(list_raw2)):\n    list_tok2.append(nltk.word_tokenize(list_raw2[i].lower()))\n\nlist_pos1 = []\nlist_pos2 = []\n\nfor i in range(len(list_raw1)):\n    list_pos1.append(nltk.pos_tag(list_tok1[i]))\n\nfor i in range(len(list_raw2)):\n    list_pos2.append(nltk.pos_tag(list_tok2[i]))\n\nst = LancasterStemmer()\n\n#print (pos)\n#grammar = \"NP: {?*}\"\n#cp = nltk.RegexpParser(grammar)\n#result = cp.parse(pos)\n#print (result)\n#result.draw()\n\n# POS tags whose words are kept as-is (nouns, adjectives, adverbs, possessives)\nkeep_tags = ('NN', 'NNS', 'NNP', 'NNPS', 'POS', 'JJ', 'JJR', 'JJS', 'RB', 'RBR', 'RBS')\n# Verb tags whose words are stemmed first ('VBP' is intentionally excluded)\nstem_tags = ('VB', 'VBD', 'VBG', 'VBN', 'VBZ')\n\nlist2 = []\nfor sentence in list_pos1:\n    list1 = []\n    for word, tag in sentence:\n        if tag in keep_tags:\n            list1.append(word)\n        elif tag in stem_tags:\n            list1.append(st.stem(word))\n    list2.append(list1)\n\nlist3 = []\nfor sentence in list_pos2:\n    list1 = []\n    for word, tag in sentence:\n        if tag in keep_tags:\n            list1.append(word)\n        elif tag in stem_tags:\n            list1.append(st.stem(word))\n    list3.append(list1)\n\n\nprint (\"The words from document 1\\n\" ,list2)\nprint (\"The words from document 2\\n\" ,list3)\n\n\nlist4 = []\ncount = 0\ncount1 = 0\ncount2 = 0\nfor i in range(len(list2)):\n    list4 = set(list2[i])\n    count1 += len(list4)\n    list5 = []\n    list6 = []\n    length = 0\n    mlength = 0\n    for j in range(len(list3)):\n        list5 = set(list3[j])\n        list6 = list4 & list5\n        length = len(list6)\n        if length > mlength:\n            mlength = length\n    count += mlength\nprint(count1)\nprint(count)\na = count / count1\n#b = count /count2\nb = a\nx = ((a+b)/2)*100\n\nprint (\"Percentage of plagiarism :\\n\\t\\t\",x)\n","sub_path":"Plagiarism 
Checker/plagiarism.py","file_name":"plagiarism.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"101184301","text":"from illuminate import InteropDataset\nimport glob\nimport os\nimport gzip\nimport sys\n\n\n#Modif_20200306\n\n\nclass FastqReader:\n    \"\"\"\n    Reader for FASTQ files\n    \"\"\"\n    def __init__(self, fname):\n        self.__file = None\n        self.__gz = False\n        self.__eof = False\n        self.filename = fname\n        if self.filename.endswith(\".gz\"):\n            self.__gz = True\n            self.__file = gzip.open(self.filename, \"r\")\n        else:\n            self.__gz = False\n            self.__file = open(self.filename, \"r\")\n        if self.__file == None:\n            print(\"Failed to open file \" + self.filename)\n            sys.exit(1)\n\n    def __del__(self):\n        if self.__file != None:\n            self.__file.close()\n\n    def nextRead(self):\n        if self.__eof == True or self.__file == None:\n            return None\n\n        lines = []\n        # read the 4 lines of one record (name, sequence, strand, quality)\n        for i in xrange(0, 4):\n            line = self.__file.readline().rstrip()\n            if len(line) == 0:\n                self.__eof = True\n                return None\n            lines.append(line)\n        return lines\n\n    def isEOF(self):\n        return False\n\nclass MiSeqStatComputer():\n    \"\"\"\n    MiSeq statistics calculator\n    \"\"\"\n\n    def __init__(self,run_path,lspq_miseq_dir_path):\n        self.run_path = run_path\n        self.lspq_miseq_dir_path = lspq_miseq_dir_path\n        self.cluster_passing_filter = 0\n        self.cluster_density_ = 0\n        self.perc_over_q30 = 0\n\n        #print('RUN PATH IS ', self.run_path)\n\n        self.Dataset = InteropDataset(self.run_path)\n        self.experiment_name = self.Dataset.Metadata().experiment_name\n\n        #print('experiment_name IS ', self.experiment_name)\n        #print('lspq_miseq_dir_path IS ', self.lspq_miseq_dir_path)\n\n        self.r1_samples_qual = {}\n        self.r2_samples_qual = {}\n\n        self.total_nb_reads = 0\n\n\n    def ComputeRunStat(self):\n        \"\"\"\n        Run-level statistics\n        :return:\n        \"\"\"\n        qualitymetrics = self.Dataset.QualityMetrics()\n        tilemetrics = self.Dataset.TileMetrics()\n        #print qualitymetrics.read_qscore_results['q30']\n        #print qualitymetrics.read_qscore_results.__str__()\n        #print qualitymetrics.read_config\n\n        self.perc_over_q30 = round((float(qualitymetrics.read_qscore_results['q30'][0]) + float(qualitymetrics.read_qscore_results['q30'][3]))/2,1)\n\n        cluster_density = tilemetrics.mean_cluster_density\n        self.cluster_density_ = round(float(cluster_density)/1000,1)\n\n        self.cluster_passing_filter = round(float(tilemetrics.mean_cluster_density_pf)/float(cluster_density),3) * 100\n\n    def qual_stat(self,qstr):\n        \"\"\"\n        Number of bases with a quality value of Q30 or higher\n        :param qstr:\n        :return:\n        \"\"\"\n        q30 = 0\n        for q in qstr:\n            qual = ord(q) - 33\n            if qual >= 30:\n                q30 += 1\n        return q30\n\n\n    def ComputeSamplesStat(self):\n        \"\"\"\n        Compute per-sample statistics\n        :return:\n        \"\"\"\n\n        for fastq in glob.glob(os.path.join(self.run_path,'Data','Intensities','BaseCalls',\"*.fastq.gz\")):\n            sample_name = os.path.basename(fastq).split('_')[0]\n            sens = os.path.basename(fastq).split('_')[3]\n\n            fastq_reader = FastqReader(fastq)\n            total_count = 0\n            q30_count = 0\n            nb_read = 0\n\n            while True:\n                read = fastq_reader.nextRead()\n                if read == None:\n                    break\n                nb_read += 1\n                total_count += len(read[3])\n                q30 = self.qual_stat(read[3])\n                q30_count += q30\n\n            del(fastq_reader)\n\n            self.total_nb_reads += nb_read\n\n            if sens == \"R1\":\n                self.r1_samples_qual[sample_name] = {}\n                self.r1_samples_qual[sample_name]['nb_read'] = str(nb_read)\n
self.r1_samples_qual[sample_name]['q30_perc'] = (round(100 * float(q30_count) / float(total_count),1))\n            else:\n                self.r2_samples_qual[sample_name] = {}\n                self.r2_samples_qual[sample_name]['nb_read'] = str(nb_read)\n                self.r2_samples_qual[sample_name]['q30_perc'] = (round(100 * float(q30_count) / float(total_count), 1))\n\n    def WriteStat(self):\n        \"\"\"\n        Save the statistics to a file\n        :return:\n        \"\"\"\n\n        #print('IN WRITE STAT')\n        stat_file = os.path.join(self.lspq_miseq_dir_path,'2_MiSeqRunTrace',self.experiment_name.split('_')[2],'FinalMiSeqStat_' + self.experiment_name + \".txt\")\n\n        with open(stat_file,'w') as stat_file_handle:\n            stat_file_handle.write(\"ID\\tNb_Reads\\tCluster_Density_K_mm2\\tCluster_Passing_Filter\\tOver_Q30\\tR1_R2_Mean_Q30\\n\")\n\n            for sample in self.r1_samples_qual.keys():\n                mean_q30 = str(round((float(self.r1_samples_qual[sample]['q30_perc']) + float(self.r2_samples_qual[sample]['q30_perc'])) / 2,2))\n                stat_file_handle.write(sample + '_R1\\t' + str(self.r1_samples_qual[sample]['nb_read']) + '\\t' + 'NA\\t' + 'NA\\t' + str(self.r1_samples_qual[sample]['q30_perc']) + '\\t' + mean_q30 + '\\n')\n                stat_file_handle.write(sample + '_R2\\t' + str(self.r2_samples_qual[sample]['nb_read']) + '\\t' + 'NA\\t' + 'NA\\t' + str(self.r2_samples_qual[sample]['q30_perc']) + '\\t ' + '\\n')\n\n            stat_file_handle.write('RUN\\t' + str(self.total_nb_reads) + '\\t' + str(self.cluster_density_) + '\\t' + str(self.cluster_passing_filter) + '\\t' + str(self.perc_over_q30) + '\\t ' + '\\n')\n\n    def GetPercOverQ30(self):\n        return self.perc_over_q30\n\n    def GetClusterDensity(self):\n        return self.cluster_density_\n\n","sub_path":"MiSeqRunTransfer/Stat.py","file_name":"Stat.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"218629605","text":"from glob import glob\nfrom setuptools import find_packages, setup\n\ntypes = ('*.json', '*.bin', '*.txt')\ndata_files = []\nfor t in types:\n    data_files.extend(glob('spert/data/**/' + t, recursive=True))\n\nsetup(\n    name='spert',\n    packages=find_packages(),\n    data_files=[('config', ['spert/configs/eval.cfg', 'spert/configs/train.cfg']),\n                ('data', data_files)],\n    package_data={'': ['*.json', '*.bin', '*.txt']},\n    include_package_data=True,\n    version='2.0.0',\n    description='spert',\n    author='Me',\n    license='MIT',\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"626423337","text":"\"\"\"\nThis is a script for one-hot encoding and for deleting low-frequency features\nAuthor: Zhihao LI\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport math\nfrom IPython.core.debugger import Tracer\nfrom sklearn.preprocessing import OneHotEncoder\n\n#Load train_csv\ntrain_csv = pd.read_csv(\"/home/li/kaggle/Benz/dataset/train.csv\")\ntest_csv = pd.read_csv(\"/home/li/kaggle/Benz/dataset/test.csv\")\ntest_csv.insert(1, \"y\", 1)\n\ndata_csv = pd.concat([train_csv, test_csv], ignore_index=True)\n\n#Tracer()()\n\n#Define a generic function using Pandas replace function\ndef coding(col, codeDict):\n    colCoded = pd.Series(col, copy=True)\n    for key, value in codeDict.items():\n        colCoded.replace(key, value, inplace=True)\n    return colCoded\n\n#Define a function for listing count of all items in X0-X8\nnamespace = globals()\ndef item_dict(df):\n    for i in range(9):\n        if i != 7:\n            namespace[\"col_X%d_count\" %i] = {}\n
namespace[\"col_X%d_dict\" %i] = {}\n            #Tracer()()\n            col = data_csv[\"X%d\" %i]\n            col_drop = col.drop_duplicates()\n            for value in col_drop.values:\n                namespace[\"col_X%d_count\" %i][value] = 0\n            for value in col.values:\n                #Tracer()()\n                for key in namespace[\"col_X%d_count\" %i]:\n                    if value == key:\n                        namespace[\"col_X%d_count\" %i][key] += 1\n            namespace[\"col_X%d_count_sort\" %i] = sorted(\\\n                namespace[\"col_X%d_count\" %i].items(), key=lambda t:t[1],\\\n                reverse=True)\n            idx = 0\n            for item in namespace[\"col_X%d_count_sort\" %i]:\n                namespace[\"col_X%d_dict\" %i][item[0]] = idx\n                idx += 1\n\n            #Check whether count works well\n            #count = 0\n            #for key,value in namespace[\"col_X%d_count\" %i].items():\n            #    count += value\n            #Tracer()()\n\n#list dict for count of different values in X0-X8 columns\nitem_dict(data_csv)\n\n#Tracer()()\n#print(col_X0_dict)\n\n#Tracer()()\ntotal = 0\nfor i in range(9):\n    if i != 7:\n        print(\"col_X%d: %d different values\" %(i,len(namespace[\"col_X%d_count\" %i])))\n        total += len(namespace[\"col_X%d_count\" %i])\nprint(\"if one-hot encoded, %i features would be added\" %total)\n\n#Tracer()()\nfor i in range(9):\n    if i != 7:\n        data_csv[\"X%d\" %i] = coding(data_csv[\"X%d\" %i], namespace[\"col_X%d_dict\" %i])\n\n#print(data_csv[\"X1\"])\n#print(col_X4_count_sort)\n\n#Delete columns which have low entropy\ncol_del = []\nfor i in range(386):\n    if (i != 7) and (i !=9) and (i != 25) and (i != 72) and (i != 121) \\\n    and (i != 149) and (i != 188) and (i != 193) and (i != 303) \\\n    and (i != 381):\n        #print(data_csv[\"X%d\" %i].value_counts())\n\n        if i >= 10:\n            if len(data_csv[\"X%d\" %i].value_counts()) == 2:\n                count_0 = data_csv[\"X%d\" %i].value_counts()[0]\n                count_1 = data_csv[\"X%d\" %i].value_counts()[1]\n                #Tracer()()\n                diff = abs(count_0 - count_1)\n                # 50 could be changed to other value\n                if diff >= (8148 - 200):\n                    col_del.append(\"X%d\" %i)\n                    data_csv.drop((\"X%d\" %i), inplace=True, axis=1)\n        else:\n            col_del.append(\"X%d\" %i)\n            data_csv.drop([\"X%d\" %i], inplace=True, axis=1)\n#Delete X3 and X8 Columns\ncol_del.append(\"X3\")\ncol_del.append(\"X8\")\n#Tracer()()\ndata_csv.drop([\"X3\",\"X8\"], inplace=True, axis=1)\nprint(col_del)\n\n#Tracer()()\n#print(data_csv.shape)\n\n#OneHotEncoder for X0-X8\nencoder_list = [\"X0\", \"X1\", \"X2\", \"X4\", \"X5\", \"X6\"]\ncate_features = [0, 1, 2, 4, 5, 6]\nfeature_counts = []\nfor i in cate_features:\n    feature_counts.append(len(namespace[\"col_X%d_dict\" %i]))\n\n\nprint(feature_counts)\nenc = OneHotEncoder(categorical_features=np.array([0,1,2,3,4,5]), n_values=feature_counts)\ndata_matrix = data_csv.as_matrix(columns=None)\n#print(data_matrix[:,0])\nfeature_data = data_matrix[:, 2:]\n#Tracer()()\ntarget_data = data_matrix[:,1]\nenc.fit(feature_data)\nfeature_data = enc.transform(feature_data).toarray()\n#Tracer()()\n#print(feature_data)\n\nall_data = np.c_[target_data, feature_data]\n\n#Output as a csv file, first column is the target data\n#np.savetxt(\"/home/li/kaggle/Benz/dataset/one_hot_coding_train.csv\", train_data)\ndf_all = pd.DataFrame(all_data)\ndf_all.to_csv(\"/home/li/kaggle/Benz/dataset/one_hot_coding_all.csv\")\n\ndf_train = pd.DataFrame(all_data[:4209, :])\ndf_test = pd.DataFrame(all_data[4209: 
,1:])\ndf_train.to_csv(\"/home/li/kaggle/Benz/dataset/one_hot_encoding_train2.csv\")\ndf_test.to_csv(\"/home/li/kaggle/Benz/dataset/one_hot_encoding_test2.csv\")\n\nTracer()()\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"360131901","text":"\r\ndef inttopath(i: int) -> str:\r\n    # Build the file name directly rather than using one branch per index\r\n    if 1 <= i <= 5:\r\n        return \"pyramides\\\\p{}.txt\".format(i)\r\n\r\ndef load(rang: int) -> list:\r\n\r\n    path = inttopath(rang)\r\n\r\n    fichier = open(path, \"r\")\r\n\r\n    L = fichier.readlines()\r\n    fichier.close()\r\n\r\n    for i in range(len(L)):\r\n        L[i] = L[i][:-1]\r\n\r\n    return L\r\n\r\n\r\n","sub_path":"Theme D/DM_pyramide/kassab_leonard/kassab_leonard/leonard_loadpyramide.py","file_name":"leonard_loadpyramide.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481028712","text":"from ceci import PipelineStage\nimport numpy as np\nimport logging\nimport os\nimport copy\nimport scipy.interpolate\nfrom astropy.io import fits\nimport pymaster as nmt\nfrom .flatmaps import read_flat_map\nfrom .types import FitsFile, DummyFile\nimport sacc\nfrom theory.predict_theory import GSKYPrediction\nimport gsky.sacc_utils as sutils\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nclass GuessSpecter(PipelineStage) :\n    name=\"GuessSpecter\"\n    inputs=[('masked_fraction', FitsFile), ('depth_map', FitsFile),\n            ('gamma_maps', FitsFile), ('act_maps', FitsFile)]\n    outputs=[('dummy', DummyFile)]\n    config_options={'saccdirs': [str], 'output_run_dir': 'NONE',\n                    'noisesacc_filename': 'NONE', 'tracers': [str]}\n\n    def get_output_fname(self,name,ext=None):\n        fname=self.output_dir+name\n        if ext is not None:\n            fname+='.'+ext\n        return fname\n\n    def parse_input(self) :\n        \"\"\"\n        Check sanity of input parameters.\n        \"\"\"\n        # This is a hack to get the path of the root output directory.\n        # It should be easy to get this from ceci, but I don't know how to.\n        self.output_dir = self.get_output('dummy',final_name=True)[:-5]\n        if not os.path.isdir(self.output_dir):\n            os.makedirs(self.output_dir)\n\n        return\n\n    def get_cl_cpld(self, cl, ls_th, leff_hi, wsp_hi, msk_prod):\n\n        cl_mc = wsp_hi.couple_cell(ls_th, cl)[0]/ np.mean(msk_prod)\n        cl_intp = scipy.interpolate.interp1d(leff_hi, cl_mc, bounds_error=False,\n                                             fill_value=(cl_mc[0], cl_mc[-1]))\n        cl_o = cl_intp(ls_th)\n\n        return cl_o\n\n    def get_masks(self):\n\n        logger.info('Reading masks.')\n\n        masks = []\n        for trc in self.config['tracers']:\n            trc_id, trc_ind = trc.split('_')\n            trc_ind = int(trc_ind)\n\n            if trc_id == 'gc':\n                fsk, mp_depth = read_flat_map(self.get_input(\"depth_map\"),i_map=0)\n                mp_depth[np.isnan(mp_depth)] = 0\n                mp_depth[mp_depth > 40] = 0\n                msk_depth = np.zeros_like(mp_depth)\n                msk_depth[mp_depth >= self.config['depth_cut']] = 1\n                fskb, mskfrac = read_flat_map(self.get_input(\"masked_fraction\"), i_map=0)\n                # Create binary mask (fraction>threshold and depth req.)\n                msk_bo = np.zeros_like(mskfrac)\n                msk_bo[mskfrac > self.config['mask_thr']] = 1\n                msk_bi = msk_bo * msk_depth\n                mask = mskfrac * msk_bi\n            elif trc_id == 'wl':\n                hdul = fits.open(self.get_input('gamma_maps'))\n                _, mask = read_flat_map(None, 
hdu=[hdul[6 * trc_ind + 2]])\n elif trc_id == 'kappa':\n hdul = fits.open(self.get_input('act_maps'))\n _, mask = read_flat_map(None, hdu=hdul[3])\n elif trc_id == 'y':\n hdul = fits.open(self.get_input('act_maps'))\n _, mask = read_flat_map(None, hdu=hdul[1])\n else:\n raise NotImplementedError()\n\n masks.append(mask)\n\n fsk, _ = read_flat_map(self.get_input(\"masked_fraction\"), i_map=0)\n\n return masks, fsk\n\n def guess_spectra(self, params, saccfile_coadd, noise_saccfile_coadd):\n\n if 'dcpl_cl' in self.config.keys():\n logger.info('dcpl_cl provided.')\n if self.config['dcpl_cl']:\n logger.info('Computing coupled guess spectra.')\n saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec = self.guess_spectra_cpld(params,\n saccfile_coadd, noise_saccfile_coadd)\n else:\n logger.info('Computing uncoupled guess spectra.')\n saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec = self.guess_spectra_uncpld(params,\n saccfile_coadd, noise_saccfile_coadd)\n else:\n logger.info('dcpl_cl not provided. Computing uncoupled guess spectra.')\n saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec = self.guess_spectra_uncpld(params,\n saccfile_coadd, noise_saccfile_coadd)\n\n return saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec\n\n def guess_spectra_uncpld(self, params, saccfile_coadd, noise_saccfile_coadd=None):\n\n theor = GSKYPrediction(saccfile_coadd)\n\n cl_theor = theor.get_prediction(params)\n\n saccfile_guess_spec = copy.deepcopy(saccfile_coadd)\n if noise_saccfile_coadd is not None:\n saccfile_guess_spec.mean = noise_saccfile_coadd.mean + cl_theor\n else:\n saccfile_guess_spec.mean = cl_theor\n\n return saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec\n\n def guess_spectra_cpld(self, params, saccfile_coadd, noise_saccfile_coadd=None):\n\n ell_theor = np.arange(self.config['ellmax'])\n theor = GSKYPrediction(saccfile_coadd, ells=ell_theor)\n\n cl_theor = theor.get_prediction(params)\n\n masks, fsk = self.get_masks()\n\n dl_min = int(min(2 * np.pi / np.radians(fsk.lx), 2 * np.pi / np.radians(fsk.ly)))\n ells_hi = np.arange(2, 15800, dl_min * 1.5).astype(int)\n bpws_hi = nmt.NmtBinFlat(ells_hi[:-1], ells_hi[1:])\n leff_hi = bpws_hi.get_effective_ells()\n\n cl_cpld = []\n trc_combs = saccfile_coadd.get_tracer_combinations()\n for i, (tr_i, tr_j) in enumerate(trc_combs):\n\n logger.info('Computing wsp for trc_comb = {}.'.format((tr_i, tr_j)))\n\n tr_i_ind = self.config['tracers'].index(tr_i)\n tr_j_ind = self.config['tracers'].index(tr_j)\n\n mask_i = masks[tr_i_ind]\n mask_j = masks[tr_j_ind]\n\n cl_theor_curr = [cl_theor[i]]\n if 'wl' in tr_i:\n field_i = nmt.NmtFieldFlat(np.radians(fsk.lx), np.radians(fsk.ly),\n mask_i.reshape([fsk.ny,fsk.nx]),\n [mask_i.reshape([fsk.ny,fsk.nx]), mask_i.reshape([fsk.ny,fsk.nx])],\n templates=None)\n cl_theor_curr.append(np.zeros_like(cl_theor[i]))\n else:\n field_i = nmt.NmtFieldFlat(np.radians(fsk.lx), np.radians(fsk.ly),\n mask_i.reshape([fsk.ny,fsk.nx]),\n [mask_i.reshape([fsk.ny,fsk.nx])],\n templates=None)\n if 'wl' in tr_j:\n field_j = nmt.NmtFieldFlat(np.radians(fsk.lx), np.radians(fsk.ly),\n mask_j.reshape([fsk.ny,fsk.nx]),\n [mask_j.reshape([fsk.ny,fsk.nx]), mask_j.reshape([fsk.ny,fsk.nx])],\n templates=None)\n cl_theor_curr.append(np.zeros_like(cl_theor[i]))\n else:\n field_j = nmt.NmtFieldFlat(np.radians(fsk.lx), np.radians(fsk.ly),\n mask_j.reshape([fsk.ny,fsk.nx]),\n [mask_j.reshape([fsk.ny,fsk.nx])],\n templates=None)\n\n wsp_hi_curr = nmt.NmtWorkspaceFlat()\n wsp_hi_curr.compute_coupling_matrix(field_i, 
field_j, bpws_hi)\n\n msk_prod = mask_i*mask_j\n\n cl_cpld_curr = self.get_cl_cpld(cl_theor_curr, ell_theor, leff_hi, wsp_hi_curr, msk_prod)\n\n if noise_saccfile_coadd is not None:\n logger.info('Adding noise.')\n if tr_i == tr_j:\n if 'wl' in tr_i:\n datatype = 'cl_ee'\n else:\n datatype = 'cl_00'\n l_curr, nl_curr = noise_saccfile_coadd.get_ell_cl(datatype, tr_i, tr_j, return_cov=False)\n nl_curr_int = scipy.interpolate.interp1d(l_curr, nl_curr, bounds_error=False,\n fill_value=(nl_curr[0], nl_curr[-1]))\n nl_curr_hi = nl_curr_int(ell_theor)\n cl_cpld_curr += nl_curr_hi\n\n cl_cpld.append(cl_cpld_curr)\n\n # Add tracers to sacc\n saccfile_guess_spec = sacc.Sacc()\n for trc_name, trc in saccfile_coadd.tracers.items():\n saccfile_guess_spec.add_tracer_object(trc)\n\n for i, (tr_i, tr_j) in enumerate(trc_combs):\n if 'wl' not in tr_i and 'wl' not in tr_j:\n saccfile_guess_spec.add_ell_cl('cl_00', tr_i, tr_j, ell_theor, cl_cpld[i])\n elif ('wl' in tr_i and 'wl' not in tr_j) or ('wl' not in tr_i and 'wl' in tr_j):\n saccfile_guess_spec.add_ell_cl('cl_0e', tr_i, tr_j, ell_theor, cl_cpld[i])\n saccfile_guess_spec.add_ell_cl('cl_0b', tr_i, tr_j, ell_theor, np.zeros_like(cl_cpld[i]))\n else:\n saccfile_guess_spec.add_ell_cl('cl_ee', tr_i, tr_j, ell_theor, cl_cpld[i])\n saccfile_guess_spec.add_ell_cl('cl_eb', tr_i, tr_j, ell_theor, np.zeros_like(cl_cpld[i]))\n saccfile_guess_spec.add_ell_cl('cl_bb', tr_i, tr_j, ell_theor, np.zeros_like(cl_cpld[i]))\n\n return saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec\n\n def run(self):\n\n self.parse_input()\n\n saccfiles = []\n for saccdir in self.config['saccdirs']:\n if self.config['output_run_dir'] != 'NONE':\n path2sacc = os.path.join(saccdir, self.config['output_run_dir'] + '/' + 'power_spectra_wodpj')\n else:\n path2sacc = os.path.join(saccdir, 'power_spectra_wodpj')\n sacc_curr = sacc.Sacc.load_fits(self.get_output_fname(path2sacc, 'sacc'))\n logger.info('Read {}.'.format(self.get_output_fname(path2sacc, 'sacc')))\n assert sacc_curr.covariance is not None, 'saccfile {} does not contain covariance matrix. Aborting.'.format(\n self.get_output_fname(path2sacc, 'sacc'))\n saccfiles.append(sacc_curr)\n\n if self.config['noisesacc_filename'] != 'NONE':\n logger.info('Adding noise to theoretical cls.')\n noise_saccfiles = []\n for i, saccdir in enumerate(self.config['saccdirs']):\n if self.config['output_run_dir'] != 'NONE':\n path2sacc = os.path.join(saccdir, self.config['output_run_dir'] + '/' + self.config['noisesacc_filename'])\n else:\n path2sacc = os.path.join(saccdir, self.config['noisesacc_filename'])\n noise_sacc_curr = sacc.Sacc.load_fits(self.get_output_fname(path2sacc, 'sacc'))\n logger.info('Read {}.'.format(self.get_output_fname(path2sacc, 'sacc')))\n if noise_sacc_curr.covariance is None:\n logger.info('noise sacc has no covariance. 
Adding covariance matrix to noise sacc.')\n                    noise_sacc_curr.add_covariance(saccfiles[i].covariance.covmat)\n                noise_saccfiles.append(noise_sacc_curr)\n            noise_saccfile_coadd = sutils.coadd_sacc_means(noise_saccfiles, self.config)\n        else:\n            logger.info('Creating noise-free theoretical cls.')\n            noise_saccfile_coadd = None\n\n        # Need to coadd saccfiles after adding covariance to noise saccfiles\n        saccfile_coadd = sutils.coadd_sacc_means(saccfiles, self.config)\n\n        params = {\n            'cosmo': self.config['cosmo'],\n            'hmparams': self.config['hmparams']\n        }\n\n        saccfile_coadd, noise_saccfile_coadd, saccfile_guess_spec = self.guess_spectra(params, saccfile_coadd,\n                                                                                      noise_saccfile_coadd)\n\n        if self.config['output_run_dir'] != 'NONE':\n            input_dir = os.path.join('inputs', self.config['output_run_dir'])\n            input_dir = self.get_output_fname(input_dir)\n        else:\n            input_dir = self.get_output_fname('inputs')\n        if not os.path.isdir(input_dir):\n            os.makedirs(input_dir)\n            logger.info('Created {}.'.format(input_dir))\n\n        if self.config['output_run_dir'] != 'NONE':\n            coadd_dir = os.path.join('coadds', self.config['output_run_dir'])\n            coadd_dir = self.get_output_fname(coadd_dir)\n        else:\n            coadd_dir = self.get_output_fname('coadds')\n        if not os.path.isdir(coadd_dir):\n            os.makedirs(coadd_dir)\n            logger.info('Created {}.'.format(coadd_dir))\n\n        saccfile_coadd.save_fits(os.path.join(coadd_dir, 'saccfile_coadd_test.sacc'), overwrite=True)\n        logger.info('Written {}.'.format(os.path.join(coadd_dir, 'saccfile_coadd_test.sacc')))\n        if self.config['noisesacc_filename'] != 'NONE':\n            noise_saccfile_coadd.save_fits(os.path.join(coadd_dir, 'noise_saccfile_coadd_test.sacc'), overwrite=True)\n            logger.info('Written {}.'.format(os.path.join(coadd_dir, 'noise_saccfile_coadd_test.sacc')))\n        if self.config['noisesacc_filename'] != 'NONE':\n            saccfile_guess_spec.save_fits(os.path.join(input_dir, 'saccfile_guess_spectra_test.sacc'), overwrite=True)\n            logger.info('Written {}.'.format(os.path.join(input_dir, 'saccfile_guess_spectra_test.sacc')))\n        else:\n            saccfile_guess_spec.save_fits(os.path.join(input_dir, 'saccfile_noise-free_guess_spectra_test.sacc'), overwrite=True)\n            logger.info('Written {}.'.format(os.path.join(input_dir, 'saccfile_noise-free_guess_spectra_test.sacc')))\n\n        # Permissions on NERSC\n        os.system('find /global/cscratch1/sd/damonge/GSKY/ -type d -exec chmod -f 777 {} \\;')\n        os.system('find /global/cscratch1/sd/damonge/GSKY/ -type f -exec chmod -f 666 {} \\;')\n\nif __name__ == '__main__':\n    cls = PipelineStage.main()","sub_path":"gsky/guess_specter.py","file_name":"guess_specter.py","file_ext":"py","file_size_in_byte":13869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"397767001","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 20:23:20 2020\n\n@author: Stan\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom matplotlib.ticker import PercentFormatter\nimport seaborn as sns\n\nsns.set(style='dark')\n#sns.set_palette(sns.cubehelix_palette())\n\ndf = pd.read_excel('.\\\\data\\\\sales.xlsx')\ndf.columns = ['date_time', 'item_name', 'id', 'sales_qtt']\ndf = df.drop(columns='item_name')\ndf['id'] = df['id'].astype('category')\ndf.set_index('date_time', inplace = True)\n\n#EDA\ndf.head()\ndf.info()\ndf.describe()\ndf['id'].value_counts()\ndf['sales_qtt'].plot(kind='hist', cumulative=False)\ndf['sales_qtt'].plot(kind='hist', cumulative=True)\nplt.close()\n\n#ECDF - daily sales\nx = np.sort(df['sales_qtt'])\ny = 
np.arange(1, len(x) + 1)/len(x)\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('Quantity sold (daily)')\n_ = plt.ylabel('Cumulative probability')\nplt.xticks(range(0, x.max()+1))\nplt.margins(0.02)\n# save before show(), since show() clears the current figure\nplt.savefig(r'.\\analysis\\charts\\daily_sales_ecdf.png')\nplt.show()\nplt.close()\n\n#ECDF - weekly sales\ndf_weekly = df.groupby('id').resample('W').sum()\ndf_weekly.reset_index(inplace=True)\ndf_weekly.set_index('date_time', inplace = True)\ndf_weekly = df_weekly[df_weekly['sales_qtt'] != 0]\n\nx = np.sort(df_weekly['sales_qtt'])\ny = np.arange(1, len(x) + 1)/len(x)\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('Quantity sold (weekly)')\n_ = plt.ylabel('Cumulative probability')\nplt.margins(0.02)\nplt.savefig(r'.\\analysis\\charts\\weekly_sales_ecdf.png')\nplt.show()\nplt.close()\n\n#Sales by day of week\ndf_dow = df.copy()\ndf_dow['dow'] = df_dow.index.strftime('%a')\ndf_dow.reset_index(inplace=True)\ndf_dow = df_dow.groupby('dow').sum()\ndows = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\ndf_dow = df_dow.reindex(dows)\ndf_dow.plot()\nplt.savefig(r'.\\analysis\\charts\\sales_by_dow.png')\nplt.close()\n\n#Pareto chart\ndf_top = df.groupby('id').sum()\ndf_top.sort_values('sales_qtt', ascending=False, inplace=True)\ndf_top['sales_pct_cum'] = df_top['sales_qtt'].cumsum()/df_top['sales_qtt'].sum()*100\ndf_top = df_top.reset_index()\ndf_top['id'] = df_top['id'].astype(str)\n\nfig, ax = plt.subplots()\nplt.tick_params(\n    axis='x',          # changes apply to the x-axis\n    which='both',      # both major and minor ticks are affected\n    bottom=False,      # ticks along the bottom edge are off\n    top=False,         # ticks along the top edge are off\n    labelbottom=False)\nax.bar(df_top['id'], df_top['sales_qtt'], color=\"C0\")\nax2 = ax.twinx()\nax2.plot(df_top['id'], df_top['sales_pct_cum'], color=\"C1\", marker='.')\nax2.yaxis.set_major_formatter(PercentFormatter())\nax2.axhline(y=80, color='r')\nax2.axhline(y=50, color='C1')\nplt.savefig(r'.\\analysis\\charts\\pareto.png')\nplt.show()\nplt.close()\n\n\n#Aggregated weekly sales decomposition\ndf_weekly_total = df.resample('W').sum()\nweekly_decompose = sm.tsa.seasonal_decompose(df_weekly_total, model='additive', period=4)\nfig1 = weekly_decompose.plot()\ndf_weekly_total.hist(bins=20)\n\n#Aggregated daily sales decomposition\ndf_daily_total = df.resample('D').sum()\ndaily_decompose = sm.tsa.seasonal_decompose(df_daily_total, model='additive', period=28)\nfig2 = daily_decompose.plot()\ndf_daily_total.hist(bins=20)\n\n\n\n","sub_path":"analysis/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625488082","text":"import MySQLdb\r\nimport datetime\r\nimport numpy as np\r\n\r\n\r\ndef nonlin(x, deriv=False):\r\n    if deriv == True:\r\n        return x*(1-x)\r\n    return 1/(1+np.exp(-x))\r\n\r\n\r\ndef get_predict(trainX, trainY, testX):\r\n    # clf = svm.SVR()\r\n    # clf.fit(trainX, trainY)\r\n    # # print testX\r\n    # return clf.predict([testX])[0]\r\n    trainX = np.array(trainX)\r\n    trainY = np.array([trainY]).T\r\n    syn0 = 2 * np.random.random((10, 1)) - 1\r\n\r\n    for iter in xrange(5000):\r\n        # forward propagation\r\n        l0 = trainX\r\n        l1 = nonlin(np.dot(l0, syn0))\r\n\r\n        # how much did we miss?\r\n        l1_error = trainY - l1\r\n\r\n        # multiply how much we missed by the\r\n        # slope of the sigmoid at the values in l1\r\n        l1_delta = l1_error * nonlin(l1, True)\r\n\r\n        # update weights\r\n        syn0 += np.dot(l0.T, l1_delta)\r\n    # print 
After Training:\"\r\n # print l1\r\n # print syn0\r\n #\r\n\r\n # syn1 = 2 * np.random.random((13, 1)) - 1\r\n # for j in xrange(10000):\r\n #\r\n # l1 = 1 / (1 + np.exp(-(np.dot(trainX, syn0))))\r\n # l2 = 1 / (1 + np.exp(-(np.dot(l1, syn1))))\r\n # l2_delta = (trainY - l2) * (l2 * (1 - l2))\r\n # l1_delta = l2_delta.dot(syn1.T) * (l1 * (1 - l1))\r\n # syn1 += l1.T.dot(l2_delta)\r\n # syn0 += trainX.T.dot(l1_delta)\r\n l1 = 1 / (1 + np.exp(-(np.dot([testX], syn0))))\r\n return l1[0][0]\r\n\r\n\r\ndef draw(id):\r\n db = MySQLdb.connect(\"572b2568442c7.sh.cdb.myqcloud.com\", \"cdb_outerroot\", \"software2015\", \"test\",\r\n port=8161)\r\n cursor = db.cursor()\r\n cursor.execute('select `close`, `date` from `stock_2014` where `amount` >0 and `stockid` = \"'+id+'\"')\r\n # cursor.execute('select `close`, `date` from `bench` where `stockid` = \"'+id+'\" and `date` >= \"2014-01-01\"'\r\n # ' and `date` <= \"2014-12-31\"')\r\n # cursor.execute('select `close`, `date` from `bench` where `stockid` = \"'+id+'\" and '\r\n # '`date`>=\"2014-01-01\" and `date`<=\"2014-12-31\"')\r\n raw = list(cursor.fetchall())\r\n base = []\r\n for each in raw:\r\n base.append(each[0])\r\n # if each[0] >= 0:\r\n # base.append(1)\r\n # else:\r\n # base.append(0)\r\n actual = []\r\n date = []\r\n cursor.execute('select `close`, `date` from `stock_2015` where `amount` >0 and `stockid` = \"'+id+'\"')\r\n # cursor.execute('select `close`, `date` from `bench` where `stockid` = \"'+id+'\" and '\r\n # '`date`>=\"2015-01-01\"')\r\n # cursor.execute('select `close`, `date` from `bench` where `stockid` = \"' + id + '\" and `date` >= \"2015-01-01\"'\r\n # ' and `date` <= \"2015-12-31\"')\r\n raw = list(cursor.fetchall())\r\n\r\n for each in raw:\r\n actual.append(each[0])\r\n date.append(each[1])\r\n\r\n cursor.execute('select `close`, `date` from `stock_2016` where `amount` >0 and `stockid` = \"' + id + '\"')\r\n # cursor.execute('select `close`, `date` from `bench` where `stockid` = \"' + id + '\" and `date` >= \"2016-01-01\"')\r\n raw = list(cursor.fetchall())\r\n\r\n for each in raw:\r\n actual.append(each[0])\r\n date.append(each[1])\r\n # if each[0] >= 0:\r\n # actual.append(1)\r\n # else:\r\n # actual.append(0)\r\n predict = []\r\n predict_upper = []\r\n predict_low = []\r\n X = []\r\n j = 1\r\n for each in actual:\r\n X.append(j)\r\n j += 1\r\n trainX = []\r\n trainY = []\r\n for x in range(0, len(base)-10):\r\n tempX = base[x:x+10]\r\n tempY = base[x+10]\r\n sum = 0\r\n for every in tempX:\r\n sum += every\r\n for k in range(0, len(tempX)):\r\n tempX[k] = float(tempX[k]) / float(sum)\r\n # if tempX[k] >= 0:\r\n # tempX[k] = 1\r\n # else:\r\n # tempX[k] = 0\r\n tempY = float(tempY) / float(sum)\r\n trainX.append(tempX)\r\n trainY.append(tempY)\r\n testX = base[-10:]\r\n sum = 0\r\n for every in testX:\r\n sum += every\r\n for i in range(0, len(testX)):\r\n testX[i] = float(testX[i]) / float(sum)\r\n price = get_predict(trainX, trainY, testX)*sum\r\n var = np.var(np.array(base)) / len(base)\r\n d = np.sqrt(var)\r\n predict.append(price)\r\n predict_upper.append(price+d)\r\n predict_low.append(price-d)\r\n base.append(each)\r\n\r\n time = date[-1]\r\n time = datetime.datetime.strptime(time, \"%Y-%m-%d\")\r\n time = time + datetime.timedelta(1)\r\n for x in range(0, 15):\r\n date.append(time.strftime(\"%Y-%m-%d\"))\r\n time = time + datetime.timedelta(1)\r\n base.append(predict[-1])\r\n X.append(j)\r\n j += 1\r\n trainX = []\r\n trainY = []\r\n for x in range(0, len(base) - 10):\r\n tempX = base[x:x + 10]\r\n tempY = 
base[x + 10]\r\n sum = 0\r\n for every in tempX:\r\n sum += every\r\n for k in range(0, len(tempX)):\r\n tempX[k] = float(tempX[k]) / float(sum)\r\n tempY = float(tempY) / float(sum)\r\n trainX.append(tempX)\r\n trainY.append(tempY)\r\n testX = base[-10:]\r\n sum = 0\r\n for every in testX:\r\n sum += every\r\n for i in range(0, len(testX)):\r\n testX[i] = float(testX[i]) / float(sum)\r\n price = get_predict(trainX, trainY, testX)*sum\r\n var = np.var(np.array(base)) / len(base)\r\n d = np.sqrt(var)\r\n predict.append(price)\r\n predict_upper.append(price + d)\r\n predict_low.append(price - d)\r\n\r\n # print actual\r\n # print predict\r\n # print X\r\n\r\n # hit = 0\r\n # total = 0\r\n #\r\n # for x in range(0, len(actual)):\r\n # total += 1\r\n # if actual[x] == 1 and predict[x] > 0.5:\r\n # hit += 1\r\n # elif actual[x] == 0 and predict[x] < 0.5:\r\n # hit += 1\r\n # print float(hit) / float(total)\r\n\r\n # plt.plot(X[:-14], actual, 'g')\r\n # plt.plot(X, predict, 'r')\r\n # plt.plot(X, predict_upper, 'b')\r\n # plt.plot(X, predict_low, 'y')\r\n # # plt.ylim(-4, 4)\r\n # # plt.xlim(200, 300)\r\n # plt.show()\r\n data = []\r\n for x in range(0, len(date)):\r\n if date[x] <= '2016-06-15':\r\n data.append([id, date[x], predict[x], predict_upper[x], predict_low[x], 0])\r\n else:\r\n data.append([id, date[x], predict[x], predict_upper[x], predict_low[x], 1])\r\n # cursor.execute('update `forecast` set `price_middle` = %s, `price_high` = %s, `price_low` = %s '\r\n # ' where `stockid` = \"%s\" and `date` = \"%s\"' % (predict[x], predict_upper[x], predict_low[x], id, date[x]))\r\n insert_cmd = 'insert into `bp_forecast` (`stockid`, `date`, `price_middle`, `price_high`, `price_low`, `unstable`) ' \\\r\n ' VALUES (%s, %s, %s, %s, %s,%s)'\r\n\r\n cursor.executemany(insert_cmd, tuple(data))\r\n db.commit()\r\n db.close()\r\n\r\n# draw('hs300')\r\n# draw('sh000001')\r\ndraw('sh600004')\r\n# file = open('list.txt')\r\n# while 1:\r\n# line = file.readline()\r\n# if not line:\r\n# break\r\n# print line[:8]\r\n# try:\r\n# draw(line[:8])\r\n# except Exception, e:\r\n# # print 'no data ' + line[:8]\r\n# print e\r\n","sub_path":"reference/Data/script/bp_establish.py","file_name":"bp_establish.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594879121","text":"from base64 import b64decode\nfrom base64 import b64encode\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport random\n\nblock_size = AES.block_size\n\n\nclass AESCrypto:\n\tdef __init__(self, key):\n\t\tself.key = key\n\t\tself.bs = 16\n\n\tdef _unpadPKCS5(self, s):\n\t\treturn s[0:-ord(s[-1])]\n\n\tdef _padPKCS5(self, s):\n\t\treturn s + (\n\t\t self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)\n\n\tdef encrypt(self, raw):\n\t\tpadded_plain_text = self._padPKCS5(raw)\n\t\tiv = Random.new().read(AES.block_size)\n\t\tcipher = AES.new(self.key.encode(\"utf8\"), AES.MODE_CBC, iv)\n\t\tivEncoded = b64encode(iv)\n\t\tdataEncoded = b64encode(cipher.encrypt(padded_plain_text.encode(\"utf8\")))\n\t\treturn b64encode(dataEncoded+':'.encode(\"utf8\")+ivEncoded)\n\t\t\n\n\tdef decrypt(self, enc):\n\t\tenc = b64decode(enc)\n\t\tdataArr = enc.split(b\":\")\n\t\tiv = b64decode(dataArr[1])\n\t\tenc = b64decode(dataArr[0])\n\t\tcipher = AES.new(self.key.encode(\"utf8\"), AES.MODE_CBC, iv)\n\t\treturn self._unpadPKCS5(cipher.decrypt(enc).decode('utf-8'))\n\t\t\nkey = \"0123456789abcdef\" or b\"0123456789abcdef\"\nplaintext = 
\"pruebadesdepython3\" or b\"pruebadesdepython3\"\nciphertext = AESCrypto(key).encrypt(plaintext)\nprint(ciphertext)\ntextfromjava = \"OFpHSUw2YlRPdmVmbXh2VjI2UVRzUGlrZWdVeHZOZmdZTFB0bUxYTExLND06SmdNSWUrdnAwZUtIZXVYV3crRjgzQT09\" or b\"OFpHSUw2YlRPdmVmbXh2VjI2UVRzUGlrZWdVeHZOZmdZTFB0bUxYTExLND06SmdNSWUrdnAwZUtIZXVYV3crRjgzQT09\"\ncleartext = AESCrypto(key).decrypt(textfromjava)\nprint(cleartext)","sub_path":"Phyton/3.0/AESCrypto.py","file_name":"AESCrypto.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446872016","text":"import threading\nimport subprocess\n\n\nclass ClientRunner(threading.Thread):\n\n    def __init__(self, name):\n        threading.Thread.__init__(self)\n        self.name = name\n\n    def run(self):\n        cmd_client = \"C:/Users/35114/Anaconda3/envs/python2.7/python.exe f:/Competition/XWDistribute/pysample/main.py\"\n        # cmd_client = \"ping www.baidu.com\"\n        s = subprocess.Popen(cmd_client, stdout=subprocess.PIPE, universal_newlines=True)\n        while True:\n            next_line = s.stdout.readline()\n            print(\"%s\" % (next_line.strip()))\n            if next_line == \"\":\n                break\n\n\nif __name__ == \"__main__\":\n\n    nodes = 20\n    for i in range(nodes):\n        client = ClientRunner(\"client-%d\" % (i + 1))\n        client.start()\n","sub_path":"XWDistribute/XWDistribute/pysample/ClientRunner.py","file_name":"ClientRunner.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497517392","text":"#!/usr/bin/env python\n\n\n\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectFromModel\n\nfrom skater.core.explanations import Interpretation\nfrom skater.model import InMemoryModel\n\n\n# In[5]:\n\n\nXtest = pickle.load(open('./Data/pickledtraintestdata/X_test.pkl', 'rb'))\nytest = pickle.load(open('./Data/pickledtraintestdata/y_test.pkl', 'rb'))\nRFModel = pickle.load(open('./SavedRFModel/Python_RF.pickle', 'rb'))\n\n\n# In[3]:\n\n\nrow_no_to_interpret = 954\ndata_for_prediction = Xtest.iloc[row_no_to_interpret]\ndata_for_prediction_array = data_for_prediction.values.reshape(1, -1)\npredicted_proba = RFModel.predict_proba(Xtest)\nRFprediction = RFModel.predict(data_for_prediction_array)\n\n#print(data_for_prediction)\nprint(\"Real target\", ytest.iloc[row_no_to_interpret])\nprint(\"Random forest Predicted\", RFprediction)\nprint(\"Predict_proba\", np.round_(predicted_proba[row_no_to_interpret], decimals=2))\n\n\n# In[8]:\n\n\n#%%time\ninterpreter = Interpretation(Xtest, feature_names=Xtest.columns)\n\n\n# In[9]:\n\n\n#%%time\nmodel = InMemoryModel(RFModel.predict_proba, examples=Xtest)\n\n\n# In[6]:\n\n\n#%%time\ninterpreter = Interpretation(Xtest, feature_names=Xtest.columns)\n\nmodel = InMemoryModel(RFModel.predict_proba, examples=Xtest)\n\nplots = interpreter.feature_importance.plot_feature_importance(model, ascending = False)\n\n\n# In[7]:\n\n\n#%%time\npyint_model = InMemoryModel(RFModel.predict_proba, examples=Xtest, target_names=[0,1])\n\n\n# In[8]:\n\n\n#%%time\naxes_list = interpreter.partial_dependence.plot_partial_dependence(['V14'],\n                                                                   pyint_model, \n                                                                   grid_resolution=30, \n                                                                   with_variance=True,\n                                                                   figsize = (9, 7))\n\n\n# In[9]:\n\n\naxes_list = interpreter.partial_dependence.plot_partial_dependence(['V4'],\n                                                                   pyint_model, \n                                                                   grid_resolution=30, \n                                                                   with_variance=True,\n                                                                   figsize = (9, 
7))\n\n\n\n# In[10]:\n\n\naxes_list = interpreter.partial_dependence.plot_partial_dependence(['V12'],\n pyint_model, \n grid_resolution=30, \n with_variance=True,\n figsize = (9, 7))\n\n\n# In[11]:\n\n\naxes_list = interpreter.partial_dependence.plot_partial_dependence(['V10'],\n pyint_model, \n grid_resolution=30, \n with_variance=True,\n figsize = (9, 7))\n\n\n# In[12]:\n\n\nfrom sklearn.metrics import f1_score\nprint(\"RF -> F1 Score: {1}\". format('RF', f1_score(ytest, RFModel.predict(Xtest))))\n\n\n# In[13]:\n\n\n#%%time\nmodel = InMemoryModel(RFModel.predict_proba, examples=Xtest, target_names=[0, 1])\n \ninterpreter.partial_dependence.plot_partial_dependence([('V14', 'V4')], model, grid_resolution=10)\n\n\n# In[14]:\n\n\n#%%time\ninterpreter.partial_dependence.plot_partial_dependence([('V14', 'V12')], model, grid_resolution=10)\n\n\n# In[15]:\n\n\n#%%time\nmodel_no_proba = InMemoryModel(RFModel.predict, \n examples=Xtest, \n unique_values=[0,1])\nplots = interpreter.feature_importance.plot_feature_importance(model_no_proba, ascending = False)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Code_PythonPrograms/Global/Pyfile/Skater.py","file_name":"Skater.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452529885","text":"import multiprocessing\nimport sql_conect\nimport time\n\n# 测试上传功能\ndef insertTidKilo(tid):\n mysqldb = sql_conect.sqlDB('superepc应用测试TX', 'uniform_interface')\n mysqldb.connect()\n fullKmList = []\n sql = '''SELECT DISTINCT mr.TID as tid,ml.M_TIMER as timer_id, mr.M_MONTH,mr.M_USED_MONTH,ml.M_ACTION \nFROM \nm_maintain_rule mr, \nm_maintain_list ml, \nm_maintain_standardname h ,\nm_working_rule wr \nWHERE mr.MAINTAIN_LIST_ID = ml.MAINTAIN_LIST_ID and ml.M_TIMER=h.M_TIMER and wr.TID=mr.TID AND wr.MAINTAIN_LIST_ID=mr.MAINTAIN_LIST_ID AND ml.M_ACTION <> '无' AND mr.M_VALID=1 \n and mr.TID = %s ''' % tid\n tidKmList = mysqldb.select(sql)\n for subTidKmList in tidKmList:\n subTotKmList = 0\n if subTidKmList[2] + subTidKmList[3] > 0:\n if subTidKmList[2] > 0:\n i = 0\n while subTotKmList <= 240:\n subTotKmList = subTidKmList[2] * i + subTidKmList[3]\n kl = (subTidKmList[0], subTidKmList[1], subTotKmList,subTidKmList[4])\n if kl not in fullKmList and subTotKmList <= 240 and subTotKmList > 0:\n if subTidKmList[4] == \"定期必做\":\n # 如果已经有检查后做的数据则删除\n klcheck = (subTidKmList[0], subTidKmList[1], subTotKmList, '检查后做')\n if klcheck in fullKmList:\n killIndex = fullKmList.index(klcheck)\n del fullKmList[killIndex]\n if subTidKmList[4] == \"检查后做\":\n # 如果已经有定期必做的数据则不插入\n klcheck = (subTidKmList[0], subTidKmList[1], subTotKmList, '定期必做')\n if klcheck in fullKmList:\n i += 1\n continue\n fullKmList.append(kl)\n i += 1\n elif subTidKmList[2] == 0:\n subTotKmList = subTidKmList[3]\n kl = (subTidKmList[0], subTidKmList[1], subTotKmList,subTidKmList[4])\n if kl not in fullKmList and subTotKmList <= 240:\n if subTidKmList[4] == \"定期必做\":\n klcheck = (subTidKmList[0], subTidKmList[1], subTotKmList, '检查后做')\n if klcheck in fullKmList:\n killIndex = fullKmList.index(klcheck)\n del fullKmList[killIndex]\n if subTidKmList[4] == \"检查后做\":\n klcheck = (subTidKmList[0], subTidKmList[1], subTotKmList, '定期必做')\n if klcheck in fullKmList:\n continue\n fullKmList.append(kl)\n sql = \"insert into td_all.m_maintain_month_info(tid,timer_id,`month`,action) values(%s,%s,%s,%s)\"\n mysqldb.runSqlMany(sql, fullKmList)\n mysqldb.close()\n\ndef div_list(ls,n):\n n_groups = [ls[i:i+n] for i in range(0,len(ls),n)]\n return 
n_groups\n\n\nif __name__ == \"__main__\":\n time_start = time.time()\n\n mysqldb = sql_conect.sqlDB('superepc应用测试TX', 'uniform_interface')\n mysqldb.connect()\n\n tablename = \"td_all.m_maintain_month_info\"\n sql = \"TRUNCATE TABLE %s\" % tablename\n mysqldb.runSql(sql)\n\n sql = \"\"\"SELECT distinct tid from uniform_interface.m_maintain_rule \nwhere tid not in (select distinct tid from %s)\"\"\" % tablename\n tidList = mysqldb.select(sql)\n # for tid in tidList:\n # insertTidKilo(tid)\n # print(tid)\n partList = div_list(tidList,10)\n for subPartList in partList:\n try:\n # insertTidKilo(subPartList[0])\n p0 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[0]))\n p1 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[1]))\n p2 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[2]))\n p3 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[3]))\n p4 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[4]))\n p5 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[5]))\n p6 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[6]))\n p7 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[7]))\n p8 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[8]))\n p9 = multiprocessing.Process(target=insertTidKilo, args=(subPartList[9]))\n p0.start()\n p1.start()\n p2.start()\n p3.start()\n p4.start()\n p5.start()\n p6.start()\n p7.start()\n p8.start()\n p9.start()\n p0.join()\n p1.join()\n p2.join()\n p3.join()\n p4.join()\n p5.join()\n p6.join()\n p7.join()\n p8.join()\n p9.join()\n print(subPartList)\n except Exception as err:\n print(str(err) + \"无tid,已完成!\")\n mysqldb.close()\n\n time_end = time.time()\n print('time cost', time_end-time_start,'s')\n\n\n","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598624091","text":"import streamlit as st\nimport pickle\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport numpy as np\nimport pickle\nimport pandas as pd\nfrom sklearn.preprocessing import Imputer\n# Load the pipeline and data\n# pipe = pickle.load(open('pipe_logistic.sav', 'rb'))\nX_test = pickle.load(open('X_test.sav', 'rb'))\ny_test = pickle.load(open('y_test.sav', 'rb'))\nX_train = pickle.load(open('X_train.sav', 'rb'))\n\ndic = {0: 'Bad', 1: 'Good'}\n\n#Function to test certain index of dataset\ndef test_demo(index):\n values = X_test.iloc[index].astype('float64') # Input the value from dataset\n\n # Create four sliders in the sidebar\n a = st.sidebar.slider('External Risk Estimate', 0.0, 100.0, values[16], 1.0)\n b = st.sidebar.slider('Months Since Oldest Trade Open', 0.0, 604.0, values[17], 1.0)\n c = st.sidebar.slider('Months Since Most Recent Trade Open', 0.0, 207.0, values[18], 1.0)\n d = st.sidebar.slider('Average Months in File', 4.0, 332.0, values[19], 1.0)\n e = st.sidebar.slider('Number Satisfactory Trades', 0.0, 100.0, values[20], 1.0 )\n f = st.sidebar.slider('Number Trades 60+ Ever', 0.0, 100.0, values[21], 
1.0 )\n    g = st.sidebar.slider('Number Trades 90+ Ever', 0.0, 100.0, values[22], 1.0 )\n    h = st.sidebar.slider('Percent Trades Never Delinquent', 0.0, 100.0, values[23], 1.0 )\n    i = st.sidebar.slider('Months Since Most Recent Delinquency', 0.0, 100.0, values[24], 1.0 )\n    j = st.sidebar.slider('Number of Total Trades', 0.0, 104.0, values[25], 1.0 )\n    k = st.sidebar.slider('Number of Trades Open in Last 12 Months', 0.0, 19.0, values[26], 1.0 )\n    l = st.sidebar.slider('Percent Installment Trades', 0.0, 100.0, values[27], 1.0 )\n    m = st.sidebar.slider('Months Since Most Recent Inq excl 7days', 0.0, 100.0, values[28], 1.0 )\n    n = st.sidebar.slider('Number of Inq Last 6 Months', 0.0, 100.0, values[29], 1.0 )\n    o = st.sidebar.slider('Number of Inq Last 6 Months excl 7days', 0.0, 100.0, values[30], 1.0 )\n    p = st.sidebar.slider('Net Fraction Revolving Burden', 0.0, 232.0, values[31], 1.0 )\n    q = st.sidebar.slider('Net Fraction Installment Burden',0.0, 471.0, values[32], 1.0 )\n    r = st.sidebar.slider('Number Revolving Trades with Balance', 0.0, 100.0, values[33], 1.0 )\n    s = st.sidebar.slider('Number Installment Trades with Balance', 0.0, 100.0, values[34], 1.0 )\n    t = st.sidebar.slider('Number Bank/Natl Trades w high utilization ratio', 0.0, 100.0, values[35], 1.0 )\n    u = st.sidebar.slider('Percent Trades with Balance', 0.0, 100.0, values[36], 1.0 )\n    \n    #Print the prediction result\n    alg = ['Boosting', 'Random Forest', 'Support Vector Machine']\n    classifier = st.selectbox('Which algorithm?', alg)\n\n    if classifier == 'Boosting':\n        \n        pipe = pickle.load(open('pipe_boost.sav', 'rb'))\n        res = pipe.predict(np.array([0,1,0,0,1,0,0,0,0,1,0,0,1,0,0,0,a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u]).reshape(1, -1))[0]\n        st.write('Prediction: ', dic[res])\n        pred = pipe.predict(X_test)\n        score = pipe.score(X_test, y_test)\n        cm = metrics.confusion_matrix(y_test, pred)\n        st.write('Accuracy: ', score)\n        st.write('Confusion Matrix: ', cm)\n        \n        st.text('Boosting Chosen')\n        \n    elif classifier == 'Random Forest':\n        pipe = pickle.load(open('pipe_rf.sav', 'rb'))\n        res = pipe.predict(np.array([0,1,0,0,1,0,0,0,0,1,0,0,1,0,0,0,a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u]).reshape(1, -1))[0]\n        st.write('Prediction: ', dic[res])\n        pred = pipe.predict(X_test)\n        score = pipe.score(X_test, y_test)\n        cm = metrics.confusion_matrix(y_test, pred)\n        st.write('Accuracy: ', score)\n        st.write('Confusion Matrix: ', cm)\n        \n        st.text('Random Forest Chosen')\n\n    else:\n        pipe = pickle.load(open('pipe_svc.sav', 'rb'))\n        res = pipe.predict(np.array([0,1,0,0,1,0,0,0,0,1,0,0,1,0,0,0,a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u]).reshape(1, -1))[0]\n        st.write('Prediction: ', dic[res])\n        pred = pipe.predict(X_test)\n        score = pipe.score(X_test, y_test)\n        cm = metrics.confusion_matrix(y_test, pred)\n        st.write('Accuracy: ', score)\n        st.write('Confusion Matrix: ', cm)\n        \n        st.text('SVM Chosen')\n\n    \n# title\nst.title('Heloc Prediction')\n# show data\nif st.checkbox('Show dataframe'):\n    st.write(X_test)\nst.write(X_train.reset_index()) # Show the dataset\n\nnumber = st.text_input('Choose a row of information in the dataset:', 30) # Input the index number\n\ntest_demo(int(number)) # Run the test function\n","sub_path":"streamlit_demo.py","file_name":"streamlit_demo.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"103297600","text":"from django.shortcuts import render\nfrom django.conf 
import settings\n\nfrom fromleaf_common.utils import database as db\nfrom fromleaf_common.utils.database import UserData\nfrom fromleaf_common.views import CommonTemplateView\n\ndef comment_parser(comment):\n comment_dict = {}\n element_list = comment.split(',')\n \n for element in element_list:\n key, value = element.split(':', maxsplit=1)\n comment_dict[key] = value\n \n return comment_dict\n\nclass OpeningView(CommonTemplateView):\n \n template_name = 'fromleaf_opening/opening.html'\n \n def get_context_data(self, **kwargs):\n context = super(OpeningView, self).get_context_data(**kwargs)\n user_data = UserData(settings.USER_EMAIL)\n \n comment_list = db.get_simple_comment_list(self, user_data.get_member_info())\n opening_page_info = comment_parser(comment_list[0].comment)\n \n context['site_user_extra_info'] = user_data.get_extra_user_info()\n context['opening_headding'] = comment_list[0].title.split('.', maxsplit=1)\n context['opening_page_info'] = opening_page_info\n \n return context","sub_path":"fromleaf_opening/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612694640","text":"from functools import reduce\n\nimport cv2\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom kocr import basis\nfrom kocr import FurthestApartPointsFinder\nfrom kocr import ConvexPointsConnector\n\n\ndef imshow(img, name=''):\n cv2.imshow(name, img)\n cv2.waitKey(0)\n cv2.destroyWindow(name)\n # plt.imshow(img), plt.title(name)\n # plt.xticks([]), plt.yticks([])\n # plt.show()\n\n\n# img_file = '../../img/20191228.png'\nimg_file = '../../img/20200101.png'\n# img_file = '../../img/20200105.png'\n# img_file = '../../img/201912082029.png'\n# img_file = '../../img/201912182116.png'\nimg_origin = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)\n# imshow(img_origin, '')\n(height, width) = img_origin.shape\n\nimg_inverse = cv2.bitwise_not(img_origin)\n\nimg_adapt = cv2.adaptiveThreshold(img_inverse, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)\nimshow(img_adapt)\n\nimg_depict_raw = img_adapt.copy()\nrawLines = cv2.HoughLinesP(img_depict_raw, 1, np.pi / 180, 200, minLineLength=30, maxLineGap=10)\nrawLineEnds = [line[0] for line in rawLines]\n\n# 定位最大边界的上下左右范围\nleftX, topY, rightX, bottomY = reduce(lambda a, b: [min(a[0], b[0]), min(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3])],\n rawLineEnds)\n\n# 获取与边界范围沾边的直线,即直线的一端在边界上\nboundThick = 5\nleftLines = [end for end in rawLineEnds if abs(end[0] - leftX) <= boundThick] + \\\n [end for end in rawLineEnds if abs(end[2] - leftX) <= boundThick]\ntopLines = [end for end in rawLineEnds if abs(end[1] - topY) <= boundThick] + \\\n [end for end in rawLineEnds if abs(end[3] - topY) <= boundThick]\nrightLines = [end for end in rawLineEnds if abs(end[0] - rightX) <= boundThick] + \\\n [end for end in rawLineEnds if abs(end[2] - rightX) <= boundThick]\nbottomLines = [end for end in rawLineEnds if abs(end[1] - bottomY) <= boundThick] + \\\n [end for end in rawLineEnds if abs(end[3] - bottomY) <= boundThick]\nlineEndsPoint = list(set([(end[0], end[1]) for end in leftLines] + [(end[2], end[3]) for end in leftLines] +\n [(end[0], end[1]) for end in topLines] + [(end[2], end[3]) for end in topLines] +\n [(end[0], end[1]) for end in rightLines] + [(end[2], end[3]) for end in rightLines] +\n [(end[0], end[1]) for end in bottomLines] + [(end[2], end[3]) for end in bottomLines]))\n\naparts = FurthestApartPointsFinder.find(4, 
lineEndsPoint)\ncorners = ConvexPointsConnector.connect(aparts)\n\nimg_frames = np.zeros((height, width, 3), np.uint8)\nfor line in leftLines + topLines + rightLines + bottomLines:\n x1, y1, x2, y2 = line\n cv2.line(img_frames, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\nfor apart, color in dict(zip(corners, [(0, 0, 255), (0, 255, 255), (0, 255, 0), (255, 0, 0)])).items():\n cv2.circle(img_frames, apart, 5, color, 2)\n\nimshow(img_frames)\ncv2.imwrite('../../img/frame.png', img_frames)\n\nredress_height, redress_width = ConvexPointsConnector.rect_shape(corners) # 矫正后的图像尺寸\norigin_corners = [(0, 0), (width, 0), (width, height), (0, height)] # 源图像的矫正点\nredress_corners = [(0, 0), (redress_width, 0), (redress_width, redress_height), (0, redress_height)] # 矫正图像的矫正点\ncorners_map = dict(zip(origin_corners, redress_corners))\n# 获取源角点对应的源图中的角点,再映射成矫正图像中的角点\nredress = [corners_map[red] for red in ConvexPointsConnector.redress_rect_corners(corners, origin_corners)]\nmargin = 5\nredress = list(map(lambda p: (p[0] + margin, p[1] + margin), redress))\n\nredress_transform = cv2.getPerspectiveTransform(np.array(corners, np.float32), np.array(redress, np.float32))\n\nreal_height = int(redress_height + 2 * margin)\nreal_width = int(redress_width + 2 * margin)\nimg_redress = cv2.warpPerspective(img_adapt.copy(), redress_transform, (real_width, real_height))\nimg_redress_origin = cv2.warpPerspective(img_origin.copy(), redress_transform, (real_width, real_height))\n# imshow(img_redress_origin)\ncv2.imwrite('../../img/redress.png', img_redress_origin)\n\nimg_redress_inverse = cv2.bitwise_not(img_redress_origin)\nimg_redress_adapt = cv2.adaptiveThreshold(img_redress_inverse, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15,\n -2)\n\nhorizontal = img_redress_adapt.copy()\nhorizontal_scale = 10.0\n# imshow(horizontal)\nhorizontalSize = int(width / horizontal_scale)\nhorizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontalSize, 1))\nhorizontal = cv2.erode(horizontal, horizontalStructure)\n# imshow(horizontal)\nhorizontal = cv2.dilate(horizontal, horizontalStructure)\n# imshow(horizontal)\n\nvertical = img_redress_adapt.copy()\nvertical_scale = 10.0\n# imshow(vertical)\nverticalSize = int(height / vertical_scale)\nverticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, verticalSize))\nvertical = cv2.erode(vertical, verticalStructure)\n# imshow(vertical)\nvertical = cv2.dilate(vertical, verticalStructure)\n# imshow(vertical)\n\nimshow(cv2.bitwise_or(horizontal, vertical))\ncv2.imwrite('../../img/bit_or.png', cv2.bitwise_or(horizontal, vertical))\n# imshow(cv2.bitwise_and(horizontal, vertical))\n# TODO 交叉点 与 角点 取并集\ncv2.imwrite('../../img/bit_and.png', cv2.bitwise_and(horizontal, vertical))\n\ncross_point = cv2.bitwise_and(horizontal, vertical)\ncross_ys, cross_xs = np.where(cross_point > 0)\nimg_cross = img_redress_adapt.copy()\nfor k, v in basis.clustering_points(zip(cross_xs, cross_ys), 5).items():\n print(k)\n cv2.circle(img_cross, k, 5, (255, 255, 255))\n# imshow(img_cross)\ncv2.imwrite('../../img/cross.png', img_cross)\n\n# split_xs = []\n# cross_sort_xs = np.sort(cross_xs)\n# i = 0\n# for j in range(len(cross_sort_xs) - 1):\n# if cross_sort_xs[j + 1] - cross_sort_xs[j] > 10:\n# split_xs.append(cross_sort_xs[j])\n# i = i + 1\n# split_xs.append(cross_sort_xs[i])\n#\n# split_ys = []\n# cross_sort_ys = np.sort(cross_ys)\n# i = 0\n# for j in range(len(cross_sort_ys) - 1):\n# if cross_sort_ys[j + 1] - cross_sort_ys[j] > 10:\n# split_ys.append(cross_sort_ys[j])\n# i = i + 1\n# 
split_ys.append(cross_sort_ys[i])\n#\n# for x, y in zip(split_xs, split_ys):\n# print(x, y)\n","sub_path":"kocr/demo/DemoContours.py","file_name":"DemoContours.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"416315821","text":"from flask import Flask, make_response, jsonify\nfrom flask_cors import CORS\nfrom flask_jwt_extended import JWTManager\nfrom flask_restful import Api\n\nfrom database.db import db\nfrom resources.archives import ArchivesResource\nfrom resources.students import (\n StudentsResource, StudentCsvRegistration, StudentRegistration\n)\nfrom resources.allocations import (\n AllocationsJsonResource, AllocationsResource\n)\nfrom resources.progress import ProgressResource\nfrom resources.supervisors import (\n SupervisorResource, SupervisorRegister, SupervisorLogin, TokenRefresh, SupervisorDashboardResource\n)\nfrom resources.projects import ProjectsResource\nfrom resources.dashboard import DashboardResource\nimport flask_excel as excel\n\n\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_pyfile('config.py')\nexcel.init_excel(app)\ndb.init_app(app)\napi = Api(app)\nCORS(app, resources={r\"/*\": {\"origins\": app.config['ALLOWED_HOSTS']}}) # ToDo: Specify origins ar CORS for security purposes\njwt = JWTManager(app)\n\n\n@jwt.expired_token_loader\ndef expired_token_handler():\n \"\"\"token sent has expired\"\"\"\n response_obj = {\n 'status': 'failed',\n 'message': 'Your token has expired'\n }\n return make_response(jsonify(response_obj), 401)\n\n\n@jwt.invalid_token_loader\ndef invalid_token_handler():\n \"\"\"token sent deos not match generated token\"\"\"\n response_obj = {\n 'status': 'failed',\n 'message': 'Token is invalid'\n }\n return make_response(jsonify(response_obj), 401)\n\n\n@jwt.unauthorized_loader\ndef unauthorized_token_handler():\n \"\"\"unprivileged user\"\"\"\n response_obj = {\n 'status': 'failed',\n 'message': 'Unauthorized token'\n }\n return make_response(jsonify(response_obj), 401)\n\n\n@jwt.needs_fresh_token_loader\ndef fresh_token_loader_handler():\n \"\"\"token sent is not fresh\"\"\"\n response_obj = {\n 'status': 'failed',\n 'message': 'Needs a fresh token'\n }\n return make_response(jsonify(response_obj), 401)\n\n\n# projects routes\napi.add_resource(ProjectsResource, '/projects')\n\n# students routes\napi.add_resource(StudentsResource, '/students', endpoint='students')\napi.add_resource(StudentsResource, '/students/', endpoint='student')\napi.add_resource(StudentRegistration, '/students/register', endpoint='studentsregister')\napi.add_resource(StudentCsvRegistration, '/students/register/csv', endpoint='studentscsvregister')\n\n\n# allocations routes\napi.add_resource(AllocationsJsonResource, '/allocations/new')\napi.add_resource(AllocationsResource, '/allocations')\n\n# progress routes\napi.add_resource(ProgressResource, '/progress', endpoint='progress')\napi.add_resource(ProgressResource, '/progress/', endpoint='studentprogress')\n\n# supervisors routes\napi.add_resource(SupervisorLogin, '/auth/login')\napi.add_resource(SupervisorRegister, '/supervisor/register')\napi.add_resource(TokenRefresh, '/auth/refresh')\n\napi.add_resource(SupervisorResource, '/supervisor/', endpoint='supervisor')\napi.add_resource(SupervisorResource, '/supervisor', endpoint='supervisors')\napi.add_resource(SupervisorDashboardResource, '/supervisor/dashboard')\n\n# dashboard route\napi.add_resource(DashboardResource, '/dashboard/count')\n\n# archive 
route\napi.add_resource(ArchivesResource, '/archives')\n\nif __name__ == '__main__':\n with app.app_context():\n db.create_all()\n app.run(debug=app.config['DEBUG'], host=app.config['HOST'], port=app.config['PORT'])\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238696092","text":"''' author: samtenka\n changed: 2017-06-04 \n created: 2017-06-04\n descr: symbolically differentiate\n'''\n\nclass expression(object):\n ''' record for expression '''\n def __init__(self, tag, args):\n self.tag = tag\n self.args = args \n\n def print(self, tabs=0):\n print('%s%s' % ('\\t'*tabs, self.tag))\n for arg in self.args:\n if type(arg)==type(self):\n arg.print(tabs+1)\n else:\n print('\\t%s%s' % ('\\t'*tabs, arg))\n\ndef differentiate(tree, var='x'):\n ''' recursively differentiate '''\n if tree.tag == 'CONST': \n return expression('CONST', [0.0])\n elif tree.tag == 'VAR':\n return expression('CONST', [1.0]) if tree.args[0]==var else \\\n expression('CONST', [0.0]) \n elif tree.tag == 'SUM':\n return expression('SUM', [differentiate(term, var) for term in tree.args])\n elif tree.tag == 'PROD':\n return expression('SUM', [\n expression('PROD', tree.args[:i]+[differentiate(tree.args[i], var)]+tree.args[i+1:])\n for i, factor in enumerate(tree.args)\n ])\n\nx = expression('VAR', ['x'])\nxx = expression('PROD', [x, x])\nxx.print()\ndifferentiate(xx, 'x').print()\ndifferentiate(xx, 'y').print()\n","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79986229","text":"# -*- coding: utf-8 -*-\nfrom public import *\nimport unittest\nimport time\nfrom setting import browser, urls\n\n\nclass add_consumer(unittest.TestCase):\n\n def setUp(self):\n self.driver = browser()\n login.css_login(self.driver)\n self.csvFile = 'add_consumer.csv'\n self.driver.implicitly_wait(30)\n self.base_url = urls['首页'] # 登入后首页\n self.verificationErrors = []\n self.accept_next_alert = True\n\n def test_newConsumer(self):\n u'''新增客户'''\n driver = self.driver\n db = tools.postgreSql()\n\n bar.navigationBar(driver, '客户管理').click()\n driver.implicitly_wait(5)\n bar.secondNavBar(driver, '客户资料').click()\n iframe = bar.switchIframe(driver, '客户资料')\n\n driver.switch_to.frame(iframe)\n bar.buttonBar(driver, '新增').click()\n cunsumerCode = driver.find_element_by_name('customer_code').get_attribute('value')\n if db.isConsExist(cunsumerCode): # 如果当前客户代码数据已存在则从表格读取\n cusCode = tools.add_csv_datas(self.csvFile, 0)\n while db.isConsExist(cusCode): # 表格中客户代码也已存在则输入客户代码直到数据库里没有\n cusCode = input(\"输入客户代码:\")\n driver.find_element_by_name('customer_code').clear()\n driver.find_element_by_name('customer_code').send_keys(cusCode)\n\n cusName = tools.add_csv_datas(self.csvFile, 1) # 客户名\n driver.find_element_by_name('customer_name').send_keys(cusName)\n\n pw = tools.add_csv_datas(self.csvFile, 2) # 登录密码\n driver.find_element_by_name('login_pwd').send_keys(pw)\n\n telphone = tools.add_csv_datas(self.csvFile, 3) # 电话\n driver.find_element_by_name('tel').send_keys(telphone)\n\n fax = tools.add_csv_datas(self.csvFile, 4)\n driver.find_element_by_name('fax').send_keys(fax) # 传真\n\n mail = tools.add_csv_datas(self.csvFile, 5)\n driver.find_element_by_name('email').send_keys(mail) # 电子邮件\n\n company = tools.add_csv_datas(self.csvFile, 6)\n 
driver.find_element_by_name('company_name').send_keys(company) # 公司\n\n sn = tools.add_csv_datas(self.csvFile, 7)\n driver.find_element_by_name('company_sn').send_keys(sn) # 公司编号\n\n orgCode = tools.add_csv_datas(self.csvFile, 8)\n driver.find_element_by_name('org_code').send_keys(orgCode) # 组织代码\n\n web = tools.add_csv_datas(self.csvFile, 9)\n driver.find_element_by_name('domain_name').send_keys(web) # 网站\n\n consNum = db.tableCount('tbl_customer')\n bar.buttonBar(driver, '保存').click()\n time.sleep(0.5) # 点击保存,web插入数据时间有延迟\n if db.tableCount('tbl_customer') != (consNum + 1):\n raise NameError('添加失败')\n\n def tearDown(self):\n self.driver.quit()\n self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n unittest.main()\n # suite = unittest.TestSuite()\n # suite.addTest(add_consumer(\"test_newConsumer\"))\n # suite.addTest(add_consumer(\"test_a\"))\n # runner = unittest.TextTestRunner()\n # runner.run(suite)\n","sub_path":"test_case/css_add_consumer.py","file_name":"css_add_consumer.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465561793","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\naa=str(input(\"Enter the GitHub username:\"))\r\n\r\ntry:\r\n r=requests.get('https://github.com/'+str(aa))\r\n soup=BeautifulSoup(r.content,'html.parser')\r\n\r\n a=soup.find_all(class_=\"avatar avatar-user width-full border color-bg-primary\")\r\n try:\r\n print(\"Link to Dev's Profile Photo->\\t\",a[0].get('src'))\r\n\r\n r=requests.get(a[0].get('src'))\r\n f=open(str(aa)+str('.png'),'wb')\r\n f.write(r.content)\r\n\r\n print(\"Image Downloaded!\")\r\n except:\r\n print(\"Account Not Found.\")\r\nexcept:\r\n print(\"Connection Error!😔\")\r\n","sub_path":"github_photo.py","file_name":"github_photo.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409842309","text":"from flask import Flask, request, render_template\nimport json\n\napp = Flask(__name__)\n#структура дерева такова, что в каждой конечной ячейке слова хранится слово целиком\n#это сделано для того, чтобы упростить себе ЗЫЗНЬ. 
иначе не собирать его снизу вверх.\nclass Node:\n    def __init__(self, label=None, data=None):\n        self.label = label\n        self.data = data\n        self.children = dict()\n    \n    def addChild(self, key, data=None):\n        if not isinstance(key, Node):\n            self.children[key] = Node(key, data)\n        else:\n            self.children[key.label] = key\n    \n    def __getitem__(self, key):\n        return self.children[key]\n\nclass PrefixTree:\n    def __init__(self):\n        self.head = Node()\n    \n    def __getitem__(self, key):\n        return self.head.children[key]\n    \n    def add(self, string):\n        now = self.head\n        finished = True  # becomes False if the walk falls off the existing tree\n        for i in range(len(string)):\n            if string[i] in now.children:\n                now = now.children[string[i]]\n            else:\n                finished = False\n                break\n        if not finished:\n            while i < len(string):\n                now.addChild(string[i])\n                now = now.children[string[i]]\n                i += 1\n        now.data = string\n    \n    def check(self, string):\n        if string == '':\n            return False\n        if string is None:\n            raise ValueError('Trie.check requires a not-Null string')\n        now = self.head\n        exists = True\n        for letter in string:\n            if letter in now.children:\n                now = now.children[letter]\n            else:\n                exists = False\n                break\n        if exists:\n            if now.data is None:\n                exists = False\n        return exists\n    \n    def check_part(self, string):\n        if string == '':\n            return False\n        if string is None:\n            raise ValueError('Trie.check_part requires a not-Null string')\n        now = self.head\n        exists = True\n        for letter in string:\n            if letter in now.children:\n                now = now.children[letter]\n            else:\n                exists = False\n                break\n        return exists\n    \n    def getTop10(self, prefix):\n        prefix = prefix.lower()\n        ptr = list()\n        if prefix is None:\n            raise ValueError('Requires not-Null prefix')\n        top_node = self.head\n        for letter in prefix:\n            if letter in top_node.children:\n                top_node = top_node.children[letter]\n            else: \n                return ptr\n        if top_node == self.head:\n            queue = [node for key, node in top_node.children.items()]\n        else:\n            queue = [top_node]\n        while queue:\n            now = queue.pop()\n            if now.data is not None:\n                ptr.append(now.data)\n            queue = [node for key,node in now.children.items()] + queue\n        \n        nums = []\n        for word in ptr:\n            nums.append(int(word[len(word)-3:len(word)]))\n        for i in range(0, len(nums)):\n            flag = 0\n            for j in range(0,len(nums)-1):\n                if nums[j] > nums[j + 1]:\n                    nums[j], nums[j + 1] = nums[j + 1], nums[j]\n                    ptr[j], ptr[j + 1] = ptr[j + 1], ptr[j]\n                    flag = 1\n            if flag == 0:\n                break\n        result = []\n        i = 0\n        for word in ptr: \n            i += 1\n            if i > 10:\n                break\n            result.append(ptr[i - 1][0:len(ptr[i - 1]) - 3])\n        return result\n    #TODO: implementation of the prefix tree class, methods as in the lecture, plus a method returning the top 10 continuations. Say, for the string 'кросс' we return 'кроссовки', 'кроссовочки' etc. How should the top be stored? \n    #Terminal (leaf) nodes can hold a json with the top actors.\ntree=PrefixTree()\ndef init_prefix_tree(filename, tr):\n    file = open(filename, 'r')\n    i = 1\n    w = str()\n    for line in file:\n        w = '0'\n        if i < 10:\n            w += '0'\n        w += str(i)\n        if i > 99:\n            w = str(i)\n        aline = line.lower().replace('\\xa0x', ' X').replace('ё', 'е')[:len(line)-1]\n        aline += w\n        tr.add(aline)\n        i += 1\n\ninit_prefix_tree('movies.txt', tree)\ntree.getTop10('п')\n\n@app.route(\"/get_sudgest/<string>\", methods=['GET', 'POST'])\ndef return_sudgest(string):\n    #TODO: for the query string, return a json with the top-10 suggestions and the values from the nodes\n    list_ = tree.getTop10(string)\n    result = '<h1>THE TOP MOVIES OF ALL TIME for the query ' + string + ':</h1>'\n    for item in list_:\n        result += '<br>'\n        result += item\n        result += '<br>
'\n return json.dumps(result, ensure_ascii = False)\n@app.route(\"/\")\ndef hello():\n #TODO должна возвращатьс инструкция по работе с сервером\n return render_template(\"notes.html\")\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"homeworks/05/flask_prefix_tree.py","file_name":"flask_prefix_tree.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650236701","text":"# python imports\nimport csv\nfrom io import StringIO\nfrom pytz import timezone\nfrom datetime import datetime\nimport logging\n\n# django imports\nfrom django.db.models.fields import BooleanField, SmallIntegerField, IntegerField\nfrom django.conf import settings\nfrom django.db import transaction\n# from django.contrib import messages\n\nlogger = logging.getLogger(__name__)\n\n\nclass RemoveDeleteOptionMixin(object):\n \"\"\"\n Mixin to Remove delete options from admin\n \"\"\"\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"\n This removes the option to delete a model instance\n \"\"\"\n return False\n\n\nclass CSVImportMixin(object):\n \"\"\"\n \"\"\"\n @transaction.atomic\n def import_csv(self, files):\n # generate validated data for all object in csv\n validated_data_list = []\n\n # read csv file\n reader = csv.reader(\n files['csv'].read().decode('utf-8').splitlines()\n )\n\n # validate header\n header = next(reader)\n success = self._validate_header(header, self.csv_template_header)\n if success is False:\n return False, \"Invalid template format for header\"\n\n # validate data\n line_number = 1\n for line in reader:\n line_number += 1\n success, response = self._validate_data(header, line)\n if success is False:\n return (\n False, \"Line {line}: {response}\".format(line=line_number, response=response)\n )\n validated_data_list.append(response)\n\n # insert data\n for validated_data in validated_data_list:\n self.model.objects.create(**validated_data)\n\n # return response\n return True, \"All {obj} uploaded successfully\".format(obj=self.model._meta.model_name)\n\n def _validate_header(self, header, template_header):\n return header == template_header\n\n def _validate_data(self, header, line):\n if len(header) != len(line):\n return False, \"Invalid data\"\n _validate_data_map = dict(zip(header, line))\n\n fields = self.model._meta.fields\n fields_map = dict(\n (field.attname, field)\n for field in fields\n )\n\n # validate all attr in a header\n for attr in header:\n field = fields_map[attr]\n\n if field.validators:\n try:\n data = _validate_data_map[attr]\n if field.__class__ in [SmallIntegerField, IntegerField]:\n data = int(data)\n field.run_validators(data)\n except Exception:\n return False, \"Inavlid validation for {field}\".format(field=attr)\n\n if field.unique:\n if self.model.objects.filter(**{attr: _validate_data_map[attr]}).exists():\n return False, \"Already Exist\"\n\n if field.choices:\n if not (int(_validate_data_map[attr]) in field.choices):\n return False, \"Invalid choices for {field}\".format(field=attr)\n\n if field.is_relation:\n if not field.related_model.objects.filter(\n **{\"id\": _validate_data_map[attr]}\n ).exists():\n return False, \"{field} not exist\".format(field=attr)\n\n if type(field) == BooleanField:\n if int(_validate_data_map[attr]) not in [0, 1]:\n return False, \"{field} choices are: {choices}\".format(\n field=attr, choices=\"0, 1\"\n )\n\n if field.blank is False and _validate_data_map[attr] == '':\n return False, \"Invalid data for 
{field}\".format(field=attr)\n\n # check if unique constraint field already exist\n return True, _validate_data_map\n\n\nclass CSVDownloadMixin(object):\n \"\"\"\n \"\"\"\n # Add custom fields in extra_download_fields\n extra_download_fields = list()\n\n # Add fields to be excluded in exclude_download_fields\n exclude_download_fields = list()\n\n def createCSV(self, queryset):\n \"\"\"\n This method returns a csv file\n \"\"\"\n f = StringIO()\n writer = csv.writer(f)\n headers = list()\n\n for field in queryset.model._meta.fields:\n headers.append(field.name)\n\n if len(self.exclude_download_fields) > 0:\n headers = list(set(headers)-set(self.exclude_download_fields))\n\n # to maintain the order of fields\n headers.sort()\n\n headers += self.extra_download_fields\n my_timezone = timezone(settings.TIME_ZONE)\n\n writer.writerow(headers)\n for obj in queryset:\n row = list()\n for field in headers[0:len(headers)-len(self.extra_download_fields)]:\n field_obj = queryset.model._meta.get_field(field)\n val = getattr(obj, field)\n\n if isinstance(val, datetime):\n # changing timezone to local time\n val = val.astimezone(my_timezone)\n\n if len(field_obj.choices) > 0:\n # changing value of choice fields\n # String value for gender comes as ''\n if val is None or val == '':\n val = 'N/A'\n else:\n val = field_obj.choices[val]\n\n if callable(val):\n val = val()\n\n if type(val) == str:\n val = val.encode(\"utf-8\")\n\n try:\n row.append(val.decode(\"utf-8\"))\n except AttributeError:\n row.append(val)\n row = self.add_extra_headers(row, obj)\n writer.writerow(row)\n\n f.seek(0)\n return f\n\n def customCSV(self, response):\n f = StringIO()\n writer = csv.writer(f)\n headers = list()\n if len(response) > 0:\n headers = response[0].keys()\n writer.writerow(headers)\n for obj in response:\n row = list()\n for fields in headers:\n row.append(obj[fields])\n writer.writerow(row)\n f.seek(0)\n return f\n\n def add_extra_headers(self, row, obj):\n \"\"\"\n This method must be implement if specifying extra fields during download\n \"\"\"\n raise NotImplementedError('add_extra_headers() method must be implemented')\n\n\nclass MarkActiveInactiveMixin(object):\n \"\"\"\n Mixin to add inactive action\n \"\"\"\n def set_inactive(self, request, queryset):\n \"\"\"\n This action marks selected units inactive\n \"\"\"\n queryset.update(is_active=False)\n set_inactive.short_description = \"Mark as Inactive\"\n\n def set_active(self, request, queryset):\n \"\"\"\n This action marks selected units inactive\n \"\"\"\n queryset.update(is_active=True)\n set_active.short_description = \"Mark as Active\"\n","sub_path":"api/libs/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476514069","text":"import glob,os\nimport re, math\nimport sys\n\nfilename = sys.argv[-1]\n\nnewDict = {}\n\n'''\ndef removePostfix(argWord):\n leaves = \"s\", \"es\", \"ed\", \"er\", \"ly\", \"ing\"\n for leaf in leaves:\n if argWord[-len(leaf):] == leaf:\n return argWord[:-len(leaf)]\n'''\nstopwords = []\nwith open(filename + \"/stopwords.txt\") as f1:\n for line in f1:\n for word in line.split():\n stopwords.append(word)\n\nwith open(filename + '/nbmodel1.txt') as f:\n\n probability_spam = f.readline()\n probability_ham = f.readline()\n\n for line in f:\n splitLine = line.split()\n if(splitLine[0] != ' '):\n newDict[splitLine[0]] = [splitLine[1], splitLine[2]]\n\n for word in stopwords:\n if word in newDict:\n del 
newDict[word]\n\ntarget = open(filename + \"/nboutput1.txt\", 'w')\nclassified_correct_spam = 0\nclassified_incorrect_spam = 0\nclassified_correct_ham = 0\nclassified_incorrect_ham = 0\ntotal_files = 0\nclassified_as_spam = 0\nclassified_as_ham = 0\nfile_spam = 0\nfile_ham = 0\n\nfor root, subdirs, files in os.walk(filename + '/dev'):\n    #if (os.path.basename(os.path.normpath(root)) == \"spam1\" || os.path.basename(os.path.normpath(root)) == 'ham1' ):\n    os.chdir(root)\n\n    for file in glob.glob(\"*.txt\"):\n        with open(file, \"r\", encoding=\"latin1\") as f1:\n\n\n            if os.path.basename(os.path.normpath(root)) == \"spam\":\n                file_spam = file_spam + 1\n            else:\n                file_ham = file_ham + 1\n\n            probability_spam_given_text = 0\n            probability_ham_given_text = 0\n\n            for line in f1:\n                line = ''.join(line.splitlines())\n\n                for word in line.split():\n                    word = re.sub('[^\\w]', '', word)\n                    if word in newDict:\n                        probability_spam_given_text = probability_spam_given_text + math.log(float(newDict.get(word)[0]))\n                        probability_ham_given_text = probability_ham_given_text + math.log(float(newDict.get(word)[1]))\n\n            spam_pro = probability_spam_given_text + math.log(float(probability_spam))\n            ham_pro = probability_ham_given_text + math.log(float(probability_ham))\n\n            total_files = total_files + 1\n\n            if(spam_pro > ham_pro):\n\n                target.write(\"Spam\")\n\n                classified_as_spam = classified_as_spam + 1\n                if(os.path.basename(os.path.normpath(root)) == \"spam\"):\n\n                    classified_correct_spam = classified_correct_spam + 1\n                else:\n                    classified_incorrect_spam = classified_incorrect_spam + 1\n\n            else:\n                target.write(\"Ham\")\n\n                classified_as_ham = classified_as_ham + 1\n                if (os.path.basename(os.path.normpath(root)) == \"ham\"):\n                    classified_correct_ham = classified_correct_ham + 1\n                else:\n                    classified_incorrect_ham = classified_incorrect_ham + 1\n\n            target.write(\"\\t\")\n            target.write(root + \"/\" + file)\n            target.write(\"\\n\")\n\nprecision_spam = classified_correct_spam / classified_as_spam\nprecision_ham = classified_correct_ham / classified_as_ham\n\nrecall_spam = classified_correct_spam / file_spam\nrecall_ham = classified_correct_ham / file_ham\n\nF1_spam = (2*precision_spam*recall_spam)/(precision_spam + recall_spam)\nF1_ham = (2*precision_ham*recall_ham) / (precision_ham + recall_ham)\n\nprint(precision_spam)\nprint(precision_ham)\n\nprint(recall_spam)\nprint(recall_ham)\n\nprint(F1_spam)\nprint(F1_ham)\n\n\ntarget.close()\n\n\n\n\n\n","sub_path":"code/nbclassifymod.py","file_name":"nbclassifymod.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"314681999","text":"\"\"\"\r\n**An access token must be obtained in advance.**\r\nRun print_token.py in the same folder first to authenticate and obtain the token.\r\nThe token is written in plain text to printed_token.txt.\r\n\"\"\"\r\n\r\nimport json\r\nimport requests\r\nimport os, tkinter, tkinter.filedialog, tkinter.messagebox\r\n\r\nprinted_token = open(\"printed_token.txt\", \"r\")\r\nareas = printed_token.readlines()\r\nareas[0] = areas[0].strip(\"\\n\")\r\n\r\n# Select the file to process\r\nroot = tkinter.Tk()\r\nroot.withdraw()\r\nfTyp = [(\"\",\"*.txt\")]\r\niDir = os.path.abspath(os.path.dirname(__file__))\r\ntkinter.messagebox.showinfo('Google翻訳API','処理するテキストファイルを選択してください。\\n一行あたり2000文字が翻訳の上限です。')\r\nfile = tkinter.filedialog.askopenfilename(filetypes = fTyp,initialdir = iDir)\r\nf = open(file, 'r', encoding=\"utf-8_sig\")\r\n\r\n# Select the output file\r\ntkinter.messagebox.showinfo('Google翻訳API','書き込むファイルを選択してください。\\n名前が被ると前の内容が消えるので注意!')\r\ntranslatedfile = 
tkinter.filedialog.asksaveasfilename(filetypes = fTyp, initialdir = iDir)\r\ntranslated = open(translatedfile, \"w+\", encoding=\"utf-8_sig\")\r\n\r\nline = f.readline()\r\nwhile line:\r\n \"\"\"\r\n **set your access token**\r\n 1.sinup Google cloud pratform.(GCP)\r\n 1.install google cloud sdk and setup it,\r\n 2.get service-account key file from GCP (it's maybe json file)\r\n 3.type command \"gcloud auth activate-service-account --key-file=XXXXXXXXXXXXX.json\"\r\n 4.type command \"gcloud auth print-access-token\"\r\n 5.token show display . copy it and set this file and run.\r\n more detail:\r\n https://cloud.google.com/translate/docs/premium\r\n \"\"\"\r\n token = areas[0] # set token here\r\n\r\n #REST api / \"premium translation\" url not euqal \"normal transration\" url !\r\n url = \"https://translation.googleapis.com/language/translate/v2\"\r\n #oldurl = \"https://www.googleapis.com/language/translate/v2\"\r\n\r\n # translate / en -> ja\r\n source = \"en\"\r\n target = \"ja\"\r\n\r\n # new translation needed\r\n model = \"nmt\"\r\n\r\n # translate target chars / must be less than 2K characters.\r\n # see : https://cloud.google.com/translate/docs/translating-text#translate-translate-text-python\r\n # this is sample chars from : http://web-tan.forum.impressrd.jp/e/2016/11/17/24396\r\n q = line\r\n\r\n payload = {\r\n 'target':target,\r\n 'source':source,\r\n 'q':q,\r\n 'model':model\r\n }\r\n\r\n headers = {\r\n 'Content-Type':'application/json',\r\n 'Authorization': 'Bearer ' + token,\r\n }\r\n\r\n response = requests.get(url,params=payload,headers=headers)\r\n\r\n # JSON decode\r\n jObj = json.loads(response.text)\r\n\r\n print(jObj)\r\n translated.write(jObj[\"data\"][\"translations\"][0][\"translatedText\"]+\"\\n\")\r\n # json.dump(jObj.translations.translatedText.decode(), translated)\r\n # translated.write(jObj)\r\n\r\n line = f.readline()\r\n # g = open('translated.txt', 'r', encoding=\"utf-8_sig\")\r\n\r\nf.close\r\ntranslated.close\r\nprinted_token.close\r\n# g.close\r\n","sub_path":"translation/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124934984","text":"from fractions import Fraction\r\n\r\nLEVELS = ['None', 'Weak (1)', 'Basic (2)',\r\n 'Mediocre (3)', 'Fair (4)', 'Good (5)',\r\n 'Great (6)', 'Superb (7)',\r\n 'Legendary (8)', 'Legendary 2 (9)',\r\n 'Legendary 3 (10)', 'Legendary 4 (11)',\r\n 'Legendary 5 (12)', 'Legendary 6 (13)',\r\n 'Legendary 7 (14)', 'Legendary 8 (15)', ]\r\n\r\nMAX_HERBS = {'None': 0,\r\n 'Weak (1)': 1,\r\n 'Basic (2)': 2,\r\n 'Mediocre (3)': 2,\r\n 'Fair (4)': 3,\r\n 'Good (5)': 3,\r\n 'Great (6)': 4,\r\n 'Superb (7)': 4,\r\n 'Legendary (8)': 5,\r\n 'Legendary 2 (9)': 5,\r\n 'Legendary 3 (10)': 5,\r\n 'Legendary 4 (11)': 5,\r\n 'Legendary 5 (12)': 5,\r\n 'Legendary 6 (13)': 5,\r\n 'Legendary 7 (14)': 5,\r\n 'Legendary 8 (15)': 5,\r\n }\r\n\r\nREMEDY_DIFFS = {'Poultice': 0,\r\n 'Potion': 1,\r\n 'Oil': 2\r\n }\r\n\r\nREMEDY_COSTS = {\r\n 'Poultice': {},\r\n 'Potion': {},\r\n 'Oil': {}\r\n}\r\n\r\nLEVELS_DICT = {LEVELS[k]: k for k in range(16)}\r\n\r\nFUDGE_PROBS = {\r\n 4: Fraction(1, 81), # rolling +4\r\n 3: Fraction(5, 81), # rolling +3 or better\r\n 2: Fraction(15, 81), # rolling +2 or better\r\n 1: Fraction(31, 81), # rolling +1 or better\r\n 0: Fraction(50, 81), # etc.. 
etc...\r\n -1: Fraction(66, 81),\r\n -2: Fraction(76, 81),\r\n -3: Fraction(80, 81),\r\n}\r\n\r\nSAVE_POUCH_FILE = 'pouch_save.csv'\r\nREMEDY_COST_FILE = 'herb_remedies_costs.csv'\r\n","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"479979273","text":"import sys\nimport time\nfrom threading import Thread\n\nimport logging\n\n\nclass Spinner:\n \"\"\"\n A python class to provide simple loading feature, thread based to a python application\n \"\"\"\n\n # character interval changing time\n SLOW = 0.2\n NORMAL = 0.15\n FAST = 0.05\n\n def __init__(self, change_interval=NORMAL):\n \"\"\"Set the load proprity to True\"\"\"\n self.load = True\n self.change_interval = change_interval\n\n def _spinning_cursor(self):\n \"\"\" A generator which itterate over the string element and yield it\"\"\"\n while True:\n for cursor in '|/-\\\\':\n yield cursor\n\n def _thread_spin(self):\n \"\"\" Handle the display of the different yielded elements\"\"\"\n spinner = self._spinning_cursor()\n\n try:\n while self.load:\n sys.stdout.write(next(spinner))\n sys.stdout.flush()\n time.sleep(self.change_interval) # time to wait before changing the element\n sys.stdout.write('\\b')\n\n except KeyboardInterrupt:\n pass\n\n def spin(self):\n t = Thread(target=self._thread_spin)\n t.start()\n # t.join()\n\n def stop(self):\n self.load = False\n\n# spinner = Spinner();\n\n# spinner.spin()\n\n# time.sleep(50)\n\n# spinner.stop()\n","sub_path":"mfetcher/Loader.py","file_name":"Loader.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"396567902","text":"# USB camera display using PyQt and OpenCV, from iosoft.blog\n# Copyright (c) Jeremy P Bentham 2019\n# Please credit iosoft.blog if you use the information or software in it\n\n# python demo_screen.py --conf utils/config.json\n\nVERSION = \"Mirabeau Smart Shelf Demo\"\n\nimport sys, time, threading, cv2, argparse\nfrom imutils.video import VideoStream, FPS\nimport numpy as np\nfrom utils.conf import Conf\nfrom utils.detector_utils import detect_faces\n\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QTimer, QPoint, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTextEdit, QLabel\nfrom PyQt5.QtWidgets import QWidget, QAction, QVBoxLayout, QHBoxLayout\nfrom PyQt5.QtGui import QFont, QPainter, QImage, QTextCursor\nfrom utils.detector_utils import WebcamVideoStream\nfrom centroidtracker import CentroidTracker\ntry:\n import Queue as Queue\nexcept:\n import queue as Queue\n\n\n# Construct argument parser and parse argumentsa()\nap = argparse.ArgumentParser()\nap.add_argument('-c', '--conf', required=True,\n help='Path to config file')\nargs = vars(ap.parse_args())\nconf = Conf(args[\"conf\"])\n\nIMG_SIZE = 1280,720 # 640,480 or 1280,720 or 1920,1080\nIMG_FORMAT = QImage.Format_RGB888\nDISP_SCALE = 2 # Scaling factor for display image\nDISP_MSEC = 50 # Delay between display cycles\nCAP_API = cv2.CAP_ANY # API: CAP_ANY or CAP_DSHOW etc...\nEXPOSURE = 0 # Zero for automatic exposure\nTEXT_FONT = QFont(\"Courier\", 10)\n\ncamera_num = 1 # Default camera (first in list)\nimage_queue = Queue.Queue() # Queue to hold images\ncapturing = True # Flag to indicate capturing\n\n# Grab images from the camera (separate thread)\ndef grab_images(cam_num, queue):\n cap = cv2.VideoCapture(cam_num-1 + CAP_API)\n while capturing:\n if 
cap.grab():\n retval, image = cap.retrieve(0)\n if image is not None and queue.qsize() < 2:\n queue.put(image)\n else:\n time.sleep(DISP_MSEC / 1000.0)\n else:\n print(\"Error: can't grab camera image\")\n break\n cap.release()\n\n\n# Initialize centroid tracker\nct = CentroidTracker()\n\n# Load serialized model from disk\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(conf[\"prototxt\"], conf[\"model\"])\n\n# Initialize video stream and warmup camera sensor\nprint(\"[INFO] starting video stream\")\nvs = VideoStream(src=0).start()\ntime.sleep(2.0)\n\n# Image widget\nclass ImageWidget(QWidget):\n def __init__(self, parent=None):\n super(ImageWidget, self).__init__(parent)\n self.image = None\n\n def setImage(self, image):\n self.image = image\n self.setMinimumSize(image.size())\n self.update()\n\n def paintEvent(self, event):\n qp = QPainter()\n qp.begin(self)\n if self.image:\n qp.drawImage(QPoint(0, 0), self.image)\n qp.end()\n\n# Main window\nclass MyWindow(QMainWindow):\n\n # Create main window\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n\n self.central = QWidget(self)\n self.vlayout = QVBoxLayout() # Window layout\n self.displays = QHBoxLayout()\n self.disp = ImageWidget(self) \n self.displays.addWidget(self.disp)\n self.vlayout.addLayout(self.displays)\n self.label = QLabel(self)\n self.vlayout.addWidget(self.label)\n self.central.setLayout(self.vlayout)\n self.setCentralWidget(self.central)\n self.setWindowTitle(VERSION)\n\n # Start image capture & display\n def start(self):\n self.timer = QTimer(self) # Timer to trigger display\n self.timer.timeout.connect(lambda: \n self.show_image(image_queue, self.disp, DISP_SCALE))\n self.timer.start(DISP_MSEC) \n self.capture_thread = threading.Thread(target=grab_images, \n args=(camera_num, image_queue))\n self.capture_thread.start() # Thread to grab images\n\n # Fetch camera image from queue, and display it\n def show_image(self, imageq, display, scale):\n if not imageq.empty():\n image = imageq.get()\n if image is not None and len(image) > 0:\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n self.display_image(img, display, scale)\n\n # Display an image, reduce size if required\n def display_image(self, img, display, scale=1):\n disp_size = img.shape[1]//scale, img.shape[0]//scale\n disp_bpl = disp_size[0] * 3\n objects = detect_faces(img, net ,ct)\n if scale > 1:\n img = cv2.resize(img, disp_size, interpolation=cv2.INTER_CUBIC)\n #img = cv2.putText(img, 'OpenCV', (50,50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1)\n #img = cv2.rectangle(img, (50, 40), (100, 100), (0,255,0), 1)\n for (object_ID, centroid) in objects.items(): \n # Draw ID and centroid of the object in the output frame\n text = \"ID {}\".format(object_ID)\n cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1)\n cv2.circle(img, (centroid[0], centroid[1]), 4, (0,255,0), -1)\n qimg = QImage(img.data, disp_size[0], disp_size[1], disp_bpl, IMG_FORMAT)\n display.setImage(qimg)\n\n\n\n\n # Window is closing: stop video capture\n def closeEvent(self, event):\n global capturing\n capturing = False\n self.capture_thread.join()\n\n def flush(self):\n pass\n\n\n\napp = QApplication(sys.argv)\nwin = MyWindow()\nwin.show()\nwin.start()\nsys.exit(app.exec_())\n\n","sub_path":"src/final/old/demo_screen.py","file_name":"demo_screen.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"594107460","text":"def call_viable_methods(obj, attributes):\n rets = []\n\n for attribute in attributes:\n ret = obj\n finished_chain = True\n\n if isinstance(attribute, str):\n attribute = (attribute,)\n\n try:\n for part in attribute:\n ret = ret.__getattribute__(part)()\n except AttributeError:\n finished_chain = False\n\n if finished_chain:\n rets += ret\n\n return rets\n\n\nWINDOW_POSTFIX = ' \\t \\t \\t\\t '\nUNNAMED = ''\n\n\ndef get_name(node):\n name = None\n\n for attr in ('get_name', 'WindowText', 'Text'):\n if hasattr(node, attr):\n name = getattr(node, attr)()\n break\n\n if name is None or len(name) == 0:\n name = UNNAMED\n\n return name\n\n\ndef get_args_string(*args):\n string = ', '\n\n for arg in args:\n string += '{}, '.format(arg.__repr__())\n\n return string[2:-2]\n\n\ndef get_kwargs_string(**kwargs):\n string = ', '\n\n for keyword in kwargs.keys():\n string += '{}={}, '.format(keyword, kwargs[keyword].__repr__())\n\n return string[2:-2]\n\n\ndef save_action_list(filename, action_list):\n import pickle\n\n if filename.endswith('.aut'):\n file = open(filename, 'wb')\n pickle.dump(action_list, file)\n elif filename.endswith('.exe'):\n import os\n import subprocess\n import sys\n\n dll = 'python{}{}.dll'.format(sys.version_info.major, sys.version_info.minor)\n\n with open('autorun.aut', 'wb') as autorun:\n pickle.dump(action_list, autorun)\n\n files = ['autorun.aut', 'x86\\\\startSelfAutomator.exe']\n for bitness in 'x86', 'x64':\n files += [os.path.join(bitness, dll), bitness + '\\\\startListener.exe']\n\n subprocess.call(['export\\\\7za.exe', 'a', '-mx', 'archive.7z'] + files)\n\n with open(filename, 'wb') as exe:\n exe.write(open('export\\\\7zS.sfx', 'rb').read())\n exe.write(open('export\\\\manifest.xml', 'rb').read())\n exe.write(open('export\\\\autoConfig.txt', 'rb').read())\n exe.write(open('archive.7z', 'rb').read())\n\n os.remove('archive.7z')\n os.remove('autorun.aut')\n print('Done exporting')\n else:\n raise Exception('Cannot save file for filename ' + filename)\n\n\ndef load_action_list(filename):\n import pickle\n\n if filename.endswith('.aut'):\n file = open(filename, 'rb')\n action_list = pickle.load(file)\n file.close()\n else:\n raise Exception('Cannot open file for filename ' + filename)\n\n return action_list\n\n\ndef is64bit():\n if not is64bit.isset:\n import sys\n is64bit.value = sys.maxsize > 2 ** 32\n is64bit.isset = True\n\n return is64bit.value\n\n\nis64bit.isset = False","sub_path":"UIAutomator/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522670338","text":"# Module imports.\nfrom . 
import (\n\tactivity_ensemble,\n\tactivity_numerical_experiment,\n\tactivity_simulation_run,\n\tdata_data_object,\n\tgrids_grid_spec,\n\tmisc_document_set,\n\tshared_platform,\n\tsoftware_model_component,\n\tquality_cimquality,\n\t)\n\n\n\n# Module exports.\n__all__ = ['SUPPORTED']\n\n\n\n# Supported extenders keyed by document type.\nSUPPORTED = {\n\t\"cim.1.activity.ensemble\": activity_ensemble,\n\t\"cim.1.activity.numericalexperiment\": activity_numerical_experiment,\n\t\"cim.1.activity.simulationrun\": activity_simulation_run,\n\t\"cim.1.data.dataobject\": data_data_object,\n\t\"cim.1.grids.gridspec\": grids_grid_spec,\n\t\"cim.1.misc.documentset\": misc_document_set,\n\t\"cim.1.shared.platform\": shared_platform,\n\t\"cim.1.software.modelcomponent\": software_model_component,\n\t\"cim.1.software.statisticalmodelcomponent\": software_model_component,\n\t\"cim.1.quality.cimquality\": quality_cimquality,\n}\n","sub_path":"src/pyesdoc/extensions/cim/v1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"643971036","text":"import tornado.ioloop\nimport tornado.web\nimport shutil\nimport os\n\nclass UploadFileHandler(tornado.web.RequestHandler):\n\tdef get(self):\n\t\tself.write(\"\"\"\n\n\tUpload File\n\t\n\t\t
\n\t\t<input type=\"file\" name=\"tp_shared_image\"/>
\n\t\t\n\t\n\n\t\t\"\"\")\n\n\tdef post(self):\n\t\tself.write(\"post success!!\")\n\t\tupload_path = os.path.join(os.path.dirname(__file__), 'files')\n\t\t#upload_path = \"/Users/zhengwei/Desktop/\"\n\t\tupload_path = \"./\"\n\t\tprint(\"upload_path = \" + upload_path)\n\t\tprint(\"print self.request\")\n\t\tprint(self.request)\n\t\tprint(\"print self.request end\")\n\t\tprint(self.request.files)\n\t\tprint(\"print self.request.files end\")\n\t\tfile_metas = self.request.files['tp_shared_image']\n#\t\tfor meta in file_metas:\n#\t\t\tfilename = meta['filename']\n#\t\t\tfilepath = os.path.join(upload_path, filename)\n#\t\t\tprint(\"file name = \" + filename + \" filepath = \" + filepath)\n#\t\t\twith open(filepath, 'wb') as up:\n#\t\t\t\tup.write(meta['body'])\n#\t\t\tself.write('finished')\n\t\t#table_metas = self.request.\n\t\tprint(\"Weizmann post handle end!!\")\n\t\tprint(self.get_argument(\"user_name\"))\n\t\tprint(self.get_argument(\"email\"))\n\napp = tornado.web.Application([\n\t(r'/file', UploadFileHandler),\t\t\n])\n\nif __name__ == '__main__':\n\tapp.listen(3001)\n\ttornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"testTornado/simpleDemo/uploadFile.py","file_name":"uploadFile.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"511025924","text":"import tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport sys\nimport json\n\nclass HomeDTO(object):\n def __init__(self):\n self.rooms=[];\n\nclass RoomDTO(object):\n def __init__(self):\n self.name=\"\";\n self.id=0;\n self.boards=[];\n\nclass BoardDTO(object):\n def __init__(self):\n self.roomName=\"\";\n self.roomId=0;\n self.name=\"\";\n self.id=0;\n self.status=False;\n self.components=[];\n\nclass ComponentDTO(object):\n def __init__(self):\n self.roomName=\"\";\n self.roomId=0;\n self.boardName=\"\";\n self.boardId=None;\n self.name=\"\";\n self.id=0;\n\ndef updateJson(cJson):\n roomId=cJson[\"roomId\"]\n roomName=cJson[\"roomName\"]\n boardId=cJson[\"boardId\"]\n boardName=cJson[\"boardName\"]\n componentId=cJson[\"componentId\"]\n componentName=cJson[\"componentName\"]\n componentStatus=cJson[\"componentStatus\"]\n for room in h.rooms:\n if room.id==roomId:\n for board in room.boards:\n if board.id==boardId:\n for component in board.components:\n if component.id ==componentId:\n component.status=componentStatus\n reformJson()\n\ndef updateBoardJson(bJson):\n roomId=bJson[\"roomId\"]\n roomName=bJson[\"roomName\"]\n boardId=bJson[\"boardId\"]\n boardName=bJson[\"boardName\"]\n boardStatus=bJson[\"boardStatus\"]\n for room in h.rooms:\n if room.id==roomId:\n for board in room.boards:\n if board.id==boardId:\n board.status=boardStatus\n reformJson()\n\n\n\ndef reformJson():\n global newJson;\n newJson=json.dumps(h,default=objectToJson);\n\ndef objectToJson(obj):\n return obj.__dict__;\n\ndef getNewJson():\n return newJson;\n\n\ndef populateDataStructure():\n global r;\n global b;\n global c;\n global h;\n js= open('home.json');\n pd=json.load(js);\n for home,rooms in pd.items():\n h=HomeDTO();\n for room in rooms:\n r=RoomDTO();\n for v,k in room.items():\n if(isinstance(k,list)):\n for board in k:\n b=BoardDTO();\n for i,j in board.items():\n if(isinstance(j,list)):\n for component in j:\n c=ComponentDTO();\n for x,y in component.items():\n if x==\"id\":\n c.id=y;\n if x==\"name\":\n c.name=y;\n if x==\"status\":\n c.status=y;\n if x==\"minLevel\":\n c.minLevel=y;\n if x==\"maxLevel\":\n c.maxLevel=y;\n if 
x==\"regulatable\":\n c.regulatable=y;\n if x==\"roomName\":\n c.roomName=y;\n if x==\"roomId\":\n c.roomId=y;\n if x==\"boardName\":\n c.boardName=y;\n if x==\"boardId\":\n c.boardId=y;\n b.components.append(c);\n else:\n if i==\"id\":\n b.id=j;\n if i==\"name\":\n b.name=j;\n if i==\"roomName\":\n b.roomName=j;\n if i==\"roomId\":\n b.roomId=j;\n r.boards.append(b);\n else:\n if v=='id':\n r.id=k;\n if v=='name':\n r.name=k;\n h.rooms.append(r);\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n self.render('index.html');\n\nclass IndexTesting(tornado.web.RequestHandler):\n def get(self):\n self.render('index1.html');\n\nclass JsonHandler(tornado.web.RequestHandler):\n def post(self):\n self.render(\"home.json\");\n\nclass NewJsonHandler(tornado.web.RequestHandler):\n def post(self):\n self.write(getNewJson());\n self.flush();\n\nclass OnStatusChanged(tornado.web.RequestHandler):\n def post(self):\n bytesJson=self.request.body;\n jsonString=bytesJson.decode().replace(\"'\",'\"');\n print(str(jsonString));\n tempJson=json.loads(jsonString);\n cJson=dict({});\n cJson[\"roomId\"]=tempJson[\"roomId\"]\n cJson[\"roomName\"]=tempJson[\"roomName\"]\n cJson[\"boardId\"]=tempJson[\"boardId\"]\n cJson[\"boardName\"]=tempJson[\"boardName\"]\n cJson[\"componentId\"]=tempJson[\"id\"]\n cJson[\"componentName\"]=tempJson[\"name\"]\n cJson[\"componentStatus\"]=tempJson[\"status\"]\n updateJson(cJson);\n self.write(getNewJson());\n self.flush(); \n SimpleWebSocket.on_status_change(cJson);\n\nclass OnBoardStatusChanged(tornado.web.RequestHandler):\n def post(self):\n bytesJson=self.request.body;\n jsonString=bytesJson.decode().replace(\"'\",'\"');\n tempJson=json.loads(jsonString);\n bJson=dict({});\n bJson[\"roomId\"]=tempJson[\"roomId\"]\n bJson[\"roomName\"]=tempJson[\"roomName\"]\n bJson[\"boardId\"]=tempJson[\"id\"]\n bJson[\"boardName\"]=tempJson[\"name\"]\n bJson[\"boardStatus\"]=tempJson[\"status\"]\n print(bJson);\n updateBoardJson(bJson);\n self.write(getNewJson());\n self.flush(); \n SimpleWebSocket.on_status_change(bJson);\n \n\nclass SimpleWebSocket(tornado.websocket.WebSocketHandler):\n dict=dict([]);\n connections = set();\n def check_origin(self, origin):\n return True;\n def open(self):\n self.connections.add(self);\n print('Connected');\n def on_message(self, message):\n print(message);\n cJson=json.loads(message);\n updateJson(cJson);\n def on_close(self):\n self.connections.remove(self);\n print('Disconnected');\n @classmethod\n def on_status_change(self, cJson):\n for c in self.connections :\n c.write_message(cJson);\n\n\ndef make_app():\n return tornado.web.Application([\n (r\"/websocket\",SimpleWebSocket),\n (r\"/\",IndexHandler),\n (r\"/test\",IndexTesting),\n (r\"/requestNewJson\",NewJsonHandler),\n (r\"/requestJson\",JsonHandler),\n (r\"/onStatusChanged\",OnStatusChanged),\n (r\"/onBoardStatusChanged\",OnBoardStatusChanged)\n ]);\n\nif __name__ == \"__main__\":\n try:\n populateDataStructure();\n reformJson();\n app = make_app();\n print('Server Instantiated,Listening On Port 8888 ....');\n app.listen(8888);\n tornado.ioloop.IOLoop.current().start();\n except KeyboardInterrupt as ke:\n sys.exit();","sub_path":"del/server/ServerEndpoint.py","file_name":"ServerEndpoint.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560054004","text":"#!/usr/bin/python\nimport matrix_io as mio\nimport numpy as np\nimport os\n\ncells = 
[\"BT20\",\"BT549\",\"MCF7\",\"UACC812\"]\nstims = [\"EGF\",\"FGF1\",\"HGF\",\"IGF1\",\"Insulin\",\"NRG1\",\"PBS\",\"Serum\"]\npath = \"aug27.necnet\"\nteam = \"Alphabet\"\nout_path_exp = \"aug27.3am.submit.exp\"\nout_path_insil = \"aug27.3am.submit.insil\"\n\n#TeamName-CellLine-Stimulus-Network.sif\n#TeamName-CellLine-Stimulus-Network.eda\n#TeamName-Network-Writeup.txt\n\ndef save_files(CLS, DCOR, d, path_stem):\n assert CLS['M'].shape==DCOR['M'].shape\n assert CLS['M'].shape[0]==CLS['M'].shape[1]\n assert CLS['row_ids']==DCOR['row_ids']\n assert CLS['row_ids']==CLS['col_ids']\n assert DCOR['row_ids']==DCOR['col_ids']\n n = CLS['M'].shape[0]\n sif,eda = [],[]\n for row in range(n):\n for col in range(n):\n if CLS['M'][row,col] == 1 and DCOR['M'][row,col] > d:\n s,t = CLS['row_ids'][row], CLS['col_ids'][col]\n sif.append('\\t'.join((s,\"1\",t)))\n eda.append(\"%s (%s) %s = %.2f\" % (s,\"1\",t,1))\n with open(path_stem+\".sif\",\"w\") as fp:\n fp.write(\"\\n\".join(sif))\n with open(path_stem+\".eda\",\"w\") as fp:\n fp.write(\"EdgeScore\\n\")\n fp.write(\"\\n\".join(eda))\n\ndef main():\n for cell in cells:\n for stim in stims:\n CLS_D = mio.load(os.path.join(path,\"%s.%s.%s.combined.csv\" % (cell, stim, \"cls\")), delimit_c=\",\")\n DCOR_D = mio.load(os.path.join(path,\"%s.%s.%s.combined.csv\" % (cell, stim, \"dcor\")), delimit_c=\",\")\n stem = \"%s-%s-%s-Network\" % (team,cell,stim)\n save_files(CLS_D, DCOR_D, 0.5, os.path.join(out_path_exp,stem))\n fp = open(os.path.join(out_path_exp,\"%s-Network-Writeup.txt\"%team),\"w\")\n fp.write(\" \")\n fp.close()\n \n CLS_D = mio.load(os.path.join(path,\"insilico.all.cls.combined.csv\"))\n DCOR_D = mio.load(os.path.join(path,\"insilico.all.dcor.combined.csv\"))\n #TeamName-Network-Insilico.sif\n #TeamName-Network-Insilico.eda\n stem = \"%s-Network-Insilico\" % (team)\n save_files(CLS_D, DCOR_D, 0.5, os.path.join(out_path_insil,stem))\n fp = open(os.path.join(out_path_insil,\"%s-Network-Insilico-Writeup.txt\"%team),\"w\")\n fp.write(\" \")\n fp.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"dcor_cls_to_submission.py","file_name":"dcor_cls_to_submission.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306313929","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Eva Bühlmann\n# Sarah Kiener\n#\n# PCL II\n# Übung 4 - Aufgabe 1.2\n\n\nfrom typing import BinaryIO\nfrom lxml import etree as ET\nfrom pathlib import Path\nimport gzip, random\n\n\ndef split_corpus(infile: BinaryIO, targedir: str, n: int=1000):\n target = Path(targedir)\n corp_it = corpus_iterator(infile)\n\n double_selection = algo_r(corp_it, n*2)\n test_dev_set = algo_r(double_selection, n, keep_remaining=True)\n test_set = test_dev_set[0]\n dev_set = test_dev_set[1]\n\n infile.seek(0) # reopen infile\n corp_it2 = (x for x in corpus_iterator(infile) if x not in double_selection)\n\n with gzip.open(target / 'abstracts.txt.training.gz', 'wt', encoding='utf-8') as train_file:\n for sent in corp_it2:\n train_file.write(sent)\n train_file.write('\\n')\n\n with gzip.open(target / 'abstracts.txt.test.gz', 'wt', encoding='utf-8') as test_file:\n for sent in test_set:\n test_file.write(sent)\n test_file.write('\\n')\n\n with gzip.open(target / 'abstracts.txt.development.gz', 'wt', encoding='utf-8') as dev_file:\n for sent in dev_set:\n dev_file.write(sent)\n dev_file.write('\\n')\n\n\ndef algo_r(iterable, k, keep_remaining=False):\n reservoir = []\n remaining = 
[]\n\n for i, item in enumerate(iterable):\n if i < k:\n reservoir.append(item)\n else:\n m = random.randint(0, i)\n if m < k:\n if keep_remaining:\n remaining.append(reservoir[m])\n reservoir[m] = item\n elif keep_remaining:\n remaining.append(item)\n\n if keep_remaining:\n return reservoir, remaining\n else:\n return reservoir\n\n\ndef corpus_iterator(infile): # BinaryIO):\n for _, element in ET.iterparse(infile, tag='document'):\n sentences = ' '.join(sentence.text for sentence in element.iterfind('.//sentence'))\n yield sentences\n element.clear()\n\n\ndef main():\n with gzip.open('Korpusdaten/abstracts.xml.gz', 'rb') as xml_file:\n split_corpus(xml_file, 'abstract_output')\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"corpus_splitter.py","file_name":"corpus_splitter.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"195616655","text":"from OrnsteinUhlenbeckActionNoise import OrnsteinUhlenbeckActionNoise as ou\nfrom models_hra import Critic, ActorU, ActorC\nimport torch as th\nfrom copy import deepcopy\nfrom memory import ReplayMemory, Experience\nfrom torch.optim import Adam\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport numpy as np\nimport pdb\n\n\n\ndef soft_update(target, source, t):\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(\n (1 - t) * target_param.data + t * source_param.data)\n\n\ndef hard_update(target, source):\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(source_param.data)\n\n\nclass MADDPG:\n def __init__(self,\n n_agents,\n dim_obs_list,\n dim_act_list,\n dim_act_u,\n dim_act_c,\n batch_size,\n capacity,\n episodes_before_train,\n lr,\n weight_decay,\n action_noise=None,\n load_models=None):\n dim_obs_sum = sum(dim_obs_list)\n dim_act_sum = sum(dim_act_list)\n\n if load_models is None:\n self.actorsU = [ActorU(dim_obs, dim_act) for (dim_obs, dim_act) in zip(dim_obs_list, dim_act_u)]\n self.actorsC = [ActorC(dim_obs, dim_act) for (dim_obs, dim_act) in zip(dim_obs_list, dim_act_c)]\n self.criticsU = [Critic(dim_obs_sum, dim_act_sum) for i in range(n_agents)]\n self.criticsC = [Critic(dim_obs_sum, dim_act_sum) for i in range(n_agents)]\n self.actorsU_target = deepcopy(self.actorsU)\n self.actorsC_target = deepcopy(self.actorsC)\n self.criticsU_target = deepcopy(self.criticsU)\n self.criticsC_target = deepcopy(self.criticsC)\n self.criticU_optimizer = [Adam(x.parameters(), lr=lr, weight_decay=weight_decay) for x in self.criticsU] # 0.01, 0.005\n self.criticC_optimizer = [Adam(x.parameters(), lr=lr, weight_decay=weight_decay) for x in self.criticsC] # 0.01, 0.005\n self.actorU_optimizer = [Adam(x.parameters(), lr=lr, weight_decay=weight_decay) for x in self.actorsU] # 0.01, 0.005\n self.actorC_optimizer = [Adam(x.parameters(), lr=lr, weight_decay=weight_decay) for x in self.actorsC] # 0.01, 0.005\n self.var = [1.0 for i in range(n_agents)]\n if action_noise == \"OU_noise\":\n self.ou_noises = [ou(mu=np.zeros(dim_act_list[i])) for i in range(n_agents)]\n else:\n print('Start loading models!')\n states = th.load(load_models)\n self.criticsU = states['criticsU']\n self.criticsC = states['criticsC']\n self.actorsU = states['actorsU']\n self.actorsC = states['actorsC']\n self.criticU_optimizer = states['criticU_optimizer']\n self.criticC_optimizer = states['criticC_optimizer']\n self.actorU_optimizer = 
states['actorU_optimizer']\n self.actorC_optimizer = states['actorC_optimizer']\n self.criticsU_target = states['criticsU_target']\n self.criticsC_target = states['criticsC_target']\n self.actorsU_target = states['actorsU_target']\n self.actorsC_target = states['actorsC_target']\n self.var = states['var']\n if action_noise == \"OU_noise\":\n self.ou_noises = [ou(mu=np.zeros(dim_act_list[i]), x0=states['ou_prevs'][i]) for i in range(n_agents)]\n print('Models loaded!')\n\n self.memory = ReplayMemory(capacity)\n self.n_agents = n_agents\n self.batch_size = batch_size\n self.dim_obs_list = dim_obs_list\n self.dim_act_list = dim_act_list\n self.dim_act_u = dim_act_u\n self.dim_act_c = dim_act_c\n self.dim_obs_sum = dim_obs_sum\n self.dim_act_sum = dim_act_sum\n self.use_cuda = th.cuda.is_available()\n self.episodes_before_train = episodes_before_train\n self.clip = 50.0 # 10\n self.action_noise = action_noise\n\n self.GAMMA = 0.95\n self.tau = 0.01\n self.scale_reward = 0.01\n\n if self.use_cuda:\n for x in self.actorsU:\n x.cuda()\n for x in self.actorsC:\n x.cuda()\n for x in self.criticsU:\n x.cuda()\n for x in self.criticsC:\n x.cuda()\n for x in self.actorsU_target:\n x.cuda()\n for x in self.actorsC_target:\n x.cuda()\n for x in self.criticsU_target:\n x.cuda()\n for x in self.criticsC_target:\n x.cuda()\n\n self.steps_done = 0\n self.episode_done = 0\n\n def update_policy(self):\n if self.episode_done <= self.episodes_before_train:\n return None, None, None, None\n\n FloatTensor = th.cuda.FloatTensor if self.use_cuda else th.FloatTensor\n\n c_loss = []\n a_loss = []\n\n criticsU_grad = []\n criticsC_grad = []\n actorsU_grad = []\n actorsC_grad = []\n\n index_obs = 0\n index_act = 0\n for agent in range(self.n_agents):\n if self.episode_done > 30000:\n self.batch_size = 2048\n transitions = self.memory.sample(self.batch_size)\n batch = Experience(*zip(*transitions))\n state_batch = Variable(th.stack(batch.states).type(FloatTensor))\n action_batch = Variable(th.stack(batch.actions).type(FloatTensor))\n reward_batch = Variable(th.stack(batch.rewards).type(FloatTensor))\n next_states_batch = Variable(th.stack(batch.next_states).type(FloatTensor))\n\n # for current agent\n whole_state = state_batch.view(self.batch_size, -1)\n whole_action = action_batch.view(self.batch_size, -1)\n\n # pdb.set_trace()\n ###### critic network #####\n self.criticU_optimizer[agent].zero_grad()\n self.criticC_optimizer[agent].zero_grad()\n currentU_Q = self.criticsU[agent](whole_state, whole_action)\n currentC_Q = self.criticsC[agent](whole_state, whole_action)\n\n idx = 0\n next_actions_ls = []\n for i in range(self.n_agents):\n next_actionU_i = self.actorsU_target[i](next_states_batch[:, idx:(idx + self.dim_obs_list[i])])\n next_actionC_i = self.actorsC_target[i](next_states_batch[:, idx:(idx + self.dim_obs_list[i])])\n next_action_i = th.cat((next_actionU_i, next_actionC_i), 1)\n next_actions_ls.append(next_action_i)\n idx += self.dim_obs_list[i]\n\n next_actions = th.cat(next_actions_ls, 1)\n # pdb.set_trace()\n targetU_Q = self.criticsU_target[agent](\n next_states_batch.view(-1, self.dim_obs_sum),\n next_actions.view(-1, self.dim_act_sum)\n )\n targetC_Q = self.criticsC_target[agent](\n next_states_batch.view(-1, self.dim_obs_sum),\n next_actions.view(-1, self.dim_act_sum)\n )\n\n # here target_Q is y_i of TD error equation\n # target_Q = (target_Q * self.GAMMA) + (reward_batch[:, agent] * self.scale_reward)\n targetU_Q = targetU_Q * self.GAMMA + reward_batch[:, agent, :1]\n targetC_Q = targetC_Q * 
self.GAMMA + reward_batch[:, agent, 1:]\n lossU_Q = nn.MSELoss()(currentU_Q, targetU_Q.detach())\n lossC_Q = nn.MSELoss()(currentC_Q, targetC_Q.detach())\n lossU_Q.backward()\n lossC_Q.backward()\n\n if self.clip is not None:\n nn.utils.clip_grad_norm(self.criticsU[agent].parameters(), self.clip)\n nn.utils.clip_grad_norm(self.criticsC[agent].parameters(), self.clip)\n self.criticU_optimizer[agent].step()\n self.criticC_optimizer[agent].step()\n\n ##### actor network #####\n self.actorU_optimizer[agent].zero_grad()\n self.actorC_optimizer[agent].zero_grad()\n state_i = state_batch[:, index_obs:(index_obs+self.dim_obs_list[agent])]\n index_obs += self.dim_obs_list[agent]\n actionU_i = self.actorsU[agent](state_i)\n actionC_i = self.actorsC[agent](state_i)\n action_i_U = th.cat((actionU_i, actionC_i.detach()), 1)\n action_i_C = th.cat((actionU_i.detach(), actionC_i), 1)\n acU = action_batch.clone()\n acC = action_batch.clone()\n acU[:, index_act:(index_act + self.dim_act_list[agent])] = action_i_U\n acC[:, index_act:(index_act + self.dim_act_list[agent])] = action_i_C\n whole_actionU = acU.view(self.batch_size, -1)\n whole_actionC = acC.view(self.batch_size, -1)\n index_act += self.dim_act_list[agent]\n\n # pdb.set_trace()\n actorU_loss = -self.criticsU[agent](whole_state, whole_actionU)\n actorC_loss = -self.criticsC[agent](whole_state, whole_actionC)\n\n # update actor networks\n actorU_loss = actorU_loss.mean()\n actorU_loss.backward()\n actorC_loss = actorC_loss.mean()\n actorC_loss.backward()\n if self.clip is not None:\n nn.utils.clip_grad_norm(self.actorsU[agent].parameters(), self.clip)\n nn.utils.clip_grad_norm(self.actorsC[agent].parameters(), self.clip)\n self.actorU_optimizer[agent].step()\n self.actorC_optimizer[agent].step()\n\n '''\n # update actor network from gradients of physical and comm loss\n loss = []\n for i in range(len(actor_loss[0])):\n loss.append(actor_loss[:, i].mean())\n loss.backward(loss) # wrong one\n\n if self.clip is not None:\n nn.utils.clip_grad_norm(self.actors[agent].parameters(), self.clip)\n self.actor_optimizer[agent].step()\n '''\n\n # for plotting\n c_loss.append(lossU_Q)\n a_loss.append(actorU_loss)\n\n criticsU_agent_grad = []\n criticsC_agent_grad = []\n actorsU_agent_grad = []\n actorsC_agent_grad = []\n for x in self.criticsU[agent].parameters():\n criticsU_agent_grad.append(x.grad.data.norm(2))\n # critics_agent_grad.append(th.mean(x.grad).data[0])\n for x in self.criticsC[agent].parameters():\n criticsC_agent_grad.append(x.grad.data.norm(2))\n # critics_agent_grad.append(th.mean(x.grad).data[0])\n for x in self.actorsU[agent].parameters():\n actorsU_agent_grad.append(x.grad.data.norm(2))\n # actorsU_agent_grad.append(th.mean(x.grad).data[0])\n for x in self.actorsC[agent].parameters():\n actorsC_agent_grad.append(x.grad.data.norm(2))\n # actorsC_agent_grad.append(th.mean(x.grad).data[0])\n\n criticsU_grad.append(criticsU_agent_grad)\n criticsC_grad.append(criticsC_agent_grad)\n actorsU_grad.append(actorsU_agent_grad)\n actorsC_grad.append(actorsC_agent_grad)\n\n # update of target network\n if self.steps_done % 100 == 0 and self.steps_done > 0:\n for i in range(self.n_agents):\n soft_update(self.criticsU_target[i], self.criticsU[i], self.tau)\n soft_update(self.criticsC_target[i], self.criticsC[i], self.tau)\n soft_update(self.actorsU_target[i], self.actorsU[i], self.tau)\n soft_update(self.actorsC_target[i], self.actorsC[i], self.tau)\n\n return criticsU_grad, criticsC_grad, actorsU_grad, actorsC_grad\n\n def select_action(self, obs): # 
concatenation of observations from agents\n FloatTensor = th.cuda.FloatTensor if self.use_cuda else th.FloatTensor\n\n # pdb.set_trace()\n # obs is Variable with dimension of dim_state_sum\n obs = obs.view(-1, self.dim_obs_sum)\n actions = Variable(th.zeros(1, self.dim_act_sum)).type(FloatTensor)\n\n index_obs = 0\n index_act = 0\n for i in range(self.n_agents):\n sb = obs[:, index_obs:(index_obs+self.dim_obs_list[i])]\n actU = self.actorsU[i](sb)\n actC = self.actorsC[i](sb)\n act = th.cat((actU, actC), 1)\n # act = act.view(self.dim_act_list[i])\n\n # add exploration noise of OU process or Gaussian\n if self.action_noise == \"OU_noise\":\n if self.dim_act_list[i] == 5:\n act += Variable(th.FloatTensor(self.ou_noises[i]() * self.var[i]).type(FloatTensor))\n if self.dim_act_list[i] == 8:\n noise = th.FloatTensor(self.ou_noises[i]() * self.var[i]).type(FloatTensor)\n noise[-3:] = th.zeros(3).type(FloatTensor)\n act += Variable(noise)\n elif self.action_noise == \"Gaussian_noise\":\n if self.dim_act_list[i] == 5:\n act += Variable(\n th.FloatTensor(np.random.randn(self.dim_act_list[i]) * self.var[i]).type(FloatTensor))\n if self.dim_act_list[i] == 8:\n noise = th.FloatTensor(np.random.randn(self.dim_act_list[i]) * self.var[i]).type(FloatTensor)\n noise[-3:] = th.zeros(3).type(FloatTensor)\n act += Variable(noise)\n\n # decay of action exploration\n if self.episode_done > self.episodes_before_train and self.var[i] > 0.05:\n self.var[i] *= 0.999998\n\n # ? remove ?\n act = th.clamp(act, -1.0, 1.0)\n actions[:, index_act:(index_act+self.dim_act_list[i])] = act\n\n index_obs += self.dim_obs_list[i]\n index_act += self.dim_act_list[i]\n\n self.steps_done += 1\n\n return actions\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"maddpg_ref_hra/MADDPG_hra.py","file_name":"MADDPG_hra.py","file_ext":"py","file_size_in_byte":14025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647522074","text":"import sqlite3\nimport warnings\n\nfrom datasette.utils.asgi import Forbidden, NotFound, Response\n\nfrom datasette_reconcile.settings import DEFAULT_TYPE\n\n\nclass ReconcileError(Exception):\n pass\n\n\nasync def check_permissions(request, permissions, ds):\n \"permissions is a list of (action, resource) tuples or 'action' strings\"\n \"from https://github.com/simonw/datasette/blob/main/datasette/views/base.py#L69\"\n for permission in permissions:\n if isinstance(permission, str):\n action = permission\n resource = None\n elif isinstance(permission, (tuple, list)) and len(permission) == 2:\n action, resource = permission\n else:\n assert (\n False\n ), \"permission should be string or tuple of two items: {}\".format(\n repr(permission)\n )\n ok = await ds.permission_allowed(\n request.actor,\n action,\n resource=resource,\n default=None,\n )\n if ok is not None:\n if ok:\n return\n else:\n raise Forbidden(action)\n\n\nasync def check_config(config, db, table):\n is_view = bool(await db.get_view_definition(table))\n table_exists = bool(await db.table_exists(table))\n if not is_view and not table_exists:\n raise NotFound(\"Table not found: {}\".format(table))\n\n if not config:\n raise NotFound(\n \"datasette-reconcile not configured for table {} in database {}\".format(\n table, str(db)\n )\n )\n\n pks = await db.primary_keys(table)\n if not pks:\n pks = [\"rowid\"]\n\n if \"id_field\" not in config and len(pks) == 1:\n config[\"id_field\"] = pks[0]\n elif \"id_field\" not in config:\n raise ReconcileError(\"Could not determine an ID field to 
use\")\n\n    if \"name_field\" not in config:\n        raise ReconcileError(\"Name field must be defined to activate reconciliation\")\n\n    if \"type_field\" not in config and \"type_default\" not in config:\n        config[\"type_default\"] = [DEFAULT_TYPE]\n\n    if \"max_limit\" in config and not isinstance(config[\"max_limit\"], int):\n        raise TypeError(\"max_limit in reconciliation config must be an integer\")\n\n    if \"type_default\" in config:\n        if not isinstance(config[\"type_default\"], list):\n            raise ReconcileError(\"type_default should be a list of objects\")\n        for t in config[\"type_default\"]:\n            if not isinstance(t, dict):\n                raise ReconcileError(\"type_default values should be objects\")\n            if not isinstance(t.get(\"id\"), str):\n                raise ReconcileError(\"type_default 'id' values should be strings\")\n            if not isinstance(t.get(\"name\"), str):\n                raise ReconcileError(\"type_default 'name' values should be strings\")\n\n    if \"view_url\" in config:\n        if \"{{id}}\" not in config[\"view_url\"]:\n            raise ReconcileError(\"View URL must contain {{id}}\")\n\n    config[\"fts_table\"] = await db.fts_table(table)\n\n    # let's show a warning if sqlite3 version is less than 3.30.0\n    # full text search results will fail for < 3.30.0 if the table\n    # name contains special characters\n    if config[\"fts_table\"] and (\n        (sqlite3.sqlite_version_info[0] == 3 and sqlite3.sqlite_version_info[1] < 30)\n        or sqlite3.sqlite_version_info[0] < 3\n    ):\n        warnings.warn(\n            \"Full Text Search queries for sqlite3 version < 3.30.0 will fail if table name contains special characters\"\n        )\n\n    return config\n\n\ndef get_select_fields(config):\n    select_fields = [config[\"id_field\"], config[\"name_field\"]] + config.get(\n        \"additional_fields\", []\n    )\n    if config.get(\"type_field\"):\n        select_fields.append(config[\"type_field\"])\n    return select_fields\n\n\ndef get_view_url(ds, database, table):\n    id_str = \"{{id}}\"\n    if hasattr(ds, \"urls\"):\n        return ds.urls.row(database, table, id_str)\n    db = ds.databases[database]\n    base_url = ds.config(\"base_url\")\n    HASH_LENGTH = 7  # value of datasette.utils.HASH_LENGTH, defined locally because it is never imported in this module\n    if ds.config(\"hash_urls\") and db.hash:\n        return \"{}{}-{}/{}/{}\".format(\n            base_url, database, db.hash[:HASH_LENGTH], table, id_str\n        )\n    else:\n        return \"{}{}/{}/{}\".format(base_url, database, table, id_str)\n","sub_path":"datasette_reconcile/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"277905727","text":"#!/usr/bin/env python2\n\nimport argparse\nimport logging\nimport os\nimport signal\nimport sys\nimport threading\nimport traceback\nfrom logging.handlers import RotatingFileHandler\n\nfrom apps.app_manager import AppManager\nfrom context_manager import ContextManager\nfrom helpers import read_config, local_path_gen, logger, env, read_or_create_config, \\\n    zpui_running_as_service, is_emulator\nfrom input import input\nfrom output import output\nfrom actions import ContextSwitchAction\nfrom ui import Printer\nimport pidcheck\n\nrconsole_port = 9377\n\npid_path = '/run/zpui_pid.pid'\n\nlocal_path = local_path_gen(__name__)\nconfig_paths = ['/boot/zpui_config.json', '/boot/pylci_config.json'] if not is_emulator() else []\nconfig_paths.append(local_path('config.json'))\n#Using the .example config as a last resort\nconfig_paths.append(local_path('default_config.json'))\n\ninput_processor = None\ninput_device_manager = None\nscreen = None\ncm = None\nconfig = None\nconfig_path = None\napp_man = None\n\ndef load_config():\n    config = None\n    # Load config\n    for 
config_path in config_paths:\n #Only try to load the config file if it's present\n #(unclutters the logs)\n if os.path.exists(config_path):\n try:\n logging.debug('Loading config from {}'.format(config_path))\n config = read_config(config_path)\n except:\n logging.exception('Failed to load config from {}'.format(config_path))\n config_path = None\n else:\n logging.info('Successfully loaded config from {}'.format(config_path))\n break\n # After this loop, the config_path global should contain\n # path for config that successfully loaded\n\n return config, config_path\n\ndefault_log_config = \"\"\"{\"dir\":\"logs/\", \"filename\":\"zpui.log\", \"format\":\n[\"[%(levelname)s] %(asctime)s %(name)s: %(message)s\",\"%Y-%m-%d %H:%M:%S\"],\n\"file_size\":1048576, \"files_to_store\":5}\n\"\"\"\nlog_config = read_or_create_config(\"log_config.json\", default_log_config, \"ZPUI logging\")\nlogging_dir = log_config[\"dir\"]\nlog_filename = log_config[\"filename\"]\n# Making sure the log dir exists - create it if it's not\ntry:\n os.makedirs(logging_dir)\nexcept OSError:\n pass\n#Set all the logging parameter variables\nlogging_path = os.path.join(logging_dir, log_filename)\nlogging_format = log_config[\"format\"]\nlogfile_size = log_config[\"file_size\"]\nfiles_to_store = log_config[\"files_to_store\"]\n\n\n\ndef init():\n \"\"\"Initialize input and output objects\"\"\"\n\n global input_processor, input_device_manager, screen, cm, config, config_path\n config, config_path = load_config()\n\n if config is None:\n sys.exit('Failed to load any config files!')\n\n # Initialize output\n try:\n screen = output.init(config['output'])\n except:\n logging.exception('Failed to initialize the output object')\n logging.exception(traceback.format_exc())\n sys.exit(2)\n\n # Initialize the context manager\n cm = ContextManager()\n # Initialize input\n try:\n # Now we can show errors on the display\n input_processor, input_device_manager = input.init(config[\"input\"], cm)\n except:\n logging.exception('Failed to initialize the input object')\n logging.exception(traceback.format_exc())\n Printer(['Oops. 
:(', 'y u make mistake'], None, screen, 0)\n sys.exit(3)\n\n # Tying objects together\n if hasattr(screen, \"set_backlight_callback\"):\n screen.set_backlight_callback(input_processor)\n cm.init_io(input_processor, screen)\n c = cm.contexts[\"main\"]\n c.register_action(ContextSwitchAction(\"switch_main_menu\", None, menu_name=\"Main menu\"))\n cm.switch_to_context(\"main\")\n i, o = cm.get_io_for_context(\"main\")\n\n return i, o\n\n\ndef launch(name=None, **kwargs):\n \"\"\"\n Launches ZPUI, either in full mode or in\n single-app mode (if ``name`` kwarg is passed).\n \"\"\"\n\n global app_man\n\n i, o = init()\n appman_config = config.get(\"app_manager\", {})\n app_man = AppManager('apps', cm, config=appman_config)\n\n if name is None:\n try:\n from splash import splash\n splash(i, o)\n except:\n logging.exception('Failed to load the splash screen')\n\n # Load all apps\n app_menu = app_man.load_all_apps()\n runner = app_menu.activate\n cm.switch_to_start_context()\n else:\n # If using autocompletion from main folder, it might\n # append a / at the name end, which isn't acceptable\n # for load_app\n name = name.rstrip('/')\n\n # Load only single app\n try:\n context_name, app = app_man.load_single_app_by_path(name, threaded=False)\n except:\n logging.exception('Failed to load the app: {0}'.format(name))\n input_processor.atexit()\n raise\n cm.switch_to_context(context_name)\n runner = app.on_start if hasattr(app, \"on_start\") else app.callback\n\n exception_wrapper(runner)\n\n\ndef exception_wrapper(callback):\n \"\"\"\n This is a wrapper for all applications and menus.\n It catches exceptions and stops the system the right\n way when something bad happens, be that a Ctrl+c or\n an exception in one of the applications.\n \"\"\"\n status = 0\n try:\n callback()\n except KeyboardInterrupt:\n logging.info('Caught KeyboardInterrupt')\n Printer([\"Does Ctrl+C\", \"hurt scripts?\"], None, screen, 0)\n status = 1\n except:\n logging.exception('A wild exception appears!')\n Printer([\"A wild exception\", \"appears!\"], None, screen, 0)\n status = 1\n else:\n logging.info('Exiting ZPUI')\n Printer(\"Exiting ZPUI\", None, screen, 0)\n finally:\n input_processor.atexit()\n sys.exit(status)\n\n\ndef dump_threads(*args):\n \"\"\"\n Helpful signal handler for debugging threads\n \"\"\"\n\n logger.critical('\\nSIGUSR received, dumping threads!\\n')\n for i, th in enumerate(threading.enumerate()):\n logger.critical(\"{} - {}\".format(i, th))\n for th in threading.enumerate():\n logger.critical(th)\n log = traceback.format_stack(sys._current_frames()[th.ident])\n for frame in log:\n logger.critical(frame)\n\n\ndef spawn_rconsole(*args):\n \"\"\"\n USR2-activated debug console\n \"\"\"\n try:\n from rfoo.utils import rconsole\n except ImportError:\n logger.exception(\"can't import rconsole - python-rfoo not installed?\")\n return False\n try:\n rconsole.spawn_server(port=rconsole_port)\n except:\n logger.exception(\"Can't spawn rconsole!\")\n\n\nif __name__ == '__main__':\n \"\"\"\n Parses arguments, initializes logging, launches ZPUI\n \"\"\"\n\n # Signal handler for debugging\n signal.signal(signal.SIGUSR1, dump_threads)\n signal.signal(signal.SIGUSR2, spawn_rconsole)\n signal.signal(signal.SIGHUP, logger.on_reload)\n\n # Setup argument parsing\n parser = argparse.ArgumentParser(description='ZPUI runner')\n parser.add_argument(\n '--app',\n '-a',\n help='Launch ZPUI with a single app loaded (useful for testing)',\n dest='name',\n default=None)\n parser.add_argument(\n '--log-level',\n '-l',\n help='The 
minimum log level to output',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n default='INFO')\n parser.add_argument(\n '--ignore-pid',\n help='Skips PID check on startup (not applicable for emulator as it doesn\\'t do PID check)',\n action='store_true')\n args = parser.parse_args()\n\n # Setup logging\n logger = logging.getLogger()\n formatter = logging.Formatter(*logging_format)\n\n # Rotating file logs (for debugging crashes)\n rotating_handler = RotatingFileHandler(\n logging_path,\n maxBytes=logfile_size,\n backupCount=files_to_store)\n rotating_handler.setFormatter(formatter)\n logger.addHandler(rotating_handler)\n\n # Live console logging\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n # Set log level\n logger.setLevel(args.log_level)\n\n # Check if another instance is running\n if not is_emulator():\n if args.ignore_pid:\n logger.info(\"Skipping PID check\");\n else:\n is_interactive = not zpui_running_as_service()\n do_kill = zpui_running_as_service()\n pidcheck.check_and_create_pid(pid_path, interactive=is_interactive, kill_not_stop=do_kill)\n\n # Launch ZPUI\n launch(**vars(args))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363278465","text":"import sys\nimport StringIO\nimport re\nimport json\nfrom pycparser import c_ast\nfrom pycparser.plyparser import Coord\nfrom inspect import currentframe, getframeinfo\n\n\ndef str_literal(str):\n \"\"\"\n Escape a string into a c string literal\n \"\"\"\n # using json.dumps for cheap escaping, it's not going to be perfect\n # TODO: Probably should be improved\n return json.dumps(str)\n\n\ndef type_find(decl, ast_type):\n result = decl.type\n while result is not None and not isinstance(result, ast_type):\n if not hasattr(result, 'type'):\n raise ExpansionError(\n None, decl,\n 'Unable to find child of type ' + ast_type.__name__\n )\n result = result.type\n return result\n\n\ndef idtype_or_struct_find(decl):\n \"\"\"\n Find the IdentifierType or Struct inside a Decl. 
Count PtrDecls as dereference.\n    arraydecl is an ArrayDecl if present\n    Returns a tuple of (idtype, structtype, enumtype, arraydecl, dereference)\n    \"\"\"\n    idtype = decl.type\n    structtype = None\n    enumtype = None\n    arraydecl = None\n    dereference = 0\n    while idtype and not isinstance(idtype, c_ast.IdentifierType):\n        if isinstance(idtype, c_ast.PtrDecl):\n            dereference = dereference + 1\n        if isinstance(idtype, c_ast.ArrayDecl):\n            arraydecl = idtype\n        if isinstance(idtype, c_ast.Struct):\n            structtype = idtype\n            idtype = None\n            break\n        if isinstance(idtype, c_ast.Enum):\n            enumtype = idtype\n            idtype = None\n            break\n        if not hasattr(idtype, 'type'):\n            raise ExpansionError(None, decl, 'Could not find IdentifierType or Struct')\n        idtype = idtype.type\n    return idtype, structtype, enumtype, arraydecl, dereference\n\n\nclass NodeList():\n    def __init__(self, other):\n        self.idx = 0;\n        self._list = list(other)\n\n    def seek(self, other):\n        \"\"\"\n        Place the cursor directly after other in the list\n        Return True if other was found, False otherwise.\n        Only goes forward\n        \"\"\"\n        i = max(0, self.idx - 1)\n        length = len(self._list)\n        while i < length:\n            if self._list[i] == other:\n                self.idx = i + 1\n                return True\n            i += 1\n        return False\n\n    def insert(self, other):\n        \"\"\"\n        Insert at the current cursor position.\n        \"\"\"\n        self._list.insert(self.idx, other)\n        self.idx = self.idx + 1\n\n    def splice(self, other):\n        \"\"\"\n        insert many at the current cursor position\n        \"\"\"\n        for o in other:\n            self.insert(o)\n\n    def __iter__(self):\n        for i in self._list:\n            yield i\n\n\nclass ExpansionError(Exception):\n    def __init__(self, annotation, node, message=None):\n        self.annotation = annotation\n        self.node = node\n        self.message = message\n    def __str__(self):\n        message = ''\n        if self.message:\n            message = ': ' + self.message\n        node_show = ''\n        if hasattr(self.node, 'show'):\n            show_buf = StringIO.StringIO()\n            self.node.show(buf=show_buf)\n            node_show = str(self.node.coord) + '\\n'\n            node_show += show_buf.getvalue()\n        else:\n            node_show = repr(self.node)\n\n        try:\n            return 'Unable to process annotation @{0} at line {1}{2}\\n{3}\\n{4}'.format(\n                self.annotation['name'] if self.annotation else None,\n                self.annotation['line'] if self.annotation else None,\n                message,\n                'Annotation: ' + repr(self.annotation),\n                'Node: ' + node_show\n            )\n        except Exception as ex:\n            return str(ex)\n\nclass Annotations():\n\n    # TODO: make these configurable?\n    PROPERTIES_NAME = '{0}__jstruct_properties__'.format\n    JSON_TYPE_NAME = 'json_type_{0}'.format\n    LENGTH_PROPERTY_NAME = '{0}__length__'.format\n    NULLABLE_PROPERTY_NAME = '{0}__null__'.format\n    \"\"\"\n    Parse and apply annotations.\n    \"\"\"\n    def __init__(self, source):\n        if source:\n            self.parse(source)\n        else:\n            self.annotations = None\n            self.idx = None\n            self.len = None\n        self._ast_info = None\n\n    def get(self, line):\n        \"\"\"\n        Return a generator of all\n        annotations for the specified line.\n        line must be larger than any previous call,\n        except after calling parse() or reset()\n        \"\"\"\n        a = True\n        while a:\n            a = self.get_next(line)\n            if a:\n                yield a\n\n    def get_next(self, line):\n        if self.idx >= self.len:\n            return None\n\n        if self.annotations[self.idx]['line'] <= line:\n            a = self.annotations[self.idx]\n            self.idx = self.idx + 1\n            return a\n        else:\n            return None\n\n    # general seek() instead ?\n    def reset(self):\n        \"\"\"\n        reset the internal index to start pulling\n        annotations from the start.\n        \"\"\"\n        self.idx = 0\n\n    def _extract_ast_info(self, ast):\n        \"\"\"\n        Extract type info from the ast 
that can help to statically\n check the validity of annotation expansion\n \"\"\"\n keys = ('jstruct_extra_type', )\n nodes = (n for n in ast.ext if n.name in keys)\n\n def extract(n):\n if isinstance(n.type.type, c_ast.Enum):\n return set((e.name for e in n.type.type.values.enumerators))\n raise ExpansionError(None, n, 'Unable to extract useful info')\n\n self._ast_info = {n.name: extract(n) for n in nodes}\n\n def get_types(self, decl, type_annotations):\n \"\"\"\n get the proper types for json, member and jstruct types\n returns a dict with some or all of these members:\n {\n 'json': 'json_type_x',\n 'member': 'json_type_x',\n 'extra': 'jstruct_extra_type_x',\n 'jstruct': 'x__jstruct_properties__'\n }\n and an ArrayDecl if one was found\n \"\"\"\n # one-to-one json_types\n json_type_map = {\n 'bool': 'boolean',\n 'int': 'int',\n 'double': 'double',\n 'char*': 'string'\n }\n # types found here will have a jstruct_extra_type\n # assigned as well as the json_type\n # TODO: generate these instead?\n extra_type_map = {\n 'uint32_t': 'int',\n 'int64_t': 'int',\n 'uint64_t': 'int',\n }\n\n idtype, structtype, enumtype, arraydecl, dereference = idtype_or_struct_find(decl)\n is_array = arraydecl is not None\n\n def try_get_types(deref):\n if idtype:\n while deref > -1:\n ctypename = ' '.join(idtype.names)\n ctype = ctypename + ('*' * deref)\n result = {}\n try:\n result['json'] = Annotations.JSON_TYPE_NAME(json_type_map[ctype])\n except KeyError:\n try:\n result['json'] = Annotations.JSON_TYPE_NAME(extra_type_map[ctype])\n except KeyError:\n raise ExpansionError(\n type_annotations.get('array', None),\n decl,\n '\\nUnable to map {0} to json type\\n'.format(ctype)\n )\n result['extra'] = 'jstruct_extra_type_' + ctypename\n\n if result['extra'] not in self._ast_info['jstruct_extra_type']:\n raise ExpansionError(\n None,\n decl,\n result['extra'] + ' is not defined'\n )\n return result\n elif enumtype:\n result = {}\n result['json'] = Annotations.JSON_TYPE_NAME(json_type_map['int'])\n result['extra'] = 'jstruct_enum_extra_type(enum {0})'.format(enumtype.name)\n return result\n else:\n jstruct_type = self.PROPERTIES_NAME(structtype.name)\n # struct type\n if arraydecl:\n deref += 1\n if deref == 0:\n return {\n 'json': 'json_type_object',\n 'jstruct': jstruct_type\n }\n elif deref == 1:\n return {\n 'json': 'json_type_array',\n 'member': 'json_type_object',\n 'jstruct': jstruct_type\n }\n else:\n ExpansionError(\n None,\n decl,\n 'Unable to deal with property of type {0}{1}{2}'\n .format(\n '*' * dereference,\n structtype.name,\n '[]' if arraydecl else ''\n )\n )\n\n initial_err = None\n while dereference >= 0:\n try:\n types = try_get_types(dereference)\n\n if is_array and arraydecl is None:\n types['member'] = types['json']\n types['json'] = 'json_type_array'\n return (types, arraydecl)\n except ExpansionError as err:\n # try again in case it's an array\n if not is_array:\n dereference -= 1\n is_array = True\n if initial_err is None:\n initial_err = err\n\n raise initial_err\n\n def get_property_init_list(self, struct):\n \"\"\"\n Create an InitList describing the property list for a struct\n Also create a list of c_ast.Decl to append to the struct decls\n \"\"\"\n\n def make_extra_decl(name, t):\n idtype = c_ast.IdentifierType([t])\n td = c_ast.TypeDecl(name, [], idtype)\n return c_ast.Decl(\n name,\n [], # quals\n [], # storage\n [], # funcspec\n td, # type\n None, # init\n None, # bitsize\n )\n\n def make_prop_init_list(decl, extra_decls):\n \"\"\"\n Create an InitList to instantiate a struct 
which describes a single property\n decl is the current property c_ast.Decl\n \"\"\"\n taken = []\n exprs = []\n prop_name = None\n nullable = False\n type_annotations = {}\n\n annotations = self.get(decl.coord.line)\n for a in annotations:\n name = a['name']\n taken.append(name)\n # skip private properties\n if name == 'private':\n a = next(annotations, None)\n if a is not None:\n raise ExpansionError(a, decl, 'Unexpected annotation after ' + name)\n return\n\n init_name, expr = (None, None)\n # append the contents of these annotations directly\n if name in ['schema', 'name']:\n init_name = c_ast.ID(name)\n if name == 'name':\n prop_name = name\n if a['content'] == None:\n raise ExpansionError(a, decl, 'Content is None')\n expr = c_ast.Constant('string', str_literal(a['content']))\n\n if name in ['nullable']:\n init_name = c_ast.ID(name)\n expr = c_ast.Constant('int', '1')\n extra_decls[Annotations.NULLABLE_PROPERTY_NAME(decl.name)] = 'bool'\n nullable = True\n\n if name in ['array']:\n type_annotations[name] = a\n continue\n\n if init_name is None or expr is None:\n raise ExpansionError(a, decl, 'Unexpected annotation')\n\n exprs.append(c_ast.NamedInitializer([init_name], expr))\n\n # name the property if it hasn't already been named\n if not 'name' in taken:\n name = type_find(decl, c_ast.TypeDecl).declname\n exprs.append(\n c_ast.NamedInitializer(\n [c_ast.ID('name')],\n c_ast.Constant('string', str_literal(name))\n )\n )\n\n # assign types\n types, arraydecl = self.get_types(decl, type_annotations)\n type_inits = [c_ast.NamedInitializer(\n [c_ast.ID(ttype)],\n c_ast.ID(t)\n ) for ttype, t in types.iteritems()]\n fi = getframeinfo(currentframe())\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('type')],\n c_ast.InitList(type_inits, Coord(fi.filename, fi.lineno))\n ))\n # calculate struct offset\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('offset')],\n self.offsetof(struct.name, name)\n ))\n if nullable:\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('null_offset')],\n self.offsetof(struct.name, Annotations.NULLABLE_PROPERTY_NAME(name))\n ))\n if arraydecl:\n # static array\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('length')],\n arraydecl.dim\n ))\n elif types['json'] == 'json_type_array':\n # calculate length offset\n len_prop = Annotations.LENGTH_PROPERTY_NAME(name)\n extra_decls[len_prop] = 'int'\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('length_offset')],\n self.offsetof(struct.name, len_prop)\n ))\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('dereference')],\n c_ast.Constant('int', '1')\n ))\n\n if types['json'] == 'json_type_array':\n # assume PtrDecl for now\n exprs.append(c_ast.NamedInitializer(\n [c_ast.ID('stride')],\n self.sizeof(decl.type.type)\n ))\n\n\n fi = getframeinfo(currentframe())\n return c_ast.InitList(exprs, Coord(fi.filename, fi.lineno))\n\n fi = getframeinfo(currentframe())\n extra_decls = {}\n exprs = (make_prop_init_list(p, extra_decls) for p in struct.decls)\n exprs = [e for e in exprs if e is not None]\n # NULL terminator\n exprs.append(c_ast.InitList([c_ast.Constant('int', '0')]))\n\n initlist = c_ast.InitList(\n exprs,\n Coord(fi.filename, fi.lineno)\n )\n\n extra_decls = [make_extra_decl(name, t) for name, t in extra_decls.iteritems()]\n\n return (initlist, extra_decls)\n\n def offsetof(self, struct_name, name):\n return c_ast.FuncCall(c_ast.ID('offsetof'), c_ast.ExprList([\n c_ast.Typename(None, [],\n c_ast.TypeDecl(None, [],\n c_ast.Struct(struct_name, None))\n ),\n c_ast.ID(name)\n ]))\n\n def 
anonymize_type_decl(self, type_decl):\n types = (c_ast.PtrDecl, c_ast.TypeDecl)\n def anonymize(slot):\n obj = getattr(type_decl, slot)\n if obj.__class__ in types:\n obj = self.anonymize_type_decl(obj)\n if slot == 'declname':\n obj = None\n return obj\n\n slots = [s for s in type_decl.__class__.__slots__\n if not s in ('coord', '__weakref__')]\n args = [anonymize(s) for s in slots]\n return type_decl.__class__(*args)\n\n def sizeof(self, type_decl):\n anonymous_type_decl = self.anonymize_type_decl(type_decl)\n return c_ast.UnaryOp('sizeof', c_ast.Typename(None, [], anonymous_type_decl))\n\n def expand(self, ast, filename):\n \"\"\"\n Expand a pycparser ast with extra structures/data etc\n \"\"\"\n idx = 0\n\n self._extract_ast_info(ast)\n\n struct_object_property_decl = c_ast.Struct('jstruct_object_property', None)\n\n def ppdirective(a, n, ext):\n ext.insert(c_ast.ID('#{0} {1}'.format(a['directive'], a['content'])))\n return False\n\n def annotate_struct(a, n, ext):\n if not isinstance(n.type, c_ast.Struct):\n ExpansionError(a, n,\n 'Cannot expand annotation @{0} on {1}'\n .format(a['name'], n.__class__.__name__))\n\n name = n.type.name\n struct = n.type\n prop_init_list, struct_extra_decls = self.get_property_init_list(struct)\n properties = c_ast.Decl(\n self.PROPERTIES_NAME(name),\n [], #quals\n [], #storage\n [], #funcspec\n # type\n c_ast.ArrayDecl(\n c_ast.TypeDecl(\n self.PROPERTIES_NAME(name),\n [],\n struct_object_property_decl\n ),\n None, # dim\n [] # dim_quals\n ),\n prop_init_list, # init\n None # bitsize\n )\n struct.decls.extend(struct_extra_decls)\n if not ext.seek(n):\n raise ExpansionError(a, n, \"Couldn't seek to node\")\n ext.insert(properties)\n return True\n\n process = {\n '#': ppdirective,\n 'json': annotate_struct\n }\n ext = NodeList(ast.ext)\n for n in ast.ext:\n done = False\n if n.coord.file != filename:\n continue\n\n annotations = self.get(n.coord.line)\n for a in annotations:\n if not done and a['name'] in process:\n done = process[a['name']](a, n, ext)\n if done:\n try:\n a_name = a['name']\n a = next(annotations)\n raise ExpansionError(\n a, n, 'Unexpected annotation after ' + a_name)\n except Exception as e:\n pass\n else:\n raise ExpansionError(a, n, 'Unexpected annotation')\n\n ast.ext = ext\n\n def parse(self, source):\n ANNOTATION_NAME = r'[a-zA-Z_]+'\n\n # lex + yacc and i'm using a regex? 
HERESY!!\n annotation_expr = re.compile(\n r'(?:' +\n # match newlines (so we can count them)\n r'(?P\\n)|' +\n # match preprocessor statements\n r'((?:^|(?!\\n))#(?Pdefine|ifn?def|endif|include)' +\n # match preprocessor statement contents including line continuations\n r'(?:\\s+(?P.*?(?:\\\\\\n.*?)*))?(?=\\n|$|//|/\\*))|' +\n # match oneline comments\n r'(?://\\s*@(?P' + ANNOTATION_NAME + r') *' +\n # oneline annotation content\n r'(?P.*)?)|' +\n # match the entire multiline comment for line counting purposes\n r'/\\*(?P(?:\\s)*?@' +\n # match annotation name\n r'(?P' + ANNOTATION_NAME + r')\\s*' +\n # match everything after the @annotation in the comment\n r'(?P(?:\\n|.)*?)'+\n # end of multiline comment and non-capturing group\n r')\\*/)')\n annotations = []\n line = 1\n match = True\n pos = 0\n\n while match:\n match = annotation_expr.search(source, pos)\n\n if not match:\n break\n\n pos = match.end()\n ppname = match.group('ppname')\n olname = match.group('olname')\n mlname = match.group('mlname')\n if ppname:\n name = '#'\n else:\n name = olname or mlname\n\n wholecontent = match.group('ppcontent') or match.group('mlwhole')\n linecount = wholecontent.count('\\n') if wholecontent else 0\n\n if match.group('nl'):\n line = line + 1\n elif name:\n if ppname:\n content = match.group('ppcontent')\n else:\n content = match.group('olcontent') if olname else match.group('mlcontent')\n annotation = {\n 'line': line,\n 'lineEnd': line + linecount,\n 'name': name,\n 'content': content,\n }\n if ppname is not None:\n annotation['directive'] = ppname\n annotations.append(annotation)\n else:\n break\n\n line = line + linecount\n\n self.len = len(annotations)\n self.idx = 0\n\n self.annotations = annotations\n","sub_path":"parse/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":21343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"252853549","text":"from objs.cmd import Command\n\nclass CmdObj(Command):\n level = 2000\n def cmd(self, ob, line):\n if getInt(ob['관리자등급']) < 2000:\n ob.sendLine('☞ 무슨 말인지 모르겠어요. 
*^_^*')\n return\n if line == '':\n ob.sendLine('☞ 사용법: [기연이름] 기연삭제')\n return\n \n msg = ''\n if line not in ONEITEM.index:\n ob.sendLine('☞ 그런 아이템은 없습니다.!')\n return\n index = ONEITEM.index[line]\n ONEITEM.attr.__delitem__(index)\n ONEITEM.save()\n ob.sendLine('☞ 기연이 삭제되었습니다.')\n\n","sub_path":"cmds/기연삭제.py","file_name":"기연삭제.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273931568","text":"#Practice 1A SOLUTION FILE\n\n#STARTING DOCUMENTATION:\n#-program prompt\n#-pseudocode\n#-notes for you to remember\n#-guidelines / information for anyone reading your documentation\n\n#VARIABLE DICTIONARY---------\n#sumtotalTemps the sum total of all F temps entered during session\n#totalGTemps the number of temps entered during session\n\n#FUNCTIONS------------------------------------------------------------------\ndef celsius_conv(tf):\n return (tf-32)*(5/9)\n\n\n\n#BASE PROGRAM CODE----------------------------------------------------------\n\n#initialize variables\nsumtotalTemps = 0\ntotalTemps = 0\n\nprint(\"Welcome to my Fahrenheit-to-Celsius Conversion Program\")\n\n# answer = input(\"Enter y to start: \")\ntempCount = int(input(\"\\nHow many temps would you like to enter: \"))\n\n\n\n\n\nwhile tempCount > totalTemps:\n\n tempF = float(input(\"\\tEnter tempF: \"))\n\n tempC = celsius_conv(tempF)\n\n #update sumtotal of all tempF values \n sumtotalTemps += tempF #same as: sumtotalTemps = sumtotalTemps + tempF\n\n totalTemps += 1\n \n\n \n print(\"\\tTOTAL TEMPS: \", totalTemps)\n print(\"\\tTemp F is {0:.1f} = Temp C is {1:.1f}\".format(tempF, tempC))\n\n #FOR TESTING --> print(\"current sum total: \", sumtotalTemps)\n\n # answer = input(\"\\tWould you like to enter another temperature? [y/n]: \")\n\n#calculate average\navgTempF = sumtotalTemps / totalTemps\n\navgTempC = celsius_conv(avgTempF)\n\nprint(\"\\nTOTAL TEMPERATURES: \", totalTemps)\nprint(\"AVERAGE TEMPERATURE: {0:.1f}F | {1:.2f}C\".format(avgTempF, avgTempC))\n\nprint(\"\\n\\nThank you. 
Goodbye\")","sub_path":"Practice1A_.py","file_name":"Practice1A_.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"579791393","text":"\"\"\" This program downloads used car information from the Trademe website and stores the basic car information in the database.\"\"\"\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\n\r\nclass Car:\r\n    \"\"\" the class that helps to store the car information and to add new car information \"\"\" \r\n    \r\n    next_id = 0\r\n    data = {}\r\n    \r\n    def __init__(self, car, odometer, body_type, engine):\r\n        \"\"\" initialise the Car instance \"\"\"\r\n        self.next_id = Car.next_id \r\n        self.data = Car.data\r\n        self.car = car\r\n        self.odometer = odometer\r\n        self.body_type = body_type\r\n        self.engine = engine\r\n        self.notes = []  # needed by add_note below\r\n    \r\n    def __str__(self):\r\n        \"\"\" return a string-representation of the car information\"\"\"\r\n        result = \"=\"*30\r\n        result += \"\\n#{} - {}\\n\".format(self.next_id, self.car)\r\n        result += \"-\"*30\r\n        result += \"\\nOdometer: {}\\nBody Type: {}\\nEngine: {}\\n\".format(self.odometer, self.body_type, self.engine)\r\n        result += \"-\"*30\r\n        result += \"\\n\"\r\n        return result\r\n\r\n    \r\n    def add_data(self):\r\n        ''' add new car data in the data dictionary '''\r\n        info = Car(self.car, self.odometer, self.body_type, self.engine)\r\n        df = Car.data\r\n        Car.next_id += 1\r\n        df[self.next_id] = info\r\n        return df\r\n    \r\n    def add_note(self, note):\r\n        \"\"\" add the given note to the car \"\"\"\r\n        self.notes.append(note)\r\n    \r\n    \r\ndef read_data(url):\r\n    \"\"\" To extract car information from the website given\r\n    result1 holds the title of each ad post.\r\n    result2 holds the spec information of the car: odometer, body_type, engine (but it can be added more)\r\n    \"\"\"\r\n    page = requests.get(url)\r\n    content = BeautifulSoup(page.content, \"html.parser\")\r\n    cars = content.findAll('div', {\"class\": \"listingTitle\"})\r\n    specs = content.findAll('div', {\"id\": re.compile(\"listingSpecs\")})\r\n    \r\n    result1 = []\r\n    for item in cars:\r\n        if item.text != \"\":\r\n            new_item = item.text.replace(\"\\n\", \"\")\r\n            result1.append(new_item)\r\n    \r\n    result2 = []\r\n    for item in specs:\r\n        new_item = item.text.replace(\"\\n\", \"\")\r\n        new_item = new_item.replace(\"<\", \"\")\r\n        result2.append(new_item)\r\n    return result1, result2\r\n    #result = list(itertools.chain.from_iterable(zip(result1,result2)))\r\n    #return result\r\n    \r\n\r\ndef save_data(result1, result2):\r\n    \"\"\" the function stores the car information given from the website by using the class, Car. \"\"\" \r\n    \r\n    for (item1, item2) in zip(result1, result2):\r\n        car = item1\r\n        alist = item2.split(',')\r\n        if 'km' in alist[0]:\r\n            odometer, body_type, engine = (alist[0], alist[1], alist[2])\r\n        else:\r\n            odometer, body_type, engine = (alist[1], alist[2], alist[3])\r\n\r\n        info = Car(car, odometer, body_type, engine)\r\n        info.add_data()\r\n        #print(car, odometer, body_type, engine) \r\n    \r\n    \r\ndef main():\r\n    \"\"\" Collect the car information from every results page, since the website spreads the listings over multiple pages \"\"\"\r\n    for item in range(1, 3): \r\n        url = 'http://www.trademe.co.nz/browse/motors/default.aspx?page={}&cid=1&rptpath=1-268-&sort_order=motors_default'.format(item)\r\n        result1, result2 = read_data(url)\r\n        save_data(result1, result2)\r\n\r\nmain()\r\nfinal_data = Car.data\r\n#for item in range(0, len(final_data)):\r\nprint(final_data[1])\r\n","sub_path":"simple_webscraping.py","file_name":"simple_webscraping.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"9435585","text":"\nimport zmq\nimport random\nimport sys\nimport hashlib\nimport json\nimport string\n\n# State:\nhash_table = {} # to store the keys for this node\nclients = {} # to store the identities and response addresses of clients\nnode_name = '' # node identifier\ncontext = ''\nlower_bound = '' # predecessor's id\nupper_bound = '' # successor's id\n\n# Returns a randomly generated identifier for the node.\n# TODO: This is weak !\n\n\ndef get_id(simple=True, length=6):\n    if (simple):\n        possible = list(string.ascii_lowercase)\n        rnd = random.randint(0, len(possible) - 1)\n        return str(possible[rnd])\n    else:\n        rnd = random.randint(0, 100)\n        id = hashlib.sha1()\n        id.update(str(rnd))\n        return str(id.hexdigest())[:length]\n\n# Tests whether this node is responsible for storing key\n\n\ndef in_range(key):\n    if upper_bound >= lower_bound:\n        return key > lower_bound and key <= upper_bound\n    else:\n        return key >= lower_bound or key < upper_bound\n\n\ndef interval():\n    return '({},{}]'.format(lower_bound, upper_bound)\n\n\ndef localOP(request, sc):\n    operation = request['type']\n    if operation == 'insert':\n        key = request['key']\n        value = request['value']\n        hash_table[key] = value\n        print(\"Stored {} {} at {}\".format(key, value, node_name)) # Comment\n        sc.send(\"The value has been inserted\")\n    elif operation == 'search':\n        key = request['key']\n        value_to_client = hash_table.get(key, None)\n\n        if value_to_client is None:\n            answer_client = {\n                'value': \"error\",\n                'key': \"error\"\n            }\n            sc.send_json(answer_client)\n        else:\n            print(\"Added- {} : {} \".format(key, value_to_client)) # Comment\n            answer_client = {\n                'value': value_to_client,\n                'key': key\n            }\n            sc.send_json(answer_client)\n    elif operation == 'delete':\n        key = request['key']\n        if hash_table.get(key, None) is None:\n            sc.send(\"This key doesn't exist\")\n        else:\n            hash_table.pop(key)\n            sc.send(\"The element has been eliminated\")\n    else:\n        print(\"Local operation not implemented\")\n\n\ndef handleClientRequest(request, successorSocket):\n    clientId = request['id']\n    clientAddress = request['answer_to']\n    sc = context.socket(zmq.PUSH)\n    sc.connect(clientAddress)\n    clients[clientId] = sc\n\n    key = request['key']\n    if in_range(key):\n        print(\"Key {} is mine!!!!\".format(key)) # Comment\n        localOP(request, sc)\n    else:\n        print(\"Key {} is not mine, delegating...\".format(key)) # Comment\n        successorSocket.send_json(request)\n\n\ndef main():\n    global hash_table, clients, node_name, context, lower_bound, upper_bound\n\n    if len(sys.argv) != 2:\n        print('Enter configuration file')\n        sys.exit(1)\n\n    configFile = open(sys.argv[1], 'r')\n    config = json.load(configFile)\n\n    node_name = get_id()\n    upper_bound = node_name\n    print(\"Node name: {}\".format(node_name))\n\n    myPort = config['port']\n    succPort = config['successor']\n    clientPort = config['client']\n    bootstrap = config['bootstrap']\n\n    print(\"Listening on {} and connecting to neighbor on {}\".format(myPort, succPort))\n\n    context = zmq.Context()\n    mySocket = context.socket(zmq.PAIR)\n    mySocket.bind(\"tcp://*:\" + myPort)\n\n    successorSocket = context.socket(zmq.PAIR)\n    successorSocket.connect(\"tcp://localhost:\" + succPort)\n\n    if bootstrap:\n        # The first step is to send the node's identifier to the successor.\n        # That way every node computes the range of keys it is responsible\n        # for.\n        message = {'type': 'send-id', 'data': node_name}\n        successorSocket.send(json.dumps(message))\n\n        # the second step is to wait for the predecessor's identifier\n        req = json.loads(mySocket.recv())\n        lower_bound = req['data']\n\n        #print('({}, {}]'.format(req['data'], node_name))\n        print(\"Responsible for keys in {}\".format(interval()))\n    else:\n        print(\"Not implemented yet!\")\n    # All the client's requests from clients arrive to this socket\n    client_socket = context.socket(zmq.PULL)\n    client_socket.bind(\"tcp://*:\" + clientPort)\n    print('Listening to clients on {}'.format(clientPort))\n\n    poller = zmq.Poller()\n    poller.register(mySocket, zmq.POLLIN)\n    poller.register(client_socket, zmq.POLLIN)\n\n    should_continue = True\n    while should_continue:\n        print(\"Iteration\")\n        socks = dict(poller.poll())\n        if mySocket in socks and socks[mySocket] == zmq.POLLIN:\n            req = mySocket.recv_json()\n            handleClientRequest(req, successorSocket)\n\n        if client_socket in socks and socks[client_socket] == zmq.POLLIN:\n            print(\"Message on client's socket\")\n            req = client_socket.recv_json()\n            handleClientRequest(req, successorSocket)\n\n        should_continue = True\n\nif __name__ == '__main__':\n    main()\n","sub_path":"dht-chord/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"285269915","text":"import pandas as pd \nimport numpy as np \nimport torch\nfrom torch.utils.data import Dataset,DataLoader\nfrom sys import exit\n\n####################################### load csv files #####################################\ntrain_dataset = pd.read_csv('yelp_review_polarity_csv/train.csv',header=None)\ntest_dataset = pd.read_csv('yelp_review_polarity_csv/test.csv',header=None)\n\n####################################### initialization #####################################\ntrain_input_tensor = torch.zeros(train_dataset.shape[0], 1024)\ntrain_label_tensor = torch.zeros(train_dataset.shape[0],dtype=torch.long)\ntest_input_tensor = 
torch.zeros(test_dataset.shape[0], 1024)\ntest_label_tensor = torch.zeros(test_dataset.shape[0],dtype=torch.long)\nchrs = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"\\\\/|_@#$%^&*~`+-=<>()[]{} \"\n#chrs = \"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"\\\\/|_@#$%^&*~`+-=<>()[]{}\\n \"\nlookup_table = {}\nfor i, c in enumerate(chrs):\n print(c,i)\n lookup_table[c] = i+1\n \n####################################### creating the training and test data #####################################\nfor j in range(train_dataset.shape[0]):\n flag = str(train_dataset.loc[j,1]) #+ str(train_dataset.loc[j,2])+str(train_dataset.loc[j,3])\n char = list(flag)\n for i,c in enumerate(char):\n if c not in lookup_table.keys():\n train_input_tensor[j,i] = 0\n else:\n train_input_tensor[j,i] = lookup_table[c]\n if i == 1023:\n break\n train_label_tensor[j] = int(train_dataset.loc[j,0]-1)\n del flag\n del char\nfor j in range(test_dataset.shape[0]):\n flag = str(test_dataset.loc[j,1]) #+ str(test_dataset.loc[j,2]) +str(test_dataset.loc[j,3])\n char = list(flag)\n for i,c in enumerate(char):\n if c not in lookup_table.keys():\n test_input_tensor[j,i] = 0\n else:\n test_input_tensor[j,i] = lookup_table[c]\n if i == 1023:\n break\n test_label_tensor[j] = int(test_dataset.loc[j,0]-1)\n del flag\n del char\n\n####################################### saving the training and test data #####################################\ntorch.save(train_input_tensor,'./yelp_review_polarity_csv/train_input_tensor.pt')\ntorch.save(train_label_tensor,'./yelp_review_polarity_csv/train_label_tensor.pt')\ntorch.save(test_input_tensor,'./yelp_review_polarity_csv/test_input_tensor.pt')\ntorch.save(test_label_tensor,'./yelp_review_polarity_csv/test_label_tensor.pt')\ndel train_input_tensor\ndel test_input_tensor\n","sub_path":"load_dataset_yelp.py","file_name":"load_dataset_yelp.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482428104","text":"#\n# @lc app=leetcode.cn id=169 lang=python3\n#\n# [169] 多数元素\n#\n\n# @lc code=start\nclass Solution:\n def majorityElement(self, nums: List[int]) -> int:\n def helper(lb, rb):\n if lb == rb:\n return nums[lb]\n \n mid = lb + (rb - lb) // 2\n left = helper(lb, mid)\n right = helper(mid+1,rb)\n\n if left == right: return left\n\n nof_left = sum([left == num for num in nums[lb:rb+1]])\n nof_right = sum([right == num for num in nums[lb:rb+1]])\n return left if nof_left > nof_right else right\n return helper(0, len(nums)-1)\n# @lc code=end\n\n","sub_path":"Week_03/五毒神掌第一掌/169.多数元素.py","file_name":"169.多数元素.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348110127","text":"from .. 
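The divide-and-conquer majorityElement above runs in O(n log n) time with recursion overhead; under the same problem guarantee (a majority element always exists), the Boyer-Moore majority vote reaches the same answer in O(n) time and O(1) space. A minimal sketch, not part of the original solution:

def majority_element(nums):
    count = 0
    candidate = None
    for num in nums:
        if count == 0:
            candidate = num          # adopt a new candidate when counts cancel out
        count += 1 if num == candidate else -1
    return candidate                 # correct whenever a true majority exists

assert majority_element([3, 2, 3]) == 3
assert majority_element([2, 2, 1, 1, 1, 2, 2]) == 2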
import irc, var, ini\nfrom ..tools import is_identified\nimport re\n\n# Require identification with NickServ to send messages.\ndef ident (f):\n def check (user, channel, word):\n if is_identified(user):\n f(user, channel, word)\n else:\n irc.msg(channel, \"{}: Identify with NickServ first.\".format(user))\n return check\n\n# Insert a message monitor to look for user activity.\ndef ins_monitor (message):\n user = message.split(\"!\")[0][1:]\n try:\n event = message.split(' ')[1]\n except IndexError:\n event = ''\n \n if event in [\"JOIN\", \"PRIVMSG\"]:\n send_messages(user)\n\n# Fill commands dictionary.\ndef ins_command ():\n var.commands[\"tell\"] = type(\"command\", (object,), {})()\n var.commands[\"tell\"].method = leave_message\n var.commands[\"tell\"].aliases = [\".tell\", \".msg\"]\n var.commands[\"tell\"].usage = [\"{} user message - Leave a message to user.\"]\n \n var.commands[\"listtell\"] = type(\"command\", (object,), {})()\n var.commands[\"listtell\"].method = list_messages\n var.commands[\"listtell\"].aliases = [\".listtell\", \".ltell\", \".listtells\", \".showtells\"]\n var.commands[\"listtell\"].usage = [\"{} - Check if you have any messages and show them.\"]\n\n# Fill a space for the messages database.\ndef ins_db ():\n var.data[\"messages\"] = ini.fill_dict(\"messages.ini\", \"Messages\")\n \n # Turning list of strings into a list of tuples.\n for user in var.data[\"messages\"]:\n msg_list = [(msg.split(\" ~ \")[0], msg.split(\" ~ \", 1)[1]) for msg in var.data[\"messages\"][user]]\n var.data[\"messages\"][user] = msg_list\n\n# Leave a message to someone.\ndef leave_message (user, channel, word):\n # It needs a nickname and a message.\n if len(word) < 3:\n irc.msg(channel, \"{}: Wrong syntax. Check .help\".format(user))\n return\n \n target = word[1]\n message = \" \".join(word[2:])\n \n # Check if target is a valid nickname.\n if not re.match(\"[a-zA-Z\\[\\]\\\\`_\\^\\{\\|\\}][a-zA-Z0-9\\[\\]\\\\`_\\^\\{\\|\\}]\", target):\n irc.msg(channel, \"{} is not a valid nickname.\".format(target))\n return\n \n # Check for repeated messages.\n if target in var.data[\"messages\"]:\n if (user, message) in var.data[\"messages\"][target]:\n irc.msg(channel, \"{}: You already left this message.\".format(user))\n return\n \n # Create an empty list for users not in the database.\n if target not in var.data[\"messages\"]:\n var.data[\"messages\"][target] = []\n \n # Append tuple and add to ini.\n var.data[\"messages\"][target].append((user, message))\n message_list = [\"{} ~ {}\".format(tuple[0], tuple[1]) for tuple in var.data[\"messages\"][target]]\n ini.add_to_ini(\"Messages\", target, \"\\n\".join(message_list), \"messages.ini\")\n \n irc.msg(channel, \"{}: Message stored.\".format(user))\n\n# Send a user stored messages.\ndef send_messages (user):\n # There's no use going on if the user isn't in the messages database.\n if user not in var.data[\"messages\"]:\n return\n \n if len(var.data[\"messages\"][user]) > 4:\n # Send the first 4 messages.\n for tuple in var.data[\"messages\"][user][0:4]:\n irc.msg(user, \"{} sent you: {}\".format(tuple[0], tuple[1]))\n \n # Remove the sent messages.\n st_messages = var.data[\"messages\"][user][0:4]\n for tuple in st_messages:\n var.data[\"messages\"][user].remove(tuple)\n new_messages = [\"{} ~ {}\".format(tuple[0], tuple[1]) for tuple in var.data[\"messages\"][user]]\n ini.add_to_ini(\"Messages\", user, \"\\n\".join(new_messages), \"messages.ini\")\n \n irc.msg(user, \"To reply to them, use .tell user message\")\n irc.msg(user, \"You have 
more messages. Type \\x034.showtells\\x0f to view them.\")\n    else:\n        # Send every message.\n        for tuple in var.data[\"messages\"][user]:\n            irc.msg(user, \"{} sent you: {}\".format(tuple[0], tuple[1]))\n        \n        # Remove them.\n        del var.data[\"messages\"][user]\n        ini.remove_from_ini(\"Messages\", user, \"messages.ini\")\n        \n        irc.msg(user, \"To reply to them, use .tell user message\")\n\n# Send the rest of the messages.\ndef list_messages (user, channel, word):\n    # There's no use going on if the user isn't in the messages database.\n    if user not in var.data[\"messages\"]:\n        irc.msg(channel, \"{}: You don't have any messages.\".format(user))\n        return\n    \n    send_messages(user)\n    irc.msg(channel, \"{}: Sent ;)\".format(user))\n","sub_path":"modules/command_modules/tell.py","file_name":"tell.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"455888266","text":"from time_manipulation.time_format_conversion import *\nfrom datetime import datetime, timedelta\n\n\nNUM_HOURS = 24\nSEGMENTS_PER_HOUR = 4\nMIN_TIME_BEFORE_FLIGHT = 2\n\n\ndef reset_time_table(time_table):\n    # Clear in place: rebinding the parameter would leave the caller's list untouched.\n    time_table[:] = [[] for _ in range(NUM_HOURS * SEGMENTS_PER_HOUR)]\n\n\ndef insert_ride_into_time_table(ride, time_table):\n    wait_time = ride['preferences'][0]['wait_time']\n    earliest_arrival_time, latest_arrival_time = get_ride_arrival_time_range(ride, MIN_TIME_BEFORE_FLIGHT, wait_time)\n    min_index = time_to_time_table_index(earliest_arrival_time)\n    max_index = time_to_time_table_index(latest_arrival_time)\n    for i in range(min_index, max_index + 1):\n        time_table[i].append(ride)\n\n\ndef insert_all_rides_into_time_table(rides, time_table):\n    for ride in rides:\n        insert_ride_into_time_table(ride, time_table)\n","sub_path":"app/time_manipulation/time_table_setup.py","file_name":"time_table_setup.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"583305446","text":"from flask import Flask,render_template\nfrom flask import request\n\nimport datetime\nimport csv\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    \n    return render_template('Letting_Agency.html')\n\n@app.route('/Letting_Agency')\ndef Letting_Agency():\n    \n    return render_template('Letting_Agency.html')\n    \n\n@app.route('/Home/')\ndef Home():\n    return render_template('Home.html')\n\n\n\n@app.route('/AboutUs/')\ndef AboutUs():\n    return render_template('AboutUs.html')\n\n\n@app.route('/Contact/')\ndef Contact():\n    return render_template('Contact.html')\n\n\n@app.route('/Search/')\ndef Search():\n    return render_template('Search.html')\n    \n\n\t\n@app.route('/comments3rd/')\ndef comments():\n    commentsFile = 'static\\\\comments.csv'\n    comments = readFile(commentsFile)\n    return render_template(\"comments3rd.html\",comments=comments)\n\n@app.route('/commentsadd/',methods=['POST'])\ndef commentsadd():\n    commentsFile = 'static\\\\comments.csv'\n    comments = readFile(commentsFile)\n    \n    name = request.form['name']\n    content = request.form['content']\n    date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n    \n    newComments =[name,date, content]\n    comments.append(newComments)\n\t\n    writeFile(comments,commentsFile)\n    \n    return render_template('comments3rd.html',comments=comments)\n\t\n\n\n@app.route('/bookingpage123')\ndef bookingpage123():\n    bookingFile = 'static\\\\bookingpage.csv'\n    bookingLook = readFile(bookingFile)\n    return render_template('bookingpage123.html', 
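In the time-table module above, time_to_time_table_index and get_ride_arrival_time_range are imported from time_manipulation.time_format_conversion, which is not shown here. Purely as an illustration of what the index helper presumably does given NUM_HOURS = 24 and SEGMENTS_PER_HOUR = 4 (15-minute buckets), a hypothetical sketch:

from datetime import time

def time_to_time_table_index(t):
    # Hypothetical: map a time of day onto one of 24 * 4 = 96 quarter-hour slots.
    return t.hour * 4 + t.minute // 15

assert time_to_time_table_index(time(0, 0)) == 0
assert time_to_time_table_index(time(13, 45)) == 55
assert time_to_time_table_index(time(23, 59)) == 95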
bookingLook=bookingLook)\n    \n    \n@app.route('/bookingsubmit', methods = ['POST'])\ndef bookingsubmit():\n    # add a booking entry\n    # read the booking list from file\n\n    bookingFile = 'static\\\\bookingpage.csv'\n    bookingLook = readFile(bookingFile)\n    \n    error = \"\"\n#add the new entry\n    name = request.form['name']\n    startdate = request.form['startdate']\n    enddate = request.form['enddate']\n    address = request.form['address']\n    email = request.form['email']\n    days = int(request.form['days'])\n\n    sdate = datetime.datetime.strptime(startdate, \"%Y-%m-%d\")\n    edate = datetime.datetime.strptime(enddate, \"%Y-%m-%d\")\n    for booking in bookingLook:\n        booking_startdate = booking[1]\n        booking_enddate = booking[2]\n        booking_startdate = datetime.datetime.strptime(booking_startdate, \"%Y-%m-%d\")\n        booking_enddate = datetime.datetime.strptime(booking_enddate, \"%Y-%m-%d\")\n        if sdate >= booking_startdate and sdate <= booking_enddate:\n            error = \"Sorry, This date has been booked!\"\n            break\n        if edate >= booking_startdate and edate <= booking_enddate:\n            error = \"Sorry, This date has been booked!\"\n            break\n\n    if not error:\n        a = days\n        b = 80\n        startdate_month = int(startdate.split(\"-\")[1])\n        if startdate_month < 5:\n            b = 90\n        elif startdate_month > 8:\n            b = 70\n        if a >= 2:\n            cost = a * 0.9 * b  # 10% discount for stays of two days or more\n        else:\n            cost = a * b  # also covers a < 1, which previously left cost undefined\n        \n        newBooking = [name,startdate,enddate,address,email,a,cost]\n        bookingLook.append(newBooking)\n        writeFile(bookingLook,bookingFile)\n\n    return render_template('bookingpage123.html', bookingLook=bookingLook, error=error)\n    \n    \ndef readFile(filename):\n    open(filename,'a').close()  # ensure the file exists without leaking a handle\n    with open(filename,'r')as inFile:\n        reader = csv.reader(inFile)\n        dataList = [row for row in reader]\n    return dataList\n    \ndef writeFile(dataList,filename):\n    with open(filename,'w',newline='')as outFile:\n        writer = csv.writer(outFile)\n        writer.writerows(dataList)\n    return \n    \n\n\n    \n    \n@app.route('/exit', methods = ['GET'])\ndef exit():\n    return render_template('Letting_Agency.html')\n    \n    \n    \n\n\n    \nif __name__ == \"__main__\":\n    app.run(debug = True)  \n    \n    ","sub_path":"coursework2_flask/dan.py","file_name":"dan.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"7721728","text":"from application import db\nfrom schema import Author, TypeProjectLike\nfrom flask import session, request\nfrom attrdict import attrdict\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\n\n'''\nclass TypeProjectLike(db.Model) :\n    id = db.Column(db.Integer, primary_key = True)\n    project_id = db.Column(db.Integer, db.ForeignKey('type_project.id'))\n    project = db.relationship('TypeProject', foreign_keys = [project_id])\n    liker_id = db.Column(db.Integer, db.ForeignKey('author.id'))\n    liker = db.relationship('Author', foreign_keys = [liker_id])\n'''\n\ndef add(liker_id, project_id) :\n    db.session.add( TypeProjectLike (\n        liker_id = liker_id,\n        project_id = project_id\n    ))\n    db.session.commit()\n\n# IS THERE ANY TIMING ISSUE ON THIS???\ndef toggle(liker_id, project_id) :\n    _is_liking_ = False\n    try :\n        _like_ = TypeProjectLike.query.filter(\n            getattr(TypeProjectLike, 'liker_id') == liker_id,\n            getattr(TypeProjectLike, 'project_id') == project_id\n        ).one()\n        db.session.delete(_like_)\n    except NoResultFound :\n        db.session.add( TypeProjectLike (\n            liker_id = liker_id,\n            project_id = project_id\n        ))\n        _is_liking_ = True\n    except MultipleResultsFound :\n        _likes_ = TypeProjectLike.query.filter(\n            
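One caveat about the double-booking check in bookingsubmit above: it only tests whether the new start or the new end falls inside an existing booking, so a request that completely contains an existing booking passes both tests. The standard two-interval overlap predicate catches that case as well; a small self-contained demonstration with made-up dates:

from datetime import datetime

def dates_overlap(start_a, end_a, start_b, end_b):
    # Two closed intervals overlap iff each one starts before the other ends.
    return start_a <= end_b and start_b <= end_a

fmt = "%Y-%m-%d"
existing = (datetime.strptime("2024-06-10", fmt), datetime.strptime("2024-06-12", fmt))
new = (datetime.strptime("2024-06-08", fmt), datetime.strptime("2024-06-14", fmt))
# The new request fully contains the existing booking: both "falls inside"
# tests in the original miss this, but the overlap predicate does not.
assert dates_overlap(*new, *existing)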
getattr(TypeProjectLike, 'liker_id') == liker_id,\n getattr(TypeProjectLike, 'project_id') == project_id\n ).all()\n for _like_ in _likes_ : db.session.delete(_like_)\n except :\n raise\n db.session.commit()\n return _is_liking_\n\ndef get(attr = None, value = None, limit = -1, default = None) :\n project_likes = None\n if (attr, value) == (None, None) : project_likes = TypeProjectLike.query.filter()\n else : project_likes = TypeProjectLike.query.filter(getattr(TypeProjectLike, attr) == value)\n\n if limit == 1 :\n try : return project_likes.one()\n except : return default\n elif limit > 1 : return project_likes.limit(limit)\n else : return project_likes.all()\n\ndef secure() :\n return attrdict( safe = False, action = 'alert', body = 'Authentication Function Not Implemented')\n # safe, action, body = None, 'alert', None\n # if 'project_id' not in session :\n # safe = False\n # elif 'like_id' not in request.form :\n # safe = False\n # body = 'like_id not exist'\n # else :\n # try :\n # _like = TypeProjectComment.query.filter(\n # getattr(TypeProjectComment, 'id') == request.form['like_id'],\n # getattr(TypeProjectComment, 'project_id') == session['project_id'],\n # getattr(TypeProjectComment, 'writer_id') == session['user_id']\n # ).one()\n # safe = _like is not None\n # except :\n # safe = False\n # body = 'could not find proper project_like object'\n # return attrdict( safe = safe, action = action, body = body )","sub_path":"application/models/project_like.py","file_name":"project_like.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628386347","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res = [[]]\n for num in sorted(nums):\n res += [item+[num] for item in res]\n print(res)\n return res\n \n# Your input\n# [1,2,3]\n# Your stdout\n# [[], [1]]\n# [[], [1], [2], [1, 2]]\n# [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]\n","sub_path":"Problem_78/learning_solution.py","file_name":"learning_solution.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"179630864","text":"from api import *\nimport time\nimport threading\nfrom config import *\n\n## Units in seconds\nunits = {\n \"second\": 1,\n \"minute\": 60,\n \"hour\": 60 * 60,\n \"day\": 24 * 60 * 60,\n}\n\n## SLEEPTIME Will define how accurately the messages can be planned\nSLEEPTIME = 1\nMAX_HOLD = units[\"day\"] * 2 ## The maximum interval\nMIN_HOLD = units[\"minute\"] * 2\nMAX_TIMES = 12 ## How many times someone may be reminded\n\nlast_remind = {\n # \"nick\": remindID\n }\n\ndef load():\n dbExecute('''CREATE TABLE IF NOT EXISTS reminders (\n remindID int auto_increment primary key,\n nick varchar(255),\n sender varchar(255),\n message text,\n channel text,\n time int,\n times int,\n remind_in int,\n index(nick) )''')\n\n registerFunction(\"remind %s %i times every %i %s %S\", addReminder, \"remind times every \")\n # registerFunction(\"remind %s %i time every %i %s %S\", addReminder, \"remind times every \")\n registerFunction(\"stop reminding me\", stopRemindingSender, \"stop reminding me\")\n registerFunction(\"list reminders for %s\", listReminders, \"list reminders for \")\n registerFunction(\"clear reminders for %s\", clearReminders, \"list reminders for \", restricted = True)\n\n registerService(checkForReminderSetup, checkForReminder, 
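The toggle above has to clean up duplicate rows after the fact (the MultipleResultsFound branch), which hints at the timing issue its comment asks about: two concurrent requests can both hit NoResultFound and both insert. One common remedy is a database-level unique constraint on the (liker_id, project_id) pair, so the second insert fails instead of duplicating. A sketch in plain SQLAlchemy, not the project's actual schema; with Flask-SQLAlchemy the same __table_args__ would go on the db.Model class:

from sqlalchemy import Column, Integer, ForeignKey, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class TypeProjectLike(Base):
    __tablename__ = 'type_project_like'
    # One like per (user, project): duplicate inserts now raise IntegrityError.
    __table_args__ = (UniqueConstraint('liker_id', 'project_id', name='uq_one_like_per_user'),)
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('type_project.id'))
    liker_id = Column(Integer, ForeignKey('author.id'))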
checkForReminderCleanup)\nregisterModule(\"Remind\", load)\n\ndef addReminder(channel, sender, target, times, mul, unit, message):\n \"\"\" Adds a message for someone, someone may also remind themself. \"\"\"\n thetime = int(time.time())\n time_unit = units[unit[:-1]] if unit[-1] == \"s\" else units[unit]\n remind_time = thetime + time_unit*mul\n if times > MAX_TIMES:\n sendMessage(channel, \"Reminding someone that many times is not allowed, maximum is {}\".format(MAX_TIMES))\n return\n if time_unit*mul < MIN_HOLD:\n sendMessage(channel, \"Reminding someone that often can become annoying, minimum interval is {}\".format(MIN_HOLD))\n return\n if remind_time > MAX_HOLD + thetime:\n sendMessage(channel, \"won't wait for that long, maximum interval is {} seconds\".format(MAX_HOLD))\n return\n dbExecute(\n \"INSERT INTO reminders (nick, sender, message, time, channel, times, remind_in) VALUES (%s, %s, %s, %s, %s, %s, %s)\",\n [target, sender, message, remind_time, channel, times, time_unit*mul])\n sendMessage(channel, \"will do\")\n\n## This function was created, because it made sence for it not to be restricted\ndef stopRemindingSender(channel, sender):\n stopRemindingUser(channel, sender, sender)\n\ndef stopRemindingUser(channel, sender, user):\n if not last_remind.get(user):\n sendMessage(channel, \"{} has not been reminded of anything\".format(user))\n return\n sendMessage(channel, \"Stopped reminding {}\".format(user))\n dbExecute(\"DELETE FROM reminders WHERE remindID = %s\", [last_remind[user]])\n\ndef clearReminders(channel, sender, nick):\n reminders = dbQuery(\"SELECT time, message FROM reminders WHERE sender = %s AND nick = %s\", [sender, nick])\n if not reminders:\n sendMessage(channel, \"No reminders for {} from {}\".format(nick, sender))\n return\n dbExecute(\"DELETE FROM reminders WHERE sender = %s AND nick = %s\", [sender, nick])\n sendMessage(channel, \"Cleared all reminders for {}\".format(nick))\n\ndef listReminders(channel, sender, nick):\n \"\"\"Displays all reminders set for a particular user (by the sender)\"\"\"\n reminders = dbQuery(\"SELECT time, message FROM reminders WHERE sender = %s AND nick = %s\", [sender, nick])\n for (when, reminder) in reminders:\n sendMessage(channel, \"[{}] Reminder for {}: \\\"{}\\\"\".format(time.ctime(when), nick, reminder))\n if not reminders:\n sendMessage(channel, \"No reminders for {}\".format(nick))\n\nfrom threading import currentThread\ndef checkForReminderSetup(service):\n ## Establish connection in thread\n dbConnect(DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE)\n\ndef checkForReminder(service, state):\n now = int(time.time())\n allusers = {}\n\n for channel in joinedChannels():\n for user in channelUserList(channel):\n if not allusers.get(user):\n allusers[user] = set()\n allusers[user].add(channel)\n\n reminders = dbQuery(\n \"SELECT remindID, sender, message, nick, channel, remind_in FROM reminders WHERE time <= %s AND times > 0\", [now],\n )\n\n for (ID, sender, message, nick, channel, remind_in) in reminders:\n if nick in allusers and channel in allusers.get(nick, {}):\n sendMessage(channel, \"%s, %s reminds you: %s\" % (nick, sender, message))\n newtime = int(time.time()) + remind_in\n last_remind[nick] = ID\n dbExecute(\"UPDATE reminders SET times = times - 1, time = %s WHERE remindID = %s\", (newtime, ID))\n\n dbExecute(\"DELETE FROM reminders WHERE times <= 0\", ())\n\n return state\n\ndef checkForReminderCleanup(service):\n 
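For reference, the interval arithmetic in addReminder first singularizes plural unit names (unit[:-1] when the word ends in "s") before the lookup, then scales by the multiplier. A standalone worked example of just that step:

units = {"second": 1, "minute": 60, "hour": 60 * 60, "day": 24 * 60 * 60}

def interval_seconds(mul, unit):
    # "minutes" -> "minute", "day" stays "day"; then scale by the multiplier.
    time_unit = units[unit[:-1]] if unit[-1] == "s" else units[unit]
    return time_unit * mul

assert interval_seconds(5, "minutes") == 300
assert interval_seconds(1, "hour") == 3600
assert interval_seconds(2, "days") == 172800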
dbDisconnect()\n","sub_path":"modules/remind.py","file_name":"remind.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433940510","text":"# ## Multiclass Classifier\n# ## ***Author - Boris Kundu*** ##\n\n#Import packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport warnings\nimport time\n\n#Read image features from MNISTnumImages5000_balanced\nX = pd.read_csv('MNISTnumImages5000_balanced.txt',delimiter='\\t',header=None)\n#Print image feature shape\nprint(f'\\nImage feature shape: {X.shape}')\n#Show image feature head\nX.head()\n\n#Display image feature Info\nX.info()\n\n#Read image labels from MNISTnumImages5000_balanced\nY = pd.read_csv('MNISTnumLabels5000_balanced.txt',header=None)\n#Give name to column\nY.columns = ['label']\n#Print image label shape\nprint(f'\\nImage label shape: {Y.shape}')\n#Show image label head\nY.head()\n\n#Display image label Info\nY.info()\n\n#Merge X and Y\nXM = pd.concat([X,Y],axis=1)\n#Add bias feature as 1\n#XM['bias'] = np.ones(len(XM))\n#Check head after adding a bias feature and target variable label.\nXM.head()\n\nn = len(XM.columns) - 1\n\n#Function returns randomly generated training set of size 4000 and testing set of size 1000 data points.\n#It also ensures euqal data points of all 10 classes (0 to 9 digits) for both Training & Testing sets.\ndef getRandomTrainTestSet(full_df,frac):\n #Get data points for all classes\n X0 = full_df[full_df['label']==0]\n X1 = full_df[full_df['label']==1]\n X2 = full_df[full_df['label']==2]\n X3 = full_df[full_df['label']==3]\n X4 = full_df[full_df['label']==4]\n X5 = full_df[full_df['label']==5]\n X6 = full_df[full_df['label']==6]\n X7 = full_df[full_df['label']==7]\n X8 = full_df[full_df['label']==8]\n X9 = full_df[full_df['label']==9]\n \n #Get train (80%) and test (20%) data points for each class\n X0_Train = X0.sample(frac=frac,axis=0)\n X0_Test = pd.concat([X0, X0_Train]).loc[X0.index.symmetric_difference(X0_Train.index)]\n \n X1_Train = X1.sample(frac=frac,axis=0)\n X1_Test = pd.concat([X1, X1_Train]).loc[X1.index.symmetric_difference(X1_Train.index)]\n \n X2_Train = X2.sample(frac=frac,axis=0)\n X2_Test = pd.concat([X2, X2_Train]).loc[X2.index.symmetric_difference(X2_Train.index)]\n \n X3_Train = X3.sample(frac=frac,axis=0)\n X3_Test = pd.concat([X3, X3_Train]).loc[X3.index.symmetric_difference(X3_Train.index)]\n \n X4_Train = X4.sample(frac=frac,axis=0)\n X4_Test = pd.concat([X4, X4_Train]).loc[X4.index.symmetric_difference(X4_Train.index)]\n \n X5_Train = X5.sample(frac=frac,axis=0)\n X5_Test = pd.concat([X5, X5_Train]).loc[X5.index.symmetric_difference(X5_Train.index)]\n \n X6_Train = X6.sample(frac=frac,axis=0)\n X6_Test = pd.concat([X6, X6_Train]).loc[X6.index.symmetric_difference(X6_Train.index)]\n \n X7_Train = X7.sample(frac=frac,axis=0)\n X7_Test = pd.concat([X7, X7_Train]).loc[X7.index.symmetric_difference(X7_Train.index)]\n \n X8_Train = X8.sample(frac=frac,axis=0)\n X8_Test = pd.concat([X8, X8_Train]).loc[X8.index.symmetric_difference(X8_Train.index)]\n \n X9_Train = X9.sample(frac=frac,axis=0)\n X9_Test = pd.concat([X9, X9_Train]).loc[X9.index.symmetric_difference(X9_Train.index)]\n \n #Create complete balanced Train and Test sets.\n X_Train_full = pd.concat([X0_Train,X1_Train,X2_Train,X3_Train,X4_Train,X5_Train,X6_Train,X7_Train,X8_Train,X9_Train])\n X_Test_full = 
pd.concat([X0_Test,X1_Test,X2_Test,X3_Test,X4_Test,X5_Test,X6_Test,X7_Test,X8_Test,X9_Test])\n \n X_train = X_Train_full.drop(['label'],inplace=False,axis=1).reset_index(drop=True)\n Y_train = X_Train_full['label'].reset_index(drop=True)\n X_test = X_Test_full.drop(['label'],inplace=False,axis=1).reset_index(drop=True)\n Y_test = X_Test_full['label'].reset_index(drop=True)\n \n return(X_train.copy(),Y_train.copy(),X_test.copy(),Y_test.copy())\n\n#Calculate Sigmoid\ndef getSigmoid(val):\n sigmoid = (1/(1+np.exp(-val)))\n return sigmoid\n#Calculate Sigmoid Derivative\ndef getSignmodDerivative(val):\n sigmoid = getSigmoid(val)\n sigmoidDerivative = (1-sigmoid)*sigmoid\n return sigmoidDerivative\n#Initialize weights\ndef initializeWeights(feature_count):\n limit = np.sqrt(3/feature_count)\n initialWeights = np.random.uniform(-limit, limit,feature_count)\n return initialWeights\n#Get auroencoder hidden weights from file \ndef getWeightsFromFile(file_name='AutoencoderWeights.csv'):\n #Read hidden neuron weights from multiple classifier\n autoHiddenWeights = pd.read_csv(file_name,header=None)\n autoHiddenWeights = autoHiddenWeights.to_numpy()\n return autoHiddenWeights\n\n#Dictionary of expected outputs\n#Key represents digit in image\n#Value represents output layer\ny_ideal = {\n 0:[1,0,0,0,0,0,0,0,0,0],# Image is 0\n 1:[0,1,0,0,0,0,0,0,0,0],# Image is 1\n 2:[0,0,1,0,0,0,0,0,0,0],# Image is 2\n 3:[0,0,0,1,0,0,0,0,0,0],# Image is 3\n 4:[0,0,0,0,1,0,0,0,0,0],# Image is 4\n 5:[0,0,0,0,0,1,0,0,0,0],# Image is 5\n 6:[0,0,0,0,0,0,1,0,0,0],# Image is 6\n 7:[0,0,0,0,0,0,0,1,0,0],# Image is 7\n 8:[0,0,0,0,0,0,0,0,1,0],# Image is 8\n 9:[0,0,0,0,0,0,0,0,0,1] # Image is 9\n }\n\nwarnings.filterwarnings('ignore')\n\n#Below class represents a Neuron\nclass Neuron:\n #Initialize \n def __init__(self,weights,eta=0.01,alpha=0.01,activFunc=getSigmoid,derivFunc=getSignmodDerivative):\n self.weights = weights #Weights of features\n self.weightChange = np.zeros(len(weights))\n self.eta = eta #learning rate\n self.alpha = alpha #Momentum rate\n self.activation = activFunc #Activation function\n self.activationDerivative = derivFunc #Activation derivative function\n #Setters\n def setWeights(self,weights):\n self.weights = weights\n def setOriginalWeights(self,orgWeights):\n self.originalWeights = orgWeights\n def setEta(self,eta):\n self.eta = eta\n def setAlpha(self,alpha):\n self.alpha = alpha\n def setCurrentInput(self,currentInput):\n self.currentInput = currentInput\n self.originalWeights = self.weights\n o = self.getActivationFunction()\n d = self.getActivationDerivative()\n def setCurrentOutput(self, currentOutput):\n self.currentOutput = currentOutput\n def setCurrentOutputDerivative(self, currentOutDeriv):\n self.currentOutputDerivative = currentOutDeriv\n def setActivationFunction(self,activFunc):\n self.activation = activFunc\n def setActivationDerivative(self,derivFunc):\n self.activationDerivative = derivFunc\n def setDelta(self,delta):\n self.delta = delta\n def setWeightedSum(self,weightedSum):\n self.weightedSum = weightedSum\n def setError(self,error):\n self.error = error\n def setWeightChange(self,weightChange):\n self.weightChange = weightChange\n #Getters\n def getWeightChange(self):\n return self.weightChange\n def getOriginalWeight(self):\n return self.originalWeights\n def getWeights(self):\n return self.weights\n def getEta(self):\n return self.eta\n def getAlpha(self):\n return self.alpha\n def getWeightedSum(self):\n weightedSum = np.dot(self.weights,self.currentInput)\n 
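The Neuron class above leans on the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) that getSignmodDerivative implements. A quick central-difference check that the identity holds numerically:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.linspace(-5.0, 5.0, 11)
h = 1e-6
numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2.0 * h)  # central difference
analytic = sigmoid(x) * (1.0 - sigmoid(x))
assert np.allclose(numeric, analytic, atol=1e-8)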
self.setWeightedSum(weightedSum)\n return self.weightedSum\n def getCurrentInput(self):\n return self.currentInput\n def getCurrentOutput(self):\n return self.currentOutput\n def getCurrentOutputDerivative(self):\n return self.currentOutputDerivative\n def getActivationFunction(self):#Call activation function\n self.setCurrentOutput(self.activation(self.getWeightedSum()))\n return self.getCurrentOutput()\n def getActivationDerivative(self):\n self.currentOutputDerivative = self.activationDerivative(self.getWeightedSum())\n return self.getCurrentOutputDerivative()\n def getDelta(self):\n return self.delta\n def getError(self,expected):\n self.setError(expected - self.currentOutput)\n return self.error\n def getStandardizedOutput(self,expected,low = 0.25,high = 0.75):\n if self.currentOutput <= low:\n self.currentOutput = 0\n elif self.currentOutput >= high:\n self.currentOutput = 1\n trueError = self.getError(expected)\n return self.currentOutput\n def getHiddenDelta(self,weightedDeltaSum):\n self.setDelta(self.currentOutputDerivative*weightedDeltaSum)\n return self.delta\n def getOutputDelta(self):\n self.setDelta(self.currentOutputDerivative*self.error)\n return self.delta\n def updateWeights(self):\n #next_weight_change = 0\n next_weight_change = self.eta*self.delta*self.currentInput\n momentum = self.alpha*self.weightChange\n self.setWeights(self.weights + next_weight_change + momentum)\n self.setWeightChange(next_weight_change)\n def getWeightedDelta(self):\n return self.weightedDelta\n def calculateWeightedDelta(self):\n weightedDelta = self.originalWeights * self.delta\n self.setWeightedDelta(weightedDelta)\n def setWeightedDelta(self,weightedDelta):\n self.weightedDelta = weightedDelta\n\n#Take user input\nh = int(input('Enter the number of hidden layers:'))\n\n# Neural network layers\nlayers = []\n#Get neurons for each layer\nfor l in range(h):\n neurons = int(input(f'Enter the number of neurons for hidden layer {l+1}:'))\n layers.append(neurons)\n#Add output layer with 10 neurons\nlayers.append(10)\ntotal_layers = len(layers)\noutput_layer_index = total_layers - 1\n\n#Make (inputs,outputs) pairs for each layer\nlayer_sizes = []\nfor l in range(total_layers):\n if l == 0:\n layer_sizes.append((n,layers[0]))\n else:\n layer_sizes.append((layers[l-1],layers[l]))\n\nclass NeuralNetwork:\n #Initialize network\n def __init__(self,layers,layer_sizes,eta=0.01,alpha=0.01,activFunc=getSigmoid,derivFunc=getSignmodDerivative):\n self.layers = layers #Network layers\n self.layer_sizes = layer_sizes #Current size of layers\n self.eta = eta #learning rate\n self.alpha = alpha #Momentum rate\n self.activation = activFunc #Activation function\n self.activationDerivative = derivFunc #Activation derivative function\n self.total_layers = len(layers) #Total layers\n self.output_layer_index = self.total_layers - 1 #Output layers\n self.createNetwork()\n #Create network\n def createNetwork(self):\n neural_network = [] #Neurons in entire network\n for i,o in self.layer_sizes:\n layer_neurons = [] #Neurons in each layer including output\n for k in range(o):\n newNeuron = Neuron(initializeWeights(i),self.eta,self.alpha,self.activation,self.activationDerivative)\n layer_neurons.append(newNeuron)\n neural_network.append(layer_neurons)\n print(i,o)\n self.setNeuralNetwork(neural_network)\n #Initialize first hidden layer with autoencoder weights\n def initializeAutoWeights(self,autoWeights):\n index = 0\n for neuron in self.neuralNetwork[0]: #Every neuron in first hidden layer\n 
neuron.setWeights(autoWeights[index])\n index = index + 1\n def getNeuralNetwork(self):\n return self.neuralNetwork\n def setNeuralNetwork(self, nn):\n self.neuralNetwork = nn\n def predict(self,full_df):\n (X_trainp,Y_trainp,X_testp,Y_testp) = getRandomTrainTestSet(full_df,0.8)\n \n X_train_matp= X_trainp.to_numpy()\n Y_train_matp = Y_trainp.to_numpy()\n\n X_test_matp = X_testp.to_numpy()\n Y_test_matp = Y_testp.to_numpy()\n \n total_train_inputs = len(X_train_matp)\n total_test_inputs = len(X_test_matp)\n \n print(f'Total Trainining Data Points:{total_train_inputs}')\n print(f'Total Testing Data Points:{total_test_inputs}')\n \n #Digit wise error\n errorTrainDigits = [0 for i in range(10)]\n errorTestDigits = [0 for i in range(10)]\n \n expected_output_index_train = 0 #For getting expected(true) output for Train\n expected_output_index_test = 0 #For getting expected(true) output for Test\n \n w, h = 10, 10\n confusion_test = [[0 for x in range(w)] for y in range(h)]\n confusion_train = [[0 for x in range(w)] for y in range(h)] \n \n #Test Predictions - WINNER TAKE All\n for data_point_test in X_test_matp:#For every test point\n layer_outputs_test = []\n \n for layer in range(self.total_layers): #For each layer \n output_data_point_test = []\n if layer != output_layer_index: #Hidden Layer \n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n if layer == 0: #First hidden layer\n neuron.setCurrentInput(data_point_test)\n else: #Other hidden layers\n neuron.setCurrentInput(layer_outputs_test[layer-1])\n output_data_point_test.append(neuron.getCurrentOutput())\n else: #Output Layer 10\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n neuron.setCurrentInput(layer_outputs_test[layer-1])\n output_data_point_test.append(neuron.getCurrentOutput())\n layer_outputs_test.append(np.array(output_data_point_test))\n\n #Compare index of max value for both\n expected_test = Y_test_matp[expected_output_index_test] #Get expected output of data point\n expected_output_layer_test = y_ideal.get(expected_test) #Get expected output for all 10 neurons that make up the digit\n expected_output_layer_test = np.array(expected_output_layer_test)\n expected_test_out_max_index = np.where(expected_output_layer_test == expected_output_layer_test.max())\n\n testOutNeuron = layer_outputs_test[-1] #Output layer results\n test_out_max_index = np.where(testOutNeuron == testOutNeuron.max())\n \n #Increment count in confusion matrix\n confusion_test[expected_test_out_max_index[0][0]][test_out_max_index[0][0]] = confusion_test[expected_test_out_max_index[0][0]][test_out_max_index[0][0]] + 1\n #Get next dp\n expected_output_index_test = expected_output_index_test + 1\n \n #Train Predictions - WINNER TAKE All\n for data_point in X_train_matp:#For every train point\n layer_outputs = []\n #Train Predictions - WINNER TAKE All\n for layer in range(self.total_layers): #For each layer \n output_data_point = []\n if layer != output_layer_index: #Hidden Layer \n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n if layer == 0: #First hidden layer\n neuron.setCurrentInput(data_point)\n else: #Other hidden layers\n neuron.setCurrentInput(layer_outputs[layer-1])\n output_data_point.append(neuron.getCurrentOutput())\n else: #Output Layer 10\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n neuron.setCurrentInput(layer_outputs[layer-1])\n output_data_point.append(neuron.getCurrentOutput())\n layer_outputs.append(np.array(output_data_point))\n\n 
#Compare index of max value for both\n expected = Y_train_matp[expected_output_index_train] #Get expected output of data point\n expected_output_layer = y_ideal.get(expected) #Get expected output for all 10 neurons that make up the digit\n expected_output_layer = np.array(expected_output_layer)\n expected_train_out_max_index = np.where(expected_output_layer == expected_output_layer.max())\n\n trainOutNeuron = layer_outputs[-1] #Output layer results\n train_out_max_index = np.where(trainOutNeuron == trainOutNeuron.max())\n \n #Increment count in confusion matrix\n confusion_train[expected_train_out_max_index[0][0]][train_out_max_index[0][0]] = int(confusion_train[expected_train_out_max_index[0][0]][train_out_max_index[0][0]] + 1)\n #Get next dp\n expected_output_index_train = expected_output_index_train + 1\n \n #Calculate Training and Test Errors\n \n correct_test_predictions = 0\n correct_train_predictions = 0\n for i in range (10):\n correct_train_predictions = correct_train_predictions + confusion_train[i][i]\n correct_test_predictions = correct_test_predictions + confusion_test[i][i]\n errorTrainDigits[i] = (400 - confusion_train[i][i])/400\n errorTestDigits[i] = (100 - confusion_test[i][i])/100\n \n trainEF = (total_train_inputs - correct_train_predictions)/total_train_inputs\n testEF = (total_test_inputs - correct_test_predictions)/total_test_inputs\n \n #Return confusion matrix\n conf_train = pd.DataFrame(confusion_train)\n conf_test = pd.DataFrame(confusion_test)\n \n return (conf_train,conf_test,trainEF,testEF,errorTrainDigits,errorTestDigits)\n \n def feedForward_backPropogate(self,full_df,epoch):\n #Save training error for epochs\n trainErrorFrac = []\n #Save testing error for epochs\n testErrorFrac = []\n #All epoch outputs\n epoch_outputs = []\n low = 0.25\n high = 0.75\n \n for e in range(epoch):\n #Get random Test and Train data points\n (X_traint,Y_traint,X_testt,Y_testt) = getRandomTrainTestSet(full_df,0.8)\n \n testErrors = 0\n trainErrors = 0\n\n XY_traint = pd.concat([X_traint, Y_traint], axis=1, join='inner')\n XY_testt = pd.concat([X_testt, Y_testt], axis=1, join='inner')\n \n #Train using random 25% of inputs in every epoch\n XY_traint = XY_traint.sample(frac=0.25,axis=0)\n XY_testt = XY_testt.sample(frac=1,axis=0)\n\n X_train_newt = XY_traint.drop(['label'],inplace=False,axis=1).reset_index(drop=True)\n Y_train_newt = XY_traint['label'].reset_index(drop=True)\n \n X_test_newt = XY_testt.drop(['label'],inplace=False,axis=1).reset_index(drop=True)\n Y_test_newt = XY_testt['label'].reset_index(drop=True)\n \n X_train_matt = X_train_newt.to_numpy()\n Y_train_matt = Y_train_newt.to_numpy()\n\n X_test_matt = X_test_newt.to_numpy()\n Y_test_matt = Y_test_newt.to_numpy()\n\n expected_output_index_train = 0 #For getting expected(true) output for Train\n expected_output_index_test = 0 #For getting expected(true) output for Test\n\n total_train_inputs = len(X_train_matt)\n total_test_inputs = len(X_test_matt)\n \n print(f'*** Running EPOCH:{e+1} ***')\n start_time = time.time()\n \n #Train & Test Simulation\n for index in range(len(X_train_matt)): #For every train data point\n correctPrediction = True\n correctPredictionTest = True\n \n layer_outputs = []\n layer_outputs_test = []\n \n data_point_test = X_test_matt[index]\n data_point = X_train_matt[index]\n \n #Test Predictions - WINNER TAKE All\n for layer in range(self.total_layers): #For each layer 128=>64=>10\n output_data_point_test = []\n if layer != output_layer_index: #Hidden Layer 128=>64\n for neuron in 
self.neuralNetwork[layer]: #Every neuron in current layer\n if layer == 0: #First hidden layer\n neuron.setCurrentInput(data_point_test)\n else: #Other hidden layers\n neuron.setCurrentInput(layer_outputs_test[layer-1])\n output_data_point_test.append(neuron.getCurrentOutput())\n else: #Output Layer 10\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n neuron.setCurrentInput(layer_outputs_test[layer-1])\n output_data_point_test.append(neuron.getCurrentOutput())\n layer_outputs_test.append(np.array(output_data_point_test))\n \n #Compare index of max value for both\n expected_test = Y_test_matt[expected_output_index_test] #Get expected output of data point\n expected_output_layer_test = y_ideal.get(expected_test) #Get expected output for all 10 neurons that make up the digit\n expected_output_layer_test = np.array(expected_output_layer_test)\n expected_test_out_max_index = np.where(expected_output_layer_test == expected_output_layer_test.max())\n \n testOutNeuron = layer_outputs_test[-1] #Output layer results\n test_out_max_index = np.where(testOutNeuron == testOutNeuron.max())\n \n if test_out_max_index[0][0] != expected_test_out_max_index[0][0]:\n correctPredictionTest = False\n \n if correctPredictionTest == False:\n testErrors = testErrors + 1 #Update testing error\n \n #Feed forward Train\n for layer in range(self.total_layers): #For each layer 128=>64=>10\n output_data_point = []\n if layer != output_layer_index: #Hidden Layer 128=>64\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n if layer == 0: #First hidden layer\n neuron.setCurrentInput(data_point)\n else: #Other hidden layers\n neuron.setCurrentInput(layer_outputs[layer-1])\n output_data_point.append(neuron.getCurrentOutput())\n else: #Output Layer 10\n expected = Y_train_matt[expected_output_index_train] #Get expected output of data point\n expected_output_layer = y_ideal.get(expected) #Get expected output for all 10 neurons that make up the digit\n o = 0\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n output_delta = 0\n neuron.setCurrentInput(layer_outputs[layer-1])\n output_data_point.append(neuron.getCurrentOutput())\n expected_value = expected_output_layer[o] #Expected\n predicted_value = neuron.getStandardizedOutput(expected_value,low,high) #Predicted\n output_delta = neuron.getOutputDelta() #Output neuron delta\n if predicted_value != expected_value:\n #Update weight\n neuron.updateWeights()\n correctPrediction = False\n neuron.calculateWeightedDelta() #Populate weighted delta of output neurons for back propogation\n o = o + 1 #Get expected output for next neuron in output layer\n layer_outputs.append(np.array(output_data_point))\n \n if correctPrediction == False:\n trainErrors = trainErrors + 1 #Update training error\n \n #Back propogate\n for layer in range(output_layer_index,0,-1): #From top layer 10->64->128\n k = 0\n for lowerNeuron in self.neuralNetwork[layer-1]: #Every neuron in lower layer\n weightedDeltaSum = 0\n for upperNeuron in self.neuralNetwork[layer]: #Every neuron in upper layer\n weightedDeltaUp = upperNeuron.getWeightedDelta()\n weightedDeltaSum = weightedDeltaSum + weightedDeltaUp[k]\n hiddenDelta = lowerNeuron.getHiddenDelta(weightedDeltaSum) #Hidden neuron delta\n lowerNeuron.calculateWeightedDelta() #Populate weighted delta for next lower layer\n lowerNeuron.updateWeights() #Update weights\n k = k + 1 #For next neuron in current hidden layer\n\n expected_output_index_train = expected_output_index_train + 1\n 
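The weight update performed by Neuron.updateWeights in this loop is the delta rule with momentum: delta_w(t) = eta * delta * x + alpha * delta_w(t-1), where the momentum term reuses the previous step's change. A compact numeric illustration with made-up values:

import numpy as np

eta, alpha = 0.1, 0.1
x = np.array([1.0, 0.5])            # fixed input for both presentations
w = np.array([0.2, -0.3])
prev_dw = np.zeros_like(w)

for delta in (0.4, 0.1):            # local deltas from two successive updates
    dw = eta * delta * x            # delta-rule step
    w = w + dw + alpha * prev_dw    # momentum reuses the previous change
    prev_dw = dw

# Step 1 adds [0.04, 0.02]; step 2 adds [0.01, 0.005] plus 0.1 * [0.04, 0.02].
assert np.allclose(w, [0.254, -0.273])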
expected_output_index_test = expected_output_index_test + 1\n \n trainErrorFrac.append(trainErrors/total_train_inputs)\n testErrorFrac.append(testErrors/total_test_inputs)\n \n print(\"--- Execution: %s seconds ---\" % (time.time() - start_time))\n \n #Break if training error fraction for current epoch reaches below 0.1\n if trainErrorFrac[-1] <= 0.0015 and testErrorFrac[-1] <= 0.001:\n print('Stop Training - Training and Testing error fractions below 0.001')\n break\n \n return (trainErrorFrac,testErrorFrac)\n\n def feedForward_backPropogate_new(self,full_df,epoch):\n #Save training error for epochs\n trainErrorFrac = []\n #Save testing error for epochs\n testErrorFrac = []\n #All epoch outputs\n epoch_outputs = []\n low = 0.25\n high = 0.75\n \n for e in range(epoch):\n #Get random Test and Train data points\n (X_traint,Y_traint,X_testt,Y_testt) = getRandomTrainTestSet(full_df,0.8)\n \n testErrors = 0\n trainErrors = 0\n\n XY_traint = pd.concat([X_traint, Y_traint], axis=1, join='inner')\n XY_testt = pd.concat([X_testt, Y_testt], axis=1, join='inner')\n \n #Train using random 25% of inputs in every epoch\n XY_traint = XY_traint.sample(frac=0.25,axis=0)\n XY_testt = XY_testt.sample(frac=1,axis=0)\n\n X_train_newt = XY_traint.drop(['label'],inplace=False,axis=1).reset_index(drop=True)\n Y_train_newt = XY_traint['label'].reset_index(drop=True)\n \n X_test_newt = XY_testt.drop(['label'],inplace=False,axis=1).reset_index(drop=True)\n Y_test_newt = XY_testt['label'].reset_index(drop=True)\n \n X_train_matt = X_train_newt.to_numpy()\n Y_train_matt = Y_train_newt.to_numpy()\n\n X_test_matt = X_test_newt.to_numpy()\n Y_test_matt = Y_test_newt.to_numpy()\n\n expected_output_index_train = 0 #For getting expected(true) output for Train\n expected_output_index_test = 0 #For getting expected(true) output for Test\n\n total_train_inputs = len(X_train_matt)\n total_test_inputs = len(X_test_matt)\n \n print(f'*** Running EPOCH:{e+1} ***')\n start_time = time.time()\n \n #Train & Test Simulation\n for index in range(len(X_train_matt)): #For every train data point\n correctPrediction = True\n correctPredictionTest = True\n \n layer_outputs = []\n layer_outputs_test = []\n \n data_point_test = X_test_matt[index]\n data_point = X_train_matt[index]\n \n #Test Predictions - WINNER TAKE All\n for layer in range(self.total_layers): #For each layer 128=>64=>10\n output_data_point_test = []\n if layer != output_layer_index: #Hidden Layer 128=>64\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n if layer == 0: #First hidden layer\n neuron.setCurrentInput(data_point_test)\n else: #Other hidden layers\n neuron.setCurrentInput(layer_outputs_test[layer-1])\n output_data_point_test.append(neuron.getCurrentOutput())\n else: #Output Layer 10\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n neuron.setCurrentInput(layer_outputs_test[layer-1])\n output_data_point_test.append(neuron.getCurrentOutput())\n layer_outputs_test.append(np.array(output_data_point_test))\n \n #Compare index of max value for both\n expected_test = Y_test_matt[expected_output_index_test] #Get expected output of data point\n expected_output_layer_test = y_ideal.get(expected_test) #Get expected output for all 10 neurons that make up the digit\n expected_output_layer_test = np.array(expected_output_layer_test)\n expected_test_out_max_index = np.where(expected_output_layer_test == expected_output_layer_test.max())\n \n testOutNeuron = layer_outputs_test[-1] #Output layer results\n test_out_max_index = 
np.where(testOutNeuron == testOutNeuron.max())\n \n if test_out_max_index[0][0] != expected_test_out_max_index[0][0]:\n correctPredictionTest = False\n \n if correctPredictionTest == False:\n testErrors = testErrors + 1 #Update testing error\n \n #Feed forward Train\n for layer in range(self.total_layers): #For each layer 128=>64=>10\n output_data_point = []\n if layer != output_layer_index: #Hidden Layer 128=>64\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n if layer == 0: #First hidden layer\n neuron.setCurrentInput(data_point)\n else: #Other hidden layers\n neuron.setCurrentInput(layer_outputs[layer-1])\n output_data_point.append(neuron.getCurrentOutput())\n else: #Output Layer 10\n expected = Y_train_matt[expected_output_index_train] #Get expected output of data point\n expected_output_layer = y_ideal.get(expected) #Get expected output for all 10 neurons that make up the digit\n o = 0\n for neuron in self.neuralNetwork[layer]: #Every neuron in current layer\n output_delta = 0\n neuron.setCurrentInput(layer_outputs[layer-1])\n output_data_point.append(neuron.getCurrentOutput())\n expected_value = expected_output_layer[o] #Expected\n predicted_value = neuron.getStandardizedOutput(expected_value,low,high) #Predicted\n output_delta = neuron.getOutputDelta() #Output neuron delta\n if predicted_value != expected_value:\n #Update weight\n neuron.updateWeights()\n correctPrediction = False\n neuron.calculateWeightedDelta() #Populate weighted delta of output neurons for back propogation\n o = o + 1 #Get expected output for next neuron in output layer\n layer_outputs.append(np.array(output_data_point))\n \n if correctPrediction == False:\n trainErrors = trainErrors + 1 #Update training error\n\n expected_output_index_train = expected_output_index_train + 1\n expected_output_index_test = expected_output_index_test + 1\n \n trainErrorFrac.append(trainErrors/total_train_inputs)\n testErrorFrac.append(testErrors/total_test_inputs)\n \n print(\"--- Execution: %s seconds ---\" % (time.time() - start_time))\n \n #Break if training error fraction for current epoch reaches below 0.1\n if trainErrorFrac[-1] <= 0.0015 and testErrorFrac[-1] <= 0.001:\n print('Stop Training - Training and Testing error fractions below 0.001')\n break\n \n return (trainErrorFrac,testErrorFrac)\n\n#Function to plot error fraction for Train & Test\ndef plotErrorFraction(trainEF,testEF,title,num):\n \n E = [i+1 for i in range(len(trainEF))]\n fig,axes = plt.subplots(figsize=(10,5), num='Error Fraction')\n \n axes.set_title(f'Figure {num} {title} Error Fraction vs Epoch')\n axes.set_xlabel('Epoch')\n axes.set_ylabel('Error Fraction')\n\n axes.plot(E,testEF,label = 'Testing Set')\n axes.plot(E,trainEF,label = 'Training Set')\n \n axes.legend()\n plt.show()\n\n#Display challenege matrix\ndef displayConfusionMatrix(data,err,title,num):\n plt.figure(num='Confusion Matrix')\n sns.heatmap(data, annot=True,cbar=False, cmap='coolwarm',fmt='g')\n plt.title(f'Figure {num} {title} Error Fraction:{err} with Confusion Matrix')\n plt.xticks(ticks=[0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5],labels=['0','1','2','3','4','5','6','7','8','9'])\n plt.yticks(ticks=[0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5],labels=['0','1','2','3','4','5','6','7','8','9'])\n plt.show()\n\n#Get autoencoder weights\nautoWeights = getWeightsFromFile('AutoencoderWeights.csv')\n\n#Create NeuralNetwork for Case 1\nbackpropnn_case1 = 
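A side note on the winner-take-all readout used throughout this script: np.where(out == out.max())[0][0] picks the first index attaining the maximum, which is exactly what np.argmax returns directly, ties included:

import numpy as np

out = np.array([0.10, 0.72, 0.72, 0.05])   # note the tie at indices 1 and 2
idx = np.where(out == out.max())[0][0]
assert idx == np.argmax(out) == 1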
NeuralNetwork(layers,layer_sizes,0.1,0.1,getSigmoid,getSignmodDerivative)\nbackpropnn_case1.initializeAutoWeights(autoWeights)\n\n#Create NeuralNetwork for Case 2\nbackpropnn_case2 = NeuralNetwork(layers,layer_sizes,0.1,0.1,getSigmoid,getSignmodDerivative)\nbackpropnn_case2.initializeAutoWeights(autoWeights)\n\n\n#Function to display loss before and after training\ndef displayError(trainEF,trainEF1,testEF,testEF1,num):\n plt.figure(num='Error Comparison')\n #Prepare data frame\n case2 = [trainEF,testEF]\n case1 = [trainEF1,testEF1]\n index = ['Training','Testing']\n X_axis = np.arange(len(index))\n plt.bar(X_axis - 0.2, case1, 0.4, label = 'Case 1')\n plt.bar(X_axis + 0.2, case2, 0.4, label = 'Case 2')\n plt.xticks(X_axis, index)\n plt.title(f'Figure {num} Error Comparison')\n plt.xlabel(\"Dataset\")\n plt.ylabel('Average Error (per data point)')\n plt.legend()\n plt.show()\n\n#Function to display digit wise error after training\ndef displayDigitError(errorTrainDigits,errorTestDigits,errorTrainDigits1,errorTestDigits1,num):\n plt.figure(num = 'Train Digit Error Comparison')\n #Prepare train data frame\n case2 = [errorTrainDigits[0],errorTrainDigits[1],errorTrainDigits[2],\n errorTrainDigits[3],errorTrainDigits[4],errorTrainDigits[5],\n errorTrainDigits[6],errorTrainDigits[7],errorTrainDigits[8],\n errorTrainDigits[9]]\n case1 = [errorTrainDigits1[0],errorTrainDigits1[1],errorTrainDigits1[2],\n errorTrainDigits1[3],errorTrainDigits1[4],errorTrainDigits1[5],\n errorTrainDigits1[6],errorTrainDigits1[7],errorTrainDigits1[8],\n errorTrainDigits1[9]]\n index = ['0','1','2','3','4','5','6','7','8','9']\n X_axis = np.arange(len(index))\n plt.bar(X_axis - 0.2, case1, 0.4, label = 'Case 1')\n plt.bar(X_axis + 0.2, case2, 0.4, label = 'Case 2')\n plt.xticks(X_axis, index)\n plt.title(f'Figure {num} Train Digit Error Comparison')\n plt.xlabel(\"Train Digits\")\n plt.ylabel('Average Train Digit Error (per data point)')\n plt.legend()\n \n plt.figure(num = 'Test Digit Error Comparison')\n #Prepare test data frame\n case2 = [errorTestDigits[0],errorTestDigits[1],errorTestDigits[2],\n errorTestDigits[3],errorTestDigits[4],errorTestDigits[5],\n errorTestDigits[6],errorTestDigits[7],errorTestDigits[8],\n errorTestDigits[9]]\n case1 = [errorTestDigits1[0],errorTestDigits1[1],errorTestDigits1[2],\n errorTestDigits1[3],errorTestDigits1[4],errorTestDigits1[5],\n errorTestDigits1[6],errorTestDigits1[7],errorTestDigits1[8],\n errorTestDigits1[9]]\n\n plt.bar(X_axis - 0.2, case1, 0.4, label = 'Case 1')\n plt.bar(X_axis + 0.2, case2, 0.4, label = 'Case 2')\n plt.xticks(X_axis, index)\n plt.title(f'Figure {num+1} Test Digit Error Comparison')\n plt.xlabel(\"Test Digits\")\n plt.ylabel('Average Test Digit Error (per data point)')\n plt.legend()\n plt.show()\n\n#Train Network\nepoch = 400\n(trainErrorFrac1,testErrorFrac1) = backpropnn_case1.feedForward_backPropogate_new(XM.copy(),epoch)\n\n#Train Network\n(trainErrorFrac,testErrorFrac) = backpropnn_case2.feedForward_backPropogate(XM.copy(),epoch)\n\nplotErrorFraction(trainErrorFrac1,testErrorFrac1,'Case 1', 1)\n\nplotErrorFraction(trainErrorFrac,testErrorFrac,'Case 2', 2)\n\n#Prediction after training\n(conf_train,conf_test,trainEF,testEF,errorTrainDigits,errorTestDigits) = backpropnn_case2.predict(XM.copy())\n\n#Prediction after training\n(conf_train1,conf_test1,trainEF1,testEF1,errorTrainDigits1,errorTestDigits1) = backpropnn_case1.predict(XM.copy())\n\n#Display Train Confusion Matrix\ndisplayConfusionMatrix(conf_train1,trainEF1,'Case 1 After Training - Training 
Set',3)\n\n#Display Train Confusion Matrix\ndisplayConfusionMatrix(conf_train,trainEF,'Case 2 After Training - Training Set',4)\n\n#Display Test Confusion Matrix\ndisplayConfusionMatrix(conf_test1,testEF1,'Case 1 After Training - Testing Set',5)\n\n#Display Test Confusion Matrix\ndisplayConfusionMatrix(conf_test,testEF,'Case 2 After Training - Testing Set',6)\n\n#Display final training and testing errors\ndisplayError(trainEF,trainEF1,testEF,testEF1,7)\n\n#Display final training and testing errors per digirs\ndisplayDigitError(errorTrainDigits,errorTestDigits,errorTrainDigits1,errorTestDigits1,8)","sub_path":"Neural Networks/Classification/Multi Class/MultiClass_ImageClassifier.py","file_name":"MultiClass_ImageClassifier.py","file_ext":"py","file_size_in_byte":38351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42228959","text":"# Histograms - Visualize the distribution of numeric values within each attribute\n\n# Univariate Histograms\nfrom matplotlib import pyplot\nfrom pandas import read_csv\nfilename = '../../../pima-indians-diabetes.csv'\nnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\ndata = read_csv(filename, names=names)\ndata.hist()\npyplot.show()\n","sub_path":"2_Lessons/Chapter 6 Understand Your data With Visualization/1 Univariate Plots/6.1.1 Histograms.py","file_name":"6.1.1 Histograms.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459794814","text":"# -*- coding: utf-8 -*-\r\n##############################################################################\r\n#\r\n# OpenERP, Open Source Management Solution\r\n# Copyright (C) 2004-2010 Tiny SPRL ().\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as\r\n# published by the Free Software Foundation, either version 3 of the\r\n# License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with this program. 
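The histogram recipe above uses the DataFrame.hist defaults (one subplot per numeric column); the same call also accepts a bin count and a figure size if the nine default panels come out cramped. A variant of its last two lines, assuming the same data and pyplot names:

data.hist(bins=20, figsize=(10, 8))
pyplot.show()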
If not, see .\r\n#\r\n##############################################################################\r\n\r\nfrom openerp import tools\r\nimport openerp.addons.decimal_precision as dp\r\nfrom openerp.osv import fields,osv\r\nfrom openerp import netsvc\r\nimport time\r\nfrom openerp.tools.translate import _\r\n\r\nclass select_picking_internal(osv.osv_memory):\r\n _inherit = 'wf_select.picking_internal'\r\n def create_internal_picking(self, cr, uid, ids, context=None):\r\n\r\n if context is None:\r\n context = {} \r\n record_id = context and context.get('active_id', False) or False\r\n move_obj = self.pool.get('stock.move')\r\n pick_obj = self.pool.get('stock.picking')\r\n uom_obj = self.pool.get('product.uom')\r\n order_line_obj = self.pool.get('sale.order.line')\r\n data_obj = self.pool.get('wf_select.products_internal')\r\n act_obj = self.pool.get('ir.actions.act_window')\r\n model_obj = self.pool.get('ir.model.data')\r\n wf_service = netsvc.LocalService(\"workflow\")\r\n pick = pick_obj.browse(cr, uid, record_id, context=context)\r\n data = self.read(cr, uid, ids[0], context=context)\r\n date_cur = time.strftime('%Y-%m-%d %H:%M:%S')\r\n \r\n sale_obj = self.pool.get('sale.order')\r\n sale = sale_obj.browse(cr, uid, record_id, context=context)\r\n\r\n new_pick_name = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking')\r\n origin = \"%s / %s /Kd-Nr %s\" %(sale.name, sale.partner_id.last_name, sale.partner_id.ref)\r\n new_picking = pick_obj.create(cr, uid,{\r\n 'name': new_pick_name,\r\n 'move_lines': [],\r\n 'origin': origin, \r\n 'partner_id': sale.partner_id.id,\r\n 'state':'draft', \r\n 'type': 'internal',\r\n 'date':date_cur, \r\n 'invoice_state': 'none',\r\n 'company_id': sale.company_id.id,\r\n 'location_id': data['location_id'][0], \r\n 'location_dest_id': data['location_dest_id'][0],\r\n })\r\n \r\n val_id = data['product_internal_moves']\r\n for v in val_id:\r\n data_get = data_obj.browse(cr, uid, v, context=context)\r\n\r\n if not data_get.order_line_id:\r\n raise osv.except_osv(_('Warning !'), _(\"You have manually created product lines, please delete them to proceed\"))\r\n new_qty = data_get.quantity\r\n if new_qty:\r\n if data_get.product_id.wf_complete_vpe_qty_hl and data['location_id'][0] == 12:\r\n location = 14\r\n else:\r\n location = data['location_id'][0]\r\n\r\n new_move=move_obj.create(cr, uid, {\r\n 'name': data_get.order_line_id.name,\r\n 'product_id': data_get.product_id.id,\r\n 'date': sale.date_order,\r\n 'date_expected': sale.date_order,\r\n 'product_uom': data_get.order_line_id.product_uom.id,\r\n 'product_uos': data_get.order_line_id.product_uos.id,\r\n 'product_packaging': False,\r\n 'partner_id': sale.partner_id.id,\r\n 'tracking_id': False,\r\n 'company_id': sale.company_id.id,\r\n 'product_qty': new_qty,\r\n 'product_uos_qty': uom_obj._compute_qty(cr, uid, data_get.order_line_id.product_uos.id, new_qty, data_get.order_line_id.product_uos.id),\r\n 'picking_id': new_picking, \r\n 'state': 'draft',\r\n 'location_id': location, \r\n 'location_dest_id': data['location_dest_id'][0],\r\n 'date': date_cur,\r\n })\r\n if data_get.order_line_id.wf_notice:\r\n note = \"%s \\n%s\" %(data_get.order_line_id.wf_notice, new_pick_name)\r\n else:\r\n note = new_pick_name\r\n order_line_obj.write(cr,uid, data_get.order_line_id.id, {'wf_notice': note} )\r\n\r\n\r\n wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)\r\n pick_obj.action_assign(cr, uid, [new_picking], context)\r\n\r\n sale_obj.write(cr,uid, sale.id, {'wf_moves_created': 
True, 'wf_internal_picking': new_picking})\r\n\r\n return {\r\n 'domain': \"[('id', 'in', [\"+str(new_picking)+\"])]\",\r\n 'name': _('Internal Picking'),\r\n 'view_type':'form',\r\n 'view_mode':'tree,form',\r\n 'res_model': 'stock.picking',\r\n 'type':'ir.actions.act_window',\r\n 'context':context,\r\n }\r\n\r\nselect_picking_internal()\r\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\r\n","sub_path":"wf_store_sent/wizard/select_products.py","file_name":"select_products.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35503793","text":"import numpy as np\nimport cv2\nimport os\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport math\nimport time\nimport matplotlib.image as mpimg\nfrom random import randint\nimport sys\n\ndef resize_image(img_in, img_out, res_size = (32, 32)):\n '''\n\n :param img_in: file path of input image to resize\n :param img_out: file path to write resized image\n :param res_size: tuple (width, height) output image size\n :return:\n '''\n try:\n image = cv2.imread(img_in, 1)\n img_res = cv2.resize(image, res_size)\n cv2.imwrite(img_out, img_res)\n except:\n os.remove(img_in)\n\n\ndef process_image(path, res_size):\n '''\n\n :param path: path to folder with image\n :return:\n '''\n filelist = os.listdir(path)\n processed_dir = os.path.join(path, 'scale_image')\n if not os.path.isdir(processed_dir):\n os.mkdir(processed_dir)\n for f in filelist:\n probably_image = os.path.join(path, f)\n if probably_image[-4:] == '.jpg' or probably_image[-4:] == '.png':\n resize_image(probably_image, os.path.join(processed_dir, f), res_size)\n\ndef normalized(X):\n x_mean = X.mean(axis = 0)\n x_std = X.std(axis = 0)\n return (X-x_mean)/(x_std+0.00001)\n\ndef load_image(path, label, image_size, class_number):\n '''\n\n :param path: path to folder contains images\n :param label: 1 - plane, 0 - not a plane\n :param image_size: img_shape = (64, 64)\n :return: X - image array, Y - label array [1,0]- plane , [0,1] not plane\n '''\n filelist = os.listdir(path)\n N = len(filelist)\n\n imgN = image_size[0] * image_size[1]\n X = np.zeros([N, imgN])\n Y = np.zeros([N, class_number])\n\n for i, f in enumerate(filelist):\n img = cv2.imread(os.path.join(path, f), 0)\n X[i, :] = img.flatten()\n vec = np.zeros(class_number)\n vec[label] = 1\n Y[i, :] = vec\n\n return X, Y\n\ndef load_image_with_followers(path, label, image_size, class_number):\n X, Y = load_image(path, label, image_size, class_number)\n follower = [0 for i in range(Y.shape[0])]\n return X,Y,follower\n\ndef random_batch(X, Y, batch_size, follow=False):\n\n r = np.random.choice(range(X.shape[0]), batch_size, replace=False)\n X_shuffle = X[0:batch_size]\n Y_shuffle = Y[0:batch_size]\n for i in range(batch_size):\n X_shuffle[i] = X[r[i]]\n Y_shuffle[i] = Y[r[i]]\n if follow:\n follow_batches(X.shape[0], r)\n return X_shuffle, Y_shuffle\n\n\ndef follow_batches(shp, arr):\n for item in Follower:\n if len(item) == shp: # using fact that its highly impossible that we have equal number of image for each classes\n for a in arr:\n item[a]+=1\n else:\n continue\n\ndef usage_histogram(usage):\n plt.hist(range(len(usage)), weights=usage)\n print( len(usage), min(usage), max(usage))\n plt.show()\n\n\ndef batches_concatenate(batch_size, follow, *args):\n \"\"\"\n\n :param batch_size:\n :param args: (X_desert, Y_desert) (X_country, Y_country)\n :return:\n \"\"\"\n arraysX = []\n arraysY = []\n for arg in args:\n x, y = 
random_batch(arg[0], arg[1], batch_size, follow)\n arraysX.append(x)\n arraysY.append(y)\n return np.vstack(arraysX), np.vstack(arraysY)\n\n\ndef load_images_from_paths(img_shape, class_number, *args):\n \"\"\"\n :param img_shape: (img width, img height)\n :param args: list of tuples (path, label)\n :return: np arrays with img and vec labels\n \"\"\"\n arraysX = []\n arraysY = []\n for arg in args:\n x,y = load_image(arg[0], arg[1], img_shape, class_number)\n arraysX.append(x)\n arraysY.append(y)\n return np.vstack(arraysX), np.vstack(arraysY)\n\n\ndef show_layer(sess, feed_data, data, layer):\n units = sess.run(layer, feed_dict={feed_data: data})\n filters = units.shape[3]\n plt.figure(1, figsize=(units.shape[1], units.shape[2]))\n n_columns = 10\n n_rows = math.ceil(filters / n_columns) + 1\n for i in range(filters):\n plt.subplot(n_rows, n_columns, i + 1)\n plt.title('Filter ' + str(i))\n plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\")\n\n\ndef ground_truth_challenge():\n return [1, 2, 2, 0, 0, 0, 0, 0, 0, 5, 3, 1, 3, 5,5, 5, 0, 0, 0,0, 5, 3, 0, 0, 0, 3, 1, 1]\n\n\ndef challenge_loader():\n ground_thruth= ground_truth_challenge()\n X_test, Y_test = load_image('/media/niko/Nowy/AI_image/IrisChallenge/scale_image', 0, (64,64), 6)\n if Y_test.shape[0] != len(ground_thruth):\n print( 'ground_thruth len not equal Y len')\n return False\n else:\n for idx in range(Y_test.shape[0]):\n Y_test[idx][0] = 0\n Y_test[idx][ground_thruth[idx]] = 1\n return X_test, Y_test\n\n\ndef challenge_run(sess, logits, show=False):\n X_test, Y_test = challenge_loader()\n predict = sess.run(logits, feed_dict={X: X_test, dropout: DROPOUT})\n print( 'predict:\\n', predict, '\\n')\n preds = tf.nn.softmax(predict)\n print( 'preds \\n', sess.run(preds))\n argmax = tf.argmax(predict, 1)\n argmax_sess = sess.run(argmax)\n print( argmax_sess)\n print( argmax_sess == ground_truth_challenge())\n labels = ['urban', 'water', 'mountains', 'forest', 'desert', 'country', 'village']\n print( [labels[x] for x in argmax_sess])\n print( [labels[x] for x in ground_truth_challenge()])\n if show:\n for i in range(X_test.shape[0]):\n show_layer(sess, X, [X_test[i]], conv1)\n plt.show()\n\n\ndef resize_paths_images(size, paths):\n\n for path in paths:\n processed_dir = os.path.join(path, 'scale_image')\n if os._exists(processed_dir):\n os.rmdir(processed_dir)\n print( 'processing ', path )\n process_image(path, size)\n\ndef resize_folders(mainpath,size):\n paths = os.listdir(mainpath)\n dir_paths = []\n for path in paths:\n p = os.path.join(mainpath, path)\n if os.path.isdir(p):\n dir_paths.append(p)\n resize_paths_images(size, dir_paths)\n\n\ndef checksum(arr):\n arrnp = np.asarray(arr)\n return np.sum(arrnp.flatten())\n\n\ndef check_change_of_weights(sess, scoretab, *args):\n print( scoretab)\n for idx, arg in enumerate(args):\n weight = sess.run(arg)\n print( arg.name)\n print( weight)\n scoretab[idx] = (scoretab[idx]- checksum(weight))**2\n print( scoretab)\n\ndef load_img_uri(path):\n uri = os.listdir(path)\n return [os.path.join(path, u) for u in uri]\n\ndef label_vector(n_classes, idx):\n vec = [0 for i in range(n_classes)]\n vec[idx] = 1\n return vec\n\ndef random_batch_uri(batch_size, path_vec, label, n_classes, resize = 1):\n\n r = np.random.choice(range(len(path_vec)), batch_size, replace=False)\n X = []\n for i in range(batch_size):\n img = cv2.imread(path_vec[r[i]], 0)\n res = cv2.resize(img, None, fx=resize, fy=resize, interpolation=cv2.INTER_CUBIC)\n X.append( res.flatten())\n lab_vec = 
label_vector(n_classes, label)\n Y = [lab_vec for i in range(batch_size)]\n return X, Y\n\ndef batches_concatenate_uri(batch_size, *args):\n \"\"\"\n\n :param batch_size:\n :param args: (X_desert, Y_desert) (X_country, Y_country)\n :return:\n \"\"\"\n arraysX = []\n arraysY = []\n for idx, arg in enumerate(args):\n x, y = random_batch_uri(batch_size, arg, idx, len(args))\n arraysX.append(x)\n arraysY.append(y)\n return np.vstack(arraysX), np.vstack(arraysY)\n\n\ndef unpickle(file):\n if sys.version_info >= (3,0):\n import _pickle as cPickle\n with open(file, 'rb') as fo:\n dict = cPickle.load(fo, encoding='bytes')\n return dict\n else:\n import cPickle\n with open(file, 'rb') as fo:\n dict = cPickle.load(fo)\n return dict\n\n\n\ndef random_cifar_batch_file(path):\n return path+str(randint(1,4))\n\n\ndef random_cifar_batch_logits_labels(batchsize, path, classes):\n dict = unpickle(path)\n r = np.random.choice(range(10000), batchsize, replace=False)\n dataX = dict[b'data']#.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"uint8\")\n X=[]\n Y=[]\n for i in range(batchsize):\n X.append(dataX[r[i]])\n Y.append(label_vector(classes, dict[b'labels'][r[i]]))\n return X,Y\n\n\ndef show_layer_stimuli(sess, layer, stimuli, xInput, size=(224,224)):\n units = sess.run(layer, feed_dict={xInput: np.reshape(stimuli, [1, size[0] * size[1]], order='F')})\n plotNNFilter(units, size)\n\n\ndef plotNNFilter(units, size=(224,224)):\n filters = units.shape[3]\n plt.figure(1, figsize=size)\n n_columns = 6\n n_rows = math.ceil(filters / n_columns) + 1\n for i in range(filters):\n plt.subplot(n_rows, n_columns, i+1)\n plt.title('Filter ' + str(i))\n plt.imshow(units[0,:,:,i], interpolation=\"nearest\", cmap=\"gray\")","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91294423","text":"class Solution(object):\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n d={')':'(','}':'{',']':'['}\n stack=[]\n for c in s:\n if c=='(' or c=='{' or c=='[':\n stack.append(c)\n else:\n if not stack or stack.pop()!=d[c]:\n return False\n \n return False if stack else True","sub_path":"20 Valid Parentheses.py","file_name":"20 Valid Parentheses.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"361018355","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport time\nimport base64\nfrom io import BytesIO\nfrom plotstreamer import Channel\n\npub_port = 'tcp://127.0.0.1:10000'\nsub_port = 'tcp://127.0.0.1:10002'\n\nchannel = sys.argv[1]\nplottype = sys.argv[2]\n\nwith Channel(channel, pub_port, sub_port) as c:\n def sendfigure():\n out = BytesIO()\n fig, ax = plt.subplots(1,1)\n if plottype == 'image':\n ax.imshow(np.random.rand(50,50), interpolation='none')\n fig.set_size_inches(8,8)\n fig.savefig(out, dpi=100)\n else:\n ax.plot(np.random.rand(10, 5))\n fig.set_size_inches(8, 5)\n fig.savefig(out, dpi=100)\n c.send_data(base64.b64encode(out.getvalue()))\n out.seek(0)\n plt.close(fig)\n\n while True:\n sendfigure()\n time.sleep(1)\n","sub_path":"zmqtest2.py","file_name":"zmqtest2.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404404098","text":"def user_names():\n # Used to prompt the user for user names\n 
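 # The loop below keeps re-prompting until an unused name is entered. A minimal equivalent sketch using a set for O(1) membership tests (a hedged aside, not the original author's code; assumes Python 3.8+ for ':='):\n # seen = set()\n # while (name := input(\"Please input a user name: \")) in seen:\n #     print(\"You've already input this name, please alter it.\")\n # seen.add(name)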
import numpy as np\n user = []\n while True:\n temp = input(\"Please input a user name: \")\n while temp in user:\n print(\"You've already input this name, please alter it.\")\n temp = input(\"Please input a user name: \")\n \n user = user + [temp]\n\n print(\"Add another user?\")\n another = input(\"Y/N: \")\n if another == \"N\":\n break\n user = np.array(user)\n return user\n###########################################\n\n\ndef financial_activity(usernames):\n # Used to find out who paid for the service?\n import numpy as np\n payer = []\n pay_num = []\n while True:\n temp = input(\"Please input a payer name: \")\n while temp not in usernames:\n print(\"The name you input is not one of the users.\")\n temp = input(\"Please input a payer name: \")\n while temp in payer:\n print(\"You've already input this name, please alter it.\")\n temp = input(\"Please input a payer name: \")\n\n payer = payer + [temp]\n\n pay_num = pay_num + [float(input(\"How much did this person pay? \"))]\n print(\"Add another payer?\")\n another = input(\"Y/N? \")\n if another == \"N\":\n break\n \n payer = np.array(payer)\n pay_num = np.array(pay_num)\n pay_prop = pay_num / sum(pay_num)\n # Check all elements of payer are in user\n # User interface can probably resolve this issue\n row_index = np.zeros(shape = (1, len(payer)), dtype = 'int64')\n for i in range(len(payer)):\n row_index[0,i] = int(np.where(usernames == payer[i])[0][0])\n \n # Who used the service? And\n # who owes what to whom?\n # Multiple currencies#########!!!!!!\n #######\n # Allow adding new users on the fly\n payee = []\n debt = []\n print(\"Split the fare equally? \")\n go_dutch = input(\"Y/N: \")\n \n while True:\n temp = input(\"Please input a payee name: \")\n while temp not in usernames:\n print(\"The name you input is not one of the users.\")\n temp = input(\"Please input a payee name: \")\n payee = payee + [temp]\n\n if go_dutch == \"N\":\n debt = debt + [float(input(\"How much should this person pay?\"))]\n print(\"Add another payee?\")\n another = input(\"Y/N? 
\")\n if another == \"N\":\n debt = np.array(debt)\n break\n \n payee = np.array(payee)\n \n#==============================================================================\n# if sum(debt) != sum(paynum):\n# print(\"The sum of debt is not equal to the price of the service\\n\")\n# print(\"The sum of debt is \" + str(sum(debt)) + \"\\n\")\n# print(\"The price of the service is \" + str(sum(paynum)) + \"\\n\")\n# print(\"Do you want to proceed?\\n\")\n# print(\"If yes, we are going to assume that the price of the service is the sum of debt.\\n\")\n# print(\"If no, you will need to go back and change the debt.\\n\")\n# proceed = input(\"Y/N: \")\n#==============================================================================\n \n # Can be resolved by user interface\n \n if go_dutch == \"Y\":\n debt = np.zeros(shape = len(payee))\n debt[:] = np.mean(pay_num)\n \n \n col_index = np.zeros(shape = (1, len(payee)), dtype = 'int64')\n for i in range(len(payee)):\n col_index[0,i] = int(np.where(usernames == payee[i])[0][0])\n \n#==============================================================================\n# result[\"row_index\"] = row_index\n# result[\"col_index\"] = col_index\n# result[\"debt\"] = debt\n# result[\"pay_prop\"] = pay_prop\n#==============================================================================\n return row_index, col_index, debt, pay_prop\n\n###########################################\n\n\n\n\n","sub_path":"FareSplit/src/userInput.py","file_name":"userInput.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251892259","text":"from PIL import Image\n\nbird_img = Image.open('bird.jpg')\nbird_img.show()\nwidth, height = bird_img.size # (w,h)\nfor x in range(width):\n for y in range(height):\n pixel_coordinate = (x, y)\n r, g, b = bird_img.getpixel(pixel_coordinate) # if working with png need alpha as 4th arg\n\n negative_color = (255 - r, 255 - g, 255 - b)\n bird_img.putpixel(pixel_coordinate, negative_color)\n\nbird_img.show()\n","sub_path":"Beginning Python Vid Notes/Section8/Section8_4.py","file_name":"Section8_4.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93191750","text":"\"\"\"Tests for the features functionality.\"\"\"\n\n__author__ = \"Jeroen Van Der Donckt, Emiel Deprost, Jonas Van Der Donckt\"\n\nimport os\nimport random\n\nimport dill\nimport pytest\nimport math\nimport warnings\nimport pandas as pd\nimport numpy as np\n\nfrom tsflex.features import FuncWrapper\nfrom tsflex.features import FeatureDescriptor, MultipleFeatureDescriptors\nfrom tsflex.features import FeatureCollection\n\nfrom pathlib import Path\nfrom pandas.testing import assert_frame_equal\nfrom scipy.stats import linregress\nfrom typing import Tuple\nfrom .utils import dummy_data\n\n\n## FeatureCollection\n\n\ndef test_single_series_feature_collection(dummy_data):\n fd = FeatureDescriptor(\n function=np.sum,\n series_name=\"EDA\",\n window=\"10s\",\n stride=\"5s\",\n )\n fc = FeatureCollection(feature_descriptors=fd)\n\n assert fc.get_required_series() == [\"EDA\"]\n\n res_list = fc.calculate(dummy_data, return_df=False, n_jobs=1)\n res_df = fc.calculate(dummy_data, return_df=True, n_jobs=1)\n\n assert isinstance(res_list, list) & (len(res_list) == 1)\n assert isinstance(res_df, pd.DataFrame)\n assert_frame_equal(res_list[0], res_df)\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / 
np.timedelta64(1, \"s\")\n stride_s = 5\n window_s = 10\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n assert all(res_df.index[1:] - res_df.index[:-1] == pd.to_timedelta(5, unit=\"s\"))\n\n\ndef test_uneven_sampled_series_feature_collection(dummy_data):\n fd = FeatureDescriptor(\n function=np.sum,\n series_name=\"EDA\",\n window=\"10s\",\n stride=\"16s\",\n )\n fc = FeatureCollection(feature_descriptors=fd)\n with pytest.raises(ValueError):\n fc.add(FeatureDescriptor(np.min, series_name=(\"TMP\",), window=\"10\", stride=\"6\"))\n with pytest.raises(ValueError):\n fc.add(FeatureDescriptor(np.min, series_name=(\"TMP\",), window=\"5s\", stride=\"6\"))\n with pytest.raises(ValueError):\n fc.add(FeatureDescriptor(np.min, series_name=(\"TMP\",), window=\"5\", stride=\"6s\"))\n\n fc.add(FeatureDescriptor(np.min, series_name=(\"TMP\",), window=\"10s\", stride=\"16s\"))\n fc.add(FeatureDescriptor(np.min, series_name=(\"EDA\",), window=\"10s\", stride=\"16s\"))\n\n assert set(fc.get_required_series()) == set([\"EDA\", \"TMP\"])\n assert len(fc.get_required_series()) == 2\n\n # Drop some data to obtain an irregular sampling rate\n inp = dummy_data.drop(np.random.choice(dummy_data.index[1:-1], 500, replace=False))\n\n res_df = fc.calculate(inp, return_df=True, approve_sparsity=True, n_jobs=3)\n\n assert res_df.shape[1] == 3\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 16\n window_s = 10\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n assert all(\n res_df.index[1:] - res_df.index[:-1] == pd.to_timedelta(stride_s, unit=\"s\")\n )\n\n\ndef test_warning_uneven_sampled_series_feature_collection(dummy_data):\n fd = FeatureDescriptor(\n function=np.sum,\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"2.5s\",\n )\n fc = FeatureCollection(feature_descriptors=fd)\n fc.add(FeatureDescriptor(np.min, series_name=(\"TMP\",), window=\"5s\", stride=\"2.5s\"))\n\n assert set(fc.get_required_series()) == set([\"EDA\", \"TMP\"])\n assert len(fc.get_required_series()) == 2\n # Drop some data to obtain an irregular sampling rate\n inp = dummy_data.drop(np.random.choice(dummy_data.index[1:-1], 500, replace=False))\n\n with warnings.catch_warnings(record=True) as w:\n # Trigger the warning\n # Note -> for some (yet unkknown) reason, the warning's aren't caught anymore\n # when using multiprocess (they are thrown nevertheless!), so we changed\n # n_jobs=1\n res_df = fc.calculate(inp, return_df=True, n_jobs=1, approve_sparsity=False)\n # Verify the warning\n assert len(w) == 2\n assert all([issubclass(warn.category, RuntimeWarning) for warn in w])\n assert all([\"gaps in the sequence\" in str(warn) for warn in w])\n # Check the output\n assert res_df.shape[1] == 2\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 2.5\n window_s = 5\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n assert all(\n res_df.index[1:] - res_df.index[:-1] == pd.to_timedelta(2.5, unit=\"s\")\n )\n\n\ndef test_featurecollection_repr(dummy_data):\n def corr(s1, s2):\n min_len = min(len(s1), len(s2))\n s1 = s1[:min_len]\n s2 = s2[:min_len]\n return np.corrcoef(s1, s2)[0][-1].astype(s1.dtype)\n\n fc = FeatureCollection(\n feature_descriptors=[\n FeatureDescriptor(\n function=FuncWrapper(func=corr, output_names=\"corrcoef\"),\n series_name=(\"EDA\", \"TMP\"),\n window=\"30s\",\n stride=\"30s\",\n ),\n ]\n )\n fc_str: 
str = fc.__repr__()\n assert \"EDA|TMP\" in fc_str\n assert (\n fc_str\n == \"EDA|TMP: (\\n\\twin: 30s , stride: 30s: [\\n\\t\\tFeatureDescriptor - func: FuncWrapper(corr, ['corrcoef'], {}),\\n\\t]\\n)\\n\"\n )\n\n out = fc.calculate(dummy_data, n_jobs=1, return_df=True)\n assert out.columns[0] == \"EDA|TMP__corrcoef__w=30s_s=30s\"\n\n out = fc.calculate(dummy_data, n_jobs=None, return_df=True)\n assert out.columns[0] == \"EDA|TMP__corrcoef__w=30s_s=30s\"\n\n\ndef test_window_idx_single_series_feature_collection(dummy_data):\n fd = FeatureDescriptor(\n function=np.sum,\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"12.5s\",\n )\n fc = FeatureCollection(feature_descriptors=fd)\n\n assert fc.get_required_series() == [\"EDA\"]\n\n res_begin = fc.calculate(dummy_data, return_df=True, n_jobs=0, window_idx=\"begin\")\n res_end = fc.calculate(dummy_data, return_df=True, n_jobs=0, window_idx=\"end\")\n res_middle = fc.calculate(dummy_data, return_df=True, n_jobs=0, window_idx=\"middle\")\n assert np.isclose(res_begin.values, res_end.values).all()\n assert np.isclose(res_begin.values, res_middle.values).all()\n\n res_begin = fc.calculate(\n dummy_data, return_df=True, n_jobs=None, window_idx=\"begin\"\n )\n res_end = fc.calculate(dummy_data, return_df=True, n_jobs=None, window_idx=\"end\")\n res_middle = fc.calculate(\n dummy_data, return_df=True, n_jobs=None, window_idx=\"middle\"\n )\n\n with pytest.raises(Exception):\n res_not_existing = fc.calculate(\n dummy_data, n_jobs=0, return_df=True, window_idx=\"somewhere\"\n )\n\n assert np.isclose(res_begin.values, res_end.values).all()\n assert np.isclose(res_begin.values, res_middle.values).all()\n\n assert res_begin.index[0] == dummy_data.index[0]\n assert res_end.index[0] == dummy_data.index[0] + pd.to_timedelta(5, unit=\"s\")\n # 2.5 -> refers to window / 2\n assert res_middle.index[0] == dummy_data.index[0] + pd.to_timedelta(2.5, unit=\"s\")\n\n for res_df in [res_begin, res_end, res_middle]:\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 12.5\n window_s = 5\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n assert all(\n res_df.index[1:] - res_df.index[:-1] == pd.to_timedelta(12.5, unit=\"s\")\n )\n\n\ndef test_multiplefeaturedescriptors_feature_collection(dummy_data):\n def sum_func(sig: np.ndarray) -> float:\n return sum(sig)\n\n mfd = MultipleFeatureDescriptors(\n functions=[sum_func, FuncWrapper(np.max), np.min],\n series_names=[\"EDA\", \"TMP\"],\n windows=[\"5s\", \"7.5s\"],\n strides=\"2.5s\",\n )\n fc = FeatureCollection(feature_descriptors=mfd)\n\n assert set(fc.get_required_series()) == set([\"EDA\", \"TMP\"])\n assert len(fc.get_required_series()) == 2\n\n res_list = fc.calculate(dummy_data, return_df=False, n_jobs=6)\n res_df = fc.calculate(dummy_data, return_df=True, n_jobs=6)\n\n assert (len(res_list) == 3 * 2 * 2) & (res_df.shape[1] == 3 * 2 * 2)\n res_list_names = [res.columns.values[0] for res in res_list]\n assert set(res_list_names) == set(res_df.columns)\n expected_output_names = [\n [\n f\"{sig}__sum_func__w=5s_s=2.5s\",\n f\"{sig}__sum_func__w=7.5s_s=2.5s\",\n f\"{sig}__amax__w=5s_s=2.5s\",\n f\"{sig}__amax__w=7.5s_s=2.5s\",\n f\"{sig}__amin__w=5s_s=2.5s\",\n f\"{sig}__amin__w=7.5s_s=2.5s\",\n ]\n for sig in [\"EDA\", \"TMP\"]\n ]\n # Flatten\n expected_output_names = expected_output_names[0] + expected_output_names[1]\n assert set(res_df.columns) == set(expected_output_names)\n\n\n # No NaNs when returning a list of 
calculated featured\n assert all([~res.isna().values.any() for res in res_list])\n # NaNs when merging to a df (for some cols)\n assert all([res_df[col].isna().any() for col in res_df.columns if \"w=7.5s\" in col])\n assert all([~res_df[col].isna().any() for col in res_df.columns if \"w=5s\" in col])\n\n stride_s = 2.5\n window_s = 5\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n expected_length = math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n assert all(\n [\n len(res) == expected_length - 1\n for res in res_list\n if \"w=7.5s\" in res.columns.values[0]\n ]\n )\n assert all(\n [\n len(res) == expected_length\n for res in res_list\n if \"w=5s\" in res.columns.values[0]\n ]\n )\n assert len(res_df) == expected_length\n\n\ndef test_featurecollection_feature_collection(dummy_data):\n fd = FeatureDescriptor(\n function=np.sum,\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"2.5s\",\n )\n fc = FeatureCollection(FeatureCollection(feature_descriptors=fd))\n\n assert fc.get_required_series() == [\"EDA\"]\n\n res_list = fc.calculate(dummy_data, return_df=False, n_jobs=1)\n res_df = fc.calculate(dummy_data, return_df=True, n_jobs=1)\n\n assert isinstance(res_list, list) & (len(res_list) == 1)\n assert isinstance(res_df, pd.DataFrame)\n assert_frame_equal(res_list[0], res_df)\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 2.5\n window_s = 5\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n assert all(res_df.index[1:] - res_df.index[:-1] == pd.to_timedelta(2.5, unit=\"s\"))\n\n\ndef test_feature_collection_column_sorted(dummy_data):\n fc = FeatureCollection(\n MultipleFeatureDescriptors(\n functions=[np.max, np.min, len, np.sum, np.median, np.mean, np.std],\n series_names=\"EDA\",\n windows=[\"5s\", \"30s\", \"2min\"],\n strides=\"5s\",\n )\n )\n df_eda = dummy_data[\"EDA\"].first('5min')\n out_cols = fc.calculate(df_eda, return_df=True, n_jobs=None).columns.values\n\n for _ in range(10):\n assert all(out_cols == fc.calculate(df_eda, return_df=True).columns.values)\n\n\ndef test_featurecollection_reduce(dummy_data):\n fc = FeatureCollection(\n MultipleFeatureDescriptors(\n functions=[np.max, np.min, np.std, np.sum],\n series_names=\"EDA\",\n windows=[\"5s\", \"30s\", \"1min\"],\n strides=\"5s\",\n )\n )\n df_feat_tot = fc.calculate(data=dummy_data, return_df=True, show_progress=True)\n\n for _ in range(5):\n col_subset = random.sample(\n list(df_feat_tot.columns), random.randint(1, len(df_feat_tot.columns))\n )\n fc_reduced = fc.reduce(col_subset)\n fc_reduced.calculate(dummy_data)\n\n # also test the reduce function on a single column\n fc_reduced = fc.reduce(random.sample(list(df_feat_tot.columns), 1))\n fc_reduced.calculate(dummy_data)\n\n # should also work when fc is deleted\n del fc\n fc_reduced.calculate(dummy_data)\n\n\ndef test_featurecollection_numeric_reduce(dummy_data):\n fc = FeatureCollection(\n feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=[240, 480, 1000],\n strides=240,\n functions=[np.mean, np.min, np.max, np.std],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n df_tmp = dummy_data[\"TMP\"].reset_index(drop=True)\n df_eda = dummy_data[\"EDA\"].reset_index(drop=True)\n out = fc.calculate([df_tmp, df_eda], window_idx=\"end\", return_df=True)\n\n n_retain = 8\n fc_reduced = fc.reduce(np.random.choice(out.columns, size=n_retain, replace=False))\n out_2 = fc_reduced.calculate([df_tmp, df_eda], return_df=True)\n 
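    # A minimal sketch of the invariant exercised here (an aside, assuming tsflex's reduce() recomputes a subset of the full output): every column of the reduced result must also be a column of the full result.\n    assert set(out_2.columns).issubset(set(out.columns))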
assert out_2.shape[1] == n_retain\n\n\ndef test_featurecollection_reduce_multiple_feat_output(dummy_data):\n def get_stats(series: np.ndarray):\n return np.min(series), np.max(series)\n\n fd = FeatureDescriptor(\n function=FuncWrapper(get_stats, output_names=[\"min\", \"max\"]),\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"5s\",\n )\n\n fc = FeatureCollection(\n [\n MultipleFeatureDescriptors(\n functions=[np.std, np.sum],\n series_names=\"EDA\",\n windows=[\"5s\", \"30s\", \"1min\"],\n strides=\"5s\",\n ),\n fd,\n ]\n )\n # df_feat_tot = fc.calculate(data=dummy_data, return_df=True, show_progress=True)\n\n fc_reduce = fc.reduce(feat_cols_to_keep=[\"EDA__min__w=5s_s=5s\"])\n del fd\n fc_reduce.calculate(dummy_data)\n\n\ndef test_featurecollection_error_val(dummy_data):\n fd = FeatureDescriptor(\n function=np.max,\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"2.5s\",\n )\n fc = FeatureCollection(FeatureCollection(feature_descriptors=fd))\n\n eda_data = dummy_data[\"EDA\"].dropna()\n eda_data[2 : 1 + 25 * 4] = None # Leave gap of 25 s\n eda_data = eda_data.dropna()\n assert eda_data.isna().any() == False\n assert (eda_data.index[1:] - eda_data.index[:-1]).max() == pd.Timedelta(\"25 s\")\n\n with pytest.raises(Exception):\n fc.calculate(eda_data, return_df=True, approve_sparsity=True)\n\n\ndef test_featurecollection_error_val_multiple_outputs(dummy_data):\n def get_stats(series: np.ndarray):\n return np.min(series), np.max(series)\n\n fd = FeatureDescriptor(\n function=FuncWrapper(get_stats, output_names=[\"min\", \"max\"]),\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"2.5s\",\n )\n fc = FeatureCollection(FeatureCollection(feature_descriptors=fd))\n\n eda_data = dummy_data[\"EDA\"].dropna()\n eda_data[2 : 1 + 25 * 4] = None # Leave gap of 25 s\n eda_data = eda_data.dropna()\n assert eda_data.isna().any() == False\n assert (eda_data.index[1:] - eda_data.index[:-1]).max() == pd.Timedelta(\"25 s\")\n\n with pytest.raises(Exception):\n fc.calculate(eda_data, return_df=True, approve_sparsity=True)\n\n\ndef test_feature_collection_invalid_series_names(dummy_data):\n fd = FeatureDescriptor(\n function=FuncWrapper(np.min, output_names=[\"min\"]),\n series_name=\"EDA__col\", # invalid name, no '__' allowed\n window=\"10s\",\n stride=\"5s\",\n )\n\n with pytest.raises(Exception):\n fc = FeatureCollection(feature_descriptors=fd)\n\n fd = FeatureDescriptor(\n function=FuncWrapper(np.min, output_names=[\"min\"]),\n series_name=\"EDA|col\", # invalid name, no '|' allowed\n window=\"10s\",\n stride=\"5s\",\n )\n\n with pytest.raises(Exception):\n fc = FeatureCollection(feature_descriptors=fd)\n\n\ndef test_feature_collection_invalid_feature_output_names(dummy_data):\n fd = FeatureDescriptor(\n function=FuncWrapper(np.max, output_names=[\"max|feat\"]),\n series_name=\"EDA\",\n window=\"10s\",\n stride=\"5s\",\n )\n\n # this should work, no error should be raised\n fc = FeatureCollection(feature_descriptors=fd)\n\n fd = FeatureDescriptor(\n function=FuncWrapper(np.max, output_names=[\"max__feat\"]),\n # invalid output_name, no '__' allowed\n series_name=\"EDA\",\n window=\"10s\",\n stride=\"5s\",\n )\n\n with pytest.raises(Exception):\n fc = FeatureCollection(feature_descriptors=fd)\n\n\n### Test various feature descriptor functions\n\n\ndef test_one_to_many_feature_collection(dummy_data):\n def quantiles(sig: pd.Series) -> Tuple[float, float, float]:\n return np.quantile(sig, q=[0.1, 0.5, 0.9])\n\n q_func = FuncWrapper(quantiles, output_names=[\"q_0.1\", \"q_0.5\", \"q_0.9\"])\n fd = 
FeatureDescriptor(q_func, series_name=\"EDA\", window=\"5s\", stride=\"2.5s\")\n fc = FeatureCollection(fd)\n\n res_df = fc.calculate(dummy_data, return_df=True)\n assert res_df.shape[1] == 3\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 2.5\n window_s = 5\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n\n expected_output_names = [\n \"EDA__q_0.1__w=5s_s=2.5s\",\n \"EDA__q_0.5__w=5s_s=2.5s\",\n \"EDA__q_0.9__w=5s_s=2.5s\",\n ]\n assert set(res_df.columns.values) == set(expected_output_names)\n assert (res_df[expected_output_names[0]] != res_df[expected_output_names[1]]).any()\n assert (res_df[expected_output_names[0]] != res_df[expected_output_names[2]]).any()\n\n\ndef test_many_to_one_feature_collection(dummy_data):\n def abs_mean_diff(sig1: pd.Series, sig2: pd.Series) -> float:\n # Note that this func only works when sig1 and sig2 have the same length\n return np.mean(np.abs(sig1 - sig2))\n\n fd = FeatureDescriptor(\n abs_mean_diff, series_name=(\"EDA\", \"TMP\"), window=\"5s\", stride=\"2.5s\"\n )\n fc = FeatureCollection(fd)\n\n assert set(fc.get_required_series()) == set([\"EDA\", \"TMP\"])\n\n res_df = fc.calculate(dummy_data, return_df=True)\n assert res_df.shape[1] == 1\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 2.5\n window_s = 5\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n\n expected_output_name = \"EDA|TMP__abs_mean_diff__w=5s_s=2.5s\"\n assert res_df.columns.values[0] == expected_output_name\n\n\ndef test_many_to_many_feature_collection(dummy_data):\n def quantiles_abs_diff(\n sig1: pd.Series, sig2: pd.Series\n ) -> Tuple[float, float, float]:\n return np.quantile(np.abs(sig1 - sig2), q=[0.1, 0.5, 0.9])\n\n q_func = FuncWrapper(\n quantiles_abs_diff,\n output_names=[\"q_0.1_abs_diff\", \"q_0.5_abs_diff\", \"q_0.9_abs_diff\"],\n )\n fd = FeatureDescriptor(\n q_func, series_name=(\"EDA\", \"TMP\"), window=\"5s\", stride=\"13.5s\"\n )\n fc = FeatureCollection(fd)\n\n assert set(fc.get_required_series()) == set([\"EDA\", \"TMP\"])\n\n res_df = fc.calculate(dummy_data, return_df=True)\n assert res_df.shape[1] == 3\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 13.5\n window_s = 5\n assert len(res_df) == math.ceil((int(len(dummy_data) / (1 / freq)) - window_s) / stride_s)\n\n expected_output_names = [\n \"EDA|TMP__q_0.1_abs_diff__w=5s_s=13.5s\",\n \"EDA|TMP__q_0.5_abs_diff__w=5s_s=13.5s\",\n \"EDA|TMP__q_0.9_abs_diff__w=5s_s=13.5s\",\n ]\n assert set(res_df.columns.values) == set(expected_output_names)\n assert (res_df[expected_output_names[0]] != res_df[expected_output_names[1]]).any()\n assert (res_df[expected_output_names[0]] != res_df[expected_output_names[2]]).any()\n\n\ndef test_cleared_pools_when_feature_error(dummy_data):\n def mean_func(s: np.ndarray):\n assert 0 == 1 # make the feature function throw an error\n return np.mean(s)\n\n fc = FeatureCollection(\n MultipleFeatureDescriptors(\n mean_func, [\"EDA\", \"ACC_x\"], [\"30s\", \"45s\", \"1min\", \"2min\"], \"15s\"\n )\n )\n\n for n_jobs in [0, None]:\n with pytest.raises(Exception):\n out = fc.calculate(dummy_data, return_df=True, n_jobs=n_jobs)\n\n # Now fix the error in the feature function & make sure that pools are cleared,\n # i.e., the same error is not thrown again.\n def mean_func(s: np.ndarray):\n return np.mean(s)\n\n fc = FeatureCollection(\n 
MultipleFeatureDescriptors(\n mean_func, [\"EDA\", \"ACC_x\"], [\"30s\", \"45s\", \"1min\", \"2min\"], \"15s\"\n )\n )\n\n for n_jobs in [0, None]:\n out = fc.calculate(dummy_data, return_df=True, n_jobs=n_jobs)\n assert out.shape[0] > 0\n assert out.shape[1] == 2 * 4\n\n\ndef test_series_funcs(dummy_data):\n def min_max_time_diff(x: pd.Series, mult=1):\n diff = x.index.to_series().diff().dt.total_seconds() # .max()\n return diff.min() * mult, diff.max() * mult\n\n def time_diff(x: pd.Series):\n return (x.index[-1] - x.index[0]).total_seconds()\n\n def linear_trend_timewise(x):\n \"\"\"\n Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to\n length of the time series minus one.\n This feature uses the index of the time series to fit the model, which must be of a datetime\n dtype.\n The parameters control which of the characteristics are returned.\n Possible extracted attributes are \"pvalue\", \"rvalue\", \"intercept\", \"slope\", \"stderr\", see the documentation of\n linregress for more information.\n\n :param x: the time series to calculate the feature of. The index must be datetime.\n :type x: pandas.Series\n\n :param param: contains dictionaries {\"attr\": x} with x an string, the attribute name of the regression model\n :type param: list\n\n :return: the different feature values\n :return type: list\n \"\"\"\n ix = x.index\n\n # Get differences between each timestamp and the first timestamp in seconds.\n # Then convert to hours and reshape for linear regression\n times_seconds = (ix - ix[0]).total_seconds()\n times_hours = np.asarray(times_seconds / float(3600))\n\n linReg = linregress(times_hours, x.values)\n return linReg.slope, linReg.intercept, linReg.rvalue\n\n fc = FeatureCollection(\n MultipleFeatureDescriptors(\n functions=[\n np.mean,\n np.sum,\n len,\n FuncWrapper(\n min_max_time_diff,\n input_type=pd.Series,\n output_names=[\"min_time_diff\", \"max_time_diff\"],\n mult=3,\n ),\n FuncWrapper(\n linear_trend_timewise,\n input_type=pd.Series,\n output_names=[\n \"timewise_regr_slope\",\n \"timewise_regr_intercept\",\n \"timewise_regr_r_value\",\n ],\n ),\n FuncWrapper(time_diff, input_type=pd.Series),\n FuncWrapper(np.max, input_type=np.array),\n ],\n series_names=[\"EDA\", \"TMP\"],\n windows=\"5s\",\n strides=\"2.5s\",\n )\n )\n\n assert set(fc.get_required_series()) == set([\"EDA\", \"TMP\"])\n downscale_factor = 20\n res_df = fc.calculate(\n dummy_data[: int(len(dummy_data) / downscale_factor)], return_df=True\n )\n # Note: testing this single-threaded allows the code-cov to fire\n res_df_2 = fc.calculate(\n dummy_data[: int(len(dummy_data) / downscale_factor)],\n return_df=True,\n n_jobs=1,\n show_progress=True,\n )\n assert res_df.shape[1] == 2 * 10\n freq = pd.to_timedelta(pd.infer_freq(dummy_data.index)) / np.timedelta64(1, \"s\")\n stride_s = 2.5\n window_s = 5\n assert len(res_df) == math.ceil(\n (int(len(dummy_data) / downscale_factor / (1 / freq)) - window_s) / stride_s\n )\n\n expected_output_names = [\n \"EDA|TMP__q_0.1_abs_diff__w=5s_s=2.5s\",\n \"EDA|TMP__q_0.5_abs_diff__w=5s_s=2.5s\",\n \"EDA|TMP__q_0.9_abs_diff__w=5s_s=2.5s\",\n ]\n assert \"EDA__min_time_diff__w=5s_s=2.5s\" in res_df.columns\n assert \"EDA__amax__w=5s_s=2.5s\" in res_df.columns\n assert all(\n res_df[\"EDA__min_time_diff__w=5s_s=2.5s\"]\n == res_df[\"EDA__max_time_diff__w=5s_s=2.5s\"]\n )\n assert all(res_df[\"EDA__min_time_diff__w=5s_s=2.5s\"] == 0.25 * 3)\n\n\ndef test_categorical_funcs():\n categories = [\"a\", \"b\", \"c\", 
\"another_category\", 12]\n categorical_data = pd.Series(\n data=np.random.choice(categories, 1000),\n index=pd.date_range(\"2021-07-01\", freq=\"1h\", periods=1000),\n ).rename(\"cat\")\n\n # drop some data, as we don't make frequency assumptions\n categorical_data = categorical_data.drop(\n np.random.choice(categorical_data.index, 200, replace=False)\n )\n\n def count_categories(arr, categories):\n return [sum(arr.astype(str) == str(cat)) for cat in categories]\n\n cat_count = FuncWrapper(\n func=count_categories,\n output_names=[\"count-\" + str(cat) for cat in categories],\n # kwargs\n categories=categories,\n )\n\n # construct the collection in which you add all your features\n fc = FeatureCollection(\n feature_descriptors=[\n FeatureDescriptor(\n function=cat_count, series_name=\"cat\", window=\"1day\", stride=\"12hours\"\n )\n ]\n )\n\n for n_jobs in [0, None]:\n out = fc.calculate(\n data=categorical_data, approve_sparsity=True, n_jobs=n_jobs, return_df=True\n )\n for c in categories:\n assert f\"cat__count-{str(c)}__w=1D_s=12h\" in out.columns\n\n\ndef test_time_based_features():\n # create a time column\n time_value_series = (\n pd.Series(\n index=pd.date_range(\"2021-07-01\", freq=\"1h\", periods=1000), dtype=object\n )\n .index.to_series()\n .rename(\"time\")\n )\n\n # drop some data, as we don't make frequency assumptions\n time_value_series = time_value_series.drop(\n np.random.choice(time_value_series.index, 250, replace=False)\n )\n\n def std_hour(time_arr):\n # calcualtes the std in seconds\n if time_arr.shape[0] <= 3:\n return np.NaN\n return np.std(\n np.diff(time_arr).astype(\"timedelta64[us]\").astype(np.int64)\n / (60 * 60 * 1e6)\n )\n\n fc = FeatureCollection(\n feature_descriptors=[\n FeatureDescriptor(\n function=std_hour,\n series_name=\"time\",\n window=\"6 hours\",\n stride=\"4 hours\",\n )\n ]\n )\n out = fc.calculate(\n data=time_value_series, approve_sparsity=True, n_jobs=1, return_df=True\n )\n assert out.columns[0] == \"time__std_hour__w=6h_s=4h\"\n\n out = fc.calculate(\n data=time_value_series, approve_sparsity=True, n_jobs=None, return_df=True\n )\n assert out.columns[0] == \"time__std_hour__w=6h_s=4h\"\n\n\ndef test_pass_by_value(dummy_data):\n def try_change_view(series_view: np.ndarray):\n series_view[:5] = 0 # update the view -> error!\n return np.mean(series_view)\n\n fc_gsr = FeatureCollection(\n [\n FeatureDescriptor(\n try_change_view,\n \"EDA\",\n \"30s\",\n \"15s\",\n )\n ]\n )\n\n for n_jobs in [0, None]:\n with pytest.raises(Exception):\n out = fc_gsr.calculate(dummy_data, return_df=True, n_jobs=n_jobs)\n\n\ndef test_datatype_retention(dummy_data):\n for dtype in [np.float16, np.float32, np.int64, np.int32]:\n\n def mean_dtype(series_view: np.ndarray):\n return np.mean(series_view).astype(dtype)\n\n fc_gsr = FeatureCollection(\n [\n FeatureDescriptor(\n mean_dtype,\n \"EDA\",\n \"30s\",\n \"15s\",\n )\n ]\n )\n for n_jobs in [0, 1, 2, None]:\n print(dtype, n_jobs)\n out = fc_gsr.calculate(dummy_data, return_df=True, n_jobs=n_jobs)\n assert out.values.dtype == dtype\n\n\n### Test the various input data types combinations\n\ndef test_time_based_features_sequence_based_data_error(dummy_data):\n df_eda = dummy_data['EDA'].reset_index()\n df_tmp = dummy_data['TMP'].reset_index()\n\n fs = 4 # The sample frequency in Hz\n fc = FeatureCollection(\n feature_descriptors=[\n FeatureDescriptor(np.min, 'EDA', f'{250}s', f'{75}s'),\n FeatureDescriptor(np.min, 'TMP', 250 * fs, 75 * fs)\n ]\n )\n\n # cannot use time-based win-stride configurations on 
sequence based data\n with pytest.raises(RuntimeError):\n fc.calculate([df_eda, df_tmp])\n\n with pytest.raises(RuntimeError):\n fc.calculate([df_eda, df_tmp], n_jobs=0)\n\n\ndef test_mixed_featuredescriptors_time_data(dummy_data):\n df_eda = dummy_data['EDA']\n df_tmp = dummy_data['TMP']\n\n fs = 4 # The sample frequency in Hz\n with warnings.catch_warnings(record=True) as w:\n # generate the warning by adding mixed FeatureDescriptors\n fc = FeatureCollection(\n feature_descriptors=[\n # same data range -> so when we perform an outer merge we do not suspect a\n # nan error\n FeatureDescriptor(np.min, 'EDA', f'{250}s', f'{75}s'),\n FeatureDescriptor(np.min, 'EDA', 250 * fs, 75 * fs)\n ]\n )\n assert len(w) == 1\n assert all([issubclass(warn.category, RuntimeWarning) for warn in w])\n assert all([\"There are multiple FeatureDescriptor window-stride datatypes\" in str(warn) for warn in w])\n\n\n with warnings.catch_warnings(record=True) as w:\n # generate the warning by adding mixed FeatureDescriptors\n fc.add(FeatureDescriptor(np.std, 'EDA', 250 * fs, 75 * fs))\n assert len(w) == 1\n assert all([issubclass(warn.category, RuntimeWarning) for warn in w])\n assert all([\"There are multiple FeatureDescriptor window-stride datatypes\" in str(warn) for warn in w])\n\n\n\n\n out = fc.calculate([df_eda, df_tmp], return_df=True)\n assert all(out.notna().sum(axis=0))\n\n### Test 'error' use-cases\n\n\ndef test_type_error_add_feature_collection(dummy_data):\n fd = FeatureDescriptor(\n function=np.sum,\n series_name=\"EDA\",\n window=\"5s\",\n stride=\"2.5s\",\n )\n fc = FeatureCollection(feature_descriptors=fd)\n\n with pytest.raises(TypeError):\n fc.add(np.sum)\n\n\ndef test_one_to_many_error_feature_collection(dummy_data):\n def quantiles(sig: pd.Series) -> Tuple[float, float, float]:\n return np.quantile(sig, q=[0.1, 0.5, 0.9])\n\n # quantiles should be wrapped in a FuncWrapper\n fd = FeatureDescriptor(quantiles, series_name=\"EDA\", window=\"5s\", stride=\"2.5s\")\n fc = FeatureCollection(fd)\n\n with pytest.raises(Exception):\n fc.calculate(dummy_data)\n\n\ndef test_one_to_many_wrong_np_funcwrapper_error_feature_collection(dummy_data):\n def quantiles(sig: pd.Series) -> Tuple[float, float, float]:\n return np.quantile(sig, q=[0.1, 0.5, 0.9])\n\n # Wrong number of output_names in func wrapper\n q_func = FuncWrapper(quantiles, output_names=[\"q_0.1\", \"q_0.5\"])\n fd = FeatureDescriptor(q_func, series_name=\"EDA\", window=\"5s\", stride=\"2.5s\")\n fc = FeatureCollection(fd)\n\n with pytest.raises(Exception):\n fc.calculate(dummy_data)\n\n\ndef test_many_to_one_error_feature_collection(dummy_data):\n def abs_mean_diff(sig1: pd.Series, sig2: pd.Series) -> float:\n # Note that this func only works when sig1 and sig2 have the same length\n return np.mean(np.abs(sig1 - sig2))\n\n # Give wrong nb of series names in tuple\n fd = FeatureDescriptor(\n abs_mean_diff, series_name=(\"EDA\", \"TMP\", \"ACC_x\"), window=\"5s\", stride=\"2.5s\"\n )\n fc = FeatureCollection(fd)\n\n with pytest.raises(Exception):\n fc.calculate(dummy_data)\n\n\ndef test_error_same_output_feature_collection(dummy_data):\n def sum_func(sig: np.ndarray) -> float:\n return sum(sig)\n\n mfd = MultipleFeatureDescriptors(\n functions=[sum_func, FuncWrapper(np.max), np.min],\n series_names=[\"EDA\", \"TMP\"],\n windows=[\"5s\", \"7s\", \"5s\"], # Two times 5s\n strides=\"2.5s\",\n )\n with pytest.raises(AssertionError):\n fc = FeatureCollection(feature_descriptors=mfd)\n\n\ndef test_bound_method(dummy_data):\n fc = FeatureCollection(\n 
feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=[480, 1000],\n strides=480,\n functions=[np.mean, np.min, np.max, np.std],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n\n df_tmp = dummy_data[\"TMP\"].reset_index(drop=True)\n df_eda = dummy_data[\"EDA\"].reset_index(drop=True).astype(float)\n df_tmp.index += 2\n\n for bound_method in [\"inner\", \"outer\", \"inner-outer\"]:\n fc.calculate(\n [df_tmp, df_eda],\n window_idx=\"middle\",\n return_df=True,\n approve_sparsity=True,\n bound_method=bound_method,\n )\n\n with pytest.raises(ValueError):\n fc.calculate(\n [df_tmp, df_eda],\n window_idx=\"end\",\n return_df=True,\n bound_method=\"invalid name\",\n )\n\n\ndef test_bound_method_uneven_index_numeric(dummy_data):\n fc = FeatureCollection(\n feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=1000,\n strides=500,\n functions=[np.min, np.max],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n\n df_tmp_ = dummy_data[\"TMP\"].reset_index(drop=True)\n df_eda_ = dummy_data[\"EDA\"].reset_index(drop=True)\n df_eda_.index = df_eda_.index.astype(float)\n df_eda_.index += 2.33\n\n latest_start = df_eda_.index[0]\n earliest_start = df_tmp_.index[0]\n\n out_inner = fc.calculate(\n [df_tmp_, df_eda_], bound_method=\"inner\", window_idx=\"begin\", return_df=True\n )\n assert out_inner.index[0] == latest_start\n\n out_outer = fc.calculate(\n [df_tmp_, df_eda_], bound_method=\"outer\", window_idx=\"begin\", return_df=True\n )\n assert out_outer.index[0] == earliest_start\n\n\ndef test_bound_method_uneven_index_datetime(dummy_data):\n fc = FeatureCollection(\n feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=\"5min\",\n strides=\"3min\",\n functions=[np.min, np.max],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n\n df_tmp = dummy_data[\"TMP\"]\n df_eda = dummy_data[\"EDA\"]\n df_eda.index += pd.Timedelta(seconds=10)\n\n latest_start = df_eda.index[0]\n earliest_start = df_tmp.index[0]\n\n out_inner = fc.calculate(\n [df_tmp, df_eda], bound_method=\"inner\", window_idx=\"begin\", return_df=True\n )\n assert out_inner.index[0] == latest_start\n\n out_outer = fc.calculate(\n [df_tmp, df_eda], bound_method=\"outer\", window_idx=\"begin\", return_df=True\n )\n assert out_outer.index[0] == earliest_start\n\n\ndef test_bound_method_uneven_index_datetime_sequence(dummy_data):\n fc = FeatureCollection(\n feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=300, # Sample based -> TimeIndexSampleStridedRolling\n strides=180,\n functions=[np.min, np.max],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n\n df_tmp = dummy_data[\"TMP\"]\n df_eda = dummy_data[\"EDA\"]\n df_eda.index += pd.Timedelta(seconds=10)\n\n latest_start = df_eda.index[0]\n earliest_start = df_tmp.index[0]\n\n out_inner = fc.calculate(\n [df_tmp, df_eda], bound_method=\"inner\", window_idx=\"begin\", return_df=True\n )\n assert out_inner.index[0] == latest_start\n\n out_outer = fc.calculate(\n [df_tmp, df_eda], bound_method=\"outer\", window_idx=\"begin\", return_df=True\n )\n assert out_outer.index[0] == earliest_start\n\n\ndef test_not_sorted_fc(dummy_data):\n fc = FeatureCollection(\n feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=[480, 1000],\n strides=480,\n functions=[np.min, np.max],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n\n df_tmp = dummy_data[\"TMP\"].reset_index(drop=True)\n df_eda = dummy_data[\"EDA\"].reset_index(drop=True).sample(frac=1)\n assert not df_eda.index.is_monotonic_increasing\n out = fc.calculate([df_tmp, df_eda], window_idx=\"end\", return_df=True)\n 
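    # Minimal sketch (an aside, assuming fc.calculate() sorts series internally without mutating the caller's data): the merged output should come back with a monotonically increasing index even though df_eda was shuffled.\n    assert out.index.is_monotonic_increasing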
assert not df_eda.index.is_monotonic_increasing\n\n df_eda.index = df_eda.index.astype(float)\n assert not df_eda.index.is_monotonic_increasing\n out = fc.calculate([df_tmp, df_eda], window_idx=\"end\", return_df=True)\n assert not df_eda.index.is_monotonic_increasing\n\n\ndef test_serialization(dummy_data):\n fc = FeatureCollection(\n feature_descriptors=[\n MultipleFeatureDescriptors(\n windows=[480, 1000],\n strides=480,\n functions=[np.mean, np.min, np.max, np.std],\n series_names=[\"TMP\", \"EDA\"],\n )\n ]\n )\n\n df_tmp = dummy_data[\"TMP\"].reset_index(drop=True)\n df_eda = dummy_data[\"EDA\"].reset_index(drop=True)\n out = fc.calculate([df_tmp, df_eda], window_idx=\"end\", return_df=True)\n col_order = out.columns\n\n save_path = Path(\"featurecollection.pkl\")\n if save_path.exists():\n os.remove(save_path)\n assert not save_path.exists()\n fc.serialize(save_path)\n assert save_path.exists() and save_path.is_file()\n\n fc_deserialized = dill.load(open(save_path, \"rb\"))\n out_deserialized = fc_deserialized.calculate([df_tmp, df_eda], return_df=True)\n assert np.allclose(\n out[col_order].values, out_deserialized[col_order].values, equal_nan=True\n )\n os.remove(save_path)\n","sub_path":"tests/test_features_feature_collection.py","file_name":"test_features_feature_collection.py","file_ext":"py","file_size_in_byte":37857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"187181617","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\n\r\nroot = Tk()\r\nroot.title(\"Calculator\")\r\n\r\nbttn_list = [\r\n \"7\", \"8\", \"9\", \"/\",\r\n \"4\", \"5\", \"6\", \"*\",\r\n \"1\", \"2\", \"3\", \"-\",\r\n \"0\", \".\", \"=\", \"+\"]\r\n\r\nr = 1\r\nc = 0\r\nfor i in bttn_list:\r\n rel = \"\"\r\n cmd = lambda x = i: calc(x)\r\n ttk.Button(root, text = i, command = cmd, width = 10).grid(row = r, column = c)\r\n c += 1\r\n if c > 3:\r\n c = 0\r\n r += 1\r\n\r\ncalc_entry = Entry(root, width = 40)\r\ncalc_entry.grid(row = 0, column = 0, columnspan = 5)\r\n\r\n\r\ndef calc(key):\r\n global memory\r\n if key == \"=\":\r\n try:\r\n result = eval(calc_entry.get())\r\n calc_entry.insert(END, \"=\" + str(result))\r\n except:\r\n calc_entry.insert(END, \"Error!\")\r\n messagebox.showerror(\"Error!\", \"Check the correctness of data\")\r\n else:\r\n if \"=\" in calc_entry.get():\r\n calc_entry.delete(0, END)\r\n calc_entry.insert(END, key)\r\n\r\nroot.mainloop()","sub_path":"hw_20_calculator.py","file_name":"hw_20_calculator.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333359411","text":"#!/usr/bin/env python3\n\nfrom common import *\n\nDATETIME = sys.argv[1] # format YYYYMMDDhh, e.g. 
2017050900\nDATA_DIR = '../prep2_filt'\nOUTPUT_DIR = '../agg_filt'\n\nINTERACTION_THD = 2 # Minimum count of interaction within 24 hours\nLANG_FILT = ('en',)\n\n# Tweet Columns:\n # 'created_at',\n # 'text',\n # 'truncated',\n # 'lang',\n # 'source',\n # 'quoted_status_id',\n # 'in_reply_to_status_id',\n # 'in_reply_to_user_id',\n # 'retweeted_status_id',\n # 'retweeted_user_id',\n # 'hashtags',\n # 'urls',\n # 'mentioned_users',\n # 'has_media',\n # 'user_id',\n # 'place_id',\n # 'coord_lon',\n # 'coord_lat'\n\n# User Columns:\n # 'sampled_at',\n # 'created_at',\n # 'name',\n # 'screen_name',\n # 'description',\n # 'location',\n # 'verified',\n # 'default_profile',\n # 'default_profile_image',\n # 'geo_enabled',\n # 'is_translator',\n # 'lang',\n # 'time_zone',\n # 'protected',\n # 'url',\n # 'favourites_count',\n # 'friends_count',\n # 'followers_count',\n # 'listed_count',\n # 'statuses_count'\n\n# Place Columns:\n # 'sampled_at',\n # 'name',\n # 'full_name',\n # 'place_type',\n # 'country_code',\n # 'url'\n\ndt_start = pd.to_datetime(DATETIME + '00')\ndt_end = dt_start + pd.to_timedelta(1,'h')\nDATETIME_NEXT = (dt_start + pd.to_timedelta(1,'h')).strftime('%Y%m%d%H')\n\n#-- Aggregate tweets --#\nt0 = time.clock()\nfilelist = glob.glob(DATA_DIR + os.sep + \"*_%s*_tweets.gz\" % dt_start.strftime('%Y%m%d%H')) + (glob.glob(DATA_DIR + os.sep + \"*_%s*_tweets.gz\" % DATETIME_NEXT) if DATETIME_NEXT != dt_start.strftime('%Y%m%d%H') else [])\nprint(\"%s: aggregate tweets from %d files ... \" % (DATETIME,len(filelist)))\ndata = []\nfor filename in filelist:\n try:\n datai = read_prep(filename)\n except:\n continue\n datai['created_at'] = pd.to_datetime(datai.created_at)\n mask = (datai.created_at>=dt_start) & (datai.created_at<dt_end) & (datai.interaction>=INTERACTION_THD) & (datai.lang.isin(LANG_FILT)) # 'interaction' is an assumed column name; the identifier between '<' and '>' was lost in extraction\n if mask.sum() > 0:\n datai = datai[mask]\n data.append(datai)\n\ndata = pd.concat(data,0)\nwrite_data(data,dt_start.strftime('%Y%m%d%H'),name='tweets')\ndel data\nprint(time.clock() - t0)\n\n#-- Aggregate users --#\nt0 = time.clock()\nfilelist = glob.glob(DATA_DIR + os.sep + \"*_%s*_users.gz\" % dt_start.strftime('%Y%m%d%H')) + glob.glob(DATA_DIR + os.sep + \"*_%s*_users.gz\" % DATETIME_NEXT)\nprint(\"%s: aggregate users from %d files ... \" % (DATETIME,len(filelist)))\ndata = None\nfor filename in filelist:\n # t1 = time.clock()\n try:\n datai = read_prep(filename,index_col=None)\n except:\n continue\n datai['sampled_at'] = pd.to_datetime(datai.sampled_at)\n datai['created_at'] = pd.to_datetime(datai.created_at)\n mask = (datai.sampled_at>=dt_start) & (datai.sampled_at<dt_end)\n if mask.sum() > 0:\n if data is None:\n data = datai[mask]\n else:\n data = pd.concat([data,datai[mask]],0)\n data.sort_values('sampled_at',inplace=True)\n data.drop_duplicates(subset='id',keep='last',inplace=True)\n del datai\n # sys.stdout.write(\"\\t%s: %d in %f secs\\n\" % (filename,len(data),time.clock()-t1))\n\ndata.set_index('id',inplace=True)\nwrite_data(data,dt_start.strftime('%Y%m%d%H'),name='users')\nprint(time.clock() - t0)\n\n#-- Aggregate places --#\nt0 = time.clock()\nfilelist = glob.glob(DATA_DIR + os.sep + \"*_%s*_places.gz\" % dt_start.strftime('%Y%m%d%H')) + glob.glob(DATA_DIR + os.sep + \"*_%s*_places.gz\" % DATETIME_NEXT)\nprint(\"%s: aggregate places from %d files ... 
\" % (DATETIME,len(filelist)))\ndata = None\nfor filename in filelist:\n # t1 = time.clock()\n datai = read_prep(filename,index_col=None)\n datai['sampled_at'] = pd.to_datetime(datai.sampled_at)\n mask = (datai.sampled_at>=dt_start) & (datai.sampled_at<dt_end)\n if mask.sum() > 0:\n if data is None:\n data = datai[mask]\n else:\n data = pd.concat([data,datai[mask]],0)\n data.sort_values('sampled_at',inplace=True)\n data.drop_duplicates(subset='id',keep='last',inplace=True)\n del datai\n # sys.stdout.write(\"\\t%s: %d in %f secs\\n\" % (filename,len(data),time.clock()-t1))\n\ndata.set_index('id',inplace=True)\nwrite_data(data,dt_start.strftime('%Y%m%d%H'),name='places')\nprint(time.clock() - t0)\n","sub_path":"agg.py","file_name":"agg.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"85905272","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport numpy as np\nimport cv2\nimport h5py\nfrom auxiliary.laserscan import LaserScan, SemLaserScan\n\nimport glob\nimport os\nimport yaml\n\n\nif __name__ == '__main__':\n \n seq_id = sys.argv[1]\n\n laser_scan_object = LaserScan()\n CFG = yaml.load(open('config/semantic-kitti.yaml', 'r'))\n color_dict = CFG[\"color_map\"]\n learning_map = CFG[\"learning_map\"]\n color_map_one_shot = {}\n for class_id, color in color_dict.items():\n if learning_map[class_id] in color_map_one_shot:\n continue\n else:\n color_map_one_shot[learning_map[class_id]] = color\n nclasses = len(color_dict)\n sem_laser_scan_object = SemLaserScan(nclasses, color_dict)\n #gt_labels = [filename for filename in sorted(glob.glob(os.path.join('/home/ayush/Downloads/sequence01/sequences/' + seq_id + '/labels/', '*.label')))]\n scans = [filename for filename in sorted(glob.glob(os.path.join('/home/dewan/data_training/dataset/sequences/' + seq_id + '/velodyne/', '*.bin')))]\n\n training_images = np.zeros((len(scans), 64, 1024, 5), np.float32)\n #gt_images = np.zeros((len(scans), 64, 1024), np.float32)\n #training_images = np.zeros((100, 64, 1024, 5), np.float32)\n #gt_images = np.zeros((100, 64, 1024), np.float32)\n\n #training_data = zip(scans, gt_labels)\n counter = 0\n for scan_filename in scans:\n sem_laser_scan_object.open_scan(scan_filename)\n #sem_laser_scan_object.open_label(gt_filename)\n sem_laser_scan_object.do_range_projection()\n #sem_laser_scan_object.do_label_projection()\n\n range_image = sem_laser_scan_object.proj_range\n intensity = sem_laser_scan_object.proj_remission\n xyz = sem_laser_scan_object.proj_xyz\n\n x = xyz[:, :, 0]\n y = xyz[:, :, 1]\n z = xyz[:, :, 2]\n\n x = np.expand_dims(x, axis=2)\n y = np.expand_dims(y, axis=2)\n z = np.expand_dims(z, axis=2)\n\n depth = (range_image * 500)/65536\n depth = cv2.convertScaleAbs(depth, alpha=255)\n depth = np.float32(depth)\n depth = np.expand_dims(depth, axis=2)\n\n\n intensity = intensity + 1.\n intensity /= 2.0\n intensity = cv2.convertScaleAbs(intensity, alpha=255)\n intensity = np.float32(intensity)\n intensity = np.expand_dims(intensity, 2)\n\n training_image = depth\n training_image = np.concatenate((training_image, intensity), axis=2)\n training_image = np.concatenate((training_image, x), axis=2)\n training_image = np.concatenate((training_image, y), axis=2)\n training_image = np.concatenate((training_image, z), axis=2)\n # label = sem_laser_scan_object.proj_sem_label\n #\n\n\n training_images[counter, :, :, :] = training_image\n # gt_images[counter, :, :] = label\n # label = np.expand_dims(label, 2)\n # label_colorized = np.zeros((64, 1024, 
3), np.uint8)\n # #print(np.min(label))\n # #print(np.max(label))\n #\n # for class_id, color in color_map_one_shot.items():\n # mask = np.all(label == class_id, axis=-1)\n # label_colorized[mask] = color\n # #cv2.namedWindow('image')\n # #cv2.imshow('image', np.float32(label == 1))\n # #cv2.waitKey()\n\n\n # label_colorized = cv2.cvtColor(label_colorized, cv2.COLOR_BGR2RGB)\n\n counter += 1\n #if counter > 99:\n # break\n filename = '/home/dewan/data_training/dataset/sequences/' + seq_id + '/training_data.hdf5'\n hf = h5py.File(filename, 'w')\n hf.create_dataset('data', data=training_images)\n #hf.create_dataset('label', data=gt_images)\n hf.close()\n \n\n\n","sub_path":"generate_rangeimage.py","file_name":"generate_rangeimage.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596199467","text":"\nimport numpy as np\n\n\ndef init_2048():\n grille=np.zeros((4,4))\n A=np.random.choice([2,2,2,2,2,2,2,2,2,4],2) # draw the two starting tiles according to their probabilities\n case1=np.random.choice([0,1,2,3],2)\n case2=np.random.choice([0,1,2,3],2)\n\n while case1[0]==case2[0] and case1[1]==case2[1]:\n case2=np.random.choice([0,1,2,3],2)# pick two different cells\n grille[case1[0]][case1[1]]=A[0]\n grille[case2[0]][case2[1]]=A[1]\n return grille\n\n\n\n\n\n\n","sub_path":"2048_2/initialisation.py","file_name":"initialisation.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330463779","text":"# Author: wang\nimport unittest\nimport HTMLTestRunner\n\n'''\n1. The discover method takes three parameters:\n-case_dir: the directory containing the test cases to run.\n-pattern: the rule for matching script names; casetest*.py matches every script whose name starts with casetest.\n-top_level_dir: the name of the top-level directory; the default of None is usually fine.\n2. The cases loaded by discover form a list collection, which has to be written into a list object testcase\nso that it can be executed with the run method of the TextTestRunner class in unittest.\n--stream: the file stream the test report is written to\n--title: the title of the test report\n--description: the description of the test report\n3. report_path is the path where the test report is stored\n'''\n\n\ndef all_case():\n # directory of the test cases to run\n #case_dir = 'F:\\\\PythonPro_2018\\\\github\\\\wangcheng\\\\Python_Study\\\\com03_unittest\\\\case'\n #case_dir = 'E:\\\\github\\\\wangcheng\\\\Python_Study\\\\com03_unittest\\\\case'\n case_dir = 'case'\n testcase = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(case_dir, pattern=\"test*.py\", top_level_dir=None)\n # # loop over the cases picked up by discover and add them to the test suite\n # for test_suite in discover:\n # for test_case in test_suite:\n # # add the case to testcase\n # testcase.addTests(test_case)\n testcase.addTests(discover) # load discover directly\n print(testcase)\n return testcase\n\n\nif __name__ == \"__main__\":\n # create the runner instance\n runner = unittest.TextTestRunner()\n #report_path = \"E:\\\\github\\\\wangcheng\\\\Python_Study\\\\com03_unittest\\\\report\\\\result.html\"\n report_path = \"report\\\\result.html\"\n fp = open(report_path, \"wb\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'这是我的自动化测试报告', description=u'用例执行情况:')\n\n # run all test cases\n runner.run(all_case())\n fp.close()\n","sub_path":"Python_Study/com03_unittest/run_all_case.py","file_name":"run_all_case.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200470067","text":"# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport warnings\nimport functools\n\nimport six\nimport paddle\nfrom paddle.fluid import core\nfrom paddle.fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy\nfrom paddle.fluid.data_feeder import check_type\nfrom paddle.fluid.dygraph.base import program_desc_tracing_guard, switch_to_static_graph\nfrom paddle.fluid.dygraph.dygraph_to_static.logging_utils import set_code_level, set_verbosity\nfrom paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticLayer, unwrap_decorators\nfrom paddle.fluid.dygraph.io import EXTRA_VAR_INFO_FILENAME, VARIABLE_FILENAME, TranslatedLayer\nfrom paddle.fluid.dygraph.layers import Layer\nfrom paddle.fluid.executor import Executor, scope_guard\nfrom paddle.fluid.framework import Block, ParamBase, Program, Variable\nfrom paddle.fluid.framework import _current_expected_place, _dygraph_guard, _dygraph_tracer\nfrom paddle.fluid.framework import dygraph_only, in_dygraph_mode\nfrom paddle.fluid.wrapped_decorator import wrap_decorator\n\n__all__ = [\n 'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level',\n 'set_verbosity', 'save', 'load', 'SaveLoadConfig'\n]\n\n\ndef create_program_from_desc(program_desc):\n program = Program()\n program.desc = program_desc\n program.blocks = [Block(program, 0)]\n program._sync_with_cpp()\n return program\n\n\ndef _extract_vars(inputs, result_list):\n if isinstance(inputs, Variable):\n result_list.append(inputs)\n elif isinstance(inputs, (list, tuple)):\n for var in inputs:\n _extract_vars(var, result_list)\n else:\n raise TypeError(\n \"The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received {}.\".\n format(type(inputs)))\n\n\ndef extract_vars(inputs):\n result_list = []\n _extract_vars(inputs, result_list)\n return result_list\n\n\ndef _dygraph_to_static_func_(dygraph_func):\n \"\"\"\n Converts imperative dygraph APIs into declarative function APIs. Decorator\n @dygraph_to_static_func only converts imperative dygraph APIs into\n declarative net-building APIs, which means it doesn't return immediate\n digital result as imperative mode. Users should handle Program and Executor\n by themselves.\n\n Note:\n This decorator is NOT our recommended way to transform imperative function\n to declarative function. We will remove this decorator after we finalize\n cleaning up code.\n\n Args:\n dygraph_func (callable): callable imperative function.\n\n Returns:\n Callable: converting imperative dygraph APIs into declarative\n net-building APIs.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n from paddle.fluid.dygraph.jit import dygraph_to_static_func\n\n @dygraph_to_static_func\n def func(x):\n if fluid.layers.mean(x) < 0:\n x_v = x - 1\n else:\n x_v = x + 1\n\n return x_v\n\n x = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='float64')\n\n x_v = func(x)\n exe = fluid.Executor(fluid.CPUPlace())\n out = exe.run(fetch_list=[x_v])\n print(out[0])\n # [[1. 1. 1.]\n # [1. 1. 1.]\n # [1. 1. 1.]]\n\n \"\"\"\n\n # TODO: remove this decorator after we finalize training API\n def __impl__(*args, **kwargs):\n program_translator = ProgramTranslator()\n if in_dygraph_mode() or not program_translator.enable_declarative:\n warnings.warn(\n \"The decorator 'dygraph_to_static_func' doesn't work in \"\n \"dygraph mode or set ProgramTranslator.enable to False. \"\n \"We will just return dygraph output.\")\n return dygraph_func(*args, **kwargs)\n static_func = program_translator.get_func(dygraph_func)\n return static_func(*args, **kwargs)\n\n return __impl__\n\n\ndygraph_to_static_func = wrap_decorator(_dygraph_to_static_func_)\n\n\ndef copy_decorator_attrs(original_func, decorated_obj):\n \"\"\"\n Copies some necessary attributes from original function into decorated function.\n\n Args:\n original_func(callable): the original decorated function.\n decorated_obj(StaticLayer): the target decorated StaticLayer object.\n \"\"\"\n decorator_name = \"declarative\"\n\n decorated_obj.__name__ = original_func.__name__\n decorated_obj._decorator_name = decorator_name\n decorated_obj.__wrapped__ = original_func\n decorated_obj.__doc__ = original_func.__doc__\n if hasattr(original_func, \"__module__\"):\n decorated_obj.__module__ = original_func.__module__\n\n return decorated_obj\n\n\ndef declarative(function=None, input_spec=None):\n \"\"\"\n Converts imperative dygraph APIs into declarative function APIs. Decorator\n @declarative handles the Program and Executor of static mode and returns\n the result as dygraph Tensor(s). Users could use the returned dygraph\n Tensor(s) to do imperative training, inference, or other operations. If the\n decorated function calls other imperative function, the called one will be\n converted into declarative function as well.\n\n Args:\n function (callable): callable imperative function.\n input_spec(list[InputSpec]): list of InputSpec to specific the shape/dtype/name\n information of each input Tensor.\n\n Returns:\n Tensor(s): containing the numerical result.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n from paddle.fluid.dygraph.jit import declarative\n\n fluid.enable_dygraph()\n\n @declarative\n def func(x):\n x = fluid.dygraph.to_variable(x)\n if fluid.layers.mean(x) < 0:\n x_v = x - 1\n else:\n x_v = x + 1\n return x_v\n\n x = np.ones([1, 2])\n x_v = func(x)\n print(x_v.numpy()) # [[2. 2.]]\n\n \"\"\"\n\n def decorated(python_func):\n \"\"\"\n Decorates a python function into a StaticLayer object.\n \"\"\"\n # Step 1. unwrap the function if it is already decorated.\n _, python_func = unwrap_decorators(python_func)\n\n # Step 2. 
copy some attributes from original python function.\n static_layer = copy_decorator_attrs(\n original_func=python_func,\n decorated_obj=StaticLayer(\n function=python_func, input_spec=input_spec))\n\n return static_layer\n\n # for usage: `declarative(foo, ...)`\n if function is not None:\n return decorated(function)\n\n # for usage: `@declarative`\n return decorated\n\n\nclass SaveLoadConfig(object):\n \"\"\"\n The additional configuration options may be used in function \n :ref:`api_imperative_jit_save` that save :ref:`api_imperative_TranslatedLayer` \n or used in function :ref:`api_imperative_jit_load` that \n load :ref:`api_imperative_TranslatedLayer` .\n \n Examples:\n 1. Using ``SaveLoadConfig`` when saving model\n\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n class SimpleNet(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNet, self).__init__()\n self._linear = nn.Linear(in_size, out_size)\n\n @paddle.jit.to_static\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n return z\n\n # enable dygraph mode\n paddle.disable_static() \n\n # train model\n net = SimpleNet(8, 8)\n adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())\n x = paddle.randn([4, 8], 'float32')\n for i in range(10):\n out = net(x)\n loss = paddle.tensor.mean(out)\n loss.backward()\n adam.step()\n adam.clear_grad()\n\n # use SaveLoadconfig when saving model\n model_path = \"simplenet.example.model\"\n config = paddle.SaveLoadConfig()\n config.model_filename = \"__simplenet__\"\n paddle.jit.save(\n layer=net,\n model_path=model_path,\n config=config)\n\n 2. Using ``SaveLoadConfig`` when loading model\n\n .. code-block:: python\n\n import paddle\n\n # enable dygraph mode\n paddle.disable_static() \n\n # use SaveLoadconfig when loading model\n model_path = \"simplenet.example.model\"\n config = paddle.SaveLoadConfig()\n config.model_filename = \"__simplenet__\"\n infer_net = paddle.jit.load(model_path, config=config)\n # inference\n x = paddle.randn([4, 8], 'float32')\n pred = infer_net(x)\n \"\"\"\n\n def __init__(self):\n self._output_spec = None\n self._model_filename = None\n self._params_filename = None\n self._separate_params = False\n # used for `paddle.load`\n self._keep_name_table = False\n\n # NOTE: Users rarely use following configs, so these configs are not open to users,\n # reducing user learning costs, but we retain the configuration capabilities\n\n # If True, programs are modified to only support direct inference deployment. \n # Otherwise,more information will be stored for flexible optimization and re-training. \n # Currently, only True is supported\n self._export_for_deployment = True\n\n # If True, It will save inference program only, and do not save params of Program\n self._program_only = False\n\n @property\n def output_spec(self):\n \"\"\"\n Selects the output targets of the saved model ( :ref:`api_imperative_TranslatedLayer` ).\n By default, all return variables of original Layer's forward function\n are kept as the output of the saved TranslatedLayer.\n\n The ``output_spec`` type should be list[Variable]. If the provided ``output_spec``\n list is not all output variables, the saved model will be pruned according to the\n given ``output_spec`` list.\n\n .. note::\n The ``output_spec`` is only used when saving model.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n class SimpleNet(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNet, self).__init__()\n self._linear = nn.Linear(in_size, out_size)\n\n @paddle.jit.to_static\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n loss = paddle.tensor.mean(z)\n return z, loss\n\n # enable dygraph mode\n paddle.disable_static() \n\n # train model\n net = SimpleNet(8, 8)\n adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())\n x = paddle.randn([4, 8], 'float32')\n for i in range(10):\n out, loss = net(x)\n loss.backward()\n adam.step()\n adam.clear_grad()\n\n # use SaveLoadconfig.output_spec\n model_path = \"simplenet.example.model.output_spec\"\n config = paddle.SaveLoadConfig()\n config.output_spec = [out]\n paddle.jit.save(\n layer=net,\n model_path=model_path,\n config=config)\n\n infer_net = paddle.jit.load(model_path)\n x = paddle.randn([4, 8], 'float32')\n pred = infer_net(x)\n \"\"\"\n return self._output_spec\n\n @output_spec.setter\n def output_spec(self, spec):\n if not isinstance(spec, list):\n raise TypeError(\n \"The SaveLoadConfig.output_spec should be 'list', but received input type is %s.\"\n % type(input))\n for var in spec:\n if not isinstance(var, core.VarBase):\n raise TypeError(\n \"The element in SaveLoadConfig.output_spec list should be 'Variable', but received element's type is %s.\"\n % type(var))\n self._output_spec = spec\n\n @property\n def model_filename(self):\n \"\"\"\n The name of file to save the translated program of target Layer.\n Default filename is :code:`__model__` .\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n class SimpleNet(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNet, self).__init__()\n self._linear = nn.Linear(in_size, out_size)\n\n @paddle.jit.to_static\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n return z\n\n # enable dygraph mode\n paddle.disable_static() \n\n # train model\n net = SimpleNet(8, 8)\n adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())\n x = paddle.randn([4, 8], 'float32')\n for i in range(10):\n out = net(x)\n loss = paddle.tensor.mean(out)\n loss.backward()\n adam.step()\n adam.clear_grad()\n\n # saving with configs.model_filename\n model_path = \"simplenet.example.model.model_filename\"\n config = paddle.SaveLoadConfig()\n config.model_filename = \"__simplenet__\"\n paddle.jit.save(\n layer=net,\n model_path=model_path,\n config=config)\n\n # loading with configs.model_filename\n infer_net = paddle.jit.load(model_path, config=config)\n x = paddle.randn([4, 8], 'float32')\n pred = infer_net(x)\n \"\"\"\n return self._model_filename\n\n @model_filename.setter\n def model_filename(self, filename):\n if not isinstance(filename, six.string_types):\n raise TypeError(\n \"The SaveLoadConfig.model_filename should be str, but received input's type is %s.\"\n % type(filename))\n if len(filename) == 0:\n raise ValueError(\n \"The SaveLoadConfig.model_filename is empty string.\")\n self._model_filename = filename\n\n @property\n def params_filename(self):\n \"\"\"\n The name of file to save all persistable variables in target Layer. \n Default file name is :code:`__variables__` .\n \n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n class SimpleNet(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNet, self).__init__()\n self._linear = nn.Linear(in_size, out_size)\n\n @paddle.jit.to_static\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n return z\n\n # enable dygraph mode\n paddle.disable_static() \n\n # train model\n net = SimpleNet(8, 8)\n adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())\n x = paddle.randn([4, 8], 'float32')\n for i in range(10):\n out = net(x)\n loss = paddle.tensor.mean(out)\n loss.backward()\n adam.step()\n adam.clear_grad()\n\n model_path = \"simplenet.example.model.params_filename\"\n config = paddle.SaveLoadConfig()\n config.params_filename = \"__params__\"\n\n # saving with configs.params_filename\n paddle.jit.save(\n layer=net,\n model_path=model_path,\n config=config)\n\n # loading with configs.params_filename\n infer_net = paddle.jit.load(model_path, config=config)\n x = paddle.randn([4, 8], 'float32')\n pred = infer_net(x)\n \"\"\"\n return self._params_filename\n\n @params_filename.setter\n def params_filename(self, filename):\n if not isinstance(filename, six.string_types):\n raise TypeError(\n \"The SaveLoadConfig.params_filename should be str, but received input's type is %s.\"\n % type(filename))\n if len(filename) == 0:\n raise ValueError(\n \"The SaveLoadConfig.params_filename is empty string.\")\n self._params_filename = filename\n\n # NOTE: [why not use params_filename=None to control saving params separately]\n # The new save interface does not recommend parameters to be saved separately. \n # Here, the concept should be separated as clearly as possible. \n # Setting params_filename=None only means that the saved file name is set \n # and has no other meaning. The new separate_params control for saving files\n # separately makes the concept clearer.\n @property\n def separate_params(self):\n \"\"\"\n Configure whether to save the Layer parameters as separate files.\n (In order to be compatible with the behavior of :ref:`api_fluid_io_save_inference_model` )\n\n If True, each parameter will be saved to a file separately, the file name is the parameter name,\n and the SaveLoadConfig.params_filename configuration will not take effect. Default False.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n class SimpleNet(nn.Layer):\n def __init__(self, in_size, out_size):\n super(SimpleNet, self).__init__()\n self._linear = nn.Linear(in_size, out_size)\n\n @paddle.jit.to_static\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n return z\n\n # enable dygraph mode\n paddle.disable_static() \n\n # train model\n net = SimpleNet(8, 8)\n adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())\n x = paddle.randn([4, 8], 'float32')\n for i in range(10):\n out = net(x)\n loss = paddle.tensor.mean(out)\n loss.backward()\n adam.step()\n adam.clear_grad()\n\n model_path = \"simplenet.example.model.separate_params\"\n config = paddle.jit.SaveLoadConfig()\n config.separate_params = True\n\n # saving with configs.separate_params\n paddle.jit.save(\n layer=net,\n model_path=model_path,\n config=config)\n # [result] the saved model directory contains:\n # linear_0.b_0 linear_0.w_0 __model__ __variables.info__\n\n # loading with configs.params_filename\n infer_net = paddle.jit.load(model_path, config=config)\n x = paddle.randn([4, 8], 'float32')\n pred = infer_net(x)\n \"\"\"\n return self._separate_params\n\n @separate_params.setter\n def separate_params(self, value):\n if not isinstance(value, bool):\n raise TypeError(\n \"The SaveLoadConfig.separate_params should be bool value, but received input's type is %s.\"\n % type(value))\n self._separate_params = value\n\n @property\n def keep_name_table(self):\n \"\"\"\n Configures whether keep ``structured_name -> parameter_name`` dict in loaded state dict.\n This dict is the debugging information saved when call `paddle.save`. \n It is generally only used for debugging and does not affect the actual training or inference. \n By default, it will not be retained in `paddle.load` result. Default: False.\n \n .. note::\n Only used for ``paddle.load``.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n \n paddle.disable_static()\n\n linear = paddle.nn.Linear(5, 1)\n\n state_dict = linear.state_dict()\n paddle.save(state_dict, \"paddle_dy\")\n\n configs = paddle.SaveLoadConfig()\n configs.keep_name_table = True\n para_state_dict, _ = paddle.load(\"paddle_dy\", configs)\n\n print(para_state_dict)\n # the name_table is 'StructuredToParameterName@@'\n # {'bias': array([0.], dtype=float32), \n # 'StructuredToParameterName@@': \n # {'bias': u'linear_0.b_0', 'weight': u'linear_0.w_0'}, \n # 'weight': array([[ 0.04230034],\n # [-0.1222527 ],\n # [ 0.7392676 ],\n # [-0.8136974 ],\n # [ 0.01211023]], dtype=float32)}\n \"\"\"\n return self._keep_name_table\n\n @keep_name_table.setter\n def keep_name_table(self, value):\n if not isinstance(value, bool):\n raise TypeError(\n \"The SaveLoadConfig.keep_name_table should be bool value, but received input's type is %s.\"\n % type(value))\n self._keep_name_table = value\n\n\n# NOTE(chenweihang): change jit.save/load argument `configs` to `config`\ndef deprecate_save_load_configs(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if 'configs' in kwargs:\n kwargs['config'] = kwargs['configs']\n kwargs.pop('configs')\n return func(*args, **kwargs)\n\n return wrapper\n\n\n@deprecate_save_load_configs\n@switch_to_static_graph\ndef save(layer, model_path, input_spec=None, config=None):\n \"\"\"\n Saves input declarative Layer as :ref:`api_imperative_TranslatedLayer` \n format model, which can be used for inference or fine-tuning after loading.\n\n It will save the translated program and all related persistable \n variables of input declarative Layer to given ``model_path``.\n \n The default saved translated program file name is ``__model__``,\n and the default saved persistable variables file name is ``__variables__``,\n and it also saved some additional variable description information to file \n ``__variables.info__``, these additional information is used in fine-tuning.\n\n The saved model can be loaded by follow APIs:\n - :ref:`api_imperative_jit_load`\n - :ref:`api_fluid_io_load_inference_model` (need pass ``params_filename='__variables__'``)\n - Other C++ inference APIs\n\n Args:\n layer (Layer): the Layer to be saved. The Layer should be decorated by `@declarative`.\n model_path (str): the directory to save the model.\n input_spec (list[Variable], optional): Describes the input of the saved model. \n It is the example inputs that will be passed to saved TranslatedLayer's forward\n function. If None, all input variables of the original Layer's forward function\n would be the inputs of the saved model. Default None.\n config (SaveLoadConfig, optional): :ref:`api_imperative_jit_saveLoadConfig` object\n that specifies additional configuration options. Default None.\n Returns:\n None\n\n Examples:\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n BATCH_SIZE = 16\n BATCH_NUM = 4\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n # define a random dataset\n class RandomDataset(paddle.io.Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n\n def __getitem__(self, idx):\n image = np.random.random([IMAGE_SIZE]).astype('float32')\n label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')\n return image, label\n\n def __len__(self):\n return self.num_samples\n\n class LinearNet(nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__()\n self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n\n @paddle.jit.to_static\n def forward(self, x):\n return self._linear(x)\n\n def train(layer, loader, loss_fn, opt):\n for epoch_id in range(EPOCH_NUM):\n for batch_id, (image, label) in enumerate(loader()):\n out = layer(image)\n loss = loss_fn(out, label)\n loss.backward()\n opt.step()\n opt.clear_grad()\n print(\"Epoch {} batch {}: loss = {}\".format(\n epoch_id, batch_id, np.mean(loss.numpy())))\n\n # enable dygraph mode\n place = paddle.CPUPlace()\n paddle.disable_static(place) \n\n # 1. train & save model.\n\n # create network\n layer = LinearNet()\n loss_fn = nn.CrossEntropyLoss()\n adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())\n\n # create data loader\n dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)\n loader = paddle.io.DataLoader(dataset,\n places=place,\n batch_size=BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n num_workers=2)\n\n # train\n train(layer, loader, loss_fn, adam)\n\n # save\n model_path = \"linear.example.model\"\n paddle.jit.save(layer, model_path)\n \"\"\"\n\n def get_inout_spec(all_vars, target_vars, return_name=False):\n result_list = []\n valid_var_dict = {}\n valid_vars = [var for var in all_vars if isinstance(var, Variable)]\n for var in valid_vars:\n valid_var_dict[var.name] = var\n if target_vars:\n for i, var in enumerate(target_vars):\n # check target var whether exists\n if var.name not in valid_var_dict:\n raise RuntimeError(\n \"The variable to feed/fetch are not exist.\")\n result_list.append(valid_var_dict[var.name])\n else:\n result_list = valid_vars\n if return_name:\n result_list = [var.name for var in result_list]\n\n return result_list\n\n # 1. input check\n prog_translator = ProgramTranslator()\n if not prog_translator.enable:\n raise RuntimeError(\n \"The paddle.jit.save doesn't work when setting ProgramTranslator.enable=False.\"\n )\n if not isinstance(layer, Layer):\n raise TypeError(\n \"The input layer of paddle.jit.save should be 'Layer', but received layer type is %s.\"\n % type(layer))\n\n configs = config\n if configs is None:\n configs = SaveLoadConfig()\n\n if input_spec is not None:\n if not isinstance(input_spec, list):\n raise TypeError(\n \"The input input_spec should be 'list', but received input_spec's type is %s.\"\n % type(input_spec))\n for var in input_spec:\n if not isinstance(var, (core.VarBase, Variable,\n paddle.static.InputSpec)):\n raise TypeError(\n \"The element in input_spec list should be 'Variable' or `paddle.static.InputSpec`, but received element's type is %s.\"\n % type(var))\n\n # 2. 
get program of declarative Layer.forward\n if not isinstance(layer.forward, StaticLayer):\n raise RuntimeError(\n \"layer.forward need to be decorated by `@declarative`.\")\n concrete_program = layer.forward.concrete_program\n\n # NOTE: we maintain the mapping of variable name to\n # structured name, the buffer variable (non-persistable)\n # saved to inference program may not need by dygraph Layer, \n # we only record the state_dict variable's structured name\n state_names_dict = dict()\n for structured_name, var in six.iteritems(layer.state_dict()):\n state_names_dict[var.name] = structured_name\n\n # 3. share parameters from Layer to scope & record var info\n scope = core.Scope()\n extra_var_info = dict()\n for param_or_buffer in concrete_program.parameters:\n # share to scope\n param_or_buffer_tensor = scope.var(param_or_buffer.name).get_tensor()\n src_tensor = param_or_buffer.value().get_tensor()\n param_or_buffer_tensor._share_data_with(src_tensor)\n # record var info\n extra_info_dict = dict()\n if param_or_buffer.name in state_names_dict:\n extra_info_dict['structured_name'] = state_names_dict[\n param_or_buffer.name]\n extra_info_dict['stop_gradient'] = param_or_buffer.stop_gradient\n if isinstance(param_or_buffer, ParamBase):\n extra_info_dict['trainable'] = param_or_buffer.trainable\n extra_var_info[param_or_buffer.name] = extra_info_dict\n\n # 4. build input & output spec\n input_var_names = get_inout_spec(concrete_program.inputs, input_spec, True)\n output_vars = get_inout_spec(concrete_program.outputs, configs.output_spec)\n\n # 5. save inference model\n from paddle.fluid.io import save_inference_model\n\n # VARIABLE_FILENAME keep nameing style consistent with '__model__'\n if configs.params_filename is None:\n configs.params_filename = VARIABLE_FILENAME\n\n with scope_guard(scope):\n save_inference_model(\n dirname=model_path,\n feeded_var_names=input_var_names,\n target_vars=output_vars,\n executor=Executor(_current_expected_place()),\n main_program=concrete_program.main_program.clone(),\n model_filename=configs.model_filename,\n params_filename=None\n if configs.separate_params else configs.params_filename,\n export_for_deployment=configs._export_for_deployment,\n program_only=configs._program_only)\n\n # NOTE: [ Save extra variable info ]\n # save_inference_model will lose some important variable information, including:\n # - Variable name and correspondence (when saved variables as one file)\n # - Variable.stop_gradient information\n # - Which persistent variable are parameter and which are not\n # - Parameter.trainable information\n #\n # The lost information cannot be recovered when it is loaded again, \n # so if we want to perform fine-tune after loading, we may need to \n # configure redundant information to proceed.\n #\n # Due to compatibility issues, we cannot change the original storage structure, \n # but we can save these information in `jit.save` without changing the original \n # storage to improve user experience. So we save extra information into\n # file `__variables.info__`\n extra_var_info_path = os.path.join(model_path, EXTRA_VAR_INFO_FILENAME)\n with open(extra_var_info_path, 'wb') as f:\n pickle.dump(extra_var_info, f, protocol=2)\n\n\n@deprecate_save_load_configs\n@dygraph_only\ndef load(model_path, config=None):\n \"\"\"\n :api_attr: imperative\n\n Load model saved by :ref:`api_imperative_jit_save` or :ref:`api_fluid_io_save_inference_model`\n as :ref:`api_imperative_TranslatedLayer`, then performing inference or fine-tune training.\n\n .. 
note::\n For some historical reasons, if you load model saved by :ref:`api_fluid_io_save_inference_model`,\n there will be the following limitations when using it in fine-tuning:\n 1. Imperative mode does not support LoDTensor. All original model's feed targets or parameters that depend on LoD are temporarily unavailable.\n 2. All saved model's feed targets need to be passed into TranslatedLayer's forward function.\n 3. The variable's ``stop_gradient`` information is lost and can not be recovered.\n 4. The parameter's ``trainable`` information is lost and can not be recovered.\n\n Args:\n model_path (str): The directory path where the model is saved.\n config (SaveLoadConfig, optional): :ref:`api_imperative_jit_saveLoadConfig` object that specifies \n additional configuration options. Default None.\n\n Returns:\n TranslatedLayer: A Layer object that can run the saved translated model.\n\n Examples:\n 1. Load model saved by :ref:`api_imperative_jit_save` then performing inference and fine-tune training.\n\n .. code-block:: python\n\n import numpy as np\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n BATCH_SIZE = 16\n BATCH_NUM = 4\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n # define a random dataset\n class RandomDataset(paddle.io.Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n\n def __getitem__(self, idx):\n image = np.random.random([IMAGE_SIZE]).astype('float32')\n label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')\n return image, label\n\n def __len__(self):\n return self.num_samples\n\n class LinearNet(nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__()\n self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n\n @paddle.jit.to_static\n def forward(self, x):\n return self._linear(x)\n\n def train(layer, loader, loss_fn, opt):\n for epoch_id in range(EPOCH_NUM):\n for batch_id, (image, label) in enumerate(loader()):\n out = layer(image)\n loss = loss_fn(out, label)\n loss.backward()\n opt.step()\n opt.clear_grad()\n print(\"Epoch {} batch {}: loss = {}\".format(\n epoch_id, batch_id, np.mean(loss.numpy())))\n\n # enable dygraph mode\n place = paddle.CPUPlace()\n paddle.disable_static(place) \n\n # 1. train & save model.\n\n # create network\n layer = LinearNet()\n loss_fn = nn.CrossEntropyLoss()\n adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())\n\n # create data loader\n dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)\n loader = paddle.io.DataLoader(dataset,\n places=place,\n batch_size=BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n num_workers=2)\n\n # train\n train(layer, loader, loss_fn, adam)\n\n # save\n model_path = \"linear.example.model\"\n paddle.jit.save(layer, model_path)\n\n # 2. load model\n\n # load\n loaded_layer = paddle.jit.load(model_path)\n\n # inference\n loaded_layer.eval()\n x = paddle.randn([1, IMAGE_SIZE], 'float32')\n pred = loaded_layer(x)\n\n # fine-tune\n loaded_layer.train()\n adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())\n train(loaded_layer, loader, loss_fn, adam)\n\n\n 2. Load model saved by :ref:`api_fluid_io_save_inference_model` then performing inference and fine-tune training.\n\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n import paddle.fluid as fluid\n import paddle.nn as nn\n import paddle.optimizer as opt\n\n BATCH_SIZE = 16\n BATCH_NUM = 4\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n # define a random dataset\n class RandomDataset(paddle.io.Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n\n def __getitem__(self, idx):\n image = np.random.random([IMAGE_SIZE]).astype('float32')\n label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')\n return image, label\n\n def __len__(self):\n return self.num_samples\n\n image = fluid.data(name='image', shape=[None, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n pred = fluid.layers.fc(input=image, size=10, act='softmax')\n loss = fluid.layers.cross_entropy(input=pred, label=label)\n avg_loss = fluid.layers.mean(loss)\n\n optimizer = fluid.optimizer.SGD(learning_rate=0.001)\n optimizer.minimize(avg_loss)\n\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n # create data loader\n dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)\n loader = paddle.io.DataLoader(dataset,\n feed_list=[image, label],\n places=place,\n batch_size=BATCH_SIZE, \n shuffle=True,\n drop_last=True,\n num_workers=2)\n\n # 1. train and save inference model\n for data in loader():\n exe.run(\n fluid.default_main_program(),\n feed=data, \n fetch_list=[avg_loss])\n\n model_path = \"fc.example.model\"\n fluid.io.save_inference_model(\n model_path, [\"image\"], [pred], exe)\n\n # 2. load model\n\n # enable dygraph mode\n paddle.disable_static(place)\n\n # load\n fc = paddle.jit.load(model_path)\n\n # inference\n fc.eval()\n x = paddle.randn([1, IMAGE_SIZE], 'float32')\n pred = fc(x)\n\n # fine-tune\n fc.train()\n loss_fn = nn.CrossEntropyLoss()\n adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())\n loader = paddle.io.DataLoader(dataset,\n places=place,\n batch_size=BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n num_workers=2)\n for epoch_id in range(EPOCH_NUM):\n for batch_id, (image, label) in enumerate(loader()):\n out = fc(image)\n loss = loss_fn(out, label)\n loss.backward()\n adam.step()\n adam.clear_grad()\n print(\"Epoch {} batch {}: loss = {}\".format(\n epoch_id, batch_id, np.mean(loss.numpy())))\n \"\"\"\n return TranslatedLayer._construct(model_path, config)\n\n\n@dygraph_only\ndef _trace(layer,\n inputs,\n feed_prefix='feed_',\n fetch_prefix='fetch_',\n tmp_prefix='t_'):\n assert isinstance(layer, Layer)\n\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n\n tracer = _dygraph_tracer()._get_program_desc_tracer()\n\n var_list = extract_vars(inputs)\n\n with program_desc_tracing_guard(True):\n original_outputs = layer(*inputs)\n if not isinstance(original_outputs, (list, tuple)):\n outputs = [original_outputs]\n else:\n outputs = original_outputs\n out_vars = [var for var in outputs]\n\n program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(\n var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix)\n tracer.reset()\n\n with _dygraph_guard(None):\n program = create_program_from_desc(program_desc)\n\n return original_outputs, program, feed_names, fetch_names, parameters\n\n\nclass TracedLayer(object):\n \"\"\"\n :api_attr: imperative\n \n TracedLayer is used to convert a forward dygraph model to a static\n graph model. This is mainly used to save the dygraph model for online\n inference using C++. 
Besides, users can also do inference in Python\n using the converted static graph model, which usually has better\n performance than the original dygraph model.\n\n TracedLayer would run the static graph model using :code:`Executor`\n and :code:`CompiledProgram` . The static graph model would share\n parameters with the dygraph model.\n\n All TracedLayer objects should not be created by constructor and should\n be created by static method :code:`TracedLayer.trace(layer, inputs)` .\n\n The TracedLayer can only be used to convert the data-independent dygraph\n model into the static graph model, which means the dygraph model should\n be independent with the tensor data and shape.\n \"\"\"\n\n def __init__(self, program, parameters, feed_names, fetch_names):\n self._program = program\n self._feed_names = feed_names\n self._fetch_names = fetch_names\n self._params = parameters\n\n self._place = _current_expected_place()\n\n self._scope = core.Scope()\n for p in parameters:\n src_tensor = p.value().get_tensor()\n dst_tensor = self._scope.var(p.name).get_tensor()\n dst_tensor._share_data_with(src_tensor)\n\n self._exe = Executor(self._place)\n self._compiled_program = None\n self._build_strategy = None\n self._exec_strategy = None\n\n @property\n def program(self):\n return self._program\n\n def _switch(self, is_test=True):\n for block_id in range(self._program.num_blocks):\n block = self._program.block(block_id)\n for op in block.ops:\n if op.has_attr(\"is_test\"):\n op._set_attr(\"is_test\", is_test)\n\n @staticmethod\n @dygraph_only\n def trace(layer, inputs):\n \"\"\"\n This method is the only allowed method to create TracedLayer object.\n It would call the :code:`layer(*inputs)` method to run the dygraph\n model and convert it into a static graph model.\n\n Args:\n layer (dygraph.Layer): the layer object to be traced.\n inputs (list(Tensor)|tuple(Tensor)|Tensor): the input tensors of\n the layer object.\n\n Returns:\n tuple: A tuple of 2 items, whose the first item is the output of\n :code:`layer(*inputs)` , and the second item is the created\n TracedLayer object.\n\n Examples:\n .. code-block:: python:\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import Linear, to_variable, TracedLayer\n import numpy as np\n\n class ExampleLayer(fluid.dygraph.Layer):\n def __init__(self):\n super(ExampleLayer, self).__init__()\n self._fc = Linear(3, 10)\n\n def forward(self, input):\n return self._fc(input)\n\n with fluid.dygraph.guard():\n layer = ExampleLayer()\n in_np = np.random.random([2, 3]).astype('float32')\n in_var = to_variable(in_np)\n out_dygraph, static_layer = TracedLayer.trace(layer, inputs=[in_var])\n\n # run the static graph model using Executor inside\n out_static_graph = static_layer([in_var])\n\n print(len(out_static_graph)) # 1\n print(out_static_graph[0].shape) # (2, 10)\n\n # save the static graph model for inference\n static_layer.save_inference_model(dirname='./saved_infer_model')\n \"\"\"\n assert isinstance(\n layer, Layer\n ), \"The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received {}.\".format(\n type(layer))\n outs, prog, feed, fetch, parameters = _trace(layer, inputs)\n traced = TracedLayer(prog, parameters, feed, fetch)\n return outs, traced\n\n def set_strategy(self, build_strategy=None, exec_strategy=None):\n \"\"\"\n Set the strategies when running static graph model.\n\n Args:\n build_strategy (BuildStrategy, optional): build strategy of\n :code:`CompiledProgram` inside TracedLayer. 
Default None.\n exec_strategy (ExecutionStrategy, optional): execution strategy of\n :code:`CompiledProgram` inside TracedLayer. Default None.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python:\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import Linear, to_variable, TracedLayer\n import numpy as np\n\n class ExampleLayer(fluid.dygraph.Layer):\n def __init__(self):\n super(ExampleLayer, self).__init__()\n self._fc = Linear(3, 10)\n\n def forward(self, input):\n return self._fc(input)\n\n with fluid.dygraph.guard():\n layer = ExampleLayer()\n in_np = np.random.random([2, 3]).astype('float32')\n in_var = to_variable(in_np)\n\n out_dygraph, static_layer = TracedLayer.trace(layer, inputs=[in_var])\n\n build_strategy = fluid.BuildStrategy()\n build_strategy.enable_inplace = True\n\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.num_threads = 2\n\n static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy)\n out_static_graph = static_layer([in_var])\n \"\"\"\n assert self._compiled_program is None, \"Cannot set strategy after run\"\n assert isinstance(\n build_strategy, (type(None), BuildStrategy)\n ), \"The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.\".format(\n type(build_strategy))\n assert isinstance(\n exec_strategy, (type(None), ExecutionStrategy)\n ), \"The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.\".format(\n type(exec_strategy))\n self._build_strategy = build_strategy\n self._exec_strategy = exec_strategy\n\n @switch_to_static_graph\n def _compile(self):\n self._compiled_program = CompiledProgram(\n self._program).with_data_parallel(\n build_strategy=self._build_strategy,\n exec_strategy=self._exec_strategy,\n places=self._place)\n\n def _build_feed(self, inputs):\n assert isinstance(inputs, (list, tuple)), \\\n \"Inputs should be a list or tuple of variables\"\n assert len(inputs) == len(self._feed_names)\n feed_dict = {}\n if in_dygraph_mode():\n for x, name in zip(inputs, self._feed_names):\n feed_dict[name] = x.value().get_tensor()\n else:\n for x, name in zip(inputs, self._feed_names):\n feed_dict[name] = x\n\n return feed_dict\n\n @switch_to_static_graph\n def _run(self, feed):\n return self._exe.run(self._compiled_program,\n feed=feed,\n fetch_list=self._fetch_names)\n\n def __call__(self, inputs):\n with scope_guard(self._scope):\n if self._compiled_program is None:\n self._compile()\n\n return self._run(self._build_feed(inputs))\n\n @switch_to_static_graph\n def save_inference_model(self, dirname, feed=None, fetch=None):\n \"\"\"\n Save the TracedLayer to a model for inference. The saved\n inference model can be loaded by C++ inference APIs.\n\n Args:\n dirname (str): the directory to save the inference model.\n feed (list[int], optional): the input variable indices of the saved\n inference model. If None, all input variables of the\n TracedLayer object would be the inputs of the saved inference\n model. Default None.\n fetch (list[int], optional): the output variable indices of the\n saved inference model. If None, all output variables of the\n TracedLayer object would be the outputs of the saved inference\n model. Default None.\n\n Returns:\n None\n\n Examples:\n .. 
code-block:: python:\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import Linear, to_variable, TracedLayer\n import numpy as np\n\n class ExampleLayer(fluid.dygraph.Layer):\n def __init__(self):\n super(ExampleLayer, self).__init__()\n self._fc = Linear(3, 10)\n\n def forward(self, input):\n return self._fc(input)\n\n save_dirname = './saved_infer_model'\n in_np = np.random.random([2, 3]).astype('float32')\n\n with fluid.dygraph.guard():\n layer = ExampleLayer()\n in_var = to_variable(in_np)\n out_dygraph, static_layer = TracedLayer.trace(layer, inputs=[in_var])\n static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])\n\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n program, feed_vars, fetch_vars = fluid.io.load_inference_model(save_dirname,\n exe)\n\n fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)\n print(fetch.shape) # (2, 10)\n \"\"\"\n check_type(dirname, \"dirname\", str,\n \"fluid.dygraph.jit.TracedLayer.save_inference_model\")\n check_type(feed, \"feed\", (type(None), list),\n \"fluid.dygraph.jit.TracedLayer.save_inference_model\")\n if isinstance(feed, list):\n for f in feed:\n check_type(f, \"each element of feed\", int,\n \"fluid.dygraph.jit.TracedLayer.save_inference_model\")\n check_type(fetch, \"fetch\", (type(None), list),\n \"fluid.dygraph.jit.TracedLayer.save_inference_model\")\n if isinstance(fetch, list):\n for f in fetch:\n check_type(f, \"each element of fetch\", int,\n \"fluid.dygraph.jit.TracedLayer.save_inference_model\")\n\n from paddle.fluid.io import save_inference_model\n\n def get_feed_fetch(all_vars, partial_vars):\n if partial_vars is None:\n return all_vars\n\n return [all_vars[idx] for idx in partial_vars]\n\n with scope_guard(self._scope):\n feeded_var_names = get_feed_fetch(self._feed_names, feed)\n target_var_names = get_feed_fetch(self._fetch_names, fetch)\n target_vars = []\n for name in target_var_names:\n target_var = self._program.global_block().vars.get(name, None)\n assert target_var is not None, \"{} cannot be found\".format(name)\n target_vars.append(target_var)\n\n save_inference_model(\n dirname=dirname,\n feeded_var_names=feeded_var_names,\n target_vars=target_vars,\n executor=self._exe,\n main_program=self._program.clone())\n","sub_path":"python/paddle/fluid/dygraph/jit.py","file_name":"jit.py","file_ext":"py","file_size_in_byte":53582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"56204678","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom student_page.views import current_courses, course_page, course_page_lecture, course_page_quiz, settings_page, personal_page, marks_page\nfrom teacher_constructor.views import courses_list_page, course_videos, course_quizzes, course_content, quiz_content\nfrom edplatform.views import login, register, logout\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'edplatform.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n \n url(r'^courses/$', courses_list_page),\n url(r'^courses/([A-Za-z0-9_]+)/$', course_content),\n url(r'^courses/([A-Za-z0-9_]+)/class/$', course_page),\n url(r'^courses/([A-Za-z0-9_]+)/class/week(\\d+)/lecture(\\d+)/$', course_page_lecture),\n url(r'^courses/([A-Za-z0-9_]+)/class/week(\\d+)/quiz(\\d+)/$', course_page_quiz),\n #url(r'^courses/([A-Za-z0-9_]+)/videos/$', course_videos),\n 
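# NB: the unnamed regex groups in the routes above are passed to the views positionally;\n # a view signature like course_page_lecture(request, course_slug, week, lecture) is assumed\n # here for illustration only -- the actual parameter names are not shown in this file.\n 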
#url(r'^courses/([A-Za-z0-9_]+)/tests/$', course_quizzes),\n\n url(r'^login/$', login),\n url(r'^logout/$', logout),\n # URL to quiz content\n url(r'^quiz/$', 'teacher_constructor.views.quiz_content'),\n\n url(r'^content_demo/$', 'teacher_constructor.views.new_course_content'),\n url(r'^register/$', register),\n \n url(r'^account/$', personal_page),\n url(r'^account/settings', settings_page),\n url(r'^account/marks', marks_page),\n)\n","sub_path":"edplatform/edplatform/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601582345","text":"import jinja2\n\n\nenv = jinja2.Environment(loader=jinja2.PackageLoader(__name__, 'templates'))\n\n\ndef get_html(lesson):\n\n type_template_map = {\n 1: '1.html',\n 2: '2.html',\n 3: '3.html'\n }\n\n template = env.get_template(type_template_map[lesson['type']])\n\n html_str = template.render(\n number=lesson['number'],\n title=lesson['title'],\n\n main_body=lesson['tagged_content'],\n\n author=lesson['author'],\n paper=lesson['paper'],\n )\n\n return html_str\n","sub_path":"get_html.py","file_name":"get_html.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606851505","text":"import numpy as np\nimport pandas as pd\nimport glob\nimport csv\nimport sys\nimport os\n\nin_dir = '/home/jporcaro/cpol_analysis/cpol_out_allyears1/'\nallFiles = glob.glob(in_dir + \"*.csv\")\ndf = pd.DataFrame()\nmylist = []\nout_dir = '/home/jporcaro/cpol_analysis/cpol_out_allyears1_df/'\n\nfor i in allFiles:\n try:\n df = pd.read_csv(i, index_col=None, header=0)\n mylist.append(df)\n except:\n continue\n\nfn = 'allyears_df.csv'\nframe = pd.concat(mylist)\nframe.to_csv(os.path.join(out_dir, fn))\nprint('task complete')\n\n","sub_path":"Scripts/dataframe_aggregation.py","file_name":"dataframe_aggregation.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623677944","text":"import sys\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nnum = 200\nxi = np.log(1000.0)/2.0\nz = np.linspace(0,5,num)\nalpha = np.linspace(0,0,num)\nfor i in range(0,num):\n if z[i]<2.0:\n alpha[i] = 1e-4\n elif z[i]<4.0:\n alpha[i] = 1e-4*np.exp((z[i]-2.0)*xi)\n else:\n alpha[i] = 1e-4*np.exp(2.0*xi)\nplt.semilogy(z,alpha,'k-',\n linewidth=2.0)\nmatplotlib.rcParams.update({'font.size': 22})\nplt.legend(loc=0)\nplt.xlabel(r'$\\rm Z/H$')\nplt.ylabel(r'$\\rm \\alpha_z$')\n#plt.title(r'$\\rm 50 AU$') \nplt.axis([0,5,1e-5,1e0])\nplt.savefig('alpha.eps',format='eps',dpi=300,bbox_inches='tight')\nplt.show()\n","sub_path":"alpha.py","file_name":"alpha.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16143406","text":"import gzip\nimport struct\nimport math\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef read_idx(filename):\n \"\"\"\n Parse idx file and return numpy array\n\n :param filename: idx file\n :return: numpy array\n \"\"\"\n with gzip.open(filename, 'rb') as f:\n zero, data_type, dims = struct.unpack('>HBB', f.read(4))\n shape = tuple(struct.unpack('>I', f.read(4))[0] for _ in range(dims))\n return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)\n\n\n# def init_network(input_size, output_size):\n# neu_net = []\n# # 
w_0 = [np.random.uniform(low=-5, high=5, size=input_size) for _ in range(0, output_size)]\n# # b_0 = np.random.uniform(low=-1, high=1, size=output_size)\n# r_w = np.sqrt(6 / (input_size + output_size))\n# r_b = np.sqrt(6 / (output_size + output_size))\n# w_0 = [np.random.uniform(0, r_w, input_size) for _ in range(0, output_size)]\n# b_0 = np.random.uniform(0, r_b, output_size)\n# w = [list(np.hstack((w_0[i], b_0[i]))) for i in range(0, len(b_0))]\n# output_layer = [{'weights': w[i]} for i in range(len(w))]\n# neu_net.append(output_layer)\n# return neu_net\n\n\ndef init_network(input_size, hidden_size, output_size):\n neu_net = []\n\n # w_0 = [np.random.uniform(low=-5, high=5, size=input_size) for _ in range(hidden_size)]\n # b_0 = np.random.uniform(low=-1, high=1, size=hidden_size)\n # w_1 = [np.random.uniform(low=-5, high=5, size=hidden_size) for _ in range(output_size)]\n # b_1 = np.random.uniform(low=-1, high=1, size=output_size)\n\n r_w_0 = np.sqrt(6 / (input_size + hidden_size))\n r_b_0 = np.sqrt(6 / (hidden_size + output_size))\n r_w_1 = np.sqrt(6 / (hidden_size + output_size))\n r_b_1 = np.sqrt(6 / (output_size + output_size))\n w_0 = [np.random.uniform(low=0, high=r_w_0, size=input_size) for _ in range(hidden_size)]\n b_0 = np.random.uniform(low=0, high=r_b_0, size=hidden_size)\n w_1 = [np.random.uniform(low=0, high=r_w_1, size=hidden_size) for _ in range(output_size)]\n b_1 = np.random.uniform(low=0, high=r_b_1, size=output_size)\n\n l1_w = [np.hstack((w_0[i], b_0[i])) for i in range(hidden_size)]\n l2_w = [np.hstack((w_1[i], b_1[i])) for i in range(output_size)]\n hidden_layer = [{'weights': l1_w[i]} for i in range(len(l1_w))]\n neu_net.append(hidden_layer)\n output_layer = [{'weights': l2_w[i]} for i in range(len(l2_w))]\n neu_net.append(output_layer)\n return neu_net\n\n\ndef calc_v(weights, inputs):\n v = weights[:-1] @ inputs + weights[-1]\n return v\n # v = weights[-1]\n # for i in range(len(weights) - 1):\n # v += weights[i] * inputs[i]\n # return v\n\n\ndef activation_func(v):\n return math.tanh(v)\n\n\ndef activation_func_deriv(v):\n return 1 - (math.tanh(v) ** 2)\n\n\ndef filter_output(outputs):\n max_index = np.array(outputs).argmax()\n filtered_output = np.array([0] * len(outputs))\n filtered_output[max_index] = 1\n return filtered_output\n\n\ndef forward_propagate(net, init_inputs):\n inputs = np.array(init_inputs)\n for layer in range(0, len(net)):\n outputs = []\n for neuron in net[layer]:\n neuron['v'] = calc_v(neuron['weights'], inputs)\n neuron['output'] = activation_func(neuron['v'])\n outputs.append(neuron['output'])\n inputs = np.array(outputs)\n return filter_output(inputs)\n\n\ndef backward_propagate(net, desired):\n for i in reversed(range(len(net))):\n layer = net[i]\n errors = list()\n if i != len(net) - 1:\n for j in range(len(layer)):\n error = 0.0\n for neuron in net[i + 1]:\n error += (neuron['weights'][j] * neuron['delta'])\n errors.append(error)\n else:\n for j in range(len(layer)):\n neuron = layer[j]\n errors.append(desired[j] - neuron['output'])\n for j in range(len(layer)):\n neuron = layer[j]\n neuron['delta'] = errors[j] * activation_func_deriv(neuron['v'])\n\n\ndef update_weights(net, x, eta):\n for layer in range(len(net)):\n inputs = x\n if layer != 0:\n inputs = [neuron['output'] for neuron in net[layer - 1]]\n for neuron in net[layer]:\n for j in range(len(inputs)):\n neuron['weights'][j] += eta * neuron['delta'] * inputs[j]\n neuron['weights'][-1] += eta * neuron['delta'] * 1\n\n\ndef calc_mse(y_pred, y_act):\n mse = 0\n for i in 
range(0, len(y_act)):\n mse += np.linalg.norm(np.array(y_act[i]) - np.array(y_pred[i]), ord=2) ** 2\n return mse / len(y_act)\n\n\ndef progbar(curr, total, full_progbar):\n frac = curr / total\n filled_progbar = round(frac * full_progbar)\n print('\\r', '#' * filled_progbar + '-' * (full_progbar - filled_progbar), '[{:>7.2%}]'.format(frac), end='')\n sys.stdout.flush()\n\n\ndef train_network(net, train_data, test_data, eta, mse_threshold):\n epoch = 0\n prev_mse = 0\n tr_mse_list = []\n te_mse_list = []\n tr_misclf_list = []\n te_misclf_list = []\n\n while True:\n print('Training epoch {}.....'.format(epoch + 1))\n for i in range(0, len(train_data['x'])):\n forward_propagate(net, train_data['x'][i])\n backward_propagate(net, train_data['y'][i])\n update_weights(net, train_data['x'][i], eta)\n progbar(i + 1, len(train_data['x']), 10)\n print()\n epoch += 1\n print('Testing on train data.....')\n tr_y_pred = test_network(net, train_data, decode=False)\n print('Testing on test data.....')\n te_y_pred = test_network(net, test_data, decode=False)\n tr_misclf_list.append(count_misclfs(tr_y_pred, train_data['y']))\n te_misclf_list.append(count_misclfs(te_y_pred, test_data['y']))\n tr_mse_list.append(calc_mse(tr_y_pred, train_data['y']))\n te_mse_list.append(calc_mse(te_y_pred, test_data['y']))\n te_accuracy = (1 - te_misclf_list[-1] / len(test_data['y'])) * 100\n tr_accuracy = (1 - tr_misclf_list[-1] / len(train_data['y'])) * 100\n print('>epoch={}, eta={:.7f}, tr_mse={:.7f}, te_mse={:.7f}, tr_accuracy={:.4f}%, te_accuracy={:.4f}%'.format(\n epoch, eta,\n tr_mse_list[-1],\n te_mse_list[-1],\n tr_accuracy,\n te_accuracy))\n if te_accuracy >= 95 or tr_mse_list[-1] <= mse_threshold or eta < 10 ** -7:\n break\n if tr_mse_list[-1] >= prev_mse and epoch > 1:\n eta = 0.9 * eta\n prev_mse = tr_mse_list[-1]\n return tr_mse_list, te_mse_list, tr_misclf_list, te_misclf_list, te_accuracy\n\n\ndef count_misclfs(y_pred, y_act):\n count = 0\n for i in range(0, len(y_act)):\n if list(y_pred[i]) != y_act[i]:\n count += 1\n return count\n\n\ndef one_hot_decode(arr):\n return np.array(arr).argmax()\n\n\ndef predict(net, x, decode=True):\n return one_hot_decode(forward_propagate(net, x)) if decode else forward_propagate(net, x)\n\n\ndef one_hot_encode(value, size):\n encoded_val = [0] * size\n if value < size:\n encoded_val[value] = 1\n return encoded_val\n\n\ndef normalize(arr):\n return arr / np.linalg.norm(arr, ord=2)\n\n\ndef preprocess_data(images, labels, n=None):\n data = {}\n if not n:\n n = len(images)\n data['x'] = [normalize(images[i].reshape(-1)) for i in range(0, n)]\n data['y'] = [one_hot_encode(labels[i], size=10) for i in range(0, n)]\n return data\n\n\ndef test_network(net, data, decode=True):\n y_pred = []\n for i in range(0, len(data['x'])):\n y_pred.append(predict(net, data['x'][i], decode=decode))\n progbar(i + 1, len(data['x']), 20)\n print()\n return y_pred\n\n\ndef calc_accuracy(y_pred, y_act):\n return np.mean(np.array(y_pred) == np.array(y_act)) * 100\n\n\ndef plot_data(tr_mse_list, te_mse_list, tr_misclf_list, te_misclf_list):\n plt.plot(tr_mse_list)\n plt.title('Training: Epochs vs Energy')\n plt.xlabel('epochs')\n plt.ylabel('energy')\n plt.show()\n plt.plot(te_mse_list)\n plt.title('Testing: Epochs vs Energy')\n plt.xlabel('epochs')\n plt.ylabel('energy')\n plt.show()\n plt.plot(tr_misclf_list)\n plt.title('Training: Epochs vs Misclassifications')\n plt.xlabel('epochs')\n plt.ylabel('misclassifications')\n plt.show()\n plt.plot(te_misclf_list)\n plt.title('Testing: Epochs vs 
Misclassifications')\n    plt.xlabel('epochs')\n    plt.ylabel('misclassifications')\n    plt.show()\n\n\ndef main():\n    test_images = read_idx('t10k-images-idx3-ubyte.gz')\n    test_labels = read_idx('t10k-labels-idx1-ubyte.gz')\n    train_images = read_idx('train-images-idx3-ubyte.gz')\n    train_labels = read_idx('train-labels-idx1-ubyte.gz')\n    train_data = preprocess_data(train_images, train_labels, n=60000)\n    test_data = preprocess_data(test_images, test_labels)\n    # net = init_network(input_size=784, output_size=10)\n    net = init_network(input_size=784, hidden_size=20, output_size=10)\n    tr_mse_list, te_mse_list, tr_misclf_list, te_misclf_list, te_accuracy = train_network(net, train_data, test_data,\n                                                                                          eta=0.01,\n                                                                                          mse_threshold=0.01)\n    print('\\nTest Accuracy {:.2f}%'.format(te_accuracy))\n    plot_data(tr_mse_list, te_mse_list, tr_misclf_list, te_misclf_list)\n    print(tr_mse_list, te_mse_list, tr_misclf_list, te_misclf_list)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"nn_hw_4_5/hw5_copy.py","file_name":"hw5_copy.py","file_ext":"py","file_size_in_byte":9493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211784558","text":"# -*- coding: utf-8 -*-\nimport unittest2 as unittest\n\nfrom Products.CMFPlone.interfaces import ILanguage\nfrom plone.app.multilingual.testing import PAM_FUNCTIONAL_TESTING\nfrom plone.dexterity.utils import createContentInContainer\nfrom plone.app.multilingual.interfaces import IPloneAppMultilingualInstalled\nfrom zope.interface import alsoProvides\n\n\nclass TestSubscribers(unittest.TestCase):\n    \"\"\"There are some events that are fired when an object\n    is created, moved or copied.\n\n    plone.multilingual registers some subscribers for each event\n    to change the language of the object from the container where\n    it has been created, moved or copied\n    \"\"\"\n    layer = PAM_FUNCTIONAL_TESTING\n\n    def setUp(self):\n        alsoProvides(self.layer['request'], IPloneAppMultilingualInstalled)\n        self.portal = self.layer['portal']\n\n    def test_created_event(self):\n        \"\"\"When an object is created in a folder it takes its language from the\n        folder itself\n        \"\"\"\n        a_ca = createContentInContainer(\n            self.portal['ca'], 'Document', title=u\"Test document\")\n        self.assertEqual(ILanguage(a_ca).get_language(), 'ca')\n\n    def test_created_event_on_portal(self):\n        \"\"\"When an object is created on portal it should be language\n        independent\n        \"\"\"\n        a_ca = createContentInContainer(\n            self.portal, 'Document', title=u\"Test document\")\n        self.assertEqual(ILanguage(a_ca).get_language(), '')\n\n    def test_moved_event(self):\n        a_ca = createContentInContainer(\n            self.portal['ca'], 'Document', title=u\"Test document\")\n\n        id_ = self.portal['ca'].manage_cutObjects(a_ca.id)\n        self.portal['en'].manage_pasteObjects(id_)\n        a_ca_copied = self.portal['en'][a_ca.id]\n        self.assertEqual(ILanguage(a_ca_copied).get_language(), 'en')\n\n    def test_copied_event(self):\n        a_ca = createContentInContainer(\n            self.portal['ca'], 'Document', title=u\"Test document\")\n\n        id_ = self.portal['ca'].manage_copyObjects(a_ca.id)\n        self.portal['en'].manage_pasteObjects(id_)\n        a_ca_copied = self.portal['en'][a_ca.id]\n        self.assertEqual(ILanguage(a_ca_copied).get_language(), 'en')\n","sub_path":"buildout-cache/eggs/plone.app.multilingual-3.0.2-py2.7.egg/plone/app/multilingual/tests/test_subscribers.py","file_name":"test_subscribers.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"263396078","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" \n01.10.2018\nIn this version we add to hub-model only single layer (as an object of class SingleLayerNeuralNetwork\nand we save all variables\nbut restore only for this layer.\n\nno params: just train and save model.\n-ev - restore variables from saved model, and create a graph without superfluous nodes,\n\tand save the graph to pb-file.\n\n\"\"\"\n\n# export CUDA_VISIBLE_DEVICES=1\n\nfrom __future__ import absolute_import, division, print_function\nimport os\nimport sys\nimport argparse\nimport math\nimport numpy as np\nnp.set_printoptions(precision=4, suppress=True)\n\n#import load_data\nimport _pickle as pickle\nimport gzip\n\nimport tensorflow as tf\n#import tensorflow_hub as hub\n\n#from rotate_images import *\nsys.path.append('.')\nsys.path.append('..')\nimport neural_networks.networks as networks\nfrom neural_networks.layers import *\nimport neural_networks.model as model\nfrom neural_networks.model import *\n\nimport settings\nfrom settings import *\n\nHIDDEN_NUM = 8\nCHECKPOINT_NAME = 'retrain_3'\n#output_node_names = ['sigmoid_out']\n\n#NUM_CLASSES = 412\n\n\n\"\"\"\nif os.path.exists('.notebook'):\n\tbottleneck_tensor_size = 588\n\tBATCH_SIZE = 2\n\tDISPLAY_INTERVAL, NUM_ITERS = 1, 500\nelse:\n\tbottleneck_tensor_size = 1536\n\t#bottleneck_tensor_size = 1001\n\tBATCH_SIZE = 10\n\tDISPLAY_INTERVAL, NUM_ITERS = 100, 20*1000*1000\n\"\"\"\n#------------------------\n\n\n\n\ndef createParser ():\n\t\"\"\"\n\tArgumentParser\n\t\"\"\"\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-r', '--restore', dest='restore', action='store_true')\n\tparser.add_argument('-ev', '--eval', dest='is_eval', action='store_true')\n\tparser.add_argument('-dud', '--dont_use_dump', dest='dont_use_dump', \n\t\taction='store_true', help='Create bottleneck data from origin dataset directory')\n\t#parser.add_argument('-t', '--is_training', dest='is_training', action='store_true')\n\tparser.add_argument('-i', '--in_file', default=\"dump.gz\", type=str,\\\n\t\thelp='input dir')\n\tparser.add_argument('-k', '--k', default=1, type=int,\\\n\t\thelp='number of network')\n\tparser.add_argument('-hn', '--hidden_num', default=8, type=int,\\\n\t\thelp='number of neurons in hiden layer')\n\tparser.add_argument('-ni', '--num_iters', default=None, type=int,\\\n\t\thelp='Set number of iterations')\n\n\treturn parser\n\n\n\n\nif __name__ == '__main__':\n\n\tparser = createParser()\n\targuments = parser.parse_args(sys.argv[1:])\t\t\t\n\tdata_file = arguments.in_file\n\tis_training = not arguments.is_eval\n\n\tif arguments.num_iters:\t\n\t\tnum_iters = arguments.num_iters\n\telse:\n\t\tnum_iters = settings.NUM_ITERS\t# from settings file\t\n\n\t# Use ready dump file with bottlenecks or bottleneck data from origin dataset directory\n\tLOAD_DUMP_FILE = not arguments.dont_use_dump\n\tif LOAD_DUMP_FILE:\n\t\tprint('data_file =', data_file)\n\t\tf = gzip.open(data_file, 'rb')\n\t\tdata = pickle.load(f)\n\telse:\n\t\tfrom train_full_model import make_bottleneck_data\n\t\tfrom settings import SHAPE, DATASET_DIR\n\t\tdata = make_bottleneck_data(src_dir=DATASET_DIR, shape=SHAPE, ratio=[9,1,0])\n\n\t#print('data_file =', data_file)\n\t#f = gzip.open(data_file, 'rb')\n\t#data = pickle.load(f)\n\n\ttrain = data['train']\n\tvalid = data['valid']\n\ttest = data['test']\n\ttrain_data = train['images'] # In fact, this is feature vectors\n\tvalid_data = valid['images']\n\ttest_data = test['images']\n\ttrain_labels = 
train['labels']\n\tvalid_labels = valid['labels']\n\ttest_labels = test['labels']\n\ttrain['size'] = len(train['labels'])\n\tvalid['size'] = len(valid['labels'])\n\ttest['size'] = len(test['labels'])\n\n\tprint('Train size:', len(train['labels']))\n\tprint('Valid size:', len(valid['labels']))\n\tprint('Test size:', len(test['labels']))\n\tprint('Data was loaded.')\n\t#print('Example of data:', train_data[0])\n\tprint('Size of vector:', len(train_data[0]))\n\t#print('Example of label:',train_labels[0])\n\tprint('Size of label:', len(train_labels[0]))\n\n\t#train_data = [np.transpose(t) for t in train_data]\n\t#valid_data = [np.transpose(t) for t in valid_data]\n\t#test_images = [np.transpose(t) for t in test_images]\n\tnum_train_batches = train['size'] // BATCH_SIZE\n\tnum_valid_batches = valid['size'] // BATCH_SIZE\n\tnum_test_batches = test['size'] // BATCH_SIZE\n\tprint('num_train_batches:', num_train_batches)\n\tprint('num_valid_batches:', num_valid_batches)\n\tprint('num_test_batches:', num_test_batches)\n\n\tSAMPLE_SIZE = train['size']\n\tbest_valid_acc = 0\t# best validation accuracy seen so far\n\n\tmap_label_id = data['label_id']\n\tNUM_CLASSES = len(map_label_id)\n\tprint('NUM_CLASSES =', NUM_CLASSES)\n\n\t#-------------------\n\n\t# Create a new graph\n\tgraph = tf.Graph() # not strictly necessary\n\n\twith graph.as_default():\n\n\t\t# 1. Construct a graph representing the model.\n\n\t\t#is_training = tf.Variable(True)\n\n\t\tshape = SHAPE\n\t\theight, width, color = shape\n\t\t#x = tf.placeholder(tf.float32, [None, height, width, 3], name='Placeholder-x')\n\t\tx = tf.placeholder(tf.float32, [None, height, width, 3], name=INPUT_NODE_NAME)\t\t\n\t\tresized_input_tensor = tf.reshape(x, [-1, height, width, 3])\n\n\t\tif USE_HUB:\n\t\t\thub_module = model.hub_module\n\t\t\tnetwork_model = hub_module(trainable=False)\t\n\t\telse:\n\t\t\tnetwork_model = model.network_model\n\t\n\t\tbottleneck_tensor = network_model(resized_input_tensor)\n\n\t\tif is_training:\n\t\t\tbottleneck_tensor_stop = tf.stop_gradient(bottleneck_tensor)\n\n\t\t\tbottleneck_input = tf.placeholder_with_default(\n\t\t\t\tbottleneck_tensor_stop, shape=[None, bottleneck_tensor_size], name='BottleneckInputPlaceholder') # Placeholder for input.\t\t\n\t\telse:\n\t\t\tbottleneck_input = bottleneck_tensor\n\t\n\t\tsingle_layer_nn = networks.SingleLayerNeuralNetwork(\n\t\t\tinput_size=bottleneck_tensor_size, \n\t\t\tnum_neurons=NUM_CLASSES,\n\t\t\tfunc=None, name='_out')\n\n\t\tlogits = single_layer_nn.module(bottleneck_input)\n\n\t\toutput = tf.nn.softmax(logits, name=OUTPUT_NODE_NAME)\n\n\t\t#tf.contrib.quantize.create_training_graph()\n\n\t\ty = tf.placeholder(tf.float32, [None, NUM_CLASSES], name='Placeholder-y') # Placeholder for labels.\n\n\t\t# 2. Add nodes that represent the optimization algorithm.\t\n\t\t# for classification:\n\t\t#loss = -tf.reduce_sum(y * tf.log(output), 1)\n\t\tprint('output = ', output)\n\n\t\t\"\"\"\n\t\tfrom loss_function import loss_function_top_6\n\t\tloss = loss_function_top_6(output, y, vector_size=NUM_CLASSES, tau=0.01)\n\t\t\"\"\"\n\t\t# or:\n\t\tloss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)\n\t\ttrain_op = tf.train.AdagradOptimizer(0.01).minimize(loss)\n\t\tcorrect_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(y,1))\n\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # top-1\n\n\t\tacc_top5 = tf.nn.in_top_k(logits, tf.argmax(y,1), 5)\n\t\tacc_top6 = tf.nn.in_top_k(logits, tf.argmax(y,1), 6)\n\n\t\t# 3. 
Execute the graph on batches of input data.\n\t\twith tf.Session() as sess: # Connect to the TF runtime.\n\t\t\tinit = tf.global_variables_initializer()\n\t\t\tsess.run(init)\t# Randomly initialize weights.\n\n\t\t\tif arguments.restore or (not is_training):\t\t\n\t\t\t\tsingle_layer_nn.restore(sess)\n\t\t\t\tif False:\n\t\t\t\t\ttf.train.Saver().restore(sess, './save_model/{0}'.format(CHECKPOINT_NAME))\n\n\t\t\t#print('is_training=', is_training.eval())\n\n\t\t\tfor iteration in range(num_iters):\t\t\t # Train iteratively for num_iters iterations.\t\t \n\n\t\t\t\tif iteration % settings.NUM_ITERS_DISPLAY == 0:\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tepoch = iteration//(num_train_batches // BATCH_SIZE * BATCH_SIZE)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tepoch = 0\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t# set a limit on the number of batches used to check training progress\n\t\t\t\t\tis_last_iterations = (iteration >= num_iters - NUM_ITERS_DISPLAY)\n\t\t\t\t\tif is_last_iterations:\n\t\t\t\t\t\tprint('These are the last iterations.')\t\t\t\t\t\n\t\t\t\t\t#last_display_iteration = False\n\t\t\t\t\tlimit = 10000 if is_last_iterations else 100\n\t\t\t\t\tnum_train_batches_limit = min(num_train_batches, limit)\n\t\t\t\t\tnum_valid_batches_limit = min(num_valid_batches, limit)\n\n\n\t\t\t\t\tloss_avg = np.mean( [loss.eval( \\\n\t\t\t\t\t\tfeed_dict={bottleneck_input:train['images'][i*BATCH_SIZE:(i+1)*BATCH_SIZE], \\\n\t\t\t\t\t\ty:train['labels'][i*BATCH_SIZE:(i+1)*BATCH_SIZE]}) \\\n\t\t\t\t\t\tfor i in range(0,num_train_batches_limit)])\n\t\t\t\t\tprint('loss_avg = {}'.format(loss_avg))\n\n\t\t\t\t\ttrain_acc = np.mean( [accuracy.eval( \\\n\t\t\t\t\t\tfeed_dict={bottleneck_input:train['images'][i*BATCH_SIZE:(i+1)*BATCH_SIZE], \\\n\t\t\t\t\t\ty:train['labels'][i*BATCH_SIZE:(i+1)*BATCH_SIZE]}) \\\n\t\t\t\t\t\tfor i in range(0,num_train_batches_limit)])\n\t\t\t\t\tvalid_acc = np.mean([ accuracy.eval( \\\n\t\t\t\t\t\tfeed_dict={bottleneck_input:valid['images'][i*BATCH_SIZE:(i+1)*BATCH_SIZE], \\\n\t\t\t\t\t\ty:valid['labels'][i*BATCH_SIZE:(i+1)*BATCH_SIZE]}) \\\n\t\t\t\t\t\tfor i in range(0,num_valid_batches_limit)])\n\n\t\t\t\t\t# valid top5,6\n\t\t\t\t\tvalid_acc_top5 = np.mean([ acc_top5.eval( \\\n\t\t\t\t\t\tfeed_dict={bottleneck_input:valid['images'][i*BATCH_SIZE:(i+1)*BATCH_SIZE], \\\n\t\t\t\t\t\ty:valid['labels'][i*BATCH_SIZE:(i+1)*BATCH_SIZE]}) \\\n\t\t\t\t\t\tfor i in range(0,num_valid_batches_limit)])\t\t\t\t\t\n\t\t\t\t\tvalid_acc_top6 = np.mean([ acc_top6.eval( \\\n\t\t\t\t\t\tfeed_dict={bottleneck_input:valid['images'][i*BATCH_SIZE:(i+1)*BATCH_SIZE], \\\n\t\t\t\t\t\ty:valid['labels'][i*BATCH_SIZE:(i+1)*BATCH_SIZE]}) \\\n\t\t\t\t\t\tfor i in range(0,num_valid_batches_limit)])\t\t\n\t\t\t\t\tif valid_acc > best_valid_acc:\n\t\t\t\t\t\tbest_valid_acc = valid_acc\n\n\n\n\t\t\t\t\t#print('train={:0.4f}, valid={:0.4f} (max={:0.4f}) [top5={:0.4f}, top6={:0.4f}]'.\\\n\t\t\t\t\t#\tformat(train_acc, valid_acc, best_valid_acc, valid_acc_top5, valid_acc_top6))\n\t\t\t\t\tprint('epoch {0:2} (i={1:06}): train={2:0.4f}, valid={3:0.4f} (max={4:0.4f}) [top5={5:0.4f}, top6={6:0.4f}]'.\\\n\t\t\t\t\t\tformat(epoch, iteration, train_acc, valid_acc, best_valid_acc, valid_acc_top5, valid_acc_top6))\n\n\t\t\t\t\tif False:\n\t\t\t\t\t\tif iteration % settings.NUM_ITERS_CHECKPOINT == 0:\t\n\t\t\t\t\t\t\tsaver = tf.train.Saver()\t\t\n\t\t\t\t\t\t\tsaver.save(sess, './save_model/{0}'.format(CHECKPOINT_NAME))\n\t\t\t\t\t\t\tprint('')\n\n\t\t\t\tif not is_training: break\n\n\t\t\t\t# RUN OPTIMIZER:\n\t\t\t\ta1 = iteration*BATCH_SIZE % 
train['size']\n\t\t\t\ta2 = (iteration + 1)*BATCH_SIZE % train['size']\n\t\t\t\tx_data = train['images'][a1:a2]\n\t\t\t\ty_data = train['labels'][a1:a2]\n\t\t\t\tif len(x_data) <= 0: continue\n\t\t\t\t#sess.run(train_op, {x: x_data, y: y_data}) # Perform one training iteration.\t\t\n\t\t\t\tsess.run(train_op, {bottleneck_input: x_data, y: y_data}) # Perform one training iteration.\n\t\t\t\t\n\n\t\t\t# Save the comp. graph\n\t\t\tif False:\n\t\t\t\tprint('Save the comp. graph')\n\t\t\t\tx_data, y_data = valid['images'], valid['labels'] \n\t\t\t\t#mnist.train.next_batch(BATCH_SIZE)\t\t\n\t\t\t\twriter = tf.summary.FileWriter(\"output\", sess.graph)\n\t\t\t\t#print(sess.run(train_op, {x: x_data, y: y_data}))\n\t\t\t\twriter.close() \n\n\t\t\t# Test of model\n\t\t\t\"\"\"\n\t\t\tHERE SOME ERROR ON GPU OCCURS\n\t\t\tnum_test_batches = test['size'] // BATCH_SIZE\n\t\t\ttest_loss = np.mean([ loss.eval( \\\n\t\t\t\tfeed_dict={x:test['images'][i*BATCH_SIZE : (i+1)*BATCH_SIZE]}) \\\n\t\t\t\tfor i in range(num_test_batches) ])\n\t\t\tprint('Test of model')\n\t\t\tprint('test_loss={0:0.4f}'.format(test_loss))\n\t\t\t\"\"\"\n\n\t\t\t\"\"\"\n\t\t\tprint('Test model')\n\t\t\ttest_loss = loss.eval(feed_dict={x:test['images'][0:BATCH_SIZE]})\n\t\t\tprint('test_loss={0:0.4f}'.format(test_loss))\t\t\t\t\n\t\t\t\"\"\"\n\n\t\t\t# Save model\n\t\t\tif False:\n\t\t\t\tsaver = tf.train.Saver()\t\t\n\t\t\t\tsaver.save(sess, './save_model/{0}'.format(CHECKPOINT_NAME)) \n\n\t\t\tif is_training:\n\t\t\t\t# save to checkpoint\n\t\t\t\tsingle_layer_nn.save(sess)\n\n\t\t\telse:\n\t\t\t\t# SAVE GRAPH TO PB\n\t\t\t\tgraph = sess.graph\t\t\t\n\t\t\t\t#op = is_training.assign(False)\n\t\t\t\t#sess.run(op)\n\t\t\t\ttf.graph_util.remove_training_nodes(graph.as_graph_def())\n\t\t\t\t# tf.contrib.quantize.create_eval_graph(graph)\n\t\t\t\t# tf.contrib.quantize.create_training_graph()\n\n\t\t\t\toutput_node_names = [OUTPUT_NODE_NAME]\n\t\t\t\toutput_graph_def = tf.graph_util.convert_variables_to_constants(\n\t\t\t\t\tsess, graph.as_graph_def(), output_node_names)\n\t\t\t\tdir_for_model = '.'\n\t\t\t\ttf.train.write_graph(output_graph_def, dir_for_model,\n\t\t\t\t\t'saved_model_pure.pb', as_text=False)\t\n\n\n\t\t\t# it doesn't work. 
I don't know why.\n\t\t\t#graph_file_name = 'saved_model_gf.pb'\t\t\t\n\t\t\t#with tf.gfile.FastGFile(graph_file_name, 'wb') as f:\n\t\t\t#\tf.write(output_graph_def.SerializeToString())\n\n\t\t\t#tf.train.write_graph(graph, dir_for_model,\n\t\t\t#\t'saved_model.pb', as_text=False)\n","sub_path":"training/train_last_layer.py","file_name":"train_last_layer.py","file_ext":"py","file_size_in_byte":11845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221654002","text":"import random\n\ndef ConvertNumber2Binary(value, limit):\n    binary = []\n    while value > 0:\n        binary.append(value%2)\n        value = value//2\n    while len(binary) < limit:\n        binary.append(0)\n    binary.reverse()\n    return binary\n\nclass Individual(object):\n    \"\"\"docstring for Individual - a possible solution for the problem\"\"\"\n    QUEEN_GENE_SIZE = 3\n\n    def __init__(self, qtd_queens, **kwargs):\n        super(Individual, self).__init__()\n        self.__fitness = None\n        self.__phenotype = None\n        self.__collisions = None\n        self.__qtd_queens = qtd_queens\n        self.__genotype = kwargs.get('recombination', [])\n        if 'recombination' not in kwargs:\n            permut = [i for i in range(0, self.__qtd_queens)]\n            random.shuffle(permut)\n            for val in permut:\n                for gene in ConvertNumber2Binary(val, self.QUEEN_GENE_SIZE):\n                    self.__genotype.append(gene)\n\n    def __str__(self):\n        return str(self.phenotype)\n\n    @property\n    def phenotype(self):\n        \"\"\"Returns the phenotype for the individual - a list of integers\"\"\"\n        if self.__phenotype is not None:\n            return self.__phenotype\n        self.__phenotype = []\n        for i in range(0, self.__qtd_queens):\n            number = 0\n            for j in range(0, self.QUEEN_GENE_SIZE):\n                position = j + self.QUEEN_GENE_SIZE*i\n                number = number*2 + self.__genotype[position]\n            self.__phenotype.append(number)\n        return self.__phenotype\n\n    @property\n    def count_collisions(self):\n        \"\"\"Returns the number of collisions in the solution\"\"\"\n        if self.__collisions is not None:\n            return self.__collisions\n        self.__collisions = 0\n        for i in range(0, self.__qtd_queens):\n            for j in range(i+1, self.__qtd_queens):\n                if self.phenotype[i] == self.phenotype[j] or abs(self.phenotype[i]-self.phenotype[j]) == abs(i-j):\n                    self.__collisions += 1.0\n        return self.__collisions\n\n    @property\n    def fitness(self):\n        \"\"\"Returns the fitness of the Individual based on how many queens are attacking each other - duplicates are not counted twice\"\"\"\n        if self.__fitness is not None:\n            return self.__fitness\n        self.__fitness = (1.0/(1.0+self.count_collisions))\n        return self.__fitness\n\n    def mutate(self):\n        \"\"\"Apply the mutation operator with 0.8 chance of mutation\"\"\"\n        for i in range(0, self.__qtd_queens):\n            for j in range(i+1, self.__qtd_queens):\n                if random.randint(0,4) <= 3:\n                    for k in range(0, self.QUEEN_GENE_SIZE):\n                        tmp = self.__genotype[k+i*self.QUEEN_GENE_SIZE]\n                        self.__genotype[k+i*self.QUEEN_GENE_SIZE] = self.__genotype[k+j*self.QUEEN_GENE_SIZE]\n                        self.__genotype[k+j*self.QUEEN_GENE_SIZE] = tmp\n\n    def recombinate(self, second_father):\n        \"\"\"Apply the recombination operator with cut-and-crossfill, returning a tuple with two elements, the children\"\"\"\n\n        positionSplit = random.randint(0,self.__qtd_queens-2)\n        mark1 = []\n        mark2 = []\n\n        first_child_recombination = []\n        second_child_recombination = []\n\n        for i in range(0, positionSplit+1):\n            for j in range(0, self.QUEEN_GENE_SIZE):\n                first_child_recombination.append(self.__genotype[j+self.QUEEN_GENE_SIZE*i])\n
                second_child_recombination.append(second_father.__genotype[j+self.QUEEN_GENE_SIZE*i])\n            mark1.append(first_child_recombination[self.QUEEN_GENE_SIZE*i:(self.QUEEN_GENE_SIZE*(i+1))])\n            mark2.append(second_child_recombination[self.QUEEN_GENE_SIZE*i:(self.QUEEN_GENE_SIZE*(i+1))])\n\n        for i in range(0, self.__qtd_queens):\n            new_gene1 = []\n            new_gene2 = []\n            for j in range(0, self.QUEEN_GENE_SIZE):\n                new_gene1.append(second_father.__genotype[j+self.QUEEN_GENE_SIZE*i])\n                new_gene2.append(self.__genotype[j+self.QUEEN_GENE_SIZE*i])\n\n            if new_gene1 not in mark1:\n                for j in range(0, self.QUEEN_GENE_SIZE):\n                    first_child_recombination.append(new_gene1[j])\n                mark1.append(new_gene1)\n\n            if new_gene2 not in mark2:\n                for j in range(0, self.QUEEN_GENE_SIZE):\n                    second_child_recombination.append(new_gene2[j])\n                mark2.append(new_gene2)\n\n        return (Individual(self.__qtd_queens, recombination = first_child_recombination), Individual(self.__qtd_queens, recombination = second_child_recombination))\n","sub_path":"eightQueensProblem/Individual.py","file_name":"Individual.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192850920","text":"# -*- coding: UTF-8 -*-\n'''\n@Author: sanjayzhong\n@Github: https://github.com/sanjayzzzhong\n@Date: 2019-05-13\n'''\n# coding:utf-8\nfrom PIL import Image\nimport os.path\nimport glob\nimport xml.etree.ElementTree as ET\nimport xml.dom.minidom\n\ni = 2510\nxmldir = \"/home/sanjay/DATA/Project_Datasets/2_Tibet_Project/1_Gun/gun_classify/xml/\"\nimgsdir = \"/home/sanjay/DATA/Project_Datasets/2_Tibet_Project/1_Gun/gun_classify/new_img/\"\n#outdir = \"/home/sanjay/Pictures/manhole_1/new\"\nfor xmlfile in os.listdir(xmldir):\n    xmlname = os.path.splitext(xmlfile)[0]\n    # print(os.path.splitext(xmlfile))\n    for pngfile in os.listdir(imgsdir):\n        pngname = os.path.splitext(pngfile)[0]\n        # print(os.path.splitext(pngfile))\n        if pngname == xmlname:\n            # rename the image file\n            # image file paths before and after renaming\n            olddir = os.path.join(os.path.abspath(imgsdir), pngname + \".jpg\")\n            # print(olddir)\n            newdir = os.path.join(os.path.abspath(\n                imgsdir), \"ak47_\" + str(i) + \".jpg\")\n            os.rename(olddir, newdir)\n            print(xmlfile, '----->', str(i) + '.jpg')\n            # update the filename node attribute\n            # read the xml file\n            dom = xml.dom.minidom.parse(os.path.join(xmldir, xmlfile))\n            root = dom.documentElement\n\n            # get the value between the filename tags and assign it the new value i\n            root.getElementsByTagName('filename')[\n                0].firstChild.data = \"ak47_\" + str(i) + '.jpg'\n\n            # save the modified xml file\n            # xml file paths before and after renaming\n            old_xmldir = os.path.join(xmldir, xmlfile)\n            new_xmldir = os.path.join(xmldir, \"ak47_\" + str(i)+'.xml')\n            # open and write\n            with open(old_xmldir, 'w') as fh:\n                dom.writexml(fh)\n            os.rename(old_xmldir, new_xmldir)\n            i += 1\nprint('total number is ', i)\n","sub_path":"data_process/rename_xml_pic.py","file_name":"rename_xml_pic.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"344712702","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib import admin\nadmin.autodiscover()\nurlpatterns = patterns('',\n    url(r'^$', 'jla.views.home', name='home'),\n    url(r'^login/$', 'jla.views.login', name='login'),\n    url(r'^nologin/$', 'jla.views.nologin', name='nologin'),\n    url(r'^logout/$', 'jla.views.logout', name='logout'),\n    url(r'^home/$', 'jla.views.home', name='home'),\n    url(r'^admin/', include(admin.site.urls)),\n    # routes for managing 
courses\n    url(r'^cursos/', include('cursos.urls', namespace='cursos')),\n    url(r'^matriculas/', include('matriculas.urls', namespace='matriculas')),\n    url(r'^alumnos/', include('alumnos.urls', namespace='alumnos')),\n    url(r'^informes/', include('informes.urls', namespace='informes')),\n)\n","sub_path":"intranet/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65275976","text":"\"\"\"Shows which active ports the domain has\"\"\"\r\n\"\"\"Authors: Efrén Santiago Landeros Hernández\r\n           David Alejandro Ayala Palacios \"\"\"\r\nimport os\r\n\r\n\r\ndef get_nmap(options, ip):\r\n\r\n    command = \"nmap \" + options + \" \" + ip\r\n    process = os.popen(command)\r\n\r\n    results = str(process.read())\r\n\r\n    return results\r\n\r\n\r\nprint(get_nmap('-F', '54.186.250.79'))\r\n\r\n","sub_path":"Real Applications_git/5_-_Bonus_Content/nmap.py","file_name":"nmap.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"15481324","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom setuptools import setup\nfrom ska_helpers.setup_helper import duplicate_package_info\n\nname = \"ska_astro\"\nnamespace = \"Ska.astro\"\n\npackages = [\"ska_astro\"]\npackage_dir = {name: name}\n\nduplicate_package_info(packages, name, namespace)\nduplicate_package_info(package_dir, name, namespace)\n\nsetup(name=name,\n      author = 'Tom Aldcroft',\n      description='Astronomy utilities',\n      author_email = 'taldcroft@cfa.harvard.edu',\n      use_scm_version=True,\n      setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'],\n      zip_safe=False,\n      packages=packages,\n      package_dir=package_dir,\n      )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207459656","text":"import sys\nimport threading\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.messagebox as messagebox\nimport webbrowser\n\nfrom core.facebookSearchTool import facebookSearchTool\n\nfbtool = facebookSearchTool()\n\ndicFbStalk = {\n    \"Tags\": {\n        \"Pictures\": \"https://www.facebook.com/search/%s/photos-of/intersect\",\n        \"Videos\": \"https://www.facebook.com/search/%s/videos-of/intersect\",\n        \"Publications\": \"https://www.facebook.com/search/%s/stories-tagged/intersect\",\n    },\n    \"Persons\": {\n        \"Family\": \"https://www.facebook.com/search/%s/relatives/intersect\",\n        \"Friends\": \"https://www.facebook.com/search/%s/friends/intersect\",\n        \"Common friends\": \"https://www.facebook.com/search/%s/friends/friends/intersect\",\n        \"Collegues / Employees\": \"https://www.facebook.com/search/%s/employees/intersect/\",\n        \"School\": \"https://www.facebook.com/search/%s/schools-attended/ever-past/intersect/students/intersect/\",\n        \"Nearby\": \"https://www.facebook.com/search/%s/current-cities/residents-near/present/intersect\",\n    },\n    \"Visited places\": {\n        \"All\": \"https://www.facebook.com/search/%s/places-visited/\",\n        \"Bars\": \"https://www.facebook.com/search/%s/places-visited/110290705711626/places/intersect/\",\n        \"Restaurants\": \"https://www.facebook.com/search/%s/places-visited/273819889375819/places/intersect/\",\n        \"Shops\": \"https://www.facebook.com/search/%s/places-visited/200600219953504/places/intersect/\",\n        \"Outside\": 
\"https://www.facebook.com/search/%s/places-visited/935165616516865/places/intersect/\",\n \"Hotels\": \"https://www.facebook.com/search/%s/places-visited/164243073639257/places/intersect/\",\n \"Theatres\": \"https://www.facebook.com/search/%s/places-visited/192511100766680/places/intersect/\",\n },\n \"Likes\": {\n \"Pictures\": \"https://www.facebook.com/search/%s/photos-liked/intersect\",\n \"Videos\": \"https://www.facebook.com/search/%s/videos-liked/intersect\",\n \"Publications\": \"https://www.facebook.com/search/%s/stories-liked/intersect\",\n },\n \"Comments\": {\n \"Pictures\": \"https://www.facebook.com/search/%s/photos-commented/intersect\",\n },\n \"Profile\": {\n \"Pictures\": \"https://www.facebook.com/search/%s/photos-by/\",\n \"Videos\": \"https://www.facebook.com/search/%s/videos-by/\",\n \"Publications\": \"https://www.facebook.com/search/%s/stories-by/\",\n \"Groups\": \"https://www.facebook.com/search/%s/groups\",\n \"Future Events\": \"https://www.facebook.com/search/%s/events-joined/\",\n \"Past Events\": \"https://www.facebook.com/search/%s/events-joined/in-past/date/events/intersect/\",\n \"Games\": \"https://www.facebook.com/search/%s/apps-used/game/apps/intersect\",\n \"Apps\": \"https://www.facebook.com/search/%s/apps-used/\",\n },\n \"Interests\": {\n \"Pages\": \"https://www.facebook.com/search/%s/pages-liked/intersect\",\n \"Politics\": \"https://www.facebook.com/search/%s/pages-liked/161431733929266/pages/intersect/\",\n \"Religion\": \"https://www.facebook.com/search/%s/pages-liked/religion/pages/intersect/\",\n \"Musics\": \"https://www.facebook.com/search/%s/pages-liked/musician/pages/intersect/\",\n \"Films\": \"https://www.facebook.com/search/%s/pages-liked/movie/pages/intersect/\",\n \"Books\": \"https://www.facebook.com/search/%s/pages-liked/book/pages/intersect/\",\n \"Places\": \"https://www.facebook.com/search/%s/places-liked/\"\n }\n}\n\nresultProfile = \"\"\"\n [Name] %s\n [Work] %s\n [Loc] %s\n [ID] %s\n \n \"\"\"\n\n\ndef search_by_profile(window, search, profile):\n try:\n fbtool.getInfoProfile(profile)\n\n loc = fbtool.address\n work = fbtool.job\n name = fbtool.name\n facebookID = fbtool.facebookId\n\n search.destroy()\n\n old = sys.stdout\n with open(\"./output.txt\", \"w\", encoding=\"utf-8\") as f:\n sys.stdout = f\n\n print(resultProfile % (name, work, loc, facebookID))\n search_fb(window, facebookID)\n sys.stdout = old\n except:\n messagebox.showerror(\"Error!\", \"An error occured while trying to get this profile !\")\n\n\ndef search_by_id(window, search, facebookID):\n search.destroy()\n\n old = sys.stdout\n with open(\"./output.txt\", \"w\", encoding=\"utf-8\") as f:\n sys.stdout = f\n\n print(\"[ID]\\t{}\\n\".format(facebookID))\n search_fb(window, facebookID)\n sys.stdout = old\n\n\ndef search_fb(window, facebookID):\n search = tk.Toplevel(window)\n search.title(\"Search facebook\")\n search.geometry(\"400x300\")\n\n tk.Label(search, text=\"Stalking Facebook ID {}\".format(facebookID)).pack(side=tk.TOP, anchor=tk.NW)\n\n tab_parent = ttk.Notebook(search)\n tab_parent.pack(expand=1, fill=\"both\")\n\n for name, links in dicFbStalk.items():\n tab = tk.Frame(tab_parent)\n tab_parent.add(tab, text=name)\n for link_name, link in links.items():\n add_button(tab, link_name, link, facebookID)\n\n\ndef add_button(tab, name, link, facebookID):\n tk.Button(tab, text=name, command=lambda: webbrowser.open(link % facebookID)).pack()\n\n\ndef facebook_stalk(window, settings):\n search = tk.Toplevel(window)\n search.title(\"Facebook\")\n 
search.geometry(\"400x150\")\n\n city = tk.StringVar()\n\n frame1 = tk.Frame(search, background=\"White\")\n frame1.pack()\n frame2 = tk.Frame(search, background=\"White\")\n frame2.pack()\n\n tk.Label(frame1, text=\"Profile name: \").pack(side=tk.LEFT)\n facebook_name = tk.Entry(frame1)\n facebook_name.pack(side=tk.LEFT)\n tk.Button(frame1, text=\"Find by profile\", command=lambda: search_by_profile(window, search, facebook_name.get())) \\\n .pack(side=tk.RIGHT)\n\n tk.Label(frame2, text=\"Facebook ID: \").pack(side=tk.LEFT)\n facebook_id = tk.Entry(frame2)\n facebook_id.pack(side=tk.LEFT)\n tk.Button(frame2, text=\"Find by profile\", command=lambda: search_by_id(window, search, facebook_id.get())) \\\n .pack(side=tk.RIGHT)\n\n search.mainloop()\n\n\ndef done(search, code, number):\n search.destroy()\n old = sys.stdout\n with open(\"./output.txt\", \"w\", encoding=\"utf-8\") as f:\n sys.stdout = f\n try:\n x = threading.Thread(target=sp, args=(code, number))\n x.start()\n messagebox.showinfo(\"Searching\", \"Currently searching...\")\n x.join()\n except:\n messagebox.showinfo(\"Searching\", \"Currently searching...\")\n sp(code, number)\n sys.stdout = old\n\n messagebox.showinfo(\"Result\", \"The result has been written in the output.txt file\")\n","sub_path":"interface/facebookStalk.py","file_name":"facebookStalk.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212515397","text":"\"\"\"\n Author: Ankit Dutta \n Naive Bayes spam detection for NLP\n dataset: http://www.cs.jhu.edu/~mdredze/datasets/sentiment/index2.html\n\"\"\"\n\nfrom __future__ import print_function, division\nfrom future.utils import iteritems\nfrom builtins import range\n\nimport nltk\nimport numpy as np\n\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.linear_model import LogisticRegression\nfrom bs4 import BeautifulSoup\n\nwordnet_lemmatizer = WordNetLemmatizer()\n\n# from http://www.lextek.com/manuals/onix/stopwords1.html\nstopwords = set(w.rstrip() for w in open('../stopwords.txt'))\n# note: an alternative source of stopwords\n# from nltk.corpus import stopwords\n# stopwords.words('english')\n\n# load the reviews\n# data courtesy of http://www.cs.jhu.edu/~mdredze/datasets/sentiment/index2.html\npositive_reviews = BeautifulSoup(open('data/positive.review').read(), \"lxml\")\npositive_reviews = positive_reviews.find_all('review_text')\n\nnegative_reviews = BeautifulSoup(open('data/negative.review').read(), \"lxml\")\nnegative_reviews = negative_reviews.find_all('review_text')\n\n# there are more positive reviews than negative reviews\n# so let's take a random sample so we have balanced classes\nnp.random.shuffle(positive_reviews)\npositive_reviews = positive_reviews[:len(negative_reviews)]\n\n# we can also oversample the negative reviews\n# diff = len(positive_reviews) - len(negative_reviews)\n# idxs = np.random.choice(len(negative_reviews), size=diff)\n# extra = [negative_reviews[i] for i in idxs]\n# print(extra)\n# negative_reviews += extra\n\ndef myTokenizer(s):\n s = s.lower() # downcase\n tokens = nltk.tokenize.word_tokenize(s) # split string into words (tokens)\n tokens = [t for t in tokens if len(t) > 2] # remove short words, they're probably not useful\n tokens = [wordnet_lemmatizer.lemmatize(t) for t in tokens] # put words into base form\n tokens = [t for t in tokens if t not in stopwords] # remove stopwords\n return tokens\n\n\n# create a word-to-index map so that we can create our word-frequency 
vectors later\n# let's also save the tokenized versions so we don't have to tokenize again later\nword_index_map = {}\ncurrent_index = 0\npositive_tokenized = []\nnegative_tokenized = []\n\nfor review in positive_reviews:\n    tokens = myTokenizer(review.text)\n    positive_tokenized.append(tokens)\n    for token in tokens:\n        if token not in word_index_map:\n            word_index_map[token] = current_index\n            current_index += 1\n\nfor review in negative_reviews:\n    tokens = myTokenizer(review.text)\n    negative_tokenized.append(tokens)\n    for token in tokens:\n        if token not in word_index_map:\n            word_index_map[token] = current_index\n            current_index += 1\n\n\n# now let's create our input matrices\ndef tokensToVector(tokens, label):\n    x = np.zeros(len(word_index_map) + 1) # last element is for the label\n    for t in tokens:\n        i = word_index_map[t]\n        x[i] += 1\n    x = x / x.sum() # normalize it before setting label\n    x[-1] = label\n    return x\n\nN = len(positive_tokenized) + len(negative_tokenized)\n# (N x D+1 matrix - keeping them together for now so we can shuffle more easily later\ndata = np.zeros((N, len(word_index_map) + 1))\ni = 0\nfor tokens in positive_tokenized:\n    xy = tokensToVector(tokens, 1)\n    data[i,:] = xy\n    i += 1\n\nfor tokens in negative_tokenized:\n    xy = tokensToVector(tokens, 0)\n    data[i,:] = xy\n    i += 1\n\n# shuffle the data and create train/test splits\n# try it multiple times!\nnp.random.shuffle(data)\n\nX = data[:,:-1]\nY = data[:,-1]\n\n# last 100 rows will be test\nXtrain = X[:-100,]\nYtrain = Y[:-100,]\nXtest = X[-100:,]\nYtest = Y[-100:,]\n\nmodel = LogisticRegression()\nmodel.fit(Xtrain, Ytrain)\nprint(\"Classification rate:\", model.score(Xtest, Ytest))\n\n# let's look at the weights for each word\n# try it with different threshold values!\nthreshold = 0.5\nfor word, index in iteritems(word_index_map):\n    weight = model.coef_[0][index]\n    if weight > threshold or weight < -threshold:\n        print(word, weight)","sub_path":"sentiment_analyzer/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23669403","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 26 09:55:52 2011\n\n@author: -\n\"\"\"\n\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task\nfrom pandac.PandaModules import *\nfrom direct.gui.OnscreenText import OnscreenText\nfrom direct.interval.IntervalGlobal import *\nfrom panda3d.core import Point3\nfrom direct.showbase import DirectObject\n\nclass Panda(ShowBase):\n    def __init__(self):\n        ShowBase.__init__(self)\n        self.mover = NodePath(\"mover\")\n        self.mover.reparentTo(render)\n        self.xaxis = self.loader.loadModel(\"models/longslug.egg\")\n        self.xaxis.reparentTo(render)\n        self.xaxis.setColorScale(1, 0, 0, 1)\n        self.yaxis = self.loader.loadModel(\"models/longslug.egg\")\n        self.yaxis.reparentTo(render)\n        self.yaxis.setH(90)\n        self.yaxis.setColorScale(0, 1, 0, 1)\n        self.zaxis = self.loader.loadModel(\"models/longslug.egg\")\n        self.zaxis.reparentTo(render)\n        self.zaxis.setR(90)\n        self.zaxis.setColorScale(0, 0, 1, 1)\n        \n        plight = PointLight('my plight')\n        plnp = render.attachNewNode(plight)\n        plnp.setPos(0, -5, 5)\n        render.setLight(plnp)\n        \n        alight = AmbientLight('my alight')\n        alnp = render.attachNewNode(alight)\n        alight.setColor(VBase4(0.4, 0.4, 0.4, 1))\n        render.setLight(alnp)\n        self.setBackgroundColor(0.69,0.77,0.88)\n        \n        self.disableMouse()\n        self.useDrive()\n        base.drive.node().setPos(-10, -10, 
2)\n        base.drive.node().setHpr(310, -10, 0)\n        \n        self.xaxis = self.loader.loadModel(\"models/longslug.egg\")\n        self.xaxis.reparentTo(self.mover)\n        self.xaxis.setColorScale(1, 0, 0, 1)\n        self.yaxis = self.loader.loadModel(\"models/longslug.egg\")\n        self.yaxis.reparentTo(self.mover)\n        self.yaxis.setH(90)\n        self.yaxis.setColorScale(0, 1, 0, 1)\n        self.zaxis = self.loader.loadModel(\"models/longslug.egg\")\n        self.zaxis.reparentTo(self.mover)\n        self.zaxis.setR(90)\n        self.zaxis.setColorScale(0, 0, 1, 1)\n        self.mover.setScale(.5, .5, .5)\n        \n        pos2 = self.mover.posInterval(5, (1, -1, 1))\n        hpr2 = self.mover.hprInterval(5, (45, 30, 60))\n        myParallel = Parallel(pos2, hpr2, name=\"par\")\n        myParallel.start()\n        \n        \nscene = Panda()\nscene.run()\n","sub_path":"axes.py","file_name":"axes.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64239185","text":"\"\"\"\nUse this module to define routes that are only useful in development environment\nfor manual testing\n\"\"\"\nfrom flask import redirect, url_for, render_template, session, request\nfrom flask_socketio import emit, join_room, rooms\nfrom flask_security import logout_user, login_user\n\nfrom controllers.health import divide_numbers\nfrom external_apis import sendgrid_api\nfrom store import database\nfrom webapp.dev_test import dev_test\nfrom webapp import socketio\nfrom lib import get_utcnow\nfrom webapp.helpers import get_real_or_anonymous_user, catch_errors, get_required_parameter\n\n\n@dev_test.route(\"/test-shop\")\ndef test_shop():\n    return render_template(\"test-shop.html\")\n\n\n@dev_test.route(\"/test-async-success\")\ndef test_async_success():\n    task = divide_numbers.delay(5, 5)\n    return \"Okay, received %s \" % task.id\n\n\n@dev_test.route(\"/test-async-error\")\ndef test_async_error():\n    task = divide_numbers.delay(5, 0)\n    return \"Okay, received %s \" % task.id\n\n\n@dev_test.route(\"/test-error\")\ndef test_error():\n    return \"Black hole: %s\" % (0 / 0)\n\n\n@dev_test.route(\"/test_review_send\")\ndef test_send_js():\n    # need to reset the db after each try because the review request is completed\n    return redirect(url_for(\"reviews.review_request_route\", review_request_id=1) + \"?review_request_token=token\")\n\n\n@dev_test.route(\"/list-celery-tasks\")\ndef list_celery_tasks():\n    from celery import current_app\n    tasks = [task for task in current_app.tasks]\n    return str(tasks)\n\n\n@socketio.on('SYN')\ndef socket_connect(js):\n    user = get_real_or_anonymous_user()\n    user_id = user.id\n    join_room('user-%s' % user.id)\n    rooms_list = rooms()\n    payload = {'user_id': user_id, 'datetime': str(get_utcnow()), 'rooms_list': rooms_list}\n    print('< SYN: ' + str(js))\n    print('> SYN/ACK: ' + str(payload))\n    emit('SYN/ACK', payload)\n\n\n@dev_test.route('/socketio')\ndef socket_test():\n    return render_template(\"socketio.html\")\n\n\n@dev_test.route('/test-dashboard_chat-embed')\ndef test_chat_embed():\n    return render_template(\"dummy_shop.html\")\n\n\n@dev_test.route('/test-email')\ndef send_test_email():\n    from controllers.mailing import schedule_email\n\n    content = render_template(\"emails/review_us.html\", user_name=\"Tomasz\", subject=\"Review us please\")\n    schedule_email(email_subject=\"Yo Brother!\", from_email_address=\"test@opinew.com\",\n                   destination_email=\"qba19922@gmail.com\",\n                   email_content=content, 
send_at=None)\n\n    return \"Hey! Email sent :D\"\n\n\n\n@dev_test.route('/get-real-or-anonymous-user-id')\ndef get_real_or_anonymous_user_id():\n    user = get_real_or_anonymous_user()\n    return str(user.id)\n\n\n@dev_test.route('/logout-or-renew-anonymous-user-id')\ndef logout_or_renew_anonymous_user_id():\n    logout_user()\n    session['anonymous_user_id'] = None\n    user = get_real_or_anonymous_user()\n    return str(user.id)\n\n\n@dev_test.route('/login-user-by-id')\ndef login_user_by_id():\n    user_id = request.args.get('user_id')\n    user = database.User.get_one_by(id=user_id)\n    login_user(user)\n    return str(user.id)\n\n@dev_test.route('/generate-shop-nonce')\ndef generate_shop_nonce():\n    from controllers.shopify import generate_nonce\n    return generate_nonce(\"IWannaBreakYourTest\")\n\n\n@dev_test.route('/check-shopify-access-token-generation')\ndef check_shopify_access_token_gen():\n    from external_apis.shopify import ShopifyAPI\n    shop_domain = \"opinew.myshopify.com\"\n\n    api = ShopifyAPI(shop_domain=shop_domain)\n    token = api.get_access_token(\"2652696f8cd7357ce591f2b6217cd918\", \"03674e6fec01d1aa7eecf7731eed98bb\",\n                                 \"42c86d15f1f81dad4fe3b7992dec68cb\")\n    return token\n\n\n@dev_test.route('/test-requirement-exception')\n@catch_errors()\ndef test_referrer_url():\n    get_required_parameter(request.args, 'not-existing-param')\n    return 'FAIL'\n\n@dev_test.route('/test-db-event')\ndef test_db_event():\n    subscription = database.Subscription.get_one_by(id=1)\n    subscription.quota_left -= 1\n    database.add(subscription)\n    database.push()\n    return \"\"\n\n@dev_test.route('/test-db-add')\ndef test_db_add():\n    subscription = database.Subscription.create(quota_left=300)\n    database.add(subscription)\n    database.add(subscription)\n    database.add(subscription)\n    database.push()\n    return \"\"\n","sub_path":"webapp/dev_test/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"593799554","text":"from openerp import tools\nfrom openerp.osv import osv, fields\nfrom openerp.tools.translate import _\nimport time\nimport openerp.addons.decimal_precision as dp\n\nclass kite_inventory(osv.osv):\n\n\t_name = \"kite.inventory\"\n\t_description = \"Kite Inventory\"\n\t\n\t_columns = {\n\t\t'supplier': fields.char('Supplier',size=128,required=True,readonly=False),\n\t\t'creation_date':fields.date('Creation Date',readonly=True),\n\t\t'state': fields.selection([('draft', 'Draft'),('cancel', 'Cancelled'),('confirm', 'Confirmed')],'Status',readonly=False),\n\t\t'active':fields.boolean('Active'),\n\t\t'grn_no': fields.char('GRN NO',size=128,required=True,readonly=False),\n\t\t'line_id':fields.one2many('kite.inventory.line','product_id','Inventory lines'),\n\t\t\n\t\t\n\t}\n\t_defaults = {\n\t\t'creation_date': fields.date.context_today,\n\t\t'state':'draft',\n\t\t'active':True,\n\t\t\n\t }\n\tdef create(self, cr, uid, vals,context=None):\t\t\n\t\tif vals.get('name','/')=='/':\n\t\t\tvals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'kite.inventory') or '/'\n\t\torder = super(kite_inventory, self).create(cr, uid, vals, context=context)\n\t\treturn order\n\tdef confirm_module(self,cr,uid,ids,context=None):\n\t\tmodule_plan_obj = self.pool.get('kite.inventory')\n\t\trec = self.browse(cr,uid,ids[0])\n\t\trec.write({'state':'confirm'})\n\t\nkite_inventory()\nclass kite_inventory_line(osv.osv):\n\t_name = \"kite.inventory.line\"\n\t_description = \"Kite Inventory line\"\n\t\n\t_columns = 
{\n\t\t\n\t\t'item_name':fields.char('Item name',size=128,required=True),\n\t\t'unit_price':fields.float('Unit Price'),\n\t\t'required_qty':fields.float('Required Qty'),\n\t\t'total_price':fields.float('Total price'),\n\t\t'product_id':fields.many2one('kite.inventory','Product line'),\n\t}\nkite_inventory_line()\n","sub_path":"main project module/module_inventory/kite_inventory.py","file_name":"kite_inventory.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421039350","text":"import itertools, os, sys, time\nsys.path.append('../')\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nuse_gpu = torch.cuda.is_available()\n\nfrom .valid import validate, validate_losses\nfrom utils.utils import AverageMeter\n\ndef train(train_iter, val_iter, model, criterion, optimizer, scheduler, SRC, TRG, num_epochs, logger=None): \n\n    # Iterate through epochs\n    bleu_best = -1\n    for epoch in range(num_epochs):\n        \n        # Validate model with BLEU\n        start_time = time.time() # timer \n        bleu_val = validate(val_iter, model, criterion, SRC, TRG, logger)\n        if bleu_val > bleu_best:\n            bleu_best = bleu_val\n            logger.save_model(model.state_dict())\n            logger.log('New best: {:.3f}'.format(bleu_best))\n        val_time = time.time()\n        logger.log('Validation time: {:.3f}'.format(val_time - start_time))\n\n        # Validate model with teacher forcing (for PPL)\n        val_loss = 0 #validate_losses(val_iter, model, criterion, logger) \n        logger.log('PPL: {:.3f}'.format(torch.FloatTensor([val_loss]).exp()[0])) \n\n        # Step learning rate scheduler\n        scheduler.step(bleu_val) # input bleu score\n\n        # Train model\n        model.train()\n        losses = AverageMeter()\n        for i, batch in enumerate(train_iter): \n            # Use GPU\n            src = batch.src.cuda() if use_gpu else batch.src\n            trg = batch.trg.cuda() if use_gpu else batch.trg\n            \n            # Forward, backprop, optimizer\n            model.zero_grad()\n            scores = model(src, trg)\n\n            # Debug -- print sentences\n            debug_print_sentences = False\n            if i == 0 and debug_print_sentences:\n                for k in range(src.size(1)):\n                    src_bs1 = src.select(1,k).unsqueeze(1) # bs1 means batch size 1\n                    trg_bs1 = trg.select(1,k).unsqueeze(1) \n                    model.eval() # predict mode\n                    predictions = model.predict(src_bs1, beam_size=1)\n                    predictions_beam = model.predict(src_bs1, beam_size=2)\n                    model.train() # back to training mode\n                    probs, maxwords = torch.max(scores.data.select(1,k), dim=1) # training-mode predictions\n                    logger.log('Source: ', ' '.join(SRC.vocab.itos[x] for x in src_bs1.squeeze().data))\n                    logger.log('Target: ', ' '.join(TRG.vocab.itos[x] for x in trg_bs1.squeeze().data))\n                    logger.log('Training Pred: ', ' '.join(TRG.vocab.itos[x] for x in maxwords))\n                    logger.log('Validation Greedy Pred: ', ' '.join(TRG.vocab.itos[x] for x in predictions))\n                    logger.log('Validation Beam Pred: ', ' '.join(TRG.vocab.itos[x] for x in predictions_beam)) \n                    logger.log()\n                return # end after debugging\n\n            # Drop the final position from scores and the start token from trg\n            scores = scores[:-1]\n            trg = trg[1:] \n\n            # Reshape for loss function\n            scores = scores.view(scores.size(0) * scores.size(1), scores.size(2))\n            trg = trg.view(scores.size(0))\n\n            # Pass through loss function\n            loss = criterion(scores, trg) \n            loss.backward()\n            losses.update(loss.data[0])\n\n            # Clip gradient norms and step optimizer\n            torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)\n            optimizer.step()\n\n            # Log within epoch\n            if i % 1000 == 10:\n                logger.log('''Epoch [{e}/{num_e}]\\t Batch [{b}/{num_b}]\\t Loss: {l:.3f}'''.format(e=epoch+1, 
num_e=num_epochs, b=i, num_b=len(train_iter), l=losses.avg))\n\n        # Log after each epoch\n        logger.log('''Epoch [{e}/{num_e}] complete. Loss: {l:.3f}'''.format(e=epoch+1, num_e=num_epochs, l=losses.avg))\n        logger.log('Training time: {:.3f}'.format(time.time() - val_time))\n    \n","sub_path":"training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"464852614","text":"import turtle\n\ngame_over = False\nscreen = turtle.Screen()\n\nclass Bike:\n    def __init__(self, size, color, pos):\n        self.size = size\n        self.color = color\n        self.pos = pos\n        self.lives = 3\n        self.t = turtle.Turtle()\n\n    def up(self):\n        if not game_over:\n            self.t.setheading(90)\n\n    def down(self):\n        if not game_over:\n            self.t.setheading(270)\n\n    def left(self):\n        if not game_over:\n            self.t.setheading(180)\n\n    def right(self):\n        if not game_over:\n            self.t.setheading(0)\n\nclass Trail:\n    def __init__(self, color):\n        self.t = []\n        self.color = color\n\nplayer = Bike(20, \"red\", [-180,0])\n# pass the bound methods themselves; calling them here would bind their\n# return value (None) instead of the handler\nscreen.onkeypress(player.up, \"Up\")\nscreen.onkeypress(player.down, \"Down\")\nscreen.onkeypress(player.left, \"Left\")\nscreen.onkeypress(player.right, \"Right\")\nscreen.listen()  # required so the window actually receives key presses\n\nwhile not game_over:\n    player.t.forward(10)\n\n\n    \n    \n    \n","sub_path":"Python/Projects Involving Classes/tron.py","file_name":"tron.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631295607","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nfrom re import finditer\nfrom shutil import copy\nfrom subprocess import run, PIPE\n\nd_corr = False\nzag = '''wait operator 0 * * * * *\n\nwait operator 0 \n\n'''\nwhile not d_corr:\n    d = input('Enter the date of the required as-run playlist in the format DD.MM.YYYY:')\n    dd = [num for num in d.split('.') if num.isdigit()]\n    if len(dd)==3:\n        if len(dd[2])==4 and len(dd[1])==2 and len(dd[0])==2:\n            try:\n                d1 = datetime(int(dd[2]), int(dd[1]), int(dd[0])) + timedelta(days=1)\n                d1 = \"{:%d:%m:%Y}\".format(d1)\n                d_corr = True\n            except ValueError:\n                d_corr = False\n\nairlog = 'air1_'+d1[6:]+d1[3:5]+d1[0:2]+'.log'\n\ntry:\n    run(['net', 'use', '\\\\\\\\192.168.0.92', '/user:onair0', '3A9b'], stdout=PIPE,\n        stderr=PIPE, shell=True)\n    copy('\\\\\\\\192.168.0.92\\D$\\ForwardData\\\\'+airlog, airlog)\n    with open(airlog) as log1, open('otrabot_za_'+dd[0]+'_'+dd[1]+'_'+dd[2]+'_92.air', 'w') as air:\n        regex = (r'\\d{2}:\\d{2}:\\d{2}\\.\\d{2} (?:Script take:|Script skip:).+ \\[ (.+) \\]')\n        air.write(zag)\n        air.write(''.join([match.group(1)+'\\n' for match in finditer(regex, log1.read())]))\n\nexcept FileNotFoundError:\n    print('Required file: '+airlog)\n    input('Press ENTER and farewell.')\n","sub_path":"92/Click_me_NEW.py","file_name":"Click_me_NEW.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441178883","text":"\"\"\"Problem: https://www.hackerrank.com/challenges/merge-the-tools/problem\"\"\"\n\ndef merge_the_tools(string, k): \n    # Check each k-length segment in string\n    for i in range(0, len(string), k):\n        keep = set(string[i:i+k])\n        for c in string[i:i+k]:\n            # If letter is in set, remove and print it\n            if c in keep:\n                keep.remove(c)\n                print(c, end='')\n        print()\n    return \n    
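\n# Added usage sketch (not part of the original solution): with the sample\n# input from the HackerRank problem statement, merge_the_tools('AABCAAADA', 3)\n# should print AB, CA and AD on separate lines.\nif __name__ == '__main__':\n    merge_the_tools('AABCAAADA', 3)\n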
","sub_path":"python/strings/merge_the_tools.py","file_name":"merge_the_tools.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81239917","text":"import sys\nimport argparse\nimport cv2\nfrom PIL import Image\nimport numpy as np\n\n# options\nVIDEONAME = './test.mp4'\nRESULTNAME = './result1_test3.jpg'\nBLURTRESH = -0.1\nFRAMESKIP = 15\n\ndef getFrames():\n frames = []\n cur_img = []\n counter = 0\n lastFrame = 0\n vidcap = cv2.VideoCapture(VIDEONAME)\n success,image = vidcap.read()\n old_image = None\n while success:\n success, image = vidcap.read()\n if(success):\n old_image = image\n w,h = tuple(image.shape[1::-1])\n left = image[0:0+h, 0:0+int(w / 100 * 5)]\n right = image[0:0+h, w-int(w / 100 * 5):0+w]\n top = image[h - int(h / 100 * 5):0+h, 0:0+w]\n bottom = image[0:0+int(h / 100 * 5), 0:0+w]\n\n if (counter == 0):\n cur_img.append(left)\n cur_img.append(right)\n cur_img.append(top)\n cur_img.append(bottom)\n frames.append(image)\n elif(lastFrame + 10 <= counter):\n resL = cv2.absdiff(left, cur_img[1]).astype(np.uint8)\n resR = cv2.absdiff(right, cur_img[0]).astype(np.uint8)\n resT = cv2.absdiff(top, cur_img[3]).astype(np.uint8)\n resB = cv2.absdiff(bottom, cur_img[2]).astype(np.uint8)\n\n percentageL = (np.count_nonzero(resL) * 100)/ resL.size\n percentageR = (np.count_nonzero(resR) * 100)/ resR.size\n percentageT = (np.count_nonzero(resT) * 100)/ resT.size\n percentageB = (np.count_nonzero(resB) * 100)/ resB.size\n print(percentageL, percentageR, percentageT, percentageB)\n if(percentageL >= 99.25 or percentageR >= 99.25 or percentageT >= 99.25 or percentageB >= 99.25):\n cur_img[0] = left\n cur_img[1] = right\n cur_img[2] = top\n cur_img[3] = bottom\n frames.append(image)\n print(\"found \" + str(len(frames)))\n lastFrame = counter\n \n counter = counter + 1\n\n return frames\n\nprint(\"[INFO] Getting frames...\")\nimages = []\n\nfor frame in getFrames():\n img = Image.fromarray(frame, 'RGB')\n images.append(frame)\n\nprint(\"[INFO] Stitching images... 
(images:\" + str(len(images)) + \")\")\nstitcher = cv2.Stitcher_create(1)\n\n(status, stitched) = stitcher.stitch(images)\n\nif status != 0:\n print(\"[ERR] Can't stitch images, error code = %d\" % status)\n sys.exit(-1)\n\nprint(\"[INFO] Writing new image...\")\ncv2.imwrite(RESULTNAME, stitched)\nprint(\"[INFO] Done\")","sub_path":"documentation/demo_tools/video_stitching_tool/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200786653","text":"##### 2.4 神经网络的“引擎”:基于梯度的优化\n\n# ⚠️这里没有可运行的代码。\n\n#### 2.4.2 张量运算的导数:梯度\n#### 2.4.3 随机梯度下降\n#### 2.4.4 链式求导:反向传播算法\n\n\"\"\"\n- 梯度:多元函数的导数。其中,多元函数是以张量为自变量的函数。\n\"\"\"\n# 原来就是这样。。。函数与导数的推广。。。\n\n# 而「损失」与「误差」同义(见Tariq的《Python神经网络编程》)\nloss_value = f(W)\n\n\"\"\"\n神经网络学习的目的就是找到一个使损失最小的权重值。这就转化成了微积分问题。\n\n求最值?那么需要确定函数的单调性。于是需要求导函数,判断导函数的大小(在某区间上\n大于/小于/等于0)。也需要找到导函数的零点。更多内容略。\n\n以上是数学的范围(解析法)。神经网络稍微大一点,函数表达式就会变得极其复杂而\n难以通过直接计算得出,因此需要转化为统计问题,并交给计算机暴力解决。\n\n到了工程上该怎么实现呢?不断给W加一个个△W(也就是「梯度」,函数在某个点上的曲率(一个张量))。\n怎样定△Wi呢?通过f'(Wi)的符号和大小。比如「曲率」(“函数导数/斜率”的推广)为正,\n我就让deltaW为负。比如曲率(|f'(Wi)|)越大,我就让|deltaWi|越小。即「方向」和\n「步长」/「学习率」。\n\n这是一种暴力算法。\n\n这从标量与导数推广到张量与梯度的范围,成了所谓的「梯度下降」。“山谷”就是函数的极值。\n多元函数通常有多个极值点,也就是多个“山谷”。随机起点的话,用上述暴力算法随机地运动,\n难免会掉落至不是最低的山谷里,也就是找到一个非最值的极值。因此就要涉及到更多问题。\n(不是此次的话题)\n\"\"\"\n\n\"\"\"\n- 随机梯度下降 (stochastic gradient descent)\n - 小批量SGD:每次迭代时只抽取一个样本及相应目标\n - 批量SGD:另一个极端,每次迭代使用所有数据\n - 真SGD:非小批量的SGD\n\"\"\"\n\n\"\"\"\nSGD还有多种变体,如带「动量」的SGD、Adagrad、RMSProp等。\n\n- 动量:来自物理学的隐喻。如果一个小球冲向谷底有足够大的动量,球就会冲出这个山谷到别的山谷。\n否则可能停留在不是最低的山谷里。这也就引入了“加速度”的概念。给实践的指导是,更新参数不仅要\n考虑当前梯度值,还要考虑之前的更新情况。\n\"\"\"\n\n\"\"\"\n- 反向传播/反式微分\n\n因此需要支持「符号微分」框架如TensorFlow来实现神经网络。\n\"\"\"","sub_path":"dlfr/ex020400.py","file_name":"ex020400.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46200935","text":"import random\n\ndef drawNumbers_fromList(list_ofNumbers,n):\n drawedNumbers=[]\n for x in range(n):\n randomNumber=random.choice(list_ofNumbers) #Chooses a random element(number) from the list\n drawedNumbers.append(randomNumber) #Appends the random number into a new list\n list_ofNumbers.remove(randomNumber) #removes the \"Drawed\" number from the pool numbers to draw from\n drawedNumbers.sort() #Sorts it for easier comparing\n return drawedNumbers\n\ndef compLists(list_1,list_2):\n match=0\n for i in range(len(list_1)): #Reads the list\n if list_2[i] in list_1: #If the number at spot i in the list2 is in list1,add 1 match\n match+=1\n return match \n\ndef Winnings(correctNumbers,addNumbers): #Checks conditions if they have won\n if correctNumbers==7:\n return 2749455\n if correctNumbers==6 and addNumbers>=0:\n return 102110\n if correctNumbers==6:\n return 3385\n if correctNumbers==5:\n return 95\n if correctNumbers==4 and addNumbers>=0:\n return 45\n return 0\n\ndef main(): #Returns amount they have won\n numbers=list(range(1,35))\n guessedList=[3, 6, 13, 17, 23, 25, 31]\n drawed=[3,6,13,18,23,25,31] #TestList if you win\n #drawed=drawNumbers_fromList(numbers,7)\n addNumbers=drawNumbers_fromList(numbers,3)\n print(guessedList,drawed,addNumbers)\n return Winnings(compLists(drawed,guessedList),compLists(addNumbers,guessedList))\n\nprint(\"Heisann! Hver forsøk koster 5 kr\")\n\nif main()>0:\n print(\"Du har vunnet {0} kr\".format(main()-5))\nelse:\n print(\"Beklage, du har tapt -5 kr. 
Lykke til neste gang\")\n\n    \n\n\n","sub_path":"TDT4110 Python 2018H/Assignment 6/Exercise 10 Lotto/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"197484833","text":"# This Python file uses the following encoding: utf-8\nimport os, sys\n\nimport json\nimport numpy as np\nimport collections\nimport itertools\nfrom collections import Counter\nimport re\nimport pandas as pd\nimport gensim \nfrom gensim.models import Word2Vec, Doc2Vec\nfrom gensim.models.keyedvectors import KeyedVectors\nimport csv\nfrom glove import Glove\nfrom glove import Corpus\nfrom keras.preprocessing.sequence import pad_sequences\n\nimport requests\nimport xlrd\nimport codecs\n\nimport xlsxwriter\nfrom bs4 import BeautifulSoup\n\"\"\"\n\n\n\"\"\"\n\n \ndef load_data_labels_all_aspects():\n    filename = 'data_cur.txt'\n    with open(filename, 'r') as myfile:\n        data=myfile.read().replace('\\n', '')\n    data_json = json.loads(data)\n    number_of_data = len(data_json)\n    number_of_label = 7\n    one_label = [0]*number_of_label\n    labels = []\n    x_train = []\n    # for each data select data and label\n    # general_satisfy = 1, appearance = 2, price = 3, battery = 4, performance =5, screen =6, camera =7\n    for i in range(number_of_data):\n        for j in range(7):\n            if(data_json[i]['sentiment'][j]['value'] != 'disabled'):\n                one_label[j] = 1\n        row_arr = re.split(r'[ \\.]+', data_json[i]['review'])\n        row_arr = [clean_vob(x) for x in row_arr]\n        x_train.append(row_arr)\n        labels.append(one_label)\n        one_label = [0]*number_of_label\n\n    return [x_train, np.asarray(labels)]\n\ndef load_data_labels_single_aspects():\n    filename = 'data_cur.txt'\n    filename1 = 'tokenize_data_products.txt'\n    with open(filename, 'r') as myfile:\n        data=myfile.read().replace('\\n', '')\n    data_json = json.loads(data)\n\n    with open(filename1) as f:\n        content = f.readlines()\n    content = [x.strip() for x in content] \n    number_of_data = len(data_json)\n\n    number_of_label = 2\n    one_label = [0]*number_of_label\n    labels = []\n    x_train = []\n    a = []\n    # general_satisfy = 1, appearance = 2, price = 3, battery = 4, performance =5, screen =6, camera =7\n    for i in range(number_of_data):\n        # irrelevance\n        if(data_json[i]['sentiment'][1]['value'] != 'disabled'):\n            one_label[1] = 1\n            a.append(1)\n        else:\n            #relevance\n            one_label[0] = 1\n            a.append(0)\n        row_arr = re.split(r'[ \\.]+', content[i])\n        row_arr = [clean_vob(x) for x in row_arr]\n        x_train.append(row_arr)\n        labels.append(one_label)\n        one_label = [0]*number_of_label\n    print(\"Counter\")\n    print(Counter(a))\n    return [x_train, np.asarray(labels)] \n\ndef pad_sentences(sentences, padding_word=\"\"):\n    \"\"\"\n    Pads all sentences to the same length. 
The length is defined by the longest sentence.\n Returns padded sentences.\n \"\"\"\n sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return (padded_sentences,sequence_length)\n\ndef build_vocab(sentences):\n \"\"\"\n Builds a vocabulary mapping from word to index based on the sentences.\n Returns vocabulary mapping and inverse vocabulary mapping.\n \"\"\"\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n\n # word_model = gensim.models.Word2Vec(padded_x_train, size=200, min_count = 1, window = 5)\n\n\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n\n return [vocabulary, vocabulary_inv]\n\n# def clean_vob(word):\n# word = word.replace(\".\",\"\")\n# word = word.replace(\",\",\"\")\n# word = word.replace(\"\\\"\",\"\")\n# return word\n\ndef clean_vob(string):\n \"\"\"\n Tokenization/string cleaning for datasets.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n stoplink = '/home/dtd/Documents/working_space/Product_Reviews/Data/vietnamese-stopwords-dash.txt'\n with open(stoplink) as f:\n content = f.readlines()\n prohibitedWords = [x.strip() for x in content] \n big_regex = re.compile('|'.join(map(re.escape, prohibitedWords)))\n the_message = big_regex.sub(\"\", str(string))\n return the_message\n # print(content)\n # string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n # string = re.sub(r\"\\'s\", \" \\'s\", string)\n # string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n # string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n # string = re.sub(r\"\\'re\", \" \\'re\", string)\n # string = re.sub(r\"\\'d\", \" \\'d\", string)\n # string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n # string = re.sub(r\",\", \" , \", string)\n # string = re.sub(r\"!\", \" ! \", string)\n # string = re.sub(r\"\\(\", \" \\( \", string)\n # string = re.sub(r\"\\)\", \" \\) \", string)\n # string = re.sub(r\"\\?\", \" \\? 
\", string)\n # string = re.sub(r\"\\s{2,}\", \" \", string)\n # exclusions = '|'.join(content)\n # string = re.sub(exclusions, '', string)\n\n # print(content[0])\n\n # for i in content:\n # string = string.replace(str(i), \"\", string)\n return string.strip().lower()\n\n\n# doc = 1 sentence\ndef document_vector(word2vec_model, doc):\n doc = [word for word in doc if word in word2vec_model.vocab]\n return np.mean(word2vec_model[doc], axis=0)\n\n \n\ndef build_vector_data(sentences, labels, model):\n n = len(sentences)\n distance = [[0 for k in range(n)] for j in range(n)]\n x_text = []\n sentences = sentences[0]\n for i in sentences:\n x_text.append(model[i])\n x_text = np.array(x_text)\n\n # print(distance)\n return x_text \n\ndef build_input_data(sentences, labels, vocabulary):\n \"\"\"\n Maps sentences and labels to vectors based on a vocabulary.\n \"\"\"\n x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])\n\n y = np.array(labels)\n return [x, y]\n\ndef return_embedding():\n model_wv = KeyedVectors.load_word2vec_format('W2VModelVN.bin', binary=True)\n return model_wv\n\n\ndef load_data():\n sentences, labels = load_data_labels_single_aspects()\n \n # sentences, labels = load_data_labels_all_aspects()\n padded_x_train, MAX_SEQUENCE_LENGTH = pad_sentences(sentences)\n #using gensim for word2vec.bin\n # model_wv = KeyedVectors.load_word2vec_format('W2VModelVN.bin', binary=True)\n # word_vectors.wv.train(padded_x_train)\n # model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True)\n model_wv = Word2Vec.load(\"Product_TGDD\") # word_vectors.train(padded_x_train)\n # word_vectors = Word2Vec.load_word2vec_format('W2VModelVN.bin', binary=True)\n\n #Using gensim for our data\n \n #model_wv = gensim.models.Word2Vec(padded_x_train, size=2*MAX_SEQUENCE_LENGTH, min_count = 1, window = 5)\n #mode_glove = Glove(no_components=30, learning_rate=0.05,alpha=0.75,max_count=100)\n #mode_glove.fit(padded_matrix,epochs=5,no_threads=2,verbose=False)\n weights = model_wv.wv.syn0\n voca = dict([(k, v.index) for k, v in model_wv.wv.vocab.items()])\n # print(model_wv.wv.similar_by_word(\"Cảm_ứng\"))\n #model_wv.save(\"Product\")\n\n # sentences_padded, maxxlen = pad_sentences(sentences)\n # vocabulary_inv = []\n vocabulary, vocabulary_inv = build_vocab(padded_x_train)\n x, y = build_input_data(padded_x_train, labels, voca)\n with open(\"vocabulary_list.txt\", \"w\") as text_file:\n text_file.write(\"Vocab : {0}\".format(len(vocabulary)))\n text_file.write(\"\\n\")\n text_file.write(\"sequence_length : {0}\".format(vocabulary))\n text_file.write(\"\\n\")\n with open(\"vocab.txt\", \"w\") as text_file:\n text_file.write(\"Vocab : {0}\".format(len(voca)))\n text_file.write(\"\\n\")\n text_file.write(\"sequence_length : {0}\".format(voca))\n text_file.write(\"\\n\")\n return [x, y, voca, weights]\n\ndef preprocess_single_sentence(sentence):\n x_text = []\n # x_text.append(clean_vob(sentence).split(\" \"))\n sentence = re.split(r'[ \\.]+', sentence)\n sentence = [clean_vob(x) for x in sentence]\n x_text.append(sentence)\n return x_text\n\ndef pad_single_sentence(sentence, size, padding_word=\"\"):\n \"\"\"\n Pads all sentences to the same length. 
The length is defined by the longest sentence.\n Returns padded sentences.\n \"\"\"\n sequence_length = size\n padded_sentences = []\n # for i in range(len(sentences)):\n sentence = sentence[0]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences\n\ndef replace_process(sentence, vocab):\n x = preprocess_single_sentence(sentence)\n for i in x:\n if(i not in vocab):\n x.remove(i)\n return x \n# m[0][i] => text; m[1][i] => label\n\n\ndef makeFeatureVec(words, model, num_features):\n # Function to average all of the word vectors in a given\n # paragraph\n #\n # Pre-initialize an empty numpy array (for speed)\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n #\n nwords = 0.\n # \n # Index2word is a list that contains the names of the words in \n # the model's vocabulary. Convert it to a set, for speed \n # index2word_set = model.vocab.items()\n # set(model.index2word)\n index2word_set = ( v.index for k, v in model.vocab.items())\n\n #\n # Loop over each word in the review and, if it is in the model's\n # vocaublary, add its feature vector to the total\n for word in words:\n if word in index2word_set: \n nwords = nwords + 1.\n featureVec = np.add(featureVec,model[word])\n # \n # Divide the result by the number of words to get the average\n featureVec = np.divide(featureVec,nwords)\n return featureVec\n\n\ndef getAvgFeatureVecs(reviews, model, num_features):\n # Given a set of reviews (each one a list of words), calculate \n # the average feature vector for each one and return a 2D numpy array \n # \n # Initialize a counter\n index = 0\n # \n # Preallocate a 2D numpy array, for speed\n reviewFeatureVecs = np.zeros((len(reviews),num_features),dtype=\"float32\")\n # \n # Loop through the reviews\n for i in range(len(reviews)):\n #\n # Print a status message every 1000th review\n # if counter%1000. 
== 0.:\n # print (\"Review %d of %d\" % (counter, len(reviews)))\n # \n # Call the function (defined above) that makes average feature vectors\n reviewFeatureVecs[i] = makeFeatureVec(reviews[i], model,num_features)\n #\n # Increment the counter\n return reviewFeatureVecs\n\n# load data and save to Product_Reviews_Word2vec\ndef build_word2vec_model():\n path = '/home/dtd/Documents/working_space/Product_Reviews/Data/result_token.xlsx'\n workbook_read = xlrd.open_workbook(path)\n worksheet_read = workbook_read.sheet_by_index(0)\n text = []\n number_of_data = worksheet_read.nrows\n sentences, labels = load_data_labels_single_aspects()\n for i in range(number_of_data):\n content = worksheet_read.cell(i,2).value\n row_arr = re.split(r'[ \\.]+', str(content).lower())\n row_arr = [clean_vob(x) for x in row_arr]\n text.append(row_arr)\n # data_train = text + sentences\n padded_x_train, MAX_SEQUENCE_LENGTH = pad_sentences(sentences)\n data_train = text +padded_x_train\n model_wv = gensim.models.Word2Vec(data_train, size=200, min_count = 1, window = 5)\n weights = model_wv.wv.syn0\n voca = dict([(k, v.index) for k, v in model_wv.wv.vocab.items()])\n model_wv.save(\"Product_TGDD\")\n with open(\"vocabulary_list.txt\", \"w\") as text_file:\n text_file.write(\"Vocab : {0}\".format(len(voca)))\n text_file.write(\"\\n\")\n text_file.write(\"sequence_length : {0}\".format(voca))\n text_file.write(\"\\n\")\n\ndef load_predict_data():\n path = '/home/dtd/Documents/working_space/Product_Reviews/Data/result_token.xlsx'\n workbook_read = xlrd.open_workbook(path)\n worksheet_read = workbook_read.sheet_by_index(0)\n text = []\n number_of_data = worksheet_read.nrows\n labels = []\n for i in range(number_of_data):\n content = worksheet_read.cell(i,2).value\n row_arr = re.split(r'[ \\.]+', str(content).lower())\n row_arr = [clean_vob(x) for x in row_arr]\n text.append(row_arr)\n labels.append(0) \n return [text, np.asarray(labels)] \n\ndef predict_data():\n x,y = load_predict_data()\n\n padded_x, max_sequence = pad_sentences(x)\n model_wv = Word2Vec.load(\"Product_TGDD\") \n weights = model_wv.wv.syn0\n voca = dict([(k, v.index) for k, v in model_wv.wv.vocab.items()])\n x = np.array([[voca[word] for word in sentence] for sentence in padded_x])\n return x\ndef label_predict_data():\n model = load_model('Screen_Acc_0.9.hdf5')\n x_predict = predict_data()\n\n loop_number = x_predict[0].shape[0] - 99\n\n a = x_predict[9][:-loop_number]\n a = np.reshape(a,(1,99))\n\n m = model.predict(a)\n\n\n\nif __name__ == \"__main__\":\n # model = KeyedVectors.load('W2VModelVN.bin', mmap='r')\n # model.syn0norm = model.syn0 # prevent recalc of normed vectors\n data = 'Sản_phẩm này rất tốt . đến_thì cần_thiết cho 1 chiếc điện_thoại phổ_thông . 
Lên mạng chia_sẻ tức_thời để_giống '\n m = clean_vob(data)\n print(data)\n print(m)\n # build_word2vec_model()\n # x, y, vocbulary, vocabulary_inv = load_data()\n # x_predict = predict_data()\n # print(x.shape)\n # print(x_predict.shape)\n # print(x_predict[0].shape[0])\n # a = [1,2,3,4,5]\n # loop_number = x_predict[0].shape[0] - 99\n\n # # for i in range(loop_number):\n # a = x_predict[0][:-loop_number]\n # print(a.shape)\n\n\n # a = a[:-1]\n \n # model = load_model('Acc_0.9.hdf5')\n # a = model.predict\n # for i in vocbulary:\n # print(i)\n\n\n\n \n \n\n\n","sub_path":"data_helpers.py","file_name":"data_helpers.py","file_ext":"py","file_size_in_byte":13957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239825332","text":"from flask import render_template, request, redirect, url_for, flash\nfrom flask import session as login_session\n\nfrom RestaurantApp.models import Restaurant\nfrom RestaurantApp.models import Menu\nfrom RestaurantApp.utility import db_session\nfrom RestaurantApp import app\n\n\n@app.route('/restaurant//menu//edit', methods=['GET', 'POST'])\ndef EditMenu(restaurant_id, menu_id):\n if 'username' not in login_session:\n return redirect('/login')\n edited_menu = db_session.query(Menu).filter_by(id=menu_id).one()\n restaurant = db_session.query(Restaurant).filter_by(id=restaurant_id).one()\n if login_session['user_id'] != restaurant.user_id:\n return render_template('parts_alert.html')\n if request.method == 'POST':\n if request.form['name']:\n edited_menu.name = request.form['name']\n if request.form['description']:\n edited_menu.description = request.form['description']\n if request.form['price']:\n edited_menu.price = request.form['price']\n if request.form['course']:\n edited_menu.course = request.form['course']\n db_session.add(edited_menu)\n db_session.commit()\n flash('Menu Item Successfully Edited')\n return redirect(url_for('ShowRestaurant', restaurant_id=restaurant_id))\n else:\n return render_template('form_edit_menu.html', restaurant_id=restaurant_id, menu_id=menu_id, item=edited_menu)\n","sub_path":"RestaurantApp/views/edit_menu.py","file_name":"edit_menu.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"524994470","text":"from django.shortcuts import render\nimport joblib\nfrom testapp.form import Customer_Form\nfrom django.contrib import messages\n\n# Create your views here.\n\ndef predict_salary(experience):\n predictor = joblib.load('linear_model.pkl')\n results = predictor.predict([[experience]])\n return results\n\ndef salary_view(request):\n if request.method == 'POST':\n form = Customer_Form(request.POST)\n if form.is_valid():\n exp = form.cleaned_data['experience']\n print(exp)\n salary = predict_salary(exp)\n messages.success(request,\"your salary is : {}\".format(salary))\n\n else:\n form = Customer_Form();\n return render(request,'testapp/employee_form.html', {'form': form})\n\n\n\n","sub_path":"testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418290240","text":"# -*- encoding: utf-8 -*-\r\n\"\"\"\r\n@File : Surprise_NormalPredictor.py\r\n@Time : 2020/11/21 14:44\r\n@Author : biao chen\r\n@Email : 1259319710@qq.com\r\n@Software: PyCharm\r\n\"\"\"\r\n\r\nfrom surprise import Dataset\r\nfrom surprise import Reader\r\nfrom surprise import NormalPredictor\r\nfrom 
surprise import accuracy\r\nfrom surprise.model_selection import KFold\r\n\r\n# Load the ratings data\r\nfile_path = 'E:/python/machina/kaggle_practice/week4/data/ratings.csv'\r\nreader = Reader(line_format='user item rating timestamp', sep=',', skip_lines=1)\r\ndata = Dataset.load_from_file(file_path, reader=reader)\r\ntrain_set = data.build_full_trainset()\r\n\r\n'''\r\n    SGD parameters (these apply to baseline estimators; NormalPredictor itself takes no options):\r\n        reg: regularization term of the cost function, default 0.02.\r\n        learning_rate: learning rate, default 0.005.\r\n        n_epochs: number of iterations, default 20.\r\n\r\n'''\r\n# NormalPredictor predicts a random rating drawn from the normal distribution fitted to the training set\r\nalgo = NormalPredictor()\r\n# Define a K-fold cross-validation iterator, K=3\r\nkf = KFold(n_splits=3)\r\nfor trainset, testset in kf.split(data):\r\n    algo.fit(trainset)\r\n    predictions = algo.test(testset)\r\n    accuracy.rmse(predictions, verbose=True)\r\nuid = str(196)\r\niid = str(302)\r\npred = algo.predict(uid, iid, r_ui=4, verbose=True)\r\nprint(pred)\r\n","sub_path":"week4/Surprise_NormalPredictor.py","file_name":"Surprise_NormalPredictor.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"181221968","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nimport json\nimport telebot\nimport requests\nfrom datetime import datetime\n\nAPI_TOKEN = '142988456:AAGVT1vDwAcEmTCE-J5OUcS6hc-Nd5pHeCo'\n\nbot = telebot.TeleBot(API_TOKEN)\n\n_logger = telebot.logger\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n    chat_id = message.chat.id\n    bot.send_message(chat_id, u\"\"\"\\\nHi! I am the non-official Telegram bot for the Python Brasil '12\nWrite /help for more help\n\"\"\")\n\n@bot.message_handler(commands=['help'])\ndef send_help(message):\n    chat_id = message.chat.id\n    bot.send_message(chat_id, u\"\"\"\\\n/start Send welcome message\n/help Show this help message\n/where Show location\n... 
(TODO)\nPlease contribute: https://github.com/citec/pybrtelegrambot\n\"\"\")\n\n@bot.message_handler(commands=['where'])\ndef send_where(message):\n    chat_id = message.chat.id\n    bot.send_message(chat_id, u'A #pybr12 vai ter lugar em Florianópolis')\n\nbot.polling()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"328576370","text":"from collections import deque\n\nrotorDict = {'I': ['EKMFLGDQVZNTOWYHXUSPAIBRCJ', 'Q'],\n             'II': ['AJDKSIRUXBLHWTMCQGZNPYFVOE', 'E'],\n             'III': ['BDFHJLCPRTXVZNYEIWGAKMUSQO', 'V'],\n             'IV': ['ESOVPZJAYQUIRHXLNFTGKDCMWB', 'J'],\n             'V': ['VZBRGITYUPSDNHLXAWMJQOFECK', 'Z'],\n             'VI': ['JPGVOUMFYQBENHZRDKASXLICTW', 'ZM'],\n             'VII': ['NZJHGRCXMYSWBOUFAIVLPEKQDT', 'ZM'],\n             'VIII': ['FKQHTLXOCBJSPDZRAMEWNIUYGV', 'ZM'],\n             'Beta': ['LEYJVCNIXWPBQMDRTAKZGFUHOS',''],\n             'Gamma': ['FSOKANUERHMBTIYCWLQPZXVGJD', '']}\nreflDict = {'A': 'EJMZALYXVBWFCRQUONTSPIKHGD',\n            'B': 'YRUHQSLDPXNGOKMIEBFZCWVJAT',\n            'C': 'FVPJIAOYEDRZXWGCTKUQSBNMHL',\n            'Thin B': 'ENKQAUYWJICOPBLMDXZVFTHRGS',\n            'Thin C': 'RDOBJNTKVEHMLFCWZAXGYIPSUQ'}\n\n\ndef init_enigma(name_reflektor, name_walzen, name_ringpos, paare_steckerbr, name_walzenpos):\n    reflektor = [ord(c)-65 for c in reflDict[name_reflektor]]\n    name_walzen = name_walzen.split()\n    ringpositionen = [int(x)-1 for x in name_ringpos.split()]\n    paare_steckerbr = paare_steckerbr.upper().split()\n    walzenpositionen = [ord(c)-65 for c in name_walzenpos.upper()]\n    steckerbrett_dict = {}\n    for paar in paare_steckerbr:\n        steckerbrett_dict[ord(paar[0])-65] = ord(paar[1])-65\n        steckerbrett_dict[ord(paar[1])-65] = ord(paar[0])-65\n    return reflektor, name_walzen, ringpositionen, steckerbrett_dict, walzenpositionen\n\n\ndef init_walzen(name_walzen, ringpositionen, walzenpositionen):\n    walzen = []\n    for i, r in enumerate(name_walzen):\n        walze = deque([ord(c)-65 for c in rotorDict[r][0]])\n        kerbe = {ord(k)-65 for k in rotorDict[r][1]}\n        walzen_pos = walzenpositionen[i]\n        ring_pos = ringpositionen[i]\n        walze.rotate(-walzen_pos+ring_pos)\n        walzen.append([walze, kerbe, walzen_pos, ring_pos])\n    return walzen\n\ndef schalte(w):\n    w[0].rotate(-1)\n    w[2] = (w[2]+1) % 26\n\ndef rotiere(walzen):\n    # only the three right-hand rotors step; the fourth rotor (far left on the M4) never rotates\n    rechts, mitte, links = walzen[-1:-4:-1]\n    # the double-stepping anomaly of the middle rotor: whenever the left rotor advances,\n    # it also carries the middle rotor one position forward\n    if mitte[2] in mitte[1]:\n        schalte(mitte)\n        schalte(links)\n    elif rechts[2] in rechts[1]:\n        schalte(mitte)\n        schalte(rechts)\n\ndef get_wi(offset):\n    alpha = deque([i for i in range(26)])\n    alpha.rotate(offset)\n    return alpha\n\n\ndef encode(plaintext):\n    ciphertext = \"\"\n    for c in plaintext:\n        c = ord(c)-65\n        if c < 0 or c > 25: continue  # skip anything that is not an uppercase letter\n        rotiere(walzen)\n        if c in steckerbrett_dict:\n            c = steckerbrett_dict[c]\n        for wlz, _, wp, rp in reversed(walzen):\n            wi = get_wi(-wp+rp)\n            c = wi.index(wlz[c])\n        c = reflektor[c]\n        for wlz, _, wp, rp in walzen:\n            wi = get_wi(-wp+rp)\n            c = wlz.index(wi[c])\n        if c in steckerbrett_dict:\n            c = steckerbrett_dict[c]\n        ciphertext += chr(c+65)\n    return ciphertext\n\n\n# Test cases\nreflektor, name_walzen, ringpositionen, steckerbrett_dict, walzenpositionen = init_enigma(\n    \"B\", \"II IV V\", \"02 21 12\", \"AV BS CG DL FU HZ IN KM OW RX\", \"BLA\")\nwalzen = init_walzen(name_walzen, ringpositionen, walzenpositionen)\nplaintext = 'EDPUD NRGYS ZRCXN UYTPO MRMBO FKTBZ REZKM LXLVE FGUEY SIOZV EQMIK UBPMM YLKLT TDEIS MDICA GYKUA CTCDO MOHWX MUUIA UBSTS LRNBZ SZWNR FXWFY SSXJZ VIJHI DISHP RKLKA YUPAD TXQSP INQMA TLPIF SVKDA SCTAC DPBOP VHJK-'\ncyphertext = 'AUFKLXABTEILUNGXVONXKURTINOWAXKURTINOWAXNORDWESTLXSEBEZXSEBEZXUAFFLIEGERSTRASZERIQTUNGXDUBROWKIXDUBROWKIXOPOTSCHKAXOPOTSCHKAXUMXEINSAQTDREINULLXUHRANGETRETENXANGRIFFXINFXRGTX' \nassert encode(plaintext) == cyphertext\n\nreflektor, name_walzen, ringpositionen, steckerbrett_dict, walzenpositionen = init_enigma(\n    \"B\", \"II IV V\", \"02 21 12\", \"AV BS CG DL FU HZ IN KM OW RX\", \"LSD\")\nwalzen = init_walzen(name_walzen, ringpositionen, walzenpositionen)\nplaintext = 'SFBWD NJUSE GQOBH KRTAR EEZMW KPPRB XOHDR OEQGB BGTQV PGVKB VVGBI MHUSZ YDAJQ IROAX SSSNR EHYGG RPISE ZBOVM QIEMM ZCYSG QDGRE RVBIL EKXYQ IRGIR QNRDN VRXCY YTNJR'\ncyphertext = 'DREIGEHTLANGSAMABERSIQERVORWAERTSXEINSSIEBENNULLSEQSXUHRXROEMXEINSXINFRGTXDREIXAUFFLIEGERSTRASZEMITANFANGXEINSSEQSXKMXKMXOSTWXKAMENECXK' \nassert encode(plaintext) == cyphertext\n\nreflektor, name_walzen, ringpositionen, steckerbrett_dict, walzenpositionen = init_enigma(\n    \"B\", \"III VI VIII\", \"01 08 13\", \"AN EZ HK IJ LR MQ OT PV SW UX\", \"UZV\")\nwalzen = init_walzen(name_walzen, ringpositionen, walzenpositionen)\nplaintext = 'YKAE NZAP MSCH ZBFO CUVM RMDP YCOF HADZ IZME FXTH FLOL PZLF GGBO TGOX GRET DWTJ IQHL MXVJ WKZU ASTR'\ncyphertext = 'STEUEREJTANAFJORDJANSTANDORTQUAAACCCVIERNEUNNEUNZWOFAHRTZWONULSMXXSCHARNHORSTHCO' \nassert encode(plaintext) == cyphertext\n\nreflektor, name_walzen, ringpositionen, steckerbrett_dict, walzenpositionen = init_enigma(\n    \"Thin B\", \"Beta II IV I\", \"01 01 01 22\", \"AT BL DF GJ HM NW OP QY RZ VX\", \"VJNA\")\nwalzen = init_walzen(name_walzen, ringpositionen, walzenpositionen)\nplaintext = 'NCZW VUSX PNYM INHZ XMQX SFWX WLKJ AHSH NMCO CCAK UQPM KCSM HKSE INJU SBLK IOSX CKUB HMLL XCSJ USRR DVKO HULX WCCB GVLI YXEO AHXR HKKF VDRE WEZL XOBA FGYU JQUK GRTV UKAM EURB VEKS UHHV OYHA BCJW MAKL FKLM YFVN RIZR VVRT KOFD ANJM OLBG FFLE OPRG TFLV RHOW OPBE KVWM UQFM PWPA RMFH AGKX IIBG'\ncyphertext = 
'VONVONJLOOKSJHFFTTTEINSEINSDREIZWOYYQNNSNEUNINHALTXXBEIANGRIFFUNTERWASSERGEDRUECKTYWABOSXLETZTERGEGNERSTANDNULACHTDREINULUHRMARQUANTONJOTANEUNACHTSEYHSDREIYZWOZWONULGRADYACHTSMYSTOSSENACHXEKNSVIERMBFAELLTYNNNNNNOOOVIERYSICHTEINSNULL' \nassert encode(plaintext) == cyphertext\n\nreflektor, name_walzen, ringpositionen, steckerbrett_dict, walzenpositionen = \\\n init_enigma(\"Thin B\", \"Beta II IV I\", \"01 01 01 22\", \"AT BL DF GJ HM NW OP QY RZ VX\", \"VJNA\")\nwalzen = init_walzen(name_walzen, ringpositionen, walzenpositionen)\n\n# Main Program Starts Here\nprint(\" ##### Enigma Encoder #####\")\nprint(\"\")\nplaintext = input(\"Enter text to encode or decode: \\n\").upper()\nprint(f'Encoded text: \\n{encode(plaintext).lower()}')\n","sub_path":"Teil_xx_enigma.py","file_name":"Teil_xx_enigma.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351343802","text":"# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass AcLinkS2TTx(Mo):\n \"\"\"\n The atomic counter link S2TTx.\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.dbg.AcLinkS2TTx\")\n\n meta.moClassName = \"dbgAcLinkS2TTx\"\n meta.rnFormat = \"linkS2TTx-%(srcSlotId)s-%(srcPortId)s-to-%(dstSlotId)s-%(dstPortId)s\"\n meta.category = MoCategory.REGULAR\n meta.label = \"On-Going Link Atomic Counter Transmit Stats\"\n meta.writeAccessMask = 0x800081800000001\n meta.readAccessMask = 0x800081800000001\n meta.isDomainable = False\n meta.isReadOnly = True\n meta.isConfigurable = False\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.parentClasses.add(\"cobra.model.dbg.AcTrailTx\")\n\n meta.superClasses.add(\"cobra.model.dbg.AcLinkA\")\n meta.superClasses.add(\"cobra.model.dbg.AcOgE\")\n\n meta.rnPrefixes = [\n ('linkS2TTx-', True),\n ('-', True),\n ('-to-', True),\n ('-', True),\n ]\n\n prop = PropMeta(\"str\", \"admitB\", \"admitB\", 4567, PropCategory.REGULAR)\n prop.label = \"Admitted Bytes\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"admitB\", prop)\n\n prop = PropMeta(\"str\", \"admitP\", \"admitP\", 4568, PropCategory.REGULAR)\n prop.label = \"Admitted Packets\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"admitP\", prop)\n\n prop = PropMeta(\"str\", \"admitTotB\", \"admitTotB\", 4571, PropCategory.REGULAR)\n prop.label = \"Admitted Bytes\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"admitTotB\", prop)\n\n prop = PropMeta(\"str\", \"admitTotP\", \"admitTotP\", 4572, PropCategory.REGULAR)\n prop.label = \"Admitted Packets\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"admitTotP\", prop)\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, 
PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"dropB\", \"dropB\", 4569, PropCategory.REGULAR)\n prop.label = \"Dropped Bytes\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"dropB\", prop)\n\n prop = PropMeta(\"str\", \"dropP\", \"dropP\", 4570, PropCategory.REGULAR)\n prop.label = \"Dropped Packets\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"dropP\", prop)\n\n prop = PropMeta(\"str\", \"dropTotB\", \"dropTotB\", 4573, PropCategory.REGULAR)\n prop.label = \"Dropped Bytes\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"dropTotB\", prop)\n\n prop = PropMeta(\"str\", \"dropTotP\", \"dropTotP\", 4574, PropCategory.REGULAR)\n prop.label = \"Dropped Packets\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"dropTotP\", prop)\n\n prop = PropMeta(\"str\", \"dstNodeId\", \"dstNodeId\", 4563, PropCategory.REGULAR)\n prop.label = \"Destination Fabric Node ID\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 16000)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"dstNodeId\", prop)\n\n prop = PropMeta(\"str\", \"dstNodeId2\", \"dstNodeId2\", 16000, PropCategory.REGULAR)\n prop.label = \"Destination VPC peer Fabric Node ID; valid only if pathType is vpcToVpc or nodeToVpc\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 16000)]\n prop.defaultValue = 0\n prop.defaultValueStr = \"0\"\n meta.props.add(\"dstNodeId2\", prop)\n\n prop = PropMeta(\"str\", \"dstPortId\", \"dstPortId\", 6017, PropCategory.REGULAR)\n prop.label = \"Destination Port Id of the Link\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n prop.range = [(1, 127)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"dstPortId\", prop)\n\n prop = PropMeta(\"str\", \"dstSlotId\", \"dstSlotId\", 6020, PropCategory.REGULAR)\n prop.label = \"Destination Slot Id of the Link\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n prop.range = [(1, 16)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"dstSlotId\", prop)\n\n prop = PropMeta(\"str\", \"firstCollTs\", \"firstCollTs\", 4565, PropCategory.REGULAR)\n prop.label = \"First Collection Timestamp in Milliseconds Sicne Epoch\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"firstCollTs\", prop)\n\n prop = PropMeta(\"str\", \"lastCollTs\", \"lastCollTs\", 4564, PropCategory.REGULAR)\n prop.label = \"Last Collection Timestamp in Milliseconds Sicne Epoch\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"lastCollTs\", prop)\n\n prop = PropMeta(\"str\", \"lcOwn\", \"lcOwn\", 9, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"local\"\n prop._addConstant(\"implicit\", \"implicit\", 4)\n prop._addConstant(\"local\", \"local\", 0)\n prop._addConstant(\"policy\", \"policy\", 1)\n prop._addConstant(\"replica\", 
\"replica\", 2)\n prop._addConstant(\"resolveOnBehalf\", \"resolvedonbehalf\", 3)\n meta.props.add(\"lcOwn\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"pathDn\", \"pathDn\", 16506, PropCategory.REGULAR)\n prop.label = \"DN of the path\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"pathDn\", prop)\n\n prop = PropMeta(\"str\", \"pathType\", \"pathType\", 15998, PropCategory.REGULAR)\n prop.label = \"Type of path\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"nodeToNode\"\n prop._addConstant(\"nodeToNode\", \"nodetonode\", 0)\n prop._addConstant(\"nodeToVpc\", \"nodetovpc\", 3)\n prop._addConstant(\"vpcToNode\", \"vpctonode\", 2)\n prop._addConstant(\"vpcToVpc\", \"vpctovpc\", 1)\n meta.props.add(\"pathType\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"seqNo\", \"seqNo\", 4566, PropCategory.REGULAR)\n prop.label = \"Sequence Number, Increaed by 1 every Collection\"\n prop.isConfig = True\n prop.isAdmin = True\n meta.props.add(\"seqNo\", prop)\n\n prop = PropMeta(\"str\", \"srcNodeId\", \"srcNodeId\", 4562, PropCategory.REGULAR)\n prop.label = \"Source Fabric Node ID\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 16000)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"srcNodeId\", prop)\n\n prop = PropMeta(\"str\", \"srcNodeId2\", \"srcNodeId2\", 15999, PropCategory.REGULAR)\n prop.label = \"Source VPC peer Fabric Node ID; valid only if pathType is vpcToVpc or vpcToNode\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 16000)]\n prop.defaultValue = 0\n prop.defaultValueStr = \"0\"\n meta.props.add(\"srcNodeId2\", prop)\n\n prop = PropMeta(\"str\", \"srcPortId\", \"srcPortId\", 6018, PropCategory.REGULAR)\n prop.label = \"Source Port Id of the Link\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n prop.range = [(1, 127)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"srcPortId\", prop)\n\n prop = PropMeta(\"str\", \"srcSlotId\", \"srcSlotId\", 6019, PropCategory.REGULAR)\n prop.label = \"Source Slot Id of the Link\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n prop.range = [(1, 16)]\n prop.defaultValue = 1\n prop.defaultValueStr = \"1\"\n meta.props.add(\"srcSlotId\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"srcSlotId\"))\n meta.namingProps.append(getattr(meta.props, \"srcPortId\"))\n meta.namingProps.append(getattr(meta.props, \"dstSlotId\"))\n meta.namingProps.append(getattr(meta.props, \"dstPortId\"))\n\n def __init__(self, parentMoOrDn, srcSlotId, srcPortId, dstSlotId, dstPortId, markDirty=True, **creationProps):\n 
namingVals = [srcSlotId, srcPortId, dstSlotId, dstPortId]\n        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n","sub_path":"venv/Lib/site-packages/cobra/modelimpl/dbg/aclinks2ttx.py","file_name":"aclinks2ttx.py","file_ext":"py","file_size_in_byte":9920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"121615776","text":"#! /usr/bin/env python\n# coding: utf-8\n\nimport os\nimport json\nimport subprocess\nfrom JYTools.JYWorker import RedisWorker\n\njava_path = os.environ.get(\"JAVA_PATH\", \"java\")\ngatk_path = os.environ.get(\"GATK_PATH\")\n\nclass RunMutect2(RedisWorker):\n    def handle_task(self, key, params):\n        self.task_log(\"Start Task\")\n        self.task_log(\"get params: %s\" % json.dumps(params))\n\n        bed_path = params[\"bed_path\"]\n        out_dir = params[\"out_dir\"]\n        tumor_getpileupsummaries_table = params[\"tumor_getpileupsummaries_table\"]\n        normal_getpileupsummaries_table = params[\"normal_getpileupsummaries_table\"]\n        mutect2_vcf = params[\"mutect2_vcf\"]\n\n        for each_path in [bed_path, tumor_getpileupsummaries_table, normal_getpileupsummaries_table, mutect2_vcf]:\n            if not os.path.exists(each_path):\n                self.set_current_task_error(\"path %s does not exist\" % each_path)\n            if not os.path.isfile(each_path):\n                self.set_current_task_error(\"path %s is not a file\" % each_path)\n        if not os.path.isdir(out_dir):\n            self.set_current_task_error(\"out_dir %s is not a directory\" % out_dir)\n\n        self.task_log(\"start process (5 steps in total)\")\n        self.task_log(\"step 1/5\")\n        contamination_table = os.path.join(out_dir, \"contamination.table\")\n        self.sub_process(\"{java_path} -jar {gatk_path} CalculateContamination -I {tumor_getpileupsummaries_table} \"\n                      \"-matched {normal_getpileupsummaries_table} -O {contamination_table}\".format(\n            java_path=java_path, gatk_path=gatk_path, tumor_getpileupsummaries_table=tumor_getpileupsummaries_table,\n            normal_getpileupsummaries_table=normal_getpileupsummaries_table, contamination_table=contamination_table))\n\n        self.task_log(\"step 2/5\")\n        self.sub_process(\"{java_path} -jar {gatk_path} IndexFeatureFile -F {mutect2_vcf}\".format(\n            java_path=java_path, gatk_path=gatk_path, mutect2_vcf=mutect2_vcf))\n\n        self.task_log(\"step 3/5\")\n        mutect2_snp_vcf = os.path.join(out_dir, \"mutect2_snp.vcf\")\n        self.sub_process(\"{java_path} -jar {gatk_path} FilterMutectCalls -V {mutect2_vcf} \"\n                         \"--max-events-in-region 20 --normal-artifact-lod 5 \"\n                         \"--contamination-table {contamination_table} --tumor-lod 10 -L {bed_path} \"\n                         \"-O {mutect2_snp_vcf}\".format(\n            java_path=java_path, gatk_path=gatk_path, mutect2_vcf=mutect2_vcf, contamination_table=contamination_table,\n            bed_path=bed_path, mutect2_snp_vcf=mutect2_snp_vcf))\n\n        self.task_log(\"step 4/5\")\n        mutect2_indel_vcf = os.path.join(out_dir, \"mutect2_indel.vcf\")\n        self.sub_process(\"{java_path} -jar {gatk_path} FilterMutectCalls -V {mutect2_vcf} --max-events-in-region 20 \"\n                         \"--contamination-table {contamination_table} --tumor-lod 20 -L {bed_path} \"\n                         \"-O {mutect2_indel_vcf}\".format(\n            java_path=java_path, gatk_path=gatk_path, mutect2_vcf=mutect2_vcf, contamination_table=contamination_table,\n            bed_path=bed_path, mutect2_indel_vcf=mutect2_indel_vcf))\n\n        self.task_log(\"step 5/5\")\n        mutect2_snp_head = os.path.join(out_dir, \"mutect2_snp.head\")\n        mutect2_indel_head = os.path.join(out_dir, \"mutect2_indel.head\")\n        snp_vcf = os.path.join(out_dir, \"snp.vcf\")\n        
indel_vcf = os.path.join(out_dir, \"indel.vcf\")\n mutect2_filter_vcf = os.path.join(out_dir, \"mutect2_filter.vcf\")\n self.sub_process(\"grep '#' {mutect2_snp_vcf} > {mutect2_snp_head}\".format(\n mutect2_snp_vcf=mutect2_snp_vcf, mutect2_snp_head=mutect2_snp_head))\n self.sub_process(\"grep '#' {mutect2_indel_vcf} > {mutect2_indel_head}\".format(\n mutect2_indel_vcf=mutect2_indel_vcf, mutect2_indel_head=mutect2_indel_head))\n self.sub_process(\"grep -v '#' {mutect2_snp_vcf} | awk 'length($4) == length($5) ' | \"\n \"grep -w PASS > {snp_vcf}\".format(mutect2_snp_vcf=mutect2_snp_vcf, snp_vcf=snp_vcf))\n self.sub_process(\"grep -v '#' {mutect2_indel_vcf} | awk 'length($4) != length($5) ' | \"\n \"grep -w PASS > {indel_vcf}\".format(mutect2_indel_vcf=mutect2_indel_vcf, indel_vcf=indel_vcf))\n self.sub_process(\"cat {mutect2_snp_head} {snp_vcf} {indel_vcf} > \"\n \"{mutect2_filter_vcf}\".format(mutect2_snp_head=mutect2_snp_head, snp_vcf=snp_vcf,\n indel_vcf=indel_vcf, mutect2_filter_vcf=mutect2_filter_vcf))\n self.set_output(\"mutect2_filter_vcf\", mutect2_filter_vcf)\n self.task_log(\"finished\")\n\n def sub_process(self, cmd, respond=False):\n self.task_log(cmd)\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, stderr=subprocess.STDOUT)\n out, err = child.communicate()\n r_code = child.returncode\n if out is not None:\n sub_log = out\n else:\n sub_log = err\n if r_code != 0:\n self.task_log(sub_log)\n self.set_current_task_error(\"command %s exit code not 0, is \" % cmd.split(\" \")[0] + str(r_code))\n else:\n self.task_log(sub_log)\n if respond is True:\n return out\n\n","sub_path":"fastq2vcf_pipeline/FileterVcfWorker.py","file_name":"FileterVcfWorker.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47236999","text":"from numpy import genfromtxt\nimport numpy as np\nimport os\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport math\n\nfrom scipy.signal import savgol_filter\n# yhat = savgol_filter(y, 51, 2) # window size 51, polynomial order 3\n\nwindow_size = 55\norder = 2\n\n# dqn_abs = savgol_filter(genfromtxt('./data/dqn_abs.csv', delimiter=','), window_size, order)\n# dqn_do = savgol_filter(genfromtxt('./data/dqn_do.csv', delimiter=','), window_size, order)\n# dqn_fic = savgol_filter(genfromtxt('./data/dqn_fic.csv', delimiter=','), window_size, order)\n# dqn_prd = savgol_filter(genfromtxt('./data/dqn_prd.csv', delimiter=','), window_size, order)\n# dqn_do_uniform = savgol_filter(genfromtxt('./data/dqn_do_uniform.csv', delimiter=','), window_size, order)\n\n\n# dqn_abs = genfromtxt('./data/dqn_abs.csv', delimiter=',')\n# dqn_do = genfromtxt('./data/dqn_do.csv', delimiter=',')\n# dqn_fic = genfromtxt('./data/dqn_fic.csv', delimiter=',')\n# dqn_prd = genfromtxt('./data/dqn_prd.csv', delimiter=',')\n# dqn_do_uniform = genfromtxt('./data/dqn_do_uniform.csv', delimiter=',')\n\n# axes = plt.gca()\n# axes.set_ylim([0.5,4])\n\n# x = np.arange(1, len(dqn_abs)+1)\n# plt.plot(x, dqn_abs, '-C1', label= \"HBS\")\n# plt.plot(x, dqn_do, '-C5', label= \"DO\")\n# plt.plot(x, dqn_fic, '-C4', label= \"Uniform\")\n# plt.plot(x, dqn_prd, '-C3', label= \"PRD\")\n# plt.plot(x, dqn_do_uniform, '-C2', label= \"DO+Unifrom\")\n#\n#\n# plt.xlabel(\"Number of Iterations\")\n# plt.ylabel(\"NashConv\")\n# plt.title(\"Average NashConv over 10 runs in Leduc Poker\")\n# plt.legend(loc=\"best\")\n# plt.show()\n\n\n\n################### Draw 
different NashConvs ##########################\n# deepmind_fic = savgol_filter(genfromtxt('./data/merged_data_kuhn/deepmind_fic.csv', delimiter=','), window_size, order)\n# Mike_fic = savgol_filter(genfromtxt('./data/merged_data_kuhn/Mike_fic.csv', delimiter=','), window_size, order)\n# dqn_do = savgol_filter(genfromtxt('./data/merged_data_kuhn/dqn_do.csv', delimiter=','), window_size, order)\n\ndeepmind_fic_mean = genfromtxt('./data/merged_data_kuhn/dqn_fic_deepmind_kuhn_mean.csv', delimiter=',')\nMike_fic_mean = genfromtxt('./data/merged_data_kuhn/dqn_fic_Mike_kuhn_mean.csv', delimiter=',')\ndqn_do_mean = genfromtxt('./data/merged_data_kuhn/dqn_DO_Mike_kuhn_mean.csv', delimiter=',')\ndeepmind_prd_mean = genfromtxt('./data/merged_data_kuhn/dqn_prd_deepmind_kuhn_mean.csv', delimiter=',')\nMike_prd_mean = genfromtxt('./data/merged_data_kuhn/dqn_prd_Mike_kuhn_mean.csv', delimiter=',')\n\ndeepmind_fic_std = genfromtxt('./data/merged_data_kuhn/dqn_fic_deepmind_kuhn_std.csv', delimiter=',')\nMike_fic_std = genfromtxt('./data/merged_data_kuhn/dqn_fic_Mike_kuhn_std.csv', delimiter=',')\ndqn_do_std = genfromtxt('./data/merged_data_kuhn/dqn_DO_Mike_kuhn_std.csv', delimiter=',')\ndeepmind_prd_std = genfromtxt('./data/merged_data_kuhn/dqn_prd_deepmind_kuhn_std.csv', delimiter=',')\nMike_prd_std = genfromtxt('./data/merged_data_kuhn/dqn_prd_Mike_kuhn_std.csv', delimiter=',')\n\naxes = plt.gca()\naxes.set_ylim([0,0.4])\n\nx = np.arange(1, 151)\nplt.plot(x, dqn_do_mean, '-b', label= \"DO\")\nplt.fill_between(x, dqn_do_mean+dqn_do_std, dqn_do_mean-dqn_do_std, alpha=0.1, color=\"b\")\n\nplt.plot(x, deepmind_fic_mean, '-C2', label= \"Heuristic-based Uniform\")\nplt.fill_between(x, deepmind_fic_mean+deepmind_fic_std, deepmind_fic_mean-deepmind_fic_std, alpha=0.1, color=\"C2\")\n\nplt.plot(x, Mike_fic_mean, '-C1', label= \"NE-based Uniform\")\nplt.fill_between(x, Mike_fic_mean+Mike_fic_std, Mike_fic_mean-Mike_fic_std, alpha=0.1, color=\"C1\")\n\n# plt.plot(x, deepmind_prd_mean, '-C2', label= \"Heuristic-based PRD\")\n# plt.fill_between(x, deepmind_prd_mean+deepmind_prd_std, deepmind_prd_mean-deepmind_prd_std, alpha=0.1, color=\"C2\")\n#\n# plt.plot(x, Mike_prd_mean, '-C1', label= \"NE-based PRD\")\n# plt.fill_between(x, Mike_prd_mean+Mike_prd_std, Mike_prd_mean-Mike_prd_std, alpha=0.1, color=\"C1\")\n\nplt.xticks(size = 17)\nplt.yticks(size = 17)\n\nplt.xlabel('Number of Iterations', fontsize = 22)\nplt.ylabel('NashConv', fontsize = 19)\n\n\nplt.xlabel(\"Number of Iterations\")\nplt.ylabel(\"NashConv\")\n# plt.title(\"NashConvs under Different Metrics\")\nplt.legend(loc=\"best\")\nplt.show()","sub_path":"open_spiel/python/algorithms/psro_v2/plot_curves/mean_curves_kuhn.py","file_name":"mean_curves_kuhn.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653515778","text":"# -*- coding: utf-8 -*-\n\"\"\"Chapter 03: the red-chain\n\"\"\"\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append('storybuilder')\n\nfrom storybuilder.builder import world as wd\nfrom src.redchain import config as cnf\nTHM = cnf.THEMES\n\n\n# scenes\n\n# episodes\ndef ep_idolyears(w: wd.World):\n ma, cafemas, manager, owner = w.masuda, w.cafemaster, w.manager, w.shopowner\n scenes = [\n w.scene(\"喫茶店にて\",\n ma.be(w.stage.cafe).d(\"池袋の西口を出て狭い路地を抜けて行った先に\",\n \"彼に指定された喫茶店はあった\"),\n ma.look().d(\"ガラスが格子状に嵌ったドアを開けると鈍いドアベルが鳴り\",\n \"エプロンをつけた体格の良い女性が「いらっしゃい」とよく通る声で迎えてくれる\"),\n 
ma.talk().t(\"えっと待ち合わせで……\"),\n ma.look().d(\"店内を見回す。\",\n \"十ほどのテーブル席は半分程度埋まっていたが\",\n \"どれも女性客ばかりで\",\n \"カウンターの一番奥に座っている彼だけが男性だった\"),\n ma.ask().t(\"$n_managerさんですか?\"),\n manager.reply().t(\"そうだがあんたが何を知りたいか知らんが\",\n \"もう$meからは$namehideのことは何も出てこんぞ\"),\n ma.look().d(\"小刻みに右膝が揺れている。\",\n \"白くなった頭髪の右の一部だけが黒く\",\n \"首がぐっと下がった猫背で\",\n \"濃い狐色のジャケットの襟元が汚れていた\"),\n ma.do().d(\"彼の左隣りに座り\", \"とりあえず珈琲のブレンドを注文しようとすると、\"),\n manager.talk().t(\"ここのは不味いからココアにしておきなさい\"),\n ma.deal().d(\"そう言われたので仕方なく\", \"苦手な甘い飲み物を頼んだ\"),\n ma.look().d(\"$na_managerは誰かを待っているのか何度も店の外側に視線を振っていたが\",\n \"十五分程度という約束で\",\n \"$n_hidekoについて話を聞かせてくれると言ってくれた\"),\n manager.talk().t(\"それで$namehideの何を知りたいんだ?\"),\n ma.look().d(\"七十は超えていると聞いていたが\",\n \"皺と染みの多い肌や潰れたような声\", \"その落ち着きのない仕草からは\",\n \"もっと歳がいっているように感じられる\"),\n ma.talk().t(\"実は今回$n_hidekoさんについてうちで特集記事を書くことになりまして\",\n \"それであなたが以前彼女が所属していた事務所の社長だったと聞いたので\",\n \"謎の多い彼女のその人となりや\",\n \"引退後も未だ多くの人の心に刻まれている$n_hidekoという女優について\",\n \"忌憚ないご意見をお聞かせ願えればと\"),\n ma.think().d(\"上滑りするような紋切り型の言葉だ。\",\n \"けれどまだ確定していない事実を口にして\",\n \"彼女が何故そんな場所で亡くなっていたのかの理由を探りたい\",\n \"等とは口が避けても言うことは出来なかった\"),\n manager.talk().t(\"何を勉強してきたのか知らないが$meにとって彼女\",\n \"$n_hidekoというのは将来この日本を代表する女優になる逸材だと今でも思っているし\",\n \"おそらく彼女に会ったことのあるこの業界の人間であればその名を決して忘れることはないと思うよ。\",\n \"華もない器量も良い訳じゃない人が好い訳でもなくただ真面目であるというだけの人間が\",\n \"役者という最高の仕事を得てスポットライトが当たるその一瞬だけは周囲の誰よりも輝きを見せる。\",\n \"それはね彼女が運良く掴んだ初主演の映画$i_movienameで既に完成されていたと言える\"),\n ma.look().d(\"$na_managerは先程までの貧乏ゆすりや落ち着かない視線を止め\",\n \"まるで長い長い台本があるかのように$heroの方を一度たりとも見ずに話を続ける\"),\n manager.talk().t(\"あの映画主演当時の彼女はまだ生娘だったよそれがだ\",\n \"突如身籠ってしまった少女の驚きと悲哀と絶望から最終的には覚悟を決めて一人荒屋に篭って出産をしたという\",\n \"常人では経験し得ないその物語を見事に演じ切りスクリーンにそれをリアルとして投影することに成功したんだ。\",\n \"監督による特別な演技指導があった訳じゃないしベテラン俳優たちに助けを求めた訳でもない。\",\n \"彼女の台本は撮影初日から既にボロボロで$meは新しいものを用意させようとしたくらいだったよ。\",\n \"けれど彼女はそれを拒否してもうその拒否するところから既に役になりきっていた\",\n \"いやあれは$i_movienameの$i_movieheroineそのものだったんだ\"),\n ma.ask().t(\"役者として凄かったというのも真面目だったというのも\",\n \"昔のお仲間さんたちから沢山聞かされましたよ。\",\n \"ただ彼らはあまり$namehideの私生活については語らなかったんです。\",\n \"当時\", \"公私ともに付き合いが深かったという$na_managerさんから見て\",\n \"普段はどんな人だったんですか?\"),\n ma.think().d(\"このままだと約束の十五分の大半を独演会に持って行かれてしまうと思い\",\n \"何とか方向修正しようと口を挟む\"),\n manager.talk().t(\"売れるようになるまでは確かに$meの家で一緒に暮らしていたよ。\",\n \"そのことで当時かなり週刊誌には色々と書かれてね。\",\n \"一体君等というのは売れれば何でもいいのか?\",\n \"人権も彼女らの人生も何もかも踏み躙ってそれで儲けて嬉しいのかね?\"),\n ma.talk().t(\"ごもっともな話だと思いますよ。\",\n \"ただそれだけ買う人間がいるということは\",\n \"やはり彼女たちについて何でも良いから知りたい\",\n \"知った上で近づきたいという願望があるんじゃないでしょうか\"),\n ma.look().d(\"咄嗟の思いつきだったが\",\n \"$na_managerはその言葉に小さく頷いてから初めてしっかりと$heroの方に顔を向けた\"),\n ma.talk().t(\"確かに適当なことを書いて儲けようという輩も当然います。\",\n \"そんな人間はゴミだと自分も思いますよ。\",\n \"けどそんな奴らよりはただ純粋に知りたくて取材をして記事を書いている\",\n \"その記事によってそこにはこんな真実が眠っていたんだって知らしめたいと考える人間だって\",\n \"同じかそれ以上にいると思うん��すよ\"),\n ma.think().d(\"どうして自分の口からそんな熱の入った言葉が出てくるのだろう\"),\n ma.think().d(\"別に今更もう新聞記者に戻ろうなんて思っていないし\",\n \"仕事も私生活も適当にしてなあなあで暮らしていって\", \"死んでいけばいいやと\",\n \"その程度にしか考えていなかったはずなのに。\",\n \"何だこれは\"),\n ma.think().d(\"それでも$heroの言葉は止まらなかった\"),\n ma.talk().t(\"だってですね\",\n \"誰かが真実を書かないと他人は勝手にああだこうだ適当なことを喋って\",\n \"恰もそれが本当のことだったみたいに彼らの記憶の中には埋没してしまうんですよ?\",\n \"他人の人生に責任なんて持てないはずなのに\",\n \"自分の人生にだって責任の一つも取れしない癖に\",\n \"好き勝手で都合の良い醜聞に仕立てて酒の肴にされるんですよ?\",\n \"それでもいいんですか?\"),\n ma.hear().d(\"ざわついていたと思った店内が\",\n \"いつの間にか静まり返っていた\"),\n ma.look().d(\"二十代から五十代くらいまでの女性客全ての視線が自分に集まっていることに気づいて\",\n \"$heroはわざとらしく咳払いをして膜の張ったココアに口を付ける\"),\n manager.talk().t(\"……あんたも$n_hidekoにやられた人間なんだな。\",\n \"これで安心して案内できるよ\"),\n ma.look().d(\"$na_managerの言葉の意味は理解出来なかったが\",\n \"彼は急に好々爺な笑みを浮かべるとカップの残りを飲み干して立ち上がる\"),\n ma.ask().t(\"あの……\"),\n manager.talk().t(\"店長。\",\n 
\"彼の分も$meに付けといてね\"),\n cafemas.talk().t(\"あいよ。\",\n \"良かったねお兄さん。\",\n \"この人気に入らないと奢ったりしないんだよ\"),\n ma.reply().t(\"はあ\"),\n ma.think().d(\"どこがどうなって$na_managerの心を掴めたのか分からないが\",\n \"席を離れて入り口の方に向かった彼を見て\",\n \"$heroも慌てて残りを飲み干すと\",\n \"慌ただしく鞄を手にして店を出て行った\"),\n ),\n w.scene(\"アイドル時代のこと\",\n ma.look(\"アイドル時代の資料を持っている人とやり取り\"),\n ma.come().d(\"連れて来られたのは十分ほど歩いたところに並ぶ五階建て雑居ビルの一つだった\"),\n ma.ask().t(\"あの\", \"ここは?\"),\n ma.look().d(\"エレベータは無く\",\n \"階段を登って四階まで行くと\",\n \"暗くなっている磨りガラスのドアを開けて中に入り\",\n \"$na_managerは電気を点けた\"),\n manager.talk().t(\"ここがね$meの今のオフィスなの\"),\n ma.look().d(\"そこはパーテーションで区切られた二十平米ほどの空間の片側に\",\n \"事務机が四つとプリンタやFAXや書類棚が並べられているだけの\",\n \"$heroの会社とそう大差ないように見える代物だった\"),\n ma.look().d(\"辛うじて芸能事務所らしさを感じさせるアイドルや映画のポスターが壁に貼られている程度で\",\n \"事務員すらいなくて何とも物悲しい。\",\n \"聞いていた話では$n_hidekoが所属していた頃には都内の一等地に持ちビルがあったというから\",\n \"その頃を知る人間がいたらあまりの落差にそっとドアを閉じて階段を駆け下りて行ってしまうことだろう\"),\n manager.talk().t(\"驚いたかい?\",\n \"でも世の中というのはこんなものだよ。\",\n \"良い時だけ見るから何だかとても日常とはかけ離れたきらびやかな世界に思われることが多いけれど現実なんてみんな風呂すらまともに入れずに\",\n \"水道で背中を流してご飯の上にはフリカケでもあればご馳走だなんて笑って話し合うような人間の集まりなんだよ\"),\n ma.think().d(\"そう笑って話���$na_managerはパソコンの電源を入れると\",\n \"インスタントコーヒーの粉を自分のカップに適当に振り落として\", \"ポットのお湯を注ぐ\"),\n manager.talk().t(\"その日を生きるにも大変な思いをしながらどうしてみんな舞台をやったり映画やドラマに出ようと躍起になってるか分かるかい?\"),\n ma.look().d(\"ティースプーンで適当に掻き回し\",\n \"まだ湯気を上げるそれを一口飲みながら彼は$heroを見る。\",\n \"どうやら一人で思いの丈を語ることはもう止めにしたらしい\"),\n ma.reply().t(\"夢だから\", \"ですか?\"),\n ma.think().d(\"なんてありきたりな答えなんだと思いながらも\",\n \"それ以外の言葉は$heroには見つけられない\"),\n manager.talk().t(\"夢だなんて綺麗な言葉じゃ現実が隠れちゃうよ。\",\n \"だから挨拶に来て「わたしの夢なんです」とか言う奴がいたら$meはこう言い直させることにしているんだ。\",\n \"それは夢じゃない。\",\n \"自分は有名になってみんなに認めてもらいたいんです。\",\n \"ここにいて自分はいいんだ。\",\n \"スポットライトを浴びてみんなが今の自分を愛してくれているんだ。\",\n \"ただそれが欲しいからがんばります……ってね\"),\n ma.think().d(\"先程まで年寄りの皺塗れの目をしていると思っていたそれは\",\n \"子供のような輝きで$heroへと向けられていた\"),\n manager.talk().t(\"何も芸能の仕事だけじゃないよあんただってそうだ。\",\n \"どんなに耳障りの良い口実を言ってみたところでその本質は自分を認めて欲しいっていうただそれだけの単純な動機なんだよ。\",\n \"それぞれにその実現方法が違うだけで誰しもが最初に誰かに認めてもらったという原体験の再現を目指しているだけなんだ\"),\n ma.think().d(\"自分の認められた原体験は\", \"一体何だったろうかと$heroは考える\"),\n ma.think().d(\"$n_hidekoにとってのそれは何だったのだろうかとも\", \"考える\"),\n ma.think().d(\"もし誰にも認められたことがなかったとしたら\",\n \"それは不幸な人生なのだろうか\"),\n ma.look().d(\"$na_managerは自分の席に座ると\",\n \"パソコンで作業を始めた\"),\n ma.talk().t(\"あの\", \"一つ訊いてもいいですか?\"),\n manager.talk().t(\"どうぞ。\",\n \"$meで答えられることなら\"),\n ma.ask().t(\"$n_hidekoはあなたが見つけて事務所に引き抜いたと教わったんですが\",\n \"彼女は元々何をしている人だったんですか?\"),\n ma.think().d(\"器用にマウスを動かしながら$na_managerは先程までの熱量を全く感じさせない淡々とした口調でこう返した\"),\n manager.talk().t(\"なんだ知らないのかい。\",\n \"彼女はね元アイドルだよ。\",\n \"若い人は聞いたことないかな。\",\n \"$i_idolnameっていうグループの$n_h_idolとしてそれなりに人気もあったんだよ\"),\n ),\n w.scene(\"元マネージャー\",\n ma.look().d(\"既に辺りは看板の電飾がよく目立つ時間帯に変わっていた\"),\n ma.come().d(\"$na_managerに連絡を取ってもらい\",\n \"当時のアイドルグループのマネージャーをやっていたという男に会いにやってきたのだが\",\n \"神田の駅の裏手をずっと歩いても一向にそれらしきビデオ店は見つからなかった\"),\n ma.ask().t(\"あの、すみません\"),\n ma.deal().d(\"仕方なくスーツ姿の男性に店の名前を出して場所を聞いてみる\"),\n w.man1.talk().t(\"それってさレンタルビデオじゃなくて\",\n \"ああいうのだよ\"),\n ma.look().d(\"男が指した方角を見ると赤と黄色の派手な看板に『個室ビデオ』と書かれていて\",\n \"$heroは思わずその男性に頭を下げた\"),\n ma.go().d(\"すれ違うには互いが体を傾けなければならないような狭い階段を登っていくと\",\n \"赤玉屋という屋��が書かれたドアが現れる。\",\n \"そっとそれを開けて中に入ったが\",\n \"狭い通路とその壁に沢山の女性のポスターが貼られているのがまず目に付いた。\",\n \"個室防音完備とあるが\",\n \"VRというのは何だろう\"),\n ma.deal().d(\"応対する店員は$heroと視線を合わせないまま呪文のようにぶつぶつと早口で注意点やメニューを呟いた後で\",\n \"どの個室を利用するのか尋ねてきた\"),\n ma.talk().t(\"あ、えっと$me\",\n \"オーナーの$na_shopownerさんに用があって。\",\n \"取り次いでもらえますかね?\"),\n ma.look().d(\"店員の男性は一瞬睨むように$heroを見たが\",\n \"「少々お待ち下さい」と口走ると暖簾を潜って奥の部屋に行き\",\n \"電話だろうか\", \"ぼそぼそとした話し声を響かせた\"),\n 
ma.deal().d(\"暫くして戻ってきた店員から店の裏手に行くように言われる。\",\n \"別の場所を管理室として借りていると説明された\"),\n ma.go().d(\"再び狭い階段を降り\", \"建物の裏手に回る。\",\n \"ビルとビルの間に入るようにして行くと\", \"何も書かれていないドアが突然現れた\"),\n ma.deal().d(\"それに手を掛けようとすると\", \"ドアノブが勝手に回り\",\n \"金属音の悲鳴を上げながらゆっくり開く\"),\n ma.look().d(\"そこにはドアから無事に出られるのだろうかと心配になるような横幅のしっかりした革ジャンの男性が\",\n \"たっぷりの無精髭を掻きながら出現した\"),\n owner.talk().t(\"あんた$na_managerさん紹介の人?\"),\n ma.reply().t(\"はいそうです。\",\n \"$st_officeの$na_masudaと言います\"),\n ma.deal().d(\"慌てて名刺を差し出したが、\"),\n owner.talk().t(\"もうそういうのはいいんだわ\"),\n ma.look().d(\"男はそう言って受け取らずに手で押し返した\"),\n ma.talk().t(\"えっと$na_managerさんから$n_hidekoのアイドル時代のマネージャーだったと聞いたのですけれど\"),\n ma.look().d(\"名刺を胸ポケットに仕舞いながら尋ねたが\",\n \"彼は小首を傾げてしばらく考え込んでから\",\n \"別の名前を口にする\"),\n owner.ask().t(\"それって$n_h_idolのことだよね?\",\n \"$i_idolnameの\"),\n ma.think().d(\"ここで押し問答するのも面倒だったので\",\n \"「そうです」と頷き\", \"中に入れてもらった\"),\n ma.look().d(\"中は十畳ほどの空間で\",\n \"そのうちの半分は畳が敷かれて一段高くなっており\",\n \"壁際に仮眠用だと思われる布団が追いやられていた\"),\n ma.move().d(\"その畳敷きの方に上がるよう言われ\",\n \"隅の黒くなったスニーカーを脱ぐ\"),\n ma.talk().t(\"こういう店を利用したことないんですけど\",\n \"その\",\n \"結構お客さんて多い方なんですかね\"),\n ma.behav().d(\"会話の緒が掴めないまま$heroは胡座で$na_shopownerの対面に坐った\"),\n owner.talk().t(\"防音効いてるし\",\n \"泊まったりもできるからね。\",\n \"漫画喫茶とそう変わらないよ\"),\n ma.look().d(\"彼は$heroの方を見ずに答えると\",\n \"テレビのリモコンを操作してバラエティ番組に切り替える。\",\n \"画面からは間断なく作られた笑い声が流れてきて不快だったが\",\n \"$na_shopownerは分厚い唇を歪ませてへらへらと声を出さずに笑っていた\"),\n ma.ask().t(\"それでお尋ねしたいのは$n_hidekoのアイドル時代についてなんですが\"),\n ma.think().d(\"まただった\"),\n ma.look().d(\"彼女のことを$n_hidekoと呼んだ途端に表情が険しくなる\"),\n owner.talk().t(\"そんな名前の女は知らない。\",\n \"$meが知ってるのはね\",\n \"$i_idolnameで活躍していた$n_h_idolだけだよ\"),\n ma.look().d(\"素手で袋に手を突っ込み\", \"掴んだポテチを口に放り込んでバリバリと音をさせる。\",\n \"テレビに合わせて鼻息を出すように笑い\", \"またポテチを食べる\"),\n ma.look().d(\"$na_managerから聞かされた人柄とは随分違っていた。\",\n \"当時アイドルたちをサポートするのに必死だった青年は\",\n \"一体どういう経緯で今個室ビデオ店のオーナーなんてやっているのだろう\"),\n ma.think().d(\"暫く待ってみたが何も話し出してくれる様子がないので\",\n \"資料には全然なかったそのアイドル時代の$n_hidekoについて尋ねてみる\"),\n ma.ask().t(\"その$n_h_idolさんですが\",\n \"グループの中でも人気があった方なんですか?\"),\n owner.talk().t(\"そんなことも調べないで来たの?\",\n \"まあ三人の中では断トツだったよ。\",\n \"そもそもね\",\n \"他の二人には悪いけど彼女一人だけ素質が違ったよ。\",\n \"何よりその姿勢。\",\n \"お客様に対する態度。\",\n \"そしてこれが一番大切なことだといつも言い聞かせていたんだけど\",\n \"普通の人間の女の子じゃ駄目だってこと。\",\n \"$beniはね\",\n \"触れると消えてしまいそうな儚さを持った妖精のようなアイドルだったよ。\",\n \"それがどうして女優なんてつまらないものに……\"),\n ma.hear().d(\"小さな舌打ちだった\"),\n ma.look().d(\"それを聞いた時に彼が$n_hidekoではなく$n_h_idolという名前に拘りを持っている理由の欠片が掴めたような気がした\"),\n ma.think().d(\"名前というものは彼女たちにとって自分そのものなのだろう。\",\n \"顔や出演した作品に番組\", \"出した曲や立った舞台\",\n \"実際にイベントやライブで対面したお客さんたちとの関係性も大事だが\",\n \"大多数の人間にとっては彼女というのは$n_hidekoであり$n_h_idolなのだ。\",\n \"それは今でも$n_shopownerの中で彼女というのは$n_h_idolであるように\",\n \"$n_managerや$n_coworkerにとっては$n_hidekoなのだろう\"),\n ma.ask().t(\"その$n_h_idolさんはあなたが見つけられたんですか?\"),\n ma.look().d(\"そう質問した時に初めて彼の目が一度だけだったが\", \"$heroに向けられた\"),\n owner.talk().t(\"そうだよ。\",\n \"地味な黒い制服姿で一人だけ街に埋もれるようにして立っていたんだ。\",\n \"当時はまだ中学生だった。\",\n \"何をしているのかと声を掛けたら\",\n \"彼女は真剣な表情でこう言ったんだ\"),\n w.h_idol.talk().d(\"$myは何もできません\"),\n owner.talk().t(\"最初は何か勘違いされたんだろうと思って\",\n \"自分はそういう人間じゃなくてアイドルのスカウトをしているけれど\",\n \"君みたいな子が一人でぶらついているのは危険だから声を掛けたと言ったんだ。\",\n \"そしたら彼女\", \"急に頭を下げてね\",\n \"こう言うんだよ\"),\n w.h_idol.talk().d(\"$myを何者かにして下さい\"),\n owner.talk().t(\"この子は一体何なんだろうって\",\n \"ちょっと言うとアレだけれど\",\n \"心を病んだ子なんじゃないだろうかって勘繰った。\",\n \"とにかく危ない気がして\",\n \"それでつい事務所に連れて帰ったんだ\"),\n ma.look().d(\"アイドルとしての$n_hidekoを語る姿は\",\n \"先程までとは別人だった。\",\n \"出会いのきっかけから家庭の事情を聞いた上での事務所通いの了承\",\n \"そしてグループとしてデビューさせるまでの苦労と彼女の努力。\",\n \"そういったものを話す内にテレビのバラエティ番組は終わり\",\n \"ベテラン女優の秘話をドキュメンタィ風に扱う番組へと内容が切り替わっていた\"),\n ma.ask().t(\"母子家庭で\",\n 
\"妹さんが一人いらしたんですか?\"),\n owner.talk().t(\"ああ、そうだよ。\",\n \"実際に一度も彼女の家族には会っていない。\",\n \"書類も全部彼女から手渡されたから\",\n \"ひょっとしたら全てが作り話で判子とかも勝手に使ったのかも知れない\",\n \"なんて当時は考えたりもしたよ。\",\n \"けど結果的に問題になったのはグループを組んだ他二人の方で\",\n \"$n_h_idolの家族は電話すらしてきたことはなかったね\"),\n ma.think().d(\"家族についての記述は集めたどの資料にも載っていなかったから\",\n \"今初めて母子家庭だったことを知った。\",\n \"それに加えてあまり家族仲も良くなかったようで\",\n \"連絡先も一切分からないと言われた\"),\n owner.talk().t(\"今でも時々考えるよ。\",\n \"彼女がどうしてアイドルを辞めて女優を選んだのかってね。\",\n \"だってその時の彼女にはソロデビューの話まで来てて\",\n \"アイドル映画だったけれど主演ドラマの企画も持ち上がっていたんだ。\",\n \"それを全て蹴ってまで女優を選んだ理由を\",\n \"今でも聞き出したいって思っているよ。\",\n \"$na_managerさんからは何も聞いてないんだよね?\"),\n ma.reply().t(\"ええ。そんな話は一切ありませんでしたから\"),\n ma.think().d(\"そこまで話し終えると\",\n \"$heroは$na_managerに訊けなかったことを口にしてみた\"),\n ma.ask().t(\"ところで今からニ十年前\",\n \"どうして彼女が突然芸能界を引退したのか。\",\n \"その件について何かご存知ではないですか?\"),\n owner.talk().t(\"それこそ$na_managerさんから訊いたんじゃないの?\"),\n ma.reply().t(\"自分の所為じゃない。\",\n \"これだけしか教えてもらえませんでしたよ。\",\n \"何か事務所とトラブルがあったとかなんですか?\"),\n ma.look().d(\"$na_shopownerは一瞬表情を曇らせたが\",\n \"それを振り払うように頭を横に振り\",\n \"一つだけ教えてくれた\"),\n owner.talk().t(\"あくまで噂話の一つだと思って聞いて欲しいんだが\",\n \"彼女\",\n \"不倫相手の子を身籠ったんだ。\",\n \"それもある大物俳優の子だって話で\",\n \"中絶を迫られて色々事務所も含めてゴタゴタがあったとか。\",\n \"本当に週刊誌レベルのネタだが\",\n \"当時この界隈じゃかなり信憑性が高いネタとして誰も表立っては口にしなかったんだ\"),\n ma.think().d(\"仮にそれが事実だったとすれば\",\n \"$n_hidekoはあの映画の女性と同じようにシングルマザーとして自分の子を育てながら生きた\",\n \"ということだろうか\"),\n ma.think().d(\"だとしても\",\n \"どういう経緯であの町に流れ着いたのか\",\n \"そして何故空き家で亡くなったのかについての疑問は何も解消されない\"),\n ma.think().d(\"その大物俳優の名は教えてもらえなかったが\",\n \"ただで一晩泊まらせてくれるというので\",\n \"$heroは個室ビデオの部屋の一つで夜を明かした\"),\n ma.look().d(\"準備されていたDVDの中には$n_hidekoの出演作なんて当然無く\",\n \"何となく写真から想像した$n_hidekoに似た女優のアダルトな作品を借りて\",\n \"彼女の人生を想像しながら眠りに就いた\"),\n ma.look().d(\"眠る前にメールを確認したが\",\n \"$n_tanabeからは何の言葉もなく\",\n \"$n_mikiにもう一日東京で調べる旨を送っておいた。\",\n \"すぐに返ってきたメールに驚きながらも許可は既に$n_tanabeから貰ってあると書かれていて\",\n \"相変わらずの卒の無さ��彼女のような女性が伴侶だったらという妄想が湧き上がったが\",\n \"場所が場所だけに何だか不埒なものになりそうだと\",\n \"すぐにかき消して瞼を閉じた\"),\n ma.know(w.h_idol),\n ma.look(w.h_idol, w.i.life),\n ma.come(w.stage.idoloffice, w.day.interview2),\n ma.deal(w.i.interview, \"当時を知る人\"),\n w.manager.talk(w.h_idol),\n w.manager.talk(\"辞めてからも付き合いがあった\"),\n w.manager.talk(\"事情があって芸能界引退\"),\n ma.know(w.i.retire_business),\n ),\n ]\n return [w.chaptertitle(\"アイドル時代\"),\n *scenes,\n ]\n\n\n\n# main\ndef story(w: wd.World):\n return ep_idolyears(w)\n\n\ndef main(): # pragma: no cover\n from src.redchain.story import world\n w = world()\n return w.build(story(w))\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main())\n\n","sub_path":"src/redchain/chapter03.py","file_name":"chapter03.py","file_ext":"py","file_size_in_byte":33785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250174878","text":"# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom typing import List, Dict, Tuple, Any, Iterator, Optional\n\nimport numpy as np\n\nfrom deeppavlov.core.common.registry import register\nfrom 
\n\n\n@register('ner_few_shot_iterator')\n
class NERFewShotIterator(DataLearningIterator):\n
    \"\"\"Dataset iterator for simulating a few-shot Named Entity Recognition setting.\n\n
    Args:\n
        data: list of (x, y) pairs for every data type in ``'train'``, ``'valid'`` and ``'test'``\n
        seed: random seed for data shuffling\n
        shuffle: whether to shuffle data during batching\n
        target_tag: the tag of interest. For this tag the few-shot setting will be simulated\n
        filter_bi: whether to strip the B-/I- prefixes of the BIO markup or not\n
        n_train_samples: number of training samples in the few-shot setting; the validation and test sets are\n
            left unchanged\n
        remove_not_targets: whether to replace all non-target tags with the `O` tag or not.\n
    \"\"\"\n\n
    def __init__(self,\n
                 data: Dict[str, List[Tuple[Any, Any]]],\n
                 seed: Optional[int] = None,\n
                 shuffle: bool = True,\n
                 target_tag: str = None,\n
                 filter_bi: bool = True,\n
                 n_train_samples: int = 20,\n
                 remove_not_targets: bool = True,\n
                 *args, **kwargs) -> None:\n
        super(NERFewShotIterator, self).__init__(data=data, seed=seed, shuffle=shuffle)\n
        self.target_tag = target_tag\n
        self.filter_bi = filter_bi\n
        self.n_train_samples = n_train_samples\n
        self.remove_not_targets = remove_not_targets\n
        if self.target_tag is None:\n
            raise RuntimeError('You must provide a target tag to NERFewShotIterator!')\n\n
        self.n_samples = len(self.train)\n\n
        if self.remove_not_targets:\n
            self._remove_not_target_tags()\n\n
        if self.filter_bi:\n
            for key in self.data:\n
                for n, (x, y) in enumerate(self.data[key]):\n
                    self.data[key][n] = [x, [re.sub('(B-|I-)', '', tag) for tag in y]]\n\n
        # tag_map[n] is True when the n-th training sentence contains the target tag\n
        self.tag_map = np.zeros(self.n_samples, dtype=bool)\n
        for n, (toks, tags) in enumerate(self.data['train']):\n
            if self.filter_bi:\n
                self.tag_map[n] = any(self.target_tag == tag for tag in tags if len(tag) > 2)\n
            else:\n
                self.tag_map[n] = any(self.target_tag == tag[2:] for tag in tags if len(tag) > 2)\n\n
        self.marked_nums = None\n
        self.unmarked_nums = None\n
        self._sample_marked()\n\n
    def _sample_marked(self):\n
        # Rejection sampling: keep drawing random sentence indices until\n
        # n_train_samples sentences containing the target tag have been marked.\n
        n_marked = 0\n
        self.marked_mask = np.zeros(self.n_samples, dtype=bool)\n
        while n_marked < self.n_train_samples:\n
            is_picked = True\n
            while is_picked:\n
                n = np.random.randint(self.n_samples)\n
                if not self.marked_mask[n]:\n
                    is_picked = False\n
                    self.marked_mask[n] = True\n
                    if self.tag_map[n]:\n
                        n_marked += 1\n\n
        self.marked_nums = np.arange(self.n_samples)[self.marked_mask]\n
        self.unmarked_nums = np.arange(self.n_samples)[~self.marked_mask]\n\n
    def _remove_not_target_tags(self):\n
        if self.remove_not_targets:\n
            for key in self.data:\n
                for n, (x, y) in enumerate(self.data[key]):\n
                    tags = []\n
                    for tag in y:\n
                        if tag.endswith('-' + self.target_tag):\n
                            tags.append(tag)\n
                        else:\n
                            tags.append('O')\n
                    self.data[key][n] = [x, tags]\n\n
    def get_instances(self, data_type: str = 'train') -> Tuple[List[List[str]], List[List[str]]]:\n
        \"\"\"Get all data for a selected data type\n\n
        Args:\n
            data_type (str): can be either ``'train'``, ``'test'``, ``'valid'`` or ``'all'``\n\n
        Returns:\n
            a tuple of all inputs for a data type and all expected outputs for a data type\n
        \"\"\"\n\n
        if data_type == 'train':\n
            # only the sentences sampled by _sample_marked form the few-shot train set\n
            samples = [self.data[data_type][i] for i in self.marked_nums]\n
        else:\n
            samples = self.data[data_type][:]\n\n
        x, y = list(zip(*samples))\n\n
        return x, y\n\n
    def gen_batches(self, batch_size: int,\n
                    data_type: str = 'train',\n
                    shuffle: Optional[bool] = None) -> Iterator[Tuple[List[List[str]], List[List[str]]]]:\n
        x, y = self.get_instances(data_type)
\n        data_len = len(x)\n\n
        if data_len == 0:\n
            return\n\n
        order = list(range(data_len))\n
        if shuffle is None and self.shuffle:\n
            self.random.shuffle(order)\n
        elif shuffle:\n
            self.random.shuffle(order)\n\n
        if batch_size < 0:\n
            batch_size = data_len\n\n
        for i in range((data_len - 1) // batch_size + 1):\n
            yield tuple(zip(*[(x[o], y[o]) for o in order[i * batch_size:(i + 1) * batch_size]]))\n","sub_path":"deeppavlov/dataset_iterators/ner_few_shot_iterator.py","file_name":"ner_few_shot_iterator.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"437945094","text":"def solution(li):\n    leng = len(li)\n    maxLen = 0\n    i = 0\n    while i < leng-1:\n        if leng-1-i < maxLen:\n            break\n        count = 0\n        temp = i\n        while temp < leng-1 and li[temp+1] > li[i]:\n            count += 1\n            temp += 1\n        i = temp+1\n        if maxLen < count:\n            maxLen = count\n    return maxLen\n\n\ndef fib_recursive(n: int) -> int:\n    \"\"\"\n    Calculate the n-th number of the Fibonacci sequence using a recursive algorithm\n\n    :param n: number of item\n    :return: Fibonacci number\n    \"\"\"\n    if n < 0:\n        raise ValueError\n    if n <= 1:\n        return n\n    return fib_recursive(n-1) + fib_recursive(n-2)\n\n\ndef fib_iterative(n: int) -> int:\n    \"\"\"\n    Calculate the n-th number of the Fibonacci sequence using an iterative algorithm\n\n    :param n: number of item\n    :return: Fibonacci number\n    \"\"\"\n    if n < 1:\n        raise ValueError\n    fib = [0, 1]\n    if 1 <= n <= 2:\n        return fib[n-1]\n    i = 3\n    prev_1 = fib[-1]\n    prev_2 = fib[-2]\n    while i != n+2:\n        prev_1, prev_2 = prev_1+prev_2, prev_1\n        i += 1\n    return prev_1\n\n\nif __name__ == '__main__':\n    n = 9\n    print(fib_iterative(n))\n    print(fib_recursive(n))\n","sub_path":"Tasks/c0_fib.py","file_name":"c0_fib.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"405367151","text":"import cv2\nimport numpy as np\n\n# load image\nimage_name = 'displacemap'\n# image_name = 'depthmappppppppppppp'\nimg = cv2.imread(image_name + '.exr', cv2.IMREAD_UNCHANGED)\n\nmaxi = np.amax(img)\nmini = np.amin(img)\nprint(maxi, mini)\n# override the measured range with a fixed one for normalization\nmaxi = 0.01\nmini = -0.01\n\ndef f2uc(x):\n    return int(255 * x)\n\nprint(img[1400][1600])\n\n\nimg2 = 255 * (img - mini) / (maxi - mini)\nimg2 = img2.astype(int)\n\n\nprint(img2[1400][1600])\n\n# cv2.imshow('img', img2)\ncv2.imwrite(image_name + '_normalize.bmp', img2)\n# cv2.waitKey(0)\n\ndel img","sub_path":"normalize_to_exr.py","file_name":"normalize_to_exr.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"498073941","text":"\"\"\" foodmap_utils.py\n\n    RIT Food Map is a demonstration of utilizing Cloud Technologies for SWEN 549\n    It utilizes Google's Maps API, as well as Google's Cloud Storage.\n    Deployed on Google App Engine.\n\n    @Authors Nikolas Tilley\n\"\"\"\n\nimport os\nimport cloudstorage as gcs\n\nfrom google.appengine.api import users\nfrom google.appengine.api import app_identity\n\n\ndef get_bucket_info():\n    bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())\n    info = ('version: ' + os.environ['CURRENT_VERSION_ID'] + '\n'\n            + 'name: ' + bucket_name)\n    return info\n\n\ndef list_bucket_contents(folder_name=''):\n    # Get the Default Bucket Name\n    bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())\n    bucket = '/' + bucket_name\n    contents = ''\n\n    folder = '/' + folder_name\n\n    page_size = 1\n    files = gcs.listbucket(bucket + folder, max_keys=page_size)\n    while True:\n        count = 0\n
        for file in files:\n            count += 1\n            contents += repr(file) + '<br>'
\n\n        if count != page_size or count == 0:\n            break\n        files = gcs.listbucket(bucket + folder, max_keys=page_size, marker=file.filename)\n\n    return contents\n\n\ndef read_gcs_file(filename):\n    bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())\n    bucket = '/' + bucket_name\n\n    gcs_file = gcs.open(bucket + '/' + filename)\n    file_contents = gcs_file.read()\n    gcs_file.close()\n    return file_contents","sub_path":"foodmap/foodmap_utils.py","file_name":"foodmap_utils.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
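The `list_bucket_contents` record above pages through a bucket one `max_keys`-sized chunk at a time, passing the last filename seen as `marker` and stopping when a page comes back short or empty. Below is a minimal, self-contained sketch of that marker-based pagination pattern; `FakeStat` and `fake_listbucket` are hypothetical stand-ins for cloudstorage's stat objects and `listbucket()` call, so the sketch runs without App Engine:

```python
from collections import namedtuple

# Hypothetical stand-ins for cloudstorage's stat objects and listbucket().
FakeStat = namedtuple('FakeStat', ['filename'])
_FILES = [FakeStat('/bucket/a.txt'), FakeStat('/bucket/b.txt'), FakeStat('/bucket/c.txt')]


def fake_listbucket(path_prefix, max_keys=None, marker=None):
    # Like gcs.listbucket: yield stats after `marker`, at most `max_keys` of them.
    stats = [s for s in _FILES if s.filename.startswith(path_prefix)]
    if marker is not None:
        stats = [s for s in stats if s.filename > marker]
    return iter(stats[:max_keys])


def list_all(path_prefix, page_size=1):
    names = []
    files = fake_listbucket(path_prefix, max_keys=page_size)
    while True:
        count = 0
        for file in files:
            count += 1
            names.append(file.filename)
        if count != page_size or count == 0:
            break  # a short (or empty) page means the listing is exhausted
        files = fake_listbucket(path_prefix, max_keys=page_size, marker=file.filename)
    return names


print(list_all('/bucket'))  # ['/bucket/a.txt', '/bucket/b.txt', '/bucket/c.txt']
```

The design choice worth noting is the termination test: rather than asking the backend whether more pages exist, the loop infers it from the page size, which is exactly what the original helper does.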
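Further up, the `NERFewShotIterator` record simulates a few-shot setting by rejection sampling: it draws random training-sentence indices until the requested number of sentences containing the target tag have been marked, and only the marked indices feed `get_instances('train')`. Here is a sketch of just that sampling step with the class machinery stripped away; the function name and the explicit seeding are my own, not DeepPavlov's API:

```python
import numpy as np


def sample_marked(tag_map, n_train_samples, seed=42):
    # tag_map[i] is True when training sentence i contains the target tag.
    rng = np.random.RandomState(seed)
    n = len(tag_map)
    marked = np.zeros(n, dtype=bool)
    n_marked = 0
    while n_marked < n_train_samples:
        i = rng.randint(n)
        if marked[i]:
            continue        # index already drawn, try another one
        marked[i] = True    # sentences without the tag can be marked too,
        if tag_map[i]:      # but only tagged ones count toward the budget
            n_marked += 1
    return np.arange(n)[marked], np.arange(n)[~marked]


marked, unmarked = sample_marked(np.array([True, False, True, True, False]), 2)
print(marked, unmarked)
```

As in the original iterator, the loop assumes at least `n_train_samples` sentences actually contain the target tag; with fewer, it would never terminate.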