diff --git "a/3648.jsonl" "b/3648.jsonl" new file mode 100644--- /dev/null +++ "b/3648.jsonl" @@ -0,0 +1,317 @@ +{"seq_id":"40921326438","text":"# 导入包含有 KNN 算法的模块 neighbors\nfrom sklearn import neighbors\n# 导入数据集 datasets 模块\nfrom sklearn import datasets\n\n\n# 调用 KNN 分类器\nknn = neighbors.KNeighborsClassifier()\n\n\n# datasets.load_iris() 返回数据库\niris = datasets.load_iris()\n\n\nprint(iris)\n\n\n# KNN 建模,iris.data:150*4(萼片长度、萼片宽度、花瓣长度、花瓣宽度) 的特征向量;iris.target:150*1 的特征向量:每一行的花都是什么类别\nknn.fit(iris.data, iris.target)\n# 根据已有模型预测新的对象\npredictedLabel = knn.predict([[0.1, 0.2, 0.3, 0.4]])\n# [0] 代表 setosa 类\nprint(predictedLabel)\n","repo_name":"EpitomM/ML","sub_path":"KNN/SklearnExample.py","file_name":"SklearnExample.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"32991158215","text":"import random\nimport copy\nimport time\n\ndef initialize_grid(rows, cols):\n return [[random.choice([0, 1]) for i in range(cols)] for j in range(rows)]\n\ndef print_grid(grid):\n for row in grid:\n print(' '.join(['*' if cell else ' ' for cell in row]))\n print('\\n')\ndef count_neighbors(grid, x, y):\n neighbors = [(x-1, y-1), (x-1, y), (x-1, y+1),\n (x, y-1), (x, y+1),\n (x+1, y-1), (x+1, y), (x+1, y+1)]\n rows, cols = len(grid), len(grid[0])\n count = 0\n for i, j in neighbors:\n if 0 <= i < rows and 0 <= j < cols:\n count += grid[i][j]\n return count\ndef update_grid(grid):\n new_grid = copy.deepcopy(grid)\n rows, cols = len(grid), len(grid[0])\n for i in range(rows):\n for j in range(cols):\n neighbors = count_neighbors(grid, i, j)\n if grid[i][j]:\n if neighbors < 2 or neighbors > 3:\n new_grid[i][j] = 0\n else:\n if neighbors == 3:\n new_grid[i][j] = 1\n return new_grid\n\ndef game_of_life(rows, cols, generations):\n grid = initialize_grid(rows, cols)\n for _ in range(generations):\n print_grid(grid)\n grid = update_grid(grid)\n time.sleep(0.7)\n\nif __name__ == \"__main__\":\n rows = int(input(\"Enter the number of rows: \"))\n cols = int(input(\"Enter the number of columns: \"))\n generations = int(input(\"No. 
of generations : \"))\n game_of_life(rows, cols, generations)\n\n\n\n\n","repo_name":"Althaf-S/game_of_life","sub_path":"gameoflife.py","file_name":"gameoflife.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"74564632018","text":"#!/usr/bin/env python\n\nfrom future.utils import viewitems\n\nimport hashlib\nimport hmac\n\nimport cherrypy\n\nfrom Utils.Utilities import lowerCmsHeaders\n\n\ndef get_user_info():\n \"\"\"\n Helper function to return user based information of the request\n \"\"\"\n return cherrypy.request.user\n\n\nclass FrontEndAuth(cherrypy.Tool):\n \"\"\"\n Transparently allows a back-end cmsweb app to do\n authn/z based on the headers sent by the front-end.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Read hmac secret and define cherrypy hook point.\"\"\"\n # reads the bin key used to verify the hmac\n with open(config.key_file, \"rb\") as f:\n self.key = f.read()\n\n # Defines the hook point for cherrypy\n self._name = None\n self._point = 'before_request_body'\n self._priority = 60 # Just after the sessions being enabled\n\n def callable(self, role=None, group=None, site=None, authzfunc=None):\n \"\"\"\n This is the method that is called in the cherrypy hook point (whenever\n the user-agent requests a page of the back-end app.\n \"\"\"\n role = role or []\n group = group or []\n site = site or []\n # Sets initial user information for this request\n assert getattr(cherrypy.request, \"user\", None) is None\n cherrypy.request.user = {'dn': None,\n 'method': None,\n 'login': None,\n 'name': None,\n 'roles': {}}\n\n # Checks authn by reading front-end headers\n self.check_authentication()\n\n # Now checks authz\n self.check_authorization(role, group, site, authzfunc)\n\n # The user is authenticated and authorized. 
Then cherrypy will\n        # call the proper app handler to show the requested page.\n        # User info still available under cherrypy.thread_data.user.\n\n    def check_authentication(self):\n        \"\"\"Read and verify the front-end headers, update the user\n        dict with information about the authorized user.\"\"\"\n        headers = lowerCmsHeaders(cherrypy.request.headers)\n        user = get_user_info()\n\n        if 'cms-auth-status' not in headers:\n            # Non SSL request\n            raise cherrypy.HTTPError(403, \"You are not allowed to access this resource.\")\n\n        if headers['cms-auth-status'] == 'NONE':\n            # User authentication is optional\n            return # authn accepted\n\n        # User information is available on headers\n        prefix = suffix = \"\"\n        hkeys = sorted(headers.keys())\n        for hk in hkeys:\n            hk = hk.lower()\n            if hk[0:9] in [\"cms-authn\", \"cms-authz\"] and hk != \"cms-authn-hmac\":\n                prefix += \"h%xv%x\" % (len(hk), len(headers[hk]))\n                suffix += \"%s%s\" % (hk, headers[hk])\n                hkname = hk.split('-', 2)[-1]\n            if hk.startswith(\"cms-authn\"):\n                user[hkname] = headers[hk]\n            if hk.startswith(\"cms-authz\"):\n                user['roles'][hkname] = {'site': set(), 'group': set()}\n                for r in headers[hk].split():\n                    ste_or_grp, name = r.split(':')\n                    user['roles'][hkname][ste_or_grp].add(name)\n\n        # the signed message must be bytes for hmac.new() under Python 3\n        vfy = hmac.new(self.key, (prefix + \"#\" + suffix).encode(\"utf-8\"), hashlib.sha1).hexdigest()\n        if vfy != headers[\"cms-authn-hmac\"]:\n            # HMAC does not match\n            raise cherrypy.HTTPError(403, \"You are not allowed to access this resource, hmac mismatch\")\n\n        # User authn accepted\n\n    def check_authorization(self, role, group, site, authzfunc):\n        \"\"\"Format the authorization rules into lists and verify if the given\n        user is allowed to access.\"\"\"\n        if authzfunc is None:\n            authzfunc = self.defaultAuth\n\n        # TOFIX: put role, group and site into canonical form\n\n        # Turns arguments into lists\n        if role and isinstance(role, str):\n            role = [role]\n        if group and isinstance(group, str):\n            group = [group]\n        if site and isinstance(site, str):\n            site = [site]\n\n        # Finally checks if the user is allowed\n        if not authzfunc(get_user_info(), role, group, site):\n            # Authorization denied\n            raise cherrypy.HTTPError(403, \"You are not allowed to access this resource, authz denied\")\n\n    def defaultAuth(self, user, role, group, site):\n        \"\"\" Return True for authorized user, False otherwise.\n\n        A user is authorized if they have any of the asked roles in the\n        given sites or groups. 
When no roles are specified, belonging\n        to any of the asked sites or groups is enough.\n        \"\"\"\n        if not (role or group or site):\n            return True\n\n        for k, v in viewitems(user['roles']):\n            if (not role) or (k in role):\n                if not (group or site):\n                    return True\n                if set(group) & v['group']:\n                    return True\n                if set(site) & v['site']:\n                    return True\n        return False\n\n\nclass NullAuth(cherrypy.Tool):\n    def __init__(self, config):\n        # Defines the hook point for cherrypy\n        self._name = None\n        self._point = 'before_request_body'\n        self._priority = 60 # Just after the sessions being enabled\n        if cherrypy.server.environment == 'production':\n            cherrypy.log.access_log.critical('You MUST NOT use the NullAuth in a production environment')\n            raise cherrypy.CherryPyException('Invalid server authentication')\n        else:\n            cherrypy.log.access_log.warning(\"You are using the NullAuth, I hope you know what you're doing\")\n\n    def callable(self, role=None, group=None, site=None, authzfunc=None):\n        role = role or []\n        group = group or []\n        site = site or []\n        cherrypy.log.access_log.warning('NullAuth called for:')\n        cherrypy.log.access_log.warning('\\trole(s): %s \\n\\tgroup(s): %s \\n\\tsite(s): %s', role, group, site)\n\n        if authzfunc:\n            cherrypy.log.access_log.warning('\\tusing authorisation function %s', authzfunc.__name__)\n        cherrypy.request.user = {'dn': 'None',\n                                 'method': 'Null Auth - totally insecure!',\n                                 'login': 'fbloggs',\n                                 'name': 'Fred Bloggs',\n                                 'roles': {}}\n","repo_name":"dmwm/WMCore","sub_path":"src/python/WMCore/WebTools/FrontEndAuth.py","file_name":"FrontEndAuth.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"13"} +{"seq_id":"2493667689","text":"import json\nimport time\nimport random\nimport telnetlib\n\nimport requests\n\nfrom lxml import etree\n\n\nclass ProxyPool(object):\n    def __init__(self, name):\n        self.proxies_pool = dict()\n        self.UA_headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Mobile Safari/537.36'}\n        self.black_list = set()\n        self.name = name\n    \n    def verify_proxy(self, ip, port, proto, timeout=1.5):\n        try:\n            telnetlib.Telnet(ip, port=port, timeout=timeout)\n            pxs = {\n                'http': \"{}://{}:{}\".format(proto, ip, port),\n                'https': \"{}://{}:{}\".format(proto, ip, port)\n            }\n            if self.name != 'google':\n                return True\n            response = requests.get('https://translate.google.cn', headers=self.UA_headers, proxies=pxs, timeout=timeout)\n            response.raise_for_status()\n            return True\n        except:\n            return False\n\n    def save_proxy(self, ip, port, proto, filepath='./proxy_file_saved'):\n        try:\n            with open(filepath, 'a', encoding='utf-8') as f:\n                f.write(\"{}:{}:{}\\n\".format(proto, ip, port))\n        except:\n            pass\n    \n    def clear(self):\n        self.proxies_pool.clear()\n    \n    def load_proxy_file(self, file_path):\n        proxies = dict()\n        with open(file_path, 'r', encoding='utf-8') as f:\n            for line in f:\n                proto = line.split(':')[0].strip()\n                ip = line.split(':')[1].strip()\n                port = line.split(':')[2].strip()\n                proxies[ip] = {'port': port, 'proto': proto}\n        self.proxies_pool.clear()\n        self.proxies_pool = proxies\n\n    def crawl_proxies(self, name, *args, **kwargs):\n        while True:\n            try:\n                if name == 'github':\n                    self._crawl_github_proxies(*args, **kwargs)\n                elif name == 'xici':\n                    self._crawl_xici_proxies(*args, **kwargs)\n                break\n            except KeyboardInterrupt:\n                break\n            except Exception as err:\n                continue\n    \n    def _crawl_github_proxies(self, speed=1.5, 
protocols={'http', 'https'}, early_stopped=None):\n url = 'https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list'\n while True:\n try:\n response = requests.get(url, headers=self.UA_headers, timeout=5.0)\n response.raise_for_status()\n break\n except:\n time.sleep(5)\n continue\n lines = response.text.split('\\n')\n lens = len(lines)\n cnt = 1\n for line in lines:\n px = json.loads(line.strip())\n t = px['type']\n h = px['host']\n p = px['port']\n if t.lower().strip() in protocols:\n if self.verify_proxy(h, p, t.strip().lower(), timeout=speed) \\\n and '{}:{}'.format(h, p) not in self.black_list:\n self.proxies_pool[h] = {'port': p, 'proto': t}\n self.save_proxy(h, p, t)\n print(' '*100 + '\\r', end='')\n print('Github Proxies - {} - Process: {}/{} - lens: {}'.format(self.name, cnt, lens, len(self.proxies_pool)))\n cnt += 1\n if early_stopped:\n if len(self.proxies_pool) >= early_stopped:\n break\n\n def _crawl_xici_proxies(self, speed=0.50, connecting_time=0.50, protocols={'http', 'https'}, early_stopped=None):\n base_url = 'https://www.xicidaili.com/wn/'\n response = requests.get(base_url, headers=self.UA_headers, timeout=1.0)\n response.raise_for_status()\n response.encoding = 'UTF-8'\n html = etree.HTML(response.text)\n pages = html.xpath('//a[@class=\"next_page\"]/preceding-sibling::a/@href')[-1].split('/')[-1]\n for page in range(1, int(pages) + 1):\n time.sleep(1)\n while True:\n try:\n url = base_url + str(page)\n response = requests.get(url, headers=self.UA_headers)\n response.encoding = 'UTF-8'\n html = etree.HTML(response.text)\n tds = html.xpath('//td/text()')\n if tds:\n break\n except AttributeError:\n print('.', end='')\n pass\n ips = tds[0::12]\n ports = tds[1::12]\n protos = tds[5::12]\n # seconds = html.xpath('//td[@class=\"country\"]/div[contains(@title, \"秒\")]/@title')\n # spds = seconds[0::2]\n # cons = seconds[1::2]\n\n for ip, port, proto in zip(ips, ports, protos):\n if proto.strip().lower() in protocols:\n if self.verify_proxy(ip, port, proto.strip().lower()):\n self.proxies_pool[ip] = {'port': port, 'proto': proto.lower().strip()}\n self.save_proxy(ip, port, proto)\n if early_stopped:\n if len(self.proxies_pool) >= early_stopped:\n break\n print(' '*100 + '\\r', end='')\n print('Xici Proxies - Process: {}/{} - proxies: {}\\r'.format(page, pages, len(self.proxies_pool)), end='')\n\n def get_proxies(self):\n if len(self.proxies_pool) <= 0:\n return {\n 'http': '{}://{}:{}',\n 'https': '{}://{}:{}'\n }\n proxies = list(self.proxies_pool.items())\n phttp = random.choice(proxies)\n phttps = phttp\n prox = {\n 'http': '{}://{}:{}'.format(phttp[1]['proto'], phttp[0], phttp[1]['port']),\n 'https': '{}://{}:{}'.format(phttps[1]['proto'], phttps[0], phttps[1]['port'])\n }\n return prox\n\n def del_proxy(self, ip):\n if self.proxies_pool.get(ip):\n del self.proxies_pool[ip]\n","repo_name":"Spico197/TransAPI","sub_path":"src/transkit/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"10290059911","text":"from django.conf.urls import include, url\n\n\nfrom .views import (\n\tinv_local,\n\tver_compras,\n\tcargar_factura,\n\tregistrar_compra,\n\tdocumento,\n\tdetallar_compra\n\n)\n\nurlpatterns = 
[\n\turl(r'^local/',inv_local.as_view(),name='inv_local'),\n\turl(r'^$',ver_compras.as_view(),name='ver'),\n\turl(r'^cargar/',cargar_factura.as_view(),name='cargar_factura'),\n\turl(r'^registrar_compra/',registrar_compra.as_view(),name=\"registrar_compra\"),\n\turl(r'^documento/',documento.as_view(),name='documento_compras'),\n url(r'^detallar_compra/',detallar_compra.as_view(),name=\"detallar_compra\"),\n\n\t\n]\n","repo_name":"corporacionrst/software_RST","sub_path":"app/productos/inventario/compras/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"25182330122","text":"from selenium_browser import UBrowse\nfrom reporter import SpiderReporter\nfrom app import scheduler\nfrom ..models import Affiliate, History, db\nfrom env import *\n\nimport psycopg2\nimport datetime\nimport json\nimport requests\n\nclass Paddy(object):\n \"\"\"docstring for Paddy\"\"\"\n def __init__(self):\n self.report = SpiderReporter()\n self.affiliate = \"Paddy\"\n self.headers = {\n 'Host': 'affiliates.paddypartners.com',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Content-Type': 'application/json; charset=utf-8',\n 'X-Requested-With': 'XMLHttpRequest',\n 'x-ms-request-root-id': 'cZ8hr',\n 'x-ms-request-id': 'ZK2p+',\n 'Referer': 'https://affiliates.paddypartners.com/affiliates/Reports/DailyFigures',\n }\n\n def _create_params(self, date = None):\n if date is None:\n date = self.get_delta_date(2, '%d-%m-%Y')\n\n self.params = (\n ('dateFilterFrom', [date, date]),\n ('dateFilterTo', [date, date]),\n )\n\n def _get_cookies(self):\n self.cookies = dict()\n cookies = self.client.driver.get_cookies()\n for i in cookies:\n self.cookies[i['name']] = i['value']\n\n def get_delta_date(self, delta = DELTA_DAYS, format_string = \"%Y/%m/%d\"):\n today = datetime.datetime.today()\n diff = datetime.timedelta(days = delta)\n return (today - diff).strftime(format_string)\n\n def log(self, message, type = \"info\"):\n self.report.write_log(\"Paddy\", message, self.get_delta_date(), type)\n\n def get_data(self):\n url = 'https://affiliates.paddypartners.com/affiliates/Reports/dailyFiguresReport'\n response = requests.get(url, headers=self.headers, params=self.params, cookies=self.cookies)\n return response\n\n def login(self):\n try:\n self.client.open_url('https://affiliates.paddypartners.com/affiliates/Account/Login')\n\n self.client.set_loginform('//*[@id=\"txtUsername\"]')\n self.client.set_passform('//*[@id=\"txtPassword\"]')\n self.client.set_loginbutton('//*[@id=\"btnLogin\"]')\n\n if self.client.login('betfyuk', 'dontfuckwithme') is True:\n self._get_cookies()\n self._create_params()\n else:\n return False\n return True\n except Exception as e:\n self.log(str(e), \"error\")\n return False\n\n def isExisting(self, date = None):\n try:\n if date is None:\n date = self.get_delta_date()\n\n app = scheduler.app\n with app.app_context():\n affiliate = Affiliate.query.filter_by(name = self.affiliate).first()\n\n if affiliate is None:\n return False\n\n history = History.query.filter_by(affiliate_id = affiliate.id, created_at = date).first()\n\n if history is None:\n return False\n else:\n return True\n except Exception as e:\n self.log(str(e), \"error\")\n return False\n\n def run(self):\n self.log(\"\"\"\n 
======================================================\n ====== Starting Paddy Spider ======================\n \"\"\")\n if self.isExisting():\n self.log(\"Scrapped for `{0}` already done. Skipping...\".format(self.affiliate))\n return True\n else:\n self.client = UBrowse()\n if self.login():\n try:\n response = json.loads(self.get_data().content)\n data = response['data'][0]\n\n one_day = datetime.timedelta(days = 1)\n yesterday = datetime.datetime.now() - one_day\n date = yesterday.strftime('%Y-%m-%d')\n \n views = data[1]['Value']\n uniqueviews = data[2]['Value']\n clicks = data[3]['Value']\n uniqueclicks = data[4]['Value']\n signups = data[5]['Value']\n depositingcustomers = data[6]['Value']\n activecustomers = data[7]['Value']\n newdepositingcustomers = data[8]['Value']\n newactivecustomers = data[9]['Value']\n firsttimedepositingcustomers = data[10]['Value']\n firsttimeactivecustomers = data[11]['Value']\n netrevenue = data[12]['Value']\n\n app = scheduler.app\n with app.app_context():\n affiliate = Affiliate.query.filter_by(name = self.affiliate).first()\n if affiliate is None:\n affiliate = Affiliate(name = self.affiliate)\n db.session.add(affiliate)\n db.session.commit()\n \n created_at = self.get_delta_date()\n history = History.query.filter_by(affiliate_id = affiliate.id, created_at = created_at).first()\n if history is None:\n history = History(\n affiliate_id = affiliate.id,\n daily_click = clicks,\n daily_signup = signups,\n daily_commission = netrevenue,\n paid_signup = newdepositingcustomers,\n created_at = created_at\n )\n db.session.add(history)\n db.session.commit()\n return False\n except Exception as e:\n self.log(str(e), \"error\")\n return False\n else:\n self.log(\"Failed to login\", \"error\")\n return False\n\n self.client.close()\n\nif __name__ == '__main__':\n pp = Paddy()\n pp.run()\n# response = json.loads(pp.get_data().content)\n\n # data = response['data'][0]\n\n # one_day = datetime.timedelta(days = 1)\n # yesterday = datetime.datetime.now() - one_day\n # date = yesterday.strftime('%Y-%m-%d')\n\n # views = data[1]['Value']\n # uniqueviews = data[2]['Value']\n # clicks = data[3]['Value']\n # uniqueclicks = data[4]['Value']\n # signups = data[5]['Value']\n # depositingcustomers = data[6]['Value']\n # activecustomers = data[7]['Value']\n # newdepositingcustomers = data[8]['Value']\n # newactivecustomers = data[9]['Value']\n # firsttimedepositingcustomers = data[10]['Value']\n # firsttimeactivecustomers = data[11]['Value']\n # netrevenue = data[12]['Value']\n\n # pp.client.driver.close()\n\n # engine = create_engine(get_database_connection_string())\n # result = engine.execute(\"INSERT INTO paddyies (dateto, views, uniqueviews, clicks, uniqueclicks, signups, depositingcustomers, activecustomers, newdepositingcustomers, newactivecustomers, firsttimedepositingcustomers, firsttimeactivecustomers, netrevenue) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\", date, views, uniqueviews, clicks, uniqueclicks, signups, depositingcustomers, activecustomers, newdepositingcustomers, newactivecustomers, firsttimedepositingcustomers, firsttimeactivecustomers, netrevenue)\n\n","repo_name":"hawksuperguru/python-affiliate","sub_path":"app/spiders/paddy.py","file_name":"paddy.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"19905166137","text":"class Solution(object):\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n 
:rtype: int\n        \"\"\"\n        len_1 = len(word1)+1\n        len_2 = len(word2)+1\n\n        dp = [[0 for _ in range(len_2)] for _ in range(len_1)]\n\n        for i in range(len_1):\n            dp[i][0] = i\n\n        for j in range(len_2):\n            dp[0][j] = j\n\n        for i in range(1, len_1):\n            for j in range(1, len_2):\n                dp[i][j] = min(dp[i-1][j]+1, dp[i][j-1]+1, dp[i-1][j-1]+(word1[i-1]!=word2[j-1]))\n\n        return dp[-1][-1]\n\n\ns = Solution()\n# fixed: the harness called longestValidParentheses (a different problem); this class defines minDistance\na = s.minDistance('horse', 'ros')\nprint(a)  # expected: 3\n","repo_name":"littleliona/leetcode","sub_path":"hard/72.edit_distance.py","file_name":"72.edit_distance.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21106057558","text":"import math\n\nimport numpy as np\n\nimport torch\nfrom torch import logit, nn\nimport torch.nn.functional as F\nimport torch.distributions as D\n\nfrom .rnn import GenericRNN\nfrom .distributions import CensoredMixtureLogistic, reweight_top_p\n\nclass SineEmbedding(nn.Module):\n    def __init__(self, n, hidden, w0=1e-3, w1=10, scale='log'):\n        \"\"\"\n        Args:\n            n (int): number of sinusoids\n            hidden (int): embedding size\n            w0 (float): minimum wavelength\n            w1 (float): maximum wavelength\n            scale (str): if 'log', more wavelengths close to w0\n        \"\"\"\n        super().__init__()\n        if scale=='log':\n            w0 = np.log(w0)\n            w1 = np.log(w1)\n        ws = torch.linspace(w0, w1, n)\n        if scale=='log':\n            ws = ws.exp()\n        self.register_buffer('fs', 2 * math.pi / ws)\n        self.proj = nn.Linear(n,hidden)\n\n    def forward(self, x):\n        x = x[...,None] * self.fs\n        return self.proj(x.sin())\n\nclass MixEmbedding(nn.Module):\n    def __init__(self, n, domain=(0,1)):\n        \"\"\"\n        Args:\n            n (int): number of channels\n            domain (Tuple[float])\n        \"\"\"\n        super().__init__()\n        self.domain = domain\n        self.lo = nn.Parameter(torch.randn(n))\n        self.hi = nn.Parameter(torch.randn(n))\n    def forward(self, x):\n        \"\"\"\n        Args:\n            x: Tensor[...]\n        Returns:\n            Tensor[...,n]\n        \"\"\"\n        x = (x - self.domain[0])/(self.domain[1] - self.domain[0])\n        x = x[...,None]\n        return self.hi * x + self.lo * (1-x)\n\nclass SelfGated(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n    def forward(self, x):\n        a, b = x.chunk(2, -1)\n        return a * b.sigmoid()\n\nclass SelfGatedMLP(nn.Module):\n    def __init__(self, input, hidden, output, layers, dropout=0, norm=None):\n        super().__init__()\n        h = input\n        def get_dropout():\n            if dropout > 0:\n                return (nn.Dropout(dropout),)\n            return tuple()\n        def get_norm():\n            if norm=='layer':\n                return (nn.LayerNorm(hidden),)\n            return tuple()\n        self.net = []\n        for _ in range(layers):\n            self.net.append(nn.Sequential(\n                *get_dropout(), nn.Linear(h, hidden*2), SelfGated(), *get_norm()))\n            h = hidden\n        self.net.append(nn.Linear(hidden, output))\n        self.net = nn.Sequential(*self.net)\n\n        with torch.no_grad():\n            self.net[-1].weight.mul_(1e-2)\n\n    def forward(self, x):\n        return self.net(x)\n\n# class ModalityTransformer(nn.Module):\n#     \"\"\"\n#     Model joint distribution of note modalities (e.g. 
pitch, time, velocity).\n\n# This is an autoregressive Transformer model for the *internal* structure of notes.\n# It is *not* autoregressive in time, but in modality.\n# At training time, it executes in parallel over all timesteps and modalities, with\n# time dependencies provided via the RNN backbone.\n\n# At sampling time it is called serially, one modality at a time, \n# repeatedly at each time step.\n\n# Inspired by XLNet: http://arxiv.org/abs/1906.08237\n# \"\"\"\n# def __init__(self, input_size, hidden_size, heads=4, layers=1):\n# super().__init__()\n# self.net = nn.TransformerDecoder(\n# nn.TransformerDecoderLayer(\n# input_size, heads, hidden_size, norm_first=False\n# ), layers)\n\n# def forward(self, ctx, h_ctx, h_tgt):\n# \"\"\"\n# Args:\n# ctx: list of Tensor[batch x time x input_size], length note_dim-1\n# these are the embedded ground truth values\n# h_ctx: Tensor[batch x time x input_size]\n# projection of RNN state (need something to attend to when ctx is empty)\n# h_tgt: list of Tensor[batch x time x input_size], length note_dim\n# these are projections of the RNN state for each target,\n# which the Transformer will map to distribution parameters.\n# \"\"\"\n# # explicitly broadcast\n# h_ctx, *ctx = torch.broadcast_tensors(h_ctx, *ctx)\n# h_ctx, *h_tgt = torch.broadcast_tensors(h_ctx, *h_tgt)\n\n# # h_tgt is 'target' w.r.t TransformerDecoder\n# # h_ctx and context are 'memory'\n# batch_size = h_ctx.shape[0]*h_ctx.shape[1]\n# # fold time into batch, stack modes\n# tgt = torch.stack([\n# item.reshape(batch_size,-1)\n# for item in h_tgt\n# ],0)\n# mem = torch.stack([\n# item.reshape(batch_size,-1)\n# for item in [h_ctx, *ctx]\n# ],0)\n# # now \"time\"(mode) x \"batch\"(+time) x channel\n\n# # generate a mask\n# # this is both the target and memory mask\n# # masking is such that each target can only depend on \"previous\" context\n# n = len(h_tgt)\n# mask = ~tgt.new_ones((n,n), dtype=bool).tril()\n\n# x = self.net(tgt, mem, mask, mask)\n# return list(x.reshape(n, *h_ctx.shape).unbind(0))\n\n\nclass Notochord(nn.Module):\n # note: use named arguments only for benefit of training script\n def __init__(self, \n emb_size=256, \n rnn_hidden=2048, rnn_layers=1, kind='gru', \n mlp_layers=0,\n dropout=0.1, norm=None,\n num_pitches=128, \n num_instruments=272,\n time_sines=128, vel_sines=128,\n time_bounds=(0,10), time_components=32, time_res=1e-2,\n vel_components=16\n ):\n \"\"\"\n \"\"\"\n super().__init__()\n\n self.note_dim = 4 # instrument, pitch, time, velocity\n\n self.instrument_start_token = 0\n self.instrument_domain = num_instruments+1\n\n self.pitch_start_token = num_pitches\n self.pitch_domain = num_pitches+1\n\n self.time_dist = CensoredMixtureLogistic(\n time_components, time_res, \n lo=time_bounds[0], hi=time_bounds[1], init='time')\n self.vel_dist = CensoredMixtureLogistic(\n vel_components, 1.0, lo=0, hi=127, init='velocity')\n \n # embeddings for inputs\n self.instrument_emb = nn.Embedding(self.instrument_domain, emb_size)\n self.pitch_emb = nn.Embedding(self.pitch_domain, emb_size)\n self.time_emb = SineEmbedding(time_sines, emb_size, 1e-3, 30, scale='log')\n # self.vel_emb = MixEmbedding(emb_size, (0, 127))\n self.vel_emb = SineEmbedding(vel_sines, emb_size, 2, 512, scale='lin')\n\n # RNN backbone\n self.rnn = GenericRNN(kind, \n emb_size, rnn_hidden, \n num_layers=rnn_layers, batch_first=True, dropout=dropout)\n\n # learnable initial RNN state\n self.initial_state = nn.ParameterList([\n # layer x batch x hidden\n 
nn.Parameter(torch.randn(rnn_layers,1,rnn_hidden)*rnn_hidden**-0.5)\n for _ in range(2 if kind=='lstm' else 1)\n ])\n\n # projection from RNN state to distribution parameters\n h_proj = []\n if dropout:\n h_proj.append(nn.Dropout(dropout))\n h_proj.append(nn.Linear(rnn_hidden, emb_size))\n self.h_proj = nn.Sequential(*h_proj)\n # self.projections = nn.ModuleList([\n # nn.Linear(emb_size, self.instrument_domain),\n # nn.Linear(emb_size, self.pitch_domain),\n # nn.Linear(emb_size, self.time_dist.n_params, bias=False),\n # nn.Linear(emb_size, self.vel_dist.n_params, bias=False)\n # ])\n self.projections = nn.ModuleList([\n SelfGatedMLP(\n emb_size, emb_size, self.instrument_domain, \n mlp_layers, dropout, norm),\n SelfGatedMLP(\n emb_size, emb_size, self.pitch_domain, \n mlp_layers, dropout, norm),\n SelfGatedMLP(\n emb_size, emb_size, self.time_dist.n_params,\n mlp_layers, dropout, norm),\n SelfGatedMLP(\n emb_size, emb_size, self.vel_dist.n_params, \n mlp_layers, dropout, norm),\n ])\n\n self.end_proj = nn.Linear(rnn_hidden, 2)\n\n with torch.no_grad():\n for p in self.projections:\n p.net[-1].weight.mul_(1e-2)\n self.end_proj.weight.mul(1e-2)\n\n # persistent RNN state for inference\n for n,t in zip(self.cell_state_names(), self.initial_state):\n self.register_buffer(n, t.clone())\n self.step = 0\n\n def cell_state_names(self):\n return tuple(f'cell_state_{i}' for i in range(len(self.initial_state)))\n\n @property\n def cell_state(self):\n return tuple(getattr(self, n) for n in self.cell_state_names())\n\n @property\n def embeddings(self):\n return (\n self.instrument_emb,\n self.pitch_emb,\n self.time_emb,\n self.vel_emb\n )\n \n def forward(self, instruments, pitches, times, velocities, ends,\n validation=False, ar_mask=None):\n \"\"\"\n teacher-forced probabilistic loss and diagnostics for training.\n\n Args:\n instruments: LongTensor[batch, time]\n pitches: LongTensor[batch, time]\n times: FloatTensor[batch, time]\n velocities: FloatTensor[batch, time]\n ends: LongTensor[batch, time]\n validation: bool (computes some extra diagnostics)\n ar_mask: Optional[Tensor[note_dim x note_dim]] if None, generate random\n masks for training\n \"\"\"\n batch_size, batch_len = pitches.shape\n\n # embed data to input vectors\n inst_emb = self.instrument_emb(instruments) # batch, time, emb_size\n pitch_emb = self.pitch_emb(pitches) # batch, time, emb_size\n time_emb = self.time_emb(times) # batch, time, emb_size\n vel_emb = self.vel_emb(velocities) # batch, time, emb_size\n\n embs = (inst_emb, pitch_emb, time_emb, vel_emb)\n\n # feed to RNN backbone\n x = sum(embs)\n ## broadcast initial state to batch size\n initial_state = tuple(\n t.expand(self.rnn.num_layers, x.shape[0], -1).contiguous() # 1 x batch x hidden\n for t in self.initial_state)\n h, _ = self.rnn(x, initial_state) #batch, time, hidden_size\n\n # fit all note factorizations \n # e.g. 
inst->pitch->time->vel vs vel->time->inst->pitch\n trim_h = h[:,:-1]\n # always include hidden state, never include same modality,\n # other dependencies are random per time and position\n n = self.note_dim\n if ar_mask is None:\n # random binary mask\n ar_mask = torch.randint(2, (*trim_h.shape[:2],n,n), dtype=torch.bool, device=h.device)\n # zero diagonal\n ar_mask &= ~torch.eye(n,n, dtype=torch.bool, device=h.device)\n # include hidden state\n ar_mask = torch.cat((ar_mask.new_ones(*ar_mask.shape[:-2],1,n), ar_mask), -2).float()\n\n to_mask = torch.stack((\n self.h_proj(trim_h),\n *(emb[:,1:] for emb in embs)\n ), -1)\n mode_hs = (to_mask @ ar_mask).tanh().unbind(-1)\n \n # final projections to raw distribution parameters\n inst_params, pitch_params, time_params, vel_params = [\n proj(h) for proj,h in zip(self.projections, mode_hs)]\n\n # get likelihood of data for each modality\n inst_logits = F.log_softmax(inst_params, -1)\n inst_targets = instruments[:,1:,None] #batch, time, 1\n inst_log_probs = inst_logits.gather(-1, inst_targets)[...,0]\n\n pitch_logits = F.log_softmax(pitch_params, -1)\n pitch_targets = pitches[:,1:,None] #batch, time, 1\n pitch_log_probs = pitch_logits.gather(-1, pitch_targets)[...,0]\n\n time_targets = times[:,1:] # batch, time\n time_result = self.time_dist(time_params, time_targets)\n time_log_probs = time_result.pop('log_prob')\n\n vel_targets = velocities[:,1:] # batch, time\n vel_result = self.vel_dist(vel_params, vel_targets)\n vel_log_probs = vel_result.pop('log_prob')\n\n # end prediction\n # skip the first position for convenience \n # (so masking is the same for end as for note parts)\n end_params = self.end_proj(h[:,1:])\n end_logits = F.log_softmax(end_params, -1)\n end_log_probs = end_logits.gather(-1, ends[:,1:,None])[...,0]\n\n r = {\n 'end_log_probs': end_log_probs,\n 'instrument_log_probs': inst_log_probs,\n 'pitch_log_probs': pitch_log_probs,\n 'time_log_probs': time_log_probs,\n 'velocity_log_probs': vel_log_probs,\n **{'time_'+k:v for k,v in time_result.items()},\n **{'velocity_'+k:v for k,v in vel_result.items()}\n }\n # this just computes some extra diagnostics which are inconvenient to do in the\n # training script. should be turned off during training for performance.\n if validation:\n with torch.no_grad():\n r['time_acc_30ms'] = (\n self.time_dist.cdf(time_params, time_targets + 0.03)\n - torch.where(time_targets - 0.03 >= 0,\n self.time_dist.cdf(time_params, time_targets - 0.03),\n time_targets.new_zeros([]))\n )\n return r\n\n # TODO: remove allow_end here\n # allow_start should just be False\n def get_samplers(self, \n instrument_top_p=None, exclude_instrument=None,\n pitch_topk=None, index_pitch=None, allow_start=False, allow_end=False, \n pitch_top_p=None, velocity_temp=None,\n sweep_time=False, min_time=None, max_time=None, bias_time=None, time_weight_top_p=None, time_component_temp=None,\n min_vel=None, max_vel=None):\n \"\"\"\n this method converts the many arguments to `predict` into functions for\n sampling each note modality (e.g. 
pitch, time, velocity)\n \"\"\"\n\n def sample_instrument(x):\n if not allow_start:\n x[...,self.instrument_start_token] = -np.inf\n if exclude_instrument is not None:\n x[...,exclude_instrument] = -np.inf\n probs = x.softmax(-1)\n if instrument_top_p is not None:\n probs = reweight_top_p(probs, instrument_top_p)\n return D.Categorical(probs).sample()\n\n def sample_pitch(x):\n if not allow_start:\n x[...,self.pitch_start_token] = -np.inf\n if index_pitch is not None:\n return x.argsort(-1, True)[...,index_pitch]\n elif pitch_topk is not None:\n return x.argsort(-1, True)[...,:pitch_topk].transpose(0,-1)\n else:\n probs = x.softmax(-1)\n if pitch_top_p is not None:\n probs = reweight_top_p(probs, pitch_top_p)\n return D.Categorical(probs).sample()\n\n def sample_time(x):\n # TODO: respect trunc_time when sweep_time is True\n if sweep_time:\n if min_time is not None or max_time is not None:\n raise NotImplementedError(\"\"\"\n trunc_time with sweep_time needs implementation\n \"\"\")\n assert x.shape[0]==1, \"batch size should be 1 here\"\n log_pi, loc, s = self.time_dist.get_params(x)\n idx = log_pi.squeeze().argsort()[:9]\n loc = loc.squeeze()[idx].sort().values[...,None] # multiple times in batch dim\n # print(loc.shape)\n return loc\n else:\n trunc = (\n -np.inf if min_time is None else min_time,\n np.inf if max_time is None else max_time)\n return self.time_dist.sample(x, \n truncate=trunc, bias=bias_time,\n component_temp=time_component_temp, weight_top_p=time_weight_top_p)\n\n def sample_velocity(x):\n trunc = (\n -np.inf if min_vel is None else min_vel,\n np.inf if max_vel is None else max_vel)\n return self.vel_dist.sample(x, component_temp=velocity_temp, truncate=trunc)\n\n return (\n sample_instrument,\n sample_pitch, \n sample_time,\n sample_velocity,\n )\n \n # TODO: remove pitch_topk and sweep_time?\n def predict(self, \n inst, pitch, time, vel, \n fix_instrument=None, fix_pitch=None, fix_time=None, fix_vel=None, \n pitch_topk=None, index_pitch=None, allow_start=False, allow_end=False,\n sweep_time=False, min_time=None, max_time=None, bias_time=None, \n exclude_instrument=None,\n instrument_temp=None, pitch_temp=None, velocity_temp=None,\n rhythm_temp=None, timing_temp=None,\n min_vel=None, max_vel=None):\n \"\"\"\n consume the most recent note and return a prediction for the next note.\n\n various constraints on the the next note can be requested.\n\n Args:\n pitch: int. MIDI number of current note.\n time: float. elapsed time in seconds since previous note.\n vel: float. (possibly dequantized) MIDI velocity from 0-127 inclusive.\n fix_*: same as above, but to fix a value for the predicted note.\n sampled values will always condition on fixed values, so passing\n `fix_time=0`, for example, will make a probabilistically-sound\n prediction of a chord tone: \"what is the next note given that it \n happens immediately after the last one?\"\n pitch_topk: Optional[int]. if not None, instead of sampling pitch, stack\n the top k most likely pitches along the batch dimension\n index_pitch: Optional[int]. 
if not None, deterministically take the nth\n most likely pitch instead of sampling.\n allow_start: if False, zero probability for sampling the start token\n allow_end: if False, zero probaility for sampling the end token\n sweep_time: if True, instead of sampling time, choose a diverse set of\n times and stack along the batch dimension\n min_time, max_time: if not None, truncate the time distribution\n bias_time: add this delay to the time \n (after applying min/max but before clamping to 0).\n may be useful for latency correction.\n exclude_instrument: instrument id to exclude from sampling.\n instrument_temp: if not None, apply top_p sampling to instrument. 0 is\n deterministic, 1 is 'natural' according to the model\n pitch_temp: if not None, apply top_p sampling to pitch. 0 is\n deterministic, 1 is 'natural' according to the model\n velocity_temp: if not None, apply temperature sampling to the velocity\n component.\n rhythm_temp: if not None, apply top_p sampling to the weighting\n of mixture components. this affects coarse rhythmic patterns; 0 is\n deterministic, 1 is 'natural' according to the model\n timing_temp: if not None, apply temperature sampling to the time\n component. this affects fine timing; 0 is deterministic and precise,\n 1 is 'natural' according to the model.\n min_vel, max_vel: if not None, truncate the velocity distribution\n\n Returns: dict of\n 'pitch': int. predicted MIDI number of next note.\n 'time': float. predicted time to next note.\n 'velocity': float. unquantized predicted velocity of next note.\n '*_params': tensor. distribution parameters for visualization purposes.\n \"\"\"\n if (index_pitch is not None) and (pitch_temp is not None):\n print(\"warning: `index pitch` overrides `pitch_temp`\")\n\n with torch.inference_mode():\n inst = torch.LongTensor([[inst]]) # 1x1 (batch, time)\n pitch = torch.LongTensor([[pitch]]) # 1x1 (batch, time)\n time = torch.FloatTensor([[time]]) # 1x1 (batch, time)\n vel = torch.FloatTensor([[vel]]) # 1x1 (batch, time)\n\n embs = [\n self.instrument_emb(inst),\n self.pitch_emb(pitch), # 1, 1, emb_size\n self.time_emb(time),# 1, 1, emb_size\n self.vel_emb(vel)# 1, 1, emb_size\n ]\n x = sum(embs)\n \n h, new_state = self.rnn(x, self.cell_state)\n for t,new_t in zip(self.cell_state, new_state):\n t[:] = new_t\n\n # h_parts = self.h_proj(h).chunk(self.note_dim+1, -1)\n # h_ctx = h_parts[0]\n # h_tgt = h_parts[1:]\n\n modalities = list(zip(\n self.projections,\n self.get_samplers(\n instrument_temp, exclude_instrument,\n pitch_topk, index_pitch, allow_start, allow_end, \n pitch_temp,\n velocity_temp,\n sweep_time, min_time, max_time, bias_time, \n rhythm_temp, timing_temp,\n min_vel, max_vel),\n self.embeddings,\n ))\n\n context = [self.h_proj(h)] # embedded outputs for autoregressive prediction\n predicted = [] # raw outputs\n params = [] # distribution parameters for visualization\n\n fix = [\n None if item is None else torch.tensor([[item]], dtype=dtype)\n for item, dtype in zip(\n [fix_instrument, fix_pitch, fix_time, fix_vel],\n [torch.long, torch.long, torch.float, torch.float])]\n\n # if any modalities are determined, embed them\n # sort constrained modalities before unconstrained\n # TODO: option to skip modalities\n det_idx, cons_idx, uncons_idx = [], [], []\n for i,(item, embed) in enumerate(zip(fix, self.embeddings)):\n if item is None:\n if (\n i==0 and any(p is not None for p in (\n instrument_temp, exclude_instrument)) or\n i==1 and (pitch_topk or pitch_temp is not None) or\n i==2 and any(p is not None for p in (\n 
min_time, max_time, rhythm_temp, timing_temp)) or\n i==3 and any(p is not None for p in (\n min_vel, max_vel, velocity_temp))\n ):\n cons_idx.append(i)\n else:\n uncons_idx.append(i)\n else:\n det_idx.append(i)\n context.append(embed(item))\n predicted.append(item)\n params.append(None)\n undet_idx = cons_idx + uncons_idx\n perm = det_idx + undet_idx # permutation from the canonical order\n iperm = np.argsort(perm) # inverse permutation back to canonical order\n\n md = ['instrument', 'pitch', 'time', 'vel']\n print('sampling order:', [md[i] for i in perm])\n\n # for each undetermined modality, \n # sample a new value conditioned on alteady determined ones\n\n # TODO: allow constraints; \n # attempt to sort the strongest constraints first\n # constraints can be:\n # discrete set, in which case evaluate probs and then sample categorical;\n # range, in which case truncate;\n # temperature?\n \n running_ctx = sum(context)\n # print(running_ctx)\n # perm_h_tgt = [h_tgt[i] for i in perm]\n while len(undet_idx):\n # print(running_ctx.norm())\n i = undet_idx.pop(0) # index of modality to determine\n # j = len(det_idx) # number already determined\n project, sample, embed = modalities[i]\n # determine value for the next modality\n hidden = running_ctx.tanh() #self.xformer(context, h_ctx, perm_h_tgt[:j+1])[j]\n params.append(project(hidden))\n pred = sample(params[-1])\n predicted.append(pred)\n # prepare for next iteration\n if len(undet_idx):\n # context.append(embed(pred))\n running_ctx += embed(pred)\n det_idx.append(i)\n\n pred_inst = predicted[iperm[0]]\n pred_pitch = predicted[iperm[1]]\n pred_time = predicted[iperm[2]]\n pred_vel = predicted[iperm[3]]\n\n if allow_end:\n end_params = self.end_proj(h)\n # print(end_params)\n end = D.Categorical(logits=end_params).sample()\n else:\n end = torch.zeros(h.shape[:-1])\n\n if sweep_time or pitch_topk:\n # return lists of predictions\n pred_inst = [x.item() for x in pred_inst]\n pred_pitch = [x.item() for x in pred_pitch]\n pred_time = [x.item() for x in pred_time]\n pred_vel = [x.item() for x in pred_vel]\n end = [x.item() for x in end]\n # print(pred_time, pred_pitch, pred_vel)\n else:\n # return single predictions\n pred_inst = pred_inst.item()\n pred_pitch = pred_pitch.item()\n pred_time = pred_time.item()\n pred_vel = pred_vel.item()\n end = end.item()\n\n self.step += 1\n return {\n 'end': end,\n 'step': self.step,\n 'instrument': pred_inst,\n 'pitch': pred_pitch, \n 'time': pred_time,\n 'velocity': pred_vel,\n 'inst_params': params[iperm[0]],\n 'pitch_params': params[iperm[1]],\n 'time_params': params[iperm[2]],\n 'vel_params': params[iperm[3]]\n }\n \n def reset(self, start=True):\n \"\"\"\n resets internal model state.\n Args:\n start: if True, send a start token through the model with dt=0\n but discard the prediction\n \"\"\"\n self.step = 0\n for n,t in zip(self.cell_state_names(), self.initial_state):\n getattr(self, n)[:] = t.detach()\n if start:\n self.predict(self.instrument_start_token, self.pitch_start_token, 0., 0.)\n\n @classmethod\n def from_checkpoint(cls, path):\n \"\"\"\n create a Predictor from a checkpoint file containing hyperparameters and \n model weights.\n \"\"\"\n checkpoint = torch.load(path, map_location=torch.device('cpu'))\n model = cls(**checkpoint['kw']['model'])\n model.load_state_dict(checkpoint['model_state'], strict=False)\n return model\n 
","repo_name":"adanlbenito/iil-python-tools","sub_path":"notochord/notochord/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":27077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"13"} +{"seq_id":"20751348668","text":"\"\"\"\n246. Strobogrammatic Number\n\n\"\"\"\n\nclass Solution:\n def isStrobogrammatic(self, num: str) -> bool:\n rotate = []\n for char in num[::-1]:\n if char == '0' or char=='1' or char == '8':\n rotate.append(char)\n elif char == '9':\n rotate.append('6')\n elif char == '6':\n rotate.append('9')\n else: \n return False\n rotate = ''.join(rotate)\n return rotate == num","repo_name":"nezlobnaya/leetcode_solutions","sub_path":"strobogrammatic_number.py","file_name":"strobogrammatic_number.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"12089384583","text":"\"\"\"\n 列表list\n 作用:存储单一维度数据\n 基础操作\n 创建\n 添加\n 定位\n 删除\n 遍历\n\"\"\"\n# 1. 创建\n# --列表名 = [元素1,元素2,元素3]\nlist_name = [\"马鹏\", \"王子依\", \"陆飞翔\"]\nlist_sex = [\"男\", \"女\", \"女\"]\nlist_age = [27, 23, 25]\n# --列表名 = list(可迭代对象)\nlist_gyt = list(\"郭玉涛\")\nprint(list_gyt)\n# 2. 添加\n# --列表名.append(元素)\nlist_name.append(\"贾瑞\")\nlist_age.append(26)\nprint(list_name)\n# --列表名.insert(索引,元素)\nlist_name.insert(2, \"梁缘\")\nprint(list_name)\n# 3. 定位\n# -- 索引\nprint(list_name)\nprint(list_name[1]) # 读取第二个元素\nlist_name[2] = \"老梁\" # 修改第三个元素\n# -- 切片\n# 读取前两个元素\nprint(list_name[:2])\n# 修改后两个元素\nlist_name[-2:] = [\"陆陆\", \"瑞瑞\"]\n# 将年龄列表所有元素归零\n# list_age[:] = [0,0,0,0]\n# list_age[:] = [0] * 4\nlist_age[:] = [0] * len(list_age)\nprint(list_age)\n# 4. 删除\n# -- 根据定位\ndel list_name[1]\ndel list_age[:2], list_sex[-2:]\n# -- 根据元素\n# 列表名.remove(元素)\n# 注意:元素不存在则报错\n# 元素存在多个,只会删除第一个\n# list_name.remove(\"老梁\")\nif \"梁哥\" in list_name:\n list_name.remove(\"梁哥\")\nprint(list_name)\n# 5. 
遍历\n# -- 从头到尾读取\n# for item in 列表:\n# item 是列表元素\n# 查找叠字的姓名\nfor item in list_name:\n if item[0] == item[1]:\n print(item)\n\n# -- 从头到尾修改\n# for i in range(len(列表)):\n# i 是列表元素的索引\n# 列表[i]是列表元素\nlist_sex = [\"男\", \"女\", \"女\"]\nfor i in range(len(list_sex)): # 0 1 2\n if list_sex[i] == \"男\":\n list_sex[i] = \"女\"\n else:\n list_sex[i] = \"男\"\nprint(list_sex)\n","repo_name":"15149295552/Code","sub_path":"Month02/day04/demo08.py","file_name":"demo08.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"3406147969","text":"def order(*request):\n global in_stock\n recipe = {\"Эспрессо\": {\"coffee\": 1, \"milk\": 0, \"cream\": 0},\n \"Капучино\": {\"coffee\": 1, \"milk\": 3, \"cream\": 0},\n \"Макиато\": {\"coffee\": 2, \"milk\": 1, \"cream\": 0},\n \"Кофе по-венски\": {\"coffee\": 1, \"milk\": 0, \"cream\": 2},\n \"Латте Макиато\": {\"coffee\": 1, \"milk\": 2, \"cream\": 1},\n \"Кон Панна\": {\"coffee\": 1, \"milk\": 0, \"cream\": 1}}\n coffee, milk, cream = in_stock[\"coffee\"], in_stock[\"milk\"], in_stock[\"cream\"]\n in_request = None\n for req in request:\n coffee_, milk_, cream_ = recipe[req][\"coffee\"], recipe[req][\"milk\"], recipe[req][\"cream\"]\n if coffee_ <= coffee and milk_ <= milk and cream_ <= cream:\n coffee -= coffee_\n milk -= milk_\n cream -= cream_\n in_request = req\n break\n in_stock.update({\"coffee\": coffee, \"milk\": milk, \"cream\": cream})\n if not in_request:\n return 'К сожалению, не можем предложить Вам напиток'\n return in_request\n","repo_name":"0xMihalich/ya_handbook","sub_path":"основы_python/4.2/f/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"31833129269","text":"import socket\n\ndef run():\n s = socket.socket()\n port = 5000\n s.bind(('', port))\n\n # put the socket into listening mode\n s.listen(5)\n print (\"socket is listening\")\n\n while True:\n c, addr = s.accept()\n print('Got connection from', addr)\n data = c.recv(1024).decode()\n if not data:\n continue\n else:\n msg = str(data)[0]\n print(\"from connected user: \" + msg)\n if msg == \"0\":\n stop()\n elif msg == \"1\":\n forward()\n elif msg == \"2\":\n left()\n elif msg == \"3\":\n right()\n elif msg == \"4\":\n backward()\n else:\n print(msg)\n print(\"Unknown message\")\n c.send('1'.encode())\n c.close()\n\n\ndef left():\n print(\"Turning left\")\n\n\ndef right():\n print(\"Turning right\")\n\n\ndef forward():\n print(\"Moving forward\")\n\n\ndef backward():\n print(\"Moving backward\")\n\n\ndef stop():\n print(\"stop\")\n\n\nif __name__ == \"__main__\":\n run()","repo_name":"fsywudi/5725finalproject","sub_path":"app/src/main/java/com/example/a5725finalproject/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"40508465345","text":"import sys\nfrom gensim import corpora, matutils, models\nfrom morphological_analyze import morphological_analyze\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.grid_search import GridSearchCV\nimport numpy as np\n\ndictionary = corpora.Dictionary.load_from_text('app/analytics/dictionary.txt')\n\n# 正解の回答IDの配列\nlabel_train = sys.stdin.readline().split(\",\")\n\n# 各質問中の名詞、動詞、形容詞、形容動詞が入っている配列in配列\nwords_train = 
list(map(morphological_analyze, sys.stdin.readline().split(\",\")))\n\nbow_corpus = list(map(dictionary.doc2bow, words_train))\n\nbow_train = list(map(dictionary.doc2bow, words_train))\n#\n# # build an array-in-array of feature vectors, one per question\nvector_train = []\nfor bow in bow_train:\n    dense = list(matutils.corpus2dense([bow], num_terms=len(dictionary)).T[0])\n    vector_train.append(dense)\n\n# hyperparameter tuning\n# vector_train_s, vector_test_s, label_train_s, label_test_s = train_test_split(vector_train, label_train, test_size=0.2)\n# tuned_parameters = [{'C': [0.8, 0.9, 1, 1.1, 1.2], 'dual': [True, False], 'multi_class':['ovr', 'crammer_singer']}]\n#\n# scores = ['accuracy', 'precision', 'recall']\n# for score in scores:\n#     print ('\\n' + '='*50)\n#     print (score)\n#     print ('='*50)\n#\n#     clf = GridSearchCV(LinearSVC(), tuned_parameters, cv=5, scoring=score, n_jobs=-1)\n#     clf.fit(vector_train_s, label_train_s)\n#\n#     print(\"\\n+ best parameters:\\n\")\n#     print(clf.best_estimator_)\n#\n#     print(\"\\n+ mean CV scores on the training data:\\n\")\n#     for params, mean_score, all_scores in clf.grid_scores_:\n#         print(\"{:.3f} (+/- {:.3f}) for {}\".format(mean_score, all_scores.std() / 2, params))\n\n\n# repeat the 80% train / 20% test evaluation (100 runs below) and compute the mean accuracy\n# Gaussian naive Bayes classifier\naccuracy_training_rates = []\naccuracy_rates = []\nfor var in range(0, 100):\n    vector_train_s, vector_test_s, label_train_s, label_test_s = train_test_split(vector_train, label_train, test_size=0.2)\n    # instantiate the classifier\n    estimator = GaussianNB()\n    # fit only on the training split\n    estimator.fit(vector_train_s, label_train_s)\n\n    test_average = estimator.score(vector_test_s, label_test_s)\n    training_average = estimator.score(vector_train_s, label_train_s)\n    accuracy_training_rates.append(training_average)\n    accuracy_rates.append(test_average)\n\nprint(\"Mean test-set accuracy\")\nprint(sum(accuracy_rates)/len(accuracy_rates))\nprint(\"Test-set accuracy standard deviation\")\ndata = np.array(accuracy_rates)\nprint(str(np.std(data)))\n\nprint(\"Mean training-set accuracy\")\nprint(sum(accuracy_training_rates)/len(accuracy_training_rates))\nprint(\"Training-set accuracy standard deviation\")\ntrain_data = np.array(accuracy_training_rates)\nprint(str(np.std(train_data)))\n","repo_name":"takayuki-ochiai/inquiry_bot","sub_path":"app/analytics/archive/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"74065549777","text":"def recursion(i):\n    if i==0:\n        return (s+1)*2\n    else:\n        return (recursion(i-1)+1)*2\n\nx=int(input(\"How many days did the little monkey eat peaches: \"))\ns=int(input(\"How many peaches were left on the last day: \"))\nk=recursion(x-2)\nprint(\"The little monkey had \"+str(k)+\" peaches in total\")\n","repo_name":"gzzzzy/python-file-for-the-first-semester","sub_path":"小猴吃桃.py","file_name":"小猴吃桃.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"12228648680","text":"# -*- coding:utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nimport time\nimport datetime\nimport logging\nimport string\nimport itertools\nimport random  # missing in the original; random_string_generator below uses random.choice\nfrom accounts.models import MyRoles,User\nfrom core.models import Organization,Personalized\nfrom core.menus import buildbasetree\nimport json\n\nfrom amrs.models import (\n    Watermeter,\n    Bigmeter,\n)\n\nlogger_info = logging.getLogger('info_logger')\n\n\ndef random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):\n    return ''.join(random.choice(chars) for _ in range(size))\n\n\n\"\"\"\nWhen you call values() on a queryset where the Model has a ManyToManyField\nand there are 
multiple related items, it returns a separate dictionary for each\nrelated item. This function merges the dictionaries so that there is only\none dictionary per id at the end, with lists of related items for each.\n\"\"\"\ndef merge_values(values):\n grouped_results = itertools.groupby(values, key=lambda value: value['id'])\n print(grouped_results)\n merged_values = []\n for k, g in grouped_results:\n print( k)\n groups = list(g)\n merged_value = {}\n for group in groups:\n for key, val in group.items():\n if not merged_value.get(key):\n merged_value[key] = val\n elif val != merged_value[key]:\n if isinstance(merged_value[key], list):\n if val not in merged_value[key]:\n merged_value[key].append(val)\n else:\n old_val = merged_value[key]\n merged_value[key] = [old_val, val]\n merged_values.append(merged_value)\n return merged_values\n\nclass Command(BaseCommand):\n help = 'deloy project by intializer related data.'\n\n def add_arguments(self, parser):\n # parser.add_argument('sTime', type=str)\n\n \n\n parser.add_argument(\n '--initializer',\n action='store_true',\n dest='initializer',\n default=False,\n help='initializer Organization and super Role'\n )\n\n parser.add_argument(\n '--watermeter_repeat',\n action='store_true',\n dest='watermeter_repeat',\n default=False,\n help='watermeter_repeat Organization and super Role'\n )\n\n\n\n\n def handle(self, *args, **options):\n # sTime = options['sTime']\n t1=time.time()\n count = 0\n aft = 0\n\n if options['watermeter_repeat']:\n belongto = Organization.objects.get(name='宁夏水投吴忠')\n\n vwatermeter_list = belongto.watermeter_list_queryset('')\n for vm in vwatermeter_list:\n amrs = vm.amrs_watermeter\n wateraddr = amrs.wateraddr\n findit = Watermeter.objects.filter(wateraddr=wateraddr).count()\n if findit > 1:\n print(vm.amrs_watermeter_id,amrs.communityid,amrs.nodeaddr,amrs.wateraddr,amrs.serialnumber)\n \n\n if options['initializer']:\n # organization\n organ = {\n 'name':'威尔沃',\n 'pId':'organization',\n 'cid':'virvo_organization',\n 'is_org':True,\n 'attribute':'非自来水公司',\n 'register_date':'2018-06-01',\n 'owner_name':'申应统',\n 'uuid':'virvo_super'\n }\n try:\n organ_obj = Organization.objects.first()\n if not organ_obj:\n Organization.objects.create(**organ)\n except Exception as e:\n print('failed to create Organization Virvo.:',e)\n return\n\n count += 1\n # super Role\n virvo = Organization.objects.first()\n\n ctree = json.dumps(buildbasetree())\n role = {\n 'name':'超级管理员',\n 'permissionTree':ctree,\n 'belongto':virvo,\n 'uid':'virvo_super'\n\n }\n try:\n role_obj = MyRoles.objects.first()\n if not role_obj:\n MyRoles.objects.create(**role)\n else:\n role_obj.permissionTree = ctree\n role_obj.belongto = virvo\n role_obj.save()\n except:\n print(\"failed to create super role .\")\n return\n count += 1\n\n # super user\n try:\n super_role = MyRoles.objects.first()\n super_user = {\n 'user_name':'admin',\n 'is_active':True,\n 'staff':True,\n 'admin':True,\n 'belongto':virvo,\n 'Role':super_role,\n 'uuid':'virvo_super'\n }\n user_obj = User.objects.first()\n if not user_obj:\n user = User.objects.create(**super_user)\n user.set_password('123456')\n user.save()\n else:\n\n user_obj.Role = super_role\n user_obj.belongto = virvo\n user_obj.save()\n except:\n print(\"failed to set user role and belongto.\")\n return\n count+=1\n\n # personlized\n personl = {\n \"ptype\":\"default\",\n \"loginLogo\":\"LOGO-KINGDA.png\",\n \"webIco\":\"favicon.ico\",\n \"homeLogo\":\"LOGO-KINGDA.png\",\n \"topTitle\":\"智慧水务管控一体化\",\n 
\"copyright\":\"©2015-2017威尔沃自动化设备(深圳)有限公司\",\n \"websiteName\":\"www.virvo.com.cn\",\n \"recordNumber\":\"京ICP备15041746号-1\",\n \"frontPageMsg\":\"personality_systemconfig\",\n \"frontPageMsgUrl\":\"/sysm/personalized/list/\",\n \"updateDataUsername\":\"admin\"\n # \"updateDataTime\":,\n # \"belongto\":\n }\n try:\n Personalized.objects.create(**personl)\n\n except:\n print(\"failed to set default personlize information...\")\n count+=1\n \n \n \n \n # print('cnt=',cnt,cnt2)\n t2 = time.time() - t1\n self.stdout.write(self.style.SUCCESS(f'total {count} Affected {aft} row(s)!,elapsed {t2}'))\n","repo_name":"apengok/bsc2000","sub_path":"core/management/commands/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"22296089435","text":"import pygame\n\n\nclass frame_viewer_entity:\n\tdef __init__(self,frame_width,frame_height):\n\t\tpygame.init()\n\t\tself.width = frame_width\n\t\tself.height = frame_height\n\t\tself.screen = pygame.display.set_mode((frame_width,frame_height))\n\t\tself.types = ['display']\n\t\tself.actions =[]\n\t\tself.active = True\n\t\treturn\n\n\tdef insert_action(self,action):\n\t\taction.entity_state = self\n\t\tself.actions.append(action)\n\t\treturn\n\n\tdef insert_entity(self,entity):\n\t\tentity.screen = self.screen\n\t\tfor action in entity.actions:\n\t\t\tself.actions.append(action)\n\n\tdef terminate(self):\n\t\tfrom sys import exit\n\t\tpygame.quit()\n\t\texit()\n\t\treturn\n\n\n\n\t\t\n\n","repo_name":"akhilamol/CPSC-6160---2D-Game-Engine-Construction","sub_path":"game_jam_aaniyan/final_project_v10/engine/play/entity/frame_viewer.py","file_name":"frame_viewer.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"24909946207","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import Storage\nfrom django.forms import ModelForm\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfirmForm(forms.Form):\n pk = forms.IntegerField(disabled=True, widget=forms.HiddenInput())\n\n\nclass StorageForm(ModelForm):\n class Meta:\n model = Storage\n fields = [\"owner\", \"what\", \"image\", \"location\", \"extra_info\", \"duration\"]\n labels = {\n \"extra_info\": \"Justification or explanation\",\n }\n help_texts = {\n \"image\": \"Optional - but highly recommended!\",\n \"location\": \"E.g. 
'on the project table', 'on top of the CV cabinet', etc\",\n            \"duration\": \"In days; short storage requests (under 30 days) get automatic approval.\",\n        }\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        storage = None\n\n        if \"instance\" in kwargs:\n            storage = kwargs[\"instance\"]\n\n        if not storage:\n            return\n\n        del self.fields[\"owner\"]\n\n        if not storage.location_updatable():\n            del self.fields[\"location\"]\n\n        if not storage.justification_updatable():\n            del self.fields[\"extra_info\"]\n\n        if not storage.editable():\n            del self.fields[\"what\"]\n            del self.fields[\"duration\"]\n","repo_name":"MakerSpaceLeiden/makerspaceleiden-crm","sub_path":"storage/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"13"} +{"seq_id":"32576497315","text":"import pandas as pd\r\nimport numpy as np\r\nimport lightgbm as lgb\r\nfrom machine_learning_class import machine_learning_class\r\nimport machine_learning_class as ml\r\nfrom datetime import datetime\r\nimport scipy.stats as sts\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\t### model train\r\n\ttrain_data_path = 'data_sample.h5'\r\n\ttarget_name = 'Target'\r\n\tml_obj = machine_learning_class(train_data_path, target_name, select_key='X')\r\n\tml_obj.get_X_Y_train()\r\n\tdata_train = lgb.Dataset(ml_obj.X_train, ml_obj.y_train, silent=True)\r\n\tdata_eval = lgb.Dataset(ml_obj.X_test, ml_obj.y_test, reference=data_train)\r\n\r\n\t### set the model's training parameters - important!\r\n\tmodel_1_params = {'application': 'regression',\r\n\t                  'boosting':'gbdt',\r\n\t                  'num_iterations':40,\r\n\t                  'learning_rate':0.05,\r\n\t                  'max_depth':10,\r\n\t                  'num_leaves':40,\r\n\t                  'verbose':2,\r\n\t                  'feature_fraction':0.7,\r\n\t                  'bagging_fraction':0.7,\r\n\t                  'bagging_freq':5,\r\n\t                  'min_data_in_leaf':10,\r\n\t                  'lambda_l2':1,\r\n\t                  'num_threads':15,\r\n\t                  'early_stopping_round':10,\r\n\t                  'metric':'l2'}\r\n\r\n\tstart = datetime.now()\r\n\t# lgb_model = lgb.train(model_1_params, data_train, valid_sets=data_eval)\r\n\r\n########\r\n\tdef corr_metric(y_hat, data):\r\n\t    y_real = data.get_label()\r\n\t    corr = np.corrcoef(y_hat, y_real)[0][1]\r\n\t    return 'Correlation', corr, True\r\n\r\n\tlgb_model = lgb.train(model_1_params, data_train, valid_sets=[data_eval, data_train], feval=corr_metric, \r\n\t                      valid_names=['val', 'train'], learning_rates=lambda iter: 0.05 * (0.999 ** iter),\r\n\t                      evals_result = {})\r\n    # This section shows how to define a custom metric (feval) for evaluating the model during training.\r\n    # learning_rates=lambda iter: 0.05 * (0.999 ** iter) controls the learning-rate decay: as the iteration\r\n    # number grows, the learning rate shrinks geometrically.\r\n########\r\n\r\n\r\n\tend = datetime.now()\r\n\ttotal_minute = (end - start).days * 24 * 60 + (end - start).seconds / 60\r\n\tprint('total training time is {} minutes'.format(total_minute))\r\n\tlgb_model.save_model('sample_model' + '.txt')\r\n\r\n\r\n\t### model predict\r\n\tprint('use the model to predict...')\r\n\tdata_ = pd.read_hdf('data_sample.h5', key='X')\r\n\tdata_input = data_.loc[:, data_.columns != 'Target']\r\n\tdata_input_target = data_.loc[:, data_.columns == 'Target'].values\r\n\tlgb_model = lgb.Booster(model_file='sample_model.txt')\r\n\ty_testset_predict = lgb_model.predict(data_input.values, num_iteration=lgb_model.best_iteration)\r\n\t# plt.plot(y_testset_predict, data_input_target, '.')\r\n\t# plt.show()\r\n\r\n","repo_name":"kid3night/High-Frequency-Research","sub_path":"Machine_Learning_structure/lightgbm/lgb_sample_code.py","file_name":"lgb_sample_code.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"13"} +{"seq_id":"35856191042","text":"from torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms as transform_lib\nfrom torchvision.datasets import FashionMNIST\n\nfrom pl_bolts.datamodules.lightning_datamodule import LightningDataModule\n\n\nclass FashionMNISTDataModule(LightningDataModule):\n\n    name = 'fashion_mnist'\n\n    def __init__(\n            self,\n            data_dir: str,\n            val_split: int = 5000,\n            num_workers: int = 16,\n            *args,\n            **kwargs,\n    ):\n        \"\"\"\n        Standard FashionMNIST, train, val, test splits and transforms\n\n        Transforms::\n\n            mnist_transforms = transform_lib.Compose([\n                transform_lib.ToTensor()\n            ])\n\n        Example::\n\n            from pl_bolts.datamodules import FashionMNISTDataModule\n\n            dm = FashionMNISTDataModule('.')\n            model = LitModel(datamodule=dm)\n\n        Args:\n            data_dir: where to save/load the data\n            val_split: how many of the training images to use for the validation split\n            num_workers: how many workers to use for loading data\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.dims = (1, 28, 28)\n        self.data_dir = data_dir\n        self.val_split = val_split\n        self.num_workers = num_workers\n\n    @property\n    def num_classes(self):\n        \"\"\"\n        Return:\n            10\n        \"\"\"\n        return 10\n\n    def prepare_data(self):\n        \"\"\"\n        Saves FashionMNIST files to data_dir\n        \"\"\"\n        FashionMNIST(self.data_dir, train=True, download=True, transform=transform_lib.ToTensor())\n        FashionMNIST(self.data_dir, train=False, download=True, transform=transform_lib.ToTensor())\n\n    def train_dataloader(self, batch_size=32, transforms=None):\n        \"\"\"\n        FashionMNIST train set removes a subset to use for validation\n\n        Args:\n            batch_size: size of batch\n            transforms: custom transforms\n        \"\"\"\n        if transforms is None:\n            transforms = self._default_transforms()\n\n        dataset = FashionMNIST(self.data_dir, train=True, download=False, transform=transforms)\n        train_length = len(dataset)\n        dataset_train, _ = random_split(dataset, [train_length - self.val_split, self.val_split])\n        loader = DataLoader(\n            dataset_train,\n            batch_size=batch_size,\n            shuffle=True,\n            num_workers=self.num_workers,\n            drop_last=True,\n            pin_memory=True\n        )\n        return loader\n\n    def val_dataloader(self, batch_size=32, transforms=None):\n        \"\"\"\n        FashionMNIST val set uses a subset of the training set for validation\n\n        Args:\n            batch_size: size of batch\n            transforms: custom transforms\n        \"\"\"\n        if transforms is None:\n            transforms = self._default_transforms()\n\n        
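# Note: random_split draws a fresh random permutation on every call, so this\n        # validation subset is not guaranteed to be disjoint from the training subset\n        # created in train_dataloader. A seeded split, e.g.\n        # random_split(dataset, lengths, generator=torch.Generator().manual_seed(42)),\n        # would keep the two splits consistent (the generator kwarg assumes torch >= 1.6).\n        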
dataset = FashionMNIST(self.data_dir, train=True, download=True, transform=transforms)\n train_length = len(dataset)\n _, dataset_val = random_split(dataset, [train_length - self.val_split, self.val_split])\n loader = DataLoader(\n dataset_val,\n batch_size=batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n drop_last=True,\n pin_memory=True\n )\n return loader\n\n def test_dataloader(self, batch_size=32, transforms=None):\n \"\"\"\n FashionMNIST test set uses the test split\n\n Args:\n batch_size: size of batch\n transforms: custom transforms\n \"\"\"\n if transforms is None:\n transforms = self._default_transforms()\n\n dataset = FashionMNIST(self.data_dir, train=False, download=False, transform=transforms)\n loader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n drop_last=True,\n pin_memory=True\n )\n return loader\n\n def _default_transforms(self):\n mnist_transforms = transform_lib.Compose([\n transform_lib.ToTensor()\n ])\n return mnist_transforms\n","repo_name":"lebrice/pytorch-lightning-bolts","sub_path":"pl_bolts/datamodules/fashion_mnist_datamodule.py","file_name":"fashion_mnist_datamodule.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"13"} +{"seq_id":"13301245914","text":"#!/usr/bin/python3 -u\n\nimport sys\nimport time\nimport datetime as dt\nfrom evohomeclient2 import EvohomeClient\nfrom keys import username, password\nimport prometheus_client as prom\n\npoll_interval = 60\n\n\nclass hashabledict(dict):\n def __hash__(self):\n return hash(tuple(sorted(self.items())))\n\n\ndef exceptKeyError(func, *args):\n try:\n return func(*args)\n except KeyError:\n pass\n\n\ndef loginEvohome(myclient):\n try:\n myclient._login()\n except Exception as e:\n print(\"{}: {}\".format(type(e).__name__, str(e)), file=sys.stderr)\n return False\n return True\n\n\ndef _get_set_point(zone_schedule, day_of_week, spot_time):\n daily_schedules = {\n s[\"DayOfWeek\"]: s[\"Switchpoints\"] for s in zone_schedule[\"DailySchedules\"]\n }\n if not daily_schedules:\n return None\n switch_points = {\n dt.time.fromisoformat(s[\"TimeOfDay\"]): s[\"heatSetpoint\"]\n for s in daily_schedules[day_of_week]\n }\n candidate_times = [k for k in switch_points.keys() if k <= spot_time]\n if len(candidate_times) == 0:\n # no time less than current time\n return None\n\n candidate_time = max(candidate_times)\n return switch_points[candidate_time]\n\n\ndef calculate_planned_temperature(zone_schedule):\n current_time = dt.datetime.now().time()\n day_of_week = dt.datetime.today().weekday()\n return (\n _get_set_point(zone_schedule, day_of_week, current_time)\n or _get_set_point(\n zone_schedule, day_of_week - 1 if day_of_week > 0 else 6, dt.time.max\n )\n or 0\n )\n\n\nschedules_updated = dt.datetime.min\nschedules = {}\n\n\ndef get_schedules():\n global schedules_updated\n global schedules\n\n # this takes time, update once per hour\n if schedules_updated < dt.datetime.now() - dt.timedelta(hours=1):\n for zone in client._get_single_heating_system()._zones:\n try:\n schedules[zone.zoneId] = zone.schedule()\n except:\n schedules[zone.zoneId] = {\"DailySchedules\": []}\n\n # schedules = {\n # zone.zone_id: zone.schedule()\n # for zone in client._get_single_heating_system()._zones\n # }\n schedules_updated = dt.datetime.now()\n\n\nif __name__ == \"__main__\":\n eht = prom.Gauge(\n \"evohome_temperature_celcius\",\n \"Evohome temperatuur in celsius\",\n [\"name\", \"thermostat\", 
\"id\", \"type\"],\n )\n zavail = prom.Gauge(\n \"evohome_zone_available\",\n \"Evohome zone availability\",\n [\"name\", \"thermostat\", \"id\"],\n )\n zfault = prom.Gauge(\n \"evohome_zone_fault\",\n \"Evohome zone has active fault(s)\",\n [\"name\", \"thermostat\", \"id\"],\n )\n zmode = prom.Enum(\n \"evohome_zone_mode\",\n \"Evohome zone mode\",\n [\"name\", \"thermostat\", \"id\"],\n states=[\"FollowSchedule\", \"TemporaryOverride\", \"PermanentOverride\"],\n )\n tcsperm = prom.Gauge(\n \"evohome_temperaturecontrolsystem_permanent\",\n \"Evohome temperatureControlSystem is in permanent state\",\n [\"id\"],\n )\n tcsfault = prom.Gauge(\n \"evohome_temperaturecontrolsystem_fault\",\n \"Evohome temperatureControlSystem has active fault(s)\",\n [\"id\"],\n )\n tcsmode = prom.Enum(\n \"evohome_temperaturecontrolsystem_mode\",\n \"Evohome temperatureControlSystem mode\",\n [\"id\"],\n states=[\n \"Auto\",\n \"AutoWithEco\",\n \"AutoWithReset\",\n \"Away\",\n \"DayOff\",\n \"HeatingOff\",\n \"Custom\",\n ],\n )\n upd = prom.Gauge(\"evohome_updated\", \"Evohome client last updated\")\n up = prom.Gauge(\"evohome_up\", \"Evohome client status\")\n prom.start_http_server(8082)\n try:\n client = EvohomeClient(username, password)\n except Exception as e:\n print(\n \"ERROR: can't create EvohomeClient\\n{}: {}\".format(\n type(e).__name__, str(e)\n ),\n file=sys.stderr,\n )\n sys.exit(1)\n loggedin = True\n lastupdated = 0\n tcsalerts = set()\n zonealerts = dict()\n\n oldids = set()\n labels = {}\n lastup = False\n while True:\n temps = []\n newids = set()\n try:\n temps = list(client.temperatures())\n get_schedules()\n loggedin = True\n updated = True\n lastupdated = time.time()\n except Exception as e:\n print(\"{}: {}\".format(type(e).__name__, str(e)), file=sys.stderr)\n temps = []\n updated = False\n loggedin = loginEvohome(client)\n if loggedin:\n continue\n\n if loggedin and updated:\n up.set(1)\n upd.set(lastupdated)\n tcs = client._get_single_heating_system()\n sysmode = tcs.systemModeStatus\n tcsperm.labels(client.system_id).set(\n float(sysmode.get(\"isPermanent\", True))\n )\n tcsmode.labels(client.system_id).state(sysmode.get(\"mode\", \"Auto\"))\n activefaults = set()\n if tcs.activeFaults:\n tcsfault.labels(client.system_id).set(1)\n for af in tcs.activeFaults:\n afhd = hashabledict(af)\n activefaults.add(afhd)\n if afhd not in tcsalerts:\n tcsalerts.add(afhd)\n print(\n \"fault in temperatureControlSystem: {}\".format(af),\n file=sys.stderr,\n )\n for af in tcsalerts - activefaults:\n afhd = hashabledict(af)\n tcsalerts.discard(afhd)\n print(\n \"resolved in temperatureControlSystem: {}\".format(af),\n file=sys.stderr,\n )\n if not tcs.activeFaults:\n tcsfault.labels(client.system_id).set(0)\n tcsalerts = set()\n for d in temps:\n if d[\"temp\"] is None and not d[\"name\"] and d[\"setpoint\"] == 62.0:\n continue\n newids.add(d[\"id\"])\n labels[d[\"id\"]] = [d[\"name\"], d[\"thermostat\"], d[\"id\"]]\n if d[\"temp\"] is None:\n zavail.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"]).set(0)\n exceptKeyError(\n eht.remove, d[\"name\"], d[\"thermostat\"], d[\"id\"], \"measured\"\n )\n else:\n zavail.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"]).set(1)\n eht.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"], \"measured\").set(\n d[\"temp\"]\n )\n eht.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"], \"setpoint\").set(\n d[\"setpoint\"]\n )\n eht.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"], \"planned\").set(\n calculate_planned_temperature(schedules[d[\"id\"]])\n )\n 
zmode.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"]).state(\n d.get(\"setpointmode\", \"FollowSchedule\")\n )\n if d[\"id\"] not in zonealerts.keys():\n zonealerts[d[\"id\"]] = set()\n activefaults = set()\n if d.get(\"activefaults\"):\n zonefault = 1\n for af in d[\"activefaults\"]:\n afhd = hashabledict(af)\n activefaults.add(afhd)\n if afhd not in zonealerts[d[\"id\"]]:\n zonealerts[d[\"id\"]].add(afhd)\n print(\n \"fault in zone {}: {}\".format(d[\"name\"], af),\n file=sys.stderr,\n )\n for af in zonealerts[d[\"id\"]] - activefaults:\n afhd = hashabledict(af)\n zonealerts[d[\"id\"]].discard(afhd)\n print(\n \"resolved in zone {}: {}\".format(d[\"name\"], af),\n file=sys.stderr,\n )\n if not d.get(\"activefaults\"):\n zonefault = 0\n zonealerts[d[\"id\"]] = set()\n zfault.labels(d[\"name\"], d[\"thermostat\"], d[\"id\"]).set(zonefault)\n lastup = True\n else:\n up.set(0)\n if lastup:\n exceptKeyError(tcsperm.remove, client.system_id)\n exceptKeyError(tcsfault.remove, client.system_id)\n exceptKeyError(tcsmode.remove, client.system_id)\n lastup = False\n\n for i in oldids:\n if i not in newids:\n exceptKeyError(eht.remove, *labels[i] + [\"measured\"])\n exceptKeyError(eht.remove, *labels[i] + [\"setpoint\"])\n exceptKeyError(eht.remove, *labels[i] + [\"planned\"])\n exceptKeyError(zavail.remove, *labels[i])\n exceptKeyError(zmode.remove, *labels[i])\n exceptKeyError(zfault.remove, *labels[i])\n oldids = newids\n\n time.sleep(poll_interval)\n","repo_name":"RichieB2B/evohome-exporter","sub_path":"evohome-exporter.py","file_name":"evohome-exporter.py","file_ext":"py","file_size_in_byte":9185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"35054979928","text":"from pytest import raises\nfrom easel.main import EaselTest\nfrom conftest import tmp\n\ndef test_easel():\n # test easel without any subcommands or arguments\n with EaselTest() as app:\n app.run()\n assert app.exit_code == 0\n # app.db.storage.close()\n\ndef test_easel_debug():\n # test that debug mode is functional\n argv = ['--debug']\n with EaselTest(argv=argv) as app:\n app.run()\n assert app.debug is True\n # app.close()\n # app.db.storage.close()\n\ndef test_extensions():\n with EaselTest(argv=[]) as app:\n app.run()\n assert app.ext.list() is not None\n app.log.info(app.ext.list())\n # app.db.storage.close()\n\ndef test_api_requests():\n # async with self._session.get(url=\"http://httpbin.org/headers\") as r:\n # json_body = await r.json()\n # pprint(json_body)\n # assert json_body['headers']['Authorization'] == self.headers[\"Authorization\"]\n pass\n\ndef test_update():\n argv = ['-u']\n with EaselTest(argv=argv) as app:\n app.run()\n ids = []\n for table in app.db.tables():\n for doc in app.db.table(table).all():\n if table == \"pages\":\n assert doc.get(\"url\") is not None\n assert doc[\"url\"] not in ids\n ids.append(doc[\"url\"])\n else:\n assert table != \"pages\"\n assert doc.get(\"id\") is not None\n assert doc[\"id\"] not in ids\n ids.append(doc[\"id\"])\n if table != \"courses\":\n assert doc.get(\"course_id\") is not None\n # app.db.storage.close()\n\n# def test_assignments(tmp):\n# argv = ['assignments', 'list']\n# with EaselTest(argv=argv) as app:\n# app.run()\n \n# app.db.storage.close()\n","repo_name":"probably-neb/easel","sub_path":"tests/test_easel.py","file_name":"test_easel.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} 
+{"seq_id":"17562732965","text":"import struct\n\nnb_data_point = 0\nwith open(\"2023_05_27_16_19_52.bin\",'rb') as f:\n chunk = f.read(44)\n while chunk != \"\":\n data_point = struct.unpack(\"= 200:\n break\n\n\n# typedef struct __attribute__ ((packed)) data_point {\n# float pitch; // The whillie/stoppy of the bike\n# float roll; // The lean of the motorbike\n# float acceleration; // Y acceleration\n# float speed; //\n# float direction; // Direction of the bike from the GPS\n# double lat; // Current latitude\n# double lng; // Current longitude\n# uint32_t date; // Current date\n# uint32_t time; // Current time \n#} data_point_t;\n","repo_name":"yohan-hicof/motorbike_lean_tracker","sub_path":"example_data/read_data_file.py","file_name":"read_data_file.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"18343204187","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\nfrom rest_framework.routers import DefaultRouter\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom users import views\n\nadmin.autodiscover()\nrouter = DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'questions', views.QuestionViewSet)\nrouter.register(r'answers', views.AnswerViewSet)\nrouter.register(r'girls', views.GirlViewSet)\n\nurlpatterns = patterns('',\n url(r'^$', # noqa\n TemplateView.as_view(template_name='pages/home.html'),\n name=\"home\"),\n url(r'^about/$',\n TemplateView.as_view(template_name='pages/about.html'),\n name=\"about\"),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n # User management\n url(r'^users/', include(\"users.urls\", namespace=\"users\")),\n url(r'^accounts/', include('allauth.urls')),\n\n # Uncomment the next line to enable avatars\n url(r'^avatar/', include('avatar.urls')),\n\n # Your stuff: custom urls go here\n url(r'^api/v1/', include(router.urls)),\n url(r'^api/v1/rest-auth/', include('rest_auth.urls')),\n url(r'^api/v1/rest-auth/', include('rest_auth.urls')),\n url(r'^api/v1/rest-auth/registration/', include('rest_auth.registration.urls')),\n url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token'),\n url(r'^api-auth/', include('rest_framework.urls',\n namespace='rest_framework')),\n url(r'^docs/', include('rest_framework_swagger.urls')),\n\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"oneuptim/switch","sub_path":"switch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"5926223552","text":" # ############################################################################\n #\n # Copyright (c) Microsoft Corporation. \n #\n # This source code is subject to terms and conditions of the Apache License, Version 2.0. A \n # copy of the license can be found in the License.html file at the root of this distribution. If \n # you cannot locate the Apache License, Version 2.0, please send an email to \n # vspython@microsoft.com. 
By using this source code in any fashion, you are agreeing to be bound \n # by the terms of the Apache License, Version 2.0.\n #\n # You must not remove this notice, or any other, from this software.\n #\n # ###########################################################################\n\nimport sys\nimport struct\nimport cStringIO\nimport os\nimport traceback\nfrom os import path\nfrom xml.dom import minidom\n\n# http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S3\n\nFCGI_VERSION_1 = 1\nFCGI_HEADER_LEN = 8\n\nFCGI_BEGIN_REQUEST = 1\nFCGI_ABORT_REQUEST = 2\nFCGI_END_REQUEST = 3\nFCGI_PARAMS = 4\nFCGI_STDIN = 5\nFCGI_STDOUT = 6\nFCGI_STDERR = 7\nFCGI_DATA = 8\nFCGI_GET_VALUES = 9\nFCGI_GET_VALUES_RESULT = 10\nFCGI_UNKNOWN_TYPE = 11\nFCGI_MAXTYPE = FCGI_UNKNOWN_TYPE\n\nFCGI_NULL_REQUEST_ID = 0\n\nFCGI_KEEP_CONN = 1\n\nFCGI_RESPONDER = 1\nFCGI_AUTHORIZER = 2\nFCGI_FILTER = 3\n\nFCGI_REQUEST_COMPLETE = 0\nFCGI_CANT_MPX_CONN = 1\nFCGI_OVERLOADED = 2\nFCGI_UNKNOWN_ROLE = 3\n\nFCGI_MAX_CONNS = \"FCGI_MAX_CONNS\"\nFCGI_MAX_REQS = \"FCGI_MAX_REQS\"\nFCGI_MPXS_CONNS = \"FCGI_MPXS_CONNS\"\n\nclass FastCgiRecord(object):\n    \"\"\"Represents a FastCgiRecord. Encapsulates the type, role, flags. Holds\nonto the params which we will receive and update later.\"\"\"\n    def __init__(self, type, req_id, role, flags):\n        self.type = type\n        self.req_id = req_id\n        self.role = role\n        self.flags = flags\n        self.params = {}\n\n    def __repr__(self):\n        return '<FastCgiRecord type:%d req_id:%d role:%d flags:%d>' % (self.type,\n                                                                       self.req_id,\n                                                                       self.role,\n                                                                       self.flags)\n\n#typedef struct {\n#   unsigned char version;\n#   unsigned char type;\n#   unsigned char requestIdB1;\n#   unsigned char requestIdB0;\n#   unsigned char contentLengthB1;\n#   unsigned char contentLengthB0;\n#   unsigned char paddingLength;\n#   unsigned char reserved;\n#   unsigned char contentData[contentLength];\n#   unsigned char paddingData[paddingLength];\n#} FCGI_Record;\n\ndef read_fastcgi_record(input):\n    \"\"\"reads the main fast cgi record\"\"\"\n    data = input.read(8)     # read record\n    content_size = ord(data[4]) << 8 | ord(data[5])\n\n    content = input.read(content_size)  # read content \n    input.read(ord(data[6]))  # read padding\n\n    if ord(data[0]) != FCGI_VERSION_1:\n        raise Exception('Unknown fastcgi version ' + str(data[0]))\n\n    req_id = (ord(data[2]) << 8) | ord(data[3])\n\n    reqtype = ord(data[1])\n    processor = REQUEST_PROCESSORS.get(reqtype)\n    if processor is None:\n        # unknown type requested, send response\n        send_response(req_id, FCGI_UNKNOWN_TYPE, data[1] + '\\0' * 7)\n        return None\n\n    return processor(req_id, content)\n\n\ndef read_fastcgi_begin_request(req_id, content):\n    \"\"\"reads the begin request body and updates our\n_REQUESTS table to include the new request\"\"\"\n    #  typedef struct {\n    #      unsigned char roleB1;\n    #      unsigned char roleB0;\n    #      unsigned char flags;\n    #      unsigned char reserved[5];\n    #  } FCGI_BeginRequestBody;\n\n    # TODO: Ignore request if it exists\n    res = FastCgiRecord(\n        FCGI_BEGIN_REQUEST,\n        req_id,\n        (ord(content[0]) << 8) | ord(content[1]),   # role\n        ord(content[2]),  # flags\n    )\n    _REQUESTS[req_id] = res\n\n\ndef read_fastcgi_keyvalue_pairs(content, offset):\n    \"\"\"Reads a FastCGI key/value pair stream\"\"\"\n\n    name_len = ord(content[offset])\n\n    if (name_len & 0x80) != 0:\n        name_full_len = chr(name_len & ~0x80) + content[offset + 1:offset+4]\n        name_len = int_struct.unpack(name_full_len)[0]\n        offset += 4\n    else:\n        offset += 1\n\n    value_len = ord(content[offset])\n\n    if (value_len & 0x80) != 0:\n        value_full_len = chr(value_len & ~0x80) + content[offset+1:offset+4]\n        value_len = 
int_struct.unpack(value_full_len)[0]\n offset += 4\n else:\n offset += 1\n\n name = content[offset:offset+name_len]\n offset += name_len\n \n value = content[offset:offset+value_len]\n offset += value_len\n\n return offset, name, value\n\n\ndef write_name_len(io, name):\n \"\"\"Writes the length of a single name for a key or value in\na key/value stream\"\"\"\n if len(name) <= 0x7f:\n io.write(chr(len(name)))\n else:\n io.write(int_struct.pack(len(name)))\n\n\ndef write_fastcgi_keyvalue_pairs(pairs):\n \"\"\"creates a FastCGI key/value stream and returns it as a string\"\"\"\n res = cStringIO.StringIO()\n for key, value in pairs.iteritems():\n write_name_len(res, key)\n write_name_len(res, value)\n \n res.write(key)\n res.write(value)\n\n return res.getvalue()\n\n\ndef read_fastcgi_params(req_id, content):\n if not content:\n return None\n\n offset = 0\n res = _REQUESTS[req_id].params\n while offset < len(content):\n offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)\n res[name] = value\n\n\ndef read_fastcgi_input(req_id, content):\n \"\"\"reads FastCGI std-in and stores it in wsgi.input passed in the\nwsgi environment array\"\"\"\n res = _REQUESTS[req_id].params\n if 'wsgi.input' not in res:\n res['wsgi.input'] = content\n else:\n res['wsgi.input'] += content\n\n if not content:\n # we've hit the end of the input stream, time to process input...\n return _REQUESTS[req_id]\n\n\ndef read_fastcgi_data(req_id, content):\n \"\"\"reads FastCGI data stream and publishes it as wsgi.data\"\"\"\n res = _REQUESTS[req_id].params\n if 'wsgi.data' not in res:\n res['wsgi.data'] = content\n else:\n res['wsgi.data'] += content\n\n\ndef read_fastcgi_abort_request(req_id, content):\n \"\"\"reads the wsgi abort request, which we ignore, we'll send the\nfinish execution request anyway...\"\"\"\n pass\n\n\ndef read_fastcgi_get_values(req_id, content):\n \"\"\"reads the fastcgi request to get parameter values, and immediately \nresponds\"\"\"\n offset = 0\n request = {}\n while offset < len(content):\n offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)\n request[name] = value\n\n response = {}\n if FCGI_MAX_CONNS in request:\n response[FCGI_MAX_CONNS] = '1'\n\n if FCGI_MAX_REQS in request:\n response[FCGI_MAX_REQS] = '1'\n\n if FCGI_MPXS_CONNS in request:\n response[FCGI_MPXS_CONNS] = '0'\n\n send_response(req_id, FCGI_GET_VALUES_RESULT, \n write_fastcgi_keyvalue_pairs(response))\n\n\n# Formatting of 4-byte ints in network order\nint_struct = struct.Struct('!i')\n\n# Our request processors for different FastCGI protocol requests. 
Only\n# the requests which we receive are defined here.\nREQUEST_PROCESSORS = {\n FCGI_BEGIN_REQUEST : read_fastcgi_begin_request,\n FCGI_ABORT_REQUEST : read_fastcgi_abort_request,\n FCGI_PARAMS : read_fastcgi_params,\n FCGI_STDIN : read_fastcgi_input,\n FCGI_DATA : read_fastcgi_data,\n FCGI_GET_VALUES : read_fastcgi_get_values\n}\n\ndef log(txt):\n \"\"\"Logs fatal errors to a log file if WSGI_LOG env var is defined\"\"\"\n log_file = os.environ.get('WSGI_LOG')\n if log_file:\n with file(log_file, 'a+') as f:\n f.write(txt)\n\n\ndef send_response(id, resp_type, content, streaming = True):\n \"\"\"sends a response w/ the given id, type, and content to the server.\nIf the content is streaming then an empty record is sent at the end to \nterminate the stream\"\"\"\n offset = 0\n while 1:\n if id < 256:\n id_0 = 0\n id_1 = id\n else:\n id_0 = id >> 8\n id_1 = id & 0xff\n \n # content len, padding len, content\n len_remaining = len(content) - offset\n if len_remaining > 65535:\n len_0 = 0xff\n len_1 = 0xff\n content_str = content[offset:offset+65535]\n offset += 65535\n else:\n len_0 = len_remaining >> 8\n len_1 = len_remaining & 0xff\n content_str = content[offset:]\n offset += len_remaining\n\n data = '%c%c%c%c%c%c%c%c%s' % (\n FCGI_VERSION_1, # version\n resp_type, # type\n id_0, # requestIdB1\n id_1, # requestIdB0\n len_0, # contentLengthB1\n len_1, # contentLengthB0\n 0, # paddingLength\n 0, # reserved\n content_str)\n\n os.write(stdout, data)\n if len_remaining == 0 or not streaming:\n break\n\ndef update_environment():\n cur_dir = path.dirname(path.dirname(__file__))\n web_config = path.join(cur_dir, 'Web.config')\n if os.path.exists(web_config):\n try:\n with file(web_config) as wc:\n doc = minidom.parse(wc)\n config = doc.getElementsByTagName('configuration')\n for configSection in config:\n appSettings = configSection.getElementsByTagName('appSettings')\n for appSettingsSection in appSettings:\n values = appSettingsSection.getElementsByTagName('add')\n for curAdd in values:\n key = curAdd.getAttribute('key')\n value = curAdd.getAttribute('value')\n if key and value:\n os.environ[key] = value\n except:\n # unable to read file\n log(traceback.format_exc())\n pass\n\n\nif __name__ == '__main__':\n handler_name = os.getenv('WSGI_HANDLER', 'django.core.handlers.wsgi.WSGIHandler()')\n module, callable = handler_name.rsplit('.', 1)\n if callable.endswith('()'):\n callable = callable.rstrip('()')\n handler = getattr(__import__(module, fromlist=[callable]), callable)()\n else:\n handler = getattr(__import__(module, fromlist=[callable]), callable)\n\n stdout = sys.stdin.fileno()\n try:\n import msvcrt\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n except ImportError:\n pass\n\n update_environment()\n\n _REQUESTS = {}\n\n while True:\n try:\n record = read_fastcgi_record(sys.stdin)\n if record:\n record.params['wsgi.input'] = cStringIO.StringIO(record.params['wsgi.input'])\n record.params['wsgi.version'] = (1,0)\n record.params['wsgi.url_scheme'] = 'https' if record.params.has_key('HTTPS') and record.params['HTTPS'].lower() == 'on' else 'http'\n record.params['wsgi.multiprocess'] = True\n record.params['wsgi.multithread'] = False\n record.params['wsgi.run_once'] = False\n\n def start_response(status, headers, exc_info = None):\n global response_headers, status_line\n response_headers = headers\n status_line = status\n \n errors = sys.stderr = sys.__stderr__ = record.params['wsgi.errors'] = cStringIO.StringIO()\n sys.stdout = sys.__stdout__ = cStringIO.StringIO()\n 
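# Both stdio streams are swapped for in-memory buffers so stray prints from\n                # the WSGI app cannot hit the real pipe and corrupt the FastCGI records\n                # written via os.write(stdout, ...).\n                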
record.params['SCRIPT_NAME'] = ''\n                try:\n                    response = ''.join(handler(record.params, start_response))\n                except:\n                    send_response(record.req_id, FCGI_STDERR, errors.getvalue())\n                else:\n                    status = 'Status: ' + status_line + '\\r\\n'\n                    headers = ''.join('%s: %s\\r\\n' % (name, value) for name, value in response_headers)\n                    full_response = status + headers + '\\r\\n' + response\n                    send_response(record.req_id, FCGI_STDOUT, full_response)\n\n                # for testing of throughput of fastcgi handler vs static pages\n                #send_response(record.req_id, FCGI_STDOUT, 'Content-type: text/html\\r\\n\\r\\n<html>\\r\\n<foo>bar</foo></html>')\n\n                send_response(record.req_id, FCGI_END_REQUEST, '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', streaming=False)\n                del _REQUESTS[record.req_id]\n        except:\n            log(traceback.format_exc())\n","repo_name":"angelcolmenares/pash","sub_path":"Modules/WindowsAzure/Microsoft.WindowsAzure.Management.CloudService/Resources/Scaffolding/Python/WebRole/wfastcgi.py","file_name":"wfastcgi.py","file_ext":"py","file_size_in_byte":12722,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"13"} +{"seq_id":"8645154310","text":"from apps import app\nimport secrets\nimport redis\nfrom flask_pymongo import PyMongo\n\n# -------------------------------------------------------------------------------------------------------\ntokens_session = secrets.token_hex(20)\ntokens_session = app.config[\"SECRET_KEY\"]\n\ndef databaseConnection(database_name):\n    try:\n        app.config[\"MONGO_URI\"] = f\"mongodb+srv://erik1288:ingeniero010@userscluster.vyzqgn5.mongodb.net/{database_name}?retryWrites=true&w=majority\"\n        print('connection success...')\n        mongo = PyMongo(app)\n    except ConnectionError:\n        print('connection error...')\n    return mongo\n\n# ------------------------------------------------------------------------------------------------------\n\n# redis config\nredis_manager = redis.Redis(\n    host='redis-10179.c14.us-east-1-3.ec2.cloud.redislabs.com',\n    port=10179,\n    password='ingeniero010')\n\n# check connection to redis\ndef checkRedisConnection():\n    try:\n        response = redis_manager.ping()\n        if response:\n            print(\"Connection to Redis successful\")\n    except redis.ConnectionError:\n        print(\"Failed to connect to Redis\")\n\n# Call the function to check the Redis connection\ncheckRedisConnection()\n# -------------------------------------------------------------------------------------------------------\n\nclass BaseConfig(object):\n    'Base configuration'\n    TESTING = False\n    DEBUG = True\n\nclass ProductionConfig(BaseConfig):\n    'Production configuration'\n    DEBUG = False\n\nclass DevelopmentConfig(BaseConfig):\n    'Development configuration'\n    TESTING = True","repo_name":"ErikHerazo/apiFlaskRestfull","sub_path":"flaskApps/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"30243352803","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport annoying.fields\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('auth', '0006_require_contenttypes_0002'),\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('persons', '0009_person_valid_fiber'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='UserProfile',\n            fields=[\n                ('user', annoying.fields.AutoOneToOneField(related_name='profile', primary_key=True, serialize=False, 
to=settings.AUTH_USER_MODEL)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'db_table': 'user_profile',\n 'verbose_name': 'user profile',\n 'verbose_name_plural': 'user profiles',\n },\n ),\n migrations.AddField(\n model_name='person',\n name='owner',\n field=models.OneToOneField(null=True, blank=True, to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"pignacio/vld_django","sub_path":"vld_django/persons/migrations/0010_auto_20150603_1000.py","file_name":"0010_auto_20150603_1000.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"5661495079","text":"from __future__ import (absolute_import, division, print_function)\n\nimport mslice.plotting.pyplot as plt\n\nPICKER_TOL_PTS = 5\n\n\ndef plot_cached_slice(slice_cache, slice_workspace):\n _show_plot(slice_cache, slice_workspace)\n\n\n@plt.set_category(plt.CATEGORY_SLICE)\ndef create_slice_figure(workspace_name, presenter):\n fig_canvas = plt.gcf().canvas\n fig_canvas.manager.set_window_title(workspace_name)\n fig_canvas.manager.add_slice_plot(presenter, workspace_name)\n fig_canvas.manager.update_grid()\n plt.draw_all()\n\n\n@plt.set_category(plt.CATEGORY_SLICE)\ndef _show_plot(slice_cache, workspace):\n cur_fig = plt.gcf()\n cur_fig.clf()\n ax = cur_fig.add_subplot(111, projection='mslice')\n image = ax.pcolormesh(workspace, cmap=slice_cache.colourmap, norm=slice_cache.norm)\n\n cb = plt.colorbar(image, ax=ax)\n cb.set_label('Intensity (arb. units)', labelpad=20, rotation=270, picker=PICKER_TOL_PTS)\n\n plt_handler = cur_fig.canvas.manager.plot_handler\n\n plt_handler._update_lines()\n\n cur_fig.canvas.manager.plot_handler._update_lines()\n\n if plt_handler.icut is not None:\n # Because the axis is cleared, RectangleSelector needs to use the new axis\n plt_handler.icut.refresh_rect_selector(ax)\n\n cur_fig.canvas.draw_idle()\n cur_fig.show()\n\n # This ensures that another slice plotted in the same window saves the plot options\n # as the plot window's showEvent is called only once. The equivalent command is left\n # in the showEvent for use by the CLI.\n if plt_handler.default_options is None:\n plt_handler.save_default_options()\n\n\ndef set_colorbar_label(label):\n plt.gcf().get_axes()[1].set_ylabel(label, rotation=270, labelpad=20)\n","repo_name":"mantidproject/mslice","sub_path":"src/mslice/views/slice_plotter.py","file_name":"slice_plotter.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"73493600657","text":"# Menu matematico\r\n\r\nfrom time import sleep\r\n\r\n\r\nn1 = int(input('Primeiro valor: '))\r\nn2 = int(input('Segundo valor: '))\r\nopcao = 0\r\n\r\nwhile opcao != 5:\r\n print('''\r\n[ 1 ] Somar\r\n[ 2 ] Multiplicar\r\n[ 3 ] Maior valor\r\n[ 4 ] Novos números\r\n[ 5 ] Sair do programa\\n''')\r\n opcao = int(input('Qual é sua opção? 
'))\r\n\r\n if opcao == 1:\r\n soma = n1 + n2\r\n print('A soma entre {} e {} é {}.'.format(n1, n2, soma))\r\n elif opcao == 2:\r\n produto = n1 * n2\r\n print('O resultado de {} x {} é {}.'.format(n1, n2, produto))\r\n elif opcao == 3:\r\n if n1 > n2:\r\n maior = n1\r\n else:\r\n maior = n2\r\n print('Entre {} e {} o maior deles é {}.'.format(n1, n2, maior))\r\n elif opcao == 4:\r\n print('Informe os números desejados novamente: ')\r\n n1 = int(input('Primeiro Valor: '))\r\n n2 = int(input('Segundo Valor: '))\r\n elif opcao == 5:\r\n print('FINALIZANDO...')\r\n sleep(2)\r\n else:\r\n print('OPÇÃO INVÁLIDA. DIGITE NOVAMENTE!')\r\n print('=+='*10)\r\nprint('Obrigado por nos consultar!')\r\nsleep(2)\r\nprint('ATENCIOSAMENTE EQUIPE DE NÚMEROS DAMIATI')\r\n","repo_name":"damiati-a/CURSO-DE-PYTHON","sub_path":"Mundo 2/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"38433668347","text":"#!/usr/bin/python3\n\nfrom pwn import *\n\n# start process using pwntools\np = process(\"./crackme\")\n\n# send an input (e.g. 1234)\np.sendline(\"1234\")\n\n# keep reading output until program terminates\nwhile True:\n\ttry:\n\t\tline = p.readline()\n\t\t#use \"readline\" from pwntools\n\t\tprint (\"Read line: [%s]\" % line)\n\texcept:\n\t\t#could not read line => program exited\n\t\tbreak\n\n\n","repo_name":"MocanuAlexandru/LabWork","sub_path":"Reverse Engineering/Laboratory 1/Solutions/task11.py","file_name":"task11.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"20312615243","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', \n 'learning_django.settings')\nimport django \ndjango.setup()\nfrom rango.models import Category, Page\n\ndef populate():\n '''\n 1. Create lists of dicts containing pages to add into each category\n 2. 
Create dict of dicts for our categories to iterate through data\n structs and add data to model.\n '''\n python_pages = [\n {'title':'Official Python Tutorial', 'url':'http://docs.python.org/3/tutorial/', 'views':120},\n {'title':'How to Think like a Computer Scientist', 'url':'http://www.greenteapress.com/thinkpython/', 'views':101},\n {'title':'Learn Python in 10 Minutes', 'url':'http://www.korokithakis.net/tutorials/python/', 'views' : 82},\n {'title':'Real Python - Tricks', 'url':'\thttp://realpython.com/python-tricks/', 'views':87},\n ]\n\n django_pages = [\n {'title':'Official Django Tutorial', 'url':'https://docs.djangoproject.com/en/2.1/intro/tutorial01/', 'views' :108},\n {'title':'Django Rocks', 'url':'http://www.djangorocks.com/', 'views' : 23},\n {'title':'How to Tango with Django', 'url':'http://www.tangowithdjango.com/', 'views' : 78},\n {'title':'Django Girls - Tutorial', 'url':'https://djangogirls.org/', 'views' : 68},\n \n ]\n\n other_pages = [\n {'title':'Bottle', 'url':'http://bottlepy.org/docs/dev/', 'views' : 43},\n {'title':'Flask', 'url':'http://flask.pocoo.org', 'views' : 12}\n ]\n\n react_pages = [\n {'title':'FB React Documentation', 'url':'https://reactjs.org/', 'views' : 102},\n {'title':'Tyler McGinnis React Course', 'url':'https://tylermcginnis.com/courses/react/', 'views' : 65},\n {'title':'Egghead.io React Course', 'url':'https://egghead.io/', 'views' : 0}\n ]\n\n ml_pages = [\n {'title':\"Colah's ML Blog\", 'url':'https://reactjs.org/', 'views' : 15},\n {'title':'Distill', 'url':'https://distill.pub/', 'views' : 5},\n {'title':'Deep Learning Notes | CS230 Stanford', 'url':'https://stanford.edu/~shervine/teaching/cs-230/cheatsheet-convolutional-neural-networks', 'views' : 0}\n ]\n\n cats = {'Python': {'pages': python_pages, 'views':128, 'likes':64}, \n 'Django': {'pages': django_pages, 'views':110, 'likes':32},\n 'Other Frameworks': {'pages': other_pages, 'views':50, 'likes':8},\n 'React': {'pages':react_pages, 'views':105, 'likes':16},\n 'Machine Learning': {'pages':ml_pages, 'views':24, 'likes':24},\n }\n # cat is not for the 'meow' cat but for category :-)\n\n for cat, cat_data in cats.items():\n c = add_cat(cat, views=cat_data['views'], likes=cat_data['likes']) \n for p in cat_data['pages']:\n add_page(c, p['title'], p['url'], p['views'])\n\n for c in Category.objects.all():\n for p in Page.objects.filter(category = c):\n print(f'- {c}: {p}')\n\ndef add_page(cat, title, url, views = 0):\n p = Page.objects.get_or_create(category = cat, title = title)[0]\n p.url = url \n p.views = views \n p.save()\n return p \n\ndef add_cat(name, views = 0, likes = 0):\n c = Category.objects.get_or_create(name = name)[0]\n c.views = views\n c.likes = likes\n c.save()\n return c \n\n# start execution\nif __name__ == '__main__':\n print(\"Initializing Rango population script...\")\n populate()\n","repo_name":"mdvsh/django-with-rango","sub_path":"populate_rango.py","file_name":"populate_rango.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"28054308626","text":"import aioredis\n\n\nclass RedisService:\n def __init__(self):\n self.redis = aioredis.from_url(\"redis://localhost\", encoding=\"utf-8\", decode_responses=True)\n\n async def write(self, key: str, value: str, ex: int):\n await self.redis.set(key, value, ex=ex)\n\n async def read(self, key: str):\n return await 
self.redis.get(key)\n","repo_name":"sb-elliot-7s/job-portal-fastapi","sub_path":"redis_service.py","file_name":"redis_service.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"13"} +{"seq_id":"34101294226","text":"\"\"\"\nRotate Function\n\"\"\"\n\n\nclass Solution(object):\n def maxRotateFunction(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n sumA = sum(A)\n lenA = len(A)\n count = 0\n ret = 0\n for eachNum in A:\n ret += count * eachNum\n count += 1\n\n current = ret\n for i in range(1, lenA):\n current = current + sumA - lenA * A[lenA - i]\n ret = max(ret, current)\n\n return ret\n\n\nsolution = Solution()\nprint(solution.maxRotateFunction([4, 3, 2, 6]))\n","repo_name":"Troy-Wang/LeetCode","sub_path":"201610_Week4/20161028_2.py","file_name":"20161028_2.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"32046042850","text":"#!/usr/bin/env python3\n\nimport argparse\nimport time\n\nimport numpy as np\n\nimport arkouda as ak\nfrom arkouda.sorting import SortingAlgorithm\n\n\ndef is_cosorted(data):\n # (b[0] > a[0]) | ((b[0] == a[0]) & recurse(a[1], b[1]))\n def helper(x, right):\n return (x[1:] > x[:-1]) | ((x[1:] == x[:-1]) & right)\n\n right = ak.ones(data[0].size - 1, dtype=ak.bool)\n for x in reversed(data):\n right = helper(x, right)\n return right.all()\n\n\ndef get_nbytes(data):\n if isinstance(data, ak.pdarray):\n return data.size * data.itemsize\n elif isinstance(data, ak.Strings):\n return data.size * 8 + data.nbytes\n else:\n return sum(get_nbytes(x) for x in data)\n\n\ndef apply_perm(data, perm):\n if isinstance(data, (ak.pdarray, ak.Strings)):\n return data[perm]\n else:\n return [x[perm] for x in data]\n\n\ndef check_sorted(s):\n if isinstance(s, (ak.pdarray, ak.Strings)):\n return ak.is_sorted(s)\n else:\n return is_cosorted(s)\n\n\ndef do_argsort(data, algo):\n if isinstance(data, (ak.pdarray, ak.Strings)):\n return ak.argsort(data, algo)\n else:\n return ak.coargsort(data, algo)\n\n\ndef check_correctness(data):\n \"\"\"\n Only check accuracy of sorting, do not measure performance\n \"\"\"\n for algo in SortingAlgorithm:\n perm = do_argsort(data, algo)\n s = apply_perm(data, perm)\n assert check_sorted(s)\n\n\ndef time_sort(name, data, trials):\n \"\"\"\n Measure both performance and correctness of sorting\n \"\"\"\n for algo in SortingAlgorithm:\n timings = []\n for i in range(trials):\n start = time.time()\n perm = do_argsort(data, algo)\n end = time.time()\n timings.append(end - start)\n tavg = sum(timings) / trials\n nbytes = get_nbytes(data)\n print(\"{} {} average time = {:.4f} sec\".format(name, algo.name, tavg))\n bytes_per_sec = nbytes / tavg\n print(\"{} {} average rate = {:.4f} GiB/sec\".format(name, algo.name, bytes_per_sec / 2**30))\n s = apply_perm(data, perm)\n assert check_sorted(s)\n\n\ndef random_uniform(N):\n \"\"\"\n Uniformly distributed integers of 1, 2, and 4 digits.\n Uniformly distributed reals in (0, 1)\n \"\"\"\n for lbound, ubound, bstr in (\n (0, 2**16, \"16-bit\"),\n (0, 2**32, \"32-bit\"),\n (-(2**63), 2**63, \"64-bit\"),\n ):\n name = \"uniform int64 {}\".format(bstr)\n data = ak.randint(lbound, ubound, N)\n yield name, data\n name = \"uniform float64\"\n data = ak.uniform(N)\n yield name, data\n\n\ndef power_law(N):\n \"\"\"\n Power law distributed (alpha = 2.5) reals and integers in (1, 2**32)\n \"\"\"\n y = ak.uniform(N)\n a = -2.5 
# power law exponent, between -2 and -3\n ub = 2**32 # upper bound\n data = ((ub ** (a + 1) - 1) * y + 1) ** (1 / (a + 1))\n yield \"power-law float64\", data\n\n datai = ak.cast(data, ak.int64)\n yield \"power-law int64\", datai\n\n\ndef rmat(size):\n \"\"\"\n RMAT-generated edges (coargsort of two vertex arrays)\n \"\"\"\n # N = number of edges = number of elements / 2\n N = size // 2\n avgdegree = 10\n lgNv = int(np.log2(N / avgdegree))\n # probabilities\n a = 0.01\n b = (1.0 - a) / 3.0\n c = b\n d = b\n # quantites to use in edge generation loop\n ab = a + b\n c_norm = c / (c + d)\n a_norm = a / (a + b)\n # init edge arrays\n ii = ak.ones(N, dtype=ak.int64)\n jj = ak.ones(N, dtype=ak.int64)\n # generate edges\n for ib in range(1, lgNv):\n ii_bit = ak.uniform(N) > ab\n jj_bit = ak.uniform(N) > (c_norm * ii_bit + a_norm * (~ii_bit))\n ii = ii + ((2 ** (ib - 1)) * ii_bit)\n jj = jj + ((2 ** (ib - 1)) * jj_bit)\n\n yield \"RMAT int64\", (ii, jj)\n\n\ndef block_sorted(N):\n \"\"\"\n The concatenation of two sorted arrays of unequal length\n The interleaving of two sorted arrays of unequal length\n\n Most often occurs in array setops, where two arrays are\n uniqued (via sorting), then concatenated and sorted\n \"\"\"\n splitpoint = 0.4\n Na = int(splitpoint * N)\n Nb = N - Na\n # Construct a and b such that:\n # 1) Values overlap\n # 2) a and b are sorted\n a = ak.arange(Na)\n b = ak.arange(Nb)\n c = ak.concatenate((a, b), ordered=True)\n yield \"block-sorted concat int64\", c\n\n ci = ak.concatenate((a, b), ordered=False)\n yield \"block-sorted interleaved int64\", ci\n\n\ndef refinement(N):\n \"\"\"\n Coargsort of two arrays, where the first is already sorted\n but has many repeated values\n \"\"\"\n groupsize = 100\n a = ak.arange(N // 2) // groupsize\n factor = 2**32 // a.max()\n a *= factor\n b = ak.randint(0, 2**32, N // 2)\n yield \"refinement int64\", (a, b)\n\n\ndef time_like(N):\n \"\"\"\n Data like a datetime64[ns]:\n - spanning 1 year\n - with second granularity\n - but stored with nanosecond precision\n \"\"\"\n # seconds in a year\n year_sec = 365 * 24 * 60 * 60\n # offset to almost 2020 (yeah yeah, leap days)\n twentytwenty = 50 * year_sec\n # second-resolution timestamps spanning approx 2020-2021\n a = ak.randint(0, year_sec, N) + twentytwenty\n # stored as datetime64[ns]\n a *= 10**9\n yield \"datetime64[ns]\", a\n\n\ndef IP_like(N):\n \"\"\"\n Data like a 90/10 mix of IPv4 and IPv6 addresses\n \"\"\"\n multiplicity = 10\n nunique = N // (2 * multiplicity)\n # First generate unique addresses, then sample with replacement\n u1 = ak.zeros(nunique, dtype=ak.int64)\n u2 = ak.zeros(nunique, dtype=ak.int64)\n v4 = ak.uniform(nunique) < 0.9\n n4 = v4.sum()\n v6 = ~v4\n n6 = v4.size - n4\n u1[v4] = ak.randint(0, 2**32, n4)\n u1[v6] = ak.randint(-(2**63), 2**63, n6)\n u2[v6] = ak.randint(-(2**63), 2**63, n6)\n sample = ak.randint(0, nunique, N // 2)\n IP1 = u1[sample]\n IP2 = u2[sample]\n yield \"IP-like 2*int64\", (IP1, IP2)\n\n\nGENERATORS = (random_uniform, power_law, rmat, block_sorted, refinement, time_like, IP_like)\n\n\ndef create_parser():\n parser = argparse.ArgumentParser(\n description=\"Measure performance of sorting an array of random values.\"\n )\n parser.add_argument(\"hostname\", help=\"Hostname of arkouda server\")\n parser.add_argument(\"port\", type=int, help=\"Port of arkouda server\")\n parser.add_argument(\n \"-n\", \"--size\", type=int, default=10**8, help=\"Problem size: length of array to argsort\"\n )\n parser.add_argument(\n \"-t\", \"--trials\", type=int, 
default=1, help=\"Number of times to run the benchmark\"\n )\n parser.add_argument(\n \"--correctness-only\",\n default=False,\n action=\"store_true\",\n help=\"Only check correctness, not performance.\",\n )\n # parser.add_argument('-s', '--seed', default=None, type=int, help='Value to initialize\n # random number generator')\n return parser\n\n\nif __name__ == \"__main__\":\n import sys\n\n parser = create_parser()\n args = parser.parse_args()\n ak.connect(args.hostname, args.port)\n\n if args.correctness_only:\n args.size = 1000\n else:\n print(\"array size = {:,}\".format(args.size))\n print(\"number of trials = \", args.trials)\n for gen in GENERATORS:\n for name, data in gen(args.size):\n if args.correctness_only:\n check_correctness(data)\n else:\n time_sort(name, data, args.trials)\n sys.exit(0)\n","repo_name":"Bears-R-Us/arkouda","sub_path":"benchmarks/sort-cases.py","file_name":"sort-cases.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"en","doc_type":"code","stars":211,"dataset":"github-code","pt":"13"} +{"seq_id":"12870783855","text":"\"\"\"\n Problem statement:\n Given an array of distinct images nums and a target number target, return the number of possible combinations that add up to target.\n The test cases are generated so that the answer cat fit in a 32-bit integer\n\n Example 1:\n Input: nums = [1,2,3], target = 4\n Output: 7\n Explanation:\n The possible combination ways are:\n (1, 1, 1, 1)\n (1, 1, 2)\n (1, 2, 1)\n (1, 3)\n (2, 1, 1)\n (2, 2)\n (3, 1)\n Note that different sequences are counted as different combinations. (like 1,1,2 and 2,1,1 are difference)\n\n Example 2:\n Input: nums = [9], target = 3\n Output: 0\n\n Constraints:\n * 1 <= nums.length <= 200\n * 1 <= nums[i] <= 1000\n * All the elements of nums are unique.\n * 1 <= target <= 1000\n\n Follow up: What if negative numbers are allowed in the given array? How does it change the problem? What limitation we need to add to the question to allow negative numbers?\n\n Humm, this problem lead me to the hell. 
DP Again =)))\n \n\"\"\"\nfrom typing import List\nfrom itertools import product\nimport math\nfrom functools import reduce\n\n\nclass Solution:\n def TLE_combinationSum4(self, nums: List[int], target: int) -> int:\n \"\"\"\n Implemenatation here\n \n Idea:\n - Like a loop, inside each loop, we find all possible solution for this\n - Next loop, we drop the first index of the nums array, like [1, 2, 3], then [2, 3] and final [3] ...\n - This will reduce the time since we already found the same solution before (like 1, 1, 2 -> Then we can count the another solution)\n\n Note: May be this idea is not good for now :<\n So we need to counter all possible solution\n\n We can ignore the num inside nums that has value greater than the target\n \"\"\"\n nums = sorted(nums)\n print(f'Sorted num: {nums}')\n\n # Backtracking\n candidates = [] # Contains all possible solutions\n solutions = [] # Store all completed solution\n\n # Init the base for candidates\n for num in nums:\n if num <= target:\n candidates.append([num])\n \n # print(f'Prepared candidate: {candidates}')\n\n while len(candidates):\n print(f'Last append candidates: {candidates[-1]}')\n # Make new candidate, by looping all items inside the for loop\n candidate = candidates.pop()\n \n if sum(candidate) == target:\n solutions.append(candidate)\n continue\n\n for num in nums:\n if num < target - sum(candidate):\n candidates.append(candidate + [num])\n if num == target - sum(candidate):\n solutions.append(candidate + [num])\n if num > target - sum(candidate):\n break\n \n print(f'Solutions: {solutions}')\n return len(solutions)\n\n def TLE_again_combinationSum4(self, nums: List[int], target: int) -> int:\n \"\"\"\n Given the nums then find the difference combinations of weights for each num that can sum up the target\n Due to the nums are distinct, then for each solution we can find the number of ways to represent it\n\n Examples:\n [1, 2, 3] and 4\n We can have the weights (4, 0, 0), (2, 1, 0), (1, 0, 1), (0, 2, 0)\n \"\"\"\n def compute(combination):\n numerator = math.factorial(sum(combination))\n denumerator = reduce(lambda x, y: x * y, [math.factorial(_) for _ in combination])\n return int(numerator / denumerator)\n\n # Can I limit the loop range, yes, just by (target // nums[i] + 1)\n \n # 1. 
Construct the limited\n limits = [(0, target // num + 1, 1) for num in nums]\n print(f'Limits: {limits}')\n # print(f'Limit for each number: {limits}')\n \n # This what I want\n # Ref: https://stackoverflow.com/questions/38068669/dynamic-for-loops-in-python\n combinations = []\n\n for values in product(*(range(*b) for b in limits)):\n val = 0\n # print(f'Values: {values}')\n for num, weight in zip(nums, values):\n val += num * weight\n if val == target:\n combinations.append(values)\n \n print(f'Combinations: {combinations}')\n # Applied the math to compute real permutation here !!!\n rs = 0\n for comb in combinations:\n rs += compute(combination=comb)\n return rs\n \n def combinationSum4(self, nums: List[int], target: int) -> int:\n \"\"\"\n Must be solved by using DP :< so sad\n \"\"\"\n # DP is the possible combination that we can have for each target by the given nums\n dp = [0] * (target + 1)\n dp[0] = 1\n \n for i in range(1, target + 1):\n # Finding the number of combination for each target i by given the nums\n for num in nums:\n if i - num >= 0:\n # This mean we can combine this num with the previous set at target [i - num]\n dp[i] += dp[i - num]\n print(f'Adjust DP: {dp}')\n \n return dp[target]\n\n\n\nif __name__ == '__main__':\n # Just pass 6/15 case\n\n s = Solution()\n print(s.combinationSum4(nums=[1, 2, 3], target=3))\n # print(s.combinationSum4(nums=[9], target=3))\n # print(s.combinationSum4(nums= [3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25], target=10)) # Expected 9\n # print(s.combinationSum4(nums = [4, 2, 1], target=20))\n\n # TLE Again (just pass 11/15)\n # print(s.combinationSum4(nums=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100],\n # target=31))\n\n","repo_name":"Nacriema/Leet-Code","sub_path":"daily_challenges/combination-sum-iv.py","file_name":"combination-sum-iv.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"3497944413","text":"import random\nimport turtle as turtle_module\n\ntim_screen = turtle_module.Screen()\n\ntim_screen.setup(600,600)\n\nuser_bet = tim_screen.textinput(title=\"Place your bet\", prompt=\"which turtle will win the race, enter turtle name\")\nprint(user_bet)\ntim1 = turtle_module.Turtle(shape=\"turtle\")\ntim2= turtle_module.Turtle(shape=\"turtle\")\ntim3 = turtle_module.Turtle(shape=\"turtle\")\ntim4 = turtle_module.Turtle(shape=\"turtle\")\n\ntim1.color(\"green\")\n\ntim2.color(\"yellow\")\n#\ntim3.color(\"red\")\n\ntim4.color(\"blue\")\n\ntim1.hideturtle()\ntim2.hideturtle()\ntim3.hideturtle()\ntim4.hideturtle()\n\ntim1.penup()\ntim2.penup()\n\ntim3.penup()\ntim4.penup()\ntim1.goto(20/2 - (tim_screen.window_width() - 20)/2, tim_screen.window_height()/2 - 20 /2)\ntim2.goto(20/2 - (tim_screen.window_width() - 20) /2, tim_screen.window_height()/2.5 - 20 /3)\ntim3.goto(20/2 - (tim_screen.window_width() - 20) /2, tim_screen.window_height()/3.5 - 20 /4)\ntim4.goto(20/2 - (tim_screen.window_width() - 20) /2, tim_screen.window_height()/4.5 - 20 /5)\ntim1.showturtle()\ntim2.showturtle()\ntim3.showturtle()\ntim4.showturtle()\n\n\ntim1.setheading(0)\ntim2.setheading(0)\ntim3.setheading(0)\ntim4.setheading(0)\nspeed = [10, 30, 40, 50, 24, 32]\n\ngrid_width = 600\ncell_size = 
 +{"seq_id":"3497944413","text":"import random\nimport turtle as turtle_module\n\ntim_screen = turtle_module.Screen()\n\ntim_screen.setup(600,600)\n\nuser_bet = tim_screen.textinput(title=\"Place your bet\", prompt=\"which turtle will win the race, enter turtle name\")\nprint(user_bet)\ntim1 = turtle_module.Turtle(shape=\"turtle\")\ntim2 = turtle_module.Turtle(shape=\"turtle\")\ntim3 = turtle_module.Turtle(shape=\"turtle\")\ntim4 = turtle_module.Turtle(shape=\"turtle\")\n\ntim1.color(\"green\")\n\ntim2.color(\"yellow\")\n#\ntim3.color(\"red\")\n\ntim4.color(\"blue\")\n\ntim1.hideturtle()\ntim2.hideturtle()\ntim3.hideturtle()\ntim4.hideturtle()\n\ntim1.penup()\ntim2.penup()\n\ntim3.penup()\ntim4.penup()\ntim1.goto(20/2 - (tim_screen.window_width() - 20)/2, tim_screen.window_height()/2 - 20 /2)\ntim2.goto(20/2 - (tim_screen.window_width() - 20) /2, tim_screen.window_height()/2.5 - 20 /3)\ntim3.goto(20/2 - (tim_screen.window_width() - 20) /2, tim_screen.window_height()/3.5 - 20 /4)\ntim4.goto(20/2 - (tim_screen.window_width() - 20) /2, tim_screen.window_height()/4.5 - 20 /5)\ntim1.showturtle()\ntim2.showturtle()\ntim3.showturtle()\ntim4.showturtle()\n\n\ntim1.setheading(0)\ntim2.setheading(0)\ntim3.setheading(0)\ntim4.setheading(0)\nspeed = [10, 30, 40, 50, 24, 32]\n\ngrid_width = 600\ncell_size = 40\n\nturtles = ['tim1', 'tim2', ]\n\n\ndef get_name(turtle):\n if turtle == tim1:\n return \"tim1\"\n elif turtle == tim2:\n return \"tim2\"\n elif turtle == tim3:\n return \"tim3\"\n else:\n return \"tim4\"\n\ndef get_cord(turtle):\n tim_x,tim_y = turtle.position()\n if abs(tim_x) > (grid_width/2 - cell_size/2):\n if turtle == tim1 and user_bet == \"tim1\":\n print(\"tim 1 won\")\n elif turtle == tim2 and user_bet == \"tim2\":\n print(\"tim 2 won\")\n elif turtle == tim3 and user_bet == \"tim3\":\n print(\"tim 3 won\")\n elif turtle == tim4 and user_bet == \"tim4\":\n print(\"tim 4 won\")\n else:\n new_turtle = get_name(turtle)\n print(f\"you lose, {new_turtle} won\")\n return True\n else:\n return False\n\n\nfor i in range(20):\n\n if get_cord(tim1) or get_cord(tim2) or get_cord(tim3) or get_cord(tim4):\n break\n tim1.forward(random.choice(speed))\n tim2.forward(random.choice(speed))\n tim3.forward(random.choice(speed))\n tim4.forward(random.choice(speed))\n\n\ntim_screen.exitonclick()","repo_name":"Imma76/turtle_race","sub_path":"mosh_lessons/intermediate_tutorial/day19/turtle_race.py","file_name":"turtle_race.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"71256370897","text":"#!/usr/bin/env python\n\n# Bootstrap installation of Distribute\nimport distribute_setup\ndistribute_setup.use_setuptools()\n\nimport os\n\nfrom setuptools import setup\n\n\nPROJECT = 'python-blueprint'\nVERSION = '0.6.1'\nURL = 'https://github.com/eykd/blueprint'\nAUTHOR = 'David Eyk'\nAUTHOR_EMAIL = 'david.eyk@gmail.com'\nDESC = \"Magical blueprints for procedural generation of content.\"\n\n\ndef read_file(file_name):\n file_path = os.path.join(\n os.path.dirname(__file__),\n file_name\n )\n return open(file_path).read()\n\nsetup(\n name=PROJECT,\n version=VERSION,\n description=DESC,\n long_description=read_file('README.rst'),\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n license=read_file('LICENSE'),\n packages=['blueprint'],\n install_requires=['six'],\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n # see http://pypi.python.org/pypi?:action=list_classifiers\n # -*- Classifiers -*-\n \"Programming Language :: Python\",\n \"Topic :: Games/Entertainment\",\n \"Topic :: Artistic Software\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n ],\n)\n","repo_name":"eykd/blueprint","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"13"} +{"seq_id":"1049536004","text":"import matplotlib.pyplot as plt\r\nimport sys\r\nfrom sklearn import datasets, metrics, svm\r\nfrom sklearn.model_selection import train_test_split\r\nimport pdb\r\nfrom joblib import dump,load\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom flask import Flask, request, jsonify\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport os\r\n\r\napp = Flask(__name__)\r\n\r\ncurrent_directory = os.path.split(os.getcwd())[0]\r\n\r\n\r\ndef load_model(model_type):\r\n if model_type == 'svm':\r\n print(\"svm\")\r\n file_path = os.path.join(current_directory, 'models', 'M23CSA009_gamma:0.01_C:1.joblib')\r\n elif model_type == 'logistic':\r\n file_path = os.path.join(current_directory, 'models', 'M23CSA009_lr_lbfgs.joblib')\r\n elif model_type == 'decision_tree':\r\n file_path = 
os.path.join(current_directory, 'models', 'tree_max_depth:50.joblib')\r\n else:\r\n raise ValueError(\"Invalid model_type. Choose 'svm', 'logistic', or 'decision_tree'.\")\r\n \r\n model = load(file_path)\r\n return model\r\n\r\nmodel = load_model('svm')\r\n\r\n@app.route('/predict/', methods=['POST'])\r\ndef compare_digits():\r\n try:\r\n # Get the two image files from the request\r\n data = request.get_json() # Parse JSON data from the request body\r\n image1 = data.get('image1', [])\r\n image2 = data.get('image2', [])\r\n\r\n # Preprocess the images and make predictions\r\n digit1 = predict_digit(image1)\r\n digit2 = predict_digit(image2)\r\n\r\n # Compare the predicted digits and return the result\r\n if (digit1 == digit2):\r\n return jsonify({'Result':\"Images same\"})\r\n\r\n else:\r\n return jsonify({'Result':\"Images different\"})\r\n except Exception as e:\r\n return jsonify({'ERROR': str(e)})\r\n \r\ndef predict_digit(image):\r\n try:\r\n # Convert the input list to a flat 2D sample; scikit-learn estimators\r\n # expect shape (n_samples, n_features), not a 4D image tensor\r\n reshaped_image = np.array(image, dtype=np.float32).reshape(1, -1) / 255\r\n\r\n # sklearn predict() already returns class labels, so take the first\r\n # element instead of argmax (argmax over a single label is always 0)\r\n prediction = model.predict(reshaped_image)\r\n digit = prediction[0]\r\n\r\n return digit\r\n except Exception as e:\r\n return str(e)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()","repo_name":"DipanMandal/mlops_23","sub_path":"api/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"2179903593","text":"import random\nfrom myhdl import *\nimport os\n\n# Commands\nmodule = 'preamble'\ntestbench = '%s_tb' % module\n\nsrcs = []\nsrcs.append(\"hdl/%s.v\" % module)\nsrcs.append(\"%s.v\" % testbench)\nsrc = ' '.join(srcs)\n\nbuild_cmd = \"iverilog -o %s.vvp %s\" % (testbench, src) # Builds the testbench with MyHDL operatives\n\n# Random Seeds\nrandom.seed(1)\nrandrange = random.randrange\n\n# Parameters\nWIDTH = 32\nTYPE = \"PRIORITY\"\nBLOCK = \"REQUEST\"\nACTIVE_LOW, INACTIVE_HIGH = 0, 1\n\n# DUT\ndef preamble(clk, rst, signal_in, signal_out, preamble_length, preamble_value, frame_length, valid_in, ready_in, ready_out, valid_out, error, preamble_flag):\n\n if os.system(build_cmd):\n raise Exception(\"Error running build command\")\n\n dut = Cosimulation(\"vvp -m myhdl {0}.vvp -lxt2\".format(testbench), **locals())\n return dut\n\n@block\ndef test_bench():\n # Inputs\n clk = Signal(bool(0))\n rst = Signal(bool(0))\n signal_in = Signal(intbv(0)[WIDTH:])\n preamble_length = Signal(intbv(0)[WIDTH:])\n preamble_value = Signal(intbv(0)[WIDTH:])\n frame_length = Signal(intbv(0)[WIDTH:])\n valid_in = Signal(bool(0))\n ready_in = Signal(bool(0))\n\n # Outputs\n signal_out = Signal(intbv(0)[WIDTH:])\n ready_out = Signal(bool(0))\n valid_out = Signal(bool(0))\n error = Signal(bool(0))\n preamble_flag = Signal(bool(0))\n # rst = rstSignal(0, active=0, async=True)\n\n # keep the positional arguments in the same order as the preamble()\n # signature; the original swapped ready_out/valid_out\n preamble_1 = preamble(clk, rst, signal_in, signal_out, preamble_length, preamble_value, frame_length, valid_in, ready_in, ready_out, valid_out, error, preamble_flag)\n\n PERIOD = delay(10)\n\n @always(PERIOD)\n def clkGen():\n clk.next = not clk\n\n @instance\n def stimulus():\n rst.next = ACTIVE_LOW\n yield clk.negedge\n rst.next = INACTIVE_HIGH\n for i in range(16):\n # the bench defines no 'enable' signal; driving valid_in is the\n # assumed intent here\n valid_in.next = min(1, randrange(3))\n yield clk.negedge\n raise StopSimulation()\n\n @instance\n def monitor():\n print(\"enable count\")\n yield rst.posedge\n while 1:\n yield clk.posedge\n yield delay(1)\n print(\" %s 
%s\" % (int(signal_out), preamble_flag))\n\n return clkGen, stimulus, preamble_1, monitor\n\ndef test__bench():\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n sim = Simulation()\n sim.run()\n\nif __name__ == '__main__':\n print(\"Running test...\")\n test__bench()","repo_name":"Bucknalla/ip-cores","sub_path":"tb/preamble/preamble_tb.py","file_name":"preamble_tb.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"13"} +{"seq_id":"14893268613","text":"import requests \nimport pandas as pd \nimport numpy as np \n\nimport os\napi_key = os.getenv('api_key')\n\n\nstock = 'AAPL'\nstockprices = requests.get(f'https://financialmodelingprep.com/api/v3/historical-price-full/{stock}?serietype=line&apikey={api_key}').json()\n\nstockprices = stockprices['historical'][0:1200]\n\nstockprices = pd.DataFrame.from_dict(stockprices)\nstockprices = stockprices.set_index('date')\n\nstockprices = stockprices.iloc[::-1]\n\nstockprices['20d'] = stockprices['close'].rolling(20).mean() \n\n\nstockprices['return'] = np.log(stockprices['close'] / stockprices['close'].shift(1) )\n\n\nstockprices['difference'] = stockprices['close'] - stockprices['20d']\n\nstockprices['long'] = np.where(stockprices['difference'] < -2 ,1,np.nan)\nstockprices['long'] = np.where(stockprices['difference'] * stockprices['difference'].shift(1) < 0, 0, stockprices['long'])\nstockprices['long'] = stockprices['long'].ffill().fillna(0)\n\nstockprices['gain_loss'] = stockprices['long'].shift(1) * stockprices['return']\nstockprices = stockprices.dropna(subset=['20d'])\n\nstockprices['total'] = stockprices['gain_loss'].cumsum()\nprint(stockprices.tail(30))","repo_name":"PvrpleBlvck/Escuela","sub_path":"blvckfinance/mean_reversion.py","file_name":"mean_reversion.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"13"} +{"seq_id":"41563730829","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ncold_users = []\r\n\r\ndef prefilter_items(data, take_n_popular=5000, item_features=None):\r\n # Уберем самые популярные товары (их и так купят)\r\n popularity = data.groupby('item_id')['user_id'].nunique().reset_index() / data['user_id'].nunique()\r\n popularity.rename(columns={'user_id': 'share_unique_users'}, inplace=True)\r\n\r\n top_popular = popularity[popularity['share_unique_users'] > 0.2].item_id.tolist()\r\n data = data[~data['item_id'].isin(top_popular)]\r\n\r\n # Уберем самые НЕ популярные товары (их и так НЕ купят)\r\n top_notpopular = popularity[popularity['share_unique_users'] < 0.02].item_id.tolist()\r\n data = data[~data['item_id'].isin(top_notpopular)]\r\n\r\n # Уберем не интересные для рекоммендаций категории (department)\r\n if item_features is not None:\r\n department_size = pd.DataFrame(\r\n item_features.groupby('department')['item_id'].nunique().sort_values(ascending=False)).reset_index()\r\n department_size.columns = ['department', 'n_items']\r\n rare_departments = department_size[department_size['n_items'] < 150].department.tolist()\r\n items_in_rare_departments = item_features[\r\n item_features['department'].isin(rare_departments)].item_id.unique().tolist()\r\n\r\n data = data[~data['item_id'].isin(items_in_rare_departments)]\r\n\r\n # Уберем слишком дешевые товары (на них не заработаем). 
 +{"seq_id":"41563730829","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ncold_users = []\r\n\r\ndef prefilter_items(data, take_n_popular=5000, item_features=None):\r\n # Drop the most popular items (they will be bought anyway)\r\n popularity = data.groupby('item_id')['user_id'].nunique().reset_index() / data['user_id'].nunique()\r\n popularity.rename(columns={'user_id': 'share_unique_users'}, inplace=True)\r\n\r\n top_popular = popularity[popularity['share_unique_users'] > 0.2].item_id.tolist()\r\n data = data[~data['item_id'].isin(top_popular)]\r\n\r\n # Drop the least popular items (they will NOT be bought anyway)\r\n top_notpopular = popularity[popularity['share_unique_users'] < 0.02].item_id.tolist()\r\n data = data[~data['item_id'].isin(top_notpopular)]\r\n\r\n # Drop categories (department) that are uninteresting for recommendations\r\n if item_features is not None:\r\n department_size = pd.DataFrame(\r\n item_features.groupby('department')['item_id'].nunique().sort_values(ascending=False)).reset_index()\r\n department_size.columns = ['department', 'n_items']\r\n rare_departments = department_size[department_size['n_items'] < 150].department.tolist()\r\n items_in_rare_departments = item_features[\r\n item_features['department'].isin(rare_departments)].item_id.unique().tolist()\r\n\r\n data = data[~data['item_id'].isin(items_in_rare_departments)]\r\n\r\n # Drop items that are too cheap (we will not earn on them); one purchase from a mailing costs 60 RUB\r\n data['price'] = data['sales_value'] / (np.maximum(data['quantity'], 1))\r\n data = data[data['price'] > 2]\r\n\r\n # Drop items that are too expensive\r\n data = data[data['price'] < 50]\r\n\r\n # Drop items that have not been sold for more than 12-18 months\r\n data = data[data['week_no'] >= data['week_no'].max() - 52]\r\n\r\n # Take the top by popularity\r\n popularity = data.groupby('item_id')['quantity'].sum().reset_index()\r\n popularity.rename(columns={'quantity': 'n_sold'}, inplace=True)\r\n top = popularity.sort_values('n_sold', ascending=False).head(take_n_popular).item_id.tolist()\r\n\r\n # Introduce a dummy item_id (if a user bought items from the top-N, they \"bought\" such an item)\r\n data.loc[~data['item_id'].isin(top), 'item_id'] = 999999\r\n\r\n return data\r\n\r\n\r\ndef reduce_memory(df):\r\n for col in df.columns:\r\n col_type = df[col].dtype\r\n if col_type != object and str(col_type)[:4] != 'uint' and str(col_type) != 'category':\r\n c_min = df[col].min()\r\n c_max = df[col].max()\r\n if str(col_type)[:3] == 'int':\r\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\r\n df[col] = df[col].astype(np.int8)\r\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\r\n df[col] = df[col].astype(np.int16)\r\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\r\n df[col] = df[col].astype(np.int32)\r\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\r\n df[col] = df[col].astype(np.int64)\r\n else:\r\n if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\r\n df[col] = df[col].astype(np.float32)\r\n else:\r\n df[col] = df[col].astype(np.float64)\r\n elif str(col_type)[:4] != 'uint':\r\n df[col] = df[col].astype('category')\r\n return df\r\n\r\n\r\ndef popularity_recommendation(data, n=5):\r\n \"\"\"Top-n most popular items\"\"\"\r\n\r\n popular = data.groupby('item_id')['sales_value'].sum().reset_index()\r\n popular.sort_values('sales_value', ascending=False, inplace=True)\r\n\r\n recs = popular.head(n).item_id\r\n\r\n return recs.tolist()\r\n\r\n\r\ndef postfilter(recommendations, item_info, N=5):\r\n \"\"\"Post-filtering of items\r\n\r\n Input\r\n -----\r\n recommendations: list\r\n Ranked list of item_id for recommendations\r\n item_info: pd.DataFrame\r\n DataFrame with item information\r\n \"\"\"\r\n\r\n # Uniqueness\r\n unique_recommendations = []\r\n [unique_recommendations.append(item) for item in recommendations if item not in unique_recommendations]\r\n\r\n # Different categories; iterate over a copy because the list is mutated inside the loop\r\n categories_used = []\r\n final_recommendations = []\r\n CATEGORY_NAME = 'sub_commodity_desc'\r\n for item in unique_recommendations[:]:\r\n category = item_info.loc[item_info['item_id'] == item, CATEGORY_NAME].values[0]\r\n\r\n if category not in categories_used:\r\n final_recommendations.append(item)\r\n\r\n unique_recommendations.remove(item)\r\n categories_used.append(category)\r\n\r\n n_rec = len(final_recommendations)\r\n if n_rec < N:\r\n final_recommendations.extend(unique_recommendations[:N - n_rec])\r\n else:\r\n final_recommendations = final_recommendations[:N]\r\n\r\n assert len(final_recommendations) == N, 'Number of recommendations != {}'.format(N)\r\n return final_recommendations\r\n
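A quick illustrative call of postfilter; the items table below is made up purely to exercise the category deduplication (item 2 shares a category with item 1, so it is only eligible as filler):

import pandas as pd

items = pd.DataFrame({
    'item_id': [1, 2, 3, 4, 5, 6],
    'sub_commodity_desc': ['a', 'a', 'b', 'c', 'd', 'e'],
})
print(postfilter([1, 2, 3, 4, 5, 6], items, N=5))  # [1, 3, 4, 5, 6]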
\r\ndef rule(x, y, model, N=5):\r\n if x in y:\r\n return recommender.overall_top_purchases[:N]\r\n if model == 'als':\r\n return recommender.get_als_recommendations(x, N=N)\r\n elif model == 'own':\r\n return recommender.get_own_recommendations(x, N=N)\r\n elif model == 'similar_items':\r\n return recommender.get_similar_items_recommendation(x, N=N)\r\n elif model == 'similar_users':\r\n return recommender.get_similar_users_recommendation(x, N=N)\r\n elif model == 'bayesian':\r\n return recommender.get_bayesian_recommendations(x, N=N)\r\n\r\ndef rerank(user_id, N, out=cold_users):\r\n # membership must be checked against the column values, not the Series index\r\n if user_id in df_predict['user_id'].values:\r\n return df_predict[df_predict['user_id']==user_id].sort_values('proba_item_purchase', ascending=False).head(N).item_id.tolist()\r\n else:\r\n return rule(user_id, cold_users, model='own', N=5)\r\n\r\ndef rerank_post(user_id, N_rank=20, N_post=5):\r\n try:\r\n out = rerank(user_id, N=N_rank)\r\n out = postfilter(out, item_features, N=N_post)\r\n except AssertionError:\r\n out = rule(user_id, cold_users, model='own', N=N_post)\r\n return out\r\n\r\ndef transform_data_for_eval(dataset, rec_col, user_col='user_id'):\r\n '''\r\n Func for transforming recommendations into kaggle evaluation format\r\n\r\n Parameters:\r\n dataset (pd.DataFrame): Dataset with 2 required columns:\r\n rec_col - column with recommendations, should be iterable\r\n user_col - column with user id\r\n\r\n rec_col (str): name of column in dataset with recommendations\r\n\r\n user_col (str): name of column in dataset with user id\r\n\r\n Returns:\r\n pd.DataFrame: DataFrame in suitable format\r\n\r\n '''\r\n eval_dataset = dataset[[user_col, rec_col]].copy()\r\n eval_dataset[rec_col] = eval_dataset[rec_col].apply(lambda x: ' '.join([str(i) for i in x]))\r\n eval_dataset.rename(columns={\r\n user_col: 'UserId',\r\n rec_col: 'Predicted'\r\n }, inplace=True)\r\n return eval_dataset\r\n","repo_name":"pankratozzi/recommender","sub_path":"lesson6/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"17054403854","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\nfrom alipay.aop.api.domain.PosDiscountDetail import PosDiscountDetail\nfrom alipay.aop.api.domain.KbPosBillDishDetail import KbPosBillDishDetail\nfrom alipay.aop.api.domain.PosBillPayChannel import PosBillPayChannel\nfrom alipay.aop.api.domain.PosOrderKey import PosOrderKey\n\n\nclass KoubeiCateringOrderBillApplyModel(object):\n\n def __init__(self):\n self._bill_amount = None\n self._discount_details = None\n self._dish_details = None\n self._ext_info = None\n self._member_flag = None\n self._memo = None\n self._pay_amount = None\n self._pay_channels = None\n self._people_list = None\n self._pos_order_key = None\n self._receipt_amount = None\n self._settle_time = None\n\n @property\n def bill_amount(self):\n return self._bill_amount\n\n @bill_amount.setter\n def bill_amount(self, value):\n self._bill_amount = value\n @property\n def discount_details(self):\n return self._discount_details\n\n @discount_details.setter\n def discount_details(self, value):\n if isinstance(value, list):\n self._discount_details = list()\n for i in value:\n if isinstance(i, PosDiscountDetail):\n self._discount_details.append(i)\n else:\n self._discount_details.append(PosDiscountDetail.from_alipay_dict(i))\n @property\n def dish_details(self):\n return self._dish_details\n\n @dish_details.setter\n def dish_details(self, value):\n if isinstance(value, list):\n self._dish_details = list()\n for i in value:\n if isinstance(i, KbPosBillDishDetail):\n self._dish_details.append(i)\n else:\n 
self._dish_details.append(KbPosBillDishDetail.from_alipay_dict(i))\n @property\n def ext_info(self):\n return self._ext_info\n\n @ext_info.setter\n def ext_info(self, value):\n self._ext_info = value\n @property\n def member_flag(self):\n return self._member_flag\n\n @member_flag.setter\n def member_flag(self, value):\n self._member_flag = value\n @property\n def memo(self):\n return self._memo\n\n @memo.setter\n def memo(self, value):\n self._memo = value\n @property\n def pay_amount(self):\n return self._pay_amount\n\n @pay_amount.setter\n def pay_amount(self, value):\n self._pay_amount = value\n @property\n def pay_channels(self):\n return self._pay_channels\n\n @pay_channels.setter\n def pay_channels(self, value):\n if isinstance(value, list):\n self._pay_channels = list()\n for i in value:\n if isinstance(i, PosBillPayChannel):\n self._pay_channels.append(i)\n else:\n self._pay_channels.append(PosBillPayChannel.from_alipay_dict(i))\n @property\n def people_list(self):\n return self._people_list\n\n @people_list.setter\n def people_list(self, value):\n self._people_list = value\n @property\n def pos_order_key(self):\n return self._pos_order_key\n\n @pos_order_key.setter\n def pos_order_key(self, value):\n if isinstance(value, PosOrderKey):\n self._pos_order_key = value\n else:\n self._pos_order_key = PosOrderKey.from_alipay_dict(value)\n @property\n def receipt_amount(self):\n return self._receipt_amount\n\n @receipt_amount.setter\n def receipt_amount(self, value):\n self._receipt_amount = value\n @property\n def settle_time(self):\n return self._settle_time\n\n @settle_time.setter\n def settle_time(self, value):\n self._settle_time = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.bill_amount:\n if hasattr(self.bill_amount, 'to_alipay_dict'):\n params['bill_amount'] = self.bill_amount.to_alipay_dict()\n else:\n params['bill_amount'] = self.bill_amount\n if self.discount_details:\n if isinstance(self.discount_details, list):\n for i in range(0, len(self.discount_details)):\n element = self.discount_details[i]\n if hasattr(element, 'to_alipay_dict'):\n self.discount_details[i] = element.to_alipay_dict()\n if hasattr(self.discount_details, 'to_alipay_dict'):\n params['discount_details'] = self.discount_details.to_alipay_dict()\n else:\n params['discount_details'] = self.discount_details\n if self.dish_details:\n if isinstance(self.dish_details, list):\n for i in range(0, len(self.dish_details)):\n element = self.dish_details[i]\n if hasattr(element, 'to_alipay_dict'):\n self.dish_details[i] = element.to_alipay_dict()\n if hasattr(self.dish_details, 'to_alipay_dict'):\n params['dish_details'] = self.dish_details.to_alipay_dict()\n else:\n params['dish_details'] = self.dish_details\n if self.ext_info:\n if hasattr(self.ext_info, 'to_alipay_dict'):\n params['ext_info'] = self.ext_info.to_alipay_dict()\n else:\n params['ext_info'] = self.ext_info\n if self.member_flag:\n if hasattr(self.member_flag, 'to_alipay_dict'):\n params['member_flag'] = self.member_flag.to_alipay_dict()\n else:\n params['member_flag'] = self.member_flag\n if self.memo:\n if hasattr(self.memo, 'to_alipay_dict'):\n params['memo'] = self.memo.to_alipay_dict()\n else:\n params['memo'] = self.memo\n if self.pay_amount:\n if hasattr(self.pay_amount, 'to_alipay_dict'):\n params['pay_amount'] = self.pay_amount.to_alipay_dict()\n else:\n params['pay_amount'] = self.pay_amount\n if self.pay_channels:\n if isinstance(self.pay_channels, list):\n for i in range(0, len(self.pay_channels)):\n element = 
self.pay_channels[i]\n if hasattr(element, 'to_alipay_dict'):\n self.pay_channels[i] = element.to_alipay_dict()\n if hasattr(self.pay_channels, 'to_alipay_dict'):\n params['pay_channels'] = self.pay_channels.to_alipay_dict()\n else:\n params['pay_channels'] = self.pay_channels\n if self.people_list:\n if hasattr(self.people_list, 'to_alipay_dict'):\n params['people_list'] = self.people_list.to_alipay_dict()\n else:\n params['people_list'] = self.people_list\n if self.pos_order_key:\n if hasattr(self.pos_order_key, 'to_alipay_dict'):\n params['pos_order_key'] = self.pos_order_key.to_alipay_dict()\n else:\n params['pos_order_key'] = self.pos_order_key\n if self.receipt_amount:\n if hasattr(self.receipt_amount, 'to_alipay_dict'):\n params['receipt_amount'] = self.receipt_amount.to_alipay_dict()\n else:\n params['receipt_amount'] = self.receipt_amount\n if self.settle_time:\n if hasattr(self.settle_time, 'to_alipay_dict'):\n params['settle_time'] = self.settle_time.to_alipay_dict()\n else:\n params['settle_time'] = self.settle_time\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = KoubeiCateringOrderBillApplyModel()\n if 'bill_amount' in d:\n o.bill_amount = d['bill_amount']\n if 'discount_details' in d:\n o.discount_details = d['discount_details']\n if 'dish_details' in d:\n o.dish_details = d['dish_details']\n if 'ext_info' in d:\n o.ext_info = d['ext_info']\n if 'member_flag' in d:\n o.member_flag = d['member_flag']\n if 'memo' in d:\n o.memo = d['memo']\n if 'pay_amount' in d:\n o.pay_amount = d['pay_amount']\n if 'pay_channels' in d:\n o.pay_channels = d['pay_channels']\n if 'people_list' in d:\n o.people_list = d['people_list']\n if 'pos_order_key' in d:\n o.pos_order_key = d['pos_order_key']\n if 'receipt_amount' in d:\n o.receipt_amount = d['receipt_amount']\n if 'settle_time' in d:\n o.settle_time = d['settle_time']\n return o\n\n\n","repo_name":"alipay/alipay-sdk-python-all","sub_path":"alipay/aop/api/domain/KoubeiCateringOrderBillApplyModel.py","file_name":"KoubeiCateringOrderBillApplyModel.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"13"} +{"seq_id":"25336365094","text":"import aiohttp\nfrom aiohttp import web\n\nasync def handle(request):\n return web.Response(text=\"Hello, this is the server!\")\n\nasync def handle_second(request):\n return web.Response(text=\"Hello, this is second handle!\")\n\n# In practice a server can register multiple routes like this.\n# A route maps a URL path to a handler function.\n# Routes are added with the add_get() method.\n# add_get() registers a handler for GET requests:\n# the first argument is the URL path, the second is the handler function.\n\napp = web.Application()\napp.router.add_get('/', handle)\napp.router.add_get('/second', handle_second)\n\n# Set the desired domain and port\nhost = '0.0.0.0' # Listen on all available network interfaces\nport = 8080\n\nweb.run_app(app, host=host, port=port)","repo_name":"amirer21/python-exam","sub_path":"aiohttpExam/aiohttp_server_02_add_module.py","file_name":"aiohttp_server_02_add_module.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
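A hedged client-side sketch for exercising the two routes registered in the aiohttp record above; it assumes the server is running locally on port 8080 and is not part of the original file:

import asyncio
import aiohttp

async def main() -> None:
    async with aiohttp.ClientSession() as session:
        async with session.get("http://127.0.0.1:8080/second") as resp:
            print(resp.status, await resp.text())  # 200 Hello, this is second handle!

asyncio.run(main())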
 +{"seq_id":"37959967308","text":"#--------------------------------------------------------------\n# Control\n#--------------------------------------------------------------\n\n# --- Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )\nOutputLevel = INFO\n# --- produce tracking ntuple\ndoTrkNtuple = False\n# --- produce an atlantis data file\ndoJiveXML = False\n# --- IO setup\ndoWriteESD = False\ndoWriteAOD = False\n# --- write BS\ndoWriteBS = True\n# --- do auditors ?\ndoAuditors = False\ndoEdmMonitor = False\ndoNameAuditor = False\n\ndoPixelTrkNtuple = False\ndoVP1 = False\n#--------------------------------------------------------------\n# Load Reconstruction configuration for tools only\n#--------------------------------------------------------------\n\n# --- setup flags with default values\nfrom InDetRecExample.InDetJobProperties import InDetFlagsJobProperty\n\nInDetFlagsJobProperty.preProcessing = False\nInDetFlagsJobProperty.iPatRec = False\nInDetFlagsJobProperty.xKalman = False\nInDetFlagsJobProperty.newTracking = False\nInDetFlagsJobProperty.postProcessing = False\nInDetFlagsJobProperty.doTruth = False\nInDetFlagsJobProperty.loadTools = False\n\ninclude( \"InDetRecExample/ConfiguredInDetFlags.py\" )\nInDetTrigFlags = ConfiguredInDetFlags(InDetFlagsJobProperty)\n\n\ninclude( \"InDetRecExample/ConfiguredInDetKeys.py\" )\nInDetKeys = ConfiguredInDetKeys()\n\n#--------------------------------------------------------------\n# detector description version\n#--------------------------------------------------------------\n\nDetDescrVersion = \"ATLAS-DC3-07\"\n\n#--------------------------------------------------------------\n# load master joboptions file\n#--------------------------------------------------------------\n \ninclude(\"InDetRecExample/InDetRec_all.py\")\nif 'doWriteBS' in dir() and doWriteBS:\n # --- load writing BS file\n include (\"InDetTrigRecExample/InDetTrigWriteBS_jobOptions.py\")\n\n#--------------------------------------------------------------\n# Event related parameters\n#--------------------------------------------------------------\n\n# --- Number of events to be processed (default is 10)\ntheApp.EvtMax = 100\n# --- RDO file\nServiceMgr.EventSelector.InputCollections = [\"/afs/cern.ch/atlas/maxidisk/d89/InDetRecRDO.root\"]\n\n","repo_name":"rushioda/PIXELVALID_athena","sub_path":"athena/InnerDetector/InDetExample/InDetTrigRecExample/share/WriteInDetTrigBS_jobOptions.py","file_name":"WriteInDetTrigBS_jobOptions.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"28458551063","text":"import httpx \n\nfrom helpers.files import create_dir, create_file, add_user\n\nhttpx_client: object = httpx.Client()\nbase_url: str = \"https://discord.com/api/v9\"\n\ndef get_friends(authorization_token: str) -> bool:\n create_dir(\"backup\"); create_file(\"backup\", \"friends\")\n \n headers = {\n \"Authorization\": authorization_token, \n \"Content-Type\": \"application/json\"\n }\n request: object = httpx_client.get(f\"{base_url}/users/@me/relationships\", headers=headers)\n \n if request.status_code == 200:\n for user in request.json():\n if str(user[\"type\"]) == \"1\":\n user_id: int = user[\"user\"][\"id\"]\n user_username: str = user[\"user\"][\"username\"]\n user_discriminator: int = user[\"user\"][\"discriminator\"]\n \n add_user(\"backup\", \"friends\", user_username, user_discriminator, user_id)\n return True # the function is annotated -> bool, so report success explicitly\n else:\n return False","repo_name":"NotKatsu/BackupCord","sub_path":"helpers/friends.py","file_name":"friends.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"20948891615","text":"from django import forms\r\n# from django.core.exceptions import ValidationError\r\n\r\nfrom 
.models import Pi, Device, Address\r\nfrom experiment.models import ExperimentInstance\r\nfrom django.forms.widgets import CheckboxSelectMultiple, DateInput,\\\r\n SelectDateWidget, TimeInput\r\nimport datetime\r\nimport pytz\r\n\r\n\r\ntz = pytz.timezone('America/Chicago')\r\n\r\n\r\nclass AddressForm(forms.ModelForm):\r\n def __init__(self, *args, **kwargs):\r\n super(AddressForm, self).__init__(*args, **kwargs)\r\n for (field_name, field) in self.fields.items():\r\n field.widget.attrs['class'] = 'form-control'\r\n\r\n class Meta:\r\n model = Address\r\n fields = '__all__' # exclude = '__all__' is not valid for a ModelForm; expose all fields instead\r\n\r\n\r\nclass PiForm(forms.ModelForm):\r\n def __init__(self, *args, **kwargs):\r\n super(PiForm, self).__init__(*args, **kwargs)\r\n for (field_name, field) in self.fields.items():\r\n field.widget.attrs['class'] = 'form-control'\r\n\r\n class Meta:\r\n model = Pi\r\n exclude = ('pi_SN', 'address', 'manual_control',)\r\n\r\n\r\n\r\nclass DeviceForm(forms.ModelForm):\r\n def __init__(self, *args, **kwargs):\r\n super(DeviceForm, self).__init__(*args, **kwargs)\r\n for (field_name, field) in self.fields.items():\r\n field.widget.attrs['class'] = 'form-control'\r\n\r\n class Meta:\r\n model = Device\r\n fields = '__all__'\r\n \r\nclass AdvancedOptionsForm(forms.Form):\r\n start_date = forms.CharField(required=False, label=\"Start Date\",\\\r\n widget=forms.DateTimeInput(attrs={'type':'datetime-local', 'class':'form-control'}),\\\r\n initial=datetime.datetime.strftime(datetime.datetime.min, '%Y-%m-%dT%H:%M'))\r\n end_date = forms.CharField(required=False, label=\"End Date\",\\\r\n widget=forms.DateTimeInput(attrs={'type':'datetime-local', 'class':'form-control'}),\\\r\n initial=datetime.datetime.strftime(datetime.datetime.now(tz=tz), '%Y-%m-%dT%H:%M'))\r\n experiments = forms.ModelChoiceField(required=False, label=\"Experiments\", queryset=ExperimentInstance.objects.all())\r\n show_anomalies = forms.BooleanField(required=False, label='Show Anomalies')\r\n devices = forms.ModelMultipleChoiceField(required=False, label='Sensors and Actuators',\\\r\n queryset=Device.objects.all())#,\\\r\n # widget=CheckboxSelectMultiple())\r\n \r\n def __init__(self, *args, **kwargs):\r\n request = kwargs.pop('request')\r\n self.pk = kwargs.pop('pk')\r\n self.user = request.user\r\n super(AdvancedOptionsForm, self).__init__(*args, **kwargs)\r\n self.fields['experiments'].queryset = ExperimentInstance.objects.filter(experiment__pi__pk=self.pk)\r\n self.fields['experiments'].widget.attrs['class'] = 'form-control'\r\n self.fields['devices'].widget.attrs['class'] = 'form-control'\r\n self.fields['devices'].widget.attrs['size'] = '15'\r\n self.fields['devices'].queryset = Device.objects.filter(pi__pk=self.pk)","repo_name":"econforte/MavFC2018","sub_path":"foodcomputer/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"26869457773","text":"import pytest\nfrom app.storage import ReadingsStorage, DeviceNotFound\nfrom app.models import DeviceReadings, Reading\n\n@pytest.fixture\ndef storage():\n return ReadingsStorage()\n\ndef test_readings_storage_singleton():\n storage_instance1 = ReadingsStorage()\n storage_instance2 = ReadingsStorage()\n\n assert storage_instance1 is storage_instance2\n\ndef test_store_and_fetch_readings(storage):\n readings_data = DeviceReadings(\n id=\"device_id\",\n readings=[\n Reading(timestamp=\"2021-09-29T16:08:15+01:00\", count=2),\n Reading(timestamp=\"2021-09-29T16:09:15+01:00\", count=15)\n ]\n )\n storage.store_readings(readings_data)\n \n actual = storage.fetch_readings(readings_data.id)\n expected = 
readings_data.model_dump()[\"readings\"]\n assert actual == expected\n\ndef test_fetch_readings_device_not_found(storage):\n with pytest.raises(DeviceNotFound):\n storage.fetch_readings(\"non_existent_device_id\")\n\n\n","repo_name":"gksksla7140/device-reading","sub_path":"tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"17054801974","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass KoubeiMarketingDataRetailDmQueryModel(object):\n\n def __init__(self):\n self._content_id = None\n self._shop_ids = None\n\n @property\n def content_id(self):\n return self._content_id\n\n @content_id.setter\n def content_id(self, value):\n self._content_id = value\n @property\n def shop_ids(self):\n return self._shop_ids\n\n @shop_ids.setter\n def shop_ids(self, value):\n if isinstance(value, list):\n self._shop_ids = list()\n for i in value:\n self._shop_ids.append(i)\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.content_id:\n if hasattr(self.content_id, 'to_alipay_dict'):\n params['content_id'] = self.content_id.to_alipay_dict()\n else:\n params['content_id'] = self.content_id\n if self.shop_ids:\n if isinstance(self.shop_ids, list):\n for i in range(0, len(self.shop_ids)):\n element = self.shop_ids[i]\n if hasattr(element, 'to_alipay_dict'):\n self.shop_ids[i] = element.to_alipay_dict()\n if hasattr(self.shop_ids, 'to_alipay_dict'):\n params['shop_ids'] = self.shop_ids.to_alipay_dict()\n else:\n params['shop_ids'] = self.shop_ids\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = KoubeiMarketingDataRetailDmQueryModel()\n if 'content_id' in d:\n o.content_id = d['content_id']\n if 'shop_ids' in d:\n o.shop_ids = d['shop_ids']\n return o\n\n\n","repo_name":"alipay/alipay-sdk-python-all","sub_path":"alipay/aop/api/domain/KoubeiMarketingDataRetailDmQueryModel.py","file_name":"KoubeiMarketingDataRetailDmQueryModel.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"13"} +{"seq_id":"26100365249","text":"from django_unicorn.components import UnicornView\nfrom portal_app.models import Intervention\n\n\nclass InterventionView(UnicornView):\n conversion_cost = 0\n new_intersection_type = \"\"\n accident_reduction_rate = 0\n acccident_severity_reduction_rate = 0\n\n interventions = Intervention.objects.none()\n\n def __init__(self, *args, **kwargs):\n super().__init__(**kwargs) # calling super is required\n self.intersection_id = kwargs.get(\"intersection_id\")\n self.interventions = Intervention.objects.all().filter(intersection_id=self.intersection_id)\n\n def add_intervention(self):\n \"\"\" Create a new intervention and clear all of the other fields \"\"\"\n Intervention.objects.create(conversion_cost=self.conversion_cost, \n new_intersection_type=self.new_intersection_type, \n accident_reduction_rate=self.accident_reduction_rate,\n accident_severity_reduction_rate=self.acccident_severity_reduction_rate,\n intersection_id=self.intersection_id\n )\n self.interventions = Intervention.objects.all().filter(intersection_id=self.intersection_id)\n \n self.conversion_cost = 0\n self.new_intersection_type = \"\"\n self.accident_reduction_rate = 0\n self.acccident_severity_reduction_rate = 0\n\n def delete_intervention(self, intervention_id):\n 
Intervention.objects.filter(pk=intervention_id).delete()\r\n self.interventions = Intervention.objects.all().filter(intersection_id=self.intersection_id)\r\n\r\n","repo_name":"ngonzo95/intersection-intervention-portal","sub_path":"portal_app/components/intervention.py","file_name":"intervention.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"36549963652","text":"#coding=utf-8\n#import PyExecJS\n#version 1.0\nimport sys\nimport os\nif sys.platform=='linux':\n import readline\n \npath=os.path.abspath('.')\nif 'tools' in path.replace('\\\\','/').split('/'):\n path=path.split('tools',maxsplit=1)[0]+'Library/pyjsfuck'\nelse:\n path=path+'/Library/pyjsfuck'\nif path not in (p.replace('\\\\','/') for p in sys.path):\n sys.path.append(path)\n\nfrom pyjsfuck import JSFuck\njsf = JSFuck()\n\nprint('JSFuck lets you write JavaScript programs using only the 6 characters [ ] ( ) ! +')\nprint('Enter JS code to convert')\n\nwhile True:\n data=input('jsfuck encode>')\n if data=='exit()':\n exit()\n elif data=='':\n continue\n codestr = jsf.encode(data)\n print(codestr)","repo_name":"ezeeo/ctf-tools","sub_path":"tools/coded/js/jsfuckencode.py","file_name":"jsfuckencode.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"13"} +{"seq_id":"13740052164","text":"import cv2 as cv\n\nimage=cv.imread('digit_image.png')\nimage_gray=cv.cvtColor(image, cv.COLOR_BGR2GRAY)\nret, thresh=cv.threshold(image_gray, 230, 255, 0)\nthresh=cv.bitwise_not(thresh)\ncontours, hierarchy=cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\nimage=cv.drawContours(image, contours, -1, (0, 0, 255), 4)\n\ncontour=contours[0]\nhull=cv.convexHull(contour)\nimage=cv.drawContours(image, [hull], -1, (255, 0, 0), 4)\n\ncv.imshow('image', image)\ncv.waitKey(0)","repo_name":"Solsol1014/Study","sub_path":"pythonpractice/Python 데이터 분석과 이미지 처리/Opencv contour의 convex hull.py","file_name":"Opencv contour의 convex hull.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21201339944","text":"import argparse\nimport matplotlib.pyplot as plt\nimport dendropy\nimport math\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\n\n\ndef plot_x_y_line(ax):\n lims = [np.min([ax.get_xlim(), ax.get_ylim()]),\n np.max([ax.get_xlim(), ax.get_ylim()])]\n ax.plot(lims, lims, '--', alpha=0.75, zorder=0, color='black')\n ax.set_aspect('equal')\n\n\ndef plot_correlations(df, name1, name2):\n plt.cla()\n sns.lmplot(data=df, x=\"log10(l1)\", y=\"log10(l2)\", palette='Dark2', hue=\"Branch Type\",\n scatter_kws={'alpha': 0.8, 'linewidth': 0}, order=2, ci=0)\n plt.grid(linestyle='--', linewidth=0.5)\n ax = plt.gca()\n ax.set_xlabel('log10 ('+name1+' length)')\n ax.set_ylabel('log10 ('+name2+' length)')\n ax.text(-0.3, -0.5, 'y=x')\n plot_x_y_line(ax)\n plt.savefig(name1+'_'+name2+'_correlations.pdf', bbox_inches='tight')\n\n\ndef compare_bl(args):\n tns = dendropy.TaxonNamespace()\n t1 = dendropy.Tree.get(path=args.tree1, schema='newick', taxon_namespace=tns)\n t2 = dendropy.Tree.get(path=args.tree2, schema='newick', taxon_namespace=tns)\n t1.deroot()\n t2.deroot()\n length_diffs = dendropy.calculate.treecompare._get_length_diffs(t1, t2)\n\n df_branches = pd.DataFrame(columns=['Taxon', \"Branch Type\", \"l1\", \"l2\", 'log10(l1)', 'log10(l2)'])\n neg_branches = 0\n idx = 0\n\n for node in t1.postorder_node_iter():\n 
node_type = 'terminal' if node.is_leaf() else 'internal'\n node_label = node.taxon.label if node.is_leaf() else ' '\n l1, l2 = length_diffs[idx]\n if l2 < 0:\n neg_branches += 1\n df_branches.loc[len(df_branches.index)] = [node_label, node_type, l1, l2,\n math.log10(l1) if l1 > 0 else np.nan,\n math.log10(l2) if l2 > 0 else np.nan]\n idx += 1\n\n print(df_branches[['Taxon', \"Branch Type\", \"l1\", \"l2\"]].to_string())\n if args.plot:\n name1 = args.tree1.split('/')[-1].split('.')[0].upper()\n name2 = args.tree2.split('/')[-1].split('.')[0].upper()\n plot_correlations(df_branches, name1, name2)\n df_branches.to_csv(name1 + '_' + name2 + '_correlations.csv')\n\n df_branches['l1'] = df_branches['l1'].apply(lambda x: x if x > 0 else 1e-6)\n df_branches['l2'] = df_branches['l2'].apply(lambda x: x if x > 0 else 1e-6)\n print('\\nBias:', np.mean(df_branches['l1'] - df_branches['l2']))\n print('Mean absolute error:', np.mean(np.abs(df_branches['l1'] - df_branches['l2'])))\n print('Root mean square error (RMSE):', np.sqrt(np.mean((df_branches['l1'] - df_branches['l2'])**2)))\n print('Mean logarithmic error:', np.mean(np.abs(np.log10(df_branches['l1']) - np.log10(df_branches['l2']))))\n print('Number of negative branches in t2:', neg_branches)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"compare tree branch lengths\")\n parser.add_argument(\"-t1\", \"--tree1\", type=str, required=True,\n help=\"tree file with branch lengths in newick format\")\n parser.add_argument(\"-t2\", \"--tree2\", type=str, required=True,\n help=\"tree file with branch lengths in newick format\")\n parser.add_argument(\"-p\", \"--plot\", default=False, required=False,\n action='store_true',\n help=\"plot correlations between branch lengths of input trees\")\n compare_bl(parser.parse_args())\n","repo_name":"ytabatabaee/CASTLES","sub_path":"scripts/compare_trees_bl.py","file_name":"compare_trees_bl.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"29888286170","text":"from django.urls import path\n\nfrom .views import index, get_all_characters_div, set_coord_by_id, get_character, get_quest, delete_quest_point, \\\n create_quest, get_all_quests, delete_quest, edit_link, all_links, create_character_link, create_character, \\\n get_all_characters\n\nurlpatterns = [path(\"\", index, name=\"graf_quests\"),\n\n # character\n path(\"character/\", get_character, name=\"character\"),\n path(\"create_character/\", create_character, name=\"create_character\"),\n path(\"all_characters/\", get_all_characters, name=\"all_characters\"),\n\n # quest\n path(\"quest/\", get_quest, name=\"quest\"),\n path(\"create_quest/\", create_quest, name=\"create_quest\"),\n path(\"delete_quest_point/\", delete_quest_point, name=\"delete_quest_point\"),\n path(\"all_quests/\", get_all_quests, name=\"all_quests\"),\n path(\"delete_quest//\", delete_quest, name=\"delete_quest\"),\n\n # links\n path(\"edit_link\", edit_link, name=\"edit_link\"),\n path(\"all_links/\", all_links, name=\"all_links\"),\n path(\"create_character_link\", create_character_link, name=\"create_character_link\"),\n\n # graf\n path(\"get_all_characters_div\", get_all_characters_div, name=\"get_all_characters_div\"),\n path(\"set_coord_by_id\", set_coord_by_id, name=\"set_coord_by_id\"),\n 
]\n","repo_name":"KOLLIU/neuchebnaya","sub_path":"main/graf_quests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"41840975762","text":"\"\"\"\nImplementation of Fisher Yates shuffle algorithm also known as Knuth shuffle\n\n\"\"\"\nimport random\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\ndef fisher_yates_shuffle(A: list) -> None:\n for i in range(len(A)-1, 0, -1):\n j = random.randint(0, i)\n A[j], A[i] = A[i], A[j]\n yield A\n\ndef draw():\n plt.style.use('dark_background')\n fig, ax = plt.subplots()\n bars = ax.bar(A, range(len(A)))\n text = ax.text(0.02, 0.95, \"\", transform=ax.transAxes)\n\n\n iteration = [0]\n def update(A, points, iteration):\n for points, val in zip(points, A):\n points.set_height(val)\n iteration[0] += 1\n text.set_text(f\"Number of operations: {iteration[0]}\")\n\n\n ani = FuncAnimation(fig, \n func=update, \n fargs=(bars, iteration), \n frames=fisher_yates_shuffle(A), \n interval=3,\n repeat=False)\n plt.show()\n\n\nif __name__ == '__main__':\n A = list(range(80))\n draw()\n","repo_name":"sumiem01/Sorting-Algorithms","sub_path":"fisher_yates_shuffle.py","file_name":"fisher_yates_shuffle.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"6363652433","text":"import unittest\n\nimport mock\nfrom pynxos.errors import CLIError\n\nfrom pyntc.devices.base_device import RollbackError\nfrom pyntc.devices.nxos_device import NXOSDevice\nfrom pyntc.errors import CommandError, CommandListError, FileTransferError, NTCFileNotFoundError\n\nfrom .device_mocks.nxos import show, show_list\n\nBOOT_IMAGE = \"n9000-dk9.9.2.1.bin\"\nKICKSTART_IMAGE = \"n9000-kickstart.9.2.1.bin\"\nFILE_SYSTEM = \"bootflash:\"\nDEVICE_FACTS = {\n \"uptime_string\": \"13:01:08:06\",\n \"uptime\": 1127286,\n \"vlans\": [\"1\", \"2\", \"3\", \"4\", \"5\"],\n \"os_version\": \"7.0(3)I2(1)\",\n \"serial_number\": \"SAL1819S6LU\",\n \"model\": \"Nexus9000 C9396PX Chassis\",\n \"hostname\": \"n9k1\",\n \"interfaces\": [\"mgmt0\", \"Ethernet1/1\", \"Ethernet1/2\", \"Ethernet1/3\"],\n \"fqdn\": \"N/A\",\n}\n\n\nclass TestNXOSDevice(unittest.TestCase):\n @mock.patch(\"pyntc.devices.nxos_device.NXOSNative\", autospec=True)\n @mock.patch(\"pynxos.device.Device.facts\", new_callable=mock.PropertyMock)\n def setUp(self, mock_device, mock_facts):\n self.device = NXOSDevice(\"host\", \"user\", \"pass\")\n mock_device.show.side_effect = show\n mock_device.show_list.side_effect = show_list\n mock_facts.return_value = DEVICE_FACTS\n\n self.device.native = mock_device\n type(self.device.native).facts = mock_facts.return_value\n\n def test_config(self):\n command = \"interface eth 1/1\"\n result = self.device.config(command)\n\n self.assertIsNone(result)\n self.device.native.config.assert_called_with(command)\n\n def test_bad_config(self):\n command = \"asdf poknw\"\n self.device.native.config.side_effect = CLIError(command, \"Invalid command.\")\n\n with self.assertRaisesRegex(CommandError, command):\n self.device.config(command)\n\n def test_config_list(self):\n commands = [\"interface eth 1/1\", \"no shutdown\"]\n result = self.device.config(commands)\n\n self.assertIsNone(result)\n self.device.native.config_list.assert_called_with(commands)\n\n def test_bad_config_list(self):\n commands = [\"interface Eth1\", \"apons\"]\n 
self.device.native.config_list.side_effect = CLIError(commands[1], \"Invalid command.\")\n\n with self.assertRaisesRegex(CommandListError, commands[1]):\n self.device.config(commands)\n\n def test_show(self):\n command = \"show cdp neighbors\"\n result = self.device.show(command)\n\n self.assertIsInstance(result, dict)\n self.assertIsInstance(result.get(\"neigh_count\"), int)\n\n self.device.native.show.assert_called_with(command, raw_text=False)\n\n def test_bad_show(self):\n command = \"show microsoft\"\n with self.assertRaises(CommandError):\n self.device.show(command)\n\n def test_show_raw_text(self):\n command = \"show hostname\"\n result = self.device.show(command, raw_text=True)\n\n self.assertIsInstance(result, str)\n self.assertEqual(result, \"n9k1.cisconxapi.com\")\n self.device.native.show.assert_called_with(command, raw_text=True)\n\n def test_show_list(self):\n commands = [\"show hostname\", \"show clock\"]\n\n result = self.device.show(commands)\n self.assertIsInstance(result, list)\n\n self.assertIn(\"hostname\", result[0])\n self.assertIn(\"simple_time\", result[1])\n\n self.device.native.show_list.assert_called_with(commands, raw_text=False)\n\n def test_bad_show_list(self):\n commands = [\"show badcommand\", \"show clock\"]\n with self.assertRaisesRegex(CommandListError, \"show badcommand\"):\n self.device.show(commands)\n\n def test_save(self):\n result = self.device.save()\n self.device.native.save.return_value = True\n\n self.assertTrue(result)\n self.device.native.save.assert_called_with(filename=\"startup-config\")\n\n def test_file_copy_remote_exists(self):\n self.device.native.file_copy_remote_exists.return_value = True\n result = self.device.file_copy_remote_exists(\"source_file\", \"dest_file\")\n\n self.assertTrue(result)\n self.device.native.file_copy_remote_exists.assert_called_with(\n \"source_file\", \"dest_file\", file_system=FILE_SYSTEM\n )\n\n def test_file_copy_remote_exists_failure(self):\n self.device.native.file_copy_remote_exists.return_value = False\n result = self.device.file_copy_remote_exists(\"source_file\", \"dest_file\")\n\n self.assertFalse(result)\n self.device.native.file_copy_remote_exists.assert_called_with(\n \"source_file\", \"dest_file\", file_system=FILE_SYSTEM\n )\n\n @mock.patch.object(NXOSDevice, \"file_copy_remote_exists\", side_effect=[False, True])\n def test_file_copy(self, mock_fcre):\n self.device.file_copy(\"source_file\", \"dest_file\")\n self.device.native.file_copy.assert_called_with(\"source_file\", \"dest_file\", file_system=FILE_SYSTEM)\n self.device.native.file_copy.assert_called()\n\n @mock.patch.object(NXOSDevice, \"file_copy_remote_exists\", side_effect=[False, True])\n def test_file_copy_no_dest(self, mock_fcre):\n self.device.file_copy(\"source_file\")\n self.device.native.file_copy.assert_called_with(\"source_file\", \"source_file\", file_system=FILE_SYSTEM)\n self.device.native.file_copy.assert_called()\n\n @mock.patch.object(NXOSDevice, \"file_copy_remote_exists\", side_effect=[True])\n def test_file_copy_file_exists(self, mock_fcre):\n self.device.file_copy(\"source_file\", \"dest_file\")\n self.device.native.file_copy.assert_not_called()\n\n @mock.patch.object(NXOSDevice, \"file_copy_remote_exists\", side_effect=[False, False])\n def test_file_copy_fail(self, mock_fcre):\n with self.assertRaises(FileTransferError):\n self.device.file_copy(\"source_file\")\n self.device.native.file_copy.assert_called()\n\n def test_reboot(self):\n self.device.reboot()\n 
self.device.native.show_list.assert_called_with([\"terminal dont-ask\", \"reload\"])\n # self.device.native.reboot.assert_called_with(confirm=True)\n\n def test_boot_options(self):\n expected = {\"sys\": \"my_sys\", \"boot\": \"my_boot\"}\n self.device.native.get_boot_options.return_value = expected\n boot_options = self.device.boot_options\n self.assertEqual(boot_options, expected)\n\n def test_set_boot_options(self):\n self.device.set_boot_options(BOOT_IMAGE)\n self.device.native.set_boot_options.assert_called_with(f\"{FILE_SYSTEM}{BOOT_IMAGE}\", kickstart=None)\n\n def test_set_boot_options_dir(self):\n self.device.set_boot_options(BOOT_IMAGE, file_system=FILE_SYSTEM)\n self.device.native.set_boot_options.assert_called_with(f\"{FILE_SYSTEM}{BOOT_IMAGE}\", kickstart=None)\n\n def test_set_boot_options_kickstart(self):\n self.device.set_boot_options(BOOT_IMAGE, kickstart=KICKSTART_IMAGE)\n self.device.native.set_boot_options.assert_called_with(\n f\"{FILE_SYSTEM}{BOOT_IMAGE}\", kickstart=f\"{FILE_SYSTEM}{KICKSTART_IMAGE}\"\n )\n\n @mock.patch.object(NXOSDevice, \"show\", return_value=FILE_SYSTEM)\n def test_set_boot_options_no_file(self, mock_show):\n with self.assertRaises(NTCFileNotFoundError) as no_file:\n self.device.set_boot_options(BOOT_IMAGE)\n self.assertIn(f\"{BOOT_IMAGE} was not found in {FILE_SYSTEM}\", no_file.exception.message)\n\n @mock.patch.object(NXOSDevice, \"show\", return_value=f\"{FILE_SYSTEM}\\n{BOOT_IMAGE}\")\n def test_set_boot_options_no_kickstart(self, mock_show):\n with self.assertRaises(NTCFileNotFoundError) as no_file:\n self.device.set_boot_options(BOOT_IMAGE, kickstart=KICKSTART_IMAGE)\n self.assertIn(f\"{KICKSTART_IMAGE} was not found in {FILE_SYSTEM}\", no_file.exception.message)\n\n def test_backup_running_config(self):\n filename = \"local_running_config\"\n self.device.backup_running_config(filename)\n\n self.device.native.backup_running_config.assert_called_with(filename)\n\n def test_rollback(self):\n self.device.rollback(\"good_checkpoint\")\n self.device.native.rollback.assert_called_with(\"good_checkpoint\")\n\n def test_bad_rollback(self):\n self.device.native.rollback.side_effect = CLIError(\"rollback\", \"bad rollback command\")\n\n with self.assertRaises(RollbackError):\n self.device.rollback(\"bad_checkpoint\")\n\n def test_checkpoint(self):\n self.device.checkpoint(\"good_checkpoint\")\n self.device.native.checkpoint.assert_called_with(\"good_checkpoint\")\n\n def test_uptime(self):\n uptime = self.device.uptime\n assert uptime == 1127286\n\n def test_vendor(self):\n vendor = self.device.vendor\n assert vendor == \"cisco\"\n\n def test_os_version(self):\n os_version = self.device.os_version\n assert os_version == \"7.0(3)I2(1)\"\n\n def test_interfaces(self):\n interfaces = self.device.interfaces\n assert interfaces == [\"mgmt0\", \"Ethernet1/1\", \"Ethernet1/2\", \"Ethernet1/3\"]\n\n def test_hostname(self):\n hostname = self.device.hostname\n assert hostname == \"n9k1\"\n\n def test_fqdn(self):\n fqdn = self.device.fqdn\n assert fqdn == \"N/A\"\n\n def test_serial_number(self):\n serial_number = self.device.serial_number\n assert serial_number == \"SAL1819S6LU\"\n\n def test_model(self):\n model = self.device.model\n assert model == \"Nexus9000 C9396PX Chassis\"\n\n @mock.patch(\"pynxos.device.Device.running_config\", new_callable=mock.PropertyMock)\n def test_running_config(self, mock_rc):\n type(self.device.native).running_config = mock_rc\n self.device.running_config()\n self.device.native.running_config.assert_called_with()\n
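Several tests in this record rely on mock.patch.object(..., side_effect=[False, True]), where each call consumes the next value in the list. A tiny standalone illustration of that behaviour, independent of the device under test:

import mock

m = mock.Mock(side_effect=[False, True])
assert m() is False  # first call consumes the first value
assert m() is True   # second call consumes the second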
\n def test_starting_config(self):\n expected = self.device.show(\"show startup-config\", raw_text=True)\n self.assertEqual(self.device.startup_config, expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"networktocode/pyntc","sub_path":"tests/unit/test_devices/test_nxos_device.py","file_name":"test_nxos_device.py","file_ext":"py","file_size_in_byte":9903,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"13"} +{"seq_id":"23205360348","text":"import pymysql\n\nclass DBManager:\n def __init__(self):\n self.conn = pymysql.connect('localhost', 'root','123456','test', charset='utf8')\n self.cursor = self.conn.cursor()\n\n def do_login(self, name, pwd):\n # use a parameterized query instead of string formatting to avoid SQL injection\n sql = \"select * from users where name=%s and pwd=%s\"\n # Run the query\n self.cursor.execute(sql, (name, pwd))\n result = self.cursor.fetchall() # result = (('zhangsan', '123456'),)\n # print(result)\n if not result:\n result = \"Incorrect username or password\"\n else:\n result = 'OK'\n return result\n\n def do_register(self, name, pwd):\n sql = \"insert into users values(%s, %s)\"\n # Execute the SQL statement\n try:\n self.cursor.execute(sql, (name, pwd)) # return value is the number of affected rows, here 1\n self.conn.commit()\n return 'OK'\n except:\n self.conn.rollback()\n return 'Username already exists'\n\n\nif __name__ == '__main__':\n db = DBManager()\n re = db.do_register('lisi', '123456')\n print(re)","repo_name":"alanmchan/PyTalk","sub_path":"server/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
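A hedged usage sketch for the DBManager record above; the connection credentials are the ones hard-coded in that file and are meant for local testing only:

db = DBManager()
print(db.do_register('zhangsan', '111111'))  # 'OK', or 'Username already exists' on a repeat run
print(db.do_login('zhangsan', '111111'))     # 'OK' once the row exists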
\n\n    #\n    # @action(['DELETE'], detail=False, permission_classes=[IsAuthenticated])\n    # def logout(self, request: Request):\n    #     Token.objects.get(user=request.user).delete()\n    #     return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n# class LoginView(TokenObtainPairView):\n#     permission_classes = (AllowAny,)\n#     serializer_class = MyTokenObtainPairSerializer\n# from django.contrib.auth import login, authenticate\n#\n#\n# class LoginView(APIView):\n#     def post(self, request):\n#         phone_number = request.data['phone_number']\n#         password = request.data['password']\n#         user = authenticate(phone=phone_number, password=password)\n#         if not user:\n#             login(request, user)\n\n# class LoginView(viewsets.ViewSet):\n#     \"\"\" Checks the email and password and returns an authentication token.\"\"\"\n#\n#     serializer_class = AuthTokenSerializer\n#\n#     def create(self, request):\n#         \"\"\"Use the ObtainAuthToken APIView to validate and create the token.\"\"\"\n#\n#         return ObtainAuthToken().as_view()(request=request._request)\nclass LoginView(GenericViewSet):\n    serializer_class = LoginSerializer\n    queryset = User.objects.all()\n\n    @action(['POST'], detail=False, permission_classes=[permissions.AllowAny])\n    def login(self, request: Request):\n        self.serializer_class = LoginSerializer\n        serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        phone_number = serializer.validated_data['phone_number']\n        code = serializer.validated_data['code']\n        try:\n            user = User.objects.get(phone_number=phone_number)\n        except User.DoesNotExist:\n            return Response({'error': 'User with this phone number does not exist'})\n        if int(code) == int(user.mycode):\n            return Response({'token': user.tokens()})\n        else:\n            return Response(\n                {'error': f\"Code is not valid! {code}!={user.mycode}\"})\n\n    @action(['POST'], detail=False, permission_classes=[permissions.IsAuthenticated])\n    def logout(self, request):\n        token = RefreshToken(request.data.get('refresh'))\n        token.blacklist()  # blacklist exactly once; a repeat call is redundant\n        return Response({\"status\": \"Success\"})\n\n\nclass UserProfile(APIView):\n    get_serializer_class = None\n\n    def get_object(self, user, pk=None):\n        pass\n\n    def get(self, request, **kwargs):\n        pass\n        # return Response(data, status=200)\n\n\nclass UserList(APIView):\n    permission_classes = (IsAuthenticated,)\n\n    def get(self, request, pk):\n        users = User.objects.get(id=pk)\n        serializer = UserSerializer(users, context={'request': request})\n        return Response(serializer.data)\n\n\n# class UserProductsList(APIView):\n#     permission_classes = (IsAuthenticated,)\n#\n#     def get(self, request, pk):\n#         users = CustomUser.objects.get(id=pk)\n#         serializer = UserProductsSerializer(users, context={'request': request})\n#         return Response(serializer.data)\n\nclass UserDetail(generics.RetrieveAPIView):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n\n\nclass UpdateProfileView(generics.RetrieveUpdateAPIView):\n    queryset = User.objects.all()\n    permission_classes = (AllowAny,)\n    serializer_class = UpdateUserSerializer\n\n\nclass UserProfileList(APIView):\n    permission_classes = (IsAuthenticated,)\n\n    def get(self, request, pk):\n        users = User.objects.filter(id=pk)\n        serializer = UserSerializer(users, context={'request': request}, many=True)\n        return Response(serializer.data)\n\n\nclass MapView(mixins.CreateModelMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,\n              mixins.DestroyModelMixin, GenericViewSet):\n    queryset = Map.objects.all()\n    serializer_class = MapSerializer\n    permission_classes = (IsAuthenticated, )\n","repo_name":"abd1bayev/Registration-and-login-django","sub_path":"user_profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"13"}
{"seq_id":"17038422564","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayCommerceTaskOperationCreateModel(object):\n\n    def __init__(self):\n        self._channel = None\n        self._cycle = None\n        
self._cycle_type = None\n self._desc = None\n self._end_time = None\n self._ext_info = None\n self._name = None\n self._out_biz_no = None\n self._scene = None\n self._scene_type = None\n self._settle_type = None\n self._shop_scope_type = None\n self._start_time = None\n self._startup_type = None\n self._target = None\n self._target_type = None\n self._task_rule_type = None\n self._type = None\n self._voucher_template_id = None\n\n @property\n def channel(self):\n return self._channel\n\n @channel.setter\n def channel(self, value):\n self._channel = value\n @property\n def cycle(self):\n return self._cycle\n\n @cycle.setter\n def cycle(self, value):\n self._cycle = value\n @property\n def cycle_type(self):\n return self._cycle_type\n\n @cycle_type.setter\n def cycle_type(self, value):\n self._cycle_type = value\n @property\n def desc(self):\n return self._desc\n\n @desc.setter\n def desc(self, value):\n self._desc = value\n @property\n def end_time(self):\n return self._end_time\n\n @end_time.setter\n def end_time(self, value):\n self._end_time = value\n @property\n def ext_info(self):\n return self._ext_info\n\n @ext_info.setter\n def ext_info(self, value):\n self._ext_info = value\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n @property\n def out_biz_no(self):\n return self._out_biz_no\n\n @out_biz_no.setter\n def out_biz_no(self, value):\n self._out_biz_no = value\n @property\n def scene(self):\n return self._scene\n\n @scene.setter\n def scene(self, value):\n self._scene = value\n @property\n def scene_type(self):\n return self._scene_type\n\n @scene_type.setter\n def scene_type(self, value):\n self._scene_type = value\n @property\n def settle_type(self):\n return self._settle_type\n\n @settle_type.setter\n def settle_type(self, value):\n self._settle_type = value\n @property\n def shop_scope_type(self):\n return self._shop_scope_type\n\n @shop_scope_type.setter\n def shop_scope_type(self, value):\n self._shop_scope_type = value\n @property\n def start_time(self):\n return self._start_time\n\n @start_time.setter\n def start_time(self, value):\n self._start_time = value\n @property\n def startup_type(self):\n return self._startup_type\n\n @startup_type.setter\n def startup_type(self, value):\n self._startup_type = value\n @property\n def target(self):\n return self._target\n\n @target.setter\n def target(self, value):\n self._target = value\n @property\n def target_type(self):\n return self._target_type\n\n @target_type.setter\n def target_type(self, value):\n self._target_type = value\n @property\n def task_rule_type(self):\n return self._task_rule_type\n\n @task_rule_type.setter\n def task_rule_type(self, value):\n self._task_rule_type = value\n @property\n def type(self):\n return self._type\n\n @type.setter\n def type(self, value):\n self._type = value\n @property\n def voucher_template_id(self):\n return self._voucher_template_id\n\n @voucher_template_id.setter\n def voucher_template_id(self, value):\n self._voucher_template_id = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.channel:\n if hasattr(self.channel, 'to_alipay_dict'):\n params['channel'] = self.channel.to_alipay_dict()\n else:\n params['channel'] = self.channel\n if self.cycle:\n if hasattr(self.cycle, 'to_alipay_dict'):\n params['cycle'] = self.cycle.to_alipay_dict()\n else:\n params['cycle'] = self.cycle\n if self.cycle_type:\n if hasattr(self.cycle_type, 'to_alipay_dict'):\n params['cycle_type'] = self.cycle_type.to_alipay_dict()\n 
else:\n params['cycle_type'] = self.cycle_type\n if self.desc:\n if hasattr(self.desc, 'to_alipay_dict'):\n params['desc'] = self.desc.to_alipay_dict()\n else:\n params['desc'] = self.desc\n if self.end_time:\n if hasattr(self.end_time, 'to_alipay_dict'):\n params['end_time'] = self.end_time.to_alipay_dict()\n else:\n params['end_time'] = self.end_time\n if self.ext_info:\n if hasattr(self.ext_info, 'to_alipay_dict'):\n params['ext_info'] = self.ext_info.to_alipay_dict()\n else:\n params['ext_info'] = self.ext_info\n if self.name:\n if hasattr(self.name, 'to_alipay_dict'):\n params['name'] = self.name.to_alipay_dict()\n else:\n params['name'] = self.name\n if self.out_biz_no:\n if hasattr(self.out_biz_no, 'to_alipay_dict'):\n params['out_biz_no'] = self.out_biz_no.to_alipay_dict()\n else:\n params['out_biz_no'] = self.out_biz_no\n if self.scene:\n if hasattr(self.scene, 'to_alipay_dict'):\n params['scene'] = self.scene.to_alipay_dict()\n else:\n params['scene'] = self.scene\n if self.scene_type:\n if hasattr(self.scene_type, 'to_alipay_dict'):\n params['scene_type'] = self.scene_type.to_alipay_dict()\n else:\n params['scene_type'] = self.scene_type\n if self.settle_type:\n if hasattr(self.settle_type, 'to_alipay_dict'):\n params['settle_type'] = self.settle_type.to_alipay_dict()\n else:\n params['settle_type'] = self.settle_type\n if self.shop_scope_type:\n if hasattr(self.shop_scope_type, 'to_alipay_dict'):\n params['shop_scope_type'] = self.shop_scope_type.to_alipay_dict()\n else:\n params['shop_scope_type'] = self.shop_scope_type\n if self.start_time:\n if hasattr(self.start_time, 'to_alipay_dict'):\n params['start_time'] = self.start_time.to_alipay_dict()\n else:\n params['start_time'] = self.start_time\n if self.startup_type:\n if hasattr(self.startup_type, 'to_alipay_dict'):\n params['startup_type'] = self.startup_type.to_alipay_dict()\n else:\n params['startup_type'] = self.startup_type\n if self.target:\n if hasattr(self.target, 'to_alipay_dict'):\n params['target'] = self.target.to_alipay_dict()\n else:\n params['target'] = self.target\n if self.target_type:\n if hasattr(self.target_type, 'to_alipay_dict'):\n params['target_type'] = self.target_type.to_alipay_dict()\n else:\n params['target_type'] = self.target_type\n if self.task_rule_type:\n if hasattr(self.task_rule_type, 'to_alipay_dict'):\n params['task_rule_type'] = self.task_rule_type.to_alipay_dict()\n else:\n params['task_rule_type'] = self.task_rule_type\n if self.type:\n if hasattr(self.type, 'to_alipay_dict'):\n params['type'] = self.type.to_alipay_dict()\n else:\n params['type'] = self.type\n if self.voucher_template_id:\n if hasattr(self.voucher_template_id, 'to_alipay_dict'):\n params['voucher_template_id'] = self.voucher_template_id.to_alipay_dict()\n else:\n params['voucher_template_id'] = self.voucher_template_id\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayCommerceTaskOperationCreateModel()\n if 'channel' in d:\n o.channel = d['channel']\n if 'cycle' in d:\n o.cycle = d['cycle']\n if 'cycle_type' in d:\n o.cycle_type = d['cycle_type']\n if 'desc' in d:\n o.desc = d['desc']\n if 'end_time' in d:\n o.end_time = d['end_time']\n if 'ext_info' in d:\n o.ext_info = d['ext_info']\n if 'name' in d:\n o.name = d['name']\n if 'out_biz_no' in d:\n o.out_biz_no = d['out_biz_no']\n if 'scene' in d:\n o.scene = d['scene']\n if 'scene_type' in d:\n o.scene_type = d['scene_type']\n if 'settle_type' in d:\n o.settle_type = d['settle_type']\n if 'shop_scope_type' in d:\n 
o.shop_scope_type = d['shop_scope_type']\n        if 'start_time' in d:\n            o.start_time = d['start_time']\n        if 'startup_type' in d:\n            o.startup_type = d['startup_type']\n        if 'target' in d:\n            o.target = d['target']\n        if 'target_type' in d:\n            o.target_type = d['target_type']\n        if 'task_rule_type' in d:\n            o.task_rule_type = d['task_rule_type']\n        if 'type' in d:\n            o.type = d['type']\n        if 'voucher_template_id' in d:\n            o.voucher_template_id = d['voucher_template_id']\n        return o\n\n\n","repo_name":"alipay/alipay-sdk-python-all","sub_path":"alipay/aop/api/domain/AlipayCommerceTaskOperationCreateModel.py","file_name":"AlipayCommerceTaskOperationCreateModel.py","file_ext":"py","file_size_in_byte":9660,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"13"}
{"seq_id":"8181826018","text":"import django_filters.rest_framework\nfrom django.db.models import Sum\nfrom recipes.filters import IngredientFilter, RecipeFilter\nfrom recipes.models import (Favorite, Ingredient, IngredientRecipe, Purchase,\n                            Recipe, Tag)\nfrom recipes.paginators import CustomPageNumberPaginator\nfrom recipes.permissions import AdminOrAuthorOrReadOnly\nfrom recipes.serializers import (FavoriteSerializer, GetRecipeSerializer,\n                                 IngredientSerializer, PostRecipeSerializer,\n                                 PurchaseSerializer, TagSerializer)\nfrom rest_framework import permissions, status, viewsets\nfrom rest_framework.decorators import (api_view, permission_classes,\n                                       renderer_classes)\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework_csv.renderers import CSVRenderer\n\n\nclass RecipeViewSet(viewsets.ModelViewSet):\n    queryset = Recipe.objects.all()\n    filter_backends = [django_filters.rest_framework.DjangoFilterBackend]\n    filterset_class = RecipeFilter\n    pagination_class = CustomPageNumberPaginator\n    permission_classes = [\n        AdminOrAuthorOrReadOnly,\n    ]\n\n    def perform_create(self, serializer):\n        serializer.save(author=self.request.user)\n\n    def get_serializer_class(self):\n        if self.request.method in permissions.SAFE_METHODS:\n            return GetRecipeSerializer\n        return PostRecipeSerializer\n\n\nclass TagViewSet(viewsets.ModelViewSet):\n    queryset = Tag.objects.all()\n    serializer_class = TagSerializer\n    pagination_class = None\n    permission_classes = [\n        AllowAny,\n    ]\n\n\nclass IngredientViewSet(viewsets.ReadOnlyModelViewSet):\n    queryset = Ingredient.objects.all()\n    serializer_class = IngredientSerializer\n    pagination_class = None\n    permission_classes = [\n        AllowAny,\n    ]\n    filter_backends = [django_filters.rest_framework.DjangoFilterBackend]\n    filterset_class = IngredientFilter\n\n\nclass IngredientRecipeViewSet(viewsets.ModelViewSet):\n    queryset = IngredientRecipe.objects.all()\n    serializer_class = GetRecipeSerializer\n    permission_classes = [\n        AllowAny,\n    ]\n\n\n@api_view(['GET', 'DELETE'])\n@permission_classes([IsAuthenticated])\ndef favorite(request, pk):\n    if request.method == 'GET':\n        qs = Recipe.objects.all()\n        recipe = get_object_or_404(qs, id=pk)\n        Favorite.objects.create(user_id=request.user.id, recipe_id=pk)\n        serializer = FavoriteSerializer(recipe)\n        return Response(serializer.data)\n\n    favorite_qs = Favorite.objects.all()\n    favorite_recipe = get_object_or_404(favorite_qs, recipe_id=pk)\n    favorite_recipe.delete()\n    return Response(status=status.HTTP_204_NO_CONTENT)
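\n\n\n# purchase() below mirrors favorite() above: GET puts the recipe on the\n# requesting user's shopping list, DELETE takes it off again.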
\n@api_view(['GET', 'DELETE'])\n@permission_classes([IsAuthenticated])\ndef purchase(request, pk):\n    if request.method == 'GET':\n        qs = Recipe.objects.all()\n        recipe = get_object_or_404(qs, id=pk)\n        Purchase.objects.create(user_id=request.user.id, recipe_id=pk)\n        serializer = PurchaseSerializer(recipe)\n        return Response(serializer.data)\n\n    purchase_qs = Purchase.objects.all()\n    purchase_recipe = get_object_or_404(purchase_qs, recipe_id=pk)\n    purchase_recipe.delete()\n    return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass MyUserRenderer(CSVRenderer):\n    header = ['Ingredient name', 'Amount', 'Measurement unit']\n\n\n@api_view(['GET'])\n@renderer_classes((MyUserRenderer,))\n@permission_classes([IsAuthenticated])\ndef export_purchase(request):\n    purchases = (\n        IngredientRecipe.objects.filter(recipe__purchases__user=request.user)\n        .values('ingredient__name', 'ingredient__measurement_unit')\n        .annotate(total_amount=Sum('amount'))\n    )\n    content = [\n        {\n            'Ingredient name': item['ingredient__name'],\n            'Amount': item['total_amount'],\n            'Measurement unit': item['ingredient__measurement_unit'],\n        }\n        for item in purchases\n    ]\n    return Response(content)\n","repo_name":"adm-in/foodgram-project-react","sub_path":"backend/apps/recipes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"25105116363","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport math\n\n########################## Building Functions ###############################################################\n\ndef find_new(lst):\n\tcount = 0\n\th = 0\n\tind = 0\n\tnew = np.matrix([0,0,0])\n\tfor i in range(0,lst.shape[0]):\n\t\tif(lst[i,0]>h):\n\t\t\th = lst[i,0]\n\t\t\tind = i\n\tfor i in range(0,lst.shape[0]):\n\t\tif(lst[i,0]==h):\n\t\t\tif(count==0):\n\t\t\t\tnew = lst[i,:]\n\t\t\telse:\n\t\t\t\tnew = np.concatenate((new,lst[i,:]),axis=0)\n\t\t\tcount = count+1\n\treturn(new)\n\ndef move(new,hist):\n\tglobal g_value,inc\n\tinc = inc+1\n\tfor i in range(0,new.shape[0]):\n\t\tfor j in range(0,udlr.shape[0]):\n\t\t\tx = new[i,1]+udlr[j,0]\n\t\t\ty = new[i,2]+udlr[j,1]\n\t\t\tif(x>=0 and x<=grid.shape[0]-1 and y>=0 and y<=grid.shape[1]-1):\n\t\t\t\tif(work_grid[x,y]==0):\n\t\t\t\t\tmov = np.matrix([[cost,udlr[j,0],udlr[j,1]]])\n\t\t\t\t\thist = np.concatenate((hist,new[i,:]+mov),axis=0)\n\t\t\t\t\tg_value = hist[len(hist)-1,0]\n\t\t\t\t\twork_grid[ hist[len(hist)-1,1] , hist[len(hist)-1,2] ] = 2\n\t\t\t\t\texp[ hist[len(hist)-1,1] , hist[len(hist)-1,2] ]=inc\n\treturn(hist)\n\n########################### Initiating Variables #######################################################\ngrid = np.array([[0,0,0,0,0,1],\n\t\t\t\t [0,0,1,0,0,0],\n\t\t\t\t [0,0,1,0,0,0],\n\t\t\t\t [1,0,1,0,1,0],\n\t\t\t\t [0,0,0,0,1,0]])\n\nstart = np.matrix([[0,0]])\ngoal = np.matrix([[4,5]])\n\nglobal g_value,cost,work_grid,inc,exp,path,direc,star,coor,udlr\ng_value = 0\ncost = 1\ninc = 0\nstar = False\n\nudlr = np.matrix([[-1,0],\n\t\t\t\t [1,0],\n\t\t\t\t [0,-1],\n\t\t\t\t [0,1]])\n\ndirec = np.array(['v','^','>','<'])\n\nopen_list = np.matrix([[g_value,start[0,0],start[0,1]]])\n\nwork_grid = np.copy(grid)\n\nexp = np.ones((grid.shape[0],grid.shape[1]))\nexp = exp*(-2)\nexp[start[0,0],start[0,1]] = 0\n\npath = np.matrix([[' ' for row in range(grid.shape[1])] for col in range(grid.shape[0])])\npath[ goal[0,0],goal[0,1] ] = ('*')\n\nhistory = np.matrix([g_value,start[0,0],start[0,1]])\n\n########################## Main Loop To Find Goal ################################################################
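\n# Wavefront expansion: find_new() returns the rows of `history` that share the\n# current maximum g-value; move() then grows each of them one step in the four\n# udlr directions, stamping the wave number into `exp` for the backtracking pass.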
\nwhile True:\n\tfound = 0\n\tmax_g = find_new(history)\n\thistory = move(max_g,history)\n\tmax_g0 = find_new(history)\n\tfor i in range(0,len(max_g0)):\n\t\tif(max_g0[i,1]==goal[0,0] and max_g0[i,2]==goal[0,1]):\n\t\t\tprint('Goal reached.')\n\t\t\tfound = 1\n\tif(found==1):\n\t\tcoor = np.matrix([[goal[0,0],goal[0,1]]])\n\t\tprint(find_new(history))\n\t\tprint(history)\n\t\tprint(work_grid)\n\t\tprint(grid)\n\t\tprint(exp)\n\t\tbreak\n\tif(max_g[0,1]==max_g0[0,1] and max_g[0,2]==max_g0[0,2]):\n\t\tcoor = np.matrix(history[len(history)-1,1:])\n\t\tpath[coor[0,0],coor[0,1]]='#'\n\t\tprint(grid.shape)\n\t\tprint(exp)\n\t\tprint(coor)\n\t\tprint('No path to the goal was found.')\n\t\tbreak\n\nwhile star==False:\n\tfor i in range(0,len(udlr)):\n\t\tx = coor[0,0]+udlr[i,0]\n\t\ty = coor[0,1]+udlr[i,1]\n\n\t\tif(x>=0 and x<=grid.shape[0]-1 and y>=0 and y<=grid.shape[1]-1):\n\t\t\tif(exp[x,y]==g_value-1):\n\t\t\t\tcoor = coor+udlr[i,:]\n\t\t\t\tg_value = g_value-1\n\t\t\t\tif(i==0):\n\t\t\t\t\tpath[x,y]=direc[0]\n\t\t\t\telif(i==1):\n\t\t\t\t\tpath[x,y]=direc[1]\n\t\t\t\telif(i==2):\n\t\t\t\t\tpath[x,y]=direc[2]\n\t\t\t\telse:\n\t\t\t\t\tpath[x,y]=direc[3]\n\t\tif(x==start[0,0] and y==start[0,1]):\n\t\t\tprint(path)\n\t\t\tstar = True","repo_name":"Kunal-khanwalkar/Tarzan_wc","sub_path":"Planning/practicecodes/udacity/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"21411176188","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    def findAnagrams(self, s: str, p: str) -> List[int]:\n        if len(p) > len(s):\n            return []\n\n        result = []\n\n        # acquire dict of p\n        d_p = defaultdict(int)\n        for c in p:\n            d_p[c] += 1\n\n        # add 0 ~ j - 1 to dict\n        d_s = defaultdict(int)\n        for c in s[: len(p) - 1]:\n            d_s[c] += 1\n\n        i, j = 0, len(p) - 1\n        while j < len(s):\n            # Remove i - 1 and add j\n            if i > 0:\n                if d_s[s[i - 1]] == 1:\n                    del d_s[s[i - 1]]\n                else:\n                    d_s[s[i - 1]] -= 1\n            d_s[s[j]] += 1\n\n            if d_s == d_p:\n                result.append(i)\n\n            i += 1\n            j += 1\n\n        return result\n","repo_name":"stevenjst0121/leetcode","sub_path":"438_find_all_anagrams_in_string.py","file_name":"438_find_all_anagrams_in_string.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"27290388971","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 23 13:24:25 2018\n\n@author: Sameer\n\"\"\"\nfrom __future__ import print_function\nimport pandas as pd \nimport twitter\nimport time\n\ndata = pd.DataFrame(columns=[ 'created_at','id','text','user','source','hashtags'])\ndef t_api():\n    return twitter.Api(consumer_key='45owJD5G5DDdSsWEAa3TQZCt9',\n                       consumer_secret='6WyuURska9ByI9zQXLFEDrBZgsn1HrGJ61oU3xyEacnorD9DCQ',\n                       access_token_key='2988319474-MTtuwJMgl4dZzG6hkjnTZjMNe26xrWm2VVbNVua',\n                       access_token_secret='W3J6f701MM2UL0eYp8Y6gs3Xrql09EEmPWcMnrdrqjCib' )\n\ndef search(hashtag,start,end,min_id):\n    #print(\"searching for #\"+hashtag)\n    return api.GetSearch(\n                  raw_query='q=%23'+hashtag+\n                  '&count=100&result_type=recent&since='+start+\n                  '&until='+end+'&count=100&max_id='+str(min_id))\ndef min_id(result):\n    ids=([r.id for r in result])\n    return min(ids)\n\ndef min_date(result):\n    dates=([r.created_at for r in result])\n    min_date= min(dates).lower().split(\" \")\n    date=\"2018-\"\n    if (min_date[1]=='feb'):\n        date+=\"02-\"\n    elif(min_date[1]=='mar'):\n        date+=\"03-\"\n    else:\n        date+=\"02-\"\n    date+=min_date[2]\n    return date\n\ndef panda_data(results):\n    for r in results:\n        created_at,hashtags,ids,source,text,user = [None]*6\n        \n        text=r.text\n        if (text[:2]!=\"RT\") and r.lang!=\"en\":\n            created_at = r.created_at\n            ids=r.id\n            source=r.source\n            hashtags = [h.text for h in r.hashtags]
\n            user = r.user.screen_name\n            i = data.shape[0]\n            data.loc[i] = [created_at,ids,text,user,source,hashtags] \n    data.to_csv('data/topasd_25.csv', encoding='utf-8', index=False)\n\n\ndef collect_data(hashtag):\n    results = api.GetSearch(\n                  raw_query='q=%23'+hashtag+\n                  '&count=100&result_type=recent&since='+start_date+\n                  '&until='+end_date+'&count=100')\n    minim = min_id(results)\n    end=(min_date(results))\n    count=1\n    while(end!=start_date): \n        try:\n            results = search(hashtag,start_date,end_date,minim)\n            minim = min_id(results)\n            end=(min_date(results))\n            count+=1\n            if(count%25==0):\n                print(hashtag)\n            panda_data(results)\n        except Exception as e:\n            count=1\n            print(e)\n            try:\n                if(e.message[0]['code']==88):  # rate limited: wait before retrying\n                    print(str(time.strftime('%X %x %Z')))\n                    time.sleep(990)\n            except:\n                pass\n\napi=t_api()\nstart_date='2018-03-05'\nend_date='2018-03-07'\n\n#print(\"Start\")\n##collect_data(\"health\")\n#print(\"End\")","repo_name":"sdhoju/hashtag_associatoin","sub_path":"twitterapp.py","file_name":"twitterapp.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"}
{"seq_id":"24841730493","text":"#-*- coding:utf-8 -*- \r\n\r\nimport os\r\nimport torch as t\r\nimport torch.optim as optim\r\nfrom torch.utils import data\r\nimport torch.nn as nn\r\nimport numpy as np\r\nnp.random.seed(1080)\r\n\r\nclass InputConvUnit(nn.Module):\r\n    def __init__(self, out_dim=48):\r\n        super(InputConvUnit, self).__init__()\r\n        self.conv = nn.Conv2d(in_channels=8, out_channels=out_dim, kernel_size=3, padding=1)\r\n    def forward(self, x):\r\n        y = self.conv(x)\r\n        return y\r\n\r\nclass ResBlock(nn.Module):\r\n    \"\"\"\r\n    Residual block\r\n    \"\"\"\r\n    def __init__(self, dim=48, dilation=1):\r\n        super(ResBlock, self).__init__()\r\n        self.conv1 = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=1+(dilation-1), dilation=dilation)\r\n        self.conv2 = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=5, padding=2+(dilation-1), dilation=dilation)\r\n        self.in1 = nn.InstanceNorm2d(num_features=dim)\r\n        self.in2 = nn.InstanceNorm2d(num_features=dim)\r\n        self.dropout = nn.Dropout2d(p=0.25)\r\n        self.elu1 = nn.ELU()\r\n        self.elu2 = nn.ELU()\r\n    def forward(self,x):\r\n        y = self.elu1(x)\r\n        y = self.in1(y)\r\n        y = y.triu(diagonal=2)\r\n        y = self.conv1(y)\r\n        y = self.elu2(y)\r\n        y = self.in2(y)\r\n        y = y.triu(diagonal=2)\r\n        y = self.dropout(y)\r\n        y = self.conv2(y)\r\n        return y + x\r\n\r\nclass BiLSTM(nn.Module):\r\n    \"\"\"\r\n    Bi-directional LSTM\r\n    \"\"\"\r\n    def __init__(self, in_dim=48, hidden_dim=400, num_layers=1, dropout=0):\r\n        super(BiLSTM, self).__init__()\r\n        self.lstm = nn.LSTM(input_size=in_dim, hidden_size=hidden_dim, \r\n                            num_layers=num_layers, dropout=dropout, bidirectional=True)\r\n    def forward(self, x):\r\n        # seq_len, batch, input_size\r\n        x = x.squeeze(0)\r\n        x = x.permute([1,2,0])\r\n        output,(h_n, c_n) = self.lstm(x)\r\n        output = output.permute([2,0,1]).unsqueeze(0)\r\n        return output\r\n\r\nclass BooleanMask(nn.Module):\r\n    \"\"\"\r\n    Get upper triangle elements\r\n    \"\"\"\r\n    def __init__(self, dim=48):\r\n        super(BooleanMask, self).__init__()\r\n        self.elu = nn.ELU()\r\n        self.in1 = nn.InstanceNorm2d(num_features=dim)\r\n    def forward(self, x):\r\n        y = self.elu(x)\r\n        y = self.in1(y)\r\n        batch,channel,H,W = y.shape\r\n        n = H\r\n        mask = t.triu(t.ones(n,n),diagonal=2)\r\n        return y[:,:,mask==1].squeeze().t()\r\n\r\nclass FCL(nn.Module):\r\n    \"\"\"\r\n    Fully connected layer\r\n    \"\"\"\r\n    def 
__init__(self, in_dim=48, hidden_layers=2, hidden_dim=512):\r\n super(FCL, self).__init__()\r\n \r\n self.hidden_layers = hidden_layers\r\n for i in range(1, self.hidden_layers+1):\r\n if i==1:\r\n setattr(self, 'fc1', nn.Linear(in_features=in_dim, out_features=hidden_dim) )\r\n else:\r\n setattr(self, 'fc'+str(i), nn.Linear(in_features=hidden_dim, out_features=hidden_dim) )\r\n setattr(self, 'dropout'+str(i), nn.Dropout(p=0.5) )\r\n setattr(self, 'bn'+str(i), nn.BatchNorm1d(num_features=hidden_dim) )\r\n setattr(self, 'elu'+str(i), nn.ELU() )\r\n \r\n if self.hidden_layers:\r\n self.fc_end = nn.Linear(in_features=hidden_dim, out_features=1)\r\n else:\r\n self.fc_end = nn.Linear(in_features=in_dim, out_features=1)\r\n def forward(self,x):\r\n y = x\r\n for i in range(1, self.hidden_layers+1):\r\n y = getattr(self, 'fc'+str(i))( y )\r\n y = getattr(self, 'elu'+str(i))( y )\r\n y = getattr(self, 'dropout'+str(i))( y )\r\n y = getattr(self, 'bn'+str(i))( y )\r\n y = self.fc_end(y)\r\n return y\r\n\r\n\r\n","repo_name":"lipan6461188/SPOT-RNA","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"7448233622","text":"import numpy as np\n# import pygame\nnp.random.seed(1234)\nimport os\nimport tkinter as tk\n\n\ndef update_grid(grid):\n x_dim, y_dim = grid.shape\n grid[grid != -1] = 0 # Reset all non-bomb squares to 0\n \n # Loop through all squares and add 1 for all adjacent bombs\n for i in range(x_dim):\n for j in range(y_dim):\n if grid[i][j] != -1:\n # Add 1 to all adjacent squares:\n for k in range(-1, 2):\n for l in range(-1, 2):\n if i + k >= 0 and i + k < x_dim and j + l >= 0 and j + l < y_dim:\n if grid[i + k][j + l] == -1:\n grid[i][j] += 1\n return grid\n\n\ndef create_grid(x_dim, y_dim, mines):\n # Create a grid of size x by y filled with zeroes\n grid = np.zeros((x_dim, y_dim), dtype=int)\n\n # Place z # bombs randomly on the grid\n for _ in range(mines):\n x_coord = np.random.randint(0, x_dim)\n y_coord = np.random.randint(0, y_dim)\n grid[x_coord][y_coord] = -1\n \n update_grid(grid)\n \n return grid\n\n\ndef safe_first_click(grid, x, y):\n if grid[x][y] == -1:\n grid[x][y] = 0\n \n for i in range(grid.shape[0]):\n if grid[i, 0] != -1:\n grid[i, 0] = -1\n break\n\n return update_grid(grid)\n\n\ndef uncover_grid(input_coord, grid, grid_visible):\n neighbors_to_check = []\n \n if grid[input_coord] == -1:\n grid_visible[input_coord] = \"X\"\n return grid_visible, []\n elif grid[input_coord] != 0:\n grid_visible[input_coord] = str(grid[input_coord])\n return grid_visible, []\n elif grid[input_coord] == 0:\n grid_visible[input_coord] = \" \"\n cond = lambda x, y: x >= 0 and x < grid.shape[0] and y >= 0 and y < grid.shape[1]\n neighbors = [(input_coord[0]+dx, input_coord[1]+dy) for dx in range(-1, 2) for dy in range(-1, 2) if cond(input_coord[0]+dx, input_coord[1]+dy)]\n \n for coord in neighbors:\n if grid_visible[coord] == \"-\":\n grid_visible[coord] = \"o\"\n neighbors_to_check.append(coord)\n return grid_visible, neighbors_to_check\n \n \n # uncover_grid(neighbor, grid, grid_visible, neighbors_to_check)\n \n # if grid_visible[neighbor] == \"-\":\n # grid_visible = uncover_grid(neighbor, grid, grid_visible)\n\n\ngrid_print = lambda grid: print(np.array2string(grid, separator=' ', formatter={'str_kind': lambda x: x if x else ' '}))\n\nif __name__ == \"__main__\":\n pass\n grid = create_grid(15, 15, 50)\n grid_visible = np.full(grid.shape, 
\"-\", dtype=str)\n grid_print(grid_visible)\n \n # # Create the main window\n # window = tk.Tk()\n\n # # Function to be called when a button is clicked\n # def update_buttons():\n \n \n # def button_clicked(x, y):\n # coord_list.append((x, y))\n \n\n # buttons = []\n\n # # Create a grid of buttons\n # for x in range(grid.shape[0]):\n # row = []\n # for y in range(grid.shape[1]):\n # button = tk.Button(window, text=f\"{grid_visible[x, y]}\", command=lambda x=x, y=y: button_clicked(x, y))\n # button.grid(row=x, column=y)\n # row.append(button)\n # buttons.append(row)\n\n # # Run the main loop\n # window.mainloop()\n \n \n coord_list = [tuple([int(num) for num in input().split(\",\")])]\n \n print(grid_visible)\n grid = safe_first_click(grid, *coord_list[0])\n \n while True: \n i = 0\n while i < len(coord_list):\n grid_visible, new_coords = uncover_grid(coord_list[i], grid, grid_visible)\n for coord in new_coords:\n coord_list.append(coord)\n i+=1\n \n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n print(grid_visible)\n\n \n coord_list = [tuple([int(num) for num in input().split(\",\")])]\n \n\n","repo_name":"AlecHero/Project_Minesweeper","sub_path":"Try/simple_minesweeper.py","file_name":"simple_minesweeper.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"1886715114","text":"from flask import *\n\napp = Flask(__name__,template_folder=\"theme\")\n\nimport read\nimport md2html\n\nconfig = read.get_yaml_data(\"settings.yaml\")\n\n\n\ntitle = config.get(\"title\")\nsubtitle = config.get(\"subtitle\")\nsubscription = config.get(\"subscription\")\nname = config.get(\"name\")\nblog = config.get(\"blog\")\ntheme = config.get(\"theme\")\nthemedir1 = \"theme/\"+theme+\"/template-1.html\"\nthemedir2 = \"theme/\"+theme+\"/template-2.html\"\nprint(themedir1+\"|\"+themedir2)\naboutpage = md2html.about_page_generate()\nhistorypage = md2html.history_page_generate()\nfriendspage = md2html.friends_page_generate()\nindexpage = f'''
<h1>{title}</h1>\n<p>{subscription}</p>\n<footer>{title}</footer>'''\ndef about_full():\n    upAbout = open(themedir1,encoding=\"utf-8\")\n    upAbout = upAbout.read()\n\n    downAbout = open(themedir2,encoding=\"utf-8\")\n    downAbout = downAbout.read()\n\n    aboutContent = f\"<title>{title}</title>\"+upAbout+aboutpage+downAbout\n    return aboutContent\n\ndef index_full():\n    upAbout = open(themedir1,encoding=\"utf-8\")\n    upAbout = upAbout.read()\n\n    downAbout = open(themedir2,encoding=\"utf-8\")\n    downAbout = downAbout.read()\n\n    indexContent = f\"<title>{title}</title>\"+upAbout+indexpage+downAbout\n    return indexContent\n\ndef history_full():\n    upAbout = open(themedir1,encoding=\"utf-8\")\n    upAbout = upAbout.read()\n\n    downAbout = open(themedir2,encoding=\"utf-8\")\n    downAbout = downAbout.read()\n\n    indexContent = f\"<title>{title}</title>\"+upAbout+historypage+downAbout\n    return indexContent\n\ndef friends_full():\n    upAbout = open(themedir1,encoding=\"utf-8\")\n    upAbout = upAbout.read()\n\n    downAbout = open(themedir2,encoding=\"utf-8\")\n    downAbout = downAbout.read()\n\n    indexContent = f\"<title>{title}</title>\"+upAbout+friendspage+downAbout\n    return indexContent\n\nfullindex = index_full()\n\nfullabout = about_full()\n\nfullhistory = history_full()\n\nfullfriends = friends_full()\n\n@app.route('/')\ndef index():\n    return fullindex\n@app.route(\"/jump/blog\")\ndef jump():\n    return redirect(blog)\n\n@app.route(\"/about/\")\ndef about():\n    return fullabout\n\n@app.route(\"/history/\")\ndef his():\n    return fullhistory\n\n@app.route(\"/friends/\")\ndef fri():\n    return fullfriends\n\n\n\ndef Run():\n\n    app.run(port=80)\n","repo_name":"Fat-Man-DJ/FlaskStatic","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"}
{"seq_id":"23511643852","text":"def parse(paths):\n    cave = set()\n    for path in paths:\n        lines = path.split(\" -> \")\n        for i in range(len(lines) - 1):\n            a, b = list(map(int, lines[i + 1].split(\",\")))\n            x, y = list(map(int, lines[i].split(\",\")))\n            if x == a:\n                for k in range(y, b + 1):\n                    cave.add((x, k))\n                for k in range(b, y + 1):\n                    cave.add((x, k))\n            elif y == b:\n                for k in range(x, a + 1):\n                    cave.add((k, y))\n                for k in range(a, x + 1):\n                    cave.add((k, y))\n            else:\n                raise Exception(\"x != a and y != b\")\n    return cave\n\n\ndef basic(cave):\n    initial = len(cave)\n    max_vertical_position = max([position[1] for position in list(cave)])\n    while True:\n        i, j = 500, 0\n        while True:\n            if j > max_vertical_position:\n                return len(cave) - initial\n            elif (i, j + 1) not in cave:\n                j += 1\n            elif (i - 1, j + 1) not in cave:\n                i -= 1\n                j += 1\n            elif (i + 1, j + 1) not in cave:\n                i += 1\n                j += 1\n            else:\n                cave.add((i, j))\n                break\n\n\ndef advanced(cave):\n    initial = len(cave)\n    max_vertical_position = max([position[1] for position in list(cave)]) + 1\n    while True:\n        i, j = 500, 0\n        if (i, j) in cave:\n            return len(cave) - initial\n        while True:\n            if j == max_vertical_position:\n                cave.add((i, j))\n                break\n            elif (i, j + 1) not in cave:\n                j += 1\n            elif (i - 1, j + 1) not in cave:\n                i -= 1\n                j += 1\n            elif (i + 1, j + 1) not in cave:\n                i += 1\n                j += 1\n            else:\n                cave.add((i, j))\n                break\n\n\nif __name__ == \"__main__\":\n    with open(\"input.txt\") as f:\n        lines = f.read().strip()\n\n    caves = parse([line for line in lines.split(\"\\n\")])\n\n    part1 = basic(caves)\n    print(\"First part:\", part1)\n\n    caves = parse([line for line in lines.split(\"\\n\")])\n    part2 = advanced(caves)\n    print(\"Second part:\", 
part2)\n","repo_name":"alexcosta13/advent-of-code-2022","sub_path":"day14/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"10945416458","text":"\"\"\"\nIntegration tests for the model API\n\"\"\"\n\nfrom datetime import datetime\n\nfrom pynamodb.models import Model\nfrom pynamodb.indexes import GlobalSecondaryIndex, AllProjection, LocalSecondaryIndex\nfrom pynamodb.attributes import (\n UnicodeAttribute, BinaryAttribute, UTCDateTimeAttribute, NumberSetAttribute, NumberAttribute,\n VersionAttribute)\n\nimport pytest\n\n\nclass LSIndex(LocalSecondaryIndex):\n \"\"\"\n A model for the local secondary index\n \"\"\"\n class Meta:\n projection = AllProjection()\n forum = UnicodeAttribute(hash_key=True)\n view = NumberAttribute(range_key=True)\n\n\nclass GSIndex(GlobalSecondaryIndex):\n \"\"\"\n A model for the secondary index\n \"\"\"\n class Meta:\n projection = AllProjection()\n read_capacity_units = 2\n write_capacity_units = 1\n epoch = UTCDateTimeAttribute(hash_key=True)\n\n\n@pytest.mark.ddblocal\ndef test_model_integration(ddb_url):\n\n class TestModel(Model):\n \"\"\"\n A model for testing\n \"\"\"\n class Meta:\n region = 'us-east-1'\n table_name = 'pynamodb-ci'\n host = ddb_url\n forum = UnicodeAttribute(hash_key=True)\n thread = UnicodeAttribute(range_key=True)\n view = NumberAttribute(default=0)\n view_index = LSIndex()\n epoch_index = GSIndex()\n epoch = UTCDateTimeAttribute(default=datetime.now)\n content = BinaryAttribute(null=True, legacy_encoding=False)\n scores = NumberSetAttribute()\n version = VersionAttribute()\n\n if TestModel.exists():\n TestModel.delete_table()\n TestModel.create_table(read_capacity_units=1, write_capacity_units=1, wait=True)\n\n obj = TestModel('1', '2')\n obj.save()\n obj.refresh()\n obj = TestModel('foo', 'bar')\n obj.save()\n TestModel('foo2', 'bar2')\n obj3 = TestModel('setitem', 'setrange', scores={1, 2.1})\n obj3.save()\n obj3.refresh()\n\n with TestModel.batch_write() as batch:\n items = [TestModel('hash-{}'.format(x), '{}'.format(x)) for x in range(10)]\n for item in items:\n batch.save(item)\n\n item_keys = [('hash-{}'.format(x), 'thread-{}'.format(x)) for x in range(10)]\n\n for item in TestModel.batch_get(item_keys):\n print(item)\n\n for item in TestModel.query('setitem', TestModel.thread.startswith('set')):\n print(\"Query Item {}\".format(item))\n\n with TestModel.batch_write() as batch:\n items = [TestModel('hash-{}'.format(x), '{}'.format(x)) for x in range(10)]\n for item in items:\n print(\"Batch delete\")\n batch.delete(item)\n\n for item in TestModel.scan():\n print(\"Scanned item: {}\".format(item))\n\n tstamp = datetime.now()\n query_obj = TestModel('query_forum', 'query_thread')\n query_obj.forum = 'foo'\n query_obj.save()\n query_obj.update([TestModel.view.add(1)])\n for item in TestModel.epoch_index.query(tstamp):\n print(\"Item queried from index: {}\".format(item))\n\n for item in TestModel.view_index.query('foo', TestModel.view > 0):\n print(\"Item queried from index: {}\".format(item.view))\n\n query_obj.update([TestModel.scores.set([])])\n query_obj.refresh()\n assert query_obj.scores is None\n\n print(query_obj.update([TestModel.view.add(1)], condition=TestModel.forum.exists()))\n TestModel.delete_table()\n\n\ndef test_can_inherit_version_attribute(ddb_url) -> None:\n\n class TestModelA(Model):\n \"\"\"\n A model for testing\n \"\"\"\n\n class Meta:\n region = 'us-east-1'\n table_name = 
'pynamodb-ci-a'\n host = ddb_url\n\n forum = UnicodeAttribute(hash_key=True)\n thread = UnicodeAttribute(range_key=True)\n scores = NumberAttribute()\n version = VersionAttribute()\n\n class TestModelB(TestModelA):\n class Meta:\n region = 'us-east-1'\n table_name = 'pynamodb-ci-b'\n host = ddb_url\n\n with pytest.raises(ValueError) as e:\n class TestModelC(TestModelA):\n class Meta:\n region = 'us-east-1'\n table_name = 'pynamodb-ci-c'\n host = ddb_url\n\n version_invalid = VersionAttribute()\n assert str(e.value) == 'The model has more than one Version attribute: version, version_invalid'\n","repo_name":"pynamodb/PynamoDB","sub_path":"tests/integration/model_integration_test.py","file_name":"model_integration_test.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":2311,"dataset":"github-code","pt":"13"} +{"seq_id":"30707054808","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom .models import Article\n\n\nclass ArticleSerializer(serializers.ModelSerializer):\n created_by = serializers.ReadOnlyField(source='created_by.username')\n\n class Meta:\n model = Article\n fields = '__all__'\n\nclass UserSerializer(serializers.ModelSerializer):\n articles = serializers.PrimaryKeyRelatedField(many=True, queryset=Article.objects.all())\n\n class Meta:\n model = User\n fields = ('id', 'username', 'articles')\n","repo_name":"Akshita07/JournalBook","sub_path":"JournalBook/article/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"28371868499","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom dataset import bAbIDataset, bAbIDataLoader\nfrom model import GGNN\n\n# Task ID to use out of the bAbI tasks. Task 19 is path finding and according\n# to the paper is \"arguably the hardest task\".\ntask_id = 19\n\n# Batch size for training\nbatch_size = 10\n\n# Some of the bAbI tasks (all of the ones we have here) have multiple question\n# types.\nquestion_id = 0\n\n# Use the GPU?\nuse_cuda = False\n\n# If we should log training loss to output.\nshould_log_train = False\n\n# Learning rate for training\nlr = 0.01\n\n# Number of epochs for training\nn_epochs = 10\n\n# GGNN hidden state size\nstate_dim = 4\n\n# Number of propogation steps\nn_steps = 5\n\n# Annotation dimension. 
For the bAbi tasks we have one hot encoding per node.\nannotation_dim = 1\n\n# One fold of our preprocessed dataset.\ndataset_path = 'babi_data/processed_1/train/%d_graphs.txt' % task_id\n\ntrain_dataset = bAbIDataset(dataset_path, question_id=0, is_train=True)\ntrain_data_loader = bAbIDataLoader(train_dataset, batch_size=batch_size,\n        shuffle=True, num_workers=2)\n\ntest_dataset = bAbIDataset(dataset_path, question_id=0, is_train=False)\ntest_data_loader = bAbIDataLoader(test_dataset, batch_size=batch_size,\n        shuffle=False, num_workers=2)\n\nn_edge_types = train_dataset.n_edge_types\nn_nodes = train_dataset.n_node\n\n# The dataset has the form: [(adjacency matrix, annotation, target), ...]\nggnn = GGNN(state_dim, annotation_dim, n_edge_types, n_nodes, n_steps)\n\n# The dataset is all doubles so convert the model to be double\nggnn = ggnn.double()\n\ncrit = nn.CrossEntropyLoss()\n\nif use_cuda:\n    ggnn = ggnn.cuda()\n    crit = crit.cuda()\n\nopt = optim.Adam(ggnn.parameters(), lr=lr)\n\ndef model_inference(ggnn, adj_matrix, annotation, target):\n    padding = torch.zeros(len(annotation), n_nodes, state_dim -\n            annotation_dim).double()\n\n    # See section 3.1 of the paper for how we create the node annotations.\n    init_input = torch.cat((annotation, padding), 2)\n\n    if use_cuda:\n        init_input = init_input.cuda()\n        adj_matrix = adj_matrix.cuda()\n        annotation = annotation.cuda()\n        target = target.cuda()\n\n    output = ggnn(init_input, annotation, adj_matrix)\n\n    return output, target\n\n\nfor epoch in range(n_epochs):\n    # Train\n    ggnn.train()\n    for i, (adj_matrix, annotation, target) in enumerate(train_data_loader):\n        # Adjacency matrix will have shape [batch_size, n_nodes, 2 * n_nodes * n_edge_types]\n        ggnn.zero_grad()\n\n        output, target = model_inference(ggnn, adj_matrix, annotation, target)\n        loss = crit(output, target)\n\n        loss.backward()\n        opt.step()\n\n        if should_log_train:\n            print('[%i / %i], [%i / %i] Loss: %.4f' % (epoch, n_epochs, i,\n                len(train_data_loader), loss.data))\n\n    # Evaluate performance over validation dataset.\n    ggnn.eval()\n    test_loss = 0\n    correct = 0\n    for adj_matrix, annotation, target in test_data_loader:\n        output, target = model_inference(ggnn, adj_matrix, annotation, target)\n\n        test_loss += crit(output, target).data\n        pred = output.data.max(1, keepdim=True)[1]\n\n        correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n    test_loss /= len(test_dataset)\n\n    print('[%i, %i] Val: Avg Loss %.4f, Accuracy %i/%i' % (epoch, n_epochs, test_loss,\n        correct, len(test_dataset)))\n\n","repo_name":"ASzot/ggnn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"13"}
{"seq_id":"4210390830","text":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def increasingBST(self, root: TreeNode) -> TreeNode:\n        arr = []\n\n        def inorder_traversal(root):\n            if root:\n                inorder_traversal(root.left)\n                arr.append(root.val)\n                inorder_traversal(root.right)\n\n        inorder_traversal(root)\n\n        new_tree = TreeNode(val=arr[0])\n\n        # Use a marker to track our tree location in memory.\n        temp = new_tree\n\n        # Iterate through our vals, creating a new right node with the current val.\n        for i in arr[1:]:\n            temp.right = TreeNode(val=i)\n\n            # Move the marker to the next node.\n            temp = temp.right\n\n        return new_tree
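\n\n# Complexity note: the inorder pass materializes all n node values, so the\n# rebuild runs in O(n) time with O(n) auxiliary space.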
\n","repo_name":"mhasan09/leetCode_M","sub_path":"increasing_order_search_tree.py","file_name":"increasing_order_search_tree.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"15575401413","text":"import random\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom ...factories import (\n    UserFactory,\n    TagFactory, QuestionFactory, AnswerFactory, CommentFactory)\nfrom ...models import User, Tag, Question, Voting, Answer, Comment\n\nNUM_USERS = 20\nNUM_TAGS = 10\n\nNUM_QUESTIONS = 20\nNUM_ANSWERS = 30\nNUM_COMMENTS = 30\n\nQUESTION_TAG_MAX = 3\n\n\nclass Command(BaseCommand):\n    help = \"Generates test data\"\n\n    @transaction.atomic\n    def handle(self, *args, **kwargs):\n        self.stdout.write(\"Deleting old data...\")\n        Question.tag.through.objects.all().delete()\n        models = [Comment, Answer, Voting, Question, Tag, User]\n        for m in models:\n            m.objects.all().delete()\n\n        self.stdout.write(\"Creating new data...\")\n\n        people = []\n        for x in range(NUM_USERS):\n            person = UserFactory()\n            people.append(person)\n\n        tags = []\n        for _ in range(NUM_TAGS):\n            tag = TagFactory()\n            tags.append(tag)\n\n        questions = []\n        for _ in range(NUM_QUESTIONS):\n            question = QuestionFactory()\n            tag = random.choices(\n                tags,\n                k=QUESTION_TAG_MAX\n            )\n            question.tag.add(*tag)\n            questions.append(question)\n\n        answers = []\n        for _ in range(NUM_ANSWERS):\n            question = random.choice(questions)\n            answer = AnswerFactory(question=question, is_useful=random.choice([True, False]))\n            answers.append(answer)\n\n        comments = []\n        for _ in range(NUM_COMMENTS):\n            answer = random.choice(answers)\n            comment = CommentFactory(answer=answer)\n            comments.append(comment)\n        self.stdout.write(\"New data successfully created\")\n","repo_name":"Mulyarchik/forum","sub_path":"backends/management/commands/setup_test_data.py","file_name":"setup_test_data.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"12430046178","text":"# This is a Python Program to find the GCD of two numbers.\n\nn1 = int(input(\"\\nEnter First Number = \"))\nn2 = int(input(\"Enter Second Number = \"))\n\na = []\n\nif (n1>n2):\n    r = n1\nelse:\n    r = n2\n\nfor i in range (1, r+1):\n    if (n1%i == 0 and n2%i == 0):\n        a.append(i)\n    \nprint(\"------------------------\")\nprint(\"GCD of {0} and {1} = {2}\".format(n1,n2,a[-1]))\nprint(\"------------------------\")\n\n# import math\n# print(\"The GCD of the two numbers is\", math.gcd(n1,n2))\n","repo_name":"Prashant1099/Python-Programming-Examples-on-Mathematical-Functions","sub_path":"18. Find the GCD of Two Numbers.py","file_name":"18. Find the GCD of Two Numbers.py"
,"file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"14833692996","text":"import time\nfrom time import sleep\n\nfrom pylgbst import *\nfrom pylgbst.hub import MoveHub\nfrom pylgbst.peripherals import EncodedMotor, TiltSensor, Current, Voltage, COLORS, COLOR_BLACK\n\nlog = logging.getLogger(\"demo\")\n\ndef demo_led_colors(movehub):\n    # LED colors demo\n    log.info(\"LED colors demo\")\n\n    # We get a response with payload and port, not x and y here...\n    def colour_callback(named):\n        log.info(\"LED Color callback: %s\", named)\n\n    movehub.led.subscribe(colour_callback)\n    for color in list(COLORS.keys())[1:] + [COLOR_BLACK]:\n        log.info(\"Setting LED color to: %s\", COLORS[color])\n        movehub.led.set_color(color)\n        sleep(1)\n\n    #movehub.led.unsubscribe(colour_callback)\n\ndef demo_motors_timed(movehub):\n    log.info(\"Motors movement demo: timed\")\n    for level in range(0, 101, 10):\n        level /= 100.0\n        log.info(\"Speed level: %s%%\", level * 100)\n        movehub.motor_A.timed(0.2, level)\n        movehub.motor_B.timed(0.2, -level)\n    movehub.motor_AB.timed(1.5, -0.2, 0.2)\n    movehub.motor_AB.timed(0.5, 1)\n    movehub.motor_AB.timed(0.5, -1)\n\ndef demo_voltage(movehub):\n    def callback1(value):\n        log.info(\"Amperage: %s\", value)\n\n    def callback2(value):\n        log.info(\"Voltage: %s\", value)\n\n    movehub.current.subscribe(callback1, mode=Current.CURRENT_L, granularity=0)\n    movehub.current.subscribe(callback1, mode=Current.CURRENT_L, granularity=1)\n\n    movehub.voltage.subscribe(callback2, mode=Voltage.VOLTAGE_L, granularity=0)\n    movehub.voltage.subscribe(callback2, mode=Voltage.VOLTAGE_L, granularity=1)\n    time.sleep(5)\n    movehub.current.unsubscribe(callback1)\n    movehub.voltage.unsubscribe(callback2)\n\ndef demo_color_sensor(movehub):\n    log.info(\"Color sensor test: wave your hand in front of it\")\n    count = 0\n    limit = 20\n\n    def callback(color):\n        log.info(\"Color: %s\",color[0])\n\n    movehub.vision_sensor.subscribe(callback)\n    while count < limit:\n        count+=1\n        time.sleep(1)\n\n    movehub.vision_sensor.unsubscribe(callback)\n\ndef demo_tilt_sensor_precise(movehub):\n    log.info(\"Tilt sensor precise test. Turn device in different ways.\")\n    cnt = 0\n    limit = 10\n\n    def callback(Mylist): \n        log.info(\"Tilt:%s, %s, %s\",Mylist[0],Mylist[1],Mylist[2])\n\n    movehub.tilt_sensor.subscribe(callback, mode=TiltSensor.MODE_3AXIS_ACCEL)\n    while cnt < limit:\n        cnt += 1\n        time.sleep(0.1)\n\n    movehub.tilt_sensor.unsubscribe(callback)
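\n\n# MODE_3AXIS_ACCEL above streams raw accelerometer triples, while the simple\n# mode below maps readings onto the discrete TRI_STATES orientations.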
\ndef demo_tilt_sensor_simple(movehub):\n    log.info(\"Tilt sensor simple test. Turn device in different ways.\")\n    cnt = 0\n    limit = 10\n\n    def callback(state):\n        log.info(\"Tilt: %s=%s\", TiltSensor.TRI_STATES[state[0]], state[0])\n\n    movehub.tilt_sensor.subscribe(callback, mode=TiltSensor.MODE_3AXIS_SIMPLE)\n    while cnt < limit:\n        cnt += 1\n        time.sleep(1)\n\n    movehub.tilt_sensor.unsubscribe(callback)\n\n\n\ndef demo_port_cd_motor(movehub):\n    motor = None\n    if isinstance(movehub.port_D, EncodedMotor):\n        log.info(\"Rotation motor is on port D\")\n        motor = movehub.port_D\n    elif isinstance(movehub.port_C, EncodedMotor):\n        log.info(\"Rotation motor is on port C\")\n        motor = movehub.port_C\n    else:\n        log.info(\"Motor not found on ports C or D\")\n\n    if motor:\n        motor.angled(20, 0.2)\n        sleep(3)\n        motor.angled(20, -0.2)\n        sleep(1)\n\n        motor.angled(20, -0.1)\n        sleep(2)\n        motor.angled(20, 0.1)\n        sleep(1)\n\n\n\nif __name__ == '__main__':\n    parameters = {}\n    logging.basicConfig(level=logging.INFO)  # only the first basicConfig call takes effect\n    hub = MoveHub(**parameters)\n    demo_led_colors(hub)\n    demo_voltage(hub)\n    demo_motors_timed(hub)\n    demo_color_sensor(hub)\n    demo_port_cd_motor(hub)\n    #demo_tilt_sensor_precise(hub)\n    demo_tilt_sensor_simple(hub)\n    #\n\n\n    ","repo_name":"phildefer/lego-micropython-stm32","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"27518514611","text":"import networkx as nx\nimport random\nimport pylab as py\nfrom math import floor\n\nG = nx.complete_graph(20)\n\nfor edge in G.edges():\n    if floor(edge[0]/5.)!=floor(edge[1]/5.):\n        if random.random()<0.95:\n            G.remove_edge(edge[0],edge[1])\n\n\n\n\nfixedpos = {1:(1,1), 6:(-1,-1), 11:(-1,1), 16:(1,-1)}\n\nprint(fixedpos.keys())\npos = nx.spring_layout(G, fixed = fixedpos.keys(), pos = fixedpos)\n\nnx.draw_networkx(G, pos=pos)\n\npy.show()","repo_name":"pranavtbhat/SocialGraphs","sub_path":"basic/sample1.py","file_name":"sample1.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"30594642507","text":"# TODO: Improve logging utilities\nimport csv, os, numpy, threading\nimport client_config as client_config\nfrom scapy.all import *\n\n# This works for now but I will probably need to \n# implement a storage cache to avoid rampant \n# memory usage\nEXPORT_DIR = client_config.EXPORT_DIR\nlogged_packets = []\ninterface = client_config.INTERFACE\nt = AsyncSniffer(iface=interface)\n\n\n# Captures a fixed number of packets synchronously\ndef live_capture_for_packet_count(packet_count):\n    sniff(iface=interface, prn=lambda x: log_packet(x), count=packet_count)\n\n\ndef recv_packet_callback(packet):\n    print(\"Packet intercepted: \\n%s\" % packet)\n    log_packet(packet)\n\n\n# Captures packets until stop_condition becomes truthy, then logs the results.\n# A plain bool can never change here, so pass a mutable flag (sketch below).
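\n# Usage sketch (hypothetical flag object; not part of this module):\n#     stop_flag = []\n#     threading.Thread(target=async_packet_capture, args=(stop_flag,)).start()\n#     ...\n#     stop_flag.append(True)   # list turns truthy -> sniffer stops and logs results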
\ndef async_packet_capture(stop_condition):\n    t.start()\n    while True:\n        if stop_condition:\n            t.stop()\n            for result in t.results:\n                log_packet(result)\n            break\n\n\n# Captures packets for x seconds in a synchronous manner\ndef sync_packet_capture_for(seconds):\n    print(\"Capturing packets...\")\n    t.start()\n    time.sleep(float(seconds))\n    t.stop()\n    for result in t.results:\n        log_packet(result)\n    print(\"Done capturing packets\")\n\n\n# Captures packets for x seconds in an asynchronous manner\n# Since it is asynchronous, the boolean argument\n# export can be used to specify an automatic export\n# at the end of execution\ndef async_packet_capture_for(seconds, export, wipe):\n    print(\"Capturing packets...\")\n    # This executes once seconds has passed\n\n    def done():\n        print(\"Done capturing packets\")\n        t.stop()\n        for result in t.results:\n            log_packet(result)\n        if export:\n            export_packet_log(wipe)\n\n    timer = threading.Timer(seconds, done)\n    t.start()\n    timer.start()\n\n\n# Exports the logged packets to a pcap file\n# with a date-time filename\ndef export_packet_log(wipe):\n    print(\"Exporting packet logs\")\n    filename = \"Capture: %s\" % (time.strftime(\"%Y%m%d-%H%M%S\"))\n    export_path = os.path.join(EXPORT_DIR, filename)\n    wrpcap(export_path, logged_packets)\n    if wipe:\n        wipe_log()\n\n\n# Exports the logged packets to a pcap file\n# with a specified filename\ndef export_packet_log_with_name(filename, wipe):\n    print(\"Exporting packet logs\")\n    print(logged_packets)\n    export_path = os.path.join(EXPORT_DIR, filename)\n    wrpcap(export_path, logged_packets)\n    if wipe:\n        wipe_log()\n\n\ndef live_capture_with_callback_on_condition(condition):\n    while condition:\n        live_capture_with_callback(recv_packet_callback)\n\n\ndef live_capture_with_callback(callback):\n    sniff(iface=interface, prn=callback)\n\n\n# Appends the packet to the in-memory log; a storage cache could replace this\n# later (see the TODO at the top of the file)\ndef log_packet(packet):\n    logged_packets.append(packet)\n\n\n# Wipe cache and memory\ndef wipe_log():\n    logged_packets.clear()\n","repo_name":"sandumjacob/Dynamic-NIDS-Evaluation-Utility","sub_path":"src/logging_util.py","file_name":"logging_util.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"6234139697","text":"import numpy as np\nfrom torch import optim\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\n\ntokenizer = GPT2Tokenizer.from_pretrained(\"gpt2-xl\")\ntokenizer.pad_token = tokenizer.eos_token\n\ndef _make_model():\n    return GPT2LMHeadModel.from_pretrained(\"gpt2-xl\").cuda()\n\nclass GPT2ModelWrapper:\n    def __init__(self, model, preprocessor=lambda x: x):\n        self.model = model\n        self.preprocessor = preprocessor\n\n    def __call__(self, seq):\n        seq_preproc = self.preprocessor(seq)\n        seq_tok = tokenizer(seq_preproc, return_tensors=\"pt\").input_ids.cuda()\n        return self.model(seq_tok, labels=seq_tok).loss.item()\n\nclass NullUpdater:\n    def __init__(self):\n        self.model = _make_model()\n        self.model.eval()\n\n    def __call__(self, task, supervision, targets):\n        return GPT2ModelWrapper(self.model)\n\nclass PromptUpdater:\n    def __init__(self):\n        self.model = _make_model()\n        self.model.eval()\n\n    def __call__(self, task, supervision, targets):\n        def preprocessor(seq):\n            return task.make_evaluation_prompt(supervision) + \" \" + seq\n        return GPT2ModelWrapper(self.model, preprocessor)\n\nclass ExampleUpdater:\n    def __init__(self, extrapolator):\n        self.extrapolator = extrapolator\n        self.random = np.random.RandomState(0)\n\n    def 
__call__(self, task, supervision, targets):\n extrap_examples = self.extrapolator(task, supervision)\n print(\"\\n\".join(extrap_examples))\n model = _make_model()\n model.eval()\n opt = optim.AdamW(model.parameters(), lr=3e-6)\n for i in range(0, len(extrap_examples), 4):\n ft_batch = tokenizer(\n #self.random.choice(extrap_examples, size=4).tolist(),\n extrap_examples[i:i+4],\n return_tensors=\"pt\",\n padding=True,\n ).input_ids.cuda()\n print(task.evaluate(GPT2ModelWrapper(model), targets))\n loss = model(ft_batch, labels=ft_batch).loss\n opt.zero_grad()\n loss.backward()\n opt.step()\n del opt\n return GPT2ModelWrapper(model)\n","repo_name":"feyzaakyurek/nl_sup","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"40967737009","text":"lines = [line.strip() for line in open('input').readlines()]\n\nf = 0\nd = set()\nd.add(f)\nwhile True:\n for line in lines:\n f += int(line)\n if f in d:\n print(f)\n exit(1)\n d.add(f)\n","repo_name":"skgbanga/AOC","sub_path":"2018/01/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"74289539537","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 18 11:28:52 2023\r\n\r\n@author: limyu\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 18 10:06:44 2023\r\n\r\n@author: limyu\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.ticker import StrMethodFormatter\r\nfrom scipy import interpolate\r\nfrom scipy.signal import chirp, find_peaks, peak_widths\r\nimport statistics\r\n\r\nurl = \"https://raw.githubusercontent.com/yd145763/mixed_pitch_grating_publication/main/different%20mixture%204%20beams%20near%20ref%20and%20a%20beam%203800-3900cnts%204x%2090mA.csv\"\r\ndf = pd.read_csv(\"https://raw.githubusercontent.com/yd145763/mixed_pitch_grating_publication/main/different%20mixture%204%20beams%20near%20ref%20and%20a%20beam%203800-3900cnts%204x%2090mA.csv\")\r\n \r\ndf=df.dropna(axis=1)\r\ndf_r = df.iloc[228:252, 210:244]\r\ndf_r = df_r.reset_index(drop=True)\r\ndf_r.columns = range(df_r.shape[1])\r\nH = np.arange(0,24,1)\r\nV = np.arange(0,34,1)\r\n\r\nmax_df_r = df_r.max().max()\r\nis_duplicate = df.eq(max_df_r)\r\n\r\nrow_idxs, col_idxs = np.where(df_r == max_df_r)\r\nprint(len(row_idxs))\r\nprint(len(col_idxs))\r\nfor row_idx, col_idx in zip(row_idxs, col_idxs):\r\n print(\"widest row index\", row_idx)\r\n print(\"widest column index\", col_idx)\r\n print(\" \")\r\nxr = np.linspace(0, 990, num=34)\r\nxr = xr/20\r\nyr = np.linspace(0, 690, num=24)\r\nyr = yr/20\r\ncolorbarmax = 5000\r\ncolorbartick = 5\r\n\r\nXr,Yr = np.meshgrid(xr,yr)\r\ndf_r = df_r.to_numpy()\r\nfig = plt.figure(figsize=(8, 4))\r\nax = plt.axes()\r\ncp=ax.contourf(Xr,Yr,df_r, 200, zdir='z', offset=-100, cmap='jet')\r\nclb=fig.colorbar(cp, ticks=(np.arange(0, colorbarmax, 200)).tolist())\r\nclb.ax.set_title('Photon/s', fontweight=\"bold\")\r\nfor l in clb.ax.yaxis.get_ticklabels():\r\n l.set_weight(\"bold\")\r\n l.set_fontsize(15)\r\nax.set_xlabel('x-position (µm)', fontsize=18, fontweight=\"bold\", labelpad=1)\r\nax.set_ylabel('y-position (µm)', fontsize=18, fontweight=\"bold\", 
labelpad=1)\r\nax.xaxis.label.set_fontsize(18)\r\nax.xaxis.label.set_weight(\"bold\")\r\nax.yaxis.label.set_fontsize(18)\r\nax.yaxis.label.set_weight(\"bold\")\r\nax.tick_params(axis='both', which='major', labelsize=15)\r\nax.set_yticklabels(ax.get_yticks(), weight='bold')\r\nax.set_xticklabels(ax.get_xticks(), weight='bold')\r\nax.xaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))\r\nax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))\r\nax.axhline(y=yr[int(row_idx)], color='r', linestyle = \"--\")\r\nax.axvline(x=xr[int(col_idx)], color='g', linestyle = \"--\")\r\nplt.show()\r\nplt.close()\r\n\r\n\r\nH = np.arange(0,24,1)\r\nV = np.arange(0,34,1)\r\n\r\nV1,H1 = np.meshgrid(V,H)\r\nfig = plt.figure(figsize=(7, 4))\r\nax = plt.axes()\r\ncp=ax.contourf(V1,H1,df_r, 200, zdir='z', offset=-100, cmap='jet')\r\nax.axhline(y=int(row_idx), color='r')\r\nax.axvline(x=int(col_idx), color='g')\r\nplt.show()\r\nplt.close()\r\n","repo_name":"yd145763/mixed_pitch_grating_publication","sub_path":"max value column and rows no filtering.py","file_name":"max value column and rows no filtering.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"30195274344","text":"#!/usr/bin/python3\n'''\nuse the GitHub API to display your id\n'''\nimport requests\nfrom sys import argv\nif __name__ == \"__main__\":\n response = requests.get('https://api.github.com/user',\n auth=(argv[1], argv[2]))\n if \"json\" not in response.headers.get('content-type'):\n print(\"Not a valid JSON\")\n else:\n response = response.json()\n print(response.get('id'))\n","repo_name":"wuhibe/alx-higher_level_programming","sub_path":"0x11-python-network_1/10-my_github.py","file_name":"10-my_github.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"13"} +{"seq_id":"4376266878","text":"import fileinput\r\nfrom playsound import playsound\r\nfrom gtts import gTTS\r\nfrom time import sleep\r\nimport json\r\nimport requests\r\nimport datetime\r\nimport os\r\nfrom colorama import Fore, Back, Style\r\n\r\nsymbol_input = input(\"Crypto Symbol [SLPBUSD - BTCUSDT - ETHBUSD]:\")\r\nprice_input = float(input(\"Alert the price when it reach [1245.5$]:\"))\r\nkey = \"https://api.binance.com/api/v3/ticker/price?symbol=\"+symbol_input\r\n\r\nprice_register = []\r\nos.system('cls')\r\nrepeat = 0\r\nwhile repeat <= 1:\r\n #Function to capture time of the computer and format it\r\n time = datetime.datetime.now().strftime(\"%d/%m/%y [%H:%M:%S]\")\r\n # requesting data from url\r\n # completing API for request\r\n data = requests.get(key) \r\n data = data.json()\r\n price = float(data['price'])\r\n symbol = str(data['symbol']).upper()\r\n price_register.append(price)\r\n\r\n # Edit \"BUSD\" TO \"-BUSD\" to look better: \"SLPBUSD\"->\"SLP\"\r\n symbol_convertor = symbol.replace(\"BUSD\",\"\").replace(\"USDT\",\"\").replace(\"USDC\",\"\")\r\n # \"0.02110000\" > \"0.0211\"\r\n print(Fore.WHITE + f\"🔔 [${price_input}] 📈 {time} - {symbol_convertor} \" + Fore.LIGHTBLACK_EX + f\"{price} $ \", end='\\r') \r\n if len(price_register) > 6:\r\n if price == price_register[len(price_register)-5]:\r\n print(Fore.WHITE + f\"🔔 [${price_input}] 📈 {time} - {symbol_convertor} \" + Fore.LIGHTBLACK_EX + f\"{price} $ \", end='\\r') \r\n if price > price_register[len(price_register)-5]:\r\n print(Fore.WHITE + f\"🔔 [${price_input}] 📈 {time} - {symbol_convertor} \" + Fore.GREEN + 
f\"{price} $ ↗ \", end='\\r') \r\n if price < price_register[len(price_register)-5]:\r\n print(Fore.WHITE + f\"🔔 [${price_input}] 📈 {time} - {symbol_convertor} \" + Fore.RED + f\"{price} $ ↘ \", end='\\r') \r\n elif len(price_register) > 300:\r\n price_register.clear()\r\n else: \r\n print(Fore.WHITE + f\"{data['symbol']} - \" + Fore.LIGHTBLACK_EX + f\"{price} $ \", end='\\r')\r\n\r\n sleep(0.25)\r\n #if the actual price of a coin is higher or equal than the alert price, do:\r\n if price >= price_input:\r\n # The text that you want to convert to audio\r\n mytext = f\"{symbol_convertor[0]} reach the alert price of ${symbol_convertor[1]}\"\r\n # Language in which you want to convert\r\n language = 'en'\r\n myobj = gTTS(text=mytext, lang=language, slow=True)\r\n # Saving the converted audio in a mp3 file named\r\n # alert \r\n myobj.save(r\"C:\\Users\\cage_\\Desktop\\Python\\alert.mp3\")\r\n # Playing the converted file\r\n playsound(r\"C:\\Users\\cage_\\Desktop\\Python\\alert.mp3\")\r\n##Code problems: For some reason when the price hits the alert, the code stop working due to problems with the audio file.\r\n \r\n","repo_name":"mateusorn/Crypto-Alert","sub_path":"Crypto_Alert.py","file_name":"Crypto_Alert.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"72414122897","text":"'''\n Cut out video by start_time & end_time (in `s`)\n output files would be named by `clipX.mp4` in order\n Author: BebeShen\n Created at: 2021/10/02\n'''\nfrom moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip\nimport moviepy.editor as mpy\nimport os\n\n# # Path setting\n# while True:\n# print(\"[+] Current path: \", os.getcwd())\n# dir_list = os.listdir()\n# print(\"[+] List all dir, Select the video dir: \\n\")\n# for index, _dir in enumerate(dir_list):\n# print(\"\\t\" + str(index) + \". 
\" + _dir)\n# _select = int(input(\"\\n(Enter 0~\" + str(len(dir_list)-1) + \", Enter '-1' to stay): \"))\n# if _select == -1:\n# break\n# video_dir = dir_list[_select]\n# print(video_dir)\n# os.chdir(os.path.join(video_dir))\n\n# print(\"[+] Current path: \", os.getcwd())\n\n# File setting\nsrc_file = \"26C.mp4\"\n# src_file = input(\"\\n[+] Enter the video name: \")\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\nprint(\"[+] Current path: \", current_path)\n# clips = []\ndef FindMaxClip():\n allFileList = os.listdir(current_path)\n max_clip = 0\n for _file in allFileList:\n if not os.path.isdir(_file):\n # print(_file)\n if \"clip\" in _file and _file.endswith(\"mp4\"):\n # print(\"[+] Direct Find \", _file)\n clip_number = _file.split(\"clip\")[1]\n clip_number = int(clip_number.split(\".\")[0])\n if clip_number > max_clip:\n max_clip = clip_number\n\n print(\"[+] Max # of Clip: \", max_clip)\n return max_clip\n # if \"clip\" in os.path.split(_file)[1]:\n # print(\"Os path\", os.path.split(_file)[1])\n\nprint(\"[+] Start Video Cut\\n\")\n# Cut Video\nwhile True:\n t1, t2 = input(\"Enter start time/end time(s): \").split()\n _st = t1.split(\".\")\n _et = t2.split(\".\")\n # if \".\" in t1 and \".\" in t2:\n # t1 = float(t1.split(\".\")[0])*60.0 + float(t1.split(\".\")[1]) \n # t2 = float(t2.split(\".\")[0])*60.0 + float(t2.split(\".\")[1])\n # else:\n # continue\n # print(t1,t2)\n max_clip = FindMaxClip()+1\n target_file = \"clip\" + str(max_clip) + \".mp4\"\n # newclip = mpy.VideoFileClip(src_file).subclip(t1,t2)\n # # Remove audio can small shrink file size\n if len(_st) == 3:\n st = float(_st[0])*60.0 + float(_st[1] + \".\" + _st[2])\n elif len(_st) == 2:\n st = float(_st[0])*60.0 + float(_st[1])\n if len(_et) == 3:\n et = float(_et[0])*60.0 + float(_et[1] + \".\" + _et[2])\n elif len(_et) == 2:\n et = float(_et[0])*60.0 + float(_et[1])\n print(st, et)\n newclip = mpy.VideoFileClip(src_file, audio=False).subclip(st, et)\n newclip.write_videofile(target_file)\n # ffmpeg_extract_subclip(src_file, t1, t2, targetname=target_file)\n\nprint(\"\\n[+] End Video Cut\")","repo_name":"BebeShen/video-cut","sub_path":"clip.py","file_name":"clip.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"20896724531","text":"import os\nimport click\nimport datetime\n\nimport alembic.config\n\nfrom sedldata.database import datatable\nfrom sedldata.lib import upgrade as lib_upgrade\nfrom sedldata.lib import load as lib_load\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\ndef upgrade():\n lib_upgrade()\n\n\n@cli.command()\n@click.argument('infile')\n@click.argument('outfile')\n@click.option('--name', default=None)\ndef load(infile, outfile, name):\n lib_load(infile, outfile, name)\n\n\n@cli.command()\ndef dump():\n # Dump the datatable\n click.echo(\"Dump\\n\")\n\n select = datatable.select()\n rows = select.execute()\n for row in rows:\n print(row)\n","repo_name":"SocialEconomyDataLab/sedldata","sub_path":"sedldata/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"19478758200","text":"import optparse\nimport string\nimport pyparsing as pp\nfrom pyparsing import *\n\n#===============================================================================\n#===============================================================================\nclass Message(object):\n 
def __init__(self, id):\n self.id = id\n self.fields = dict()\n\n def addField(self, id, number):\n self.fields[id] = number\n\n def dump(self):\n print(self.id)\n for id, number in self.fields.items():\n print(\" \" + id + \" = \" + number)\n\n#===============================================================================\n#===============================================================================\nclass Command(object):\n def __init__(self, id, serviceId):\n self.commandId = id\n self.serviceId = serviceId\n self.oneOf = []\n\n def addOneOf(self, id, msgType, number):\n self.oneOf.append({'id': id, 'type': msgType, 'number': number})\n\n def dump(self):\n print(self.commandId)\n print(self.serviceId)\n for msg in self.oneOf:\n print(\" \" + msg['id'] + \": \" + msg['type'] + \" = \" + msg['number'])\n\n#===============================================================================\n#===============================================================================\nclass ProtoParser(object):\n\n PACKAGE_NAME_RES = \"packageName\"\n MSG_ID_RES = \"messageId\"\n MSG_BODY_RES = \"messageBody\"\n ONEOF_BODY_RES = \"oneOfBody\"\n FIELD_ID_RES = \"fieldId\"\n FIELD_TYPE_RES = \"fieldType\"\n FIELD_NB_RES = \"fieldNumber\"\n ENUM_ID_RES = \"enumId\"\n\n ident = Word(alphas + \"_.\", alphanums + \"_.\")\n integer = Regex(r\"[+-]?\\d+\")\n\n LBRACE, RBRACE, LBRACK, RBRACK, LPAR, RPAR, EQ, SEMI, COMMA, LESSER, GREATER = map(Suppress, \"{}[]()=;,<>\")\n\n kwds = \"\"\"message required optional repeated enum extensions extends extend\n to package service rpc returns true false option import syntax\n reserved oneof map\"\"\"\n for kw in kwds.split():\n exec(\"{}_ = Keyword('{}')\".format(kw.upper(), kw))\n\n messageBody = Forward()\n\n messageDefn = (MESSAGE_ - ident(MSG_ID_RES)\n + LBRACE + messageBody(MSG_BODY_RES) + RBRACE)\n\n oneOfBody = Forward()\n\n oneOfDefn = ONEOF_ - ident + LBRACE + oneOfBody(ONEOF_BODY_RES) + RBRACE\n\n mapDefn = (\n MAP_\n - LESSER\n + oneOf(\n \"\"\" int32 int64 uint32 uint64 sint32 sint64\n fixed32 fixed64 sfixed32 sfixed64 bool string\"\"\"\n )\n + COMMA\n + ident\n + GREATER\n )\n\n typespec = (\n oneOf(\n \"\"\"double float int32 int64 uint32 uint64 sint32 sint64\n fixed32 fixed64 sfixed32 sfixed64 bool string bytes\"\"\"\n )\n | mapDefn\n | ident\n )\n\n rvalue = integer | TRUE_ | FALSE_ | ident\n fieldDirective = LBRACK + Group(Optional(LPAR) + ident + Optional(RPAR) + EQ + Group(rvalue | quotedString)) + RBRACK\n fieldDefnPrefix = REQUIRED_ | OPTIONAL_ | REPEATED_\n fieldDefn = (\n Optional(fieldDefnPrefix)\n + typespec(FIELD_TYPE_RES)\n + ident(FIELD_ID_RES)\n + EQ\n + integer(FIELD_NB_RES)\n + ZeroOrMore(fieldDirective)\n + SEMI\n )\n\n optionDirective = OPTION_ - Optional(LPAR) + ident + Optional(RPAR) + EQ + ZeroOrMore(quotedString) + SEMI\n\n # reservedDefn ::= 'reserved' integer 'to' integer ';'\n # reservedDefn ::= 'reserved' integer ',' integer ';'\n # reservedDefn ::= 'reserved' integer ',' integer ',' 'to', integer ';'\n reservedDefn = RESERVED_ - integer + ZeroOrMore(Group(TO_ | COMMA) + integer) + SEMI\n\n # enumDefn ::= 'enum' ident '{' { ident '=' integer ';' }* '}'\n enumDefn = (\n ENUM_\n - ident(ENUM_ID_RES)\n + LBRACE\n + Dict(\n ZeroOrMore(\n Group(ident + EQ + integer + ZeroOrMore(fieldDirective) + SEMI\n | optionDirective | reservedDefn)\n )\n )\n + RBRACE\n )\n\n # extensionsDefn ::= 'extensions' integer 'to' integer ';'\n extensionsDefn = EXTENSIONS_ - integer + TO_ + integer + SEMI\n\n # messageExtension ::= 'extend' ident '{' 
messageBody '}'\n messageExtension = EXTEND_ - ident + LBRACE + messageBody + RBRACE\n\n # oneOfBody ::= { fieldDefn }*\n oneOfBody << Group(\n ZeroOrMore(\n Group(fieldDefn)\n )\n )\n\n # messageBody ::= { fieldDefn | enumDefn | messageDefn | extensionsDefn| reservedDef | messageExtension | oneOfDefn | optionDirective }*\n messageBody << Group(\n ZeroOrMore(\n Group(fieldDefn | enumDefn | messageDefn | extensionsDefn\n | reservedDefn | messageExtension | oneOfDefn | optionDirective)\n )\n )\n\n # methodDefn ::= 'rpc' ident '(' [ ident ] ')' 'returns' '(' [ ident ] ')' ';'\n methodDefn = (\n RPC_\n - ident\n + LPAR\n + Optional(ident)\n + RPAR\n + RETURNS_\n + LPAR\n + Optional(ident)\n + RPAR\n )\n\n # serviceDefn ::= 'service' ident '{' methodDefn* '}'\n serviceDefn = (\n SERVICE_ - ident + LBRACE + ZeroOrMore(Group(methodDefn)) + RBRACE\n )\n\n syntaxDefn = SYNTAX_ + EQ - quotedString + SEMI\n\n # packageDirective ::= 'package' ident ';'\n packageDirective = PACKAGE_ - ident(PACKAGE_NAME_RES) + SEMI\n\n importDirective = IMPORT_ - quotedString + SEMI\n\n topLevelStatement = Group(\n messageDefn\n | messageExtension\n | enumDefn\n | serviceDefn\n | importDirective\n | optionDirective\n | syntaxDefn\n | packageDirective\n )\n\n def parseFile(self, filepath):\n \"\"\"\n Parses protobuf files.\n \"\"\"\n parser = ZeroOrMore(self.topLevelStatement)\n parser.ignore(javaStyleComment)\n parseResults = parser.parseFile(filepath, parseAll=False)\n\n packageName = self.getPackageName(parseResults)\n baseName = self.getBaseName(parseResults)\n messages = self.extractMessages(parseResults=parseResults, baseName=baseName)\n\n command = self.extractCmd(name=\"Command\", parseResults=parseResults, packageName=packageName, baseName=baseName, messages=messages)\n event = self.extractCmd(name=\"Event\", parseResults=parseResults, packageName=packageName, baseName=baseName, messages=messages)\n\n return messages, command, event\n\n def getPackageName(self, parseResults):\n \"\"\"\n Returns protobuf package name.\n \"\"\"\n for item in parseResults:\n if self.PACKAGE_NAME_RES in item:\n return item[self.PACKAGE_NAME_RES]\n return \"\"\n\n def getBaseName(self, parseResults):\n \"\"\"\n Returns protobuf messages base name.\n \"\"\"\n for item in parseResults:\n if self.PACKAGE_NAME_RES in item:\n return string.capwords(item[self.PACKAGE_NAME_RES], \".\").replace(\".\",\"_\")\n return \"\"\n\n def extractMessages(self, parseResults, baseName):\n \"\"\"\n Returns all messages found in protobuf parse results.\n The returned type is a list table of `Message`.\n \"\"\"\n messages = []\n self._extractMessages(parseResults, messages, baseName)\n return messages\n\n def _extractMessages(self, resultItem, messages, baseName, parentMessage = None):\n for item in resultItem:\n if self.MSG_ID_RES in item and self.MSG_BODY_RES in item:\n messageId = item[self.MSG_ID_RES]\n if parentMessage is None:\n messageId = baseName + \"_\" + messageId\n else:\n messageId = parentMessage.id + \".\" + messageId\n message = Message(messageId)\n self._extractMessages(item[self.MSG_BODY_RES], messages, messageId, message)\n messages.append(message)\n if self.ENUM_ID_RES in item:\n messageId = item[self.ENUM_ID_RES]\n if parentMessage is None:\n messageId = baseName + \"_\" + messageId\n else:\n messageId = parentMessage.id + \".\" + messageId\n message = Message(messageId)\n messages.append(message)\n if self.ONEOF_BODY_RES in item:\n self._extractMessages(item[self.ONEOF_BODY_RES], messages, baseName, parentMessage)\n if 
self.FIELD_ID_RES in item and self.FIELD_NB_RES in item:\n if parentMessage is not None:\n parentMessage.addField(id=item[self.FIELD_ID_RES], number=item[self.FIELD_NB_RES])\n\n def extractCmd(self, name, parseResults, packageName, baseName, messages):\n \"\"\"\n Returns arsdk commands matching a given name defined in the protobuf file.\n \"\"\"\n cmd = None\n for item in parseResults:\n if self.MSG_ID_RES in item and self.MSG_BODY_RES in item and item[self.MSG_ID_RES] == name:\n cmdId = baseName + \"_\" + item[self.MSG_ID_RES]\n serviceId = packageName + \".\" + item[self.MSG_ID_RES]\n cmd = Command(cmdId, serviceId)\n self.extractCmdContent(item[self.MSG_BODY_RES], cmd, messages)\n return cmd\n\n def extractCmdContent(self, resultItem, cmd, messages):\n \"\"\"\n Fill arsdk commands with content of oneOf fields.\n \"\"\"\n for item in resultItem:\n if self.ONEOF_BODY_RES in item:\n for oneOfItem in item[self.ONEOF_BODY_RES]:\n if self.FIELD_ID_RES in oneOfItem and self.FIELD_NB_RES in oneOfItem and self.FIELD_TYPE_RES in oneOfItem:\n if isinstance(oneOfItem[self.FIELD_TYPE_RES], ParseResults):\n fieldType = str(oneOfItem[self.FIELD_TYPE_RES][0])\n else:\n fieldType = oneOfItem[self.FIELD_TYPE_RES]\n msgType = self.normalizeType(fieldType, cmd.commandId, messages)\n cmd.addOneOf(str(oneOfItem[self.FIELD_ID_RES]), msgType, oneOfItem[self.FIELD_NB_RES])\n\n def normalizeType(self, messageType, parentType, messages):\n \"\"\"\n Returns field full type name.\n \"\"\"\n # exact match\n for message in messages:\n if message.id == str(parentType + \".\" + messageType):\n return message.id\n # end match\n for message in messages:\n if message.id.endswith(\"_\" + messageType):\n return message.id\n return messageType\n\n\n#===============================================================================\n#===============================================================================\ndef main():\n # Parse options\n parser = optparse.OptionParser()\n parser.add_option('-i', '--input',\n action=\"store\", dest=\"inputpath\",\n help=\"path to protobuf file\", default=\"in.proto\")\n parser.add_option('-o', '--output',\n action=\"store\", dest=\"outpath\",\n help=\"output directory\", default=\"out\")\n options, args = parser.parse_args()\n\n # Parse protobuf file\n protoParser = ProtoParser()\n messages, command, event = protoParser.parseFile(options.inputpath)\n\n # print results\n for message in messages:\n message.dump()\n if command is not None:\n command.dump()\n if event is not None:\n event.dump()\n\n\n#===============================================================================\n#===============================================================================\nif __name__ == \"__main__\":\n main()\n","repo_name":"sikbongda/camera-sample","sub_path":"Pods/ArsdkEngine/ArsdkEngine/scripts/arsdkprotoparser.py","file_name":"arsdkprotoparser.py","file_ext":"py","file_size_in_byte":11599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"71082174737","text":"import json\nimport os\nimport pickle\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple, Union, List\n\nimport SimpleITK as sitk\nimport numpy as np\n\n\nclass BaseReaderWriter(ABC):\n @staticmethod\n def _check_all_same(input_list):\n # compare all entries to the first\n for i in input_list[1:]:\n if not len(i) == len(input_list[0]):\n return False\n all_same = all(i[j] == input_list[0][j] for j in range(len(i)))\n if not all_same:\n return False\n return True\n\n 
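# Used by read_images below to check that shape/spacing/origin/direction agree across\n    # all input modalities, e.g. _check_all_same([(0.5, 0.5, 3.0), (0.5, 0.5, 3.0)]) -> True.\n    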
@staticmethod\n def _check_all_same_array(input_list):\n # compare all entries to the first\n for i in input_list[1:]:\n if not all([a == b for a, b in zip(i.shape, input_list[0].shape)]):\n return False\n all_same = np.allclose(i, input_list[0])\n if not all_same:\n return False\n return True\n\n @abstractmethod\n def read_images(\n self, image_fnames: Union[List[str], Tuple[str, ...]]\n ) -> Tuple[np.ndarray, dict]:\n \"\"\"\n Reads a sequence of images and returns a 4d (!) np.ndarray along with a dictionary. The 4d array must have the\n modalities (or color channels, or however you would like to call them) in its first axis, followed by the\n spatial dimensions (so shape must be c,x,y,z where c is the number of modalities (can be 1)).\n Use the dictionary to store necessary meta information that is lost when converting to numpy arrays, for\n example the Spacing, Orientation and Direction of the image. This dictionary will be handed over to write_seg\n for exporting the predicted segmentations, so make sure you have everything you need in there!\n\n IMPORTANT: dict MUST have a 'spacing' key with a tuple/list of length 3 with the voxel spacing of the np.ndarray.\n Example: my_dict = {'spacing': (3, 0.5, 0.5), ...}. This is needed for planning and\n preprocessing. The ordering of the numbers must correspond to the axis ordering in the returned numpy array. So\n if the array has shape c,x,y,z and the spacing is (a,b,c) then a must be the spacing of x, b the spacing of y\n and c the spacing of z.\n\n In the case of 2D images, the returned array should have shape (c, 1, x, y) and the spacing should be\n (999, sp_x, sp_y). Make sure 999 is larger than sp_x and sp_y! Example: shape=(3, 1, 224, 224),\n spacing=(999, 1, 1)\n\n For images that don't have a spacing, set the spacing to 1 (2d exception with 999 for the first axis still applies!)\n\n :param image_fnames:\n :return:\n 1) a np.ndarray of shape (c, x, y, z) where c is the number of image channels (can be 1) and x, y, z are\n the spatial dimensions (set x=1 for 2D! Example: (3, 1, 224, 224) for RGB image).\n 2) a dictionary with metadata. This can be anything. BUT it HAS to inclue a {'spacing': (a, b, c)} where a\n is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set\n a=999 (largest spacing value! Make it larger than b and c)\n\n \"\"\"\n pass\n\n @abstractmethod\n def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:\n \"\"\"\n Same requirements as BaseReaderWriter.read_image. Returned segmentations must have shape 1,x,y,z. Multiple\n segmentations are not (yet?) allowed\n\n If images and segmentations can be read the same way you can just `return self.read_image((image_fname,))`\n :param seg_fname:\n :return:\n 1) a np.ndarray of shape (1, x, y, z) where x, y, z are\n the spatial dimensions (set x=1 for 2D! Example: (1, 1, 224, 224) for 2D segmentation).\n 2) a dictionary with metadata. This can be anything. BUT it HAS to inclue a {'spacing': (a, b, c)} where a\n is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set\n a=999 (largest spacing value! Make it larger than b and c)\n \"\"\"\n pass\n\n @abstractmethod\n def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:\n \"\"\"\n Export the predicted segmentation to the desired file format. The given seg array will have the same shape and\n orientation as the corresponding image data, so you don't need to do any resampling or whatever. 
Just save :-)\n\n properties is the same dictionary you created during read_images/read_seg so you can use the information here\n to restore metadata\n\n IMPORTANT: Segmentations are always 3D! If your input images were 2d then the segmentation will have shape\n 1,x,y. You need to catch that and export accordingly (for 2d images you need to convert the 3d segmentation\n to 2d via seg = seg[0])!\n\n :param seg: A segmentation (np.ndarray, integer) of shape (x, y, z). For 2D segmentations this will be (1, y, z)!\n :param output_fname:\n :param properties: the dictionary that you created in read_images (the ones this segmentation is based on).\n Use this to restore metadata\n :return:\n \"\"\"\n pass\n\n\nclass SimpleITKIO(BaseReaderWriter):\n supported_file_endings = [\".nii.gz\", \".nrrd\", \".mha\"]\n\n def read_images(\n self, image_fnames: Union[List[str], Tuple[str, ...]]\n ) -> Tuple[np.ndarray, dict]:\n images = []\n spacings = []\n origins = []\n directions = []\n\n spacings_for_nnunet = []\n for f in image_fnames:\n itk_image = sitk.ReadImage(f)\n spacings.append(itk_image.GetSpacing())\n origins.append(itk_image.GetOrigin())\n directions.append(itk_image.GetDirection())\n npy_image = sitk.GetArrayFromImage(itk_image)\n if len(npy_image.shape) == 2:\n # 2d\n npy_image = npy_image[None, None]\n max_spacing = max(spacings[-1])\n spacings_for_nnunet.append(\n (max_spacing * 999, *list(spacings[-1])[::-1])\n )\n elif len(npy_image.shape) == 3:\n # 3d, as in original nnunet\n npy_image = npy_image[None]\n spacings_for_nnunet.append(list(spacings[-1])[::-1])\n elif len(npy_image.shape) == 4:\n # 4d, multiple modalities in one file\n spacings_for_nnunet.append(list(spacings[-1])[::-1][1:])\n pass\n else:\n raise RuntimeError(\n \"Unexpected number of dimensions: %d in file %s\"\n % (len(npy_image.shape), f)\n )\n\n images.append(npy_image)\n spacings_for_nnunet[-1] = list(np.abs(spacings_for_nnunet[-1]))\n\n if not self._check_all_same([i.shape for i in images]):\n print(\"ERROR! Not all input images have the same shape!\")\n print(\"Shapes:\")\n print([i.shape for i in images])\n print(\"Image files:\")\n print(image_fnames)\n raise RuntimeError()\n if not self._check_all_same(spacings):\n print(\"ERROR! Not all input images have the same spacing!\")\n print(\"Spacings:\")\n print(spacings)\n print(\"Image files:\")\n print(image_fnames)\n raise RuntimeError()\n if not self._check_all_same(origins):\n print(\"WARNING! Not all input images have the same origin!\")\n print(\"Origins:\")\n print(origins)\n print(\"Image files:\")\n print(image_fnames)\n print(\n \"It is up to you to decide whether that's a problem. You should run nnUNet_plot_dataset_pngs to verify \"\n \"that segmentations and data overlap.\"\n )\n if not self._check_all_same(directions):\n print(\"WARNING! Not all input images have the same direction!\")\n print(\"Directions:\")\n print(directions)\n print(\"Image files:\")\n print(image_fnames)\n print(\n \"It is up to you to decide whether that's a problem. You should run nnUNet_plot_dataset_pngs to verify \"\n \"that segmentations and data overlap.\"\n )\n if not self._check_all_same(spacings_for_nnunet):\n print(\n \"ERROR! Not all input images have the same spacing_for_nnunet! (This should not happen and must be a \"\n \"bug. 
Please report!\"\n )\n print(\"spacings_for_nnunet:\")\n print(spacings_for_nnunet)\n print(\"Image files:\")\n print(image_fnames)\n raise RuntimeError()\n\n stacked_images = np.vstack(images)\n dict = {\n \"sitk_stuff\": {\n # this saves the sitk geometry information. This part is NOT used by nnU-Net!\n \"spacing\": spacings[0],\n \"origin\": origins[0],\n \"direction\": directions[0],\n },\n # the spacing is inverted with [::-1] because sitk returns the spacing in the wrong order lol. Image arrays\n # are returned x,y,z but spacing is returned z,y,x. Duh.\n \"spacing\": spacings_for_nnunet[0],\n }\n return stacked_images.astype(np.float32), dict\n\n def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:\n return self.read_images((seg_fname,))\n\n def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:\n assert (\n len(seg.shape) == 3\n ), \"segmentation must be 3d. If you are exporting a 2d segmentation, please provide it as shape 1,x,y\"\n output_dimension = len(properties[\"sitk_stuff\"][\"spacing\"])\n assert 1 < output_dimension < 4\n if output_dimension == 2:\n seg = seg[0]\n\n itk_image = sitk.GetImageFromArray(seg.astype(np.uint8))\n itk_image.SetSpacing(properties[\"sitk_stuff\"][\"spacing\"])\n itk_image.SetOrigin(properties[\"sitk_stuff\"][\"origin\"])\n itk_image.SetDirection(properties[\"sitk_stuff\"][\"direction\"])\n\n sitk.WriteImage(itk_image, output_fname)\n\n\ndef subdirs(folder, join=True, prefix=None, suffix=None, sort=True):\n if join:\n l = os.path.join\n else:\n l = lambda x, y: y\n res = [\n l(folder, i)\n for i in os.listdir(folder)\n if os.path.isdir(os.path.join(folder, i))\n and (prefix is None or i.startswith(prefix))\n and (suffix is None or i.endswith(suffix))\n ]\n if sort:\n res.sort()\n return res\n\n\ndef subfiles(folder, join=True, prefix=None, suffix=None, sort=True):\n if join:\n l = os.path.join\n else:\n l = lambda x, y: y\n res = [\n l(folder, i)\n for i in os.listdir(folder)\n if os.path.isfile(os.path.join(folder, i))\n and (prefix is None or i.startswith(prefix))\n and (suffix is None or i.endswith(suffix))\n ]\n if sort:\n res.sort()\n return res\n\n\nsubfolders = subdirs # I am tired of confusing those\n\n\ndef maybe_mkdir_p(directory):\n directory = os.path.abspath(directory)\n splits = directory.split(\"/\")[1:]\n for i in range(0, len(splits)):\n if not os.path.isdir(os.path.join(\"/\", *splits[: i + 1])):\n try:\n os.mkdir(os.path.join(\"/\", *splits[: i + 1]))\n except FileExistsError:\n # this can sometimes happen when two jobs try to create the same directory at the same time,\n # especially on network drives.\n print(\n \"WARNING: Folder %s already existed and does not need to be created\"\n % directory\n )\n\n\ndef load_pickle(file, mode=\"rb\"):\n with open(file, mode) as f:\n a = pickle.load(f)\n return a\n\n\ndef write_pickle(obj, file, mode=\"wb\"):\n with open(file, mode) as f:\n pickle.dump(obj, f)\n\n\nsave_pickle = write_pickle\n\n\ndef load_json(file):\n with open(file, \"r\") as f:\n a = json.load(f)\n return a\n\n\ndef save_json(obj, file, indent=4, sort_keys=True):\n with open(file, \"w\") as f:\n json.dump(obj, f, sort_keys=sort_keys, indent=indent)\n\n\nwrite_json = save_json\n\n\ndef pardir(path):\n return os.path.join(path, os.pardir)\n\n\n# I'm tired of typing these out\njoin = os.path.join\nisdir = os.path.isdir\nisfile = os.path.isfile\nlistdir = 
os.listdir\n","repo_name":"weihuang-cs/nnUNet-Deploy","sub_path":"src/predictor/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"11032618277","text":"import os\n\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.stats import multivariate_normal\nfrom sklearn.cluster import KMeans\nimport pandas as pd\n\nfrom Arguments import shuffled_argmax, multi_argmax\n\n\"\"\"\n1. Cluster the data\n2. Estimate P(y|k)\n3. Calculate P(y|x)\n4. Choose unlabeled sample based on Eq1 and label\n5. Re-cluster if necessary\n6. Repeat steps until stop\n\"\"\"\nwrk_path_3 = r\"/Users/calvin/Documents/OneDrive/Documents/2020/Liposomes Vitamins/LiposomeFormulation\"\nos.chdir(wrk_path_3)\n\n\ndef unlabelled_data(file, method):\n ul_df = pd.read_csv(file)\n column_drop = ['Duplicate_Check',\n 'PdI Width (d.nm)',\n 'PdI',\n 'Z-Average (d.nm)',\n 'ES_Aggregation']\n ul_df = ul_df.drop(columns=column_drop)\n ul_df.replace(np.nan, 'None', inplace=True)\n ul_df = pd.get_dummies(ul_df, columns=[\"Component_1\", \"Component_2\", \"Component_3\"],\n prefix=\"\", prefix_sep=\"\")\n # if method=='fillna': ul_df['Component_3'] = ul_df['Component_3'].apply(lambda x: None if pd.isnull(x) else x) #TODO This should be transformed into an IF function, thus when the function for unlabelled is filled with a parameter, then activates\n\n ul_df = ul_df.groupby(level=0, axis=1, sort=False).sum()\n\n print(ul_df.isna().any())\n X_val = ul_df.to_numpy()\n columns_x_val = ul_df.columns\n return X_val, columns_x_val\n\n\nfile = \"stratified_sample_experiment.csv'\"\n\nX_val, columns_x_val = unlabelled_data('stratified_sample_experiment.csv',\n method='fillna')\n\n\nclass DensityWeightedUncertaintySampling:\n\n def __init__(self, X_uld: np.ndarray, X_lld: np.ndarray, y_labelled: np.ndarray, sigma: int = 25,\n clusters: int = 10, max_iter: int = 100, C: float = 0.1, randomize_tie_break: bool = False,\n n_instances: int = 10, ):\n self.P_x_k = None\n self.d = None\n self.distrib_array = None\n self.centres_ck = None\n self.all_x = None\n self.all_y = None\n self.C = C\n self.sigma = sigma\n self.clusters = clusters\n self.max_iter = max_iter\n # Unlabelled Data\n self.X_uld = X_uld\n\n self.kmeans = KMeans(n_clusters=self.clusters, random_state=42)\n\n # Initial P(k) Value\n self.P_k = np.divide(np.ones(self.clusters), float(clusters))\n\n self.P_k_x = None\n self.P_x = None\n # Labelled Data\n self.X_lld = X_lld\n\n self.y_labelled = y_labelled\n self.y_labelled_entries = np.array([i for i in self.y_labelled if i]).reshape(-1, 1)\n self.all_x_y()\n\n self.kmeans_fit()\n self.randomize_tie_break = randomize_tie_break\n self.n_instances = n_instances\n\n def all_x_y(self):\n y_unlabelled_list = [None] * self.X_uld.shape[0]\n\n self.all_y = np.concatenate((y_unlabelled_list, self.y_labelled), axis=0)\n self.all_x = np.concatenate((self.X_uld, self.X_lld), axis=0)\n self.d = len(self.all_x[0])\n self.P_x_k = np.zeros((len(self.all_x), self.clusters))\n\n return self\n\n def mask_unlabelled(self):\n return np.fromiter((e is None for e in self.all_y), dtype=bool)\n\n def mask_labelled(self):\n return np.fromiter((e is not None for e in self.all_y), dtype=bool)\n\n def labeled_entries(self):\n return self.all_x[self.mask_labelled()], self.all_y[self.mask_labelled()].tolist()\n\n def labeled_entries_id(self):\n return np.where(self.mask_labelled())[0]\n\n def unlabelled_entries_ids(self):\n 
\"\"\"\n :return:idx - numpy array, shape = (n_samples unlabeled)\n X - numpy array, shape (n_sample unlabeled, n_features)\n \"\"\"\n\n return np.where(self.mask_unlabelled())[0], self.all_x[self.mask_unlabelled()]\n\n def kmeans_fit(self):\n self.kmeans.fit(self.all_x)\n self.centres_ck = self.kmeans.cluster_centers_\n\n return self\n\n def distribution(self):\n\n # Empty Distribution Array to be filler: exp^(-1*(||x-ck||)/2/sigma^2)\n self.distrib_array = np.zeros((len(self.all_x), self.clusters))\n for i in range(self.clusters):\n self.distrib_array[:, i] = np.exp((-1 * (np.abs(self.all_x - self.centres_ck[i]) ** 2).sum(axis=1))\n / (2 * np.square(self.sigma)))\n return self.distrib_array\n\n def em_step(self):\n self.distribution()\n for _ in range(self.max_iter):\n # E-Step P(k|x)\n temp = self.distrib_array * np.tile(self.P_k, (len(self.all_x), 1))\n P_k_x = np.divide(temp, np.tile(np.sum(temp, axis=1), (self.clusters, 1)).T)\n # M_step\n self.P_k = 1. / (len(self.all_x) * np.sum(P_k_x, axis=0))\n self.P_k_x = P_k_x\n return self.P_k, self.P_k_x\n\n def prob_x_k(self):\n for i in range(self.clusters):\n self.P_x_k[:, i] = multivariate_normal.pdf(\n x=self.all_x,\n mean=self.centres_ck[i],\n cov=np.ones(self.d) * self.sigma,\n )\n return self.P_x_k\n\n def prob_x(self):\n self.em_step()\n self.prob_x_k()\n\n self.P_x = np.dot(self.P_x_k, self.P_k).reshape(-1)\n\n return self.P_x\n\n def make_query(self):\n unlabeled_entry_ids, _ = self.unlabelled_entries_ids()\n labeled_entry, labels = self.labeled_entries()\n labeled_entry_ids = self.labeled_entries_id()\n\n centres = self.centres_ck\n labels = np.asarray(labels).reshape(-1, 1)\n\n P_k_x = self.P_k_x\n p_x = self.P_x[list(unlabeled_entry_ids)]\n\n clf = DensityWeightedLogisticRegression(P_k_x[labeled_entry_ids, :],\n centres,\n C=self.C)\n clf.train(labeled_entry_ids, labels)\n\n P_y_k = clf.predict()\n P_y_x = np.zeros(len(unlabeled_entry_ids))\n\n for k, centre in enumerate(centres):\n P_y_x += P_y_k[k] * P_k_x[unlabeled_entry_ids, k]\n\n # binary case\n expected_error = P_y_x\n expected_error[P_y_x >= 0.5] = 1. - P_y_x[P_y_x >= 0.5]\n\n if not self.randomize_tie_break:\n query_idx = multi_argmax((expected_error * p_x), self.n_instances)\n else:\n query_idx = shuffled_argmax((expected_error * p_x), self.n_instances)\n\n return unlabeled_entry_ids[query_idx]\n\n\n\"\"\"\n\nformula used in the github libact\nfor i in range(clusters):\n empty_array[:,i] = np.exp(-np.einsum('ij,ji->i',(X_val-centres_ck[i]),(X_val-centres_ck[i]).T)/2/sigma)\n \n\n\"\"\"\n\n\nclass DensityWeightedLogisticRegression(object):\n\n def __init__(self, density_estimate, centres, C: float = 0.01):\n self.density = np.asarray(density_estimate)\n self.centres = np.asarray(centres)\n self.C = C\n self.w_ = None\n\n def _likelihood(self, w, X, y):\n w = w.reshape(-1, 1)\n sigmoid = lambda t: 1. / (1. + np.exp(-t))\n\n L = lambda w: (self.C / 2. * np.dot(w[:-1].T, w[:-1]) -\n np.sum(np.log(\n np.sum(self.density *\n sigmoid(np.dot(y,\n (np.dot(self.centres, w[:-1]) + w[-1]).T)\n ), axis=1)\n ), axis=0))[0][0]\n\n return L(w)\n\n def train(self, X, y):\n d = np.shape(self.centres)[1]\n w = np.zeros((d + 1, 1))\n\n result = minimize(lambda _w: self._likelihood(_w, X, y),\n w.reshape(-1),\n method=\"CG\")\n\n w = result.x.reshape(-1, 1)\n self.w_ = w\n return self.w_\n\n def predict(self):\n\n if self.w_ is not None:\n sigmoid = lambda t: 1. / (1. 
+ np.exp(-t))\n return sigmoid(np.dot(self.centres, self.w_[:-1]) + self.w_[-1])\n else:\n print(\"The model is not trained\")\n pass\n","repo_name":"calvinp0/AL_Master_ChemEng","sub_path":"DensityWeightedUncertaintySampling.py","file_name":"DensityWeightedUncertaintySampling.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"74764008976","text":"from rest_framework import serializers\nfrom .models import Todo\n\nclass TodoSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to map the Model instance into JSON format.\"\"\"\n\n class Meta:\n \"\"\"Meta class to map serializer's fields with the model fields.\"\"\"\n model = Todo\n fields = ('id', 'name', 'completed', 'date_created', 'date_modified')\n read_only_fields = ('date_created', 'date_modified')\n","repo_name":"fob413/dtodo","sub_path":"myproject/todoapi/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"14739790665","text":"# Uses python3\n\nimport sys\nimport queue\n\nq = queue.Queue()\n\ndef shortet_paths(adj, cost, s, distance, reachable, shortest):\n # write your code here\n\n negative_cycle(adj, cost, distance, reachable, shortest, s)\n\n print(q.qsize())\n\n for s in range(q.qsize()):\n bfs(adj, cost, shortest, s)\n\n print(shortest)\n print(reachable)\n print(distance)\n\n return\n\n\ndef negative_cycle(adj, cost, distance, reachable, shortest, s):\n # prev = [None] * len(adj)\n shortest[s] = 0\n distance[s] = 0\n reachable[s] = True\n for u in range(len(adj) - 1):\n for v, w in zip(adj[u], cost[u]):\n if distance[v] > distance[u] + w:\n distance[v] = distance[u] + w\n shortest[v] = distance[v]\n reachable[v] = True\n\n for v, w in zip(adj[s], cost[s]):\n if distance[v] > distance[s] + w:\n distance[v] = distance[s] + w\n shortest[v] = None\n reachable[v] = True\n q.put(v)\n\n\n\n\ndef bfs(adj, cost, shortest, s):\n q1 = queue.Queue()\n marked = [False] * len(adj)\n reachable[s] = True\n marked[s] = True\n #shortest[s] = 0\n q1.put(s)\n\n while not q1.empty():\n\n u = q1.get()\n print(u)\n for v, w in zip(adj[u], cost[u]):\n if not marked[v]:\n #shortest[v] = shortest[u] + w\n marked[v] = True\n #reachable[v] = True\n shortest[v] = None\n q1.put(v)\n\n\n\n\nif __name__ == '__main__':\n file1 = open(\"01short.txt\", \"r\")\n input = file1.read()\n #input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))\n data = data[3 * m:]\n adj = [[] for _ in range(n)]\n cost = [[] for _ in range(n)]\n for ((a, b), w) in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\n s = data[0]\n s -= 1\n distance = [float('inf')] * n\n reachable = [False] * n\n shortest = [None] * n\n shortet_paths(adj, cost, s, distance, reachable, shortest)\n for x in range(n):\n if reachable[x] == False:\n print('*')\n elif shortest[x] == None:\n print('-')\n else:\n print(distance[x])\n","repo_name":"price-dj/Algorithms_On_Graphs","sub_path":"Week4/pset4/shortest_pathsv2.py","file_name":"shortest_pathsv2.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"16581786802","text":"import pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\n\ndef 
plot_metrics(data_path=None):\n data = pd.read_csv(data_path)\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))\n\n # Plot 1: Total Reward per epoch\n sns.lineplot(x=\"epoch\", y=\"validation_reward_epoch\", data=data, ax=ax1)\n ax1.set_xlabel(\"Epoch\")\n ax1.set_ylabel(\"Validation Reward Epoch\")\n ax1.set_title(\"Validation Reward Epoch\")\n\n # Plot 2: Loss per epoch\n sns.lineplot(x=\"epoch\", y=\"loss\", data=data, ax=ax2)\n ax2.set_xlabel(\"Epoch\")\n ax2.set_ylabel(\"Loss\")\n ax2.set_title(\"Loss per epoch\")\n\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n plot_metrics()\n","repo_name":"strasserpatrick/reinforcement-learning-atari-framework","sub_path":"src/utils/dqn_plot.py","file_name":"dqn_plot.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"21553810749","text":"\"\"\"module for getting balance of user\"\"\"\nimport math\nfrom helpers.mixins import LoggerMixin\nfrom .abstract_user_method import AbstractMethod\nfrom .invoices import GetInvoices\nfrom .onchain_txs import GetOnchainTxs\nfrom .offchain_txs import GetOffchainTxs\nfrom .locked_payments import GetLockedPayments\n\n\nclass GetBalance(AbstractMethod, LoggerMixin):\n \"\"\"method to get the calculated balance for the user\"\"\"\n\n async def run(self, user):\n balance = 0\n\n invoice_method = GetInvoices(paid=True)\n for invoice in await user.exec(invoice_method):\n if invoice.payee == user.username:\n balance += invoice.amount\n if invoice.payer == user.username:\n balance -= invoice.amount + invoice.fee\n\n if user.bitcoin_address:\n onchain_txfer_method = GetOnchainTxs(min_confirmations=3)\n for transaction in await user.exec(onchain_txfer_method):\n # Valid onchain btc transactions sent to this user's address\n # Debit user's account balance\n balance += transaction[\"amount\"]\n\n locked_payments_method = GetLockedPayments()\n for invoice in await user.exec(locked_payments_method):\n # for each locked payment (invoice that has been sent but not validated)\n # Credit user's account balance\n balance -= invoice[\"amount\"] + math.floor(\n invoice[\"amount\"] * 0.01\n ) # fee limit\n\n return balance\n","repo_name":"FeatherLightApp/FeatherLight-API","sub_path":"server/featherlight/classes/user/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"13"} +{"seq_id":"6612568980","text":"from django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\n\n\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('atrakcje/', views.atractions, name='atrakcje'),\n path('atrakcje/CrashKader', views.crash_kader, name='CrashKader'),\n path('atrakcje/CrashRunner', views.crash_runner, name='CrashRunner'),\n path('atrakcje/KlockiMaxi', views.klocki_maxi, name='KlockiMaxi'),\n path('atrakcje/ArcheryTag', views.archery_tag, name='ArcheryTag'),\n path('atrakcje/BumperBall', views.bumper_ball, name='BumperBall'),\n path('atrakcje/ZjezdzalniaKlocki', views.zjazd_klocki, name='ZjazdKolcki'),\n path('atrakcje/ClimbingWall', views.climbing_wall, name='ClimbingWall'),\n path('atrakcje/DmuchaniecKlocki', views.dmuchaniec_klocki, name='DmuchaniecKlocki'),\n path('atrakcje/SkakaniecKlocki', views.skakaniec_klocki, name='SkakaniecKlocki'),\n path('atrakcje/PoduchaWodna', views.poducha_wodna, name='PoduchaWodna'),\n path('atrakcje/Motorowka', views.motorowka, name='Motorowka'),\n path('atrakcje/GigaPilkarzyki', views.snooker, name='Snooker'),\n path('atrakcje/WodnyWalec', views.watherroller, name='WatherRoller'),\n path('atrakcje/Paintball', views.paintball, name='Paintball'),\n path('atrakcje/WataCukrowa', views.wata, name='WataCukrowa'),\n path('atrakcje/Agregat', views.agregat, name='Agregat'),\n path('atrakcje/wodne-atrakce', views.wodne, name='wodne-atrakcje'),\n path('eventy/', views.eventy, name='eventy'),\n path('oferta/', views.offer, name='oferta'),\n path('obozy/', views.camps, name='obozy'),\n path('przedszkola/', views.przedszkola, name='przedszkola'),\n path('przedszkola-zapisy/', views.kids_enroll, name='kids_enroll'),\n path('oboz-zapisy/', views.kids_enroll_camp, name='kids_enroll_camp'),\n path('polkolonie-zapisy/', views.kids_enroll_day_camp, name ='kids_enroll_day_camp'),\n path('polityka-prywatnosci/', views.polityka, name='polityka'),\n path('regulamin/', views.regulamin, name='regulamin'),\n path('thanks/', views.thanks, name='thanks'),\n path('kontakt/', views.contact_form, name='kontakt'),\n path('zielona-szkola/', views.green, name='zielona-szkola'),\n path('oferta-zielona-szkola/', views.offer_green, name='oferta-zielona-szkola'),\n path('zielona-szkola/ziomkolandia-mini', views.green_mini, name='ziomkolandia-mini'),\n path('zielona-szkola/ziomkolandia-maxi', views.green_maxi, name='ziomkolandia-maxi'),\n path('zielona-szkola/ziomkolandia-xl', views.green_xl, name='ziomkolandia-xl'),\n path('captcha/', include('captcha.urls')),\n\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \\\n + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","repo_name":"kaniamichal/ziomkolandia","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"74058842258","text":"from utils import read_input\n\nvalues = read_input('input-day-10.txt')\ntest_values = ['[({(<(())[]>[[{[]{<()<>>',\n '[(()[<>])]({[<{<<[]>>(',\n '{([(<{}[<>[]}>{[]{[(<()>',\n '(((({<>}<{<{<>}{[]{[]{}',\n '[[<[([]))<([[{}[[()]]]',\n '[{[{({}]{}}([{[{{{}}([]',\n '{<[[]]>}<{[{[{[]{()[[[]',\n '[<(<(<(<{}))><([]([]()',\n '<{([([[(<>()){}]>(<<{{',\n '<{([{{}}[<[[[<>{}]]]>[]]', ]\n# values = test_values\nbrackets = {\n '(': ')',\n '{': '}',\n '[': ']',\n '<': '>',\n}\n\n\ndef get_leftover_stack(chunk):\n chunk_stack = []\n for bracket in chunk:\n if bracket in brackets.keys():\n chunk_stack.append(bracket)\n else:\n last_open_bracket = chunk_stack.pop()\n if brackets[last_open_bracket] != 
bracket:\n return None\n return chunk_stack\n\n\ndef get_closing_stack(stack):\n closing_stack = []\n for bracket in stack[::-1]:\n closing_stack.append(brackets[bracket])\n return closing_stack\n\n\nbracket_points = {\n ')': 1,\n ']': 2,\n '}': 3,\n '>': 4,\n}\n\n\ndef calculate_score(closing_stack):\n total = 0\n for bracket in closing_stack:\n total *= 5\n total += bracket_points[bracket]\n return total\n\n\nclosing_list = []\nfor i, chunk in enumerate(values):\n stack = get_leftover_stack(chunk)\n if stack is None:\n continue\n else:\n closing_list.append(get_closing_stack(stack))\n\nscore_list = []\nfor values in closing_list:\n score_list.append(calculate_score(values))\n\nscore_list.sort()\nprint(score_list[len(score_list)//2])\n","repo_name":"clay099/adventOfCode2021","sub_path":"day10_pt2.py","file_name":"day10_pt2.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"26925514876","text":"class Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n if len(s)!=len(t):\n return False\n s_map,t_map={},{}\n for i in range(len(s)):\n if s[i] not in s_map and t[i] not in t_map:\n s_map[s[i]]=t[i]\n t_map[t[i]]=s[i]\n elif s[i] in s_map and t[i] in t_map:\n if s_map[s[i]]==t[i] and t_map[t[i]]==s[i]:\n continue\n else:\n return False\n else:\n return False\n\n return True\n\n\n# 执行用时:\n# 56 ms\n# , 在所有 Python3 提交中击败了\n# 34.79%\n# 的用户\n# 内存消耗:\n# 14.9 MB\n# , 在所有 Python3 提交中击败了\n# 13.21%\n# 的用户","repo_name":"hwngenius/leetcode","sub_path":"每日一题/205.py","file_name":"205.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"26286976125","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom Tkinter import *\n\n\nclass Application(Frame):\n\n def say_hi(self):\n self.print_text.insert(INSERT, self.count_spinbox.get())\n\n def createWidgets(self):\n\n frm_order = Frame(root)\n self.order_lable = Label(frm_order, text='订单号码:')\n self.order_lable.pack(side=LEFT)\n\n self.order_spinbox = Spinbox(\n frm_order, from_=113, to=9999999999, increment=1)\n self.order_spinbox.pack(side=RIGHT)\n frm_order.pack()\n\n frm_count = Frame(root)\n self.count_label = Label(frm_count, text='打印联数:')\n self.count_label.pack(side=LEFT)\n\n self.count_spinbox = Spinbox(frm_count, from_=2, to=5, increment=1)\n self.count_spinbox.pack(side=RIGHT)\n frm_count.pack()\n\n frm_click = Frame(root)\n\n self.print_button = Button(frm_click)\n self.print_button[\"text\"] = \"打印\"\n self.print_button[\"command\"] = self.say_hi\n\n self.print_button.pack()\n\n self.print_text = Text(frm_click)\n self.print_text.pack()\n\n frm_click.pack()\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.pack()\n self.createWidgets()\n\nroot = Tk()\nroot.title('校内店订单打印系统')\nroot.geometry('600x400')\nroot.resizable(width=False, height=False)\napp = Application(master=root)\napp.mainloop()\nroot.destroy()\n","repo_name":"pupboss/xndian","sub_path":"order_spider/test/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"13"} +{"seq_id":"20235899130","text":"from django.shortcuts import redirect, render, reverse\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.db.models import Case, CharField, Value, When\n\nfrom django.views.generic.base import 
TemplateView\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\n\nfrom unidecode import unidecode # normalize strings to ASCII\n\nfrom alunos.models import Aluno\nfrom alunos.forms import AlunoForm\nfrom turmas.models import Turma\nfrom accounts.models import CustomUser\n\n# Classes to control admin access and success messages\nfrom base.base_admin_permissions import BaseAdminUsersAdSe\n# Constant vars\nfrom base.constants import CURRENT_YEAR\n\n\ndef create_user_after_registration(\n\t\tusername, password, first_name, last_name, department):\n\t"""\n\tCreate a user after 'Aluno' registration\n\t"""\n\tCustomUser.objects.create_user(\n\t\tusername=username,\n\t\tpassword=password,\n\t\tfirst_name=first_name,\n\t\tlast_name=last_name,\n\t\tdepartment=department\n\t)\n\n\ndef data_processing_user_creation(cpf, name_form, department):\n\t"""\n\tProcess form data for user creation\n\t"""\n\n\t# Strip the dots and dash from the CPF so it can be used as the username\n\tcpf_split_1 = cpf.split('.')\n\tcpf_split_2 = ''.join(cpf_split_1).split('-')\n\tcpf_join = ''.join(cpf_split_2)\n\tname_split = name_form.split()\n\tfirst_name = name_split[0]\n\tlast_name = name_split[-1]\n\tpassword = f'{unidecode(first_name).lower()}{cpf_join[0:6]}'\n\n\t# Check if a user with this CPF already exists\n\tcpf_qs = CustomUser.objects.filter(username=cpf_join)\n\n\tif not cpf_qs:\n\t\tcreate_user_after_registration(\n\t\t\tcpf_join, password, first_name, last_name, department)\n\n\n# --- General views --- #\nclass AlunoIndexView(TemplateView):\n\ttemplate_name = 'alunos/index-aluno.html'\n\n\n# --- Admin views --- #\nclass AlunoInfoView(BaseAdminUsersAdSe):\n\tpass\n\n\nclass AlunoNewView(BaseAdminUsersAdSe, CreateView):\n\tmodel = Aluno\n\ttemplate_name = 'alunos/aluno-novo.html'\n\tform_class = AlunoForm\n\tsuccess_url = reverse_lazy('aluno-novo')\n\tsuccess_message = 'Aluno Cadastrado com sucesso'\n\n\tdef post(self, request, *args, **kwargs):\n\t\t"""\n\t\tNecessary for user creation after 'Aluno' registration.\n\t\t"""\n\n\t\tform = self.get_form()\n\n\t\tif form.is_valid():\n\n\t\t\t# Data for user creation after 'aluno' registration\n\t\t\tcpfa = request.POST.get('aluno_cpf')\n\t\t\tcpf1 = request.POST.get('aluno_filiacao1_cpf')\n\t\t\tcpf2 = request.POST.get('aluno_filiacao2_cpf')\n\n\t\t\t# if 'aluno CPF' in form\n\t\t\tif cpfa:\n\t\t\t\t# Data from 'aluno' for user creation\n\t\t\t\tname_a_form = request.POST.get('aluno_nome')\n\n\t\t\t\tdata_processing_user_creation(cpfa, name_a_form, 'al')\n\n\t\t\t# if 'filiação1 CPF' in form\n\t\t\tif cpf1:\n\n\t\t\t\t# Data from Filiação 1 for user creation\n\t\t\t\tname1_form = request.POST.get('aluno_filiacao1_nome')\n\n\t\t\t\tdata_processing_user_creation(cpf1, name1_form, 're')\n\n\t\t\t# if 'filiação2 CPF' in form\n\t\t\tif cpf2:\n\n\t\t\t\t# Data from Filiação 2 for user creation\n\t\t\t\tname2_form = request.POST.get('aluno_filiacao2_nome')\n\n\t\t\t\tdata_processing_user_creation(cpf2, name2_form, 're')\n\n\t\t\treturn self.form_valid(form)\n\n\t\telse:\n\t\t\tcontext = {'form': form}\n\t\t\treturn render(request, self.template_name, context)\n\n\nclass AlunoUpdateView(BaseAdminUsersAdSe, UpdateView):\n\tmodel = Aluno\n\tform_class = AlunoForm\n\ttemplate_name = 'alunos/aluno-alterar.html'\n\tsuccess_message = 'As alterações foram efectuadas com sucesso'\n\n\tdef get_success_url(self):\n\t\t"""\n\t\tRedirect back to the update form of the edited object (update view).\n\t\t"""\n\t\treturn reverse('aluno-alterar', kwargs={'pk': self.object.pk})\n\n\nclass AlunoDeleteView(BaseAdminUsersAdSe, DeleteView):\n\tmodel = Aluno\n\ttemplate_name = 'alunos/aluno-delete.html'\n\tsuccess_message = 'Os dados do aluno(a) foram corretamente apagados da base de dados'\n\n\tdef get_success_url(self):\n\t\t"""\n\t\tOnly necessary to display the success message after delete\n\t\t"""\n\t\tmessages.success(self.request, self.success_message)\n\n\t\treturn reverse('alunos')\n\n\n# --- Lists views --- #\nclass AlunosListView(BaseAdminUsersAdSe, ListView):\n\tmodel = Aluno\n\tpaginate_by = 20\n\ttemplate_name = 'alunos/alunos.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super().get_context_data(**kwargs)\n\n\t\tturmas = Turma.objects.filter(\n\t\t\tturma_ano_letivo=CURRENT_YEAR\n\t\t).annotate(\n\t\t\tano_escolar_display=Case(\n\t\t\t\tWhen(turma_ano_escolar='CR', then=Value('Creche')),\n\t\t\t\tWhen(turma_ano_escolar='G1', then=Value('Maternal I')),\n\t\t\t\tWhen(turma_ano_escolar='G2', then=Value('Maternal II')),\n\t\t\t\tWhen(turma_ano_escolar='G3', then=Value('Maternal III')),\n\t\t\t\tWhen(turma_ano_escolar='G4', then=Value('Jardim I')),\n\t\t\t\tWhen(turma_ano_escolar='G5', then=Value('Jardim II')),\n\t\t\t\tWhen(turma_ano_escolar='1A', then=Value('1º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='2A', then=Value('2º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='3A', then=Value('3º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='4A', then=Value('4º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='5A', then=Value('5º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='6A', then=Value('6º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='7A', then=Value('7º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='8A', then=Value('8º Ano')),\n\t\t\t\tWhen(turma_ano_escolar='9A', then=Value('9º Ano')),\n\t\t\t\toutput_field=CharField()\n\t\t\t)\n\t\t).values_list(\n\t\t\t'ano_escolar_display',\n\t\t\t'turma_nome',\n\t\t\t'turma_etapa_basica',\n\t\t\t'turma_aluno'\n\t\t)\n\n\t\tcontext['turmas'] = turmas\n\n\t\treturn context\n\n# class AlunosListView(BaseAdminUsersAdSe, ListView):\n# \tmodel = Aluno\n# \tpaginate_by = 20\n# \ttemplate_name = 'alunos/alunos.html'\n#\n# \tdef get_context_data(self, **kwargs):\n# \t\tcontext = super().get_context_data(**kwargs)\n#\n# \t\tturmas = Turma.objects.filter(\n# \t\t\tturma_ano_letivo=CURRENT_YEAR\n# \t\t).values_list(\n# \t\t\t'turma_ano_escolar',\n# \t\t\t'turma_nome',\n# \t\t\t'turma_etapa_basica',\n# \t\t\t'turma_aluno'\n# \t\t)\n#\n# \t\tcontext['turmas'] = turmas\n#\n# \t\treturn context\n\n\nclass AlunosEfetivoListView(BaseAdminUsersAdSe, ListView):\n\tmodel = Aluno\n\ttemplate_name = 'alunos/alunos-efetivo.html'\n","repo_name":"Antonio-Neves/Gestao-Escolar","sub_path":"alunos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"13"}
{"seq_id":"8116348921","text":"import logging\nimport config\nfrom aiogram import Bot, Dispatcher, executor, types\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=config.token)\ndp = Dispatcher(bot)\n\n@dp.message_handler(commands=[\"start\"])\nasync def cmd_start(message: types.Message):\n    poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n    poll_keyboard.add(types.KeyboardButton(text=\"Начать тестирование\",\n                                           request_poll=types.KeyboardButtonPollType(type=types.PollType.QUIZ)))\n    poll_keyboard.add(types.KeyboardButton(text=\"Отмена\"))\n    await message.answer(\"Нажмите на кнопку ниже для старта!\", reply_markup=poll_keyboard)\n\n@dp.message_handler(lambda message: message.text == \"Отмена\")\nasync def action_cancel(message: types.Message):\n    remove_keyboard = types.ReplyKeyboardRemove()\n    await message.answer(\"Действие отменено. Введите /start, чтобы начать заново.\", reply_markup=remove_keyboard)\n\nif __name__ == \"__main__\":\n    executor.start_polling(dp, skip_updates=True)\n","repo_name":"ExpLabCourse/startgrambot","sub_path":"start/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"1478009964","text":"from flask import Flask, request, render_template\nimport requests\n\napp = Flask(__name__)\n\n@app.route('/<name>/')\n@app.route('/weather/<name>')\ndef weather(name=None):\n    return render_template('weather.html', name=name)\n@app.route('/', methods =[\"GET\", \"POST\"])\ndef index(): \n    Data = ''\n    error = 0\n    city = ''\n    if request.method == \"POST\": \n        city = request.form.get(\"city\") \n        if city:\n            Api_Key = '9d6004d73c651f1cc942a2e38a45d349'\n            url = \"https://api.openweathermap.org/data/2.5/weather?q=\"+city+\"&units=metric&appid=\"+Api_Key\n            Data = requests.get(url).json()\n        else:\n            error = 1 \n    return render_template('weather.html', data = Data, city = city, error = error)\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"moneshapb/weatherapp","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"7831397220","text":"from typing import Any, Type, Union\n\nfrom elasticsearch_dsl.response import Hit\n\nfrom cl.alerts.models import Alert\nfrom cl.audio.models import Audio\nfrom cl.people_db.models import Education, Person, Position\nfrom cl.search.documents import (\n    AudioDocument,\n    AudioPercolator,\n    DocketDocument,\n    ESRECAPDocument,\n    ParentheticalGroupDocument,\n    PersonDocument,\n    PositionDocument,\n)\nfrom cl.search.models import (\n    Citation,\n    Docket,\n    Opinion,\n    OpinionCluster,\n    Parenthetical,\n    ParentheticalGroup,\n)\n\nESModelType = Union[\n    Citation,\n    Docket,\n    Opinion,\n    OpinionCluster,\n    Parenthetical,\n    ParentheticalGroup,\n    Audio,\n    Person,\n    Position,\n    Education,\n]\n\nESDocumentInstanceType = Union[\n    AudioDocument,\n    ParentheticalGroupDocument,\n    AudioPercolator,\n    PersonDocument,\n    PositionDocument,\n    ESRECAPDocument,\n]\n\nESDocumentClassType = Union[\n    Type[AudioDocument],\n    Type[ParentheticalGroupDocument],\n    Type[AudioPercolator],\n    Type[PersonDocument],\n    Type[PositionDocument],\n    Type[DocketDocument],\n]\n\n\nESDictDocument = dict[str, Any]\n\nPercolatorResponseType = tuple[list[Hit], ESDictDocument]\n\nSaveDocumentResponseType = tuple[str, ESDictDocument]\n\nSearchAlertHitType = tuple[Alert, str, list[ESDictDocument], int]\n","repo_name":"freelawproject/courtlistener","sub_path":"cl/search/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":435,"dataset":"github-code","pt":"13"}
{"seq_id":"26455926862","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nPyCharm show\nJuly 3, 2022\nby littlefean\n\"\"\"\nfrom typing import *\n\nc = \"\"\"\n
\n
周常事件\n倒数日\n\"\"\"\n\n\ndef main():\n    # n = 155\n    # b = 135\n    # print(\"{{{{{]}}}}\")\n    # print(f\"{(n + 15) / 2}=={{=={b}\")\n    # print(u\"SD劳烦赛道囧蛋劳烦;塞道具房东劳烦桑代克\")\n    # print(b\"1011010101010101\")\n    # # 3.8\n    # print(f\"{n = } {b = }\")\n\n    b = \"?\" * 2\n    a = f\"我是人{f'!{b}' * 3}\"\n    print(a)\n    return None\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Littlefean/SmartPython","sub_path":"032 字符串f-string/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"13"}
{"seq_id":"12807068701","text":"#!/usr/local/bin/python\nimport json\nimport os\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nfrom i3SLibrary.api import common\n\n\nclass GoldenImage(object):\n\n    def __init__(self, i3s_client):\n        self.i3s_client = i3s_client\n\n    def add(self, file, param, api=None, headers=None):\n        ''' Save the contents of the headers before modifying them '''\n        header_org = self.i3s_client._headers\n        self.i3s_client._headers = {}\n        self.i3s_client._headers = self.i3s_client.set_def_api_version()\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        # Get the platform name\n        platform = os.name\n        if (platform == 'nt'):\n            if (file == \"valid_file\"):\n                localfile = '%s' % (common.goldenimageuploadfile.get('valid_file_nt'))\n            elif (file == \"invalid_file\"):\n                localfile = '%s' % (common.goldenimageuploadfile.get('invalid_file_nt'))\n            else:\n                logger._log_to_console_and_log_file(\"No file specified\\n\")\n                localfile = \"\"\n        else:\n            if (file == \"valid_file\"):\n                localfile = '%s' % (common.goldenimageuploadfile.get('valid_file_linux'))\n            elif (file == \"invalid_file\"):\n                localfile = '%s' % (common.goldenimageuploadfile.get('invalid_file_linux'))\n            else:\n                logger._log_to_console_and_log_file(\"No file specified\\n\")\n                localfile = \"\"\n        uri = 'https://%s%s/%s' % (self.i3s_client._host, common.uris.get('goldenimage'), param)\n        response = self.i3s_client.post_file(uri=uri, localfile=localfile, headers=self.i3s_client._headers)\n        self.i3s_client._headers = header_org\n        return response\n    \n    def create(self, body, api=None, headers=None):\n        \"\"\"Golden Image Capture method.\"\"\"\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        uri = 'https://%s%s' % (self.i3s_client._host, common.uris.get('goldenimage'))\n        response = self.i3s_client.post(uri=uri, headers=headers,\n                                        body=json.dumps(body))\n        return response\n\n    def get(self, uri=None, api=None, headers=None, param=''):\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        if uri:\n            uri = 'https://%s%s/%s' % (self.i3s_client._host, common.uris.get('goldenimage'), uri)\n        else:\n            uri = 'https://%s%s%s' % (self.i3s_client._host, common.uris.get('goldenimage'), param)\n        response = self.i3s_client.get(uri=uri, headers=headers)\n        return response\n\n    def getvolume(self, uri=None, api=None, headers=None, param=''):\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        if uri:\n            uri = 'https://%s%s' % (self.i3s_client._host, uri)\n        else:\n            uri = 'https://%s%s/%s' % (self.i3s_client._host, common.uris.get('goldenvolume'), param)\n        response = self.i3s_client.get(uri=uri, headers=headers)\n        return response\n\n    def update(self, api, body=None, uri=None, headers=None, param=''):\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        uri = 'https://%s%s%s' % (self.i3s_client._host, uri, param)\n        response = self.i3s_client.put(uri=uri, headers=headers, body=json.dumps(body))\n        return response\n\n    def delete(self, name=None, uri=None, api=None, headers=None, param=''):\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        if uri:\n            uri = 'https://%s%s%s' % (self.i3s_client._host, uri, param)\n        elif name:\n            param2 = '?&filter=\"\\'name\\' == \\'%s\\'\"' % (name)\n            response = self.get(api=api, headers=headers, param=param2)\n            if response['count'] == 0:\n                logger._log('Goldenimage %s does not exist' % (name), level='WARN')\n                return\n            elif response['count'] > 1:\n                msg = \"Filter %s returned more than one result\" % (name)\n                raise Exception(msg)\n            else:\n                uri = 'https://%s%s%s' % (self.i3s_client._host, response['members'][0]['uri'], param)\n        response = self.i3s_client.delete(uri=uri, headers=headers)\n        return response\n\n    def download(self, uri=None, api=None, headers=None, param=''):\n        if api:\n            headers = self.i3s_client._set_req_api_version(api=api)\n        elif not headers:\n            headers = self.i3s_client._headers\n        if uri:\n            uri = 'https://%s%s' % (self.i3s_client._host, uri)\n        else:\n            uri = 'https://%s%s/%s' % (self.i3s_client._host, common.uris.get('goldenimage'), param)\n        response = self.i3s_client.get(uri=uri, headers=headers)\n        param1 = 'download'\n        localfile = '%s' % (common.goldenimagedownloadfile.get('gi_valid_download_file'))\n        if (len(response['members']) != 0):\n            for goldimage in response['members']:\n                if 'id' in goldimage:\n                    gi_downloaduri = 'https://%s%s/%s/%s' % (self.i3s_client._host, common.uris.get('goldenimage'), param1, str(goldimage['id']))\n                    gi_size = goldimage['size']\n                else:\n                    logger._warn(\n                        \"Get GI failed... no goldimage exists... check for parameters\")\n                    return\n            response = self.i3s_client.get_file(uri=gi_downloaduri, localfile=localfile, headers=self.i3s_client._headers)\n            gi_download_size = int(response['headers']['Content-Length'])\n            if (gi_size == gi_download_size):\n                logger._log_to_console_and_log_file(\"\\n Size of goldenimage downloaded matches the uploaded image, successful download...\\n\")\n            else:\n                logger._log_to_console_and_log_file(\"\\n Size of goldenimage downloaded does not match the uploaded image, download failed...\\n\")\n                response['status_code'] = 503\n            return response\n","repo_name":"richa92/Jenkin_Regression_Testing","sub_path":"robo4.2/i3s/i3SLibrary/api/golden_image.py","file_name":"golden_image.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"23854925416","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 19 08:33:05 2016\n\n@author: sthompson\n\"\"\"\nimport rvIO as io\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\n\nfrom astroML.density_estimation import KNeighborsDensity\nfrom astroML.plotting import hist\n\n# Scikit-learn 0.14 added sklearn.neighbors.KernelDensity, which is a very\n# fast kernel density estimator based on a KD Tree. We'll use this if\n# available (and raise a warning if it isn't).\ntry:\n    from sklearn.neighbors import KernelDensity\n    use_sklearn_KDE = True\nexcept:\n    import warnings\n    warnings.warn(\"KDE will be removed in astroML version 0.3. 
Please \"\n                  \"upgrade to scikit-learn 0.14+ and use \"\n                  \"sklearn.neighbors.KernelDensity.\", DeprecationWarning)\n    from astroML.density_estimation import KDE\n    use_sklearn_KDE = False\n    \n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. In that case,\n# you can set usetex to False.\nfrom astroML.plotting import setup_text_plots\nsetup_text_plots(fontsize=8, usetex=False)\n\n#Get the data from the OPS TCEs\n#\ntcefile='/Users/sthompson/kepler/DR25/Robovetter/Versions/OPS/TCEs.txt'\nopsdata=io.readTceInfo(tcefile)\n\nperiods=np.log10(opsdata.period)\n#periods=opsdata.period\nmes=opsdata.mes\n\nnbins=1000\nbins=np.linspace(np.log10(0.5),np.log10(800),1000)\n#bins=np.linspace(0.5,700,nbins)\nkde = KernelDensity(0.01, kernel='gaussian')\nkde.fit(np.array(periods).reshape(-1,1))\ndens_kde = np.exp(kde.score_samples(bins[:,None])) \n\n# Compute density with Bayesian nearest neighbors\nk=100\nN=len(periods)\nnbrs = KNeighborsDensity('bayesian', n_neighbors=k).fit(periods[:,None])\ndens_nbrs = nbrs.eval(bins[:,None]) / N\n\nplt.figure()\nplt.hist(periods,bins=nbins,histtype='step',color='green', normed=True)\nplt.plot(bins,dens_kde,'b-',label='KDE')\nplt.plot(bins,dens_nbrs,'r-',label='KNN')\nplt.legend()\n#%%\nNx=200\nNy=100\nxmin=0.5\nxmax=700\nymin=7\nymax=40\npbins=np.linspace(np.log10(xmin),np.log10(xmax),Nx)\nmbins=np.linspace(np.log10(ymin),np.log10(ymax),Ny)\n\n#pbins=np.linspace(0.5,800,Nx)\n#mbins=np.linspace(7,1000,Ny)\n\ndata=np.array([periods,np.log10(mes)]).transpose()\nbin2=np.vstack(map(np.ravel,np.meshgrid(pbins,mbins))).T\nkde = KernelDensity(0.02, kernel='gaussian')\nlog_dens=kde.fit(data).score_samples(bin2)\ndens_kde = data.shape[0]*np.exp(log_dens).reshape(Ny,Nx)\n#%%\nk=10\nnbrs = KNeighborsDensity('bayesian', n_neighbors=k).fit(data)\ndens2_nbrs=nbrs.eval(bin2)/(data.size)\n#%\nfrom matplotlib.colors import LogNorm\nplt.figure(1)\nplt.clf()\nplt.subplot(211)\nplt.imshow(dens_kde, origin='lower',norm=LogNorm(), \\\n           extent=np.log10([xmin,xmax,ymin,ymax]), aspect='auto',cmap='rainbow')\nplt.subplot(212)\nplt.scatter(data[:,0],data[:,1],s=1,lw=0,c=u'r')\nplt.ylim(np.log10([ymin,ymax])) \nplt.xlim(np.log10([xmin,xmax])) \n\nplt.figure(2)\nplt.clf()\nplt.subplot(211)\nplt.imshow(dens2_nbrs.reshape(Nx,Ny),origin='lower',norm=LogNorm(), \\\n           extent=np.log10([xmin,xmax,ymin,ymax]),aspect='auto',cmap='rainbow')\nplt.subplot(212)\nplt.scatter(data[:,0],data[:,1],s=1,lw=0,c=u'k') \nplt.ylim(np.log10([ymin,ymax])) \nplt.xlim(np.log10([xmin,xmax])) ","repo_name":"mustaric/dr25CatalogGen","sub_path":"tryKernelDensityScript.py","file_name":"tryKernelDensityScript.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"23083115994","text":"#Read a list with 4 grades; then\n#the program should display the grades and the average.\n\nlista = []\n\nfor c in range(0, 4):\n    lista.append(float(input('Digite uma nota: ')))\n\nmed = (sum(lista))/4\nprint(f'Notas: {lista}\\n Média: {med}')\n","repo_name":"nicole-pereira/Python-Desafios","sub_path":"ex4lista.py","file_name":"ex4lista.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"1449719151","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Zhoutao\n# difference between %r and %s\n\ntext = \"I am %d years old.\" % 22\nprint(text)\n\nprint(\"%s\" % (text))\nprint(\"%r\" %(text))\n\nend1 = \"C\"\nend2 = \"h\"\nend3 = \"e\"\nend4 = \"e\"\nend5 = \"s\"\nend6 = \"e\"\nend7 = \"B\"\nend8 = \"u\"\nend9 = \"r\"\nend10 = \"g\"\nend11 = \"e\"\nend12 = \"r\"\n# watch that comma at the end. try removing it to see what happens\nprint (end1 + end2 + end3 + end4 + end5 + end6)\nprint (end7 + end8 + end9 + end10 + end11 + end12)","repo_name":"248808194/python-study","sub_path":"各种书例子习题等/笨方法学习python/q5更多的变量和打印.py","file_name":"q5更多的变量和打印.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"31070882799","text":"import torch\r\nimport torch.utils.data as data\r\nimport os\r\nimport pickle\r\nfrom os.path import join\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nclass DatasetInMemory(torch.utils.data.Dataset):\r\n    def __init__(self, validSet:torch.utils.data.dataset.Subset) -> None:\r\n        super().__init__()\r\n        self.validSet = validSet\r\n        self.X = torch.zeros(size = [len(validSet.indices), validSet.dataset.frame_len, validSet.dataset.channel_num]).float()\r\n        self.y = torch.zeros(size = [len(validSet.indices)]).long()\r\n        # load data\r\n        self.psgStartList = []\r\n        self.subjList = []\r\n        lastSubj = -1\r\n        for index in range(len(validSet.indices)):\r\n            subj = validSet.dataset.items_subj[validSet.indices[index]]\r\n            idx = validSet.dataset.items_idx[validSet.indices[index]]\r\n            root = validSet.dataset.root[0] if subj<15000 else validSet.dataset.root[1] \r\n            path = '{:}\\\\{:06d}\\\\data\\\\{:04d}.pkl'.format(root, subj, idx)\r\n            with open(path, 'rb') as f_data:\r\n                pkl = pickle.load(f_data)\r\n            self.X[index] = torch.from_numpy(pkl).float()\r\n            self.y[index] = torch.tensor(validSet.dataset.y[validSet.indices[index]]).long()\r\n            if lastSubj != subj:\r\n                lastSubj = subj\r\n                self.psgStartList += [index]\r\n                self.subjList += [subj]\r\n        self.psgStartList += [index + 1]\r\n\r\n    def __getitem__(self, index):\r\n        if index in self.psgStartList:\r\n            seq_idx = [index, index, index, index + 1, index + 2]\r\n        elif index - 1 in self.psgStartList:\r\n            seq_idx = [index - 1, index - 1, index, index + 1, index + 2]\r\n        elif index + 1 in self.psgStartList:\r\n            seq_idx = [index - 2, index - 1, index, index, index]\r\n        elif index + 2 in self.psgStartList:\r\n            seq_idx = [index - 2, index - 1, index, index + 1, index + 1]\r\n        else:\r\n            seq_idx = [index - 2, index - 1, index, index + 1, index + 2]\r\n        # seq_idx = [index, index, index, index, index]\r\n        X = self.X[seq_idx].view([-1, self.validSet.dataset.channel_num])\r\n        X = torch.clip(X, -1000, 1000)\r\n        X = torch.swapaxes(X, 0, 1)\r\n        y = self.y[index]\r\n        subj = self.validSet.dataset.items_subj[self.validSet.indices[index]]\r\n        return X, y, subj\r\n\r\n    def __len__(self):\r\n        return len(self.validSet.indices)\r\n\r\nclass XY_dataset_N2One(data.Dataset):\r\n    def __init__(self, cfg, tvt = 'train', serial_len = 5):\r\n        ###\r\n        # tvt: 'train', 'valid', 'test', 'all'\r\n        ###\r\n        super(XY_dataset_N2One, self).__init__()\r\n        self.serial_len = serial_len\r\n        self.tvt = tvt\r\n        self.frame_len = cfg.freq * 30\r\n        self.channel_num = cfg.SWIN.IN_CHANS\r\n        dataset = cfg.dataset\r\n        if type(dataset) is str:\r\n            savName = dataset\r\n        dataset = [dataset]\r\n        else:\r\n            savName = 'Custom{:02d}'.format(len(dataset))\r\n        redir_cache, redir_root = cfg.redir_cache, cfg.redir_root\r\n        if not redir_cache:\r\n            
cache_path = join('experiments/{:}/prepared_data/{:}_{:}_cache.pkl'.format(cfg.experiment, tvt, savName))\r\n else:\r\n cache_path = join(redir_cache, '{:}_{:}_cache.pkl'.format(tvt, savName))\r\n #if cache\r\n if os.path.exists(cache_path):\r\n with open(cache_path, 'rb') as f:\r\n cache = pickle.load(f)\r\n if not redir_root:\r\n self.root = cache['root']\r\n else:\r\n self.root = redir_root\r\n self.items_psg = cache['items_subj']\r\n self.items_idx = cache['items_idx']\r\n self.boundary = cache['fences']\r\n self.y = cache['y']\r\n self.len = len(self.items_psg)\r\n return\r\n #else\r\n \r\n # subject selector\r\n if not redir_root:\r\n self.root = [r'G:\\data\\filtered_data_128\\subjects', r'E:\\data\\filtered_data_128\\subjects']\r\n else:\r\n self.root = redir_root\r\n self.subjIdx = {\r\n 'SHHS1': (0, 5666),\r\n 'SHHS2': (5667, 8287),\r\n 'CCSHS': (8288, 8802),\r\n 'SOF': (8803, 9251),\r\n 'CFS': (9252, 9973),\r\n 'MROS1': (9974, 12851),\r\n 'MROS2': (12852, 13859),\r\n 'MESA': (13860, 15893),\r\n 'HPAP1':(15894, 16083),\r\n 'HPAP2':(16084, 16138),\r\n 'ABC':(16139, 16269),\r\n 'NCHSDB':(16270, 17251),\r\n 'MASS13':(17252, 17366),\r\n 'HMC':(17367, 17520),\r\n 'SSC':(17521, 18288),\r\n 'CNC':(18289, 18365),\r\n 'PHY':(18366, 19358),\r\n 'DOD':(19359, 19439),\r\n 'DHC':(19440, 19521),\r\n 'DCSM':(19522, 19776),\r\n 'SHHS1ex':(19777,19902),\r\n 'WSC':(19903, 22436),\r\n 'ISRC':(22437, 22505)\r\n }\r\n\r\n # split\r\n psg_paths = []\r\n if tvt == 'all':\r\n for d in dataset:\r\n for i in range(self.subjIdx[d][0], self.subjIdx[d][1] + 1):\r\n root = self.root[0] if i<15000 else self.root[1]\r\n psg_path = join(root, '{:06d}'.format(i))\r\n assert os.path.exists(psg_path)\r\n psg_paths.append(psg_path)\r\n else:\r\n from tools.data_tools import Split\r\n train_idx, valid_idx, test_idx = Split().split_dataset(dataset)\r\n psg_idx = train_idx if tvt == 'train' else valid_idx if tvt == 'valid' else test_idx if tvt == 'test' else None\r\n for idx in psg_idx:\r\n root = self.root[0] if idx<15000 else self.root[1]\r\n psg_path = join(root, '{:06d}'.format(idx))\r\n assert os.path.exists(psg_path)\r\n psg_paths.append(psg_path)\r\n\r\n # generate idx\r\n self.items_psg, self.items_idx, self.boundary, self.y = [], [], [0], []\r\n for psg_path in psg_paths:\r\n frameNum = len(os.listdir(join(psg_path, 'data')))\r\n if os.path.exists(join(psg_path, 'stages.pkl')):\r\n with open(join(psg_path, 'stages.pkl'), 'rb') as f:\r\n anno = pickle.load(f)\r\n else:\r\n anno = torch.zeros(frameNum)\r\n for i in range(frameNum):\r\n self.items_idx.append(i)\r\n self.items_psg.append(int(psg_path[-6:]))\r\n self.y.append(int(anno[i]))\r\n self.boundary += [len(self.y)]\r\n\r\n self.len = len(self.items_psg)\r\n # save cache. 
TODO: replace variable name: 'items_subj', 'fences'\r\n cache = {'root':self.root, 'items_subj': self.items_psg,'items_idx':self.items_idx, 'fences':self.boundary, 'y': self.y}\r\n with open(cache_path, 'wb') as f:\r\n pickle.dump(cache, f)\r\n\r\n def _random_psg_split(self, subj_paths):\r\n train_paths, valid_paths = train_test_split(subj_paths, train_size = 0.8, random_state = 0)\r\n valid_paths, test_paths = train_test_split(valid_paths, train_size = 0.5, random_state = 0)\r\n \r\n tvt2paths = {'train':train_paths ,'valid':valid_paths, 'test':test_paths, 'all':subj_paths}\r\n return tvt2paths[self.tvt]\r\n\r\n def __getitem__(self, index):\r\n # with torch.autograd.profiler.profile(enabled=True) as prof:\r\n assert self.serial_len == 5 #TODO\r\n index_pkl = self.items_idx[index]\r\n # TODO run too slow in this step \r\n if index in self.boundary:\r\n seq_idx = [index_pkl, index_pkl, index_pkl, index_pkl + 1, index_pkl + 2]\r\n elif index - 1 in self.boundary:\r\n seq_idx = [index_pkl - 1, index_pkl - 1, index_pkl, index_pkl + 1, index_pkl + 2]\r\n elif index + 1 in self.boundary:\r\n seq_idx = [index_pkl - 2, index_pkl - 1, index_pkl, index_pkl, index_pkl]\r\n elif index + 2 in self.boundary:\r\n seq_idx = [index_pkl - 2, index_pkl - 1, index_pkl, index_pkl + 1, index_pkl + 1]\r\n else: \r\n seq_idx = [index_pkl - 2, index_pkl - 1, index_pkl, index_pkl + 1, index_pkl + 2]\r\n subj = self.items_psg[index]\r\n root = self.root[0] if subj<15000 else self.root[1] \r\n paths = ['{:}\\\\{:06d}\\\\data\\\\{:04d}.pkl'.format(root, subj, idx) for idx in seq_idx]\r\n X = torch.zeros(size = [self.serial_len * self.frame_len, self.channel_num]).float()\r\n for i in range(self.serial_len):\r\n with open(paths[i], 'rb') as f_data:\r\n pkl = pickle.load(f_data)\r\n X[i*self.frame_len:(i+1)*self.frame_len,:] = torch.from_numpy(pkl).float()\r\n X = torch.clip(X, -1000, 1000)\r\n X = torch.swapaxes(X, 0, 1)\r\n y = torch.tensor(self.y[index]).long()\r\n\r\n # print(prof.key_averages().table(sort_by='self_cpu_time_total'))\r\n return X, y, subj\r\n\r\n def __len__(self):\r\n return self.len\r\n\r\n\r\nif __name__ == '__main__':\r\n pass","repo_name":"DiZhang-XDU/SwinSleep","sub_path":"tools/dataset_cohort_5ep.py","file_name":"dataset_cohort_5ep.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"} +{"seq_id":"22468744393","text":"import qrcode\nfrom qrcode.image.svg import SvgPathImage\nfrom qrcode.image.styles.moduledrawers.svg import SvgPathCircleDrawer\nfrom qrcode.image.styles.moduledrawers.svg import SvgPathSquareDrawer\n\n\ndef get_svg_string(data: str, style: str, bg_color, fill_color) -> str:\n # create drawer either square or circle\n drawer = SvgPathCircleDrawer() if style == \"circle\" else SvgPathSquareDrawer()\n\n qr = qrcode.QRCode(image_factory=SvgPathImage)\n qr.add_data(data)\n qr.make(fit=True)\n img = qr.make_image(module_drawer=drawer)\n result = img.to_string()\n result = str(result)\n\n # add background color\n if bg_color is not None:\n if len(bg_color) == 0:\n bg_color = \"rgba(0, 0, 255, 0)\"\n idx = result.find(\"'\n result = result[:idx] + rect + result[idx:]\n\n # add fill color\n if fill_color is not None:\n if len(fill_color) == 0:\n fill_color = 'fill=\"#000000\"'\n result = result.replace('fill=\"#000000\"', f'fill=\"{fill_color}\"')\n\n return 
result[2:-1]\n","repo_name":"putuwaw/qr-generator","sub_path":"module/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"20232065795","text":"# -*- coding: utf-8 -*-\n\nimport lxml\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# ignore alert\n\ndef fetch_emoji(html_path):\n    i = 0\n    lll = []\n    with open(html_path, 'r') as f:\n        html_str = f.read()\n        soup = BeautifulSoup(html_str, 'lxml')\n        span_list = soup.findAll('span', class_='icon-name mat-caption')\n        print(len(span_list))\n        for span in span_list:\n            name = str(span.text)\n            name = name.replace(\" \", \"_\").lower()\n            if i < 100 and name not in lll:\n                i += 1\n                lll.append(name)\n        print(lll)\n\n\nfetch_emoji(\"md_action.html\")\nfetch_emoji(\"md_audio_video.html\")\nfetch_emoji(\"md_device.html\")\nfetch_emoji(\"md_image.html\")\nfetch_emoji(\"md_maps.html\")\n","repo_name":"weikeet/daily-scripts","sub_path":"script/icons/parse_popular_icons.py","file_name":"parse_popular_icons.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"13"}
{"seq_id":"39859621296","text":"import base64\nimport datetime\nimport hashlib\nimport itertools\nimport json\nimport os\nimport random\nimport re\nimport requests\nimport signal\nimport sys\n\nfrom collections import Counter\n\n#_UPSTREAM_GAME_URL = 'https://www.powerlanguage.co.uk/wordle' :'(\n_UPSTREAM_GAME_URL = 'https://www.nytimes.com/games/wordle'\n\nclass Words:\n\n    __FIRST_DAY = datetime.date(2021, 6, 19)\n\n    __OBFUSCATIONS = {'Base16': {'encode': base64.b16encode, 'decode': base64.b16decode},\n                      'Base32': {'encode': base64.b32encode, 'decode': base64.b32decode},\n                      'Base64': {'encode': base64.b64encode, 'decode': base64.b64decode},\n                      'Base85': {'encode': base64.b85encode, 'decode': base64.b85decode},\n                      'Ascii85': {'encode': base64.a85encode, 'decode': base64.a85decode}}\n    __DEFAULT_OBFUSCATION = 'Ascii85'\n\n    @classmethod\n    def __calc_day_offset(cls, date=None):\n        # compute the default at call time; a datetime.date.today() default\n        # argument would be evaluated only once, when the class body runs\n        if date is None:\n            date = datetime.date.today()\n        return (date - cls.__FIRST_DAY).days\n\n    @classmethod\n    def __obfuscate(cls, word_list, codec):\n        return [cls.__OBFUSCATIONS[codec]['encode'](w.encode()).decode()\n                for w in word_list]\n\n    @classmethod\n    def __deobfuscate(cls, obfuscated_word_list, codec):\n        return [cls.__OBFUSCATIONS[codec]['decode'](ow.encode()).decode()\n                for ow in obfuscated_word_list]\n\n    def __init__(self, file_path, force_download=False):\n        self.__answer_series = []\n        self.__additional_valid_guesses = set()\n        self.__word_length = None\n        if force_download or not os.path.exists(file_path):\n            self.__download_lists_and_write_file(file_path)\n        else:\n            self.__read_file(file_path)\n\n    def __read_file(self, file_path):\n        with open(file_path, 'r') as f:\n            words = json.load(f)\n        if not all(list_name in words for list_name in ('answer_series', 'additional_valid_guesses')):\n            raise\n        if 'obfuscation' in words:\n            if words['obfuscation'] not in self.__OBFUSCATIONS:\n                raise\n            for list_name in ('answer_series', 'additional_valid_guesses'):\n                words[list_name] = self.__deobfuscate(words[list_name], words['obfuscation'])\n        lengths = {len(w)\n                   for w in itertools.chain(words['answer_series'],\n                                            words['additional_valid_guesses'])}\n        if len(lengths) > 1:\n            raise\n        self.__answer_series = words['answer_series']\n        self.__additional_valid_guesses = set(words['additional_valid_guesses'])\n        self.__word_length = lengths.pop()\n\n    def __download_lists_and_write_file(self, file_path):\n        def get_text_or_abort(url):\n            rs = requests.get(url)\n            if not rs.ok:\n                print(f'Failed to get URL \"{url}\", with status {rs.status_code}!',\n                      file=sys.stderr)\n                return None\n            return rs.text\n\n        # download page to discover URL for JavaScript file\n        html = get_text_or_abort(_UPSTREAM_GAME_URL)\n        if not html:\n            raise\n        matches = re.findall(r'